Dataset columns:

  query            string  (length 9 to 9.05k)
  document         string  (length 10 to 222k)
  metadata         dict
  negatives        list    (length 30)
  negative_scores  list    (length 30)
  document_score   string  (length 4 to 10)
  document_rank    string  (2 classes)
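A minimal sketch of loading and inspecting rows shaped like this, assuming they are available as a local JSON Lines file; the file path below is a placeholder, not something stated by the preview. The sample rows that follow use the column layout above.

# Sketch: load the rows and inspect one example.
# "data.jsonl" is a placeholder path; point it at the actual dataset files.
from datasets import load_dataset

ds = load_dataset("json", data_files="data.jsonl", split="train")

row = ds[0]
print(row["query"])                 # natural-language description (docstring-style)
print(row["document"][:200])        # the matching code snippet (positive)
print(len(row["negatives"]))        # 30 hard-negative code snippets
print(len(row["negative_scores"]))  # 30 similarity scores aligned with the negatives
print(row["document_score"], row["document_rank"])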
Get the absolute property value with propertyname for the given atom.
def GetRelativeAtomicProperty(element='C', propertyname='m'):
    CpropertyDic = float(GetAbsoluteAtomicProperty('C', propertyname))
    PropertyDic = float(GetAbsoluteAtomicProperty(element, propertyname))
    return PropertyDic/CpropertyDic
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetAbsoluteAtomicProperty(element='C',propertyname='m'):\n\n if propertyname == \"m\":\n return periodicTable.GetAtomicWeight(element)\n elif propertyname == \"V\":\n r = periodicTable.GetRvdw(element)\n V = 4/3*pi*r**3\n return V\n elif propertyname == \"Z\":\n return periodicTable.GetAtomicNumber(element)\n elif propertyname == \"Rv\":\n return periodicTable.GetRvdw(element)\n elif propertyname == \"Rc\":\n return periodicTable.GetRb0(element)\n elif propertyname == \"Zv\":\n return periodicTable.GetDefaultValence(element)\n else:\n PropertyDic = AtomProperty[element]\n return PropertyDic[propertyname]", "def get_property(self,name):\n return self.dp.get_property(name)", "def find_atom_by_property(atoms: t.List[Atom], property_value: any, property_name: str = \"id\") -> Atom:\r\n\r\n if (all([hasattr(a, property_name) for a in atoms])):\r\n all_hits = list(filter(lambda a: a.id == property_value, atoms))\r\n if len(all_hits) == 0:\r\n raise ValueError('There is no atom with id: ' + str(id))\r\n elif len(all_hits) > 1:\r\n raise ValueError('There is more than one atom with id: ' + str(id))\r\n else:\r\n return all_hits[0]\r\n else:\r\n raise ValueError(\"atom does not have Property: \" + property_name + \" but: \" + str(vars(atoms[0])))", "def getAtom(self, atomname):\n if self.hasAtom(atomname):\n return self.atoms[atomname]\n else:\n return None", "def get_atom(self, name, alt_loc = None):\n if alt_loc:\n if self.alt_loc_dict.has_key(name):\n altloc = self.alt_loc_dict[name]\n if altloc.has_key(alt_loc):\n return altloc[alt_loc]\n return None\n else:\n if not self.atom_dict.has_key(name):\n return None\n return self.atom_dict[name]", "def get_window_property(self, connection, window, atom):\n self.logger.debug(\"Getting property %s from window %s\", atom, window)\n cookie = connection.core.GetProperty(\n False,\n window,\n atom,\n GetPropertyType.Any,\n 0,\n 2 ** 32 - 1\n )\n reply = cookie.reply()\n return self.get_property_value(reply)", "def value_of_css_property(self, name):\n return self.element.value_of_css_property(name)", "def get_property(self, property):\n return self.shell([\"getprop\", property])", "def getprop(self, prop_name):\n return self.shell(\"getprop %s\" % prop_name)", "def getProperty(propname):", "def getprop(self, prop_name):\n return self.shell('getprop %s' % prop_name).decode('utf-8').strip()", "def get_equivalent_atom(self, atom):\n try:\n return self.atom_dict[atom.name]\n except KeyError:\n return None", "def getProperty(unique_name):", "def get_dynamic_property(vim, mobj, type, property_name):\n properties = get_dynamic_properties(vim, mobj, [property_name], type)\n property_value = None\n if property_name in properties:\n property_value = properties.get(property_name)\n return property_value", "def get_equivalent_atom(self, atom):\n try:\n return self.chain_dict[atom.chain_id].fragment_dict[atom.fragment_id].atom_dict[atom.name]\n except KeyError:\n return None", "def get_equivalent_atom(self, atom):\n try:\n return self.fragment_dict[atom.fragment_id].atom_dict[atom.name]\n except KeyError:\n return None", "def get_property(self, name):\n if (not name in self.properties):\n raise KeyError(\"Key '\" + name + \"' not found\")\n return self.properties[name]", "def get(self, prop):\r\n prop_parts = prop.split(\".\")\r\n val = None\r\n for part in prop_parts:\r\n if val is None:\r\n val = self.obj.get(part)\r\n else:\r\n val = val.get(part)\r\n return val", "def get_property_value(prop, paths):\n\n data = parse_config(paths)\n return data.get(prop)", 
"def get_property_value(self, property, db):\n try:\n for p in self.properties:\n if p.idProperty == int(property):\n return p.get_value()\n except:\n return None", "def get_equivalent_atom(self, atom):\n try:\n return self.model_dict[atom.model_id].chain_dict[atom.chain_id].fragment_dict[atom.fragment_id].atom_dict[atom.name]\n except KeyError:\n return None", "def get_prop(node, name):\n title = node.get(\"title\")\n props = title.split(\";\")\n for prop in props:\n (key, args) = prop.split(None, 1)\n args = args.strip('\"')\n if key == name:\n return args\n return None", "def get_property(prop, project):\n result = re.search(\n r'{}\\s*=\\s*[\\'\"]([^\\'\"]*)[\\'\"]'.format(prop),\n open(project + \"/__init__.py\").read(),\n )\n return result.group(1)", "def get_value (property):\n assert is_iterable_typed(property, basestring) or isinstance(property, basestring)\n return replace_grist (property, '')", "def get_dynamic_property(vim, mobj, type, property_name):\r\n obj_content = \\\r\n get_object_properties(vim, None, mobj, type, [property_name])\r\n property_value = None\r\n if obj_content:\r\n dynamic_property = obj_content[0].propSet\r\n if dynamic_property:\r\n property_value = dynamic_property[0].val\r\n return property_value", "def propget(self, name):\r\n res = self._svn('propget', name)\r\n return res[:-1] # strip trailing newline\r", "def prop(self):\n return getattr(self, name)", "def do_get_property(self, spec):\n attribute = self.find_attribute(spec.name)\n if attribute is not None and isinstance(attribute, property):\n return attribute.fget(self)\n else:\n raise ValueError(\"No such property\", spec.name)", "def getprop(name):\n return _slp.getprop(name)", "def get_owner(self, property_name):\n\n property_owner = self.db.read_value(property_name, \"owner\")\n return property_owner" ]
[ "0.71773916", "0.6322747", "0.61566937", "0.60482395", "0.60195005", "0.5999082", "0.5951637", "0.59448093", "0.5930747", "0.5896596", "0.58638", "0.585692", "0.5847448", "0.58387524", "0.5810104", "0.57515085", "0.574562", "0.5737195", "0.5735811", "0.5706954", "0.5691982", "0.56782854", "0.56739104", "0.5666777", "0.56634206", "0.5662688", "0.5657446", "0.56308323", "0.55689484", "0.5559896" ]
0.65785086
1
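Each row's metadata declares a single triplet objective over (query, document, negatives). A minimal sketch of what such an objective can look like during training is below; it uses an InfoNCE-style cross-entropy with the positive document at index 0, and random vectors stand in for an encoder, since the preview does not prescribe a loss, a model, or a temperature.

# Sketch of the declared triplet objective: score the query against its positive
# document and the 30 negatives, then apply cross-entropy with the positive at index 0.
# Random vectors stand in for real query/code embeddings (assumption, not from the dataset).
import torch
import torch.nn.functional as F

def info_nce(query_emb, doc_emb, neg_embs, temperature=0.05):
    # query_emb: (d,), doc_emb: (d,), neg_embs: (n, d)
    candidates = torch.cat([doc_emb.unsqueeze(0), neg_embs], dim=0)           # (1 + n, d)
    sims = F.cosine_similarity(query_emb.unsqueeze(0), candidates) / temperature
    target = torch.tensor([0])                                                # positive sits at index 0
    return F.cross_entropy(sims.unsqueeze(0), target)

d, n = 256, 30
loss = info_nce(torch.randn(d), torch.randn(d), torch.randn(n, d))
print(float(loss))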
Picks the best model based on KFold validation: Ridge (lr), Random Forest Regressor (rf), Support Vector Machine Regressor (svr).
def pick_model(self):
    self.x = self.train[self.use_columns]
    try:
        self.x = pd.get_dummies(self.x)
    except:
        pass  # if no categorical features
    self.final_columns = self.x.columns
    print(self.x.columns)
    self.scaler = StandardScaler()
    self.x = self.scaler.fit_transform(self.x)
    self.y = self.train['y']
    if len(np.unique(self.y)) < 50:
        print('Consider using classification, probably not continuos target variable!')
    # for picking the best model
    lr = Ridge(max_iter=1500)
    rf = RandomForestRegressor(n_estimators=500, max_depth=20, min_samples_leaf=3,
                               max_features='auto', n_jobs=-1)
    svr = SVR(max_iter=-1)
    self.models = {'lr': lr, 'rf': rf, 'svr': svr}
    self.scores = {'lr': [], 'rf': [], 'svr': []}
    print('selecting model')
    for i, (train_index, test_index) in enumerate(self.kf.split(self.x, self.y)):
        x_tr, x_val = self.x[train_index], self.x[test_index]
        y_tr, y_val = self.y[train_index], self.y[test_index]
        if len(x_tr) > 10000:
            print('reduced train size')
            y_tr.index, y_val.index = range(len(y_tr)), range(len(y_val))
            mask_train = np.random.choice(range(len(x_tr)), size=10000)
            x_tr, y_tr = x_tr[mask_train], y_tr[mask_train]
        for k, model in self.models.items():
            print('fold: ', i+1)
            print('model: ', k)
            model = clone(self.models[k])
            model.fit(x_tr, y_tr)
            p = model.predict(x_val)
            # score = mean_squared_error(y_val, p)
            score = mean_absolute_error(y_val, p)
            self.scores[k] = self.scores[k] + [score]
    self.best_score = 9e10
    self.old_score = 9e10
    self.best_model = ''
    self.old_model = ''
    for k, l in self.scores.items():
        mean = np.mean(l)
        if mean < self.best_score:
            self.old_score = self.best_score
            self.old_model = self.best_model
            self.best_score = mean
            self.best_model = k
    print(self.best_model, self.best_score)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def select_best_model(self, df):\n params = {\n # check whether unigrams give good results or bigrams.\n \"vectorizer__vectorizer\": [self.feature_name_to_class[self.feature]],\n \"vectorizer__ngram_range\": [(1,1), (1,2), (2,2)],\n # check pca parameters\n \"pca__n_components\": [30, 40, 50],\n # stemmer to use for preprocessing\n \"preprocessor__stemmer\": [self.stemmer_name_to_method[self.stemmer_method]],\n 'extractor__punctuations': [True, False]\n\n }\n # select the tunable parameters according to the model\n if self.model == MODELS_SVM:\n params.update({\n 'model__kernel': ['linear'],\n 'model__gamma': [1e-3, 1e-4],\n 'model__C': [0.5, 1, 10]\n })\n elif self.model == MODELS_RANDOM_FOREST:\n params.update({\n 'model__n_estimators': [5, 10, 15]\n })\n elif self.model == MODELS_LOGISTIC_REGRESSION:\n params.update({\n 'model__C': [1.0, 10],\n 'model__tol': [0.001, 0.01, 0.1]\n })\n clf = GridSearchCV(self.get_pipeline(), params, cv=5,\n scoring='%s_macro' % self.training_param)\n X = df.drop([\"Value\"], axis=1)\n Y = df[\"Value\"].values\n clf.fit(X, Y)\n print clf.best_params_\n # print clf.best_estimator_\n print clf.best_score_", "def fit_model(X, y,metric, model):\n cv_sets = ShuffleSplit(n_splits=10, test_size= 0.2, train_size= 0.8, random_state=42)\n \n\n if model == 'regression_tree':\n\n clf = DecisionTreeRegressor(random_state=42)\n\n # Creating a dictionary for the parameter 'max_depth' with a range from 1 to 10\n param = {\n 'max_depth': [1,2,3,4,5,6,7,8,9,10]\n }\n\n\n elif model == 'ridge':\n clf = Ridge(random_state=42, fit_intercept=False)\n param = {\n 'alpha': [0, 0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000]\n }\n\n\n if metric == 'r2':\n scoring_fnc = make_scorer(r_squared,greater_is_better=True)\n\n elif metric == 'rss':\n scoring_fnc = make_scorer(rss, greater_is_better=False)\n\n # Creating the grid search cv object --> GridSearchCV()\n grid = GridSearchCV(estimator=clf, param_grid=param, cv=cv_sets,scoring= scoring_fnc)\n\n # Fit the grid search object to the data to compute the optimal model\n grid = grid.fit(X, y)\n\n # Return the optimal model after fitting the data\n return grid.best_estimator_", "def train_model(X,y,params,model,scale=False,nfolds=10,n_jobs=8,cv = None,n_params=100):\n\t\n\t# Do we need to scale the feature?\n\tif scale: X = maxabs_scale(X)\n\telse: X = X.apply(lambda x: pd.to_numeric(x, errors='coerce'))\n\t\n\t# Make sure we make a hardcopy; so that it is just not a reference between crossv_mod and ret_mod\n\tcrossv_mod = clone(model)\n\tret_mod = clone(model)\n\n\tgrid = RandomizedSearchCV(model, params,cv=cv,scoring='mean_absolute_error',verbose=0,n_jobs=n_jobs,n_iter=n_params,refit=False)\n\tgrid.fit(X,y)\n\tprint \"Parameters chosen:\"\n\tprint grid.best_params_\n\tprint \"Best score:\"\n\tprint grid.best_score_\n\n\t# Use the same parameters for the training set to get CV predictions\n\tcv_pred = cv\n\tcrossv_mod.set_params(**grid.best_params_)\n\tpreds = cross_val_predict(crossv_mod, X=X, y=y, cv=cv_pred, n_jobs=n_jobs, verbose=0)\n\n\t# Train the final model\n\tret_mod.set_params(**grid.best_params_)\n\tret_mod.fit(X,y)\n\n\treturn(ret_mod,preds)", "def best_model_based_selector(train_features, test_features, train_similarity_target, test_similarity_target, regressor):\n\tmodel_based_score = 0\n\tscaling_factors = [\"0.25*mean\", \"0.5*mean\", \"median\", \"1.25*mean\", \"1.5*mean\"]\n\t# scaling_factors = [\"0.5*mean\", \"median\"]\n\tmodel_based_selector = None\n\tmodel_based_train_features_selected = 
None\n\tmodel_based_test_features_selected = None\n\n\tfor factor in scaling_factors:\n\t\tprint(factor)\n\t\ttemp_model_based_selector = SelectFromModel(RandomForestRegressor(n_estimators=100), threshold=factor)\n\t\ttemp_model_based_selector.fit(train_features, train_similarity_target)\n\t\ttemp_model_based_train_features_selected = temp_model_based_selector.transform(train_features)\n\t\ttemp_model_based_test_features_selected = temp_model_based_selector.transform(test_features)\n\n\t\tregressor.fit(temp_model_based_train_features_selected, train_similarity_target)\n\n\t\ttemp_score = regressor.score(temp_model_based_test_features_selected, test_similarity_target)\n\t\tprint(\"The score on the selected features (Model Based Selector): %.3f\" % temp_score)\n\n\t\tif temp_score > model_based_score:\n\t\t\tmodel_based_score = temp_score\n\t\t\tmodel_based_selector = temp_model_based_selector\n\t\t\tmodel_based_train_features_selected = temp_model_based_train_features_selected\n\t\t\tmodel_based_test_features_selected = temp_model_based_test_features_selected\n\n\tmodel_based_mask = model_based_selector.get_support()\n\tprint(\"This is the model based mask: \")\n\tprint(model_based_mask)\n\n\treturn model_based_selector, model_based_score, model_based_train_features_selected, model_based_test_features_selected, model_based_mask", "def train_model_and_score(X,y_train):\n scaler = MinMaxScaler()\n X_scaled = scaler.fit_transform(X)\n\n #chose model\n # model = RandomForestClassifier()\n model = GradientBoostingClassifier()\n\n #split train/test\n x_train,x_test,y_train,y_test = train_test_split(X_scaled,y_train,test_size=0.33,random_state =42)\n\n #train\n model.fit(x_train,y_train)\n\n #evaluation\n sc = model.score(x_test,y_test), model.score(x_train,y_train)\n\n print(sc)\n\n return model,sc", "def train_best_model():\r\n best_model = LSTM_train_best_model()\r\n return best_model", "def select_model(self, verbose = 0):\n\n #first get list of the available models\n tickers = [x.split('\\\\')[1] for x,_,_ in os.walk(ModelLoader.root_path()) if len(x.split('\\\\')) > 1 ]\n \n #now let find the best model\n best_model = None\n lowest_test_error = 2.0\n \n #prepare our sequence data\n for idx, ticker in enumerate(tickers,1):\n try: \n loaded_model = ModelLoader(ticker)\n seq_obj = MultiSequence(self.symbol,loaded_model.window_size,1)\n testing_error =loaded_model.model.evaluate(seq_obj.X,seq_obj.y, verbose=0)\n \n if verbose == 1:\n print(\">{0:>3}) Now checking model: {1:<5} Test error result: {2:.4f}\".format(idx,ticker, testing_error))\n \n if lowest_test_error > testing_error:\n best_model = loaded_model\n lowest_test_error = testing_error\n except :\n pass\n \n #save best model\n self.__best_model = best_model\n self.__test_error = lowest_test_error\n if verbose in[1,2]:\n print(\"==> Best model ticker {0:} with error of {1:.4f}\".format(self.__best_model.ticker,self.__test_error))", "def run_model(self):\r\n param_grid = { \r\n 'n_estimators': [30, 60],\r\n 'max_features': ['sqrt', 'log2'],\r\n 'max_depth':[5,38],\r\n 'min_samples_split' : [12,40],\r\n 'min_samples_leaf' :[12,40], \r\n }\r\n\r\n CV_rfc = GridSearchCV(estimator=self.model, param_grid=param_grid, cv= 7)\r\n CV_rfc.fit(self.Xtrain, self.ytrain)\r\n #print(CV_rfc.best_params_)\r\n self.model = CV_rfc.best_estimator_\r\n return", "def set_model(self, model):\n '''returns a model'''\n if self.model==\"Lasso\":\n modelo = Lasso()\n elif self.model==\"Ridge\":\n modelo = Ridge()\n elif self.model == \"RandomForest\":\n modelo = 
RandomForestRegressor(random_state = 42)\n else:\n if self.model == \"XGBoost\":\n modelo = xgb.XGBRegressor()\n #modelo = xgb.XGBRegressor(booster = 'gbtree', objective ='reg:squarederror',\n # colsample_bytree = 0.3, learning_rate = 0.35,\n # max_depth = 10, alpha = 0.1, n_estimators = 500)\n\n\n return modelo", "def best_cv_training(args: dict):\n assert len(args.learning_rates) == args.s, (\n \"learning_rates should be of size s\"\n )\n np.random.seed(args.seed) # set seed\n dataloader_pairs = get_dataloaders(\n args.folder, args.dimensions, args.batch_size, args.s, args.num_workers\n )\n train_losses, val_losses, metrics = [], [], []\n best_metric = -1\n final_model = None\n for i, pair in enumerate(dataloader_pairs):\n mod, train_loss, val_loss, metric = train(args, i, pair)\n train_losses.append(train_loss)\n val_losses.append(val_loss)\n metrics.append(metric)\n if metric > best_metric:\n best_metric = metric\n final_model = mod\n else:\n del mod # remove from GPU resources\n return final_model, best_metric", "def fit_model(X_train, X_test, y_train, y_test, model):\n \n if model == 'LinearRegression':\n \n regressor=LinearRegression()\n regressor.fit(X_train,y_train)\n y_pred =regressor.predict(X_test)\n r2 = r2_score(y_test, y_pred)\n \n elif model == 'Lasso':\n \n lasso = Lasso()\n lasso.fit(X_train, y_train)\n lasso_pred = lasso.predict(X_test)\n r2 = r2_score(y_test, lasso_pred)\n\n elif model == 'Ridge':\n \n ridge = Ridge()\n ridge.fit(X_train, y_train)\n ridge_pred = ridge.predict(X_test)\n r2 = r2_score(y_test, ridge_pred)\n \n \n else:\n model = make_pipeline(PolynomialFeatures(2), LinearRegression())\n model.fit(X_train, y_train)\n y_pred = model.predict(X_test)\n r2= r2_score(y_test,y_pred)\n\n\n return r2", "def test_find_best_model(self):\n parameters = dict(\n model=('spherical', 'gaussian', 'exponential', 'matern')\n )\n gs = GridSearchCV(\n VariogramEstimator(n_lags=15, normalize=False),\n parameters,\n cv=3\n )\n\n gs = gs.fit(self.c, self.v)\n\n # Python 3.6 yields 'exponential', \n # while 3.7, 3.8 yield 'gaussian' - this is so stupid\n self.assertTrue(gs.best_params_['model'] in ['gaussian', 'exponential'])", "def fit(self,\n X_train,\n y_train, \n X_test, \n y_test,\n max_evals,\n **kwargs,\n ):\n \n self.max_evals = max_evals\n \n for key in self.models_dict.keys():\n \n path_model_dir = self.path_model_dirs[key]\n \n if self.verbose >=1: \n print('\\n----',key,'----')\n print('path_model_dir:',path_model_dir)\n \n model_dict = self.models_dict[key]\n model_type = str(type(model_dict['model']))\n \n if 'sklearn' in model_type or 'xgboost' in model_type:\n path_file = _os.path.join(path_model_dir,'model_dict.dill')\n elif 'Net' in key:\n path_file = _os.path.join(path_model_dir,'best_model.h5')\n \n if self.retrain or _os.path.isfile(path_file)==False:\n model_dict = self._single_model_BayesianSearchCV(key, \n model_dict, \n X_train, y_train, \n X_test, y_test,\n path_model_dir,\n **kwargs)\n self.models_dict[key] = model_dict\n \n\n else: #reload previously trained model\n if 'sklearn' in str(type(self.models_dict[key]['model'])):\n self.models_dict[key] = self.load('model_dict', 'dill', path_model_dir)\n elif 'Net' in key:\n #check kwargs for epochs\n epochs = 100\n for item in self.kwargs.items():\n if 'epochs' in item[0]: epochs = item[1]\n self.models_dict[key]['best_model'] = _NeuralNet.utils.load_model(\n _os.path.join(path_model_dir,'best_model.h5'))\n self.models_dict[key]['best_params'] = self.load('best_params', 'dill', path_model_dir)\n \n if 'Net' 
in key:\n y_pred = self.models_dict[key]['best_model'].predict(_np.array(X_test))\n else:\n y_pred = self.models_dict[key]['best_model'].predict(X_test)\n \n\n if 'Net' not in key:\n self.models_dict[key]['best_pred_score'] = self.models_dict[key]['best_model'].score(X_test, y_test)\n y_pred_proba = self.models_dict[key]['best_model'].predict_proba(X_test)[:,1]\n else:\n \n if 'crossentropy' in self.models_dict[key]['best_model'].loss:\n y_pred_proba = y_pred\n y_pred = (y_pred < 0.5).astype(int)\n \n self.models_dict[key]['best_pred_score'] = self.models_dict[key]['best_model'].evaluate(_np.array(X_test), \n _np.array(y_test),\n verbose =0)\n \n if self.verbose >=1:\n try:\n print('\\tbest_cv_score:',self.models_dict[key]['best_cv_score'])\n except Exception as e:\n print('Exception occured for:'+str(e))\n try:\n print('\\tbest_pred_score:',self.models_dict[key]['best_pred_score'])\n except Exception as e:\n print('Exception occured for:'+str(e))\n\n for metric_key in self.metrics.keys():\n if self.metrics[metric_key] !=None:\n try:\n if 'roc' in metric_key:\n self.models_dict[key][metric_key] = self.metrics[metric_key](y_test, y_pred_proba)\n else:\n self.models_dict[key][metric_key] = self.metrics[metric_key](y_test, y_pred)\n print('\\t',metric_key,':',self.models_dict[key][metric_key])\n except Exception as e:\n print('Exception occured for',metric_key,':',str(e))\n\n if 'sklearn' in str(type(self.models_dict[key]['model'])):\n self.save(self.models_dict[key], 'model_dict', 'dill', path_model_dir)\n elif 'Net' in key:\n model_dict_subset = self.models_dict[key].copy()\n for key in self.models_dict[key].keys():\n if key not in ['y_test','y_pred','best_pred_score'] +list(self.metrics.keys()):\n model_dict_subset.pop(key)", "def choose_model(\n name: str,\n log_dir: str = \"logs\",\n n_estimators: int = 100,\n max_depth: int = 6,\n xgb_lr: float = 0.3,\n gamma_xgb: float = 0.0,\n min_child_weight: float = 1.0,\n subsample: float = 1.0,\n colsample_bytree: float = 1.0,\n reg_lambda: float = 1.0,\n C: float = 1.0,\n nn_wt: float = 1.0,\n epochs: int = 50,\n batch_size: int = 64,\n nn_lr: float = 1e-3,\n lr_step: int = 10000,\n lr_decay: float = 0.75,\n weight_decay: float = 1e-3,\n balance_weights: bool = True,\n **kwargs,\n) -> BaseClassifier:\n xgb_model = XGBClassifier(\n n_estimators=n_estimators,\n max_depth=max_depth,\n learning_rate=xgb_lr,\n gamma=gamma_xgb,\n min_child_weight=min_child_weight,\n subsample=subsample,\n colsample_bytree=colsample_bytree,\n reg_lambda=reg_lambda,\n random_state=0,\n )\n svm_model = SVC(C=C, class_weight=\"balanced\", random_state=0)\n random_forest_classifier = RandomForestClassifier()\n\n nn_model = NN(\n epochs=epochs,\n batch_size=batch_size,\n log_dir=log_dir,\n learning_rate=nn_lr,\n lr_step=lr_step,\n lr_decay=lr_decay,\n weight_decay=weight_decay,\n balance_weights=balance_weights,\n random_state=0,\n )\n\n if name == \"xgb\":\n return xgb_model\n elif name == \"svm\":\n return svm_model\n elif name == \"ensemble\":\n model_wt = np.array([1.0, nn_wt])\n model_wt /= sum(model_wt)\n return VotingClassifier(\n [(\"xgb\", xgb_model), (\"nn\", nn_model)], voting=\"soft\", weights=model_wt\n )\n elif name == \"forest\":\n return random_forest_classifier\n elif name == \"nn\":\n return nn_model\n else:\n raise ValueError(f\"Invalid model name: {name}\")", "def generate_best_model(model_name: str,\n X: np.ndarray,\n y: np.ndarray) -> BaseEstimator:\n model_list = {\n \"svm\": BestSVM(),\n \"naive_bayes\": BestNaiveBayes(),\n 'lr': 
BestLogisticRegression()\n }\n\n model = model_list[model_name]\n return model.fit_best_model(X, y)", "def get_best_model(self):\n return self.best_model", "def load_best_model():\r\n best_model = LSTM_load_best_model()\r\n return best_model", "def get_score(data, labels, fold_pairs, name, model, param, numTopVars,\r\n rank_per_fold=None, parallel=True, rand_iter=-1):\r\n assert isinstance(name, str)\r\n logging.info(\"Classifying %s\" % name)\r\n ksplit = len(fold_pairs)\r\n# if name not in NAMES:\r\n# raise ValueError(\"Classifier %s not supported. \"\r\n# \"Did you enter it properly?\" % name)\r\n\r\n # Redefine the parameters to be used for RBF SVM (dependent on\r\n # training data)\r\n if \"SGD\" in name:\r\n param[\"n_iter\"] = [25] # [np.ceil(10**3 / len(fold_pairs[0][0]))]\r\n classifier = get_classifier(name, model, param, rand_iter=rand_iter)\r\n \r\n if name == \"RBF SVM\": #This doesn't use labels, but looks as ALL data\r\n logging.info(\"RBF SVM requires some preprocessing.\"\r\n \"This may take a while\")\r\n #\r\n is_data_computed_gamma = True\r\n #\r\n if not is_data_computed_gamma:\r\n # Sahil commented the code below that computes the gamma choices from data.\r\n # The computed gamma choices seem too low thereby making SVM very slow. Instead, trying out fixed values.\r\n print param\r\n gamma = param['gamma']\r\n gamma = np.array(gamma)\r\n print 'gamma', gamma\r\n else:\r\n #Euclidean distances between samples\r\n # sahil switched from the first call to second one for computing the dist as the first one is giving error.\r\n # dist = pdist(StandardScaler().fit(data), \"euclidean\").ravel()\r\n dist = pdist(RobustScaler().fit_transform(data), \"euclidean\").ravel()\r\n print 'dist', dist\r\n #Estimates for sigma (10th, 50th and 90th percentile)\r\n sigest = np.asarray(np.percentile(dist, [10, 50, 90]))\r\n print 'sigest', sigest\r\n #Estimates for gamma (= -1/(2*sigma^2))\r\n gamma = 1./(2*sigest**2)\r\n print 'gamma', gamma\r\n #\r\n #\r\n #Set SVM parameters with these values\r\n # sahil changed the code a bit to remove a bug\r\n # param = [{\"kernel\": [\"rbf\"],\r\n # \"gamma\": gamma.tolist(),\r\n # \"C\": np.logspace(-2,2,5).tolist()}]\r\n param = {\"kernel\": [\"rbf\"],\r\n \"gamma\": gamma.tolist(),\r\n \"C\": np.logspace(-2, 2, 5).tolist()}\r\n # if name not in [\"Decision Tree\", \"Naive Bayes\"]:\r\n if param:\r\n if hasattr(classifier,'param_grid'): \r\n # isinstance(classifier, GridSearchCV):\r\n print 'param', param\r\n N_p = np.prod([len(l) for l in param.values()])\r\n elif isinstance(classifier, RandomizedSearchCV):\r\n N_p = classifier.n_iter\r\n else:\r\n N_p = 1\r\n# is_cv = isinstance(classifier, GridSearchCV) or \\\r\n# isinstance(classifier, RandomizedSearchCV)\r\n# print('Name: {}, ksplit: {}, N_p: {}'.format(name, ksplit, N_p))\r\n if (not parallel) or ksplit <= N_p or \\\r\n (name == \"Random Forest\") or (\"SGD\" in name):\r\n logging.info(\"Attempting to use grid search...\")\r\n classifier.n_jobs = PROCESSORS\r\n classifier.pre_dispatch = 1 # np.floor(PROCESSORS/24)\r\n allConfMats = []\r\n allTotalErrs = []\r\n allFittedClassifiers = []\r\n for i, fold_pair in enumerate(fold_pairs):\r\n confMats = []\r\n totalErrs = []\r\n fitted_classifiers = []\r\n logging.info(\"Classifying a %s the %d-th out of %d folds...\"\r\n % (name, i+1, len(fold_pairs)))\r\n if rank_per_fold is not None:\r\n rankedVars = rank_per_fold[i]\r\n else:\r\n rankedVars = np.arange(data.shape[1])\r\n #\r\n for numVars in numTopVars:\r\n logging.info('Classifying for top %i 
variables' % numVars)\r\n #\r\n # print 'rankedVars', rankedVars\r\n #\r\n confMat, totalErr, fitted_classifier = classify(data[:, rankedVars[:numVars]],\r\n labels,\r\n fold_pair,\r\n classifier)\r\n confMats.append(confMat)\r\n totalErrs.append(totalErr)\r\n fitted_classifiers.append(fitted_classifier)\r\n # recheck the structure of area and fScore variables\r\n allConfMats.append(confMats)\r\n allTotalErrs.append(totalErrs)\r\n allFittedClassifiers.append(fitted_classifiers)\r\n else:\r\n print 'parallel computing going on (debug Sahil ...) ..........................'\r\n #\r\n classifier.n_jobs = PROCESSORS\r\n logging.info(\"Multiprocessing folds for classifier {}.\".format(name))\r\n pool = Pool(processes=min(ksplit, PROCESSORS))\r\n out_list = pool.map(per_split_classifier(data, labels, classifier,\r\n numTopVars),\r\n zip(rank_per_fold, fold_pairs))\r\n pool.close()\r\n pool.join()\r\n #allConfMats = [el[0] for el in out_list]\r\n #allTotalErrs = [el[1] for el in out_list]\r\n #allFittedClassifiers = [el[2] for el in out_list]\r\n allConfMats, allTotalErrs, allFittedClassifiers = tuple(zip(*out_list))\r\n return classifier, allConfMats, allTotalErrs, allFittedClassifiers", "def regression_model_cv(X_train_set,y_train_set,scaler,model,cv,print_iterations=False, save_model_data=False, model_filename=None):\n \n r2_scores_lst = []\n mean_squared_error_lst = []\n mean_absolute_error_lst = []\n\n print(f'Number Of Splits {cv.get_n_splits()}')\n for index, (train_index,val_index) in enumerate(cv.split(X_train_set)):\n\n # construct training and testing sets for every split\n X_train, X_val = X_train_set[train_index], X_train_set[val_index]\n y_train, y_val = y_train_set[train_index], y_train_set[val_index]\n\n # Perform scaling on our training and test sets within the CV\n # to prevent data leakage\n scaler = scaler\n X_train = scaler.fit_transform(X_train.reshape(-1,1))\n X_val = scaler.transform(X_val.reshape(-1,1))\n\n # predict the data\n y_pred = model.fit(X_train,y_train).predict(X_val)\n \n # grab the scores for every metric\n regression_scores = regression_scoring(y_val,y_pred,regression_metrics=[r2_score,mse,mae],print_values=False)\n\n # unpack the scores\n r2_scores_lst.append(regression_scores['r2_score'])\n mean_squared_error_lst.append(regression_scores['mean_squared_error'])\n mean_absolute_error_lst.append(regression_scores['mean_absolute_error'])\n\n # print statement every 100 iterations to let us know where we are at\n if (index+1) % 100 == 0 and print_iterations:\n print(f'Finished {index+1} Iterations')\n \n \n \n # save the model if user wants\n if save_model_data:\n if model_filename is None:\n model_filename == str(model).split('(')[0]\n else:\n # save the file\n print('model_filename')\n save_model(model,model_filename)\n\n # save and create metadata\n metadata = {'model':model,\n 'X_train_set':X_train_set,\n 'y_train_set':y_train_set,\n 'cv':cv,\n 'regression_scores':regression_scores}\n \n save_model_metadata(metadata,model_filename)\n \n \n return [r2_scores_lst,mean_squared_error_lst,mean_absolute_error_lst]", "def random_forest_regression(dataset,\n model=saved_pickle_model,\n fit=False):\n\n\n # Preparing the training and test sets\n # ------------------------------------\n # Exoplanet and Solar system dataset\n dataset_exo = dataset[:501]\n dataset_sol = dataset[501:]\n\n # Separating the data into dependent and independent variables\n features = dataset_exo.iloc[:, :-1] # mass, teq, etc\n labels = dataset_exo.iloc[:, -1] # radius\n\n # Splitting the 
dataset into the Training set and Test set\n X_train, X_test, y_train, y_test = train_test_split(features,\n labels,\n test_size=0.25,\n random_state=0)\n features_sol = dataset_sol.iloc[:, :-1]\n labels_sol = dataset_sol.iloc[:, -1]\n\n X_train_sol, X_test_sol, y_train_sol, y_test_sol = train_test_split(features_sol,\n labels_sol,\n test_size=0.25,\n random_state=0)\n\n X_train = X_train.append(X_train_sol)\n y_train = y_train.append(y_train_sol)\n X_test = X_test.append(X_test_sol)\n y_test = y_test.append(y_test_sol)\n\n # Outliers in the sample\n # Remove HATS-12 b from the training set\n X_test = X_test.drop(['HATS-12 b'])\n y_test = y_test.drop(labels=['HATS-12 b'])\n print('\\nHATS-12 b removes from test set\\n')\n\n # Remove K2-95 b from the training set\n X_train = X_train.drop(['K2-95 b'])\n y_train = y_train.drop(labels=['K2-95 b'])\n print('\\nK2-95 b removes from training set\\n')\n\n # Remove Kepler-11 g from the training set\n X_train = X_train.drop(['Kepler-11 g'])\n y_train = y_train.drop(labels=['Kepler-11 g'])\n print('\\nKepler-11 g removes from training set\\n')\n\n train_test_values = [X_train.values, X_test.values,\n y_train.values, y_test.values]\n train_test_sets = [X_train, X_test, y_train, y_test]\n\n # Fitting the hyperparameters of the random forest model\n # with the grid search method\n # ------------------------------------------------------\n if fit:\n # Setting up the grid of hyperparameters\n rf = GridSearchCV(RandomForestRegressor(),\n param_grid={'n_estimators': np.arange(80, 200),\n 'max_depth': np.arange(4, 10),\n 'max_features': np.arange(3, 6),\n 'min_samples_split': np.arange(4, 5)},\n cv=3, verbose=1, n_jobs=-1)\n\n # Fitting training set - finding best hyperparameters\n rf.fit(X_train, y_train)\n\n # Best hyperparameters found by the grid search\n print(rf.best_params_)\n\n # Random forest model with the best hyperparameters\n regr = RandomForestRegressor(n_estimators=rf.best_params_['n_estimators'],\n max_depth=rf.best_params_['max_depth'],\n max_features=rf.best_params_['max_features'],\n min_samples_split=rf.best_params_['min_samples_split'],\n random_state=0, oob_score=True)\n\n # Saving the random forest model in a file\n outdir = 'bem_output'\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n\n name_Rf = 'r2_' + str(round(rf.best_score_, 2)) + '_' + str(datetime.datetime.now().strftime(\"%Y-%m-%d_%H:%M\")) + '.pkl'\n name_Rf = os.path.join(outdir, name_Rf)\n\n joblib.dump(regr, name_Rf)\n print('RF model save in : ', name_Rf)\n\n else:\n # Loading the random forest model saved\n print('Loading random forest model: ', model)\n regr = joblib.load(model)\n\n # Fit the best random forest model to the training set\n # ----------------------------------------------------\n regr.fit(X_train, y_train)\n\n # Predict the radius for the training and testing sets\n y_train_predict = regr.predict(X_train)\n y_test_predict = regr.predict(X_test)\n\n # Scores of the random forest\n test_score = r2_score(y_test, y_test_predict)\n pearson = pearsonr(y_test, y_test_predict)\n print(f'Test set, R-2 score: {test_score:>5.3}')\n print(f'\\nTest set, Pearson correlation: {pearson[0]:.3}')\n\n # Mean squared errors of the train and test set\n print('Root mean squared errors')\n print('Train set: ', np.sqrt(np.mean((y_train-y_train_predict)**2)),\n '\\nTest set: ', np.sqrt(np.mean((y_test-y_test_predict)**2)))\n\n # Feature importance\n name_features = dataset.columns.tolist()\n print('\\nFeature importance')\n _ = [print(name, ': \\t', value)\n for name, 
value\n in zip(name_features, regr.feature_importances_)]\n\n return regr, y_test_predict, train_test_values, train_test_sets", "def test_find_best_model_future_cv(self):\n parameters = dict(\n model=('spherical', 'gaussian', 'exponential', 'matern')\n )\n gs = GridSearchCV(\n VariogramEstimator(n_lags=15, normalize=False),\n parameters,\n cv=5\n )\n\n gs = gs.fit(self.c, self.v)\n\n self.assertEqual(gs.best_params_['model'], 'exponential')", "def model_thresold(X_train,y_train,X_test,t,model,**param):\n if model == 'randomforest':\n rf = RandomForestClassifier(param)\n rf.fit(X_train, y_train)\n y_pred = rf.predict_proba(X_test)\n y_pred = adjusted_classes(y_pred,t)\n else:\n lgb_class = lgb.LGBMClassifier(param)\n lgb_class.fit(X_train,y_train)\n y_pred = lgb_class.predict_proba(X_test)\n y_pred = adjusted_classes(y_pred,t)\n\n return y_pred", "def obtain_best_model(optimal_weights):\n gnn = NeuralNetwork(optimal_weights)\n gnn.compile_train(5)\n\n gnn.save_accuracy_chart()\n\n gnn.model.save('spy_classifier')", "def train_model(lrmodel, X, Y, devX, devY, devscores):\n done = False\n best = -1.0\n r = np.arange(1,6)\n \n while not done:\n # Every 100 epochs, check Pearson on development set\n lrmodel.fit(X, Y, verbose=2, shuffle=False, validation_data=(devX, devY))\n yhat = np.dot(lrmodel.predict_proba(devX, verbose=2), r)\n score = pearsonr(yhat, devscores)[0]\n if score > best:\n print score\n best = score\n bestlrmodel = prepare_model(ninputs=X.shape[1])\n bestlrmodel.set_weights(lrmodel.get_weights())\n else:\n done = True\n\n yhat = np.dot(bestlrmodel.predict_proba(devX, verbose=2), r)\n score = pearsonr(yhat, devscores)[0]\n print 'Dev Pearson: ' + str(score)\n return bestlrmodel", "def findBestModel(X_train, X_test, Y_test, model='iForest'):\n if model == 'iForest':\n total_score = 0;\n parameters = [0,0,0,0]\n for max_features in range(1,X_train.shape[1]+1):\n for contamination in range(1,101):\n iForest = IsolationForest(n_estimators = 100, max_features = max_features, contamination = contamination/1000, random_state = 0).fit(X_train)\n \n scores = []\n for x_test,y_test in zip(X_test,Y_test):\n y_hat = iForest.predict(x_test)\n score = evaluate(y_test,y_hat) # returns similarity percentage\n scores.append(score)\n \n if sum(scores) > total_score:\n total_score = sum(scores)\n parameters[0] = max_features\n parameters[1] = contamination/1000\n parameters[2] = total_score\n parameters[3] = scores\n print(parameters, contamination)\n \n return parameters", "def fit(self, train_data, train_labels, val_data, val_labels):\r\n split = np.append(-np.ones(train_labels.shape, dtype=np.float32),\r\n np.zeros(val_labels.shape, dtype=np.float32))\r\n ps = PredefinedSplit(split)\r\n\r\n sh = train_data.shape\r\n train_data = np.append(train_data, val_data , axis=0)\r\n train_labels = np.append(train_labels , val_labels, axis=0)\r\n del val_data, val_labels\r\n \r\n model = RandomForestClassifier(n_jobs=self.n_jobs,\r\n **self.scikit_args) \r\n \r\n params = {'n_estimators':np.arange(1,1001,50)} \r\n #Coarse search \r\n gs = GridSearchCV(model, params, refit=False, n_jobs=self.n_jobs, \r\n verbose=self.verbose, cv=ps)\r\n gs.fit(train_data, train_labels)\r\n \r\n #Fine-Tune Search\r\n params = {'n_estimators':np.arange(gs.best_params_['n_estimators']-50,\r\n gs.best_params_['n_estimators']+50)} \r\n \r\n self.gs = GridSearchCV(model, params, refit=self.refit, n_jobs=self.n_jobs, \r\n verbose=self.verbose, cv=ps)\r\n self.gs.fit(train_data, train_labels)\r\n \r\n if not self.refit:\r\n 
model.set_params(n_estimators=gs.best_params_['n_estimators'])\r\n self.gs = model\r\n self.gs.fit(train_data[:sh[0]], train_labels[:sh[0]])", "def get_model_6(parameters):\n # Generate model\n n_estimators = parameters['num_classes'] * parameters['n_estimators']\n model = RandomForestClassifier(\n class_weight='balanced',\n n_estimators=n_estimators,\n min_samples_split=parameters['min_child_samples'])\n \n # Return the model\n return model", "def getBestFittedModel( models, features ):\r\n\r\n\tvalidModels = []\r\n\tclusteringScores = []\r\n\tfor model in models:\r\n\t\t#Skip mono cluster models\r\n\t\tif st.getNbClusters( model ) < 2: continue\r\n\t\tvalidModels.append( model )\r\n\t\tlabels = model.labels_\r\n\t\tclusteringScore = evaluateClusters(features, labels)\r\n\t\tclusteringScores.append( clusteringScore)\r\n\tif len(clusteringScores) == 0: return False, -1\r\n\tbestScoreIndex = np.argmax(clusteringScores)\r\n\treturn validModels[bestScoreIndex], clusteringScores[bestScoreIndex]", "def train_eval_model(model, model_name, X_train, y_train, X_test, y_test):\n\n model_predictions_train = model.predict(X_train) # Wyniki regresji dla zbioru treningowego\n model_mse_train = mean_squared_error(y_train, model_predictions_train) # MSE dla zbioru treningowego\n model_rmse_train = np.sqrt(model_mse_train) # RMSE dla zbioru treningowego\n model_predictions_test = model.predict(X_test)\n model_mse_test = mean_squared_error(y_test, model_predictions_test)\n model_rmse_test = np.sqrt(model_mse_test)\n # Kroswalidacja modelu\n model_scores = cross_val_score(model, X_train, y_train, scoring=\"neg_mean_squared_error\", cv=10)\n model_rmse_scores = np.sqrt(-model_scores)\n\n model_result = ResultDataRegressors(model_name, model, model_rmse_train, model_rmse_test, model_rmse_scores)\n return model_result", "def random_objective(params, iteration, n_folds = N_FOLDS):\n\n start = timer()\n \n # Perform n_folds cross validation\n cv_results = lgb.cv(params, train_set, num_boost_round = 10000, nfold = n_folds, \n early_stopping_rounds = 100, metrics = 'l2', seed = 50, stratified=False)\n end = timer()\n best_score = np.max(cv_results['l2-mean'])\n \n # Loss must be minimized\n loss = 1 - best_score\n \n # Boosting rounds that returned the highest cv score\n n_estimators = int(np.argmax(cv_results['l2-mean']) + 1)\n \n # Return list of results\n return [loss, params, iteration, n_estimators, end - start]" ]
[ "0.70401394", "0.6594376", "0.65197843", "0.6514612", "0.6472461", "0.64660364", "0.64625597", "0.6340449", "0.63050693", "0.625302", "0.6244924", "0.6203827", "0.6200401", "0.6178384", "0.6176473", "0.6137107", "0.61194456", "0.6112998", "0.6086907", "0.60695493", "0.60676", "0.5997115", "0.5987833", "0.5967072", "0.5962271", "0.5956605", "0.59544796", "0.5948741", "0.5929333", "0.59182817" ]
0.8048125
0
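Across the rows shown in this preview, document_rank reads as 0 when document_score exceeds every value in negative_scores and 1 otherwise; this is an inference from the visible rows, not a documented guarantee. A small helper expressing that reading:

# Hypothetical reading of document_rank, inferred from the preview rows:
# 0 if the positive document outscores every negative, 1 otherwise.
def infer_document_rank(document_score, negative_scores):
    return 0 if float(document_score) > max(float(s) for s in negative_scores) else 1

# Row above: positive 0.8048125 vs best negative 0.70401394 -> rank 0
print(infer_document_rank("0.8048125", ["0.70401394", "0.6594376"]))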
Check if s is a type of list
def is_list(self, s):
    return isinstance(s, type(list()))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_list(s_list):\n return isa(s_list, List)", "def _is_list(item):\n return isinstance(item, list)", "def isList(x):\n \n return ( type(x) == list ) # True if the type of x is a list", "def _is_list(arg):\n return isinstance(arg, collections.Sequence) and not _is_string(arg)", "def isList(data):\n\ttry:\n\t\tfrom types import ListType\n\t\tif type(data) == ListType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type([]):\n\t\t\treturn True\n\treturn False", "def isList(obj):\n return type(obj)==types.ListType", "def _is_list(val):\n\n return isinstance(val, list)", "def is_list(obj):\n return type(obj) is list", "def is_list(value):\n return isinstance(value, list)", "def isList(l):\r\n return hasattr(l, '__iter__') \\\r\n or (type(l) in (types.ListType, types.TupleType))", "def is_list_of(seq, expected_type):\n return is_seq_of(seq, expected_type, seq_type=list)", "def _is_list(self):\n # TODO\n if self.is_int():\n return self.int() == 0\n else:\n return self.size_words() == 2 and self.tag() == 0 and self.field(1)._is_list()", "def isList(self, item):\n\t retval = False\n\t if type(item) in (ListType, TupleType) :\n\t retval = True", "def is_listlike(x: Any) -> bool:\r\n return (isinstance(x, (list, tuple)))", "def is_list(value):\n return isinstance(value, list) or None", "def is_list(self) -> bool:\n return False", "def is_list_like(value):\n if is_iterable(value) and not isinstance(value, six.string_types):\n return True\n\n else:\n return False", "def _is_valid_list(content_type: str) -> bool:\n content_type = content_type.strip()\n\n if not content_type.startswith(\"pt:list\"):\n return False\n\n if not _has_matched_brackets(content_type):\n return False\n\n if not _has_brackets(content_type):\n return False\n\n sub_types = _get_sub_types_of_compositional_types(content_type)\n if len(sub_types) != 1:\n return False\n\n sub_type = sub_types[0]\n return _is_valid_pt(sub_type)", "def is_list(annotation):\n\n annotation_origin = getattr(annotation, \"__origin__\", None)\n\n return annotation_origin == list", "def is_list_of_list(self) -> bool:\n return bool(AnnotationWrapper.list_of_list_re.match(self.data))", "def is_list(self):\n answer = self._call('is_list')\n return answer.yes", "def is_list(self) -> bool:\n if self.is_list_of_list: # pylint: disable=R1705\n return False\n else:\n return bool(AnnotationWrapper.list_field_re.match(self.data))", "def _is_list(arg):\n if isinstance(arg, dict):\n return False\n if isinstance(arg, str): # Python 3-only, as str has __iter__\n return False\n return (\n not _has_method(arg, \"strip\")\n and _has_method(arg, \"__getitem__\")\n or _has_method(arg, \"__iter__\")\n )", "def test_return_type(self):\n self.assertEqual(type(self.s0.from_json_string(self.string)), list)", "def isValidTypeForList(self, *args):\n return _libsbml.SBasePlugin_isValidTypeForList(self, *args)", "def is_list_of_strings(vals):\n try:\n # check if everything is a string\n for val in vals:\n if not isinstance(val, six.string_types):\n return False\n except Exception:\n # vals is not enumerable\n return False\n\n # everything is a string\n return True", "def is_sequence_of_list(items):\n return all(isinstance(item, list) for item in items)", "def isnondet(r):\n return isinstance(r, list) # BAD", "def _validate_list_type(self, name, obj, *args):\n if obj is None:\n return\n if isinstance(obj, list):\n for i in obj:\n self._validate_type_not_null(name, i, *args)\n else:\n self._validate_type(name, obj, *args)", "def _can_be_list(pair):\n 
assert(isa(pair, Pair))\n return str(pair).find(' . ') < 0" ]
[ "0.84522414", "0.80067027", "0.80044675", "0.798943", "0.78784645", "0.78642255", "0.7797972", "0.77470225", "0.7729441", "0.770684", "0.7510177", "0.74303883", "0.73520446", "0.73322815", "0.73082334", "0.72993803", "0.71319544", "0.71102685", "0.7052229", "0.69552034", "0.69334066", "0.6887565", "0.6879453", "0.6863277", "0.68116313", "0.6700512", "0.6674004", "0.66717225", "0.6667027", "0.66435176" ]
0.9174904
0
Check if s is a type of str
def is_str(self, s):
    return isinstance(s, type(str()))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_string(s):\n try:\n return isinstance(s, basestring)\n except NameError:\n return isinstance(s, str)", "def isString(s):\r\n try:\r\n return isinstance(s, unicode) or isinstance(s, basestring)\r\n except NameError:\r\n return isinstance(s, str)", "def is_str(x):\n return isinstance(x, str)", "def strtype(x):\n if type(x) == str:\n return True\n if type(x) == unicode:\n return True\n return False", "def typeIsString(obj):\n return type(obj) is str or _haveTypeUnicode and type(obj) is unicode", "def isString(s):\r\n if not isinstance(s, util.AtomicString):\r\n return isinstance(s, basestring)\r\n return False", "def is_str(value):\n return isinstance(value, str)", "def is_str(value):\n if not type(value) is str:\n return False\n else:\n return True", "def _is_string(arg):\n return isinstance(arg, types.StringTypes)", "def _is_str(item):\n return isinstance(item, str)", "def is_string(atype):\n if atype == str:\n return True\n elif PY2:\n if atype == unicode:\n return True\n return False", "def isString(x):\n if type(x) == str:\n return True\n else:\n return False", "def is_string(obj):\n return isinstance(obj, str)", "def is_string(val):\n return (\n isinstance(val, unicode) or \\\n isinstance(val, str) \n )", "def is_string(value):\n return isinstance(value, basestring)", "def is_string(value):\n return isinstance(value, string_types)", "def is_string(value):\n return isinstance(value, (str, bytes))", "def is_string(obj):\n return isinstance(obj, basestring)", "def _is_string(self, obj):\n return isinstance(obj, unicode) or isinstance(obj, str)", "def not_a_string(obj):\n my_type = str(type(obj))\n if is_py3():\n is_str = my_type.find('bytes') < 0 and my_type.find('str') < 0\n return is_str\n\n return my_type.find('str') < 0 and \\\n my_type.find('unicode') < 0", "def isString(data):\n\ttry:\n\t\tfrom types import UnicodeType, StringType\n\t\tif type(data) == UnicodeType or type(data) == StringType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type(\"\"):\n\t\t\treturn True\n\treturn False", "def _validate_str(s):\n if not isinstance(s, str):\n raise TypeError(\"Expected string, got {}\".format(type(s)))\n if len(s) == 0:\n raise ValueError(\"Empty variant string.\")\n return", "def has_string_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.DType.string", "def is_string(document):\r\n return isinstance(document, str)", "def is_string(value):\n try:\n basestring\n def is_string(value):\n \"\"\"Python 2 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, basestring)\n except NameError:\n def is_string(value):\n \"\"\"Python 3 compatible implementation of is_string(value).\"\"\"\n return isinstance(value, (str, bytes))\n return is_string(value)", "def _isstr(value):\n\n if isinstance(value, string_types + (bytes,)):\n return True\n elif hasattr(value, \"__iter__\"):\n return all(_isstr(v) for v in value)\n else:\n return False", "def is_string(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_str)", "def is_string(self):\n answer = self._call('is_string')\n return answer.yes", "def _validate_str(val):\n if not isinstance(val, str):\n raise ValueError(\"Passed value {} is not a string\".format(val))\n return val", "def string_check(param, name):\n\tif not isinstance(param, strcomp):\n\t\traise TypeError(\"Keyword arg '%s' must be of type string. Got: %s\" % (\n\t\t\tname, type(param)))\n\telse:\n\t\tpass" ]
[ "0.8550737", "0.84955674", "0.8198416", "0.81717855", "0.8066725", "0.8013463", "0.79932284", "0.78747827", "0.7864553", "0.7827748", "0.78243095", "0.7745743", "0.7659679", "0.75780845", "0.75697416", "0.7567884", "0.7553253", "0.7485784", "0.7336275", "0.73066056", "0.720742", "0.7193452", "0.71912915", "0.71814716", "0.71102804", "0.70600945", "0.69918066", "0.69572115", "0.687367", "0.6662263" ]
0.89311457
0
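The per-row scores also support a simple retrieval-style evaluation: rank the positive document among its own negatives and aggregate. The sketch below computes a mean reciprocal rank; the two rows in it are abbreviated stand-ins shaped like the columns above, not full dataset rows.

# Sketch: rank the positive among its negatives by score and report MRR.
def reciprocal_rank(document_score, negative_scores):
    doc = float(document_score)
    better = sum(1 for s in negative_scores if float(s) > doc)
    return 1.0 / (better + 1)

rows = [  # abbreviated stand-in rows
    {"document_score": "0.9174904", "negative_scores": ["0.84522414", "0.80067027"]},
    {"document_score": "0.65785086", "negative_scores": ["0.71773916", "0.6322747"]},
]
mrr = sum(reciprocal_rank(r["document_score"], r["negative_scores"]) for r in rows) / len(rows)
print(mrr)  # 0.75 for these stand-in rows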
The Action Group resource IDs.
def group_ids(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
    return pulumi.get(self, "group_ids")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def groups(self):\r\n return resources.Groups(self)", "def action_group_id(self) -> Optional[str]:\n return pulumi.get(self, \"action_group_id\")", "def group_ids(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"group_ids\")", "def resource_group_ids_scope(self) -> str:\n return pulumi.get(self, \"resource_group_ids_scope\")", "def resource_group_ids_scope(self) -> str:\n return pulumi.get(self, \"resource_group_ids_scope\")", "def admin_group_object_ids(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"admin_group_object_ids\")", "def groups(self):\n #return self.get('{}/groups'.format(ApiVersion.A1.value))\n return self.get('{}/groups'.format(ApiVersion.CM1.value))", "def rule_groups(self) -> Sequence['outputs.SubResourceResponse']:\n return pulumi.get(self, \"rule_groups\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def groups(self):\n return self.get_data(\"groups\")", "def security_group_ids(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"security_group_ids\")", "def groups_ids(self) -> List[int]:\n\n _, groups_ids = Skeleton._group_get_ids(self.groups)\n\n return groups_ids", "def security_group_ids(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"security_group_ids\")", "def management_groups(self) -> pulumi.Output[Optional[Sequence['outputs.ResourceIdResponse']]]:\n return pulumi.get(self, \"management_groups\")", "def actions(self, request, action_list, group):\n return action_list", "def groups(self):\n return []", "def security_group_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"security_group_ids\")", "def admin_group_object_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"admin_group_object_ids\")", "def admin_group_object_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"admin_group_object_ids\")", "def associated_resource_ids(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"associated_resource_ids\")", "def associated_resource_ids(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"associated_resource_ids\")", "def api_groups(self):\n return self._api_groups", "def braid_group_action(self):\n G = []\n for c in self:\n c = c.relabel()\n if any(c in g for g in G):\n continue\n G.append(c.braid_group_orbit())\n return G", "def resource_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_id\")" ]
[ "0.667532", "0.6325436", "0.6276232", "0.6123759", "0.6123759", "0.60838944", "0.607983", "0.6047004", "0.5986194", "0.5986194", "0.5986194", "0.5986194", "0.5986194", "0.5975283", "0.5970267", "0.5931191", "0.58320796", "0.5817181", "0.5813919", "0.579841", "0.5774376", "0.57450956", "0.57450956", "0.5742109", "0.5742109", "0.5695493", "0.5652327", "0.56519544", "0.56519544", "0.56519544" ]
0.64152503
1
An optional custom email subject to use in email notifications.
def custom_email_subject(self) -> Optional[pulumi.Input[str]]:
    return pulumi.get(self, "custom_email_subject")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subject(self, subject: \"str\"):\n self._attrs[\"subject\"] = subject", "def subject(self, subject: \"str\"):\n self._attrs[\"subject\"] = subject", "def subject(self):\n return self.mail.get('Subject')", "def _get_email_subject(app_name):\n return '{} <==> Tote'.format(app_name)", "def subject(self) -> \"str\":\n return self._attrs.get(\"subject\")", "def subject(self) -> \"str\":\n return self._attrs.get(\"subject\")", "def subject(self):\n return self.properties.get(\"subject\", None)", "def subject(self):\n if \"subject\" in self._prop_dict:\n return self._prop_dict[\"subject\"]\n else:\n return None", "def subject(self):\n return self.get(\"subject\")", "def email_address() -> str:\n\n return os.environ.get(\"EMAIL_NOTIFICATION\", \"\")", "def getSubject(self):\r\n return self.msg[\"Subject\"]", "def setSubject(self,value): \n self.PDFreactorConfiguration.in1[\"subject\"] = value", "def subject(self) -> str:\n return self[\"Sns\"][\"Subject\"]", "def publisher_email(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"publisher_email\")", "def publisher_email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"publisher_email\")", "def set_subject(self):\n\t\tfrom email.errors import HeaderParseError\n\t\ttry:\n\t\t\t_subject = decode_header(self.mail.get(\"Subject\", \"No Subject\"))\n\t\t\tself.subject = _subject[0][0] or \"\"\n\t\t\n\t\t\tif _subject[0][1]:\n\t\t\t\tself.subject = self.subject.decode(_subject[0][1])\n\t\t\telse:\n\t\t\t\t# assume that the encoding is utf-8\n\t\t\t\tself.subject = self.subject.decode(\"utf-8\")[:140]\n\t\texcept (UnicodeDecodeError, HeaderParseError):\n\t\t\t#try:\n\t\t\t#\tself.subject = self.subject.decode(\"gb18030\")\n\t\t\t#except UnicodeDecodeError:\n\t\t\tself.subject = u'Error Decoding Subject'\n\t\t#if self.subject and len(self.subject)>140:\n\t\t#\tself.subject = self.subject[:135]\n\t\timport re\n\n\t\temoji_pattern = re.compile(\"[\"\n u\"\\U0001F600-\\U0001F64F\" # emoticons\n u\"\\U0001F300-\\U0001F5FF\" # symbols & pictographs\n u\"\\U0001F680-\\U0001F6FF\" # transport & map symbols\n u\"\\U0001F1E0-\\U0001F1FF\" # flags (iOS)\n \"]+\", flags=re.UNICODE)\n\t\tself.subject = emoji_pattern.sub(r'', self.subject)\n\n\t\tif not self.subject:\n\t\t\tself.subject = \"No Subject\"", "def notification_email(self, sender, subject, body):\n\n\t\tts = str(int(time.time())*1000)\n\t\tparts = [sender, body, ts, subject]\n\t\tself._send_message(\"NOTIFICATION\", self._pack_message_data(0, parts))", "def subject(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subject\")", "def send_mail(self, subject):\r\n pass", "def notification_sender_email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"notification_sender_email\")", "def notification_sender_email(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"notification_sender_email\")", "def publisher_email(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"publisher_email\")", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def email(self) -> Optional[str]:\n return pulumi.get(self, \"email\")", "def get_user_provided_subject_identifier_attrname(self):\n return None", "def subject(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"subject\")", "def set_subject(self, subject):\n self._subject = subject", "def subject(self, value):\n 
self.set_property(\"subject\", value)" ]
[ "0.6489472", "0.6489472", "0.6424295", "0.6249044", "0.62452984", "0.62452984", "0.61771196", "0.6170477", "0.61662596", "0.61575764", "0.61519724", "0.6137915", "0.6137259", "0.6122633", "0.61057836", "0.6089166", "0.6057406", "0.5999996", "0.5969143", "0.5958554", "0.5958554", "0.59360015", "0.574188", "0.574188", "0.574188", "0.574188", "0.56942534", "0.56734705", "0.5672056", "0.5672029" ]
0.8148969
0
An optional custom webhook payload to use in webhook notifications.
def custom_webhook_payload(self) -> Optional[pulumi.Input[str]]:
    return pulumi.get(self, "custom_webhook_payload")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def webhook_payload(self, webhook_payload: \"str\"):\n self._attrs[\"webhookPayload\"] = webhook_payload", "def webhook_payload(self, webhook_payload: \"str\"):\n self._attrs[\"webhookPayload\"] = webhook_payload", "def webhook(self) -> Optional[pulumi.Input['WebhookArgs']]:\n return pulumi.get(self, \"webhook\")", "def webhook_payload(self) -> \"str\":\n return self._attrs.get(\"webhookPayload\")", "def webhook_payload(self) -> \"str\":\n return self._attrs.get(\"webhookPayload\")", "def webhook(self):\n raise NotImplementedError()", "def append_payload(self, payload: Payload) -> Payload:\n ...", "def payload(self, payload: \"dict\"):\n self._attrs[\"payload\"] = payload", "def example_webhook(self, incoming_request):\n return \"Example\"", "def example_webhook(self, incoming_request):\n return \"Example\"", "def payload_string(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"payload_string\")", "def set_payload(self, payload):\n self.payload = json.dumps(payload)", "def add_extra_args(self):\n super(AwsNetworkBootstrapMethod, self).add_extra_args()\n self.parser.add_argument(\"--custom_payload\", required=False,\n help=\"JSON payload of per-region data.\")", "def on_push(self, payload):\n pass", "def _getCustomizedPayload(self, payload: str):\n ajustedPayload = [payload]\n if self._prefix:\n ajustedPayload = [(prefix+payload) for prefix in self._prefix for payload in ajustedPayload]\n if self._suffix:\n ajustedPayload = [(payload+suffix) for suffix in self._suffix for payload in ajustedPayload]\n return self.__case(self.__encode(ajustedPayload))", "def build_payload(self, **kwargs):\n\n return None", "def webhook_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"webhook_id\")", "def webhook_endpoint(self) -> Optional[pulumi.Input['EventSubscriptionWebhookEndpointArgs']]:\n return pulumi.get(self, \"webhook_endpoint\")", "def webhook_endpoint(self) -> Optional[pulumi.Input['EventSubscriptionWebhookEndpointArgs']]:\n return pulumi.get(self, \"webhook_endpoint\")", "def example_payload(self, example_payload):\n\n self._example_payload = example_payload", "def set_webhook(self, webhook):\n self.webhook = webhook\n return", "def add_extra_args(self):\n super(AwsNetworkCleanupMethod, self).add_extra_args()\n self.parser.add_argument(\"--custom_payload\", required=False,\n help=\"JSON payload of per-region data.\")", "def webhook_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"webhook_url\")", "def InvocationAddPayload(builder, payload):\n return AddPayload(builder, payload)", "def calling_webhook(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"calling_webhook\")", "def payload(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"payload\")", "def discord_webhook(title, url, thumbnail, sizes):\n fields = []\n for size in sizes:\n fields.append({\"name\": size, \"value\": \"Available\", \"inline\": True})\n\n data = {\n \"username\": CONFIG['USERNAME'],\n \"avatar_url\": CONFIG['AVATAR_URL'],\n \"embeds\": [{\n \"title\": title,\n \"url\": CONFIG['URL'].replace('.json', '/') + url, \n \"thumbnail\": {\"url\": thumbnail},\n \"fields\": fields,\n \"color\": int(CONFIG['COLOUR']),\n \"footer\": {\"text\": \"Made by Yasser\"},\n \"timestamp\": str(datetime.utcnow()),\n }]\n }\n\n result = rq.post(CONFIG['WEBHOOK'], data=json.dumps(data), headers={\"Content-Type\": \"application/json\"})\n\n try:\n result.raise_for_status()\n except rq.exceptions.HTTPError as err:\n 
logging.error(err)\n else:\n print(\"Payload delivered successfully, code {}.\".format(result.status_code))\n logging.info(msg=\"Payload delivered successfully, code {}.\".format(result.status_code))", "def get_transformed_webhook_payload(gh_payload, default_branch=None, lookup_user=None):\n try:\n validate(gh_payload, GITHUB_WEBHOOK_PAYLOAD_SCHEMA)\n except Exception as exc:\n raise InvalidPayloadException(exc.message)\n\n payload = JSONPathDict(gh_payload)\n\n if payload[\"head_commit\"] is None:\n raise SkipRequestException\n\n config = SafeDictSetter()\n config[\"commit\"] = payload[\"head_commit.id\"]\n config[\"ref\"] = payload[\"ref\"]\n config[\"default_branch\"] = payload[\"repository.default_branch\"] or default_branch\n config[\"git_url\"] = payload[\"repository.ssh_url\"]\n\n config[\"commit_info.url\"] = payload[\"head_commit.url\"]\n config[\"commit_info.message\"] = payload[\"head_commit.message\"]\n config[\"commit_info.date\"] = payload[\"head_commit.timestamp\"]\n\n config[\"commit_info.author.username\"] = payload[\"head_commit.author.username\"]\n config[\"commit_info.author.url\"] = payload.get(\"head_commit.author.html_url\")\n config[\"commit_info.author.avatar_url\"] = payload.get(\"head_commit.author.avatar_url\")\n\n config[\"commit_info.committer.username\"] = payload.get(\"head_commit.committer.username\")\n config[\"commit_info.committer.url\"] = payload.get(\"head_commit.committer.html_url\")\n config[\"commit_info.committer.avatar_url\"] = payload.get(\"head_commit.committer.avatar_url\")\n\n # Note: GitHub doesn't always return the extra information for users, so we do the lookup\n # manually if possible.\n if (\n lookup_user\n and not payload.get(\"head_commit.author.html_url\")\n and payload.get(\"head_commit.author.username\")\n ):\n author_info = lookup_user(payload[\"head_commit.author.username\"])\n if author_info:\n config[\"commit_info.author.url\"] = author_info[\"html_url\"]\n config[\"commit_info.author.avatar_url\"] = author_info[\"avatar_url\"]\n\n if (\n lookup_user\n and payload.get(\"head_commit.committer.username\")\n and not payload.get(\"head_commit.committer.html_url\")\n ):\n committer_info = lookup_user(payload[\"head_commit.committer.username\"])\n if committer_info:\n config[\"commit_info.committer.url\"] = committer_info[\"html_url\"]\n config[\"commit_info.committer.avatar_url\"] = committer_info[\"avatar_url\"]\n\n return config.dict_value()", "def discord_webhook(self, product_item):\n\n data = {}\n data[\"username\"] = CONFIG['USERNAME']\n data[\"avatar_url\"] = CONFIG['AVATAR_URL']\n data[\"embeds\"] = []\n\n embed = {}\n \n if product_item == 'initial':\n embed[\"description\"] = \"Thank you for using Yasser's Sneaker Monitors. This message is to let you know \" \\\n \"that everything is working fine! 
You can find more monitoring solutions at \" \\\n \"https://github.com/yasserqureshi1/Sneaker-Monitors \"\n else:\n embed[\"title\"] = product_item[0] + ' - ' + product_item[1] + ' - ' + product_item[2]\n embed[\"description\"] = product_item[3]\n embed[\"thumbnail\"] = {'url': product_item[4]}\n embed['url'] = product_item[5]\n\n embed[\"color\"] = CONFIG['COLOUR']\n embed[\"footer\"] = {'text': 'Made by Yasser & Bogdan'}\n embed[\"timestamp\"] = str(datetime.utcnow())\n data[\"embeds\"].append(embed)\n\n result = rq.post(self.webhook, data=json.dumps(data), headers={\"Content-Type\": \"application/json\"})\n\n try:\n result.raise_for_status()\n except rq.exceptions.HTTPError as err:\n print(err)\n logging.error(msg=err)\n else:\n print(\"Payload delivered successfully, code {}.\".format(result.status_code))\n logging.info(msg=\"Payload delivered successfully, code {}.\".format(result.status_code))", "def test_webhook_empty_event(self):\n event = {\n 'body': json.dumps({})\n }\n context = {}\n resp = webhook(event, context)\n self.assertEqual(resp[\"statusCode\"], 500)\n self.assertEqual(resp[\"body\"], json.dumps({}))" ]
[ "0.72018594", "0.72018594", "0.6760789", "0.6682386", "0.6682386", "0.5861288", "0.5847646", "0.5813326", "0.5803336", "0.5803336", "0.57824904", "0.5756584", "0.56841034", "0.5636358", "0.5582661", "0.5581426", "0.5537865", "0.5537564", "0.5537564", "0.55360824", "0.5485823", "0.5461414", "0.5442937", "0.54273945", "0.54258734", "0.53936726", "0.5388329", "0.53587633", "0.53573483", "0.5356607" ]
0.81966394
0
A utility that returns the path value after argument paramsfile.
def get_launch_params_filepath():
    try:
        cli_args = sys.argv
        return sys.argv[sys.argv.index("--params-file") + 1]
    except ValueError:
        return "Failed to parse params file path from command line arguments. Check that --params-file command line argument is specified."
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_params_filepath(self):\n\t\treturn os.path.join(self.workdir, \"params.txt\")", "def real_path(self):\n\t\treturn self.args[0]", "def file_path(self) -> global___Expression:", "def get_path(f=sys.argv[0]):\n\n return os.path.split(f)", "def os_open_parmfile( self, ):\r\n #a_filename = self.starting_dir + os.path.sep + \"parameters.py\"\r\n AppGlobal.os_open_txt_file( \"parameters.py\" )", "def second_path(self):\n\t\treturn self.args[2]", "def get_paramfile(path, cases):\n data = None\n if isinstance(path, six.string_types):\n for prefix, function_spec in cases.items():\n if path.startswith(prefix):\n function, kwargs = function_spec\n data = function(prefix, path, **kwargs)\n return data", "def get_parameter(par_name):\r\n config_file = open('./config.txt', 'r')\r\n lines = config_file.readlines()\r\n for line in lines:\r\n line = line.rstrip('\\n\\r')\r\n if line.startswith(par_name):\r\n return line.split('=')[1]", "def path(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"path\")", "def first_path(self):\n\t\treturn self.args[1]", "def getconfig(filepath, param, default=None):\n with open(filepath, 'rb') as f:\n for line in f:\n if line.strip().startswith('#') or '=' not in line:\n continue\n k, v = line.split('=', 1)\n if k.strip() == param:\n return v.strip()\n return default", "def parameters_path(self):\n return self._parameters_path", "def get_default_params_file() -> Path:\n return get_path_to_pyflow() / \"pyflow\" / \"conf\" / RUN_PARAMS_FILENAME", "def get_pardir(file):\n return os.path.dirname(file)", "def path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"path\")", "def path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"path\")", "def path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"path\")", "def path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"path\")", "def path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"path\")", "def path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"path\")", "def path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"path\")", "def path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"path\")", "def path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"path\")", "def path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"path\")", "def path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"path\")", "def filepath(p):\n if os.path.isfile(p):\n return os.path.realpath(p)\n else:\n raise ArgumentTypeError('{} is not a file.'.format(p))", "def _getAbsolutePath(self, filename):\n\n # find the correct path, in the experiment file they are either\n # relative to the experiment file, or an absolute path\n if filename != os.path.abspath(filename):\n return os.path.join(self._path, filename)\n else:\n return filename", "def full_file_path_f(self, *args, **kwargs):\n return '%s/%s' % (self.location_f(*args, **kwargs), self.key_f(*args, **kwargs))", "def getInputFilename():\n\n argvList = sys.argv\n # print \"argvList=%s\"%(argvList)\n return argvList[0]", "def get_abs_path(self, value):\n return os.path.abspath(os.path.expanduser(os.path.expandvars(value)))" ]
[ "0.7624893", "0.68737656", "0.670442", "0.6365876", "0.6337929", "0.6162294", "0.615216", "0.61517036", "0.6106824", "0.60423446", "0.6040788", "0.5980116", "0.5948549", "0.5932665", "0.58959836", "0.58959836", "0.58959836", "0.58959836", "0.58959836", "0.58959836", "0.58959836", "0.58959836", "0.58959836", "0.58959836", "0.58959836", "0.58887947", "0.58802277", "0.58759207", "0.5869326", "0.5866889" ]
0.7436746
1
Tests if friendship target>friend is defined
def test_friendship (target, friend):
    db = getattr(g, 'db', None)
    if target == friend:
        return True
    else:
        qry = "SELECT target, friend FROM friends WHERE \
               target=(SELECT id FROM profiles WHERE username = %s) AND \
               friend=(SELECT id FROM profiles WHERE username = %s);"
        with db as cur:
            lines = cur.execute(qry, (friend, target))
            return lines>0
    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_friendliness(self):\n trait = self.traitDao.get_friendliness(self.name)\n if trait is None:\n return False\n else:\n return True", "def is_mutual_frined(self, friend):\n if friend in self.friends.all():\n return True\n return False", "def is_mutual_friend(self, friend):\n if friend in self.friends.all():\n return True\n return False", "def is_mutual_friend(self, friend):\n\t\tif friend in self.friends.all():\n\t\t\treturn True\n\t\treturn False", "def has(self, target):\r\n return target in self.by_target", "def has(self, target):\n return target in self.by_target", "def confirm_request_to_be_friends(self, user_id, target_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None or len(user_id) == 0:\n raise Exception(\"Bad parameter.\")\n if target_id is None or len(target_id) == 0:\n raise Exception(\"Bad parameter.\")\n\n if self.database.delete_pending_friend_request(user_id, target_id):\n return self.database.create_friend(user_id, target_id)\n return False", "def add_friendship(self, user_id, friend_id):\n if user_id == friend_id:\n # print(\"WARNING: You cannot be friends with yourself\")\n return False\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n # print(\"WARNING: Friendship already exists\")\n return False\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)\n\n return True", "def goal_check(current_node, target_node):\n if current_node.id == target_node.id:\n return True\n else:\n return False", "def addFriendship(self, userID, friendID):\n if userID == friendID:\n print(\"WARNING: You cannot be friends with yourself\")\n return False\n elif friendID in self.friendships[userID] or userID in self.friendships[friendID]:\n print(\"WARNING: Friendship already exists\")\n return False\n else:\n self.friendships[userID].add(friendID)\n self.friendships[friendID].add(userID)\n return True", "def request_to_be_friends(self, user_id, target_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None or len(user_id) == 0:\n raise Exception(\"Bad parameter.\")\n if target_id is None or len(target_id) == 0:\n raise Exception(\"Bad parameter.\")\n return self.database.create_pending_friend_request(user_id, target_id)", "def add_friendship(self, user_id, friend_id):\n if user_id == friend_id:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)", "def add_friendship(self, user_id, friend_id):\n if user_id == friend_id:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)", "def add_friendship(self, user_id, friend_id):\n if user_id == friend_id:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)", "def IsTarget(self, target_name):\n return target_name in self.GetTargets()", "def has_neighbor(self):\n if self.cur_neighbor is None:\n return 
False\n if self.cur_neighbor['app_feat'] is None:\n return False\n return True", "def is_member(self, node):\n return node in self._members", "def has_target(self):\n return self.target is not None", "def has_relation(\n self, source: Tuple[str, str], target: Tuple[str, str], relation: str\n ) -> bool:\n res = self.get_relations(source, target, relation, limit=1)\n if res:\n return True\n else:\n return False", "def has_object_permission(self, request, view, obj):\n if request.user == obj.family or obj.family is None:\n return True\n return False", "async def send_friend_request(self, TargetId: int):\n data = {\n 'targetUserId': TargetId\n }\n e = await self.request.request(url=f'https://friends.roblox.com/v1/users/{TargetId}/request-friendship',\n method='post',\n data=data)\n return e", "def is_target(X, require_attrs=None):\n\n if require_attrs is None:\n require_attrs = (\n name for name in vars(Target) if not name.startswith(\"_\")\n )\n\n return all([hasattr(X, name) for name in require_attrs])", "def addFriendship(self, userID, friendID):\n if userID == friendID:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friendID in self.friendships[userID] or userID in self.friendships[friendID]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[userID].add(friendID)\n self.friendships[friendID].add(userID)", "def addFriendship(self, userID, friendID):\n if userID == friendID:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friendID in self.friendships[userID] or userID in self.friendships[friendID]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[userID].add(friendID)\n self.friendships[friendID].add(userID)", "def at_target(self):\n return self.location == self.target_location", "def is_member(self) -> bool:\n if self._is_member is _missing:\n return False\n return self._is_member", "def is_gentarget(self, target):\r\n raise NotImplementedError", "def ensure_access(self, target_member : M, accessor : M, permission : str):\n if not permission: \n return True\n if accessor is None:\n raise errors.NotAllowed(\"Accessor not found\")\n if target_member != accessor:\n raise errors.NotAllowed(\"Access not allowed for permission '%s'\" % permission)\n return True", "def is_relation(self, rel_name):\n return rel_name in self._declaration", "def route_is_contained_in_other_route(route,target):\n id_route = 0\n id_target = 0\n found = True\n while found and id_route < len(route) and id_target < len(target):\n found = False\n while not found and id_target < len(target):\n if route[id_route] == target[id_target]:\n found = True\n else:\n id_target += 1\n id_route += 1\n return found" ]
[ "0.6714983", "0.6594233", "0.6423168", "0.6402457", "0.5981811", "0.5911614", "0.59064144", "0.5853339", "0.5714427", "0.56824875", "0.5639276", "0.56333655", "0.56333655", "0.56333655", "0.54966736", "0.5464046", "0.5442402", "0.5438603", "0.5426189", "0.5373081", "0.5360212", "0.53313947", "0.5319267", "0.5319267", "0.53096694", "0.53077656", "0.530382", "0.52966714", "0.5296015", "0.52776456" ]
0.73897105
0
Retrieves profile for username
def get(self, username):
    db = getattr(g, 'db', None)
    qry = "SELECT username,email,active,steamid FROM\
           profiles WHERE username = %s;"
    with db as cursor:
        cursor.execute(qry, (username,))
        return {'profile':cursor.fetchone()}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_profile(username):\n if username not in Profiles.keys():\n return jsonify({'message': 'User {} not found'.format(username)}), 404\n\n return jsonify(Profiles[username]), 200", "def get(self, username):\n q = \"SELECT * FROM profiles where username = ?\"\n r = self._query(q, (username,), fetch='one')\n try:\n return r\n except Exception as e:\n raise e", "def get_user_profile(self, username: str = None) -> Profile:\n if username:\n res: dict[str, Any] = self.api.users.get(user_ids=username).pop()\n else:\n res = self.api.users.get().pop()\n return VkontakteConnector.build_profile(res)", "def read_user(self, username):\n\n self.c.execute(\"SELECT * FROM profiles WHERE name=?\", (username,))\n user_profile = self.c.fetchone()\n print user_profile\n return user_profile", "async def get_profile_by_username(self, *, username: str) -> ProfileInDB:\n profile = await self.db.fetch_one(query=GET_PROFILE_BY_USERNAME_QUERY, values={\"username\": username})\n if profile:\n return ProfileInDB(**profile)", "def get_username_profile(db, username):\n return db['user'].find_one({'username': username})", "def get_user_profile(self):\n return self.request('get', 'id/users')", "def profile(username):\n username = mongo.db.users.find_one(\n {\"username\": session[\"user\"]})[\"username\"]\n return render_template(\"profile.html\", username=username)", "def getProfile(self):\n # GET /profile\n debugMain('getProfile')\n return self._genericGet('/profile')", "def retrieve_profile(self, name):\n\n url = get_url('profile details', profile=name)\n response = self._get(url)\n raise_on_error(response)\n if response.status_code == 404:\n raise QarnotGenericException(response.json()['message'])\n return Profile(response.json())", "def get_my_profile(self):\n\n url = self.api_base_url + \"user/profile\"\n\n try:\n raw_response = self.request_handler.make_request(ApiRequestHandler.GET, url)\n except RequestFailed:\n raise\n\n jsonified_response = json.loads(raw_response.text)\n user_profile = jsonified_response\n\n return user_profile", "def get_profile(self):\n endpoint = '/profile'\n return self.get_request(endpoint)", "def profile(self, name=\"johndoe\"):\r\n url = \"/account/%s\" % name\r\n return self.app.get(url, follow_redirects=True)", "def get_user(self, username):\n\t\treturn self.users.get(username, None)", "def get(self, request, username, *args, **kwargs):\n try:\n profile = Profile.objects.select_related('user').get(\n user__username=username\n )\n except Profile.DoesNotExist:\n raise ProfileDoesNotExist\n\n serializer = self.serializer_class(profile)\n\n return Response(serializer.data, status=status.HTTP_200_OK)", "def show_user_profile(username):\n\n name = USERS[username]\n return f\"<h1>Profile for {name}</h1>\"", "def read_user_profile():\n logger.debug(\"entering function read_profile\")\n find_query = {\"user_id\": current_user.id}\n project_query = {\"_id\": 0, \"user_id\": 0, \"password\": 0}\n result = run_find_one_query(config.USERS_COL, find_query, project_query, error=True,\n error_msg=NO_USER_ERR_MSG)\n logger.info(\"fetched user profile for %s\", current_user.id)\n response = get_success_response(data=result)\n logger.debug(\"exiting function read_profile\")\n return response", "def _username_to_profile(self, username: str) -> Dict[str, Any]:\n\n base_url = self.keys.pre_profile + username + self.keys.rank_token + self.keys.post_profile\n\n # Build the page source url for the given user's account\n con = urllib.request.urlopen(base_url)\n user_profile = 
con.read().decode('utf-8')\n\n # Convert the webpage to a profile JSON\n profile: dict = json.loads(str(user_profile))\n return profile", "def get_profile():\n logger.debug(\"entering function get_profile\")\n response = read_user_profile()\n logger.debug(\"exiting function get_profile\")\n return jsonify(response)", "def get_by_username(self, username):\r\n return social_models.DjangoStorage.user.user_model().objects.get(username=username)", "def profile(username):\n # grab the session user's username from db\n username = mongo.db.users.find_one(\n {\"username\": session[\"user\"]})[\"username\"]\n if session[\"user\"]:\n return render_template(\"profile.html\", username=username)\n return redirect(url_for(\"login\"))", "def get(self, username=None):\n ownprofile = False\n if username is None:\n # try to use the logged in user if existing\n user = self.user\n if user is None:\n raise werkzeug.exceptions.NotFound()\n else:\n user = self.settings.users.get_by_id(username)\n if user is None:\n raise werkzeug.exceptions.NotFound()\n \n if self.user is not None:\n ownprofile = self.user['_id'] == user['_id']\n\n return self.render(myuser = user, ownprofile = ownprofile)", "def get_profile(user):\n if user.is_authenticated():\n # Return the PootleProfile associated with authenticated users\n return user.get_profile()\n else:\n # Anonymous users get the PootleProfile associated with the 'nobody' user\n return User.objects.get(username='nobody').get_profile()", "def get_user_profile(self):\n return self.user.profile", "def get_user_info_by_name(self, username: str) -> dict:", "def profile(username):\n user = session.get('username')\n if user is not None:\n if user == username:\n current_user = User.from_mongo(**mongo.db.users.find_one({\"name\": session.get('username')}))\n return render_template(\"user_profile.html\", title=\"My Profile\", user=current_user)\n else:\n user = User.from_mongo(**mongo.db.users.find_one({'name': username}))\n return render_template(\"user_profile.html\", title=f\"{user.name}'s Profile\", user=user)\n else:\n flash('Please log in to access user profile')\n return redirect(url_for('login'))", "def _getProfileFromUser(self):\n # Make sure user is authenticated\n user = endpoints.get_current_user()\n if not user:\n raise endpoints.UnauthorizedException('Authorization required')\n # Get Profile from datastore\n user_id = user.email()\n p_key = ndb.Key(Profile, user_id)\n profile = p_key.get()\n # Create new Profile if not there\n if not profile:\n profile = Profile(\n key = p_key,\n displayName = user.nickname(),\n mainEmail= user.email(),\n teeShirtSize = str(TeeShirtSize.NOT_SPECIFIED),\n )\n profile.put()\n return profile", "def profile(request, username):\n # Get profile information for a user. 
Use iexact for case-insensitive query\n try:\n profile = User.objects.get(username__iexact=username)\n except ObjectDoesNotExist:\n profile = None\n return render(request, \"network/profile.html\", {\"profile\": profile})\n\n # Find all users following the user whose profile being visited\n followers = User.objects.filter(following=profile.id)\n\n # Get posts for users and put in paginator format\n posts = Post.objects.filter(author=profile).order_by('-timestamp')\n paginator = Paginator(posts, 10)\n\n page_number = request.GET.get('page')\n page_object = paginator.get_page(page_number)\n\n return render(request, \"network/profile.html\", {\n \"profile\": profile, \"followers\": followers, \"posts\": page_object\n })", "def get_user_profile(self):\n\t\treturn Job(SDK.PrlSrv_GetUserProfile(self.handle)[0])", "def profile_details(self, profile_name):\n url = get_url('profile details', profile=profile_name)\n response = self._get(url)\n if response.status_code == 404:\n return None\n raise_on_error(response)\n return Profile(response.json())" ]
[ "0.8413499", "0.82812774", "0.8042954", "0.7941889", "0.78936595", "0.779309", "0.76990896", "0.7561721", "0.74403584", "0.7425508", "0.7377645", "0.7368524", "0.73320585", "0.7236102", "0.7218599", "0.7194637", "0.7192519", "0.71801066", "0.7172978", "0.7171257", "0.71467376", "0.7129256", "0.7101104", "0.7092585", "0.7063038", "0.7049928", "0.7028689", "0.7028117", "0.70273864", "0.698442" ]
0.8399312
1
The mapping of the redirectes.
def redirects(self): return self.data.setdefault('redirects', {})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_forward_mapping(self):", "def redirects(self) -> List[str]:\n return self.root_hartree.redirects", "def redirect_info(self) -> global___RedirectInfo:", "def mapping(self):\n return self.request('_mapping', pylastica.request.Request.GET).data", "def redirects(self) -> Sequence['outputs.RedirectResponse']:\n return pulumi.get(self, \"redirects\")", "def mapping(self):\n return self._mapping", "def _load_redirects(self):\n if self.redirects is None:\n self.redirects = dict()\n redirectsfile = osp.join(self.basepath, 'redirects.csv')\n if os.path.exists(redirectsfile):\n reader = unicode_csv_reader(open(redirectsfile))\n self.redirects = dict((rows[0], rows[1]) for rows in reader)", "def _mapping(self):\n return [('auth.check', self.notify)]", "def _do_mapping(self):\n pass", "def mapping_names(self):\n return [self.basename]", "def redirect_configs(self) -> Sequence['outputs.GetRulesRuleRuleActionRedirectConfigResult']:\n return pulumi.get(self, \"redirect_configs\")", "def edge_mapping(self):\n ...", "def handler_mappings(self):\n return {}", "def load_redirects(self) -> None:\n\n self._redirects_file = self._siteroot / \"_redirects\"\n\n if not self._redirects_file.exists():\n return\n\n with open(str(self._redirects_file)) as file:\n for line in file:\n if not line or line.strip() == \"\" or line[0] == \"#\":\n continue\n\n p = line.split()\n if len(p) < 2:\n continue\n\n self._redirects[p[0]] = p[1]", "def career_map(self):\n return reverse(\n \"career_map\", args=[self.slug]\n )", "def _url_map(self):\n return Map([\n Rule('/init', endpoint='init'),\n Rule('/op/<name>', endpoint='op'),\n Rule('/handler/<name>', endpoint='handler'),\n Rule('/hook/<name>', endpoint='hook'),\n Rule('/provider/<name>/<action>', endpoint='provider'),\n Rule('/timer/<name>', endpoint='timer'),\n ])", "def redirect_uris(self) -> Sequence[str]:\n return pulumi.get(self, \"redirect_uris\")", "def _get_static_route_map(self):\n return self.__static_route_map", "def pathMap(self):\n pass", "def _mapping(self):\n return [('message.received', self.on_new_message), \\\n ('message.read.prevent', self.can_not_read)]", "def itermappings(self):\r\n return self.by_target.iteritems()", "def _get_route_map(self):\n return self.__route_map", "def action_map(self):\n return self._action_map", "def applyMapping(self):\n pass", "def redirect_uris(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"redirect_uris\")", "def security_mappings(self):\n return self._security_mappings", "def copy_forward_mapping(self) -> Dict[str, Set[str]]:\n return deepcopy(self._forward_mapping)", "def callback_map(self):\n return self._callback_map", "def _add_redirects(self):\n with open(REDIRECTS_FILE) as mapping_fd:\n reader = csv.reader(mapping_fd)\n for row in reader:\n if not row or row[0].strip().startswith(\"#\"):\n continue\n\n html_path = os.path.join(BUILD_PATH, \"html\")\n path = os.path.join(html_path, *row[0].split(\"/\")) + \".html\"\n\n if not self.include_api and (\n os.path.join(html_path, \"reference\") in path\n or os.path.join(html_path, \"generated\") in path\n ):\n continue\n\n try:\n title = self._get_page_title(row[1])\n except Exception:\n # the file can be an ipynb and not an rst, or docutils\n # may not be able to read the rst because it has some\n # sphinx specific stuff\n title = \"this page\"\n\n with open(path, \"w\") as moved_page_fd:\n html = f\"\"\"\\\n<html>\n <head>\n <meta http-equiv=\"refresh\" content=\"0;URL={row[1]}.html\"/>\n </head>\n <body>\n <p>\n The page has been 
moved to <a href=\"{row[1]}.html\">{title}</a>\n </p>\n </body>\n<html>\"\"\"\n\n moved_page_fd.write(html)", "def node_mapping(self):\n ..." ]
[ "0.6916905", "0.6815443", "0.67125463", "0.662277", "0.61875254", "0.60155004", "0.59925747", "0.59136647", "0.5907251", "0.59005374", "0.5879071", "0.58469695", "0.584341", "0.5838869", "0.5830796", "0.58191055", "0.57895535", "0.57752067", "0.5740669", "0.5731347", "0.57131934", "0.57054615", "0.569902", "0.5687463", "0.56659716", "0.5647317", "0.56199336", "0.56187826", "0.56073797", "0.5604737" ]
0.7429613
0
Setup the point and the opponent population.
def setup(self, teams_population):
    # initialize point population
    if self.first_sampling_:
        self.first_sampling_ = False
        population = self._initialize_random_population_of_points(
            Config.USER['training_parameters']['populations']['points'], ignore_cache = False)
        subsets_per_label = self._get_data_per_label(population)
        total_samples_per_class = (Config.USER['training_parameters']['populations']['points']
            /self.total_labels_)
        balanced_subsets = []
        for subset in subsets_per_label:
            if len(subset) > total_samples_per_class:
                subset = random.sample(subset, total_samples_per_class)
            balanced_subsets.append(subset)
        self.point_population_ = flatten(balanced_subsets)
    else:
        # uses attributes defined in evaluate_point_population()
        self._remove_points(self.samples_per_class_to_remove_, teams_population)
        self.point_population_ = self.samples_per_class_to_keep_
    random.shuffle(self.point_population_)

    # setup hall of fame
    if Config.USER['reinforcement_parameters']['hall_of_fame']['enabled']:
        hall_of_fame = self.opponent_population_['hall_of_fame']
        if self.team_to_add_to_hall_of_fame_:
            team_to_copy = self.team_to_add_to_hall_of_fame_
            copied_team = Team(team_to_copy.generation, list(team_to_copy.programs),
                team_to_copy.environment)
            copied_team.team_id_ = team_to_copy.team_id_
            copied_team.fitness_ = team_to_copy.fitness_
            copied_team.active_programs_ = list(team_to_copy.active_programs_)
            copied_team.validation_active_programs_ = list(team_to_copy.validation_active_programs_)
            copied_team.encodings_ = copy.deepcopy(team_to_copy.encodings_)
            copied_team.extra_metrics_ = dict(team_to_copy.extra_metrics_)
            copied_team.opponent_id = "hall_of_fame"
            hall_of_fame.append(copied_team)
            if len(hall_of_fame) > Config.USER['reinforcement_parameters']['hall_of_fame']['size']:
                if Config.USER['reinforcement_parameters']['hall_of_fame']['diversity']:
                    novelty = Config.USER['reinforcement_parameters']['hall_of_fame']['diversity']
                    DiversityMaintenance.calculate_diversities_based_on_distances(hall_of_fame,
                        k = Config.USER['reinforcement_parameters']['hall_of_fame']['size'],
                        distances = [novelty])
                    keep_teams, remove_teams, pareto_front = ParetoDominanceForTeams.run(hall_of_fame,
                        novelty, Config.USER['reinforcement_parameters']['hall_of_fame']['size'])
                    removed_point = [p for p in hall_of_fame if p == remove_teams[0]]
                    worst_point = removed_point[0]
                else:
                    score = [p.fitness_ for p in hall_of_fame]
                    worst_point = hall_of_fame[score.index(min(score))]
                self.opponent_population_['hall_of_fame'].remove(worst_point)
            self.team_to_add_to_hall_of_fame_ = None

    # add hall of fame opponents to opponent population
    if Config.USER['reinforcement_parameters']['hall_of_fame']['enabled']:
        if len(self.opponent_population_['hall_of_fame']) >= Config.USER['reinforcement_parameters']['hall_of_fame']['opponents']:
            options = list(self.opponent_population_['hall_of_fame'])
            self.current_hall_of_fame_opponents_ = []
            for option in range(Config.USER['reinforcement_parameters']['hall_of_fame']['opponents']):
                opponent = random.choice(options)
                options.remove(opponent)
                self.current_hall_of_fame_opponents_ += [opponent]*self.matches_per_opponent_per_generation_
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setUp(self):\n self.precip_cube = setup_precipitation_cube()\n self.oe_cube = setup_orographic_enhancement_cube()\n self.vel_x = set_up_xy_velocity_cube(\"advection_velocity_x\")\n self.vel_y = set_up_xy_velocity_cube(\"advection_velocity_y\")\n for cube in [self.precip_cube, self.oe_cube]:\n cube.coord(\"projection_x_coordinate\").points = 600 * np.arange(3)\n cube.coord(\"projection_y_coordinate\").points = 600 * np.arange(4)", "def pointsSetUp(self):\r\n self.background.draw(self.surface)\r\n for i in range(len(self.points)):\r\n self.points[i].organize()\r\n self.points[i].update()\r\n self.points[i].addNumber(i)\r\n self.points[i].setActiveTurn()", "def setUp(self):\n N = 10\n pA = 0.5\n pB = 0.5\n qA = 1 - pA\n qB = 1 - pB\n locus_A = (['A'] * int(N * pA)) + (['a'] * int(round(N * qA)))\n locus_B = (['B'] * int(N * pB)) + (['b'] * int(round(N * qB)))\n self.pop = population.Population(N, locus_A, locus_B)", "def setUp(self):\n\n self.eps = 0.001 # Accept 0.1 % relative error\n\n self.RSISE = Point(-35.27456, 149.12065)\n self.Home = Point(-35.25629, 149.12494) # 28 Scrivener Street, ACT\n self.Syd = Point(-33.93479, 151.16794) # Sydney Airport\n self.Nadi = Point(-17.75330, 177.45148) # Nadi Airport\n self.Kobenhavn = Point(55.70248, 12.58364) # Kobenhavn, Denmark\n self.Muncar = Point(-8.43, 114.33) # Muncar, Indonesia", "def assign_points(players):\n pass", "def setUp(self):\n self.location = [(0, 0), (0, 1)]\n self.hit = (0, 0)", "def setup(self):\n self.poly2 = Polygon([(145, 60), (201, 69), (265, 46), (333, 61), (352, 99), (370, 129), (474, 138), (474, 178), (396, 225), (351, 275), (376, 312), (382, 356), (338, 368), (287, 302), (224, 304), (128, 338), (110, 316), (129, 270), (83, 231), (65, 51), (83, 163), (103, 201), (90, 74), (126, 162)])\n self.poly2.set_direction(\"E\")\n self.poly1 = Polygon([(905, 328),(877, 367),(944, 413),(1004, 384),(1019, 307),(953, 248),(880, 250),(865, 278),(883, 325)])\n self.poly1.set_direction(\"SW\")\n self.poly3 = Polygon([(900, 600), (950,650), (1000, 500)])\n self.poly3.set_direction(\"N\")\n self.p1 = Point(485, 138)\n self.p1.set_direction(\"SE\")\n self.p2 = Point(self.width/2, self.height/2)\n self.p2.set_direction(\"NW\")\n self.p3 = Point(86,163)\n self.p3.set_direction(\"SE\")\n #a separate list for each different type of shape for collision purposes.\n self.polys = [self.poly1, self.poly2, self.poly3]\n self.points = [self.p1, self.p2, self.p3]", "def setUp(self):\n d = self.deck = TestDeck()\n self.game = test_setup.two_player_lead('Laborer', deck=d)\n self.p1, self.p2 = self.game.players", "def __init__(self, pt1, pt2):\n self.set_points(pt1, pt2)", "def setup_game(self, player, opponent):\n\n self.display.clear_screen()\n\n ship_index = 0\n\n while not player.ready(len(self.SHIP_INFO)):\n # prints the currrent board\n board = self.display.construct_player_board(player, opponent, True)\n self.display.print_board(board)\n\n ship_name, ship_length = self.SHIP_INFO[ship_index]\n ship_to_add = Ship(ship_name, ship_length)\n\n try:\n player.add_ship(ship_to_add)\n except Exception as e:\n ship_to_add = player.ships[ship_index]\n\n origin, orientation = self.display.prompt_for_ship_placement(\n ship_to_add)\n\n try:\n player.place_ship(ship_to_add, origin, orientation,\n self.BOARD_SIZE)\n except ValueError as ve:\n self.display.clear_screen()\n print(ve)\n print()\n continue\n\n self.display.clear_screen()\n ship_index += 1\n self.display.prompt_switch(opponent.name)", "def setUp(self):\n self.player = ship.Player(\n 
constants.PLAYER_START_PLACE,\n constants.PLAYER_WIDTH,\n constants.PLAYER_HEIGHT,\n constants.PLAYER_IMG,\n constants.PLAYER_HEALTH\n )\n\n self.alien = ship.Alien(\n [320, 300],\n 30,\n 30,\n constants.GREEN_ALIEN_IMG,\n 1\n )\n\n self.alien.shooting([320, 300], 5, False)\n\n self.player.shooting([self.player.position[0] + 3, self.player.position[1]], 1, True)", "def setup(self):\n\n self.points = [[0.360502, 0.535494],\n [0.476489, 0.560185],\n [0.503125, 0.601218],\n [0.462382, 0.666667],\n [0.504702, 0.5]]\n self.max_neighbors = 4\n self.beta = 1\n self.graph = 'beta skeleton'\n self.edges = [0, 1, 0, 2, 0, 3, 0, 4,\n 1, 3, 1, 4,\n 2, 3, 2, 4,\n 3, 4]", "def init_population(self):\n print('Initializing...')\n for i in range(self.part_num):\n x = Particle()\n # initialize random position\n x.Pos = np.zeros(self.dim)\n for j in range(len(x.Pos)):\n x.Pos[j] = np.random.uniform(self.var_size[j][0], self.var_size[j][1])\n # calculate cost from random parameters\n #print(x.Pos)\n x.Cost = self.objective(x.Pos)\n x.Vel = np.zeros(self.dim)\n x.Best_pos = x.Pos\n x.Best_cost = x.Cost\n self.particle.append(x)\n\n if self.particle[i].Best_cost < self.GlobalBest_Cost:\n self.GlobalBest_Cost = self.particle[i].Best_cost\n self.GlobalBest_Pos = self.particle[i].Best_pos\n self.Best_Cost.append(self.GlobalBest_Cost)\n print('Initialize complete, with best cost =',\n self.GlobalBest_Cost, \n \"\\nTemporary best solution:\", \n self.GlobalBest_Pos)", "def setup(self):\n self.board[(3, 3)] = -1\n self.board[(3, 4)] = -1\n self.board[(4, 3)] = 1\n self.board[(4, 4)] = 1\n\n self.stones_set = 4", "def game_setup(self):\n self.deck = Shoe(6)\n self.initial_draw()\n self.pot = ask_for_bet(self.player.money)\n show_table(self.player, self.dealer, self.pot)\n self.surrender_and_insurance()", "def setupTown(self):\n\t\t# create a test square to determine participant distance\n\t\tself.vr.resetEnvironment()\n\t\t\n\t\tself.vr.addSkyBox(self.config.blackImage)\n\t\tself.vr.addFloorBox(0.0, -1.0, 0.0, self.config.unitScale, self.config.unitScale, self.config.unitScale,\n\t\t\t\t\t\tself.config.blackImage, None, self.config.blackImage, None)\n\t\tself.vr.setGravity(0.0, -0.1, 0.0)\n\t\tself.vr.addPlaneGeom(0.0, 1.0, 0.0, 0.0, mu = 0.0)\n\t\tself.vr.addBuildingBox(0.0, 0.95, -0.5, self.config.whiteImage, 0.1, 0.1)", "def setUp(self):\n self.pts = ((0, 0, 0), (1, 1, 1), (1, 0, 2), (0, 1, 2), (0.5, 1.5, 1))\n self.tris = (0, 2, 1, 0, 1, 3, 3, 1, 4)", "def __init__(self, grid_size, num_pokemon):\n self._game_board = UNEXPOSED * (grid_size ** 2)\n self._num_pokemon = num_pokemon\n self._pokemon_location = self.generate_pokemons(grid_size)", "def init_population(self):\n pass", "def setup(config, session, pts_all):\n optic = config['Optic']\n general = config['General']\n\n numFrames_total_rough = session['frames_total']\n numVids = session['num_vids']\n spacing = optic['spacing']\n\n bbox_subframe_displacement = pts_all['bbox_subframe_displacement']\n pts_displacement = pts_all['pts_displacement']\n pts_x_displacement = pts_all['pts_x_displacement']\n pts_y_displacement = pts_all['pts_y_displacement']\n mask_frame_displacement = pts_all['mask_frame_displacement']\n\n ## Make point cloud\n pts_spaced = np.ones((np.int64(bbox_subframe_displacement[3] * bbox_subframe_displacement[2] / spacing),\n 2)) * np.nan ## preallocation\n cc = 0 ## set idx counter\n\n # make spaced out points\n for ii in range(len(pts_x_displacement)):\n if (pts_x_displacement[ii] % spacing == 0) and (pts_y_displacement[ii] % spacing == 0):\n 
pts_spaced[cc, 0] = pts_x_displacement[ii]\n pts_spaced[cc, 1] = pts_y_displacement[ii]\n cc = cc + 1\n\n pts_spaced = np.expand_dims(pts_spaced, 1).astype('single')\n pts_spaced = np.delete(pts_spaced, np.where(np.isnan(pts_spaced[:, 0, 0])), axis=0)\n print(f'number of points: {pts_spaced.shape[0]}')\n\n ## Define random colors for points in cloud\n color_tuples = list(np.arange(len(pts_x_displacement)))\n for ii in range(len(pts_x_displacement)):\n color_tuples[ii] = (np.random.rand(1)[0] * 255, np.random.rand(1)[0] * 255, np.random.rand(1)[0] * 255)\n\n ## Preallocate output variables\n\n # I add a bunch of NaNs to the end because the openCV estimate is usually less than the actual number of frames\n displacements = np.ones((pts_spaced.shape[0], 2, np.uint64(\n numFrames_total_rough + numFrames_total_rough * 0.1 + (numVids * 1000)))) * np.nan\n positions_recursive = np.ones((pts_spaced.shape[0], 2, np.uint64(\n numFrames_total_rough + numFrames_total_rough * 0.1 + (numVids * 1000)))) * np.nan\n\n ## Preset point tracking variables\n pointInds_toUse = copy.deepcopy(pts_spaced)\n pointInds_tracked = pointInds_toUse ## set the first frame to have point locations be positions in the point cloud\n pointInds_tracked_tuple = list(np.arange(pointInds_toUse.shape[0]))\n\n return pointInds_toUse, pointInds_tracked, pointInds_tracked_tuple, displacements, pts_spaced, color_tuples , positions_recursive", "def __init__(self, points):\n self.points = points\n self.init()", "def _setup_move(self, position):\n self.log.debug(\"%s.setpoint = %s\", self.name, position)\n self.setpoint.put(position, wait=True)\n if self.actuate is not None:\n self.log.debug(\"%s.actuate = %s\", self.name, self.actuate_value)\n self.actuate.put(self.actuate_value, wait=False)", "def run(self):\n print('PSO start running...')\n self.init_population()\n self.iterator()\n print(\"Iteration completed.\")\n self.plot_curve()\n print_params(self.GlobalBest_Pos, self.candidate, net=self.net)", "def choose_location(self):\n location = self.data['locations'].pop()\n self.stats['opponent'] = location", "def setUp (self) :\n\t\t\n\t\tself.person1 = Person(\"Black Knight\", -100)\n\t\tself.healthEffect = -5\n\t\tself.person2 = Person(\"Knights who say Ni\", self.healthEffect)\n\t\tself.person3 = Person(\"King Arthur\")\n\t\tself.healthEffect2 = -55\n\t\tself.person3.setHealthEffect (self.healthEffect2)", "def InitPointR(session):\n global point_r\n # points are only buildings\n q = session.query(melt.BuildAssoc).filter_by(point=1)\n point_r = set([it.osm_build for it in q.all()])", "def setUp(self):\r\n self.spaceship = SpaceShipGame()\r\n self.spaceship.init()", "def setUp(self):\r\n self.spaceship = SpaceShipGame()", "def setUp(self):\r\n self.spaceship = SpaceShipGame()", "def setUp(self):\r\n self.spaceship = SpaceShipGame()" ]
[ "0.6427155", "0.6339535", "0.62995", "0.6293595", "0.62396276", "0.622806", "0.6038252", "0.60198253", "0.59984106", "0.5886223", "0.5802406", "0.5793876", "0.57286704", "0.5708907", "0.570502", "0.5701593", "0.5697477", "0.5675635", "0.56636167", "0.5645536", "0.563043", "0.56068826", "0.55953777", "0.5592649", "0.5587599", "0.55655503", "0.55646116", "0.5560134", "0.5560134", "0.5560134" ]
0.6626061
0
Remove the points to remove from the teams, in order to save memory.
def _remove_points(self, points_to_remove, teams_population):
    for team in teams_population:
        for point in points_to_remove:
            if point.point_id_ in team.results_per_points_:
                team.results_per_points_.pop(point.point_id_)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def del_points(self):\r\n del self._points", "def destroy(self):\n\t\tfor team in range(len(self.dots)): #will cycle through each team\n\t\t\tfor i in range(len(self.dots[team])): #will cycle through each member of the team\n\t\t\t\tdot = self.dots[team][i]\n\t\t\t\tdot.removeNode()\n\t\tself.mousePosition.removeNode()\n\t\tself.mapimage.removeNode()\n\t\tself.map.removeNode()", "def cull(self) -> None:\n for player in self.players:\n to_remove = [creature for creature in player.battle_line if creature.damage_taken >= creature.power()]\n for creature in to_remove:\n player.battle_line.remove(creature)\n to_remove.destroyed(self, creature)", "def clear_points(self):\n print \"clearing each frame of selected points\"\n self.point_3d = None\n self.allUVs = []\n for iFrame in self.frames:\n iFrame.lastClick = None; \n self.displayImage(iFrame)", "def clear(self):\n self.pointscontroller.pop(self.currentlyadded)", "def remove(self):\n\n self.last_move = \"\"\n self.collision_boxes = []\n self.removed=True", "def remove_users_from_team(team, users):\n team_member_list = []\n for user in users:\n member_list = TeamMember.objects.filter(team_fk=team, user_fk=user)\n if not member_list:\n raise Exception('Some users do not belong this team')\n team_member_list.append(member_list[0])\n \n if any([m.is_leader for m in team_member_list]):\n team.delete()\n else:\n for m in team_member_list:\n m.delete()", "def removeFromPlayerList(self):\n\t\tfor x in self.playerRemoveList:\n\t\t\tself.removePlayer(x)", "def clear(self):\n self.best_moves = []\n self.best_times = []", "def test_unassign_managing_team(self):\n pass", "def cleanup():\n for s in [missiles, explosions, bonus]:\n\n set_to_remove = set([])\n for m in s:\n if m.isDead:\n set_to_remove.add(m)\n\n s.difference_update(set_to_remove)", "def test_teams_remove_user_from_team_v2(self):\n pass", "def remove_all(self):\n self.initial = None\n self.contour = None\n self.control_points = []", "def remove():", "def test_teams_remove_user_from_team_v1(self):\n pass", "def __isub__(self, point):\n self.points.remove(point)\n return self", "def clear_for_new_board(self):\r\n self.game_board = []\r\n self.good_contours = []\r\n self.game_board_contours = []", "def check_existing_teams(user, teams_from_lms):\n teams = user.teams.all()\n for team in teams:\n if team not in teams_from_lms:\n user.teams.remove(team)", "def deleteBolts(self):\n a = self.get_bolts()\n for i in self.get_bolts():\n if i.y>GAME_HEIGHT:\n a.remove(i)\n self.set_plyrbolts(0)\n elif i.y<=-BOLT_HEIGHT:\n a.remove(i)", "def remove_figures_already_got(results: list, points: dict) -> list:\n\n results_copy = results[:]\n for result in results_copy:\n if points[result[0]] != 0:\n results.remove(result)\n\n return results", "def removePoint(self, point):\n self.points.remove(point)", "def remove_possibles(self):\n for row in range(self.board_size):\n for col in range(self.board_size):\n self.remove_poss(row, col)", "def test_remove_team_manager_from_team(self):\n pass", "def undoScore(list_teams, roundScore):\n\tfor t, s in zip(list_teams, roundScore):\n\t\tt.roundPoints(-s)\n\t\tt.roundNumber -= 1", "def pruning(self):\n data = self.data.copy()\n for d in self.data:\n # cascade purning method. 
Inspired from \"Efficient Computation of Group Skyline Queries on MapReduce (FCU)\"\n if d in data:\n pastart = [self.drange[1] if i+self.radius>self.drange[1] else i+self.radius for i in d.getLocationMax()]\n pamax = [self.drange[1] for j in range(self.dim)]\n # prune data points that are obviously dominated by current data point\n pruned = (self.index.intersection(tuple(pastart+pamax),objects=True))\n for p in pruned:\n if p.object in data:\n data.remove(p.object)\n self.pruned = data", "def removeIfDead(self):\n if self.y < 0:\n del projectiles[findPlace(self, projectiles)]", "def clearList(self):\r\n self.players.clear()", "def remove(self):", "def remove(self, pieces):\n for piece in pieces:\n self.board[piece.row][piece.col] = None\n if piece.get_player() is Player.white:\n self.num_white_pieces -= 1\n if piece.is_king():\n self.num_white_kings -= 1\n\n elif piece.get_player() is Player.black:\n self.num_black_pieces -= 1\n if piece.is_king():\n self.num_black_kings -= 1", "def undeletepoints(self, x, y=None):\n if len(self.dxp)<1: return\n if len(self.dxp)==1: \n self.xp.append(self.dxp[0])\n self.wp.append(self.dwp[0])\n self.dxp.__delitem__(0)\n self.dwp.__delitem__(0)\n return\n\n dist=(self.dxp-x)**2\n if y is not None: \n w=self.ws.value(np.array(self.dxp))\n #dist += (self.dwp-w-y)**2\n in_minw=dist.argmin()\n\n self.xp.append(self.dxp[in_minw])\n self.wp.append(self.dwp[in_minw])\n self.dxp.__delitem__(in_minw)\n self.dwp.__delitem__(in_minw)\n\n return" ]
[ "0.67026097", "0.6327885", "0.62851137", "0.62131107", "0.6038065", "0.6032935", "0.5973593", "0.596316", "0.5955202", "0.5929192", "0.5922632", "0.58826", "0.58732504", "0.5854974", "0.5841126", "0.5835358", "0.5800378", "0.5777191", "0.57631856", "0.57612944", "0.5752895", "0.5731823", "0.57163036", "0.5711382", "0.57031584", "0.5700004", "0.5694804", "0.567065", "0.5668086", "0.5602469" ]
0.8073968
0
Fixes the y coordinates of external port dummies in the given layer.
def fixCoordinates(self, layer: LNodeLayer, layeredGraph: LGraph):
    portConstraints = layeredGraph.portConstraints
    if not (portConstraints.isRatioFixed() or portConstraints.isPosFixed()):
        # If coordinates are free to be set, we're done
        return

    graphHeight = self.layeredGraph.getActualSize().y

    # Iterate over the layer's nodes
    for node in layer:
        # We only care about external port dummies...
        if node.type != NodeType.EXTERNAL_PORT:
            continue

        # ...representing eastern or western ports.
        extPortSide = node.extPortSide
        if extPortSide != PortSide.EAST and extPortSide != PortSide.WEST:
            continue

        finalYCoordinate = node.portRatioOrPosition
        if portConstraints == PortConstraints.FIXED_RATIO:
            # finalYCoordinate is a ratio that must be multiplied with the
            # graph's height
            finalYCoordinate *= graphHeight

        # Apply the node's new Y coordinate
        node.possition.y = finalYCoordinate - node.portAnchor.y
        node.borderToContentAreaCoordinates(False, True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_dense(self, layer):\n pass", "def layer(self, layer):\n self._layer = layer", "def move_stage_to_y(self, y):\n raise NotImplementedError", "def update_ballpos(self,pos):\n if self.options.visualize_switch_xy:\n self.col.set_offsets(pos[:,::-1]) # reverse x-y direction\n else:\n self.col.set_offsets(pos)", "def apply_preprocess_y(self,Y):\n Y = super(Diff_Generator, self).apply_preprocess_y(Y)\n Y[:,1:,1:,:] = Y[:,1:,1:,:] - Y[:,1:,:-1,:] # 0 correspond to the upper layer and is always 0\n return Y", "def ipset_y_0d():\n return IPSet(x=np.linspace(0, 10, 1), y=np.array(1), x_new=np.linspace(2, 5, 7))", "def append_or_update_layer(self, layer: 'Layer'):\n if layer.has_pos():\n self.layers[layer.get_pos()] = layer\n else:\n self.layers.append(layer)\n layer.set_pos(self.get_num_layers() - 1)", "def input_layer(self, input_layer):\n x = self._from_nhwc(input_layer)\n x = tf.cast(x, self.dtype)\n # Rescale and shift to [-1,1]\n x = x * (1./127.5) - 1\n return x", "def update_Y_with_correction(self, correction):\n self.y += correction[1]\n self.bbox.y0 += correction[1]\n self.bbox.y1 += correction[1]", "def update(self):\n for i in range(self.min_y, self.max_y + 1):\n for j in range(self.min_x, self.max_x + 1):\n try:\n DIMENSIONAL_ARRAY[i-1][j-1] = self.lis[i-self.min_y][j-self.min_x]\n except IndexError:\n pass", "def y31(self, nx, ny, x_des):\n\n [c_d, a1, output] = [self.component_dependency['y_31'], self.dependency_matrix, []]\n for i in range(ny):\n [sum_i, row] = [[], a1[9 * ny + 2 * nx + i]]\n sum_i.append(np.sum(row))\n [assign, y] = [c_d[i], []]\n # x_des = np.random.random_sample(4 * nx + 5 * ny) # this is an instance of the design vector\n [y.append(self.pro_int.y31_int([x_des[k]], assign - 1)) for k in range(4 * nx + 5 * ny) if row[k] == 1]\n output.append(np.sum(y) * 1 / sum_i)\n\n return output", "def _update_bias_for_layer_from_libpymo_obj(self, layer_param: libpymo.LayerParams,\n module: onnx_pb.NodeProto):\n bias = ParamUtils.get_param(self._model.model, module, BIAS_INDEX)\n\n bias.raw_data = np.asarray(layer_param.bias, dtype=np.float32).tobytes()", "def layer_offsets(self):\n ...", "def set_2d_location(self, x, y):\r\n self.unif[42:44] = [x, y]", "def reset_paddle(self):\r\n self.y = self.screen_Height // 2\r\n self.vy = 0", "def _init_XY(self, X, y):\n d_in = X.shape[1]\n self._XtX = np.eye(d_in + 1) * self.alpha\n self._XtX[0, 0] = 0\n if len(y.shape) == 1:\n self._XtY = np.zeros((d_in + 1,))\n else:\n self._XtY = np.zeros((d_in + 1, y.shape[1]))", "def y2(self, nx, ny, x_des):\n\n c_d, a1, output = self.component_dependency['y_2'], self.dependency_matrix, []\n for i in range(ny):\n [sum_i, row] = [[], a1[4 * ny + nx + i]]\n sum_i.append(np.sum(row))\n [assign, y] = [c_d[i], []]\n # x_des = np.random.random_sample(4 * nx + 5 * ny) # this is an instance of the design vector\n [y.append(self.aer_int.y2_int([x_des[k]], assign - 1)) for k in range(4 * nx + 5 * ny) if row[k] == 1]\n output.append(np.sum(y) * 1 / sum_i)\n\n return output", "def adjust_y_pos():\n pos = self.variables.table.get_current_position()\n self.variables.table.set_joystick(False)\n self.variables.table.set_axis(\n [True, True, True]\n ) # so all axis can be adressed\n ypos = self.table_move_ui.y_move.value()\n error = self.variables.table.move_to(\n [pos[0], ypos, pos[2]],\n self.variables.default_values_dict[\"settings\"][\"height_movement\"],\n )\n # if error:\n # self.variables.message_to_main.put(error)\n self.variables.table.set_joystick(True)\n self.variables.table.set_axis(\n [True, 
True, False]\n ) # so z axis cannot be adressed", "def __removeSoftMax(self,layer):\n newLayer = layer.__class__.from_config(layer.get_config())\n if hasattr(newLayer,\"activation\") and newLayer.activation == tf.keras.activations.softmax:\n newLayer.activation = tf.keras.activations.linear #No computa nada, deja pasar los valores --> f(x) = x\n return newLayer", "def transform(self, y_idx):\n self.y = [row.pop(y_idx) for row in self.data]\n self.X = self.data", "def putLayer(self, layer):\t\n\t\t# force use different address id ( prevent use same defined layer more than once, eg: bottleneck in torchvision)\n\t\t# tmp_layer = copy.deepcopy(layer)\n\t\tlayer_id = id(layer)\n\t\tself.tmp_list.append(layer)\n\t\tlayer_id = id(self.tmp_list[-1])\n\t\tif layer_id in self.graph:\n\t\t\ttmp_layer = copy.deepcopy(layer)\n\t\t\tself.tmp_list.append(tmp_layer)\n\t\t\t# layer_id = id(self.tmp_list[-1])\n\t\t\tlayer_id = id(tmp_layer)\n\n\t\tself.graph[layer_id] = layer\n\t\tself.bottoms[layer_id] = [self.cur_id]\n\t\tself.cur_id = layer_id\n\t\t# del layer, tmp_layer, layer_id", "def layer_sweep(self):\n for fixed_id, fixed_layer in enumerate(self.layers):\n if fixed_id + 1 == len(self.layers):\n break\n moving_layer = self.layers[fixed_id + 1]\n for node in moving_layer.nodes:\n self.find_neighbors(node)\n if len(node.neighbors) > 0:\n self.calculate_barycenter(node)\n else:\n node.barycenter = 0 #1000\n sorted_nodes = sorted(moving_layer.nodes, key=lambda n: n.barycenter, reverse=False)\n for slot, node in enumerate(sorted_nodes):\n node.slot = slot + 1\n barys = set([n.barycenter for n in sorted_nodes])\n bary_nodes = [list(filter(lambda x: x.barycenter == b, sorted_nodes)) for b in barys]\n for b in bary_nodes:\n if len(b) > 1:\n for node in b:\n if len(node.sl_neighbors) == 1:\n n_slot = node.sl_neighbors[0].slot\n if n_slot > node.slot:\n other_node = max(b, key=lambda s: s.slot)\n elif n_slot < node.slot:\n other_node = min(b, key=lambda s: s.slot)\n temp = node.slot\n node.slot = other_node.slot\n other_node.slot = temp\n sorted_nodes = sorted(moving_layer.nodes, key=lambda n: n.slot, reverse=False)\n moving_layer.nodes = sorted_nodes", "def n_y(self, level):\n resolution = self.resolution(level)\n return (self.y_extent // resolution + 63) // 64", "def processLayerDataDecoded(self, header, layer):\n self.terrain.apply_patch(layer, header.x, header.y)", "def setY(self, value):\n self.components[1] = value", "def setY(self, value):\n self.components[1] = value", "def new_x_y(self, patch, points, idx):\n raise NotImplementedError", "def init_layer(layer):\n \n if layer.weight.ndimension() == 4:\n (n_out, n_in, height, width) = layer.weight.size()\n n = n_in * height * width\n \n elif layer.weight.ndimension() == 2:\n (n_out, n) = layer.weight.size()\n\n std = math.sqrt(2. 
/ n)\n scale = std * math.sqrt(3.)\n layer.weight.data.uniform_(-scale, scale)\n\n if layer.bias is not None:\n layer.bias.data.fill_(0.)", "def __set_y__(self,y):\n\n # Input vaidation\n try:\n y = int(y)\n except:\n raise ValueError('H Bridge direction is not valid')\n \n if(y != 0 and y != 1 and y != -1):\n raise ValueError('H Bridge direction is not valid')\n \n self.direction['y'] = y\n self.HBridges['y'].SetDirection(y)", "def pointConstraint(*args, layer: AnyStr=\"\", maintainOffset: bool=True, name: Union[AnyStr,\n bool]=\"\", offset: Union[List[float, float, float], bool]=None, remove:\n bool=True, skip: Union[AnyStr, List[AnyStr]]=\"\", targetList: bool=True,\n weight: Union[float, bool]=0.0, weightAliasList: bool=True, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[List[AnyStr], Any]:\n pass" ]
[ "0.51316124", "0.49926874", "0.4936033", "0.47214425", "0.47075492", "0.46918088", "0.46397915", "0.46308416", "0.46218592", "0.46196276", "0.46006182", "0.4595828", "0.45934147", "0.45880044", "0.45867378", "0.45682788", "0.4562465", "0.4542178", "0.45411748", "0.4537493", "0.45362335", "0.45272171", "0.45066747", "0.45061028", "0.45037916", "0.45037916", "0.45001656", "0.45", "0.44987744", "0.44971293" ]
0.705069
0
Test the command to bulkupdate patients contact
def test_update_contact(mock_app, gpx4_patients):
    runner = mock_app.test_cli_runner()
    patients_collection = mock_app.db.patients

    # GIVEN a database with some patients
    patients_collection.insert_many(gpx4_patients)
    test_patients = patients_collection.find()
    # Sharing a contact information
    contacts = test_patients.distinct(CONTACT_HREF)
    assert len(contacts) == 1

    # WHEN their contact info is updated using the cli
    new_href = "[email protected]"
    result = runner.invoke(
        cli,
        [
            "update",
            "contact",
            "--old-href",
            contacts[0],
            "--href",
            new_href,
            "--name",
            NEW_NAME,
            "--institution",
            TEST_INST,
        ],
        input="y",
    )
    assert result.exit_code == 0

    # THEN the config info should be updated
    updated_patient = patients_collection.find({CONTACT_HREF: ":".join(["mailto", new_href])})
    assert len(list(updated_patient)) > 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_otoroshi_controllers_adminapi_tcp_service_api_controller_bulk_update_action(self):\n pass", "def test_otoroshi_controllers_adminapi_tcp_service_api_controller_bulk_patch_action(self):\n pass", "def test_update_batch(self):\n self.batch_data['batch_id'] = self.batch_info.id\n resp = self.query_with_token(\n self.access_token_master,\n update_batch_info.format(**self.batch_data))\n\n self.assertIn('data', resp)\n self.assertEqual(\n resp['data']['updateBatchInfo']['batchInfo']['supplier']['name'],\n self.supplier.name)\n self.assertEqual(\n resp['data']['updateBatchInfo']['batchInfo']['batchNo'],\n self.batch_info.batch_no)", "def test_update_record(self):\n pass", "def test_update_contact_multiple_href_match(mock_app, gpx4_patients):\n\n runner = mock_app.test_cli_runner()\n patients_collection = mock_app.db.patients\n\n assert len(gpx4_patients) == 2\n # GIVEN a database with 2 patients with sligthly different contact href\n gpx4_patients[0][\"contact\"][\"href\"] = \"[email protected]\"\n gpx4_patients[0][\"contact\"][\"href\"] = \"[email protected]\"\n patients_collection.insert_many(gpx4_patients)\n\n # WHEN their contact info is updated using the cli but the search for the old href returns multiple contacts\n old_href = \"test_\"\n new_href = \"[email protected]\"\n result = runner.invoke(\n cli,\n [\n \"update\",\n \"contact\",\n \"--old-href\",\n old_href,\n \"--href\",\n new_href,\n \"--name\",\n NEW_NAME,\n \"--institution\",\n TEST_INST,\n ],\n )\n\n # THEN no patients contact should be updated\n assert patients_collection.find_one({CONTACT_HREF: \":\".join([\"mailto\", new_href])}) is None", "def step070() -> None:\n logger.logMessage('Begin: elasticsearch bulk update')\n client = es.Elasticsearch(hostlist)\n\n def generate():\n with open(renumFile,'r') as f:\n line = f.readline().rstrip()\n while line != '':\n fields = line.split(';')\n oper = { '_index': fields[3], \n '_op_type': 'update',\n '_id': fields[2].rstrip(),\n '_type': 'doc',\n '_source:': {'doc': {'tsa': fields[0]}}}\n \n yield oper\n line = f.readline().rstrip()\n result = eshelp.bulk(client,generate())\n logger.logMessage('Bulk result: {0}'.format(result))\n logger.logMessage('End : elasticsearch bulk update')", "def test_update():\n payload = {'age': 99}\n sample_uuid = get_sample_id()\n response = requests.put(f'http://localhost:5000/api/persons/{sample_uuid}', json=payload)\n data = response.json()\n\n assert response.status_code == 200\n for field in FIELDS:\n assert field in data", "def test_request_do_update_all(test_dao, test_configuration):\r\n DUT = dtcFunction(test_dao, test_configuration, test=True)\r\n DUT.request_do_select_all(revision_id=1)\r\n\r\n assert not DUT.request_do_update_all()", "def test_update_contact_association(self):\n patient1 = self.create_patient({'mobile_number': '12223334444'})\n patient2 = self.create_patient()\n subject_number = patient1.subject_number\n node = self.create_xml_patient({'Subject_Number': subject_number,\n 'Mobile_Number': '43332221111'})\n payload = self.create_payload([node])\n parse_patient(node, payload)\n patient = payload.patients.all()[0]\n self.assertNotEqual(patient.pk, patient2.pk)\n self.assertEqual(patient.pk, patient1.pk)\n self.assertNotEqual(patient.contact.pk, patient2.contact.pk)\n self.assertEqual(patient.contact.pk, patient1.contact.pk)\n self.assertEqual(patient.mobile_number, '+43332221111')\n self.assertEqual(patient.contact.phone, '+43332221111')", "def test_updating_multiple_records_through_filter_with_arg_value(self, 
test_domain):\n identifier1 = uuid4()\n identifier2 = uuid4()\n identifier3 = uuid4()\n identifier4 = uuid4()\n test_domain.repository_for(Person)._dao.create(\n id=identifier1, first_name=\"Athos\", last_name=\"Musketeer\", age=2\n )\n test_domain.repository_for(Person)._dao.create(\n id=identifier2, first_name=\"Porthos\", last_name=\"Musketeer\", age=3\n )\n test_domain.repository_for(Person)._dao.create(\n id=identifier3, first_name=\"Aramis\", last_name=\"Musketeer\", age=4\n )\n test_domain.repository_for(Person)._dao.create(\n id=identifier4, first_name=\"dArtagnan\", last_name=\"Musketeer\", age=5\n )\n\n # Perform update\n updated_count = (\n test_domain.repository_for(Person)\n ._dao.query.filter(age__gt=3)\n .update_all({\"last_name\": \"Fraud\"})\n )\n\n # Query and check if only the relevant records have been updated\n assert updated_count == 2\n\n u_person1 = test_domain.repository_for(Person)._dao.get(identifier1)\n u_person2 = test_domain.repository_for(Person)._dao.get(identifier2)\n u_person3 = test_domain.repository_for(Person)._dao.get(identifier3)\n u_person4 = test_domain.repository_for(Person)._dao.get(identifier4)\n assert u_person1.last_name == \"Musketeer\"\n assert u_person2.last_name == \"Musketeer\"\n assert u_person3.last_name == \"Fraud\"\n assert u_person4.last_name == \"Fraud\"", "def test_case_mme_update(populated_database, case_obj, user_obj, mme_patient, mme_submission):\n adapter = populated_database\n\n mme_submission['server_responses'] = [ {'patient': mme_patient }]\n \n # Make sure no case has MME submission:\n assert adapter.case_collection.find({'mme_submission' : { '$exists' : True}}).count() == 0\n\n\n updated_case = adapter.case_mme_update(case_obj, user_obj, mme_submission)\n\n # One case has MME submission now\n assert updated_case['mme_submission']\n assert adapter.case_collection.find({'mme_submission' : { '$exists' : True}}).count()", "def test_update_contact_no_href_match(mock_app, gpx4_patients):\n\n runner = mock_app.test_cli_runner()\n patients_collection = mock_app.db.patients\n\n # GIVEN a database with some patients\n patients_collection.insert_many(gpx4_patients)\n test_patients = patients_collection.find()\n # Sharing a contact information\n contacts = test_patients.distinct(CONTACT_HREF)\n assert len(contacts) == 1\n old_contact_href = contacts[0]\n\n # GIVEN a contact href without matches in the patients documents\n wrong_href = \"some_href\"\n assert wrong_href not in old_contact_href\n\n # WHEN their contact info is updated using the cli\n new_href = \"[email protected]\"\n result = runner.invoke(\n cli,\n [\n \"update\",\n \"contact\",\n \"--old-href\",\n wrong_href,\n \"--href\",\n new_href,\n \"--name\",\n NEW_NAME,\n \"--institution\",\n TEST_INST,\n ],\n )\n assert result.exit_code == 0\n\n # THEN no patients contact should be updated\n assert patients_collection.find_one({CONTACT_HREF: \":\".join([\"mailto\", new_href])}) is None", "def test_update_contact(session): # pylint:disable=unused-argument\n org = factory_org_service()\n org.add_contact(TestContactInfo.contact1)\n\n dictionary = org.as_dict()\n assert len(dictionary['contacts']) == 1\n assert dictionary['contacts'][0]['email'] == TestContactInfo.contact1['email']\n\n org.update_contact(TestContactInfo.contact2)\n\n dictionary = org.as_dict()\n assert len(dictionary['contacts']) == 1\n assert dictionary['contacts'][0]['email'] == TestContactInfo.contact2['email']", "def test_sync_updates_materials(self):\n expected_data = [\n ['1', '{wool,AAA,BBB,CCC}'],\n ['2', 
'{cotton,AAA,BBB,CCC}'],\n ['3', '{cotton,AAA,BBB,CCC}']\n ]\n\n select_listings_to_edit(self.driver)\n d = self.driver\n bp = BulkPage(d)\n\n send_keys(bp.operation_input(), 'AAA,BBB ,CCC')\n click(bp.operation_apply())\n\n click(bp.sync_updates_button())\n\n # Check updated data in DB\n wait_for_assert(expected_data,\n lambda: run_sql('HIVE', 'select_materials_modified', True),\n 'Unexpected materials data in DB')", "def test_otoroshi_controllers_adminapi_tcp_service_api_controller_bulk_delete_action(self):\n pass", "def do_bulk(self, args):\n pass", "def test_update_case(self):\n pass", "def test_handle(self, create, update):\n filedata = \"PROJ_NO,SECTOR,OTHER_FIELD,PROJ_REQ,UNIDENT_BAL\\n\"\n filedata += '123-456,IT,Some Content,5555,\"1,234\"\\n'\n filedata += \"444-444,IT,Not Here,5.00,1.23\\n\"\n filedata += \"111-222,IT,Other Co¿ntent,5,1.23\\n\"\n # Note the non-utf character ^\n csv_file_handle, csv_path = tempfile.mkstemp()\n with open(csv_file_handle, 'wb') as csv_file:\n csv_file.write(filedata.encode('iso-8859-1'))\n\n account456 = Account.objects.create(name='account1', code='123-456')\n account222 = Account.objects.create(name='account2', code='111-222')\n\n with self.assertLogs('peacecorps.sync_accounting') as logger:\n command = sync.Command()\n command.handle(csv_path)\n self.assertEqual(3, len(logger.output))\n self.assertTrue('123-456' in logger.output[0])\n self.assertTrue('Updating' in logger.output[0])\n self.assertTrue('5555' in logger.output[0])\n self.assertTrue('1,234' in logger.output[0])\n self.assertTrue('444-444' in logger.output[1])\n self.assertTrue('Creating' in logger.output[1])\n self.assertTrue('111-222' in logger.output[2])\n self.assertTrue('Updating' in logger.output[2])\n\n self.assertEqual(create.call_count, 1)\n self.assertEqual(update.call_count, 2)\n account222.delete()\n account456.delete()", "def test_updateContact(self):\n response = self.client.get(self.url)\n qs = response.json()\n contact = qs[0]\n to_update_value = 'address 2'\n contact['address'] = to_update_value\n response = self.client.put(self.url + str(contact['id']) + '/', contact, content_type=\"application/json\")\n self.assertEqual(response.status_code, 200)\n contact2 = response.json()\n self.assertEqual(contact2['address'], to_update_value)", "def test_save_multiple_contact(self):\n self.new_contact.save_contact()\n # new contact\n test_contact = Contact(\"Test\", \"user\", \"0798765432\", \"[email protected]\")\n test_contact.save_contact()\n self.assertEqual(len(Contact.contact_list), 2)", "def test_sync_from_sugar_contact(self):\n LOG.debug('test_sync_from_sugar_contact')\n business = Business.objects.get(id=114)\n advertiser = Advertiser.objects.get(id=114)\n email = advertiser.email\n module = \"Contacts\"\n query = build_recent_entry_query(module=module, test_mode=True, \n get_modified=False, start=None)\n sugar_list = self.sugar.get_entry_list(module, query)\n sugar_dict = sugar_list[0]\n sugar_dict['advertiser_id_c'] = ''\n self.sugar.set_entry(module, dict_to_name_value(sugar_dict))\n billing_record = BillingRecord.objects.get(id=114)\n order = billing_record.orders.all()[0]\n order.delete()\n billing_record.delete()\n business.delete()\n consumer = Consumer.objects.get(email=email)\n consumer.delete()\n advertiser.delete()\n sync_business_from_sugar(test_mode=True, sugar=self.sugar)\n # business is not created since Sugar record modified by 10Coupons user\n try:\n business = Business.objects.get(advertiser=advertiser)\n self.assertTrue(False)\n except 
business.DoesNotExist:\n self.assertTrue(True)", "async def update_contact(dbcon: DBConnection, contact_id: int, data: Dict[str, str]) -> None:\n\n async def _run(cur: Cursor) -> None:\n for key, value in data.items():\n if key not in ['name', 'email', 'phone', 'active']:\n raise errors.IrisettError('invalid contact key %s' % key)\n q = \"\"\"update contacts set %s=%%s where id=%%s\"\"\" % key\n q_args = (value, contact_id)\n await cur.execute(q, q_args)\n\n if not await contact_exists(dbcon, contact_id):\n raise errors.InvalidArguments('contact does not exist')\n await dbcon.transact(_run)", "def test_updateContact(self):\n qs = Contact.objects.all()\n contact = qs[0]\n contact2 = Contact.objects.get(id=contact.id)\n to_update_value = 'address 2'\n contact2.address = to_update_value\n contact2.save()\n # refresh from db\n contact3 = Contact.objects.get(id=contact.id)\n self.assertEqual(contact3.address, to_update_value)", "def test_otoroshi_controllers_adminapi_tcp_service_api_controller_update_entity_action(self):\n pass", "def test_updating_patient_account(self):\n \n form_data = {\"fname\": \"Jill\", \"lname\": \"Jones\", \n \"email\": \"[email protected]\", \"password\": \"password\", \n \"street-address\": \"33 Blue St\", \"city\": \"San Francisco\", \n \"state\": \"CA\", \"zipcode\": \"43223\", \"phone\": \"8884445555\",\n \"birthdate\":\"1984-05-05\"}\n\n update_patient_account(1, form_data)\n\n patient = Patient.query.get(1)\n self.assertEqual(\"Jill\", patient.fname)", "def test_save_multiple_contacts(self):\n self.new_contact.save_contact() # saving the new contact\n test_contact = Contact(\"Test\", \"User\", 254712345678, \"[email protected]\") # new user\n test_contact.save_contact() # saving the new contact\n self.assertEqual(len(Contact.contact_list), 2)", "async def test_updates_no_user(database,valid_data):\n #reset the database and add values with ids [0,10]\n test_valid_insert(database,valid_data)\n\n for _id in range(100,150):\n try:\n await database.update(_id=_id,user_id=_id)\n assert False\n except:\n assert True\n await database.close_pool()", "def test_updateall():\n url = baseUrl + userurl + emailId\n payload = {'firstName': new_firstName, 'lastName': new_lastName, 'emailId': new_emailId}\n logging.info(\"Update a user's firstName to: %s, lastName to: %s and emailId to: %s\" % (new_firstName, new_lastName, new_emailId))\n r = requests.put(url, data=json.dumps(payload), headers=header)\n assert r.status_code == 200\n resp = r.json()\n assert resp[\"userName\"] == emailId and resp[\"lastName\"] == new_lastName and resp[\"firstName\"] == new_firstName \\\n and resp[\"licenseType\"] == licensetype and resp[\"subscriptionIds\"][0] == subscriptionid and \\\n resp[\"isActive\"] is True and resp[\"source\"] == \"publicapi\" and resp[\"emailId\"] == new_emailId\n global user_id\n user_id = resp[\"id\"]\n assert user_id is not None", "def test_request_do_update(test_dao, test_configuration):\r\n DUT = dtcFunction(test_dao, test_configuration, test=True)\r\n DUT.request_do_select_all(revision_id=1)\r\n\r\n assert not DUT.request_do_update(1)", "def test_partially_update_device_by_id1(self):\n pass" ]
[ "0.7028907", "0.67930585", "0.666327", "0.62609684", "0.6216473", "0.6037648", "0.5950919", "0.59361154", "0.593158", "0.589418", "0.58618575", "0.58603084", "0.585794", "0.5840429", "0.5838186", "0.58320105", "0.58251095", "0.58173716", "0.5804996", "0.5788242", "0.57680327", "0.57662517", "0.5755657", "0.57362306", "0.57323796", "0.57201856", "0.5719992", "0.5713279", "0.5707439", "0.5695912" ]
0.6999571
1
Test the command to bulkupdate patients contact when old contact href is not matching any patients
def test_update_contact_no_href_match(mock_app, gpx4_patients): runner = mock_app.test_cli_runner() patients_collection = mock_app.db.patients # GIVEN a database with some patients patients_collection.insert_many(gpx4_patients) test_patients = patients_collection.find() # Sharing a contact information contacts = test_patients.distinct(CONTACT_HREF) assert len(contacts) == 1 old_contact_href = contacts[0] # GIVEN a contact href without matches in the patients documents wrong_href = "some_href" assert wrong_href not in old_contact_href # WHEN their contact info is updated using the cli new_href = "[email protected]" result = runner.invoke( cli, [ "update", "contact", "--old-href", wrong_href, "--href", new_href, "--name", NEW_NAME, "--institution", TEST_INST, ], ) assert result.exit_code == 0 # THEN no patients contact should be updated assert patients_collection.find_one({CONTACT_HREF: ":".join(["mailto", new_href])}) is None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_contact_multiple_href_match(mock_app, gpx4_patients):\n\n runner = mock_app.test_cli_runner()\n patients_collection = mock_app.db.patients\n\n assert len(gpx4_patients) == 2\n # GIVEN a database with 2 patients with sligthly different contact href\n gpx4_patients[0][\"contact\"][\"href\"] = \"[email protected]\"\n gpx4_patients[0][\"contact\"][\"href\"] = \"[email protected]\"\n patients_collection.insert_many(gpx4_patients)\n\n # WHEN their contact info is updated using the cli but the search for the old href returns multiple contacts\n old_href = \"test_\"\n new_href = \"[email protected]\"\n result = runner.invoke(\n cli,\n [\n \"update\",\n \"contact\",\n \"--old-href\",\n old_href,\n \"--href\",\n new_href,\n \"--name\",\n NEW_NAME,\n \"--institution\",\n TEST_INST,\n ],\n )\n\n # THEN no patients contact should be updated\n assert patients_collection.find_one({CONTACT_HREF: \":\".join([\"mailto\", new_href])}) is None", "def test_update_contact(mock_app, gpx4_patients):\n\n runner = mock_app.test_cli_runner()\n patients_collection = mock_app.db.patients\n\n # GIVEN a database with some patients\n patients_collection.insert_many(gpx4_patients)\n test_patients = patients_collection.find()\n # Sharing a contact information\n contacts = test_patients.distinct(CONTACT_HREF)\n assert len(contacts) == 1\n\n # WHEN their contact info is updated using the cli\n new_href = \"[email protected]\"\n result = runner.invoke(\n cli,\n [\n \"update\",\n \"contact\",\n \"--old-href\",\n contacts[0],\n \"--href\",\n new_href,\n \"--name\",\n NEW_NAME,\n \"--institution\",\n TEST_INST,\n ],\n input=\"y\",\n )\n assert result.exit_code == 0\n\n # THEN the config info should be updated\n updated_patient = patients_collection.find({CONTACT_HREF: \":\".join([\"mailto\", new_href])})\n assert len(list(updated_patient)) > 0", "def test_otoroshi_controllers_adminapi_tcp_service_api_controller_bulk_update_action(self):\n pass", "def test_otoroshi_controllers_adminapi_tcp_service_api_controller_bulk_patch_action(self):\n pass", "def locate_bulk_write_error(self, dbname, collname, reqs):\n for op in reqs:\n while True:\n try:\n self._mc[dbname][collname].replace_one(op._filter, op._doc)\n break\n except pymongo.errors.AutoReconnect as e:\n log.error('%s' % e)\n self.reconnect()\n except Exception as e:\n log.error('%s when excuting %s' % (e, op))\n break", "def test_update_batch(self):\n self.batch_data['batch_id'] = self.batch_info.id\n resp = self.query_with_token(\n self.access_token_master,\n update_batch_info.format(**self.batch_data))\n\n self.assertIn('data', resp)\n self.assertEqual(\n resp['data']['updateBatchInfo']['batchInfo']['supplier']['name'],\n self.supplier.name)\n self.assertEqual(\n resp['data']['updateBatchInfo']['batchInfo']['batchNo'],\n self.batch_info.batch_no)", "def test_client_verification_document_partial_update(self):\n pass", "def test_update_contact_association(self):\n patient1 = self.create_patient({'mobile_number': '12223334444'})\n patient2 = self.create_patient()\n subject_number = patient1.subject_number\n node = self.create_xml_patient({'Subject_Number': subject_number,\n 'Mobile_Number': '43332221111'})\n payload = self.create_payload([node])\n parse_patient(node, payload)\n patient = payload.patients.all()[0]\n self.assertNotEqual(patient.pk, patient2.pk)\n self.assertEqual(patient.pk, patient1.pk)\n self.assertNotEqual(patient.contact.pk, patient2.contact.pk)\n self.assertEqual(patient.contact.pk, patient1.contact.pk)\n 
self.assertEqual(patient.mobile_number, '+43332221111')\n self.assertEqual(patient.contact.phone, '+43332221111')", "def test_modify_phonebook(self):\n bt_contacts_utils.generate_contact_list(self.contacts_destination_path,\n PSE_CONTACTS_FILE, 100)\n phone_numbers_added = bt_contacts_utils.import_device_contacts_from_vcf(\n self.pse, self.contacts_destination_path, PSE_CONTACTS_FILE)\n if not self.connect_and_verify(phone_numbers_added):\n return False\n\n bt_contacts_utils.erase_contacts(self.pse)\n bt_contacts_utils.generate_contact_list(self.contacts_destination_path,\n PSE_CONTACTS_FILE, 110, 2)\n phone_numbers_added = bt_contacts_utils.import_device_contacts_from_vcf(\n self.pse, self.contacts_destination_path, PSE_CONTACTS_FILE)\n return self.connect_and_verify(phone_numbers_added)", "def test_patient_detail_after_updating(self):\n url = reverse('doctor-list')\n response = self.client.get(reverse(\n 'patient:patient-detail', kwargs={'pk': Patient.objects.get(patient_name='testpatient1').id}))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(Patient.objects.count(), 1)", "def test_update_record(self):\n pass", "def test_update_contact(session): # pylint:disable=unused-argument\n org = factory_org_service()\n org.add_contact(TestContactInfo.contact1)\n\n dictionary = org.as_dict()\n assert len(dictionary['contacts']) == 1\n assert dictionary['contacts'][0]['email'] == TestContactInfo.contact1['email']\n\n org.update_contact(TestContactInfo.contact2)\n\n dictionary = org.as_dict()\n assert len(dictionary['contacts']) == 1\n assert dictionary['contacts'][0]['email'] == TestContactInfo.contact2['email']", "def test_updateContact(self):\n qs = Contact.objects.all()\n contact = qs[0]\n contact2 = Contact.objects.get(id=contact.id)\n to_update_value = 'address 2'\n contact2.address = to_update_value\n contact2.save()\n # refresh from db\n contact3 = Contact.objects.get(id=contact.id)\n self.assertEqual(contact3.address, to_update_value)", "def step070() -> None:\n logger.logMessage('Begin: elasticsearch bulk update')\n client = es.Elasticsearch(hostlist)\n\n def generate():\n with open(renumFile,'r') as f:\n line = f.readline().rstrip()\n while line != '':\n fields = line.split(';')\n oper = { '_index': fields[3], \n '_op_type': 'update',\n '_id': fields[2].rstrip(),\n '_type': 'doc',\n '_source:': {'doc': {'tsa': fields[0]}}}\n \n yield oper\n line = f.readline().rstrip()\n result = eshelp.bulk(client,generate())\n logger.logMessage('Bulk result: {0}'.format(result))\n logger.logMessage('End : elasticsearch bulk update')", "def test_sync_from_sugar_contact(self):\n LOG.debug('test_sync_from_sugar_contact')\n business = Business.objects.get(id=114)\n advertiser = Advertiser.objects.get(id=114)\n email = advertiser.email\n module = \"Contacts\"\n query = build_recent_entry_query(module=module, test_mode=True, \n get_modified=False, start=None)\n sugar_list = self.sugar.get_entry_list(module, query)\n sugar_dict = sugar_list[0]\n sugar_dict['advertiser_id_c'] = ''\n self.sugar.set_entry(module, dict_to_name_value(sugar_dict))\n billing_record = BillingRecord.objects.get(id=114)\n order = billing_record.orders.all()[0]\n order.delete()\n billing_record.delete()\n business.delete()\n consumer = Consumer.objects.get(email=email)\n consumer.delete()\n advertiser.delete()\n sync_business_from_sugar(test_mode=True, sugar=self.sugar)\n # business is not created since Sugar record modified by 10Coupons user\n try:\n business = 
Business.objects.get(advertiser=advertiser)\n self.assertTrue(False)\n except business.DoesNotExist:\n self.assertTrue(True)", "def update_contacts(self, contact_list):\n updated_contacts = 0\n request_list = list()\n\n # stale_contacts contains all old contacts at first, all current\n # contacts get then removed so that the remaining can get deleted\n stale_contacts = set(self.contacts)\n\n for contact in contact_list:\n c = Persona.query.get(contact[\"id\"])\n\n if c is None:\n c = Persona(id=contact[\"id\"], _stub=True)\n\n if c._stub is True:\n request_list.append(contact[\"id\"])\n\n try:\n # Old and new contact; remove from stale list\n stale_contacts.remove(c)\n except KeyError:\n # New contact\n self.contacts.append(c)\n updated_contacts += 1\n\n # Remove old contacts that are not new contacts\n for contact in stale_contacts:\n self.contacts.remove(contact)\n\n app.logger.info(\"Updated {}'s contacts: {} added, {} removed, {} requested\".format(\n self.username, updated_contacts, len(stale_contacts), len(request_list)))\n\n return request_list", "def test_case_mme_update(populated_database, case_obj, user_obj, mme_patient, mme_submission):\n adapter = populated_database\n\n mme_submission['server_responses'] = [ {'patient': mme_patient }]\n \n # Make sure no case has MME submission:\n assert adapter.case_collection.find({'mme_submission' : { '$exists' : True}}).count() == 0\n\n\n updated_case = adapter.case_mme_update(case_obj, user_obj, mme_submission)\n\n # One case has MME submission now\n assert updated_case['mme_submission']\n assert adapter.case_collection.find({'mme_submission' : { '$exists' : True}}).count()", "def test_updateContact(self):\n response = self.client.get(self.url)\n qs = response.json()\n contact = qs[0]\n to_update_value = 'address 2'\n contact['address'] = to_update_value\n response = self.client.put(self.url + str(contact['id']) + '/', contact, content_type=\"application/json\")\n self.assertEqual(response.status_code, 200)\n contact2 = response.json()\n self.assertEqual(contact2['address'], to_update_value)", "def test_update_domain_only(self):\n self.test_update()", "def test_update_existent_campaign_by_admin_passes(self):\n response = self.client.patch(\n f\"{self.endpoint_url}{self.test_campaign.id}/\",\n json={\n \"logo\": None,\n \"name\": NEW_CAMPAIGN_NAME,\n \"organisations\": [],\n \"url\": None,\n },\n headers={\"Authorization\": self.admin_token},\n )\n response_body = response.get_json()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response_body[\"Success\"], \"Campaign 1 updated\")", "async def test_updates_no_user(database,valid_data):\n #reset the database and add values with ids [0,10]\n test_valid_insert(database,valid_data)\n\n for _id in range(100,150):\n try:\n await database.update(_id=_id,user_id=_id)\n assert False\n except:\n assert True\n await database.close_pool()", "async def update_contact(dbcon: DBConnection, contact_id: int, data: Dict[str, str]) -> None:\n\n async def _run(cur: Cursor) -> None:\n for key, value in data.items():\n if key not in ['name', 'email', 'phone', 'active']:\n raise errors.IrisettError('invalid contact key %s' % key)\n q = \"\"\"update contacts set %s=%%s where id=%%s\"\"\" % key\n q_args = (value, contact_id)\n await cur.execute(q, q_args)\n\n if not await contact_exists(dbcon, contact_id):\n raise errors.InvalidArguments('contact does not exist')\n await dbcon.transact(_run)", "def test_staff_update_duplicate_procedure_fails(self):\n res = self.client.post(PROCEDURE_URL, 
self.payload, format='json')\n second_payload = {\n 'name': 'abc',\n 'speciality': [self.speciality.id],\n 'overview': 'bla bla bla'\n }\n self.client.post(PROCEDURE_URL, second_payload, format='json')\n\n url = get_item_url(res.data['id'])\n new_payload = {\n 'name': 'abc',\n }\n\n response = self.client.patch(url, new_payload, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_bulk_activity(self):\n file_path_ac = os.path.join(os.path.dirname(__file__),\n self.testdata_folder,\n self.filename_act)\n data = {\n 'bulk_upload' : open(file_path_ac, 'rb'),\n }\n\n existing_acs = Activity.objects.filter(activitygroup__keyflow=self.kic)\n existing_nace = list(existing_acs.values_list('nace', flat=True))\n\n encoding = 'cp1252'\n df_file_ags = pd.read_csv(file_path_ac, sep='\\t', encoding=encoding)\n df_file_ags = df_file_ags.rename(\n columns={c: c.lower() for c in df_file_ags.columns})\n file_nace = df_file_ags['nace']\n new_nace = [c for c in file_nace if str(c) not in existing_nace]\n\n res = self.client.post(self.ac_url, data)\n assert res.status_code == status.HTTP_201_CREATED\n res_json = res.json()\n assert res_json['count'] == len(file_nace)\n assert len(res_json['created']) == len(new_nace)\n\n # assert that the number of activities matches\n all_ac = Activity.objects.filter(activitygroup__keyflow=self.kic)\n assert len(all_ac) == len(existing_nace) + len(new_nace)\n\n # assert that the Name matches in all values\n for row in df_file_ags.itertuples(index=False):\n # ToDo: different test case if activitygroups don't exist\n ag = ActivityGroup.objects.get(code=row.ag)\n ac = Activity.objects.get(activitygroup=ag,\n nace=row.nace)\n assert ac.name == row.name", "def test_sync_updates_materials(self):\n expected_data = [\n ['1', '{wool,AAA,BBB,CCC}'],\n ['2', '{cotton,AAA,BBB,CCC}'],\n ['3', '{cotton,AAA,BBB,CCC}']\n ]\n\n select_listings_to_edit(self.driver)\n d = self.driver\n bp = BulkPage(d)\n\n send_keys(bp.operation_input(), 'AAA,BBB ,CCC')\n click(bp.operation_apply())\n\n click(bp.sync_updates_button())\n\n # Check updated data in DB\n wait_for_assert(expected_data,\n lambda: run_sql('HIVE', 'select_materials_modified', True),\n 'Unexpected materials data in DB')", "def test_client_verification_document_update(self):\n pass", "def test_partial_update(self):\n self.client.force_authenticate(user=self.admin)\n\n data = {\n 'retreat': reverse(\n 'retreat:retreat-detail', args=[self.retreat.id]\n ),\n 'user': reverse('user-detail', args=[self.user2.id]),\n }\n\n response = self.client.put(\n reverse(\n 'retreat:waitqueuenotification-detail',\n kwargs={'pk': 1},\n ),\n data,\n format='json',\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_405_METHOD_NOT_ALLOWED\n )", "def test_update_contact_no_contact(session): # pylint:disable=unused-argument\n org = factory_org_service()\n\n with pytest.raises(BusinessException) as exception:\n org.update_contact(TestContactInfo.contact2)\n assert exception.value.code == Error.DATA_NOT_FOUND.name", "def test_update_non_existent_campaign_by_id_fails(self):\n response = self.client.patch(\n f\"{self.endpoint_url}99/\",\n json={\n \"logo\": None,\n \"name\": NEW_CAMPAIGN_NAME,\n \"organisations\": [],\n \"url\": None,\n },\n headers={\"Authorization\": self.admin_token},\n )\n response_body = response.get_json()\n error_details = response_body[\"error\"]\n self.assertEqual(response.status_code, 404)\n self.assertEqual(error_details[\"message\"], CAMPAIGN_NOT_FOUND_MESSAGE)\n 
self.assertEqual(error_details[\"sub_code\"], CAMPAIGN_NOT_FOUND_SUB_CODE)\n self.assertEqual(error_details[\"details\"], {\"campaign_id\": 99})", "def test_partial_update_metadata(self):\n pass" ]
[ "0.77498806", "0.6971011", "0.5990428", "0.5918833", "0.5805121", "0.5728971", "0.5712339", "0.56514984", "0.56374675", "0.5603962", "0.5593991", "0.5562969", "0.5558192", "0.5555756", "0.555545", "0.5505084", "0.5498885", "0.5491362", "0.5473769", "0.5473551", "0.546801", "0.54622096", "0.54532737", "0.5451474", "0.54216266", "0.54126483", "0.5392813", "0.5391514", "0.5389015", "0.53868425" ]
0.7636279
1
Test the command to bulkupdate patients contact when old contact href is matching more than one patient contact
def test_update_contact_multiple_href_match(mock_app, gpx4_patients): runner = mock_app.test_cli_runner() patients_collection = mock_app.db.patients assert len(gpx4_patients) == 2 # GIVEN a database with 2 patients with sligthly different contact href gpx4_patients[0]["contact"]["href"] = "[email protected]" gpx4_patients[0]["contact"]["href"] = "[email protected]" patients_collection.insert_many(gpx4_patients) # WHEN their contact info is updated using the cli but the search for the old href returns multiple contacts old_href = "test_" new_href = "[email protected]" result = runner.invoke( cli, [ "update", "contact", "--old-href", old_href, "--href", new_href, "--name", NEW_NAME, "--institution", TEST_INST, ], ) # THEN no patients contact should be updated assert patients_collection.find_one({CONTACT_HREF: ":".join(["mailto", new_href])}) is None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_contact_no_href_match(mock_app, gpx4_patients):\n\n runner = mock_app.test_cli_runner()\n patients_collection = mock_app.db.patients\n\n # GIVEN a database with some patients\n patients_collection.insert_many(gpx4_patients)\n test_patients = patients_collection.find()\n # Sharing a contact information\n contacts = test_patients.distinct(CONTACT_HREF)\n assert len(contacts) == 1\n old_contact_href = contacts[0]\n\n # GIVEN a contact href without matches in the patients documents\n wrong_href = \"some_href\"\n assert wrong_href not in old_contact_href\n\n # WHEN their contact info is updated using the cli\n new_href = \"[email protected]\"\n result = runner.invoke(\n cli,\n [\n \"update\",\n \"contact\",\n \"--old-href\",\n wrong_href,\n \"--href\",\n new_href,\n \"--name\",\n NEW_NAME,\n \"--institution\",\n TEST_INST,\n ],\n )\n assert result.exit_code == 0\n\n # THEN no patients contact should be updated\n assert patients_collection.find_one({CONTACT_HREF: \":\".join([\"mailto\", new_href])}) is None", "def test_update_contact(mock_app, gpx4_patients):\n\n runner = mock_app.test_cli_runner()\n patients_collection = mock_app.db.patients\n\n # GIVEN a database with some patients\n patients_collection.insert_many(gpx4_patients)\n test_patients = patients_collection.find()\n # Sharing a contact information\n contacts = test_patients.distinct(CONTACT_HREF)\n assert len(contacts) == 1\n\n # WHEN their contact info is updated using the cli\n new_href = \"[email protected]\"\n result = runner.invoke(\n cli,\n [\n \"update\",\n \"contact\",\n \"--old-href\",\n contacts[0],\n \"--href\",\n new_href,\n \"--name\",\n NEW_NAME,\n \"--institution\",\n TEST_INST,\n ],\n input=\"y\",\n )\n assert result.exit_code == 0\n\n # THEN the config info should be updated\n updated_patient = patients_collection.find({CONTACT_HREF: \":\".join([\"mailto\", new_href])})\n assert len(list(updated_patient)) > 0", "def update_contacts(self, contact_list):\n updated_contacts = 0\n request_list = list()\n\n # stale_contacts contains all old contacts at first, all current\n # contacts get then removed so that the remaining can get deleted\n stale_contacts = set(self.contacts)\n\n for contact in contact_list:\n c = Persona.query.get(contact[\"id\"])\n\n if c is None:\n c = Persona(id=contact[\"id\"], _stub=True)\n\n if c._stub is True:\n request_list.append(contact[\"id\"])\n\n try:\n # Old and new contact; remove from stale list\n stale_contacts.remove(c)\n except KeyError:\n # New contact\n self.contacts.append(c)\n updated_contacts += 1\n\n # Remove old contacts that are not new contacts\n for contact in stale_contacts:\n self.contacts.remove(contact)\n\n app.logger.info(\"Updated {}'s contacts: {} added, {} removed, {} requested\".format(\n self.username, updated_contacts, len(stale_contacts), len(request_list)))\n\n return request_list", "def test_otoroshi_controllers_adminapi_tcp_service_api_controller_bulk_update_action(self):\n pass", "def test_save_multiple_contact(self):\n self.new_contact.save_contact()\n # new contact\n test_contact = Contact(\"Test\", \"user\", \"0798765432\", \"[email protected]\")\n test_contact.save_contact()\n self.assertEqual(len(Contact.contact_list), 2)", "def test_otoroshi_controllers_adminapi_tcp_service_api_controller_bulk_patch_action(self):\n pass", "def test_save_multiple_contacts(self):\n self.new_contact.save_contact() # saving the new contact\n test_contact = Contact(\"Test\", \"User\", 254712345678, \"[email protected]\") # new user\n 
test_contact.save_contact() # saving the new contact\n self.assertEqual(len(Contact.contact_list), 2)", "def test_update_contact_association(self):\n patient1 = self.create_patient({'mobile_number': '12223334444'})\n patient2 = self.create_patient()\n subject_number = patient1.subject_number\n node = self.create_xml_patient({'Subject_Number': subject_number,\n 'Mobile_Number': '43332221111'})\n payload = self.create_payload([node])\n parse_patient(node, payload)\n patient = payload.patients.all()[0]\n self.assertNotEqual(patient.pk, patient2.pk)\n self.assertEqual(patient.pk, patient1.pk)\n self.assertNotEqual(patient.contact.pk, patient2.contact.pk)\n self.assertEqual(patient.contact.pk, patient1.contact.pk)\n self.assertEqual(patient.mobile_number, '+43332221111')\n self.assertEqual(patient.contact.phone, '+43332221111')", "def test_modify_phonebook(self):\n bt_contacts_utils.generate_contact_list(self.contacts_destination_path,\n PSE_CONTACTS_FILE, 100)\n phone_numbers_added = bt_contacts_utils.import_device_contacts_from_vcf(\n self.pse, self.contacts_destination_path, PSE_CONTACTS_FILE)\n if not self.connect_and_verify(phone_numbers_added):\n return False\n\n bt_contacts_utils.erase_contacts(self.pse)\n bt_contacts_utils.generate_contact_list(self.contacts_destination_path,\n PSE_CONTACTS_FILE, 110, 2)\n phone_numbers_added = bt_contacts_utils.import_device_contacts_from_vcf(\n self.pse, self.contacts_destination_path, PSE_CONTACTS_FILE)\n return self.connect_and_verify(phone_numbers_added)", "def test_with_multiple_contacts(self, data_flow_api_client):\n with freeze_time('2019-01-01 12:30:00'):\n contact_1 = ContactFactory()\n with freeze_time('2019-01-03 12:00:00'):\n contact_2 = ContactFactory()\n with freeze_time('2019-01-01 12:00:00'):\n contact_3 = ContactFactory()\n contact_4 = ContactFactory()\n\n response = data_flow_api_client.get(self.view_url)\n assert response.status_code == status.HTTP_200_OK\n response_results = response.json()['results']\n assert len(response_results) == 4\n expected_contact_list = sorted([contact_3, contact_4],\n key=lambda item: item.pk) + [contact_1, contact_2]\n for index, contact in enumerate(expected_contact_list):\n assert contact.email == response_results[index]['email']", "def test_bulk_activity(self):\n file_path_ac = os.path.join(os.path.dirname(__file__),\n self.testdata_folder,\n self.filename_act)\n data = {\n 'bulk_upload' : open(file_path_ac, 'rb'),\n }\n\n existing_acs = Activity.objects.filter(activitygroup__keyflow=self.kic)\n existing_nace = list(existing_acs.values_list('nace', flat=True))\n\n encoding = 'cp1252'\n df_file_ags = pd.read_csv(file_path_ac, sep='\\t', encoding=encoding)\n df_file_ags = df_file_ags.rename(\n columns={c: c.lower() for c in df_file_ags.columns})\n file_nace = df_file_ags['nace']\n new_nace = [c for c in file_nace if str(c) not in existing_nace]\n\n res = self.client.post(self.ac_url, data)\n assert res.status_code == status.HTTP_201_CREATED\n res_json = res.json()\n assert res_json['count'] == len(file_nace)\n assert len(res_json['created']) == len(new_nace)\n\n # assert that the number of activities matches\n all_ac = Activity.objects.filter(activitygroup__keyflow=self.kic)\n assert len(all_ac) == len(existing_nace) + len(new_nace)\n\n # assert that the Name matches in all values\n for row in df_file_ags.itertuples(index=False):\n # ToDo: different test case if activitygroups don't exist\n ag = ActivityGroup.objects.get(code=row.ag)\n ac = Activity.objects.get(activitygroup=ag,\n nace=row.nace)\n assert 
ac.name == row.name", "def locate_bulk_write_error(self, dbname, collname, reqs):\n for op in reqs:\n while True:\n try:\n self._mc[dbname][collname].replace_one(op._filter, op._doc)\n break\n except pymongo.errors.AutoReconnect as e:\n log.error('%s' % e)\n self.reconnect()\n except Exception as e:\n log.error('%s when excuting %s' % (e, op))\n break", "def test_update_contact(session): # pylint:disable=unused-argument\n org = factory_org_service()\n org.add_contact(TestContactInfo.contact1)\n\n dictionary = org.as_dict()\n assert len(dictionary['contacts']) == 1\n assert dictionary['contacts'][0]['email'] == TestContactInfo.contact1['email']\n\n org.update_contact(TestContactInfo.contact2)\n\n dictionary = org.as_dict()\n assert len(dictionary['contacts']) == 1\n assert dictionary['contacts'][0]['email'] == TestContactInfo.contact2['email']", "def update_recipes(self, update_existing = True):\n\n for url in self.links:\n\n existing = self.collection.find_one({ \"url\": url })\n if existing is None:\n self.logger.info(\"Record does not exist: %s\" % url)\n continue\n\n try:\n data = self.get_url(url)\n records = self.extract(data, url)\n except:\n self.logger.error(\"Processing failed for %s\" % url, exc_info = True)\n continue\n\n for record in records:\n try:\n if update_existing:\n updates = record\n else:\n updates = dict([ (k, v) for k, v in record.items() if k not in existing ])\n updates[\"update_time\"] = datetime.utcnow()\n self.collection.update_one({ \"url\": url }, { \"$set\": updates })\n except Exception as exc:\n self.logger.error(\"Could not update record: %s\" % record[\"url\"], exc_info = True)\n continue\n self.logger.info(\"Updated %s\" % url)\n\n time.sleep(self.pause)", "def test_bulk_observable_refang_add(self):\n observables = [\"hxxp://{}{}.com\".format(_random_domain(), i) for i in range(20)]\n info = self.api.observable_bulk_add(observables, ['bulk'])\n self.assertEqual(len(info), 20)\n for url in info:\n self.assertIn('http://', url['value'])", "def test_updating_multiple_records_through_filter_with_arg_value(self, test_domain):\n identifier1 = uuid4()\n identifier2 = uuid4()\n identifier3 = uuid4()\n identifier4 = uuid4()\n test_domain.repository_for(Person)._dao.create(\n id=identifier1, first_name=\"Athos\", last_name=\"Musketeer\", age=2\n )\n test_domain.repository_for(Person)._dao.create(\n id=identifier2, first_name=\"Porthos\", last_name=\"Musketeer\", age=3\n )\n test_domain.repository_for(Person)._dao.create(\n id=identifier3, first_name=\"Aramis\", last_name=\"Musketeer\", age=4\n )\n test_domain.repository_for(Person)._dao.create(\n id=identifier4, first_name=\"dArtagnan\", last_name=\"Musketeer\", age=5\n )\n\n # Perform update\n updated_count = (\n test_domain.repository_for(Person)\n ._dao.query.filter(age__gt=3)\n .update_all({\"last_name\": \"Fraud\"})\n )\n\n # Query and check if only the relevant records have been updated\n assert updated_count == 2\n\n u_person1 = test_domain.repository_for(Person)._dao.get(identifier1)\n u_person2 = test_domain.repository_for(Person)._dao.get(identifier2)\n u_person3 = test_domain.repository_for(Person)._dao.get(identifier3)\n u_person4 = test_domain.repository_for(Person)._dao.get(identifier4)\n assert u_person1.last_name == \"Musketeer\"\n assert u_person2.last_name == \"Musketeer\"\n assert u_person3.last_name == \"Fraud\"\n assert u_person4.last_name == \"Fraud\"", "def test_update_batch(self):\n self.batch_data['batch_id'] = self.batch_info.id\n resp = self.query_with_token(\n 
self.access_token_master,\n update_batch_info.format(**self.batch_data))\n\n self.assertIn('data', resp)\n self.assertEqual(\n resp['data']['updateBatchInfo']['batchInfo']['supplier']['name'],\n self.supplier.name)\n self.assertEqual(\n resp['data']['updateBatchInfo']['batchInfo']['batchNo'],\n self.batch_info.batch_no)", "def test_client_verification_document_partial_update(self):\n pass", "def test_patient_detail_after_updating(self):\n url = reverse('doctor-list')\n response = self.client.get(reverse(\n 'patient:patient-detail', kwargs={'pk': Patient.objects.get(patient_name='testpatient1').id}))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(Patient.objects.count(), 1)", "def test_sync_from_sugar_contact(self):\n LOG.debug('test_sync_from_sugar_contact')\n business = Business.objects.get(id=114)\n advertiser = Advertiser.objects.get(id=114)\n email = advertiser.email\n module = \"Contacts\"\n query = build_recent_entry_query(module=module, test_mode=True, \n get_modified=False, start=None)\n sugar_list = self.sugar.get_entry_list(module, query)\n sugar_dict = sugar_list[0]\n sugar_dict['advertiser_id_c'] = ''\n self.sugar.set_entry(module, dict_to_name_value(sugar_dict))\n billing_record = BillingRecord.objects.get(id=114)\n order = billing_record.orders.all()[0]\n order.delete()\n billing_record.delete()\n business.delete()\n consumer = Consumer.objects.get(email=email)\n consumer.delete()\n advertiser.delete()\n sync_business_from_sugar(test_mode=True, sugar=self.sugar)\n # business is not created since Sugar record modified by 10Coupons user\n try:\n business = Business.objects.get(advertiser=advertiser)\n self.assertTrue(False)\n except business.DoesNotExist:\n self.assertTrue(True)", "def step070() -> None:\n logger.logMessage('Begin: elasticsearch bulk update')\n client = es.Elasticsearch(hostlist)\n\n def generate():\n with open(renumFile,'r') as f:\n line = f.readline().rstrip()\n while line != '':\n fields = line.split(';')\n oper = { '_index': fields[3], \n '_op_type': 'update',\n '_id': fields[2].rstrip(),\n '_type': 'doc',\n '_source:': {'doc': {'tsa': fields[0]}}}\n \n yield oper\n line = f.readline().rstrip()\n result = eshelp.bulk(client,generate())\n logger.logMessage('Bulk result: {0}'.format(result))\n logger.logMessage('End : elasticsearch bulk update')", "def test_bulk_locations(self):\n # do twice to check if it really updates\n lengths = []\n for i in range(2):\n file_path = os.path.join(os.path.dirname(__file__),\n self.testdata_folder,\n self.filename_locations)\n data = {\n 'bulk_upload' : open(file_path, 'rb'),\n }\n\n res = self.client.post(self.location_url, data)\n assert res.status_code == status.HTTP_201_CREATED, (\n responses.get(res.status_code, res.status_code), res.content)\n lengths.append(len(AdministrativeLocation.objects.all()))\n\n assert lengths[0] == lengths[1]\n\n file_path = os.path.join(os.path.dirname(__file__),\n self.testdata_folder,\n self.filename_locations_duplicates)\n data = {\n 'bulk_upload' : open(file_path, 'rb'),\n }\n\n res = self.client.post(self.location_url, data)\n assert res.status_code == status.HTTP_400_BAD_REQUEST", "def test_updateContact(self):\n qs = Contact.objects.all()\n contact = qs[0]\n contact2 = Contact.objects.get(id=contact.id)\n to_update_value = 'address 2'\n contact2.address = to_update_value\n contact2.save()\n # refresh from db\n contact3 = Contact.objects.get(id=contact.id)\n self.assertEqual(contact3.address, to_update_value)", "def update_many(collection: Collection, 
query, data_to_update):\n return collection.update_many(query, {'$set': data_to_update}).matched_count", "def test_bulk_delete_iterates_doc_ids_only_once(self):\n doc = self._index_new_doc()\n doc_ids = OneshotIterable([doc[\"_id\"]])\n self.adapter.bulk_delete(doc_ids) # does not raise IterableExhaustedError", "def test_edit_contact_list(self):\n c1 = ContactFactory(company_id=self.company.id)\n contact_list = ContactList.objects.first()\n data = ContactListSerializer(contact_list).data\n\n data['title'] = 'Nestle'\n data['contact_ids'] = [c1.id]\n\n url, parsed = self.prepare_urls('v1:contact_list-detail', subdomain=self.company.subdomain, kwargs={'pk':contact_list.id})\n \n response = self.client.put(url, data, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n self.authenticate_user()\n response = self.client.put(url, data, HTTP_HOST=parsed.netloc, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n url, parsed = self.prepare_urls('v1:contact_list-detail', subdomain=self.company.subdomain, kwargs={'pk':contact_list.id})\n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n content = json.loads(response.content)\n self.assertEqual(content['title'], 'Nestle')\n self.assertEqual(content['contacts'], [c1.id])", "def test_integrity_error_bulk_create(self):\n link1, link2 = LinkFactory.create_batch(2)\n self.service.get_clicks_for_date.return_value = {\n unicode(link1.pk): '4',\n unicode(link2.pk): '7'\n }\n\n with patch.object(collect_ga_data, 'DataPoint') as MockDataPoint:\n MockDataPoint.objects.bulk_create.side_effect = IntegrityError\n\n with self.assertRaises(CommandError):\n self.command.execute()", "def test_sync_updates_materials(self):\n expected_data = [\n ['1', '{wool,AAA,BBB,CCC}'],\n ['2', '{cotton,AAA,BBB,CCC}'],\n ['3', '{cotton,AAA,BBB,CCC}']\n ]\n\n select_listings_to_edit(self.driver)\n d = self.driver\n bp = BulkPage(d)\n\n send_keys(bp.operation_input(), 'AAA,BBB ,CCC')\n click(bp.operation_apply())\n\n click(bp.sync_updates_button())\n\n # Check updated data in DB\n wait_for_assert(expected_data,\n lambda: run_sql('HIVE', 'select_materials_modified', True),\n 'Unexpected materials data in DB')", "def test_updateContact(self):\n response = self.client.get(self.url)\n qs = response.json()\n contact = qs[0]\n to_update_value = 'address 2'\n contact['address'] = to_update_value\n response = self.client.put(self.url + str(contact['id']) + '/', contact, content_type=\"application/json\")\n self.assertEqual(response.status_code, 200)\n contact2 = response.json()\n self.assertEqual(contact2['address'], to_update_value)", "def test_staff_update_duplicate_procedure_fails(self):\n res = self.client.post(PROCEDURE_URL, self.payload, format='json')\n second_payload = {\n 'name': 'abc',\n 'speciality': [self.speciality.id],\n 'overview': 'bla bla bla'\n }\n self.client.post(PROCEDURE_URL, second_payload, format='json')\n\n url = get_item_url(res.data['id'])\n new_payload = {\n 'name': 'abc',\n }\n\n response = self.client.patch(url, new_payload, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)" ]
[ "0.7225289", "0.6630789", "0.5863282", "0.58182335", "0.5813409", "0.571845", "0.5649232", "0.5634012", "0.56237847", "0.5583595", "0.55478853", "0.5496923", "0.5478822", "0.5456204", "0.5439767", "0.5429322", "0.5392458", "0.5379931", "0.53564715", "0.53475344", "0.5325671", "0.531757", "0.5317156", "0.5283924", "0.5265269", "0.5245436", "0.5243534", "0.5238251", "0.52337015", "0.51829016" ]
0.7989029
0
Code to extract spike frames from the specified unit.
def get_unit_spike_train(self, unit_id, start_frame=None, end_frame=None): if self.spike_train is None: self.spike_train = np.load(self.fname_spike_train) # find unit id spike times idx = np.where(self.spike_train[:,1]==unit_id) spike_times = self.spike_train[idx,0].squeeze() # find spike times if start_frame is None: start_frame = 0 if end_frame is None: end_frame = 1E50 # use large time idx2 = np.where(np.logical_and(spike_times>=start_frame, spike_times<end_frame))[0] spike_times = spike_times[idx2] return spike_times
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_into_frames(filename_raw, thr_var_per_event=5e-4, downsampling_factor=2, disable_display=False,\n filename_output_video=None):\n\n assert downsampling_factor == int(downsampling_factor), \"Error: downsampling_factor must be an integer\"\n assert downsampling_factor >= 0, \"Error: downsampling_factor must be >= 0\"\n\n mv_adaptive_rate_iterator = AdaptiveRateEventsIterator(input_path=filename_raw,\n thr_var_per_event=thr_var_per_event,\n downsampling_factor=downsampling_factor)\n\n height, width = mv_adaptive_rate_iterator.get_size()\n\n if filename_output_video == None:\n video_process = None\n else:\n assert not os.path.exists(filename_output_video)\n video_process = FFmpegWriter(filename_output_video)\n\n if video_process or not disable_display:\n img_bgr = np.zeros((height, width, 3), dtype=np.uint8)\n\n cv2.namedWindow(\"img\", cv2.WINDOW_NORMAL)\n\n for events in mv_adaptive_rate_iterator:\n assert events.size > 0\n start_ts = events[0][\"t\"]\n end_ts = events[-1][\"t\"]\n print(\"frame: {} -> {} delta_t: {} fps: {} nb_ev: {}\".format(start_ts, end_ts,\n end_ts - start_ts,\n 1e6 / (end_ts - start_ts),\n events.size))\n if video_process or not disable_display:\n img = events_to_diff_image(events, sensor_size=(height, width))\n img_bgr[...] = 0\n img_bgr[img < 0, 0] = 255\n img_bgr[img > 0, 1] = 255\n\n chunk_start_ts = events[0][\"t\"]\n chunk_end_ts = events[-1][\"t\"]\n delta_t_frame = chunk_end_ts - chunk_start_ts + 1\n frame_txt = \"ts: {} -> {} delta_t: {} fps: {} (nb_ev): {}\".format(chunk_start_ts, chunk_end_ts,\n delta_t_frame,\n int(1.e6/delta_t_frame),\n events.size)\n img_bgr[20:45, ...] = 0\n cv2.putText(img_bgr,\n frame_txt,\n (int(0.05 * width), 40),\n cv2.FONT_HERSHEY_PLAIN, 1.0, (200, 200, 100))\n\n if video_process:\n video_process.writeFrame(img_bgr.astype(np.uint8)[..., ::-1])\n if not disable_display:\n cv2.imshow(\"img\", img_bgr)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n if video_process:\n video_process.close()\n if not disable_display:\n cv2.destroyAllWindows()", "def get_measured_subframes(self):\r\r\n loggerCmw = logging.getLogger('get_measured_subframes')\r\r\n numSf_str = self.read('FETCh:WCDMa:SIGN:HACK:MSFRames?')\r\r\n numSf_list = numSf_str.split(',')\r\r\n\r\r\n num = -1\r\r\n measured_subframes = self.NO_MEASURED_FRAMES_STR\r\r\n reliability = numSf_list[0]\r\r\n if reliability == '0':\r\r\n measured_subframes = numSf_list[1]\r\r\n else:\r\r\n loggerCmw.info(\"Measurements are not reliable, reliability indicator %s\" %reliability)\r\r\n\r\r\n return measured_subframes", "def calculate_psf_tilts():\n for order in [1, 2]:\n\n # Get the file\n path = 'files/SOSS_PSF_tilt_order{}.npy'.format(order)\n psf_file = resource_filename('awesimsoss', path)\n\n # Dimensions\n subarray = 'SUBSTRIP256'\n X = range(2048)\n Y = range(256)\n\n # Get the wave map\n wave_map = utils.wave_solutions(subarray, order).astype(float)\n\n # Get the y-coordinate of the trace polynomial in this column\n # (center of the trace)\n coeffs = trace_polynomials(subarray=subarray, order=order)\n trace = np.polyval(coeffs, X)\n\n # Interpolate to get the wavelength value at the center\n wave = interp2d(X, Y, wave_map)\n\n # Get the wavelength of the trace center in each column\n trace_wave = []\n for x, y in zip(X, trace):\n trace_wave.append(wave(x, y)[0])\n\n # For each column wavelength (defined by the wavelength at\n # the trace center) define an isowavelength contour\n angles = []\n for n, x in enumerate(X):\n\n w = trace_wave[x]\n\n # Edge cases\n try:\n w0 = 
trace_wave[x-1]\n except IndexError:\n w0 = 0\n\n try:\n w1 = trace_wave[x+1]\n except IndexError:\n w1 = 10\n\n # Define the width of the wavelength bin as half-way\n # between neighboring points\n dw0 = np.mean([w0, w])\n dw1 = np.mean([w1, w])\n\n # Get the coordinates of all the pixels in that range\n yy, xx = np.where(np.logical_and(wave_map >= dw0, wave_map < dw1))\n\n # Find the angle between the vertical and the tilted wavelength bin\n if len(xx) >= 1:\n angle = get_angle([xx[-1], yy[-1]], [x, trace[x]])\n else:\n angle = 0\n\n # Don't flip them upside down\n angle = angle % 180\n\n # Add to the array\n angles.append(angle)\n\n # Save the file\n np.save(psf_file, np.array(angles))\n print('Angles saved to', psf_file)", "def pressures_in_mb( pressures ):\n if not hasattr( pressures, 'units' ): return None\n if pressures.units=='mb':\n pressures.units = 'mbar' # udunits uses mb for something else\n return pressures[:]\n tmp = udunits(1.0,pressures.units)\n s,i = tmp.how('mbar')\n pressmb = s*pressures[:] + i\n return pressmb", "def ExtractThroughput(regex, output, metadata, metric, unit):\n matches = regex_util.ExtractAllMatches(regex, output)\n samples = []\n for index, value in enumerate(matches):\n metadata_with_index = copy.deepcopy(metadata)\n metadata_with_index['index'] = index\n samples.append(sample.Sample(metric, float(value), unit,\n metadata_with_index))\n return samples", "def get_unit(self,tag):", "def find_tape():\n\n _, frame = CAP.read()\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n mask = cv2.inRange(hsv, color_lower, color_upper)\n _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE,\n cv2.CHAIN_APPROX_SIMPLE)\n\n # Find all valid pair rects, and reutrn if none found\n pair_rects = get_pair_rects(contours)\n if len(pair_rects) == 0:\n return\n\n # If found, continue on and post results\n center = closest_center(pair_rects)\n\n to_send = '{}:{}\\n'.format(\n round(time.time(), 3), round(degrees(horizontal_angle(center[0])), 3))\n print(to_send)\n s.send(bytearray(to_send, 'utf-8'))", "def get_image(self, frame):\n msec = frame * config.MS_PER_FRAME\n frame = msec // 250\n return self.frames[frame % self.num_frames]", "def findspikes(t, v, thresh):\n tm = np.array(t)\n s0 = np.array(v) > thresh # np.where(v > thresh) # np.array(v) > thresh # find points above threshold\n\n# print ('v: ', v)\n dsp = tm[s0]\n if dsp.shape[0] == 1:\n dsp = np.array(dsp)\n sd = np.append(True, np.diff(dsp) > 1.0) # find first points of spikes\n if len(dsp) > 0:\n sp = dsp[sd]\n else:\n sp = []\n return(sp) # list of spike times.", "def my_slow_function():\r\n d = load_wine(as_frame=True)\r\n return d", "def get_fits_key_unit(self, key):\n #if self.get(key) is None:\n # raise KeyError(f'No key \"{key}\" found')\n kcomment = self.comments[key]\n # Extract unit together with lead-in and delimiter\n # https://stackoverflow.com/questions/8569201/get-the-string-within-brackets-in-python\n m = re.search(self._unit_regexp, kcomment)\n if m is None:\n log.debug(f'no unit matching \"(unit)\" in \"{kcomment}\"')\n return None\n # Strip off delemeters\n punit_str = m.group(0)\n # No escaping needed because within re '[]'\n s = self.unit_str_start\n l = self.unit_str_delimeters[0]\n r = self.unit_str_delimeters[1]\n e = self.unit_str_end\n unit_str = re.sub(f'[{s}{l}{r}{e}]', '', punit_str)\n try:\n unit = u.Unit(unit_str)\n except ValueError as e:\n log.warning(f'Card comment: {kcomment}')\n log.warning(e)\n return None\n return unit", "def decode(self, spikes: np.ndarray) -> np.ndarray:\n 
pass", "def get_fixation_frames(subject, run=0):\n\n trial_frames = np.append(condition_frames(load_evs(subject, 'wm', 'all_bk_cor'))[run],\n condition_frames(load_evs(subject, 'wm', 'all_bk_err'))[run]) # TODO: include no response trials\n trial_frames = np.sort(trial_frames)\n\n fixation_start = np.array([], dtype=int) # initialize\n\n for idx, i in enumerate(trial_frames):\n if idx == 0:\n continue\n\n # find frames with difference greater than 10s\n if i - trial_frames[idx - 1] > 10 / TR:\n fixation_start = np.append(fixation_start, trial_frames[idx - 1])\n\n fixation_duration = np.ceil(15 / TR) # always 15s duration\n\n # get range of frames corresponding to duration of fixation block\n fixation_frames = np.concatenate([i + np.arange(0, fixation_duration, dtype=int) for i in fixation_start])\n\n return fixation_frames", "def get_tod(self, unit=None):\n tod = Tod.empty((self.get_ndetectors(), np.sum(self.get_nsamples())))\n sizeofpmatrix = self.info.npixels_per_sample * tod.size\n pmatrix = np.zeros(sizeofpmatrix, dtype=int)\n status = tmf.madmap1_read_tod(self.info.todfile, self.info.invnttfile,\n self.info.convert, self.info.npixels_per_sample, 0, tod.T, pmatrix)\n if status != 0: raise RuntimeError()\n if unit is not None:\n tod.unit = unit\n return tod", "def wave_get_pulses():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVSP, 0, 0))", "def consume_units(self, units):\n pass", "def gen_beat_output(e):\n return [playback_char(e,t) for t in range(70000)]", "def ffmpeg_extract_frame(filename, t1, targetname):\n\n cmd = [get_setting(\"FFMPEG_BINARY\"),\n \"-i\", filename,\n \"-ss\", \"%0.2f\" % t1,\n \"-vframes\", \"1\", targetname]\n\n subprocess_call(cmd)", "def test_r0_500(self):\n np.random.seed(57721)\n for _ in range(10):\n airmass = np.random.uniform(1.001, 1.5)\n rawSeeing = np.random.uniform(0.5, 1.5)\n band = 'ugrizy'[np.random.randint(6)]\n boresight = galsim.CelestialCoord(0 * galsim.radians, 0 * galsim.radians)\n rng = galsim.BaseDeviate(np.random.randint(2**32))\n atmPSF = imsim.AtmosphericPSF(airmass, rawSeeing, band, boresight, rng, screen_size=6.4)\n\n wlen = dict(u=365.49, g=480.03, r=622.20, i=754.06, z=868.21, y=991.66)[band]\n targetFWHM = rawSeeing * airmass**0.6 * (wlen/500)**(-0.3)\n\n r0_500 = atmPSF.atm.r0_500_effective\n L0 = atmPSF.atm[0].L0\n vkFWHM = imsim.AtmosphericPSF._vkSeeing(r0_500, wlen, L0)\n\n np.testing.assert_allclose(targetFWHM, vkFWHM, atol=1e-3, rtol=0)", "def nearest_test_pulse(self):", "def eye_timings(self, nr_dummy_scans = 6, mystery_threshold = 0.05,saccade_duration_threshold = 10):\n\n\t\n\t\tfor r in [self.runList[i] for i in self.conditionDict['WMM']]:\n\t\t\t# shell()\n\t\t\tniiFile = NiftiImage(self.runFile(stage = 'processed/mri', run = r))\n\t\t\ttr = round(niiFile.rtime*1)/1000.0\n\t\t\twith open (self.runFile(stage = 'processed/eye', run = r, extension = '.msg')) as inputFileHandle:\n\t\t\t\tmsg_file = inputFileHandle.read()\n\n\n\t\t\tsacc_re = 'ESACC\\t(\\S+)[\\s\\t]+(-?\\d*\\.?\\d*)\\t(-?\\d+\\.?\\d*)\\s+(-?\\d+\\.?\\d*)\\s+(-?\\d+\\.?\\d*)\\s+(-?\\d+\\.?\\d*)\\s+(-?\\d+\\.?\\d*)\\s+(-?\\d+\\.?\\d*)\\s+(-?\\d+\\.?\\d*)\\s+(-?\\d+.?\\d+)'\n\t\t\tfix_re = 'EFIX\\t(\\S+)\\s+(-?\\d*\\.?\\d*)\\t(-?\\d+\\.?\\d*)\\s+(-?\\d+\\.?\\d*)?\\s+(-?\\d+\\.?\\d*)?\\s+(-?\\d+\\.?\\d*)?\\s+(-?\\d+\\.?\\d*)?'\n\t\t\tblink_re = 'EBLINK\\t(\\S+)\\s+(-?\\d*\\.?\\d*)\\t(-?\\d+\\.?\\d*)\\s+(-?\\d?.?\\d*)?'\n\t\t\tstart_eye = 'START\\t(-?\\d+\\.?\\d*)'\n\n\t\t\t# self.logger.info('reading eyelink events from %s', 
os.path.split(self.message_file)[-1])\n\t\t\tsaccade_strings = re.findall(re.compile(sacc_re), msg_file)\n\t\t\tfix_strings = re.findall(re.compile(fix_re), msg_file)\n\t\t\tblink_strings = re.findall(re.compile(blink_re), msg_file)\n\t\t\tstart_time_scan = float(re.findall(re.compile(start_eye),msg_file)[0])\n\t\t\t\n\t\t\tif len(saccade_strings) > 0:\n\t\t\t\tself.saccades_from_message_file = [{'eye':e[0],'start_timestamp':float(e[1]),'end_timestamp':float(e[2]),'duration':float(e[3]),'start_x':float(e[4]),'start_y':float(e[5]),'end_x':float(e[6]),'end_y':float(e[7]), 'mystery_measure':float(e[8]),'peak_velocity':float(e[9])} for e in saccade_strings]\n\t\t\t\tself.fixations_from_message_file = [{'eye':e[0],'start_timestamp':float(e[1]),'end_timestamp':float(e[2]),'duration':float(e[3]),'x':float(e[4]),'y':float(e[5]),'pupil_size':float(e[6])} for e in fix_strings]\n\t\t\t\tself.blinks_from_message_file = [{'eye':e[0],'start_timestamp':float(e[1]),'end_timestamp':float(e[2]),'duration':float(e[3])} for e in blink_strings]\n\t\t\t\n\t\t\t\tself.saccade_type_dictionary = np.dtype([(s , np.array(self.saccades_from_message_file[0][s]).dtype) for s in self.saccades_from_message_file[0].keys()])\n\t\t\t\tself.fixation_type_dictionary = np.dtype([(s , np.array(self.fixations_from_message_file[0][s]).dtype) for s in self.fixations_from_message_file[0].keys()])\n\t\t\t\tif len(self.blinks_from_message_file) > 0:\n\t\t\t\t\tself.blink_type_dictionary = np.dtype([(s , np.array(self.blinks_from_message_file[0][s]).dtype) for s in self.blinks_from_message_file[0].keys()])\n\t\t\t\n\t\t\teye_blinks = [[((self.blinks_from_message_file[i]['start_timestamp']- start_time_scan)/1000) - nr_dummy_scans*tr, self.blinks_from_message_file[i]['duration']/1000,1] for i in range(len(self.blinks_from_message_file)) if (self.blinks_from_message_file[i]['start_timestamp']- start_time_scan) > (nr_dummy_scans*tr*1000)]\n\t\t\t\n\t\t\t\n\t\t\tsaccades = [[((self.saccades_from_message_file[i]['start_timestamp']- start_time_scan)/1000) - nr_dummy_scans*tr, self.saccades_from_message_file[i]['duration']/1000,1] for i in range(len(self.saccades_from_message_file)) if np.all([(self.saccades_from_message_file[i]['start_timestamp']- start_time_scan) > (nr_dummy_scans*tr*1000), (self.saccades_from_message_file[i]['duration'] > saccade_duration_threshold)]) ]\n\t\t\tsaccades_thresholded = [[((self.saccades_from_message_file[i]['start_timestamp']- start_time_scan)/1000) - nr_dummy_scans*tr, self.saccades_from_message_file[i]['duration']/1000,1] for i in range(len(self.saccades_from_message_file)) if np.all([(self.saccades_from_message_file[i]['start_timestamp']- start_time_scan) > (nr_dummy_scans*tr*1000), (self.saccades_from_message_file[i]['mystery_measure'] > mystery_threshold), (self.saccades_from_message_file[i]['duration'] > saccade_duration_threshold)]) ]\n\t\t\n\t\t\tnp.savetxt(self.runFile(stage = 'processed/eye', run = r, extension = '.txt', postFix = ['eye_blinks']), np.array(eye_blinks), fmt = '%3.2f', delimiter = '\\t')\n\t\t\tnp.savetxt(self.runFile(stage = 'processed/eye', run = r, extension = '.txt', postFix = ['saccades']), np.array(saccades), fmt = '%3.2f', delimiter = '\\t')\n\t\t\tnp.savetxt(self.runFile(stage = 'processed/eye', run = r, extension = '.txt', postFix = ['saccades_thresholded']), np.array(saccades_thresholded), fmt = '%3.2f', delimiter = '\\t')", "def get_stripe82_file(rerun, run, camcol=1, filter='u', framenum=1):\n data_url = get_stripe82_url(rerun, run, camcol, filter, framenum)\n local_file = 
os.path.split(data_url)[-1]\n\n if os.path.exists(local_file):\n print \"using local image:\", local_file\n else:\n buf = download_with_progress_bar(data_url, return_buffer=True)\n open(local_file, 'wb').write(buf.read())\n\n bz2f = BZ2File(local_file)\n hdulist = pyfits.open(StringIO(bz2f.read()))\n\n return hdulist", "def frame_unit(self):\n self.skip_over_label['text'] = \"Start at (frames): \"\n self.sampled_rate_label['text'] = \"Sample every (frames): \"\n self.total_frames_label['text'] = \"Total frames: \"", "def second2frame(self, second):\n sample = int(second * self.fs)\n frame = int(round(sample/self.hop_len))\n return frame", "def extract_units(url, headers):\n _print(\"Processing '%s'...\" % url)\n page = get_page_contents(url, headers)\n\n re_splitter = re.compile(r'data-streams=(?:&#34;|\").*1.0[0]*:')\n re_subs = re.compile(r'data-transcript-translation-url=(?:&#34;|\")([^\"&]*)(?:&#34;|\")')\n re_available_subs = re.compile(r'data-transcript-available-translations-url=(?:&#34;|\")([^\"&]*)(?:&#34;|\")')\n re_units = re_splitter.split(page)[1:]\n units = []\n for unit_html in re_units:\n video_id = unit_html[:YOUTUBE_VIDEO_ID_LENGTH]\n sub_urls = {}\n match_subs = re_subs.search(unit_html)\n if match_subs:\n match_available_subs = re_available_subs.search(unit_html)\n if match_available_subs:\n available_subs_url = BASE_URL + match_available_subs.group(1)\n try:\n available_subs = get_page_contents_as_json(available_subs_url, headers)\n except HTTPError:\n available_subs = ['en']\n\n for sub_prefix in available_subs:\n sub_urls[sub_prefix] = BASE_URL + match_subs.group(1) + \"/\" + sub_prefix + \"?videoId=\" + video_id\n\n video_youtube_url = 'https://youtube.com/watch?v=' + video_id\n units.append(Unit(video_youtube_url=video_youtube_url,\n sub_urls=sub_urls))\n\n # Try to download some extra videos which is referred by iframe\n re_extra_youtube = re.compile(r'//w{0,3}\\.youtube.com/embed/([^ \\?&]*)[\\?& ]')\n extra_ids = re_extra_youtube.findall(page)\n for extra_id in extra_ids:\n video_youtube_url = 'https://youtube.com/watch?v=' + extra_id[:YOUTUBE_VIDEO_ID_LENGTH]\n units.append(Unit(video_youtube_url=video_youtube_url))\n\n return units", "def get_state(self, frames=3):\n if frames<2:\n raise ValueError('Needs at least 2 frames to determine velocity')\n self.flush_buffer()\n start_t = time.time()\n \n # time this to make sure we aren't blocking on get_pos for too long\n puck_history = []\n time_history = []\n p_pos, p_vel, p_pos_test = [0,0], [0,0], [0,0]\n s1_pos, s2_pos = [0,0], [0,0]\n for i in range(frames):\n _, frame = self.cam.read()\n t = time.time()-start_t\n p = self.get_pos(frame)\n \n if p[0] is not None:\n puck_history.append(p[0])\n time_history.append(t)\n # choose last nonzero striker locations\n if p[0] is not None:\n p_pos_test = p[0]\n if p[1] is not None:\n s1_pos = p[1]\n if p[2] is not None:\n s2_pos = p[2]\n \n # estimate puck position at current time\n if len(puck_history)==0:\n pass\n elif len(puck_history)==1:\n p_pos = puck_history[0]\n else:\n # do linear regression\n a = np.array([[t,1] for t in time_history])\n b = np.array(puck_history)\n m = np.linalg.lstsq(a, b, rcond=None)[0]\n \n t = np.array([[time.time()-start_t, 1]])\n p_pos = np.dot(t,m)[0]\n p_vel = m[:,0]\n \n return np.array([p_pos, p_vel, s1_pos, s2_pos])", "def get_ticker_images(video, ticker, frame_numbers):\n \n images = []\n for frame_number in frame_numbers:\n frame = video.frame(frame_number)\n images.append(cut_window(frame, ticker))\n \n return images", "def 
test(self):\n winsound.PlaySound('SystemExclamation', winsound.SND_ALIAS)\n \n pulses=1000*3\n winsound.Beep(200, 1000) # .Beep(1650Hz, (XXXXms)) #e.g 1000ms=1second\n self.run(pulses); self.run(pulses, ANTI_CLK_W)\n sleep(1)\n\n winsound.Beep(400, 1000)\n self.swing(128, count=30); self.stop() #0.9 degrees\n sleep(1)\n\n winsound.Beep(800, 1000)\n print('Testing I.....')\n self.swing(32, count=120); self.stop() #0.225 degrees \n sleep(1)\n\n winsound.Beep(1600, 1000)\n print('Testing II.....')\n self.swing(2, count=1800); self.stop() #0.05625 degrees\n \n winsound.PlaySound('SystemExclamation', winsound.SND_ALIAS)\n print(' Testings Done! ')\n return self.stop() #set low before exist ", "def extractSubwav(fn: str, outputFN: str, startTime: float, endTime: float) -> None:\n wav = QueryWav(fn)\n frames = wav.getFrames(startTime, endTime)\n wav.outputFrames(frames, outputFN)", "def getSamples(self, section, pitch, target=\"beats\"):\n sample_list = audio.AudioQuantumList()\n if target == \"beats\":\n sample_list.extend([b for x in section.children() for b in x.children()]);\n elif target == \"bars\":\n sample_list.extend(section.children())\n return sample_list.that(overlap_ends_of(self.original.analysis.segments.that(have_pitch_max(pitch)).that(overlap_starts_of(sample_list))))" ]
[ "0.4972694", "0.49719194", "0.49164146", "0.49041563", "0.48662052", "0.48609337", "0.48275054", "0.4764923", "0.47384405", "0.47236058", "0.4705282", "0.46844137", "0.4676775", "0.46520847", "0.46477193", "0.46457043", "0.46244732", "0.4619206", "0.4593396", "0.4579161", "0.45319575", "0.45257828", "0.4523807", "0.45031747", "0.44955298", "0.44954613", "0.44759578", "0.4473669", "0.4460022", "0.44476393" ]
0.5579445
0
Convert snake case argument name to a command line name.
def arg_name(name): return "--" + name.replace('_', '-')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _arg_to_flag(name: str) -> str:\n arg = '-'.join(name.split('_'))\n return f'--{arg}'", "def FixArgFileName(fileName):\n import os\n\n path, fname = os.path.split(fileName)\n if len(path) == 0:\n path = os.curdir\n path = os.path.abspath(path)\n # must check that the command line arg's path is in sys.path\n for syspath in sys.path:\n if os.path.abspath(syspath) == path:\n break\n else:\n sys.path.append(path)\n return os.path.splitext(fname)[0]", "def _case_name(command):\n found = re.findall('(\\w*)\\.java', command)\n assert len(found) == 1, 'Could not parse name from command: %s' % command\n return found[0]", "def _get_arg_name(self, arg, variable_name):", "def _getArgStr(self):\n return \"name=%r\" % (self.name)", "def name(self):\n\t\treturn self.args[0]", "def get_cli_string():\n return os.path.basename(sys.argv[0]) + \" \" + \" \".join(sys.argv[1:])", "def _FormalizeName(cls, name):\n name = name.replace(\"_\", \"-\").lower()\n name = name[:cls.NAME_LENGTH_LIMIT]\n if name[-1] == \"-\":\n name = name[:-1] + cls.REPLACER\n return name", "def name_to_snake_case(name: str) -> str:\n\n # From COBOL entity\n if '-' in name or name.isupper():\n return name.strip().lower().replace('-', '_')\n\n # From camelCase\n return re.sub(r'(?<!^)(?=[A-Z])', '_', name.strip()).lower()", "def _flag_to_arg(flag: str) -> str:\n arg = flag.split('--')[1].split('-')\n arg = '_'.join(arg)\n return arg", "def normalize_name(cls, name):\n\t\treturn ' '.join(name.lower().strip().split())", "def normalize_name(cls, name):\n\t\treturn ' '.join(name.lower().strip().split())", "def normalize_name(cls, name):\n\t\treturn ' '.join(name.lower().strip().split())", "def _camel_to_snake(name):\n s1 = re.sub(\"(.)([A-Z][a-z]+)\", r\"\\1_\\2\", name)\n return re.sub(\"([a-z0-9])([A-Z])\", r\"\\1_\\2\", s1).lower()", "def _name(self):\n return self._arguments[0].split('(')[0]", "def camel_to_snake_case(name: str) -> str:\n return CAPITALS.sub(r'_\\1', name).lower().lstrip('_')", "def _snake_to_camel(name, strict=False):\n if strict:\n name = name.lower()\n terms = name.split('_')\n return terms[0] + ''.join([term.capitalize() for term in terms[1:]])", "def _name(self):\n return self.arguments[0].split('(')[0]", "def convert(name):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', name)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()", "def get_class_name(*args):\n name = '_'.join(args)\n name = slugify(name, separator='_')\n return underscore_to_camelcase(name)", "def camel_to_snake(name: str) -> str:\n name = re.sub(\"(.)([A-Z][a-z]+)\", r\"\\1_\\2\", name)\n return re.sub(\"([a-z0-9])([A-Z])\", r\"\\1_\\2\", name).lower()", "def _parse_option_name(line):\n return line.split('=')[0].strip()", "def camel_to_snake(name):\n name = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', name)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', name).lower()", "def snake_to_camel_case(name: str, initial: bool = False) -> str:\n chunks = name.split('_')\n converted = [s.capitalize() for s in chunks]\n if initial:\n return ''.join(converted)\n else:\n return chunks[0].lower() + ''.join(converted[1:])", "def get_snake_case_from_camel_case(name: str) -> str:\n\n new_chars = []\n for i, char in enumerate(name): \n if i == len(name)-1 or i == 0: \n new_chars.append(char)\n elif char.isupper() and name[i+1].islower():\n new_chars.append('_')\n new_chars.append(char)\n elif char.islower() and name[i+1].isupper(): \n new_chars.append(char)\n new_chars.append('_')\n else: \n new_chars.append(char)\n\n new_name = ''.join(new_chars)\n return 
new_name.lower().replace('__', '_')", "def create_env_name(name):\n new_name = re.sub(r'''(?<=[a-z])([A-Z])''', '_\\\\1', name)\n new_name = re.sub(r'\\W+', '_', new_name)\n new_name = re.sub(r'_{2,}', '_', new_name)\n return new_name.upper().strip(\"_\")", "def get_python_name(cls, name):\n first_cap_re = re.compile(\"(.)([A-Z](?!s([A-Z])*)[a-z]+)\")\n all_cap_re = re.compile(\"([a-z0-9])([A-Z])\")\n\n s1 = first_cap_re.sub(r\"\\1_\\2\", Utils._clean_name(name))\n return all_cap_re.sub(r\"\\1_\\2\", s1).lower()", "def cachename_from_args(undirected, supervised, with_authors, collate_coauthorship):\n name = ''\n if undirected:\n name += 'undirected'\n else:\n name += 'directed'\n name += '-'\n if supervised:\n name += 'supervised'\n else:\n name += 'unsupervised'\n name += '-'\n if with_authors:\n if collate_coauthorship:\n name += 'collated-authors'\n else:\n name += 'first-class-authors'\n else:\n name += 'no-authors'\n return name", "def get_command_name(args):\n\n # First argument would always be atlas or manage.py, i.e the calling interface\n if len(args) < 2:\n CommandError.print_to_err(f\"Name of command missing. Valid commands are - {VALID_COMMANDS}\")\n\n return args[1]", "def get_word():\n return ' '.join(sys.argv[1:])" ]
[ "0.637582", "0.6308252", "0.6223311", "0.6106777", "0.6087217", "0.60846734", "0.60713154", "0.5925403", "0.5907787", "0.5903477", "0.5897198", "0.5897198", "0.5897198", "0.58864826", "0.58782953", "0.5855721", "0.58493316", "0.58275014", "0.575536", "0.5754576", "0.57038325", "0.56934935", "0.5673581", "0.5665833", "0.5655093", "0.56505054", "0.56458616", "0.56306624", "0.5630053", "0.56115943" ]
0.7715281
0
Space separated resource references in filename=blobsource format.
def resource_file_format(value): try: file_name, blob_source = value.split('=') except ValueError: message = ("Incorrectly formatted resource reference. " "Argmuent values should be in the format filename=blobsource") raise ValueError(message) return {'file_path': file_name, 'blob_source': blob_source}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_blobs_to_resource_files(blobs, resource_properties):\n resource_files = []\n if not blobs:\n raise ValueError('No input data found with reference {}'.\n format(resource_properties.source.prefix))\n try:\n prefix = resource_properties.source.prefix\n except AttributeError:\n prefix = None\n if len(blobs) == 1 and blobs[0]['filePath'] == prefix:\n # Single file reference: filePath should be treated as file path\n file_path = resource_properties.file_path if resource_properties.file_path \\\n else blobs[0]['filePath']\n resource_files.append(models.ExtendedResourceFile(\n http_url=blobs[0]['url'],\n file_path=file_path,\n ))\n else:\n # Multiple file reference: filePath should be treated as a directory\n base_file_path = ''\n if resource_properties.file_path:\n base_file_path = '{}/'.format(\n FileUtils.STRIP_PATH.sub('', resource_properties.file_path))\n\n for blob in blobs:\n file_path = '{}{}'.format(base_file_path, blob['filePath'])\n resource_files.append(models.ExtendedResourceFile(\n http_url=blob['url'],\n file_path=file_path\n ))\n\n # Add filemode to every resourceFile\n if resource_properties.file_mode:\n for f in resource_files:\n f.file_mode = resource_properties.file_mode\n return resource_files", "def makeSourceRefs(refs):\n s = ''\n if refs:\n if isiterable(refs):\n for ref in refs:\n s += '<SourceRef>B%s-%s</SourceRef>' % (NODEID, ref)\n else: s += '<SourceRef>B%s-%s</SourceRef>' % (NODEID, refs)\n return s", "def get_input_file(self, *args, refsep='$', docopy=True):\n # filename = self.get_data(*args, docopy=docopy)\n filename = args[1]\n ref_files = ref_from_image(filename, ['IDCTAB', 'OFFTAB', 'NPOLFILE', 'D2IMFILE',\n 'DGEOFILE', 'MDRIZTAB'])\n print(\"Looking for REF_FILES: {}\".format(ref_files))\n\n for ref_file in ref_files:\n if ref_file.strip() == '':\n continue\n if refsep not in ref_file: # Local file\n refname = self.get_data('customRef', ref_file)\n else: # Download from FTP, if applicable\n refname = os.path.join(ref_file)\n if self.use_ftp_crds:\n download_crds(refname, self.timeout)\n return filename", "def source_file_name_feature(self):\n return \"_\".join((C_FILE_NAME, self.file_image_name.value))", "def external_array_references(self):\n return self._to_ears(self.fileuris)", "def filename_for_resource(resource):\n name = resource['url'].encode('utf8', 'ignore').split('/')[-1]\n return unquote(name)", "def load_resources(resource_filename):", "def get_sortmerna_ref_string(dbs):\n files = [\"resources/rRNA_databases/{db}\".format(db=db) for db in dbs]\n ref_string = \":\".join([\"{},{}\".format(f, f) for f in files])\n return ref_string", "def get_filename_and_url( filenames ):\n\tfor filename in filenames:\n\t\tfor url in get_links_from_file( filename ):\n\t\t\t yield ( filename, url )", "def resolve_resource_file(self, resource_file):\n if resource_file.http_url:\n # Support original resourceFile reference\n if not resource_file.file_path:\n raise ValueError('Malformed ResourceFile: \\'httpUrl\\' must '\n 'also have \\'file_path\\' attribute')\n return [resource_file]\n\n if resource_file.storage_container_url or resource_file.auto_storage_container_name:\n return [resource_file]\n\n if not hasattr(resource_file, 'source') or not resource_file.source:\n raise ValueError('Malformed ResourceFile: Must have either '\n ' \\'source\\' or \\'httpUrl\\'')\n\n storage_client = self.resolve_storage_account()\n container = None\n blobs = []\n\n if resource_file.source.file_group:\n # Input data stored in auto-storage\n container = 
get_container_name(resource_file.source.file_group)\n blobs = self.list_container_contents(resource_file.source, container, storage_client)\n return convert_blobs_to_resource_files(blobs, resource_file)\n if resource_file.source.container_url:\n return resource_file.source.container_url\n if resource_file.source.url:\n # TODO: Input data from an arbitrary HTTP GET source\n raise ValueError('Not implemented')\n raise ValueError('Malformed ResourceFile')", "def construct_sas_url(blob, uri):\n newuri = copy.copy(uri)\n newuri.pathname = '{}/{}'.format(uri.path, quote(blob.name.encode('utf-8')))\n return newuri.geturl()", "def link(self, fname):\n return fname", "def BootstrapSource (name, source, filename):\n source = binascii.b2a_base64 (zlib.compress (source.encode ('utf-8'))).strip ().decode ('utf-8')\n return source_payload.format (name = name, filename = filename, source = '\\\\\\n'.join (textwrap.wrap (source, 78)))", "def _blob_file(self, blob_name):\r\n return f\"{self._blob_folder()}/{blob_name}\"", "def generate_link(resources):\n\n links = \"\"\n for i, resource in enumerate(resources):\n link = \"<\" + resource[\"path\"] + \">\"\n if \"parameters\" in resource:\n for parameter in resource[\"parameters\"]:\n link += \";\" + str(parameter) + \"=\" + str(resource[\"parameters\"][parameter])\n links += link\n if i != len(resources) - 1:\n links += \",\"\n return links", "def getFileAccessUrls(samweb, filenameorid, schema, locationfilter=None):\n params = { \"schema\": schema }\n if locationfilter:\n params[\"location\"] = locationfilter\n response = samweb.getURL(_make_file_path(filenameorid) + '/locations/url', params=params)\n return convert_from_unicode(response.json())", "def activateResourceFilePaths(self, Iterable, p_str=None): # real signature unknown; restored from __doc__\n pass", "def resource_prefix(self):", "def filename_url_pairs(self, year=None):\n raise NotImplementedError()", "def file_url(self, fname):\n gs_url = f\"{self.gs_base_url}/{fname}\"\n return f\"{gs_url}\"", "def filename(self):\n fname1, fname2 = self.ad1.filename, self.ad2.filename\n if fname1 != fname2:\n return [f'{fname1} v {fname2}']", "def sources(obj, reftype):", "def url_for(filename):\n return \"{}{}\".format(S3_LOCATION, filename)", "def getImageName(self):\n return [os.path.basename(name) for name in self.meta['sources']]", "def add_ref_tag(basicSeqs):\r\n\r\n formattedBasicSeqs=list(basicSeqs) \r\n for record in formattedBasicSeqs:\r\n record.id=record.id+'_Ref'\r\n record.name=record.name+'_Ref'\r\n record.description=record.description+'_Ref'\r\n return formattedBasicSeqs", "def form_resource_name(resource_link: str) -> str:\n parsed_url = urlparse(resource_link)\n link = '{a}{b}'.format(a=parsed_url.hostname, b=parsed_url.path)\n file_extention = get_extention(link)\n return '{p}{e}'.format(\n p=re.sub(LINK_RE_PATTERN, '-', link.replace(file_extention, '')),\n e=file_extention,\n )", "def ResourcePath(self, name):\n pass", "def getFilePath( source, paper, month, day, year ):\r\n attributes = { SOURCE : source,\r\n PAPER : paper,\r\n MONTH : month,\r\n DAY : day,\r\n YEAR : year \r\n }\r\n return \"\\\\\".join( [ settings.CLEAN_STORE ] + [ attributes[ key ] for key in STORE_ORDER ] )", "def getFileURL(filename:str)->str:\n if '--develop' in sys.argv:\n return settings.REACT_DEV_PATH + filename\n\n return settings.EEL_PATH + filename", "def filepaths(self):\n pass" ]
[ "0.59185004", "0.54456174", "0.5418338", "0.53790176", "0.5303639", "0.5276898", "0.52433187", "0.5213896", "0.5195129", "0.519059", "0.5167885", "0.5135179", "0.5133125", "0.51307464", "0.51267403", "0.51265955", "0.5118821", "0.50666434", "0.5009204", "0.49958915", "0.4988842", "0.49764267", "0.49705395", "0.49680462", "0.49359262", "0.4921678", "0.4888284", "0.48722893", "0.48545453", "0.4847685" ]
0.67306453
0
Validate the destination path for a file download.
def validate_file_destination(namespace): try: path = namespace.destination except AttributeError: return else: # TODO: Need to confirm this logic... file_path = path file_dir = os.path.dirname(path) if os.path.isdir(path): file_name = os.path.basename(namespace.file_name) file_path = os.path.join(path, file_name) elif not os.path.isdir(file_dir): try: os.mkdir(file_dir) except EnvironmentError as exp: message = "Directory {} does not exist, and cannot be created: {}" raise ValueError(message.format(file_dir, exp)) if os.path.isfile(file_path): raise ValueError("File {} already exists.".format(file_path)) namespace.destination = file_path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __checkDestination(self):\n return os.path.exists(self.__targetPath)", "def validateDownload(obj, *args, **kwargs):\n\n if (obj.filename.rpartition('.')[2] == 'prop'):\n if (PackageUtil.validateProp(obj.filename) == False):\n raise AgentException(Errors.INVALID_PACKAGE, 'Package prop (%s) not valid' % obj.filename)\n return\n\n # figure the prop file name from the filename\n if (obj.filename.rpartition('.')[2] == 'inprogress'):\n propFilename = obj.filename.rpartition('.')[0] + '.prop'\n else:\n propFilename = obj.filename + '.prop'\n\n if (PackageUtil.validatePackage(obj.filename, propFilename) == False):\n raise AgentException(Errors.INVALID_PACKAGE, 'Package (%s) not valid' % obj.filename)", "def verify_destination(self, destination):\n # Make sure the text file was copied to the destination.\n text_file = os.path.join(destination, 'notes.txt')\n assert os.path.isfile(text_file)\n with open(text_file) as handle:\n assert handle.read() == \"This file should be included in the backup.\\n\"\n # Make sure the subdirectory was copied to the destination.\n subdirectory = os.path.join(destination, 'subdirectory')\n assert os.path.isdir(subdirectory)\n # Make sure the symbolic link was copied to the destination.\n symlink = os.path.join(subdirectory, 'symbolic-link')\n assert os.path.islink(symlink)", "def check_download_file_options(download, file):\n if file is None:\n err_str = \"download option should come along with a filepath\"\n raise ValueError(err_str)\n if download is None:\n err_str = \"file option should come along with a download option\"\n raise ValueError(err_str)\n check_download_option(download)", "def __validate_location(self):\n if not os.path.exists(self._file_path):\n raise FileNotFoundError(\"Directory does not exist\")\n if not os.path.isfile(self._path_name):\n raise FileNotFoundError('File does not exist')", "def validate(cls, output_destination):\n # nothing to check :)\n pass", "def validate(cls, output_destination):\n # nothing to check :)\n pass", "def _check_filename(self, filename):\n if len(os.path.dirname(filename)) != 0:\n raise ValueError(f\"Expecting file name but got path {filename}\")", "def verify_file_path(self) -> None:\n path = \"/data\"\n verify_file_path(path)", "def _validate_path(self, key, path):\n if path is None:\n raise TypeError(\"FileLink.path can not be None\")\n \n if not isinstance(path, (str, unicode)):\n raise TypeError(\"FileLink.path should be a str or unicode, \"\n \"not %s\" % path.__class__.__name__)\n return path", "def valid_file(self, path_to_torrent):\n \n if file and os.path.isfile(path_to_torrent):\n return path_to_torrent\n else:\n return None", "def _validate_path(self, path: str, is_file: bool) -> bool:\n is_valid_path = True\n if is_file and not os.path.isfile(path):\n is_valid_path = False\n elif not is_file and not os.path.isdir(path):\n is_valid_path = False\n if is_valid_path:\n logging.info('github_source_interceptor: Located path: ' + path)\n else:\n logging.error('github_source_interceptor: Could not locate path: ' + path)\n\n return is_valid_path", "def _target_is_valid_filename(self):\n filename = self.target\n if not filename_is_valid(filename):\n raise BadFilenameError(f\"{repr(filename)} must be a valid filename.\")\n return True", "def path_validate(path):\n # functionality to be added later\n return path", "def maybe_download(filename, expected_bytes):\n filepath = datapath + filename\n if not os.path.exists(filepath):\n # urlretrieve returns a tuple of saved filepath and info() of the downloaded file\n 
filepath, _ = urllib.request.urlretrieve(url+filename, filepath)\n statinfo = os.stat(filepath)\n if statinfo.st_size == expected_bytes:\n print('Found and verified', filepath)\n else:\n print(statinfo.st_size)\n raise Exception(\n 'Failed to verify ' + filepath + '. Can you get to it with a browser?')\n return filepath", "def file_downloaded(filename):\n fc = pathlib.Path(filename)\n if fc.is_file():\n return True\n else:\n return False", "def __maybeDownload():\n if not os.path.isdir(Download.DATA_ROOT): # 若 data 目录不存在,创建 data 目录\n os.mkdir(Download.DATA_ROOT)\n file_path = os.path.join(Download.DATA_ROOT, Download.FILE_NAME)\n\n if os.path.exists(file_path): # 若已存在该文件\n statinfo = os.stat(file_path)\n if statinfo.st_size == Download.FILE_SIZE: # 若该文件正确,直接返回 file_path\n print('Found and verified %s' % file_path)\n return file_path\n else: # 否则,删除文件重新下载\n os.remove(file_path)\n\n download_url = Download.URL + Download.FILE_NAME\n print('Downloading %s ...' % download_url)\n filename, _ = urlretrieve(download_url, file_path) # 下载数据\n print('Finish downloading')\n\n statinfo = os.stat(filename)\n if statinfo.st_size == Download.FILE_SIZE: # 校验数据是否正确下载\n print('Found and verified %s' % filename)\n else:\n print(statinfo.st_size)\n raise Exception('Failed to verify ' + filename + '. Can you get to it with a browser ?')\n return filename", "def clean(self):\n if not self.direct_url and not self.file:\n raise ValidationError('File or direct url required.')", "def download_link(self): # pragma: no cover\n\n if PyFunceble.Check(self.file).is_url():\n # We get the destination.\n destination = self.file.split(\"/\")[-1]\n\n if self.file and self.autocontinue.is_empty():\n # The given file is an URL.\n\n if (\n not PyFunceble.path.isfile(destination)\n or PyFunceble.INTERN[\"counter\"][\"number\"][\"tested\"] == 0\n ):\n # The filename does not exist in the current directory\n # or the currently number of tested is equal to 0.\n\n # We download the content of the link.\n Download(self.file, destination).text()\n\n # We update the global file with the destination.\n self.file = destination", "def copy_file_check(self):\n pass", "def _validateFilename(self, filePath):\n # assert True\n raise NotImplementedError", "def test_file_download(self):\n\n # Downloading without auth = unauthorized error (401)\n with self.assertRaises(requests.exceptions.HTTPError):\n self.assertFalse(self.api.downloadFile('/media/part/files/1/test.pdf', 'test.pdf'))", "def maybe_download(url, dest):\n if not os.path.exists(dest):\n logger.info('Downloading %s to %s', url, dest)\n download(url, dest)", "def maybe_download(url, dest):\n if not os.path.exists(dest):\n logger.info('Downloading %s to %s', url, dest)\n download(url, dest)", "def _download_if_needed(file_path, url, show_progress):\n if file_path.exists() and not file_path.is_file():\n raise NotAFileError(file_path)\n elif not file_path.exists():\n get_logger().info('Downloading %s ...', file_path)\n reporthook = None\n if show_progress:\n reporthook = _UrlRetrieveReportHook()\n urllib.request.urlretrieve(url, str(file_path), reporthook=reporthook)\n if show_progress:\n print()\n else:\n get_logger().info('%s already exists. 
Skipping download.', file_path)", "def validate_url(self):\n pass", "def is_downloaded(self) -> bool:\n if not self.download_path:\n return False\n return Path(self.download_path).exists()", "def _ensure_attachment_exists(target):\n if target is not None:\n target = Path(target)\n if not target.exists():\n msg = f'COMMUNICATOR WARNING: The file specified for attachment to email does not exist'\n fancy_print(msg, fg=COMMUNICATOR_WARN_COLOR)\n return False\n return True", "def validate(self, document) -> None:\n path = Path(document.text).expanduser()\n if self._is_file and not path.is_file():\n raise ValidationError(\n message=self._message,\n cursor_position=document.cursor_position,\n )\n elif self._is_dir and not path.is_dir():\n raise ValidationError(\n message=self._message,\n cursor_position=document.cursor_position,\n )\n elif not path.exists():\n raise ValidationError(\n message=self._message,\n cursor_position=document.cursor_position,\n )", "def is_file_downloading(self, data_url):\n\n # Sometimes it takes too long to load the list\n self.parent.wait_for_element_displayed(DOM.DownloadManager.download_list[0],\n DOM.DownloadManager.download_list[1], 60)\n return self.get_download_status(data_url) == \"downloading\"" ]
[ "0.6747319", "0.6341576", "0.6253002", "0.6199938", "0.6128621", "0.61062557", "0.61062557", "0.60761595", "0.6049992", "0.5987556", "0.5965086", "0.59535706", "0.58831483", "0.58813345", "0.5876718", "0.58764267", "0.587319", "0.5862184", "0.5838729", "0.57819915", "0.57474273", "0.5731171", "0.57000995", "0.57000995", "0.5689059", "0.5671603", "0.56629974", "0.56572866", "0.5656862", "0.56315863" ]
0.7108033
0
Validate whether two or more mutually exclusive arguments or argument groups have been set correctly.
def validate_mutually_exclusive(namespace, required, param1, param2): value1 = getattr(namespace, param1, None) value2 = getattr(namespace, param2, None) message = None if not value1 and not value2 and required: message = "One of the following arguments are required: \n" elif value1 and value2: message = ("The follow arguments are mutually " "exclusive and cannot be combined: \n") if message: missing = ','.join([arg_name(param1), arg_name(param2)]) message += missing raise ValueError(message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_two_mutually_exclusive_args_provided(self):\n from plone.api.exc import InvalidParameterError\n _func = mutually_exclusive_parameters('arg1', 'arg2')(undecorated_func)\n with self.assertRaises(InvalidParameterError):\n _func('ahoy', 'there')\n\n with self.assertRaises(InvalidParameterError):\n _func(arg1='ahoy', arg2='there')", "def validate_args(*args: Any) -> bool:\n\n return len(args) == 4 and Item.validate_price(args[2]) and Entity.validate_discount(args[3])", "def mutually_exclusive(*funcs, **kwargs):\n # in argparse, mutually exclusive groups ignore the description\n return group(None, *funcs, mutually_exclusive=True, **kwargs)", "def is_valid_arg(self, muts, arg):\n for mut in muts:\n if arg in mut.args():\n return True\n\n return False", "def valid_args(args):\n return args is not None and len(args) > 0", "def validate_args(cmd_args):\n valid = cmd_args.bag is not None\n\n if not valid:\n print('Must specify a bag file')\n\n if valid:\n for bag_file in cmd_args.bag:\n valid = os.path.isfile(bag_file)\n if not valid:\n print('Invalid bag file: ' + bag_file)\n break\n\n if valid:\n \"\"\" 1. If info is requested, that is the only argument allowed.\n 2. Topics and output files may be specified.\n 3. Topics may be specified. Output file names will be autogenerated.\n 4. Stats may be requested.\n \"\"\"\n ops_requested = [False] * 3\n ops_requested[0] = cmd_args.info\n ops_requested[1] = (cmd_args.topic is not None)\n ops_requested[2] = cmd_args.stats\n\n valid = (sum(ops_requested) == 1)\n if not valid:\n print('Must specify either bag info, a topic and output file, or statistics')\n\n if valid and cmd_args.out_file is not None:\n valid = (len(cmd_args.out_file) == len(cmd_args.bag) * len(cmd_args.topic))\n if not valid:\n print('Number of output files must be enough for bags and topics passed in')\n\n return valid", "def validate_args(self):\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-v\", \"--verbose\", help=\"Verbose output\", action=\"store_true\")\n parser.add_argument(\"-u\", \"--user\", help=\"Google user email\", default=\"none\")\n parser.add_argument(\"-p\", \"--password\", help=\"Google user email password\", default=\"none\")\n parser.add_argument(\"-l\", \"--library\", help=\"Remove duplicate songs from library\", action=\"store_true\")\n parser.add_argument(\"-y\", \"--playlist\", help=\"Remove duplicate songs from playlists\", action=\"store_true\")\n # Built-in:\n # parser.add_argument(\"-h\", \"--help\", help=\"Usage help\", action=\"store_true\")\n\n args = parser.parse_args()\n if len(self.argv) == 0 or args.user == \"none\" or args.password == \"none\" or not (args.library or args.playlist):\n parser.print_help()\n exit(0)\n\n return args", "def __check_args(self):\n self.__check_args_type()\n self.__check_args_val()", "def _check_group(self):\n if len(self.groups) != 2:\n raise ValueError(\"There have to be two groups!\")\n\n # Check the number of atoms in each group is the same\n n_group1 = 0\n for key, value in self.groups[0].items():\n n_group1 += value\n\n n_group2 = 0\n for key, value in self.groups[1].items():\n n_group2 += value\n\n if n_group1 != n_group2:\n f1 = self._group2formula(self.groups[0])\n f2 = self._group2formula(self.groups[1])\n msg = \"The two groups have to have the same number of atoms.\\n\"\n msg += \"Group 1: {} Group 2: {}\".format(f1, f2)\n raise ValueError(msg)", "def _check_args(self, args_):\n\n pass", "def __checkArgs(self, kwargs):\n requiredArgs = self.__class__.__requiredArgs + \\\n 
self.__class__.__singleCompArgs if self.singleComp else\\\n self.__class__.__requiredArgs + self.__class__.__doubleCompArgs\n for arg in requiredArgs:\n if arg not in kwargs:\n raise ValueError(\"Essential keyword argument %s missing\" % arg)\n for (k, v) in kwargs.items():\n assert k in self.__class__.__allowedArgs, \"Invalid Argument %s\" % k", "def check_arguments(self):\n ## only four test operation is permitted, if given anything apart from this, then it should print error message\n if (self.args.snap is False and self.args.snapcheck is False and self.args.check is False and self.args.diff is False and self.args.version is False):\n self.logger.error(colorama.Fore.RED +\n \"Arguments not given correctly, Please refer help message\", extra=self.log_detail)\n self.parser.print_help()\n sys.exit(1)\n\n if(((self.args.snap is True and (self.args.pre_snapfile is None or self.args.file is None)) or\n (self.args.snapcheck is True and self.args.file is None) or\n (self.args.check is True and self.args.file is None)) and \n (self.args.testfiles is None or self.args.hostname is None)\n ):\n self.logger.error(colorama.Fore.RED +\n \"Arguments not given correctly, Please refer help message\", extra=self.log_detail)\n self.parser.print_help()\n sys.exit(1)\n if self.args.diff is True:\n if (self.args.pre_snapfile is not None and os.path.isfile(self.args.pre_snapfile)) and (\n self.args.post_snapfile is not None and os.path.isfile(self.args.post_snapfile)):\n comp = Comparator()\n comp.compare_diff(\n self.args.pre_snapfile,\n self.args.post_snapfile,\n None)\n sys.exit(1)\n else:\n if (self.args.file is None) and (\n self.args.testfiles is None or self.args.hostname is None):\n self.parser.print_help()\n sys.exit(1)", "def _check_parameters(self, ep, params):\n\n any_group_satisfied = False\n for group in ep.REQUIRED:\n if all(required_param in params for required_param in group):\n any_group_satisfied = True\n\n if not any_group_satisfied:\n raise ValueError(f\"Got parameters {params}, expected one of {ep.REQUIRED}\")\n\n for key in params:\n if key not in ep.POSSIBLE:\n raise ValueError(f\"Got {key}, expected one of {ep.POSSIBLE}\")", "def validate_params(cls, args):\n if not (len(args) == 3 or len(args) == 5 or len(args) == 7):\n sys.exit(\n 'Execute o script passando o caminho do diretório das'\n ' imagens, ou apenas o path de uma imagem e decida se'\n ' deseja mover ou não'\n )\n args_dict = cls.__make_params(args)\n keys_args_set = set(args_dict.keys())\n if keys_args_set.difference(KEYS_DEFAULT_AS_SET) != set():\n sys.exit(\n 'Verifique a passagem de parâmetros.'\n ' Foi encontrado parâmetros desconhecidos.'\n )\n\n return cls.__check_args(args_dict)", "def test_empty_arguments(self):\n arg1 = {'keyAttributes': 'Cruiser',\n 'attributesDiff': 'Sail',\n 'target': '.'}\n\n with self.assertRaises(ValidationError):\n self.processing.validate(arg1)\n\n arg2 = {'src': '.',\n 'attributesDiff': 'Sail',\n 'target': '.'}\n\n with self.assertRaises(ValidationError):\n self.processing.validate(arg2)\n\n arg3 = {'src': '.',\n 'keyAttributes': 'Cruiser',\n 'target': '.'}\n\n with self.assertRaises(ValidationError):\n self.processing.validate(arg3)\n\n arg4 = {'src': '.',\n 'keyAttributes': 'Cruiser',\n 'attributesDiff': 'Sail'}\n\n with self.assertRaises(ValidationError):\n self.processing.validate(arg4)", "def check_args(self):\n parser = get_base_arguments(get_parser())\n parser = get_tc_arguments(parser)\n # Disable \"Do not use len(SEQ) as condition value\"\n # pylint: disable=C1801\n if len(sys.argv) 
< 2:\n self.logger.error(\"Icetea called with no arguments! \")\n parser.print_help()\n return False\n elif not self.args.ignore_invalid_params and self.unknown:\n self.logger.error(\"Unknown parameters received, exiting. \"\n \"To ignore this add --ignore_invalid_params flag.\")\n self.logger.error(\"Following parameters were unknown: {}\".format(self.unknown))\n parser.print_help()\n return False\n return True", "def validate_args(args):\n command = args[0]\n args_length = len(args) - 1\n return VALID_COMMANDS[command] == args_length", "def test_no_mutually_exclusive_args_provided(self):\n _func = mutually_exclusive_parameters('arg1', 'arg2')(undecorated_func)\n self.assertEqual(_func(), 'foo')\n self.assertEqual(_func(arg3='hello'), 'foo')", "def _check_args(self, args):\n if len(args) == 0:\n print(\"No parameters provided.\")\n return False\n else:\n return True", "def _arguments_valid(self) -> bool:\n return self.find and self.near and self.max_results >= 1", "def _validate_args(self, args):\r\n invalid_args = [k for k in self.required_params if args.get(k) is None]\r\n if invalid_args:\r\n raise ArgumentError('Missing required options: %s'\r\n % ','.join(invalid_args))\r\n\r\n if all([args['--userdata'], args['--userfile']]):\r\n raise ArgumentError('[-u | --userdata] not allowed with '\r\n '[-F | --userfile]')\r\n\r\n if args['--hourly'] in FALSE_VALUES:\r\n args['--hourly'] = False\r\n\r\n if args['--monthly'] in FALSE_VALUES:\r\n args['--monthly'] = False\r\n\r\n if all([args['--hourly'], args['--monthly']]):\r\n raise ArgumentError('[--hourly] not allowed with [--monthly]')\r\n\r\n if not any([args['--hourly'], args['--monthly']]):\r\n raise ArgumentError('One of [--hourly | --monthly] is required')\r\n\r\n image_args = [args['--os'], args['--image']]\r\n if all(image_args):\r\n raise ArgumentError('[-o | --os] not allowed with [--image]')\r\n\r\n if not any(image_args):\r\n raise ArgumentError('One of [--os | --image] is required')\r\n\r\n if args['--userfile']:\r\n if not os.path.exists(args['--userfile']):\r\n raise ArgumentError(\r\n 'File does not exist [-u | --userfile] = %s'\r\n % args['--userfile'])", "def test_parse_arguments1():\n args = []\n parsed_args = parse_arguments.parse_arguments(args)\n assert parsed_args.logging_level == logging.ERROR\n assert parsed_args.group_size == defaults.DEFAULT_GRPSIZE\n assert parsed_args.students_file == defaults.DEFAULT_CSVFILE\n assert (parsed_args.grouping_method == group_random) is False", "def validate_command_line_input(args):\n valid = False\n if 0 < len(args) <= 4:\n valid = True\n for arg in args:\n if int(arg) > 4:\n valid = False\n break\n else:\n pass\n if valid:\n CRUDStarter.load_operations(args)\n pass\n else:\n CRUDStarter.logger.info(\"Argument maximum acceptable value is 4\")\n else:\n CRUDStarter.logger.info(\"at least One at most Four argument(s) required\")", "def _is_supplied_by_config(group: argparse._MutuallyExclusiveGroup, conf: Dict[str, Any]) -> bool:\n group_args = []\n for arg in group._group_actions:\n group_args.append(arg.dest)\n\n count = 0\n for val in group_args:\n if val in conf:\n count += 1\n return count == len(group_args) or count == 0", "def cli_has_errors(arguments):\n all_arguments = (\n arguments['<single_date>'] is not None and\n arguments['<early_date>'] is not None and\n arguments['<late_date>'] is not None)\n\n if all_arguments:\n # print(\"Must use single date or date range, but not both.\")\n return True\n\n no_arguments = (\n arguments['<single_date>'] is not None and\n 
arguments['<early_date>'] is not None and\n arguments['<late_date>'] is not None)\n\n if no_arguments:\n # print(\"You must supply at least one date.\")\n return True\n\n single_and_other_arguments = (\n (\n arguments['<single_date>'] is not None and\n arguments['<early_date>'] is not None\n ) or\n (\n arguments['<single_date>'] is not None and\n arguments['<late_date>'] is not None\n ))\n\n if single_and_other_arguments:\n # print(\"Cannot use a single date and a date range bound.\")\n return True\n\n one_date_bound_only = (\n (\n arguments['<early_date>'] is not None and\n arguments['<late_date>'] is None\n ) or\n (\n arguments['<early_date>'] is None and\n arguments['<late_date>'] is not None\n ))\n\n if one_date_bound_only:\n # print(\"Must pick both ends of a date range bound.\")\n return True\n\n # All good\n return False", "def valid_args(args):\n is_valid = True\n if not args.ts_url or not args.username or not args.password or not args.from_user or not args.to_user:\n eprint(\"Missing required parameters.\")\n is_valid = False\n\n return is_valid", "def check_args(self):\n spec = _action_args_dict[self.action]\n \n if not spec.has_extended and len(self.args) != spec.n_req_args:\n raise GameActionError(\n 'Number of args for {0} doesn\\'t match (args={1}, n_args must be {2})'\n .format(spec.name, self.args, spec.n_req_args))\n\n elif spec.has_extended and len(self.args) < spec.n_req_args:\n raise GameActionError(\n 'Number of args for {0} doesn\\'t match (args={1}, n_args must be >= {2})'\n .format(spec.name, self.args, n_args))\n\n # Regular args\n for i, arg, (_type,name) in izip(count(), self.args, spec.required_arg_specs):\n card_arg_match = _type is Card and type(arg) is int\n bad_arg_match = type(arg) is not _type\n arg_is_none = arg is None\n arg_invalid_bool = type(arg) is not bool and _type is bool\n str_unicode_error = type(arg) is str and _type is unicode \\\n or type(arg) is unicode and _type is str\n\n if bad_arg_match and not arg_is_none and not\\\n str_unicode_error and not card_arg_match:\n raise GameActionError(\n 'Argument {0} (\"{1}\"), {2} doesn\\'t match type ({3} != {4})'\n .format(i, name, str(arg), str(_type), str(type(arg))))\n\n if arg_invalid_bool:\n raise GameActionError(\n 'Argument {0} (\"{1}\") must be boolean (received {2})'\n .format(i, name, str(arg)))\n\n # Extended args\n for i, arg in izip(count(spec.n_req_args), self.args[spec.n_req_args:]):\n _type, name = spec.extended_arg_spec\n\n card_arg_match = _type is Card and type(arg) is int\n bad_arg_match = type(arg) is not _type\n arg_is_none = arg is None\n arg_invalid_bool = type(arg) is not bool and _type is bool\n str_unicode_error = type(arg) is str and _type is unicode \\\n or type(arg) is unicode and _type is str\n\n if bad_arg_match and not arg_is_none and not\\\n str_unicode_error and not card_arg_match\\\n and not arg_invalid_bool:\n raise GameActionError(\n 'Argument {0} (\"{1}\"), {2} doesn\\'t match type ({3} != {4})'\n .format(i, name, str(arg), str(_type), str(type(arg))))\n\n if arg_invalid_bool:\n raise GameActionError(\n 'Argument {0} (\"{1}\") must be boolean (received {2})'\n .format(i, name, str(arg)))", "def _verify_arguments(self, kwargs: dict[str, Any]):\n geom_stat_args = kwargs.keys() | self._stat._kwargs.keys()\n unknown = (\n geom_stat_args\n - self.aesthetics()\n - self.DEFAULT_PARAMS.keys() # geom aesthetics\n - self._stat.aesthetics() # geom parameters\n - self._stat.DEFAULT_PARAMS.keys() # stat aesthetics\n - { # stat parameters\n \"data\",\n \"mapping\",\n 
\"show_legend\", # layer parameters\n \"inherit_aes\",\n \"raster\",\n }\n ) # layer parameters\n if unknown:\n msg = (\n \"Parameters {}, are not understood by \"\n \"either the geom, stat or layer.\"\n )\n raise PlotnineError(msg.format(unknown))", "def test_one_mutually_exclusive_arg_provided(self):\n _func = mutually_exclusive_parameters('arg1', 'arg2')(undecorated_func)\n self.assertEqual(_func('hello'), 'foo')\n self.assertEqual(_func(arg1='hello'), 'foo')\n self.assertEqual(_func(arg2='hello'), 'foo')", "def validate_args(self, parser: argparse):\n pass" ]
[ "0.6724702", "0.66577977", "0.6617761", "0.6611643", "0.65800476", "0.65641516", "0.6562692", "0.6554089", "0.65193367", "0.6493309", "0.6490098", "0.6442468", "0.6430487", "0.6388471", "0.6387228", "0.6347186", "0.63322484", "0.6319627", "0.6304081", "0.62839407", "0.6283635", "0.6266193", "0.62377614", "0.6229042", "0.622716", "0.62113464", "0.62095034", "0.62041265", "0.6195657", "0.61549073" ]
0.7363016
0
Getter method for ldp_sync_enabled, mapped from YANG variable /isis_state/interface_detail/isis_intf/ldp_sync_info/ldp_sync_enabled (isisstatus)
def _get_ldp_sync_enabled(self): return self.__ldp_sync_enabled
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_ldp_sync_enabled(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'is-enabled': {'value': 1}, u'is-disabled': {'value': 0}},), is_leaf=True, yang_name=\"ldp-sync-enabled\", rest_name=\"ldp-sync-enabled\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-status', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"ldp_sync_enabled must be of a type compatible with isis-status\"\"\",\n 'defined-type': \"brocade-isis-operational:isis-status\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'is-enabled': {'value': 1}, u'is-disabled': {'value': 0}},), is_leaf=True, yang_name=\"ldp-sync-enabled\", rest_name=\"ldp-sync-enabled\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-status', is_config=False)\"\"\",\n })\n\n self.__ldp_sync_enabled = t\n if hasattr(self, '_set'):\n self._set()", "def _set_ldp_in_sync(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"ldp-in-sync\", rest_name=\"ldp-in-sync\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='boolean', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"ldp_in_sync must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"ldp-in-sync\", rest_name=\"ldp-in-sync\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='boolean', is_config=False)\"\"\",\n })\n\n self.__ldp_in_sync = t\n if hasattr(self, '_set'):\n self._set()", "def _get_ldp_in_sync(self):\n return self.__ldp_in_sync", "def is_time_sync_smart_mode_enabled(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsTimeSyncSmartModeEnabled', self.handle))", "def on_premises_sync_enabled(self):\n if \"onPremisesSyncEnabled\" in self._prop_dict:\n return self._prop_dict[\"onPremisesSyncEnabled\"]\n else:\n return None", "def on_premises_sync_enabled(self):\n if \"onPremisesSyncEnabled\" in self._prop_dict:\n return self._prop_dict[\"onPremisesSyncEnabled\"]\n else:\n return None", "def is_delta_sync_enabled(cluster_config):\n\n cluster = load_cluster_config_json(cluster_config)\n try:\n return cluster[\"environment\"][\"delta_sync_enabled\"]\n except KeyError:\n return False", "def set_time_sync_smart_mode_enabled(self, bEnabled):\n\t\tcall_sdk_function('PrlVmCfg_SetTimeSyncSmartModeEnabled', self.handle, bEnabled)", "def check_sync_mode():\n global sync_mode\n _description = ''\n\n _modes = {\n SyncMode.RECEIVER: '(REMOTE ➔ LOCAL)',\n SyncMode.SENDER: '(LOCAL ➔ REMOTE)',\n SyncMode.PROXY: '(REMOTE ➔ LOCAL ➔ REMOTE)',\n SyncMode.DUMP_LOCAL: '(LOCAL, 
ONLY EXPORT)',\n SyncMode.DUMP_REMOTE: '(REMOTE, ONLY EXPORT)',\n SyncMode.IMPORT_LOCAL: '(REMOTE, ONLY IMPORT)',\n SyncMode.IMPORT_REMOTE: '(LOCAL, ONLY IMPORT)',\n SyncMode.SYNC_LOCAL: '(LOCAL ➔ LOCAL)',\n SyncMode.SYNC_REMOTE: '(REMOTE ➔ REMOTE)'\n }\n\n for _mode, _desc in _modes.items():\n if getattr(SyncMode, 'is_' + _mode.lower())():\n sync_mode = _mode\n _description = _desc\n\n if is_import():\n output.message(\n output.Subject.INFO,\n f'Import file {output.CliFormat.BLACK}{system.config[\"import\"]}{output.CliFormat.ENDC}',\n True\n )\n\n system.config['is_same_client'] = SyncMode.is_same_host()\n\n output.message(\n output.Subject.INFO,\n f'Sync mode: {sync_mode} {output.CliFormat.BLACK}{_description}{output.CliFormat.ENDC}',\n True\n )", "def get_config_sync_status(self):\n \n try:\n device_group = self.connection.Management.DeviceGroup.get_list()\n print self.connection.Management.DeviceGroup.get_sync_status([device_group])\n \n except:\n raise Exception(\"Target system has pending configuration, please sync beforehand.\")", "def get_linear_track_is_enabled(self):\r\n return self._arm.get_linear_track_is_enabled()", "def is_time_synchronization_enabled(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsTimeSynchronizationEnabled', self.handle))", "def get_sync_mode():\n return sync_mode", "def _enable_sync(self, enable_sync: bool = True):\n self.__enable_sync = enable_sync", "def lfs_enabled(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"lfs_enabled\")", "def get_time_sync_interval(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetTimeSyncInterval', self.handle)", "def _get_ldp_sync_hold_down(self):\n return self.__ldp_sync_hold_down", "def getAddressInSync(self):\n return self._addrInSyncMode", "def ms_get_rstp_enabled(self):\n self.open_route('/configure/switch_settings', \"Switch\")\n dropdown_value = page_utils.get_dropdown_value(\n self.get_page(),\n var_id='node_group_use_stp')\n return dropdown_value == 'Enable RSTP'", "def lfs_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"lfs_enabled\")", "def lfs_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"lfs_enabled\")", "def find_sync_loop(self):\n self._find_xbb_infos()\n self._find_exit_only_bbs()\n if not self._build_rd_wrt_list():\n return False, None\n self._find_sync_loop_vars()\n return self.syncinfo.is_sync_loop, self.syncinfo", "async def enabled(self) -> bool:\n response = await self.adguard.request(\"parental/status\")\n return response[\"enabled\"]", "def enabled_state(self) -> Optional[pulumi.Input[Union[str, 'ConfigServerEnabledState']]]:\n return pulumi.get(self, \"enabled_state\")", "def get_sync_status(self, connector_id, previous_completed_at):\n # @todo Need logic here to tell if the sync is not running at all and not\n # likely to run in the near future.\n connector_details = self.get_connector(connector_id)\n succeeded_at = self._parse_timestamp(connector_details[\"succeeded_at\"])\n failed_at = self._parse_timestamp(connector_details[\"failed_at\"])\n current_completed_at = (\n succeeded_at if succeeded_at > failed_at else failed_at\n )\n\n # The only way to tell if a sync failed is to check if its latest\n # failed_at value is greater than then last known \"sync completed at\" value.\n if failed_at > previous_completed_at:\n service_name = connector_details[\"service\"]\n schema_name = connector_details[\"schema\"]\n raise AirflowException(\n f'Fivetran sync for connector \"{connector_id}\" failed; '\n f\"please see logs at \"\n 
f\"{self._connector_ui_url_logs(service_name, schema_name)}\"\n )\n\n sync_state = connector_details[\"status\"][\"sync_state\"]\n self.log.info(f'Connector \"{connector_id}\": sync_state = {sync_state}')\n\n # Check if sync started by FivetranOperator has finished\n # indicated by new 'succeeded_at' timestamp\n if current_completed_at > previous_completed_at:\n self.log.info('Connector \"{}\": succeeded_at: {}'.format(\n connector_id, succeeded_at.to_iso8601_string())\n )\n return True\n else:\n return False", "def is_enabled(self):\n\t\treturn bool(call_sdk_function('PrlSrvCfgNet_IsEnabled', self.handle))", "def get_isenabled(self):\n return self.isenabled", "def get_linear_track_status(self):\r\n return self._arm.get_linear_track_status()", "def is_enabled(self):\n siteconfig = SiteConfiguration.objects.get_current()\n return siteconfig.get('%s_enabled' % self.backend_id, False)", "def _is_drs_enabled(session, cluster):\n drs_config = session._call_method(vim_util, \"get_object_property\", cluster,\n \"configuration.drsConfig\")\n if drs_config and hasattr(drs_config, 'enabled'):\n return drs_config.enabled\n\n return False" ]
[ "0.79613835", "0.61158735", "0.60724086", "0.5726894", "0.5613925", "0.5613925", "0.55791795", "0.53533936", "0.5005335", "0.4987678", "0.49827105", "0.49728572", "0.49603918", "0.49596107", "0.4955472", "0.49513027", "0.48996565", "0.48462626", "0.48093173", "0.48037514", "0.48037514", "0.47827238", "0.47253767", "0.4722109", "0.4719143", "0.4662773", "0.4624642", "0.45747212", "0.45705274", "0.45504406" ]
0.73131764
1
Setter method for ldp_sync_enabled, mapped from YANG variable /isis_state/interface_detail/isis_intf/ldp_sync_info/ldp_sync_enabled (isisstatus)
def _set_ldp_sync_enabled(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-enabled': {'value': 1}, u'is-disabled': {'value': 0}},), is_leaf=True, yang_name="ldp-sync-enabled", rest_name="ldp-sync-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-status', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """ldp_sync_enabled must be of a type compatible with isis-status""", 'defined-type': "brocade-isis-operational:isis-status", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-enabled': {'value': 1}, u'is-disabled': {'value': 0}},), is_leaf=True, yang_name="ldp-sync-enabled", rest_name="ldp-sync-enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-status', is_config=False)""", }) self.__ldp_sync_enabled = t if hasattr(self, '_set'): self._set()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_ldp_sync_enabled(self):\n return self.__ldp_sync_enabled", "def _set_ldp_in_sync(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"ldp-in-sync\", rest_name=\"ldp-in-sync\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='boolean', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"ldp_in_sync must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"ldp-in-sync\", rest_name=\"ldp-in-sync\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='boolean', is_config=False)\"\"\",\n })\n\n self.__ldp_in_sync = t\n if hasattr(self, '_set'):\n self._set()", "def set_time_sync_smart_mode_enabled(self, bEnabled):\n\t\tcall_sdk_function('PrlVmCfg_SetTimeSyncSmartModeEnabled', self.handle, bEnabled)", "def is_time_sync_smart_mode_enabled(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsTimeSyncSmartModeEnabled', self.handle))", "def _enable_sync(self, enable_sync: bool = True):\n self.__enable_sync = enable_sync", "def _get_ldp_in_sync(self):\n return self.__ldp_in_sync", "def is_delta_sync_enabled(cluster_config):\n\n cluster = load_cluster_config_json(cluster_config)\n try:\n return cluster[\"environment\"][\"delta_sync_enabled\"]\n except KeyError:\n return False", "def on_premises_sync_enabled(self):\n if \"onPremisesSyncEnabled\" in self._prop_dict:\n return self._prop_dict[\"onPremisesSyncEnabled\"]\n else:\n return None", "def on_premises_sync_enabled(self):\n if \"onPremisesSyncEnabled\" in self._prop_dict:\n return self._prop_dict[\"onPremisesSyncEnabled\"]\n else:\n return None", "def setSyncMode(self, IsPauseOn = True):\n self._IsPauseOn = IsPauseOn", "def set_time_synchronization_enabled(self, bEnabled):\n\t\tcall_sdk_function('PrlVmCfg_SetTimeSynchronizationEnabled', self.handle, bEnabled)", "def is_time_synchronization_enabled(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsTimeSynchronizationEnabled', self.handle))", "def lfs_enabled(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"lfs_enabled\")", "def get_linear_track_is_enabled(self):\r\n return self._arm.get_linear_track_is_enabled()", "def is_enabled(self):\n\t\treturn bool(call_sdk_function('PrlSrvCfgNet_IsEnabled', self.handle))", "def enabled_state(self) -> Optional[pulumi.Input[Union[str, 'ConfigServerEnabledState']]]:\n return pulumi.get(self, \"enabled_state\")", "def get_sync_mode():\n return sync_mode", "def check_sync_mode():\n global sync_mode\n _description = ''\n\n _modes = {\n SyncMode.RECEIVER: '(REMOTE ➔ LOCAL)',\n SyncMode.SENDER: '(LOCAL ➔ REMOTE)',\n SyncMode.PROXY: '(REMOTE ➔ LOCAL ➔ REMOTE)',\n SyncMode.DUMP_LOCAL: '(LOCAL, ONLY EXPORT)',\n SyncMode.DUMP_REMOTE: '(REMOTE, ONLY EXPORT)',\n SyncMode.IMPORT_LOCAL: '(REMOTE, ONLY IMPORT)',\n SyncMode.IMPORT_REMOTE: '(LOCAL, ONLY IMPORT)',\n SyncMode.SYNC_LOCAL: '(LOCAL ➔ LOCAL)',\n SyncMode.SYNC_REMOTE: '(REMOTE ➔ REMOTE)'\n }\n\n for _mode, _desc in _modes.items():\n if getattr(SyncMode, 'is_' + _mode.lower())():\n sync_mode = _mode\n _description = _desc\n\n if is_import():\n 
output.message(\n output.Subject.INFO,\n f'Import file {output.CliFormat.BLACK}{system.config[\"import\"]}{output.CliFormat.ENDC}',\n True\n )\n\n system.config['is_same_client'] = SyncMode.is_same_host()\n\n output.message(\n output.Subject.INFO,\n f'Sync mode: {sync_mode} {output.CliFormat.BLACK}{_description}{output.CliFormat.ENDC}',\n True\n )", "def lfs_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"lfs_enabled\")", "def lfs_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"lfs_enabled\")", "async def enabled(self) -> bool:\n response = await self.adguard.request(\"parental/status\")\n return response[\"enabled\"]", "def getAddressInSync(self):\n return self._addrInSyncMode", "def enabled(self):\n return intera_interface.RobotEnable(\n intera_interface.CHECK_VERSION).state().enabled", "def ms_get_rstp_enabled(self):\n self.open_route('/configure/switch_settings', \"Switch\")\n dropdown_value = page_utils.get_dropdown_value(\n self.get_page(),\n var_id='node_group_use_stp')\n return dropdown_value == 'Enable RSTP'", "def get_config_sync_status(self):\n \n try:\n device_group = self.connection.Management.DeviceGroup.get_list()\n print self.connection.Management.DeviceGroup.get_sync_status([device_group])\n \n except:\n raise Exception(\"Target system has pending configuration, please sync beforehand.\")", "def isSync(self):\n return False", "def get_time_sync_interval(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetTimeSyncInterval', self.handle)", "def _is_drs_enabled(session, cluster):\n drs_config = session._call_method(vim_util, \"get_object_property\", cluster,\n \"configuration.drsConfig\")\n if drs_config and hasattr(drs_config, 'enabled'):\n return drs_config.enabled\n\n return False", "def shell_enabled_changed(self, enabled):\n self.set_enabled(enabled)", "def _set_lsp_frr_operational_status(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"lsp-frr-operational-status\", rest_name=\"lsp-frr-operational-status\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_frr_operational_status must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"lsp-frr-operational-status\", rest_name=\"lsp-frr-operational-status\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__lsp_frr_operational_status = t\n if hasattr(self, '_set'):\n self._set()" ]
[ "0.69946873", "0.6472956", "0.59491473", "0.5824803", "0.5717719", "0.5544052", "0.5461094", "0.54108495", "0.54108495", "0.5194292", "0.51579636", "0.49878374", "0.4895498", "0.48504594", "0.4797328", "0.47689405", "0.47490603", "0.47434294", "0.47279575", "0.47279575", "0.4717986", "0.46569166", "0.46440846", "0.46287465", "0.46085346", "0.4590351", "0.45850322", "0.45835608", "0.45798197", "0.45679155" ]
0.8409099
0
Getter method for ldp_sync_hold_down, mapped from YANG variable /isis_state/interface_detail/isis_intf/ldp_sync_info/ldp_sync_hold_down (uint16)
def _get_ldp_sync_hold_down(self):
  return self.__ldp_sync_hold_down
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_ldp_sync_hold_down(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name=\"ldp-sync-hold-down\", rest_name=\"ldp-sync-hold-down\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint16', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"ldp_sync_hold_down must be of a type compatible with uint16\"\"\",\n 'defined-type': \"uint16\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name=\"ldp-sync-hold-down\", rest_name=\"ldp-sync-hold-down\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint16', is_config=False)\"\"\",\n })\n\n self.__ldp_sync_hold_down = t\n if hasattr(self, '_set'):\n self._set()", "def get_downlink_cnt(self) -> int:\n\n try:\n self._serial.transmit(b'\\x55\\x00')\n response = self._get_reply(0x55, 4, 0.25)\n finally:\n self._gpio.sleep()\n\n return int.from_bytes(response[2:6], 'little', signed=False)", "def _set_lsp_frr_down_reason(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name=\"lsp-frr-down-reason\", rest_name=\"lsp-frr-down-reason\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_frr_down_reason must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=unicode, is_leaf=True, yang_name=\"lsp-frr-down-reason\", rest_name=\"lsp-frr-down-reason\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)\"\"\",\n })\n\n self.__lsp_frr_down_reason = t\n if hasattr(self, '_set'):\n self._set()", "def setDown(self, down):\n # type: (tp.Union[str, tp.List[str]])->None\n if isinstance(down, list):\n self._ifAttributes['down'] = down\n elif isinstance(down, str):\n self._ifAttributes['down'] = [down]\n else:\n raise ValueError(\"Invalid value type {0}, expected str or List[str]\".format(type(down)))", "def _get_lsp_frr_down_reason(self):\n return self.__lsp_frr_down_reason", "def set_downlink_cnt(self, downlink_counter: int) -> None:\n\n if downlink_counter < 0 or downlink_counter > 4294967295:\n raise ValueError('Bad downlink counter value')\n\n cmd = b'\\x54\\x04' + downlink_counter.to_bytes(4, 'little', signed=False)\n\n try:\n self._serial.transmit(cmd)\n self._get_reply(0x54, 0, 0.25)\n finally:\n self._gpio.sleep()\n\n return", "def TunnelDown(self):\n if self.force_auto_sync:\n self.get('TunnelDown')\n return self._TunnelDown", "def appendDown(self, cmd):\n # type: (tp.Union[str, tp.List[str]])->None\n self._ensure_list(self._ifAttributes, \"down\", cmd)", "def _get_ldp_sync_enabled(self):\n 
return self.__ldp_sync_enabled", "def unconfigure_lldp_holdtime(device): \r\n try:\r\n device.configure('no lldp holdtime')\r\n except SubCommandFailure as e:\r\n raise SubCommandFailure(\r\n \"Could not unconfigure LLDP holdtime\"\r\n \"Error: {error}\".format(error=e)\r\n )", "def is_down(self):\n \n return self.is_level('down')", "def up_down(self, up):\n if up == 'u':\n up = 1\n elif up == 'n':\n up = 0\n elif up == 'd':\n up = -1\n else:\n raise ValueError(\"The heck you doing Servo?? u d or n ONLY\")\n self.h += up\n if self.get_pos() == blocks['wall']:\n self.h -= up", "def set_pull_up_down(gpio, pud):\n return _u2i(_pigpio_command(_control, _PI_CMD_PUD, gpio, pud))", "def net_get_updown():\n\ttry:\n\t\tf = open(\"/proc/net/dev\", \"r\")\n\t\tdata = f.readlines(2000)\n\t\tf.close()\n\t\tnewNetUp = 0\n\t\tnewNetDown = 0\n\t\tfor i in data:\n\t\t\tif i.find(':') != -1 and i.strip().startswith('lo:') == False:\n\t\t\t\tv = i.split(':')[1].split()\n\t\t\t\tnewNetUp = float( v[8] )+newNetUp\n\t\t\t\tnewNetDown = float( v[0] )+newNetDown\n\n\t\n\t\treturn (newNetUp/1024), (newNetDown/1024)\n\texcept:\n\t\tprint(_(\"Can't open /proc/net/dev\"))\n\t\treturn 0,0", "async def async_volume_down(self):\n if int(self._volume) == 0:\n return\n\n volume = int(self._volume) - int(self._volume_step)\n if volume < 0:\n volume = 0\n\n if not (self._slave_mode and self._multiroom_wifidirect):\n\n if self._is_master:\n value = await self.async_call_linkplay_httpapi(\"setPlayerCmd:slave_vol:{0}\".format(str(volume)), None)\n else:\n value = await self.async_call_linkplay_httpapi(\"setPlayerCmd:vol:{0}\".format(str(volume)), None)\n\n if value == \"OK\":\n self._volume = volume\n else:\n _LOGGER.warning(\"Failed to set volume_down. Device: %s, Got response: %s\", self.entity_id, value)\n else:\n if self._snapshot_active:\n return\n value = await self._master.async_call_linkplay_httpapi(\"multiroom:SlaveVolume:{0}:{1}\".format(self._slave_ip, str(volume)), None)\n if value == \"OK\":\n self._volume = volume\n else:\n _LOGGER.warning(\"Failed to set volume_down. 
Device: %s, Got response: %s\", self.entity_id, value)", "def isDown ( self ) :\n return self._ilhcbmagnet.isDown()", "def _get_ldp_sync_hd_expired(self):\n return self.__ldp_sync_hd_expired", "def GetDown(self, *args, **kwargs):\n pass", "def move_down(self, dist):\r\n self.send_command_without_response(f'down {dist}')", "def Down(n=1):\n return ESC + str(n) + 'B'", "def checkDownstatus(self, mjd, config): \n # Adjust time in mjd to time contained in downtime records (which counts from 'start' only).\n night = mjd - config['sim_start']\n # Calculate the 'night' of the survey to match against integer values in downtime.\n night = numpy.floor(night - config['midnight'] - 0.5)\n if isinstance(night, float):\n night = numpy.array([night,], float)\n downstate = numpy.zeros(len(night), int)\n for i in range(len(night)):\n if night[i] in self.alldowndates:\n downstate[i] = 1\n if len(night) == 1:\n downstate = downstate[0]\n return downstate", "def show_down_sense(context, sense=''):\n request = context['request']\n if sense == 'user':\n user = request.user\n if user.profile.sense_client and user.profile.sense_slot:\n sense_client = user.profile.sense_client\n sense_slot = user.profile.sense_slot\n else:\n sense_client = settings.SENSE_DOWN_CLIENT\n sense_slot = settings.SENSE_DOWN_SLOT\n else:\n sense_client = settings.SENSE_DOWN_CLIENT\n sense_slot = settings.SENSE_DOWN_SLOT\n\n sense_enabled = settings.ENABLE_ADSENSE\n if settings.DEBUG:\n sense_enabled = False\n\n return {\n 'sense_enabled': sense_enabled,\n 'sense_native': False,\n 'sense_client': sense_client,\n 'sense_slot': sense_slot,\n }", "def moved_down(self):\n return self.moved(MovementType.DOWN)", "def distance_down(self, distance_down):\n\n self._distance_down = distance_down", "def swd_read16(self, offset):\n value = self._dll.JLINK_SWD_GetU16(offset)\n return ctypes.c_uint16(value).value", "def report_status_down(self, reason=''):\n self._update_sandesh_status(ConnectionStatus.DOWN, msg=reason)", "def get_closed_state(self):\n return 1 if self.pull_up_down == GPIO.PUD_DOWN else 0", "def _get_ldp_in_sync(self):\n return self.__ldp_in_sync", "def GetIsDown(self):\n\n return self.isDown", "def downvote_cache_key(self, kind):\r\n return 'account_%d_%s_downvotes' % (self._id, kind)" ]
[ "0.8303432", "0.49328032", "0.4887266", "0.48021436", "0.47671673", "0.47562605", "0.4665075", "0.4552927", "0.45074293", "0.44553697", "0.44231933", "0.4418984", "0.4417889", "0.44146112", "0.4406024", "0.43649313", "0.4336277", "0.42344445", "0.42294973", "0.42198023", "0.42129755", "0.42128986", "0.4209491", "0.4198785", "0.41807175", "0.41768134", "0.41524634", "0.41436464", "0.41411793", "0.4139942" ]
0.69384503
1
Setter method for ldp_sync_hold_down, mapped from YANG variable /isis_state/interface_detail/isis_intf/ldp_sync_info/ldp_sync_hold_down (uint16)
def _set_ldp_sync_hold_down(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="ldp-sync-hold-down", rest_name="ldp-sync-hold-down", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint16', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
        'error-string': """ldp_sync_hold_down must be of a type compatible with uint16""",
        'defined-type': "uint16",
        'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="ldp-sync-hold-down", rest_name="ldp-sync-hold-down", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint16', is_config=False)""",
      })

  self.__ldp_sync_hold_down = t
  if hasattr(self, '_set'):
    self._set()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_ldp_sync_hold_down(self):\n return self.__ldp_sync_hold_down", "def setDown(self, down):\n # type: (tp.Union[str, tp.List[str]])->None\n if isinstance(down, list):\n self._ifAttributes['down'] = down\n elif isinstance(down, str):\n self._ifAttributes['down'] = [down]\n else:\n raise ValueError(\"Invalid value type {0}, expected str or List[str]\".format(type(down)))", "def _set_lsp_frr_down_reason(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name=\"lsp-frr-down-reason\", rest_name=\"lsp-frr-down-reason\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_frr_down_reason must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=unicode, is_leaf=True, yang_name=\"lsp-frr-down-reason\", rest_name=\"lsp-frr-down-reason\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='string', is_config=True)\"\"\",\n })\n\n self.__lsp_frr_down_reason = t\n if hasattr(self, '_set'):\n self._set()", "def set_downlink_cnt(self, downlink_counter: int) -> None:\n\n if downlink_counter < 0 or downlink_counter > 4294967295:\n raise ValueError('Bad downlink counter value')\n\n cmd = b'\\x54\\x04' + downlink_counter.to_bytes(4, 'little', signed=False)\n\n try:\n self._serial.transmit(cmd)\n self._get_reply(0x54, 0, 0.25)\n finally:\n self._gpio.sleep()\n\n return", "def up_down(self, up):\n if up == 'u':\n up = 1\n elif up == 'n':\n up = 0\n elif up == 'd':\n up = -1\n else:\n raise ValueError(\"The heck you doing Servo?? u d or n ONLY\")\n self.h += up\n if self.get_pos() == blocks['wall']:\n self.h -= up", "def distance_down(self, distance_down):\n\n self._distance_down = distance_down", "def unconfigure_lldp_holdtime(device): \r\n try:\r\n device.configure('no lldp holdtime')\r\n except SubCommandFailure as e:\r\n raise SubCommandFailure(\r\n \"Could not unconfigure LLDP holdtime\"\r\n \"Error: {error}\".format(error=e)\r\n )", "def get_downlink_cnt(self) -> int:\n\n try:\n self._serial.transmit(b'\\x55\\x00')\n response = self._get_reply(0x55, 4, 0.25)\n finally:\n self._gpio.sleep()\n\n return int.from_bytes(response[2:6], 'little', signed=False)", "def appendDown(self, cmd):\n # type: (tp.Union[str, tp.List[str]])->None\n self._ensure_list(self._ifAttributes, \"down\", cmd)", "async def async_volume_down(self):\n if int(self._volume) == 0:\n return\n\n volume = int(self._volume) - int(self._volume_step)\n if volume < 0:\n volume = 0\n\n if not (self._slave_mode and self._multiroom_wifidirect):\n\n if self._is_master:\n value = await self.async_call_linkplay_httpapi(\"setPlayerCmd:slave_vol:{0}\".format(str(volume)), None)\n else:\n value = await self.async_call_linkplay_httpapi(\"setPlayerCmd:vol:{0}\".format(str(volume)), None)\n\n if value == \"OK\":\n self._volume = volume\n else:\n _LOGGER.warning(\"Failed to set volume_down. 
Device: %s, Got response: %s\", self.entity_id, value)\n else:\n if self._snapshot_active:\n return\n value = await self._master.async_call_linkplay_httpapi(\"multiroom:SlaveVolume:{0}:{1}\".format(self._slave_ip, str(volume)), None)\n if value == \"OK\":\n self._volume = volume\n else:\n _LOGGER.warning(\"Failed to set volume_down. Device: %s, Got response: %s\", self.entity_id, value)", "def move_down(self, dist):\r\n self.send_command_without_response(f'down {dist}')", "def set_drhold(self, value):\n self._connector.set_drhold()", "def set_pull_up_down(gpio, pud):\n return _u2i(_pigpio_command(_control, _PI_CMD_PUD, gpio, pud))", "def down(event):\n if event.action == sense_hat.ACTION_RELEASED:\n snake.changeDirection(DOWN)", "def report_status_down(self, reason=''):\n self._update_sandesh_status(ConnectionStatus.DOWN, msg=reason)", "def TunnelDown(self):\n if self.force_auto_sync:\n self.get('TunnelDown')\n return self._TunnelDown", "def is_down(self):\n \n return self.is_level('down')", "def move_down(self):\n client.moveByVelocityAsync(0, 0, -1, 0.3).join()\n # if self.logging:\n # self.log_arr.append(\"down\")", "def move_down(self, lifting, **kwargs):\n self.log.debug(\"Moving table down by {!s} microns\".format(lifting))\n if self.variables[\"Table_state\"]:\n success = self.move_to([0, 0, -lifting], False, 0, True, **kwargs)\n if success:\n self.variables[\"Table_state\"] = False\n return success\n else:\n self.queue.put({\"Info\": \"Table already in the down position...\"})\n return True", "def Down(n=1):\n return ESC + str(n) + 'B'", "def down(self):\n if self.head.heading() != UP and self.last_direction != UP:\n self.head.setheading(DOWN)", "def isDown ( self ) :\n return self._ilhcbmagnet.isDown()", "def _get_lsp_frr_down_reason(self):\n return self.__lsp_frr_down_reason", "def set_dcmpwr(self, pwr):\n self.dcmpwr = pwr", "def moved_down(self):\n return self.moved(MovementType.DOWN)", "def GetDown(self, *args, **kwargs):\n pass", "def pin_pulldown(self, pin):\n port_num = self._convert_pin_port(pin)\n gpio.pullup(port_num, gpio.PULLDOWN)", "def on_down_rcr_btn_clicked(self):\n self.status = 'down'\n height = self.height_set_rcr_lineEdit.text()\n vel = self.vel_set_rcr_lineEdit.text()\n # self.speed_rcr_hSlider.setValue(vel)\n # self.height_rcr_hSlider.setValue(height)\n height = -float(height)\n vel = float(vel)\n direction = \"DOWN\"\n self.control1.device.cmd_precise_move(height, vel)\n self.set_rcr_txt(height, vel, direction)\n self.set_status_txt(self.status)\n # self.set_ab_height()", "def upside_down_off(self):\n if self._firmware >= 268:\n self.write(self.ASCII_ESC, '{', 0)\n else:\n self._unset_print_mode(self.UPDOWN_MASK)", "def _set_ldp_sync_hd_expired(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"ldp-sync-hd-expired\", rest_name=\"ldp-sync-hd-expired\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='boolean', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"ldp_sync_hd_expired must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"ldp-sync-hd-expired\", rest_name=\"ldp-sync-hd-expired\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, 
register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='boolean', is_config=False)\"\"\",\n })\n\n self.__ldp_sync_hd_expired = t\n if hasattr(self, '_set'):\n self._set()" ]
[ "0.65523595", "0.53751856", "0.5216387", "0.5215131", "0.49139932", "0.48896587", "0.48503286", "0.4806491", "0.47906452", "0.47712207", "0.47645", "0.46650887", "0.4643589", "0.4639629", "0.4599972", "0.45872274", "0.45475742", "0.45354202", "0.44561124", "0.4455607", "0.4433527", "0.44259244", "0.4372009", "0.43422803", "0.43350717", "0.432982", "0.43054768", "0.42905927", "0.4284313", "0.42701417" ]
0.8820209
0
Getter method for ldp_in_sync, mapped from YANG variable /isis_state/interface_detail/isis_intf/ldp_sync_info/ldp_in_sync (boolean)
def _get_ldp_in_sync(self):
  return self.__ldp_in_sync
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_ldp_in_sync(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"ldp-in-sync\", rest_name=\"ldp-in-sync\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='boolean', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"ldp_in_sync must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"ldp-in-sync\", rest_name=\"ldp-in-sync\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='boolean', is_config=False)\"\"\",\n })\n\n self.__ldp_in_sync = t\n if hasattr(self, '_set'):\n self._set()", "def _get_ldp_sync_enabled(self):\n return self.__ldp_sync_enabled", "def _set_ldp_sync_enabled(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'is-enabled': {'value': 1}, u'is-disabled': {'value': 0}},), is_leaf=True, yang_name=\"ldp-sync-enabled\", rest_name=\"ldp-sync-enabled\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-status', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"ldp_sync_enabled must be of a type compatible with isis-status\"\"\",\n 'defined-type': \"brocade-isis-operational:isis-status\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'is-enabled': {'value': 1}, u'is-disabled': {'value': 0}},), is_leaf=True, yang_name=\"ldp-sync-enabled\", rest_name=\"ldp-sync-enabled\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-status', is_config=False)\"\"\",\n })\n\n self.__ldp_sync_enabled = t\n if hasattr(self, '_set'):\n self._set()", "def getSyncInfo (self, connection) :\r\n \r\n if self.state != 'valid' :\r\n return False\r\n \r\n if self.sync_target :\r\n return False\r\n \r\n self.state = 'recv_sync'\r\n self.sync_target = connection\r\n self.do_sync_get()\r\n \r\n return True", "def on_premises_sync_enabled(self):\n if \"onPremisesSyncEnabled\" in self._prop_dict:\n return self._prop_dict[\"onPremisesSyncEnabled\"]\n else:\n return None", "def on_premises_sync_enabled(self):\n if \"onPremisesSyncEnabled\" in self._prop_dict:\n return self._prop_dict[\"onPremisesSyncEnabled\"]\n else:\n return None", "def getAddressInSync(self):\n return self._addrInSyncMode", "def is_time_sync_smart_mode_enabled(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsTimeSyncSmartModeEnabled', self.handle))", "def find_sync_loop(self):\n self._find_xbb_infos()\n self._find_exit_only_bbs()\n if not self._build_rd_wrt_list():\n return False, None\n self._find_sync_loop_vars()\n return self.syncinfo.is_sync_loop, self.syncinfo", "def isSync(self):\n return False", 
"def _get_ldp_sync_hold_down(self):\n return self.__ldp_sync_hold_down", "def is_interface_igp_sync_mpls_enabled(\n interface, device, vrf=\"\", parsed_output=\"\"):\n\n if not parsed_output:\n try:\n parsed_output = device.parse(\n \"show mpls ldp igp sync interface {intf}\".format(\n intf=interface\n )\n )\n except SchemaEmptyParserError:\n raise SchemaEmptyParserError(\n \"Fail to parse 'show mpls ldp igp sync \"\n \"interface {intf}' command\".format(intf=interface)\n )\n\n vrf = vrf if vrf else \"default\"\n\n try:\n igp_synchronization_enabled = (\n parsed_output[\"vrf\"]\n .get(vrf, {})\n .get(\"interface\", {})\n .get(interface, {})\n .get(\"ldp\", {})\n .get(\"igp_synchronization_enabled\", False)\n )\n\n sync_achieved = (\n parsed_output[\"vrf\"]\n .get(vrf, {})\n .get(\"interface\", {})\n .get(interface, {})\n .get(\"sync\", {})\n .get(\"status\", {})\n .get(\"sync_achieved\", False)\n )\n except KeyError:\n return False\n\n return igp_synchronization_enabled and sync_achieved", "def get_sync_mode():\n return sync_mode", "def check_sync_mode():\n global sync_mode\n _description = ''\n\n _modes = {\n SyncMode.RECEIVER: '(REMOTE ➔ LOCAL)',\n SyncMode.SENDER: '(LOCAL ➔ REMOTE)',\n SyncMode.PROXY: '(REMOTE ➔ LOCAL ➔ REMOTE)',\n SyncMode.DUMP_LOCAL: '(LOCAL, ONLY EXPORT)',\n SyncMode.DUMP_REMOTE: '(REMOTE, ONLY EXPORT)',\n SyncMode.IMPORT_LOCAL: '(REMOTE, ONLY IMPORT)',\n SyncMode.IMPORT_REMOTE: '(LOCAL, ONLY IMPORT)',\n SyncMode.SYNC_LOCAL: '(LOCAL ➔ LOCAL)',\n SyncMode.SYNC_REMOTE: '(REMOTE ➔ REMOTE)'\n }\n\n for _mode, _desc in _modes.items():\n if getattr(SyncMode, 'is_' + _mode.lower())():\n sync_mode = _mode\n _description = _desc\n\n if is_import():\n output.message(\n output.Subject.INFO,\n f'Import file {output.CliFormat.BLACK}{system.config[\"import\"]}{output.CliFormat.ENDC}',\n True\n )\n\n system.config['is_same_client'] = SyncMode.is_same_host()\n\n output.message(\n output.Subject.INFO,\n f'Sync mode: {sync_mode} {output.CliFormat.BLACK}{_description}{output.CliFormat.ENDC}',\n True\n )", "def is_delta_sync_enabled(cluster_config):\n\n cluster = load_cluster_config_json(cluster_config)\n try:\n return cluster[\"environment\"][\"delta_sync_enabled\"]\n except KeyError:\n return False", "def lookup_sync(self, flag=0):\n if flag == 1 or self.ser.read() == self.sync[3]:\n if self.ser.read() == self.sync[2]:\n if self.ser.read() == self.sync[1]:\n if self.ser.read() == self.sync[0]:\n return True\n elif self.ser.read() == self.sync[-1]:\n return self.lookup_sync(flag=1)\n else:\n return False\n elif self.ser.read() == self.sync[-1]:\n return self.lookup_sync(flag=1)\n else:\n return False\n elif self.ser.read() == self.sync[-1]:\n return self.lookup_sync(flag=1)\n else:\n return False\n else:\n return False", "def config_sync(self) -> Optional['outputs.FeatureMembershipConfigmanagementConfigSync']:\n return pulumi.get(self, \"config_sync\")", "def get_time_sync_interval(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetTimeSyncInterval', self.handle)", "def setSyncMode(self, IsPauseOn = True):\n self._IsPauseOn = IsPauseOn", "def getSyncObj(self):\n \n return self.sync_obj", "def swd_sync(self, pad=False):\n if pad:\n self._dll.JLINK_SWD_SyncBytes()\n else:\n self._dll.JLINK_SWD_SyncBits()\n return None", "def is_time_synchronization_enabled(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsTimeSynchronizationEnabled', self.handle))", "def is_study_mapping_in_sync(self, unit_id: str, compare_time: float):\n return compare_time > 
self._last_study_mapping_update_times[unit_id]", "def is_synchronized(self):\r\n sync_state = True\r\n \r\n for particle in self.population:\r\n sync_state = (sync_state and particle.sync)\r\n \r\n if not sync_state:\r\n break;\r\n \r\n return sync_state", "def getSyncFor (self, conn) :\r\n for pw, _conn in self.clients :\r\n if _conn and _conn.getSyncInfo(conn) :\r\n self.ongoing_sync_count += 1\r\n return True\r\n \r\n return False", "def tob_connection_synced():\n global app_config\n\n return (\"TOB_CONNECTION\" in app_config) and (app_config[\"TOB_CONNECTION\"] in synced) and (synced[app_config[\"TOB_CONNECTION\"]])", "def update_last_sync(user, app_id, sync_date, version):\n from corehq.apps.users.models import LastSync\n last_sync = filter_by_app(user.reporting_metadata.last_syncs, app_id)\n if _last_sync_needs_update(last_sync, sync_date):\n if last_sync is None:\n last_sync = LastSync()\n user.reporting_metadata.last_syncs.append(last_sync)\n last_sync.sync_date = sync_date\n last_sync.build_version = version\n last_sync.app_id = app_id\n\n if _last_sync_needs_update(user.reporting_metadata.last_sync_for_user, sync_date):\n user.reporting_metadata.last_sync_for_user = last_sync\n\n return True\n return False", "def set_time_sync_smart_mode_enabled(self, bEnabled):\n\t\tcall_sdk_function('PrlVmCfg_SetTimeSyncSmartModeEnabled', self.handle, bEnabled)", "def get_sync_status(self, connector_id, previous_completed_at):\n # @todo Need logic here to tell if the sync is not running at all and not\n # likely to run in the near future.\n connector_details = self.get_connector(connector_id)\n succeeded_at = self._parse_timestamp(connector_details[\"succeeded_at\"])\n failed_at = self._parse_timestamp(connector_details[\"failed_at\"])\n current_completed_at = (\n succeeded_at if succeeded_at > failed_at else failed_at\n )\n\n # The only way to tell if a sync failed is to check if its latest\n # failed_at value is greater than then last known \"sync completed at\" value.\n if failed_at > previous_completed_at:\n service_name = connector_details[\"service\"]\n schema_name = connector_details[\"schema\"]\n raise AirflowException(\n f'Fivetran sync for connector \"{connector_id}\" failed; '\n f\"please see logs at \"\n f\"{self._connector_ui_url_logs(service_name, schema_name)}\"\n )\n\n sync_state = connector_details[\"status\"][\"sync_state\"]\n self.log.info(f'Connector \"{connector_id}\": sync_state = {sync_state}')\n\n # Check if sync started by FivetranOperator has finished\n # indicated by new 'succeeded_at' timestamp\n if current_completed_at > previous_completed_at:\n self.log.info('Connector \"{}\": succeeded_at: {}'.format(\n connector_id, succeeded_at.to_iso8601_string())\n )\n return True\n else:\n return False", "def sync(self):\n return self._sync" ]
[ "0.8355211", "0.6763967", "0.6396875", "0.5638439", "0.55594766", "0.55594766", "0.5441413", "0.5346826", "0.52634716", "0.52562106", "0.5183778", "0.5142834", "0.5115734", "0.50605816", "0.49895975", "0.49873945", "0.49772185", "0.49214455", "0.47861922", "0.47472188", "0.46497208", "0.46291634", "0.45880428", "0.45617563", "0.45305416", "0.44909534", "0.44542545", "0.44491333", "0.4416292", "0.4402528" ]
0.6949639
1
Setter method for ldp_in_sync, mapped from YANG variable /isis_state/interface_detail/isis_intf/ldp_sync_info/ldp_in_sync (boolean)
def _set_ldp_in_sync(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="ldp-in-sync", rest_name="ldp-in-sync", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='boolean', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
        'error-string': """ldp_in_sync must be of a type compatible with boolean""",
        'defined-type': "boolean",
        'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="ldp-in-sync", rest_name="ldp-in-sync", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='boolean', is_config=False)""",
      })

  self.__ldp_in_sync = t
  if hasattr(self, '_set'):
    self._set()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_ldp_sync_enabled(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'is-enabled': {'value': 1}, u'is-disabled': {'value': 0}},), is_leaf=True, yang_name=\"ldp-sync-enabled\", rest_name=\"ldp-sync-enabled\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-status', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"ldp_sync_enabled must be of a type compatible with isis-status\"\"\",\n 'defined-type': \"brocade-isis-operational:isis-status\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'is-enabled': {'value': 1}, u'is-disabled': {'value': 0}},), is_leaf=True, yang_name=\"ldp-sync-enabled\", rest_name=\"ldp-sync-enabled\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-status', is_config=False)\"\"\",\n })\n\n self.__ldp_sync_enabled = t\n if hasattr(self, '_set'):\n self._set()", "def _get_ldp_in_sync(self):\n return self.__ldp_in_sync", "def _get_ldp_sync_enabled(self):\n return self.__ldp_sync_enabled", "def on_premises_sync_enabled(self):\n if \"onPremisesSyncEnabled\" in self._prop_dict:\n return self._prop_dict[\"onPremisesSyncEnabled\"]\n else:\n return None", "def on_premises_sync_enabled(self):\n if \"onPremisesSyncEnabled\" in self._prop_dict:\n return self._prop_dict[\"onPremisesSyncEnabled\"]\n else:\n return None", "def getAddressInSync(self):\n return self._addrInSyncMode", "def setSyncMode(self, IsPauseOn = True):\n self._IsPauseOn = IsPauseOn", "def is_time_sync_smart_mode_enabled(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsTimeSyncSmartModeEnabled', self.handle))", "def isSync(self):\n return False", "def getSyncInfo (self, connection) :\r\n \r\n if self.state != 'valid' :\r\n return False\r\n \r\n if self.sync_target :\r\n return False\r\n \r\n self.state = 'recv_sync'\r\n self.sync_target = connection\r\n self.do_sync_get()\r\n \r\n return True", "def is_interface_igp_sync_mpls_enabled(\n interface, device, vrf=\"\", parsed_output=\"\"):\n\n if not parsed_output:\n try:\n parsed_output = device.parse(\n \"show mpls ldp igp sync interface {intf}\".format(\n intf=interface\n )\n )\n except SchemaEmptyParserError:\n raise SchemaEmptyParserError(\n \"Fail to parse 'show mpls ldp igp sync \"\n \"interface {intf}' command\".format(intf=interface)\n )\n\n vrf = vrf if vrf else \"default\"\n\n try:\n igp_synchronization_enabled = (\n parsed_output[\"vrf\"]\n .get(vrf, {})\n .get(\"interface\", {})\n .get(interface, {})\n .get(\"ldp\", {})\n .get(\"igp_synchronization_enabled\", False)\n )\n\n sync_achieved = (\n parsed_output[\"vrf\"]\n .get(vrf, {})\n .get(\"interface\", {})\n .get(interface, {})\n .get(\"sync\", {})\n .get(\"status\", {})\n .get(\"sync_achieved\", False)\n )\n except KeyError:\n return False\n\n return igp_synchronization_enabled and sync_achieved", "def _enable_sync(self, enable_sync: bool = True):\n self.__enable_sync = enable_sync", "def setSyncObj(self, sync_obj):\n \n self.sync_obj = sync_obj", "def check_sync_mode():\n global sync_mode\n 
_description = ''\n\n _modes = {\n SyncMode.RECEIVER: '(REMOTE ➔ LOCAL)',\n SyncMode.SENDER: '(LOCAL ➔ REMOTE)',\n SyncMode.PROXY: '(REMOTE ➔ LOCAL ➔ REMOTE)',\n SyncMode.DUMP_LOCAL: '(LOCAL, ONLY EXPORT)',\n SyncMode.DUMP_REMOTE: '(REMOTE, ONLY EXPORT)',\n SyncMode.IMPORT_LOCAL: '(REMOTE, ONLY IMPORT)',\n SyncMode.IMPORT_REMOTE: '(LOCAL, ONLY IMPORT)',\n SyncMode.SYNC_LOCAL: '(LOCAL ➔ LOCAL)',\n SyncMode.SYNC_REMOTE: '(REMOTE ➔ REMOTE)'\n }\n\n for _mode, _desc in _modes.items():\n if getattr(SyncMode, 'is_' + _mode.lower())():\n sync_mode = _mode\n _description = _desc\n\n if is_import():\n output.message(\n output.Subject.INFO,\n f'Import file {output.CliFormat.BLACK}{system.config[\"import\"]}{output.CliFormat.ENDC}',\n True\n )\n\n system.config['is_same_client'] = SyncMode.is_same_host()\n\n output.message(\n output.Subject.INFO,\n f'Sync mode: {sync_mode} {output.CliFormat.BLACK}{_description}{output.CliFormat.ENDC}',\n True\n )", "def lookup_sync(self, flag=0):\n if flag == 1 or self.ser.read() == self.sync[3]:\n if self.ser.read() == self.sync[2]:\n if self.ser.read() == self.sync[1]:\n if self.ser.read() == self.sync[0]:\n return True\n elif self.ser.read() == self.sync[-1]:\n return self.lookup_sync(flag=1)\n else:\n return False\n elif self.ser.read() == self.sync[-1]:\n return self.lookup_sync(flag=1)\n else:\n return False\n elif self.ser.read() == self.sync[-1]:\n return self.lookup_sync(flag=1)\n else:\n return False\n else:\n return False", "def get_sync_mode():\n return sync_mode", "def is_delta_sync_enabled(cluster_config):\n\n cluster = load_cluster_config_json(cluster_config)\n try:\n return cluster[\"environment\"][\"delta_sync_enabled\"]\n except KeyError:\n return False", "def set_time_sync_smart_mode_enabled(self, bEnabled):\n\t\tcall_sdk_function('PrlVmCfg_SetTimeSyncSmartModeEnabled', self.handle, bEnabled)", "def _get_ldp_sync_hold_down(self):\n return self.__ldp_sync_hold_down", "def find_sync_loop(self):\n self._find_xbb_infos()\n self._find_exit_only_bbs()\n if not self._build_rd_wrt_list():\n return False, None\n self._find_sync_loop_vars()\n return self.syncinfo.is_sync_loop, self.syncinfo", "def swd_sync(self, pad=False):\n if pad:\n self._dll.JLINK_SWD_SyncBytes()\n else:\n self._dll.JLINK_SWD_SyncBits()\n return None", "def set_time_sync_interval(self, nTimeSyncInterval):\n\t\tcall_sdk_function('PrlVmCfg_SetTimeSyncInterval', self.handle, nTimeSyncInterval)", "def config_sync(self) -> Optional['outputs.FeatureMembershipConfigmanagementConfigSync']:\n return pulumi.get(self, \"config_sync\")", "def in_smartctl_database(self, in_smartctl_database: bool):\n\n self._in_smartctl_database = in_smartctl_database", "def is_time_synchronization_enabled(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsTimeSynchronizationEnabled', self.handle))", "def is_study_mapping_in_sync(self, unit_id: str, compare_time: float):\n return compare_time > self._last_study_mapping_update_times[unit_id]", "def get_time_sync_interval(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetTimeSyncInterval', self.handle)", "def set_in_check(self, state):\n\n self._in_check = state", "def _set_lsp_config_notify_isis(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"lsp-config-notify-isis\", rest_name=\"lsp-config-notify-isis\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', 
yang_type='boolean', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_config_notify_isis must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"lsp-config-notify-isis\", rest_name=\"lsp-config-notify-isis\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='boolean', is_config=True)\"\"\",\n })\n\n self.__lsp_config_notify_isis = t\n if hasattr(self, '_set'):\n self._set()", "def _is_sync_var(self, vdef):\n # Pick memory variables among invaraint-loop variables\n if AsmParser.is_memory(vdef):\n # Thread local variable (e.g., %fs:0x28) cannot\n # be a sync. variable\n if AsmParser.is_thread_local_memory(self.arch, vdef):\n return False\n\n # Pick variables, which are written in a lexical form\n # This is needed since we will read the synchronization\n # variables in runtime.\n for rd_wrt in self.syncinfo.rd_wrt_set:\n if vdef.find(rd_wrt) != -1:\n return True\n return False" ]
[ "0.6656208", "0.6542917", "0.6353083", "0.5340248", "0.5340248", "0.5332905", "0.532342", "0.5271925", "0.5264226", "0.52454627", "0.49301267", "0.48848814", "0.48773974", "0.48615354", "0.48599523", "0.4857011", "0.48422334", "0.4809144", "0.47774222", "0.47425586", "0.46833166", "0.4661526", "0.46251988", "0.46075004", "0.45676622", "0.45496768", "0.45372936", "0.45348117", "0.4482563", "0.44463086" ]
0.8821603
0
Getter method for remain_hd_time, mapped from YANG variable /isis_state/interface_detail/isis_intf/ldp_sync_info/remain_hd_time (uint32)
def _get_remain_hd_time(self):
  return self.__remain_hd_time
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_remain_hd_time(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"remain-hd-time\", rest_name=\"remain-hd-time\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"remain_hd_time must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"remain-hd-time\", rest_name=\"remain-hd-time\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__remain_hd_time = t\n if hasattr(self, '_set'):\n self._set()", "def remaintime_hour(self):\n return self._get_time_info([\"Remain_Time_H\", \"remainTimeHour\"])", "def _get_ldp_sync_hd_expired(self):\n return self.__ldp_sync_hd_expired", "def get_remaining(self, curtime=None):\n if curtime is None:\n curtime = time.time()\n return self.time - curtime", "def calculate_time_percentage_left(self):\n time_left = self.calculate_time_left()\n return time_left / self.attributes[AT.TIME_TO_EXPIRE]", "def time_left(self):\n return lib.sp_offline_time_left(self._session._sp_session)", "def time_left(self) -> str:\n return self._time_to_auto_off", "def remaintime_min(self):\n return self._get_time_info([\"Remain_Time_M\", \"remainTimeMinute\"])", "def get_timer_remaining_time(self) -> Optional[int]:\n current_mode = self.get_mode()\n # Check that 'Timer' program is enabled.\n # Retreiving the remaining time without\n # this programm being selected first would trigger\n # a key error when unpacking the device reply.\n if current_mode != 'Timer':\n self.logger.warning(\"Can't retreive remaining time of the 'Timer' \"\n \"program since this program is not currently \"\n f\"selected (selected program is '{current_mode}'). 
\"\n \"Select 'Timer' program first.\")\n return None\n else:\n return self.send(self.cmd.GET_TIMER_REMAINING_TIME)", "def time_left(self):\n return self.timeout - self.current_milli_time()", "def calculate_time_left(self):\n time_left = self.attributes[AT.TIME_CREATED] \\\n + self.attributes[AT.TIME_TO_EXPIRE] \\\n - get_ticks()\n if time_left < 0:\n time_left = 0\n return time_left", "def sms_lock_time_remaining(time_of_lock):\n\treturn PERSONAL_GROUP_SMS_LOCK_TTL - (time.time() - time_of_lock)", "def get_remaining(self) -> str:\n hex_remaining_time = hexlify(self.message)[294:302]\n int_remaining_time_seconds = int(\n hex_remaining_time[6:8]\n + hex_remaining_time[4:6]\n + hex_remaining_time[2:4]\n + hex_remaining_time[0:2],\n 16,\n )\n return seconds_to_iso_time(int_remaining_time_seconds)", "def time_left(self):\n t=self.transport\n return (t.stoptime or t.get_length())-t.get_time()", "def remain_time(chat_id):\n\n if bot_collection[chat_id].paused:\n return bot_collection[chat_id].timers.extended\n\n current_time = datetime.now()\n time_passed = convert_time(current_time - bot_collection[chat_id].last_timer_start)\n time_was = bot_collection[chat_id].timers.current_time\n remain = time_was - time_passed\n\n return remain if remain > 0 else 0", "def _set_ldp_sync_hd_expired(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"ldp-sync-hd-expired\", rest_name=\"ldp-sync-hd-expired\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='boolean', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"ldp_sync_hd_expired must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"ldp-sync-hd-expired\", rest_name=\"ldp-sync-hd-expired\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='boolean', is_config=False)\"\"\",\n })\n\n self.__ldp_sync_hd_expired = t\n if hasattr(self, '_set'):\n self._set()", "def get_last_sync(self):\n return self.app.sync_thread.last_sync.strftime('%H:%M')", "async def getDelayTimeRemaining(self):\n delay_time_remaining = await self.director.getItemVariableValue(\n self.item_id, \"DELAY_TIME_REMAINING\"\n )\n return delay_time_remaining", "def PreferredLifeTime(self):\n if self.force_auto_sync:\n self.get('PreferredLifeTime')\n return self._PreferredLifeTime", "def time_remaining(self) -> float:\n\n return self.event.time - time.time()", "def time_left(self) -> float:\n return self._alarm_silence - time.monotonic()", "def remaining(self):\n return self.value - time.time()", "def time_left(self):\r\n return 10 - (int(time.time()) - self.start_time)", "def Remaining(self):\n if self._timeout is None:\n return None\n\n # Get start time on first calculation\n if self._start_time is None:\n self._start_time = self._time_fn()\n\n # Calculate remaining time\n remaining_timeout = self._start_time + self._timeout - self._time_fn()\n\n if not self._allow_negative:\n # Ensure timeout is always >= 0\n return max(0.0, remaining_timeout)\n\n return remaining_timeout", "def mod_time(self):\n return self._mod_time", "def mod_time(self):\n return 
self._mod_time", "def remaining_ms():", "def _get_lsp_frr_hold_time(self):\n return self.__lsp_frr_hold_time", "def time_remaining(self):\n with self._lock:\n deadline = self._expiration_manager.deadline()\n return max(0.0, deadline - time.time())", "def last_seen_hours(self):\n return self.last_seen.seconds / 3600" ]
[ "0.75369066", "0.5767875", "0.54118884", "0.52375525", "0.51527005", "0.51103103", "0.50226915", "0.49990252", "0.49933913", "0.49863082", "0.4969344", "0.4923328", "0.49016246", "0.4872389", "0.48345572", "0.48165497", "0.4742409", "0.47297677", "0.46843266", "0.46405488", "0.4584181", "0.45702824", "0.45489272", "0.45313635", "0.44176483", "0.44176483", "0.4407098", "0.44037595", "0.43989298", "0.43885034" ]
0.6868881
1
Setter method for remain_hd_time, mapped from YANG variable /isis_state/interface_detail/isis_intf/ldp_sync_info/remain_hd_time (uint32)
def _set_remain_hd_time(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="remain-hd-time", rest_name="remain-hd-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
        'error-string': """remain_hd_time must be of a type compatible with uint32""",
        'defined-type': "uint32",
        'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="remain-hd-time", rest_name="remain-hd-time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)""",
      })

  self.__remain_hd_time = t
  if hasattr(self, '_set'):
    self._set()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_remain_hd_time(self):\n return self.__remain_hd_time", "def remaintime_hour(self):\n return self._get_time_info([\"Remain_Time_H\", \"remainTimeHour\"])", "def _set_ldp_sync_hd_expired(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"ldp-sync-hd-expired\", rest_name=\"ldp-sync-hd-expired\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='boolean', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"ldp_sync_hd_expired must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"ldp-sync-hd-expired\", rest_name=\"ldp-sync-hd-expired\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='boolean', is_config=False)\"\"\",\n })\n\n self.__ldp_sync_hd_expired = t\n if hasattr(self, '_set'):\n self._set()", "def time_left(self) -> str:\n return self._time_to_auto_off", "def get_remaining(self, curtime=None):\n if curtime is None:\n curtime = time.time()\n return self.time - curtime", "def calculate_time_percentage_left(self):\n time_left = self.calculate_time_left()\n return time_left / self.attributes[AT.TIME_TO_EXPIRE]", "def time_left(self):\n return self.timeout - self.current_milli_time()", "def time_left(self):\n return lib.sp_offline_time_left(self._session._sp_session)", "def _get_ldp_sync_hd_expired(self):\n return self.__ldp_sync_hd_expired", "def calculate_time_left(self):\n time_left = self.attributes[AT.TIME_CREATED] \\\n + self.attributes[AT.TIME_TO_EXPIRE] \\\n - get_ticks()\n if time_left < 0:\n time_left = 0\n return time_left", "def remain_time(chat_id):\n\n if bot_collection[chat_id].paused:\n return bot_collection[chat_id].timers.extended\n\n current_time = datetime.now()\n time_passed = convert_time(current_time - bot_collection[chat_id].last_timer_start)\n time_was = bot_collection[chat_id].timers.current_time\n remain = time_was - time_passed\n\n return remain if remain > 0 else 0", "def get_timer_remaining_time(self) -> Optional[int]:\n current_mode = self.get_mode()\n # Check that 'Timer' program is enabled.\n # Retreiving the remaining time without\n # this programm being selected first would trigger\n # a key error when unpacking the device reply.\n if current_mode != 'Timer':\n self.logger.warning(\"Can't retreive remaining time of the 'Timer' \"\n \"program since this program is not currently \"\n f\"selected (selected program is '{current_mode}'). 
\"\n \"Select 'Timer' program first.\")\n return None\n else:\n return self.send(self.cmd.GET_TIMER_REMAINING_TIME)", "def sms_lock_time_remaining(time_of_lock):\n\treturn PERSONAL_GROUP_SMS_LOCK_TTL - (time.time() - time_of_lock)", "def _set_lsp_frr_hold_time(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"lsp-frr-hold-time\", rest_name=\"lsp-frr-hold-time\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_frr_hold_time must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"lsp-frr-hold-time\", rest_name=\"lsp-frr-hold-time\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)\"\"\",\n })\n\n self.__lsp_frr_hold_time = t\n if hasattr(self, '_set'):\n self._set()", "def remaintime_min(self):\n return self._get_time_info([\"Remain_Time_M\", \"remainTimeMinute\"])", "def time_remaining(self) -> float:\n\n return self.event.time - time.time()", "def time_left(self):\n t=self.transport\n return (t.stoptime or t.get_length())-t.get_time()", "def time_left(self):\r\n return 10 - (int(time.time()) - self.start_time)", "def set_remain_time(self, time):\n for task in self.tasks:\n task.remain_time = time", "def time_left(self) -> float:\n return self._alarm_silence - time.monotonic()", "def get_remaining(self) -> str:\n hex_remaining_time = hexlify(self.message)[294:302]\n int_remaining_time_seconds = int(\n hex_remaining_time[6:8]\n + hex_remaining_time[4:6]\n + hex_remaining_time[2:4]\n + hex_remaining_time[0:2],\n 16,\n )\n return seconds_to_iso_time(int_remaining_time_seconds)", "def time_remaining_seconds(self, time_remaining_seconds):\n\n self._time_remaining_seconds = time_remaining_seconds", "def remaining(self):\n return self.value - time.time()", "def Remaining(self):\n if self._timeout is None:\n return None\n\n # Get start time on first calculation\n if self._start_time is None:\n self._start_time = self._time_fn()\n\n # Calculate remaining time\n remaining_timeout = self._start_time + self._timeout - self._time_fn()\n\n if not self._allow_negative:\n # Ensure timeout is always >= 0\n return max(0.0, remaining_timeout)\n\n return remaining_timeout", "def setHoldTime(self, channel, time, unitCode=0):\n # Ensure compliance with level boundary conditions\n\n resp = self.XAPCommand('GHOLD', channel, time, unitCode=unitCode)\n return float(resp)", "def PreferredLifeTime(self):\n if self.force_auto_sync:\n self.get('PreferredLifeTime')\n return self._PreferredLifeTime", "def SetTimeLeft(self, *args, **kwargs):\n pass", "def time_until_full(self):\n if ((datetime.datetime.utcnow() - self.initial_time).total_seconds() \n > UPDATE_PERIOD\n and self.space_decay_rate() < 0):\n secs_until_full = self.available_space() / -self.space_decay_rate() \n return datetime.timedelta(seconds=secs_until_full)", "def set_dwell_time(self, dwell_time):\n 
raise NotImplementedError", "def secondsLeft(self)->int:\n t = datetime.utcnow()\n if self._scenario == LM_HardDate.Scenario.ValidSince:\n return 0 if t >= self.timeBegin else int((self.timeBegin - t).total_seconds())\n else:\n return 0 if t >= self.timeEnd else int((self.timeEnd - t).total_seconds())" ]
[ "0.6527339", "0.56419486", "0.5406978", "0.51207715", "0.50473714", "0.5004871", "0.49524683", "0.48564476", "0.48525473", "0.48316664", "0.48039407", "0.48016867", "0.47546086", "0.47412762", "0.46979687", "0.46725908", "0.4671195", "0.46258426", "0.45471296", "0.44961315", "0.44937912", "0.4471524", "0.44681957", "0.44631752", "0.4460603", "0.4445415", "0.44392982", "0.44196543", "0.44069377", "0.44064072" ]
0.82343465
0
Getter method for ldp_sync_hd_expired, mapped from YANG variable /isis_state/interface_detail/isis_intf/ldp_sync_info/ldp_sync_hd_expired (boolean)
def _get_ldp_sync_hd_expired(self): return self.__ldp_sync_hd_expired
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_ldp_sync_hd_expired(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"ldp-sync-hd-expired\", rest_name=\"ldp-sync-hd-expired\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='boolean', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"ldp_sync_hd_expired must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"ldp-sync-hd-expired\", rest_name=\"ldp-sync-hd-expired\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='boolean', is_config=False)\"\"\",\n })\n\n self.__ldp_sync_hd_expired = t\n if hasattr(self, '_set'):\n self._set()", "def _get_ldp_sync_enabled(self):\n return self.__ldp_sync_enabled", "def _get_ldp_sync_hold_down(self):\n return self.__ldp_sync_hold_down", "def check_if_last_deadline_has_expired(group):\n assignment = group.parentnode\n if group.last_deadline_datetime < datetime.now():\n if assignment.deadline_handling == Assignment.DEADLINEHANDLING_HARD:\n return 'hard'\n else:\n return 'soft'\n else:\n return False", "def is_expired (self, now=None):\n if now is None: now = time.time()\n return self.is_idle_timed_out(now) or self.is_hard_timed_out(now)", "def _has_expired(self):\n try:\n expires = datetime.fromtimestamp(\n os.stat(self.lockfile).st_mtime\n )\n except OSError as e:\n if e in self.NOT_EXIST_ERRORS:\n return False\n raise\n return datetime.now() > expires", "def is_expired(self):\n\n return time.time() * 1000 - self._refreshed_on > self._expire", "def _set_ldp_sync_enabled(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'is-enabled': {'value': 1}, u'is-disabled': {'value': 0}},), is_leaf=True, yang_name=\"ldp-sync-enabled\", rest_name=\"ldp-sync-enabled\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-status', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"ldp_sync_enabled must be of a type compatible with isis-status\"\"\",\n 'defined-type': \"brocade-isis-operational:isis-status\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'is-enabled': {'value': 1}, u'is-disabled': {'value': 0}},), is_leaf=True, yang_name=\"ldp-sync-enabled\", rest_name=\"ldp-sync-enabled\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-status', is_config=False)\"\"\",\n })\n\n self.__ldp_sync_enabled = t\n if hasattr(self, '_set'):\n self._set()", "def EnableAsyncConfMasterFlowRemovedHardTimeOut(self):\n\t\treturn self._get_attribute('enableAsyncConfMasterFlowRemovedHardTimeOut')", "def is_expired(self):\n return 
self._bExpired", "def has_expired(self):\n self.ensure_one()\n return datetime.now() > fields.Datetime.from_string(self.expires)", "def isFreeTimeExpired(self):\n assert self.notify.debugStateCall(self, 'loginFSM', 'gameFSM')\n if self.accountOldAuth:\n return 0\n\n # check for the config overrides\n # if true, free-time-expired takes precedence over unlimited-free-time\n if base.config.GetBool(\"free-time-expired\", 0):\n return 1\n if base.config.GetBool(\"unlimited-free-time\", 0):\n return 0\n\n # -1 == never expires (paid/exempt)\n if self.freeTimeExpiresAt == -1:\n return 0\n\n # 0 == expired\n if self.freeTimeExpiresAt == 0:\n return 1\n\n if self.freeTimeExpiresAt < -1:\n self.notify.warning('freeTimeExpiresAt is less than -1 (%s)' %\n self.freeTimeExpiresAt)\n\n # freeTimeExpiresAt is an epoch time\n # is it in the past?\n if self.freeTimeExpiresAt < time.time():\n return 1\n else:\n return 0", "def EnableAsyncConfSlaveFlowRemovedHardTimeOut(self):\n\t\treturn self._get_attribute('enableAsyncConfSlaveFlowRemovedHardTimeOut')", "def is_offline_management_enabled(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsOfflineManagementEnabled', self.handle))", "def token_is_expired(self):\n # type: () -> bool\n token = self.token\n if not token:\n return False\n\n return token[\"expires_at\"] < time()", "def token_is_stale(self):\n return self.m_token_expiry < datetime.datetime.now(tz=pytz.utc)", "def _get_ldp_in_sync(self):\n return self.__ldp_in_sync", "def is_expired(self):\n return timeutils.utcnow_ts() > self.expire_ts", "def has_expired(self, now):\n if now < self._expires:\n return False\n\n return self._enclave_wait_timer.has_expired()", "def is_expired(self) -> bool:\n return now() > self.expires", "def is_expired(self):\n return utcnow() >= self.expires", "def is_time_sync_smart_mode_enabled(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsTimeSyncSmartModeEnabled', self.handle))", "def is_expired_cursor(self):\n return self._tag == 'expired_cursor'", "def check_luser_email_column_outdated(workflow: models.Workflow):\n if workflow.luser_email_column:\n md5_hash = sql.get_text_column_hash(\n workflow.get_data_frame_table_name(),\n workflow.luser_email_column.name)\n if md5_hash != workflow.luser_email_column_md5:\n # Information is outdated\n workflow.lusers_is_outdated = True\n workflow.save(update_fields=['lusers_is_outdated'])", "def is_expired(self) -> bool:\n if self.purpose == Purpose.password_reset:\n now = datetime.utcnow()\n expires_after = timedelta(hours=24)\n return now >= (self.created_at + expires_after)\n else:\n return False", "def is_cache_outdated(self, domain, language):\n\n return self.is_outdated(self.domain_cache[domain][language]['update'])", "def is_expired(snap):\n exp_epoch = int(snap.split(\"_\")[const.VLAB_SNAP_EXPIRES])\n current_time = int(time.time())\n return exp_epoch < current_time", "def expired(self): # pragma: no cover\n return self._state in (_State.EXPIRING, _State.EXPIRED)", "def time_left(self):\n return lib.sp_offline_time_left(self._session._sp_session)", "def is_expired(self):\n return self.expiration_date <= self._now()" ]
[ "0.82901675", "0.48444963", "0.48373258", "0.47381613", "0.46870935", "0.46846142", "0.4662554", "0.4562524", "0.45272446", "0.4494591", "0.4440222", "0.44194195", "0.44167075", "0.43977875", "0.43757567", "0.43720958", "0.43687713", "0.43644282", "0.4342112", "0.4325486", "0.43074095", "0.42912343", "0.42869046", "0.4280808", "0.42644772", "0.42573473", "0.42330772", "0.4224479", "0.42230183", "0.4219978" ]
0.7557055
1
Setter method for ldp_sync_hd_expired, mapped from YANG variable /isis_state/interface_detail/isis_intf/ldp_sync_info/ldp_sync_hd_expired (boolean)
def _set_ldp_sync_hd_expired(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="ldp-sync-hd-expired", rest_name="ldp-sync-hd-expired", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='boolean', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """ldp_sync_hd_expired must be of a type compatible with boolean""", 'defined-type': "boolean", 'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="ldp-sync-hd-expired", rest_name="ldp-sync-hd-expired", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='boolean', is_config=False)""", }) self.__ldp_sync_hd_expired = t if hasattr(self, '_set'): self._set()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_ldp_sync_hd_expired(self):\n return self.__ldp_sync_hd_expired", "def is_expired(self):\n\n return time.time() * 1000 - self._refreshed_on > self._expire", "def is_expired (self, now=None):\n if now is None: now = time.time()\n return self.is_idle_timed_out(now) or self.is_hard_timed_out(now)", "def _has_expired(self):\n try:\n expires = datetime.fromtimestamp(\n os.stat(self.lockfile).st_mtime\n )\n except OSError as e:\n if e in self.NOT_EXIST_ERRORS:\n return False\n raise\n return datetime.now() > expires", "def is_expired(self):\n return timeutils.utcnow_ts() > self.expire_ts", "def has_expired(self):\n self.ensure_one()\n return datetime.now() > fields.Datetime.from_string(self.expires)", "def is_expired(self):\n return self._bExpired", "def check_if_last_deadline_has_expired(group):\n assignment = group.parentnode\n if group.last_deadline_datetime < datetime.now():\n if assignment.deadline_handling == Assignment.DEADLINEHANDLING_HARD:\n return 'hard'\n else:\n return 'soft'\n else:\n return False", "def token_is_expired(self):\n # type: () -> bool\n token = self.token\n if not token:\n return False\n\n return token[\"expires_at\"] < time()", "def has_expired(self, now):\n if now < self._expires:\n return False\n\n return self._enclave_wait_timer.has_expired()", "def is_expired(self):\n return utcnow() >= self.expires", "def is_expired(self) -> bool:\n return now() > self.expires", "def is_expired(self):\n return self.expiration_date <= self._now()", "def isFreeTimeExpired(self):\n assert self.notify.debugStateCall(self, 'loginFSM', 'gameFSM')\n if self.accountOldAuth:\n return 0\n\n # check for the config overrides\n # if true, free-time-expired takes precedence over unlimited-free-time\n if base.config.GetBool(\"free-time-expired\", 0):\n return 1\n if base.config.GetBool(\"unlimited-free-time\", 0):\n return 0\n\n # -1 == never expires (paid/exempt)\n if self.freeTimeExpiresAt == -1:\n return 0\n\n # 0 == expired\n if self.freeTimeExpiresAt == 0:\n return 1\n\n if self.freeTimeExpiresAt < -1:\n self.notify.warning('freeTimeExpiresAt is less than -1 (%s)' %\n self.freeTimeExpiresAt)\n\n # freeTimeExpiresAt is an epoch time\n # is it in the past?\n if self.freeTimeExpiresAt < time.time():\n return 1\n else:\n return 0", "def token_is_stale(self):\n return self.m_token_expiry < datetime.datetime.now(tz=pytz.utc)", "def is_expired(self) -> bool:\n if self.purpose == Purpose.password_reset:\n now = datetime.utcnow()\n expires_after = timedelta(hours=24)\n return now >= (self.created_at + expires_after)\n else:\n return False", "def is_expired_cursor(self):\n return self._tag == 'expired_cursor'", "def is_expired(self):\n return int(time.time()) - self.time > self.interval", "def is_expired(snap):\n exp_epoch = int(snap.split(\"_\")[const.VLAB_SNAP_EXPIRES])\n current_time = int(time.time())\n return exp_epoch < current_time", "def expired(self): # pragma: no cover\n return self._state in (_State.EXPIRING, _State.EXPIRED)", "def expired(self) -> bool:\n if not self.use_wts:\n return False\n\n return datetime.now() > self.expire", "def has_expired(self) -> bool:\n raise NotImplementedError() # pragma: nocover", "def hd_sync_epoch(self, hd_sync_epoch):\n if (self._configuration.client_side_validation and\n hd_sync_epoch is not None and hd_sync_epoch < 0): # noqa: E501\n raise ValueError(\"Invalid value for `hd_sync_epoch`, must be a value greater than or equal to `0`\") # noqa: E501\n\n self._hd_sync_epoch = hd_sync_epoch", "def is_expired(self):\n if not self.is_signed:\n 
return True\n return int(self._token_claims.get(self.__class__.exp_claim, 0)) < int(\n time.time()\n )", "def _set_remain_hd_time(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"remain-hd-time\", rest_name=\"remain-hd-time\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"remain_hd_time must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"remain-hd-time\", rest_name=\"remain-hd-time\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__remain_hd_time = t\n if hasattr(self, '_set'):\n self._set()", "def test_is_expired_time_based(self):\n expired_dt = datetime.now() + timedelta(hours=-1)\n good_dt = datetime.now() + timedelta(hours=1)\n expired_pass = DoorPassFactory.create(device=self.device, expires_at=expired_dt)\n good_pass = DoorPassFactory.create(device=self.device, expires_at=good_dt)\n self.assertTrue(expired_pass.is_expired())\n self.assertFalse(good_pass.is_expired())", "def isStale(self):\n return self.m_expirationDate < datetime.datetime.now(tz=pytz.utc)", "def expired(self):\n return int(time.time()) > self.expires_at", "def EnableAsyncConfMasterFlowRemovedHardTimeOut(self):\n\t\treturn self._get_attribute('enableAsyncConfMasterFlowRemovedHardTimeOut')", "def _is_expired(self):\n current_time = datetime.now()\n if (current_time > self._expires_at):\n logging.debug('token expired')\n return True\n else:\n return False" ]
[ "0.71649355", "0.48745316", "0.48418942", "0.48061666", "0.47438446", "0.47073004", "0.46888512", "0.46572825", "0.464538", "0.46299013", "0.4616753", "0.46122965", "0.4593519", "0.45350564", "0.45292705", "0.44804588", "0.44363025", "0.44244212", "0.44181457", "0.43888035", "0.4379859", "0.43796602", "0.4358483", "0.43450758", "0.4344636", "0.43400544", "0.43302506", "0.43265548", "0.4322498", "0.43122047" ]
0.8733901
0
Check for imports of deprecated modules
def test_deprecated_modules(self): deprecated_modules_present = False deprecated_modules = [ "game_assets", "models", "world", "modular_assets", ] for path in self.application_files: for module in deprecated_modules: module_text = open(path).read() found_reference = False if "import %s" % module in module_text: found_reference = True if "from %s" % module in module_text: found_reference = True if found_reference: print("Found '%s' reference in %s" % (module, path)) deprecated_modules_present = True self.assertFalse(deprecated_modules_present)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_deprecated_cant_find_module() -> None:\n with patch(\"inspect.getmodule\", return_value=None):\n # This used to raise.\n cv.deprecated(\n \"mars\",\n replacement_key=\"jupiter\",\n default=False,\n )\n\n with patch(\"inspect.getmodule\", return_value=None):\n # This used to raise.\n cv.removed(\n \"mars\",\n default=False,\n )", "def check_all():\n for package, version in required_versions.items():\n try:\n module = importlib.import_module(package)\n except ImportError:\n return\n else:\n if StrictVersion(version) > StrictVersion(module.__version__):\n raise RuntimeError(\"Your version of %s is too old - it must be at least %s\" % (\n package,\n version,\n ))", "def check_import():\n print('[GenHub] Checking Python modules.')\n\n basemod = [('yaml', 'pyyaml'), ('pycurl', 'pycurl')]\n devmod = ['pep8', 'pytest', 'pytest-cov', 'coverage']\n\n packages = dict()\n for importname, packagename in basemod:\n try:\n importlib.import_module(importname)\n packages[packagename] = True\n except ImportError:\n packages[packagename] = False\n for packagename in devmod:\n try:\n importlib.import_module(packagename)\n packages[packagename] = True\n except ImportError:\n packages[packagename] = False\n\n rundep = False\n for pkg in packages:\n char = '+'\n msg = 'Installed.'\n if packages[pkg] is False:\n char = '-'\n msg = 'Not installed!'\n rundep = True\n print('%c package %-12s: %s' % (char, pkg, msg))\n if rundep is True:\n print('Please install these dependencies before proceding')\n print('')", "def test_ensureWhenFailedToImport(self):\n modules = {\"m2\": None}\n self.patch(sys, \"modules\", modules)\n ensureNotImported([\"m1\", \"m2\"], \"A message.\", preventImports=[\"m1\", \"m2\"])\n self.assertEqual(modules, {\"m1\": None, \"m2\": None})", "def test_import_allows_multiple_modules_failure(self):\n # Deliberately using modules that will already be imported to avoid side effects.\n feature = LazyImportTester([\"site\", \"sys\", \"_qiskit_module_does_not_exist_\"])\n with mock_availability_test(feature) as check:\n check.assert_not_called()\n self.assertFalse(feature)\n check.assert_called_once()", "def test_ensureWhenNotImported(self):\n modules = {}\n self.patch(sys, \"modules\", modules)\n ensureNotImported([\"m1\", \"m2\"], \"A message.\", preventImports=[\"m1\", \"m2\", \"m3\"])\n self.assertEqual(modules, {\"m1\": None, \"m2\": None, \"m3\": None})", "def _initIsDeprecated(self):\n self._isDeprecated = re.search(\"deprecated\", self._description, re.IGNORECASE) is not None", "def _check_imported_packages(self, node, module_name):\n if not module_name:\n # skip local packages because is not a external dependency.\n return\n if not self.manifest_dict:\n # skip if is not a module of odoo\n return\n if not isinstance(node.parent, astroid.Module):\n # skip nested import sentences\n return\n if self._is_absolute_import(node, module_name):\n # skip absolute imports\n return\n if self._is_module_name_in_whitelist(module_name):\n # ignore whitelisted modules\n return\n isort_obj = isort.SortImports(file_contents='')\n import_category = isort_obj.place_module(module_name)\n if import_category not in ('FIRSTPARTY', 'THIRDPARTY'):\n # skip if is not a external library or is a white list library\n return\n relpath = os.path.relpath(\n node.parent.file, os.path.dirname(self.manifest_file))\n if os.path.dirname(relpath) == 'tests':\n # import errors rules don't apply to the test files\n # since these files are loaded only when running tests\n # and in such a case your\n # module and their 
external dependencies are installed.\n return\n self.add_message('missing-import-error', node=node,\n args=(module_name,))\n\n ext_deps = self.manifest_dict.get('external_dependencies') or {}\n py_ext_deps = ext_deps.get('python') or []\n if isinstance(node, astroid.ImportFrom) and (node.level or 0) >= 1:\n return\n if module_name not in py_ext_deps and \\\n module_name.split('.')[0] not in py_ext_deps:\n self.add_message('missing-manifest-dependency', node=node,\n args=(module_name,))", "def check_module(name):\n return importlib.util.find_spec(name) is not None", "def test_ensureWhenNotImportedDontPrevent(self):\n modules = {}\n self.patch(sys, \"modules\", modules)\n ensureNotImported([\"m1\", \"m2\"], \"A message.\")\n self.assertEqual(modules, {})", "def _check_imports():\n\n optlist = ['ALPSO', 'CONMIN', 'FSQP', 'IPOPT', 'NLPQLP',\n 'NSGA2', 'PSQP', 'SLSQP', 'SNOPT', 'NLPY_AUGLAG', 'NOMAD']\n\n for optimizer in optlist[:]:\n try:\n __import__('pyoptsparse', globals(), locals(), [optimizer], 0)\n except ImportError:\n optlist.remove(optimizer)\n\n return optlist", "def checkImport(self):\r\n for imp in self.cap_file.Import.packages:\r\n if a2s(imp.aid) not in export_refs:\r\n return False\r\n return True", "def check_imports():\n try:\n import dns # pylint: disable=C0415,W0611 # noqa: F401\n import ecdsa # pylint: disable=C0415,W0611 # noqa: F401\n import google.protobuf # pylint: disable=C0415,W0611 # noqa: F401\n import jsonrpclib # pylint: disable=C0415,W0611 # noqa: F401\n import pyaes # pylint: disable=C0415,W0611 # noqa: F401\n import qrcode # pylint: disable=C0415,W0611 # noqa: F401\n import requests # pylint: disable=C0415 # noqa: F401\n except ImportError as i_e:\n sys.exit(\"Error: %s. Try 'sudo pip install <module-name>'\" % str(i_e))\n from google.protobuf import descriptor # pylint: disable=C0415,W0611 # noqa: F401\n from google.protobuf import message # pylint: disable=C0415,W0611 # noqa: F401\n from google.protobuf import reflection # pylint: disable=C0415,W0611 # noqa: F401\n from google.protobuf import ( # pylint: disable=C0415,W0611 # noqa: F401\n descriptor_pb2,\n )\n from jsonrpclib import ( # pylint: disable=C0415,W0611 # noqa: F401\n SimpleJSONRPCServer,\n )\n\n # make sure that certificates are here\n certs = requests.utils.DEFAULT_CA_BUNDLE_PATH\n if not os.path.exists(certs):\n raise AssertionError(\"Certificates not found\")", "def _check_deprecated(self, name, current, deprecated):\n if name in deprecated and name not in self._emitted_deprecations:\n self._emitted_deprecations.add(name)\n current = (current[0] or 'DEFAULT', current[1])\n format_dict = {'dep_option': name[1], 'dep_group': name[0],\n 'option': current[1], 'group': current[0]}\n _report_deprecation(self._deprecated_opt_message, format_dict)", "def _CheckForRequiredImports(self, module):\n try:\n reqMods = module.required_modules # Attempt to find required_modules variable\n\n except AttributeError:\n return # Assume no imports are needed if variable not found.\n\n for modName in reqMods:\n self._ImportSystemModule(modName)", "def _does_require_deprecation(self):\n\n for index, version_number in enumerate(self.current_version[0][:2]):\n # We loop through the 2 last elements of the version.\n\n if version_number > self.version_yaml[index]:\n # The currently read version number is greater than the one we have in\n # the version.yaml.\n\n # We return True.\n return True\n\n # We return False, we do not need to deprecate anything.\n return False", "def test_modules(self):\n for mod in 
self.expected_modules:\n try:\n __import__(mod)\n except ImportError:\n raise", "def test_redirection_weldx_widgets_not_found():\n orig_import = __import__ # Store original __import__\n\n def import_mock(name, *args, **kwargs):\n if \"weldx_widgets\" in name:\n raise ModuleNotFoundError(\"weldx_widgets not found\")\n if \"matplotlib\" in name:\n raise ModuleNotFoundError(\"matplotlib not found\")\n return orig_import(name, *args, **kwargs)\n\n pattern = \".*weldx_widget.*unavailable\"\n\n with patch(\"builtins.__import__\", side_effect=import_mock):\n with pytest.warns(match=pattern):\n import weldx.visualization as vs\n\n # ensure that using declared features emits the warning again.\n for name in vs.__all__:\n with pytest.warns(match=pattern):\n obj = getattr(vs, name)\n obj()", "def test_deps( deps ):\n\n success = []\n error = []\n for dep in deps:\n mod_name = dep[0]\n\n try:\n mod = __import__( mod_name )\n except ImportError:\n print \"FAILURE: Could not import\", mod_name\n error.append( mod_name )\n continue\n\n try:\n mod_version = mod.__version__.split('.')\n requested_mod_version = dep[1].split('.')\n for i in range( len( requested_mod_version ) ):\n if int( mod_version[i] ) < int( requested_mod_version[i] ):\n raise ImportError\n except ImportError:\n print \"FAILURE: Module\", mod_name, \"needs version\", requested_mod_version, \"but version\", mod_version, \"found\"\n error.append( mod_name )\n continue\n except AttributeError:\n# no .__version__\n pass\n\n print \"Success: \", mod_name\n success.append( mod_name )\n\n return ( success, error )", "def test_check_module(self) -> None:\n check_module(\"os\")", "def checklib(module):\n import imp\n for mod in module:\n try:\n imp.find_module(mod)\n ret = 1\n except ImportError as imperror:\n print(imperror)\n ret = 0\n return ret", "def check_dependencies(cls):\n\n missing = []\n for name in cls.DEPENDENCIES:\n try:\n import_module(name)\n except ModuleNotFoundError:\n missing.append(name)\n\n if any(missing):\n msg = ('The sup3r stitching module depends on the following '\n 'special dependencies that were not found in the active '\n 'environment: {}'.format(missing))\n logger.error(msg)\n raise ModuleNotFoundError(msg)", "def _check_deprecated(self, dest: str, kwargs, print_warning: bool = True) -> None:\n removal_version = kwargs.get(\"removal_version\", None)\n if removal_version is not None:\n warn_or_error(\n removal_version=removal_version,\n entity=f\"option '{dest}' in {self._scope_str()}\",\n start_version=kwargs.get(\"deprecation_start_version\", None),\n hint=kwargs.get(\"removal_hint\", None),\n print_warning=print_warning,\n )", "def ModuleAvailability(module_name):\r\n\r\n try:\r\n imp.find_module(module_name)\r\n return True\r\n except ImportError:\r\n return False", "def check_pkg_consistency():\n pass", "def unavailable_importer(**kwargs):\n return LazyImportTester(\"_qiskit_this_module_does_not_exist_\", **kwargs)", "def supports_ordinary_make_module_imports(self):\n return True", "def find_deprecated_usages(\n schema: GraphQLSchema, ast: DocumentNode\n) -> List[GraphQLError]:\n\n type_info = TypeInfo(schema)\n visitor = FindDeprecatedUsages(type_info)\n visit(ast, TypeInfoVisitor(type_info, visitor))\n return visitor.errors", "def try_import(module_name):\n have_module = True\n try:\n importlib.import_module(module_name)\n except ImportError:\n logging.warn(\"Module '%s' cannot be imported, certain system information will not be available\", module_name)\n have_module = False\n return have_module", "def 
has_warnings(self) -> bool:" ]
[ "0.7030158", "0.6704425", "0.6590874", "0.6568182", "0.65557975", "0.655543", "0.6490232", "0.6423565", "0.6423465", "0.64211744", "0.641312", "0.6384087", "0.6377264", "0.6273178", "0.62635475", "0.62426454", "0.6239808", "0.6232984", "0.6232351", "0.6216431", "0.6201774", "0.61052775", "0.6083356", "0.60820717", "0.6066837", "0.6040095", "0.6040064", "0.6016532", "0.60070544", "0.5980884" ]
0.806409
0
Given 3 points or arrays, and their corresponding height coordinates, calculate the maximum of the Lagrange polynomial interpolation of those points. Useful for fast calculations.
def interpolate_max(arr, heights=(0., 1., 2.)): if len(arr) != 3: return None y1, y2, y3 = arr x1, x2, x3 = heights x1 = float(x1) x2 = float(x2) x3 = float(x3) num = -(y1*(x2 - x3)*(-x2 - x3) + y2*(x1 - x3)*(x1 + x3) + y3*(x1 - x2)*(-x1 - x2)) den = 2. * (y1*(x2 - x3) - y2*(x1 - x3) + y3*(x2 - x3)) non_zero_den = np.array(den != 0, dtype=bool) zero_den = np.array(den == 0, dtype=bool) # print zero_den max_heights = np.zeros(num.shape, dtype=float) old_err_state = np.seterr(divide='raise') ignore_states = np.seterr(**old_err_state) max_heights = np.copy(num) max_heights[non_zero_den] = max_heights[non_zero_den]/den[non_zero_den] max_heights[zero_den] = 0 # print np.isnan(max_heights).sum() # The maximum of the interpolation may lie outside the given height # values. If so, ouput the highest value from the data. i = np.logical_or( max_heights > max(heights), max_heights < min(heights)) max_heights[i] = np.argmax(arr, axis=0)[i] return max_heights
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def max_height(i_vy , g , i_h):\n t = i_vy/g\n return float(i_h + (i_vy*t)-(.5*g*math.pow(t,2)))", "def maxArea(height):\n num_pt = len(height)\n max_area = 0\n for i in range(num_pt):\n for j in range(i + 1, num_pt):\n h = min(height[i], height[j])\n a = h * (j - i)\n if a > max_area:\n max_area = a\n return max_area", "def compute_max_ei_point(\n points: numpy.ndarray | Sequence[numpy.ndarray],\n below_likelis: Sequence[float] | numpy.ndarray,\n above_likelis: Sequence[float] | numpy.ndarray,\n) -> numpy.ndarray:\n max_ei = -numpy.inf\n point_index = 0\n for i, (lik_b, lik_a) in enumerate(zip(below_likelis, above_likelis)):\n ei = lik_b - lik_a\n if ei > max_ei:\n max_ei = ei\n point_index = i\n return points[point_index]", "def _lagrange2(x, y):\n\n def P(x_ip):\n total = 0\n n = len(x)\n for i in range(0, n):\n\n def g(i, n):\n tot_mul = 1\n for j in range(0, n):\n if i == j:\n continue\n if x[i] == x[j]:\n log.fatal(\n f\"Leads to division by zero (x = {x[i]}). Identical values given in x array. \"\n \"For example by using Lagrange interpolation for precise orbit, \"\n \"check if identical observation epochs are given in SP3 file\"\n )\n tot_mul *= (x_ip - x[j]) / float(x[i] - x[j])\n return tot_mul\n\n total += y[i] * g(i, n)\n return total\n\n return P", "def interp_z(array, heights, goal):\n\n f = interpolate.interp1d(heights, array, fill_value='extrapolate')\n return f(goal)", "def trap(height: List[int]) -> int:\n # No heights passed!\n if not height:\n return 0\n # Max from left\n max_L = 0\n L = len(height)\n left = [0] * L\n for i in range(L):\n if height[i] > max_L:\n max_L = height[i]\n left[i] = max_L\n # Max from right\n max_R = 0\n right = [0] * L\n for i in range(L-1, -1, -1):\n if height[i] > max_R:\n max_R = height[i]\n right[i] = max_R\n # Get water height / area at each point on map\n area = 0\n for i in range(1, L-1):\n area += max(0, min(left[i-1], right[i+1]) - height[i])\n return area", "def three_array_max(array_list: List[np.ndarray]) -> np.ndarray:\n temp = np.maximum(array_list[0], array_list[1])\n all_maxs = np.maximum(temp, array_list[2])\n\n return all_maxs", "def lagrangePoints(mu):\n \n # define l = 1-mu\n l = 1 - mu\n \n # collinear points\n def eqL1(x):\n fval = x**5 + 2*(mu-l)*x**4 + (l**2-4*l*mu+mu**2)*x**3 + (2*mu*l*(l-mu)+mu-l)*x**2 + (mu**2*l**2+2*(l**2+mu**2))*x + mu**3-l**3\n #fval = gamma**5 - (3-mu)*gamma**4 + (3-2*mu)*gamma**3 - mu*gamma**2 + 2*mu*gamma - mu\n return fval\n sol_l1 = optimize.root(eqL1, 0.5, method='hybr')\n l1 = np.array([sol_l1.x[0] , 0, 0])\n \n def eqL2(x):\n fval = x**5 + 2*(mu-l)*x**4 + (l**2-4*l*mu+mu**2)*x**3 + (2*mu*l*(l-mu)-(mu+l))*x**2 + (mu**2*l**2+2*(l**2-mu**2))*x - (mu**3+l**3)\n #fval = gamma**5 + (3-mu)*gamma**4 + (3-2*mu)*gamma**3 - mu*gamma**2 - 2*mu*gamma - mu\n return fval\n sol_l2 = optimize.root(eqL2, 1.5, method='hybr')\n l2 = np.array([sol_l2.x[0] , 0, 0])\n \n def eqL3(x):\n fval = x**5 + 2*(mu-l)*x**4 + (l**2-4*mu*l+mu**2)*x**3 + (2*mu*l*(l-mu)+(l+mu))*x**2 + (mu**2*l**2+2*(mu**2-l**2))*x + l**3+mu**3\n return fval\n sol_l3 = optimize.root(eqL3, -1, method='hybr')\n l3 = np.array([sol_l3.x[0] , 0, 0])\n \n # equilateral points\n # L4\n l4 = np.array([np.cos(np.pi/3) - mu , np.sin(np.pi/3), 0])\n # L5\n l5 = np.array([np.cos(np.pi/3) - mu , -np.sin(np.pi/3), 0])\n \n return _lagrangePointsReturn(l1,l2,l3,l4,l5)", "def H(x, X, Y, dY):\n\n def L(i):\n #return p[i] * (x ** i)\n p = [(x - X[i]) / (X[j] - X[i]) for j in range(n) if j != i]\n return reduce(op.mul, p)\n\n def dL(i):\n #return d[i-1] * (x 
** (i-1))\n if i < n-1:\n return (Y[i+1] - Y[i]) / (X[i+1] - X[i])\n else:\n return (Y[i] - Y[i-1]) / (X[i] - X[i-1])\n\n def A(i):\n return (1 - 2 * (x - X[i]) * dL(i)) * (L(i) ** 2)\n\n def B(i):\n return (x - X[i]) * (L(i) ** 2)\n\n assert(len(X) != 0 and len(X) == len(Y)), 'Quantidade de valores em X e Y diferentes'\n n = len(X)\n #p = interpolate.lagrange(X, Y)\n #d = polyder(p)\n h1 = sum(A(i) * Y[i] for i in range(n))\n h2 = sum(B(i) * dY[i] for i in range(n))\n return h1 + h2", "def Interpolate(ax, ay, x, npoints):\r\n\r\n assert(ax[1]>ax[0]) # test for ascending order, at least for first point\r\n \r\n if (verbose): \r\n print 'interpolate/extrapolate to x=',x,', npoints=',npoints\r\n\r\n # Find best data points to use, based on which are closest to \r\n # requested point x. Will find <npoints> (or fewer) best data points and \r\n # return as an array.\r\n ibest = FindBest(ax,x,npoints)\r\n npoints = len(ibest) # make sure npoints is updated in case was reduced\r\n if (verbose): \r\n print 'ibest',ibest\r\n\r\n # Build the polynomial y(x), evaluated at the point x.\r\n y = 0.0\r\n for i in range(npoints): # do i=0,npoints-1\r\n li = 1.0\r\n ni = ibest[i] # index to ith best point\r\n # build up li[x] term, evaluated at the point x\r\n for j in range(npoints): # do j=0,npoints-1\r\n if (i != j): # exclude j=i term\r\n nj = ibest[j] # index to jth best point\r\n li = li*(x-ax[nj])/(ax[ni]-ax[nj])\r\n y = y+ay[ni]*li\r\n \r\n return y", "def interpol(x, y, newX):\r\n \r\n #// Bracket newX:\r\n p1 = 0 \r\n p2 = 1\r\n x1 = x[p1]\r\n x2 = x[p2]\r\n\r\n for i in range(1, len(x)):\r\n if (x[i] >= newX): \r\n #// Found upper bracket\r\n p2 = i\r\n p1 = i - 1\r\n x2 = x[p2]\r\n x1 = x[p1]\r\n break\r\n \r\n \r\n\r\n step = x2 - x1\r\n\r\n #//Interpolate\r\n #//First order Lagrange formula\r\n #// newY = y[1][p2] * (newX - x1) / step\r\n #// + y[1][p1] * (x2 - newX) / step;\r\n \r\n newY = y[p2] * (newX - x1) / step \\\r\n + y[p1] * (x2 - newX) / step\r\n\r\n #//System.out.println(\"Interpol: p1, p2, x1, x2, y1, y2, newX, newY: \" + \r\n #// p1 + \" \" + p2 + \" \" + x1 + \" \" + x2 + \" \" + y[1][p1] + \" \" + y[1][p2] + \" \" + newX + \" \" + newY + \" \");\r\n return newY", "def maximum_slope(times, magnitudes):\n max_slope = None\n\n mag_diffs = magnitudes[1:] - magnitudes[0:-1]\n time_diffs = times[1:] - times[0:-1]\n\n slopes = np.divide(mag_diffs, time_diffs)\n\n max_slope = np.max(slopes[np.logical_not(np.isinf(slopes))])\n\n return max_slope", "def height(self, x):\n\t\treturn np.interp(x, self.x, self.z)", "def lagrange_poly(x, xp, fp):\n\n f = 0.0\n \n # sum over points\n m = 0\n while (m < len(xp)):\n\n # create the Lagrange basis polynomial for point m \n l = None\n\n n = 0\n while (n < len(xp)):\n if n == m:\n n += 1\n continue\n\n if l == None:\n l = (x - xp[n])/(xp[m] - xp[n])\n else:\n l *= (x - xp[n])/(xp[m] - xp[n])\n\n n += 1\n\n \n f += fp[m]*l\n\n m += 1\n\n return f", "def submax(left, middle, right):\n L = middle - left # L and R are both positive if middle is the\n R = middle - right # observed max of the integer samples\n return 0.5 * (R - L) / (R + L)\n # Derivation: Consider a quadratic q(x) := P(0) - P(x). Then q(x) has\n # two roots, one at 0 and one at z, and the extreme is at (0+z)/2\n # (i.e. 
at z/2)\n # q(x) = bx*(x-z) # a may be positive or negative\n # q(1) = b*(1 - z) = R\n # q(-1) = b*(1 + z) = L\n # (1+z)/(1-z) = L/R (from here it's just algebra to find a)\n # z + 1 = R/L - (R/L)*z\n # z*(1+R/L) = R/L - 1\n # z = (R/L - 1)/(R/L + 1) = (R-L)/(R+L)", "def fitMax(x,y,n=2):\n #-- position of max\n kmax = y.argmax()\n\n #-- 5 points around max\n k = range(kmax-n, kmax+n)\n k = np.array(k)\n #-- slide if to close to the edges\n if k.max()>(len(y)-2):\n k -= (len(y)-k.max())+10\n #print ' ->', k\n if k.min()<0:\n #print 'CORR:', k, '(', len(y), ')'\n k -= k.min()\n #print ' ->', k\n #-- fit poly #2\n c = np.polyfit(x[k], y[k], 2)\n xmax = np.clip(-c[1]/(2*c[0]), x.min(), x.max())\n return xmax", "def lagrange(x, w):\n M = len(x)\n p = poly1d(0.0)\n for j in xrange(M):\n pt = poly1d(w[j])\n for k in xrange(M):\n if k == j:\n continue\n fac = x[j]-x[k]\n pt *= poly1d([1.0, -x[k]])/fac\n p += pt\n return p", "def structured_maximum(x, y):\r\n # see decorator for function body\r", "def lagrange(xint, yint, points):\n l_basis = []\n for i, x in enumerate(xint):\n denom = np.prod([x - xk for xk in xint if xk != x])\n numer = 1\n for xk in xint:\n if xk != x:\n numer *= (points - xk)\n l_basis.append(numer/denom)\n p = sum([y*l_basis[i] for i,y in enumerate(yint)])\n\n return p", "def get_xmax(self, start, hours, param):\n\n # Process variable\n data = self.get_hour_data(start, param)\n for hour in range(1, hours):\n try:\n data = np.amax([data, self.get_hour_data(start + hour, param)],\n axis=0)\n except ValueError as e:\n continue\n return data", "def max_pairwise_product_linear(array):\n\n if len(array) <= 1:\n return 0\n\n two_biggest_values = [0, 0]\n\n for element in array:\n if element > two_biggest_values[0]:\n two_biggest_values[0] = element\n elif element > two_biggest_values[1]:\n two_biggest_values[1] = element\n\n return two_biggest_values[0] * two_biggest_values[1]", "def get_max(x, y, z):\n if isinstance(self.results_array[x][y][z], tuple):\n num_zeros = self.tup_max_length - len(self.results_array[x][y][z])\n if num_zeros != 0:\n print('Number of zeros: ', num_zeros)\n hist_arr = np.array(self.results_array[x][y][z])\n maxes.append(max(hist_arr))", "def extreme_points_area(x):\n n = len(x)\n\n pos_min = np.argmin(x)\n pos_max = np.argmax(x)\n\n x_iter = x[1:-1]\n if len(x_iter):\n xinit = 0\n xmin = pos_min\n xmax = pos_max\n xlast = n\n\n yinit = x[0]\n ymin = x[pos_min]\n ymax = x[pos_max]\n ylast = x[-1]\n\n triangle1 = np.array([[xinit, xmin, xmax],\n [yinit, ymin, ymax], [1, 1, 1]])\n triangle2 = np.array([[xmin, xmax, xlast],\n [ymin, ymax, ylast], [1, 1, 1]])\n\n area_1 = 0.5 * np.abs(np.linalg.det(triangle1))\n area_2 = 0.5 * np.abs(np.linalg.det(triangle2))\n sum_area = area_1 + area_2\n\n p_area = sum_area / ((ymax - ymin) * n)\n else:\n p_area = 0\n\n return p_area", "def maxArea(self, height: List[int]) -> int:\n max_area = 0\n l,r = 0,len(height)-1\n while l<r:\n curr_area = min(height[l],height[r])*(r-l)\n max_area = max(max_area,curr_area)\n if height[l]<height[r]:\n l += 1\n else:\n r -= 1\n return max_area", "def intrpf(xi,x,y):\n\n # calculate yi = p(xi) using Lagrange polynomial \n yi = ((xi-x[1])*(xi-x[2])/((x[0]-x[1])*(x[0]-x[2]))) * y[0]\\\n +((xi-x[0])*(xi-x[2])/((x[1]-x[0])*(x[1]-x[2]))) * y[1]\\\n +((xi-x[0])*(xi-x[1])/((x[2]-x[0])*(x[2]-x[1]))) * y[2]\n return yi", "def Pol_Lagrange(x,datos_x,datos_y):\n puntos=[]\n for p in x: \n n=datos_x.shape[0] - 1 #n=numero de punto o datos menos 1 .\n l_s=[]\n for k in range(datos_x.shape[0]):\n 
producto=1\n for j in range(datos_x.shape[0]):\n if k!=j:\n producto = producto*((p-datos_x[j])/(datos_x[k]-datos_x[j]))\n l_s.append(producto)\n l_s=np.array(l_s)\n puntos.append(datos_y@l_s)\n puntos=np.array(puntos)\n return puntos", "def getMaxima(x, y):\n# mx_x = (np.abs(np.min(x)) + np.max(x)) / 2\n# mx_y = (np.abs(np.min(y)) + np.max(y)) / 2\n# \n mx_x = np.max(x)\n mx_y = np.max(y)\n return mx_x, mx_y", "def solve(arr):\n for i in range(len(arr) - 2, -1, -1):\n arr[i] = [max_subtriangle(arr, i, j) for j in range(len(arr[i]))]\n return arr[0][0]", "def interpolate(m):\n \n x1 = m[0]\n x2 = m[1]\n x3 = m[2]\n y1 = m[3]\n y2 = m[4]\n y3 = m[5]\n denom = (x1 - x2)*(x1 - x3)*(x2 - x3)\n A = (x3 * (y2 - y1) + x2 * (y1 - y3) + x1 * (y3 - y2)) / denom\n B = (x3**2 * (y1 - y2) + x2**2 * (y3 - y1) + x1**2 * (y2 - y3)) / denom\n C = (x2 * x3 * (x2 - x3) * y1 + x3 * x1 * (x3 - x1) * y2 + x1 * x2 * (x1 - x2) * y3) / denom\n xext = -B/(2*A)\n yext = A*xext**2 + B*xext + C\n \n return(np.array([xext,yext]))", "def interpol(self,x,y,x1):\n \n N = len(x)\n i = np.minimum(np.maximum(np.searchsorted(x,x1,side='right'),1),N-1)\n xl = x[i-1]\n xr = x[i]\n yl = y[i-1]\n yr = y[i]\n y1 = yl + (yr-yl)/(xr-xl) * (x1-xl)\n above = x1 > x[-1]\n below = x1 < x[0]\n y1 = np.where(above,y[-1] + (x1 - x[-1]) * (y[-1]-y[-2])/(x[-1]-x[-2]), y1)\n y1 = np.where(below,y[0],y1)\n \n return y1, i" ]
[ "0.5873291", "0.57678014", "0.57673573", "0.5748936", "0.5686612", "0.5632311", "0.56151384", "0.5560464", "0.55602175", "0.55535275", "0.5540132", "0.5523416", "0.5505528", "0.54992855", "0.54948914", "0.5446044", "0.540624", "0.5403315", "0.5367828", "0.53601944", "0.5334236", "0.5330695", "0.5303005", "0.52992976", "0.5286483", "0.52843434", "0.5277828", "0.52587295", "0.52426213", "0.52384955" ]
0.75891775
0
Measures the relative focus of each pixel using the Sobel operator.
def focus(self, smooth=0): if self.image is None: self.load_image() # image = self.load_image() # print self.image if not self.bw: gray = rgb_2_gray(self.image) else: gray = self.image sx = ndimage.filters.sobel(gray, axis=0, mode='constant') sy = ndimage.filters.sobel(gray, axis=1, mode='constant') sob = np.hypot(sx, sy) self.image = None self.sob = sob if smooth > 0: sob = ndimage.filters.gaussian_filter(sob, sigma=smooth) return sob
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def focus_stack(self, smooth=0, interpolate_heights=True, use_all=False,\n layer_smooth=0):\n if len(self.layers) == 0:\n print(\"no images were properly imported\")\n else:\n if use_all:\n self.images = []\n self.focuses = []\n for layer in self.layers:\n self.images += [layer.load_image()]\n self.focuses += [layer.focus(smooth=layer_smooth)]\n self.focuses = np.array(self.focuses)\n self.images = np.array(self.images)\n if interpolate_heights:\n print(\"this is not available yet\")\n else:\n top_focus = np.argmax(self.focuses, axis=0)\n self.stack = np.zeros(self.images.shape[1:],\n dtype='uint8')\n for val in set(top_focus.flatten()):\n coords = top_focus == val\n self.stack[coords] = self.images[val][coords]\n else:\n first = self.layers[0].load_image()\n if first.ndim == 3:\n l, w, d = first.shape\n images = np.zeros((3, l, w, d), first.dtype)\n elif first.ndim == 2:\n l, w = first.shape\n images = np.zeros((3, l, w), first.dtype)\n focuses = np.zeros((3, l, w), dtype=float)\n heights = focuses[0].astype(int)\n images[0] = first\n previous = self.layers[0].focus()\n focuses[0] = previous\n better = np.greater(focuses[0], focuses[1])\n x = 1\n for l in self.layers[1:]:\n img = l.load_image()\n foc = l.focus(smooth=layer_smooth)\n focuses[2, better] = foc[better]\n images[2, better] = img[better]\n better = np.greater(foc, focuses[1])\n focuses[1, better] = foc[better]\n images[1, better] = img[better]\n heights[better] = x\n focuses[0, better] = previous[better]\n previous = foc\n x += 1\n print_progress(x, len(self.layers))\n self.focuses = focuses\n self.images = images\n h = interpolate_max(focuses)\n self.heights = (heights-1) + h\n # h, w = self.heights.shape\n # xs, ys = np.arange(w), np.arange(h)\n # xgrid, ygrid = np.meshgrid(xs, ys)\n # vals = np.array(\n # [xgrid.flatten(), ygrid.flatten(), self.heights.flatten()]).T\n # vals = np.array(\n # [xgrid.flatten(), ygrid.flatten(), sub.flatten()]).T\n # hull = spatial.ConvexHull(vals.max() - vals)\n # xs, ys, zs = vals[hull.vertices].T\n # img = np.zeros(self.heights.shape, dtype=float)\n # img[ys.astype(int), xs.astype(int)] = zs\n # grid = interpolate.griddata(np.array([ys, xs]).T, zs,\n # (xgrid, ygrid), method='linear')\n # fig = plt.figure()\n # ax = fig.add_subplot(111, projection='3d')\n # ax.scatter(xgrid, ygrid, self.heights)\n if interpolate_heights:\n down = np.floor(h)\n up = np.ceil(h)\n up[up == 0] = 1\n if self.bw:\n down = down.flatten().reshape(first.shape)\n up = up.flatten().reshape(first.shape)\n h = h.flatten().reshape(first.shape)\n else:\n down = down.flatten().repeat(3).reshape(first.shape)\n up = up.flatten().repeat(3).reshape(first.shape)\n h = h.flatten().repeat(3).reshape(first.shape)\n down_img = np.zeros(first.shape)\n up_img = np.zeros(first.shape)\n for x in range(3):\n down_img[np.where(down == x)] = images[x][\n np.where(down == x)]\n up_img[np.where(up == x)] = images[x][\n np.where(up == x)]\n stack = (up - h)*down_img + (h - down)*up_img\n stack[np.where(h == 0)] = images[0][np.where(h == 0)]\n stack[np.where(h == 1)] = images[1][np.where(h == 1)]\n stack[np.where(h == 2)] = images[2][np.where(h == 2)]\n self.stack = stack\n if smooth > 0:\n self.smooth(smooth)\n else:\n self.stack = self.images[1]\n print(\"done\")", "def calc_power_in_main_focus(field, focus_size):\r\n # 1. Вычленим окно с основным фокусом, используя focus_size\r\n # 2. 
Просуммируем в окне все, что больше уровня -6 дБ\r\n poynt_in_points = 0.5*numpy.real(field.p * numpy.conj(field.vn))\r\n\r\n # Maximum\r\n max_focus = poynt_in_points.max()\r\n print(\"max_focus = \", max_focus)\r\n print(\"max_focus p/p0= \", numpy.absolute(field.p).max())\r\n max_indices = numpy.argwhere(max_focus == poynt_in_points)\r\n max_index_x = max_indices[0][0]\r\n max_index_y = max_indices[0][1]\r\n print(\"max_indices = \", max_index_x, max_index_y)\r\n focus_size_index = focus_size // field.dx\r\n print(\"focus_size_index = \", focus_size_index)\r\n field_window = field\r\n field_window.p = field.p[(max_index_x-focus_size_index):(max_index_x+focus_size_index),(max_index_y-focus_size_index):(max_index_y+focus_size_index),:]\r\n field_window.vn = field.vn[(max_index_x-focus_size_index):(max_index_x+focus_size_index),(max_index_y-focus_size_index):(max_index_y+focus_size_index),:]\r\n field_window.x = field.x[(max_index_x-focus_size_index):(max_index_x+focus_size_index)]\r\n field_window.y = field.y[(max_index_y-focus_size_index):(max_index_y+focus_size_index)]\r\n\r\n import draw_plane_field\r\n draw_plane_field.draw_XY(field_window)\r\n\r\n poynt_in_points_window = 0.5*numpy.real(field_window.p * numpy.conj(field_window.vn))\r\n\r\n # print(field_window.p)\r\n # power_array = numpy.where(poynt_in_points_window>=0.5*max_focus)\r\n # print(power_array)\r\n # power = numpy.sum(power_array)\r\n # power *= field.one_pixel_area\r\n # print(power)\r\n # return power\r\n\r\n w = 0.0\r\n max_focus = poynt_in_points_window.max()\r\n for j, y in enumerate(field_window.y):\r\n for i, x in enumerate(field_window.x):\r\n if poynt_in_points_window[i,j,0] >= 0.25*max_focus:\r\n w += poynt_in_points_window[i,j,0]\r\n\r\n w *= field_window.one_pixel_area\r\n print(\"Power in Focus -6dB = \", w)\r\n # w = field.p_amp(i, j, 0) * field.p_amp(i, j, 0)\r\n \r\n # powerOnPlane *= field.one_pixel_area\r\n # # powerOnRibs *= field.one_pixel_area\r\n\r\n # print(\"PowerOnPlane = \" + str(powerOnPlane))\r", "def skywalker(this, **kargs):\n\t\t\n\t\t# Arguments\n\t\tbin = kargs.get('bin', this._BINARY)\n\t\toffshore = kargs.get('offshore', 5)\n\t\tminSize = kargs.get('minSize', 3)\n\t\tblur = kargs.get('blur', False)\n\t\t\n\t\tif blur: # Flou de test\n\t\t\tkernel = np.ones((3, 3), np.float32)/9\n\t\t\tbin = cv2.filter2D(bin, -1, kernel)\n\t\t\n\t\t# On duplique l'image pour le rendu final\n\t\tscan = EmptyFrom(bin, 3)\n\t\tscan[:,:,0] = scan[:,:,1] = scan[:,:,2] = bin\n\t\tthis._SCAN = scan\n\t\t\n\t\tstep = 0 # Compteur de pas dans le vide\n\t\tstart, end = None, None\n\t\t\n\t\t# Dimensions de l'image à scanner\n\t\tsize = D2Point(width(bin), height(bin))\n\t\tratio = size if minSize < 1 else 1\n\t\t\n\t\t# Scan pixel par pixel, en partant du bas\n\t\tfor v in xrange(int(size.y)-1, -1, -1):\n\t\t\tfor u in xrange(int(size.x)):\n\t\t\t\n\t\t\t\tif bin.item((v, u)): # Si un pixel != 0:\n\t\t\t\t\tscan[v,u] = [0, 0, 255] # Rouge.\n\t\t\t\t\tstep = 0 # On reset le jump\n\t\t\t\t\t\n\t\t\t\t\t# Si c'est le premier\n\t\t\t\t\tif not start:\n\t\t\t\t\t\tstart = D2Point(u, v)\n\t\t\t\t\t\tend = D2Point(u, v)\n\t\t\t\t\telse: # On trace\n\t\t\t\t\t\tend.x, end.y = u, v\n\t\t\t\t\n\t\t\t\telif end:\n\t\t\t\t\tif step < offshore:\n\t\t\t\t\t\tscan[v,u] = [0, 255, 255] # Jaune\n\t\t\t\t\t\tstep += 1 # On continue\n\t\t\t\t\telif abs((start - end)/ratio) < minSize:\n\t\t\t\t\t\tstart, end = None, None\n\t\t\t\t\telse: break\n\t\t\t\t# elif end: break\n\t\t\t###\n\t\t\tif end: break\n\t\t###\n\t\t\n\t\tif end: # Si 
on a trouvé une fin\n\t\t\t\n\t\t\t# Point médian = doigt\n\t\t\tresult = start % end\n\t\t\t\n\t\t\t# Visuel\n\t\t\tscan[:,result.x,:] = [0, 255, 0] # On trace une bande verte\n\t\t\tscan[result.y,:,:] = [0, 127, 0] # On trace une autre bande verte\n\t\t\t\n\t\t\t# Reformatage\n\t\t\tresult /= size-1 # On remet en ratio d'image\n\t\t\tresult.x = 1 - result.x # On inverse le côté de mesure\n\t\t\t\n\t\t\t# Stockage\n\t\t\tthis._DETECTED = result # On stocke le point détecté\n\t\t\tthis._BOTTOM = result.y == 1 # On clic ou bien ?\n\t\t\n\t\t# Si rien\n\t\telse:\n\t\t\tresult = None\n\t\t\tthis._BOTTOM = False\n\t\t\n\t\t# Tchao\n\t\treturn result", "def edges(image):\n #store image width and height and initialize new image\n image_width = image['width']\n image_height = image['height']\n new_image = {'height': image['height'], 'width': image['width'], 'pixels': len(image['pixels'])*[0]}\n \n #sobel operator kernels\n kernel_x = {'height': 3, 'width': 3, 'pixels': [-1,0,1,-2,0,2,-1,0,1]}\n kernel_y = {'height': 3, 'width': 3, 'pixels': [-1,-2,-1,0,0,0,1,2,1]}\n \n #creating the filters\n o_x = correlate(image, kernel_x)\n o_y = correlate(image, kernel_y)\n\n #perform relvant calculation for each pixel \n for x in range(image_width):\n for y in range(image_height):\n a = ((get_pixel(o_x, x, y))**2+(get_pixel(o_y, x, y))**2)**0.5\n set_pixel(new_image, x, y, a)\n return round_and_clip_image(new_image)", "def s_pos(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 > self.prob.C - self.deltas[i] and self.prob.Y[i] == 1:\n ayxx = 0\n for j in range(self.prob.num):\n ayxx += self.alphas[j] * self.prob.Y[j] * self.prob.xkernel(self.prob.X[j], self.prob.X[i])\n running_total += 1 - ayxx\n return running_total", "def beam(xb,yb,zb,wx,wy,wavelen):\n\n zRx = np.pi * wx**2 / wavelen\n zRy = np.pi * wy**2 / wavelen \n \n sqrtX = np.sqrt( 1 + np.power(zb/zRx,2) ) \n sqrtY = np.sqrt( 1 + np.power(zb/zRy,2) ) \n intensity = np.exp( -2.*( np.power(xb/(wx*sqrtX ),2) \\\n + np.power(yb/(wy*sqrtY),2) )) / sqrtX / sqrtY\n return intensity", "def sivina(self):\n return (self.r + self.g + self.b) / 3", "def surf_bright(im, coord, minrad=3.):\n r = minrad\n slist = []\n while r < 80:\n aperture = CircularAperture(coord, r=r)\n phot_tab = aperture_photometry(im, aperture)\n s = phot_tab['aperture_sum']\n sb = s/(np.pi * r**2)\n print(sb)\n r += 1", "def refocus(opt_model):\n osp = opt_model['optical_spec']\n\n fld = osp['fov'].fields[0] # assumed to be the axial field\n wvl = osp['wvls'].central_wvl\n\n df_ray, ray_op, wvl = trace_safe(opt_model, [0., 1.], fld, wvl, \n output_filter=None, rayerr_filter='full', \n use_named_tuples=True)\n\n defocus = -df_ray[-1].p[1]/(df_ray[-2].d[1]/df_ray[-2].d[2])\n\n return defocus", "def __GetSobel(self, image):\n # Gradient X.\n gradX = cv2.Sobel(image, cv2.CV_64F, 1, 0)\n\n # Gradient Y.\n gradY = cv2.Sobel(image, cv2.CV_64F, 0, 1)\n\n # Converting back to uint8 dtype.\n absX = cv2.convertScaleAbs(gradX)\n absY = cv2.convertScaleAbs(gradY)\n\n # Merge the horizontal and vertical components.\n return cv2.addWeighted(absX, 0.5, absY, 0.5, 0)", "def edgeDetectionSobel(img: np.ndarray, thresh: float = 0.7) -> (np.ndarray, np.ndarray):\r\n img = cv2.GaussianBlur(img, (3, 3), 0)\r\n\r\n Gx = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])\r\n Gy = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])\r\n thresh = thresh * 255\r\n\r\n smooth_x = cv2.filter2D(img, -1, Gx, borderType=cv2.BORDER_REPLICATE).astype(float)\r\n smooth_y = cv2.filter2D(img, -1, 
Gy, borderType=cv2.BORDER_REPLICATE).astype(float)\r\n\r\n my_sobel_image = np.sqrt(smooth_x ** 2 + smooth_y ** 2)\r\n\r\n sobel_x = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=3)\r\n sobel_y = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=3)\r\n cv_ans = np.sqrt(sobel_x ** 2 + sobel_y ** 2)\r\n\r\n change_by_thresh(my_sobel_image, thresh)\r\n change_by_thresh(cv_ans, thresh)\r\n\r\n return cv_ans, my_sobel_image", "def x(self):\n return np.sum(self.bbox, 0)[0] / 2", "def sobelxy(img, ksize=5):\n\tgray = grayscale(img)\n\treturn cv2.Sobel(gray, cv2.CV_64F, 1, 1, ksize= ksize)", "def sobelX(img):\r\n f = np.array([-1, 0, 1, -2, 0, 2, -1, 0, 1]).reshape([3, 3])\r\n return cv2.filter2D(img, cv2.CV_64F, f)", "def bead_position_pix(im, selem):\n # The x, y coordinates of pixels are nonzero values in selem\n y, x = np.nonzero(selem)\n x = x - selem.shape[1] // 2\n y = y - selem.shape[0] // 2\n\n # Find the center of the bead to pixel accuracy\n peak_flat_ind = np.argmax(im)\n peak_j = peak_flat_ind % im.shape[0]\n peak_i = (peak_flat_ind - peak_j) // im.shape[1]\n\n # Define local neighborhood\n irange = (peak_i - selem.shape[0] // 2, peak_i + selem.shape[0] // 2 + 1)\n jrange = (peak_j - selem.shape[1] // 2, peak_j + selem.shape[1] // 2 + 1)\n\n # Get values of the image in local neighborhood\n z = im[irange[0]:irange[1], jrange[0]:jrange[1]][selem.astype(np.bool)]\n\n # Fit Gaussian\n a, j_subpix, i_subpix, sigma = fit_gaussian(x, y, z)\n\n # Return x-y position\n return np.array([peak_i + i_subpix, peak_j + j_subpix])", "def Sobel(img):\n # use cv2.sobel() function to detect edge in image\n # syntax : cv2.sobel(src, depth, dx, dy, ksize)\n return cv2.Sobel(img, cv2.CV_16U, 1, 0, ksize=5)", "def calibrate(cap, location):\n\n #Poisition and size of sensor\n [x, y, h, w] = location\n\n #show square to user and wait for key\n print(\"please, step away to clear the blue square displayed on screen and press q to continue\")\n while True:\n ret, frame = cap.read()\n cv2.namedWindow('Calibrate',cv2.WINDOW_NORMAL)\n show = cv2.rectangle(frame, (x,y), (x+w,y+h), (255, 0, 0) , 5)\n cv2.imshow('Calibrate', show)\n key = cv2.waitKey(1)\n if key == ord('q'):\n break\n\n #get first image, process and define window previous for iteration\n ret, frame = cap.read()\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n frame = cv2.GaussianBlur(frame, (7,7), 0)\n previous = frame[y:y+w,x:x+h]\n\n #set parameters for mean value of sensor, kernel of erode function,\n sampleNbMean = 50\n xi = np.empty((0, sampleNbMean))\n kernel = np.ones((5,5), np.uint8)\n\n #iterate over each frame until sample number\n for iteration in range(sampleNbMean):\n\n # Capture frame, draw the window and display to the user\n ret, frame = cap.read()\n # Image operation\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n frame = cv2.GaussianBlur(frame, (7,7), 0)\n\n #get present window\n present = frame[y:y+w,x:x+h]\n\n #add sample for mean, add diference of window with prieviuos\n xi = np.append(xi,\n np.sum(\n cv2.erode(\n cv2.bitwise_xor(present,previous), kernel, iterations=1)))\n\n #present image becomes previous before steping into next image\n previous = present\n\n #mean\n mean = np.sum(xi)/len(xi)\n\n #sigma\n sum = 0\n for sample in xi:\n sum += np.power(sample - mean, 2)\n sigma = np.sqrt(sum/len(xi))\n\n #close window\n cv2.destroyWindow('Calibrate')\n\n return mean, sigma", "def paint_focal_axes(self):\n GL.glTranslatef(*self.focus) # translate to focus\n self.paint_axes(self.sigma)\n GL.glTranslatef(*-self.focus) # translate back", "def 
act(self, state, eps=0.):", "def GetBitmapFocus(self):\n\n return self.bmpFocus", "def calculate(self, image, blurred_image):\n # First, use sobel.\n image_grad = cv2.Sobel(image,cv2.CV_64F,1,1,ksize=5)\n blurred_image_grad = cv2.Sobel(blurred_image,cv2.CV_64F,1,1,ksize=5)\n\n np.subtract(image_grad, blurred_image)\n\n return np.mean(np.abs(np.subtract(image_grad, blurred_image_grad)))", "def zeige_auf_sensehat(self):\n if self.sense is not None:\n sense.clear(farbe_tot)\n for zelle in self.lebendig:\n x, y = zelle\n if 0 <= x < 8 and 0 <= y < 8:\n sense.set_pixel(zelle[0], zelle[1]. farbe_lebendig)", "def Focus_beam(Collimated_Pupil, pad_width = 0):\n\n Collimated_Pupil_padded = np.pad(Collimated_Pupil,pad_width=int(pad_width),mode='constant') \n\n f = np.fft.fft2(Collimated_Pupil_padded) #must be complex amplitude going in here\n fshift = np.fft.fftshift(f)\n intensity_image = (np.abs(fshift))**2\n \n return intensity_image", "def get_state(self):\n return convert_x_to_bbox(self.kalman_filter.x)[0].astype(int)", "def bMinusbStar(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 and self.prob.Y[i] == -1:\n ayxx = 0\n for j in range(self.prob.num):\n ayxx += self.alphas[j] * self.prob.Y[j] * self.prob.xkernel(self.prob.X[i], self.prob.X[j])\n abcxx = 0\n for j in range(self.prob.num):\n abcxx += (self.alphas[j] + self.deltas[j]) * self.prob.xkernel(self.prob.X[i], self.prob.X[j])\n abcxx *= (1 / self.prob.gamma)\n running_total += -1 + abcxx - ayxx\n return running_total", "def singledish_observe_image(image, pixel_scale, beam, boundary='fill'):\n\n if hasattr(image, 'wcs'):\n singledish_im = image.convolve_to(beam, boundary=boundary)\n\n else:\n kernel = beam.as_kernel(pixel_scale)\n\n # create the single-dish map by convolving the image with a FWHM=40\" kernel\n # (this interpretation is much easier than the sharp-edged stuff in fourier\n # space because the kernel is created in real space)\n singledish_im = convolution.convolve_fft(image,\n kernel=kernel,\n boundary=boundary,\n fill_value=image.mean())\n\n return singledish_im", "def epix(self):\n return self._epix", "def belt(image):\n\n # Belt Detector\n x, y = circular_detector(image, 70, 80)\n\n return x, y", "def new_ssim(x_img, y_img, delta=1):\n lum, cont, strut = ssim(x_img, y_img)\n blur = blurriness_index(x_img, y_img)\n blur = pow(blur, delta)\n\n fin_score = lum*cont*strut*blur\n\n return fin_score", "def test_baseline_spect(self, b0, x0, a, fwhm):\n x = np.linspace(-1, 1, 100)\n sigma = fwhm / np.sqrt(8 * np.log(2))\n y = a * np.exp(-((x - x0) ** 2) / (2 * sigma**2)) + b0\n\n b0_guess = guess.constant_spectral_offset(y)\n\n self.assertAlmostEqual(b0, b0_guess, delta=0.1)" ]
[ "0.54016566", "0.5381905", "0.53775144", "0.53436303", "0.533169", "0.5328394", "0.51986355", "0.516603", "0.5158025", "0.5139671", "0.512083", "0.504517", "0.5017682", "0.5011666", "0.5008117", "0.49915242", "0.497663", "0.4952939", "0.49101773", "0.48947313", "0.48783606", "0.48724467", "0.48638177", "0.48618612", "0.4860767", "0.48598957", "0.48542136", "0.4843766", "0.4842078", "0.48360023" ]
0.6810263
0
Generates a masking image by thresholding the image. If a background is provided, this will also subtract the background before generating the mask.
def generate_mask(self, thresh=50, b_ground=None):
    img = self.load_image()
    thresh = np.zeros(img.shape, "uint8")
    if b_ground is not None:
        img = img - b_ground
    thresh[img > 25] = 255
    mask = ndimage.morphology.binary_dilation(thresh).astype("uint8")
    self.mask = 255*mask
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_background_mask( img ):\n\t\t\n\tif len( img.shape ) == 3: t = img[0]\n\telif len( img.shape ) == 2: t = img\n\n\tmask = img > filters.threshold_li(t)\n\n\treturn mask", "def dynamic_masking(image):\n image = img_as_float(image)\n background = gaussian_filter(median_filter(image,3),1)\n image[background > threshold_otsu(background)/5.0] = 0.0\n \n return image", "def generate_mask(image, threshold):\n # TODO: Test this and optimize to only include pixels inward of the\n # horizon\n x_pix, y_pix = image.shape\n image_median = np.median(image)\n image_mean = np.mean(image)\n image_std = np.std(image)\n image_max = image.max()\n\n # generate mask\n mask = np.where(threshold < image, False, True)\n return mask", "def add_mask(self, bg, mask):\n # if mask is to tall for the background image, decrease the size by 50%\n if bg.shape[0] < mask.shape[0]:\n mask = cv2.resize(mask, (int(0.5*mask.shape[0]), int(0.5*mask.shape[1])), interpolation=cv2.INTER_AREA)\n h_mask, w_mask = mask.shape[:2]\n h, w = bg.shape[:2]\n \n # select random location for mask\n h_rand = np.random.rand() * 0.9\n h_rand = np.clip(h_rand, 0, 1.0 - h_mask/h)\n h_update = int(h_rand * h)\n w_rand = np.random.rand() * 0.9\n w_rand = np.clip(w_rand, 0, 1.0 - w_mask/w)\n w_update = int(w_rand * w)\n \n # define filter for a mask\n filt = (mask == 0)\n \n # place the mask in the bg img\n mod = bg.copy()\n mod[h_update:h_update+h_mask, w_update:w_update+w_mask, :] *= filt\n mod[h_update:h_update+h_mask, w_update:w_update+w_mask, :] += mask\n \n # yolo dim for mask\n locy = (h_update+h_update+h_mask)/2/h\n locx = (w_update+w_update+w_mask)/2/w\n sizey = (h_mask/h)\n sizex = (w_mask/w)\n \n dim = [locx, locy, sizex, sizey]\n \n return mod, dim", "def clean_mask(mask, background=0):\n kernels = [\n np.array([[ 1, -1, -1], [-1, 1, -1], [-1, -1, -1]]), # top left standalone pixel\n np.array([[-1, -1, 1], [-1, 1, -1], [-1, -1, -1]]), # top right standalone pixel\n np.array([[-1, -1, -1], [-1, 1, -1], [ 1, -1, -1]]), # bottom left standalone pixel\n np.array([[-1, -1, -1], [-1, 1, -1], [-1, -1, 1]]) # bottom right standalone pixel\n ]\n\n proc_masks = [cv2.morphologyEx(mask, cv2.MORPH_HITMISS, kernel).astype(np.bool) for kernel in kernels]\n\n for proc_mask in proc_masks:\n mask[proc_mask] = background\n return mask", "def generate_fg_mask(self, image_bg, image_fg, blur, closing, thresh):\n blur_dims = (2 * blur + 1, 2 * blur + 1)\n bg_blur = cv2.GaussianBlur(image_bg, blur_dims, 0)\n fg_blur = cv2.GaussianBlur(image_fg, blur_dims, 0)\n\n # mask = ||template - frame||^2 > threshold\n diff = cv2.absdiff(bg_blur, fg_blur)\n mask = np.sum(diff**2, axis=2) ** (1.0 / 2) > thresh\n mask = np.array(mask, dtype=np.uint8)\n\n # Fill holes\n if closing > 0:\n element = cv2.getStructuringElement(cv2.MORPH_RECT, (closing, closing))\n mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, element)\n\n return mask", "def threshold_mask(image, threshold):\n image = image.copy()\n if threshold == None:\n threshold = skimage.filters.threshold_isodata(image)\n image[image > threshold] = 255\n image[image <= threshold] = 0\n return image", "def backgroundSubtract(img, flag=0):\n fgbg = cv2.BackgroundSubtractorMOG()\n fgmask = fgbg.apply(img)\n return fgmask", "def mask_glare(img, threshold=175, mask_only=False):\n # if img.dtype is floating but threshold value is still the default\n # this could be generalized\n if np.issubdtype(img.dtype, np.floating) and (threshold == 175):\n threshold = 175 / 255\n # region to inpaint\n inp = (img > threshold)\n\n # get 
a larger area around the specks\n inp = binary_dilation(inp, selem=disk(2))\n\n # remove anything large\n #inp = white_tophat(inp, selem=disk(3))\n\n if mask_only:\n return inp\n else:\n # both the original background *and* these new glared regions\n # are masked\n return ma.masked_array(img, mask=inp)", "def img_process(fgMask):\n backSub = cv.createBackgroundSubtractorKNN()\n kernel1 = cv.getStructuringElement(shape=cv.MORPH_ELLIPSE, ksize=(2,2))\n kernel2 = cv.getStructuringElement(shape=cv.MORPH_ELLIPSE, ksize=(2,2))\n #kernel1 = np.ones((3,3),np.uint8)\n #kernel2 = np.ones((3,3), np.uint8)\n\n fgMask = cv.threshold(fgMask, 230, 255, cv.THRESH_BINARY)[1]\n fgMask = cv.morphologyEx(fgMask, cv.MORPH_OPEN, kernel1,iterations = 2)\n fgMask = cv.dilate(fgMask, kernel2, iterations = 2)\n fgMask = cv.morphologyEx(fgMask, cv.MORPH_CLOSE, kernel2, iterations = 2)\n return fgMask", "def generate_mask(\n self,\n noise_background,\n noise_value,\n generated_points_x,\n generated_points_y,\n xsize,\n ysize,\n ):\n\n # background of noise mask\n img_mask = np.random.randint(\n noise_background[0],\n noise_background[1] + 1,\n (ysize, xsize),\n )\n\n # mask of random value\n img_mask_random = np.random.randint(\n low=noise_value[0],\n high=noise_value[1] + 1,\n size=(ysize, xsize),\n )\n\n # insert random value into background\n img_mask[generated_points_y, generated_points_x] = img_mask_random[generated_points_y, generated_points_x]\n\n return img_mask.astype(\"uint8\")", "def mask_image(image):\n pass", "def make_mask(image_shape, fieldmap, activation_data,\n sigma=0.2, threshold=0.9, alpha=0.1):\n offset, shape, step = fieldmap\n activations = numpy.zeros(image_shape)\n activations[_centered_slice(fieldmap, activation_data.shape)] = (\n activation_data)\n blurred = gaussian_filter(\n activations, sigma=tuple(s * sigma for s in shape), mode='constant')\n maximum = blurred.flatten().max()\n return 1 - (1 - alpha) * (blurred < maximum * 0.9)", "def bgSubMasking(self, frame):\n fgmask = self.bgSubtractor.apply(frame, learningRate=self.bgSubtractorLr)\n\n kernel = np.ones((4, 4), np.uint8)\n # MORPH_OPEN removes noise\n # MORPH_CLOSE closes the holes in the object\n fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel, iterations=2)\n fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel, iterations=2)\n return cv2.bitwise_and(frame, frame, mask=fgmask)", "def apply_mask(im, im_pred):\n r_channel, g_channel, b_channel = cv2.split(im_pred)\n alpha_channel = 127 * np.ones(b_channel.shape, dtype=b_channel.dtype)\n # Make background pixels fully transparent\n alpha_channel -= 127 * np.all(im_pred == np.array([0, 0, 0]), axis=2).astype(b_channel.dtype)\n im_pred = cv2.merge((r_channel, g_channel, b_channel, alpha_channel))\n mask = Image.fromarray(im_pred, mode='RGBA')\n # masked_img = Image.fromarray(im)#array to image\n masked_img=im\n masked_img.paste(mask, box=None, mask=mask)\n # return np.array(masked_img)\n return masked_img", "def get_bareground_mask(bareground_ds, bareground_thresh=60, out_fn=None):\n print(\"Loading bareground\")\n b = bareground_ds.GetRasterBand(1)\n l = b.ReadAsArray()\n print(\"Masking pixels with <%0.1f%% bare ground\" % bareground_thresh)\n if bareground_thresh < 0.0 or bareground_thresh > 100.0:\n sys.exit(\"Invalid bare ground percentage\")\n mask = (l>bareground_thresh)\n #Write out original data\n if out_fn is not None:\n print(\"Writing out %s\" % out_fn)\n iolib.writeGTiff(l, out_fn, bareground_ds)\n l = None\n return mask", "def remove_background(img):\n mask = 
np.zeros(img.shape[:2], np.uint8)\n bgdModel = np.zeros((1, 65), np.float64)\n fgdModel = np.zeros((1, 65), np.float64)\n rect = (50, 50, 450, 290)\n cv.grabCut(img, mask, rect, bgdModel, fgdModel, 5, cv.GC_INIT_WITH_RECT)\n mask2 = np.where((mask == 2)|(mask == 0), 0, 1).astype('uint8')\n img = img*mask2[:, :, np.newaxis]\n return img", "def fuse_images_without_background(img, img_to_insert, img_to_insert_segmentation_mask, box):\r\n x1, y1, x2, y2 = box\r\n # Take the patch of the original image that will suffer modification\r\n original_img_patch = img[y1:y2, x1:x2]\r\n # Extract a boolean mask containing the background\r\n background_mask = resize(image=img_to_insert_segmentation_mask, output_shape=original_img_patch.shape[:2],preserve_range=True,\r\n anti_aliasing=False).astype(np.bool)\r\n # Paste the non background part of img_to_insert in this patch\r\n original_img_patch[background_mask] = img_to_insert[background_mask]\r\n # Put again the modified patch into img\r\n img[y1:y2, x1:x2] = original_img_patch\r\n return img", "def remove_background(self, frame):\n logging.debug(\"Performing background subtraction\")\n\n #cv.CvtColor(frame, self.Igray, cv.CV_BGR2GRAY)\n cv.Sub(frame, self.bg, self.Imask)\n\n return self.Imask", "def process_mask(self, image):\n image = np.array(image)\n image[image == 5] = 1 # set un-classified to undestroyed\n return Image.fromarray(image)", "def backgroundEstimator(self, image):\n return self.bg_filter.convolve(image)", "def create_mask(masking_positions, img, cells):\n left, right, top, bottom = masking_positions\n left += 1\n right += 1\n top += 1\n bottom += 1\n mask = np.ones((img.shape[0], img.shape[1]))*255\n\n # Compute corresponding positions and put zeros in the background part\n left = (img.shape[1]//cells[0])*left\n mask[:, :left] = 0\n right = img.shape[1]-(img.shape[1]//cells[0])*right\n mask[:, right:] = 0\n top = (img.shape[0]//cells[1])*top\n mask[:top, :] = 0\n bottom = img.shape[0]-(img.shape[0]//cells[0])*bottom\n mask[bottom:, :] = 0\n\n masks = mask.astype(np.uint8)\n return mask", "def image_mask(image, patch_R, patch_C, seg_model):\n\n im = Image.open(image)\n im_name = os.path.basename(image).split('.')[0]\n im_width, im_height = im.width, im.height\n\n N = patch_R // patch_C\n\n W_ps_NI = im_width // patch_C # 31782 // 256 = 124\n # W_ps_NR = slide_width % patch_C # 31782 % 256 = 38\n H_ps_NI = im_height // patch_R # 24529 // 1024 = 23\n # H_ps_NR = slide_height % patch_R # 24529 % 1024 = 977\n\n cell_ratio = 0.85 # the threshold that decide the patch is background or not\n\n output_dir = os.path.join(current_path, \"..\", \"output\", \"output_mask\")\n if not os.path.isdir(output_dir): os.makedirs(output_dir)\n\n np_im = np.array(im)[:, :, 0:3] # exclude alpha\n for w in range(W_ps_NI):\n for h in range(H_ps_NI):\n subHIC = np_im[h * patch_R: (h+1) * patch_R, w * patch_C:(w+1) * patch_C, :]\n\n # rgb three channels value that >200 and <40 are ignored segment\n rgb_s = (abs(subHIC[:, :, 0] - 120) >= 80) & (abs(subHIC[:, :, 1] - 120) >= 80) & (\n abs(subHIC[:, :, 2] - 120) >= 80) # >200 <40\n\n if np.sum(rgb_s) <= (patch_R * patch_C) * cell_ratio:\n # segment\n subHIC = np.where(rgb_similarity(subHIC, 15, 195), 250, subHIC)\n # adjust equalization histogram and adjust brightness\n for k in range(subHIC.shape[2]):\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(N * 4, 4))\n subHIC[:, :, k] = clahe.apply(subHIC[:, :, k])\n subHIC = exposure.adjust_gamma(subHIC, gamma=1.5)\n subHIC = subHIC.reshape(N, patch_C, patch_C, 
3)\n\n subHIC = subHIC.reshape(N, patch_C, patch_C, 3)\n allmask_prob_list = maskrcnn_detection(seg_model, subHIC)\n\n for i in range(len(allmask_prob_list)):\n for layer in range(allmask_prob_list[i].shape[2]):\n image, cnts, hierarchy = cv2.findContours(allmask_prob_list[i][:, :, layer],\n cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n np_im[h * patch_R + i * patch_C: h * patch_R + (i + 1) * patch_C, w * patch_C:(w + 1) * patch_C,\n :] = cv2.drawContours(np_im[h * patch_R + i*patch_C: h*patch_R+(i+1)*patch_C, w * patch_C:(w + 1) * patch_C, :],\n cnts, -1, (0, 255, 0), 1)\n\n # np_im[h * patch_R + i*patch_C: h*patch_R+(i+1)*patch_C, w * patch_C:(w + 1) * patch_C, :] = subHIC[i]\n\n # plt.savefig(os.path.join(output_dir, f\"{im_name}w{w}h{h}N{i}.png\"))\n\n io.imsave(os.path.join(output_dir, f\"{im_name}.png\"), np_im)", "def create_mask(frame):\n \n # detect ridges\n ridges = enhance_ridges(frame)\n\n # threshold ridge image\n thresh = filters.threshold_otsu(ridges)\n thresh_factor = 1.1\n prominent_ridges = ridges > thresh_factor*thresh\n prominent_ridges = morphology.remove_small_objects(prominent_ridges, min_size=128)\n\n # the mask contains the prominent ridges\n mask = morphology.convex_hull_image(prominent_ridges)\n mask = morphology.binary_erosion(mask, disk(10))\n return mask", "def backgroundSubtraction(kmin, kmax, kwin=10,\n indir=IMG_DIR, inpattern=IMG_PATTERN,\n outdir=DIFF_DIR, outpattern=DIFF_PATTERN,\n verbose=True):\n\n def equalize_color(img):\n \"\"\"Apply histogram equalization to each channel of the color image.\n \n Note: returns float [0;1]-valued image.\n Note: Not used, in the end.\n \"\"\"\n imgeq = numpy.zeros_like(img, dtype='float')\n for i in xrange(img.shape[2]):\n imgeq[:,:,i] = exposure.equalize_hist(img[:,:,i])\n return imgeq\n\n # pre-compute mean, std for initialization\n if verbose:\n print 'Initializing background...'\n imgstack = []\n for k in range(kmin, kmin+kwin):\n fname = os.path.join(indir, inpattern.format(k))\n img = pyplot.imread(fname) # read image\n imgstack.append(img) # add to the stack\n imgstack = numpy.asarray(imgstack)\n imgmean = numpy.average(imgstack, axis=0)\n imgstd = numpy.std(imgstack, axis=0)\n rga = RunningGaussianAverage(imgmean, 15) #15 is the initial (constant) standard deviation\n\n\n # now run the detector\n if verbose:\n print 'Running foreground dectector...'\n for k in range(kmin, kmax):\n fname = os.path.join(indir, inpattern.format(k))\n # read image\n img = pyplot.imread(fname) # read image\n # classification mask: foreground if all channels are flagged as foreground\n isfg = numpy.all(rga.update_classify(img.astype('float'), rho=1./kwin), axis=-1)\n # set bg to 0\n img[numpy.logical_not(isfg)] = 0 \n outfile = os.path.join(DIFF_DIR, DIFF_PATTERN.format(k))\n pyplot.imsave(outfile, img)\n if verbose:\n print '\\tsaved {}'.format(outfile)\n if verbose:\n print 'Processing complete.'", "def remove_background1(img):\n #img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img = img.astype(np.uint8)\n # Binarize the image using OTSU's algorithm. This is used to find the center\n # of mass of the image, and find the threshold to remove background noise\n threshold, _ = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n # Remove noise - anything higher than the threshold. 
Note that the image is still grayscale\n img[img > threshold] = 255\n\n return img", "def calcmask(self, *args, **kwargs):\n return _image.image_calcmask(self, *args, **kwargs)", "def threshold_mask(mask, threshold=0.5):\n mask[np.where(mask >= threshold)] = 1.\n mask[np.where(mask < threshold)] = 0.\n return mask", "def remove_background(img):\n\n img = img.astype(np.uint8)\n # Binarize the image using OTSU's algorithm. This is used to find the center\n # of mass of the image, and find the threshold to remove background noise\n threshold, _ = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n # Remove noise - anything higher than the threshold. Note that the image is still grayscale\n img[img > threshold] = 255\n\n return img", "def broaden_mask(img, threshold=0.05, qual=None):\n if not np.any(qual):\n qual = DerivativeVariance(img.phase)\n qual = qual[img.mask==True].max()*1.1 - qual\n max_value = qual[img.mask==True].max()\n img['mask'][qual<max_value*threshold] = False" ]
[ "0.7898407", "0.7298791", "0.7213714", "0.683245", "0.68142647", "0.6722708", "0.65909743", "0.6589123", "0.6578678", "0.6564967", "0.6479782", "0.6466483", "0.6382156", "0.62984586", "0.62144023", "0.6209692", "0.6193849", "0.6181939", "0.6138382", "0.6132241", "0.6123924", "0.61105907", "0.6083435", "0.6052826", "0.60448986", "0.604447", "0.6022449", "0.60167634", "0.60155755", "0.59839886" ]
0.7330115
1
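A minimal standalone sketch of the masking step in the record above, assuming a NumPy image array and SciPy's ndimage; the helper name make_mask_sketch and the fixed threshold of 25 are taken from the snippet for illustration only, not a verified API.

import numpy as np
from scipy import ndimage

def make_mask_sketch(img, b_ground=None, level=25):
    # Optionally subtract a static background, threshold, then dilate
    # the binary result into a 0/255 mask (mirrors generate_mask above).
    img = img.astype(float)
    if b_ground is not None:
        img = img - b_ground
    binary = np.zeros(img.shape, "uint8")
    binary[img > level] = 255
    mask = ndimage.binary_dilation(binary).astype("uint8")
    return 255 * mask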
Grab unmoving 'background' of the stack by averaging over a sample of layers. The default is 50 samples.
def get_average(self, samples=50):
    first = self.layers[0].load_image()
    res = np.zeros(first.shape, dtype=float)
    intervals = len(self.layers)/samples
    for l in self.layers[::int(intervals)]:
        img = l.load_image().astype(float)
        res += img
        l.image = None
    return samples**-1*res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self, sample):\n temp = sample['stack']/255.0\n totensor = transforms.ToTensor()\n sample['stack'] = totensor(temp.transpose((1, 2, 0)))\n return sample", "def zstack_normalize_mean(instack):\n stack = np.copy(instack) \n stackmean = stack.mean()\n for x in range(0,stack.shape[0]):\n immean = stack[x].mean()\n stack[x] = stack[x] / immean * stackmean\n return(stack)", "def track_foreground(self, diff_threshold=None, frames_avg=50,\n smooth_std=3):\n avg = self.get_average(frames_avg)\n self.track = []\n self.diffs = []\n for ind, layer in enumerate(self.layers):\n diff = abs(layer.load_image() - avg)\n diff = colors.rgb_to_hsv(diff)[..., 2]\n layer.image = None\n diff = gaussian_filter(diff, smooth_std)\n layer.diff = diff\n if diff_threshold is None:\n xs, ys = local_maxima(diff, disp=False, p=95)\n if len(xs) > 0:\n self.track += [(xs[0], ys[0])]\n else:\n self.track += [(np.nan, np.nan)]\n else:\n xs, ys = local_maxima(diff, disp=False,\n min_diff=diff_threshold)\n if len(xs) > 0:\n self.track += [(xs, ys)]\n else:\n self.track += [(np.nan, np.nan)]\n # self.diffs += [diff]\n # self.track += [(np.argmax(diff.mean(0)),\n # np.argmax(diff.mean(1)))]\n print_progress(ind, len(self.layers))", "def make_flat_avg(images, out):\n image = Image(avg_images(images, out))\n image.normalise()\n return out", "def get_average_image_from_batch(batch):\n # YOUR CODE HERE\n return( mx.nd.mean(batch, axis=0) )\n raise NotImplementedError()", "def global_average(x, batch_lengths):\n\n # Loop over the clouds of the batch\n averaged_features = []\n i0 = 0\n for b_i, length in enumerate(batch_lengths):\n\n # Average features for each batch cloud\n averaged_features.append(torch.mean(x[i0:i0 + length], dim=0))\n\n # Increment for next cloud\n i0 += length\n\n # Average features in each batch\n return torch.stack(averaged_features)", "def get_channel_average_from_batch(batch):\n # YOUR CODE HERE\n return( mx.nd.mean(batch, axis=1, exclude=True) )\n raise NotImplementedError()", "def findBackground(frames):\n\n n = 100 # Number of frames to use\n i = 1\n background = frames[0]\n # Read each frame and do a weighted sum\n # beta is the weight for each new frame which starts at 1/2 and\n # gets smaller\n # alpha is the weight of the sum and starts at 1/2 and gets larger\n while(i < n):\n i += 1\n frame = frames[i]\n beta = 1.0 / (i + 1)\n alpha = 1.0 - beta\n background = cv2.addWeighted(background, alpha, frame, beta, 0.0)\n\n if VISUALIZE:\n cv2.imshow('estimated background', background)\n cv2.waitKey(3000)\n cv2.destroyAllWindows()\n\n return background", "def average(self):\n\n x = list(zip(*self.memory))\n states = list(x[0])\n actions = list(x[1])\n \n downsampled_states = resample(states , self.output_size-1)\n downsampled_actions = resample(actions, self.output_size-1)\n\n return downsampled_states, downsampled_actions", "def sample(self, sample_shape=t.Size(), avg=True):\n with t.no_grad():\n return self.rsample(sample_shape, avg)", "def subtract_average_image(sample, average_image):\n # YOUR CODE HERE\n return sample-average_image\n raise NotImplementedError()", "def testMeanClip(self):\n stats = afwMath.makeStatistics(self.image, afwMath.MEANCLIP)\n self.assertEqual(stats.getValue(afwMath.MEANCLIP), self.val)\n\n # this bug was caused by the iterative nature of the MEANCLIP.\n # With only one point, the sample variance returns NaN to avoid a divide by zero error\n # Thus, on the second iteration, the clip width (based on _variance) is NaN and corrupts\n # all further calculations.\n img 
= afwImage.ImageF(afwGeom.Extent2I(1, 1))\n img.set(0)\n stats = afwMath.makeStatistics(img, afwMath.MEANCLIP)\n self.assertEqual(stats.getValue(), 0)", "def system_4(in_dir, out_dir, threshold, num_frames=150, num_prev_frames=10, blur=(7,7), as_numeric=True, stretched=True):\n if not os.path.exists(out_dir):\n os.mkdir(out_dir)\n filenames = _prepare_filenames(in_dir, num_frames=150)\n initial_background_model = np.array([lm(cv2.imread(f)) for f in filenames[0:num_prev_frames]])\n seed_img = np.average(initial_background_model, axis=0)\n if blur:\n seed_img = cv2.blur(seed_img, blur)\n # start off with initial background model as evenly weighted average\n total_weights = 10\n counter = 10\n average_diff = np.average(np.array([initial_background_model[i] - initial_background_model[i - 1] for i in range(1, num_prev_frames)]))\n previous_frame = initial_background_model[-1]\n # we are going to use previous-frame segmentation mask prediction later to constrain weights...\n # ... in cases where image doesn't change much but there are cars in the image; ideally we will segment them...\n # ... and so we can use segmentation mask to regularize the weight update\n previous_segmentation_prediction = _prepare_frame_segment(seed_img=seed_img, img = initial_background_model[-1], blur=None, stretched=False)\n\n for i, f in tqdm(enumerate(filenames[num_prev_frames:])):\n img = lm(cv2.imread(f))\n if blur:\n img = cv2.blur(img, blur)\n diff_img = np.abs(img - previous_frame)\n # cars might stop, but we don't want them to contribute much to background...\n # ... so constrain by segmentation mask prediction from previous frame\n constrained_diff_img = np.where(previous_segmentation_prediction > 0, 0, diff_img)\n weight = average_diff / (np.average(np.ravel(constrained_diff_img)) + 0.01)\n seed_img = (total_weights * seed_img + weight * img) / (total_weights + weight)\n segmented = _prepare_frame_segment(seed_img=seed_img, img=img, blur=None)\n total_weights += weight\n average_diff = (counter * average_diff + constrained_diff_img) / (counter + 1) # todo: make sure this works\n\n cv2.imwrite(os.path.join(out_dir, 'segmented', filenames[i + num_prev_frames][-8:]), segmented)\n cv2.imwrite(os.path.join(out_dir, 'backgrounds', filenames[i + num_prev_frames][-8:]), seed_img)\n previous_frame = img\n previous_segmentation_prediction = segmented # this is not necessary, but good practice ~ interpretability", "def get_brightest_mean(self, num_pix=3):\n peak_x = np.zeros(\n [len(self.pixel_x)]) # Create blank arrays for peaks\n # rather than a dict (faster)\n peak_y = np.zeros(peak_x.shape)\n peak_amp = np.zeros(peak_x.shape)\n\n # Loop over all tels to take weighted average of pixel\n # positions This loop could maybe be replaced by an array\n # operation by a numpy wizard\n\n tel_num = 0\n for tel in self.image:\n top_index = self.image[tel].argsort()[-1 * num_pix:][::-1]\n print(top_index, self.pixel_x[tel][top_index],\n self.image[tel][top_index])\n weight = self.image[tel][top_index]\n weighted_x = self.pixel_x[tel][top_index] * weight\n weighted_y = self.pixel_y[tel][top_index] * weight\n\n ppx = np.sum(weighted_x) / np.sum(weight)\n ppy = np.sum(weighted_y) / np.sum(weight)\n\n peak_x[tel_num] = ppx # Fill up array\n peak_y[tel_num] = ppy\n peak_amp[tel_num] = np.sum(weight)\n tel_num += 1\n\n self.peak_x = peak_x # * unit # Add to class member\n self.peak_y = peak_y # * unit\n self.peak_amp = peak_amp", "def system_3(in_dir, threshold, num_frames=150, num_prev_frames=10, blur=None, as_numeric=True, 
stretched=True):\n #if not os.path.exists(out_dir):\n # os.mkdir(out_dir)\n filenames = _prepare_filenames(in_dir, num_frames=150)\n initial_background_model = np.array([lm(cv2.imread(f)) for f in filenames[0:num_prev_frames]])\n seed_img = np.average(initial_background_model, axis=0)\n if blur:\n seed_img = cv2.blur(seed_img, blur)\n counter = 10\n segmented_imgs = []\n\n for i, f in tqdm(enumerate(filenames[num_prev_frames:])):\n img = lm(cv2.imread(f))\n if blur:\n img = cv2.blur(img, blur)\n seed_img = (counter * seed_img + img) / (counter + 1)\n segmented = _prepare_frame_segment(seed_img=seed_img, img=img, blur=None)\n #segmented = blob_doh(segmented)\n counter += 1\n segmented_imgs.append(segmented)\n #cv2.imwrite(os.path.join(out_dir, 'segmented', filenames[i + num_prev_frames][-8:]), segmented)\n #cv2.imwrite(os.path.join(out_dir, 'backgrounds', filenames[i + num_prev_frames][-8:]), seed_img)\n return segmented_imgs", "def meanColor(self):\n return self.image[self.x, self.y]", "def gate_average(recurrent_net, sample_input):\n length = sample_input.size\n score, select = np.zeros(length), np.zeros(length)\n for i in range(length):\n select[i], score[i] = recurrent_net.activate([sample_input[i]])\n select = sigmoid(select)\n return np.sum(select * score) / np.sum(select)", "def avg_pooling(self, filter_):\n return self.add_layer(avg_pooling, filter_)", "def prob_3_5(self):\n \n ###### START CODE HERE ######\n\n\n ###### END CODE HERE ######\n pass\n \n ###### return avgImg ######", "def sample_trajectories(self):\n minibatch = []\n for i in range(self.num_avg_gradient):\n trajectory = self.replay_buffer[random.randint(0, len(self.replay_buffer) - 1)]\n trajectory = trajectory[random.randint(0, len(trajectory) - 1):]\n minibatch.append(trajectory)\n return minibatch", "def preprocess(self, img):\n return img - np.mean(img)", "def testMeanClip(self):\n stats = afwMath.makeStatistics(self.image, afwMath.MEANCLIP)\n self.assertEqual(stats.getValue(afwMath.MEANCLIP), self.val)", "def load_average(self):\n return _favg(self.load_samples)", "def mean_allcnnc():\n # TODO implement pre forward hook to adapt to arbitary image size for other data sets than cifar100\n return nn.Sequential(\n nn.AvgPool2d(kernel_size=(6, 6)),\n flatten()\n )", "def system_5(in_dir, out_dir, threshold, num_frames=150, num_prev_frames=10, blur=(3,3), as_numeric=True, stretched=True):\n filenames = _prepare_filenames(in_dir, num_frames=150)\n initial_background_model = np.array([cv2.imread(f) for f in filenames[0:num_prev_frames]])\n seed_img = mode(initial_background_model)\n previous_frames = deque(initial_background_model, maxlen=num_prev_frames)\n\n for i, f in tqdm(enumerate(filenames[num_prev_frames:])):\n img = lm(cv2.imread(f))", "def moving_avg_filter(data, filter_size=filter_size):\n filter_size = int(filter_size)\n smoothed = np.zeros(len(data))\n for n in range(filter_size, len(data) - filter_size):\n vals = data[n - filter_size : n + filter_size + 1]\n smoothed[n] = np.mean(vals)\n return smoothed", "def sample_from_belief(self):\n if self._use_information_bottleneck:\n posteriors = [torch.distributions.Normal(m, torch.sqrt(s)) for m, s in zip(torch.unbind(self.z_means), torch.unbind(self.z_vars))]\n z = [d.rsample() for d in posteriors]\n self.z = torch.stack(z)\n else:\n self.z = self.z_means", "def mean_pixel(model_variant=None):\n if model_variant is None:\n return _MEAN_RGB\n else:\n return [127.5, 127.5, 127.5]", "def blur_ground(X):\n return img_conv(X, kernel_blur)", "def 
backgroundSubtraction(kmin, kmax, kwin=10,\n indir=IMG_DIR, inpattern=IMG_PATTERN,\n outdir=DIFF_DIR, outpattern=DIFF_PATTERN,\n verbose=True):\n\n def equalize_color(img):\n \"\"\"Apply histogram equalization to each channel of the color image.\n \n Note: returns float [0;1]-valued image.\n Note: Not used, in the end.\n \"\"\"\n imgeq = numpy.zeros_like(img, dtype='float')\n for i in xrange(img.shape[2]):\n imgeq[:,:,i] = exposure.equalize_hist(img[:,:,i])\n return imgeq\n\n # pre-compute mean, std for initialization\n if verbose:\n print 'Initializing background...'\n imgstack = []\n for k in range(kmin, kmin+kwin):\n fname = os.path.join(indir, inpattern.format(k))\n img = pyplot.imread(fname) # read image\n imgstack.append(img) # add to the stack\n imgstack = numpy.asarray(imgstack)\n imgmean = numpy.average(imgstack, axis=0)\n imgstd = numpy.std(imgstack, axis=0)\n rga = RunningGaussianAverage(imgmean, 15) #15 is the initial (constant) standard deviation\n\n\n # now run the detector\n if verbose:\n print 'Running foreground dectector...'\n for k in range(kmin, kmax):\n fname = os.path.join(indir, inpattern.format(k))\n # read image\n img = pyplot.imread(fname) # read image\n # classification mask: foreground if all channels are flagged as foreground\n isfg = numpy.all(rga.update_classify(img.astype('float'), rho=1./kwin), axis=-1)\n # set bg to 0\n img[numpy.logical_not(isfg)] = 0 \n outfile = os.path.join(DIFF_DIR, DIFF_PATTERN.format(k))\n pyplot.imsave(outfile, img)\n if verbose:\n print '\\tsaved {}'.format(outfile)\n if verbose:\n print 'Processing complete.'" ]
[ "0.5758102", "0.57173944", "0.5690426", "0.56789875", "0.5661047", "0.56501585", "0.5576374", "0.55361825", "0.5420607", "0.53817236", "0.53514606", "0.53506416", "0.53381515", "0.5317445", "0.53117204", "0.5308503", "0.5302598", "0.5294157", "0.52913225", "0.52802604", "0.5276964", "0.5272504", "0.52591634", "0.5234589", "0.5232514", "0.5228797", "0.52202386", "0.52199453", "0.5218638", "0.5217786" ]
0.6894385
0
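A hedged, self-contained version of the frame-averaging idea from the record above; it operates on a plain list of arrays rather than the record's layer objects, and the name average_frames_sketch is invented for illustration. Dividing by the number of frames actually sampled (rather than the requested sample count) keeps the result correct when the stack is short.

import numpy as np

def average_frames_sketch(frames, samples=50):
    # Evenly sample up to `samples` frames and average them to estimate
    # the unmoving background of the stack.
    step = max(1, len(frames) // samples)
    picked = frames[::step]
    acc = np.zeros(picked[0].shape, dtype=float)
    for frame in picked:
        acc += frame.astype(float)
    return acc / len(picked)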
A 2d smoothing filter for the heights array
def smooth(self, sigma):
    self.heights = self.heights.astype("float32")
    self.heights = np.fft.ifft2(
        ndimage.fourier_gaussian(
            np.fft.fft2(self.heights),
            sigma=sigma)).real
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def smooth(img, sigma):\n if sigma < 0:\n raise ValueError('smoothing kernel size is negative')\n elif sigma == 0:\n return img.get_data()\n else:\n sigma_vox = sigma / np.sqrt(np.sum(img.get_affine()[0:3, 0:3] ** 2, 0))\n return nd.gaussian_filter(img.get_data(), sigma_vox)", "def Get_2d_smoothed_activation( MNI_coords, kernel_width=10 ):\n MNI_coords = MNI_coords[:, :2].astype('int') + 100\n\n arr = np.zeros((200,200))\n arr[ MNI_coords[:,0], MNI_coords[:,1]] = 1\n\n return gaussian_filter( arr, kernel_width )", "def smoothing(data, mask):\n smooth_data = gaussian_filter(data, [2, 2, 2, 0])\n\n Y = smooth_data[mask].T\n\n return Y", "def scipy_smooth(img, sigma=5):\n return ndimage.gaussian_filter(img, sigma=sigma)", "def smooth(y, box_pts):\r\n box = np.ones(box_pts)/box_pts\r\n y_smooth = np.convolve(y, box, mode='same')\r\n return y_smooth", "def sobelfilter(D, W):\n here, plus, minus = slice(1, -1), slice(2, None), slice(None, -2)\n # Estimate slopes along each axis at each pixel.\n Dx = 0.5 * (D[:, plus] - D[:, minus])\n Dy = 0.5 * (D[plus, :] - D[minus, :])\n # Calculate the corresponding inverse variances.\n Wp, Wm = W[:, plus], W[:, minus]\n Wx = 0.25 * np.divide(Wp * Wm, Wp + Wm, out=np.zeros_like(Wp), where=Wp + Wm > 0)\n Wp, Wm = W[plus, :], W[minus, :]\n Wy = 0.25 * np.divide(Wp * Wm, Wp + Wm, out=np.zeros_like(Wp), where=Wp + Wm > 0)\n # Average slope estimates along the other axis with weights (1, 2, 1).\n WDx = Wx[minus, :] * Dx[minus, :] + 2 * Wx[here, :] * Dx[here, :] + Wx[plus, :] * Dx[plus, :]\n Wxsum = Wx[minus, :] + 2 * Wx[here, :] + Wx[plus, :]\n Dx = np.divide(WDx, Wxsum, out=np.zeros_like(WDx), where=Wxsum > 0)\n WDy = Wy[:, minus] * Dy[:, minus] + 2 * Wy[:, here] * Dy[:, here] + Wy[:, plus] * Dy[:, plus]\n Wysum = Wy[:, minus] + 2 * Wy[:, here] + Wy[:, plus]\n Dy = np.divide(WDy, Wysum, out=np.zeros_like(WDy), where=Wysum > 0)\n # Estimate the 2D gradient magnitude.\n Dg = np.zeros_like(D)\n Dg[here, here] = np.hypot(Dx, Dy)\n return Dg", "def stdfilt2d(data, filtersize, threshold=None,verbose=False):\n # 2012-08-07 13:42 IJMC: Created from medianfilter\n from numpy import zeros, median, abs, std, isfinite\n\n if not hasattr(filtersize, '__iter__'):\n filtersize = [filtersize]\n\n if len(filtersize)<1:\n print 'medianfilter2 requires that filtersize be a 1- or 2-element vector'\n return -1\n elif len(filtersize)==1:\n filtersize = [filtersize[0], filtersize[0]]\n else:\n filtersize = filtersize[0:2]\n\n npix = data.shape[0]\n npiy = data.shape[1]\n bigsize_x = npix+2*(filtersize[0]-1)\n bigsize_y = npiy+2*(filtersize[1]-1)\n bigdata = zeros((bigsize_x,bigsize_y),float)\n ind = filtersize[0]-1\n if ind==0:\n bigdata = data\n else:\n bigdata[ind:(bigsize_x-ind), ind:(bigsize_y-ind)] = data\n\n\n # FOR NOW, WE ASSUME FILTERSIZE IS ODD!!\n # AND THAT DATA IS SQUARE!\n niter_x = npix + (filtersize[0]-1)\n niter_y = npiy + (filtersize[1]-1)\n filt = zeros((niter_x,niter_y), float)\n\n for ii in range(niter_x):\n for jj in range(niter_y):\n if verbose>1:\n print \"ii,jj>>\",ii,jj\n if filtersize[0]==1:\n indi = 1\n else:\n indi = filtersize[0]-1\n if filtersize[1]==1:\n indj = 1\n else:\n indj = filtersize[1]-1\n select = bigdata[ii:(ii+indi),jj:(jj+indj)].ravel()\n #select = select[isfinite(select)]\n #residualSelection = abs(select - median(select))\n\n doFilter = True\n\n if verbose:\n print \"doFilter?>>\",doFilter\n if verbose>1:\n print \"select>>\",select\n\n if doFilter: \n newval = ( select ).std()\n else:\n newval = bigdata[ii,jj]\n\n if verbose>1:\n print 
\"newval>>\",newval\n\n filt[ii,jj] = newval\n\n #print filt.shape, [(filtersize[0]-1)/2,niter_x-(filtersize[0]-1)/2,(filtersize[0]-1)/2,niter_y-(filtersize[0]-1)/2]\n return filt[(filtersize[0]-1)/2:niter_x-(filtersize[0]-1)/2,(filtersize[0]-1)/2:niter_y-(filtersize[0]-1)/2]", "def smooth(y, box_pts):\n box = np.ones(box_pts) / box_pts\n y_smooth = np.convolve(y, box, mode='same')\n return y_smooth", "def smooth(y, box_pts):\n box = np.ones(box_pts) / box_pts\n y_smooth = np.convolve(y, box, mode='same')\n return y_smooth", "def filter2D(img, kernel = (5,5)):\n\ttmp = img.copy()\n\tk = np.ones((kernel[0], kernel[1]), np.float32) / (kernel[0]*kernel[1])\n\tdst = cv2.filter2D(tmp, -1, k)\n\treturn dst", "def smooth(data, nd, axis=0):\n if np.any(data):\n if data.ndim > 1:\n filt = np.zeros(data.shape)\n for i in range(data.shape[::-1][axis]):\n if axis == 0:\n filt[:, i] = np.convolve(\n data[:, i], np.ones((nd,))/nd, mode='same')\n elif axis == 1:\n filt[i, :] = np.convolve(\n data[i, :], np.ones((nd,))/nd, mode='same')\n else:\n filt = np.convolve(data, np.ones((nd,))/nd, mode='same')\n return filt\n else:\n return None", "def smooth(array, binwidth):\n\tarray =scipy.convolve(array,scipy.ones(binwidth)/binwidth, mode='same')\n\treturn(array)", "def _smooth(values, std):\n width = std * 4\n x = np.linspace(-width, width, min(2 * width + 1, len(values)))\n kernel = np.exp(-(x / 5)**2)\n\n values = np.array(values)\n weights = np.ones_like(values)\n\n smoothed_values = np.convolve(values, kernel, mode='same')\n smoothed_weights = np.convolve(weights, kernel, mode='same')\n\n return smoothed_values / smoothed_weights", "def smooth_2d(res_array, window_len):\n\n gx, gy = np.mgrid[-window_len : window_len + 1, -window_len : window_len + 1]\n\n gauss = np.exp(-(gx ** 2 / float(window_len) + gy ** 2 / float(window_len)))\n gauss /= gauss.sum()\n\n smooth_array = sps.convolve(res_array, gauss, mode=\"same\")\n\n return smooth_array", "def construct_2d_filt(lo: torch.Tensor, hi: torch.Tensor) -> torch.Tensor:\n ll = _outer(lo, lo)\n lh = _outer(hi, lo)\n hl = _outer(lo, hi)\n hh = _outer(hi, hi)\n filt = torch.stack([ll, lh, hl, hh], 0)\n filt = filt.unsqueeze(1)\n return filt", "def edgesMarrHildreth(img, sigma):\n\tsize = int(2*(np.ceil(3*sigma))+1)\n\n\tx, y = np.meshgrid(np.arange(-size/2+1, size/2+1), np.arange(-size/2+1, size/2+1))\n\t\n\tnormal = 1 / (2.0 * np.pi * sigma**2)\n\n\tkernel = ((x**2 + y**2 - (2.0*sigma**2)) / sigma**4) * np.exp(-(x**2+y**2) / (2.0*sigma**2)) / normal # LoG filter\n\n\tkern_size = kernel.shape[0]\n\tlog = np.zeros_like(img, dtype=float)\n\n\t# applying filter\n\tfor i in range(img.shape[0]-(kern_size-1)):\n\t\tfor j in range(img.shape[1]-(kern_size-1)):\n\t\t\twindow = img[i:i+kern_size, j:j+kern_size] * kernel\n\t\t\tlog[i,j] = np.sum(window)\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\n\tlog = log.astype(np.int64, copy=False)\n\n\tzero_crossing = np.zeros_like(log)\n\n\t# computing zero crossing\n\tfor i in range(log.shape[0]-(kern_size-1)):\n\t\tfor j in range(log.shape[1]-(kern_size-1)):\n\t\t\tif log[i][j] == 0:\n\t\t\t\tif (log[i][j-1] < 0 and log[i][j+1] > 0) or (log[i][j-1] < 0 and log[i][j+1] < 0) or (log[i-1][j] < 0 and log[i+1][j] > 
0) or (log[i-1][j] > 0 and log[i+1][j] < 0):\n\t\t\t\t\tzero_crossing[i][j] = 255\n\t\t\tif log[i][j] < 0:\n\t\t\t\tif (log[i][j-1] > 0) or (log[i][j+1] > 0) or (log[i-1][j] > 0) or (log[i+1][j] > 0):\n\t\t\t\t\tzero_crossing[i][j] = 255 \n\n\t# plotting images\n\tfig = plt.figure()\n\ta =fig.add_subplot(1,2,1)\n\timgplot = plt.imshow(log, cmap='gray')\n\ta.set_title('Laplacian of Gaussian')\n\ta = fig.add_subplot(1,2,2)\n\timgplot = plt.imshow(zero_crossing, cmap='gray')\n\tstring = 'Zero Crossing sigma = '\n\tstring += (str(sigma))\n\ta.set_title(string)\t\n\tplt.show()\n\t\n\treturn zero_crossing", "def get_smoothing_kernel(x, y, smoothing_length):\n r_xy_2 = x + y\n q_xy_2 = r_xy_2 / smoothing_length\n return get_dimensionless_2D_kernel(q_xy_2)", "def smooth(D, W, smoothing):\n WD = scipy.ndimage.gaussian_filter(W * D, smoothing)\n W = scipy.ndimage.gaussian_filter(W, smoothing)\n D = np.divide(WD, W, out=np.zeros_like(D), where=W > 0)\n return D, W", "def apply_smoothing(image, kernel_size=3):\n return cv2.GaussianBlur(image, (kernel_size, kernel_size), 0)", "def sobelY(img):\r\n f = np.array([-1, -2, -1, 0, 0, 0, 1, 2, 1]).reshape([3, 3])\r\n return cv2.filter2D(img, cv2.CV_64F, f)", "def filter_2d(x, sdm, size):\n col_begin = np.max([1, x[1] - int(size / 2)]) - 1\n col_end = np.min([x[1] + int(size / 2), len(sdm)]) - 1\n row_begin = np.max([1, x[0] - int(size / 2)]) - 1\n row_end = np.min([x[0] + int(size / 2), len(sdm)]) - 1\n\n beats_count = np.min([row_end - row_begin, col_end - col_begin])\n\n area = sdm[row_begin : row_begin + beats_count, col_begin : col_begin + beats_count]\n\n # the main diagonal\n main_diag = np.diag(area, 0)\n\n # black diagonals\n diags = np.concatenate((main_diag, np.diag(area, -int(size / 2)), np.diag(area, int(size / 2))))\n\n _alpha = np.mean(diags)\n _beta = np.mean(main_diag)\n _lambda = (np.sum(area) - np.sum(diags)) / (beats_count * beats_count - len(diags))\n\n rho_alpha = _alpha / _lambda\n rho_bera = _beta / _lambda\n\n return rho_alpha, rho_bera", "def nonLinearSmooth(h2):\n for x in range( h2.GetNbinsX()-2 ):\n for y in range( h2.GetNbinsY()-2 ):\n centerBin = x+2 + (y+2)*(h2.GetNbinsX()+2)\n surroundingBins = [\n centerBin-1, #this row\n centerBin+1,\n centerBin-1 - (h2.GetNbinsX()+2), # row above\n centerBin - (h2.GetNbinsX()+2),\n centerBin+1 - (h2.GetNbinsX()+2),\n centerBin-1 + (h2.GetNbinsX()+2), # row below\n centerBin + (h2.GetNbinsX()+2),\n centerBin+1 + (h2.GetNbinsX()+2),\n ]\n surroundingBinValues = [ h2.GetBinContent(b) for b in surroundingBins ]\n if h2.GetBinContent(centerBin) < min(surroundingBinValues) or \\\n h2.GetBinContent(centerBin) > max(surroundingBinValues):\n h2.SetBinContent(centerBin, sum(surroundingBinValues)/8.0)", "def smooth_median(y, window_size):\n if window_size % 2 == 1:\n window_size -= 1\n y_smooth = []\n for j in range(len(y)):\n l = max(j - window_size/2, 0)\n u = min(j + window_size/2+1, len(y))\n y_smooth.append(np.median(y[l:u]))\n return y_smooth", "def smooth_mask ( gray , blur = 1 , threshold = 128 ) :\n\n assert gray is not None\n\n gray_markers = nd.median_filter(gray, blur)\n _ , gray_markers = cv2.threshold ( gray_markers , threshold , 255 , cv2.THRESH_BINARY )\n\n return gray_markers", "def heavy_blur_skeleton_hand(save_path=None):\n im = auto_hand_img() # reload the edge map\n blurred = gaussian(np.copy(im)) \n #blurred = blurred * blurred # strengthen the image by multiplying\n im2 = to_rgba(np.copy(im)) # take an RGBA copy to add the skeleton onto\n skel = skeletonize(blurred) # given as a 
Boolean array\n skel_blur = gaussian(np.copy(skel), sigma=3)\n skel_blur *= (255/np.max(skel_blur))\n # manually examine the distribution to set a threshold for binarisation\n # for i in np.arange(0,101,1): print(np.percentile(skel_blur, i))\n skel_blur[skel_blur >= 30] = 255\n skel_blur[skel_blur < 30] = 0\n skel2 = (skel_blur/255).astype(bool)\n # also expand the edge map using the blurred version for visibility\n im2[blurred <= 0.75] = [0,0,0,255]\n # set the skeleton pixels to red in the edge map copy\n im2[skel2] = [255, 0, 0, 255]\n if save_path is None:\n return im2\n else:\n save_image(im2, (8,8), save_path)\n return im2", "def smooth(data, M):\n\n if M % 2 == 0: # even window\n M += 1\n\n window = signal.windows.hann(M=M)\n window /= window.sum()\n\n smoothed = np.convolve(data, window, mode='valid')\n valid_indices = np.arange(len(data))[int(M/2):-int(M/2)]\n\n return smoothed, valid_indices", "def build_gaussian_pyramid(im, max_levels, filter_size):\n filter_vec = create_gaussian_line(filter_size)\n # creating duplicate for confy use\n temp_im = im\n pyr = [im]\n kernel = np.array([0.0625,0.25,0.375,0.25,0.0625])\n kernel = kernel.reshape((1,5))\n for i in range(max_levels - 1):\n # blurring the cur layer\n #temp_im_temp = cv2.filter2D(temp_im,-1,kernel,borderType=cv2.B)\n temp_im = scipy.signal.convolve2d(temp_im, filter_vec, mode='same')\n temp_im = scipy.signal.convolve2d(temp_im, np.transpose(filter_vec), mode='same')\n # sampling only every 2nd row and column\n temp_im = temp_im[::2, ::2]\n pyr.append(temp_im)\n\n return pyr, filter_vec", "def smooth_scipy(self, mri_data):\n\n # image dimension\n if hasattr(mri_data.img_header, 'info'):\n dx, dy, dz = np.abs(mri_data.img_header.info['DELTA'])\n elif hasattr(mri_data.img_header, 'get_zooms'):\n dx, dy, dz = mri_data.img_header.get_zooms()[:3]\n else:\n self.errmsg(\"No voxel size information in mri_data header\")\n\n # Set gaussian sigma in image dimension\n sigma = (self.blur_fwhm / np.array((dx, dy, dz))) / 2.354820\n imgdata = mri_data.img_data.astype(np.float64)\n\n # Apply mask\n if hasattr(self, 'maskV'):\n imgdata[~self.maskV] = 0\n\n # Apply Gaussian filter\n filt_img = gaussian_filter(imgdata, sigma, mode='constant')\n\n if hasattr(self, 'maskV'):\n # Adjust voxels with out of the mask (0) convolution\n aux_img = np.ones_like(imgdata)\n aux_img[~self.maskV] = 0\n filt_aux_img = gaussian_filter(aux_img, sigma, mode='constant')\n filt_img[self.maskV] /= filt_aux_img[self.maskV]\n\n return filt_img.astype(mri_data.img_data.dtype)", "def nfw2D_smoothed(self, R, Rs, rho0, r200, pixscale):\n x = R/Rs\n d = pixscale/(2*Rs)\n a = np.empty_like(x)\n x_ = x[x > d]\n upper = x_+d\n lower = x_-d\n\n a[x > d] = 4*rho0*Rs**3*(self.g(upper)-self.g(lower))/(2*x_*Rs*pixscale)\n a[x < d] = 4*rho0*Rs**3*self.g(d)/((pixscale/2)**2)\n return a", "def smooth_series(y,p = 6.25):\n cycle, trend = sm.tsa.filters.hpfilter(y, p)\n return trend" ]
[ "0.62981075", "0.62646466", "0.62593687", "0.6154723", "0.60443723", "0.5988841", "0.59508985", "0.59495175", "0.59495175", "0.58990556", "0.58296025", "0.5766525", "0.57433444", "0.5726047", "0.5722643", "0.5715811", "0.5714172", "0.5711716", "0.56931025", "0.56912136", "0.5676201", "0.5652017", "0.5609689", "0.5586494", "0.5549165", "0.55362207", "0.551597", "0.54843366", "0.54820377", "0.5477666" ]
0.6630815
0
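The smooth document above low-passes a 2-D height map in the frequency domain. Below is a sketch of the same operation as a free function, assuming a real-valued NumPy array; the name smooth_heights_sketch is a placeholder.

import numpy as np
from scipy import ndimage

def smooth_heights_sketch(heights, sigma):
    # FFT the height map, apply a Gaussian transfer function with
    # scipy.ndimage.fourier_gaussian, invert, and keep the real part.
    spectrum = np.fft.fft2(heights.astype("float32"))
    blurred = ndimage.fourier_gaussian(spectrum, sigma=sigma)
    return np.fft.ifft2(blurred).real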
Simple motion tracking using an average of the whole video as the background and the absolute difference between each frame and the background as the foreground.
def track_foreground(self, diff_threshold=None, frames_avg=50,
                     smooth_std=3):
    avg = self.get_average(frames_avg)
    self.track = []
    self.diffs = []
    for ind, layer in enumerate(self.layers):
        diff = abs(layer.load_image() - avg)
        diff = colors.rgb_to_hsv(diff)[..., 2]
        layer.image = None
        diff = gaussian_filter(diff, smooth_std)
        layer.diff = diff
        if diff_threshold is None:
            xs, ys = local_maxima(diff, disp=False, p=95)
            if len(xs) > 0:
                self.track += [(xs[0], ys[0])]
            else:
                self.track += [(np.nan, np.nan)]
        else:
            xs, ys = local_maxima(diff, disp=False,
                                  min_diff=diff_threshold)
            if len(xs) > 0:
                self.track += [(xs, ys)]
            else:
                self.track += [(np.nan, np.nan)]
        # self.diffs += [diff]
        # self.track += [(np.argmax(diff.mean(0)),
        #                 np.argmax(diff.mean(1)))]
        print_progress(ind, len(self.layers))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def track_ball_generic(video):\n result = []\n frames = readFrames(video)\n\n # get background with background estimator method\n background = findBackground(frames)\n # Setup background subtractor object with parameters\n subtractor = cv2.BackgroundSubtractorMOG(30, 10, 0.7, 0)\n # Feed estimated background as first input to subtractor\n subtractor.apply(background)\n\n # Iterate over every frame in video\n i = 0\n while i < len(frames):\n frame = frames[i] # get the new frame\n\n # apply background subtraction to frame\n frame = subtractor.apply(frame)\n\n # find contours in the frame\n contours, _ = cv2.findContours(frame, cv2.RETR_TREE,\n cv2.CHAIN_APPROX_SIMPLE)\n\n # sort the contours in reverse order by width to eliminate small\n # contours caused by noise.\n sort_cnt = sorted(contours, key=lambda x: cv2.boundingRect(x)[2],\n reverse=True)\n\n # get the parameters of the bounding box for the largest width contour\n x, y, w, h = cv2.boundingRect(sort_cnt[0])\n # append to result list\n result.append((x, y, x + w, y + h))\n\n if VISUALIZE:\n orig_frame = frames[i]\n cv2.rectangle(orig_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n cv2.imshow('frame', orig_frame)\n cv2.waitKey(30)\n\n i += 1\n\n return result", "def track_motion(self, video=None, set_roi=False):\r\n \r\n if video is None:\r\n video = self.video_buffer\r\n if set_roi:\r\n roi = self.get_roi(video=video)\r\n \r\n video_track = video.copy()\r\n motion_tracker = []\r\n # Generate different colors for tracking display \r\n color = np.random.randint(0,255,(100,3))\r\n \r\n # params for ShiTomasi corner detection\r\n feature_params = dict( maxCorners = 100,\r\n qualityLevel = 0.3,\r\n minDistance = 5,\r\n blockSize = 7 )\r\n # Parameters for lucas kanade optical flow\r\n lk_params = dict( winSize = (15,15),\r\n maxLevel = 8,\r\n criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\r\n\r\n old_gray = cv2.cvtColor(video[0], cv2.COLOR_BGR2GRAY)\r\n # Create mask for drawing\r\n mask = np.zeros_like(video[0])\r\n # Mask to dectate the features to track\r\n features_mask = np.zeros_like(old_gray)\r\n features_mask[roi['x1']: roi['x2'], roi['y1']: roi['y2']] = old_gray[roi['x1']: roi['x2'], roi['y1']: roi['y2']]\r\n # Find corners in first frame\r\n p0 = cv2.goodFeaturesToTrack(features_mask, mask = None, **feature_params)\r\n \r\n for idx in range(1, video.shape[0]):\r\n new_gray = cv2.cvtColor(video[idx], cv2.COLOR_BGR2GRAY)\r\n # calculate optical flow\r\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, new_gray, p0, None, **lk_params)\r\n \r\n # Get good points\r\n good_old = p0[st==1]\r\n good_new = p1[st==1]\r\n motion_tracker.append(good_new)\r\n for i, (old, new) in enumerate(zip(good_old, good_new)):\r\n (ox, oy) = old.reval()\r\n (nx, ny) = new.ravel()\r\n mask = cv2.circle(mask, (nx, ny), 5, color[i].tolist(), -1)\r\n frame = cv2.add(video[idx], mask)\r\n video_track[idx] = frame\r\n \r\n cv2.imshow('frame', frame)\r\n if cv2.waitKey(30) & 0xFF==27:\r\n break\r\n # Updat old frames and points before checking next frame\r\n \r\n old_gray = new_gray.copy()\r\n p0 = p1.resapr(-1,1,2)\r\n cv2.destroyAllWindows()\r\n \r\n return video_track, motion_tracker", "def detect_motion(frame_count):\n global video_stream, output_frame, lock\n motion_detector = MotionDetector(accum_weight=0.1)\n total = 0\n\n while True:\n frame = video_stream.read()\n frame = imutils.resize(frame, width=400)\n gray_image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n gray_image = cv2.GaussianBlur(gray_image, (7, 7), 0)\n timestamp 
= datetime.now()\n cv2.putText(frame, timestamp.strftime(\n \"%A %d %B %Y %I:%M:%S%p\"), (10, frame.shape[0] - 15),\n cv2.FONT_HERSHEY_PLAIN, 0.8, (20, 20, 255), 1)\n if total > frame_count:\n motion = motion_detector.detect(gray_image)\n if motion is not None:\n (thresh, (min_X, min_y, max_x, max_y)) = motion\n cv2.rectangle(frame, (min_X, min_y),\n (max_x, max_y), (0, 255, 0), 2)\n motion_detector.update(gray_image)\n total += 1\n with lock:\n output_frame = frame.copy()", "def motion_extraction():\n # iterate through frames\n global frame_height, frame_width\n global limb_coords, init_coords\n frame_count = 0\n has_frames, frame = capture.read()\n\n while has_frames:\n img_out = frame.copy()\n img_out = insert_padding(img_out, 14*14, 12*14)\n\n if frame_count == 0:\n # change global values of height and width\n frame_height = frame_height + 14*14*2\n frame_width = frame_width + 12*14*2\n get_start_positions(img_out)\n img_out2 = segment_red(img_out, 200, 130)\n #erode(img_out2, 4, 6)\n remove_artifacts(img_out2)\n #enhance_contrast(img_out2)\n\n if frame_count > 0:\n get_motion(prev_frame, img_out2, frame_count)\n\n prev_frame = img_out2.copy()\n frame_count += 1\n has_frames, frame = capture.read()", "def run(self):\n pre_stop = False\n begin_t = 0\n end_t = 0\n\n self.removeNoise()\n\n while (not self.stopped()):\n\n if self._pause:\n time.sleep(0.001)\n continue\n\n frame = self._video.getImage()\n\n if not (frame is None):\n fgmask = self._fgbg.apply(frame)\n hist = cv2.calcHist([fgmask],[0],None,[256],[0,256])\n\n white_count = hist[255]\n\n if (white_count > 500):\n if not self._video.isRecorded() and not self._pause:\n if self._video.startRecord():\n self._hasMotion = True\n print('[Detector] start record video')\n else:\n print('[Detector] start record video fail!')\n pre_stop = False\n elif (white_count <= 100) and self._video.isRecorded():\n if not pre_stop:\n pre_stop = True\n begin_t = clock()\n else:\n end_t = clock()\n if end_t - begin_t > 10:\n if self._video.stopRecord():\n self._hasMotion = False\n print('[Detector] stop record video')\n else:\n print('[Detector] stop record video fail!')\n if self._video.isRecorded():\n self._hasMotion = False\n self._video.stopRecord()\n print('[Detector] end Thread')", "def track_ball_1(video):\n return track_ball_generic(video)", "def main():\n NAME = os.path.basename(__file__).split(\".\")[0]\n\n # Pass/fail thresholds\n MIN_AVG_FRAME_DELTA = 30 # at least 30ms delta between frames\n MAX_VAR_FRAME_DELTA = 0.01 # variance of frame deltas\n MAX_FRAME_DELTA_JITTER = 0.3 # max ms gap from the average frame delta\n\n with its.device.ItsSession() as cam:\n props = cam.get_camera_properties()\n if not its.caps.manual_sensor(props):\n print \"Test skipped\"\n return\n\n req, fmt = its.objects.get_fastest_manual_capture_settings(props)\n caps = cam.do_capture([req]*50, [fmt])\n\n # Print out the millisecond delta between the start of each exposure\n tstamps = [c['metadata']['android.sensor.timestamp'] for c in caps]\n deltas = [tstamps[i]-tstamps[i-1] for i in range(1,len(tstamps))]\n deltas_ms = [d/1000000.0 for d in deltas]\n avg = sum(deltas_ms) / len(deltas_ms)\n var = sum([d*d for d in deltas_ms]) / len(deltas_ms) - avg * avg\n range0 = min(deltas_ms) - avg\n range1 = max(deltas_ms) - avg\n print \"Average:\", avg\n print \"Variance:\", var\n print \"Jitter range:\", range0, \"to\", range1\n\n # Draw a plot.\n pylab.plot(range(len(deltas_ms)), deltas_ms)\n matplotlib.pyplot.savefig(\"%s_deltas.png\" % (NAME))\n\n # Test for 
pass/fail.\n assert(avg > MIN_AVG_FRAME_DELTA)\n assert(var < MAX_VAR_FRAME_DELTA)\n assert(abs(range0) < MAX_FRAME_DELTA_JITTER)\n assert(abs(range1) < MAX_FRAME_DELTA_JITTER)", "def track_ball_3(video):\n return track_ball_generic(video)", "def track_ball_2(video):\n return track_ball_generic(video)", "def track_ball_4(video):\n\n result = []\n frames = readFrames(video)\n\n i = 0\n while i < len(frames):\n frame = frames[i] # get the new frame\n\n # find edges of the ball in the frame\n # parameters filter out background\n edges = cv2.Canny(frame, 700, 800)\n\n # find contours in the edge frame\n contours, _ = cv2.findContours(edges, cv2.RETR_TREE,\n cv2.CHAIN_APPROX_SIMPLE)\n\n # sort the contours in reverse order by width to eliminate small\n # contours caused by noise.\n sort_cnt = sorted(contours, key=lambda x: cv2.boundingRect(x)[2],\n reverse=True)\n\n # get the parameters of the bounding box for the largest width contour\n x, y, w, h = cv2.boundingRect(sort_cnt[0])\n # append to result list\n result.append((x, y, x + w, y + h))\n\n if VISUALIZE:\n orig_frame = frames[i]\n cv2.rectangle(orig_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n cv2.imshow(\"ball\", orig_frame)\n cv2.waitKey(30)\n\n i += 1\n\n return result", "def TemporalAverage(video_handle):\n temp_avg = np.zeros((int(video_handle.get(cv2.CAP_PROP_FRAME_HEIGHT)), int(video_handle.get(\n cv2.CAP_PROP_FRAME_WIDTH))))\n\n while video_handle.isOpened():\n ret, frame = video_handle.read()\n if not ret:\n break\n # Converts input RGB frames to Grayscale and adds the pixel values of successive frames\n temp_avg += GrayScale(frame)\n # Find the average of each pixel in the video\n temp_avg = temp_avg / video_handle.get(cv2.CAP_PROP_FRAME_COUNT)\n\n return temp_avg", "def __init__(self, video):\n threading.Thread.__init__(self)\n self._stop = threading.Event()\n self._pause = True\n self._video = video\n #self._fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()\n #self._fgbg = cv2.bgsegm.createBackgroundSubtractorGMG()\n self._fgbg = cv2.createBackgroundSubtractorMOG2()\n self._init = True\n self._hasMotion = False", "def run(self):\n\n last_mean = 0\n st = time.time()\n sframe = 0\n while True:\n if time.time()-1 > st:\n st = time.time()\n #print 'fps', self.frame_counter - sframe\n sframe = self.frame_counter\n\n self.frame_counter += 1\n frame = next(self.frame_generator)\n\n xMax = frame.shape[1]\n yMax = frame.shape[0]\n\n capture_area = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n mean, stddev = cv2.meanStdDev(capture_area)\n mean = mean[0][0]\n stddev = stddev[0][0]\n\n if abs(mean-last_mean) > ACTIVATE_MEAN_DIFF:\n self.wakeup()\n\n last_mean = mean\n\n faces = []\n if abs(self.frame_counter - self.last_face_frame) < 20 or self.frame_counter % 5 == 0:\n faces = faceCascade.detectMultiScale(\n capture_area,\n scaleFactor=1.1,\n minNeighbors=MIN_NEIGHBOURS,\n minSize=(30, 30)\n )\n\n if len(faces) == 1:\n self.last_face_frame = self.frame_counter\n face = faces[0]\n x, y, w, h = face\n\n x1 = x\n x2 = x+w\n y1 = y\n y2 = y+h\n\n # expand_area\n width_plus = int(w/4.0)\n height_plus = int(h/4.0)\n x1 -= width_plus\n x2 += width_plus\n y1 -= height_plus\n y2 += height_plus\n\n y_max, x_max = frame.shape[:2]\n\n x1 = max(0, x1)\n y1 = max(0, y1)\n x2 = min(x_max, x2)\n y2 = min(y_max, y2)\n\n colour_face = frame[y1:y2, x1:x2]\n colour_face = np.copy(colour_face)\n\n face_obj = Face(face, colour_face, self.frame_counter)\n self.capture_face(face_obj)\n\n #st = time.time()\n bm = get_best_match(colour_face)\n match_person = bm\n 
if match_person is not None:\n self.found_people[match_person] += 1\n\n\n #et = time.time()\n #print et-st\n #result = self.pool.apply_async(get_best_match, (colour_face,))\n #self.pool_results.append(result)\n\n if len(self.pool_results) > 0:\n print(len(self.pool_results))\n res = self.pool_results[0]\n try:\n match_person = res.get()\n print('match here', match_person)\n except TimeoutError:\n pass\n else:\n self.pool_results.popleft()\n if match_person is not None:\n self.found_people[match_person] += 1\n\n # do flush if we have enough frames\n if len(self.capture_buffer) >= FRAMES_COUNT_TO_SAVE:\n self.flush_capture_buffer()\n\n # clear buffer if we never got enough frames\n if len(self.capture_buffer) > 0:\n if self.frame_counter - self.capture_buffer[-1].frame_counter > MAX_FRAMES_BETWEEN_CAPTURES:\n self.flush_capture_buffer()\n\n # Draw a rectangle around the faces\n for (x, y, w, h) in faces:\n cv2.rectangle(frame, (x, y), (x+w, y+h), DRAWING_COLOR, 15)\n\n # Display the resulting frame\n frame = cv2.flip(frame, flipCode=1)\n\n if self.draw_wanted_start_frame > self.frame_counter - TEXT_DISPLAY_TIME:\n cv2.putText(frame, \"Thanks!\", (150,250), cv2.FONT_HERSHEY_DUPLEX, 8.0, DRAWING_COLOR, 14)\n if self.thank_person is not None:\n cv2.putText(frame, self.thank_person, (150,450), cv2.FONT_HERSHEY_DUPLEX, 6.0, DRAWING_COLOR, 12)\n\n # When the screen goes off, we hang on waitKey, so don't do it if we haven't done a wakeup recently\n # Also no point in updating the screen if it is off.\n if self.last_wakeup + 40 > time.time():\n cv2.imshow('Video', frame)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n # Display the resulting frame\n cv2.imshow('Video', frame)", "def get_video_average(video_path):\n vidcap = cv2.VideoCapture(video_path)\n\n width = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n num_color_channels = 3\n\n avg_frame = np.zeros((height, width, num_color_channels), dtype=np.float64)\n frames = 0\n\n while True:\n success, img = vidcap.read()\n if not success:\n break\n avg_frame += img\n frames += 1\n\n avg_frame = avg_frame / frames\n ####avg_frame = cv2.cvtColor(avg_frame, cv2.COLOR_BGR2RGB)\n avg_frame = avg_frame.astype(np.uint8)\n cv2.imwrite(\"average_frame.png\", avg_frame)", "def heatmap_video(path_in, path_out, frames_sec = 2, thresh = 2, maxValue = 3):\n\n cap = cv2.VideoCapture(path_in)\n fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()\n\n num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n fps = cap.get(cv2.CAP_PROP_FPS) # frames per second\n duration = round(num_frames / fps, 2) # duration of the video in seconds\n print('Total number of frames to process: {}'.format(num_frames))\n print('Duration of the video in seconds: {}'.format(duration))\n step = round(fps / frames_sec)\n\n first_iteration_indicator = 1\n for i in range(0, num_frames, step):\n\n if (first_iteration_indicator == 1):\n ret, frame = cap.read()\n first_frame = copy.deepcopy(frame)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n height, width = gray.shape[:2]\n accum_image = np.zeros((height, width), np.uint8)\n first_iteration_indicator = 0\n\n else:\n cap.set(cv2.CAP_PROP_POS_FRAMES, i)\n print('Frame process... ' + str(i) + ' of ' + str(num_frames))\n print('Second process... 
' + str(int(cap.get(cv2.CAP_PROP_POS_MSEC)) / 1000) + ' of ' + str(duration))\n print('...')\n ret, frame = cap.read() # read a frame\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # convert to grayscale\n\n fgmask = fgbg.apply(gray) # remove the background\n ret, th1 = cv2.threshold(fgmask, thresh, maxValue, cv2.THRESH_BINARY)\n\n # add to the accumulated image\n accum_image = cv2.add(accum_image, th1)\n\n # apply a color map\n # COLORMAP_PINK also works well, COLORMAP_BONE is acceptable if the background is dark\n color_image = cv2.applyColorMap(accum_image, cv2.COLORMAP_HOT)\n\n # overlay the color mapped image to the first frame\n result_overlay = cv2.addWeighted(first_frame, 0.7, color_image, 0.7, 0)\n\n # save the final overlay image\n cv2.imwrite(path_out, result_overlay)\n\n # cleanup\n cap.release()\n cv2.destroyAllWindows()", "def emvCoreMotion(frames, fps, maxLevel, freqLow, freqHigh, alpha, lambdaC, chromAttenuation, method=\"ideal\"): \n pyrVideo_=buildVideoLapPyr(frames, maxLevel)\n \n if method==\"ideal\":\n filteredVideoPyr=idealFilterForVideoPyr(pyrVideo_, freqLow, freqHigh, fps)\n elif method==\"butt\":\n filteredVideoPyr=buttFilterForVideoPyr(pyrVideo_, freqLow, freqHigh, fps)\n \n amplifiedPyr = amplifyTemporalMotionSignal(pyrVideo_, filteredVideoPyr, alpha, lambdaC, chromAttenuation)\n recreateFrames=recreateVideoFromLapPyr(amplifiedPyr)\n \n return recreateFrames", "def motionDetector(frame, first_frame):\n\n detector = MotionDetection(status=0)\n\n # convert the color frame to gray frame as an extra layer of color\n # is not required\n detector.gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # convert gray scale image to GaussianBlur\n detector.gray = cv2.GaussianBlur(detector.gray, (21, 21), 0)\n\n # set first frame as the baseline frame\n if first_frame[0] is None:\n first_frame[0] = detector.gray\n return False, detector.status\n\n # calculate difference between static background and current frame\n detector.delta_frame = cv2.absdiff(first_frame[0], detector.gray)\n\n # apply the threshold\n detector.thresh_frame = cv2.threshold(detector.delta_frame, 30, 255, cv2.THRESH_BINARY)[1]\n\n # dilate the Threshold Frame and find pixel contours in it\n detector.thresh_frame = cv2.dilate(detector.thresh_frame, None, iterations=3)\n\n # find contours in the frame\n contours, _ = cv2.findContours(\n detector.thresh_frame.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n for contour in contours:\n if cv2.contourArea(contour) < 10000:\n continue\n detector.status = 1\n\n return True, detector.status", "def stabilization(videopath,\n smoothing_radius=50,\n fixed_area=[0, -1, 0, -1],\n stab_points=200):\n\n # Read original video\n # Extract directory and videoname for saving purposes\n capture = cv2.VideoCapture(videopath)\n directory, videoname = os.path.split(videopath)\n videoname = os.path.splitext(videoname)[0]\n\n # Get number of frames\n # Get width and height of video stream\n # Get frames per second (fps)\n n_frames = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))\n w = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))\n h = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = capture.get(cv2.CAP_PROP_FPS)\n\n # Set up output video\n out = cv2.VideoWriter(os.path.join(directory, str(videoname)+'_stabilized.mp4'),\n -1,\n fps,\n (w, h))\n\n # Read first frame\n # Convert frame to grayscale and extract part of image\n _, prev = capture.read()\n xmin, xmax, ymin, ymax = fixed_area\n prev_gray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)[ymin:ymax, xmin:xmax]\n\n # Pre-define 
transformation-store array\n transforms = np.zeros((n_frames-1, 3), np.float32)\n\n # Filling in transformation array per frameset\n with tqdm(total=2*(n_frames-2), ncols=50) as pbar:\n for i in range(n_frames-2):\n # Detect feature points in previous frame\n prev_pts = cv2.goodFeaturesToTrack(prev_gray,\n maxCorners=stab_points,\n qualityLevel=0.1,\n minDistance=100,\n blockSize=10)\n # Read next frame\n # If not success: break loop\n success, curr = capture.read()\n if not success:\n break\n\n # Convert to grayscale and extract part of image\n # Calculate optical flow (i.e. track feature points)\n # Sanity check\n curr_gray = cv2.cvtColor(curr, cv2.COLOR_BGR2GRAY)[ymin:ymax,\n xmin:xmax]\n curr_pts, status, err = cv2.calcOpticalFlowPyrLK(prev_gray,\n curr_gray,\n prev_pts,\n None)\n assert prev_pts.shape == curr_pts.shape\n\n # Filter only valid points\n idx = np.where(status == 1)[0]\n prev_pts = prev_pts[idx]\n curr_pts = curr_pts[idx]\n\n # Find transformation matrix\n m = cv2.estimateAffinePartial2D(prev_pts, curr_pts)[0]\n\n # Extract translation\n # Extract rotation angle\n dx = m[0, 2]\n dy = m[1, 2]\n da = np.arctan2(m[1, 0], m[0, 0])\n\n # Store transformation\n transforms[i] = [dx, dy, da]\n\n # Move to next frame\n prev_gray = curr_gray\n\n pbar.update(1)\n\n # Compute trajectory using cumulative sum of transformations\n # Create variable to store smoothed trajectory\n # Calculate diference in smoothed_trajectory and trajectory\n trajectory = np.cumsum(transforms, axis=0)\n smoothed_trajectory = smooth(trajectory, smoothing_radius)\n difference = smoothed_trajectory - trajectory\n\n # Calculate newer transformation array\n transform_smooth = transforms + difference\n\n # Reset stream to first frame\n capture.set(cv2.CAP_PROP_POS_FRAMES, 0)\n\n # Apply transformations to video\n for i in range(n_frames-2):\n # Read next frame\n success, frame = capture.read()\n if not success:\n break\n\n # Extract transformations from the new transformation array\n dx = transform_smooth[i, 0]\n dy = transform_smooth[i, 1]\n da = transform_smooth[i, 2]\n\n # Reconstruct transformation matrix accordingly to new values\n m = np.zeros((2, 3), np.float32)\n m[0, 0] = np.cos(da)\n m[0, 1] = -np.sin(da)\n m[1, 0] = np.sin(da)\n m[1, 1] = np.cos(da)\n m[0, 2] = dx\n m[1, 2] = dy\n\n # Apply affine wrapping to the given frame\n # Fix border artifacts\n frame_stabilized = cv2.warpAffine(frame, m, (w, h))\n frame_stabilized = fixBorder(frame_stabilized)\n\n # Save new frame\n out.write(frame_stabilized)\n\n pbar.update(1)\n\n # Release original and stabilized video\n capture.release()\n out.release()", "def get_motion(frame1k, frame2k, frame_count):\n frame1 = frame1k.copy()\n frame2 = frame2k.copy()\n\n global limb_coords, init_coords, num_blocks\n cv2.imwrite(\"thisImageAnalyse.png\", frame2)\n block_size = 3\n block_rad = int(block_size/2)\n\n def get_SSD():\n \"\"\" applies SSD formula to search area\n :return SSD value\"\"\"\n dist = 0\n # traversal of pixels in potential Bi+1 block\n # compare corresponding pixel positions with source block in f1 and neighbour block in f2\n y1 = center_y1 - block_rad # start pos.\n for y2 in range(center_y2 - block_rad, (center_y2 - block_rad + block_size)):\n x1 = center_x1 - block_rad # start pos\n for x2 in range(center_x2 - block_rad, (center_x2 - block_rad + block_size)):\n try:\n # displacement formula for RGB channels of each pixel in block\n dist = dist + (frame1[y1][x1][0] - frame2[y2][x2][0])**2 + (frame1[y1][x1][1] - frame2[y2][x2][1])**2 + 
(frame1[y1][x1][2] - frame2[y2][x2][2])**2\n except RuntimeWarning:\n pass\n x1 += 1\n y1 += 1\n return math.sqrt(dist)\n\n # for each body part\n b = 0\n while b < 5:\n avg_x = 0.0\n avg_y = 0.0\n new_x = 0.0\n new_y = 0.0\n a = 0\n # for each block on body part (9 total)\n while a < num_blocks:\n found = False\n search_rad = 5\n while found is False:\n center_y1 = int(init_coords[b][a][0])\n center_x1 = int(init_coords[b][a][1])\n min_SSD = 999999\n # for pythagoras to ensure closest block gets picked when equality occurs of SSD value\n min_d = 999999\n # this finds the center of the block to compare\n for factor_y in range(-search_rad, search_rad + 1):\n center_y2 = center_y1 + block_size*factor_y\n y_dist = center_y1 - abs(center_y2)\n for factor_x in range(-search_rad, search_rad + 1):\n center_x2 = center_x1 + block_size*factor_x\n x_dist = center_x1 - abs(center_x2)\n # pythagoras\n d = math.sqrt((y_dist**2 + x_dist**2))\n if d < min_d:\n min_d = d\n\n SSD = get_SSD()\n if frame2[center_y2][center_x2][1] != 0 and frame2[center_y2][center_x2][2] != 0:\n found = True\n if SSD < min_SSD:\n min_SSD = SSD\n new_y = center_y2\n new_x = center_x2\n elif SSD == min_SSD and d < min_d:\n new_y = center_y2\n new_x = center_x2\n if found is False:\n # if no block is found repeat the search, increasing the search size by 1\n search_rad += 1\n # draw extracted vectors\n cv2.arrowedLine(frame1k, (int(center_x1), int(center_y1)), (int(new_x), int(new_y)), (150, 200, 30), 1, 4, 0, 0.3)\n avg_x += new_x\n avg_y += new_y\n init_coords[b][a][0] = new_y\n init_coords[b][a][1] = new_x\n a += 1\n cv2.imwrite('monkeyFrames/contrast_enhanced%d.png' % frame_count, frame1k)\n limb_coords[b][frame_count][0] = int(avg_y/num_blocks)\n limb_coords[b][frame_count][1] = int(avg_x/num_blocks)\n b += 1", "def object_detection(): # needs to be modified so definition can be called as part of main function\r\n green_lower = (29, 86, 6) # define the lower boundaries of the \"green\"\r\n green_upper = (64, 255, 255) # define the upper boundaries of the \"green\"\r\n pts = deque(maxlen=args[\"buffer\"]) # ball in the HSV color space, then initialize the list of tracked points\r\n\r\n if not args.get(\"video\", False): # if a video path was not supplied, grab the reference to the picam\r\n vs = VideoStream(usePiCamera=args[\"picamera\"] > 0).start()\r\n else: # otherwise, grab a reference to the video file\r\n vs = cv2.VideoCapture(args[\"video\"])\r\n time.sleep(2.0) # allow the camera or video file to warm up\r\n while True: # keep looping\r\n frame = vs.read() # grab the current frame\r\n frame = frame[1] if args.get(\"video\", False) else frame # handle the frame from VideoCapture or VideoStream\r\n if frame is None: # if viewing video and did not grab frame, then reached end of video\r\n break\r\n frame = imutils.resize(frame, width=600) # resize the frame\r\n blurred = cv2.GaussianBlur(frame, (11, 11), 0) # blur it\r\n hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV) # and convert it to the HSV color space\r\n\r\n mask = cv2.inRange(hsv, green_lower, green_upper) # construct a mask for the color \"green\"\r\n mask = cv2.erode(mask, None, iterations=2) # then perform a series of erosions\r\n mask = cv2.dilate(mask, None, iterations=2) # and dilations to remove any small blobs left in the mask\r\n\r\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,\r\n cv2.CHAIN_APPROX_SIMPLE) # find contours in the mask\r\n cnts = imutils.grab_contours(cnts)\r\n center = None # and initialize the current (x, y) center of the 
ball\r\n\r\n if len(cnts) > 0: # only proceed if at least one contour was found\r\n c = max(cnts, key=cv2.contourArea) # find the largest contour in the mask\r\n ((x, y), radius) = cv2.minEnclosingCircle(c) # then use it to compute minimum enclosing circle and centroid\r\n M = cv2.moments(c) # calculate moments\r\n center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"])) # use moment to find centroid in x,y\r\n if radius > 10: # only proceed if the radius meets a minimum size\r\n cv2.circle(frame, (int(x), int(y)), int(radius),\r\n (0, 255, 255), 2) # draw the circle\r\n cv2.circle(frame, center, 5, (0, 0, 255), -1) # draw the centroid\r\n object_tracking(int(x), int(y)) # update the list of tracked points\r\n\r\n pts.appendleft(center) # update the points queue\r\n for i in range(1, len(pts)): # loop over the set of tracked points\r\n if pts[i - 1] is None or pts[i] is None: # if either of the tracked points are None, ignore them\r\n continue\r\n thickness = int(np.sqrt(args[\"buffer\"] / float(i + 1)) * 2.5) # otherwise, compute thickness of line\r\n cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness) # draw the connecting lines\r\n\r\n cv2.imshow(\"Frame\", frame) # show the frame to our screen\r\n key = cv2.waitKey(1) & 0xFF\r\n if key == ord(\"q\"): # if the 'q' key is pressed, stop the loop\r\n break\r\n\r\n if not args.get(\"video\", False): # if we are not using a video file, stop the camera video stream\r\n vs.stop()\r\n else: # otherwise, release the camera\r\n vs.release()\r\n cv2.destroyAllWindows() # close all windows\r", "def compute_motion(mov, max_shift=(25,25), template=np.median, template_matching_method=None, resample=4, verbose=True):\n if template_matching_method is None and cv2 is not None:\n template_matching_method = cv2.TM_CCORR_NORMED\n \n # Parse movie\n mov = mov.astype(np.float32) \n n_frames,h_i, w_i = mov.shape\n\n # Parse max_shift param\n if type(max_shift) in [int,float]:\n ms_h = max_shift\n ms_w = max_shift\n elif type(max_shift) in [tuple, list, np.ndarray]:\n ms_h,ms_w = max_shift\n else:\n raise Exception('Max shift should be given as value or 2-item list')\n \n # Parse/generate template\n if callable(template):\n movr = mov.resample(resample)\n with Progress(msg='Computing template', verbose=verbose):\n template=template(movr,axis=0) \n elif not isinstance(template, np.ndarray):\n raise Exception('Template parameter should be an array or function')\n template_uncropped = template.astype(np.float32)\n template=template_uncropped[ms_h:h_i-ms_h,ms_w:w_i-ms_w]\n h,w = template.shape\n \n vals = np.zeros([n_frames,3])\n if verbose: \n print('Computing shifts:'); sys.stdout.flush()\n pbar = ProgressBar(maxval=n_frames).start()\n\n for i,frame in enumerate(mov):\n\n if verbose: \n pbar.update(i) \n\n if cv2 is not None:\n res = cv2.matchTemplate(frame, template, template_matching_method)\n avg_metric = np.mean(res)\n top_left = cv2.minMaxLoc(res)[3]\n elif cv2 is None:\n res = match_template(frame, template)\n avg_metric = np.mean(res)\n top_left = np.unravel_index(np.argmax(res), res.shape)\n\n ## from hereon in, x and y are reversed in naming convention\n sh_y,sh_x = top_left\n \n if (0 < top_left[1] < 2 * ms_h-1) & (0 < top_left[0] < 2 * ms_w-1):\n # if max is internal, check for subpixel shift using gaussian peak registration\n log_xm1_y = np.log(res[sh_x-1,sh_y]) \n log_xp1_y = np.log(res[sh_x+1,sh_y]) \n log_x_ym1 = np.log(res[sh_x,sh_y-1]) \n log_x_yp1 = np.log(res[sh_x,sh_y+1]) \n four_log_xy = 4*np.log(res[sh_x,sh_y])\n \n 
sh_x_n = -(sh_x - ms_h + (log_xm1_y - log_xp1_y) / (2 * log_xm1_y - four_log_xy + 2 * log_xp1_y))\n sh_y_n = -(sh_y - ms_w + (log_x_ym1 - log_x_yp1) / (2 * log_x_ym1 - four_log_xy + 2 * log_x_yp1))\n else:\n sh_x_n = -(sh_x - ms_h)\n sh_y_n = -(sh_y - ms_w)\n\n # NOTE: to correct for reversal in naming convention, vals are placed y, x -- but their meaning is x,y\n vals[i,:] = [sh_y_n, sh_x_n, avg_metric] # X , Y\n \n if verbose: \n pbar.finish() \n\n return template_uncropped, vals", "def analyze_movie(\n video_path, aspect_ratio=0, palette_size=32, frames=-1, step=1, show_frames=False, show_last_frame=False, color_format='hex'\n):\n\n # Parse video frame-by-frame\n vidcap = cv2.VideoCapture(video_path)\n success, image = vidcap.read()\n pil_img = None\n count = 0\n while success and frames == -1 or count < frames:\n if count % step == 0:\n # Convert to PIL image\n img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n pil_img = Image.fromarray(img)\n\n # Crop frame to remove border\n if aspect_ratio != 0:\n width, height = pil_img.size\n left = 0\n right = width\n content_height = 1/aspect_ratio * width\n border = (height - content_height) * 0.5\n top = border\n bottom = border + content_height\n pil_img = pil_img.crop((left, top, right, bottom))\n\n # Get primary color\n main_color = get_primary_color(\n pil_img, palette_size, show_img=show_frames)\n\n if color_format == 'hex':\n main_color = rgbToHex(main_color)\n \n print(main_color)\n\n # Attempt to read next frame\n success, image = vidcap.read()\n count += 1\n\n if show_last_frame:\n pil_img.show()", "def background_subtraction(frame, background):\n \n gray_sample = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n if debug:\n plt.imshow(fixColor(gray_sample))\n dframe = cv2.absdiff(gray_sample, background)\n return dframe", "def findBackground(frames):\n\n n = 100 # Number of frames to use\n i = 1\n background = frames[0]\n # Read each frame and do a weighted sum\n # beta is the weight for each new frame which starts at 1/2 and\n # gets smaller\n # alpha is the weight of the sum and starts at 1/2 and gets larger\n while(i < n):\n i += 1\n frame = frames[i]\n beta = 1.0 / (i + 1)\n alpha = 1.0 - beta\n background = cv2.addWeighted(background, alpha, frame, beta, 0.0)\n\n if VISUALIZE:\n cv2.imshow('estimated background', background)\n cv2.waitKey(3000)\n cv2.destroyAllWindows()\n\n return background", "def record_project():\n # open the video and save the frame and return the fW,fH and the frame\n frame = video_handle_for_demo()\n\n # detect the blue square and resize the frame\n image = detect_and_rotate(frame)\n if image is None:\n return [-100, -100, 0]\n\n fW, fH, _ = image.shape\n\n # detect both yellow and green square for further angle and center computation\n x2g, y2g, xfg, yfg, frameg = frame_analysis_green(fW, fH, image)\n\n x2y, y2y, xfy, yfy, framey = frame_analysis_yellow(fW, fH, image)\n\n # Correct the coordinate to have them in the true axis\n # x2_ are the coordinate in grid referential\n # xf_ are the coordinate in pixel of the resized image referential\n\n x2y = xfy\n x2g = xfg\n y2g = yfg\n y2y = yfy\n\n # Compute the thymio center in grid's coordinate\n xc = (x2g + x2y) / 2\n yc = (y2g + y2y) / 2\n print(\"Picture of thymio with computed center represented by a green dot\")\n cv2.circle(image, (round(xc), round(yc)), 4, (255, 255, 0), -1)\n plt.figure()\n plt.imshow(image[:, :, ::-1])\n plt.show()\n\n ratio = (gw / fH, gh / fW)\n\n xfg_temp = fW - (fH - yfg)\n yfg = xfg\n xfg = xfg_temp\n\n xfy_temp = fW - (fH - yfy)\n yfy = 
xfy\n xfy = xfy_temp\n\n # Compute the angle thymio has\n angle = give_thymio_angle(image, xfy, yfy, xfg, yfg)\n\n x2g = x2g * ratio[0]\n x2y = x2y * ratio[0]\n y2g = y2g * ratio[1]\n y2y = y2y * ratio[1]\n\n # compute the center of the thymio & gives thymio angle\n xc = (x2g + x2y) / 2\n yc = (y2g + y2y) / 2\n\n # plot the image with the drawings and print the X,Y coordinate and the angle\n xc = xc - 2.5\n yc = yc - 2.5\n yc = 72.5 - yc\n\n return [xc, yc, angle], image", "def __init__(self):\n self.active = True # Camera activation control\n self.stream = cv2.VideoCapture(0) # Open video stream\n while not self.stream.isOpened():\n pass\n _,self.image = self.stream.read()# Save the first frame\n cv2.waitKey(10)\n self.frame = self.image[196:304,:546,:]# Cropped frame\n self.diff_frame = self.frame\n# self.reference_frame = copy.deepcopy(self.frame)\n# self.abs_diff_frame = copy.deepcopy(self.frame)\n self.reference_frame = self.frame\n self.abs_diff_frame = self.frame\n self.frame_count = 1 # Used for framerate estimation\n self.frame_rate = 0\n self.tic = time()", "def detect_velocity(image):\n nonlocal prev, v_last\n curr_bgr = cv.warpPerspective(image, M, (160, 120))\n curr = cv.cvtColor(curr_bgr, cv.COLOR_BGR2GRAY)\n\n if prev is None:\n prev = curr\n v_last = 0.0\n return v_last, curr_bgr, np.zeros_like(image)\n\n flow = cv.calcOpticalFlowFarneback(\n prev, # Previous image\n curr, # Current image\n None, # Computed flow image that has the same size oas prev and type CV_32FC2.\n 0.5, # Specifies the image scale (<1) to build pyramids for each image.\n 3, # Number of pyramid layers including the initial image.\n 15, # winsize, averaging windows size.\n 3, # iterations, number of iterations the algorithm does at each pyramid level.\n 5, # standard deviation of the Gaussian that is used to smooth derivative\n 1.5,\n 0)\n\n mag, ang = cv.cartToPolar(flow[..., 0], flow[..., 1])\n\n v = mag * np.sin(ang)\n\n ######################\n ## Histogram for mag\n ar = np.arange(-20.0, 20.0, 0.50, dtype=np.float)\n his = np.histogram(v, bins=ar)\n\n for i, n in enumerate(his[0]):\n bgr = (255, 255, 0)\n if his[1][i] < 0:\n bgr = (0, 255, 255)\n\n #print('[{}] {} - {}'.format(i, n, his[1][i]))\n cv.rectangle( image, #curr_bgr,\n (i*2, HEIGHT),\n (i*2, HEIGHT - int(n / 10)),\n bgr, #(0, 255, 255),\n cv.FILLED)\n\n hsv = np.zeros_like(image)\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 1] = 255\n hsv[..., 2] = cv.normalize(np.abs(v), None, 0, 255, cv.NORM_MINMAX)\n hsv_bgr = cv.cvtColor(hsv, cv.COLOR_HSV2BGR)\n ##\n ######################\n\n v_abs = np.absolute(v)\n v = v[v_abs >= np.percentile(v_abs, VELOCITY_CUTOFF_PCT)]\n\n v_max = v_last + MAX_ACC\n v_min = v_last - MAX_ACC\n v = np.clip(v, v_min, v_max)\n if v.size > 0:\n v_avg = v.mean()\n else:\n if v_last > 0:\n v_avg = max(v_last - MAX_ACC, 0)\n elif v_last < 0:\n v_avg = min(v_last + MAX_ACC, 0)\n else:\n v_avg = 0\n\n prev = curr\n v_last = v_avg\n return v_last, curr_bgr, hsv_bgr", "def track(self, frame, init_pos, frame_ctr=1):\n distance_list = []\n self.tracker.init(frame, tuple(init_pos))\n init_center_pos = int((init_pos[3] / 2) + init_pos[1])\n\n if self.debug_mode:\n # Created window to display video\n cv2.namedWindow('Barbell_Tracker', cv2.WINDOW_NORMAL)\n\n while(self.video.isOpened()):\n \"\"\"\n Based off: https://pythonprogramming.net/haar-cascade-object-detection-python-opencv-tutorial/\n Using tracker instead of constant detection\n \"\"\"\n # Read a frame\n retval, frame = self.video.read()\n if not retval:\n break\n 
frame_ctr += 1\n\n # Update the tracker\n retval, current_pos = self.tracker.update(frame)\n if not retval:\n raise Exception('Could not update tracker')\n\n # Find center of current position\n current_center_pos = int((current_pos[3] / 2) + current_pos[1])\n\n # Log distance moved\n distance_moved = init_center_pos - current_center_pos\n distance_list.append(distance_moved)\n logging.info('Frame {}. Distance moved: {}'.format(frame_ctr, distance_moved))\n\n if self.debug_mode:\n # Draw inital position\n cv2.rectangle(frame, (int(init_pos[0]), int(init_pos[1])),\n (int(init_pos[0] + init_pos[2]), int(init_pos[1] + init_pos[3])),\n (0, 255, 0), 1)\n # Draw rectangle over tracker position\n cv2.rectangle(frame, (int(current_pos[0]), int(current_pos[1])),\n (int(current_pos[0] + current_pos[2]), int(current_pos[1] + current_pos[3])),\n (0, 0, 255), 2)\n # Display distance moved\n cv2.putText(frame, 'Distance moved: {}'.format(distance_moved),\n (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)\n # Show frame\n cv2.imshow('Barbell_Tracker', frame)\n # Exit if q pressed\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n # Stop video when finished\n self.video.release()\n return distance_list", "def process(frame, fgbg, kernel, debug, ttrack, angle):\n\t# Rotate the image\n\timage = frame\n\timage = imutils.rotate(image, angle = angle)\n\t\t\n\t# Apply background subtraction and clean up noise\n\tfgmask = fgbg.apply(image)\n\tfgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)\n\tfgmask = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel)\n\tret, fgmask = cv2.threshold(fgmask,200,255,cv2.THRESH_BINARY)\n\t\t\n\t# Find the contours from the fgmask binary image\n\tcontours, hierarchy = cv2.findContours(fgmask.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n\t\n\t# Eliminate contours that are too small that likely come from noise and various\n\t# scene shifts. 
Also apply rectangles to the original image around contours if debug\n\tgoodcont = 0\n\tfor c in contours:\n\t\tif cv2.contourArea(c) > 600:\t\n\t\t\t(x, y, w, h) = cv2.boundingRect(c)\n\t\t\tif debug:\n\t\t\t\tcv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)\n\t\t\tgoodcont += 1\n\t\n\t# Checks for motion in the frame\n\tif goodcont > 0:\n\t\tismotion = True\n\telse:\n\t\tismotion = False\n\t\n\t# time stamp the frame\n\tfont = cv2.FONT_HERSHEY_SIMPLEX\n\tctime = time.strftime(\"%H:%M:%S\")\n\tif not ttrack:\n\t\tcv2.putText(image, ctime, (10, 450), font, 1.3,(255,255,255),3,cv2.LINE_AA) \n\telse:\n\t\tcv2.putText(image, ctime, (10, 450), font, 1.3,(0,255,0),3,cv2.LINE_AA) \n\treturn image, fgmask, ismotion", "def yolo_test_video(self):\n # Open the input video, blocking call\n inputVideo = cv2.VideoCapture(self.inputFile)\n\t\t\n # Get infomration about the input video\n codec = int(inputVideo.get(cv2.CAP_PROP_FOURCC))\n fps = int(inputVideo.get(cv2.CAP_PROP_FPS))\n frameWidth = int(inputVideo.get(cv2.CAP_PROP_FRAME_WIDTH))\n frameHeight = int(inputVideo.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\n # Open the output stream\n outputVideo = cv2.VideoWriter(self.outputFile,\n codec,\n fps,\n (frameWidth,frameHeight))\n frameIndex = inputVideo.get(cv2.CAP_PROP_POS_FRAMES)\n totalFrames = inputVideo.get(cv2.CAP_PROP_FRAME_COUNT)\n \t \n\tavgGrabTime = 0\n\tavgYoloTime = 0\n\tavgWriteTime = 0\n \n # For each frame in the video\n while True:\n \n startTime = time.time()\n \n # Calculate the time it takes to grab a frame\n startGrabTime = time.time()\n grabbed, frame = inputVideo.read()\n endGrabTime = time.time() \n\t avgGrabTime+=(endGrabTime-startGrabTime)\n\t \n\n if grabbed:\n\t\t\n # Calculate the time it takes to run YOLO pipeline \n\t\tstartYoloTime = time.time()\n annotatedFrame, predictedObjects = self.detect_from_image(frame)\n\t\tendYoloTime = time.time()\n\t\tavgYoloTime+= ( endYoloTime - startYoloTime)\n\n frameIndex = inputVideo.get(cv2.CAP_PROP_POS_FRAMES)\n \t\n\t\tcurrentTime = time.time()\n\t\telapsedTime = currentTime - startTime\n\t\tcurrentFPS = (1)/elapsedTime \n\t\t \t\n #cv2.rectangle(annotatedFrame, (0, 0), (30, 30), (0,0,0), -1)\n cv2.putText(\n annotatedFrame, 'FPS' + ': %.2f' % currentFPS,\n (15, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5,\n (255, 255, 255), 2\n )\n\t\t\n # Calculate the time it takes to write an annotated frame to video\n\t\tstartWriteTime = time.time()\n outputVideo.write(annotatedFrame)\n\t\tendWriteTime = time.time()\n\t\tavgWriteTime +=(endWriteTime - startWriteTime)\n\t\n else:\n inputVideo.set(cv2.CAP_PROP_POS_FRAMES, frameIndex-1)\n cv2.waitKey(100)\n\n if frameIndex==totalFrames:\n break\n\t\t\n inputVideo.release()\n outputVideo.release()\n cv2.destroyAllWindows()\n \n avgGrabTime/=totalFrames\n avgYoloTime/=totalFrames\n avgWriteTime/=totalFrames\n\n if self.verbose:\n print ('Average time for extracting compressed video frame : %.3f' %avgGrabTime)\n print ('Average time for YOLO object detection : %.3f' %avgYoloTime )\n print ('Average time for writing frame to video : %.3f' %avgWriteTime)" ]
[ "0.7056259", "0.6407131", "0.6285593", "0.61958647", "0.6153237", "0.60899645", "0.60573727", "0.6040458", "0.60244703", "0.5989936", "0.5976939", "0.59573466", "0.5945152", "0.59423107", "0.5914144", "0.5860365", "0.5854662", "0.58460635", "0.5825132", "0.57794726", "0.5724917", "0.56778157", "0.5647344", "0.5636294", "0.5630327", "0.56093806", "0.55841553", "0.5582614", "0.5567797", "0.55631185" ]
0.6411053
1
Handle attribute update from fan cluster.
def attribute_updated(self, attrid: int, value: Any, _: Any) -> None: attr_name = self._get_attribute_name(attrid) self.debug( "Attribute report '%s'[%s] = %s", self.cluster.name, attr_name, value ) if attr_name == "fan_mode": self.async_send_signal( f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}", attrid, attr_name, value )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _handle_coordinator_update(self) -> None:\n self._attr_is_on = self.relay.active\n self.async_write_ha_state()", "def _attr_updated(self, name, value):\n event = AttributeUpdateEvent(self, name, value)\n events.notify(event)", "def _update(self, device=None):\n self._attr_available = True\n self.schedule_update_ha_state(True)", "def _handle_coordinator_update(self) -> None:\n self._update_attrs()\n self.async_write_ha_state()", "def attribute_updated(self, attrid: int, value: Any, _: Any) -> None:\n attr_name = self._get_attribute_name(attrid)\n self.debug(\n \"Attribute report '%s'[%s] = %s\", self.cluster.name, attr_name, value\n )\n self.async_send_signal(\n f\"{self.unique_id}_{SIGNAL_ATTR_UPDATED}\",\n AttributeUpdateRecord(attrid, attr_name, value),\n )", "def OnAttributesUpdated():\n pass", "async def async_update(self):\n data = await nooa.get_data()\n self._state = data[\"0\"][self._unit][\"Scale\"]\n self.filter_attributes(data)", "def _async_update_attrs(self) -> None:\n self._attr_is_on = self._device.light_on\n if self._device.light_brightness is not None:\n self._attr_brightness = int(min(255, self._device.light_brightness * 16))", "def _handle_coordinator_update(self) -> None:\n _LOGGER.debug(f\"{self.name} updating\")\n self._value = self._data.wiserhub.system.away_mode_target_temperature\n # Support prior to 2022.7.0 Versions without deprecation warning\n if hasattr(self, \"_attr_value\"):\n self._attr_value = self._data.wiserhub.system.away_mode_target_temperature\n\n self.async_write_ha_state()", "async def async_update(self) -> None:\n await super().async_update()\n if self._on_off_cluster_handler:\n await self._on_off_cluster_handler.get_attribute_value(\n \"on_off\", from_cache=False\n )", "def _handle_coordinator_update(self) -> None:\n self._update_attrs()\n return super()._handle_coordinator_update()", "def _handle_coordinator_update(self) -> None:\n _LOGGER.debug(f\"{self.name} updating\")\n self._value = getattr(self._actuator.floor_temperature_sensor, self._name)\n # Support prior to 2022.7.0 Versions without deprecation warning\n if hasattr(self, \"_attr_value\"):\n self._attr_value = getattr(\n self._actuator.floor_temperature_sensor, self._name\n )\n\n self.async_write_ha_state()", "def _handle_coordinator_update(self) -> None:\n device = next(\n (\n device\n for device in self.coordinator.data\n if device[\"uid\"] == self.unique_id\n ),\n None,\n )\n if device is not None and \"state\" in device:\n state = device[\"state\"]\n self._attr_is_on = state[DEVICE_KEY_MAP[self._type]]\n super()._handle_coordinator_update()", "async def async_update(self) -> None:\n await super().async_update()\n self.error(\"Polling current state\")\n if self._cluster_handler:\n value = await self._cluster_handler.get_attribute_value(\n self._zcl_attribute, from_cache=False\n )\n await self._cluster_handler.get_attribute_value(\n self._zcl_inverter_attribute, from_cache=False\n )\n self.debug(\"read value=%s, inverted=%s\", value, self.inverted)", "def update(self, attribute: str, result: ProcessorResult) -> None:\n pass", "def _handle_coordinator_update(self) -> None:\n self._update_from_rest_data()\n self.async_write_ha_state()", "def process_IN_ATTRIB(self, event):", "def async_update_callback(self) -> None:\n self._attr_is_on = self._switch.on", "def update(self) -> None:\n try:\n sensor_type = self.entity_description.key\n if sensor_type == \"status\":\n self._attr_native_value = self.charger.getStatus()\n elif sensor_type == \"charge_time\":\n self._attr_native_value = 
self.charger.getChargeTimeElapsed() / 60\n elif sensor_type == \"ambient_temp\":\n self._attr_native_value = self.charger.getAmbientTemperature()\n elif sensor_type == \"ir_temp\":\n self._attr_native_value = self.charger.getIRTemperature()\n elif sensor_type == \"rtc_temp\":\n self._attr_native_value = self.charger.getRTCTemperature()\n elif sensor_type == \"usage_session\":\n self._attr_native_value = float(self.charger.getUsageSession()) / 1000\n elif sensor_type == \"usage_total\":\n self._attr_native_value = float(self.charger.getUsageTotal()) / 1000\n else:\n self._attr_native_value = \"Unknown\"\n except (RequestException, ValueError, KeyError):\n _LOGGER.warning(\"Could not update status for %s\", self.name)", "def _handle_coordinator_update(self) -> None:\n self._attr_is_on = self.entity_description.is_on_fn(self.coordinator.diffuser)\n super()._handle_coordinator_update()", "def set_attr(self, attr_name, value, indices=None):\n target_remotes = self._get_target_remotes(indices)\n for remote in target_remotes:\n remote.send(('set_attr', (attr_name, value)))\n for remote in target_remotes:\n remote.recv()", "def _update_device_attributes_on_backend(self):\n if self.is_paired:\n LOG.info('Sending updated device attributes to the backend...')\n try:\n api = DeviceApi()\n api.update_version()\n except Exception:\n self._notify_backend_down()", "def update(self):\n self.data.update()\n\n sensor_type = self.entity_description.key\n if sensor_type == \"light\":\n self._attr_native_value = self.data.light\n elif sensor_type == \"light_red\":\n self._attr_native_value = self.data.light_red\n elif sensor_type == \"light_green\":\n self._attr_native_value = self.data.light_green\n elif sensor_type == \"light_blue\":\n self._attr_native_value = self.data.light_blue\n elif sensor_type == \"accelerometer_x\":\n self._attr_native_value = self.data.accelerometer_x\n elif sensor_type == \"accelerometer_y\":\n self._attr_native_value = self.data.accelerometer_y\n elif sensor_type == \"accelerometer_z\":\n self._attr_native_value = self.data.accelerometer_z\n elif sensor_type == \"magnetometer_x\":\n self._attr_native_value = self.data.magnetometer_x\n elif sensor_type == \"magnetometer_y\":\n self._attr_native_value = self.data.magnetometer_y\n elif sensor_type == \"magnetometer_z\":\n self._attr_native_value = self.data.magnetometer_z\n elif sensor_type == \"temperature\":\n self._attr_native_value = self.data.temperature\n elif sensor_type == \"pressure\":\n self._attr_native_value = self.data.pressure\n elif sensor_type == \"voltage_0\":\n self._attr_native_value = self.data.voltage_0\n elif sensor_type == \"voltage_1\":\n self._attr_native_value = self.data.voltage_1\n elif sensor_type == \"voltage_2\":\n self._attr_native_value = self.data.voltage_2\n elif sensor_type == \"voltage_3\":\n self._attr_native_value = self.data.voltage_3", "def update(self, *args, **kwargs):", "def _handle_coordinator_update(self) -> None:\n self.update_from_latest_data()\n self.async_write_ha_state()", "def handle_climate_attribute_received(\n self, msg: ReceiveMessage, template_name: str, attr: str\n ) -> None:\n payload = self.render_template(msg, template_name)\n if not payload:\n _LOGGER.debug(\n \"Invalid empty payload for attribute %s, ignoring update\",\n attr,\n )\n return\n if payload == PAYLOAD_NONE:\n setattr(self, attr, None)\n get_mqtt_data(self.hass).state_write_requests.write_state_request(self)\n return\n try:\n setattr(self, attr, float(payload))\n 
get_mqtt_data(self.hass).state_write_requests.write_state_request(self)\n except ValueError:\n _LOGGER.error(\"Could not parse %s from %s\", template_name, payload)", "def update( ):\r\n pass", "def update_from_latest_data(self) -> None:\n self._attr_is_on = self.coordinator.data[self.entity_description.data_key]", "def _async_update_attrs(self) -> None:\n self._attr_is_on = self.entity_description.is_on(self._lock.state)", "def set_node_attribute(\n node: MatterNode,\n endpoint: int,\n cluster_id: int,\n attribute_id: int,\n value: Any,\n) -> None:\n attribute_path = f\"{endpoint}/{cluster_id}/{attribute_id}\"\n node.endpoints[endpoint].set_attribute_value(attribute_path, value)" ]
[ "0.64523953", "0.6417711", "0.64156264", "0.6376256", "0.6307701", "0.6217214", "0.61811453", "0.6129198", "0.61026025", "0.60620624", "0.6059136", "0.5902997", "0.5844933", "0.5842309", "0.5804179", "0.5798653", "0.5789843", "0.576516", "0.5699014", "0.56795084", "0.56300855", "0.56216383", "0.5612652", "0.56106645", "0.5607974", "0.55929637", "0.5581904", "0.55752236", "0.55741084", "0.556046" ]
0.72610325
0
Absolute maximum cooling setpoint.
def abs_max_cool_setpoint_limit(self) -> int: return self.cluster.get("abs_max_cool_setpoint_limit", 3200)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def abs_min_cool_setpoint_limit(self) -> int:\n return self.cluster.get(\"abs_min_cool_setpoint_limit\", 1600)", "def absmax(self):\n raise NotImplementedError", "def abs_max_heat_setpoint_limit(self) -> int:\n return self.cluster.get(\"abs_max_heat_setpoint_limit\", 3000)", "def _maximum(self) -> float:\n if self._type == \"power\":\n return 5.0\n elif self._type == \"setpoint\":\n return self._product.get_data_config_json()[\"_value_setpoint_max\"]\n elif self._type == \"fan1\":\n fan = 1\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]\n elif self._type == \"fan2\":\n fan = 2\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]\n elif self._type == \"fan3\":\n fan = 3\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n (((fan - 1) * 2) + 1)\n ]", "def argmaxY( self ):\n max = -1e30\n for i in range( 0, self.GetN() ):\n p = ( ROOT.Double(), ROOT.Double() )\n self.GetPoint( i, p[0], p[1] )\n if p[1] > max: max = p[1]\n return max", "def max_score(self):\n return self.points", "def abs_min_heat_setpoint_limit(self) -> int:\n return self.cluster.get(\"abs_min_heat_setpoint_limit\", 700)", "def peak(self) -> Point:\n return self.most_intense_point()", "def maxx(self):\n return self.__maxx", "def cmax(self):\n return self['cmax']", "def cmax(self):\n return self[\"cmax\"]", "def max_temp(self):\n return 99", "def state_max(self) -> float:\n raise NotImplementedError", "def maxY(self):\n return max(self.gety())", "def get_max_temp(self):\n self.max_temp = self.domain[1] * 2", "def maxpoints(self):\n return self[\"maxpoints\"]", "def peak(self):\n pass", "def lambda_max(self):\n return const.b_wien / self.temperature", "def unoccupied_cooling_setpoint(self) -> int | None:\n return self.cluster.get(\"unoccupied_cooling_setpoint\")", "def set_max(calories, max_calories):\n return calories if calories > max_calories else max_calories", "def occupied_cooling_setpoint(self) -> int | None:\n return self.cluster.get(\"occupied_cooling_setpoint\")", "def max_y_arg(self):\n return max((self(0).y,0), (self(1).y,1))[1]", "def max(self) -> float:\n return stats.max(self)", "def right(self) -> float:\n points = self.get_adjusted_points()\n x_points = [point[0] for point in points]\n return max(x_points)", "def set_Ec_max(self, x):\n x = float(x)\n if self.Ec_max != x:\n self.Ec_max = x\n self.Ec[1] = x", "def compute_max(self):\r\n self.x_max = self.ox + self.dx*self.nx\r\n self.y_max = self.oy + self.dy*self.ny\r\n self.z_max = self.oz + self.dz*self.nz", "def __abs__(self):\n abspos = abs(self.pos)\n absvel = abs(self.vel)\n return np.amax((abspos, absvel))", "def get_max_score(self):\r\n return sum(self.maxpoints.values())", "def _maximum(self) -> float:\n return self._config[CONF_MAX]", "def getMaxX(self):\n return self.maxx" ]
[ "0.7390342", "0.6880755", "0.6699675", "0.66157746", "0.6585473", "0.64992166", "0.64724123", "0.6445962", "0.6444601", "0.63723814", "0.63416517", "0.63181305", "0.62716085", "0.6267244", "0.62624335", "0.62495303", "0.6168795", "0.61668366", "0.61656046", "0.6160874", "0.6120391", "0.61001647", "0.6094203", "0.60913557", "0.60833025", "0.6078252", "0.606603", "0.60546184", "0.60492444", "0.6041777" ]
0.75471944
0
Absolute minimum cooling setpoint.
def abs_min_cool_setpoint_limit(self) -> int: return self.cluster.get("abs_min_cool_setpoint_limit", 1600)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cx(x):\n return cw(x - global_min_x)", "def state_min(self) -> float:\n raise NotImplementedError", "def _minimum(self) -> float:\n if self._type == \"power\":\n return 1.0\n elif self._type == \"setpoint\":\n return self._product.get_data_config_json()[\"_value_setpoint_min\"]\n elif self._type == \"fan1\":\n fan = 1\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n ((fan - 1) * 2)\n ]\n elif self._type == \"fan2\":\n fan = 2\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n ((fan - 1) * 2)\n ]\n elif self._type == \"fan3\":\n fan = 3\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n ((fan - 1) * 2)\n ]", "def cmin(self):\n return self['cmin']", "def cmin(self):\n return self[\"cmin\"]", "def abs_min_heat_setpoint_limit(self) -> int:\n return self.cluster.get(\"abs_min_heat_setpoint_limit\", 700)", "def runmaxmin(self):\n import random\n random.seed(self.seed)\n mindist_ptolandmarkset = np.full(self.pointcloud.size, np.inf)\n self.subsetindices = []\n for i in xrange(self.subsetsize):\n if i == 0:\n selected_index = random.randint(0, self.pointcloud.size - 1)\n # update min for all the rest indices\n # update min for this index to 0.\n for z in xrange(self.pointcloud.size):\n # if z == selected_index:\n # mindist_ptolandmarkset[z] = 0.0\n # else:\n mindist_ptolandmarkset[z] = self.pointcloud.distmat[selected_index][z]\n else:\n selected_index = np.argmax(mindist_ptolandmarkset)\n # update minimum distance for all points\n for z in xrange(self.pointcloud.size):\n mindist_ptolandmarkset[z] = min(mindist_ptolandmarkset[z],\n self.pointcloud.distmat[selected_index][z])\n\n self.subsetindices.append(selected_index)\n\n self.subsetpointcloud = pc.PointCloud(self.pointcloud.points[self.subsetindices])", "def minimum_clearance(self):\n ...", "def getMinX(self):\n return self.minx", "def MINET(self):", "def minX(self):\n return min(self.getx())", "def _minimum(self) -> float:\n return self._config[CONF_MIN]", "def set_min(self, min):\n self.set_val((min, self.val[1]))", "def min(x):\n pass", "def min_temp(self):\n return 1", "def set_Ec_min(self, x):\n x = float(x)\n if self.Ec_min != x:\n self.Ec_min = x\n self.Ec[0] = x", "def argminX( self ):\n min = 1e30\n minX = None\n for i in range( 0, self.GetN() ):\n p = ( ROOT.Double(), ROOT.Double() )\n self.GetPoint( i, p[0], p[1] )\n if p[1] < min:\n min = p[1]\n minX = p[0]\n return minX", "def unoccupied_cooling_setpoint(self) -> int | None:\n return self.cluster.get(\"unoccupied_cooling_setpoint\")", "def occupied_cooling_setpoint(self) -> int | None:\n return self.cluster.get(\"occupied_cooling_setpoint\")", "def getmin(self):\n\n return self.X", "def minimum(x, y):\r\n # see decorator for function body\r", "def vmin(self):\n return self._vmin", "def min(self) -> float:\n return stats.min(self)", "def getXmin(self):\n return min(self.p1.x, self.p2.x)", "def lower_bound(self) -> float:\n ...", "def abs_max_cool_setpoint_limit(self) -> int:\n return self.cluster.get(\"abs_max_cool_setpoint_limit\", 3200)", "def min(self):\n return self.__min", "def critical_point(self, cp_override: Optional[CriticalPoint] = None) -> Point:\n ...", "def min(self):\n return min(self)", "def native_min_value(self) -> float:\n return TEMP_MINIMUM" ]
[ "0.6725199", "0.6523529", "0.64395744", "0.64117193", "0.64096063", "0.6405297", "0.6404757", "0.6319182", "0.6316694", "0.6291801", "0.62867165", "0.6276625", "0.6271663", "0.6258228", "0.6251705", "0.6247299", "0.6238187", "0.62373286", "0.6215882", "0.6213482", "0.617138", "0.6129936", "0.6100521", "0.609555", "0.609468", "0.6090438", "0.6044157", "0.6037588", "0.6029165", "0.602701" ]
0.7301009
0
Test saving, reading and deleting a correctly formed calibration blob.
def test_save_delete_file( tmp_path: Path, calibration: typing.Dict[str, typing.Any] ) -> None: my_calpath = tmp_path / "calibrations" io.save_to_file(my_calpath, "my_calibration", calibration) file_to_delete = my_calpath / "my_calibration.json" assert io.read_cal_file(file_to_delete) io.delete_file(file_to_delete) with pytest.raises(FileNotFoundError): io.read_cal_file(file_to_delete)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_cereal():\n test_path = tempfile.mkdtemp()\n x_train, metadata = cereal(test_path)\n try:\n assert x_train.shape == (36, 4)\n except:\n shutil.rmtree(test_path)\n raise()", "def test_malformed_calibration(\n tmp_path: Path, malformed_calibration: typing.Dict[str, typing.Any]\n) -> None:\n malformed_calibration_dir = tmp_path / \"calibrations\"\n malformed_calibration_path = malformed_calibration_dir / \"my_bad_calibration.json\"\n\n # TODO (lc 10-27-2022) We don't actually throw an error when we're saving bad calibration data.\n # Probably before this point, we should make sure that we're passin in a validated pydantic\n # model because otherwise we could potentially be saving malformed data which would fail on\n # a read file.\n io.save_to_file(\n malformed_calibration_dir, \"my_bad_calibration\", malformed_calibration\n )\n\n malformed_calibration_dir.mkdir(parents=True, exist_ok=True)\n malformed_calibration_path.write_text(\n json.dumps(malformed_calibration), encoding=\"utf-8\"\n )\n with pytest.raises(AssertionError):\n io.read_cal_file(malformed_calibration_path)", "def test_ceosal2():\n test_path = tempfile.mkdtemp()\n x_train, metadata = ceosal2(test_path)\n try:\n assert x_train.shape == (177, 15)\n except:\n shutil.rmtree(test_path)\n raise()", "def test_save_calibration_essay(self):\r\n response = self.peer_grading.save_calibration_essay(self.save_dict)\r\n self.assertTrue(response['success'])", "def test_cv_created(self):\n\n\t\t# Pass a cv file object to CVResume to save to db/disk\n\t\tCVResume.objects.create(\n\t\t\tuser=self.user,\n\t\t\tcv=self.cv\n\t\t)\n\t\tself.cv.open() # Reopen the cv object - set seek to 0\n\t\tcv_resume = CVResume.objects.get(pk=1)\n\t\tself.assertEqual(\n\t\t\tself.cv.read(), cv_resume.cv.read(), # Compare file like objects\n\t\t\t'Created cv does not match the provided one.'\n\t\t)", "def test_coo_roi_dump_single(self, data, expected_coo):\n data.update(additional_roi_json_data)\n data.update({\"exclusion_labels\": [], \"id\": 42})\n rois = SparseAndDenseROISchema().load(data)\n expected_data = data.copy()\n expected_data.update({\"coo_roi\": expected_coo})\n np.testing.assert_array_equal(\n expected_data[\"coo_roi\"].toarray(), rois[\"coo_roi\"].toarray())", "def test_consitency_manual(self):\n name = os.path.basename(self.cbf_filename)\n obj = fabio.open(self.cbf_filename)\n new = fabio.cbfimage.cbfimage(data=obj.data, header=obj.header)\n new.write(os.path.join(self.tempdir, name))\n other = fabio.open(os.path.join(self.tempdir, name))\n self.assertEqual(abs(obj.data - other.data).max(), 0, \"data are the same\")\n for key in obj.header:\n if key in[ \"filename\", \"X-Binary-Size-Padding\"]:\n continue\n self.assertTrue(key in other.header, \"Key %s is in header\" % key)\n self.assertEqual(obj.header[key], other.header[key], \"value are the same for key %s [%s|%s]\" % (key, obj.header[key], other.header[key]))", "def test_good_load(self):\n self.r0.save_to_file([self.r0, self.r1])\n objs = self.r0.load_from_file()\n self.assertEqual(str(objs[0]), '[Rectangle] (1) 0/0 - 2/3')\n self.assertEqual(str(objs[1]), '[Rectangle] (2) 0/0 - 4/6')", "def save_calibration(filename):\n pass", "def test(self):\n model = cropped_manual()\n model.image_id = 123\n model.time_stamp = 1547453775.2\n model.cropped_path = '/im/a/totally/real/cropped/path/i/swear.jpg'\n\n truncateTable('cropped_manual')\n dao = CroppedManualDAO(defaultConfigPath())\n\n self.assertEqual(dao.addImage(None), -1)\n\n resultingId = dao.addImage(model)\n 
self.assertIsNotNone(resultingId)\n self.assertNotEqual(resultingId, -1)", "def cam_calibration():\n # read all calibration images in a folder with similar names\n images = glob.glob('./camera_cal/calibration*.jpg')\n\n # calibrate camera and read object-points (3D), image points (2D) and image shape\n objpoints, imgpoints, img_shape = calibrate_camera(images)\n print(\"DONE: Camera calibration\")\n # save calibration parameters' pickle file\n save_calib_params(objpoints, imgpoints, img_shape)\n print(\"Calibration parameters pickle file saved \")", "def test_cv_and_resume_created(self):\n\t\tCVResume.objects.create(\n\t\t\tuser=self.user,\n\t\t\tcv=self.cv,\n\t\t\tresume=self.resume\n\t\t)\n\t\t# Set file positions to the begining\n\t\tself.cv.open()\n\t\tself.resume.open()\n\n\t\t# Retrieve the saved object\n\t\tcv_resume = CVResume.objects.get(pk=1)\n\n\t\t# Check if cv match\n\t\tself.assertEqual(\n\t\t\tself.cv.read(), cv_resume.cv.read(), # Compare file like objects\n\t\t\t'Created cv does not match the provided one.'\n\t\t)\n\n\t\t# Check if resume match\n\t\tself.assertEqual(\n\t\t\tself.resume.read(), cv_resume.resume.read(), # Compare file like objects\n\t\t\t'Created reusme does not match the provided one.'\n\t\t)", "def test_real_file(self):\n log.info('===== START TEST BYTE LOSS =====')\n\n # Recovered\n file_path = os.path.join(RESOURCE_PATH, '11079364_SNA_SNA.txt')\n\n stream_handle = open(file_path, MODE_ASCII_READ)\n\n self.create_parser(stream_handle, telem_flag=False)\n\n particles = self.parser.get_records(182)\n\n log.debug(\"*** test_real_file Num particles %s\", len(particles))\n\n # check all the values against expected results.\n self.assert_particles(particles, '11079364_SNA_SNA_recov.yml', RESOURCE_PATH)\n self.assertEquals(self.exception_callback_value, [])\n stream_handle.close()\n\n # Telemetered\n file_path = os.path.join(RESOURCE_PATH, '11079419_SNA_SNA.txt')\n\n stream_handle = open(file_path, MODE_ASCII_READ)\n\n self.create_parser(stream_handle)\n\n particles = self.parser.get_records(172)\n\n log.debug(\"*** test_real_file Num particles %s\", len(particles))\n\n # check all the values against expected results.\n self.assert_particles(particles, '11079419_SNA_SNA_telem.yml', RESOURCE_PATH)\n stream_handle.close()\n\n log.info('===== END TEST REAL FILE =====')", "def test_record_update_file(appctx, db, record_with_file_processed, obj_name, content):\n record = CernSearchRecord.get_record(record_with_file_processed.id)\n initial_file_name = \"hello.txt\"\n initial_file = record.files[initial_file_name].obj # type: ObjectVersion\n initial_file_content = record.files_content[initial_file_name].obj # type: ObjectVersion\n\n assert 1 == len(record.files)\n assert 1 == len(record.files_content)\n assert initial_file.file.readable is False\n assert initial_file.deleted is False\n assert initial_file_content.file.readable is True\n\n record.files[obj_name] = BytesIO(content)\n db.session.commit()\n\n # mimic file uploaded flow\n file_uploaded.send(record.files[obj_name].obj)\n\n record = CernSearchRecord.get_record(record.id)\n\n assert record[\"_bucket\"] == record.bucket_id\n assert record[\"_bucket_content\"] == record.bucket_content_id\n\n assert 1 == len(record.files)\n assert 1 == len(record.files_content)\n assert record.files[obj_name].obj.file.readable is False\n assert initial_file_content.file.readable is False\n\n # different file upload creates a delete marker\n if initial_file_name != obj_name:\n with raises(KeyError):\n 
record.files[initial_file_name]\n with raises(KeyError):\n record.files_content[initial_file_name]\n\n file_1 = record.files_content[obj_name]\n assert obj_name == file_1[\"key\"]\n\n storage = file_1.obj.file.storage() # type: FileStorage\n fp = storage.open(mode=READ_MODE_BINARY)\n\n try:\n assert content.decode() in json.load(fp)[\"content\"]\n finally:\n fp.close()", "def test_brainvision_data():\n assert_raises(IOError, read_raw_brainvision, vmrk_path)\n assert_raises(ValueError, read_raw_brainvision, vhdr_path, montage,\n preload=True, scale=\"foo\")\n with warnings.catch_warnings(record=True) as w: # event parsing\n raw_py = _test_raw_reader(\n read_raw_brainvision, vhdr_fname=vhdr_path, montage=montage,\n eog=eog)\n assert_true(all('parse triggers that' in str(ww.message) for ww in w))\n assert_true('RawBrainVision' in repr(raw_py))\n\n assert_equal(raw_py.info['highpass'], 0.)\n assert_equal(raw_py.info['lowpass'], 250.)\n\n picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')\n data_py, times_py = raw_py[picks]\n\n # compare with a file that was generated using MNE-C\n raw_bin = Raw(eeg_bin, preload=True)\n picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')\n data_bin, times_bin = raw_bin[picks]\n\n assert_array_almost_equal(data_py, data_bin)\n assert_array_almost_equal(times_py, times_bin)\n\n # Make sure EOG channels are marked correctly\n for ch in raw_py.info['chs']:\n if ch['ch_name'] in eog:\n assert_equal(ch['kind'], FIFF.FIFFV_EOG_CH)\n elif ch['ch_name'] == 'STI 014':\n assert_equal(ch['kind'], FIFF.FIFFV_STIM_CH)\n elif ch['ch_name'] in raw_py.info['ch_names']:\n assert_equal(ch['kind'], FIFF.FIFFV_EEG_CH)\n else:\n raise RuntimeError(\"Unknown Channel: %s\" % ch['ch_name'])\n\n # test loading v2\n read_raw_brainvision(vhdr_v2_path, eog=eog, preload=True,\n response_trig_shift=1000)", "def test_construct_and_write_metadata(tmp_path):\n\n prov = Provenance()\n prov.start_activity(\"test\")\n prov.finish_activity()\n prov_activity = prov.finished_activities[0]\n\n reference = meta.Reference(\n contact=meta.Contact(\n name=\"Somebody\", email=\"[email protected]\", organization=\"CTA Consortium\"\n ),\n product=meta.Product(\n description=\"An Amazing Product\",\n creation_time=\"2020-10-11 15:23:31\",\n data_category=\"S\",\n data_level=\"DL1\",\n data_association=\"Subarray\",\n data_model_name=\"Unofficial DL1\",\n data_model_version=\"1.0\",\n data_model_url=\"http://google.com\",\n format=\"hdf5\",\n ),\n process=meta.Process(_type=\"Simulation\", subtype=\"Prod3b\", _id=423442,),\n activity=meta.Activity.from_provenance(prov_activity.provenance),\n instrument=meta.Instrument(\n site=\"CTA-North\",\n class_=\"Array\",\n type_=\"Layout H1B\",\n version=\"1.0\",\n id_=\"threshold\",\n ),\n )\n\n ref_dict = reference.to_dict()\n assert ref_dict[\"CTA PRODUCT FORMAT\"] == \"hdf5\"\n\n import uuid # pylint: disable=import-outside-toplevel\n\n assert str(uuid.UUID(ref_dict[\"CTA PRODUCT ID\"])) == ref_dict[\"CTA PRODUCT ID\"]\n\n # check that we can write this to the header of a typical table file in multiple\n # formats:\n from astropy.table import Table # pylint: disable=import-outside-toplevel\n\n table = Table(dict(x=[1, 2, 3], y=[15.2, 15.2, 14.5]))\n table.meta = ref_dict\n for file_name in [tmp_path / \"test.fits\", tmp_path / \"test.ecsv\"]:\n table.write(file_name)\n\n # write to pytables file\n\n import tables # pylint: disable=import-outside-toplevel\n\n with tables.open_file(tmp_path / \"test.h5\", mode=\"w\") as h5file:\n 
meta.write_to_hdf5(ref_dict, h5file)", "def test_cambia_imagen_elimina_la_antigua(self):\n self.image_path = os.path.join(os.path.dirname(__file__), 'image_for_model2.jpg')\n image_path = self.image_obj.image.path\n self.image_obj.image = simple_uploaded_file(self.image_path)\n self.image_obj.save()\n\n self.assertNotEqual(image_path, self.image_obj.image.path)\n self.assertFalse(os.path.exists(image_path))", "def test_prepare_for_submission(vasp2w90_calc_and_ref, sandbox_folder):\n vasp_calc, reference = vasp2w90_calc_and_ref\n with pytest.raises(NotImplementedError):\n vasp_calc.prepare_for_submission(sandbox_folder)\n with managed_temp_object() as temp_object:\n vasp_calc.write_incar(temp_object)\n with open(temp_object, 'r', encoding='utf8') as result_incar_fo:\n assert result_incar_fo.readlines() == reference['incar']", "def test_save_method(self):\n\n models.storage.save()\n self.assertTrue(os.path.exists('file.json'))", "def test_save_to_file(self):\n rect = Rectangle(1, 1)\n types = (int, float, str, tuple, list, dict, bool)\n insts = [rect] + [Rectangle(1, 1, id=t()) for t in types]\n fname = 'Rectangle.json'\n try:\n remove(fname)\n except FileNotFoundError:\n pass\n self.assertIsNone(Rectangle.save_to_file(None))\n with open(fname) as ifile:\n self.assertEqual(ifile.read(), '[]')\n for index in range(len(insts)):\n self.assertIsNone(Rectangle.save_to_file(insts[index:]))\n with open(fname) as ifile:\n self.assertEqual(ifile.read(), Rectangle.to_json_string(\n [obj.to_dictionary() for obj in insts[index:]]\n ))", "def testSave(self):\n\n # Generate temp file path\n index = os.path.join(tempfile.gettempdir(), \"bm25\")\n os.makedirs(index, exist_ok=True)\n\n model = self.method(\"bm25\")\n model.save(index)\n model.load(index)", "def test_write_OPK_to_shp_file(self):\r\n arr_oris = [{'altitude': 53.534337, 'id': 'IMG_1468832894.185000000.jpg', 'easting': 657739.197431,\r\n 'pitch': -172.350586, 'heading': -75.622522, 'roll': -40.654833, 'northing': 6860690.284637}]\r\n\r\n # on export le shapefile a partir des donnees pour le tests\r\n write_OPK_to_shp_file(arr_oris,\r\n self.test_shapefile,\r\n b_export_view_dir=False)\r\n # on tests si la methode a exporte les fichiers\r\n # url: http://stackoverflow.com/questions/82831/how-to-check-whether-a-file-exists-using-python\r\n self.assertTrue(exists(self.test_shapefile))\r\n\r\n # lecture d'un shapefile\r\n r = shapefile.Reader(self.test_shapefile)\r\n # geometries\r\n shapes = r.shapes()\r\n # extraction de la listes des points\r\n list_points = shapes[0].points\r\n # 1 point definit dans le shapefile\r\n self.assertEqual(len(shapes), 1)\r\n # on tests le type de la shape stockee\r\n # url: http://www.esri.com/library/whitepapers/pdfs/shapefile.pdf\r\n # type == 1 => Shape type=Point\r\n self.assertEqual(shapes[0].shapeType, 1)\r\n # on utilise extract_center_dict_ori (qui est doctestee)\r\n self._raise_assert_on_np_is_close_all(list_points[0], extract_center_dict_ori(arr_oris[0])[:2])", "def test_save_and_load_svmlight_file(self):\n self.logger.info(\"Testing libsvm dataset loading and saving...\")\n\n test_file = fm.join(fm.abspath(__file__), \"myfile.libsvm\")\n\n # Cleaning test file\n try:\n fm.remove_file(test_file)\n except (OSError, IOError) as e:\n if e.errno != 2:\n raise e\n\n self.logger.info(\"Patterns saved:\\n{:}\".format(self.patterns))\n self.logger.info(\"Labels saved:\\n{:}\".format(self.labels))\n\n CDataLoaderSvmLight.dump(\n CDataset(self.patterns, self.labels), test_file)\n\n new_dataset = 
CDataLoaderSvmLight().load(test_file)\n\n self.assertFalse((new_dataset.X != self.patterns).any())\n self.assertFalse((new_dataset.Y != self.labels).any())\n\n # load data but now remove all zero features (colums)\n new_dataset = CDataLoaderSvmLight().load(\n test_file, remove_all_zero=True)\n\n self.logger.info(\"Patterns loaded:\\n{:}\".format(new_dataset.X))\n self.logger.info(\"Labels loaded:\\n{:}\".format(new_dataset.Y))\n self.logger.info(\n \"Mapping back:\\n{:}\".format(new_dataset.header.idx_mapping))\n\n self.assertTrue(new_dataset.X.issparse)\n self.assertTrue(new_dataset.Y.isdense)\n self.assertTrue(new_dataset.header.idx_mapping.isdense)\n\n # non-zero elements should be unchanged\n self.assertEqual(self.patterns.nnz, new_dataset.X.nnz)\n new_nnz_data = new_dataset.X.nnz_data\n self.assertFalse((self.patterns.nnz_data != new_nnz_data.sort()).any())\n\n # With idx_mapping we should be able to reconstruct original data\n original = CArray.zeros(self.patterns.shape, sparse=True)\n original[:, new_dataset.header.idx_mapping] = new_dataset.X\n self.assertFalse((self.patterns != original).any())\n\n # Cleaning test file\n try:\n fm.remove_file(test_file)\n except (OSError, IOError) as e:\n if e.errno != 2:\n raise e", "def test_fs_instance(self):\n b1 = BaseModel()\n models.storage.save()\n self.assertEqual(os.path.exists('file.json'), True)", "def test_write(self):\n cases = {\n self.test_eac + \"NE00401.xml\": True,\n self.test_eac + \"NE01501.xml\": False,\n self.test_eac + \"NE01302.xml\": True,\n }\n metadata_url = 'http://www.example.com/metadata.xml'\n presentation_url = 'http://www.example.com/presentation.html'\n for case in cases:\n doc = EacCpf.EacCpf(case, metadata_url, presentation_url)\n self.assertNotEqual(doc, None)\n path = doc.write(self.temp)\n self.assertEquals(os.path.exists(path), True)\n # read the file and try to extract the attributes\n try:\n tree = etree.parse(path)\n ns = {\n EacCpf.DOC_KEY: EacCpf.DOC_NS,\n EacCpf.ESRC_KEY: EacCpf.ESRC_NS,\n }\n # get the url to the metadata file\n metadata = tree.xpath(\"//doc:eac-cpf/@\" + EacCpf.ESRC_KEY + \":metadata\", namespaces=ns)\n self.assertNotEqual(metadata, None)\n self.assertEqual(metadata[0], metadata_url)\n # get the url to the presentation file\n presentation = tree.xpath(\"//doc:eac-cpf/@\" + EacCpf.ESRC_KEY + \":presentation\", namespaces=ns)\n self.assertNotEqual(presentation, None)\n self.assertEqual(presentation[0], presentation_url)\n # get the url to the source file\n source = tree.xpath(\"//doc:eac-cpf/@\" + EacCpf.ESRC_KEY + \":source\", namespaces=ns)\n self.assertNotEqual(source, None)\n self.assertEqual(source[0], case)\n except:\n msg = \"Failed to complete parsing of {0}\".format(case)\n self.log.error(msg, exc_info=True)\n self.fail(msg)", "def test_blank(self):\n self._calibration_test(\"blank\")", "def test_save_and_load(self):\n with test_util.TempDirectory() as f:\n self.model.save(f)\n self.model = tc.load_model(f)\n\n try:\n self.test__list_fields()\n print(\"Saved model list fields passed\")\n self.test_get()\n print(\"Saved model get passed\")\n self.test_summaries()\n print(\"Saved model summaries passed\")\n\n except:\n assert False, \"Failed during save and load tests.\"\n del self.model", "def test_export_raw_eeglab(tmp_path):\n pytest.importorskip(\"eeglabio\")\n raw = read_raw_fif(fname_raw, preload=True)\n raw.apply_proj()\n temp_fname = tmp_path / \"test.set\"\n raw.export(temp_fname)\n raw.drop_channels([ch for ch in [\"epoc\"] if ch in raw.ch_names])\n\n with 
pytest.warns(RuntimeWarning, match=\"is above the 99th percentile\"):\n raw_read = read_raw_eeglab(temp_fname, preload=True, montage_units=\"m\")\n assert raw.ch_names == raw_read.ch_names\n\n cart_coords = np.array([d[\"loc\"][:3] for d in raw.info[\"chs\"]]) # just xyz\n cart_coords_read = np.array([d[\"loc\"][:3] for d in raw_read.info[\"chs\"]])\n assert_allclose(cart_coords, cart_coords_read)\n assert_allclose(raw.times, raw_read.times)\n assert_allclose(raw.get_data(), raw_read.get_data())\n\n # test overwrite\n with pytest.raises(FileExistsError, match=\"Destination file exists\"):\n raw.export(temp_fname, overwrite=False)\n raw.export(temp_fname, overwrite=True)\n\n # test pathlib.Path files\n raw.export(Path(temp_fname), overwrite=True)\n\n # test warning with unapplied projectors\n raw = read_raw_fif(fname_raw, preload=True)\n with pytest.warns(RuntimeWarning, match=\"Raw instance has unapplied projectors.\"):\n raw.export(temp_fname, overwrite=True)", "def test_vince111b():\n test_path = tempfile.mkdtemp()\n x_train, metadata = vince111b(test_path)\n try:\n assert x_train.shape == (36, 8)\n except:\n shutil.rmtree(test_path)\n raise()", "def test_circdich_spectrum_saveablity(self):\n import tempfile\n circdich1 = self.circdich1\n cntnr = CircDichSpectrumContainer()\n cntnr.set_spectrum(circdich1, tag='tester')\n \n with tempfile.TemporaryFile() as f:\n cntnr.save(f)\n f.seek(0)\n circdich2 = CircDichSpectrumContainer()\n circdich2 = circdich2.load(f)\n\n data2 = circdich2.get_spectrum(tag='tester').data\n \n numpy.testing.assert_array_equal(circdich1.data, data2)" ]
[ "0.6524659", "0.59664446", "0.59082484", "0.59029675", "0.590175", "0.5826562", "0.57601", "0.5708722", "0.56999654", "0.56497383", "0.5637816", "0.55756545", "0.5571732", "0.55515367", "0.554141", "0.55031055", "0.5485215", "0.5481416", "0.54808974", "0.5467557", "0.5450826", "0.54487413", "0.5443921", "0.542653", "0.54225165", "0.54108185", "0.54032755", "0.54025394", "0.5388373", "0.5377938" ]
0.6699812
0
Test saving and reading a malformed calibration blob. Note, malformed here only means datetime abnormalities.
def test_malformed_calibration(
    tmp_path: Path, malformed_calibration: typing.Dict[str, typing.Any]
) -> None:
    malformed_calibration_dir = tmp_path / "calibrations"
    malformed_calibration_path = malformed_calibration_dir / "my_bad_calibration.json"

    # TODO (lc 10-27-2022) We don't actually throw an error when we're saving bad calibration data.
    # Probably before this point, we should make sure that we're passing in a validated pydantic
    # model because otherwise we could potentially be saving malformed data which would fail on
    # a read file.
    io.save_to_file(malformed_calibration_dir, "my_bad_calibration", malformed_calibration)

    malformed_calibration_dir.mkdir(parents=True, exist_ok=True)
    malformed_calibration_path.write_text(
        json.dumps(malformed_calibration), encoding="utf-8"
    )

    with pytest.raises(AssertionError):
        io.read_cal_file(malformed_calibration_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_bad_data(self):\r\n # LB180210_3_corrupted.PD0 has three records in it, the 2nd record was corrupted\r\n with open(os.path.join(RESOURCE_PATH, 'LB180210_3_corrupted.PD0'), 'rb') as stream_handle:\r\n\r\n parser = AdcpPd0Parser(self.config_recov, stream_handle, self.exception_callback)\r\n\r\n # try to get 3 particles, should only get 2 back\r\n # the second one should correspond to ensemble 3\r\n parser.get_records(3)\r\n\r\n log.debug('Exceptions : %s', self.exception_callback_value[0])\r\n\r\n self.assertEqual(len(self.exception_callback_value), 1)\r\n self.assert_(isinstance(self.exception_callback_value[0], RecoverableSampleException))", "def test_irr_read(irregular_written_data):\n\n fp, written = irregular_written_data\n with openEDF(fp) as reader:\n arr = reader.read(0)\n #imprecision due to 2-byte conversion so tolerance set to 1 unit\n assert np.allclose(written, arr, equal_nan=True, atol=1)", "def test_load_from_tsfile_to_dataframe():\n # Test that an empty file is classed an invalid\n fd, path = tempfile.mkstemp()\n try:\n with os.fdopen(fd, \"w\") as tmp_file:\n # Write the contents of the file\n file_contents = \"\"\n tmp_file.write(file_contents)\n tmp_file.flush()\n # Parse the file and assert that it is invalid\n np.testing.assert_raises(IOError, load_from_tsfile_to_dataframe, path)\n finally:\n os.remove(path)\n # Test that a file with an incomplete set of metadata is invalid\n fd, path = tempfile.mkstemp()\n try:\n with os.fdopen(fd, \"w\") as tmp_file:\n # Write the contents of the file\n file_contents = (\n \"@problemName Test Problem\\n@timeStamps \" \"true\\n@univariate true\\n\"\n )\n tmp_file.write(file_contents)\n tmp_file.flush()\n # Parse the file and assert that it is invalid\n np.testing.assert_raises(IOError, load_from_tsfile_to_dataframe, path)\n finally:\n os.remove(path)\n # Test that a file with a complete set of metadata but no data is invalid\n fd, path = tempfile.mkstemp()\n try:\n with os.fdopen(fd, \"w\") as tmp_file:\n # Write the contents of the file\n file_contents = (\n \"@problemName Test Problem\\n@timeStamps \"\n \"true\\n@univariate true\\n@classLabel false\\n@data\"\n )\n tmp_file.write(file_contents)\n tmp_file.flush()\n # Parse the file and assert that it is invalid\n np.testing.assert_raises(IOError, load_from_tsfile_to_dataframe, path)\n finally:\n os.remove(path)\n # Test that a file with a complete set of metadata and no data but\n # invalid metadata values is invalid\n fd, path = tempfile.mkstemp()\n try:\n with os.fdopen(fd, \"w\") as tmp_file:\n # Write the contents of the file\n file_contents = (\n \"@problemName\\n@timeStamps\\n@univariate \"\n \"true\\n@classLabel false\\n@data\"\n )\n tmp_file.write(file_contents)\n tmp_file.flush()\n # Parse the file and assert that it is invalid\n np.testing.assert_raises(IOError, load_from_tsfile_to_dataframe, path)\n finally:\n os.remove(path)\n # Test that a file with a complete set of metadata and a single\n # case/dimension parses correctly\n fd, path = tempfile.mkstemp()\n try:\n with os.fdopen(fd, \"w\") as tmp_file:\n # Write the contents of the file\n file_contents = (\n \"@problemName Test Problem\\n@timeStamps \"\n \"true\\n@univariate true\\n@classLabel \"\n \"false\\n@data\\n\"\n )\n file_contents += \"(0, 1), (1, 2)\"\n\n tmp_file.write(file_contents)\n tmp_file.flush()\n # Parse the file\n df = load_from_tsfile_to_dataframe(path)\n # Test the DataFrame returned accurately reflects the data in\n # the file\n np.testing.assert_equal(len(df), 1)\n 
np.testing.assert_equal(len(df.columns), 1)\n series = df[\"dim_0\"]\n np.testing.assert_equal(len(series), 1)\n series = df[\"dim_0\"][0]\n np.testing.assert_equal(series[0], 1.0)\n np.testing.assert_equal(series[1], 2.0)\n finally:\n os.remove(path)\n # Test that a file with a complete set of metadata and 2 cases with 3\n # dimensions parses correctly\n fd, path = tempfile.mkstemp()\n try:\n with os.fdopen(fd, \"w\") as tmp_file:\n # Write the contents of the file\n file_contents = (\n \"@problemName Test Problem\\n@timeStamps \"\n \"true\\n@univariate true\\n@classLabel \"\n \"false\\n@data\\n\"\n )\n file_contents += \"(0, 1), (1, 2):(0, 3), (1, 4):(0, 5), (1, 6)\\n\"\n file_contents += \"(0, 11), (1, 12):(0, 13), (1,14):(0, 15), (1, 16) \\n\"\n tmp_file.write(file_contents)\n tmp_file.flush()\n # Parse the file\n df = load_from_tsfile_to_dataframe(path)\n # Test the DataFrame returned accurately reflects the data in\n # the file\n np.testing.assert_equal(len(df), 2)\n np.testing.assert_equal(len(df.columns), 3)\n series = df[\"dim_0\"]\n np.testing.assert_equal(len(series), 2)\n\n series = df[\"dim_0\"][0]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 1.0)\n np.testing.assert_equal(series[1], 2.0)\n\n series = df[\"dim_0\"][1]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 11.0)\n np.testing.assert_equal(series[1], 12.0)\n\n series = df[\"dim_1\"]\n np.testing.assert_equal(len(series), 2)\n\n series = df[\"dim_1\"][0]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 3.0)\n np.testing.assert_equal(series[1], 4.0)\n\n series = df[\"dim_1\"][1]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 13.0)\n np.testing.assert_equal(series[1], 14.0)\n\n series = df[\"dim_2\"]\n np.testing.assert_equal(len(series), 2)\n\n series = df[\"dim_2\"][0]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 5.0)\n np.testing.assert_equal(series[1], 6.0)\n\n series = df[\"dim_2\"][1]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 15.0)\n np.testing.assert_equal(series[1], 16.0)\n finally:\n os.remove(path)\n # Test that a file with a complete set of metadata and time-series of\n # different length parses correctly\n fd, path = tempfile.mkstemp()\n try:\n with os.fdopen(fd, \"w\") as tmp_file:\n # Write the contents of the file\n file_contents = (\n \"@problemName Test Problem\\n@timeStamps \"\n \"true\\n@univariate true\\n@classLabel \"\n \"false\\n@data\\n\"\n )\n file_contents += \"(0, 1), (1, 2):(0, 3):(0, 5), (1, 6)\\n\"\n file_contents += \"(0, 11), (1, 12):(0, 13), (1,14):(0, 15)\\n\"\n tmp_file.write(file_contents)\n tmp_file.flush()\n # Parse the file\n df = load_from_tsfile_to_dataframe(path)\n # Test the DataFrame returned accurately reflects the data in\n # the file\n\n np.testing.assert_equal(len(df), 2)\n np.testing.assert_equal(len(df.columns), 3)\n\n series = df[\"dim_0\"]\n np.testing.assert_equal(len(series), 2)\n\n series = df[\"dim_0\"][0]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 1.0)\n np.testing.assert_equal(series[1], 2.0)\n\n series = df[\"dim_0\"][1]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 11.0)\n np.testing.assert_equal(series[1], 12.0)\n\n series = df[\"dim_1\"]\n np.testing.assert_equal(len(series), 2)\n\n series = df[\"dim_1\"][0]\n np.testing.assert_equal(len(series), 1)\n np.testing.assert_equal(series[0], 3.0)\n\n series 
= df[\"dim_1\"][1]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 13.0)\n np.testing.assert_equal(series[1], 14.0)\n\n series = df[\"dim_2\"]\n np.testing.assert_equal(len(series), 2)\n\n series = df[\"dim_2\"][0]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 5.0)\n np.testing.assert_equal(series[1], 6.0)\n\n series = df[\"dim_2\"][1]\n np.testing.assert_equal(len(series), 1)\n np.testing.assert_equal(series[0], 15.0)\n\n finally:\n os.remove(path)\n\n # Test that a file with a complete set of metadata and data but an\n # inconsistent number of dimensions across cases is classed as invalid\n\n fd, path = tempfile.mkstemp()\n try:\n with os.fdopen(fd, \"w\") as tmp_file:\n # Write the contents of the file\n\n file_contents = (\n \"@problemName Test Problem\\n@timeStamps \"\n \"true\\n@univariate true\\n@classLabel \"\n \"false\\n@data\\n\"\n )\n file_contents += \"(0, 1), (1, 2):(0, 3), (1, 4):(0, 5), (1, 6)\\n\"\n file_contents += \"(0, 11), (1, 12):(0, 13), (1,14) \\n\"\n\n tmp_file.write(file_contents)\n tmp_file.flush()\n\n # Parse the file and assert that it is invalid\n\n np.testing.assert_raises(IOError, load_from_tsfile_to_dataframe, path)\n\n finally:\n os.remove(path)\n\n # Test that a file with a complete set of metadata and data but missing\n # values after a tuple is classed as invalid\n\n fd, path = tempfile.mkstemp()\n try:\n with os.fdopen(fd, \"w\") as tmp_file:\n # Write the contents of the file\n\n file_contents = (\n \"@problemName Test Problem\\n@timeStamps \"\n \"true\\n@univariate true\\n@classLabel \"\n \"false\\n@data\\n\"\n )\n file_contents += \"(0, 1), (1, 2):(0, 3), (1, 4):(0, 5),\\n\"\n\n tmp_file.write(file_contents)\n tmp_file.flush()\n\n # Parse the file and assert that it is invalid\n\n np.testing.assert_raises(IOError, load_from_tsfile_to_dataframe, path)\n\n finally:\n os.remove(path)\n\n # Test that a file with a complete set of metadata and data and some\n # empty dimensions is classed as valid\n\n fd, path = tempfile.mkstemp()\n try:\n with os.fdopen(fd, \"w\") as tmp_file:\n # Write the contents of the file\n\n file_contents = (\n \"@problemName Test Problem\\n@timeStamps \"\n \"true\\n@univariate true\\n@classLabel \"\n \"false\\n@data\\n\"\n )\n file_contents += \"(0, 1), (1, 2): :(0, 5), (1, 6)\\n\"\n file_contents += \"(0, 11), (1, 12):(0, 13), (1,14) : \\n\"\n file_contents += (\n \"(0, 21), (1, 22):(0, 23), (1,24) : (0,25), (1, 26) \\n\"\n )\n\n tmp_file.write(file_contents)\n tmp_file.flush()\n\n # Parse the file\n\n df = load_from_tsfile_to_dataframe(path)\n\n # Test the DataFrame returned accurately reflects the data in\n # the file\n\n np.testing.assert_equal(len(df), 3)\n np.testing.assert_equal(len(df.columns), 3)\n\n series = df[\"dim_0\"]\n np.testing.assert_equal(len(series), 3)\n\n series = df[\"dim_0\"][0]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 1.0)\n np.testing.assert_equal(series[1], 2.0)\n\n series = df[\"dim_0\"][1]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 11.0)\n np.testing.assert_equal(series[1], 12.0)\n\n series = df[\"dim_0\"][2]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 21.0)\n np.testing.assert_equal(series[1], 22.0)\n\n series = df[\"dim_1\"]\n np.testing.assert_equal(len(series), 3)\n\n series = df[\"dim_1\"][0]\n np.testing.assert_equal(len(series), 0)\n\n series = df[\"dim_1\"][1]\n np.testing.assert_equal(len(series), 2)\n 
np.testing.assert_equal(series[0], 13.0)\n np.testing.assert_equal(series[1], 14.0)\n\n series = df[\"dim_1\"][2]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 23.0)\n np.testing.assert_equal(series[1], 24.0)\n\n series = df[\"dim_2\"]\n np.testing.assert_equal(len(series), 3)\n\n series = df[\"dim_2\"][0]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 5.0)\n np.testing.assert_equal(series[1], 6.0)\n\n series = df[\"dim_2\"][1]\n np.testing.assert_equal(len(series), 0)\n\n series = df[\"dim_2\"][2]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 25.0)\n np.testing.assert_equal(series[1], 26.0)\n\n finally:\n os.remove(path)\n\n # Test that a file with a complete set of metadata and data that\n # contains datetimes as timestamps and has some empty dimensions is\n # classed as valid\n\n fd, path = tempfile.mkstemp()\n try:\n with os.fdopen(fd, \"w\") as tmp_file:\n # Write the contents of the file\n\n file_contents = (\n \"@problemName Test Problem\\n@timeStamps \"\n \"true\\n@univariate true\\n@classLabel \"\n \"false\\n@data\\n\"\n )\n file_contents += (\n \"(01/01/2019 00:00:00, 1), (01/02/2019 \"\n \"00:00:00, 2) : \"\n \" : (01/05/2019 00:00:00, \"\n \"5), (01/06/2019 00:00:00, 6)\\n\"\n )\n file_contents += (\n \"(01/01/2020 00:00:00, 11), (01/02/2020 \"\n \"00:00:00, 12) : (01/03/2020 00:00:00, 13), \"\n \"(01/04/2020 00:00:00, 14) : \\n\"\n )\n file_contents += (\n \"(01/01/2021 00:00:00, 21), (01/02/2021 \"\n \"00:00:00, 22) : (01/03/2021 00:00:00, 23), \"\n \"(01/04/2021 00:00:00, 24) : \\n\"\n )\n\n tmp_file.write(file_contents)\n tmp_file.flush()\n\n # Parse the file\n\n df = load_from_tsfile_to_dataframe(path)\n\n # Test the DataFrame returned accurately reflects the data in\n # the file\n\n np.testing.assert_equal(len(df), 3)\n np.testing.assert_equal(len(df.columns), 3)\n\n series = df[\"dim_0\"]\n np.testing.assert_equal(len(series), 3)\n\n series = df[\"dim_0\"][0]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[\"01/01/2019\"], 1.0)\n np.testing.assert_equal(series[\"01/02/2019\"], 2.0)\n\n series = df[\"dim_0\"][1]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[\"01/01/2020\"], 11.0)\n np.testing.assert_equal(series[\"01/02/2020\"], 12.0)\n\n series = df[\"dim_0\"][2]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[\"01/01/2021\"], 21.0)\n np.testing.assert_equal(series[\"01/02/2021\"], 22.0)\n\n series = df[\"dim_1\"]\n np.testing.assert_equal(len(series), 3)\n\n series = df[\"dim_1\"][0]\n np.testing.assert_equal(len(series), 0)\n\n series = df[\"dim_1\"][1]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[\"01/03/2020\"], 13.0)\n np.testing.assert_equal(series[\"01/04/2020\"], 14.0)\n\n series = df[\"dim_1\"][2]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[\"01/03/2021\"], 23.0)\n np.testing.assert_equal(series[\"01/04/2021\"], 24.0)\n\n series = df[\"dim_2\"]\n np.testing.assert_equal(len(series), 3)\n\n series = df[\"dim_2\"][0]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[\"01/05/2019\"], 5.0)\n np.testing.assert_equal(series[\"01/06/2019\"], 6.0)\n\n series = df[\"dim_2\"][1]\n np.testing.assert_equal(len(series), 0)\n\n series = df[\"dim_2\"][2]\n np.testing.assert_equal(len(series), 0)\n\n finally:\n os.remove(path)\n\n # Test that a file that mixes timestamp conventions is invalid\n\n fd, path = 
tempfile.mkstemp()\n try:\n with os.fdopen(fd, \"w\") as tmp_file:\n # Write the contents of the file\n\n file_contents = (\n \"@problemName Test Problem\\n@timeStamps \"\n \"true\\n@univariate true\\n@classLabel \"\n \"false\\n@data\\n\"\n )\n file_contents += (\n \"(01/01/2019 00:00:00, 1), (01/02/2019 \"\n \"00:00:00, 2) : \"\n \" : (01/05/2019 00:00:00, \"\n \"5), (01/06/2019 00:00:00, 6)\\n\"\n )\n file_contents += (\n \"(00, 11), (1, 12) : (01/03/2020 00:00:00, 13), \"\n \"(01/04/2020 00:00:00, 14) : \\n\"\n )\n file_contents += (\n \"(01/01/2021 00:00:00, 21), (01/02/2021 \"\n \"00:00:00, 22) : (01/03/2021 00:00:00, 23), \"\n \"(01/04/2021 00:00:00, 24) : \\n\"\n )\n\n tmp_file.write(file_contents)\n tmp_file.flush()\n\n # Parse the file and assert that it is invalid\n\n np.testing.assert_raises(IOError, load_from_tsfile_to_dataframe, path)\n\n finally:\n os.remove(path)\n\n # Test that a file with a complete set of metadata and data but missing\n # classes is classed as invalid\n\n fd, path = tempfile.mkstemp()\n try:\n with os.fdopen(fd, \"w\") as tmp_file:\n # Write the contents of the file\n\n file_contents = (\n \"@problemName Test Problem\\n@timeStamps \"\n \"true\\n@univariate true\\n@classLabel true 0 1 \"\n \"2\\n@data\\n\"\n )\n file_contents += \"(0, 1), (1, 2):(0, 3), (1, 4):(0, 5), (1, 6)\\n\"\n file_contents += \"(0, 11), (1, 12):(0, 13), (1,14):(0, 15), (1, 16) \\n\"\n\n tmp_file.write(file_contents)\n tmp_file.flush()\n\n # Parse the file and assert that it is invalid\n\n np.testing.assert_raises(IOError, load_from_tsfile_to_dataframe, path)\n\n finally:\n os.remove(path)\n\n # Test that a file with a complete set of metadata and data but invalid\n # classes is classed as invalid\n\n fd, path = tempfile.mkstemp()\n try:\n with os.fdopen(fd, \"w\") as tmp_file:\n # Write the contents of the file\n\n file_contents = (\n \"@problemName Test Problem\\n@timeStamps \"\n \"true\\n@univariate true\\n@classLabel true 0 1 \"\n \"2\\n@data\\n\"\n )\n file_contents += \"(0, 1), (1, 2):(0, 3), (1, 4):(0, 5), (1, 6) : 0 \\n\"\n file_contents += (\n \"(0, 11), (1, 12):(0, 13), (1,14):(0, 15), (1, 16) : 3 \\n\"\n )\n\n tmp_file.write(file_contents)\n tmp_file.flush()\n\n # Parse the file and assert that it is invalid\n\n np.testing.assert_raises(IOError, load_from_tsfile_to_dataframe, path)\n\n finally:\n os.remove(path)\n\n # Test that a file with a complete set of metadata and data with classes\n # is classed as valid\n\n fd, path = tempfile.mkstemp()\n try:\n with os.fdopen(fd, \"w\") as tmp_file:\n # Write the contents of the file\n\n file_contents = (\n \"@problemName Test Problem\\n@timeStamps \"\n \"true\\n@univariate true\\n@classLabel true 0 1 \"\n \"2\\n@data\\n\"\n )\n file_contents += \"(0, 1), (1, 2):(0, 3), (1, 4):(0, 5), (1, 6): 0\\n\"\n file_contents += (\n \"(0, 11), (1, 12):(0, 13), (1,14):(0, 15), (1, 16): 2 \\n\"\n )\n\n tmp_file.write(file_contents)\n tmp_file.flush()\n\n # Parse the file\n\n df, y = load_from_tsfile_to_dataframe(path)\n\n # Test the DataFrame of X values returned accurately reflects\n # the data in the file\n\n np.testing.assert_equal(len(df), 2)\n np.testing.assert_equal(len(df.columns), 3)\n\n series = df[\"dim_0\"]\n np.testing.assert_equal(len(series), 2)\n\n series = df[\"dim_0\"][0]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 1.0)\n np.testing.assert_equal(series[1], 2.0)\n\n series = df[\"dim_0\"][1]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 11.0)\n 
np.testing.assert_equal(series[1], 12.0)\n\n series = df[\"dim_1\"]\n np.testing.assert_equal(len(series), 2)\n\n series = df[\"dim_1\"][0]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 3.0)\n np.testing.assert_equal(series[1], 4.0)\n\n series = df[\"dim_1\"][1]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 13.0)\n np.testing.assert_equal(series[1], 14.0)\n\n series = df[\"dim_2\"]\n np.testing.assert_equal(len(series), 2)\n\n series = df[\"dim_2\"][0]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 5.0)\n np.testing.assert_equal(series[1], 6.0)\n\n series = df[\"dim_2\"][1]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 15.0)\n np.testing.assert_equal(series[1], 16.0)\n\n # Test that the class values are as expected\n\n np.testing.assert_equal(len(y), 2)\n np.testing.assert_equal(y[0], \"0\")\n np.testing.assert_equal(y[1], \"2\")\n\n finally:\n os.remove(path)\n\n # Test that a file with a complete set of metadata and data, with no\n # timestamps, is classed as valid\n\n fd, path = tempfile.mkstemp()\n try:\n with os.fdopen(fd, \"w\") as tmp_file:\n # Write the contents of the file\n\n file_contents = (\n \"@problemName Test Problem\\n@timeStamps \"\n \"false\\n@univariate true\\n@classLabel \"\n \"false\\n@data\\n\"\n )\n file_contents += \"1,2:3,4:5,6\\n\"\n file_contents += \"11,12:13,14:15,16\\n\"\n file_contents += \"21,22:23,24:25,26\\n\"\n\n tmp_file.write(file_contents)\n tmp_file.flush()\n\n # Parse the file\n\n df = load_from_tsfile_to_dataframe(path)\n\n # Test the DataFrame returned accurately reflects the data in\n # the file\n\n np.testing.assert_equal(len(df), 3)\n np.testing.assert_equal(len(df.columns), 3)\n\n series = df[\"dim_0\"]\n np.testing.assert_equal(len(series), 3)\n\n series = df[\"dim_0\"][0]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 1.0)\n np.testing.assert_equal(series[1], 2.0)\n\n series = df[\"dim_0\"][1]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 11.0)\n np.testing.assert_equal(series[1], 12.0)\n\n series = df[\"dim_0\"][2]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 21.0)\n np.testing.assert_equal(series[1], 22.0)\n\n series = df[\"dim_1\"]\n np.testing.assert_equal(len(series), 3)\n\n series = df[\"dim_1\"][0]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 3.0)\n np.testing.assert_equal(series[1], 4.0)\n\n series = df[\"dim_1\"][1]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 13.0)\n np.testing.assert_equal(series[1], 14.0)\n\n series = df[\"dim_1\"][2]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 23.0)\n np.testing.assert_equal(series[1], 24.0)\n\n series = df[\"dim_2\"]\n np.testing.assert_equal(len(series), 3)\n\n series = df[\"dim_2\"][0]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 5.0)\n np.testing.assert_equal(series[1], 6.0)\n\n series = df[\"dim_2\"][1]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 15.0)\n np.testing.assert_equal(series[1], 16.0)\n\n series = df[\"dim_2\"][2]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 25.0)\n np.testing.assert_equal(series[1], 26.0)\n\n finally:\n os.remove(path)\n\n # Test that a file with a complete set of metadata and data, with no\n # timestamps and some empty dimensions, is 
classed as valid\n\n fd, path = tempfile.mkstemp()\n try:\n with os.fdopen(fd, \"w\") as tmp_file:\n # Write the contents of the file\n\n file_contents = (\n \"@problemName Test Problem\\n@timeStamps \"\n \"false\\n@univariate true\\n@classLabel \"\n \"false\\n@data\\n\"\n )\n file_contents += \"1,2::5,6\\n\"\n file_contents += \"11,12:13,14:15,16\\n\"\n file_contents += \"21,22:23,24:\\n\"\n\n tmp_file.write(file_contents)\n tmp_file.flush()\n\n # Parse the file\n\n df = load_from_tsfile_to_dataframe(path)\n\n # Test the DataFrame returned accurately reflects the data in\n # the file\n\n np.testing.assert_equal(len(df), 3)\n np.testing.assert_equal(len(df.columns), 3)\n\n series = df[\"dim_0\"]\n np.testing.assert_equal(len(series), 3)\n\n series = df[\"dim_0\"][0]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 1.0)\n np.testing.assert_equal(series[1], 2.0)\n\n series = df[\"dim_0\"][1]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 11.0)\n np.testing.assert_equal(series[1], 12.0)\n\n series = df[\"dim_0\"][2]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 21.0)\n np.testing.assert_equal(series[1], 22.0)\n\n series = df[\"dim_1\"]\n np.testing.assert_equal(len(series), 3)\n\n series = df[\"dim_1\"][0]\n np.testing.assert_equal(len(series), 0)\n\n series = df[\"dim_1\"][1]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 13.0)\n np.testing.assert_equal(series[1], 14.0)\n\n series = df[\"dim_1\"][2]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 23.0)\n np.testing.assert_equal(series[1], 24.0)\n\n series = df[\"dim_2\"]\n np.testing.assert_equal(len(series), 3)\n\n series = df[\"dim_2\"][0]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 5.0)\n np.testing.assert_equal(series[1], 6.0)\n\n series = df[\"dim_2\"][1]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 15.0)\n np.testing.assert_equal(series[1], 16.0)\n\n series = df[\"dim_2\"][2]\n np.testing.assert_equal(len(series), 0)\n\n finally:\n os.remove(path)\n\n # Test that a file with a complete set of metadata and data, with no\n # timestamps and some empty dimensions and classes, is classed as valid\n\n fd, path = tempfile.mkstemp()\n try:\n with os.fdopen(fd, \"w\") as tmp_file:\n # Write the contents of the file\n\n file_contents = (\n \"@problemName Test Problem\\n@timeStamps \"\n \"false\\n@univariate true\\n@classLabel true cat \"\n \"bear dog\\n@data\\n\"\n )\n file_contents += \"1,2::5,6:cat \\n\"\n file_contents += \"11,12:13,14:15,16: dog\\n\"\n file_contents += \"21,22:23,24:: bear \\n\"\n\n tmp_file.write(file_contents)\n tmp_file.flush()\n\n # Parse the file\n\n df, y = load_from_tsfile_to_dataframe(path)\n\n # Test the DataFrame of X values returned accurately reflects\n # the data in the file\n\n np.testing.assert_equal(len(df), 3)\n np.testing.assert_equal(len(df.columns), 3)\n\n series = df[\"dim_0\"]\n np.testing.assert_equal(len(series), 3)\n\n series = df[\"dim_0\"][0]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 1.0)\n np.testing.assert_equal(series[1], 2.0)\n\n series = df[\"dim_0\"][1]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 11.0)\n np.testing.assert_equal(series[1], 12.0)\n\n series = df[\"dim_0\"][2]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 21.0)\n np.testing.assert_equal(series[1], 
22.0)\n\n series = df[\"dim_1\"]\n np.testing.assert_equal(len(series), 3)\n\n series = df[\"dim_1\"][0]\n np.testing.assert_equal(len(series), 0)\n\n series = df[\"dim_1\"][1]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 13.0)\n np.testing.assert_equal(series[1], 14.0)\n\n series = df[\"dim_1\"][2]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 23.0)\n np.testing.assert_equal(series[1], 24.0)\n\n series = df[\"dim_2\"]\n np.testing.assert_equal(len(series), 3)\n\n series = df[\"dim_2\"][0]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 5.0)\n np.testing.assert_equal(series[1], 6.0)\n\n series = df[\"dim_2\"][1]\n np.testing.assert_equal(len(series), 2)\n np.testing.assert_equal(series[0], 15.0)\n np.testing.assert_equal(series[1], 16.0)\n\n series = df[\"dim_2\"][2]\n np.testing.assert_equal(len(series), 0)\n\n # Test that the class values are as expected\n\n np.testing.assert_equal(len(y), 3)\n np.testing.assert_equal(y[0], \"cat\")\n np.testing.assert_equal(y[1], \"dog\")\n np.testing.assert_equal(y[2], \"bear\")\n\n finally:\n os.remove(path)", "def test_export_raw_pybv(tmp_path, meas_date, orig_time, ext):\n pytest.importorskip(\"pybv\")\n raw = read_raw_fif(fname_raw, preload=True)\n raw.apply_proj()\n\n raw.set_meas_date(meas_date)\n\n # add some annotations\n annots = Annotations(\n onset=[3, 6, 9, 12, 14], # seconds\n duration=[1, 1, 0.5, 0.25, 9], # seconds\n description=[\n \"Stimulus/S 1\",\n \"Stimulus/S2.50\",\n \"Response/R101\",\n \"Look at this\",\n \"Comment/And at this\",\n ],\n ch_names=[(), (), (), (\"EEG 001\",), (\"EEG 001\", \"EEG 002\")],\n orig_time=orig_time,\n )\n raw.set_annotations(annots)\n\n temp_fname = tmp_path / (\"test\" + ext)\n with pytest.warns(RuntimeWarning, match=\"'short' format. 
Converting\"):\n raw.export(temp_fname)\n raw_read = read_raw_brainvision(str(temp_fname).replace(\".eeg\", \".vhdr\"))\n assert raw.ch_names == raw_read.ch_names\n assert_allclose(raw.times, raw_read.times)\n assert_allclose(raw.get_data(), raw_read.get_data())", "def test_badformat():\n res = vtec.contime(\"AABBCCTHHMMZ\")\n assert res is None", "def test_old_data_format_error(self):\n assert_raises(ValueError, get_data, self.testv1)", "def test_bad_data(self):\n # bad data file has:\n # 1 bad status\n # particle A has bad timestamp\n # particle B has bad dark fit\n # particle C has bad frame type\n # particle D has bad year\n stream_handle = open(os.path.join(RESOURCE_PATH,\n 'bad_SNA_SNA.txt'), MODE_ASCII_READ)\n\n self.create_parser(stream_handle, telem_flag=False)\n\n # get E, since it is first it will generate a metadata\n particles = self.parser.get_records(2)\n\n # check all the values against expected results.\n self.assert_particles(particles, 'last_and_meta_SNA_recov.yml', RESOURCE_PATH)\n\n # should have had 5 exceptions by now\n self.assertEqual(len(self.exception_callback_value), 5)\n\n for exception in self.exception_callback_value:\n self.assert_(isinstance(exception, RecoverableSampleException))", "def _is_probably_old_datfile_format(raw_data):\n return not _is_probably_new_datfile_format(raw_data) and \"UTC\" in raw_data", "def test_DL_import_wrong_file_serialized(self):\n filepath = '5.txt'\n with open(filepath, 'wb') as file:\n pickle.dump([\"This is a wrong dataset\"], file)\n # Check if exception was raised for wrong data type\n with self.assertRaises(Exception):\n flow_processing_input.DetectorsLocation(9999, filepath)\n os.remove(filepath)", "def test_estimate_data_time__correct_doy():\n parser = IMFV283Parser()\n # BOU aka normal\n transmission = '17274013121'\n day = 274\n minute = 72\n (data_time, transmit_time, corrected) = \\\n parser._estimate_data_time(transmission, day, minute)\n assert_equals(data_time, UTCDateTime('2017-10-01T01:12:00Z'))\n assert_equals(transmit_time, UTCDateTime('2017-10-01T01:31:21Z'))\n assert_equals(corrected, False)", "def test_save_calibration_essay(self):\r\n response = self.peer_grading.save_calibration_essay(self.save_dict)\r\n self.assertTrue(response['success'])", "def test_estimate_data_time__incorrect_doy():\n parser = IMFV283Parser()\n # BLC aka 1999 rollover gps issue\n transmission = '17274013241'\n day = 46\n minute = 78\n (data_time, transmit_time, corrected) = \\\n parser._estimate_data_time(transmission, day, minute)\n assert_equals(data_time, UTCDateTime('2017-10-01T01:18:00Z'))\n assert_equals(transmit_time, UTCDateTime('2017-10-01T01:32:41Z'))\n assert_equals(corrected, True)", "def test_export_raw_edf(tmp_path, dataset, format):\n if dataset == \"test\":\n raw = read_raw_fif(fname_raw)\n elif dataset == \"misc\":\n fname = misc_path / \"ecog\" / \"sample_ecog_ieeg.fif\"\n raw = read_raw_fif(fname)\n\n # only test with EEG channels\n raw.pick_types(eeg=True, ecog=True, seeg=True)\n raw.load_data()\n orig_ch_names = raw.ch_names\n temp_fname = tmp_path / f\"test.{format}\"\n\n # test runtime errors\n with pytest.warns() as record:\n raw.export(temp_fname, physical_range=(-1e6, 0))\n if dataset == \"test\":\n assert any(\"Data has a non-integer\" in str(rec.message) for rec in record)\n assert any(\"The maximum\" in str(rec.message) for rec in record)\n remove(temp_fname)\n\n with pytest.warns() as record:\n raw.export(temp_fname, physical_range=(0, 1e6))\n if dataset == \"test\":\n assert any(\"Data has a 
non-integer\" in str(rec.message) for rec in record)\n assert any(\"The minimum\" in str(rec.message) for rec in record)\n remove(temp_fname)\n\n if dataset == \"test\":\n with pytest.warns(RuntimeWarning, match=\"Data has a non-integer\"):\n raw.export(temp_fname)\n elif dataset == \"misc\":\n with pytest.warns(RuntimeWarning, match=\"EDF format requires\"):\n raw.export(temp_fname)\n\n if \"epoc\" in raw.ch_names:\n raw.drop_channels([\"epoc\"])\n\n raw_read = read_raw_edf(temp_fname, preload=True)\n assert orig_ch_names == raw_read.ch_names\n # only compare the original length, since extra zeros are appended\n orig_raw_len = len(raw)\n\n # assert data and times are not different\n # Due to the physical range of the data, reading and writing is\n # not lossless. For example, a physical min/max of -/+ 3200 uV\n # will result in a resolution of 0.09 uV. This resolution\n # though is acceptable for most EEG manufacturers.\n assert_array_almost_equal(\n raw.get_data(), raw_read.get_data()[:, :orig_raw_len], decimal=4\n )\n\n # Due to the data record duration limitations of EDF files, one\n # cannot store arbitrary float sampling rate exactly. Usually this\n # results in two sampling rates that are off by very low number of\n # decimal points. This for practical purposes does not matter\n # but will result in an error when say the number of time points\n # is very very large.\n assert_allclose(raw.times, raw_read.times[:orig_raw_len], rtol=0, atol=1e-5)", "def test_save_delete_file(\n tmp_path: Path, calibration: typing.Dict[str, typing.Any]\n) -> None:\n my_calpath = tmp_path / \"calibrations\"\n io.save_to_file(my_calpath, \"my_calibration\", calibration)\n file_to_delete = my_calpath / \"my_calibration.json\"\n assert io.read_cal_file(file_to_delete)\n\n io.delete_file(file_to_delete)\n\n with pytest.raises(FileNotFoundError):\n io.read_cal_file(file_to_delete)", "def test_readbadformat(self):\n\n self.assertRaises(ParseError, self.hw, self.badfile)", "def test_incorrect_creation_2(rawinputfile, reformfile0, year, ref, asm, gdr):\n # pylint: disable=too-many-arguments\n if ref == 'reformfile0':\n reform = reformfile0.name\n else:\n reform = ref\n with pytest.raises(ValueError):\n TaxCalcIO(\n input_data=rawinputfile.name,\n tax_year=year,\n reform=reform,\n assump=asm,\n growdiff_response=gdr,\n aging_input_data=False,\n exact_calculations=False)", "def validate_file_contents(cube, metadata):\n _check_start_end_times(cube, metadata)\n _check_contiguity(cube, metadata)\n _check_data_point(cube, metadata)", "def test_GFD_import_wrong_file_serialized(self):\n filepath = '5.txt'\n with open(filepath, 'wb') as file:\n pickle.dump([\"This is a wrong dataset\"], file)\n # Check if exception was raised for wrong data type\n with self.assertRaises(Exception):\n flow_processing_input.GroundFlowData(filepath)\n os.remove(filepath)", "def test_bad_data(self):\n # Bad checksum\n # If checksum is bad, skip the record and continue parsing.\n self.stream_handle = StringIO(AdcpsJlnStcParserUnitTestCase.BAD_CHECKSUM)\n self.parser = AdcpsJlnStcParser(self.config, self.start_state, self.stream_handle,\n self.state_callback, self.pub_callback, self.exception_callback)\n # Only the header and second record, particle_b should be returned.\n result = self.parser.get_records(3)\n self.assertEqual(self.publish_callback_value[0], self.particle_header_footer)\n self.assertEqual(self.publish_callback_value[1], self.particle_b)\n if len(result) != 2:\n self.fail(\"Expected two records and got %d. 
Record containing bad data should have been skipped.\", len(result))\n \n # Incorrect number of bytes\n # If numbytes is incorrect, skip the record and continue parsing.\n self.start_state = {StateKey.POSITION: 0}\n self.stream_handle = StringIO(AdcpsJlnStcParserUnitTestCase.BAD_NUM_BYTES)\n self.parser = AdcpsJlnStcParser(self.config, self.start_state, self.stream_handle,\n self.state_callback, self.pub_callback, self.exception_callback) \n result = self.parser.get_records(3)\n self.assertEqual(self.publish_callback_value[0], self.particle_header_footer)\n self.assertEqual(self.publish_callback_value[1], self.particle_b)\n if len(result) != 2:\n self.fail(\"Expected two records and got %d. Record containing bad data should have been skipped.\", len(result))", "def test_brainvision_data():\n assert_raises(IOError, read_raw_brainvision, vmrk_path)\n assert_raises(ValueError, read_raw_brainvision, vhdr_path, montage,\n preload=True, scale=\"foo\")\n with warnings.catch_warnings(record=True) as w: # event parsing\n raw_py = _test_raw_reader(\n read_raw_brainvision, vhdr_fname=vhdr_path, montage=montage,\n eog=eog)\n assert_true(all('parse triggers that' in str(ww.message) for ww in w))\n assert_true('RawBrainVision' in repr(raw_py))\n\n assert_equal(raw_py.info['highpass'], 0.)\n assert_equal(raw_py.info['lowpass'], 250.)\n\n picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')\n data_py, times_py = raw_py[picks]\n\n # compare with a file that was generated using MNE-C\n raw_bin = Raw(eeg_bin, preload=True)\n picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')\n data_bin, times_bin = raw_bin[picks]\n\n assert_array_almost_equal(data_py, data_bin)\n assert_array_almost_equal(times_py, times_bin)\n\n # Make sure EOG channels are marked correctly\n for ch in raw_py.info['chs']:\n if ch['ch_name'] in eog:\n assert_equal(ch['kind'], FIFF.FIFFV_EOG_CH)\n elif ch['ch_name'] == 'STI 014':\n assert_equal(ch['kind'], FIFF.FIFFV_STIM_CH)\n elif ch['ch_name'] in raw_py.info['ch_names']:\n assert_equal(ch['kind'], FIFF.FIFFV_EEG_CH)\n else:\n raise RuntimeError(\"Unknown Channel: %s\" % ch['ch_name'])\n\n # test loading v2\n read_raw_brainvision(vhdr_v2_path, eog=eog, preload=True,\n response_trig_shift=1000)", "def test_sktime_save_model_raises_invalid_serialization_format(auto_arima_model, model_path):\n with pytest.raises(MlflowException, match=\"Unrecognized serialization format: \"):\n flavor.save_model(\n sktime_model=auto_arima_model, path=model_path, serialization_format=\"json\"\n )", "def test_cast_observation_date_sad_path(self):\n # a record without date value should throw an exception\n descriptor = self.descriptor\n schema = ObservationSchema(descriptor)\n record = {\n 'Latitude': \"-32\", 'Longitude': \"115.3\"\n }\n self.assertIsNone(schema.cast_record_observation_date(record))\n\n record = {\n 'Latitude': \"-32\", 'Observation Date': '', 'Longitude': \"115.3\"\n }\n self.assertIsNone(schema.cast_record_observation_date(record))\n\n record = {\n 'Latitude': \"-32\", 'Observation Date': 'bullshit', 'Longitude': \"115.3\"\n }\n with self.assertRaises(InvalidDateType):\n schema.cast_record_observation_date(record)\n\n record = {\n 'Latitude': \"-32\", 'Observation Date': 1200, 'Longitude': \"115.3\"\n }\n with self.assertRaises(InvalidDateType):\n schema.cast_record_observation_date(record)", "def validate_timecode_input(self):\n frame = self.file_buffer.get_image(self.frame_offset)\n try:\n test = frame.shape\n except Exception as e:\n print(e)\n return 
False\n else:\n return True\n finally:\n test = None\n frame = None", "def test_validate_invalid_firms_conversion_with_invalid_date():\n firms_conversion_json = copy.deepcopy(FIRMS_CONVERSION)\n firms_conversion_json['startDate'] = \"test\"\n legal_filing = {'conversion': firms_conversion_json}\n\n is_valid, errors = validate(legal_filing, 'conversion')\n\n if errors:\n for err in errors:\n print(err.message)\n print(errors)\n\n assert not is_valid", "def test_raw_existence(self):\n\n # RNA - raw layer required\n del self.validator.adata.raw\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: Raw data is missing: there is no 'raw.X' and 'X_normalization' is not 'none'.\"\n ],\n )\n\n # ATAC - raw layer not required\n # The assignment above makes X to not be raw: self.validator.adata.uns[\"X_normalization\"] = \"CPM\"\n # The following line makes it to be scATAC-seq data (EFO:0010891)\n # Missing raw data in atac-seq data is allowed, thus the following should not return an error message\n self.validator.errors = []\n self.validator.adata.obs[\"assay_ontology_term_id\"] = \"EFO:0010891\"\n self.validator.validate_adata()\n self.assertEqual(self.validator.errors, [])", "def test_incorrect_data_type_plate():\n \n test_object = fa.read_in_envision(data_csv=list_A, platemap_csv=plate_map_file, data_type='plate', size=384)", "def test_invalid_events(subarray_and_event_gamma_off_axis_500_gev):\n\n # 4-LST bright event already calibrated\n # we'll clean it and parametrize it again in the TelescopeFrame\n subarray, event = subarray_and_event_gamma_off_axis_500_gev\n\n tel_azimuth = {}\n tel_altitude = {}\n\n #source = EventSource(filename, max_events=1)\n #subarray = source.subarray\n calib = CameraCalibrator(subarray)\n fit = HillasReconstructor(subarray)\n\n #for event in source:\n\n calib(event)\n\n hillas_dict = {}\n for tel_id, dl1 in event.dl1.tel.items():\n\n geom = subarray.tel[tel_id].camera.geometry\n tel_azimuth[tel_id] = event.pointing.tel[tel_id].azimuth\n tel_altitude[tel_id] = event.pointing.tel[tel_id].altitude\n\n mask = tailcuts_clean(\n geom, dl1.image, picture_thresh=10.0, boundary_thresh=5.0\n )\n\n dl1.parameters = ImageParametersContainer()\n\n try:\n moments = hillas_parameters(geom[mask], dl1.image[mask])\n hillas_dict[tel_id] = moments\n dl1.parameters.hillas = moments\n except HillasParameterizationError:\n dl1.parameters.hillas = HillasParametersContainer()\n continue\n\n # copy event container to modify it\n event_copy = deepcopy(event)\n # overwrite all image parameters but the last one with dummy ones\n for tel_id in list(event_copy.dl1.tel.keys())[:-1]:\n event_copy.dl1.tel[tel_id].parameters.hillas = HillasParametersContainer()\n fit(event_copy)\n assert event_copy.dl2.stereo.geometry[\"HillasReconstructor\"].is_valid is False\n\n # Now use the original event, but overwrite the last width to 0\n event.dl1.tel[tel_id].parameters.hillas.width = 0 * u.m\n fit(event)\n assert event.dl2.stereo.geometry[\"HillasReconstructor\"].is_valid is False\n\n # Now use the original event, but overwrite the last width to NaN\n event.dl1.tel[tel_id].parameters.hillas.width = np.nan * u.m\n fit(event)\n assert event.dl2.stereo.geometry[\"HillasReconstructor\"].is_valid is False", "def test_invalidate():\n \n example_platemap = pd.read_csv(inval_platemap, index_col=[0]) # read in an example platemap with invalidated well ids, rows and columns\n test_object = fa.read_in_envision(data_csv=plate_2_repeat, platemap_csv=plate_map_file, 
data_type='plate', size=384) # read in actual data and plate map\n test_object.invalidate(wells=['A2', 'B3', 'E4'], rows=['C', 'G'], columns=[7,8,12,20]) # invalidate specific well ids, rows and columns\n pd.testing.assert_frame_equal(test_object.plate_map, example_platemap, check_dtype=False) # compare the two dfs without checking the data types because the example df was not read in using the read_in_envision function", "def checkfile():\n with open(STATEFILE, 'r') as f:\n try:\n prevdate = datetime.strptime(f.read(), TIMEFORMAT)\n except ValueError:\n print \"Statefile is either blank or has corrupt data.\"\n f.seek(0,0)\n print \"Here is the statefile\"\n print f.read()\n return False\n print prevdate\n return prevdate", "def test_bad_assumption_file(reformfile1, assumpfile_bad1, assumpfile_bad2):\n input_stream = StringIO(RAWINPUTFILE_CONTENTS)\n input_dataframe = pd.read_csv(input_stream)\n taxyear = 2022\n with pytest.raises(ValueError):\n TaxCalcIO(input_data=input_dataframe,\n tax_year=taxyear,\n reform=reformfile1.name,\n assump=assumpfile_bad1.name,\n growdiff_response=None,\n aging_input_data=False,\n exact_calculations=False)\n with pytest.raises(ValueError):\n TaxCalcIO(input_data=input_dataframe,\n tax_year=taxyear,\n reform=reformfile1.name,\n assump=assumpfile_bad2.name,\n growdiff_response=None,\n aging_input_data=False,\n exact_calculations=False)" ]
[ "0.63918567", "0.5941573", "0.5877316", "0.58717", "0.5855711", "0.57905686", "0.5781173", "0.57732564", "0.57466656", "0.5714373", "0.5701173", "0.5693405", "0.5687235", "0.5651733", "0.56513137", "0.56238693", "0.56158066", "0.5615417", "0.56153786", "0.559923", "0.5591831", "0.5579631", "0.5573033", "0.5546883", "0.5540613", "0.5493063", "0.54731673", "0.54642767", "0.54362607", "0.54303056" ]
0.7445197
0
variance and mean normalization; parameter g=10 set by Ng and Coates 2011a. The g parameter is scale-dependent and assumes each pixel intensity remains between 0 and 255.
def NgNormalization2(Pin, g=10.0):
    Pmean = np.mean(Pin, axis=1, keepdims=True)
    Pstd = np.sqrt(np.var(Pin, axis=1, keepdims=True) + g)  # g = 10 for images of brightness 0...255
    O = (Pin - Pmean) / Pstd
    return O
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def variance_normalize(self):\n self.img = self.img / np.sqrt(np.sum(self.img ** 2))", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)", "def image_normalize(im, axis=(0, 1), c=1e-8):\n return (im - im.mean(axis)) / (im.std(axis) + c)", "def standardize(image, mean=[0.48462227599918, 0.45624044862054, 0.40588363755159], std=[0.22889466674951, 0.22446679341259, 0.22495548344775]):\n image = image.astype(np.float32) / 255.0\n image = np.divide(np.subtract(image, mean), std)\n return image", "def global_standardization(X): \n print(f'Image shape: {X[0].shape}')\n print(f'Data Type: {X[0].dtype}')\n X = X.astype('float32')\n\n print(\"***\")\n ## GLOBAL STANDARDIZATION\n # calculate global mean and standard deviation\n mean, std = X.mean(), X.std()\n print(f'Mean: {mean:.3f} | Std: {std:.3f}')\n print(f'Min: {X.min():.3f} | Max: {X.max():.3f}')\n # global standardization of pixels\n X = (X - mean) / std\n # confirm it had the desired effect\n mean, std = X.mean(), X.std()\n print(f'Mean: {mean:.3f} | Std: {std:.3f}')\n print(f'Min: {X.min():.3f} | Max: {X.max():.3f}')\n \n return X", "def normalize_img(img):\n channel_mean = img.mean(axis=0, keepdims=True).mean(axis=1, keepdims=True)\n channel_std = img.std(axis=0, keepdims=True).mean(axis=1, keepdims=True)\n return (img - channel_mean) / channel_std", "def rescale_data(self):\n\n # Dividing every array of simulated data vectors by the mean of that array.\n '''# Didnt work\n for key in self.data.keys():\n self.data[key] /= np.mean(self.data[key])\n '''\n\n self.rescaled = True\n\n # Mean normalization\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] -= np.mean(self.data[key])\n self.data[key] /= (np.max(self.data[key]) - np.min(self.data[key]))\n \"\"\"\n\n # Median normalization\n \"\"\" didnt work, still dividing by large number \n for key in self.data.keys():\n self.data[key] -= np.median(self.data[key])\n self.data[key] /= (np.max(self.data[key]) - np.min(self.data[key]))\n \"\"\"\n\n # Divide by median\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] -= np.median(self.data[key])\n self.data[key] /= (np.median(self.data[key]))\n \"\"\"\n\n # Take logarithm of data\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] = np.log10(self.data[key])\n \"\"\"\n\n # Scale by length of vector\n \"\"\"\n for key in self.data.keys():\n self.data[key] /= np.linalg.norm(self.Cl_noiseless)\n \"\"\"\n\n \n # Scale by negative of the natural logarithm \n for key in self.data.keys():\n self.data[key] = -1 * np.log(self.data[key]) \n \n \"\"\"\n # Scale by subtracting the mean and dividing by std\n std = np.nanstd(self.data['data'])\n mean = np.nanmean(self.data['data'])\n for key in self.data.keys():\n # self.data[key] -= np.log(self.Cl_noiseless) # -1* # scale this same way\n # self.data[key] -= self.Cl_noiseless # -1* # scale this same way\n self.data[key] -= mean \n self.data[key] /= std\n \"\"\"", "def imgNormalize(img): \n constant = np.sum(sitk.GetArrayFromImage(img))*np.prod(img.GetSpacing())\n return img/constant", "def color_normalize(x, mean, std):\n if x.dim() in {3, 4}:\n if x.size(0) == 1:\n x = x.repeat(3, 1, 1)\n assert x.size(0) == 3, \"For single video format, expected RGB along first dim\"\n for t, m, s in zip(x, mean, std):\n t.sub_(m)\n t.div_(s)\n elif x.dim() 
== 5:\n assert (\n x.shape[1] == 3\n ), \"For batched video format, expected RGB along second dim\"\n x[:, 0].sub_(mean[0]).div_(std[0])\n x[:, 1].sub_(mean[1]).div_(std[1])\n x[:, 2].sub_(mean[2]).div_(std[2])\n return x", "def normalize_image(self):\n # The image normalization is identical to Cloud TPU ResNet.\n self._image = tf.image.convert_image_dtype(self._image, dtype=tf.float32)\n offset = tf.constant(DATASET_MEAN)\n offset = tf.expand_dims(offset, axis=0)\n offset = tf.expand_dims(offset, axis=0)\n self._image -= offset\n\n scale = tf.constant(DATASET_VAR)\n scale = tf.expand_dims(scale, axis=0)\n scale = tf.expand_dims(scale, axis=0)\n self._image /= scale", "def normalization_func(img):\n vmin, vmax = img.min(), img.max()\n if vmin != vmax:\n im = (img - vmin) / (vmax - vmin)\n else:\n im = np.ones(img.shape)\n return im", "def standardize(x, axis=-1):\n stds_avg = np.std(x, axis=axis, keepdims=True)\n x -= np.mean(x, axis=axis, keepdims=True)\n x /= (stds_avg + 1e-8)\n return x", "def standardize(self, x):\n if not self.image_resample:\n x = to_shape(x, self.image_shape, constant_values=-1024)\n elif self.image_resample:\n x = resample(x, self.image_shape)\n\n if self.preprocessing_function:\n x = self.preprocessing_function(x)\n if self.voxelwise_normalization:\n if self.voxel_bounds is not None:\n x = voxelwise_normalize(x, self.voxel_bounds)\n if self.voxelwise_center:\n if self.voxel_mean is not None:\n x -= self.voxel_mean\n if self.voxelwise_std_normalization:\n x /= (self.voxelwise_std + 1e-7)\n if self.samplewise_center:\n x -= np.mean(x, axis=self.channel_axis, keepdims=True)\n if self.samplewise_std_normalization:\n x /= (np.std(x, axis=self.channel_axis, keepdims=True) + 1e-7)\n return x", "def _collect_params(self) -> np.ndarray:\n res = np.array([0.]*(self.dimensions))\n res[0] = self.model.rbf.variance\n res[1:-1] = self.model.rbf.lengthscale\n res[-1] = self.model.Gaussian_noise.variance\n return res", "def normalize(data):\n\n p_means = np.mean(data,axis=0)\n p_vars = np.var(data,axis=0)\n\n # subtract dc component\n data = data-p_means\n\n # contrast normalize \n data = data/np.sqrt(p_vars+10) # plus 10 to account for small variances\n \n return data", "def standardize(image):\n \n ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###\n \n # initialize to array of zeros, with same shape as the image\n standardized_image = np.zeros(image.shape)\n\n # iterate over channels\n for c in range(image.shape[0]):\n # iterate over the `z` dimension\n for z in range(image.shape[3]):\n # get a slice of the image \n # at channel c and z-th dimension `z`\n image_slice = image[c,:,:,z]\n\n # subtract the mean from image_slice\n centered = image_slice - np.mean(image_slice)\n \n # divide by the standard deviation (only if it is different from zero)\n centered_scaled = centered / np.std(centered)\n\n # update the slice of standardized image\n # with the scaled centered and scaled image\n standardized_image[c, :, :, z] = centered_scaled\n\n ### END CODE HERE ###\n\n return standardized_image", "def img_normalize(image, label):\n mean, std = ds_stats\n image -= tf.constant(mean, shape=[1, 1, num_channels], dtype=image.dtype)\n image /= tf.constant(std, shape=[1, 1, num_channels], dtype=image.dtype)\n return image, label", "def normalization_stats(completeData):\n data_mean = np.mean(completeData, axis=0)\n data_std = np.std(completeData, axis=0)\n\n dimensions_to_ignore = []\n dimensions_to_use = []\n\n dimensions_to_ignore.extend(list(np.where(data_std < 
1e-4)[0]))\n dimensions_to_use.extend(list(np.where(data_std >= 1e-4)[0]))\n\n data_std[dimensions_to_ignore] = 1.0\n\n return data_mean, data_std, dimensions_to_ignore, dimensions_to_use", "def processImage(imgs):\r\n imgs = imgs.astype(np.float32)\r\n for i, img in enumerate(imgs):\r\n m = img.mean()\r\n s = img.std()\r\n imgs[i] = (img - m) / s\r\n return imgs", "def normalization(img):\n max_val = img.max()\n min_val = img.min()\n\n return ((img-min_val)*255)/(max_val-min_val)", "def unnormalize(images, mean, std):\n \n unnorm_images = images * std + mean\n \n \n return unnorm_images", "def reshape_normalise(img):\n\t# The image shape is expected to match the input of VGG19\n\timg = np.resize(img, (1, CONFIG.IMAGE_HEIGHT, CONFIG.IMAGE_WIDTH, CONFIG.COLOR_CHANNELS)).astype('float32')\n\timg -= CONFIG.MEAN_PIXEL\n\treturn img", "def abe(img,variance):\n nominator = img**2-3*variance\n nominator[nominator<0] = 0\n out = np.divide(nominator,img)\n out[img==0]=0\n return out", "def normalise(image):", "def myscale(g, factor=1.0):\n g.setdata(factor * g.getdata())\n # if !g.frozen eq 0 then show", "def test_scaling():\n rng = np.random.RandomState(42)\n shape = (400, 10)\n u = rng.standard_normal(size=shape)\n mean = 100 * rng.uniform(size=shape[1]) + 1\n Y = u + mean\n Y_, mean_ = mean_scaling(Y)\n assert_almost_equal(Y_.mean(0), 0, 5)\n assert_almost_equal(mean_, mean, 0)\n assert Y.std() > 1", "def normalize(image):\n image = image.astype(np.float32)\n mean = np.mean(image)\n std = np.std(image)\n if std > 0:\n ret = (image - mean) / std\n else:\n ret = image * 0.\n return ret", "def denormalize(img, means, stds, resize_to_original=False):\n\n img = np.moveaxis(img, 0, 2)\n img = img*stds + means\n img = np.clip(img, 0, 255).astype('uint8')\n\n if resize_to_original:\n # revert def preprocess_image()\n img = img[:,(img_w//4): (img_w - img_w//4),:]\n img = cv2.copyMakeBorder( img, img.shape[0], 0,0,0, cv2.BORDER_CONSTANT) #, borderType)\n img = cv2.resize(img, (img_orig_w, img_orig_h))\n \n return img", "def normalize(img, mean, std, data_format='CHW'):\n _assert_image_tensor(img, data_format)\n\n mean = paddle.to_tensor(mean, place=img.place)\n std = paddle.to_tensor(std, place=img.place)\n\n if _is_channel_first(data_format):\n mean = mean.reshape([-1, 1, 1])\n std = std.reshape([-1, 1, 1])\n\n return (img - mean) / std" ]
[ "0.68335885", "0.62980634", "0.62980634", "0.6090265", "0.607238", "0.5978527", "0.5925094", "0.5905927", "0.58876354", "0.5860991", "0.5840054", "0.5811187", "0.57432306", "0.57223207", "0.5718694", "0.57149523", "0.57009166", "0.5692579", "0.5685911", "0.5682441", "0.56731033", "0.56655484", "0.565657", "0.5650634", "0.56485045", "0.5647275", "0.5646377", "0.564436", "0.56388265", "0.5636179" ]
0.7362537
0
Finds the coordinate of the maximum point in a trajectory.
def find_max(trajectory): x = trajectory.s y = trajectory.i yt = np.abs(y - max(y)) yt = yt < 1e-5 max_idx = np.where(yt == True)[0] max_idx = max(max_idx) return [x[max_idx], y[max_idx]]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_max_score_location(grid, shape):", "def find_max(self):\n\n max_x = -10\n max_y = -10\n k = len(self.__col_lista)\n for i in range(k):\n x, y = self.__col_lista[i]\n if x > max_x:\n max_x = x\n if y > max_y:\n max_y = y\n return max_x, max_y", "def get_max_point(image):\r\n max_value= 0\r\n better_point= None\r\n for line in range(len(image)):\r\n for column in range(len(image[0])):\r\n if image[line][column]>max_value:\r\n max_value= image[line][column]\r\n better_point = [line,column]\r\n return better_point", "def find_dist_max(ar_coorx,ar_coory):\n nb_cell=len(ar_coorx)\n max_dist=0.\n for i in range(nb_cell):\n for j in range(nb_cell):\n max_dist=max(max_dist,distance(ar_coorx[i],ar_coory[i],ar_coorx[j],ar_coory[j]))\n return max_dist", "def max_point(self):\n x = self.max(0).idxmax()\n y = self.loc[:, x].idxmax()\n return x, y", "def __find_max_distance(self):\n return utils.find_max_distance(self.__game)", "def argmaxY( self ):\n max = -1e30\n for i in range( 0, self.GetN() ):\n p = ( ROOT.Double(), ROOT.Double() )\n self.GetPoint( i, p[0], p[1] )\n if p[1] > max: max = p[1]\n return max", "def get_max_coordinates(self) -> float:\n return self.pendulum2.get_max_coordinates()", "def max(self):\n return self._max_coords", "def compute_max(self):\r\n self.x_max = self.ox + self.dx*self.nx\r\n self.y_max = self.oy + self.dy*self.ny\r\n self.z_max = self.oz + self.dz*self.nz", "def get_max_score(location_list, grid, shape):", "def _get_maximum_from_heatmap(self, heatmap):\n assert heatmap.size(0) == 1 and heatmap.size(1) == 1\n max_map = torch.eq(heatmap, self.pool(heatmap)).float()\n heatmap = heatmap * max_map\n score = heatmap.view(-1)\n score, pos_idx = score.topk(self.max_num_people)\n mask = score > self.keypoint_threshold\n score = score[mask]\n pos_idx = pos_idx[mask]\n return pos_idx, score", "def _find_largest_candidate(self, reduced):\n nbr_counts = np.count_nonzero(reduced == 0, axis=0) # = [1, 1, 4, 2,...] where each value is the number of neighbours for the variant at that index.\n count_max = nbr_counts.max()\n if count_max == 0: # Indicates there are no available variants close enough\n return None, [] # to the remaining unassigned. Usually raises an error.\n max_inds = np.nonzero(nbr_counts == count_max)[0] # Array containing the indices of all variants with the max number of neighbours.\n if len(max_inds) == 1: # A single largest cluster\n best_center = max_inds[0]\n best_clstr = np.nonzero(reduced[:,best_center] == 0)[0]\n else: # A tie for largest cluster. Broken by smallest sum of full scores\n # This was tested with the below more accurate and true scoring function. 
Unfortunately it became hideously slow (clustered_inds and centre_inds were given as args):\n # clstr_inds = np.nonzero(reduced[:,max_ind] == 0)[0]\n # covered_inds = list(clustered_inds | set(clstr_inds))\n # centre_inds.append(max_ind)\n # score = np.sum(np.min(self.orig_dists[np.ix_(covered_inds,centre_inds)], axis=1))\n # centre_inds.pop()\n best_center, best_clstr, best_score = None, [], np.inf\n for max_ind in max_inds:\n clstr_inds = np.nonzero(reduced[:,max_ind] == 0)[0]\n score = np.sum(self.orig_dists[clstr_inds,max_ind])\n if score < best_score:\n best_center, best_clstr, best_score = max_ind, clstr_inds, score\n return best_center, best_clstr", "def find_max(self):\n if self.right:\n return self.right.find_max()\n return self.data", "def getMaximum(self):\n v1 = Vector(*self.p1)\n v2 = Vector(*self.p2)\n if v1.angle >= v2.angle:\n return self.p1\n else:\n return self.p2", "def list_maxpoint(N, rect):\r\n m = [0] * (N * N + 1)\r\n for i in range(N):\r\n for j in range(N):\r\n for ie in range(i+1, N+1):\r\n for je in range(j+1, N+1):\r\n n = (ie - i) * (je - j)\r\n m[n] = max(m[n], rect[i][j] - rect[i][je] - rect[ie][j] + rect[ie][je])\r\n return m", "def find_max_coords(self):\n all_max_bound = []\n all_min_bound = []\n shape_dict = self.shape_dict\n for zone_id in shape_dict:\n zone_shape = shape_dict[zone_id]\n max_bound_zone = zone_shape.max_bound\n min_bound_zone = zone_shape.min_bound\n all_max_bound.append(max_bound_zone)\n all_min_bound.append(min_bound_zone)\n\n map_max_bound, unused_max = Utils.calculate_boundaries(all_max_bound)\n unused_min, map_min_bound = Utils.calculate_boundaries(all_min_bound)\n\n return (map_max_bound, map_min_bound)", "def find_max(self):\n\n if self.right:\n return self.right.find_max()\n\n return self.data", "def get_max_cl(Re, r):\n xf = XFoil()\n if r <= 0.175: \n xf.airfoil = naca6409\n else:\n xf.airfoil = naca2412\n xf.Re = Re\n xf.Re = Re\n xf.max_iter = 200\n xf.n_crit = 9.00\n xf.xtr = [1.00, 1.00]\n xf.M = 0\n a_seq, cl_seq, cd_seq, cm_seq, cp_seq = xf.aseq(10,15,0.1)\n # ignore nan by making it 0\n cl_seq = np.nan_to_num(cl_seq)\n # find the maximum cl \n cl_maxi = np.max(cl_seq)\n # index of the maximum cl\n idx = np.argmax(cl_seq)\n return round(cl_maxi,2),round(a_seq[idx],2), round(cd_seq[idx],2)", "def find_max_distance(l):\n\tcomb = list(combinations(list(range(1, len(l))), 2))\n\tx, y, max_distance = 0, 0, 0\n\n\tfor i,j in comb:\n\t\tif np.sum(np.abs(l[i]-l[j])) > max_distance:\n\t\t\tx, y, max_distance = i, j, np.sum(np.abs(l[i]-l[j]))\n\treturn x, y, max_distance", "def _get_max_t(self):\n\n return max([\n self.s_of_t[-1][0],\n self.i_of_t[-1][0],\n self.r_of_t[-1][0],\n ])", "def get_track_endpoints_max_dist(points):\n coords = points\n dist = cdist(coords, coords)\n inds = np.unravel_index(dist.argmax(), dist.shape)\n return coords[inds[0]], coords[inds[1]]", "def get_gridpoint_max(self):\n ind_array = np.indices(self.results_array.shape)\n maxes = []\n\n def get_max(x, y, z):\n \"\"\"\n Would be funnier if I knew a Max.\n \"\"\"\n if isinstance(self.results_array[x][y][z], tuple):\n num_zeros = self.tup_max_length - len(self.results_array[x][y][z])\n if num_zeros != 0:\n print('Number of zeros: ', num_zeros)\n hist_arr = np.array(self.results_array[x][y][z])\n maxes.append(max(hist_arr))\n\n vget_max = np.vectorize(get_max, otypes=[list])\n vget_max(ind_array[0], ind_array[1], ind_array[2])\n return maxes", "def largest_polygon(polygons):\n # we should probably use a complicated formula to do this\n # but for now, it 
probably suffices to notice that the last one is usually\n # the largest\n return polygons.points[-1]", "def find_max(self, root = None):\n return self._find_max(root if root else self.root)", "def findmax(h5file, pcoord_dim, fi, li):\n max_values = []\n for i in range(fi,li+1):\n i = str(i)\n iteration = \"iter_\" + str(numpy.char.zfill(i,8))\n pc = h5file['iterations'][iteration]['pcoord']\n maxv = numpy.max(pc[:,-1,pcoord_dim-1])\n max_values.append(maxv)\n maxmax = numpy.max(max_values)\n nw = numpy.where(max_values>(maxmax-maxmax*0.0001))\n iter_num = str((nw[0]+1)[0])\n \n wheretolook = \"iter_\" + str(numpy.char.zfill(iter_num,8))\n max_iter = h5file['iterations'][wheretolook]['pcoord'][:,-1,pcoord_dim-1]\n segmax = numpy.max(max_iter)\n nw2 = numpy.where(max_iter>(segmax-segmax*0.0001))\n seg_num = (nw2[0])[0]\n print (\"Maximum pcoord value for dimension\",pcoord_dim,\"is:\",segmax) \n print (\"It is segment:\",seg_num,\"of iteration:\",iter_num)", "def _find_max(self, root):\n while root.right:\n root = root.right\n return root", "def max_position(self):\n raise NotImplementedError", "def find_max(self):\n return max(self.nodes, key=int)", "def _get_max_t(self):\n \"\"\"\n if hasattr(self,'k_of_t'):\n return max([ \n self.s_of_t[-1][0],\n self.i_of_t[-1][0],\n self.r_of_t[-1][0],\n self.k_of_t[-1][0],\n ])\n else:\n return max([ \n self.s_of_t[-1][0],\n self.i_of_t[-1][0],\n self.r_of_t[-1][0],\n ])\n \"\"\"\n return self.t_max" ]
[ "0.69206774", "0.6715515", "0.6675191", "0.650989", "0.64683646", "0.6467673", "0.6457012", "0.64458644", "0.6366418", "0.6241499", "0.6237555", "0.62108934", "0.61889035", "0.61826956", "0.61502874", "0.61187315", "0.60993797", "0.60676146", "0.6038875", "0.60360044", "0.60331416", "0.59925634", "0.5987295", "0.5974697", "0.5941667", "0.5913743", "0.59135544", "0.5891078", "0.58817965", "0.5874074" ]
0.67509127
1
A function that creates the initial conditions for the search of the commutation curve in a determined system. Takes as input the system that is going to be analyzed, the displacement to the right, and how far low the line segment is going to go.
def make_initial_conditions(system, displacement, low): C = sir.CurveSegment(system.sbar, system.imax, 0, system, 1) C.s = C.s + displacement s_inter, i_inter = C._curve_sol(low) C = sir.CurveSegment(system.sbar, system.imax, 0, system, s_inter) C.s = C.s + displacement s0 = np.linspace(C.s[-1], 1) i0 = np.array([i_inter]*len(s0)) s0 = np.concatenate((C.s, s0)) i0 = np.concatenate((C.i, i0)) return s0, i0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def analyseCoordination(self):\n #create a list of criteria that correspond to maximal path length\n #max_path_length = max(self.pathLengths)\n\n #criterion_max_path_length = []\n #origins_max_path_length = []\n #for c in range(len(self.pathLengths)):\n # if self.pathLengths[c] == max_path_length:\n # criterion_max_path_length.append(self.globalMin[c])\n # origins_max_path_length.append(self.origins[c])\n\n #min_criterion = min(criterion_max_path_length)\n\n #find index\n #for m in range(len(criterion_max_path_length)):\n # if criterion_max_path_length[m] == min_criterion:\n # break\n\n #for s in range(len(self.origins)):\n # if self.origins[s] == origins_max_path_length[m]:\n # break\n\n min_criterion = self.globalMin[0]\n self.overall_min = min_criterion\n self.overall_max_path_length = len(self.min_path[0])\n\n if self.chosenScheduleIndex != self.globalMinSchedIdx[0]:\n self.chosenScheduleIndex = self.globalMinSchedIdx[0]\n self.chosenSchedule = self.schedules[self.chosenScheduleIndex]\n self.EConsumptionChosenSchedule = self.EConsumptionScheduleCurves[self.chosenScheduleIndex]\n # update SOC\n self.setSOC(self.SOCEnd[self.chosenScheduleIndex])\n # update modulation level\n self.setStateModlvl(self.chosenSchedule[-1])\n\n\n # inform all neighbors about origin that has local minimal criterion\n for n in range(len(self.Neighbors)):\n #structure: ['minimalorigin', ID_minimal_origin, minimal_criterion_value]\n #self.sendMessage(self.Neighbors[n], 70, ['minimalorigin', copy.deepcopy(origins_max_path_length[m]), copy.deepcopy(min_criterion), copy.deepcopy(self.min_path[s]), copy.deepcopy(self.min_path_schedules[s])])\n self.sendMessage(self.Neighbors[n], 70, ['minimalorigin', copy.deepcopy(self.CommID), copy.deepcopy(min_criterion), copy.deepcopy(self.min_path[0]), copy.deepcopy(self.min_path_schedules[0])])\n\n if self.OPTcriterion == 'maxmindiff':\n fluct_criterion = max(self.EFluctuationCurve) - min(self.EFluctuationCurve)\n elif self.OPTcriterion == 'absremainder':\n fluct_criterion = 0\n for a in range(len(self.EFluctuationCurve)):\n fluct_criterion += abs(self.EFluctuationCurve[a])\n\n\n #print 'ID {0}: criterion is: {1} , of origin {4}, path length: {2}, schedules: {5}, with improvement of {3} %'.format(self.CommID, min_criterion, len(self.min_path[s]), 100 - 100*(float((float(min_criterion))/float(fluct_max_min_diff))), origins_max_path_length[m], self.min_path_schedules[s] )\n self.log_message('ID {0}: criterion is: {1} , of origin {4}, path length: {2}, schedules: {5}, with improvement of {3} %'.format(self.CommID, min_criterion, len(self.min_path[0]), 100 - 100*(float((float(min_criterion))/float(fluct_criterion))), self.CommID, self.min_path_schedules[0] ))", "def init_sequential_planning_program() -> str:\n # We reason about the state of the world at particular time steps: [0, t_max]\n seq_encoding = 'time(0..horizon).\\n'\n\n # Predicates evaluate to True or False\n seq_encoding += 'boolean(true).\\n'\n seq_encoding += 'boolean(false).\\n'\n # The contains/2 atom captures this relationship\n seq_encoding += 'contains(X, value(X, B)) :- predicate(X), boolean(B).\\n'\n\n # The initial state is at time t=0\n # The holds/3 atom captures the value of a predicate at a particular timestep t >= 0\n seq_encoding += 'holds(Predicate, Value, 0) :- initialState(Predicate, Value).\\n'\n\n # Closed World Assumption (CWA): Any ground atoms in the initial state which are not explicitly declared True\n # are set to False\n seq_encoding += 'initialState(X, value(X, false)) :- predicate(X), not 
initialState(X, value(X, true)).\\n'\n\n # The solution to the planning problem is extracted from occurs/2 atoms\n # This is a sequential encoding: only one action may occur at a particular timestep\n # Also, actions may only occur AFTER the initial state.\n seq_encoding += '1 {occurs(Action, T) : action(Action)} 1 :- time(T), T > 0.\\n'\n\n # An action may not occur unless its preconditions are met (i.e., for an action to occur at time t,\n # all applicable predicates must hold the values specified in the precondition at time t-1)\n seq_encoding += (\n ':- occurs(Action, T), precondition(Action, Predicate, Value), '\n 'not holds(Predicate, Value, T - 1).\\n'\n )\n\n # Capture the effects of an action: at time t, the value of a predicate is changed to the one specified in the\n # action's effect as long as the action was valid (see previous statement).\n seq_encoding += (\n 'caused(Predicate, Value, T) :- '\n 'occurs(Action, T), '\n 'effect(Action, Predicate, Value), '\n 'holds(PredicatePre, ValuePre, T - 1) : precondition(Action, PredicatePre, ValuePre).\\n'\n )\n\n # A predicate is considered modified if its value was changed by an action\n seq_encoding += 'modified(Predicate, T) :- caused(Predicate, Value, T).\\n'\n\n # The so-called 'inertia' statements. At a particular timestep, the value of a predicate was either:\n # 1) Modified and therefore holds a new value\n seq_encoding += 'holds(Predicate, Value, T) :- caused(Predicate, Value, T).\\n'\n # 2) Was not modified and therefore continues to hold its previous value\n seq_encoding += (\n 'holds(predicate(V), Value, T) :- holds(predicate(V), Value, T - 1), '\n 'not modified(predicate(V), T), time(T).\\n'\n )\n\n # The goal is not met unless the appropriate predicates hold their goal values at the final timestep\n seq_encoding += ':- goal(Predicate, Value), not holds(Predicate, Value, horizon).\\n'\n\n return seq_encoding", "def __init__(self, initial_state):\n\n self.x0 = np.asarray(initial_state)\n\n self.T = 20 # The time bound of our specification\n\n # Obstacle and goal region vertices: (xmin, xmax, ymin, ymax)\n self.obstacle_vert = (3,5,4,6)\n self.goal_vert = (7,8,8,9)\n\n # Now we'll define the STL specification. We'll do this over\n # the signal s, which is a list of x, y coordinates and the control\n # input u at each timestep. 
\n\n # Obstacle and goal constraints\n hit_obstacle = self.in_rectangle_formula(self.obstacle_vert) \n at_goal = self.in_rectangle_formula(self.goal_vert)\n \n self.obstacle_avoidance = hit_obstacle.negation().always(0,self.T)\n self.reach_goal = at_goal.eventually(0,self.T)\n\n # Control constraints\n umin = - 0.2\n umax = 0.2\n u1_above_min = STLFormula(lambda s, t : s[t,2] - umin)\n u1_below_max = STLFormula(lambda s, t : -s[t,2] + umax)\n u2_above_min = STLFormula(lambda s, t : s[t,3] - umin)\n u2_below_max = STLFormula(lambda s, t : -s[t,3] + umax)\n\n u1_valid = u1_above_min.conjunction(u1_below_max)\n u2_valid = u2_above_min.conjunction(u2_below_max)\n\n self.control_bounded = u1_valid.conjunction(u2_valid).always(0,self.T)\n\n # Full specification\n self.full_specification = self.obstacle_avoidance.conjunction(self.reach_goal).conjunction(self.control_bounded)", "def minimum_spanning_arborescence(sol):", "def wfc_init(prestate):\n prestate.adjacency_directions_rc = {\n i: CoordRC(a.y, a.x) for i, a in prestate.adjacency_directions.items()\n } #\n prestate = wfc.wfc_utilities.find_pattern_center(prestate)\n parameters = types.SimpleNamespace(wfc_ns=prestate)\n\n state = types.SimpleNamespace()\n state.result = None\n\n parameters.heuristic = (\n 0 # TODO: Implement control code to choose between heuristics\n )\n\n parameters.adjacency_relations = adjacency_extraction_consistent(\n parameters.wfc_ns, parameters.wfc_ns.patterns\n )\n parameters.patterns = np.array(list(parameters.wfc_ns.pattern_catalog.keys()))\n parameters.pattern_translations = list(parameters.wfc_ns.pattern_catalog.values())\n parameters.number_of_patterns = parameters.patterns.size\n parameters.number_of_directions = len(parameters.wfc_ns.adjacency_directions)\n\n # The Propagator is a data structure that holds the adjacency information\n # for the patterns, i.e. 
given a direction, which patterns are allowed to\n # be placed next to the pattern that we're currently concerned with.\n # This won't change over the course of using the solver, so the important\n # thing here is fast lookup.\n parameters.propagator_matrix = np.zeros(\n (\n parameters.number_of_directions,\n parameters.number_of_patterns,\n parameters.number_of_patterns,\n ),\n dtype=np.bool_,\n )\n for direction, pattern_one, pattern_two in parameters.adjacency_relations:\n parameters.propagator_matrix[(direction, pattern_one, pattern_two)] = True\n\n output = types.SimpleNamespace()\n\n # The Wave Table is the boolean expression table of which patterns are allowed\n # in which cells of the solution we are calculating.\n parameters.rows = parameters.wfc_ns.generated_size[0]\n parameters.columns = parameters.wfc_ns.generated_size[1]\n\n output.solving_time = np.full(\n (parameters.rows, parameters.columns), 0, dtype=np.int32\n )\n output.propagation_time = np.full(\n (parameters.rows, parameters.columns), 0, dtype=np.int32\n )\n\n parameters.wave_shape = [\n parameters.rows,\n parameters.columns,\n parameters.number_of_patterns,\n ]\n state.wave_table = np.full(parameters.wave_shape, True, dtype=np.bool_)\n\n # The compatible_count is a running count of the number of patterns that\n # are still allowed to be next to this cell in a particular direction.\n compatible_shape = [\n parameters.rows,\n parameters.columns,\n parameters.number_of_patterns,\n parameters.number_of_directions,\n ]\n\n WFC_LOGGER.debug(f\"compatible shape:{compatible_shape}\")\n state.compatible_count = np.full(\n compatible_shape, parameters.number_of_patterns, dtype=np.int16\n ) # assumes that there are less than 65536 patterns\n\n # The weights are how we manage the probabilities when we choose the next\n # pattern to place. 
Rather than recalculating them from scratch each time,\n # these let us incrementally update their values.\n state.weights = np.array(list(parameters.wfc_ns.pattern_weights.values()))\n state.weight_log_weights = np.vectorize(weight_log)(state.weights)\n state.sum_of_weights = np.sum(state.weights)\n\n state.sum_of_weight_log_weights = np.sum(state.weight_log_weights)\n state.starting_entropy = math.log(state.sum_of_weights) - (\n state.sum_of_weight_log_weights / state.sum_of_weights\n )\n\n state.entropies = np.zeros([parameters.rows, parameters.columns], dtype=np.float64)\n state.sums_of_weights = np.zeros(\n [parameters.rows, parameters.columns], dtype=np.float64\n )\n\n # Instead of updating all of the cells for every propagation, we use a queue\n # that marks the dirty tiles to update.\n state.observation_stack = collections.deque()\n\n output.output_grid = np.full(\n [parameters.rows, parameters.columns], WFC_NULL_VALUE, dtype=np.int64\n )\n output.partial_output_grid = np.full(\n [parameters.rows, parameters.columns, parameters.number_of_patterns],\n -9,\n dtype=np.int64,\n )\n\n output.current_iteration_count_observation = 0\n output.current_iteration_count_propagation = 0\n output.current_iteration_count_last_touch = 0\n output.current_iteration_count_crystal = 0\n output.solving_time = np.full(\n (parameters.rows, parameters.columns), 0, dtype=np.int32\n )\n output.ones_time = np.full((parameters.rows, parameters.columns), 0, dtype=np.int32)\n output.propagation_time = np.full(\n (parameters.rows, parameters.columns), 0, dtype=np.int32\n )\n output.touch_time = np.full(\n (parameters.rows, parameters.columns), 0, dtype=np.int32\n )\n output.crystal_time = np.full(\n (parameters.rows, parameters.columns), 0, dtype=np.int32\n )\n output.method_time = np.full(\n (parameters.rows, parameters.columns), 0, dtype=np.int32\n )\n output.choices_recording = np.full(\n (parameters.rows, parameters.columns), 0, dtype=np.float32\n )\n\n output.stats_tracking = prestate.stats_tracking.copy()\n\n return parameters, state, output", "def part1():\n camera_output = IntCodeProcessor(path='day17input.txt').execute_program()\n camera_output = ints_to_string(camera_output)\n grid = camera_output.splitlines()\n\n scaffolds = set()\n for row_index, row in enumerate(grid):\n for col_index, element in enumerate(row): \n if element == '#':\n scaffolds.add((row_index,col_index))\n\n intersections = [loc for loc in scaffolds if set(locations_adjacent_to(loc)) <= scaffolds]\n parameters = [loc[0] * loc[1] for loc in intersections]\n\n print(f'Part 1 answer: {sum(parameters)}')", "def linesearch(self):\n alp = self.alims[0]\n da = self.da\n Na = int((self.alims[1]-self.alims[0])/da)+1\n Jcv_prev = np.Inf\n Ncv = self.Nls\n xpmin = np.hstack((self.xlims[0,:],self.plims[0,:]))\n xpmax = np.hstack((self.xlims[1,:],self.plims[1,:]))\n Nxp = self.n+self.n_p\n xps = np.random.uniform(xpmin,xpmax,size=(Ncv,Nxp))\n xs,ps,_ = np.hsplit(xps,np.array([self.n,Nxp]))\n print(\"========================================================\")\n print(\"============= LINE SEARCH OF OPTIMAL ALPHA =============\")\n print(\"========================================================\")\n for k in range(Na):\n self.cvstem0(xs,ps,alp)\n print(\"Optimal value: Jcv =\",\"{:.2f}\".format(self.Jcv),\\\n \"( alpha =\",\"{:.3f}\".format(alp),\")\")\n if Jcv_prev <= self.Jcv:\n alp = alp-da\n break\n alp += da\n Jcv_prev = self.Jcv\n self.alp_opt = alp\n print(\"Optimal contraction rate: alpha =\",\"{:.3f}\".format(alp))\n 
print(\"========================================================\")\n print(\"=========== LINE SEARCH OF OPTIMAL ALPHA END ===========\")\n print(\"========================================================\\n\\n\")\n pass", "def getCriticStation(analyzer):\n mayIn = model.getRankMay(analyzer,\"in\")\n mayOut=model.getRankMay(analyzer,\"out\")\n less=model.getRankMen(analyzer,\"LessPopular\")\n return (mayIn,mayOut,less)", "def InitialCondition():\n maxX = getX(C.N + 1,C.N+1,C.alpha_max)\n y0 = np.zeros(maxX,dtype=complex)\n for i in range(0, C.N+2):\n for j in range(0, C.N+2):\n for alpha in [1]:\n\n X = getX(i, j, alpha)\n\n y0[X] = 1./2./C.N * (1-delta(i, C.N+1))*(1-delta(j, C.N+1))+1./2*delta(i, C.N+1)*delta(j, C.N+1) +\\\n 1./2./(C.N)**0.5 * ((1-delta(i, C.N+1)) *\n delta(j, C.N+1)+(1-delta(j, C.N+1))*delta(i, C.N+1))", "def make_control_knowledge(self, horizon):\n\n \"\"\" *** YOUR CODE HERE *** \"\"\"\n\n # ADD_RULE1_COUNT = 0\n # ADD_RULE2_COUNT = 0\n # ADD_RULE3_COUNT = 0\n\n close = list()\n far = list()\n\n for g in self.problem.goal:\n for p in self.problem.propositions:\n if re.match(r'at\\spackage\\d+\\scity\\d+-\\d+', str(p)):\n p_split = str(p).split()\n g_split = str(g).split()\n\n # if \"at\" and \"package[oo]\" match\n if p_split[0] == g_split[0] and p_split[1] == g_split[1]:\n # also \"city[oo]-[xx]\" match\n if p_split[2][:-2] == g_split[2][:-2]:\n close.append(p)\n else:\n far.append(p)\n\n # Rule 1:\n # ===============================\n # If a package is at its goal location, then it must remain there.\n # p@t and goal@t) -> p@t+1), where p is at(package, location)\n # cnf: not p@t or not goal@t or p@t+1\n\n for g in self.problem.goal:\n for t in range(0, horizon):\n clause = list()\n clause.append(-self.proposition_fluent_codes[(g, t)])\n clause.append(self.proposition_fluent_codes[(g, t + 1)])\n self.add_clause(clause, \"control\")\n # ADD_RULE1_COUNT += 1\n\n for t in range(0, horizon):\n for a in self.problem.actions:\n\n # Rule 2\n # ===============================\n\n # RULE\n # close -> do not load airplane\n # p1: close@t\n # p2: at the location of an airport @t\n # p3: airplane at this location @t\n # p4: plane is not loaded\n # a: load this airplane\n #\n # p1@t and p2@t and p3@t and p4@t => a@t\n # not p1@t or not p2@t or not p3@t or not p4@t or a@t\n # cnf: not p@t or not a@t\n if str(a).startswith('load-airplane'):\n for i in close:\n package = str(i).split()[1]\n if str(a).split()[1] == package:\n clause = list()\n clause.append(\n -self.proposition_fluent_codes[(i, t)])\n clause.append(-self.action_fluent_codes[(a, t)])\n self.add_clause(clause, \"control\")\n # ADD_RULE2_COUNT += 1\n\n # Rule 3\n # ===============================\n\n # RULE\n # far -> do not unload airplane\n # p@t -> not a@t, where p is far, a is unload-airplane\n # cnf: not p@t or not a@t\n if str(a).startswith('unload-airplane'):\n for j in far:\n package = str(j).split()[1]\n if str(a).split()[1] == package:\n clause = list()\n clause.append(\n -self.proposition_fluent_codes[(j, t)])\n clause.append(-self.action_fluent_codes[(a, t)])\n self.add_clause(clause, \"control\")\n # ADD_RULE3_COUNT += 1\n\n # # RULE\n # # if an airplane has a package on it and the package's\n # # destination is close do not fly this airplane.\n # # in fact, if the destination of package is far,\n # # fly this plane to it.\n # #\n # # p1: package on airplane @ t\n # # p2: package at a place @ t\n # # p3: the place and the goal are in the same city\n # # rule: p1@t and p2@t and p@3 => not fly plane@t\n # # 
and unload the plane@t\n #\n # # not p1@t or not p2@t or not fly@t\n # # not p1@t or not p2@t or unload\n #\n # # rule: p1@t and p2@t and not p3@t => fly plane@t and not\n # # unload the plane@t\n #\n # if str(a).startswith('fly-airplane'):\n # plane = str(a).split()[1]\n # # loc_from = str(a).split()[2]\n # for p1 in self.problem.propositions:\n # if str(p1).startswith('in package') and str(p1).split()[2] == plane: # in package plane\n # package = str(p1).split()[1]\n # for p2 in self.problem.propositions:\n # if p2 in close and str(p2).split()[1] == package: # at package location\n # clause = list()\n # clause.append(-self.proposition_fluent_codes[p1, t])\n # clause.append(-self.proposition_fluent_codes[p2, t])\n # clause.append(-self.action_fluent_codes[a, t])\n # self.add_clause(clause, 'control')\n # ADD_RULE2_COUNT += 1\n #\n #\n # for g in self.problem.goal:\n # if str(g).split()[1] == package:\n # destination = str(g).split()[2]\n # for do in self.problem.actions:\n # # unload-airplane package00 plane00 city00-00\n # if str(do).startswith('unload') and str(do).split()[1] == package and str(do).split()[2] == plane and str(do).split()[3] == destination:\n # clause2 = list()\n # clause2.append(-\n # self.proposition_fluent_codes[\n # p1, t])\n # clause2.append(-\n # self.proposition_fluent_codes[\n # p2, t])\n # clause2.append(\n # self.action_fluent_codes[\n # do, t])\n # self.add_clause(clause2,\n # 'control')\n #\n # ADD_RULE3_COUNT += 1\n\n # RULE\n # if there is no package needs to be transferred at a location,\n # and the location has a truck\n # drive the truck to its airport\n\n # p1: (at package__ city__-__ /\\ (it is a goal)@t\n # p2: (at truck__ city__-__)@t\n # p3: (city__-__ is not airport)\n # not p1/\\p2/\\p3 => drive_truck_to_its_airport@t\n #\n #\n # CNF: p1 V not p2 V not p3 V drive_truck_to_its_airport@t\n # if str(a).startswith('DRIVE-TRUCK'):\n # for p1 in self.problem.goal:\n # city = str(p1).split()[2]\n # for p2 in self.problem.propositions:\n # if str(p2).startswith('at truck') and str(p2).split()[2] == city:\n # for p3 in self.problem.propositions:\n # if str(p3).startswith('airport') and str(p3).split()[1] == city:\n # clause = list()\n # clause.append(self.proposition_fluent_codes[(p1, t)])\n # clause.append(-self.proposition_fluent_codes[(p2, t)])\n # clause.append(-self.proposition_fluent_codes[(p3, t)])\n # clause.append(self.action_fluent_codes[(a, t)])\n # self.add_clause(clause, \"control\")\n\n # RULE\n # if there is an airplane is loaded with a package need\n # transfer (to another city), fly airplane to the corresponding\n # city.\n\n # p1: (at airplane__ city__-__)@t\n # p2: (in package__ airplane__)@t\n # p3: ( p2 is in far)\n # p1/\\p2/\\p3 => fly_airplane_to_its_airport@t\n #\n #\n # CNF: not p1@t V not p2@t V not p3@t V fly_plane_to_airport@t\n\n # print(\"ADDED RULE 1:\")\n # print(ADD_RULE1_COUNT)\n #\n # print(\"ADDED RULE 2:\")\n # print(ADD_RULE2_COUNT)\n #\n # print(\"ADDED RULE 3:\")\n # print(ADD_RULE3_COUNT)", "def __init__(self, initial_state, T=20):\n\n self.x0 = np.asarray(initial_state)\n\n self.T = T # The time bound of our specification\n\n # Obstacle and goal region vertices: (xmin, xmax, ymin, ymax)\n self.obstacle_vert = (3,5,4,6)\n self.goal_vert = (7,8,8,9)\n self.target1_vert = (6,7,4.5,5.5)\n self.target2_vert = (1,2,4.5,5.5)\n\n # Now we'll define the STL specification. We'll do this over\n # the signal s, which is a list of x, y coordinates and the control\n # input u at each timestep. 
\n\n # Obstacle and goal constraints\n hit_obstacle = self.in_rectangle_formula(self.obstacle_vert) \n at_goal = self.in_rectangle_formula(self.goal_vert)\n \n self.obstacle_avoidance = hit_obstacle.negation().always(0,self.T)\n self.reach_goal = at_goal.eventually(0,self.T)\n\n # Intermediate target constraints\n at_target1 = self.in_rectangle_formula(self.target1_vert)\n reach_target1 = at_target1.eventually(0,self.T)\n \n at_target2 = self.in_rectangle_formula(self.target2_vert)\n reach_target2 = at_target2.eventually(0,self.T)\n\n self.intermediate_target = reach_target1.disjunction(reach_target2)\n\n # Control constraints\n umin = - 0.9\n umax = 0.9\n u1_above_min = STLFormula(lambda s, t : s[t,2] - umin)\n u1_below_max = STLFormula(lambda s, t : -s[t,2] + umax)\n u2_above_min = STLFormula(lambda s, t : s[t,3] - umin)\n u2_below_max = STLFormula(lambda s, t : -s[t,3] + umax)\n\n u1_valid = u1_above_min.conjunction(u1_below_max)\n u2_valid = u2_above_min.conjunction(u2_below_max)\n\n self.control_bounded = u1_valid.conjunction(u2_valid).always(0,self.T)\n\n # Full specification\n self.full_specification = self.obstacle_avoidance.conjunction(self.reach_goal).conjunction(self.control_bounded).conjunction(self.intermediate_target)", "def cvstem(self):\n if (self.iEC == \"est\") and (len(sig(self.Cfun).parameters) == 1):\n fun1 = self.Cfun\n self.Cfun = lambda x,p: fun1(x)\n if (self.iEC == \"est\") and (len(sig(self.Gw).parameters) == 1):\n fun2 = self.Gw\n self.Gw = lambda x,p: fun2(x)\n if self.iEC == \"est\":\n self.c_over = self.matrix_2bound(self.Cfun)\n self.g_over = self.matrix_2bound(self.Gw)\n if (len(sig(self.Bw).parameters) == 1):\n fun3 = self.Bw\n self.Bw = lambda x,p: fun3(x)\n self.b_over = self.matrix_2bound(self.Bw)\n self.linesearch()\n alp = self.alp_opt\n Nx = self.Nx\n Nsplit = 1\n Np = int(Nx/Nsplit)\n Nr = np.remainder(Nx,Nsplit)\n xpmin = np.hstack((self.xlims[0,:],self.plims[0,:]))\n xpmax = np.hstack((self.xlims[1,:],self.plims[1,:]))\n Nxp = self.n+self.n_p\n xps = np.random.uniform(xpmin,xpmax,size=(Nx,Nxp))\n xs_opt,ps_opt,_ = np.hsplit(xps,np.array([self.n,Nxp]))\n Ws_opt = []\n chi_opt = 0\n nu_opt = 0\n print(\"========================================================\")\n print(\"====== SAMPLING OF CONTRACTION METRICS BY CV-STEM ======\")\n print(\"========================================================\")\n for p in range(Np):\n if np.remainder(p,int(Np/10)) == 0:\n print(\"# sampled metrics: \",p*Nsplit,\"...\")\n xs_p = xs_opt[Nsplit*p:Nsplit*(p+1),:]\n ps_p = ps_opt[Nsplit*p:Nsplit*(p+1),:]\n self.cvstem0(xs_p,ps_p,alp)\n Ws_opt += self.Ws\n if self.nu >= nu_opt:\n nu_opt = self.nu\n if self.chi >= chi_opt:\n chi_opt = self.chi\n if Nr != 0:\n print(\"# samples metrics: \",Nx,\"...\")\n xs_p = xs_opt[Nsplit*(p+1):Nx,:]\n ps_p = ps_opt[Nsplit*(p+1):Nx,:]\n self.cvstem0(xs_p,ps_p,alp)\n Ws_opt += self.Ws\n if self.nu >= nu_opt:\n nu_opt = self.nu\n if self.chi >= chi_opt:\n chi_opt = self.chi\n self.xs_opt = xs_opt\n self.ps_opt = ps_opt\n self.Ws_opt = Ws_opt\n self.chi_opt = chi_opt\n self.nu_opt = nu_opt\n if self.iEC == \"est\":\n self.Jcv_opt = (self.d1_over*self.b_over*np.sqrt(chi_opt)\\\n +self.d2_over*self.c_over*self.g_over*nu_opt)/alp\n print(\"Optimal steady-state estimation error =\",\\\n \"{:.2f}\".format(self.Jcv_opt))\n elif self.iEC == \"con\":\n self.Jcv_opt = self.d1_over*self.b_over*np.sqrt(chi_opt)/alp\n print(\"Optimal steady-state tracking error =\",\\\n \"{:.2f}\".format(self.Jcv_opt))\n else:\n raise ValueError('Invalid iEC: iEC = 
\"est\" or \"con\"')\n self.M2cholM()\n path = \"models/optvals/\"+self.fname\n if os.path.exists(path) == False:\n try:\n os.makedirs(path)\n except: \n raise OSError(\"Creation of directory %s failed\" %path)\n else:\n print (\"Successfully created directory %s \" %path)\n else:\n print (\"Directory %s already exists\" %path)\n np.save(path+\"/alp_opt.npy\",alp)\n np.save(path+\"/chi_opt.npy\",self.chi_opt)\n np.save(path+\"/nu_opt.npy\",self.nu_opt)\n np.save(path+\"/Jcv_opt.npy\",self.Jcv_opt)\n print(\"========================================================\")\n print(\"==== SAMPLING OF CONTRACTION METRICS BY CV-STEM END ====\")\n print(\"========================================================\\n\\n\")\n pass", "def starting_point(conditions, state_variables, phase_records, grid):\n global_min_enabled = global_min_is_possible(conditions, state_variables)\n from pycalphad import __version__ as pycalphad_version\n active_phases = sorted(phase_records.keys())\n # Ensure that '_FAKE_' will fit in the phase name array\n max_phase_name_len = max(max([len(x) for x in active_phases]), 6)\n maximum_internal_dof = max(prx.phase_dof for prx in phase_records.values())\n nonvacant_elements = phase_records[active_phases[0]].nonvacant_elements\n coord_dict = OrderedDict([(str(key), value) for key, value in conditions.items()])\n grid_shape = tuple(len(x) for x in coord_dict.values())\n coord_dict['vertex'] = np.arange(\n len(nonvacant_elements) + 1) # +1 is to accommodate the degenerate degree of freedom at the invariant reactions\n coord_dict['component'] = nonvacant_elements\n conds_as_strings = [str(k) for k in conditions.keys()]\n specified_elements = set()\n for i in conditions.keys():\n # Assume that a condition specifying a species contributes to constraining it\n if not hasattr(i, 'species'):\n continue\n specified_elements |= set(i.species.constituents.keys()) - {'VA'}\n dependent_comp = set(nonvacant_elements) - specified_elements\n if len(dependent_comp) != 1:\n raise ValueError('Number of dependent components different from one')\n\n ds_vars = {'NP': (conds_as_strings + ['vertex'], np.empty(grid_shape + (len(nonvacant_elements)+1,))),\n 'GM': (conds_as_strings, np.empty(grid_shape)),\n 'MU': (conds_as_strings + ['component'], np.empty(grid_shape + (len(nonvacant_elements),))),\n 'X': (conds_as_strings + ['vertex', 'component'],\n np.empty(grid_shape + (len(nonvacant_elements)+1, len(nonvacant_elements),))),\n 'Y': (conds_as_strings + ['vertex', 'internal_dof'],\n np.empty(grid_shape + (len(nonvacant_elements)+1, maximum_internal_dof,))),\n 'Phase': (conds_as_strings + ['vertex'],\n np.empty(grid_shape + (len(nonvacant_elements)+1,), dtype='U%s' % max_phase_name_len)),\n 'points': (conds_as_strings + ['vertex'],\n np.empty(grid_shape + (len(nonvacant_elements)+1,), dtype=np.int32))\n }\n\n # If we have free state variables, they will also be data variables / output variables\n free_statevars = sorted(set(state_variables) - set(conditions.keys()))\n for f_sv in free_statevars:\n ds_vars.update({str(f_sv): (conds_as_strings, np.empty(grid_shape))})\n\n result = LightDataset(ds_vars, coords=coord_dict, attrs={'engine': 'pycalphad %s' % pycalphad_version})\n if global_min_enabled:\n result = lower_convex_hull(grid, state_variables, result)\n else:\n raise NotImplementedError('Conditions not yet supported')\n\n return result", "def __init__(self, model, line, segments = None, influence = None, \r\n strength = 1, variables = [], priors=[]):\r\n\r\n import numpy as np\r\n from 
scipy.interpolate import interp1d\r\n import copy\r\n \r\n self.model = model\r\n model.elementlist.append(self)\r\n \r\n self.variables = variables\r\n self.priors = priors\r\n \r\n # ---------------------------------------------------------------------\r\n # Subdivide the provided no flow boundary into #segments pieces\r\n \r\n self.line_raw = copy.copy(line)\r\n \r\n if segments is None:\r\n \r\n self.segments = line.shape[0]-1\r\n \r\n else:\r\n self.segments = segments\r\n \r\n if self.segments < self.line_raw.shape[0]-1:\r\n \r\n raise Exception('Number of segments '+str(self.segments)+\" mustn't be smaller than number of line points \"+str(line.shape[0])+'.')\r\n \r\n if self.segments > self.line_raw.shape[0]:\r\n \r\n # Subdivide the line\r\n self.line = self.subdivide_line(line,self.segments)\r\n self.line_c = copy.copy(self.line[:,0] + 1j*self.line[:,1])\r\n else:\r\n \r\n self.line = self.line_raw.copy()\r\n self.line_c = self.line[:,0] + 1j*self.line[:,1]\r\n \r\n # Also get the normal vector components to each segment\r\n self.line_nvec = self.line[:,1] - 1j*self.line[:,0]\r\n self.line_nvec = self.line_nvec/np.abs(self.line_nvec)\r\n\r\n # --------------------------------------------------------------------- \r\n \r\n \r\n \r\n \r\n self.strength = np.ones(self.segments)*strength\r\n \r\n if influence is None:\r\n self.influence = self.model.domain_radius*2\r\n else:\r\n self.influence = influence\r\n \r\n \r\n self.Zi = []\r\n self.offset_outside = []\r\n self.L = []\r\n self.zc = []\r\n self.segment_nvec = []\r\n self.head_target = []\r\n \r\n for seg in range(self.segments):\r\n \r\n self.L += [np.abs(self.line_c[seg+1] - self.line_c[seg])]\r\n \r\n influence_pt = (self.line_c[seg+1]-self.line_c[seg])*self.influence/self.L[seg] + self.line_c[seg]\r\n Z = (2*influence_pt-(self.line_c[seg]+self.line_c[seg+1]))/(self.line_c[seg+1]-self.line_c[seg])\r\n self.Zi += [copy.copy(Z)]\r\n \r\n self.zc += [(self.line_c[seg]+self.line_c[seg+1])/2]\r\n \r\n # Calculate the normal vector to this segment\r\n self.segment_nvec += [(self.line_c[seg]-self.line_c[seg+1])]\r\n self.segment_nvec[-1]= [np.imag(self.segment_nvec[-1])-1j*np.real(self.segment_nvec[-1])]\r\n \r\n part1 = np.nan_to_num((Z+1)*np.log(Z+1))\r\n part2 = np.nan_to_num((Z-1)*np.log(Z-1))\r\n self.offset_outside += [self.L[seg] / (4*np.pi) * (part1 - part2)]\r\n \r\n # Convert list of segment centers to array\r\n self.zc = np.asarray(self.zc)\r\n \r\n \r\n # Check if the prior matches the number of parameters\r\n if len(self.priors) != len(self.variables):\r\n raise Exception('Number of priors must match number of unknown variables. 
Number of priors: '+str(self.priors)+' / Number of unknown variables: '+str(len(self.variables)))\r\n \r\n # Go through all elements\r\n if len(self.variables) > 0:\r\n # There are some model variables specified\r\n for idx,var in enumerate(self.variables):\r\n self.model.num_params += 1\r\n exec(\"self.model.params += [self.%s]\" % var)\r\n self.model.priors += [self.priors[idx]]\r\n self.model.variables += [var]\r\n if 'name' in list(self.priors[idx].keys()):\r\n self.model.param_names += [self.priors[idx]['name']] \r\n else: \r\n self.model.param_names += ['unknown']", "def prepare(self) -> None:\n\n \"\"\"\n Objective function\n Coefficient -2 means that we solve maximization problem (multiple all \n value to -1) and also there are left coverage area and right coverage \n area for each station (2* cov)\n \"\"\"\n\n f = [-2 * self.cov[i] for i in range(self.get_column_num)]\n self._f = np.array(f)\n\n \"\"\" Inequality Constraints\"\"\"\n ineq_cost = [self.cost[i] for i in range(self.get_column_num)]\n self._ineq_constraints = np.array(ineq_cost)\n self._b = np.array(self.cost_limit)\n\n \"\"\" \n There is no equality constraints. \n self._eq_constraints is empty\n self._beq is empty\n \"\"\"", "def find_lcs(l1: str, l2: str, length1: int, length2: int):\n \"\"\" Theorem:{\n Initialize matrix with 0 for first row and colm\n If s1[i] = s2[j], update matrix[i][j] with value\n of matrix[i-1][j-1]+1\n Else update matrix[i][j] with max of value among\n matrix[i][j-1],matrix[i-1][j]\n Matrix[n][m] will be lcs\n }\n \"\"\"\n matrix = [[None]*(length1+1) for i in range(0, length2+1)]\n for i in range(0, length2+1):\n for j in range(0, length1+1):\n if i == 0 or j == 0:\n matrix[i][j] = 0\n elif l1[j-1] == l2[i-1]:\n matrix[i][j] = matrix[i-1][j-1] + 1\n else:\n matrix[i][j] = max(matrix[i][j-1], matrix[i-1][j])\n lcs = [None for i in range(0, matrix[length2][length1])]\n index = matrix[length2][length1]\n m = length2 \n n = length1\n while(m > -1 and n > -1):\n if l2[m-1] == l1[n-1]:\n lcs[index-1] = l2[m-1]\n index -= 1\n m -= 1\n n -= 1\n elif matrix[m-1][n] > matrix[m][n-1]:\n m -= 1\n else:\n n -= 1\n return lcs", "def _local_search(self):\n\n # Set occupancies of rigid cluster and its direct neighboring atoms to\n # 1 for clash detection and MIQP\n selection = self.ligand._selection\n self.ligand._active[selection] = True\n center = self.ligand.coor[self._cluster].mean(axis=0)\n new_coor_set = []\n new_bs = []\n for coor, b in zip(self._coor_set, self._bs):\n self.ligand._coor[selection] = coor\n self.ligand._b[selection] = b\n rotator = GlobalRotator(self.ligand, center=center)\n for rotmat in RotationSets.get_local_set():\n rotator(rotmat)\n translator = Translator(self.ligand)\n iterator = itertools.product(\n *[np.arange(*trans) for trans in self._trans_box]\n )\n for translation in iterator:\n translator(translation)\n new_coor = self.ligand.coor\n if self.options.remove_conformers_below_cutoff:\n values = self.xmap.interpolate(new_coor)\n mask = self.ligand.e != \"H\"\n if np.min(values[mask]) < self.options.density_cutoff:\n continue\n if self.options.external_clash:\n if not self._cd() and not self.ligand.clashes():\n if new_coor_set:\n delta = np.array(new_coor_set) - np.array(new_coor)\n if (\n np.sqrt(\n min(np.square((delta)).sum(axis=2).sum(axis=1))\n )\n >= self.options.rmsd_cutoff\n ):\n new_coor_set.append(new_coor)\n new_bs.append(b)\n else:\n new_coor_set.append(new_coor)\n new_bs.append(b)\n elif not self.ligand.clashes():\n if new_coor_set:\n delta = 
np.array(new_coor_set) - np.array(new_coor)\n if (\n np.sqrt(min(np.square((delta)).sum(axis=2).sum(axis=1)))\n >= self.options.rmsd_cutoff\n ):\n new_coor_set.append(new_coor)\n new_bs.append(b)\n else:\n new_coor_set.append(new_coor)\n new_bs.append(b)\n self.ligand._active[self.ligand._selection] = False\n selection = self.ligand._selection[self._cluster]\n self.ligand._active[selection] = True\n for atom in self._cluster:\n atom_sel = self.ligand._selection[self.ligand.connectivity[atom]]\n self.ligand._active[atom_sel] = True\n self.conformer = self.ligand\n self._coor_set = new_coor_set\n self._bs = new_bs\n if len(self._coor_set) < 1:\n logger.warning(\n f\"{self.ligand.resn[0]}: \"\n f\"Local search {self._cluster_index}: {len(self._coor_set)} conformers\"\n )\n return\n\n # QP score conformer occupancy\n logger.debug(\"Converting densities.\")\n self._convert()\n self._solve_qp()\n logger.debug(\"Updating conformers\")\n self._update_conformers()\n if self.options.write_intermediate_conformers:\n self._write_intermediate_conformers(prefix=\"localsearch_ligand_qp\")\n if len(self._coor_set) < 1:\n logger.warning(\n f\"{self.ligand.resn[0]}: \"\n f\"Local search QP {self._cluster_index}: {len(self._coor_set)} conformers\"\n )\n return\n\n # MIQP score conformer occupancy\n self._convert()\n self._solve_miqp(\n threshold=self.options.threshold, cardinality=self.options.cardinality\n )\n self._update_conformers()\n if self.options.write_intermediate_conformers:\n self._write_intermediate_conformers(prefix=\"localsearch_ligand_miqp\")", "def make_start_moves(self):\n self.geos = Geos([])\n\n if g.config.machine_type == 'drag_knife':\n self.make_swivelknife_move()\n return\n\n # Get the start rad. and the length of the line segment at begin.\n start_rad = self.shape.parentLayer.start_radius\n\n # Get tool radius based on tool diameter.\n tool_rad = self.shape.parentLayer.getToolRadius()\n\n # Calculate the starting point with and without compensation.\n start = self.start\n angle = self.angle\n\n if self.shape.cut_cor == 40:\n self.append(RapidPos(start))\n \n elif self.shape.cut_cor != 40 and not g.config.vars.Cutter_Compensation[\"done_by_machine\"]:\n\n toolwidth = self.shape.parentLayer.getToolRadius()\n offtype = \"in\" if self.shape.cut_cor == 42 else \"out\"\n offshape = offShapeClass(parent = self.shape, offset = toolwidth, offtype = offtype)\n\n if len(offshape.rawoff) > 0:\n start, angle = offshape.rawoff[0].get_start_end_points(True, True)\n\n self.append(RapidPos(start))\n self.geos += offshape.rawoff\n\n # Cutting Compensation Left\n elif self.shape.cut_cor == 41:\n # Center of the Starting Radius.\n Oein = start.get_arc_point(angle + pi/2, start_rad + tool_rad)\n # Start Point of the Radius\n Ps_ein = Oein.get_arc_point(angle + pi, start_rad + tool_rad)\n # Start Point of the straight line segment at begin.\n Pg_ein = Ps_ein.get_arc_point(angle + pi/2, start_rad)\n\n # Get the dive point for the starting contour and append it.\n start_ein = Pg_ein.get_arc_point(angle, tool_rad)\n self.append(RapidPos(start_ein))\n\n # generate the Start Line and append it including the compensation.\n start_line = LineGeo(start_ein, Ps_ein)\n self.append(start_line)\n\n # generate the start rad. 
and append it.\n start_rad = ArcGeo(Ps=Ps_ein, Pe=start, O=Oein,\n r=start_rad + tool_rad, direction=1)\n self.append(start_rad)\n\n # Cutting Compensation Right\n elif self.shape.cut_cor == 42:\n # Center of the Starting Radius.\n Oein = start.get_arc_point(angle - pi/2, start_rad + tool_rad)\n # Start Point of the Radius\n Ps_ein = Oein.get_arc_point(angle + pi, start_rad + tool_rad)\n # Start Point of the straight line segment at begin.\n Pg_ein = Ps_ein.get_arc_point(angle - pi/2, start_rad)\n\n # Get the dive point for the starting contour and append it.\n start_ein = Pg_ein.get_arc_point(angle, tool_rad)\n self.append(RapidPos(start_ein))\n\n # generate the Start Line and append it including the compensation.\n start_line = LineGeo(start_ein, Ps_ein)\n self.append(start_line)\n\n # generate the start rad. and append it.\n start_rad = ArcGeo(Ps=Ps_ein, Pe=start, O=Oein,\n r=start_rad + tool_rad, direction=0)\n self.append(start_rad)", "def solve_prep(self):\n\n par = self.par\n sol = self.sol\n\n # a. retirement\n sol.m_ret = np.zeros((par.T,par.Nm_ret))\n sol.c_ret = np.zeros((par.T,par.Nm_ret))\n sol.a_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_v_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_vm_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_vn_ret = np.zeros((par.T,par.Nm_ret))\n\n # b. working\n if par.solmethod == 'G2EGM':\n\n sol.c = np.zeros((par.T,par.Nn,par.Nm))\n sol.d = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vm = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vn = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.ucon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.ucon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.ucon_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.dcon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.dcon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.dcon_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.acon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.acon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.acon_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.z = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.w = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wa = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wb = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n \n elif par.solmethod == 'NEGM':\n\n sol.c = np.zeros((par.T,par.Nn,par.Nm))\n sol.d = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vn = np.zeros((0,0,0))\n sol.inv_vm = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.w = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wa = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wb = np.zeros((0,0,0))\n \n sol.c_pure_c = np.zeros((par.T,par.Nb_pd,par.Nm))\n sol.inv_v_pure_c = np.zeros((par.T,par.Nb_pd,par.Nm))", "def __init__(self, model, line, segments = None,head_target = 0,\r\n variables = [], priors=[]):\r\n\r\n import numpy as np\r\n from scipy.interpolate import interp1d\r\n import copy\r\n \r\n # Append this element to the specified model\r\n self.model = model\r\n model.elementlist.append(self)\r\n model.linear_solver = True\r\n\r\n # ---------------------------------------------------------------------\r\n # Subdivide the provided no flow boundary into segments pieces\r\n \r\n # Complexify the line, if it wasn't already complex\r\n line = self.complexify(line)\r\n \r\n # The subdivision algorith requires the line coordinates as a real N-by-2 matrix\r\n line = np.column_stack((\r\n 
np.real(line)[:,np.newaxis],\r\n np.imag(line)[:,np.newaxis]))\r\n \r\n self.line_raw = copy.copy(line)\r\n if segments is None:\r\n self.segments = line.shape[0]-1\r\n else:\r\n self.segments = segments\r\n \r\n if self.segments < self.line_raw.shape[0]-1:\r\n raise Exception('Prescribed number of line segments '+str(self.segments)+\" mustn't be smaller than base number of segments \"+str(line.shape[0]-1)+'.')\r\n \r\n if self.segments > self.line_raw.shape[0]-1:\r\n \r\n # Subdivide the line\r\n self.line = self.subdivide_line(line,self.segments)\r\n self.line_c = self.line[:,0] + 1j*self.line[:,1]\r\n \r\n else:\r\n \r\n self.line = self.line_raw.copy()\r\n self.line_c = self.line[:,0] + 1j*self.line[:,1]\r\n \r\n # Also get the normal vector components to each segment\r\n self.line_nvec = self.line[:,1] - 1j*self.line[:,0]\r\n self.line_nvec = self.line_nvec/np.abs(self.line_nvec)\r\n\r\n # ---------------------------------------------------------------------\r\n \r\n # Get strength parameters for each vertex\r\n self.strength = np.ones(self.segments)\r\n \r\n \r\n self.zc = []\r\n self.segment_nvec = []\r\n self.L = []\r\n \r\n for seg in range(self.segments):\r\n \r\n self.zc += [(self.line_c[seg]+self.line_c[seg+1])/2]\r\n \r\n # Calculate the normal vector to this segment\r\n self.segment_nvec += [(self.line_c[seg]-self.line_c[seg+1])]\r\n self.segment_nvec[-1]= [np.imag(self.segment_nvec[-1])-1j*np.real(self.segment_nvec[-1])]\r\n \r\n self.L += [np.abs(self.line_c[seg+1] - self.line_c[seg])]\r\n \r\n self.zc = np.asarray(self.zc)\r\n \r\n # Extract target variables\r\n self.variables = variables\r\n self.priors = priors\r\n \r\n self.L = np.asarray(self.L)\r\n \r\n \r\n # Check if the prior matches the number of parameters\r\n if len(self.priors) != len(self.variables):\r\n raise Exception('Number of priors must match number of unknown variables. 
Number of priors: '+str(self.priors)+' / Number of unknown variables: '+str(len(self.variables)))\r\n \r\n # Go through all elements\r\n if len(self.variables) > 0:\r\n # There are some model variables specified\r\n for idx,var in enumerate(self.variables):\r\n self.model.num_params += 1\r\n exec(\"self.model.params += [self.%s]\" % var)\r\n self.model.priors += [self.priors[idx]]\r\n self.model.variables += [var]\r\n if 'name' in list(self.priors[idx].keys()):\r\n self.model.param_names += [self.priors[idx]['name']] \r\n else: \r\n self.model.param_names += ['unknown']", "def _LineSearch(self, disp_vector):\n self.GetDispDeriv(self.disp_mag, disp_vector)\n disp_mag = self.disp_mag\n disp_sign = 1.0 if self.disp_deriv <= 0.0 else -1.0\n disp_mag *= disp_sign\n disp_sign_same = True\n ref_energy = self.mol.e_total\n\n # binary search to find upper bound on displacement magnitude\n self.n_subiter = 0\n while (disp_sign_same):\n self.n_subiter += 1\n self._DisplaceCoords(+1.0 * disp_mag, disp_vector)\n self.GetDispDeriv(disp_mag, disp_vector)\n self._DisplaceCoords(-1.0 * disp_mag, disp_vector)\n if self.mol.e_total > ref_energy:\n disp_mag *= 0.5\n break\n old_disp_sign = disp_sign\n disp_sign = 1.0 if self.disp_deriv <= 0.0 else -1.0\n disp_sign_same = bool(disp_sign == old_disp_sign)\n disp_mag *= 2.0\n self.GetDispDeriv(disp_mag, disp_vector)\n self.AdjustDispMag(self.n_subiter)\n\n # binary search to find value of displacement within bounds\n numer = 1.0\n denom = 2.0\n for i in range(const.NUMLINESEARCHSTEPS):\n self.n_subiter += 1\n test_disp = disp_mag * numer / denom\n self._DisplaceCoords(+1.0 * test_disp, disp_vector)\n self.GetDispDeriv(disp_mag / (2**(-i)), disp_vector)\n self._DisplaceCoords(-1.0 * test_disp, disp_vector)\n direc = 1.0 if self.disp_deriv < 0.0 else -1.0\n numer = 2*numer + direc\n denom = 2*denom\n disp_mag *= numer / denom\n\n # final line search energy minimized molecular coordinates\n self._DisplaceCoords(+1.0 * disp_mag, disp_vector)", "def system(p):\r\n\r\n C1, C2, C3, C4, C5, C6, C7, C8, \\\r\n C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22, \\\r\n C23, C24, C25, C26, C27, C28, C29, C30, C31, C32, C33, C34,\\\r\n C35, C36, C37, C38, C39, C40, C41, C42, C43, \\\r\n C44, C45, C46, C47, C48, C49, C50, C51, C52, C53, C54, C55, C56 = p\r\n\r\n C = [C1, C2, C3, C4, C5, C6, C7, C8,\r\n C9,C10,C11,C12,C13,C14,C15,C16,C17,C18,C19,C20,C21,C22,\r\n C23, C24, C25, C26, C27, C28, C29, C30, C31, C32, C33, C34, C35, C36, C37,\r\n C38, C39, C40, C41, C42, C43,\r\n C44, C45, C46, C47, C48, C49, C50, C51, C52, C53, C54, C55, C56]\r\n\r\n eqs = [C[i] * (Kd[i] + Rtot - sum(C)) + Etot[i] * (sum(C) - Rtot) for i in range(n)]\r\n\r\n return eqs", "def get_start_positions(img_in):\n\n def initialize_coordinates(kernel_h, kernel_w):\n \"\"\" locates positions of interest by traversing eroded image and\n saves 9 points on each area of interest to global matrix\n :param kernel_h height of kernel used for harsh erosion\n :param kernel_w width of kernel used for harsh erosion\"\"\"\n global init_coords\n\n count = 0\n y = 0\n while y < frame_height - kernel_h:\n x = 0\n while x < frame_width - kernel_w:\n locator = img[y:y+kernel_h, x:x+kernel_w, 2] > 0 + numpy.zeros((kernel_h, kernel_w))\n if numpy.any(locator):\n if count == 0:\n init_coords[count][0][0] = y - 2\n init_coords[count][0][1] = x + 2\n elif count == 1:\n init_coords[count][0][0] = y + 2\n init_coords[count][0][1] = x + 2\n elif count == 2:\n init_coords[count][0][0] = y + 2\n init_coords[count][0][1] = x + 2\n 
elif count == 3:\n init_coords[count][0][0] = y - 3\n init_coords[count][0][1] = x + 2\n elif count == 4:\n init_coords[count][0][0] = y + 3\n init_coords[count][0][1] = x - 5\n count += 1\n break\n x += kernel_w\n y += kernel_h\n\n # store 8 more points for each body part\n f = 1.5\n for count in range(5):\n init_coords[count][1][1] = init_coords[count][0][1] + 3*f\n init_coords[count][1][0] = init_coords[count][0][0] + 0\n init_coords[count][2][1] = init_coords[count][0][1] + 6*f\n init_coords[count][2][0] = init_coords[count][0][0] + 0\n init_coords[count][3][1] = init_coords[count][0][1] + 0\n init_coords[count][3][0] = init_coords[count][0][0] + 3*f\n init_coords[count][4][1] = init_coords[count][0][1] + 3*f\n init_coords[count][4][0] = init_coords[count][0][0] + 3*f\n init_coords[count][5][1] = init_coords[count][0][1] + 6*f\n init_coords[count][5][0] = init_coords[count][0][0] + 3*f\n init_coords[count][6][1] = init_coords[count][0][1] + 0\n init_coords[count][6][0] = init_coords[count][0][0] + 6*f\n init_coords[count][7][1] = init_coords[count][0][1] + 3*f\n init_coords[count][7][0] = init_coords[count][0][0] + 6*f\n init_coords[count][8][1] = init_coords[count][0][1] + 6*f\n init_coords[count][8][0] = init_coords[count][0][0] + 6*f\n\n limb_coords[0][0][0] = init_coords[0][5][0]\n limb_coords[0][0][1] = init_coords[0][5][1]\n limb_coords[1][0][0] = init_coords[1][5][0]\n limb_coords[1][0][1] = init_coords[1][5][1]\n limb_coords[2][0][0] = init_coords[2][5][0]\n limb_coords[2][0][1] = init_coords[2][5][1]\n limb_coords[3][0][0] = init_coords[3][5][0]\n limb_coords[3][0][1] = init_coords[3][5][1]\n limb_coords[4][0][0] = init_coords[4][5][0]\n limb_coords[4][0][1] = init_coords[4][5][1]\n\n img = img_in.copy()\n img = segment_red(img, 205, 135)\n erode(img, 14, 12)\n initialize_coordinates(14, 12)", "def searchOrigin(concForm, preSet, formulSet):\r\n lis = []\r\n for i in range(len(preSet)):\r\n if preSet[i] == concForm:\r\n lis.append(i + 1)\r\n liss = preSet[i].split()\r\n liss.append(\"Assumed\")\r\n lis.append(liss)\r\n return [1, lis]\r\n for subset in formulSet:\r\n if concForm == subset[1][0]:\r\n lis.append(subset[0])\r\n lis.append(subset[1])\r\n return [0, lis]\r\n return [-1]", "def rwgraph_analyze2(input=(None)):\r\n\r\n\r\n #set up graph and degree distribution arrays\r\n n=2000\r\n m=4\r\n G=nx.barabasi_albert_graph(n, m, seed=5)\r\n Nt=100\r\n M=20000\r\n maxdeg=0\r\n degree_dist=[]\r\n for i in range(0,n):\r\n degree_dist.append(G.degree[i])\r\n if G.degree[i]>maxdeg:\r\n maxdeg=G.degree[i]\r\n j=i\r\n\r\n #set inital conditions and D\r\n y0=np.zeros(n,dtype=int)\r\n y0[j]=200\r\n D=1\r\n #define time for odi Int\r\n t=np.arange(Nt+1,dtype=int)\r\n #set up operators\r\n A = nx.adjacency_matrix(G)\r\n Q = A.toarray().sum(axis=1)\r\n L=np.diag(Q)-A.toarray()\r\n Q_inv=1/Q\r\n Ls=np.diag(np.ones(n))-np.matmul(np.diag(Q_inv),A.toarray())\r\n Ls_tran=np.transpose(Ls)\r\n\r\n #convert to sparse operators and include diffusion\r\n L_spar = scipy.sparse.csr_matrix(-D*L)\r\n Ls_spar = scipy.sparse.csr_matrix(-D*Ls)\r\n Ls_tran_spar = scipy.sparse.csr_matrix(-D*Ls_tran)\r\n A=nx.adjacency_matrix(G)\r\n L=-D*(scipy.sparse.diags(degree_arr)-A)\r\n Ls=-D*(scipy.sparse.diags(np.ones(N))-scipy.sparse.diags(1/degree_arr).dot(A))\r\n\r\n #define operators\r\n def Lap(y,t):\r\n return scipy.sparse.csr_matrix.__mul__(L_spar,y)\r\n def Lap_Ls(y,t):\r\n return scipy.sparse.csr_matrix.__mul__(Ls_spar,y)\r\n def Lap_Ls_tran(y,t):\r\n return 
scipy.sparse.csr_matrix.__mul__(Ls_tran_spar,y)\r\n\r\n #solutions of different operators\r\n solL=scipy.integrate.odeint(Lap,y0,t)\r\n solLs=scipy.integrate.odeint(Lap_Ls,y0,t)\r\n solLs_tran=scipy.integrate.odeint(Lap_Ls_tran,y0,t)\r\n\r\n\r\n #finds eigen values and vectors and puts them into order\r\n def eigen(L):\r\n eigen_values,eigen_vectors=scipy.linalg.eig(-L)\r\n idx = eigen_values.argsort()[::-1]\r\n eigen_values = eigen_values[idx]\r\n eigen_vectors = eigen_vectors[:,idx]\r\n return eigen_values,eigen_vectors\r\n\r\n #finds all eigen values and eigen vectors of the different operators. can use sparse matrics\r\n eigen_values_LS,eigen_vectors_LS=eigen(Ls)\r\n eigen_values_LS_tran,eigen_vectors_LS_tran=eigen(Ls_tran)\r\n eigen_values_L,eigen_vectors_L=eigen(L)\r\n eigen_values_L2,eigen_vectors_L2=eigen(L*0.36)\r\n\r\n ### could have eigs here as didn't end up using all eigenvalues ####\r\n #eigen values graph\r\n n0=len(eigen_values_L)\r\n eig_nums=np.arange(n0)\r\n plt.figure(figsize=(12, 6))\r\n plt.scatter(eig_nums[0:10],eigen_values_L2[0:10],s=50,marker=\"x\" ,label='L , D=0.36')\r\n plt.scatter(eig_nums[0:10],eigen_values_LS[0:10],s=50, marker=\"|\",label='LS , D=1')\r\n plt.scatter(eig_nums[0:10],eigen_values_LS_tran[0:10],s=50,marker='_',label='LS_tran , D=1')\r\n plt.scatter(eig_nums[0:10],eigen_values_L[0:10],s=50,marker=\"+\" ,label='L , D=1')\r\n plt.legend(loc=\"lower left\", fontsize=12,fancybox=True, framealpha=1, shadow=True, borderpad=1)\r\n plt.xlabel('eigen value number')\r\n plt.ylabel('eigenvalue')\r\n plt.title(\"Eigenvlaues of Laplacian Matrixs\")\r\n plt.show()\r\n\r\n print(\"4 biggest eigenvalues for each operater\")\r\n print('L=',eigen_values_L[0:4])\r\n print('Ls=',eigen_values_LS[0:4])\r\n print('Ls_tran=',eigen_values_LS_tran[0:4])\r\n #prints 4 biggest eigen values\r\n #counts node distrubtion by creating dictionary\r\n def result_count(sol,Nt,G):\r\n \"\"\" returns cumlative frequency/probailties for nodes of same degree and returns dictionary\"\"\"\r\n n = G.number_of_nodes()\r\n dict_freq={}\r\n for i in range(n):\r\n k=G.degree(i)\r\n if k not in dict_freq:\r\n dict_freq[k]=sol[Nt,i]\r\n else:\r\n dict_freq[k]+=sol[Nt,i]\r\n return dict_freq\r\n\r\n #frequency count of solutions\r\n dict_freq=result_count(solL,Nt,G)\r\n dict_freq2=result_count(solLs,Nt,G)\r\n dict_freq3=result_count(solLs_tran,Nt,G)\r\n\r\n #random walk data\r\n X=rwgraph(G,j,20000,100)\r\n Listnodes7=[]\r\n for i in range(20000):\r\n Listnodes7.append(G.degree(X[i,100]))\r\n X=rwgraph(G,j,200,100)\r\n Listnodes8=[]\r\n for i in range(200):\r\n Listnodes8.append(G.degree(X[i,100]))\r\n X=rwgraph(G,j,50000,5000)\r\n Listnodes9=[]\r\n for i in range(50000):\r\n Listnodes9.append(G.degree(X[i,5000]))\r\n listfreq7=CountFrequency(Listnodes7)\r\n listfreq8=CountFrequency(Listnodes8)\r\n listfreq9=CountFrequency(Listnodes9)\r\n listfreq_deg=CountFrequency(degree_dist)\r\n z2=[]\r\n z3=[]\r\n z1=[]\r\n z_deg2=[]\r\n z_deg3=[]\r\n z_deg1=[]\r\n for i in listfreq7:\r\n z2.append(listfreq7[i]/(listfreq_deg[i]*20000))\r\n z_deg2.append(i)\r\n for i in listfreq8:\r\n z3.append(listfreq8[i]/(listfreq_deg[i]*200))\r\n z_deg3.append(i)\r\n for i in listfreq8:\r\n z1.append(listfreq9[i]/(listfreq_deg[i]*50000))\r\n z_deg1.append(i)\r\n #operator solutions compared to node degree frequency\r\n z4,z5,z6=[],[],[]\r\n z_deg4,z_deg5,z_deg6=[],[],[]\r\n for i in dict_freq:\r\n z4.append(dict_freq[i]/(listfreq_deg[i]*200))\r\n z_deg4.append(i)\r\n for i in dict_freq2:\r\n 
z5.append(dict_freq2[i]/(listfreq_deg[i]*200))\r\n z_deg5.append(i)\r\n for i in dict_freq3:\r\n z6.append(dict_freq3[i]/(listfreq_deg[i]*200))\r\n z_deg6.append(i)\r\n\r\n plt.figure(figsize=(15, 10))\r\n plt.scatter(z_deg1, z1,label='Nt=5000, M=50000')\r\n plt.scatter(z_deg2, z2,label='Nt=100, M=20000')\r\n plt.scatter(z_deg3, z3,label='Nt=100, M=200')\r\n plt.scatter(z_deg4, z4,label='L, Nt=100')\r\n plt.scatter(z_deg5, z5,label='Ls, Nt=100')\r\n plt.scatter(z_deg6, z6,label='Ls_tran, Nt=100')\r\n plt.ylim((-0.005,0.020))\r\n plt.xlabel('degree of node')\r\n plt.ylabel('frequency of final position / M*frequency of degree')\r\n plt.legend(loc=\"upper left\", fontsize=12,fancybox=True, framealpha=1, shadow=True, borderpad=1)\r\n plt.title(\"Frequency of final positions relative to number of nodes of that degree, for changing times Nt and M.\")\r\n plt.show()\r\n\r\n #code to produce final graph\r\n iarray1=LinearModel(G,x=j,i0=1,L1='L',D=1,tf=20,Nt=Nt)\r\n iarray2=LinearModel(G,x=j,i0=1,L1='Ls',D=1,tf=20,Nt=Nt)\r\n iarray3=LinearModel(G,x=j,i0=1,L1='Lst',D=1,tf=20,Nt=Nt)\r\n tarray = np.linspace(0,5,Nt+1)\r\n plt.figure(figsize=(12, 6))\r\n plt.plot(tarray, iarray1[:,7] ,label='rand node L,deg=46',color='b',alpha=0.5)\r\n plt.plot(tarray, iarray2[:,7] ,label='rand node Ls,deg=46',marker='|',color='r')\r\n plt.scatter(tarray, iarray3[:,7] ,label='rand node LST,deg=46',marker='_',color='y')\r\n plt.scatter(tarray, iarray1[:,1801] ,label='rand node L, deg=5',color='m',alpha=0.5,marker='+')\r\n plt.plot(tarray, iarray2[:,1801] ,label='rand node Ls,deg=5',marker='|',color='c')\r\n plt.scatter(tarray, iarray3[:,1801] ,label='rand node LST,deg=5',marker='_',color='g')\r\n plt.xlabel('time')\r\n plt.ylabel('representive frequency')\r\n plt.legend()\r\n plt.title(\"Comparing repestive frequency of a random nodes, for the different linear models,time step=50,D=0.1\")\r\n plt.show()\r\n return None #modify as needed\r", "def oracle_wer(self, ref):\n # Add start and end to ref\n ref = [NULL, SOS] + ref.split() + [EOS]\n # Most lattices contain the correct path, so check that first\n if self.in_lattice(ref):\n return (0, [(i, i) for i in ref])\n # Initialize the alignment matrix\n align_matrix = np.ones((len(ref),len(self.nodes)), 'i') * MAXINT\n # And the backpointer matrix\n bp_matrix = np.zeros((len(ref),len(self.nodes)), 'O')\n # Figure out the minimum distance to each node from the start\n # of the lattice, and construct a node to ID mapping\n nodeid = {}\n for i,u in enumerate(self.nodes):\n u.score = MAXINT\n nodeid[u] = i\n self.start.score = 1\n for u in self.nodes:\n for x in u.exits:\n dist = u.score + 1\n if dist < x.dest.score:\n x.dest.score = dist\n def find_pred(ii, jj):\n bestscore = MAXINT\n bestp = -1\n if len(self.nodes[jj].entries) == 0:\n return bestp\n for e in self.nodes[jj].entries:\n k = nodeid[e.src]\n if align_matrix[ii,k] < bestscore:\n bestp = k\n bestscore = align_matrix[ii,k]\n return bestp\n # Now fill in the alignment matrix\n for i, w in enumerate(ref):\n for j, u in enumerate(self.nodes):\n # Insertion = cost(w, prev(u)) + 1\n if u == self.start: # start node\n bestp = -1\n inscost = i + 2 # Distance from start of ref\n else:\n # Find best predecessor in the same reference position\n bestp = find_pred(i, j)\n inscost = align_matrix[i,bestp] + 1\n # Deletion = cost(prev(w), u) + 1\n if i == 0: # start symbol\n delcost = u.score + 1 # Distance from start of hyp\n else:\n delcost = align_matrix[i-1,j] + 1\n # Substitution = cost(prev(w), prev(u)) + (w != u)\n if i == 0 
and bestp == -1: # Start node, start of ref\n subcost = int(w != u.sym)\n elif i == 0: # Start of ref\n subcost = (self.nodes[bestp].score\n + int(w != u.sym))\n elif bestp == -1: # Start node\n subcost = i - 1 + int(w != u.sym)\n else:\n # Find best predecessor in the previous reference position\n bestp = find_pred(i-1, j)\n subcost = (align_matrix[i-1,bestp]\n + int(w != u.sym))\n align_matrix[i,j] = min(subcost, inscost, delcost)\n # Now find the argmin\n if align_matrix[i,j] == subcost:\n bp_matrix[i,j] = (i-1, bestp)\n elif align_matrix[i,j] == inscost:\n bp_matrix[i,j] = (i, bestp)\n else:\n bp_matrix[i,j] = (i-1, j)\n # Find last node's index\n last = nodeid[self.end]\n # Backtrace to get an alignment\n i = len(ref)-1\n j = last\n bt = []\n while True:\n ip,jp = bp_matrix[i,j]\n if ip == i: # Insertion\n bt.append(('**INS**', '*%s*' % self.nodes[j].sym))\n elif jp == j: # Deletion\n bt.append(('*%s' % ref[i], '**DEL**'))\n else:\n if ref[i] == self.nodes[j].sym:\n bt.append((ref[i], self.nodes[j].sym))\n else:\n bt.append((ref[i], '*%s*' % self.nodes[j].sym))\n # If we consume both ref and hyp, we are done\n if ip == -1 and jp == -1:\n break\n # If we hit the beginning of the ref, fill with insertions\n if ip == -1:\n while True:\n bt.append(('**INS**', self.nodes[jp].sym))\n bestp = find_pred(i,jp)\n if bestp == -1:\n break\n jp = bestp\n break\n # If we hit the beginning of the hyp, fill with deletions\n if jp == -1:\n while ip >= 0:\n bt.append((ref[ip], '**DEL**'))\n ip = ip - 1\n break\n # Follow the pointer\n i,j = ip,jp\n bt.reverse()\n return align_matrix[len(ref)-1,last], bt", "def analyze_orbit_corrector(OC1, OC2, beamline, phase_beg):\n\n M = np.identity(4)\n OC_parameters = np.zeros(4)\n\n for element in beamline:\n M = np.dot(element.M1, M)\n\n # Since the X and Y are decoupled, we can treat them separately.\n M_x = M[0:2, 0:2]\n M_y = M[2:4, 2:4]\n\n L1 = [[OC1.length/2], [1]]\n L2 = [[OC2.length/2], [1]]\n\n M_OC1 = np.array(OC1.M1)[0:2, 0:2]\n M_OC2 = np.array(OC2.M1)[0:2, 0:2]\n\n # The following part solve the cx_1 and cx_2\n M1_x = np.linalg.multi_dot([M_OC2, M_x, L1])\n M2_x = np.linalg.multi_dot([M_OC2, M_x, M_OC1])\n M_OC_x = np.hstack((M1_x, L2))\n\n OC_parameters[0:2] = -np.linalg.multi_dot([np.linalg.inv(M_OC_x), M2_x, phase_beg[0:2]])\n # The end of the X-part\n\n # The following part solve the cy_1 and cy_2\n M1_y = np.linalg.multi_dot([M_OC2, M_y, L1])\n M2_y = np.linalg.multi_dot([M_OC2, M_y, M_OC1])\n M_OC_y = np.hstack((M1_y, L2))\n\n OC_parameters[2:4] = -np.linalg.multi_dot([np.linalg.inv(M_OC_y), M2_y, phase_beg[2:4]])\n # The end of the Y-part\n\n\n return OC_parameters", "def monotonic_contractor(self, *args, **kwargs):\n vertices = copy.deepcopy(args[0])\n nrange = len(vertices[0])\n xpts = []\n ypts = []\n for i in range(nrange):\n xpts.append(vertices[0][i].value)\n xpts.append(vertices[1][i].value)\n constraint = copy.deepcopy(args[1])\n \n \n # compute automatic differentiated curvature:\n qxdot = np.dot(xpts,self.localBasis[1,:])\n qxddot = np.dot(xpts,self.localBasis[2,:])\n qydot = np.dot(ypts,self.localBasis[1,:])\n qyddot = np.dot(ypts,self.localBasis[2,:]) \n #computation of doubledots is expanded below\n \n \n ## the all important computation split (need to abstract this kind of thing)\n ##lhs = ((np.sqrt(qxdot*qxdot + qydot*qydot) )**3. )*constraint\n lhs = (np.sqrt(qxdot**2 + qydot**2)**3.) 
*constraint\n \n # check2 = qxdot*qyddot\n # if check2.width() < 1.e-2:\n # check2.min.value = check2.real.value\n # check2.max.value = check2.real.value\n # t1 = (lhs - check2)/qydot\n \n #\n # qyddot\n #\n check2 = qydot*qxddot\n #if check2.width() < 1.e-2:\n # check2.min.value = check2.real.value\n # check2.max.value = check2.real.value\n if qxdot.contains(0.):\n print 'qxdot = ',qxdot\n print 'qxdot not invertable, implement other logic please'\n else:\n print 'invert qxdot'\n print 'qxdot = ', qxdot\n t1 = (lhs + qydot*qxddot)/(qxdot)#*(qxdot**-1.)\n t1 = t1 & qyddot # go ahead and shrink t1 to qyddot - they are logically equivalent\n total_ans = []\n useful_indices = []\n bad_indices = []\n for i in range(len(ypts)): \n min_ans = 0.\n for j in range(len(ypts)):\n if j==i:\n pass\n else:\n min_ans = (t1 - ypts[j]*float(self.localBasis[2,j])) + min_ans\n if (abs(float(self.localBasis[2,i])) > 0.0):\n min_ans = min_ans/float(self.localBasis[2,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n new_ans = vector_AND_(ypts, total_ans)\n for i in useful_indices:\n if not new_ans[i].isempty: # abs( new_ans[i].width() ) > 0.:\n ypts[i] = ypts[i] & new_ans[i]\n \n ## \n ## qxdot\n ##\n \n if qyddot.contains(0.):\n print 'qyddot = ',qyddot\n print 'qyddot not invertable, implement other logic please'\n else:\n print 'invert qyddot'\n print 'qyddot = ',qyddot\n fix = (lhs + qydot*qxddot)/(qyddot)#*(qyddot**-1.)\n fix = fix & qxdot # go ahead and shrink fix to qxdot - they are logically equivalent\n total_ans = []\n useful_indices = []\n bad_indices = []\n \n for i in range(len(xpts)): #contract on x[i]\n min_ans = 0.\n for j in range(len(xpts)): # add up all jth pieces of the dot product except i\n if j==i:\n pass\n else:\n \n min_ans = (fix - xpts[j]*float(self.localBasis[1,j] ) ) + min_ans\n if (abs(float(self.localBasis[1,i]) ) >0.0 ):\n min_ans = min_ans/float(self.localBasis[1,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n \n new_ans = vector_AND_(xpts, total_ans)\n for i in useful_indices:\n if not new_ans[i].isempty: # abs( new_ans[i].width() ) > 0.:\n xpts[i] = xpts[i] & new_ans[i]\n \n \n ## switch to the other side\n \n ##\n ## contract on qydot\n ##\n check2 = qxdot*qyddot\n #if check2.width() < 1.e-2:\n # check2.min.value = check2.real.value\n # check2.max.value = check2.real.value\n if qxddot.contains(0.):\n print 'qxddot = ',qxddot\n print 'qxddot not invertable, implement other logic please'\n else:\n print 'invert qxddot'\n print 'qxddot = ',qxddot\n t1 = (lhs - qxdot*qyddot)/(-qxddot)#*(-qxddot**-1)\n t1 = t1 & qydot\n total_ans = []\n useful_indices = []\n bad_indices = []\n for i in range(len(ypts)): \n min_ans = 0.\n for j in range(len(ypts)):\n if j==i:\n pass\n else:\n #print 't1 = ',t1\n #print 'ypts[{}] = {}'.format(i,ypts[i])\n #print 'localbasis[{},{}] = {}'.format(1,i,self.localBasis[1,j])\n min_ans = (t1 - ypts[j]*float(self.localBasis[1,j])) + min_ans\n if (abs(float(self.localBasis[1,i])) > 0.0):\n min_ans = min_ans/float(self.localBasis[1,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n \n new_ans = vector_AND_(ypts, total_ans)\n for i in useful_indices:\n if not new_ans[i].isempty: # abs( new_ans[i].width() ) > 0.:\n ypts[i] = ypts[i] & new_ans[i]\n \n ##contract on qxdot\n \n \n #contract on qxddot\n if qydot.contains(0.):\n print 'qydot = ',qxddot\n print 'qydot not invertable, implement other logic please'\n else:\n print 'invert 
qydot'\n print 'qydot = ',qydot\n fix = (lhs - qxdot*qyddot)/(-qydot)#*(-qydot**-1)\n fix = fix & qxddot # go ahead and shrink t1 to quddot - they are logically equivalent\n total_ans = []\n useful_indices = []\n bad_indices = []\n for i in range(len(xpts)):\n min_ans = 0.\n for j in range(len(xpts)):\n if j==i:\n pass\n else:\n \n min_ans = (fix - xpts[j]*float(self.localBasis[2,j] ) ) + min_ans\n if (abs(float(self.localBasis[2,i]) ) >0.0 ):\n min_ans = min_ans/float(self.localBasis[2,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n \n new_ans = vector_AND_(xpts, total_ans)\n for i in useful_indices:\n if not new_ans[i].isempty: # abs( new_ans[i].width() ) > 0.:\n xpts[i] = xpts[i] & new_ans[i]\n \n \n for i in range(nrange):\n vertices[0][i].value = xpts[i]\n vertices[1][i].value = ypts[i]\n \n return vertices", "def main():\n aoc_input = aoc_01_input.get_input()\n\n current_direction = 'N'\n steps_north = 0\n steps_east = 0\n\n # For part 2: Store all the coords visited in a list\n all_coords_list = []\n # A variable to save HQ coordinates in\n hq_coords = None\n\n for instruction in aoc_input:\n # One instruction is eg 'R2' or 'L44'\n input_turn = instruction[0]\n input_steps = int(instruction[1:])\n\n current_direction = change_direction(current_direction, input_turn)\n\n if current_direction == 'N':\n\n for k in range(input_steps):\n current_coords = [steps_north + k, steps_east]\n [all_coords_list, hq_coords] = check_all_coords(all_coords_list, current_coords, hq_coords)\n\n steps_north += input_steps\n\n elif current_direction == 'E':\n\n for k in range(input_steps):\n current_coords = [steps_north, steps_east + k]\n [all_coords_list, hq_coords] = check_all_coords(all_coords_list, current_coords, hq_coords)\n\n steps_east += input_steps\n\n elif current_direction == 'S':\n\n for k in range(input_steps):\n current_coords = [steps_north - k, steps_east]\n [all_coords_list, hq_coords] = check_all_coords(all_coords_list, current_coords, hq_coords)\n\n steps_north -= input_steps\n\n else:\n\n for k in range(input_steps):\n current_coords = [steps_north, steps_east - k]\n [all_coords_list, hq_coords] = check_all_coords(all_coords_list, current_coords, hq_coords)\n\n steps_east -= input_steps\n\n current_coords = [steps_north, steps_east]\n\n total_distance = abs(steps_north) + abs(steps_east)\n\n total_distance_part2 = abs(hq_coords[0]) + abs(hq_coords[1])\n\n print('Part 1: {}'.format(total_distance))\n print('Part 2: {}'.format(total_distance_part2))\n\n # print('Part 1: {}'.format(get_root(aoc_input[:])['name']))\n # print('Part 2: {}'.format(find_imbalance(aoc_input[:])))", "def set_consist(ss, ia, ib, input1, input2):\n comf3 = open(input2).readlines()\n comf1 = open(input1).readlines()\n\n \"\"\"\n get module identifiers\n \"\"\"\n comm1_array = []\n comm3_array = []\n\n for line in comf1:\n a, b = map(int, line.split())\n comm1_array.append(b)\n\n comm1_array.append(comm1_array[len(comm1_array)-1])\n\n for line in comf3:\n a, b = map(int, line.split())\n comm3_array.append(b)\n\n\n \"\"\"\n Make dictionaries. 
module numbers are keys and voxels in modules are values\n \"\"\" \n mod3_dict = {}\n mod1_dict = {}\n for i in set(comm3_array):\n mod3_dict[i] = [v for v, c in enumerate(comm3_array) if c == i]\n for i in set(comm1_array):\n mod1_dict[i] = [v for v, c in enumerate(comm1_array) if c == i]\n\n\n \"\"\"\n For each voxel, find its module in condition3, then in condition1, and interset voxels in its module in condition3 with condition1\n \"\"\"\n preservation = []\n for i in xrange(len(comm3_array)):\n if len(mod3_dict[comm3_array[i]]) < 20 or len(mod1_dict[comm1_array[i]]) < 20:\n preservation.append(777)\n else:\n inter = len(set(mod3_dict[comm3_array[i]]).intersection(set(mod1_dict[comm1_array[i]])))\n preservation.append(round(inter / float(len(mod3_dict[comm3_array[i]])), 4))\n\n pres_out = \"\"\n for line in preservation:\n pres_out += str(round(line,4))+\"\\n\"\n\n #outname = os.environ[\"state\"]+\"/\"+ss+\"/modularity5p/set_consistency/preserved_iters_\"+ia+\"_\"+ib+\"_\"+ss+\".txt\"\n #outname = os.environ[\"state\"]+\"/\"+ss+\"/modularity5p/set_consistency/iter\"+ia+\"_\"+ss+\"_preserved.txt\"\n outname = os.environ[\"state\"]+\"/\"+ss+\"/modularity5p/set_consistency2/iter\"+ia+\"_\"+ss+\"_preserved.txt\"\n outf = open(outname, \"w\")\n outf.write(pres_out)\n outf.close()" ]
[ "0.5643535", "0.5529715", "0.54889023", "0.5479325", "0.53951627", "0.53906417", "0.53498375", "0.53264004", "0.529862", "0.52936363", "0.5267395", "0.52408236", "0.51946", "0.51942897", "0.5164973", "0.5137584", "0.5127261", "0.5117817", "0.5111111", "0.5094825", "0.508434", "0.5067278", "0.5063571", "0.5041551", "0.5036992", "0.5019392", "0.5017755", "0.500363", "0.4995011", "0.49933782" ]
0.6616834
0
Visualize the learned weights. More suitable for many weights
def show_weights(weights, names=None):
    plt.imshow(visualize_grid(weights, padding=1).astype('uint8'), cmap='Greys')
    plt.gca().axis('off')
    plt.show()
    plt.savefig('vis.png')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def learning_viz(self) :\n self.train\n history = self.history\n plot_loss(history)", "def get_weights(self):", "def plot_weights(self,):\n \n weights_evolution = pd.DataFrame(self.predict[\"weights\"].values.tolist(), columns=[*self.models.keys()])\n\n plt.figure(figsize=(8, 5))\n\n for name in weights_evolution.columns:\n plt.plot(weights_evolution[name], label=name)\n\n plt.title(\"Weights evolution\")\n plt.legend()\n plt.grid(axis=\"y\", linestyle='--')\n plt.show()\n\n del weights_evolution", "def print_weight_info(weights):\n print(\"Length: {}\\nw:\\n{}\\nb:\\n{}\".format(\n len(weights[\"w\"]),\n [ w.shape for w in weights[\"w\"] ],\n [ b.shape for b in weights[\"b\"] ]))", "def debug_weights(self):\n return self.weights", "def visulize_weights(W):\n fig, axes1 = plt.subplots(2,5,figsize=(3,3))\n i = 0\n for j in range(2):\n for k in range(5):\n im = W[i,:].reshape(32, 32, 3)\n im = (im - np.min(im)) / (np.max(im) - np.min(im))\n axes1[j][k].set_axis_off()\n axes1[j][k].imshow(im)\n i += 1\n plt.show()", "def __str__(self):\n return str(self._weights)", "def weight_to_summary(self, name, var, batch=1):\n with self.graph.as_default():\n with tf.name_scope(\"weights.visualization\"):\n shape = var.get_shape()\n for i in range(0, shape[2]):\n for o in range(0, shape[3]):\n l_name = (\"%s.in.%d.out.%d\" % (name, i, o))\n tf.summary.image(l_name, tf.reshape(var[:, :, i:i+1, o:o+1], [1,5,5,1]))\n # TODO Hardcoded [5, 5]: bisogna sistemarlo.", "def get_weights(self):\n return self.weights\n #print(W)", "def print_weights(self, round=False):\n\n print \"[\",\n for l in range(1, self.num_layers()):\n for n in range(self.get_layer(l).num_nodes):\n weights = self.get_node_with_layer(l, n).weights\n for w in range(len(weights)):\n if round:\n print '{0:.3f}'.format(weights[w]),\n else:\n print weights[w],\n print ']'", "def visualize():\n model.eval()\n with torch.no_grad():\n alpha = model.mu_q_alpha\n beta = model.get_beta(alpha) \n \n print('\\n')\n print('#'*100)\n print('Visualize topics...') \n \n topics_words = []\n for k in range(args.num_topics):\n gamma = beta[k, :]\n top_words = list(gamma.cpu().numpy().argsort()[-args.num_words+1:][::-1]) \n topic_words = [vocab[a] for a in top_words]\n topics_words.append(' '.join(topic_words))\n print('Topic {} .. ===> {}'.format(k, topic_words)) \n\n print('\\n')\n print('Visualize word embeddings ...')\n # queries = ['economic', 'assembly', 'security', 'management', 'debt', 'rights', 'africa']\n # queries = ['economic', 'assembly', 'security', 'management', 'rights', 'africa']\n queries = ['border', 'vaccines', 'coronaviruses', 'masks']\n queries = set(queries).intersection(vocab)\n try:\n embeddings = model.rho.weight # Vocab_size x E\n except:\n embeddings = model.rho # Vocab_size x E\n # neighbors = []\n for word in queries:\n print('word: {} .. 
neighbors: {}'.format(\n word, nearest_neighbors(word, embeddings, vocab, args.num_words)))\n print('#'*100)", "def weights(self):\r\n\t\treturn None", "def summary(self, verbose=False):\n for i, layer in enumerate(self._layers):\n print('%d: %s' % (i, str(layer)))\n if verbose:\n print('weights:', layer.get_weights())\n if layer._use_bias:\n print('bias:', layer._bias)\n print()", "def save_weights(self, outfile='', title=''):\n\n row = self.num_visible\n col = self.num_hidden\n # plt.imshow(self.weights, cmap='gray', aspect='auto', interpolation='none')\n # plt.show()\n\n # plt.imshow(self.reconstruct_image(self.dataset[0]).reshape((28,28)), cmap='gray', aspect='equal', interpolation='none')\n # plt.show()\n\n # plt.imshow(self.dataset[0].reshape((28,28)), cmap='gray', aspect='equal', interpolation='none')\n # plt.show()\n return", "def weights(self) -> List[float]:", "def update_weights(self):\n\t\tpass", "def get_name(cls):\n\t\treturn 'plot_weights'", "def visualize(self):\n\n self.check_model()\n show(prepare(self.model, self.vectorized_data, self.vectorizer, mds='tsne'))", "def weight(self):", "def print(self):\n for l in range(self.h+1):\n print(\"Weight matrix between layer \" + str(l) + \" and layer \" + str(l+1))\n print(self.W[l])", "def visualize_training(features, labels, pl):\n print(\"Visualizing training\")\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n # Take out each feature type, one at a time\n label_map = get_label_map(labels)\n\n for key in label_map.keys():\n like_ind = label_map[key]\n like_data = np.array([features[i] for i in like_ind])\n\n plt.scatter(like_data[:,0],like_data[:,1],label=key)\n\n # get limits\n xmin = features.column_min(0) - .5\n xmax = features.column_max(0) + .5\n ymin = features.column_min(1) - .5\n ymax = features.column_max(1) + .5\n\n plt.xlim(xmin,xmax)\n plt.ylim(ymin,ymax)\n\n # Track the current dividing line, as well as the number of epochs passed\n divider, = plt.plot([],[])\n epoch_tracker = plt.text(-1,.9, '', fontsize=15)\n\n def update(i):\n \"\"\"\n 1.) Get the next set of weights from the tracker\n 2.) Calculate and draw the new divider line\n 3.) Update the epoch counter\n 4.) 
If we are at the end of an epoch, plot a dashed divider line to track progress\n \"\"\"\n epoch = i//features.instance_count\n w = pl.weights_tracker[i]\n a = pl.accuracy_tracker[epoch]\n divider.set_data([xmin,xmax],[(-xmin * w[0] - w[2]) / w[1], (-xmax * w[0] - w[2]) / w[1]])\n epoch_tracker.set_text(\"{} {}\".format(epoch + 1, a))\n\n # Keep a shadow of the hyperplane at the end of each epoch\n if i % features.instance_count == 0:\n plot_hyperplane(w,xmin,xmax,iter = i, alpha = .3, color='black',linestyle='dashed')\n\n return divider\n\n ani = animation.FuncAnimation(fig, update, frames=range(len(pl.weights_tracker)), interval=250,repeat=False)\n plt.legend()\n\n # optional save file\n if len(sys.argv) >= 3 :\n ani.save(sys.argv[2], writer='imagemagick', fps=5)\n\n plt.show()", "def plotWeights(w):\n w = w[:,:,0,:]\n # rescale w to 0.0 - 1.0\n mincode = np.amin(w)\n maxcode = np.amax(w)\n w = (w - mincode) / (maxcode - mincode)\n\n out = np.zeros((15, 15))\n for x in range(0,4):\n for y in range(0,4):\n c = x*4+y\n out[x*4:x*4+3, y*4:y*4+3] = w[:,:,c]\n return out", "def learn(self):\n Xt = np.append(np.ones((self.X.shape[0], 1)), self.X, axis=1)\n Yt = self.Y * 2 - 1\n\n w = np.ones(Xt.shape[1]) # avoiding random init, for debugging\n lw = [[] for k in range(len(w))]\n \n for iter in range(self.max_steps):\n P = Yt * np.dot(Xt, w)\n M = np.where(P <= 0)[0] # indices of misclassified datapoints\n\n if len(M) == 0: \n self.logger.debug(\"Found linearly separable hyperplane!\")\n break\n\n if self.is_stochastic:\n # just pick one randomly from M\n M = [M[random.randint(0, len(M)-1)]]\n\n grad = -1 * np.sum((Yt[M] * Xt[M].T), axis=1) / len(M)\n\n if self.reg_constant > 0:\n grad += self.reg_constant * w\n \n eta = self.step_size * 10000 / (10000 + iter)\n \n w = w - grad * eta\n \n if iter % 100 == 0:\n for k in range(len(w)):\n lw[k].append(w[k])\n \n if iter % 1000 == 0:\n self.logger.debug(\"Iter %s:\\t %f %f %f\" %(iter, w[0], w[1], w[2]))\n \n self.logger.debug(\"Iterations: %s\" %(iter))\n\n# x_range = range(len(lw[0]))\n# fig = plt.figure()\n# ax1 = fig.add_subplot(111) \n# for j, lwn in enumerate(lw):\n# if j % 3 >= 2: # plot an arbitrary subset of features\n# a = w[j]\n# ax1.plot(x_range, [(x-a) for x in lwn], label=str(j))\n# \n# plt.xlabel(\"Iteration\")\n# plt.ylabel(\"Feature weight\")\n# plt.show()\n \n #self.logger.debug(\"%s\" % np.array2string(w, precision=2, separator=','))\n \n self.w = w", "def get_weights(self):\r\n return self.weights", "def get_weights(self):\r\n return self.weights", "def weights_lst(self):\n assert self.sess is not None, \"Model has not been fitted yet!\"\n return self.sess.run(self.W_lst)", "def learn(self):\n pass", "def learn(self):\n pass", "def train(self):\n\n for i in range(self.c):\n mu_i = self.estimate_mu(i).T\n cov_i = self.estimate_sigma(i, mu_i)\n inv_sigma_i = np.linalg.inv(cov_i)\n P_i = self.estimate_P(i)\n Wi = -1 / 2 * inv_sigma_i\n wi = inv_sigma_i @ mu_i\n wi0 = -1 / 2 * mu_i.T @ inv_sigma_i @ mu_i - 1 / 2 * np.log(np.linalg.det(cov_i)) + np.log(P_i)\n self.weights.append([Wi, wi, wi0])\n return self.weights", "def __init__(self, weights):\n self._weights = weights" ]
[ "0.69428915", "0.6728055", "0.6703914", "0.66064876", "0.6543751", "0.65201473", "0.63403493", "0.63328934", "0.6332723", "0.62977654", "0.61747783", "0.61710286", "0.616417", "0.6146608", "0.6127756", "0.6119084", "0.609718", "0.6054239", "0.6037545", "0.60133284", "0.5982789", "0.5953382", "0.59513724", "0.5950616", "0.5950616", "0.5948618", "0.5947823", "0.5947823", "0.592274", "0.5900907" ]
0.69596964
0
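
Editor's note: the show_weights document in the row above depends on a visualize_grid helper that the row never defines. The sketch below is an editor-added, self-contained illustration of the same technique — tiling one weight row per cell of a padded grid image — and is not part of the dataset; the 32x32x3 tile shape, the random weights, and the weight_grid name are assumptions for demonstration only.

```python
# Editor-added sketch (not a dataset row): self-contained weight-grid visualization.
# Tile shape and random weights are assumed purely for demonstration.
import numpy as np
import matplotlib.pyplot as plt

def weight_grid(W, tile=(32, 32, 3), pad=1):
    """Arrange each row of W as one tile in a padded, roughly square grid."""
    n = W.shape[0]
    cols = int(np.ceil(np.sqrt(n)))
    rows = int(np.ceil(n / cols))
    h, w, c = tile
    grid = np.zeros((rows * (h + pad) - pad, cols * (w + pad) - pad, c))
    for i in range(n):
        r, col = divmod(i, cols)
        img = W[i].reshape(tile)
        img = (img - img.min()) / (img.max() - img.min() + 1e-8)  # rescale to [0, 1]
        grid[r * (h + pad):r * (h + pad) + h,
             col * (w + pad):col * (w + pad) + w] = img
    return grid

W = np.random.randn(10, 32 * 32 * 3)   # e.g. the weights of a 10-class linear classifier
plt.imshow(weight_grid(W))
plt.axis('off')
plt.savefig('vis.png')                 # save before show(), otherwise the file is blank
plt.show()
```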
Initialize the I2C LCD at the specified I2C address on the I2C bus.
def __init__(self, i2c, address=0x27, cols=2, rows=16, dotsize=LCD_5x8DOTS ): self.address = address self.i2c = i2c self.cols = cols self.rows = rows self._backlightval = LCD_NOBACKLIGHT self._displayfunction = None self._displaycontrol = None self._displaymode = None # ========= Initialize screen ========== # When the display powers up, it is configured as follows: # # 1. Display clear # 2. Function set: # DL = 1; 8-bit interface data # N = 0; 1-line display # F = 0; 5x8 dot character font # 3. Display on/off control: # D = 0; Display off # C = 0; Cursor off # B = 0; Blinking off # 4. Entry mode set: # I/D = 1; Increment by 1 # S = 0; No shift # # Note, however, that resetting the Arduino doesn't reset the LCD, so we # can't assume that its in that state when a sketch starts (and the # LiquidCrystal constructor is called). self._displayfunction = LCD_4BITMODE | LCD_1LINE | LCD_5x8DOTS if rows > 1: self._displayfunction = self._displayfunction | LCD_2LINE # for some 1 line displays you can select a 10 pixel high font if (dotsize != 0) and (lines == 1): self._displayfunction = self._displayfunction | LCD_5x10DOTS # SEE PAGE 45/46 FOR INITIALIZATION SPECIFICATION! # according to datasheet, we need at least 40ms after power rises above 2.7V before sending commands. sleep_ms(50) # Now we pull both RS and R/W low to begin commands self.expanderWrite(self._backlightval) # reset expanderand turn backlight off (Bit 8 =1) sleep_ms( 1000 ) # put the LCD into 4 bit mode this is according to the hitachi HD44780 datasheet # figure 24, pg 46 # we start in 8bit mode, try to set 4 bit mode self.write4bits( 0x03 << 4 ) sleep_us(4500) # wait min 4.1ms # second try self.write4bits( 0x03 << 4 ) sleep_us(4500) # wait min 4.1ms # third go! self.write4bits( 0x03 << 4 ) sleep_us(150) # finally, set to 4-bit interface self.write4bits( 0x02 << 4 ) # set #lines, font size, etc. self.command( LCD_FUNCTIONSET | self._displayfunction ) # DisplayFunction # turn the display on with no cursor or blinking default self._displaycontrol = LCD_DISPLAYON | LCD_CURSOROFF | LCD_BLINKOFF self.display() # turn display on/off (quickly) #clear it off self.clear() # Initialize to default text direction (for roman languages) self._displaymode = LCD_ENTRYLEFT | LCD_ENTRYSHIFTDECREMENT # set the entry mode self.command( LCD_ENTRYMODESET | self._displaymode ) self.home()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def i2c_init(id_, scl, sda, freq=400000):\n return machine.I2C(id_,\n scl=machine.Pin(\n scl,\n machine.Pin.OUT,\n machine.Pin.PULL_DOWN\n ),\n sda=machine.Pin(\n sda,\n machine.Pin.OUT,\n machine.Pin.PULL_DOWN\n ),\n freq=freq\n )", "def use_i2c():\n _LIB.oled_click_use_i2c()", "def __set_i2c_address(self, address):\n fcntl.ioctl(self.file_read, self.I2C_SLAVE, address)\n fcntl.ioctl(self.file_write, self.I2C_SLAVE, address)", "def open(self):\n self._i2c.open(bus=self._i2c_bus)\n self._configure_i2c_library_functions()\n if self.debug:\n print('VL53L1X: Opened I2C bus {}'.format(self._i2c_bus))", "def __init__(self, i2c: I2C, address: int = _SGP30_DEFAULT_I2C_ADDR) -> None:\n self._device = I2CDevice(i2c, address)\n\n # get unique serial, its 48 bits so we store in an array\n self.serial = self._i2c_read_words_from_cmd([0x36, 0x82], 0.01, 3)\n # get featureset\n featureset = self._i2c_read_words_from_cmd([0x20, 0x2F], 0.01, 1)\n if featureset[0] not in _SGP30_FEATURESETS:\n raise RuntimeError(\"SGP30 Not detected\")\n self.iaq_init()", "def __init__ (self, i2c, address, accel_range = 0):\n\n ## The I2C driver which was created by the code which called this\n self.i2c = i2c\n\n ## The I2C address at which the accelerometer is located\n self.addr = address\n self.i2c.mem_write(0x18, 40, 0x3D)", "def SetAutodetectLCD(lcd):\n SPI.DeviceList[\"spi_lcd\"]= lcd \n I2C.DeviceList[\"i2c_lcd\"]= lcd", "def test_i2c_controller(self):\n i2c = I2cController()\n i2c.configure(self.ftdi_url, frequency=100e3)\n eeprom = SerialEepromManager.get_from_controller(i2c, '24AA32A', 0x50)", "def ensureI2C(i2c=None):\n if i2c is None:\n logger.info('Initializing I2C.')\n import Adafruit_GPIO.I2C as I2C\n i2c = I2C\n return i2c", "def do_i2c(self, i2cAddress):\n ise.setI2CAddress(i2cAddress)", "def __init__(self, address=0x68, **kwargs):\n I2CDevice.__init__(self, address, **kwargs)\n logger.info(\"Created new si5324 instance with address 0x{:02X}.\".format(address))\n self.iCAL_required = True # An iCAL is required at least once before run", "def __init__(self, i2c_bus):\n self.regs = [0 for _ in range(255)]", "def initialize(self):\n\t\tpcd8544.LCD.initialize(self)\n\t\tRPIO.setup(self._backlight_pin, RPIO.OUT, initial=RPIO.LOW)", "def __init__(self, i2c, address=_SGP30_DEFAULT_I2C_ADDR):\n self._i2c = i2c\n self._addr = address\n self.serial = self._i2c_read_words_from_cmd(command=[0x36, 0x82], reply_size=3, delay=0.01)\n featureset = self._i2c_read_words_from_cmd([0x20, 0x2f], 1, 0.01)\n if featureset[0] != _SGP30_FEATURESET:\n raise RuntimeError('SGP30 Not detected')\n self.initialise_indoor_air_quality()", "def __init__(self, address=0x68, config=0):\r\n\t\tself.i2c = FT232H.I2CDevice(ft232h, address)\r\n\t\tif config == 0:\r\n\t\t\tself.setWake()\r\n\t\t\tself.setScale(mode='ACC',scale=0)\r\n\t\t\tself.setScale(mode='GYR',scale=0)\r\n\t\telif config == 1:\r\n\t\t\tself.setWake()\r\n\t\t\tself.setScale(mode='ACC',scale=1)\r\n\t\t\tself.setScale(mode='GYR',scale=1)\t\t\t\t\r\n\t\telif config == 2:\r\n\t\t\tself.setWake()\r\n\t\t\tself.setScale(mode='ACC',scale=2)\r\n\t\t\tself.setScale(mode='GYR',scale=2)\t\r\n\t\telif config == 3:\r\n\t\t\tself.setWake()\r\n\t\t\tself.setScale(mode='ACC',scale=3)\r\n\t\t\tself.setScale(mode='GYR',scale=3)\t\t\t\t\r\n\t\telif config == 4:\r\n\t\t\tself.setWake()\r\n\t\t\tself.setScale(mode='ACC',scale=1)\t\r\n\t\t\tself.setTempDisable()\r\n\t\t\tself.setGYRStandby(axis='X')\r\n\t\t\tself.setGYRStandby(axis='Y')\r\n\t\t\tself.setGYRStandby(axis='Z')", "def 
__init__(self):\n try: \n self.i2c = busio.I2C(board.SCL, board.SDA)\n self.mpu = adafruit_mpu6050.MPU6050(self.i2c)\n \n except: \n print(\"No IMU connection\")", "def __init__(self):\n i2c.Pn532_i2c.__init__(self)\n self._uid = False", "def __init__(self, address: int = PCA9685_ADDRESS, i2c = None, \n frequency: int = 26500000, resolution: int = 4096,\n servo_frequency: int = 50, **kwargs):\n i2c = ensureI2C(i2c)\n self._servos = {}\n self._servo_frequency = servo_frequency\n self._frequency = frequency\n self._resolution = resolution\n self._address = address\n self._device = i2c.get_i2c_device(address, **kwargs)\n\n self.set_all_pwm(0, 0)\n self._device.write8(MODE2, OUTDRV)\n self._device.write8(MODE1, ALLCALL)\n\n time.sleep(0.005) # wait for oscillator\n mode = self._device.readU8(MODE1)\n mode = mode & ~SLEEP # wake up (reset sleep)\n self._device.write8(MODE1, mode)\n time.sleep(0.005) # wait for oscillator\n self.set_pwm_freq(self._servo_frequency)\n logger.info(\"Registered controller on address %d\" % address)", "def EnableI2c(self):\n\n try:\n\n if os.path.exists('/sys/bus/i2c/devices/i2c-0/0-0060'):\n result = \" - I2C device already enabled!\"\n\n else:\n\n with open('/sys/bus/i2c/devices/i2c-0/new_device', 'a') as f:\n # 'echo '+i2c_device.driver+' '+i2c_device.addr+ '\n f.write('mpl3115 0x60')\n result = \" - I2C device enabled!\"\n\n LOG.info(result)\n\n except Exception as err:\n LOG.error(\"Error enabling I2C (device1): \" + str(err))", "def toI2C(n):\n print(\"{} 0x{:02x} I2C Address \".format(n, n))\n print(\"==================\")\n wb = n << 1\n print(\"{} 0x{:02x} Address Write\".format(wb, wb))\n rb = (n << 1) | 0b000001\n print(\"{} 0x{:02x} Address Read\".format(rb, rb))", "def open_i2c(device_index, slave_address):\n path = \"/dev/i2c-{}\".format(device_index)\n flags = os.O_RDWR\n fd = os.open(path, flags)\n I2C_SLAVE = 0x0703 # <-- a constant from `linux/i2c-dev.h`.\n ioctl(fd, I2C_SLAVE, slave_address)\n return fd", "def ip_display(self):\n if not self.cfg.get(\"use_screen\"):\n return\n i2c = I2C(\n -1,\n scl=Pin(self.cfg.get(\"i2c_screen_scl\")),\n sda=Pin(self.cfg.get(\"i2c_screen_sda\"))\n )\n screen = SSD1306_I2C(128, 32, i2c)\n screen.fill(0)\n if self.mode == STA_MODE:\n ip = self.sta_if.ifconfig()[0]\n screen.text('Normal Mode', 0, 0)\n screen.text('IP Address:', 0, 8)\n screen.text(ip, 0, 16)\n if self.mode == AP_MODE:\n ip = self.ap.ifconfig()[0]\n screen.text('Access Point:', 0, 0)\n screen.text(Connection.AP_SSID, 0, 8)\n screen.text('IP Address:', 0, 16)\n screen.text(ip, 0, 24)\n\n screen.show()\n time.sleep(5)", "def lcd_test(args):\n _check_mcu()\n lcd_test_bytes = CMD_MODULE_ID_LCD | 0x11\n i2c.write_bytes_to_address(MCU_MOUTH_ADDRESS, lcd_test_bytes)", "def __init__(self, machine):\n super().__init__(machine)\n self.features['has_i2c'] = True", "def __init__(self, machine):\n super().__init__(machine)\n self.features['has_i2c'] = True", "def _init_io(self):\n GPIO.setwarnings(False)\n GPIO.setmode( GPIO.BCM )\n pins = [ self._spi_dc ]\n for pin in pins:\n GPIO.setup( pin, GPIO.OUT )", "def __init__( self, address = None, smbus = None ):\n\n if not address: address = self.DEFAULT_I2C_ADDRESS\n self.__address = address\n \n if not bus: bus = self.DEFAULT_BUS\n self.__bus = smbus.SMBus( bus )\n\n self.__bus.write_byte_data( self.address, self.PWR_MGMT_1, self.WAKE )", "def __init__(self,\n vl53l1x_i2c: VL53L1X_I2C,\n initial_address: int = DEFAULT_ADDRESS,\n required_address: int = None,\n name: str = 'VL53L1X'):\n self.closed = False\n 
self.debug = vl53l1x_i2c.debug\n if self.debug:\n print(\"VL53L1X: Creating sensor at I2C address 0x{:x}.\".format(initial_address))\n self.name = name\n self.address = initial_address\n if not vl53l1x_i2c.is_device_at(initial_address):\n raise ValueError(\"VL53L1X: No device found at I2C address 0x{:x}\".format(initial_address))\n self.dev = VL53L1X_C_LIBRARY.allocDevice(c_uint8(initial_address))\n vl53l1x_i2c.devices.append(self)\n if initial_address == VL53L1X.DEFAULT_ADDRESS:\n Utils.check(VL53L1X_C_LIBRARY.VL53L1_software_reset(self.dev))\n # else skip soft reset because it only works with the default I2C address\n Utils.check(VL53L1X_C_LIBRARY.VL53L1_WaitDeviceBooted(self.dev))\n if required_address and required_address != initial_address:\n if self.debug:\n print(\" Changing sensor address to 0x{:x} from 0x{:x}.\".format(required_address, initial_address))\n Utils.check(VL53L1X_C_LIBRARY.setAddress(self.dev, c_uint8(required_address)))\n self.address = required_address\n Utils.check(VL53L1X_C_LIBRARY.VL53L1_DataInit(self.dev))\n Utils.check(VL53L1X_C_LIBRARY.VL53L1_StaticInit(self.dev))", "def _initialize_hardware(self):\n # Import\n try:\n import board\n import busio\n import adafruit_vl6180x\n except Exception as ex:\n logging.error(\n '\\n *** ERROR importing Adafruit libraries: {}'.format(\n ex,\n ),\n )\n\n # Things failed, so we must be running locally, not on a widget;\n # don't bother hooking up the VL6180X\n return\n\n # Initialize I2C and VL6180X\n try:\n i2c = busio.I2C(board.SCL, board.SDA)\n self._sensor = adafruit_vl6180x.VL6180X(i2c)\n except Exception as ex:\n logging.error(\n '\\n *** ERROR initializing I2C/LSM303: {}'.format(ex),\n )\n\n self._initialize_id_led()", "def _setup_io_devices(self) -> None:\n # Add PCI\n self.platform.pci_host.pio = self.iobus.mem_side_ports\n\n # Add Ethernet card\n self.ethernet = IGbE_e1000(\n pci_bus=0, pci_dev=0, pci_func=0, InterruptLine=1, InterruptPin=1\n )\n\n self.ethernet.host = self.platform.pci_host\n self.ethernet.pio = self.iobus.mem_side_ports\n self.ethernet.dma = self.iobus.cpu_side_ports\n\n if self.get_cache_hierarchy().is_ruby():\n for device in self._off_chip_devices + self._on_chip_devices:\n device.pio = self.iobus.mem_side_ports\n\n else:\n for device in self._off_chip_devices:\n device.pio = self.iobus.mem_side_ports\n for device in self._on_chip_devices:\n device.pio = self.get_cache_hierarchy().get_mem_side_port()\n\n self.bridge = Bridge(delay=\"10ns\")\n self.bridge.mem_side_port = self.iobus.cpu_side_ports\n self.bridge.cpu_side_port = (\n self.get_cache_hierarchy().get_mem_side_port()\n )\n self.bridge.ranges = [\n AddrRange(dev.pio_addr, size=dev.pio_size)\n for dev in self._off_chip_devices\n ]\n\n # PCI\n self.bridge.ranges.append(AddrRange(0x2F000000, size=\"16MB\"))\n self.bridge.ranges.append(AddrRange(0x30000000, size=\"256MB\"))\n self.bridge.ranges.append(AddrRange(0x40000000, size=\"512MB\"))" ]
[ "0.7174927", "0.67933416", "0.66797334", "0.65896314", "0.65378994", "0.6502087", "0.6427027", "0.63613755", "0.6358532", "0.63391155", "0.63276124", "0.62933046", "0.62551033", "0.6249159", "0.6034287", "0.5950266", "0.59406036", "0.5842928", "0.5823635", "0.5810184", "0.5787542", "0.5710431", "0.5704675", "0.5685172", "0.5685172", "0.56131953", "0.5610439", "0.554539", "0.5537006", "0.5516078" ]
0.7152499
1
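
Editor's note: a minimal MicroPython usage sketch for the constructor documented in the row above. The driver class name I2cLcd and the SCL/SDA pin numbers are assumptions (the row only shows the __init__ method); machine.I2C and the 0x27 default address come from the source.

```python
# Editor-added sketch; I2cLcd and the pin numbers are assumed, not taken from the dataset.
from machine import I2C, Pin

i2c = I2C(0, scl=Pin(22), sda=Pin(21), freq=400000)  # adjust bus/pins for your board
lcd = I2cLcd(i2c, address=0x27)                      # 0x27 is the constructor's default address
lcd.clear()                                          # clear() is also invoked inside __init__
```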
Write a 4-bit value, then pulse the Enable flag
def write4bits( self, value ): # uint8_t self.expanderWrite( value ) self.pulseEnable( value )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _write4bits(self, value):\n for i in range(4):\n bit = (value >> i) & 0x01\n self.output(self._data_pins()[i], bit)\n self._pulse_enable()", "def _write4bits(self, value):\n\t\tfor i in range(4):\n\t\t\tbit = (value >> i) & 0x01\n\t\t\tGPIO.output(self.pins_data[i], bit)\n\t\tself._pulse_e()\n\t\tmicroSleep(1) # Address Hold Time is 10nS min", "def pulseEnable( self, _data ): # uint8_t\n\t\tself.expanderWrite( _data | LCD_EN ) # En high\n\t\tsleep_us(1) # enable pulse must be >450ns\n\n\t\tself.expanderWrite( _data & (0xFF ^ LCD_EN) ) # En low\n\t\tsleep_us(50) # commands need > 37us to settle", "def _pulse_enable(self):\n self.set_low(self._en_pin)\n self._usleep(1)\n self.set_high(self._en_pin)\n self._usleep(1)\n self.set_low(self._en_pin)\n # commands need > 37us to settle\n self._usleep(100)", "def write_lock_bits(self, bit0=1, bit1=1, bit2=1, bit3=1, bit4=1):\n if not self._wrt_defined:\n print \"Please, call set_write_cycle_time() first\"\n return False\n lock_byte = 0\n for b in (bit4, bit3, bit2, bit1, bit0):\n lock_byte += b & 1\n lock_byte <<= 1\n self.SPItrans([0xac, 0xe0, 0x00, lock_byte])\n return True", "def _write8bits(self, value):\n\t\tfor i in range(8):\n\t\t\tbit = (value >> i) & 0x01\n\t\t\tGPIO.output(self.pins_data[i], bit)\n\t\tself._pulse_e()\n\t\tmicroSleep(1) # Address Hold Time is 10nS min", "def SPIwriteenable(self):\n data=[0x06];\n self.SPItrans(data);", "def _write8bits(self, value):\n for i in range(8):\n bit = (value >> i) & 0x01\n self.output(self._data_pins()[i], bit)\n self._pulse_enable()", "def enable_pulse_modulation(self):\n self.write(\":SOUR:PULM:STAT ON\")", "def enable_relays(self):\n #ensure clock and data are low\n self.e.clear_bit(7)\n self.e.clear_bit(5)\n time.sleep(0.01)\n\n #pulse the clock line\n self.e.set_bit(7)\n time.sleep(0.01)\n self.e.clear_bit(7)", "def readout_enable(self, enable):\n dlm = 8 if enable else 9\n self.send_command(dlm)", "def enable_sensor_power():\n sen = digital.SensorPower(\"senpwr\") \n sen.set()", "def enable_output(self):\n\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 7, 1)\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 4, 1)\n self.__bus.write_byte_data(\n self.__rtcaddress, self.CONTROL, self.__rtcconfig)\n return", "def write_reg1(self, value: int) -> None:\n self.sweep_enabled = (value & 0x80) > 0\n\n self.sweep_period = (value >> 4) & 0x07\n\n self.sweep_negate = (value & 0x08) > 0\n\n self.sweep_shift = value & 0x07", "def set_PIenable(self,highlow): \n GPIO.output(self.chanlist[4], highlow)", "def write_reg0(self, value: int) -> None:\n self.control_flag = (value & 0x80) > 0\n self._length_ctr = 0\n\n # self.output.setMul(self.volume)\n\n self.linear_ctr_reload_value = value & 0x7F", "def toggleCounterEnable(self):\n mask = 1\n self._injectFault(\"PWM1TCR\", self.TCR, mask)", "def set_sleep_after_interrupt(self, enable=True):\n self.write_flag_data([enable], APDS_9960.CONFIG_3_REG_ADDRESS, 4)", "def enable(self):\n for val in data:\n val.enable()\n self.enabled = True", "def Set(self,value):\n if value:\n onoff = 0x01\n else:\n onoff = 0x00\n self.Bus.Write_uInt8(self.Address,0x20+self.Pin, onoff)", "def enable(self, port):\n assert port in self.ports, 'bad port name'\n port = ord(port[4:]) - ord('A')\n (_, reg, ofs) = gpio_info[self.device.soc_name]\n hw = self.device.RCC.registers[reg]\n port += ofs\n val = hw.rd()\n val &= ~(1 << port)\n val |= 1 << port\n hw.wr(val)", "def send( self, value, mode=LCD_RS ): # RegisterSelect bit by default\n\t\thighnib = 
value & 0xF0\n\t\tlownib=(value<<4) & 0xF0\n\t\tself.write4bits( highnib | mode )\n\t\tself.write4bits( lownib | mode )", "def togglePWMEnable(self):\n mask = 1 << 3\n self._injectFault(\"PWM1TCR\", self.TCR, mask)", "def __int__(self):\n flags = self._analog_input_mode\n flags = set_bit(flags, 2, self._send_on_sensor_alarm)\n flags = set_bit(flags, 3, self._send_on_input_port_change)\n flags = set_bit(flags, 4, self._enable_1_wire_port)\n flags = set_bit(flags, 5, self._enable_all_link_aliasing)\n flags = set_bit(flags, 6, self._send_on_output_port_change)\n flags = set_bit(flags, 7, self._enable_output_timers)\n return flags", "def write_reg0(self, value: int) -> None:\n duty = value >> 6\n self.linear_table.replace(self.sequence[duty])\n\n self.length_ctr_halt = (value & 0x20) > 0\n if self.length_ctr_halt:\n self.length_ctr_load = 0\n\n self.constant_volume = (value & 0x10) > 0\n self.volume_envelope = value & 0x0F\n\n if self.constant_volume:\n self.output.setMul(_PULSE_VOLUME[self.volume_envelope])\n else:\n # TODO Envelope\n pass", "def enable_charge_pump(enable):\n send_command(0x8D)\n if enable:\n send_command(0x14)\n else:\n send_command(0x10)", "def _led_enable():\n # type: () -> None\n GPIO.output(LED_nOE, GPIO.LOW)", "def write(self):\n mask = 0\n for pin in self.pins:\n if pin.mode == OUTPUT:\n if pin.value == 1:\n pin_nr = pin.pin_number - self.port_number * 8\n mask |= 1 << pin_nr\n msg = chr(DIGITAL_MESSAGE + self.port_number)\n msg += chr(mask % 128)\n msg += chr(mask >> 7)\n self.board.sp.write(msg)", "def r4_on_off():\n \n r4_cmd_packet = b'\\x04\\x14\\x08\\x00\\x00\\xe0\\x0f'\n ser_relay.write(r4_cmd_packet)", "def write_reg0(self, value: int) -> None:\n self.length_ctr_halt = (value & 0x20) > 0\n if self.length_ctr_halt:\n self.length_ctr_load = 0\n\n self.constant_volume = (value & 0x10) > 0\n self.volume_envelope = value & 0x0F\n\n if self.constant_volume:\n self.output.setMul(_NOISE_VOLUME[self.volume_envelope])\n else:\n # TODO Envelope\n pass" ]
[ "0.8017873", "0.7902668", "0.6740639", "0.6712541", "0.66352326", "0.65554494", "0.6446782", "0.64384186", "0.64248353", "0.62296814", "0.61854124", "0.6129692", "0.60957503", "0.608745", "0.60842526", "0.5940202", "0.5918675", "0.58718055", "0.58666646", "0.58586204", "0.58545685", "0.58155394", "0.5814393", "0.58133644", "0.58008987", "0.57912946", "0.57877487", "0.57440525", "0.573726", "0.56672114" ]
0.80717915
0
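
Editor's note: the write4bits document above (together with the send negative in the same row) describes the HD44780 4-bit transfer scheme — each byte is sent as two nibbles, each latched on the bus by an Enable pulse. The editor-added sketch below only demonstrates the nibble arithmetic; the 0x48 value and the print are illustrative.

```python
# Editor-added sketch of the nibble split behind write4bits()/send():
# the high nibble goes out first, then the low nibble shifted into bits 4-7,
# and each nibble would be latched by pulsing the Enable line.
value = 0x48                       # ASCII 'H', purely illustrative
high_nibble = value & 0xF0         # 0x40
low_nibble = (value << 4) & 0xF0   # 0x80
for nibble in (high_nibble, low_nibble):
    print(f"write4bits(0x{nibble:02X})")
```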
Send data over the I2C bus, including the BackLight flag
def expanderWrite( self, _data ): # uint8_t #Wire.beginTransmission(_Addr); #printIIC((int)(_data) | _backlightval) # print II self.i2c.writeto( self.address, bytes( [_data | self._backlightval] )) #Wire.endTransmission();
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _sendi2c(self,command,data=[]) -> None:\n if isinstance(command, str):\n command = ord(command)\n try:\n self.bus.write_i2c_block_data(self.address, command, data)\n except OSError as err:\n print(\"I2C Device Error\\nCheck Connection\\n{}\".format(err))", "def _data(self, data):\n# \"\"\"Send data to spi bus of display chip, most DC pin need set to HIGH \"\"\"\n# if self._spi == None: raise \"Do not setting SPI\"\n# GPIO.output( self._spi_dc, 1 )\n# self._spi.writebytes( data )\n raise NotImplementedError", "def send_byte(byte_out):\n GPIO.output(clock_pin, 0)\n # set the chip select to write\n GPIO.output(chip_select, 1)\n # send the byte \n values = [(ord(byte_out) >> i) % 2 for i in range(0, 8)]\n GPIO.setup(data_pins, GPIO.OUT)\n GPIO.output(data_pins, values)\n # flash the clock pin\n GPIO.output(clock_pin, 1)\n GPIO.output(clock_pin, 0)", "def send_data(self, SPEED, STEER, BRAKE, GEAR):\n GEAR = 2 if SPEED >= 0.0 else 0\n\n # if self.feedbackMsg.AorM == 0:\n # return\n\n if self.doPIControl is True:\n\n current_speed = self.mps2kph(self.feedbackMsg.speed) # kph\n desired_speed = SPEED # kph\n SPEED, BRAKE = self.PIControl(\n currentSpeed=current_speed, desiredSpeed=desired_speed, brake=BRAKE)\n\n SPEED = abs(SPEED) * 10\n if SPEED > 200:\n SPEED = 200\n elif SPEED < 0:\n SPEED = 0\n\n STEER = STEER * 71\n if STEER > 1999:\n STEER = 1999\n if STEER < -1999:\n STEER = -1999\n\n try:\n\n if STEER >= 0:\n self.DATA[8] = int(STEER // 256)\n self.DATA[9] = int(STEER % 256)\n else:\n STEER = -STEER\n self.DATA[8] = int(255 - STEER // 256)\n self.DATA[9] = int(255 - STEER % 256)\n\n self.DATA[5] = GEAR # GEAR\n self.DATA[6] = int(SPEED // 256)\n self.DATA[7] = int(SPEED % 256)\n self.DATA[10] = BRAKE # BREAK\n self.DATA[11] = self.ALIVE\n\n self.ser.write((self.DATA))\n\n self.ALIVE = self.ALIVE + 1\n if self.ALIVE == 256:\n self.ALIVE = 0\n\n except Exception as ex:\n print(ex)", "def send(self, data):\n self._serial.write('spi = SPI(2, SPI.SLAVE, baudrate=500000, polarity=0, phase=0)\\r\\n'.encode('utf-8'))\n self._serial.write('data=bytearray({})\\r\\n'.format(data).encode('utf-8'))\n self._serial.write('spi.send(data, timeout=50000)\\r\\n'.encode('utf-8'))\n sleep(1)", "def send_to_port(self):\r\n time.sleep(2)\r\n # ser.write(\"R\".encode())\r\n ser.flush()\r\n ser.write(\"{},{},{},{},{}\".format(self.x_Pos, self.y_Pos, self.t_Tap, self.U_on, self.u_off).encode())\r\n # ser.flush()\r\n # while (1 == 1):\r\n # mydata = ser.readline().lstrip()\r\n # print(mydata.decode('utf-8'))\r\n # value = str(mydata)\r", "def send_data(self, data: int):\n self.write_pin(self.DC_PIN, RPi.GPIO.HIGH)\n self.__transfer([data])", "def send_recv(self, data):\n self._serial.write('spi = SPI(2, SPI.SLAVE, baudrate=500000, polarity=0, phase=0)\\r\\n'.encode('utf-8'))\n self._serial.write('data=bytearray({})\\r\\n'.format(data).encode('utf-8'))\n self._serial.write('list(spi.send_recv(data, timeout=50000))\\r\\n'.encode('utf-8'))\n sleep(1)", "def sendBuffer():\n dislin.sendbf()", "def write(self, register, value): #good\r\n\t\tself.i2c.write8(register, value)", "def r2_on_off():\n \n r2_cmd_packet = b'\\x04\\x14\\x02\\x00\\x00\\xe6\\x0f'\n ser_relay.write(r2_cmd_packet)", "def _i2c_write(self, register, value, bank=None):\n if bank is not None:\n self.set_bank(bank)\n self.i2c.write_byte_data(self.address, register, value)", "def write(self, command):\n print(commandToString(command)) # print the command in a user readable format.\n self.bus.write_i2c_block_data(self.address, 0, command)", "def 
send_traffic_data(serialport, pack):\n pack[0] = 0x01\n pack[1] = 0x00\n serialport.write(pack)\n logging.debug(\"Traffic Data - Sent.\")\n logging.debug(str(pack))", "def _write_v2(self, data):\n return self.usb_dev.write(self.ep_out, data, self.usb_wr_timeout)", "def update(self):\n\t\tfor x in range(self.leds):\n\t\t\tself.spi.write(self.buffer[x])\n\t\t\t#self.spi.flush()\n\t\t\t\n\t\tself.spi.write(bytearray(b'\\x00'))\n\t\tself.spi.flush()", "def send_command(command):\n if connection_type == USE_I2C:\n cmd = \"\"\n cmd += chr( SSD1306_ADDRESS )\n cmd += chr( SELECT_CONTROL_BYTE )\n cmd += chr( command )\n i2cWrite(cmd, 10, False)\n else:\n print \"Not implemented for that connection type yet.\"", "def write_byte_data(self, request, context):\n status_response = smbusRpc_pb2.operation_status(code=0, exception='')\n i2c_addr = request.i2c_addr\n i2c_register = request.register\n i2c_value = request.value\n try:\n client_id = self._get_client_id(context)\n self.logger.info('Client(%s) write_byte (0x%x) at 0x%x register 0x%x' % (client_id, i2c_value, i2c_addr,\n i2c_register))\n self.clients[client_id].bus_context.smbus.write_byte_data(i2c_addr, i2c_register, i2c_value)\n except Exception as e:\n status_response.code = 1\n status_response.exception = 'proxy server: ' + str(e)\n return status_response", "def __transfer(self, data: int):\n self.__spi.writebytes(data)", "def send_data(data_string):\n if connection_type == USE_I2C:\n cmd = \"\"\n cmd += chr( SSD1306_ADDRESS )\n cmd += chr( SELECT_DATA_BYTE )\n cmd += data_string\n i2cWrite(cmd, 10, False)\n else:\n print \"Not implemented for that connection type yet.\"", "def write_i2c_block_data(self, request, context):\n status_response = smbusRpc_pb2.operation_status(code=0, exception='')\n i2c_addr = request.i2c_addr\n i2c_register = request.register\n i2c_data = request.data\n try:\n client_id = self._get_client_id(context)\n self.logger.info('Client(%s) write_i2c_block_data at 0x%x register 0x%x' % (client_id, i2c_addr, i2c_register))\n self.clients[client_id].bus_context.smbus.write_i2c_block_data(i2c_addr, i2c_register, list(i2c_data))\n except Exception as e:\n status_response.code = 1\n status_response.exception = 'proxy server: ' + str(e)\n return status_response", "def send(self, data):\n \n try:\n self.s.send(data)\n LED.blink(2, 0.1, 0x00ff00)\n print(\"Sending data:\")\n print(data)\n except OSError as e:\n if e.errno == 11:\n print(\"Caught exception while sending\")\n print(\"errno: \", e.errno)\n \n LED.off()\n data = self.s.recv(64)\n print(\"Received data:\", data)\n\n return data", "def send(self, address, message):\n self.__set_i2c_address(address)\n self.__write(message)", "def data(self, data):\n for i in xrange(0, len(data), 32):\n self.bus.write_i2c_block_data(self.addr,\n self.data_mode,\n list(data[i:i+32]))", "async def send_ir(self):\n fan_speed = self.fan_mode\n # tweak for some ELECTRA_AC devices\n if HVAC_FAN_MAX_HIGH in self._fan_list and HVAC_FAN_AUTO_MAX in self._fan_list:\n if self.fan_mode == FAN_HIGH:\n fan_speed = HVAC_FAN_MAX\n if self.fan_mode == HVAC_FAN_MAX:\n fan_speed = HVAC_FAN_AUTO\n\n\n # Set the swing mode - default off\n self._swingv = STATE_OFF if self._fix_swingv is None else self._fix_swingv\n self._swingh = STATE_OFF if self._fix_swingh is None else self._fix_swingh\n\n if SWING_BOTH in self._swing_list or SWING_VERTICAL in self._swing_list:\n if self._swing_mode == SWING_BOTH or self._swing_mode == SWING_VERTICAL:\n self._swingv = STATE_AUTO\n\n if SWING_BOTH in self._swing_list or 
SWING_HORIZONTAL in self._swing_list:\n if self._swing_mode == SWING_BOTH or self._swing_mode == SWING_HORIZONTAL:\n self._swingh = STATE_AUTO\n\n _dt = dt_util.now()\n _min = _dt.hour * 60 + _dt.minute\n\n # Populate the payload\n payload_data = {\n \"StateMode\": self._state_mode,\n \"Vendor\": self._vendor,\n \"Model\": self._model,\n \"Power\": self.power_mode,\n \"Mode\": self._last_on_mode if self._keep_mode else self._hvac_mode,\n \"Celsius\": self._celsius,\n \"Temp\": self._target_temp,\n \"FanSpeed\": fan_speed,\n \"SwingV\": self._swingv,\n \"SwingH\": self._swingh,\n \"Quiet\": self._quiet,\n \"Turbo\": self._turbo,\n \"Econo\": self._econo,\n \"Light\": self._light,\n \"Filter\": self._filter,\n \"Clean\": self._clean,\n \"Beep\": self._beep,\n \"Sleep\": self._sleep,\n \"Clock\": int(_min),\n \"Weekday\": int(_dt.weekday()),\n }\n self._state_mode = DEFAULT_STATE_MODE\n for key in self._toggle_list:\n setattr(self, '_' + key.lower(), 'off')\n\n payload = (json.dumps(payload_data))\n \n # Publish mqtt message\n if float(self._mqtt_delay) != float(DEFAULT_MQTT_DELAY):\n await asyncio.sleep(float(self._mqtt_delay))\n \n await mqtt.async_publish(self.hass, self.topic, payload)\n\n # Update HA UI and State\n self.async_schedule_update_ha_state()", "def apa102_send_bytes( clock_pin, data_pin, bytes_ ):\n \n # implementeer deze functie:\n \n # zend iedere byte in bytes:\n assert len(bytes_) == 4\n for byte in bytes_:\n # zend ieder bit in byte:\n# print(byte)\n assert len(byte) == 8\n for bit in byte:\n GPIO.output(data_pin, bit)\n #time.sleep(.1)\n GPIO.output(clock_pin, 1)\n #time.sleep(.1)\n GPIO.output(clock_pin, 0)\n # maak de data pin hoog als het bit 1 is, laag als het 0 is\n # maak de clock pin hoog\n # maak de clock pin laag", "def __send__(self,val):\n assert(len(val) == 1)\n assert(type(val) == bytes)\n v = int.from_bytes(val,byteorder=\"little\")\n if(self.verbose):\n pc.color_stdout(\"GREEN\")\n print(\">> %s\\t - %s\\t - %d\"% (hex(v),bin(v),v))\n pc.color_stdout(\"RESET\")\n self.port.write(val)", "def transmit(self) -> None:\n # Like RadioHead library, turn on high power boost if enabled.\n self.set_boost(_TEST_PA1_BOOST)\n # Enable packet sent interrupt for D0 line.\n self.dio_0_mapping = 0b00\n # Enter TX mode (will clear FIFO!).\n self.operation_mode = TX_MODE", "def transmit(self, data):\n # ascii_stream = bytes(data, 'ascii')\n # self.log.info('Encoding bytestream: %s to ascii: %s', data, ascii_stream)\n try:\n self.ser.write(data)\n # Purge scheduler and reboot transmitter\n except serial.SerialTimeoutException as exc:\n self.log.exception(\"Frame write to transmitter timed out: %s\", exc)\n self.log.info(\"Successfully wrote %s to tesseract transmitter\", data)\n # # disconnect handle\n # if bin_frame is None:\n # # delete `SessionQueue` instance from `ReceiverRegister`\n # self.rec_reg.write(ap_index)\n # else:\n # hex_code = binascii.hexlify(bin_frame.tobytes())\n # hardware_encode = self.cache.cache_map(bin_frame,ap_index)\n # transmit_hex = binascii.hexlify(hardware_encode.tobytes())\n # print(str(hex_code) + \" | To Access Point \" + str(ap_index), end='\\r')\n # if os.environ['TCS_ENV'] == 'dev':\n # # TODO: Move these prints to cache logger\n # self.log.debug(\"Binary Frame Data: %s\", bin_frame)\n # self.log.debug(\"Hardware Mapping: %s\", hardware_encode)\n # self.log.debug(\"Decode from AP0: %s\", self.cache._cache[-1][0])\n # self.log.debug(\"Decode from AP1: %s\", self.cache._cache[-1][1])\n # self.log.debug(\"Decode from AP2: %s\", 
self.cache._cache[-1][2])\n # self.log.debug(\"Decode from AP3: %s\", self.cache._cache[-1][3])", "def r8_on_off():\n \n r8_cmd_packet = b'\\x04\\x14\\x80\\x00\\x00\\x68\\x0f'\n ser_relay.write(r8_cmd_packet)\n time.sleep(1)\n resp_array = array('B', ser_relay.read(7)) # get resp and put in array\n\n return resp_array" ]
[ "0.7092826", "0.62958944", "0.6190678", "0.6189689", "0.6078368", "0.60758936", "0.6061588", "0.6058107", "0.6019048", "0.59788793", "0.5976502", "0.59479564", "0.5939198", "0.589264", "0.5828056", "0.5798978", "0.579745", "0.5788026", "0.5786685", "0.57807434", "0.57775354", "0.5747578", "0.5726064", "0.5662251", "0.56510645", "0.5649305", "0.5645998", "0.5636832", "0.5624903", "0.5619479" ]
0.63112533
1
Pulse the Enable Pin on the LCD
def pulseEnable( self, _data ): # uint8_t self.expanderWrite( _data | LCD_EN ) # En high sleep_us(1) # enable pulse must be >450ns self.expanderWrite( _data & (0xFF ^ LCD_EN) ) # En low sleep_us(50) # commands need > 37us to settle
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _pulse_enable(self):\n self.set_low(self._en_pin)\n self._usleep(1)\n self.set_high(self._en_pin)\n self._usleep(1)\n self.set_low(self._en_pin)\n # commands need > 37us to settle\n self._usleep(100)", "def enable_pulse_modulation(self):\n self.write(\":SOUR:PULM:STAT ON\")", "def set_PIenable(self,highlow): \n GPIO.output(self.chanlist[4], highlow)", "def enable_sensor_power():\n sen = digital.SensorPower(\"senpwr\") \n sen.set()", "def idle(self):\n self.pi.set_servo_pulsewidth(self.gpio, 0)", "def enable_relays(self):\n #ensure clock and data are low\n self.e.clear_bit(7)\n self.e.clear_bit(5)\n time.sleep(0.01)\n\n #pulse the clock line\n self.e.set_bit(7)\n time.sleep(0.01)\n self.e.clear_bit(7)", "def togglePWMEnable(self):\n mask = 1 << 3\n self._injectFault(\"PWM1TCR\", self.TCR, mask)", "def toggleCounterEnable(self):\n mask = 1\n self._injectFault(\"PWM1TCR\", self.TCR, mask)", "def start_alarm(self):\n self.out_power.pulse()", "def _led_enable():\n # type: () -> None\n GPIO.output(LED_nOE, GPIO.LOW)", "def led_duty_cycle(val):\n set_tmr_ocr(TMR1, OCRxB, val)", "def togglePWMPinEnable(self, PWMpin):\n bitPos = PWMpin + 8\n mask = 1 << bitPos\n self._injectFault(\"PWM1PCR\",self.PCR,mask)", "def power_down(self):\n t_end = time.time() + 3\n while time.time() < t_end:\n self.light_led(4)\n self.light_led(6)", "def blink(self):\n self.displaycontrol |= self.LCD_BLINKON\n self.write_lcd(self.LCD_DATA_E1, self.LCD_DISPLAYCONTROL | self.displaycontrol)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_DISPLAYCONTROL | self.displaycontrol)", "def turn_on(self):\n self._interrupt_flash()\n if not self.on:\n GPIO.output(self.pin, GPIO.HIGH)\n self.on = True", "def set_pin(self):\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(self.pin, GPIO.OUT)\n GPIO.output(self.pin, GPIO.LOW)\n time.sleep(3)\n GPIO.output(self.pin, GPIO.HIGH)", "def disable_pulse_modulation(self):\n self.write(\":SOUR:PULM:STAT OFF\")", "def set_sleep_after_interrupt(self, enable=True):\n self.write_flag_data([enable], APDS_9960.CONFIG_3_REG_ADDRESS, 4)", "def light_standby():\n for led in leds:\n led.on()\n\n rgb_driver.pulse(on_color=(scale[\"R\"], scale[\"G\"], scale[\"B\"]), off_color=(0,0,0))", "def on(self):\n if not self._is_on:\n self._pwms.enable(self._pin_index, self._frequency)\n self._is_on = True", "def left(self):\n self.pi.set_servo_pulsewidth(self.gpio, self.pulse_left_ns)", "def set_backlight(self, enabled=True):\n\t\tif enabled:\n\t\t\tRPIO.output(self._backlight_pin, RPIO.HIGH)\n\t\telse:\n\t\t\tRPIO.output(self._backlight_pin, RPIO.LOW)", "def power_up(self):\n t_end = time.time() + 3\n while time.time() < t_end:\n self.light_led(5)\n self.light_led(6)", "def pulse_hi(pin, length=0.00001): \n on(pin)\n time.sleep(length)\n off(pin)\n time.sleep(length)", "def trigger(self):\n GPIO.output(self.trigger_pin, 1)\n time.sleep(10/1000000)\n GPIO.output(self.trigger_pin, 0)", "def pulse(vjoy, btn_id):\n global g_is_running\n g_is_running = True\n while g_is_running:\n vjoy[1].button(btn_id).is_pressed = True\n time.sleep(g_hold_time)\n vjoy[1].button(btn_id).is_pressed = False\n time.sleep(g_pause_time)", "def pulse_lo(pin, length=0.00001):\n off(pin)\n time.sleep(length)\n on(pin)\n time.sleep(length)", "def Cursor(self,on=True,blink=False):\n value = 8+4\n if on and blink:\n value+=1\n elif on and not blink:\n value+=2\n self.Bus.Write_uInt8(self.Address,0x01,value)", "def sleep(self):\r\n if not self.backlight:\r\n return\r\n\r\n self.backlight.power = False", "def hold(self):\n 
self.dev.write(1, 'H')" ]
[ "0.8290033", "0.72652996", "0.7065156", "0.69558316", "0.6947763", "0.6901592", "0.676794", "0.6735534", "0.66760164", "0.6586983", "0.6527253", "0.6503161", "0.64747596", "0.64213467", "0.6400039", "0.6391462", "0.6362962", "0.6358918", "0.63532215", "0.6292264", "0.6276042", "0.626684", "0.62542284", "0.62527275", "0.6252575", "0.620746", "0.6186658", "0.61821353", "0.6152691", "0.6123741" ]
0.7924556
1
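The pulseEnable() document above toggles the LCD enable line purely with bit masks, leaving the data and backlight bits untouched. A minimal, hardware-free sketch of that masking, assuming LCD_EN is bit 2 (0x04) as on common PCF8574 I2C backpacks (the record itself does not define the constant):

    # Illustrative sketch only; LCD_EN = 0x04 is an assumption (PCF8574 wiring).
    LCD_EN = 0x04

    def en_high(data: int) -> int:
        # OR-ing the mask raises only the EN bit.
        return data | LCD_EN

    def en_low(data: int) -> int:
        # AND-ing with the inverted mask (0xFF ^ LCD_EN) clears only the EN bit.
        return data & (0xFF ^ LCD_EN)

    value = 0b1011_0000                      # arbitrary data + backlight bits
    assert en_high(value) == 0b1011_0100
    assert en_low(en_high(value)) == value   # round-trips back to the original byte

The real driver inserts microsecond delays between the two writes because, per its own comments, the enable pulse must stay high for more than 450 ns and commands need over 37 us to settle.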
Set the cursor @ home position
def home( self ): self.command( LCD_RETURNHOME ) # set cursor position to zero sleep_us( 2000 ) # this command takes a long time!
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def home(self):\n self.command(_LCD_RETURNHOME)\n self._cursor_pos = (0, 0)\n time.sleep(2*MILLISECOND)", "def CursorHome(self):\n self.Bus.Write_uInt8(self.Address, 0x01, 0x02)", "def home(self):\n self.command(self.LCD_RETURNHOME)\n self._cursor_pos = (0, 0)\n self._msleep(2)", "def set_cursor(self, row, col):\n self._vim.current.window.cursor = (row, col)", "def change_cursor(self, cursor):\n self.setCursor(cursor)", "def set_cursor( self, pos ):\n\t\tassert type( pos ) is tuple, \"pos must be a tuple (col,row)\"\n\t\trow_offsets = [ 0x00, 0x40, 0x14, 0x54 ]\n\t\trow = pos[1]\n\t\tif ( row > self.rows ):\n\t\t\trow = self.rows-1 # we count rows starting w/0\n\n\t\tself.command(LCD_SETDDRAMADDR | (pos[0] + row_offsets[row])) # Col + row offset", "def set_cursor_position(self, x: int, y: int) -> None:\n self.screen.move(y, x)", "def moveCursor(self):\n\n\t\tself._before = self.rect.center\n\t\tself.rect.center = self._pos", "def home(self):\n self.goto(0, 0)\n self.setheading(0)", "def home(self):\n self.initial_offset = 0", "def go_home(self):\n self.set_jpos(self._home_position, wait=True)", "def set_cursor(self,x,y):\n if 1 <= x <= 20 and y in [1,2]:\n self.send(\"\\x1f\\x24%c%c\" % (x,y))\n else:\n raise ValueError('cursor position must be between 1,20 and 1,2')", "def _set_cursor(self, cursor):\n self._cursor = cursor", "def move3dCursor(p = (0,0,0)):\n bpy.context.scene.cursor_location = p\n # bpy.context.space_data.cursor_location = p", "def go_home(self):\n self.move_wl(0)", "def set_cursor(self, cursor):\n for step in self.steps:\n step[1].set_cursor(cursor)\n return self", "def home(self):\n self.goto(0, 0)", "def int_33H_4(self):\r\n horizontal_position = self.registers['CX'].get_int(-1)\r\n vertical_position = self.registers['DX'].get_int(-1)\r\n print(horizontal_position, vertical_position)\r\n MainWindow.set_cursor_poisition(horizontal_position, vertical_position)", "def setDefaultCursorPosition(self):\n self.srcEditor.setFocus()\n self.srcEditor.setCursorPosition(0,0)", "def move_to(xy):\n (x,y) = xy\n win32api.SetCursorPos((x,y))", "def move_to_start(self):\n self.pos = (SCREEN_WIDTH / 2, SCREEN_HEIGHT - 64)", "def goto_position(editor, pos):\n cursor = editor.textCursor()\n editor.moveCursor(cursor.End)\n cursor.setPosition(pos)\n editor.setTextCursor(cursor)", "def set_cursor(obj: QObject, cursor: QCursor = Qt.PointingHandCursor) -> None:\n obj.setCursor(QCursor(cursor))", "def home(self):\n self.__send_short(self.MGMSG_MOT_MOVE_HOME, self.__chan, 0x00)", "def set_cursor( self, point ):\n\t\tif 0 <= abs(point[0]) <= self.width :\n\t\t\tself.xprint = point[0]\n\t\tif 0 <= abs(point[1]) <= self.height :\n\t\t\tself.yprint = point[1]", "def set_home_position(self, lat, lon, alt):\n pass", "def cursor( self, value=True ):\n\t\tif value:\n\t\t\tself._displaycontrol |= LCD_CURSORON\n\t\telse:\n\t\t\tself._displaycontrol &= (0xFF^LCD_CURSORON)\n\t\tself.command( LCD_DISPLAYCONTROL | self._displaycontrol )", "def home(self):\n self.send(\"\\x0b\")", "def go_home(self):\n self.set_all_positions([0]*self.nleaflets)", "def move_to(self, ypos, xpos):\n # the screen's coordinates are 1 based, but the command is 0 based\n xpos -= 1\n ypos -= 1\n self.exec_command(\"MoveCursor({0}, {1})\".format(ypos, xpos).encode(\"utf-8\"))" ]
[ "0.78883594", "0.7874507", "0.7804661", "0.71700335", "0.70821345", "0.70201814", "0.69244087", "0.6887372", "0.6868383", "0.6809072", "0.6770352", "0.6752197", "0.67183876", "0.66883904", "0.66750664", "0.66597855", "0.6611106", "0.6594645", "0.65583235", "0.654024", "0.6472678", "0.6448934", "0.6433706", "0.64253837", "0.630974", "0.6256005", "0.6231732", "0.62310416", "0.6230416", "0.61802685" ]
0.7986535
0
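In the home() document above, "return home" is a single command byte (LCD_RETURNHOME, conventionally 0x02 on HD44780-class controllers) followed by a long settle delay. A tiny hypothetical test double, used here only to capture the emitted byte, makes that visible without any I2C hardware:

    # Sketch with an assumed HD44780 constant; FakeLCD is a hypothetical stand-in.
    LCD_RETURNHOME = 0x02

    class FakeLCD:
        def __init__(self):
            self.sent = []

        def command(self, value):
            # The real driver writes this byte over I2C; here we only record it.
            self.sent.append(value)

        def home(self):
            self.command(LCD_RETURNHOME)  # real code then sleeps ~2 ms to settle

    lcd = FakeLCD()
    lcd.home()
    assert lcd.sent == [0x02]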
Scroll the display without changing the RAM
def scroll_display( self, direction=LCD_MOVELEFT ): assert direction in (LCD_MOVELEFT,LCD_MOVERIGHT), "Invalid direction %s value" % direction self.command(LCD_CURSORSHIFT | LCD_DISPLAYMOVE | direction)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start_scroll():\n send_command(0x2F)", "def scroll(*args):", "def idle_loop(self):\n sleep(0.1)\n self.scroll()", "def autoscroll(self):\n self.displaymode |= self.LCD_ENTRYSHIFTINCREMENT\n self.write_lcd(self.LCD_DATA_E1, self.LCD_ENTRYMODESET | self.displaymode)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_ENTRYMODESET | self.displaymode)", "def __window_scrollTo(self, x, y):\n pass", "def updatescroll(self):\n if self.node:\n #self.update_idletasks() # Required, else dimension of content may not have been computed ?\n forgetit, forgetit, x1, forgetit = self.bbox(ALL)\n self.sizetree = self.node.sizetree() + (self.winfo_height() / self.nodeheight) - 1\n self.configure(scrollregion = (0, 0, x1, self.sizetree * self.nodeheight))", "def do_auto_scroll( self, auto = True ):\n print( \"do_auto_scroll fix !!\" )", "def __window_scroll(self, x, y):\n pass", "def Scroll(self, steps):\n self._EnsureHIDValueInRange(steps)\n self._kit.MouseScroll(steps)\n time.sleep(self.send_delay)", "def yview_scroll(self, number, what):\n self.tk.call(self._w, 'yview', 'scroll', number, what)", "def xview_scroll(self, number, what):\n self.tk.call(self._w, 'xview', 'scroll', number, what)", "def lulz(self):\n self.reset()\n self.scrollproc = threading.Thread(target=self.lulzloop)\n self.killedevent.wait()\n self.scrollproc.start()", "def on_mousewheel(self, event):\r\n self.container_widgets[\"order_canvas\"].yview_scroll(-1 * int(event.delta / 120), \"units\")\r\n # TODO FIX SCROLLING\r", "def scrollDown(self):\n if self.__firstShownLine < len(self.__data) - 1:\n self.__firstShownLine += 1\n self.__refreshContent()\n self.__printRow(self.__firstShownLine + self.height - 2)\n else:\n curses.beep()", "def scroll(self):\r\n SCROLL_PAUSE_TIME = 2\r\n current_scrolls = 0\r\n\r\n last_height = driver.execute_script(\"return document.body.scrollHeight\")\r\n while True:\r\n try:\r\n if current_scrolls == total_scroll:\r\n return\r\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\r\n time.sleep(SCROLL_PAUSE_TIME)\r\n\r\n new_height = driver.execute_script(\"return document.body.scrollHeight\")\r\n if new_height == last_height:\r\n break\r\n last_height = new_height\r\n except TimeoutException:\r\n break\r\n return", "def scrollDisplayRight(self):\n self.displayshift = self.LCD_DISPLAYMOVE | self.LCD_MOVERIGHT\n self.write_lcd(self.LCD_DATA_E1, self.LCD_CURSORSHIFT | self.displayshift)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_CURSORSHIFT | self.displayshift)", "def scroll(self):\n x_position = self._player.get_position()[0]\n half_screen = self._master.winfo_width() / 2\n world_size = self._world.get_pixel_size()[0] - half_screen\n\n # Left side\n if x_position <= half_screen:\n self._view.set_offset((0, 0))\n\n # Between left and right sides\n elif half_screen <= x_position <= world_size:\n self._view.set_offset((half_screen - x_position, 0))\n\n # Right side\n elif x_position >= world_size:\n self._view.set_offset((half_screen - world_size, 0))", "def scroll(self, axis, value):\n\n\t\tself._interface.scroll(axis, value)", "def _on_mousewheel(event):\n if event.num == 4 or event.delta > 0:\n canvas.yview_scroll(-1, \"units\" )\n elif event.num == 5 or event.delta < 0:\n canvas.yview_scroll(1, \"units\" )", "def scroll(self, dir):\n try:\n self.scrool = dir\n except:\n raise ReferenceError", "def scroll(self, relative):\n if self.ui.browser and self.ui.browser.main_column:\n self.ui.browser.main_column.scroll(relative)\n self.thisfile = self.thisdir.pointed_obj", "def 
scrollUp(self):\n if self.__firstShownLine > 0:\n self.__firstShownLine -= 1\n self.__refreshContent()\n else:\n curses.beep()", "def scrollDisplayLeft(self):\n self.displayshift = self.LCD_DISPLAYMOVE | self.LCD_MOVELEFT\n self.write_lcd(self.LCD_DATA_E1, self.LCD_CURSORSHIFT | self.displayshift)\n self.write_lcd(self.LCD_DATA_E2, self.LCD_CURSORSHIFT | self.displayshift)", "def on_mouse_wheel(self,event,canvas):\n canvas.yview(\"scroll\",-1*event.delta/100,\"units\")", "def __scroll(self, result, item, index=1, containerObject=None, relatedAreaEnd=None):\r\n defaultSideWidth=150\r\n counter=0\r\n initialDump = None\r\n\r\n itemCommented = self._getCommented(item) # commented/translated version for test step run\r\n\r\n if not self.isItemScrollable(item,containerObject=containerObject, relatedAreaEnd=relatedAreaEnd):\r\n if containerObject:\r\n self.phone.fail('Cannot scroll to item, item %s (related to %s) is not scrollable' % (self._getCommented(item),self._getCommented(containerObject)))\r\n else:\r\n self.phone.fail('Cannot scroll to item, item is not scrollable %s' %self._getCommented(item))\r\n\r\n maximumDuration = 240000\r\n startTime=time.time()\r\n previousScrollValue = 0\r\n scrollModStep = 0\r\n\r\n containerX, containerY, containerW, containerH = [int(c) for c in result[-1].getAttribute('container-area').split(\",\")]\r\n\r\n screenWidth = min([self.getScreenWidth(),containerX + containerW])\r\n screenHeight = min([self.getScreenHeight(),containerY + containerH])\r\n\r\n screenTop = max(0,containerY)\r\n\r\n while result[0]==self.phone.uiState.HIDDEN:\r\n initialDump = self.currentState.toxml('utf-8')\r\n\r\n # Check if item is outside of screen at right\r\n if result[1][0]>=screenWidth:\r\n yCoordinate = 20\r\n\r\n distance=result[1][0] #Distance from end of screen to coordinate\r\n\r\n #If y coordinates are bigger than screenwith then set them to 0\r\n if screenWidth-distance<0:\r\n x_move=0\r\n else:\r\n x_move=screenWidth-distance\r\n self.phone._touch.drawLine((screenWidth,yCoordinate),(x_move,yCoordinate))\r\n self.phone._run('Scrolling left \"%s\" from UI' % itemCommented, testStepReporting = False)\r\n self.phone.delay(500,False)\r\n result = self.phone.uiState.isItemSelectable(item, index=index, containerObject=containerObject, relatedAreaEnd=relatedAreaEnd)\r\n #If selected item visible return result to caller\r\n if result[0]==self.phone.uiState.VISIBLE:\r\n return result\r\n\r\n # Checking if item is outside of screen at bottom of screen\r\n if result[1][1]>=screenHeight:\r\n scrollEndY=screenHeight-result[1][1] #Distance from end of screen to coordinate\r\n distanceToScroll = scrollEndY\r\n\r\n # increase scrollModStep if we haven't been able to scroll\r\n # NOTE: This is done due to possible brightness adjust bar in settings list\r\n if previousScrollValue == 0:\r\n previousScrollValue = scrollEndY\r\n elif previousScrollValue == scrollEndY:\r\n scrollModStep += 40\r\n else:\r\n previousScrollValue = 0\r\n scrollModStep = 0\r\n\r\n if scrollEndY<screenTop:\r\n scrollEndY=screenTop\r\n\r\n # -60 so that we won't grab the option list from the bottom of the screen\r\n # scrollModStep is used when for adjusting y coordinate\r\n self.phone._touch.drawLine((screenWidth-defaultSideWidth,(screenHeight-60) - scrollModStep),(screenWidth-defaultSideWidth,scrollEndY))\r\n self.phone._run('Scrolling down \"%s\" from UI' % itemCommented, testStepReporting = False)\r\n self.phone.delay(500,False)\r\n result = self.phone.uiState.isItemSelectable(item, index=index, 
containerObject=containerObject, relatedAreaEnd=relatedAreaEnd)\r\n #If selected item visible return result to caller\r\n if result[0]==self.phone.uiState.VISIBLE:\r\n return result\r\n\r\n # Check if we can safely scroll several times in a row\r\n distanceToScrollMore = screenHeight-result[1][1]\r\n scrolledDistance = distanceToScroll-distanceToScrollMore\r\n if abs(scrolledDistance) > 100:\r\n sweepsRequired = int(distanceToScrollMore/scrolledDistance)\r\n sweeps = min(sweepsRequired-2, 10) # Max 10 sweeps in a row without any checks\r\n if sweeps > 0:\r\n for i in range(0,sweeps):\r\n self.phone._touch.drawLine((screenWidth-defaultSideWidth,(screenHeight-60) - scrollModStep),(screenWidth-defaultSideWidth,scrollEndY))\r\n self.phone._run('Scrolling down \"%s\" from UI' % itemCommented, testStepReporting = False)\r\n self.phone.delay(500,False)\r\n result = self.phone.uiState.isItemSelectable(item, index=index, containerObject=containerObject, relatedAreaEnd=relatedAreaEnd)\r\n #If selected item visible return result to caller\r\n if result[0]==self.phone.uiState.VISIBLE:\r\n return result\r\n\r\n\r\n # Checking if item is outside of screen at up of screen\r\n if result[1][1]<=max(screenTop,(self.phone.uiState.statusbarHeight*2)):# Item must be scrolled lower than status bar\r\n #distance=abs(result[1][1])+self.phone.uiState.statusbarHeight #Distance from top of the screen to coordinate which is now negative\r\n distance=abs(result[1][1]-max(screenTop,self.phone.uiState.statusbarHeight)) #Distance from top of the screen to coordinate which is now negative\r\n distance += ((screenHeight-screenTop)/2)\r\n distanceToScroll = distance\r\n\r\n # y_start must be min. 20 pixels from screenTop to ensure that ntf-drawer is not opened\r\n y_start = max(screenTop,(self.phone.uiState.statusbarHeight*3), 20)\r\n\r\n # increase scrollModStep if we haven't been able to scroll\r\n # NOTE: This is done due to possible brightness adjust bar in settings list\r\n if previousScrollValue == 0:\r\n previousScrollValue = distance\r\n elif previousScrollValue == distance:\r\n scrollModStep += 40\r\n else:\r\n previousScrollValue = 0\r\n scrollModStep = 0\r\n\r\n if screenTop==0:\r\n y_move = distance+(self.phone.uiState.statusbarHeight*3)\r\n else:\r\n y_move = distance+screenTop\r\n\r\n if y_move>=screenHeight:\r\n y_move = screenHeight-1\r\n\r\n # scrollModStep is used when for adjusting y coordinate\r\n self.phone._touch.drawLine((screenWidth-defaultSideWidth,y_start + scrollModStep),(screenWidth-defaultSideWidth,y_move))\r\n self.phone._run('Scrolling up \"%s\" from UI' % itemCommented, testStepReporting = False)\r\n self.phone.delay(500,False)\r\n result = self.phone.uiState.isItemSelectable(item, index=index, containerObject=containerObject, relatedAreaEnd=relatedAreaEnd)\r\n #If selected item visible return result to caller\r\n if result[0]==self.phone.uiState.VISIBLE:\r\n return result\r\n\r\n # Check if we can safely scroll several times in a row\r\n distanceToScrollMore=abs(result[1][1]-max(screenTop,self.phone.uiState.statusbarHeight))\r\n distanceToScrollMore += ((screenHeight-screenTop)/2)\r\n scrolledDistance = distanceToScroll-distanceToScrollMore\r\n if abs(scrolledDistance) > 100:\r\n sweepsRequired = int(distanceToScrollMore/scrolledDistance)\r\n sweeps = min(sweepsRequired-2, 10) # Max 10 sweeps in a row without any checks\r\n if sweeps > 0:\r\n for i in range(0,sweeps):\r\n self.phone._touch.drawLine((screenWidth-defaultSideWidth,y_start + 
scrollModStep),(screenWidth-defaultSideWidth,y_move))\r\n self.phone._run('Scrolling up \"%s\" from UI' % itemCommented, testStepReporting = False)\r\n self.phone.delay(500,False)\r\n result = self.phone.uiState.isItemSelectable(item, index=index, containerObject=containerObject, relatedAreaEnd=relatedAreaEnd)\r\n #If selected item visible return result to caller\r\n if result[0]==self.phone.uiState.VISIBLE:\r\n return result\r\n\r\n # if phone UI has changed, let's not increase the counter\r\n if initialDump == self.currentState.toxml('utf-8'):\r\n counter=counter+1\r\n\r\n # give up when counter has maximum value or maximum time is up\r\n if counter == 10 or time.time() > startTime + maximumDuration/1000.0:\r\n self.phone.capture('Failed to scroll to item')\r\n #if initial dump and current dump are identical, phone UI is frozen -> fail testcase\r\n if initialDump == self.currentState.toxml('utf-8'):\r\n self.phone.comment('KBD_KEY_KEYLOCK_TOGGLE pressed to check if phone UI is freezed or not')\r\n self.phone._pressKey('KBD_KEY_KEYLOCK_TOGGLE')\r\n self.phone._run('Press KBD_KEY_KEYLOCK_TOGGLE')\r\n self.phone.delay(500, False)\r\n self.getCurrentState(refresh = True)\r\n #if initial and current dumps are identical after pressing KBD_KEY_BACK then UI is frozen\r\n if initialDump == self.currentState.toxml('utf-8'):\r\n errorString = 'Phone UI freeze detected, unable to scroll'\r\n self.phone.fail(errorString)\r\n\r\n if containerObject:\r\n self.phone.fail('Cannot scroll to item %s (related to %s)' % (self._getCommented(item), self._getCommented(containerObject)))\r\n else:\r\n self.phone.fail('Cannot scroll to item %s' %self._getCommented(item))\r\n\r\n return result", "def _on_scroll(self, event):", "def scroll(self, delta_x, delta_y):\n if delta_x < 0:\n shift_x = 0\n xend = self.width + delta_x\n dt_x = 1\n else:\n shift_x = self.width - 1\n xend = delta_x - 1\n dt_x = -1\n if delta_y < 0:\n y = 0\n yend = self.height + delta_y\n dt_y = 1\n else:\n y = self.height - 1\n yend = delta_y - 1\n dt_y = -1\n while y != yend:\n x = shift_x\n while x != xend:\n self.format.set_pixel(\n self, x, y, self.format.get_pixel(self, x - delta_x, y - delta_y)\n )\n x += dt_x\n y += dt_y", "def __scroll_y(self, *args, **kwargs):\n self.canvas.yview(*args) # scroll vertically\n self.__show_image() # redraw the image", "def scrollY(self,yrel):\n # get the display size\n dispw, disph = c_int(), c_int()\n SDL_GetRendererOutputSize(self.rend,dispw,disph)\n\n # scroll vertically\n self.scroll += yrel\n\n # limit scrolling\n if self.scroll <= 0:\n self.scroll = 0\n if self.scroll+disph.value >= (len(self.itemList.items)+1)*150+178:\n self.scroll = (len(self.itemList.items)+1)*150+178-disph.value", "def scroll(self, direction):\n\n self.counter += direction # Counter of 'up' and 'down'\n do_redraw = self.counter == self.content_size - self.h\n\n if self.size > 0:\n self.count += direction\n pos = self.pos\n if math.fabs(self.count) == math.floor(self.content_size / self.h):\n pos += direction\n self.count = 0\n\n pos = max(0, pos) # Top limit\n pos = min(pos, self.h - self.size) # Bottom limit\n do_redraw = pos != self.pos # Redraw if pos has changed\n self.pos = pos\n\n if do_redraw:\n self._create()" ]
[ "0.7195914", "0.69599557", "0.6883477", "0.6802342", "0.67569107", "0.6602154", "0.6587491", "0.6541192", "0.6533414", "0.65180826", "0.64459866", "0.6440966", "0.6345634", "0.6307446", "0.6236501", "0.62016225", "0.61945426", "0.616172", "0.614911", "0.61103624", "0.61058736", "0.6065555", "0.60612786", "0.60516316", "0.6040696", "0.6033313", "0.6000514", "0.59916383", "0.59809095", "0.5951038" ]
0.7035041
1
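The scroll_display() document ORs three flags into one command byte. With the usual HD44780 constant values (an assumption — the record does not list them), the two possible bytes can be computed directly:

    # Assumed standard HD44780 values; the record itself does not define them.
    LCD_CURSORSHIFT = 0x10
    LCD_DISPLAYMOVE = 0x08
    LCD_MOVELEFT = 0x00
    LCD_MOVERIGHT = 0x04

    def scroll_command(direction: int) -> int:
        # Same composition as scroll_display(): shift the display, not the cursor.
        assert direction in (LCD_MOVELEFT, LCD_MOVERIGHT)
        return LCD_CURSORSHIFT | LCD_DISPLAYMOVE | direction

    assert scroll_command(LCD_MOVELEFT) == 0x18   # shift the visible window left
    assert scroll_command(LCD_MOVERIGHT) == 0x1C  # shift the visible window right

Because only the display window moves, the DDRAM contents stay exactly as written, which is what the query means by scrolling "without changing the RAM".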
Set the cursor position with a 0-based (col,row) tuple, equivalent to x,y positioning.
def set_cursor( self, pos ): assert type( pos ) is tuple, "pos must be a tuple (col,row)" row_offsets = [ 0x00, 0x40, 0x14, 0x54 ] row = pos[1] if ( row > self.rows ): row = self.rows-1 # we count rows starting w/0 self.command(LCD_SETDDRAMADDR | (pos[0] + row_offsets[row])) # Col + row offset
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setCursorPos(serDisplay, row, col, clearRow = False):\n offset = 127 + col\n if row == 2:\n offset = 128 + 63 + col\n elif row == 3:\n offset = 128 + 19 + col\n elif row == 4:\n offset = 128 + 83 + col\n cmd = array.array('B', (COMMAND_PREFIX, offset))\n writeToDisplay(serDisplay, cmd.tostring())", "def int_33H_4(self):\r\n horizontal_position = self.registers['CX'].get_int(-1)\r\n vertical_position = self.registers['DX'].get_int(-1)\r\n print(horizontal_position, vertical_position)\r\n MainWindow.set_cursor_poisition(horizontal_position, vertical_position)", "def cursor_position(self) -> Point:\n cpos = self.ui_content.cursor_position\n try:\n y, x = self._rowcol_to_yx[cpos.y, cpos.x]\n except KeyError:\n # For `DummyControl` for instance, the content can be empty, and so\n # will `_rowcol_to_yx` be. Return 0/0 by default.\n return Point(x=0, y=0)\n else:\n return Point(x=x - self._x_offset, y=y - self._y_offset)", "def set_cursor_position(self, x: int, y: int) -> None:\n self.screen.move(y, x)", "def set_position(self, x, y):\n self.tx = -x\n self.ty = -y", "def set_cursor( self, point ):\n\t\tif 0 <= abs(point[0]) <= self.width :\n\t\t\tself.xprint = point[0]\n\t\tif 0 <= abs(point[1]) <= self.height :\n\t\t\tself.yprint = point[1]", "def pos(self, x, y):\n\n if isinstance(x, float):\n x = int(x)\n\n self.screen.write(colorama.Cursor.POS(x, y), ansi=True)\n self.x = x\n self.y = y\n\n return x, y", "def position(self, x, y):\n self.x = x \n self.y = y\n self.pos[0] = x \n self.pos[1] = y", "def set_cursor(self, row, col):\n self._vim.current.window.cursor = (row, col)", "def set_cursor_coordinates(self, x, y):\n text = self.getText()\n lines = text.split(\"\\n\")\n i = 0\n for row, line in enumerate(lines):\n if row == y:\n break\n i += len(line) + 1 # we need to include \"\\n\"\n if \"\\r\" in line: # and also \"\\r\"\n i -= 1\n pos = i + x\n if pos > len(text):\n pos = len(text)\n self.setCursorPos(pos)", "def set_cursor(self,x,y):\n if 1 <= x <= 20 and y in [1,2]:\n self.send(\"\\x1f\\x24%c%c\" % (x,y))\n else:\n raise ValueError('cursor position must be between 1,20 and 1,2')", "def setPosition(position):", "def setDesiredPosition(self, x, y):\n (self.setX, self.setY) = (x , y)", "def setPosition(self):\n self.data['pos-x'] = \"%s\" % self.x()\n self.data['pos-y'] = \"%s\" % self.y()", "def move_to(xy):\n (x,y) = xy\n win32api.SetCursorPos((x,y))", "def _move_cursors_to_pos(self):\n for axis in range(3):\n x, y = self._vox[list(self._xy_idx[axis])]\n self._images['cursor_v'][axis].set_xdata([x, x])\n self._images['cursor_h'][axis].set_ydata([y, y])\n self._zoom(0) # doesn't actually zoom just resets view to center\n self._update_images(draw=True)\n self._update_moved()", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def position(self) -> Tuple[int, int]:\n return self.row, self.col", "def get_cursor_pos(self):\n return (self.text_maker.pos[0] + 9, self.text_maker.pos[1] + 120 + 8)", "def moveCursor(self):\n\n\t\tself._before = self.rect.center\n\t\tself.rect.center = self._pos" ]
[ "0.7308563", "0.70076805", "0.69503945", "0.6880716", "0.6815349", "0.6811424", "0.67916626", "0.67796946", "0.6775386", "0.67483145", "0.6735111", "0.6707917", "0.6695514", "0.667775", "0.6669448", "0.66299134", "0.6621606", "0.6621606", "0.6621606", "0.6621606", "0.6621606", "0.6621606", "0.6621606", "0.6621606", "0.6621606", "0.6621606", "0.6621606", "0.6582472", "0.6581449", "0.6539123" ]
0.76741517
0
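The set_cursor() document converts a 0-based (col,row) pair into a DDRAM "set address" command by adding a per-row base offset and setting the high bit. A standalone sketch of the same address math, assuming the conventional LCD_SETDDRAMADDR value of 0x80 (not shown in the record) and omitting the driver's row clamp:

    LCD_SETDDRAMADDR = 0x80                 # assumed standard HD44780 value
    ROW_OFFSETS = [0x00, 0x40, 0x14, 0x54]  # same table as the driver

    def cursor_command(col: int, row: int) -> int:
        # DDRAM set-address = high bit, plus the row's base offset, plus the column.
        return LCD_SETDDRAMADDR | (col + ROW_OFFSETS[row])

    assert cursor_command(0, 0) == 0x80   # top-left corner
    assert cursor_command(5, 1) == 0xC5   # column 5 on the second line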
Display a string at the current cursor location. The cursor may be repositioned before printing if pos is given a (col,row) tuple.
def print( self, str, pos=None ): if pos: self.set_cursor( pos ) self.write( str.encode("ASCII") )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def PrintAt(self,x=0,y=0,text=''):\n self.SetCursor(x,y)\n self.Print(text)", "def display(self):\n row = (' ' * self.__x) + (Rectangle.print_symbol * self.__width) + '\\n'\n print(('\\n' * self.__y) + (row * self.__height), end=\"\")", "def display(self):\n yp = self.y + self.win_y\n xp = self.x + self.win_x\n # Print the current cursor cell in the top left corner\n self.scr.move(0, 0)\n self.scr.clrtoeol()\n s = \" {},{} \".format(yp + 1, xp + 1)\n addstr(self.scr, s, curses.A_REVERSE)\n\n # Adds the current cell content after the 'current cell' display\n wc = self.max_x - len(s) - 2\n s = self.cellstr(yp, xp, wc)\n addstr(self.scr, \" \" + s, curses.A_NORMAL)\n\n # Print a divider line\n self.scr.hline(1, 0, curses.ACS_HLINE, self.max_x)\n\n # Print the header if the correct offset is set\n if self.header_offset == self.header_offset_orig:\n self.scr.move(self.header_offset - 1, 0)\n self.scr.clrtoeol()\n for x in range(0, self.vis_columns):\n xc, wc = self.column_xw(x)\n s = self.hdrstr(x + self.win_x, wc)\n insstr(self.scr, self.header_offset - 1, xc, s, curses.A_BOLD)\n\n # Print the table data\n for y in range(0, self.max_y - self.header_offset):\n self.scr.move(y + self.header_offset, 0)\n self.scr.clrtoeol()\n for x in range(0, self.vis_columns):\n if x == self.x and y == self.y:\n attr = curses.A_REVERSE\n else:\n attr = curses.A_NORMAL\n xc, wc = self.column_xw(x)\n s = self.cellstr(y + self.win_y, x + self.win_x, wc)\n insstr(self.scr, y + self.header_offset, xc, s, attr)\n\n self.scr.refresh()", "def print_pos(pos):\n # TO DO: EXCLUDE FIRST LINE\n s = \"%BLOCK POSITIONS_FRAC\\n\" + str(pos) + \"\\n%ENDBLOCK POSITIONS_FRAC\"\n return s", "def Pos(row, col):\n return ESC + str(row) + ';' + str(col) + 'H'", "def display(self):\n mg_w = self.width\n mg_h = self.height\n str_to_prt = \"\\n\" * self.y + (\" \" * self.x + \"#\" * mg_w + '\\n') * mg_h\n print(str_to_prt[:-1])", "def display(self):\n prow = self.__width * '#'\n nstr = self.y * \"\\n\"\n for x in range(self.__height):\n nstr += self.x * \" \"\n nstr += prow\n if x == (self.__height - 1):\n break\n nstr += \"\\n\"\n print(nstr)", "def cursorPositionChanged(self):\r\n cursor = self.text_area.textCursor()\r\n line_no = cursor.blockNumber()\r\n col_no = cursor.columnNumber()\r\n self.statusBar.showMessage(\"Line \"+str(line_no)+\", Column \"+str(col_no))", "def display(self):\n \"\"\" Coordinates for position are x-axis (LR) and y-axis (NS) \"\"\"\n for coordY in range(self.y):\n print()\n for column in range(self.height):\n for coordLR in range(self.x):\n print(\" \", end=\"\")\n for row in range(self.width):\n print(\"#\", end=\"\")\n print()", "def safe_put(stdscr, string, loc):\n if loc[0] == curses.LINES-1 and loc[1] == curses.COLS-1:\n stdscr.addstr(loc[0], loc[1]-1, string.encode(\"utf-8\"))\n stdscr.insstr(loc[0], loc[1]-1, \" \")\n else:\n stdscr.addstr(loc[0], loc[1], string.encode(\"utf-8\"))", "def set_cursor( self, pos ):\n\t\tassert type( pos ) is tuple, \"pos must be a tuple (col,row)\"\n\t\trow_offsets = [ 0x00, 0x40, 0x14, 0x54 ]\n\t\trow = pos[1]\n\t\tif ( row > self.rows ):\n\t\t\trow = self.rows-1 # we count rows starting w/0\n\n\t\tself.command(LCD_SETDDRAMADDR | (pos[0] + row_offsets[row])) # Col + row offset", "def loc(y,x):\n return '\\033[%s;%sH' % (str(y),str(x))", "def fl_drw_text_cursor(align, xpos, ypos, width, height, colr, style, size,\n txtstr, curscolr, pos):\n _fl_drw_text_cursor = library.cfuncproto(\n library.load_so_libforms(), \"fl_drw_text_cursor\",\\\n None, [cty.c_int, 
xfdata.FL_Coord, xfdata.FL_Coord, xfdata.FL_Coord,\n xfdata.FL_Coord, xfdata.FL_COLOR, cty.c_int, cty.c_int, xfdata.STRING,\n cty.c_int, cty.c_int],\n \"\"\"void fl_drw_text_cursor(int align, FL_Coord x, FL_Coord y,\n FL_Coord w, FL_Coord h, FL_COLOR c, int style, int size,\n const char * str, int cc, int pos)\"\"\")\n library.check_if_flinitialized()\n library.checkfatal_allowed_value_in_list(align, xfdata.ALIGN_list)\n i_align = library.convert_to_intc(align)\n i_xpos = library.convert_to_FL_Coord(xpos)\n i_ypos = library.convert_to_FL_Coord(ypos)\n i_width = library.convert_to_FL_Coord(width)\n i_height = library.convert_to_FL_Coord(height)\n #library.checknonfatal_allowed_value_in_list(colr, xfdata.COLOR_list)\n ul_colr = library.convert_to_FL_COLOR(colr)\n library.checkfatal_allowed_value_in_list(style, xfdata.TEXTSTYLE_list)\n i_style = library.convert_to_intc(style)\n i_size = library.convert_to_intc(size)\n s_txtstr = library.convert_to_bytestrc(txtstr)\n #library.checknonfatal_allowed_value_in_list(curscolr, xfdata.COLOR_list)\n i_curscolr = library.convert_to_intc(curscolr)\n i_pos = library.convert_to_intc(pos)\n library.keep_elem_refs(align, i_align, xpos, i_xpos, ypos, i_ypos, \\\n width, i_width, height, i_height, colr, ul_colr, style, \\\n i_style, size, i_size, txtstr, s_txtstr, curscolr, \\\n i_curscolr, pos, i_pos)\n _fl_drw_text_cursor(i_align, i_xpos, i_ypos, i_width, i_height, \\\n ul_colr, i_style, i_size, s_txtstr, i_curscolr, i_pos)", "def print_position(position):\n print('Packet Number # %s' % position)", "def display_text(self, text, size=None, colr=None,\r\n x = None, y = None,\r\n new_line = None):\r\n if size is None:\r\n size = self.dt_size\r\n self.size = size\r\n if colr is None:\r\n colr = self.text_color\r\n self.text_color = colr\r\n if new_line is not None:\r\n if x is not None or y is not None:\r\n raise Exeception(\"Must not have new_line and x,y\")\r\n else:\r\n if x is not None or y is not None:\r\n new_line = False\r\n else:\r\n new_line = True\r\n if new_line:\r\n x = self.dt_x = self.disp_left\r\n self.dt_y -= size*self.font_size_to_ch\r\n y = self.dt_y\r\n #print(f\"new_line: y:{y} dt_y:{self.dt_y}\")\r\n else:\r\n if x is None:\r\n x = dt_x\r\n self.dt_x = x\r\n if y is None:\r\n y = self.dt_y\r\n self.dt_y = y\r\n #print(f\"display_text: text:{text} x:{x}, y:{y}\")\r\n tu.penup()\r\n if y < self.disp_bottom + self.disp_boarder:\r\n continue_msg = \"Press ENTER to continue\"\r\n inp = input(continue_msg)\r\n self.clear_text() # Only option \r\n \r\n tu.goto(x,y)\r\n tu.pendown()\r\n \r\n tu.color(colr)\r\n font = (\"Arial\", size, \"normal\")\r\n #print(f\"colr:{colr} text:{text} font:{font}\")\r\n #print(f\"xcor():{tu.xcor()} ycor():{tu.ycor()}\")\r\n tu.write(text, align=\"left\", font=font)", "def DrawStringAt(self, x, y, s, color=(229, 153, 153, 255)):\r\n self.screen.blit(self.font.render(s, True, color), (x, y))", "def addstr(self, s: str, x: int = None, y: int = None, mod: int = None) -> None:\n if x is not None and y is not None:\n self.set_cursor_position(x, y)\n\n if mod is not None:\n self.screen.addstr(s, mod)\n else:\n self.screen.addstr(s)", "def print(self, my_screen, text_string):\n text_bitmap = self.font.render(text_string, True, BLACK)\n my_screen.blit(text_bitmap, [self.x_pos, self.y_pos])\n self.y_pos += self.line_height", "def display(self):\n print('')\n print(\" ---------------------------------\")\n counter = 0\n for row in self.positions:\n counter += 1\n line = f'{counter}: |'\n for space in row:\n if 
isinstance(space, str):\n line += f' {space} |'\n else:\n starter = ' '\n ender = '|'\n if space.team == 'white':\n piece = stylize(space.symbol+' ', colored.fg(\"light_blue\"))\n else:\n piece = stylize(space.symbol+' ', colored.fg(\"light_red\"))\n line += starter+piece+ender\n print(line)\n print(\" ---------------------------------\")\n print(\" | A | B | C | D | E | F | G | H |\\n\")", "def display(self):\n for r in range(len(self.grid)):\n for c in range(len(self.grid[r])):\n if (r, c) == self.location:\n print('\\033[96m*\\x1b[0m', end=' ') # print a blue *\n else:\n print(self.grid[r][c], end=' ') # prints a space or wall\n print()\n print()", "def draw(self):\n res = ''\n # ANSI code to clear the screen\n #res += chr(27) + \"[2J\"\n for position, value in enumerate(self.board.tttboard):\n if value is None:\n res += str(position)\n #sys.stdout.write(str(position))\n else:\n res += str(value)\n #sys.stdout.write(str(value))\n\n if (position + 1) % 3 != 0:\n res += str('|')\n #sys.stdout.write('|')\n else:\n #print ''\n\n res += str('\\n')\n if position == 2 or position == 5:\n #print '-' * 5\n\n res += '-' * 5\n res += str('\\n')\n return res", "def my_print(self):\n if self.__size > 0:\n print(\"\\n\" * self.__position[1], end=\"\")\n for i in range(self.__size):\n print(\" \" * self.__position[0], end=\"\")\n print(\"#\" * self.__size)\n else:\n print()", "def setCursorPos(serDisplay, row, col, clearRow = False):\n offset = 127 + col\n if row == 2:\n offset = 128 + 63 + col\n elif row == 3:\n offset = 128 + 19 + col\n elif row == 4:\n offset = 128 + 83 + col\n cmd = array.array('B', (COMMAND_PREFIX, offset))\n writeToDisplay(serDisplay, cmd.tostring())", "def my_print(self):\n if self.size == 0:\n print(\"\")\n return\n for j in range(self.__position[1]):\n print(\"\")\n for i in range(self.size):\n if self.__position[0] > 0:\n print(\" \" * self.__position[0], end=\"\")\n print('#' * self.size)", "def display_s(s, font, screen, lcd, size=5, x=0, y=0):\n i = 0\n spacing = size * .11\n s = str(s)\n char = s[0]\n char_w, char_h = font[char].size\n for c in s:\n display_c(c,font,screen,lcd,size,(int(i*spacing*char_w)+x),y)\n i += 1\n return screen", "def my_print(self):\n if self.__size == 0:\n print()\n else:\n print(\"\\n\" * self.__position[1], end='')\n for x in range(self.__size):\n print(\" \" * self.__position[0], end='')\n print(\"#\" * self.__size)", "def print_location(loc):\n print('')\n pass", "def showText(pos):\n\treturn OnscreenText( \\\n\t\ttext=\" \", \\\n\t\tstyle=1, fg=(0,0,0,1), pos=(-1.3, pos), \\\n\t\talign=TextNode.ALeft, scale = .06, mayChange = True)", "def showText(self, surface, point, text, color=None, size=20):\n if not color: color = self.color\n v = self / 2\n point = v(point)\n surface.print(text, tuple(point), color=color, size=size)", "def my_print(self):\n if self.__size == 0:\n print(\"\")\n return\n [print(\"\") for x in range(0, self.__position[1])]\n for i in range(0, self.__size):\n [print(\" \", end=\"\") for i in range(0, self.__position[0])]\n [print(\"#\", end=\"\") for j in range(0, self.__size)]\n print(\"\")" ]
[ "0.7152005", "0.6511145", "0.6333124", "0.6215792", "0.61729866", "0.6170965", "0.615026", "0.6097372", "0.6082236", "0.6067916", "0.60624087", "0.60408413", "0.6027435", "0.5971494", "0.5957337", "0.5951418", "0.5941546", "0.5902664", "0.5889489", "0.58843726", "0.5864881", "0.5833638", "0.58193827", "0.5804545", "0.58035344", "0.58010536", "0.5775303", "0.57712907", "0.5768299", "0.5747781" ]
0.7490052
0
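For the print() document, the only moving parts are an optional cursor move and an ASCII encode before the raw write. A hypothetical recording double (not part of the driver) shows the resulting call pattern:

    # RecordingLCD is a hypothetical test double used only to trace the calls.
    class RecordingLCD:
        def __init__(self):
            self.calls = []

        def set_cursor(self, pos):
            self.calls.append(("set_cursor", pos))

        def write(self, data):
            self.calls.append(("write", data))

        def print(self, text, pos=None):
            if pos:
                self.set_cursor(pos)          # reposition first when pos is given
            self.write(text.encode("ASCII"))  # controller expects raw byte data

    lcd = RecordingLCD()
    lcd.print("Hi", pos=(3, 1))
    assert lcd.calls == [("set_cursor", (3, 1)), ("write", b"Hi")]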
See PlotlyGraphObjectError.__init__ for param docs.
def __init__(self, obj, path, notes=()): format_dict = {'index': path[-1], 'object_name': obj._name} message = ("Invalid entry found in '{object_name}' at index, '{index}'" .format(**format_dict)) note = "It's invalid because it doesn't contain a valid 'type' value." notes = [note] + list(notes) super(PlotlyDataTypeError, self).__init__( message=message, path=path, notes=notes )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, message='', path=(), notes=()):\n self.message = message\n self.plain_message = message # for backwards compat\n self.path = list(path)\n self.notes = notes\n super(PlotlyGraphObjectError, self).__init__(message)", "def __init__(self, g, msg):\n self.graph = g\n self.message = 'Graph ' + repr(self.graph) + ' error: ' + msg", "def __init__(self, obj, path, notes=()):\n format_dict = {'attribute': path[-1], 'object_name': obj._name}\n message = (\"'{attribute}' has invalid value inside '{object_name}'\"\n .format(**format_dict))\n notes = [obj.help(path[-1], return_help=True)] + list(notes)\n super(PlotlyDictValueError, self).__init__(\n message=message, notes=notes, path=path\n )", "def __init__(self, obj, path, notes=()):\n format_dict = {'index': path[-1], 'object_name': obj._name}\n message = (\"Invalid entry found in '{object_name}' at index, '{index}'\"\n .format(**format_dict))\n notes = [obj.help(return_help=True)] + list(notes)\n super(PlotlyListEntryError, self).__init__(\n message=message, path=path, notes=notes\n )", "def __init__(self, obj, path, notes=()):\n format_dict = {'attribute': path[-1], 'object_name': obj._name}\n message = (\"'{attribute}' is not allowed in '{object_name}'\"\n .format(**format_dict))\n notes = [obj.help(return_help=True)] + list(notes)\n super(PlotlyDictKeyError, self).__init__(\n message=message, path=path, notes=notes\n )", "def __init__(self, error_msg):\n super(SdkException, self).__init__()\n self.error_msg = error_msg", "def __init__(self, message=\"\"):\n super(DataError, self).__init__(message)", "def __init__(self, error_msg):\n super(ConnectionException, self).__init__(error_msg)", "def __init__(self, *args):\n this = _libsbml.new_SBMLError(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)", "def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)", "def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)", "def __init__(self, message):\n ModelException.__init__(self, message)", "def __init__(self, *args):\n this = _libsbml.new_XMLError(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, msg):\n super(F5CcclSchemaError, self).__init__(msg)\n self.msg = 'Schema provided is invalid: ' + msg", "def __init__(self, error, status_code, *args, **kwargs):\n super(BusinessException, self).__init__(*args, **kwargs)\n self.error = error\n self.status_code = status_code", "def __init__(self, msg, path_to_item=None):\n self.path_to_item = path_to_item\n full_msg = msg\n if path_to_item:\n full_msg = \"{0} at {1}\".format(msg, render_path(path_to_item))\n super(ApiAttributeError, self).__init__(full_msg)", "def __init__(self, msg):\n\n super(DBValueError, self).__init__(msg)\n self.msg = msg", "def __init__(self, g, l, msg):\n super().__init__(g, '')\n self.link = l\n self.message = ('Arc ' if l.directed else 'Edge ') + str(l) + ' in graph ' + repr(self.graph) + ' error: ' + msg", "def __init__(self, componentId, locationId, **kw):\n self.componentId = componentId\n self.locationId = locationId\n super(ComponentInLocationError, self).__init__(**kw)", "def __init__(self, g, v, msg):\n super().__init__(g, '')\n self.node = v\n self.message = 'Node ' + str(v) + ' in graph ' + repr(self.graph) + ' error: ' + msg", "def parse_error(self, message, exc_cls=VisualizerParseError):\n raise exc_cls(\"Error parsing %s '%s' (%s:%i): %s\" % \n (self.tag, self.ref, self.filename, self.lineno, message))", "def 
__init__(self):\n raise", "def __init__(self, name):\n super(NodeExistsError, self).__init__(name)\n self.name = name", "def __init__(self, message=\"\"):\n super(ApplicationError, self).__init__(message)", "def __init__(self):\n raise Exception(\"Cannot create this object\")", "def __init__(self):\n raise Exception(\"Cannot create this object\")", "def __init__(self, *args):\n\n super(GoveeException, self).__init__()\n\n if args:\n self.message = args[0]\n else:\n self.message = None", "def __init__(self, msg):\n\n super(DBConnectionError, self).__init__(msg)\n self.msg = msg", "def __init__(self, code, reason):\n super(RequestError, self).__init__(code, reason)" ]
[ "0.75614715", "0.670434", "0.6560559", "0.64961225", "0.6422344", "0.63814306", "0.62893766", "0.6205201", "0.600632", "0.5995472", "0.5995472", "0.5995472", "0.596991", "0.5916648", "0.5776747", "0.5770383", "0.57605684", "0.5759503", "0.57565665", "0.5754255", "0.57489777", "0.57404745", "0.5711863", "0.56932306", "0.568876", "0.5684412", "0.5684412", "0.5680606", "0.5674153", "0.56609666" ]
0.6825649
1
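The PlotlyDataTypeError document builds its message from just two pieces: the offending object's name and the last element of the error path. The formatting can be exercised on its own (the path and object name below are made-up illustration values):

    # Standalone demo of the message construction; the inputs are hypothetical.
    path = ('data', 2)
    object_name = 'data'

    format_dict = {'index': path[-1], 'object_name': object_name}
    message = ("Invalid entry found in '{object_name}' at index, '{index}'"
               .format(**format_dict))

    assert message == "Invalid entry found in 'data' at index, '2'"

The fixed note about the missing 'type' value is then prepended to any caller-supplied notes before delegating to the base class.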
Takes an images, divides it into tiles, return an array of tiles. >>> image_to_tiles(test_ns.img, test_ns.tile_size) array([[[[[255, 255, 255]]], [[[255, 255, 255]]], [[[255, 255, 255]]], [[[255, 255, 255]]]], [[[[255, 255, 255]]], [[[ 0, 0, 0]]], [[[ 0, 0, 0]]], [[[ 0, 0, 0]]]], [[[[255, 255, 255]]], [[[ 0, 0, 0]]], [[[255, 0, 0]]], [[[ 0, 0, 0]]]], [[[[255, 255, 255]]], [[[ 0, 0, 0]]], [[[ 0, 0, 0]]], [[[ 0, 0, 0]]]]], dtype=uint8)
def image_to_tiles(img, tile_size): padding_argument = [(0,0),(0,0),(0,0)] for input_dim in [0,1]: padding_argument[input_dim] = (0, (tile_size - img.shape[input_dim]) % tile_size) img = np.pad(img, padding_argument, mode='constant') tiles = img.reshape((img.shape[0]//tile_size, tile_size, img.shape[1]//tile_size, tile_size, img.shape[2] )).swapaxes(1,2) return tiles
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _split_image_into_tiles(\n self, image: np.ndarray\n ) -> t.Sequence[t.Tuple[t.Tuple[t.Any, ...], np.ndarray]]:\n h, w, c = image.shape\n tile_height = (\n math.ceil(h / (self._n_tiles // 2 - 1))\n if self._n_tiles > 4\n else math.ceil(h / (self._n_tiles // 2))\n )\n tile_width = math.ceil(w / (self._n_tiles // 2))\n tiles = [] # type: ignore\n for i in range(0, h, tile_height):\n for j in range(0, w, tile_width):\n tiles.append(\n (\n (i, i + tile_height, j, j + tile_width),\n image[i : i + tile_height, j : j + tile_width, :],\n )\n )\n return tiles", "def split_to_tiles(array: np.ndarray, tile_height: int, tile_width: int) -> np.ndarray:\n arr_height, arr_width, *dimensions = array.shape\n nchannels = dimensions[0] if dimensions else 1\n new_shape = get_shape_for_tile_split(\n arr_height, arr_width, nchannels, tile_height, tile_width\n )\n return array.reshape(new_shape).swapaxes(1, 2)", "def tile_images(img, img_size=32, rows=4, cols=4, spacing=1):\n images = np.ones([3, rows * (img_size + spacing) - spacing, cols * (img_size + spacing)], dtype=np.float32)\n coords = [(i, j) for i in range(rows) for j in range(cols)]\n\n for (i, j), image in zip(coords, img):\n x = i * (img_size + spacing)\n y = j * (img_size + spacing)\n images[:, x: x+img_size, y:y+img_size] = image\n\n return images", "def tile_images(image_stack):\n assert len(image_stack.shape) == 4\n image_list = [image_stack[i, :, :, :] for i in range(image_stack.shape[0])]\n tiled_images = np.concatenate(image_list, axis=1)\n return tiled_images", "def slice_image(image, tile_size):\n height = image.shape[0]\n width = image.shape[1]\n assert height > tile_size and width > tile_size\n\n num_tiles_x, num_tiles_y = number_of_patches(width, height, tile_size)\n width, height = output_image_size(num_tiles_x, num_tiles_y, tile_size)\n\n # Crop image to new size\n image = image[:height, :width]\n\n tiles = np.zeros((num_tiles_y, num_tiles_x, tile_size, tile_size, 3))\n for i, ty in enumerate(range(0, height, tile_size)):\n for j, tx in enumerate(range(0, width, tile_size)):\n tiles[i, j] = image[ty : ty + tile_size, tx : tx + tile_size]\n\n return tiles", "def img_to_tiles(cls, tiff_path, region, res, tile, tile_date_path, img_format, mp):\n\n # Get metadata from original image\n metadata = TiffMetadata(tiff_path)\n\n WIDTH, HEIGHT = region.calculate_width_height(res)\n ultra_large = False\n if WIDTH * HEIGHT > 2 * Image.MAX_IMAGE_PIXELS:\n ultra_large = True\n\n # Use the following dictionary to get the coordinates of each tile\n geoTran_d = TileUtils.getGeoTransform(tiff_path)\n\n # Check for valid tiling dimensions\n if (tile.width > WIDTH or tile.height > HEIGHT):\n raise argparse.ArgumentTypeError(\"Tiling dimensions greater than image dimensions\")\n\n # Determine the number of tiles per row and column\n if tile.handling == Handling.discard_incomplete_tiles:\n num_rows = (HEIGHT - tile.height * tile.overlap) // (tile.height * (1 - tile.overlap))\n num_cols = (WIDTH - tile.width * tile.overlap) // (tile.width * (1 - tile.overlap))\n else:\n num_rows = math.ceil((HEIGHT - tile.height * tile.overlap) / (tile.height * (1 - tile.overlap)))\n num_cols = math.ceil((WIDTH - tile.width * tile.overlap) / (tile.width * (1 - tile.overlap)))\n\n num_iterations = num_rows * num_cols\n \n # Find the pixel coordinate extents of each tile to be generated\n print(\"Gathering tiling information...\", end=\"\", flush=True)\n if mp:\n with Pool(processes=NUM_CORES) as pool:\n args = zip(repeat((metadata, tile, WIDTH, HEIGHT, geoTran_d, 
tile_date_path, num_rows, num_cols)), list(range(num_iterations)))\n pixel_coords = pool.map(getTilingSplitCoordsMP, args)\n else:\n pixel_coords = []\n for index in range(num_iterations):\n pixel_coords.append(getTilingSplitCoordsTuple(metadata,tile, WIDTH, HEIGHT, geoTran_d, tile_date_path, num_rows, num_cols, index))\n print(\"done!\")\n\n if mp:\n print(\"Generating {} tiles using {} processes...\".format(len(pixel_coords), NUM_CORES), flush=True)\n else:\n print(\"Generating {} tiles sequentially...\".format(len(pixel_coords)), flush=True)\n\n if ultra_large: \n # Create the intermediate tiles\n inter_dir, img_width, img_height = TileUtils.img_to_intermediate_images(tiff_path, tile, WIDTH, HEIGHT, metadata.date, img_format)\n\n # Add each coordinate to its proper list\n intermediate_files = [f for f in os.listdir(inter_dir) if f.endswith(img_format)]\n\n # Get the tiling information for all intermediate tiles\n intermediate_info = TileUtils.getIntermediateTilingInfo(tile, pixel_coords, WIDTH, HEIGHT, img_width, img_height, intermediate_files)\n\n # Tile the complete images\n print(\"\\tTiling from complete images\")\n for single_inter_imgs in tqdm(intermediate_info[0]):\n filename = single_inter_imgs[0][0]\n inter_metadata = IntermediateMetadata(filename)\n\n img_path = os.path.join(inter_dir, filename)\n src = Image.open(img_path)\n img_arr = np.array(src)\n\n if mp:\n # Create a shared array\n X_shape = img_arr.shape\n X = RawArray('B', X_shape[0] * X_shape[1] * X_shape[2])\n\n # Wrap shared array as numpy array\n X_np = np.frombuffer(X, dtype='uint8').reshape(X_shape)\n\n # Copy image to the shared array\n np.copyto(X_np, img_arr)\n \n # Use multiprocessing to tile the numpy array\n with Pool(processes=NUM_CORES, initializer=init_worker, initargs=(X, X_shape, None, None)) as pool:\n multi = [pool.apply_async(TileUtils.generate_tile, args=(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format,), kwds={\"inter_x\":(x - inter_metadata.start_x), \"inter_y\":(y - inter_metadata.start_y)}) for (filename, x, y, done_x, done_y, path) in single_inter_imgs]\n f = [p.get() for p in multi]\n pool.close()\n pool.join()\n else: \n for filename, x, y, done_x, done_y, path in single_inter_imgs:\n TileUtils.generate_tile(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format, inter_x=(x - inter_metadata.start_x), inter_y=(y - inter_metadata.start_y), img_arr=img_arr)\n\n # Close the image\n src.close()\n # Tile in between two images\n print(\"\\tTiling between two images\")\n if mp:\n with Pool(processes=NUM_CORES) as pool:\n args = zip(repeat((tile.width, tile.height, inter_dir, img_format)), intermediate_info[1])\n result = list(tqdm(pool.imap(processDoublesMP, args), total=len(intermediate_info[1])))\n else:\n for double_inter_imgs in tqdm(intermediate_info[1]):\n processDoublesTuple(tile.width, tile.height, inter_dir, img_format, double_inter_imgs)\n \n # Tile in between four images\n print(\"\\tTiling between four images\")\n if mp:\n # Use half as many processes as cores to ensure not running out of available mem and getting stuck\n with Pool(processes=(NUM_CORES // 2)) as pool:\n args = zip(repeat((tile.width, tile.height, inter_dir, img_format)), intermediate_info[2])\n result = list(tqdm(pool.imap(processQuadsMP, args), total=len(intermediate_info[2])))\n else:\n for quad_inter_imgs in tqdm(intermediate_info[2]):\n processQuadsTuple(tile.width, tile.height, inter_dir, img_format, quad_inter_imgs)\n shutil.rmtree(inter_dir)\n else: \n # Open image as a numpy array in order to 
tile from the array\n src = Image.open(tiff_path)\n img_arr = np.array(src)\n\n if mp:\n # Create a shared array\n X_shape = img_arr.shape\n X = RawArray('B', X_shape[0] * X_shape[1] * X_shape[2])\n\n # Wrap shared array as numpy array\n X_np = np.frombuffer(X, dtype='uint8').reshape(X_shape)\n\n # Copy image to the shared array\n np.copyto(X_np, img_arr)\n\n # Use multiprocessing to tile the numpy array\n with Pool(processes=NUM_CORES, initializer=init_worker, initargs=(X, X_shape, None, None)) as pool:\n multi = [pool.apply_async(TileUtils.generate_tile, args=(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format)) for (x, y, done_x, done_y, path) in pixel_coords]\n f = [p.get() for p in tqdm(multi)]\n pool.close()\n pool.join()\n else:\n for x, y, done_x, done_y, path in tqdm(pixel_coords):\n TileUtils.generate_tile(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format, img_arr=img_arr)\n \n # Close the image\n src.close()\n print(\"done!\")", "def get_tiles(self) -> list:\n n_rows = self.mosaic_dimensions[0]\n n_columns = self.mosaic_dimensions[1]\n return [\n self.get_tile(i_row, i_column)\n for i_row in range(n_rows)\n for i_column in range(n_columns)\n ]", "def split_image_into_tiles_of_size(arr: Image, tile_w: int, tile_h: int, overlap: int):\n x_axis = -1\n y_axis = -2\n arr_width, arr_height = arr.shape[x_axis], arr.shape[y_axis]\n\n x_ntiles = (\n arr_width // tile_w if arr_width % tile_w == 0 else (arr_width // tile_w) + 1\n )\n y_ntiles = (\n arr_height // tile_h if arr_height % tile_h == 0 else (arr_height // tile_h) + 1\n )\n\n tiles = []\n\n # row\n for i in range(0, y_ntiles):\n # height of this tile\n ver_f = tile_h * i\n ver_t = ver_f + tile_h\n\n # col\n for j in range(0, x_ntiles):\n # width of this tile\n hor_f = tile_w * j\n hor_t = hor_f + tile_w\n\n tile = get_tile(arr, hor_f, hor_t, ver_f, ver_t, overlap)\n\n tiles.append(tile)\n tile_shape = [tile_h, tile_w]\n ntiles = dict(x=x_ntiles, y=y_ntiles)\n padding = dict(left=0, right=0, top=0, bottom=0)\n if arr_width % tile_w == 0:\n padding[\"right\"] = 0\n else:\n padding[\"right\"] = tile_w - (arr_width % tile_w)\n if arr_height % tile_h == 0:\n padding[\"bottom\"] = 0\n else:\n padding[\"bottom\"] = tile_h - (arr_height % tile_h)\n info = dict(tile_shape=tile_shape, ntiles=ntiles, overlap=overlap, padding=padding)\n return tiles, info", "def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),\n scale_rows_to_unit_interval=True,\n output_pixel_vals=True):\n\n assert len(img_shape) == 2\n assert len(tile_shape) == 2\n assert len(tile_spacing) == 2\n\n out_shape = [\n (ishp + tsp) * tshp - tsp\n for ishp, tshp, tsp in zip(img_shape, tile_shape, tile_spacing)\n ]\n\n if isinstance(X, tuple):\n assert len(X) == 4\n # Create an output numpy ndarray to store the image\n if output_pixel_vals:\n out_array = numpy.zeros((out_shape[0], out_shape[1], 4),\n dtype='uint8')\n else:\n out_array = numpy.zeros((out_shape[0], out_shape[1], 4),\n dtype=X.dtype)\n\n #colors default to 0, alpha defaults to 1 (opaque)\n if output_pixel_vals:\n channel_defaults = [0, 0, 0, 255]\n else:\n channel_defaults = [0., 0., 0., 1.]\n\n for i in xrange(4):\n if X[i] is None:\n # if channel is None, fill it with zeros of the correct\n # dtype\n dt = out_array.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array[:, :, i] = numpy.zeros(\n out_shape,\n dtype=dt\n ) + channel_defaults[i]\n else:\n # use a recurrent call to compute the channel and store it\n # in the output\n out_array[:, :, i] = tile_raster_images(\n X[i], 
img_shape, tile_shape, tile_spacing,\n scale_rows_to_unit_interval, output_pixel_vals)\n return out_array\n\n else:\n # if we are dealing with only one channel\n H, W = img_shape\n Hs, Ws = tile_spacing\n\n # generate a matrix to store the output\n dt = X.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array = numpy.zeros(out_shape, dtype=dt)\n\n for tile_row in xrange(tile_shape[0]):\n for tile_col in xrange(tile_shape[1]):\n if tile_row * tile_shape[1] + tile_col < X.shape[0]:\n this_x = X[tile_row * tile_shape[1] + tile_col]\n if scale_rows_to_unit_interval:\n # if we should scale values to be between 0 and 1\n # do this by calling the `scale_to_unit_interval`\n # function\n this_img = scale_to_unit_interval(\n this_x.reshape(img_shape))\n else:\n this_img = this_x.reshape(img_shape)\n # add the slice to the corresponding position in the\n # output array\n c = 1\n if output_pixel_vals:\n c = 255\n out_array[\n tile_row * (H + Hs): tile_row * (H + Hs) + H,\n tile_col * (W + Ws): tile_col * (W + Ws) + W\n ] = this_img * c\n return out_array", "def split_image_into_number_of_tiles(\n arr: Image, x_ntiles: int, y_ntiles: int, overlap: int\n):\n img_width, img_height = arr.shape[-1], arr.shape[-2]\n tile_w = img_width // x_ntiles\n tile_h = img_height // y_ntiles\n return split_image_into_tiles_of_size(arr, tile_w, tile_h, overlap)", "def image_tiles(bqsession, image_service_url, tile_size=64):\n dims = bqsession.fetchxml(image_service_url, dims='')\n x = int(dims.xpath('//tag[@name=\"image_num_x\"]')[0].attrib[ 'value'])\n y = int(dims.xpath('//tag[@name=\"image_num_y\"]')[0].attrib[ 'value'])\n \n for ix in range(int(x/tile_size)-1):\n for iy in range(int(y/tile_size)-1):\n yield bqsession.c.prepare_url(image_service_url, tile='0,%s,%s,%s' % (str(ix), str(iy), str(tile_size)))", "def get_image_tiles_tensor(image, label, image_path, patch_width):\n tiles_before_reshape = tensorflow.extract_image_patches(\n tensorflow.expand_dims(image, dim=0), [1, patch_width, patch_width, 1],\n [1, patch_width, patch_width, 1], [1, 1, 1, 1], 'VALID')\n tiles = tensorflow.reshape(tiles_before_reshape, [-1, patch_width, patch_width, 1])\n\n labels = tensorflow.tile(tensorflow.expand_dims(label, dim=0), [tensorflow.shape(tiles)[0], 1])\n image_paths = tensorflow.tile(\n tensorflow.expand_dims(image_path, dim=0), [tensorflow.shape(tiles)[0], 1])\n\n return tiles, labels, image_paths", "def tile(X, rows, cols):\n tiling = np.zeros((rows * X.shape[1], cols * X.shape[2], X.shape[3]), dtype = X.dtype)\n for i in range(rows):\n for j in range(cols):\n idx = i * cols + j\n if idx < X.shape[0]:\n img = X[idx,...]\n tiling[\n i*X.shape[1]:(i+1)*X.shape[1],\n j*X.shape[2]:(j+1)*X.shape[2],\n :] = img\n return tiling", "def tile(X, rows, cols):\n tiling = np.zeros((rows * X.shape[1], cols * X.shape[2], X.shape[3]), dtype = X.dtype)\n for i in range(rows):\n for j in range(cols):\n idx = i * cols + j\n if idx < X.shape[0]:\n img = X[idx,...]\n tiling[\n i*X.shape[1]:(i+1)*X.shape[1],\n j*X.shape[2]:(j+1)*X.shape[2],\n :] = img\n return tiling", "def forward_tiled(self, image: numpy.ndarray, tile_size: int) -> numpy.ndarray:\n # Constant that only really gets repeated a ton here.\n context = 7\n context2 = context + context\n\n # Notably, numpy is used here because it makes this fine manipulation a lot simpler.\n # Scaling first - repeat on axis 2 and axis 3 (Y & X)\n image = image.repeat(2, 2).repeat(2, 3)\n\n # Resulting image buffer. 
This is made before the input is padded,\n # since the input has the padded shape right now.\n image_out = numpy.zeros(image.shape)\n\n # Padding next. Note that this padding is done on the whole image.\n # Padding the tiles would lose critical context, cause seams, etc.\n image = numpy.pad(image, [[0, 0], [0, 0], [context, context], [context, context]], mode = \"edge\")\n\n # Now for tiling.\n # The output tile size is the usable output from an input tile (tile_size).\n # As such, the tiles overlap.\n out_tile_size = tile_size - context2\n for out_y in range(0, image_out.shape[2], out_tile_size):\n for out_x in range(0, image_out.shape[3], out_tile_size):\n # Input is sourced from the same coordinates, but some stuff ought to be\n # noted here for future reference:\n # + out_x/y's equivalent position w/ the padding is out_x + context.\n # + The output, however, is without context. Input needs context.\n # + Therefore, the input rectangle is expanded on all sides by context.\n # + Therefore, the input position has the context subtracted again.\n # + Therefore:\n in_y = out_y\n in_x = out_x\n # not shown: in_w/in_h = tile_size (as opposed to out_tile_size)\n # Extract tile.\n # Note that numpy will auto-crop this at the bottom-right.\n # This will never be a problem, as tiles are specifically chosen within the padded section.\n tile = image[:, :, in_y:in_y + tile_size, in_x:in_x + tile_size]\n # Extracted tile dimensions -> output dimensions\n # This is important because of said cropping, otherwise it'd be interior tile size.\n out_h = tile.shape[2] - context2\n out_w = tile.shape[3] - context2\n # Process tile.\n tile_t = Tensor(tile)\n tile_fwd_t = self.forward(tile_t)\n # Replace tile.\n image_out[:, :, out_y:out_y + out_h, out_x:out_x + out_w] = tile_fwd_t.numpy()\n\n return image_out", "def get_tile_image(imgs, tile_shape=None, result_img=None, margin_color=None):\n def get_tile_shape(img_num):\n x_num = 0\n y_num = int(math.sqrt(img_num))\n while x_num * y_num < img_num:\n x_num += 1\n return x_num, y_num\n\n if tile_shape is None:\n tile_shape = get_tile_shape(len(imgs))\n\n # get max tile size to which each image should be resized\n max_height, max_width = np.inf, np.inf\n for img in imgs:\n max_height = min([max_height, img.shape[0]])\n max_width = min([max_width, img.shape[1]])\n\n # resize and concatenate images\n for i, img in enumerate(imgs):\n h, w = img.shape[:2]\n h_scale, w_scale = max_height / h, max_width / w\n scale = min([h_scale, w_scale])\n h, w = int(scale * h), int(scale * w)\n img = cv2.resize(img, (w, h))\n img = centerize(img, (max_height, max_width, 3),\n margin_color=margin_color)\n imgs[i] = img\n return _tile_images(imgs, tile_shape, result_img,\n margin_color=margin_color)", "def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),\n scale_rows_to_unit_interval=True,\n output_pixel_vals=True):\n \n assert len(img_shape) == 2\n assert len(tile_shape) == 2\n assert len(tile_spacing) == 2\n \n # The expression below can be re-written in a more C style as\n # follows :\n #\n # out_shape = [0,0]\n # out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -\n # tile_spacing[0]\n # out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -\n # tile_spacing[1]\n out_shape = [\n (ishp + tsp) * tshp - tsp\n for ishp, tshp, tsp in zip(img_shape, tile_shape, tile_spacing)\n ]\n \n if isinstance(X, tuple):\n assert len(X) == 4\n # Create an output numpy ndarray to store the image\n # colors default to 0 (i.e. 
black), alphas defaults to 1 (fully opaque i.e.\n # corresponding pixel fully visible in image))\n if output_pixel_vals:\n out_array = np.zeros((out_shape[0], out_shape[1], 4),\n dtype='uint8') \n else:\n out_array = np.zeros((out_shape[0], out_shape[1], 4),\n dtype=X.dtype) \n\n if output_pixel_vals:\n channel_defaults = [0, 0, 0, 255]\n else:\n channel_defaults = [0., 0., 0., 1.]\n \n for i in range(4):\n if X[i] is None:\n # if channel is None, fill it with zeros of the correct\n # dtype\n dt = out_array.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array[:, :, i] = np.zeros(\n out_shape,\n dtype=dt\n ) + channel_defaults[i]\n else:\n # use a recurrent call to compute the channel and store it\n # in the output\n out_array[:, :, i] = tile_raster_images(\n X[i], img_shape, tile_shape, tile_spacing,\n scale_rows_to_unit_interval, output_pixel_vals)\n return out_array\n \n else:\n # if we are dealing with only one channel\n H, W = img_shape\n Hs, Ws = tile_spacing\n \n # generate a matrix to store the output\n dt = X.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array = np.ones(out_shape, dtype=dt)*255\n \n for tile_row in range(tile_shape[0]):\n for tile_col in range(tile_shape[1]):\n if tile_row * tile_shape[1] + tile_col < X.shape[0]:\n this_x = X[tile_row * tile_shape[1] + tile_col]\n if scale_rows_to_unit_interval:\n # if we should scale values to be between 0 and 1\n # do this by calling the `scale_to_unit_interval`\n # function\n this_img = scale_to_unit_interval(\n this_x.reshape(img_shape))\n else:\n this_img = this_x.reshape(img_shape)\n # add the slice to the corresponding position in the\n # output array\n c = 1\n if output_pixel_vals:\n c = 255\n out_array[\n tile_row * (H + Hs): tile_row * (H + Hs) + H,\n tile_col * (W + Ws): tile_col * (W + Ws) + W\n ] = this_img * c\n return out_array", "def _tile_images(imgs, tile_shape, concatenated_image, margin_color=None):\n x_num, y_num = tile_shape\n one_width = imgs[0].shape[1]\n one_height = imgs[0].shape[0]\n if concatenated_image is None:\n concatenated_image = np.zeros((one_height * y_num, one_width * x_num, 3),\n dtype=np.uint8)\n if margin_color is not None:\n concatenated_image[:, :] = margin_color\n for y in range(y_num):\n for x in range(x_num):\n i = x + y * x_num\n if i >= len(imgs):\n pass\n else:\n concatenated_image[y*one_height:(y+1)*one_height,x*one_width:(x+1)*one_width,] = imgs[i]\n return concatenated_image", "def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),\n scale_rows_to_unit_interval=True,\n output_pixel_vals=True):\n\n assert len(img_shape) == 2\n assert len(tile_shape) == 2\n assert len(tile_spacing) == 2\n\n # The expression below can be re-written in a more C style as\n # follows :\n #\n # out_shape = [0,0]\n # out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -\n # tile_spacing[0]\n # out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -\n # tile_spacing[1]\n out_shape = [\n (ishp + tsp) * tshp - tsp\n for ishp, tshp, tsp in zip(img_shape, tile_shape, tile_spacing)\n ]\n\n if isinstance(X, tuple):\n assert len(X) == 4\n # Create an output numpy ndarray to store the image\n if output_pixel_vals:\n out_array = numpy.zeros((out_shape[0], out_shape[1], 4),\n dtype='uint8')\n else:\n out_array = numpy.zeros((out_shape[0], out_shape[1], 4),\n dtype=X.dtype)\n\n #colors default to 0, alpha defaults to 1 (opaque)\n if output_pixel_vals:\n channel_defaults = [0, 0, 0, 255]\n else:\n channel_defaults = [0., 0., 0., 1.]\n\n for i in xrange(4):\n if X[i] is None:\n # if 
channel is None, fill it with zeros of the correct\n # dtype\n dt = out_array.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array[:, :, i] = numpy.zeros(\n out_shape,\n dtype=dt\n ) + channel_defaults[i]\n else:\n # use a recurrent call to compute the channel and store it\n # in the output\n out_array[:, :, i] = tile_raster_images(\n X[i], img_shape, tile_shape, tile_spacing,\n scale_rows_to_unit_interval, output_pixel_vals)\n return out_array\n\n else:\n # if we are dealing with only one channel\n H, W = img_shape\n Hs, Ws = tile_spacing\n\n # generate a matrix to store the output\n dt = X.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array = numpy.zeros(out_shape, dtype=dt)\n\n for tile_row in xrange(tile_shape[0]):\n for tile_col in xrange(tile_shape[1]):\n if tile_row * tile_shape[1] + tile_col < X.shape[0]:\n this_x = X[tile_row * tile_shape[1] + tile_col]\n if scale_rows_to_unit_interval:\n # if we should scale values to be between 0 and 1\n # do this by calling the `scale_to_unit_interval`\n # function\n this_img = scale_to_unit_interval(\n this_x.reshape(img_shape))\n else:\n this_img = this_x.reshape(img_shape)\n # add the slice to the corresponding position in the\n # output array\n c = 1\n if output_pixel_vals:\n c = 255\n out_array[\n tile_row * (H + Hs): tile_row * (H + Hs) + H,\n tile_col * (W + Ws): tile_col * (W + Ws) + W\n ] = this_img * c\n return out_array", "def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),\n scale_rows_to_unit_interval=True,\n output_pixel_vals=True):\n\n assert len(img_shape) == 2\n assert len(tile_shape) == 2\n assert len(tile_spacing) == 2\n\n # The expression below can be re-written in a more C style as\n # follows :\n #\n # out_shape = [0,0]\n # out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -\n # tile_spacing[0]\n # out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -\n # tile_spacing[1]\n out_shape = [\n (ishp + tsp) * tshp - tsp\n for ishp, tshp, tsp in zip(img_shape, tile_shape, tile_spacing)\n ]\n\n if isinstance(X, tuple):\n assert len(X) == 4\n # Create an output np ndarray to store the image\n if output_pixel_vals:\n out_array = np.zeros((out_shape[0], out_shape[1], 4),\n dtype='uint8')\n else:\n out_array = np.zeros((out_shape[0], out_shape[1], 4),\n dtype=X.dtype)\n\n # colors default to 0, alpha defaults to 1 (opaque)\n if output_pixel_vals:\n channel_defaults = [0, 0, 0, 255]\n else:\n channel_defaults = [0., 0., 0., 1.]\n\n for i in xrange(4):\n if X[i] is None:\n # if channel is None, fill it with zeros of the correct\n # dtype\n dt = out_array.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array[:, :, i] = np.zeros(\n out_shape,\n dtype=dt\n ) + channel_defaults[i]\n else:\n # use a recurrent call to compute the channel and store it\n # in the output\n out_array[:, :, i] = tile_raster_images(\n X[i], img_shape, tile_shape, tile_spacing,\n scale_rows_to_unit_interval, output_pixel_vals)\n return out_array\n\n else:\n # if we are dealing with only one channel\n H, W = img_shape\n Hs, Ws = tile_spacing\n\n # generate a matrix to store the output\n dt = X.dtype\n if output_pixel_vals:\n dt = 'uint8'\n out_array = np.zeros(out_shape, dtype=dt)\n\n for tile_row in xrange(tile_shape[0]):\n for tile_col in xrange(tile_shape[1]):\n if tile_row * tile_shape[1] + tile_col < X.shape[0]:\n this_x = X[tile_row * tile_shape[1] + tile_col]\n if scale_rows_to_unit_interval:\n # if we should scale values to be between 0 and 1\n # do this by calling the `scale_to_unit_interval`\n # 
functionmapping\n this_img = scale_to_unit_interval(\n this_x.reshape(img_shape))\n else:\n this_img = this_x.reshape(img_shape)\n # add the slice to the corresponding position in the\n # output array\n c = 1\n if output_pixel_vals:\n c = 255\n out_array[\n tile_row * (H + Hs): tile_row * (H + Hs) + H,\n tile_col * (W + Ws): tile_col * (W + Ws) + W\n ] = this_img * c\n return out_array", "def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),\r\n scale_rows_to_unit_interval=True,\r\n output_pixel_vals=True):\r\n\r\n assert len(img_shape) == 2\r\n assert len(tile_shape) == 2\r\n assert len(tile_spacing) == 2\r\n\r\n # The expression below can be re-written in a more C style as\r\n # follows :\r\n #\r\n # out_shape = [0,0]\r\n # out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -\r\n # tile_spacing[0]\r\n # out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -\r\n # tile_spacing[1]\r\n out_shape = [(ishp + tsp) * tshp - tsp for ishp, tshp, tsp\r\n in zip(img_shape, tile_shape, tile_spacing)]\r\n\r\n if isinstance(X, tuple):\r\n assert len(X) == 4\r\n # Create an output numpy ndarray to store the image\r\n if output_pixel_vals:\r\n out_array = numpy.zeros((out_shape[0], out_shape[1], 4),\r\n dtype='uint8')\r\n else:\r\n out_array = numpy.zeros((out_shape[0], out_shape[1], 4),\r\n dtype=X.dtype)\r\n\r\n #colors default to 0, alpha defaults to 1 (opaque)\r\n if output_pixel_vals:\r\n channel_defaults = [0, 0, 0, 255]\r\n else:\r\n channel_defaults = [0., 0., 0., 1.]\r\n\r\n for i in xrange(4):\r\n if X[i] is None:\r\n # if channel is None, fill it with zeros of the correct\r\n # dtype\r\n dt = out_array.dtype\r\n if output_pixel_vals:\r\n dt = 'uint8'\r\n out_array[:, :, i] = numpy.zeros(out_shape,\r\n dtype=dt) + channel_defaults[i]\r\n else:\r\n # use a recurrent call to compute the channel and store it\r\n # in the output\r\n out_array[:, :, i] = tile_raster_images(\r\n X[i], img_shape, tile_shape, tile_spacing,\r\n scale_rows_to_unit_interval, output_pixel_vals)\r\n return out_array\r\n\r\n else:\r\n # if we are dealing with only one channel\r\n H, W = img_shape\r\n Hs, Ws = tile_spacing\r\n\r\n # generate a matrix to store the output\r\n dt = X.dtype\r\n if output_pixel_vals:\r\n dt = 'uint8'\r\n out_array = numpy.zeros(out_shape, dtype=dt)\r\n\r\n for tile_row in xrange(tile_shape[0]):\r\n for tile_col in xrange(tile_shape[1]):\r\n if tile_row * tile_shape[1] + tile_col < X.shape[0]:\r\n this_x = X[tile_row * tile_shape[1] + tile_col]\r\n if scale_rows_to_unit_interval:\r\n # if we should scale values to be between 0 and 1\r\n # do this by calling the `scale_to_unit_interval`\r\n # function\r\n this_img = scale_to_unit_interval(\r\n this_x.reshape(img_shape))\r\n else:\r\n this_img = this_x.reshape(img_shape)\r\n # add the slice to the corresponding position in the\r\n # output array\r\n c = 1\r\n if output_pixel_vals:\r\n c = 255\r\n out_array[\r\n tile_row * (H + Hs): tile_row * (H + Hs) + H,\r\n tile_col * (W + Ws): tile_col * (W + Ws) + W\r\n ] = this_img * c\r\n return out_array", "def find_tiles(self):\n lat1, lat2 = self.bbox.south, self.bbox.north\n lon1, lon2 = self.bbox.west, self.bbox.east\n # convert to geographic bounding box\n minlat, minlon = min(lat1, lat2), min(lon1, lon2)\n maxlat, maxlon = max(lat1, lat2), max(lon1, lon2)\n\n # convert to tile-space bounding box\n _, xmin, ymin = self.mercator(maxlat, minlon, self.zoom)\n _, xmax, ymax = self.mercator(minlat, maxlon, self.zoom)\n\n # generate a list of tiles\n xs, ys = range(xmin, xmax + 
1), range(ymin, ymax + 1)\n tile_list = [(self.zoom, x, y) for (y, x) in product(ys, xs)]\n\n return tile_list", "def make_tiles(input_path, save_path, dimension):\n for filename in os.listdir(input_path):\n if filename.endswith(\".png\"):\n image_path = input_path + filename\n\n width, height = Image.open(image_path).size\n\n # Ensures image is square.\n assert width == height\n # Ensures the image can be cut into the desired dimensions.\n assert width % dimension == 0\n n_tiles = (width / dimension) ** 2\n\n tiles = image_slicer.slice(image_path, n_tiles, save=False)\n image_slicer.save_tiles(\n tiles, directory=save_path, prefix=filename[0:2], format=\"png\"\n )", "def tile_image(im):\n r1 = np.concatenate((im[::-1,::-1], im[::-1], im[::-1, ::-1]), 1)\n r2 = np.concatenate((im[:,::-1], im, im[:, ::-1]), 1)\n r3 = np.concatenate((im[::-1,::-1], im[::-1], im[::-1, ::-1]), 1)\n return(np.concatenate((r1, r2,r3), 0))", "def tile_images(images_nhwc: Sequence[np.ndarray]) -> np.ndarray: # pragma: no cover\n img_nhwc = np.asarray(images_nhwc)\n n_images, height, width, n_channels = img_nhwc.shape\n # new_height was named H before\n new_height = int(np.ceil(np.sqrt(n_images)))\n # new_width was named W before\n new_width = int(np.ceil(float(n_images) / new_height))\n img_nhwc = np.array(list(img_nhwc) + [img_nhwc[0] * 0 for _ in range(n_images, new_height * new_width)])\n # img_HWhwc\n out_image = img_nhwc.reshape((new_height, new_width, height, width, n_channels))\n # img_HhWwc\n out_image = out_image.transpose(0, 2, 1, 3, 4)\n # img_Hh_Ww_c\n out_image = out_image.reshape((new_height * height, new_width * width, n_channels))\n return out_image", "def tile2im(imstack, fac=2):\n height = imstack.shape[1]\n width = imstack.shape[2]\n out = np.zeros(\n (imstack.shape[0] // (fac * fac), height * fac, width * fac, imstack.shape[3]),\n \"float32\",\n )\n cnt = 0\n for i in range(out.shape[0]):\n for j in np.arange(0, out.shape[1], height):\n for k in np.arange(0, out.shape[2], width):\n out[i, j : j + height, k : k + width, :] = imstack[cnt]\n cnt += 1\n return out", "def calcul_xy_array(img_x, img_y, tile_x, tile_y):\n array = []\n\n modu_x = img_x % tile_x\n modu_y = img_y % tile_y\n div_x = img_x // tile_x\n div_y = img_y // tile_y\n current_x = 0\n current_y = 0\n\n for i in range(div_y):\n for j in range(div_x):\n array.append((current_x, current_y))\n current_x += tile_x\n if modu_x:\n array.append((img_x - tile_x, current_y))\n current_y += tile_y\n current_x = 0\n\n if modu_y:\n current_y = img_y - tile_y\n for j in range(div_x):\n array.append((current_x, current_y))\n current_x += tile_x\n if modu_x:\n array.append((img_x - tile_x, current_y))\n\n return array", "def render_tiles(output):\n chunks = [output[i:i + 3] for i in range(0, len(output), 3)]\n max_i = max_j = 0\n for i, j, _ in chunks:\n max_i, max_j = max(i, max_i), max(j, max_j)\n\n matrix = [[None] * (max_j + 1) for _ in range(max_i + 1)]\n\n for i, j, tile_id in chunks:\n matrix[i][j] = draw_tile(tile_id)\n\n for i, row in enumerate(matrix):\n matrix[i] = \" \".join(row)\n return matrix", "def slice_to_tiles(self, tile_raw_size=None, show_info=\"\"):\n if not tile_raw_size: tile_raw_size = self.tile_raw_size\n tile_raw_w,tile_raw_h = tile_raw_size\n tile_w,tile_h = round(tile_raw_w),round(tile_raw_h)\n\n if show_info:\n print(f\" ==Slicing {show_info} Tiles==\")\n print(f' Tile raw size: {tile_raw_size[0]} x {tile_raw_size[1]} px\\n')\n\n #process into list of image objects\n tiles = []\n true_x, true_y = (0,0)\n with 
Image.open(self.path) as img_obj:\n w,h = img_obj.size\n for row in range(0,h-tile_h,tile_h):\n tiles_row = []\n y = round(true_y)\n for col in range(0,w-tile_w,tile_w):\n x = round(true_x)\n im_crop = img_obj.crop((x,y,x+tile_w,y+tile_h))\n tiles_row.append(im_crop)\n true_x += tile_raw_w\n tiles.append(tiles_row)\n true_y += tile_raw_h\n true_x = 0\n\n return tiles", "def createTiles():\n Renderer.Clear()\n map = []\n w, h = len(testmap[0]), len(testmap)\n x, y = 0, 0\n for row in testmap:\n for char in row:\n map.append(makeTile(char, x, y))\n x += 1\n y += 1\n x = 0\n\n return map, w, h" ]
[ "0.76679796", "0.70593137", "0.6855183", "0.6758002", "0.6689827", "0.66560644", "0.66036004", "0.65633535", "0.652363", "0.6499605", "0.6484208", "0.64485383", "0.6427238", "0.6427238", "0.6377075", "0.6372184", "0.6352362", "0.63131434", "0.6267358", "0.6265575", "0.62133276", "0.6084879", "0.60158384", "0.59292114", "0.5915294", "0.5842761", "0.5834937", "0.5832701", "0.5827293", "0.5800241" ]
0.8448935
0
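The scores list above closes one record of this retrieval dump (query, document, metadata, negatives, negative_scores, document_score, document_rank). As an illustrative sketch only — assuming the dump is exported one JSON record per line under a hypothetical file name, and using just the field names that appear here — a record can be loaded and mined for its hardest negatives like this:

import json

def load_rows(path):
    # One JSON record per line; the JSONL export format is an assumption.
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            yield json.loads(line)

def hardest_negatives(row, k=5):
    # Pair each negative snippet with its score (stored as strings in the dump)
    # and keep the k highest-scoring, i.e. most confusable, non-matches.
    scored = list(zip(row["negatives"], map(float, row["negative_scores"])))
    scored.sort(key=lambda pair: pair[1], reverse=True)
    return scored[:k]

if __name__ == "__main__":
    for row in load_rows("code_retrieval.jsonl"):  # hypothetical file name
        query = row["query"]          # natural-language description
        positive = row["document"]    # the matching code snippet
        hard = hardest_negatives(row)
        print(query[:60], float(row["document_score"]), len(hard))

The triplet objective listed in each record's metadata ("query", "document", "negatives") is the (anchor, positive, negatives) grouping such a loader would feed to a contrastive loss.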
Takes a tile_grid and transforms it into an image, using the information in tile_catalog. We use tile_size to figure out the size the new image should be, and the visualize flag to display partial tile patterns.
def tiles_to_images(wfc_ns, tile_grid, tile_catalog, tile_size, visualize=False, partial=False, grid_count=None): new_img = np.zeros((tile_grid.shape[0] * tile_size, tile_grid.shape[1] * tile_size, wfc_ns.channels), dtype=np.int64) if partial and (len(tile_grid.shape) > 2): for i in range(tile_grid.shape[0]): for j in range(tile_grid.shape[1]): for u in range(wfc_ns.tile_size): for v in range(wfc_ns.tile_size): pixel_merge_list = [] for k in range(tile_grid.shape[2]): tile = tile_grid[i,j,k] ## If we want to display a partial pattern, it is helpful to ## be able to show empty cells. Therefore, in visualize mode, ## we use -1 as a magic number for a non-existant tile. pixel = None#[200, 0, 200] #print(tile) if (visualize) and ((-1 == tile) or (-2 == tile)): if (-1 == tile): pixel = [200, 0, 200] if 0 == (i + j) % 2: pixel = [255, 0, 255] else: pixel = [0, 255, 255] else: if (WFC_PARTIAL_BLANK != tile) and (WFC_NULL_VALUE != tile): # TODO: instead of -3, use MaskedArrays pixel = tile_catalog[tile][u,v] if not(pixel is None): pixel_merge_list.append(pixel) if len(pixel_merge_list) == 0: if 0 == (i + j) % 2: pixel_merge_list.append([255, 0, 255]) else: pixel_merge_list.append([0, 172, 172]) if len(pixel_merge_list) > 0: pixel_to_add = pixel_merge_list[0] if len(pixel_merge_list) > 1: pixel_to_add = [round(sum(x) / len(pixel_merge_list)) for x in zip(*pixel_merge_list)] try: while (len(pixel_to_add) < wfc_ns.channels): pixel_to_add.append(255) new_img[(i*wfc_ns.tile_size)+u, (j*wfc_ns.tile_size)+v] = pixel_to_add except TypeError as e: wfc_logger.warning(e) wfc_logger.warning("Tried to add {} from {}".format(pixel_to_add, pixel_merge_list)) else: for i in range(tile_grid.shape[0]): for j in range(tile_grid.shape[1]): tile = tile_grid[i,j] for u in range(wfc_ns.tile_size): for v in range(wfc_ns.tile_size): ## If we want to display a partial pattern, it is helpful to ## be able to show empty cells. Therefore, in visualize mode, ## we use -1 as a magic number for a non-existant tile. pixel = [200, 0, 200] #print(f"tile: {tile}") if (visualize) and ((-1 == tile) or (-2 == tile)): if (-1 == tile): if 0 == (i + j) % 2: pixel = [255, 0, 255] if (-2 == tile): pixel = [0, 255, 255] else: if (WFC_PARTIAL_BLANK != tile): pixel = tile_catalog[tile][u,v] # Watch out for images with more than 3 channels! new_img[(i*wfc_ns.tile_size)+u, (j*wfc_ns.tile_size)+v] = np.resize(pixel, new_img[(i*wfc_ns.tile_size)+u, (j*wfc_ns.tile_size)+v].shape) logging.debug('Output image shape is', new_img.shape) return new_img
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tile_to_image(tile, tile_catalog, tile_size, visualize=False):\n new_img = np.zeros((tile_size, tile_size, 3), dtype=np.int64)\n for u in range(tile_size):\n for v in range(tile_size):\n ## If we want to display a partial pattern, it is helpful to\n ## be able to show empty cells. Therefore, in visualize mode,\n ## we use -1 as a magic number for a non-existant tile.\n pixel = [200, 0, 200]\n if (visualize) and ((-1 == tile) or (WFC_PARTIAL_BLANK == tile)):\n if 0 == (u + v) % 2:\n pixel = [255, 0, 255]\n else:\n if (visualize) and -2 == tile:\n pixel = [0, 255, 255]\n else: \n pixel = tile_catalog[tile][u,v]\n new_img[u,v] = pixel", "def make_image(self, save=False):\n\n # image_grid = np.full((self.size_x, self.size_y), '#888888', dtype=str)\n image_grid = np.full((self.size_x, self.size_y, 3), 0, dtype=np.uint8)\n\n # self.grid = np.flip(self.grid, 1)\n\n # self.grid = np.swapaxes(self.grid, 0, 0)\n \"\"\"\n image_grid[self.grid == 0] = 'FFFFFF'\n image_grid[self.grid == 1] = '000000'\n image_grid[self.grid == 2] = '00FF00'\n image_grid[self.grid == 3] = '0000FF'\n image_grid[self.grid == 4] = 'FFFF00'\n image_grid[self.grid == 5] = '00FFFF'\n image_grid[self.grid == 6] = 'FF00FF'\n \"\"\"\n image_grid[self.grid == 0] = (1, 1, 1)\n image_grid[self.grid == 1] = (0, 0, 0)\n image_grid[self.grid == 2] = (1, 0, 1)\n image_grid[self.grid == 3] = (0, 1, 0)\n image_grid[self.grid == 4] = (0, 0, 1)\n image_grid[self.grid == 5] = (0, 1, 1)\n image_grid[self.grid == 6] = (1, 1, 0)\n\n #for ant in self.ants:\n # image_grid[ant.x, ant.y] = (1, 0, 0)\n\n # image_grid = image_grid.swapaxes(0, 1)\n # self.grid = self.grid.swapaxes(0, 1)\n\n\n\n DPI = 100\n width, height = 1000, 1000\n fig = plt.figure(figsize=(width / DPI, height / DPI), dpi=DPI, facecolor='k')\n ax = fig.add_subplot()\n\n plt.axis('equal')\n plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)\n\n for y in range(self.size_x):\n for x in range(self.size_y):\n if self.grid[x, y] != 0:\n # Only plot a hexagon if its state is not zero.\n plot_hex(ax, x, y, image_grid[x, y])\n\n ax.set_xlim(0, self.size_x)\n ax.set_ylim(0, self.size_y)\n\n plt.show()\n\n logging.info(\"Finished Image Processing\")", "def _prepare_image(self, grid):\n grid = np.array(grid, dtype=np.uint8)\n\n width = int(grid.shape[1] * self.scale_percent)\n height = int(grid.shape[0] * self.scale_percent)\n grid = cv2.resize(grid, (width, height), interpolation=cv2.INTER_AREA)\n return grid", "def test_grdimage(grid):\n fig = Figure()\n fig.grdimage(grid, cmap=\"earth\", projection=\"W0/6i\")\n return fig", "def load_tile(path, tile_size):\n img = pyglet.resource.image(path)\n img.width = tile_size\n img.height = tile_size\n return img", "def img_to_tiles(cls, tiff_path, region, res, tile, tile_date_path, img_format, mp):\n\n # Get metadata from original image\n metadata = TiffMetadata(tiff_path)\n\n WIDTH, HEIGHT = region.calculate_width_height(res)\n ultra_large = False\n if WIDTH * HEIGHT > 2 * Image.MAX_IMAGE_PIXELS:\n ultra_large = True\n\n # Use the following dictionary to get the coordinates of each tile\n geoTran_d = TileUtils.getGeoTransform(tiff_path)\n\n # Check for valid tiling dimensions\n if (tile.width > WIDTH or tile.height > HEIGHT):\n raise argparse.ArgumentTypeError(\"Tiling dimensions greater than image dimensions\")\n\n # Determine the number of tiles per row and column\n if tile.handling == Handling.discard_incomplete_tiles:\n num_rows = (HEIGHT - tile.height * tile.overlap) // (tile.height * (1 - tile.overlap))\n num_cols 
= (WIDTH - tile.width * tile.overlap) // (tile.width * (1 - tile.overlap))\n else:\n num_rows = math.ceil((HEIGHT - tile.height * tile.overlap) / (tile.height * (1 - tile.overlap)))\n num_cols = math.ceil((WIDTH - tile.width * tile.overlap) / (tile.width * (1 - tile.overlap)))\n\n num_iterations = num_rows * num_cols\n \n # Find the pixel coordinate extents of each tile to be generated\n print(\"Gathering tiling information...\", end=\"\", flush=True)\n if mp:\n with Pool(processes=NUM_CORES) as pool:\n args = zip(repeat((metadata, tile, WIDTH, HEIGHT, geoTran_d, tile_date_path, num_rows, num_cols)), list(range(num_iterations)))\n pixel_coords = pool.map(getTilingSplitCoordsMP, args)\n else:\n pixel_coords = []\n for index in range(num_iterations):\n pixel_coords.append(getTilingSplitCoordsTuple(metadata,tile, WIDTH, HEIGHT, geoTran_d, tile_date_path, num_rows, num_cols, index))\n print(\"done!\")\n\n if mp:\n print(\"Generating {} tiles using {} processes...\".format(len(pixel_coords), NUM_CORES), flush=True)\n else:\n print(\"Generating {} tiles sequentially...\".format(len(pixel_coords)), flush=True)\n\n if ultra_large: \n # Create the intermediate tiles\n inter_dir, img_width, img_height = TileUtils.img_to_intermediate_images(tiff_path, tile, WIDTH, HEIGHT, metadata.date, img_format)\n\n # Add each coordinate to its proper list\n intermediate_files = [f for f in os.listdir(inter_dir) if f.endswith(img_format)]\n\n # Get the tiling information for all intermediate tiles\n intermediate_info = TileUtils.getIntermediateTilingInfo(tile, pixel_coords, WIDTH, HEIGHT, img_width, img_height, intermediate_files)\n\n # Tile the complete images\n print(\"\\tTiling from complete images\")\n for single_inter_imgs in tqdm(intermediate_info[0]):\n filename = single_inter_imgs[0][0]\n inter_metadata = IntermediateMetadata(filename)\n\n img_path = os.path.join(inter_dir, filename)\n src = Image.open(img_path)\n img_arr = np.array(src)\n\n if mp:\n # Create a shared array\n X_shape = img_arr.shape\n X = RawArray('B', X_shape[0] * X_shape[1] * X_shape[2])\n\n # Wrap shared array as numpy array\n X_np = np.frombuffer(X, dtype='uint8').reshape(X_shape)\n\n # Copy image to the shared array\n np.copyto(X_np, img_arr)\n \n # Use multiprocessing to tile the numpy array\n with Pool(processes=NUM_CORES, initializer=init_worker, initargs=(X, X_shape, None, None)) as pool:\n multi = [pool.apply_async(TileUtils.generate_tile, args=(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format,), kwds={\"inter_x\":(x - inter_metadata.start_x), \"inter_y\":(y - inter_metadata.start_y)}) for (filename, x, y, done_x, done_y, path) in single_inter_imgs]\n f = [p.get() for p in multi]\n pool.close()\n pool.join()\n else: \n for filename, x, y, done_x, done_y, path in single_inter_imgs:\n TileUtils.generate_tile(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format, inter_x=(x - inter_metadata.start_x), inter_y=(y - inter_metadata.start_y), img_arr=img_arr)\n\n # Close the image\n src.close()\n # Tile in between two images\n print(\"\\tTiling between two images\")\n if mp:\n with Pool(processes=NUM_CORES) as pool:\n args = zip(repeat((tile.width, tile.height, inter_dir, img_format)), intermediate_info[1])\n result = list(tqdm(pool.imap(processDoublesMP, args), total=len(intermediate_info[1])))\n else:\n for double_inter_imgs in tqdm(intermediate_info[1]):\n processDoublesTuple(tile.width, tile.height, inter_dir, img_format, double_inter_imgs)\n \n # Tile in between four images\n print(\"\\tTiling between four 
images\")\n if mp:\n # Use half as many processes as cores to ensure not running out of available mem and getting stuck\n with Pool(processes=(NUM_CORES // 2)) as pool:\n args = zip(repeat((tile.width, tile.height, inter_dir, img_format)), intermediate_info[2])\n result = list(tqdm(pool.imap(processQuadsMP, args), total=len(intermediate_info[2])))\n else:\n for quad_inter_imgs in tqdm(intermediate_info[2]):\n processQuadsTuple(tile.width, tile.height, inter_dir, img_format, quad_inter_imgs)\n shutil.rmtree(inter_dir)\n else: \n # Open image as a numpy array in order to tile from the array\n src = Image.open(tiff_path)\n img_arr = np.array(src)\n\n if mp:\n # Create a shared array\n X_shape = img_arr.shape\n X = RawArray('B', X_shape[0] * X_shape[1] * X_shape[2])\n\n # Wrap shared array as numpy array\n X_np = np.frombuffer(X, dtype='uint8').reshape(X_shape)\n\n # Copy image to the shared array\n np.copyto(X_np, img_arr)\n\n # Use multiprocessing to tile the numpy array\n with Pool(processes=NUM_CORES, initializer=init_worker, initargs=(X, X_shape, None, None)) as pool:\n multi = [pool.apply_async(TileUtils.generate_tile, args=(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format)) for (x, y, done_x, done_y, path) in pixel_coords]\n f = [p.get() for p in tqdm(multi)]\n pool.close()\n pool.join()\n else:\n for x, y, done_x, done_y, path in tqdm(pixel_coords):\n TileUtils.generate_tile(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format, img_arr=img_arr)\n \n # Close the image\n src.close()\n print(\"done!\")", "def __init__(self, width, height, tilesize=256, tileformat='jpg'):\n\n self.tilesize = tilesize\n self.tileformat = tileformat\n imagesize = (width, height)\n tiles = (math.ceil(width / tilesize), math.ceil(height / tilesize))\n\n # Size (in tiles) for each tier of pyramid.\n self.tierSizeInTiles = []\n self.tierSizeInTiles.append(tiles)\n\n # Image size in pixels for each pyramid tierself\n self.tierImageSize = []\n self.tierImageSize.append(imagesize)\n\n while (imagesize[0] > tilesize or imagesize[1] > tilesize):\n imagesize = (math.floor(imagesize[0] / 2), math.floor(imagesize[1] / 2))\n tiles = (math.ceil(imagesize[0] / tilesize), math.ceil(imagesize[1] / tilesize))\n self.tierSizeInTiles.append(tiles)\n self.tierImageSize.append(imagesize)\n\n self.tierSizeInTiles.reverse()\n self.tierImageSize.reverse()\n\n # Depth of the Zoomify pyramid, number of tiers (zoom levels)\n self.numberOfTiers = len(self.tierSizeInTiles)\n\n # Number of tiles up to the given tier of pyramid.\n self.tileCountUpToTier = []\n self.tileCountUpToTier[0] = 0\n for i in range(1, self.numberOfTiers+1):\n self.tileCountUpToTier.append(\n self.tierSizeInTiles[i-1][0] * self.tierSizeInTiles[i-1][1] +\n self.tileCountUpToTier[i-1]\n )", "def process_tile(tile):\n global base_kwds, resampling, src\n # Get the bounds of the tile.\n ulx, uly = mercantile.xy(\n *mercantile.ul(tile.x, tile.y, tile.z))\n lrx, lry = mercantile.xy(\n *mercantile.ul(tile.x + 1, tile.y + 1, tile.z))\n\n kwds = base_kwds.copy()\n kwds['transform'] = from_bounds(ulx, lry, lrx, uly, 256, 256)\n src_nodata = kwds.pop('src_nodata', None)\n dst_nodata = kwds.pop('dst_nodata', None)\n\n with rasterio.open('/vsimem/tileimg', 'w', **kwds) as tmp:\n reproject(rasterio.band(src, src.indexes),\n rasterio.band(tmp, tmp.indexes),\n src_nodata=src_nodata,\n dst_nodata=dst_nodata,\n num_threads=1,\n resampling=resampling)\n\n data = bytearray(virtual_file_to_buffer('/vsimem/tileimg'))\n\n # Workaround for 
https://bugs.python.org/issue23349.\n if sys.version_info[0] == 2 and sys.version_info[2] < 10:\n # Check for backported bug fix before re-ordering\n\tif kwds['driver'] == 'PNG' and data[0:8] == png_header:\n # Properly constructed PNG, no need to re-order bytes\n pass\n\telif kwds['driver'] == 'JPEG' and data[0:4] == jpeg_header:\n # Properly constructed JPEG, no need to re-order bytes\n pass\n\telse:\n data[:] = data[-1:] + data[:-1]\n\n return tile, data", "def __init__(self, width, height, tilesize = 256, tileformat='jpg'):\n\n self.tilesize = tilesize\n self.tileformat = tileformat\n imagesize = (width, height)\n tiles = ( math.ceil( width / tilesize ), math.ceil( height / tilesize ) )\n\n # Size (in tiles) for each tier of pyramid.\n self.tierSizeInTiles = []\n self.tierSizeInTiles.push( tiles )\n\n # Image size in pixels for each pyramid tierself\n self.tierImageSize = []\n self.tierImageSize.append( imagesize );\n\n while (imagesize[0] > tilesize or imageSize[1] > tilesize ):\n imagesize = (math.floor( imagesize[0] / 2 ), math.floor( imagesize[1] / 2) )\n tiles = ( math.ceil( imagesize[0] / tilesize ), math.ceil( imagesize[1] / tilesize ) )\n self.tierSizeInTiles.append( tiles )\n self.tierImageSize.append( imagesize )\n\n self.tierSizeInTiles.reverse()\n self.tierImageSize.reverse()\n\n # Depth of the Zoomify pyramid, number of tiers (zoom levels)\n self.numberOfTiers = len(self.tierSizeInTiles)\n\n # Number of tiles up to the given tier of pyramid.\n self.tileCountUpToTier = []\n self.tileCountUpToTier[0] = 0\n for i in range(1, self.numberOfTiers+1):\n self.tileCountUpToTier.append(\n self.tierSizeInTiles[i-1][0] * self.tierSizeInTiles[i-1][1] + self.tileCountUpToTier[i-1]\n )", "def create_tiles(self, zoom):\n # Compute the tile x-y-z index range for the rasterlayer for this zoomlevel\n bbox = self.rasterlayer.extent()\n indexrange = tiler.tile_index_range(bbox, zoom)\n\n # Compute scale of tiles for this zoomlevel\n tilescale = tiler.tile_scale(zoom)\n\n # Count the number of tiles that are required to cover the raster at this zoomlevel\n nr_of_tiles = (indexrange[2] - indexrange[0] + 1) * (indexrange[3] - indexrange[1] + 1)\n\n # Create destination raster file\n self.log('Snapping dataset to zoom level {0}'.format(zoom))\n\n bounds = tiler.tile_bounds(indexrange[0], indexrange[1], zoom)\n sizex = (indexrange[2] - indexrange[0] + 1) * self.tilesize\n sizey = (indexrange[3] - indexrange[1] + 1) * self.tilesize\n dest_file = os.path.join(self.tmpdir, 'djangowarpedraster' + str(zoom) + '.tif')\n\n snapped_dataset = self.dataset.warp({\n 'name': dest_file,\n 'origin': [bounds[0], bounds[3]],\n 'scale': [tilescale, -tilescale],\n 'width': sizex,\n 'height': sizey,\n })\n\n self.log('Creating {0} tiles for zoom {1}.'.format(nr_of_tiles, zoom))\n\n counter = 0\n for tilex in range(indexrange[0], indexrange[2] + 1):\n for tiley in range(indexrange[1], indexrange[3] + 1):\n # Log progress\n counter += 1\n if counter % 250 == 0:\n self.log('{0} tiles created at zoom {1}'.format(counter, zoom))\n\n # Calculate raster tile origin\n bounds = tiler.tile_bounds(tilex, tiley, zoom)\n\n # Construct band data arrays\n pixeloffset = (\n (tilex - indexrange[0]) * self.tilesize,\n (tiley - indexrange[1]) * self.tilesize\n )\n\n band_data = [\n {\n 'data': band.data(offset=pixeloffset, size=(self.tilesize, self.tilesize)),\n 'nodata_value': band.nodata_value\n } for band in snapped_dataset.bands\n ]\n\n # Add tile data to histogram\n if zoom == self.max_zoom:\n self.push_histogram(band_data)\n\n # 
Warp source raster into this tile (in memory)\n dest = GDALRaster({\n 'width': self.tilesize,\n 'height': self.tilesize,\n 'origin': [bounds[0], bounds[3]],\n 'scale': [tilescale, -tilescale],\n 'srid': WEB_MERCATOR_SRID,\n 'datatype': snapped_dataset.bands[0].datatype(),\n 'bands': band_data,\n })\n\n # Store tile\n RasterTile.objects.create(\n rast=dest,\n rasterlayer=self.rasterlayer,\n tilex=tilex,\n tiley=tiley,\n tilez=zoom\n )\n\n # Store histogram data\n if zoom == self.max_zoom:\n bandmetas = RasterLayerBandMetadata.objects.filter(rasterlayer=self.rasterlayer)\n for bandmeta in bandmetas:\n bandmeta.hist_values = self.hist_values[bandmeta.band].tolist()\n bandmeta.save()\n\n # Remove snapped dataset\n self.log('Removing snapped dataset.', zoom=zoom)\n snapped_dataset = None\n os.remove(dest_file)", "def _tile_image(self, data):\n image = Image.open(StringIO(data))\n return image.convert('RGBA')", "def _generate_images(self, trace):\n images = []\n colors = []\n colors_by_shape = {}\n for board in trace:\n width = int(round((float(board.shape[1]) / board.shape[0]) * self._height))\n cellsize = width / board.shape[1] # cell size\n img = np.zeros((self._height, width, 3), dtype=np.uint8)\n\n tiles = {} # map from integer rep. of the tile to a shape\n for y in range(board.shape[0]):\n for x in range(board.shape[1]):\n cell = board[y,x]\n if cell not in tiles:\n tiles[cell] = (x, y, 1, 1) # x, y, w, h\n else:\n cur_x, cur_y, cur_w, cur_h = tiles[cell]\n if x >= cur_x + cur_w:\n cur_w = (x-cur_x) + 1\n if y >= cur_y + cur_h:\n cur_h = (y-cur_y) + 1\n tiles[cell] = (cur_x, cur_y, cur_w, cur_h)\n\n # Colors\n if len(colors_by_shape) == 0:\n for tid in tiles:\n shape = (tiles[tid][2], tiles[tid][3])\n if shape not in colors_by_shape:\n colors_by_shape[shape] = hex_to_rgb(random_unique_color(colors))\n colors.append(colors_by_shape[shape])\n\n for tid in tiles:\n x, y, w, h = tiles[tid]\n shape = (w,h)\n empty = board[y,x] == 0\n x, y, w, h = x*cellsize, y*cellsize, w*cellsize, h*cellsize\n # Draw a filled rectangle without color\n if not empty:\n cv2.rectangle(img, (x, y), (x+w, y+h), colors_by_shape[shape],-1)\n else:\n cv2.rectangle(img, (x, y), (x+w, y+h), [0,0,0], -1) #, 8)-\n # Draw a boundary\n cv2.rectangle(img, (x, y), (x+w, y+h), (0, 0, 0), 2, 8)\n \n images.append(img)\n return images", "def create_grid_map(self, scalar=20):\n self.grid_scalar = scalar\n if len(self.fem.Plates) == 0:\n raise ValueError(\"No plates to create image from\")\n\n height = self.height\n width = self.ring_n\n\n # TODO add scalar\n output = np.zeros(shape=(height * scalar, width * scalar))\n\n for xo in range(width):\n x = xo * scalar\n output[:, x] = 255\n\n for yo in range(height):\n y = yo * scalar\n output[y, :] = 255\n\n file_name = f'Grid_h{self.height}_ms{self.ring_n}_s{scalar}.jpg'\n plt.imshow(output, cmap='gray')\n plt.savefig('../../input/grids/{}'.format(file_name))\n plt.show()", "def export_image(self, bbox, zoomlevel, imagepath):\n assert has_pil, _(\"Cannot export image without python PIL\")\n grid = self.grid_tiles(bbox, zoomlevel)\n width = len(grid[0])\n height = len(grid)\n widthpix = width * self.tile_size\n heightpix = height * self.tile_size\n\n result = Image.new(\"RGBA\", (widthpix, heightpix))\n offset = (0, 0)\n for i, row in enumerate(grid):\n for j, (x, y) in enumerate(row):\n offset = (j * self.tile_size, i * self.tile_size)\n img = self._tile_image(self.tile((zoomlevel, x, y)))\n result.paste(img, offset)\n logger.info(_(\"Save resulting image to '%s'\") % imagepath)\n 
result.save(imagepath)", "def plot_tiles(self):\n \n #TODO: adjust plot, border and text_box sizes\n \n ordered_projections = []\n flat_clusters = []\n colors_2D = []\n\n for cluster, nodes in clusters.items():\n for n in nodes:\n ordered_projections.append(projection_2D[n])\n\n for n in nodes:\n flat_clusters.append(n)\n\n for i, n in enumerate(G.nodes):\n if n in nodes:\n colors_2D.append(colors[i])\n\n grid_cols = int(np.ceil(np.sqrt(len(ordered_projections))))\n\n if len(ordered_projections) <= (grid_cols**2 - grid_cols):\n grid_rows = grid_cols - 1\n else:\n grid_rows = grid_cols\n\n #assuming images are same size, get shape\n l, w = ordered_projections[0].shape\n\n #add blank images to pack in grid\n while len(ordered_projections) < grid_rows*grid_cols:\n ordered_projections.append(np.zeros((l, w)))\n colors_2D.append((0., 0., 0.))\n flat_clusters.append('')\n\n f = Figure()\n\n grid = ImageGrid(f, 111, #similar to subplot(111)\n nrows_ncols=(grid_rows, grid_cols), #creates grid of axes\n axes_pad=0.05) #pad between axes in inch\n \n lw = 1.75\n text_box_size = 5 \n props = dict(boxstyle='round', facecolor='white')\n \n for i, (ax, im) in enumerate(zip(grid, ordered_projections)):\n ax.imshow(im, cmap='gray')\n\n for side, spine in ax.spines.items():\n spine.set_color(colors_2D[i])\n spine.set_linewidth(lw)\n\n ax.get_yaxis().set_ticks([])\n ax.get_xaxis().set_ticks([])\n\n text = str(flat_clusters[i])\n ax.text(1, 1, text, va='top', ha='left', bbox=props, size=text_box_size)\n \n newWindow = tk.Toplevel()\n newWindow.grid_rowconfigure(0, weight=1)\n newWindow.grid_columnconfigure(0, weight=1)\n \n #PLOT FRAME\n plotFrame = tk.Frame(newWindow, bg='lightgrey', width=600, height=400)\n plotFrame.grid(row=0, column=0, sticky='nsew')\n \n canvas = FigureCanvasTkAgg(f, plotFrame)\n canvas.draw()\n canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)\n canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)\n canvas.figure.tight_layout()\n \n\n #TOOLBAR FRAME\n toolbarFrame = ttk.Frame(newWindow, width=600, height=100)\n toolbarFrame.grid(row=1, column=0, sticky='nsew')\n toolbarFrame.grid_propagate(0)\n \n toolbar = NavigationToolbar2Tk(canvas, toolbarFrame)\n toolbar.update()", "def draw_grid(self, tile_img, tiles):\n #debug_print(\"drawing level\", data)\n img = Surface((self.xsize * SIZE, self.ysize * SIZE))\n for pos, char in self:\n rect = get_tile_rect(pos)\n img.blit(tile_img, rect, tiles[char])\n return img", "def tile_image(\n im: Image.Image, width: int, height: int, mode: Optional[str] = \"RGB\", **kwargs: Any\n) -> Image.Image:\n im_out = Image.new(mode, (width, height), **kwargs)\n\n h_tiles = ceil(width / im.width)\n v_tiles = ceil(height / im.height)\n\n for i in range(v_tiles):\n y = im.height * i\n for j in range(h_tiles):\n x = im.width * j\n im_out.paste(im, box=(x, y))\n\n return im_out", "def generate_overview_tiles(self):\n\n gdal.SetConfigOption(\"GDAL_PAM_ENABLED\", \"NO\")\n\n print \"Generating Overview Tiles:\"\n\n if self.options.profile == 'garmin': # no overview tiles for 'garmin'\n return\n # Usage of existing tiles: from 4 underlying tiles generate one as overview.\n\n tcount = 0\n zcount = 0\n for tz in range(self.tmaxz-1, self.tminz-1, -1):\n tminx, tminy, tmaxx, tmaxy = self.tminmax[tz]\n tcount += (1+abs(tmaxx-tminx)) * (1+abs(tmaxy-tminy))\n zcount+=1\n if self.options.resume:\n count_tiles=tcount\n zcount+=1\n tminx, tminy, tmaxx, tmaxy = self.tminmax[self.tmaxz]\n count_tiles += (1+abs(tmaxx-tminx)) * (1+abs(tmaxy-tminy))\n 
i_count = self.tile_exists(0, 0, 0,1)\n if i_count == count_tiles:\n if self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; all-tiles [\",zcount,\"] zoom-levels with tiles[\",count_tiles,\"]\"\n return\n ti = 0\n\n # querysize = tilesize * 2\n\n for tz in range(self.tmaxz-1, self.tminz-1, -1):\n tminx, tminy, tmaxx, tmaxy = self.tminmax[tz]\n i_x_column_count=((tmaxx-tminx)+1)\n i_y_column_count=((tmaxy-tminy)+1)\n if self.options.verbose:\n # tx in range(tminx, tmaxx+1) tminx[ 140798 ] tmaxx[ 140872 ] ; ((tmaxx-tmaxy)+1) x_tiles[ -35331 ]\n print \"\\ttz=[\",tz,\"] : tx in range(tminx, tmaxx+1) tminx[\",tminx,\"] tmaxx[\",tmaxx,\"] ; ((tmaxx-tminx)+1) x_tiles[\",i_x_column_count,\"]\"\n # ty_tms in range(tmaxy, tminy-1, -1) tmaxy[ 176204 ] tminy[ 176126 ] ; ((tmaxy-tminy)) y_tiles[ 78 ]\n print \"\\ttz=[\",tz,\"] :ty_tms in range(tmaxy, tminy-1, -1) tmaxy[\",tmaxy,\"] tminy[\",tminy,\"] ; ((tmaxy-tminy)) y_tiles[\",i_y_column_count,\"]\"\n if self.options.resume:\n i_count = self.tile_exists(0, 0, tz,2)\n print \"\\tTile generation skipped because of --??? ; x/y-tiles of z[\",tz,\"] x/y_tiles[\",tcount,\"] i_count[\",i_count,\"]\"\n if i_count == tcount:\n if self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; x/y-tiles of z[\",tz,\"] x/y_tiles[\",tcount,\"]\"\n break\n for tx in range(tminx, tmaxx+1):\n tmaxy_work=tmaxy\n if self.options.resume:\n i_count = self.tile_exists(tx, 0, tz,3)\n print \"\\tTile generation skipped because of --??? ; z =\",tz,\" ; y-tiles of x[\",tx,\"] y_tiles[\",i_y_column_count,\"] i_count[\",i_count,\"]\"\n if i_count == i_y_column_count:\n if self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; z =\",tz,\" ; y-tiles of x[\",tx,\"] y_tiles[\",i_y_column_count,\"]\"\n break\n else:\n if i_count > 0:\n # this assums the rows are compleate, which may NOT be true 18-140798-176204.jpg\n tmaxy_work-=i_count\n if self.options.verbose:\n print \"\\tTile generation skipped to tmaxy[\",tmaxy_work,\"] because of --resume ; z =\",tz,\" ; y-tiles of x[\",tx,\"] y_tiles[\",i_y_column_count,\"]\"\n for ty_tms in range(tmaxy_work, tminy-1, -1): #range(tminy, tmaxy+1):\n ty_osm=self.flip_y(tz,ty_tms)\n ty=ty_tms\n if self.options.tms_osm:\n ty=ty_osm\n if self.stopped:\n if self.options.mbtiles:\n if self.mbtiles_db:\n self.mbtiles_db.close_db()\n self.mbtiles_db=None\n break\n\n ti += 1\n\n if self.options.resume:\n exists = self.tile_exists(tx, ty, tz,0)\n if exists and self.options.verbose:\n print \"\\tTile generation skipped because of --resume\"\n else:\n exists = False\n\n if not exists:\n if self.options.verbose:\n print ti, '/', tcount, self.get_verbose_tile_name(tx, ty, tz)\n try:\n self.write_overview_tile(tx, ty, tz,self.options.tms_osm)\n except ImageOutputException, e:\n self.error(\"'%d/%d/%d': %s\" % (tz, tx, ty, e.message))\n\n if not self.options.verbose or self.is_subprocess:\n self.progressbar( ti / float(tcount) )\n if self.options.mbtiles:\n if self.mbtiles_db:\n self.mbtiles_db.close_db()\n self.mbtiles_db=None", "def _tile_image(self, data):\n image = Image.open(BytesIO(data))\n return image.convert('RGBA')", "def _render_grid(self, r, tile_size):\n\n assert r.width == self.width * tile_size\n assert r.height == self.height * tile_size\n\n # Total grid size at native scale\n widthPx = self.width * CELL_PIXELS\n heightPx = self.height * CELL_PIXELS\n\n r.push()\n\n # Internally, we draw at the \"large\" full-grid resolution, but we\n # use the renderer to scale back to 
the desired size\n r.scale(tile_size / CELL_PIXELS, tile_size / CELL_PIXELS)\n\n # Draw the background of the in-world cells black\n r.fillRect(\n 0,\n 0,\n widthPx,\n heightPx,\n 0, 0, 0\n )\n\n # Draw grid lines\n r.setLineColor(100, 100, 100)\n for rowIdx in range(0, self.height):\n y = CELL_PIXELS * rowIdx\n r.drawLine(0, y, widthPx, y)\n for colIdx in range(0, self.width):\n x = CELL_PIXELS * colIdx\n r.drawLine(x, 0, x, heightPx)\n\n # Render the grid\n\n grid = self.encode()\n\n for j in range(0, self.width):\n for i in range(0, self.height):\n cell = grid[i,j]\n if cell == 0:\n continue\n\n r.push()\n r.translate(j * CELL_PIXELS, i * CELL_PIXELS)\n if cell == 1:\n self._render_wall(r)\n elif cell == 10 or cell == 12:\n self._render_goal(r, discovered=True)\n elif cell == 100 or cell == 102:\n self._render_goal(r, discovered=False)\n r.pop()\n\n r.pop()", "def draw_grid(grid):\n \n # Tile size variables\n tile_width = STAGE_WIDTH / GRID_WIDTH\n tile_height = STAGE_HEIGHT / GRID_HEIGHT\n \n for i in range(GRID_WIDTH):\n for j in range(GRID_HEIGHT):\n elev = grid[i][j]\n rect_x = i * tile_width\n rect_y = j * tile_height\n pygame.draw.rect(STAGE, get_color(elev),\n (rect_x, rect_y, tile_width, tile_height))", "def build_grid(tiles, tile_size, grid_rows=None, grid_cols=None):\n if grid_rows is None or grid_cols is None:\n grid_rows = int(math.sqrt(len(tiles)))\n grid_cols = int(math.ceil(len(tiles) / grid_rows))\n\n grid = np.zeros(\n (grid_rows * tile_size[1], grid_cols * tile_size[0], 3), np.uint8)\n for tile_id, tile in enumerate(tiles):\n assert(tile.shape[0] == tile_size[1] and tile.shape[1] == tile_size[0])\n yy = int(tile_id / grid_cols)\n xx = tile_id % grid_cols\n grid[(yy * tile_size[1]):((yy + 1) * tile_size[1]),\n (xx * tile_size[0]):((xx + 1) * tile_size[0]), :] = tile\n return grid", "def save_tile_img(tif, xyz, dataset, tile_size, region, zone, save_path, display=False):\n \n prefix = f'{region}{zone}{dataset}_'\n x,y,z = xyz\n tile, mask = rt_main.tile(tif, x,y,z, tilesize=tile_size)\n if display: \n plt.imshow(np.moveaxis(tile,0,2))\n plt.show()\n \n skimage.io.imsave(f'{save_path}/{prefix}{z}_{x}_{y}.png',np.moveaxis(tile,0,2), check_contrast=False)", "def slice_to_tiles(self, tile_raw_size=None, show_info=\"\"):\n if not tile_raw_size: tile_raw_size = self.tile_raw_size\n tile_raw_w,tile_raw_h = tile_raw_size\n tile_w,tile_h = round(tile_raw_w),round(tile_raw_h)\n\n if show_info:\n print(f\" ==Slicing {show_info} Tiles==\")\n print(f' Tile raw size: {tile_raw_size[0]} x {tile_raw_size[1]} px\\n')\n\n #process into list of image objects\n tiles = []\n true_x, true_y = (0,0)\n with Image.open(self.path) as img_obj:\n w,h = img_obj.size\n for row in range(0,h-tile_h,tile_h):\n tiles_row = []\n y = round(true_y)\n for col in range(0,w-tile_w,tile_w):\n x = round(true_x)\n im_crop = img_obj.crop((x,y,x+tile_w,y+tile_h))\n tiles_row.append(im_crop)\n true_x += tile_raw_w\n tiles.append(tiles_row)\n true_y += tile_raw_h\n true_x = 0\n\n return tiles", "def tile_to_pil_tile(tile):\n t = tile\n slide_filepath = slide.get_training_slide_path(t.slide_name)\n s = slide.open_slide(slide_filepath)\n\n x, y = t.o_c_s, t.o_r_s\n w, h = t.o_c_e - t.o_c_s, t.o_r_e - t.o_r_s\n tile_region = s.read_region((x, y), 0, (w, h))\n # RGBA to RGB\n pil_img = tile_region.convert(\"RGB\")\n return pil_img", "def test_tiled():\n size = [25, 25]\n img = Image.new('RGB', (10, 10))\n img.putpixel((5, 5), (0, 255, 0))\n\n parameters = {'data': [img], 'size': size}\n\n tiled = 
images.tiled(parameters)\n\n assert_equal(tiled.size, tuple(size))\n assert_equal(tiled.getpixel((5, 5)), (0, 255, 0))\n assert_equal(tiled.getpixel((15, 5)), (0, 255, 0))", "def generate_base_tiles(self):\n\n if not self.options.quiet:\n print(\"Generating Base Tiles:\")\n\n if self.options.verbose:\n print('')\n print(\"Tiles generated from the max zoom level:\")\n print(\"----------------------------------------\")\n print('')\n\n # Set the bounds\n tminx, tminy, tmaxx, tmaxy = self.tminmax[self.tmaxz]\n\n ds = self.warped_input_dataset\n tilebands = self.dataBandsCount + 1\n querysize = self.querysize\n\n if self.options.verbose:\n print(\"dataBandsCount: \", self.dataBandsCount)\n print(\"tilebands: \", tilebands)\n\n tcount = (1+abs(tmaxx-tminx)) * (1+abs(tmaxy-tminy))\n ti = 0\n\n tile_details = []\n\n tz = self.tmaxz\n for ty in range(tmaxy, tminy-1, -1):\n for tx in range(tminx, tmaxx+1):\n\n ti += 1\n ytile = GDAL2Tiles.getYtile(ty, tz, self.options)\n tilefilename = os.path.join(\n self.output_folder, str(tz), '{0:04d}'.format(tx) + \"_\" + '{0:04d}'.format(ytile) + \".\" + self.tileext)\n if self.options.verbose:\n print(ti, '/', tcount, tilefilename)\n\n if self.options.resume and os.path.exists(tilefilename):\n if self.options.verbose:\n print(\"Tile generation skipped because of --resume\")\n continue\n\n # Create directories for the tile\n if not os.path.exists(os.path.dirname(tilefilename)):\n os.makedirs(os.path.dirname(tilefilename))\n\n if self.options.profile == 'mercator':\n # Tile bounds in EPSG:3857\n b = self.mercator.TileBounds(tx, ty, tz)\n elif self.options.profile == 'geodetic':\n b = self.geodetic.TileBounds(tx, ty, tz)\n\n # Don't scale up by nearest neighbour, better change the querysize\n # to the native resolution (and return smaller query tile) for scaling\n\n if self.options.profile in ('mercator', 'geodetic'):\n rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1])\n\n # Pixel size in the raster covering query geo extent\n nativesize = wb[0] + wb[2]\n if self.options.verbose:\n print(\"\\tNative Extent (querysize\", nativesize, \"): \", rb, wb)\n\n # Tile bounds in raster coordinates for ReadRaster query\n rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1], querysize=querysize)\n\n rx, ry, rxsize, rysize = rb\n wx, wy, wxsize, wysize = wb\n\n else: # 'raster' profile:\n\n tsize = int(self.tsize[tz]) # tilesize in raster coordinates for actual zoom\n xsize = self.warped_input_dataset.RasterXSize # size of the raster in pixels\n ysize = self.warped_input_dataset.RasterYSize\n if tz >= self.nativezoom:\n querysize = self.tilesize\n\n rx = (tx) * tsize\n rxsize = 0\n if tx == tmaxx:\n rxsize = xsize % tsize\n if rxsize == 0:\n rxsize = tsize\n\n rysize = 0\n if ty == tmaxy:\n rysize = ysize % tsize\n if rysize == 0:\n rysize = tsize\n ry = ysize - (ty * tsize) - rysize\n\n wx, wy = 0, 0\n wxsize = int(rxsize/float(tsize) * self.tilesize)\n wysize = int(rysize/float(tsize) * self.tilesize)\n if wysize != self.tilesize:\n wy = self.tilesize - wysize\n\n # Read the source raster if anything is going inside the tile as per the computed\n # geo_query\n tile_details.append(\n TileDetail(\n tx=tx, ty=ytile, tz=tz, rx=rx, ry=ry, rxsize=rxsize, rysize=rysize, wx=wx,\n wy=wy, wxsize=wxsize, wysize=wysize, querysize=querysize,\n )\n )\n\n conf = TileJobInfo(\n src_file=self.tmp_vrt_filename,\n nb_data_bands=self.dataBandsCount,\n output_file_path=self.output_folder,\n tile_extension=self.tileext,\n tile_driver=self.tiledriver,\n tile_size=self.tilesize,\n 
kml=self.kml,\n tminmax=self.tminmax,\n tminz=self.tminz,\n tmaxz=self.tmaxz,\n in_srs_wkt=self.in_srs_wkt,\n out_geo_trans=self.out_gt,\n ominy=self.ominy,\n is_epsg_4326=self.isepsg4326,\n options=self.options,\n )\n\n return conf, tile_details", "def generate_base_tiles(self):\n\n gdal.SetConfigOption(\"GDAL_PAM_ENABLED\", \"NO\")\n\n print \"Generating Base Tiles:\"\n if self.options.verbose:\n #mx, my = self.out_gt[0], self.out_gt[3] # OriginX, OriginY\n #px, py = self.mercator.MetersToPixels( mx, my, self.tmaxz)\n #print \"Pixel coordinates:\", px, py, (mx, my)\n print\n print \"Tiles generated from the max zoom level:\"\n print \"----------------------------------------\"\n print\n\n\n # Set the bounds\n tminx, tminy, tmaxx, tmaxy = self.tminmax[self.tmaxz]\n querysize = self.querysize\n\n # Just the center tile\n #tminx = tminx+ (tmaxx - tminx)/2\n #tminy = tminy+ (tmaxy - tminy)/2\n #tmaxx = tminx\n #tmaxy = tminy\n\n #print tminx, tminy, tmaxx, tmaxy\n tcount = (1+abs(tmaxx-tminx)) * (1+abs(tmaxy-tminy))\n #print tcount\n ti = 0\n i_y_column_count=((tmaxy-tminy)+1)\n ds = self.out_ds\n tz = self.tmaxz\n if self.options.verbose:\n # tx in range(tminx, tmaxx+1) tminx[ 281596 ] tmaxx[ 281744 ] ; ((tmaxx-tmaxy)+1) x_tiles[ 23393 ]\n print \"\\ttz=[\",tz,\"] : tx in range(tminx, tmaxx+1) tminx[\",tminx,\"] tmaxx[\",tmaxx,\"] ; ((tmaxx-tmaxy)+1) x_tiles[\",tcount,\"]\"\n # ty_tms in range(tmaxy, tminy-1, -1) tmaxy[ 352409 ] tminy[ 352253 ] ; ((tmaxy-tminy)) y_tiles[ 157 ] 352409-(352253-1)\n print \"\\ttz=[\",tz,\"] : ty_tms in range(tmaxy, tminy-1, -1) tmaxy[\",tmaxy,\"] tminy[\",tminy,\"] ; ((tmaxy-tminy+1)) y_tiles[\",i_y_column_count,\"]\"\n if self.options.resume:\n i_count = self.tile_exists(0, 0, tz,2)\n if i_count == tcount:\n if self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; x/y-tiles of z[\",tz,\"] y_tiles[\",tcount,\"]\"\n return\n for tx in range(tminx, tmaxx+1):\n tmaxy_work=tmaxy\n if self.options.resume:\n i_count = self.tile_exists(tx, 0, tz,3)\n if i_count == i_y_column_count:\n if self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; z =\",tz,\" ; y-tiles of x[\",tx,\"] y_tiles[\",i_y_column_count,\"]\"\n break\n else:\n if i_count > 0:\n # this assums the rows are compleate, which may NOT be true\n tmaxy_work-=i_count\n if self.options.verbose:\n print \"\\tTile generation skipped to tmaxy[\",tmaxy_work,\"] because of --resume ; z =\",tz,\" ; y-tiles of x[\",tx,\"] y_tiles[\",i_y_column_count,\"]\"\n for ty_tms in range(tmaxy_work, tminy-1, -1): #range(tminy, tmaxy+1):\n ty_osm=self.flip_y(tz,ty_tms)\n ty=ty_tms\n if self.options.tms_osm:\n ty=ty_osm\n if self.stopped:\n if self.options.mbtiles:\n if self.mbtiles_db:\n self.mbtiles_db.close_db()\n break\n ti += 1\n\n if self.options.resume:\n exists = self.tile_exists(tx, ty, tz,0)\n if exists and self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; z =\",tz,\" ; x =\",tx,\" ; y_tms =\",ty_tms, \"; y_osm =\",ty_osm\n else:\n exists = False\n\n if not exists:\n if self.options.verbose:\n print ti, '/', tcount, self.get_verbose_tile_name(tx, ty, tz)\n # Don't scale up by nearest neighbour, better change the querysize\n # to the native resolution (and return smaller query tile) for scaling\n if self.options.profile in ('mercator','geodetic'):\n if self.options.profile == 'mercator':\n # Tile bounds in EPSG:900913\n b = self.mercator.TileBounds(tx, ty_tms, tz)\n elif self.options.profile == 'geodetic':\n b = 
self.geodetic.TileBounds(tx, ty_tms, tz)\n\n rb, wb = self.geo_query( ds, b[0], b[3], b[2], b[1])\n nativesize = wb[0]+wb[2] # Pixel size in the raster covering query geo extent\n if self.options.verbose:\n print \"\\tNative Extent (querysize\",nativesize,\"): \", rb, wb\n\n querysize = self.querysize\n # Tile bounds in raster coordinates for ReadRaster query\n rb, wb = self.geo_query( ds, b[0], b[3], b[2], b[1], querysize=querysize)\n\n rx, ry, rxsize, rysize = rb\n wx, wy, wxsize, wysize = wb\n else: # 'raster' or 'gearth' or 'garmin' profile:\n tsize = int(self.tsize[tz]) # tilesize in raster coordinates for actual zoom\n xsize = self.out_ds.RasterXSize # size of the raster in pixels\n ysize = self.out_ds.RasterYSize\n if tz >= self.nativezoom:\n querysize = self.tilesize # int(2**(self.nativezoom-tz) * self.tilesize)\n\n rx = (tx) * tsize\n rxsize = 0\n if tx == tmaxx:\n rxsize = xsize % tsize\n if rxsize == 0:\n rxsize = tsize\n\n rysize = 0\n if ty_tms == tmaxy:\n rysize = ysize % tsize\n if rysize == 0:\n rysize = tsize\n ry = ysize - (ty_tms * tsize) - rysize\n\n wx, wy = 0, 0\n\n wxsize, wysize = int(rxsize/float(tsize) * querysize), int(rysize/float(tsize) * querysize)\n if wysize != querysize:\n wy = querysize - wysize\n xyzzy = Xyzzy(querysize, rx, ry, rxsize, rysize, wx, wy, wxsize, wysize)\n try:\n if self.options.verbose:\n print ti,'/',tcount,' total ; z =',tz,' ; x =',tx,' ; y_tms =',ty_tms,' ; y_osm =',ty_osm\n print \"\\tReadRaster Extent: \", (rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize)\n self.write_base_tile(tx, ty, tz, xyzzy)\n except ImageOutputException, e:\n self.error(\"'%d/%d/%d': %s\" % (tz, tx, ty, e.message))\n\n if not self.options.verbose or self.is_subprocess:\n self.progressbar( ti / float(tcount) )\n if self.options.mbtiles:\n if self.mbtiles_db:\n self.mbtiles_db.close_db()\n self.mbtiles_db=None", "def create_overview_tiles(tile_job_info, output_folder, options):\n mem_driver = gdal.GetDriverByName('MEM')\n tile_driver = tile_job_info.tile_driver\n out_driver = gdal.GetDriverByName(tile_driver)\n\n tilebands = tile_job_info.nb_data_bands + 1\n\n # Usage of existing tiles: from 4 underlying tiles generate one as overview.\n\n tcount = 0\n for tz in range(tile_job_info.tmaxz - 1, tile_job_info.tminz - 1, -1):\n tminx, tminy, tmaxx, tmaxy = tile_job_info.tminmax[tz]\n tcount += (1 + abs(tmaxx-tminx)) * (1 + abs(tmaxy-tminy))\n\n ti = 0\n\n if tcount == 0:\n return\n\n if not options.quiet:\n print(\"Generating Overview Tiles:\")\n\n progress_bar = ProgressBar(tcount)\n progress_bar.start()\n\n for tz in range(tile_job_info.tmaxz - 1, tile_job_info.tminz - 1, -1):\n tminx, tminy, tmaxx, tmaxy = tile_job_info.tminmax[tz]\n for ty in range(tmaxy, tminy - 1, -1):\n for tx in range(tminx, tmaxx + 1):\n\n ti += 1\n ytile = GDAL2Tiles.getYtile(ty, tz, options)\n tilefilename = os.path.join(output_folder,\n str(tz),\n #str(tx),\n #\"%s.%s\" % (ytile, tile_job_info.tile_extension))\n '{0:04d}'.format(tx) + \"_\" + '{0:04d}'.format(ytile) + \".\" + tile_job_info.tile_extension)\n\n if options.verbose:\n print(ti, '/', tcount, tilefilename)\n\n if options.resume and os.path.exists(tilefilename):\n if options.verbose:\n print(\"Tile generation skipped because of --resume\")\n else:\n progress_bar.log_progress()\n continue\n\n # Create directories for the tile\n if not os.path.exists(os.path.dirname(tilefilename)):\n os.makedirs(os.path.dirname(tilefilename))\n\n dsquery = mem_driver.Create('', 2 * tile_job_info.tile_size,\n 2 * tile_job_info.tile_size, 
tilebands)\n # TODO: fill the null value\n dstile = mem_driver.Create('', tile_job_info.tile_size, tile_job_info.tile_size,\n tilebands)\n\n # TODO: Implement more clever walking on the tiles with cache functionality\n # probably walk should start with reading of four tiles from top left corner\n # Hilbert curve\n\n children = []\n # Read the tiles and write them to query window\n for y in range(2 * ty, 2 * ty + 2):\n for x in range(2 * tx, 2 * tx + 2):\n minx, miny, maxx, maxy = tile_job_info.tminmax[tz + 1]\n if x >= minx and x <= maxx and y >= miny and y <= maxy:\n ytile2 = GDAL2Tiles.getYtile(y, tz+1, options)\n dsquerytile = gdal.Open(\n os.path.join(output_folder, str(tz + 1),\n '{0:04d}'.format(x) + \"_\" + '{0:04d}'.format(ytile2) + \".\" + tile_job_info.tile_extension),\n #str(x), \"%s.%s\" % (ytile2, tile_job_info.tile_extension)),\n gdal.GA_ReadOnly)\n if (ty == 0 and y == 1) or (ty != 0 and (y % (2 * ty)) != 0):\n tileposy = 0\n else:\n tileposy = tile_job_info.tile_size\n if tx:\n tileposx = x % (2 * tx) * tile_job_info.tile_size\n elif tx == 0 and x == 1:\n tileposx = tile_job_info.tile_size\n else:\n tileposx = 0\n dsquery.WriteRaster(\n tileposx, tileposy, tile_job_info.tile_size,\n tile_job_info.tile_size,\n dsquerytile.ReadRaster(0, 0,\n tile_job_info.tile_size,\n tile_job_info.tile_size),\n band_list=list(range(1, tilebands + 1)))\n children.append([x, y, tz + 1])\n\n scale_query_to_tile(dsquery, dstile, tile_driver, options,\n tilefilename=tilefilename)\n # Write a copy of tile to png/jpg\n if options.resampling != 'antialias':\n # Write a copy of tile to png/jpg\n out_driver.CreateCopy(tilefilename, dstile, strict=0)\n\n del dstile\n\n options.generatedFiles.append(tilefilename)\n # applyLegend(tilefilename, options.legendObj)\n\n if options.verbose:\n print(\"\\tbuild from zoom\", tz + 1,\n \" tiles:\", (2 * tx, 2 * ty), (2 * tx + 1, 2 * ty),\n (2 * tx, 2 * ty + 1), (2 * tx + 1, 2 * ty + 1))\n\n # # Create a KML file for this tile.\n # if tile_job_info.kml:\n # with open(os.path.join(\n # output_folder,\n # '%d/%d/%d.kml' % (tz, tx, ty)\n # ), 'wb') as f:\n # f.write(generate_kml(\n # tx, ty, tz, tile_job_info.tile_extension, tile_job_info.tile_size,\n # get_tile_swne(tile_job_info, options), options, children\n # ).encode('utf-8'))\n\n if not options.verbose and not options.quiet:\n progress_bar.log_progress()", "def MakeCoordPlot(tiles, coords, image_size=10000, boarder_width=20):\n tile_size = tiles.shape[1]\n\n grid_coords = Cloud2Grid(\n coords, grid_dim=(image_size - 2 * tile_size), tile_size=tile_size\n )\n grid_coords = grid_coords + tile_size # for black boarder\n grid_image = Image.new(\"RGB\", (image_size, image_size))\n for i in range(len(tiles)): # paste each tile onto image\n tile = ColorTileBoarder(tiles[i], channel=0, boarder_width=2)\n tile = Image.fromarray(tiles[i])\n x, y = grid_coords[i, :]\n grid_image.paste(tile, (int(x), int(y)))\n coords[\"grid1\"] = grid_coords[:, 0] + tile_size // 2\n coords[\"grid2\"] = grid_coords[:, 1] + tile_size // 2\n return grid_image, coords" ]
[ "0.7510578", "0.6322842", "0.6210446", "0.6197227", "0.6109045", "0.6020915", "0.5998174", "0.5994103", "0.595534", "0.588121", "0.5878566", "0.5868952", "0.58491933", "0.5847946", "0.5841431", "0.58058107", "0.57825464", "0.57611525", "0.57573795", "0.5754921", "0.5700889", "0.5696025", "0.5695121", "0.56938905", "0.5691652", "0.56693244", "0.56528926", "0.5639409", "0.5635226", "0.56205136" ]
0.72285825
1
Does the input equal the output? >>> [show_input_to_output(test_ns), load_source_image(test_ns.img_filename)] [[[255 255 255] [255 255 255] [255 255 255] [255 255 255]] [[255 255 255] [ 0 0 0] [ 0 0 0] [ 0 0 0]] [[255 255 255] [ 0 0 0] [255 0 0] [ 0 0 0]] [[255 255 255] [ 0 0 0] [ 0 0 0] [ 0 0 0]]] [None, Image([[[255, 255, 255], [255, 255, 255], [255, 255, 255], [255, 255, 255]], [[255, 255, 255], [ 0, 0, 0], [ 0, 0, 0], [ 0, 0, 0]], [[255, 255, 255], [ 0, 0, 0], [255, 0, 0], [ 0, 0, 0]], [[255, 255, 255], [ 0, 0, 0], [ 0, 0, 0], [ 0, 0, 0]]], dtype=uint8)]
def show_input_to_output(img_ns): figure() sp = subplot(1, 2, 1).imshow(img_ns.img) sp.axes.grid(False) sp.axes.tick_params(bottom=False, left=False, which='both',labelleft=False,labelbottom=False,length=0) title("Input Image", fontsize=10); outimg = tiles_to_images(img_ns, img_ns.tile_grid, img_ns.tile_catalog, img_ns.tile_size) sp = subplot(1, 2, 2).imshow(outimg.astype(np.uint8)); sp.axes.tick_params(bottom=False, left=False, which='both',labelleft=False,labelbottom=False,length=0) title("Output Image From Tiles", fontsize=10); sp.axes.grid(False) #print(outimg.astype(np.uint8)) #print(img_ns) plt.savefig(img_ns.output_filename + "_input_to_output.pdf", bbox_inches="tight") plt.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_to_tensor(dummy_input):\n # Test the 2D image: B, H, W, C\n image, label = dummy_input(image_size=(512, 512, 3),\n label_size=(512, 512, 1))\n transform = ToTensor()\n _image, _label = transform(image, label, dtypes=[torch.float, torch.long])\n assert _image.dtype == torch.float\n assert _label.dtype == torch.long\n\n # Test the 3D image: B, H, W, D, C\n image, label = dummy_input(image_size=(512, 512, 20, 3),\n label_size=(512, 512, 20, 1))\n transform = ToTensor()\n _image, _label = transform(image, label, dtypes=[torch.float, torch.long])\n assert _image.dtype == torch.float\n assert _label.dtype == torch.long", "def test_image():\n def get_images_name(folder):\n \"\"\"Create a generator to list images name at evaluation time\"\"\"\n onlyfiles = [f for f in os.listdir(folder) if os.path.isfile(os.path.join(folder, f))]\n for f in onlyfiles:\n yield f\n\n def pil_loader(path):\n \"\"\"Load images from /eval/ subfolder, convert to greyscale and resized it as squared\"\"\"\n with open(path, 'rb') as f:\n with Image.open(f) as img:\n sqrWidth = np.ceil(np.sqrt(img.size[0]*img.size[1])).astype(int)\n return img.convert('L').resize((sqrWidth, sqrWidth))\n\n eval_loader = torch.utils.data.DataLoader(ImageFolder(root=args.evalf, transform=transforms.Compose([\n transforms.Resize(28),\n transforms.CenterCrop(28),\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ]), loader=pil_loader), batch_size=1, **kwargs)\n\n # Name generator\n names = get_images_name(os.path.join(args.evalf, \"images\"))\n model.eval()\n with torch.no_grad():\n for data, target in eval_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n label = output.argmax(dim=1, keepdim=True).item()\n print (\"Images: \" + next(names) + \", Classified as: \" + str(label))", "def test_image_input(self):\n \n vgg16 = VGG16()\n data_dir = os.path.dirname(os.path.abspath(__file__))\n data = os.path.join(data_dir, 'data/elephant.jpg')\n output = vgg16.input_handler(data)\n\n self.assertEqual(len(output.shape), 4) \n self.assertEqual(output.shape[1], 224)\n self.assertEqual(output.shape[2], 224)\n self.assertEqual(output.shape[3], 3)", "def assert_image_equal(path1, path2):\n test_im = np.asarray(Image.open(path1))\n ref_im = np.asarray(Image.open(path2))\n npt.assert_array_equal(test_im, ref_im)", "def __call__(self, src, label):\r\n # img = mx.nd.image.to_tensor(src)\r\n # img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)\r\n src = mx.nd.array(src)\r\n img = mx.nd.image.to_tensor(src)\r\n img = mx.nd.image.normalize(img, mean=self._mean, std=self._std)\r\n return img, mx.nd.array(label, dtype=img.dtype)", "def load_images(input_img: str, output_img: str) -> Union[np.ndarray, np.ndarray]:\r\n input_img = Image.open(input_img).convert(\"RGB\")\r\n output_img = Image.open(output_img).resize(input_img.size).convert(\"RGB\")\r\n return np.array(input_img).copy(), np.array(output_img).copy()", "def test_test_image_dims_content(self):\n iterator = self._dataset.get_test()\n sample = next(iterator)\n image, label = sample['image'], sample['label']\n\n with self.subTest(name='DataShape'):\n self.assertTupleEqual(image.shape, (self._batch_size_test, 32, 32, 3))\n\n with self.subTest(name='DataType'):\n self.assertTrue(np.issubdtype(image.dtype, float))\n\n with self.subTest(name='DataValues'):\n # Normalized by stddev., expect nothing to fall outside 3 stddev.\n self.assertTrue((image >= -3.).all() and (image <= 3.).all())\n\n with self.subTest(name='LabelShape'):\n 
self.assertLen(label, self._batch_size_test)\n\n with self.subTest(name='LabelType'):\n self.assertTrue(np.issubdtype(label.dtype, int))\n\n with self.subTest(name='LabelValues'):\n self.assertTrue((label >= 0).all() and\n (label <= self._dataset.num_classes).all())", "def create_displayable_test_output(test_image):\n if hasattr(test_image, \"numpy\"):\n return np.squeeze(test_image.numpy())[:, :, 1:]\n else:\n return np.squeeze(test_image)[:, :, 1:]", "def test__put_image_into():\n image = EmbedImage(url = 'https://orindance.party/')\n \n for input_value, defaults, expected_output in (\n (None, False, {}),\n (image, False, {'image': image.to_data()}),\n (image, True, {'image': image.to_data(defaults = True)}),\n ):\n data = put_image_into(input_value, {}, defaults)\n vampytest.assert_eq(data, expected_output)", "def __call__(self, src, label):\n\n h, w, _ = src.shape\n # interp = np.random.randint(0, 5)\n img = timage.resize_short_within(src, self._short, self._max_size, interp=1)\n img, flips = timage.random_flip(img, px=0.5)\n img = img.astype(np.float32)\n\n if self.teacher_aug:\n target_image_1 = self.random_color_aug(img)\n else:\n target_image_1 = img\n target_image_2 = self.random_color_aug(img)\n\n # target_image_1 = mx.nd.image.to_tensor(target_image_1)\n target_image_1 = mx.nd.image.to_tensor(target_image_1)\n target_image_1 = mx.nd.image.normalize(target_image_1, mean=self._mean, std=self._std)\n\n target_image_2 = mx.nd.image.to_tensor(target_image_2)\n target_image_2 = mx.nd.image.normalize(target_image_2, mean=self._mean, std=self._std)\n\n return target_image_1, target_image_2", "def test(self, X_src, Y_src):\n X_test,Y_test = self.prep_data(X_src,Y_src,shuffle = False, mode = \"test\")\n print(\"TEST: Images-\",X_test.shape, \"Labels-\", Y_test.shape )\n X_test = X_test[:,:,:,[0,2]] #extract channels with data\n\n #load weights\n ret = self.load_weights()\n if ret:\n #test the model on unseen data\n print(\"Starting testing\")\n pred = self.model.predict(X_test)\n with open(Y_src,\"w\") as file:\n for i in tqdm(range(len(pred))):\n file.write(str(pred[i,0])+'\\n')\n # rewriting last prediction to eqaute number of frames and predictions\n file.write(str(pred[-1,0])+'\\n')\n\n print(\"Saved prediction\")\n \n else:\n print(\"Test failed to complete with improper weights\")", "def test_RawImage_write_out():\n i.write_out()\n # now compare the output with reference\n print i.outpath\n print t.processed_path\n assert_image_equal(i.outpath, t.processed_path)", "def test_on_skimage_png(self):\n from_skimage = diffread(TEST_PNG)\n\n self.assertTupleEqual(from_skimage.shape, (256, 256))\n self.assertTrue(np.allclose(from_skimage, np.ones_like(from_skimage)))", "def main():\n labels, data = load_image_data()\n print(labels.shape, data.shape)", "def test_train_image_dims_content(self):\n iterator = self._dataset.get_train()\n sample = next(iterator)\n image, label = sample['image'], sample['label']\n\n with self.subTest(name='DataShape'):\n self.assertTupleEqual(image.shape, (self._batch_size, 32, 32, 3))\n\n with self.subTest(name='DataType'):\n self.assertTrue(np.issubdtype(image.dtype, float))\n\n with self.subTest(name='DataValues'):\n # Normalized by stddev., expect nothing to fall outside 3 stddev.\n self.assertTrue((image >= -3.).all() and (image <= 3.).all())\n\n with self.subTest(name='LabelShape'):\n self.assertLen(label, self._batch_size)\n\n with self.subTest(name='LabelType'):\n self.assertTrue(np.issubdtype(label.dtype, int))\n\n with self.subTest(name='LabelValues'):\n 
self.assertTrue((label >= 0).all() and\n (label <= self._dataset.num_classes).all())", "def test_image_rw(self):\n from ..image import Image\n from ..io.image import read_image, write_image\n shape = (5,5)\n pix = np.random.uniform(size=shape)\n ivar = np.random.uniform(size=shape)\n mask = np.random.randint(0, 3, size=shape)\n img1 = Image(pix, ivar, mask, readnoise=1.0, camera='b0')\n write_image(self.testfile, img1)\n img2 = read_image(self.testfile)\n\n #- Check output datatypes\n self.assertEqual(img2.pix.dtype, np.float64)\n self.assertEqual(img2.ivar.dtype, np.float64)\n self.assertEqual(img2.mask.dtype, np.uint32)\n\n #- Rounding from keeping np.float32 on disk means they aren't equal\n self.assertFalse(np.all(img1.pix == img2.pix))\n self.assertFalse(np.all(img1.ivar == img2.ivar))\n\n #- But they should be close, and identical after float64->float32\n self.assertTrue(np.allclose(img1.pix, img2.pix))\n self.assertTrue(np.all(img1.pix.astype(np.float32) == img2.pix))\n self.assertTrue(np.allclose(img1.ivar, img2.ivar))\n self.assertTrue(np.all(img1.ivar.astype(np.float32) == img2.ivar))\n\n #- masks should agree\n self.assertTrue(np.all(img1.mask == img2.mask))\n self.assertEqual(img1.readnoise, img2.readnoise)\n self.assertEqual(img1.camera, img2.camera)\n self.assertEqual(img2.mask.dtype, np.uint32)\n\n #- should work with various kinds of metadata header input\n meta = dict(BLAT='foo', BAR='quat', BIZ=1.0)\n img1 = Image(pix, ivar, mask, readnoise=1.0, camera='b0', meta=meta)\n write_image(self.testfile, img1)\n img2 = read_image(self.testfile)\n for key in meta:\n self.assertEqual(meta[key], img2.meta[key], 'meta[{}] not propagated'.format(key))\n\n #- img2 has meta as a FITS header instead of a dictionary;\n #- confirm that works too\n write_image(self.testfile, img2)\n img3 = read_image(self.testfile)\n for key in meta:\n self.assertEqual(meta[key], img3.meta[key], 'meta[{}] not propagated'.format(key))", "def assert_img_equal(request):\n\n testname = request.node.name\n filename = Path(request.module.__file__)\n test_dir = filename.parent / filename.stem\n test_dir.mkdir(exist_ok=True)\n\n def _img_equal(img, index=0):\n expected_file = test_dir / f\"{testname}_{index}.png\"\n actual_file = test_dir / f\"{testname}_{index}_actual.png\"\n if img.ndim == 2:\n cv2.imwrite(str(actual_file), img)\n else:\n img_bgr = img.copy()\n img_bgr[..., :3] = img_bgr[..., :3][..., ::-1]\n cv2.imwrite(str(actual_file), img_bgr) # img is RGB, imwrite expects BGR\n\n if not expected_file.exists():\n raise AssertionError(\n f\"{expected_file} does not exist! 
Check newly produced img with a command like:\\n\\n feh {actual_file}\\n\\n\"\n )\n\n try:\n pytest.helpers.assert_img_equal(expected_file, img)\n except Exception as e:\n raise AssertionError(f\"{expected_file} differs from {actual_file}\") from e\n\n return _img_equal", "def _test(self):\n self.pytorch_layer.eval()\n pytorch_layer = copy.deepcopy(self.pytorch_layer).cpu()\n image_w_h = int(self.input_size ** 0.5)\n input_image = torch.rand(1, self.n_in_channels, image_w_h, image_w_h)\n output_tensor = pytorch_layer(input_image)[0]\n for channel in range(self.n_in_channels):\n current_channel = input_image[0, channel].squeeze().flatten().cpu().numpy()\n normalized_data = (current_channel - self.running_mean[channel]) / np.sqrt(\n self.running_var[channel] + self.epsilon\n )\n if self.affine:\n output_numpy = (self.weights[channel] * normalized_data) + self.bias[\n channel\n ]\n else:\n output_numpy = normalized_data\n\n assert np.isclose(\n output_numpy,\n output_tensor[channel].detach().flatten().cpu().numpy(),\n atol=1e-6,\n ).all()", "def load_image_test(datapoint: dict) -> tuple:\n input_image = tf.image.resize(datapoint['image'], (IMG_SIZE, IMG_SIZE))\n input_mask = tf.image.resize(datapoint['segmentation_mask'], (IMG_SIZE, IMG_SIZE))\n\n input_image, input_mask = normalize(input_image, input_mask)\n\n return input_image, input_mask", "def test_one_image(self, img):\n return self.__image_pipeline(img)", "def __call__(self, sample):\n x, y = sample\n return TF.to_pil_image(x), TF.to_pil_image(y)", "def show_result(inputs, labels, outputs):\n num_classes = outputs.size(1)\n outputs = outputs.argmax(dim=1).detach().cpu().numpy()\n if num_classes == 2:\n outputs *= 255\n mask = outputs[0].reshape((360, 640))\n fig, ax = plt.subplots(1, 2, figsize=(20, 1 * 5))\n ax[0].imshow(inputs[0, :3, :, ].detach().cpu().numpy().transpose((1, 2, 0)))\n ax[0].set_title('Image')\n ax[1].imshow(labels[0].detach().cpu().numpy().reshape((360, 640)), cmap='gray')\n ax[1].set_title('gt')\n plt.show()\n plt.figure()\n plt.imshow(mask, cmap='gray')\n plt.title('Pred')\n plt.show()", "def test_model_sample(net, data_loader):\n net.eval()\n array = []\n with torch.no_grad():\n for data in data_loader:\n X = data['X']\n output = net(X)\n output = ToPILImage()(output)\n array.append(output)\n return array", "def test_2d_inputs():\n reseed()\n\n base_img1 = np.array([[0, 0, 1, 1],\n [0, 0, 1, 1],\n [0, 1, 1, 1]], dtype=np.uint8)\n base_img2 = np.array([[0, 0, 1, 1],\n [0, 1, 1, 1],\n [0, 1, 0, 0]], dtype=np.uint8)\n\n base_img1_flipped = np.array([[1, 1, 0, 0],\n [1, 1, 0, 0],\n [1, 1, 1, 0]], dtype=np.uint8)\n base_img2_flipped = np.array([[1, 1, 0, 0],\n [1, 1, 1, 0],\n [0, 0, 1, 0]], dtype=np.uint8)\n\n images = np.array([base_img1, base_img2])\n images_flipped = np.array([base_img1_flipped, base_img2_flipped])\n images_list = [base_img1, base_img2]\n images_flipped_list = [base_img1_flipped, base_img2_flipped]\n images_list2d3d = [base_img1, base_img2[:, :, np.newaxis]]\n images_flipped_list2d3d = [base_img1_flipped, base_img2_flipped[:, :, np.newaxis]]\n\n aug = iaa.Fliplr(1.0)\n noaug = iaa.Fliplr(0.0)\n\n # one numpy array as input\n observed = aug.augment_images(images)\n assert np.array_equal(observed, images_flipped)\n\n observed = noaug.augment_images(images)\n assert np.array_equal(observed, images)\n\n # list of 2d images\n observed = aug.augment_images(images_list)\n assert array_equal_lists(observed, images_flipped_list)\n\n observed = noaug.augment_images(images_list)\n assert 
array_equal_lists(observed, images_list)\n\n # list of images, one 2d and one 3d\n observed = aug.augment_images(images_list2d3d)\n assert array_equal_lists(observed, images_flipped_list2d3d)\n\n observed = noaug.augment_images(images_list2d3d)\n assert array_equal_lists(observed, images_list2d3d)", "def test():\n\n # load image and adjust its format\n if MEMORY_CACHE:\n test_input = dataset[0]['file']\n oriImg = test_input.byte().permute((1, 2, 0)).numpy() # B,G,R order\n else:\n oriImg = cv2.imread(dataset[0]['file']) # B,G,R order\n test_input = torch.from_numpy(oriImg).permute((2, 0, 1)).float()\n \n # transfer data on GPU on demand\n if CUDA:\n test_input = test_input.cuda()\n\n # perform prediction\n net.eval()\n with torch.no_grad():\n result = net(test_input.unsqueeze(0))[0]\n\n print(result)\n\n # draw rectangles and its class\n img = cv2.cvtColor(oriImg, cv2.COLOR_BGR2RGB)\n for box, label, score in zip(result['boxes'], result['labels'], result['scores']):\n # if score > 0.5:\n if label < len(orig_labels):\n img = cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), (0, 0, 255), 3)\n img = cv2.putText(img, '{}: {:.0%}'.format(orig_labels[label], score), (box[0] + 5, box[3] - 5), cv2.FONT_HERSHEY_SIMPLEX, .7, (0, 255, 0), 2, cv2.LINE_AA)\n plt.imshow(img)\n plt.axis('off')\n plt.show()", "def test_get_image(self):\n\n spine_data_loader = SpineDataLoader(dirpath_data=self.dirpath,\n batch_size=4)\n\n for idx in range(4):\n image = spine_data_loader.get_image(str(idx))\n assert image.shape == (256, 256, 1)\n assert image.min() == 0.0\n assert image.max() == 1.0\n assert image.dtype == 'float64'", "def post_process(self, outputs, source_image_shape, conf_thres=0.5, image=None):\n scaled = []\n grids = []\n for out in outputs:\n out = self.sigmoid_v(out)\n _, _, width, height, _ = out.shape\n grid = self.make_grid(width, height)\n grids.append(grid)\n scaled.append(out)\n z = []\n for out, grid, stride, anchor in zip(scaled, grids, self.strides, self.anchor_grid):\n _, _, width, height, _ = out.shape\n out[..., 0:2] = (out[..., 0:2] * 2. 
- 0.5 + grid) * stride\n out[..., 2:4] = (out[..., 2:4] * 2) ** 2 * anchor\n out = out.reshape((1, -1, self.feature_count))\n z.append(out)\n pred = np.concatenate(z, 1)\n xc = pred[..., 4] > conf_thres\n pred = pred[xc]\n boxes, scores, cids = self.nms(pred)\n\n # Normalise box coordinates to be in the range (0, 1]\n h, w = source_image_shape[:2]\n h1, w1 = h, w\n if self.keep_ratio and h != w:\n # Padding was used during pre-process to make the source image square\n h1 = w1 = max(h, w)\n\n y_scale = h1 / float(self.input_size) / h\n x_scale = w1 / float(self.input_size) / w\n boxes[:, 0] *= x_scale\n boxes[:, 1] *= y_scale\n boxes[:, 2] *= x_scale\n boxes[:, 3] *= y_scale\n boxes = np.clip(boxes, 0, 1)\n\n if image is not None:\n self.draw_cv2(image, boxes, scores, cids)\n\n return (boxes, scores, cids), image", "def test_im_model(name, xtrain_short=None, ytrain_short=None, nodisplay=False, \n summary=False, do_test=False):\n print(\"Loading model...\")\n objs = {\"accuracy\": fret_accuracy()}\n model = keras.models.load_model(\"models/\"+name+\".hdf5\", custom_objects=objs)\n\n if summary:\n # if we are just loading and have not trained\n model.summary()\n\n # if (batchsize, guitarstringindex, probabilities) then categorical, else \n # (batchsize, stringpred) is regression-type\n shape = model.get_output_shape_at(-1)\n if len(shape) > 2:\n categorical = True\n else:\n categorical = False\n\n \"\"\"\n testing\n \"\"\"\n data = load_all_data(\"data/inference_model_train\", num_splits=0, \n display=(not nodisplay), do_test=do_test)\n xtest, _, _, ytest, _, _ = data\n\n print(\"Evaluating on test set w/ no transitions\")\n print(len(xtest), \"testing images\")\n results = model.evaluate(xtest, ytest, verbose=1)\n with open(\"stats/\"+name+\"/stats.txt\", \"a\") as f:\n f.write(\"\\nTest results (no transitions):\\n\")\n for i,metric in enumerate(model.metrics_names):\n print(\" \", metric+\":\", results[i])\n f.write(metric+\": \"+str(results[i])+\"\\n\")\n\n # free memory\n del data, xtest, ytest\n import gc; gc.collect()\n\n data = load_all_data(\"data/inference_model_train\", num_splits=0, \n display=(not nodisplay), do_test=do_test, no_transitions=False)\n xtest, _, _, ytest, _, _ = data\n\n print(\"Evaluating on test set w/ transitions\")\n print(len(xtest), \"testing images\")\n results = model.evaluate(xtest, ytest, verbose=1)\n with open(\"stats/\"+name+\"/stats.txt\", \"a\") as f:\n f.write(\"\\nTest results (with transitions):\\n\")\n for i,metric in enumerate(model.metrics_names):\n print(\" \", metric+\":\", results[i])\n f.write(metric+\": \"+str(results[i])+\"\\n\")\n\n scaleup = 2.0\n\n # on training set, if available\n if xtrain_short is not None:\n print(\"Generating video on train set predictions\")\n trainpreds = model.predict(xtrain_short, verbose=1)\n\n vid = [cv.resize(i, dsize=(0,0), fx=scaleup, fy=scaleup, \\\n interpolation=cv.INTER_LINEAR) for i in xtrain_short]\n\n annotate_vid(vid, trainpreds, ytrain_short, categorical)\n if not nodisplay:\n showvid(vid, name=\"train ims\", ms=300)\n writevid(vid, \"stats/\"+name+\"/results_visualization_trainset\")\n\n # on test set\n print(\"Generating video on test set predictions\")\n numframes = 1000\n testpreds = model.predict(xtest[:numframes], verbose=1)\n\n vid = [cv.resize(i, dsize=(0,0), fx=scaleup, fy=scaleup, \\\n interpolation=cv.INTER_LINEAR) for i in xtest[:numframes]]\n\n annotate_vid(vid, testpreds, ytest[:numframes], categorical)\n if not nodisplay:\n showvid(vid, name=\"test set\", ms=35)\n writevid(vid, 
\"stats/\"+name+\"/results_visualization_testset\")", "def test_image_task(self):\n args = BASE_ARGS.copy()\n args.update(IMAGE_ARGS)\n\n valid, test = testing_utils.train_model(args)\n self.assertLessEqual(\n valid['ppl'], 8.6, 'failed to train image_seq2seq on image task'\n )", "def test_image(self):\r\n self.testdata = open(TESTDATA_FILENAME).read()" ]
[ "0.5998976", "0.59870833", "0.5945812", "0.593989", "0.5886621", "0.58820266", "0.5840027", "0.5829975", "0.5829964", "0.58230793", "0.5811639", "0.57654047", "0.5735271", "0.5720358", "0.5716861", "0.5688382", "0.5685994", "0.5652558", "0.56406236", "0.5636332", "0.5618694", "0.561006", "0.55791914", "0.5575637", "0.55643463", "0.5489207", "0.5473518", "0.54712534", "0.5453483", "0.5442876" ]
0.61868006
0
get all snapshots created on the volume.
def get_volume_snapshots(self, volume): LOG.debug('get_volume_snapshot starts') pool_name = self.configuration.rbd_pool volume_name = 'volume-%s' % encodeutils.safe_encode(volume["id"]) snaps_on_vol = self._get_volume_snapshots(pool_name, volume_name) snapshots = list() if snaps_on_vol is not None: for snap in snaps_on_vol: snap_name = str(snap["name"]) item = dict() if snap_name.startswith("snapshot-"): # snapshot directly created on volume. item["type"] = "volume_snap" item["uuid"] = snap_name[len('snapshot-'):] elif snap_name.startswith("volume-") and \ snap_name.endswith(".clone_snap"): # snapshot used for create volume on volume. item["type"] = "clone_snap" item["uuid"] = snap_name[len("volume-"):-len(".clone_snap")] elif snap_name.startswith("backup.") and ".snap." in snap_name: # snapshot used for backup volume. item["type"] = "backup_snap" item["uuid"] = \ snap_name[len("backup."):snap_name.index(".snap.")] else: item["type"] = "" item["uuid"] = "" snapshots.append(item) LOG.debug('volume snapshots: %s', snapshots) LOG.debug('get_volume_snapshots finished.') return snapshots
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def snapshots(self, owner=None, restorable_by=None):\r\n rs = self.connection.get_all_snapshots(owner=owner,\r\n restorable_by=restorable_by)\r\n mine = []\r\n for snap in rs:\r\n if snap.volume_id == self.id:\r\n mine.append(snap)\r\n return mine", "def database_volume_snapshot_get_list():\n db = database_get()\n\n session = db.session()\n query = session.query(model.VolumeSnapshot)\n\n volume_snapshot_objs = list()\n for volume_snapshot in query.all():\n nfvi_volume_snapshot_data = \\\n json.loads(volume_snapshot.nfvi_volume_snapshot_data)\n nfvi_volume_snapshot = nfvi.objects.v1.VolumeSnapshot(\n nfvi_volume_snapshot_data['uuid'],\n nfvi_volume_snapshot_data['name'],\n nfvi_volume_snapshot_data['description'],\n nfvi_volume_snapshot_data['size_gb'],\n nfvi_volume_snapshot_data['volume_uuid'])\n volume_snapshot_obj = objects.VolumeSnapshot(nfvi_volume_snapshot)\n volume_snapshot_objs.append(volume_snapshot_obj)\n return volume_snapshot_objs", "def list_snapshots(self, detailed=True):\n aname = \"cinder_v%s.list_snapshots\" % self.version\n with atomic.ActionTimer(self, aname):\n return (self._get_client()\n .volume_snapshots.list(detailed))", "def get_snapshots(self):\r\n ec2 = self.get_ec2_connection()\r\n rs = ec2.get_all_snapshots()\r\n all_vols = [self.volume_id] + self.past_volume_ids\r\n snaps = []\r\n for snapshot in rs:\r\n if snapshot.volume_id in all_vols:\r\n if snapshot.progress == '100%':\r\n snapshot.date = dateutil.parser.parse(snapshot.start_time)\r\n snapshot.keep = True\r\n snaps.append(snapshot)\r\n snaps.sort(cmp=lambda x,y: cmp(x.date, y.date))\r\n return snaps", "def get_snapshots(self) -> SnapshotListing:\n return self.snapshots", "def snapshots_created(self):\n # log.debug(\"Getting snaps created for volume {0}\".format(self.volume_id))\n snaps_info = []\n for snap in self._derived_snapshots:\n snap_info = {}\n try:\n if snap.volume_id == self.volume_id:\n snap.update()\n snap_info['snap_id'] = snap.id\n snap_info['snap_progress'] = snap.progress\n snap_info['snap_status'] = snap.status\n snap_info['snap_desc'] = snap.description\n snaps_info.append(snap_info)\n except EC2ResponseError, e:\n log.warning(\"EC2ResponseError getting snapshot status: {0} \"\n \"(code {1}; status {2})\"\n .format(e.message, e.error_code, e.status))\n return snaps_info", "def _list_snapshots(self):\n return self.resource.describe_snapshots(\n Filters=[\n {\n 'Name': 'tag:CreatedBy',\n 'Values': [\n 'AutomatedBackup{}'.format(INTERVAL_TYPE.capitalize())\n ]\n }\n ]\n )", "def derived_snapshots(self):\n start_time = time.time()\n log.debug(\"Getting snaps derived from volume {0}.\".format(self.volume_id))\n derived_snapshots = []\n for snap in self.app.cloud_interface.get_all_snapshots():\n try:\n if snap.volume_id == self.volume_id:\n derived_snapshots.append(snap)\n except EC2ResponseError, e:\n log.warning(\"EC2ResponseError getting snapshot status: {0} \"\n \"(code {1}; status {2})\"\n .format(e.message, e.error_code, e.status))\n log.debug(\"Got snaps derived from volume {0} in {1} seconds: {2}\"\n .format(self.volume_id, time.time() - start_time, derived_snapshots))\n return derived_snapshots", "def vm_snapshotlist(args):\n snapshot = args.snapshot\n name = args.name\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n common.pprint(\"Listing snapshots of %s...\" % name)\n snapshots = k.snapshot(snapshot, name, listing=True)\n if isinstance(snapshots, dict):\n common.pprint(\"Vm %s not found\" % 
name, color='red')\n return\n else:\n for snapshot in snapshots:\n print(snapshot)\n return", "def get_snapshots(vol_name):\n\n l = None\n try:\n cmd = 'gluster snapshot info volume %s --xml' % vol_name\n d, err = xml_parse.run_gluster_command(cmd)\n if err:\n raise Exception(err)\n if d:\n if d[\"op_status\"][\"op_ret\"] == 0:\n l, err = xml_parse.get_snapshots(d[\"root\"])\n if err:\n raise Exception(err)\n except Exception, e:\n return None, 'Error getting volume snapshots: %s' % str(e)\n else:\n return l, None", "def get_snapshots(self):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}/snapshots\"\n\n response = self.connector.http_call(\"get\", _url)\n self.snapshots = response.json()", "def GetVMSnapshotsList(self):\n try:\n current = self.vmInstance.get_current_snapshot_name()\n snapshots = self.vmInstance.get_snapshots()\n\n if current and snapshots:\n LOGGER.info('Name of current snapshot of virtual machine \"{}\": \"{}\"'.format(VM_NAME, current))\n LOGGER.info('List of all snapshots:')\n\n for i, snap in enumerate(snapshots):\n LOGGER.info(' {}. \"'.format(i + 1) + snap.get_name() + '\"')\n\n else:\n LOGGER.warning('No snapshots found for virtual machine \"{}\"!'.format(VM_NAME))\n\n except Exception as e:\n snapshots = None\n LOGGER.debug(e)\n LOGGER.error(traceback.format_exc())\n LOGGER.error('An error occured while getting list of snapshots of virtual machine \"{}\"!'.format(VM_NAME))\n\n return snapshots", "def list(self, detailed=True, search_opts=None, marker=None, limit=None,\n sort=None):\n resource_type = \"snapshots\"\n url = self._build_list_url(resource_type, detailed=detailed,\n search_opts=search_opts, marker=marker,\n limit=limit, sort=sort)\n return self._list(url, resource_type, limit=limit)", "def list_snapshots(self, detail=False, **params):\n url = 'snapshots'\n list_schema = schema.list_snapshots_no_detail\n if detail:\n url += '/detail'\n list_schema = schema.list_snapshots_with_detail\n if params:\n url += '?%s' % urllib.urlencode(params)\n\n resp, body = self.get(url)\n body = json.loads(body)\n self.validate_response(list_schema, resp, body)\n return rest_client.ResponseBody(resp, body)", "def getContainerSnapshots(self,node,vmid):\n data = self.connect('get','nodes/%s/lxc/%s/snapshot' % (node,vmid),None)\n return data", "def get_snapshots(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_snapshots = conn.get_all_snapshots(owner='self')\n except boto.exception.EC2ResponseError:\n return []\n return region_snapshots", "def test_volume_snapshot_create_get_list_delete(self):\n volume = self.create_volume()\n self.addCleanup(self.delete_volume, volume['id'])\n\n s_name = data_utils.rand_name(self.__class__.__name__ + '-Snapshot')\n # Create snapshot\n snapshot = self.snapshots_client.create_snapshot(\n volume_id=volume['id'],\n display_name=s_name)['snapshot']\n\n def delete_snapshot(snapshot_id):\n waiters.wait_for_volume_resource_status(self.snapshots_client,\n snapshot_id,\n 'available')\n # Delete snapshot\n self.snapshots_client.delete_snapshot(snapshot_id)\n self.snapshots_client.wait_for_resource_deletion(snapshot_id)\n\n self.addCleanup(delete_snapshot, snapshot['id'])\n self.assertEqual(volume['id'], snapshot['volumeId'])\n # Get snapshot\n fetched_snapshot = self.snapshots_client.show_snapshot(\n snapshot['id'])['snapshot']\n self.assertEqual(s_name, fetched_snapshot['displayName'])\n self.assertEqual(volume['id'], fetched_snapshot['volumeId'])\n # Fetch all snapshots\n snapshots = 
self.snapshots_client.list_snapshots()['snapshots']\n self.assertIn(snapshot['id'], map(lambda x: x['id'], snapshots))", "def get_snapshots(dataset=''):\n # filter my tags\n return os.listdir(dataset + ZFS_DEFAULT_SNAPSHOT_DIR)", "def getSnapshots(self):\n snapshots = []\n for x in self.root.goto('CommonDataObjects/Attachments'):\n for y in x.getList():\n if y['name'] == 'Video Snapshot':\n self.f.seek(y['bidx'])\n blk = Block(self.f)\n sx = blk.goto('res_x').getLong()\n sy = blk.goto('res_y').getLong()\n raw = blk.goto(\"imagedata\").value\n data = zlib.decompress(raw)\n I = np.flipud(np.array(struct.unpack(\"<\" + str(3 * sx * sy) + \"B\", data)).reshape((sy, sx, 3)))\n snapshots.append(I)\n del blk\n return snapshots", "def list_snapshots(project):\n data = {constants.PROJECT_PARAMETER: project}\n res = requests.post(_url + \"list_snapshots/\", data=data,\n auth=(_username, _password))\n if res.status_code == 200:\n snapshots = json.loads(res.content)\n table = PrettyTable(field_names=[\"Snapshot\", \"Parent\"])\n for snapshot in snapshots:\n table.add_row(snapshot)\n click.echo(table.get_string())\n else:\n click.echo(res.content)", "def get_snapshots(FIELDS='all'):\n snapinfostr = fork_and_get_output(\"zfs list -t snapshot -H -o {0}\".format(FIELDS).split())\n header = get_zfs_snap_header()\n snapinfo = snapinfostr.splitlines()\n snapobjs = []\n for snapstr in snapinfo:\n snapobjs.append(DataZFS(snapstr, header, 'snapshot'))\n return snapobjs", "def list(self, detailed=True, search_opts=None):\n query_string = utils.build_query_param(search_opts, sort=True)\n\n detail = \"\"\n if detailed:\n detail = \"/detail\"\n\n return self._list(\"/group_snapshots%s%s\" % (detail, query_string),\n \"group_snapshots\")", "def list_volumes(self):\n print '# Listing existing volumes'\n self.compute.list_volumes()", "def database_volume_get_list():\n db = database_get()\n\n session = db.session()\n query = session.query(model.Volume)\n\n volume_objs = list()\n for volume in query.all():\n nfvi_volume_data = json.loads(volume.nfvi_volume_data)\n nfvi_volume = nfvi.objects.v1.Volume(nfvi_volume_data['uuid'],\n nfvi_volume_data['name'],\n nfvi_volume_data['description'],\n nfvi_volume_data['avail_status'],\n nfvi_volume_data['action'],\n nfvi_volume_data['size_gb'],\n nfvi_volume_data['bootable'],\n nfvi_volume_data['encrypted'],\n nfvi_volume_data['image_uuid'])\n volume_obj = objects.Volume(nfvi_volume)\n volume_objs.append(volume_obj)\n return volume_objs", "def list_snapshots(session, verbose):\n # type: (Session, bool) -> Union[List[str], List[Dict[str,str]]]\n if not session.network:\n raise ValueError(\"Network must be set to list snapshots\")\n url_tail = \"/{}/{}/{}\".format(\n CoordConstsV2.RSC_NETWORKS, session.network, CoordConstsV2.RSC_SNAPSHOTS\n )\n return _get_list(session, url_tail, {CoordConstsV2.QP_VERBOSE: verbose})", "def get_all_snapshots(self, snapshot_ids=None,\r\n owner=None, restorable_by=None,\r\n filters=None):\r\n params = {}\r\n if snapshot_ids:\r\n self.build_list_params(params, snapshot_ids, 'SnapshotId')\r\n if owner:\r\n params['Owner'] = owner\r\n if restorable_by:\r\n params['RestorableBy'] = restorable_by\r\n if filters:\r\n self.build_filter_params(params, filters)\r\n return self.get_list('DescribeSnapshots', params,\r\n [('item', Snapshot)], verb='POST')", "def items(self):\n if self.__has_contents:\n return [dict(zip(['id', 'description', 'size', 'start_time', 'state'],\n [item['SnapshotId'], item['Description'], item['VolumeSize'],\n item['StartTime'], 
item['State']]))\n for item in self.__response['Snapshots']]\n else:\n return []", "def list(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, user=self.user, password=self.password)\n print(vmrun.listSnapshots())", "def create_volume_from_snapshot(snapshots, objects_created,\n wait_for_available=120):\n if type(snapshots) is not list:\n snapshots = [snapshots]\n v = []\n for snapshot in snapshots:\n command = 'cinder create --snapshot-id %s --name %s' % \\\n (snapshot['id'], snapshot['display_name'])\n volume_from_snapshot = parse_output(Popen(\n command.split(), stdout=STDOUT, stderr=STDERR).communicate()[0])\n volume_from_snapshot['device'] = snapshot['device']\n volume_from_snapshot['bootable'] = snapshot['bootable']\n v.append(volume_from_snapshot)\n if wait_for_available > 0:\n wait = 0\n again = False\n while wait < wait_for_available:\n time.sleep(5)\n wait += 5\n again = False\n for volume in v:\n command = 'cinder show %s' % volume['id']\n status = parse_output(Popen(command.split(), stdout=STDOUT,\n stderr=STDERR).communicate()[0]\n )['status']\n if status == 'error':\n # clean up and create volume again\n command = 'cinder delete %s' % volume['id']\n a = Popen(command.split(), stdout=STDOUT,\n stderr=STDERR).communicate()[0]\n command = 'cinder create --snapshot-id %s' % \\\n volume['snapshot_id']\n volume_info = parse_output(Popen(\n command.split(), stdout=STDOUT,\n stderr=STDERR).communicate()[0])\n volume_info['bootable'] = volume['bootable']\n volume_info['device'] = volume['device']\n volume = volume_info\n again = True\n break\n elif status == 'creating':\n again = True\n break\n elif status == 'available':\n volume['status'] = status\n pass\n if again:\n continue\n else:\n break\n if again: # Loop ended due to timeout\n print 'Error creating volume from snapshot!'\n print 'The following entities were created in the process:'\n print_objects_created(objects_created)\n sys.exit(-1)\n return v", "def get_volume_snapshots(\n self,\n references=None, # type: List[models.ReferenceType]\n sources=None, # type: List[models.ReferenceType]\n authorization=None, # type: str\n x_request_id=None, # type: str\n destroyed=None, # type: bool\n filter=None, # type: str\n ids=None, # type: List[str]\n limit=None, # type: int\n names=None, # type: List[str]\n offset=None, # type: int\n sort=None, # type: List[str]\n source_ids=None, # type: List[str]\n source_names=None, # type: List[str]\n total_item_count=None, # type: bool\n total_only=None, # type: bool\n async_req=False, # type: bool\n _return_http_data_only=False, # type: bool\n _preload_content=True, # type: bool\n _request_timeout=None, # type: Optional[int]\n ):\n # type: (...) 
-> models.VolumeSnapshotGetResponse\n kwargs = dict(\n authorization=authorization,\n x_request_id=x_request_id,\n destroyed=destroyed,\n filter=filter,\n ids=ids,\n limit=limit,\n names=names,\n offset=offset,\n sort=sort,\n source_ids=source_ids,\n source_names=source_names,\n total_item_count=total_item_count,\n total_only=total_only,\n async_req=async_req,\n _return_http_data_only=_return_http_data_only,\n _preload_content=_preload_content,\n _request_timeout=_request_timeout,\n )\n kwargs = {k: v for k, v in kwargs.items() if v is not None}\n endpoint = self._volume_snapshots_api.api20_volume_snapshots_get_with_http_info\n _process_references(references, ['ids', 'names'], kwargs)\n _process_references(sources, ['source_ids', 'source_names'], kwargs)\n return self._call_api(endpoint, kwargs)" ]
[ "0.7807004", "0.7624306", "0.7623622", "0.75040907", "0.741505", "0.73093504", "0.7294166", "0.7233389", "0.7192934", "0.71839905", "0.7122615", "0.7049829", "0.699524", "0.69842076", "0.69395983", "0.6928033", "0.6828494", "0.68274504", "0.6782187", "0.6708912", "0.6700088", "0.6698779", "0.66946703", "0.665338", "0.6632016", "0.6626924", "0.6569361", "0.65200996", "0.6478464", "0.6465183" ]
0.7970192
0
get full clone chain of a volume or snapshot.
def _get_full_clone_chain(self, pool_name, volume_name, snap_name=None): full_clone_chain = dict() # get children clone chain. obj = self._generate_chain_obj(pool_name, volume_name, snap_name) self._get_children_chain(obj["children"], pool_name, volume_name, snap_name) # get parent clone chain. self._get_parent_chain(full_clone_chain, obj, pool_name, volume_name, snap_name) return full_clone_chain
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_snapshot_clone_chain(self, snapshot):\n LOG.debug('get_snapshot_clone_chain starts.')\n volume_name = 'volume-%s' % \\\n encodeutils.safe_encode(snapshot[\"volume_id\"])\n snap_name = 'snapshot-%s' % encodeutils.safe_encode(snapshot['id'])\n pool_name = self.configuration.rbd_pool\n clone_chain = self._get_full_clone_chain(pool_name, volume_name,\n snap_name)\n LOG.debug('snapshot clone chain: %s', clone_chain)\n LOG.debug('get_snapshot_clone_chain finished.')\n return clone_chain", "def get_volume_clone_chain(self, volume):\n LOG.debug('get_volume_clone_chain starts.')\n volume_name = 'volume-%s' % encodeutils.safe_encode(volume[\"id\"])\n pool_name = self.configuration.rbd_pool\n clone_chain = self._get_full_clone_chain(pool_name, volume_name, None)\n LOG.debug('volume clone chain: %s', clone_chain)\n LOG.debug('get_volume_clone_chain finished.')\n return clone_chain", "def snap_clone(mnode, snapname, clonename):\n cmd = \"gluster snapshot clone %s %s --mode=script\" % (clonename, snapname)\n return g.run(mnode, cmd)", "def _get_clone_snapshot_name(self, volume):\n return 'cinder-clone-snapshot-%(id)s' % volume", "def get_chain(self):\n app_process = sqlite3.connect('app_process::memory:', check_same_thread=False)\n app_process_cursor = app_process.cursor()\n app_process_cursor.execute(\"SELECT * FROM blockchain LIMIT 1\")\n sol = app_process_cursor.fetchall()\n app_process.commit()\n app_process.close()\n print(sol, \"in get chain\")\n return sol", "def create_cloned_volume(self, volume, src_vref):\n clone_name = self.get_volume_name(volume.id)\n src_name = self.get_volume_name(src_vref.id)\n src_vol = self.client.search(\"volumes\", name=src_name)\n src_map = self.client.search(\"mappings\", volume=src_vol)\n src_attach_info = dest_attach_info = None\n if src_map.total != 0:\n msg = _(\"K2 driver does not support clone of an attached volume. \"\n \"To get this done, create a snapshot from the attached \"\n \"volume and then create a volume from the snapshot.\")\n LOG.error(msg)\n raise KaminarioCinderDriverException(reason=msg)\n try:\n properties = volume_utils.brick_get_connector_properties(\n self.configuration.use_multipath_for_image_xfer,\n self.configuration.enforce_multipath_for_image_xfer)\n conn = self.initialize_connection(src_vref, properties)\n src_attach_info = self._connect_device(conn)\n self.create_volume(volume)\n conn = self.initialize_connection(volume, properties)\n dest_attach_info = self._connect_device(conn)\n volume_utils.copy_volume(src_attach_info['device']['path'],\n dest_attach_info['device']['path'],\n src_vref.size * units.Ki,\n self.configuration.volume_dd_blocksize,\n sparse=True)\n self._kaminario_disconnect_volume(src_attach_info,\n dest_attach_info)\n self.terminate_connection(volume, properties)\n self.terminate_connection(src_vref, properties)\n except Exception as ex:\n self._kaminario_disconnect_volume(src_attach_info,\n dest_attach_info)\n self.terminate_connection(src_vref, properties)\n self.terminate_connection(volume, properties)\n self.delete_volume(volume)\n LOG.exception(\"Create a clone: %s failed.\", clone_name)\n raise KaminarioCinderDriverException(reason=ex)", "def create_cloned_volume(self, volume, src_vref):\n LOG.info('Creating clone of volume: %s', src_vref['id'])\n snapshot = {'volume_name': src_vref['name'],\n 'volume_id': src_vref['id'],\n 'volume_size': src_vref['size'],\n 'name': self._get_clone_snapshot_name(volume)}\n # We don't delete this snapshot, because this snapshot will be origin\n # of new volume. 
This snapshot will be automatically promoted by NMS\n # when user will delete its origin.\n self.create_snapshot(snapshot)\n try:\n return self.create_volume_from_snapshot(volume, snapshot)\n except utils.NexentaException:\n LOG.error('Volume creation failed, deleting created snapshot '\n '%(volume_name)s@%(name)s', snapshot)\n try:\n self.delete_snapshot(snapshot)\n except (utils.NexentaException, exception.SnapshotIsBusy):\n LOG.warning('Failed to delete zfs snapshot '\n '%(volume_name)s@%(name)s', snapshot)\n raise", "def get_snapshots_tree(self):\n\t\treturn Job(SDK.PrlVm_GetSnapshotsTree(self.handle)[0])", "def copy(self):\n new_chain = []\n for block in self.chain:\n if block.index == 0:\n new_chain.append(self.create_genesis())\n else:\n new_block = Block()\n new_block.deserialize(block.serialize())\n new_chain.append(new_block)\n\n return BlockChain(new_chain)", "def snapshot(self):\n return (self.block_header.state_root, self.chaindb.snapshot())", "def reconstruct(self):\n volumes = list(sorted((v for v in self.get_volumes() if v.mountpoint and v.lastmountpoint),\n key=lambda v: v.mountpoint or \"\", reverse=True))\n\n try:\n root = list(filter(lambda x: x.lastmountpoint == '/', volumes))[0]\n except IndexError:\n self._debug(\"[-] Could not find / while reconstructing, aborting!\")\n return None\n\n volumes.remove(root)\n\n for v in volumes:\n v.bindmount(os.path.join(root.mountpoint, v.lastmountpoint[1:]))\n return root", "def command_clone(\n ctx: \"PlanemoCliContext\",\n src: str,\n dest: str,\n mirror: bool = False,\n branch: Optional[str] = None,\n depth: Optional[int] = None,\n) -> List[str]:\n cmd = [\"git\", \"clone\"]\n if mirror:\n cmd.append(\"--mirror\")\n if branch is not None:\n cmd.extend([\"--branch\", branch])\n if depth is not None:\n cmd.extend([\"--depth\", str(depth)])\n if urllib.parse.urlparse(src).scheme == \"\":\n src = f\"file://{src}\"\n cmd.extend([src, dest])\n return cmd", "def create_cloned_volume(self, volume, src_vref):\n LOG.info(_LI('new cloned volume: %s'), volume['name'])\n LOG.info(_LI('source volume for cloning: %s'), src_vref['name'])\n\n snapshot = {'volume_name': src_vref['name'],\n 'volume_id': src_vref['id'],\n 'volume_size': src_vref['size'],\n 'name': self._create_snapshot_name()}\n\n self.create_snapshot(snapshot)\n return self.create_volume_from_snapshot(volume, snapshot,\n method='MOVE')", "def create_cloned_volume(self, vol, src_vref):\n self.authenticate_user()\n name = self._get_volume_name(vol)\n srcname = self._get_vipr_volume_name(src_vref)\n number_of_volumes = 1\n\n try:\n if(src_vref['consistencygroup_id']):\n raise vipr_utils.SOSError(\n vipr_utils.SOSError.SOS_FAILURE_ERR,\n \"Clone can't be taken individually on a volume\" + \\\n \" that is part of a Consistency Group\")\n except AttributeError as e:\n LOG.info(\"No Consistency Group associated with the volume\")\n\n try:\n (storageresType, storageresTypename) = self.volume_obj.get_storageAttributes(\n srcname, None, None)\n\n resource_id = self.volume_obj.storageResource_query(storageresType,\n srcname,\n None,\n None,\n self.configuration.vipr_project,\n self.configuration.vipr_tenant)\n\n self.volume_obj.clone(\n name,\n number_of_volumes,\n resource_id,\n sync=True)\n\n clone_vol_path = self.configuration.vipr_tenant + \"/\" + self.configuration.vipr_project + \"/\" + name\n detachable = self.volume_obj.is_volume_detachable(clone_vol_path)\n LOG.info(\"Is volume detachable : \" + str(detachable))\n \n #detach it from the source volume immediately after creation\n 
if(detachable):\n self.volume_obj.volume_clone_detach(\"\",clone_vol_path, True)\n\n except IndexError as e:\n LOG.exception(\"Volume clone detach returned empty task list\")\n\n except vipr_utils.SOSError as e:\n if(e.err_code == vipr_utils.SOSError.SOS_FAILURE_ERR):\n raise vipr_utils.SOSError(\n vipr_utils.SOSError.SOS_FAILURE_ERR,\n \"Volume \" + name + \": clone failed\\n\" + e.err_text)\n else:\n with excutils.save_and_reraise_exception():\n LOG.exception(_(\"Volume : {%s} clone failed\") % name)", "def create_volume_from_snapshot(self, snapshot, volume, volume_db):\n self.authenticate_user()\n\n if self.configuration.vipr_emulate_snapshot == 'True':\n self.create_cloned_volume(volume, snapshot)\n return\n\n ctxt = context.get_admin_context()\n\n src_snapshot_name = None\n\n #src_snapshot_name = snapshot['display_name']\n src_vol_ref = volume_db.volume_get(ctxt, snapshot['volume_id'])\n new_volume_name = self._get_volume_name(volume)\n number_of_volumes = 1\n\n try:\n src_vol_name, src_vol_uri = self._get_vipr_volume_name(src_vol_ref, True)\n src_snapshot_name = self._get_vipr_snapshot_name(snapshot , src_vol_uri)\n\n (storageresType, storageresTypename) = self.volume_obj.get_storageAttributes(\n src_vol_name\n , None\n , src_snapshot_name)\n\n resource_id = self.volume_obj.storageResource_query(storageresType,\n src_vol_name,\n None,\n src_snapshot_name,\n self.configuration.vipr_project,\n self.configuration.vipr_tenant)\n\n self.volume_obj.clone(\n new_volume_name,\n number_of_volumes,\n resource_id,\n sync=True)\n\n except vipr_utils.SOSError as e:\n if(e.err_code == vipr_utils.SOSError.SOS_FAILURE_ERR):\n raise vipr_utils.SOSError(\n vipr_utils.SOSError.SOS_FAILURE_ERR,\n \"Snapshot \" +\n src_snapshot_name +\n \": clone failed\\n\" +\n e.err_text)\n else:\n with excutils.save_and_reraise_exception():\n LOG.exception(\n _(\"Snapshot : %s clone failed\") % src_snapshot_name)", "def create_cloned_volume(self, volume, src_vref):\n # Form the snapshot structure.\n snapshot = {'id': uuid.uuid4().__str__(),\n 'volume_id': src_vref['id'],\n 'volume': src_vref}\n\n # Create snapshot.\n self.create_snapshot(snapshot)\n\n try:\n # Create volume from snapshot.\n lun_info = self.create_volume_from_snapshot(volume, snapshot)\n finally:\n try:\n # Delete snapshot.\n self.delete_snapshot(snapshot)\n except exception.VolumeBackendAPIException:\n LOG.warning(_LW(\n 'Failure deleting the snapshot %(snapshot_id)s '\n 'of volume %(volume_id)s.'),\n {'snapshot_id': snapshot['id'],\n 'volume_id': src_vref['id']},)\n\n return {'provider_location': lun_info['ID'],\n 'lun_info': lun_info}", "def get_snapshot(project, zone, instance):\n snapshot_disks(project, zone, *get_disks(instance))", "def get_mutable_clone(self, block_hash: Hash32) -> 'MutableSnapshot':\n return MutableSnapshot(\n signers=list(self.signers),\n block_hash=block_hash,\n votes=list(self.votes),\n tallies=self.tallies.copy()\n )", "def get_chain(self):\n return self.chain", "def get_chain(self):\n return self.chain", "def derived_snapshots(self):\n start_time = time.time()\n log.debug(\"Getting snaps derived from volume {0}.\".format(self.volume_id))\n derived_snapshots = []\n for snap in self.app.cloud_interface.get_all_snapshots():\n try:\n if snap.volume_id == self.volume_id:\n derived_snapshots.append(snap)\n except EC2ResponseError, e:\n log.warning(\"EC2ResponseError getting snapshot status: {0} \"\n \"(code {1}; status {2})\"\n .format(e.message, e.error_code, e.status))\n log.debug(\"Got snaps derived from volume {0} in {1} 
seconds: {2}\"\n .format(self.volume_id, time.time() - start_time, derived_snapshots))\n return derived_snapshots", "def create_volume_from_snapshot(self, volume, snapshot):\n self._ensure_shares_mounted()\n\n snapshot_vol = self._get_snapshot_volume(snapshot)\n nfs_share = snapshot_vol['provider_location']\n volume['provider_location'] = nfs_share\n nms = self.share2nms[nfs_share]\n\n vol, dataset = self._get_share_datasets(nfs_share)\n snapshot_name = '%s/%s/%s@%s' % (vol, dataset, snapshot['volume_name'],\n snapshot['name'])\n folder = '%s/%s' % (dataset, volume['name'])\n nms.folder.clone(snapshot_name, '%s/%s' % (vol, folder))\n\n try:\n self._share_folder(nms, vol, folder)\n except utils.NexentaException:\n try:\n nms.folder.destroy('%s/%s' % (vol, folder), '')\n except utils.NexentaException:\n LOG.warning(\"Cannot destroy cloned folder: \"\n \"%(vol)s/%(folder)s\",\n {'vol': vol, 'folder': folder})\n raise\n\n if self._get_nfs_server_version(nfs_share) < 4:\n sub_share, mnt_path = self._get_subshare_mount_point(nfs_share,\n volume)\n self._ensure_share_mounted(sub_share, mnt_path)\n\n if (('size' in volume) and (\n volume['size'] > snapshot['volume_size'])):\n self.extend_volume(volume, volume['size'])\n\n return {'provider_location': volume['provider_location']}", "def get_chain():\n response = {\n 'chain': blockchain.chain,\n 'length':len(blockchain.chain)\n }\n\n return jsonify(response), 200", "def get_chain(self):\n return self.segment.chain", "def get_chain(self):\n return self.segment.chain", "def get_pseudo_chain(length, genesis_block):\n blocks = []\n for slot in range(length * 3):\n blocks.append(\n genesis_block.copy(\n slot_number=slot,\n parent_hash=blocks[slot - 1].hash if slot > 0 else ZERO_HASH32\n )\n )\n\n return blocks", "def get_copy(self) -> Path:\n snapshot_source_dir = PERSISTENCE_SNAPSHOTS_DIR / self.version\n snapshot_copy_dir = Path(TemporaryDirectory().name) / self.version\n copytree(src=snapshot_source_dir, dst=snapshot_copy_dir)\n return snapshot_copy_dir", "def clone(self, replica=None):\n\n\t\tif replica == None:\n\t\t\treplica = Molecule()\n\n\t\treplica.copy(self)\n\n\t\tfor chain in self.chain:\n\t\t\tnewchain = chain.clone()\n\t\t\treplica.addChain(newchain)\n\n\t\treturn replica", "def clone(self):\n out, err, code = self.command( [\"git\", \"clone\", self.repo] )\n\n # find the directory into which the\n self.directory = self.path\n for path in os.listdir(self.path):\n self.directory = os.path.join(self.path,path)\n break", "def get_manifest_repo():\n # root dir is cwd for now\n cdup = GitRepo(\".\").command_process([\"rev-parse\", \"--show-cdup\"],\n capture_stdout=True,\n capture_stderr=True)\n if cdup.Wait() != 0:\n return None\n cdup_path = cdup.stdout.strip()\n if cdup_path:\n return GitRepo(cdup_path)\n else:\n return GitRepo(\".\")" ]
[ "0.80581087", "0.76706725", "0.57795644", "0.5629695", "0.55750954", "0.5569269", "0.55409795", "0.54759246", "0.5336027", "0.5335274", "0.5326693", "0.5305052", "0.527367", "0.52459395", "0.52345735", "0.5233602", "0.5205131", "0.5157778", "0.5122811", "0.5122811", "0.5089649", "0.5057036", "0.50489175", "0.504545", "0.504545", "0.5027126", "0.50197434", "0.50126857", "0.50020707", "0.49569443" ]
0.7822953
1
get volume's clone chain.
def get_volume_clone_chain(self, volume): LOG.debug('get_volume_clone_chain starts.') volume_name = 'volume-%s' % encodeutils.safe_encode(volume["id"]) pool_name = self.configuration.rbd_pool clone_chain = self._get_full_clone_chain(pool_name, volume_name, None) LOG.debug('volume clone chain: %s', clone_chain) LOG.debug('get_volume_clone_chain finished.') return clone_chain
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_snapshot_clone_chain(self, snapshot):\n LOG.debug('get_snapshot_clone_chain starts.')\n volume_name = 'volume-%s' % \\\n encodeutils.safe_encode(snapshot[\"volume_id\"])\n snap_name = 'snapshot-%s' % encodeutils.safe_encode(snapshot['id'])\n pool_name = self.configuration.rbd_pool\n clone_chain = self._get_full_clone_chain(pool_name, volume_name,\n snap_name)\n LOG.debug('snapshot clone chain: %s', clone_chain)\n LOG.debug('get_snapshot_clone_chain finished.')\n return clone_chain", "def _get_full_clone_chain(self, pool_name, volume_name, snap_name=None):\n full_clone_chain = dict()\n # get children clone chain.\n obj = self._generate_chain_obj(pool_name, volume_name, snap_name)\n self._get_children_chain(obj[\"children\"], pool_name, volume_name,\n snap_name)\n # get parent clone chain.\n self._get_parent_chain(full_clone_chain, obj, pool_name, volume_name,\n snap_name)\n return full_clone_chain", "def get_chain(self):\n return self.chain", "def get_chain(self):\n return self.chain", "def get_chain(self):\n return self.segment.chain", "def get_chain(self):\n return self.segment.chain", "def create_cloned_volume(self, vol, src_vref):\n self.authenticate_user()\n name = self._get_volume_name(vol)\n srcname = self._get_vipr_volume_name(src_vref)\n number_of_volumes = 1\n\n try:\n if(src_vref['consistencygroup_id']):\n raise vipr_utils.SOSError(\n vipr_utils.SOSError.SOS_FAILURE_ERR,\n \"Clone can't be taken individually on a volume\" + \\\n \" that is part of a Consistency Group\")\n except AttributeError as e:\n LOG.info(\"No Consistency Group associated with the volume\")\n\n try:\n (storageresType, storageresTypename) = self.volume_obj.get_storageAttributes(\n srcname, None, None)\n\n resource_id = self.volume_obj.storageResource_query(storageresType,\n srcname,\n None,\n None,\n self.configuration.vipr_project,\n self.configuration.vipr_tenant)\n\n self.volume_obj.clone(\n name,\n number_of_volumes,\n resource_id,\n sync=True)\n\n clone_vol_path = self.configuration.vipr_tenant + \"/\" + self.configuration.vipr_project + \"/\" + name\n detachable = self.volume_obj.is_volume_detachable(clone_vol_path)\n LOG.info(\"Is volume detachable : \" + str(detachable))\n \n #detach it from the source volume immediately after creation\n if(detachable):\n self.volume_obj.volume_clone_detach(\"\",clone_vol_path, True)\n\n except IndexError as e:\n LOG.exception(\"Volume clone detach returned empty task list\")\n\n except vipr_utils.SOSError as e:\n if(e.err_code == vipr_utils.SOSError.SOS_FAILURE_ERR):\n raise vipr_utils.SOSError(\n vipr_utils.SOSError.SOS_FAILURE_ERR,\n \"Volume \" + name + \": clone failed\\n\" + e.err_text)\n else:\n with excutils.save_and_reraise_exception():\n LOG.exception(_(\"Volume : {%s} clone failed\") % name)", "def create_cloned_volume(self, volume, src_vref):\n clone_name = self.get_volume_name(volume.id)\n src_name = self.get_volume_name(src_vref.id)\n src_vol = self.client.search(\"volumes\", name=src_name)\n src_map = self.client.search(\"mappings\", volume=src_vol)\n src_attach_info = dest_attach_info = None\n if src_map.total != 0:\n msg = _(\"K2 driver does not support clone of an attached volume. 
\"\n \"To get this done, create a snapshot from the attached \"\n \"volume and then create a volume from the snapshot.\")\n LOG.error(msg)\n raise KaminarioCinderDriverException(reason=msg)\n try:\n properties = volume_utils.brick_get_connector_properties(\n self.configuration.use_multipath_for_image_xfer,\n self.configuration.enforce_multipath_for_image_xfer)\n conn = self.initialize_connection(src_vref, properties)\n src_attach_info = self._connect_device(conn)\n self.create_volume(volume)\n conn = self.initialize_connection(volume, properties)\n dest_attach_info = self._connect_device(conn)\n volume_utils.copy_volume(src_attach_info['device']['path'],\n dest_attach_info['device']['path'],\n src_vref.size * units.Ki,\n self.configuration.volume_dd_blocksize,\n sparse=True)\n self._kaminario_disconnect_volume(src_attach_info,\n dest_attach_info)\n self.terminate_connection(volume, properties)\n self.terminate_connection(src_vref, properties)\n except Exception as ex:\n self._kaminario_disconnect_volume(src_attach_info,\n dest_attach_info)\n self.terminate_connection(src_vref, properties)\n self.terminate_connection(volume, properties)\n self.delete_volume(volume)\n LOG.exception(\"Create a clone: %s failed.\", clone_name)\n raise KaminarioCinderDriverException(reason=ex)", "def _get_clone_snapshot_name(self, volume):\n return 'cinder-clone-snapshot-%(id)s' % volume", "def get_chain(self):\n app_process = sqlite3.connect('app_process::memory:', check_same_thread=False)\n app_process_cursor = app_process.cursor()\n app_process_cursor.execute(\"SELECT * FROM blockchain LIMIT 1\")\n sol = app_process_cursor.fetchall()\n app_process.commit()\n app_process.close()\n print(sol, \"in get chain\")\n return sol", "def clone(self):\n out, err, code = self.command( [\"git\", \"clone\", self.repo] )\n\n # find the directory into which the\n self.directory = self.path\n for path in os.listdir(self.path):\n self.directory = os.path.join(self.path,path)\n break", "def create_cloned_volume(self, volume, src_vref):\n LOG.info(_LI('new cloned volume: %s'), volume['name'])\n LOG.info(_LI('source volume for cloning: %s'), src_vref['name'])\n\n snapshot = {'volume_name': src_vref['name'],\n 'volume_id': src_vref['id'],\n 'volume_size': src_vref['size'],\n 'name': self._create_snapshot_name()}\n\n self.create_snapshot(snapshot)\n return self.create_volume_from_snapshot(volume, snapshot,\n method='MOVE')", "def chain_serial(self):\n return self.structure.chain_serial[self.mask]", "def service_chain(self):\n if hasattr(self, 'original_volume_service'):\n return [self] + self.original_volume_service.service_chain\n return [self]", "def getChain(self, chain):\n\n\t\tfor i in self.chain:\n\t\t\tif i.name == chain:\n\t\t\t\treturn i\n\n\t\treturn None", "def copy(self):\n new_chain = []\n for block in self.chain:\n if block.index == 0:\n new_chain.append(self.create_genesis())\n else:\n new_block = Block()\n new_block.deserialize(block.serialize())\n new_chain.append(new_block)\n\n return BlockChain(new_chain)", "def create_cloned_volume(self, volume, src_vref):\n LOG.info('Creating clone of volume: %s', src_vref['id'])\n snapshot = {'volume_name': src_vref['name'],\n 'volume_id': src_vref['id'],\n 'volume_size': src_vref['size'],\n 'name': self._get_clone_snapshot_name(volume)}\n # We don't delete this snapshot, because this snapshot will be origin\n # of new volume. 
This snapshot will be automatically promoted by NMS\n # when user will delete its origin.\n self.create_snapshot(snapshot)\n try:\n return self.create_volume_from_snapshot(volume, snapshot)\n except utils.NexentaException:\n LOG.error('Volume creation failed, deleting created snapshot '\n '%(volume_name)s@%(name)s', snapshot)\n try:\n self.delete_snapshot(snapshot)\n except (utils.NexentaException, exception.SnapshotIsBusy):\n LOG.warning('Failed to delete zfs snapshot '\n '%(volume_name)s@%(name)s', snapshot)\n raise", "def get_chain():\n response = {\n 'chain': blockchain.chain,\n 'length':len(blockchain.chain)\n }\n\n return jsonify(response), 200", "def repos_clone_steps(self):\n platform = self.platform\n # required by coho tools to correctly resolve repo location\n if platform == \"blackberry10\":\n platform = \"blackberry\"\n return [\n ShellCommand(command=[\"node\", \"medic/checkout.js\", \"--path=medic/repos.json\", \"--cat=CORDOVA-MSPEC\", \"--releasebranch=\" + CONFIG.branch_release], workdir='build', haltOnFailure=True, description='Clone Mobilespec'),\n ShellCommand(command=[\"node\", \"medic/checkout.js\", \"--path=medic/repos.json\", \"--cat=CORDOVA-PLUGIN\", \"--releasebranch=\" + CONFIG.branch_release], workdir='build', haltOnFailure=True, description='Clone Plugins'),\n ShellCommand(command=[\"node\", \"medic/checkout.js\", \"--path=medic/repos.json\", \"--cat=CORDOVA-\" + platform, \"--releasebranch=\" + CONFIG.branch_release], workdir='build', haltOnFailure=True, description='Clone Platform'),\n ShellCommand(command=[\"node\", \"medic/checkout.js\", \"--path=medic/repos.json\", \"--cat=CORDOVA-JS\", \"--releasebranch=\" + CONFIG.branch_release], workdir='build', haltOnFailure=True, description='Clone JS'),\n ]", "def get_chain(self):\n return self.fragment.chain", "def pv_chain(self):\n index = self._ordered_input_names.index('pv_chain')\n return self._inputs[index]", "def command_clone(\n ctx: \"PlanemoCliContext\",\n src: str,\n dest: str,\n mirror: bool = False,\n branch: Optional[str] = None,\n depth: Optional[int] = None,\n) -> List[str]:\n cmd = [\"git\", \"clone\"]\n if mirror:\n cmd.append(\"--mirror\")\n if branch is not None:\n cmd.extend([\"--branch\", branch])\n if depth is not None:\n cmd.extend([\"--depth\", str(depth)])\n if urllib.parse.urlparse(src).scheme == \"\":\n src = f\"file://{src}\"\n cmd.extend([src, dest])\n return cmd", "def get_pseudo_chain(length, genesis_block):\n blocks = []\n for slot in range(length * 3):\n blocks.append(\n genesis_block.copy(\n slot_number=slot,\n parent_hash=blocks[slot - 1].hash if slot > 0 else ZERO_HASH32\n )\n )\n\n return blocks", "def snap_clone(mnode, snapname, clonename):\n cmd = \"gluster snapshot clone %s %s --mode=script\" % (clonename, snapname)\n return g.run(mnode, cmd)", "def chain(self, chain_id, model_num = 0):\n return self.struct[model_num][chain_id]", "def chain(self):\n return self._chain", "def test_create_cloned_volume(self, mock_ghn):\n ctxt = context.get_admin_context()\n extra_specs = {}\n type_ref = volume_types.create(ctxt, 'hgst-1', extra_specs)\n orig = {'id': '1', 'name': 'volume1', 'display_name': '',\n 'volume_type_id': type_ref['id'], 'size': 10,\n 'provider_id': 'space_orig'}\n clone = {'id': '2', 'name': 'clone1', 'display_name': '',\n 'volume_type_id': type_ref['id'], 'size': 10}\n pid = self.driver.create_cloned_volume(clone, orig)\n # We must copy entier underlying storage, ~12GB, not just 10GB\n self.assertEqual(11444 * units.Mi, self.dd_count)\n self.assertEqual('1M', 
self.bs)\n # Check space-create command\n expected = {'redundancy': '0', 'group': 'xanadu',\n 'name': 'clone1', 'mode': '0777',\n 'user': 'kane', 'net': 'net1',\n 'storageserver': 'stor1:gbd0,stor2:gbd0,',\n 'size': '12'}\n self.assertDictMatch(expected, self.created)\n # Check the returned provider\n expected_pid = {'provider_id': 'clone1'}\n self.assertDictMatch(expected_pid, pid)", "def GetClone(self, *args, **kwargs):\n pass", "def _clone_to(vcs, location):\n try:\n subprocess.Popen([vcs.type, \"clone\", vcs.url], cwd=location)\n except:\n return location", "def create_cloned_volume(self, volume, src_vref):\n # Form the snapshot structure.\n snapshot = {'id': uuid.uuid4().__str__(),\n 'volume_id': src_vref['id'],\n 'volume': src_vref}\n\n # Create snapshot.\n self.create_snapshot(snapshot)\n\n try:\n # Create volume from snapshot.\n lun_info = self.create_volume_from_snapshot(volume, snapshot)\n finally:\n try:\n # Delete snapshot.\n self.delete_snapshot(snapshot)\n except exception.VolumeBackendAPIException:\n LOG.warning(_LW(\n 'Failure deleting the snapshot %(snapshot_id)s '\n 'of volume %(volume_id)s.'),\n {'snapshot_id': snapshot['id'],\n 'volume_id': src_vref['id']},)\n\n return {'provider_location': lun_info['ID'],\n 'lun_info': lun_info}" ]
[ "0.73028296", "0.7090097", "0.5828093", "0.5828093", "0.5751596", "0.5751596", "0.563764", "0.56228733", "0.55641687", "0.5510754", "0.5431384", "0.5429335", "0.542763", "0.54057914", "0.53919584", "0.5336411", "0.53236985", "0.52903306", "0.52415264", "0.52111065", "0.5160875", "0.51499605", "0.5142699", "0.5113809", "0.508342", "0.5066907", "0.5061643", "0.5055392", "0.5035701", "0.5011806" ]
0.82059187
0
get snapshot's clone chain
def get_snapshot_clone_chain(self, snapshot): LOG.debug('get_snapshot_clone_chain starts.') volume_name = 'volume-%s' % \ encodeutils.safe_encode(snapshot["volume_id"]) snap_name = 'snapshot-%s' % encodeutils.safe_encode(snapshot['id']) pool_name = self.configuration.rbd_pool clone_chain = self._get_full_clone_chain(pool_name, volume_name, snap_name) LOG.debug('snapshot clone chain: %s', clone_chain) LOG.debug('get_snapshot_clone_chain finished.') return clone_chain
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_full_clone_chain(self, pool_name, volume_name, snap_name=None):\n full_clone_chain = dict()\n # get children clone chain.\n obj = self._generate_chain_obj(pool_name, volume_name, snap_name)\n self._get_children_chain(obj[\"children\"], pool_name, volume_name,\n snap_name)\n # get parent clone chain.\n self._get_parent_chain(full_clone_chain, obj, pool_name, volume_name,\n snap_name)\n return full_clone_chain", "def clone(self):", "def clone(self):\n return None", "def get_volume_clone_chain(self, volume):\n LOG.debug('get_volume_clone_chain starts.')\n volume_name = 'volume-%s' % encodeutils.safe_encode(volume[\"id\"])\n pool_name = self.configuration.rbd_pool\n clone_chain = self._get_full_clone_chain(pool_name, volume_name, None)\n LOG.debug('volume clone chain: %s', clone_chain)\n LOG.debug('get_volume_clone_chain finished.')\n return clone_chain", "def get_immutable_clone(self) -> Snapshot:\n return Snapshot(\n signers=frozenset(self.signers),\n block_hash=self.block_hash,\n votes=frozenset(self.votes),\n tallies=self.tallies.copy()\n )", "def snap_clone(mnode, snapname, clonename):\n cmd = \"gluster snapshot clone %s %s --mode=script\" % (clonename, snapname)\n return g.run(mnode, cmd)", "def snapshot(self):\n snapshot = super(VirtualMachineDAO, self).snapshot()\n for entry in snapshot:\n vm = entry.get(VirtualMachineDAO.INNER_OBJ)\n vm['network'] = VMNetworkDAO(self.session, vm.get(VirtualMachineDAO.FOREIGN_KEY)).snapshot()\n return snapshot", "def GetClone(self, *args, **kwargs):\n pass", "def snapshot(self):\n return (self.block_header.state_root, self.chaindb.snapshot())", "def clone(self):\n return _libsbml.FbcOr_clone(self)", "def get_mutable_clone(self, block_hash: Hash32) -> 'MutableSnapshot':\n return MutableSnapshot(\n signers=list(self.signers),\n block_hash=block_hash,\n votes=list(self.votes),\n tallies=self.tallies.copy()\n )", "def take_snapshot(self):\r\n self.snapshot = self.blockA, self.blockB, self.blockA_locked, self.blockB_locked, self.blockA_free, \\\r\n self.blockB_free, copy.copy(self.blockA_cells), copy.copy(self.blockB_cells), self.cut", "def copy(self):\n new_chain = []\n for block in self.chain:\n if block.index == 0:\n new_chain.append(self.create_genesis())\n else:\n new_block = Block()\n new_block.deserialize(block.serialize())\n new_chain.append(new_block)\n\n return BlockChain(new_chain)", "def clone(self):\n return _libsbml.FbcAnd_clone(self)", "def snapshot(self):\n pass", "def clone(self):\n return self", "def test_clone_scenario(self):\n pass", "def clone(self):\n return self.copy()", "def clone(self):\n raise NotImplementedError", "def __deepcopy__(self, memo):\n chain = Chain(model_id = self.model_id,\n chain_id = self.chain_id)\n for fragment in self.fragment_list:\n chain.add_fragment(copy.deepcopy(fragment, memo), True)\n return chain", "def get_snapshots_tree(self):\n\t\treturn Job(SDK.PrlVm_GetSnapshotsTree(self.handle)[0])", "def clone_snapshot(self, pool, project, snapshot, clone, arg):\n self.verify_avail_space(pool, project, clone['id'], clone['size'])\n svc = self.clone_path % (pool, project,\n snapshot['share_id'],\n snapshot['id'])\n ret = self.rclient.put(svc, arg)\n if ret.status != restclient.Status.CREATED:\n exception_msg = (_('Error cloning '\n 'snapshot: %(snapshot)s on '\n 'share: %(share)s of '\n 'Pool: %(pool)s '\n 'project: %(project)s '\n 'return code: %(ret.status)d '\n 'message: %(ret.data)s.')\n % {'snapshot': snapshot['id'],\n 'share': snapshot['share_id'],\n 'pool': pool,\n 'project': project,\n 
'ret.status': ret.status,\n 'ret.data': ret.data})\n LOG.error(exception_msg)\n raise exception.ShareBackendException(msg=exception_msg)", "def clone(self):\n return _libsbml.SBase_clone(self)", "def clone(self):\n memo = dict()\n c = self._clone(memo)\n c._clone_rip(memo)\n return c", "def clone(self):\n return self.__class__(self, self.spectrum, wallet=self.wallet)", "def clone(self):\n return _libsbml.ModelHistory_clone(self)", "def clone(self):\n return copy.deepcopy(self)", "def clone_state(self):\n return self.strategy['state_handler'].clone(self.state)", "def test_clone_identical(self, cosmo):\n assert cosmo.clone() is cosmo", "def clone(self):\n cloned = org_copy.deepcopy(self)\n return cloned" ]
[ "0.7078835", "0.635539", "0.6342894", "0.6303413", "0.62447584", "0.6167417", "0.6093087", "0.6042666", "0.60092366", "0.58993983", "0.584199", "0.5835145", "0.583218", "0.5807996", "0.5806754", "0.5793995", "0.57841635", "0.575718", "0.57200295", "0.5693076", "0.5691469", "0.56806016", "0.56436193", "0.5636959", "0.56368923", "0.56295276", "0.5589237", "0.554936", "0.5511449", "0.55080944" ]
0.8238623
0
From a mixed list of ensembl IDs and associated gene names, retrieve information from the corresponding gene
def parse_id_name_list(mixed_list): logger.info('Total number of genes in the list: {}'.format(len(mixed_list))) bad_names_count = [] multiple_ids_per_name = [] a = re.compile('ENSG\d{11}') for gene_id in mixed_list: # the item is an ensembl gene id # Check if the name is an Ensembl gene ID (ENSG) if a.match(gene_id): try: g_entry = ensembl_release.gene_by_id(gene_id) except ValueError: bad_names_count.append(gene_id) continue # Else if the name is an associated gene name else: try: g_entry = ensembl_release.genes_by_name(gene_id) # Check if unique match associated gene name / ensembl ID if len(g_entry) == 1: g_entry = g_entry[0] else: multiple_ids_per_name.append(gene_id) except ValueError: bad_names_count.append(gene_id) continue yield (g_entry) logger.warn('Number of genes failing entry lookup (probably alias gene problem): {0} {1} [DISCARDED]'.format(len(bad_names_count), bad_names_count)) logger.warn('Number of gene names associated with more than 1 ensembl ID: {0} {1} [DISCARDED]'.format(len(multiple_ids_per_name), multiple_ids_per_name))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_genes(self, genes: Union[str, List[str]]) -> List[str]:\n if isinstance(genes, str):\n up = pd.read_table(genes, header=None, comment=\"#\", dtype=str)\n ups= up.values.astype(str)\n ups = list(np.squeeze(ups))\n elif isinstance(genes, (list, tuple)):\n ups = genes\n else:\n raise Exception(\"genes must be filepath, list or tuple\")\n # filter genes\n ups_new = [str(i) for i in ups if str(i) in self.genes]\n\n if len(ups_new) < 1: \n raise Exception(\"No genes found. Please input proper Entrez id\")\n return ups_new", "def GetGene(ids, base_url=BASE_URL, fout=None):\n n_out=0; tags=None;\n for id_this in ids:\n gene = rest.Utils.GetURL(base_url+'/gene/{0}'.format(id_this), parse_json=True)\n logging.debug(json.dumps(gene, indent=2))\n if not tags:\n tags = list(gene.keys())\n fout.write(\"\\t\".join(tags)+\"\\n\")\n vals = [(str(gene[tag]) if tag in gene else \"\") for tag in tags]\n fout.write(\"\\t\".join(vals)+\"\\n\")\n n_out+=1\n logging.info(\"n_out: %d\"%(n_out))", "def extract_gene_data(info):\n gene_id = None\n gene_type = None\n for i in info:\n if i.startswith('gene_id'):\n gene_id = i.split(\" \", 1)[1].replace('\"', '')\n elif i.startswith('gene_type'):\n gene_type = i.split(\" \", 1)[1].replace('\"', '')\n\n assert gene_id is not None, 'No gene_id found {0}'.format(info)\n assert gene_type is not None, 'No gene_type found {0}'.format(info)\n return gene_id, gene_type", "def get_gene_values(hotel_ids):\n hotel_genes = {}\n subcats = get_subcat_axes()\n cursor = conn.cursor()\n cursor.execute(\n \"\"\"\n SELECT hotel_id, genome\n FROM hotel_genome\n WHERE hotel_id in (%s)\n \"\"\" % \",\".join([str(h) for h in hotel_ids])\n )\n for hotel_id, genome_str in cursor.fetchall():\n genome = [float(g.strip()) for g in genome_str.split(\",\")]\n hotel_genes[hotel_id] = get_hotel_genes_by_subcat(\n subcats, genome)\n return subcats, hotel_genes", "def fetch_gene_info(gene_list, batch_size=100):\n print(\"Looking up additional information about the genes identified by BLAST...\")\n post_handle = Entrez.epost(db=\"nucleotide\", id=\",\".join(gene_list))\n result = Entrez.read(post_handle)\n post_handle.close()\n webenv = result[\"WebEnv\"]\n query_key = result[\"QueryKey\"]\n count = len(gene_list)\n OUT = open(\"Log_Directory/fetch_results.txt\", \"w\")\n for start in range(0, count, batch_size):\n end = min(count, start + batch_size)\n print(\"Fetching records %i through %i\" % (start + 1, end))\n attempt = 0\n while attempt < 3:\n attempt += 1\n try:\n fetch_handle = Entrez.efetch(db=\"nucleotide\", rettype=\"gb\", retmode=\"text\", retstart=start, retmax=batch_size,\n webenv=webenv, query_key=query_key)\n except HTTPError as err:\n if 500 <= err.code <= 599:\n print(\"Received error from server %s\" % err)\n print(\"Attempt %i of 3\" % attempt)\n time.sleep(15)\n else:\n raise\n OUT.write(fetch_handle.read())\n fetch_handle.close()\n OUT.close()", "def genes():\n data=pd.read_csv(config['stan'], sep=\" \")\n return list(set(data['Gene_id']))", "def get_genes(infile,outfile):\n gene_list = []\n with open(infile) as gene:\n tag = False\n for line in gene:\n if line.startswith('name'):\n tag = True\n continue\n if tag:\n items = line.split()\n if len(items) > 0:\n gene_list.append(items[0])\n gene_list = gene_list[1:-7]\n with open(outfile, 'w') as outfile:\n for i in gene_list:\n outfile.write(i+'\\n')\n return True", "def map_probes(probeset, entrez_ids): \n entrez_idx = None\n mapping = {}\n with open(probeset) as probes:\n for line in probes:\n if line.startswith('ID'):\n 
entrez_idx = line.split('\\t').index('ENTREZ_GENE_ID')\n elif entrez_idx:\n # if the index has been defined then we're past the header\n row = [x.strip() for x in line.split('\\t')]\n # if we're doing percentile rank, we need all the mappings, otherwise can just track the mappings of interest\n if PERCENTILE_RANK:\n if '///' in row[entrez_idx]:\n # multile genes add an entry for every gene overlapped by the probe\n # TODO: FIX; THIS IS A MANY TO MANY MAPPING ISSUE \n # since this only happens once in this dataset, I'm just using the first one but can also use last (or develop a solution that works for all cases...)\n mapping[row[0]] = row[entrez_idx].split(' /// ')[0]\n \"\"\" # option to use the last one \n for entrez_id in [x for x in row[entrez_idx].split(' /// ')]:\n print('Entrez ID:'+str(entrez_id)+' in probe that maps to multiple genes')\n mapping[row[0]] = entrez_id[0] \n \"\"\"\n print('MANY TO MANY: '+str(row[0])+\"->\"+str(row[entrez_idx]))\n else:\n mapping[row[0]] = row[entrez_idx]\n elif row[entrez_idx] in entrez_ids:\n mapping[row[0]] = row[entrez_idx]\n\n return mapping", "def gene_search(\n self,\n genes:list=[\"MYL2\"], \n ):\n try: \n assert isinstance(genes, list)\n except AssertionError as e:\n e.args += (\"[genes] argument needs to be type(list)\", )\n raise\n \n\n self.genes = genes\n\n self.requestURL = f\"https://www.ebi.ac.uk/proteins/api/proteins?offset=0&size=100&gene={'%2C%20'.join(genes)}&organism=human\"\n \n r = requests.get(self.requestURL, headers={ \"Accept\" : \"application/json\"})\n \n if not r.ok:\n r.raise_for_status()\n sys.exit()\n\n self.responseBody = r.text\n self.data = json.loads(self.responseBody)\n\n return self.responseBody", "def _parse_genes(chrom: str, db: FeatureDB) -> List[Dict]:\n parsed_genes = []\n for gene in db.region(\n seqid=chrom, featuretype=[GFF3GeneFeatureTypes.GENE.value, GFF3GeneFeatureTypes.PSEUDOGENE.value]\n ):\n gene_id = gene.attributes.get(\"gene_id\", [None])[0]\n locus_tag = gene.attributes.get(\"locus_tag\", [None])[0]\n gene_symbol = gene.attributes.get(\"gene_name\", [gene.attributes.get(\"gene_symbol\", None)])[0]\n gene_biotype = gene.attributes.get(\"gene_biotype\", [gene.attributes.get(\"gene_type\", None)])[0]\n gene_qualifiers = {x: y for x, y in gene.attributes.items() if not BioCantorGFF3ReservedQualifiers.has_value(x)}\n\n if Biotype.has_name(gene_biotype):\n gene_biotype = Biotype[gene_biotype]\n elif gene_biotype:\n gene_qualifiers[\"provided_biotype\"] = [gene_biotype]\n gene_biotype = None\n\n transcripts = []\n for i, transcript in enumerate(db.children(gene, level=1)):\n\n transcript_id = transcript.attributes.get(\"transcript_id\", [None])[0]\n transcript_symbol = transcript.attributes.get(\n \"transcript_name\", [gene.attributes.get(\"transcript_name\", None)]\n )[0]\n transcript_qualifiers = {\n x: y for x, y in transcript.attributes.items() if not BioCantorGFF3ReservedQualifiers.has_value(x)\n }\n provided_transcript_biotype = gene.attributes.get(\n \"transcript_biotype\", [gene.attributes.get(\"transcript_type\", None)]\n )[0]\n\n if Biotype.has_name(provided_transcript_biotype):\n transcript_biotype = Biotype[provided_transcript_biotype]\n else:\n # keep track of what they gave us, that did not match the enum\n if provided_transcript_biotype:\n transcript_qualifiers[\"provided_transcript_biotype\"] = provided_transcript_biotype\n # use the gene biotype\n transcript_biotype = gene_biotype\n\n if locus_tag is not None:\n if transcript_id is None:\n transcript_id = locus_tag\n if 
transcript_symbol is None:\n transcript_symbol = locus_tag\n\n exons = []\n cds = []\n for feature in db.children(transcript, level=1):\n if feature.featuretype == GFF3GeneFeatureTypes.EXON.value:\n exons.append(feature)\n elif feature.featuretype == GFF3GeneFeatureTypes.CDS.value:\n cds.append(feature)\n else:\n logger.warning(f\"Found non CDS/exon child of transcript in feature: {feature}\")\n\n # This gene has only a CDS/exon feature as its direct child\n # therefore, we really have one interval here\n if len(exons) == 0:\n if transcript.featuretype not in [\n GFF3GeneFeatureTypes.CDS.value,\n GFF3GeneFeatureTypes.EXON.value,\n ]:\n logger.warning(f\"Gene child feature has type {transcript.featuretype}; skipping\")\n continue\n logger.info(f\"gene {gene_id} had no transcript feature\")\n if transcript.featuretype == GFF3GeneFeatureTypes.CDS.value:\n exons = cds = [transcript]\n else:\n exons = [transcript]\n\n exons = sorted(exons, key=lambda e: e.start)\n exon_starts = [x.start - 1 for x in exons]\n exon_ends = [x.end for x in exons]\n start = exon_starts[0]\n end = exon_ends[-1]\n assert start <= end\n strand = Strand.from_symbol(transcript.strand)\n\n if len(cds) == 0:\n cds_starts = cds_ends = cds_frames = None\n protein_id = product = None\n else:\n # sort by start and end in case two blocks start at the same position\n cds = sorted(cds, key=lambda c: (c.start, c.end))\n cds_starts = [x.start - 1 for x in cds]\n cds_ends = [x.end for x in cds]\n cds_frames = [CDSPhase.from_int(int(f.frame)).to_frame().name for f in cds]\n # NCBI encodes protein IDs and products on the CDS feature\n protein_id = cds[0].attributes.get(\"protein_id\", [None])[0]\n product = cds[0].attributes.get(\"product\", [None])[0]\n\n tx = dict(\n exon_starts=exon_starts,\n exon_ends=exon_ends,\n strand=strand.name,\n cds_starts=cds_starts,\n cds_ends=cds_ends,\n cds_frames=cds_frames,\n qualifiers=filter_and_sort_qualifiers(transcript_qualifiers),\n is_primary_tx=False,\n transcript_id=transcript_id,\n transcript_type=transcript_biotype.name if transcript_biotype else transcript_biotype,\n transcript_symbol=transcript_symbol,\n sequence_name=chrom,\n protein_id=protein_id,\n product=product,\n )\n transcripts.append(tx)\n\n if len(transcripts) == 0:\n # infer a transcript for a gene\n logger.info(f\"Inferring a transcript for gene {gene_symbol}\")\n tx = dict(\n exon_starts=[gene.start],\n exon_ends=[gene.end],\n strand=Strand.from_symbol(gene.strand).name,\n qualifiers=gene_qualifiers,\n transcript_type=gene_biotype.name if gene_biotype else gene_biotype,\n transcript_id=gene_id,\n sequence_name=gene.seqid,\n )\n transcripts.append(tx)\n\n gene = dict(\n transcripts=transcripts,\n gene_id=gene_id,\n gene_symbol=gene_symbol,\n locus_tag=locus_tag,\n gene_type=gene_biotype.name if gene_biotype else gene_biotype,\n qualifiers=filter_and_sort_qualifiers(gene_qualifiers),\n sequence_name=chrom,\n )\n\n parsed_genes.append(gene)\n return parsed_genes", "def get_gene(identifier):\n for store in [GENES, ALIASES]:\n genes = store.get(identifier, None)\n if genes and len(genes) == 1:\n return genes\n else:\n raise ValueError('gene reference does not exist or refers to multiple genes')", "def lookup_gene_information(eck12_nums, num_threads=20,\n wsdl=\"http://regulondb.ccg.unam.mx/webservices/Gene.jws?wsdl\"):\n\n from .wsdl import ThreadedWSDLFetcher\n from Queue import Queue\n # use a threaded approach to server querying\n tasks = Queue()\n for i in range(num_threads):\n thrd = ThreadedWSDLFetcher(tasks, wsdl)\n 
thrd.start()\n # get all gene descriptions from RegulonDB\n descriptions = list()\n for gene in eck12_nums:\n tasks.put((\"getGene\", gene, descriptions))\n tasks.join()\n return descriptions", "def _load_genes(self):\n with open(self.gene_file_path, 'r') as gene_file:\n csv_reader = csv.reader(gene_file, delimiter=',')\n for gene in csv_reader:\n yield (gene[self.GENE_NAME_IDX], gene[self.GENE_ID_IDX])", "def GetGeneName(arg):\n\n genbank = ChromUnzip(arg)\n \n p1=re.compile(r'(?:ACCESSION\\s+)(\\w+\\d+)')\n p6=re.compile(r'(?:/gene=\")(.+?)(?:\"\\s+)')\n\n gene_name_dict={}\n \n for entry in genbank:\n gene_list=[] \n gene_it_6=p6.finditer(entry)\n gene_it_1=p1.finditer(entry) \n for hit in gene_it_6:\n gene_list.append(hit.group(1))\n for item in gene_it_1:\n gene_name_dict[item.group(1)]=gene_list[0]\n \n return gene_name_dict", "def get_gene_by_id(gene_query):\n\n\tgene_query_wildcard = [ gene+'%' for gene in gene_query ]\n\tsql_query = \"SELECT * FROM genes WHERE \" + \"gene_id LIKE %s OR \" * len(gene_query_wildcard)\n\tsql_query = sql_query[:-3]\n\n\tdf = pd.read_sql(sql_query, params=(gene_query_wildcard,), con=db.get_engine(current_app, 'methylation_data'))\n\n\t#reorder genes to original order since SQL doesn't keep order.\n\tnew_index = []\n\tfor index, row in df.iterrows():\n\t\tfor i, gene_id in enumerate(gene_query):\n\t\t\tif gene_id in row['gene_id']:\n\t\t\t\tnew_index.append(i)\n\t\t\t\tbreak\n\tdf.index = new_index\n\tdf.sort_index(inplace=True)\n\n\treturn df.to_dict('records')", "def get_entrezid(gene):\n entrezurl = \"http://mygene.info/v3/query?q=\"\n entrezurl = entrezurl+gene\n\n res = requests.get(entrezurl)\n results = pandas.read_json(StringIO(res.text))\n\n entrezid = []\n if results.empty:\n return entrezid\n\n for i in results.ix[:, 0]:\n key = i.keys()\n value = i.values()\n for cntr, k in enumerate(key):\n if k == 'entrezgene':\n entrezid.append(value[cntr])\n return entrezid", "def get_uniprot_gene_info(uniprot_result):\n gene_lines = [l for l in uniprot_result.split('\\n') if l.startswith('GN')]\n\n gene_names = []\n\n for gn_line in gene_lines:\n parts = gn_line[2:].split(';')\n for p in parts:\n p = p.strip()\n if p.startswith('Name='):\n gene_names.append(p[5:])\n elif p.startswith('Synonyms='):\n gene_names += [s.strip() for s in p[9:].split(',')]\n\n return gene_names", "def mel_ncRNA_list(list):\n\tncRNA = [] #initiates list\n\tfor i in list:\n\t\tif i[2] == 'ncRNA':\n\t\t\tpreidRNA = i[8].split(';')[0]\n\t\t\t#[ID=FBgn0031208];Name=CG11023;Ontology_term=SO:0000010,SO:0000087,GO:0016929,GO:0016926;Dbxref=FlyBase:FBan0011023,FlyBase_Annotation_IDs:CG11023,GB_protein:ACZ94128,GB_protein:AAO41164,GB:AI944728,GB:AJ564667,GB_protein:CAD92822,GB:BF495604,UniProt/TrEMBL:Q86BM6,INTERPRO:IPR003653,GB_protein:AGB92323,UniProt/TrEMBL:M9PAY1,OrthoDB7_Drosophila:EOG796K1P,OrthoDB7_Diptera:EOG7X1604,EntrezGene:33155,UniProt/TrEMBL:E1JHP8,UniProt/TrEMBL:Q6KEV3,OrthoDB7_Insecta:EOG7Q8QM7,OrthoDB7_Arthropoda:EOG7R5K68,OrthoDB7_Metazoa:EOG7D59MP,InterologFinder:33155,BIOGRID:59420,FlyAtlas:CG11023-RA,GenomeRNAi:33155;gbunit=AE014134;derived_computed_cyto=21A5-21A5'\n\t\t\tncRNA.append(preidRNA)\n\treturn ncRNA\n\t#['ID=FBtr0309810', 'ID=FBtr0347585', 'ID=FBtr0345732', 'ID=FBtr0345733', 'ID=FBtr0344052', 'ID=FBtr0344053', 'ID=FBtr0344032', 'ID=FBtr0336836', 'ID=FBtr0336837', 'ID=FBtr0336984', 'ID=FBtr0336985', 'ID=FBtr0336986', 'ID=FBtr0336987', 'ID=FBtr0336988', 'ID=FBtr0347594', 'ID=FBtr0347595']", "def FindProIDfromGeneID(geneName, strainName, 
mRNA_protein_dict=mRNA_protein):\n\n # in the first step here, we must find the right gene id based on the part gene id from raven\n dir0 = '../0_332yeast_genomes/332_genome_annotations/proteome_old_species_id/'\n # strain1 = 'candida_sorboxylosa'\n strain_dir = dir0 + strainName + '.max.pep'\n protein_faa = open(strain_dir, 'r').readlines()\n protein_faa_id = [x for x in protein_faa if '>' in x]\n # next based on the above gene0, we find the related right mRNAid\n gene1 = [x.replace('>', '').strip('\\n') for x in protein_faa_id if geneName in x]\n protein_id = []\n for x in gene1:\n print(mRNA_protein_dict[x])\n protein_id.append(mRNA_protein_dict[x])\n return protein_id", "def gene_descriptors(civic_gid19):\n return [civic_gid19]", "def GetGeneAssociations(ids, base_url=BASE_URL, fout=None):\n n_out=0; gene_tags=[]; assn_tags=[];\n for id_this in ids:\n gene = rest.Utils.GetURL(base_url+'/gene/{0}?showAssociations=true'.format(id_this), parse_json=True)\n assns = gene[\"associations\"] if \"associations\" in gene else []\n if not assns: continue\n if not gene_tags:\n for tag in gene.keys():\n if type(gene[tag]) not in (list,dict): gene_tags.append(tag)\n for assn in assns:\n logging.debug(json.dumps(assn, indent=2))\n if not assn_tags:\n assn_tags = list(assn.keys())\n fout.write(\"\\t\".join(gene_tags+assn_tags)+\"\\n\")\n vals = [(str(gene[tag]) if tag in gene else \"\") for tag in gene_tags]+[(str(assn[tag]) if tag in assn else \"\") for tag in assn_tags]\n fout.write(\"\\t\".join(vals)+\"\\n\")\n n_out+=1\n logging.info(\"n_out: %d\"%(n_out))", "def gene_list_reader():\n \n relPath = \"data/genes_met_modelling_human.csv\"\n \n geneL = []\n with file_importer(relPath, encodeS = \"utf-8-sig\") as inpF:\n for inpLine in inpF:\n inpI = inpLine.strip(\"\\n'\").split(\".\")[0]\n if inpI not in geneL: geneL.append(inpI)\n \n return geneL", "def CreateGeneModels(genes_cmpt, transcripts_cmpt, exons_cmpt, utr3_cmpt, utr5_cmpt, cds_cmpt):\n gene_counter, gene_models = 1, []\n for gene_entry in genes_cmpt: ## Figure out the genes and transcripts associated feature \n if gene_entry in transcripts_cmpt:\n gene=init_gene() \n gene['id']=gene_counter\n gene['name']=gene_entry[1]\n gene['chr']=genes_cmpt[gene_entry]['chr']\n gene['source']=genes_cmpt[gene_entry]['source']\n gene['start']=genes_cmpt[gene_entry]['start']\n gene['stop']=genes_cmpt[gene_entry]['stop']\n gene['strand']=genes_cmpt[gene_entry]['strand']\n if not gene['strand'] in ['+', '-']:\n gene['strand']='.' # Strand info not known replaced with a dot symbol instead of None, ?, . 
etc.\n if len(transcripts_cmpt[gene_entry])>1:\n gene['is_alt_spliced'] = 1\n gene['is_alt'] = 1\n\t gtype=[]\n for tids in transcripts_cmpt[gene_entry]: ## transcript section related tags \n gene['transcripts'].append(tids['ID'])\n\t\tgtype.append(tids['type'])\n exon_cod, utr5_cod, utr3_cod, cds_cod = [], [], [], []\n if (gene['chr'], tids['ID']) in exons_cmpt:\n exon_cod = [[feat_exon['start'], feat_exon['stop']] for feat_exon in exons_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in utr5_cmpt:\n utr5_cod = [[feat_utr5['start'], feat_utr5['stop']] for feat_utr5 in utr5_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in utr3_cmpt:\n utr3_cod = [[feat_utr3['start'], feat_utr3['stop']] for feat_utr3 in utr3_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in cds_cmpt:\n cds_cod = [[feat_cds['start'], feat_cds['stop']] for feat_cds in cds_cmpt[(gene['chr'], tids['ID'])]]\n if len(exon_cod) == 0: ## build exon coordinates from UTR3, UTR5 and CDS\n if cds_cod != []:\n exon_cod=createExon(gene['strand'], utr5_cod, cds_cod, utr3_cod) \n\n if gene['strand']=='-': ## general order to coordinates\n if len(exon_cod) >1:\n if exon_cod[0][0] > exon_cod[-1][0]:\n exon_cod.reverse()\n if len(cds_cod) >1:\n if cds_cod[0][0] > cds_cod[-1][0]: \n cds_cod.reverse()\n if len(utr3_cod) >1:\n if utr3_cod[0][0] > utr3_cod[-1][0]: \n utr3_cod.reverse()\n if len(utr5_cod) >1:\n if utr5_cod[0][0] > utr5_cod[-1][0]:\n utr5_cod.reverse()\n\n tis, cdsStop, tss, cleave = [], [], [], [] ## speacial sited in the gene region \n if cds_cod != []:\n if gene['strand'] == '+':\n tis = [cds_cod[0][0]]\n cdsStop = [cds_cod[-1][1]-3]\n elif gene['strand'] == '-':\n tis = [cds_cod[-1][1]]\n cdsStop = [cds_cod[0][0]+3]\n if utr5_cod != []:\n if gene['strand'] == '+':\n tss = [utr5_cod[0][0]]\n elif gene['strand'] == '-':\n tss = [utr5_cod[-1][1]]\n if utr3_cod != []:\n if gene['strand'] == '+':\n cleave = [utr3_cod[-1][1]]\n elif gene['strand'] == '-':\n cleave = [utr3_cod[0][0]]\n\n cds_status, exon_status, utr_status = 0, 0, 0 ## status of the complete elements of the gene\n if cds_cod != []: ## adding phase to the CDS region \n cds_cod_phase = addCDSphase(gene['strand'], cds_cod)\n cds_status = 1\n gene['cds_exons'].append(cds_cod_phase)\n\n if exon_cod != []: \n exon_status = 1\n if utr5_cod != [] or utr3_cod != []: \n utr_status = 1\n if cds_status != 0 and exon_status != 0 and utr_status != 0:\n gene['transcript_status'].append(1)\n else:\n gene['transcript_status'].append(0)\n\n if exon_cod: ## final check point for a valid gene model \n gene['exons'].append(exon_cod)\n gene['utr3_exons'].append(utr3_cod)\n gene['utr5_exons'].append(utr5_cod)\n gene['tis'].append(tis)\n gene['cdsStop'].append(cdsStop)\n gene['tss'].append(tss)\n gene['cleave'].append(cleave) \n\t \n\t gtype=list(set(gtype)) ## different types \n gene['gene_info']=dict(ID=gene_entry[1],\n\t\t\t\tSource=genes_cmpt[gene_entry]['source'],\n\t\t\t\tType=gtype)\n gene=FeatureValueFormat(gene) ## get prepare for MAT writing \n gene_counter+=1\n gene_models.append(gene)\n return gene_models", "def get_genes(variant):\n genes = {}\n transcripts = []\n mongo_genes = []\n \n # Conversion from ensembl to refseq\n # ensembl_to_refseq is a dictionary with ensembl transcript id as keys and\n # a list of refseq ids as values\n ensembl_to_refseq = {}\n for gene_info in variant['info_dict'].get(\n 'Ensembl_transcript_to_refseq_transcript', []):\n splitted_gene = gene_info.split(':')\n transcript_info = splitted_gene[1]\n for 
transcript in transcript_info.split('|'):\n splitted_transcript = transcript.split('>')\n if len(splitted_transcript) > 1:\n ensembl_id = splitted_transcript[0]\n refseq_ids = splitted_transcript[1].split('/')\n ensembl_to_refseq[ensembl_id] = refseq_ids\n \n # A dictionary with clinical gene descriptions\n gene_descriptions = {}\n for gene_info in variant['info_dict'].get('Gene_description', []):\n splitted_gene = gene_info.split(':')\n hgnc_symbol = splitted_gene[0]\n description = splitted_gene[1]\n gene_descriptions[hgnc_symbol] = description\n \n # First we get all vep entrys that we find and put them under their \n # corresponding gene symbol in 'genes'\n for vep_entry in variant['vep_info'].get(variant['ALT'], []):\n transcript = get_transcript(vep_entry, ensembl_to_refseq)\n hgnc_symbol = transcript.hgnc_symbol\n if hgnc_symbol:\n if hgnc_symbol in genes:\n genes[hgnc_symbol]['transcripts'][transcript.transcript_id] = transcript\n for functional_annotation in transcript.functional_annotations:\n new_rank = SO_TERMS[functional_annotation]['rank']\n if new_rank < genes[hgnc_symbol]['best_rank']:\n genes[hgnc_symbol]['best_rank'] = new_rank\n genes[hgnc_symbol]['most_severe_transcript'] = transcript\n genes[hgnc_symbol]['most_severe_function'] = functional_annotation\n \n else:\n genes[hgnc_symbol] = {}\n genes[hgnc_symbol]['transcripts'] = {}\n genes[hgnc_symbol]['transcripts'][transcript.transcript_id] = transcript\n genes[hgnc_symbol]['most_severe_transcript'] = transcript\n genes[hgnc_symbol]['omim_gene_id'] = None\n genes[hgnc_symbol]['phenotypic_terms'] = []\n genes[hgnc_symbol]['best_rank'] = 40\n genes[hgnc_symbol]['ensembl_id'] = transcript.ensembl_id\n \n for functional_annotation in transcript.functional_annotations:\n new_rank = SO_TERMS[functional_annotation]['rank']\n if new_rank < genes[hgnc_symbol]['best_rank']:\n genes[hgnc_symbol]['best_rank'] = new_rank\n genes[hgnc_symbol]['most_severe_function'] = functional_annotation\n \n \n ######################################################################\n ## There are two types of OMIM terms, one is the OMIM gene entry ##\n ## and one is for the phenotypic terms. ##\n ## Each key in the 'omim_terms' dictionary reprecents a gene id. 
##\n ## Values are a dictionary with 'omim_gene_id' = omim_gene_id and ##\n ## 'phenotypic_terms' = [list of OmimPhenotypeObjects] ##\n ######################################################################\n\n # Fill the omim gene id:s:\n for annotation in variant['info_dict'].get('OMIM_morbid', []):\n if annotation:\n splitted_record = annotation.split(':')\n try:\n hgnc_symbol = splitted_record[0]\n omim_term = splitted_record[1]\n genes[hgnc_symbol]['omim_gene_id'] = omim_term\n except (ValueError, KeyError):\n pass\n\n # Fill the omim phenotype terms:\n for gene_annotation in variant['info_dict'].get('Phenotypic_disease_model', []):\n if gene_annotation:\n splitted_gene = gene_annotation.split(':')\n hgnc_symbol = splitted_gene[0]\n for omim_entry in splitted_gene[1].split('|'):\n splitted_record = omim_entry.split('>')\n \n phenotype_id = splitted_record[0]\n inheritance_patterns = []\n if len(splitted_record) > 1:\n inheritance_patterns = splitted_record[1].split('/')\n \n disease_model = PhenotypeTerm(\n phenotype_id=phenotype_id,\n disease_models=inheritance_patterns\n )\n \n genes[hgnc_symbol]['phenotypic_terms'].append(disease_model)\n \n for hgnc_symbol in genes:\n gene_info = genes[hgnc_symbol]\n most_severe = gene_info['most_severe_transcript']\n # Create a mongo engine gene object for each gene found in the variant\n mongo_gene = Gene(hgnc_symbol=hgnc_symbol)\n mongo_gene.description = gene_descriptions.get(hgnc_symbol)\n mongo_gene.ensembl_gene_id = gene_info.get('ensembl_id', None)\n mongo_gene.omim_gene_entry = gene_info.get(\n 'omim_gene_id', \n None\n )\n\n mongo_gene.omim_phenotypes = gene_info.get(\n 'phenotypic_terms', \n []\n )\n\n # Add a list with the transcripts:\n mongo_gene.transcripts = []\n for transcript_id in gene_info['transcripts']:\n mongo_gene.transcripts.append(gene_info['transcripts'][transcript_id])\n\n try:\n mongo_gene.functional_annotation = gene_info['most_severe_function']\n except AttributeError:\n pass\n try:\n mongo_gene.region_annotation = SO_TERMS[mongo_gene.functional_annotation]['region']\n except AttributeError:\n pass\n try:\n mongo_gene.sift_prediction = most_severe.sift_prediction\n except AttributeError:\n pass\n try:\n mongo_gene.polyphen_prediction = most_severe.polyphen_prediction\n except AttributeError:\n pass\n # Add the mongo engine gene to the dictionary\n mongo_genes.append(mongo_gene)\n\n return mongo_genes", "def interactor_finder():\n from tools import prot_id_converter\n\n proteinList = []\n with open(\"../datafiles/known_interactors.txt\",\"r\") as inpProt: # create list of gene names from hand-made text file with known ptp22 interactors\n for protLine in inpProt:\n if protLine != \"\\n\":\n curName = protLine.strip().split(\"\\t\")[0]\n curName = curName[0] + curName[1:].lower()\n proteinList.append(curName)\n inpIdL = prot_id_converter(proteinList, \"10090\", \"genesymbol\", \"uniprotaccession\") # convert to uniprot accessions\n print(inpIdL)\n \n with open(\"../bob/processed/bobprots_all.csv\",\"r\") as targetF: # create list of all uniprot accessions in Bob's dataset (unique razor proteins only)\n targetD = {}\n for targetLine in targetF:\n targetD[targetLine.split(\",\")[0]] = targetLine.split(\",\")[1].strip()\n for inpIdItem in inpIdL:\n for queryI in inpIdItem:\n if queryI in targetD:\n print(targetD[queryI])\n break", "def getIDs():", "def retrieve_descriptions(gene, descriptions, empties):\n\n # Perform ESearch and grab list of IDs\n query = gene + '[Gene Name]'\n handle = Entrez.esearch(db='gene', 
term=query,\n retmax=10,\n retmode='xml')\n record = Entrez.read(handle)\n idlist = ','.join(record[\"IdList\"])\n handle.close()\n\n # Ensure you have results, exit if not\n if idlist == '':\n empties.append(gene)\n print('{} has empty description.'.format(gene))\n return\n\n # Generate summary from UID list\n handle = Entrez.esummary(db='gene', id=idlist)\n record = Entrez.read(handle)\n handle.close()\n\n # Grab description, counter for unique values\n desc_cnt = Counter()\n doc_sums = record[u'DocumentSummarySet'][u'DocumentSummary']\n for i in range(len(doc_sums)):\n # Use NomenclatureName first if not empty \n if doc_sums[i][u'NomenclatureName'] != '':\n desc = doc_sums[i][u'NomenclatureName']\n desc_cnt[desc] += 1\n # Otherwise add from OtherDesignations\n else:\n descs = doc_sums[i][u'OtherDesignations'].split('|')\n for desc in descs:\n # Ignore descriptions like 'protein IMPA1'\n if desc == 'protein {}'.format(gene):\n continue\n desc_cnt[desc] += 1\n\n desc = desc_cnt.most_common(1)[0][0]\n # Check for empty descriptions\n if desc == '':\n print('{} has empty description.'.format(gene))\n empties.append(gene)\n else:\n descriptions[gene] = desc\n print('{} has {} unique descriptions from {} results. Most common is:\\n{}'.format(\n gene, len(desc_cnt), len(doc_sums), desc))\n\n return(empties)", "def get_taxids(organism_names):\r\n\r\n taxids = []\r\n\r\n for organism in organism_names:\r\n handle = Entrez.esearch(db=\"Taxonomy\", term=organism)\r\n record = Entrez.read(handle)\r\n print(record[\"IdList\"])\r\n try:\r\n taxids.append(record[\"IdList\"][0])\r\n except IndexError:\r\n pass\r\n\r\n return taxids", "def test_ambiguous():\n genes = gene_enricher.get_genes('ABC1')\n assert genes == [\n {'symbol': u'ABCA1', 'entrez_id': u'19', 'ensembl_gene_id': u'ENSG00000165029'},\n {'symbol': u'HEATR6', 'entrez_id': u'63897', 'ensembl_gene_id': u'ENSG00000068097'}\n ]", "def geneProcess(self, name):\n self.fileHandle = open(self.fileName, 'r+b')\n self.mm = mmap.mmap(self.fileHandle.fileno(), 0)\n positions = self.geneFeatures[name]\n exons = []\n for position in positions:\n self.mm.seek(position)\n row = self.mm.readline().decode('utf-8').rstrip().split(\"\\t\")\n attributes = row[-1].split(\"; \")\n for attribute in attributes:\n if attribute.startswith(\"gene_type\"):\n _gt = attribute.split(\" \")[-1][1:-1]\n elif attribute.startswith(\"gene_id\"):\n _gid = attribute.split(\" \")[-1][1:-1]\n elif attribute.startswith(\"gene_name\"):\n _gn = attribute.split(\" \")[-1][1:-1]\n exons.append((row[0], int(row[3]), int(row[4]), row[6], _gt, _gid, _gn))\n self.fileHandle.close()\n exons_df = pd.DataFrame(exons, columns=['scaffold', 'start', 'end',\n 'strand', 'gene_type', 'gene_id', 'gene_name'])\n\n for record in self.geneExonicRegions(exons_df):\n yield record" ]
[ "0.6468079", "0.64520854", "0.6415853", "0.6268371", "0.62580836", "0.60660505", "0.6048606", "0.6023636", "0.60008794", "0.59920865", "0.5961127", "0.59330606", "0.59125656", "0.5873164", "0.585526", "0.5834124", "0.58223885", "0.5777365", "0.57691514", "0.5756105", "0.5722927", "0.57128465", "0.5707637", "0.5690216", "0.56753176", "0.5661294", "0.5628206", "0.5602253", "0.5566031", "0.55591756" ]
0.72333103
0
uploads dataframe to google sheets
def df2gsheet(df: pd.DataFrame, sheet_name=None): CELL_MAX_CHAR_LIMIT = 50000 if 'course' in df.columns: aint_gonna_fit = df[df.course.str.len() >= CELL_MAX_CHAR_LIMIT] if not aint_gonna_fit.empty: print('WARNING ~ {filename} - row are too long'.format(filename=sheet_name)) print(aint_gonna_fit) df['course'] = df['course'].str[:CELL_MAX_CHAR_LIMIT - 1] df2gspread.upload(df, gsheet.id, sheet_name, col_names=True, row_names=False, clean=False, new_sheet_dimensions=df.shape)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_to_google_sheet(df, SPREADSHEET_ID, ws_name, include_index=False, row=1, col=1):\n ws = init_worksheet(SPREADSHEET_ID, ws_name)\n gsdf.set_with_dataframe(ws, df, resize=True, include_index=include_index)", "def move_data_google_sheets_to_s3():\n\n # Set up service account object\n service_account = gspread.service_account()\n sheet = service_account.open(\"Workout log\")\n worksheet = sheet.get_worksheet(0)\n # Pull down worksheet as dataframe\n raw_df = get_as_dataframe(worksheet,\n parse_dates=True,\n usecols=[0, 1, 2],\n skiprows=None)\n _df = raw_df[pd.notnull(raw_df['Date'])]\n\n # Write dataframe to s3\n file_name = \"daily_run_log.csv\"\n csv_buffer = StringIO()\n _df.to_csv(csv_buffer)\n s3_resource = boto3.resource('s3')\n s3_resource.Object(BUCKET, file_name).put(Body=csv_buffer.getvalue())", "def gsheet_handler(spread_workbook:str, sheet_name:str,path_to_credentials:str('super_user.json'), method='Read',action = 'append_rows',is_add_sheet=False, df=None,row_cutoff=0,col_cutoff=0,keep_headers=False):\n \n scopes = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive',\n 'https://www.googleapis.com/auth/spreadsheets']\n \n credentials = ServiceAccountCredentials.from_json_keyfile_name(path_to_credentials, scopes=scopes)\n gsc = gspread.authorize(credentials)\n ws = gsc.open(spread_workbook)\n \n if is_add_sheet ==False:\n \n # Get existing sheet data\n wb = ws.worksheet(sheet_name)\n wb_df = wb.get_all_records()\n wb_df = pd.DataFrame.from_dict(wb_df)\n \n if wb_df is not None:\n n_row = wb_df.shape[0] \n n_col = wb_df.shape[1]\n \n if method == 'Read':\n \n return wb_df\n \n elif (method =='write') & (is_add_sheet):\n wb = ws.add_worksheet(rows=10000,cols=100,title=sheet_name)\n gd.set_with_dataframe(wb,df,include_column_header= keep_headers)\n \n elif (method =='write') & (action == 'refresh_sheet'):\n wb.clear()\n gd.set_with_dataframe(wb,df,row=1+row_cutoff,include_column_header=keep_headers) \n \n elif (method =='write') & (action == 'append_rows'):\n gd.set_with_dataframe(wb,df,row=n_row+1+row_cutoff,include_column_header=keep_headers) \n \n elif (method =='write') & (action == 'append_columns'):\n gd.set_with_dataframe(wb,df,col=n_col+1+col_cutoff,include_column_header=keep_headers) \n \n else:\n print(\"None action are performed\")\n \n return wb", "def sent_data(values):\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'\n 'version=v4')\n service = discovery.build('sheets', 'v4', http=http,\n discoveryServiceUrl=discoveryUrl)\n\n spreadsheet_id = '1PYRx1rladDsm_nW3dHbX_R7oX1OxCrUvHPDEIJfkOJ8'\n range_name = 'A:E'\n value_input_option = 'RAW'\n \n body = {\n 'values': values\n }\n result = service.spreadsheets().values().append(\n spreadsheetId=spreadsheet_id, range=range_name,\n valueInputOption=value_input_option, body=body).execute()\n\n return result", "def write_to_google_doc(garage_name):\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'\n 'version=v4')\n service = discovery.build('sheets', 'v4', http=http,\n discoveryServiceUrl=discoveryUrl)\n\n spreadsheetId = '1zZ0XS0yDKLK9YkWeimEgv41u-7EP6_YzsyHPp_o-MBs'\n rangeName = garage_name +'!A2:E'\n #print rangeName\n #sys.stdout.flush()\n #result = service.spreadsheets().values().get(spreadsheetId=spreadsheetId, range=rangeName).execute()\n #values = result.get('values', [])\n\n #get the 
already present number of rows\n #num_rows = 0\n #if not values:\n #print('No data found.')\n #placeholder\n #xaaa = 0\n #else:\n #print('Name, Major:')\n #for row in values:\n #num_rows = num_rows + 1\n # Print columns\n #print('%s, %s, %s' % (row[0], row[1], row[2]))\n \n #print (num_rows)\n #testing writing\n \n body = {\n 'values': anomalies_for_google_docs\n }\n \n result = service.spreadsheets().values().append(\n spreadsheetId=spreadsheetId, range=rangeName,\n valueInputOption='USER_ENTERED', body=body).execute()", "def __upload_to_gcp_bucket(df, fname):\n blob = BUCKET.blob(fname)\n json_str = df.to_json(orient='records')\n blob.upload_from_string(json_str)", "def upload_sheet(self, request):\n file = self.request.data['file']\n\n # validating requested payload.\n if not file:\n return Response(\"Got no file! Please hit me again with file.\")\n # Only .csv/xls format file are allowed\n if file.name.rsplit('.')[1] == 'csv':\n sheet_as_df = pd.read_csv(file)\n elif file.name.rsplit('.')[1] == 'xls':\n sheet_as_df = pd.read_excel(file)\n else:\n return Response(\"Only .csv/.xls format type allowed for now.\")\n\n # sheet uploading code\n # =============Logic Start================\n header = ['last_name', 'first_name', 'state', 'phone_number']\n df = sheet_as_df\n if not set(header).issubset(df.columns):\n return False, f'Please check uploading sheet matching headers as: {header}'\n # filling empty(NaN) of data-frame entry with 0.0\n df = df.fillna(0)\n from itertools import islice\n batch_size = 100\n while True:\n content_instance = [Content(\n first_name=record['first_name'],\n last_name=record['last_name'],\n state=record['state'],\n phone_number=record['phone_number']\n ) for record in islice(df.to_dict('records'), batch_size)]\n if not content_instance:\n logger.info('Unable to update PhoneBook model with entries.')\n break\n PhoneBook.objects.bulk_create(content_instance, batch_size)\n # =============Logic End==================\n\n return Response('Successfully updated order entry!')", "def save_to_gcs(df, file, bucket=settings.ASSETS.BUCKET):\n output_file = NamedTemporaryFile().name\n df.to_csv(output_file, compression=\"gzip\", index=False)\n upload_blob(bucket, output_file, file)", "def upload_data(self, data_frame):\n pass", "def upload_protocol(sheet_no, data_struct):\n global num_uploads\n client = gspread.authorize(creds)\n sheet = client.open('Fridge Data Testing').get_worksheet(sheet_no)\n \n print('uploading...')\n for key, val in data_struct.items():\n time.sleep(7)\n d = key.strftime('%m/%d/%Y ')\n t = key.strftime('%H:%M:%S')\n data = [d + t]\n data += val\n try:\n sheet.insert_row(data, 2, value_input_option='USER_ENTERED')\n except gspread.exceptions as e:\n print('Exception: ' + str(e))\n return False\n print('upload number ' + num_uploads + ' completed')\n return True", "def save_new_excel_data(df, req_file_name, sheet):\r\n try:\r\n # select rows for a specific column and save a excel file\r\n dtc_table_ext = ['SW_DTC', 'Diagnosis_IDENTIFIER', 'Symptom', 'SW_Module', 'ISO_Pcode',\r\n 'Cust_Pcode', 'ScanT_Pcode', 'Description', 'Lamp_Manager', 'EPC_Lamp',\r\n 'SnapShot', 'MIL_FUEL_CONF', 'Diagnosis_Enabled', 'Diagnosis_presence',\r\n 'Severity', 'Priority', 'Diag_Call_task', 'Diag_Validation', 'Unit',\r\n 'Diag_DeValidation', 'DTC_available', 'EPC', 'MIL_FuelConf_bit1',\r\n 'MIL_FuelConf_bit0', 'Lamp_Manager_bit2', 'Lamp_Manager_bit1', 'Lamp_Manager_bit0',\r\n 'AUTOyyy', 'Prio_bit3', 'Prio_bit2', 'Prio_bit1', 'Prio_bit0',\r\n 'Snapshot_bit2', 'Snapshot_bit1', 
'Snapshot_bit0', 'empty', 'ETC_highbit', 'ETC_lowbit']\r\n # Save df_all_cols extracted to a new excel file\r\n file_to_save = sheet+'_'+req_file_name\r\n with pd.ExcelWriter(file_to_save) as writer: # for writing more than 1 sheet\r\n df.to_excel(writer, sheet_name=sheet, index=False)\r\n # df.to_excel(writer, sheet_name=sheet, columns=dtc_table_ext, index=False)\r\n except PermissionError:\r\n print('DEBUG-->save_new_excel_data: exception raised: ', sys.exc_info())", "def upload_df_to_bq(df, project_id, table_id):\n\n df.to_gbq(table_id, project_id=project_id, if_exists='replace')\n return 'table uploaded to ' + project_id + '.' + table_id", "def _sheet_to_df(columns_config_url_or_path):\n url = columns_config_url_or_path.replace(\"edit#gid=\", \"export?format=csv&gid=\")\n try:\n return pd.read_csv(StringIO(requests.get(url, timeout=10).content.decode(\"utf-8\")))\n except Exception as e:\n raise BaseDosDadosException(\n \"Check if your google sheet Share are: Anyone on the internet with this link can view\"\n ) from e", "def start_upload():\n credentials = check_auth()\n body = load_items_from_json_file('crawling/items.json')\n\n # Call the Sheets API\n service = build('sheets', 'v4', credentials=credentials)\n sheet = service.spreadsheets()\n result = service.spreadsheets().values().update(\n spreadsheetId=SAMPLE_SPREADSHEET_ID, range=SAMPLE_RANGE_NAME,\n valueInputOption='RAW', body=body).execute()", "def excelSheetData(request, *args, **kwargs):\n if request.method == \"POST\":\n try:\n file = request.FILES.get('file')\n # Reading file using pandas\n data = pd.read_excel(file, dtype={'Column1': str, 'Column10': str})\n\n # data = pd.read_excel(file, dtype={'ASOF': str, 'USERNAME': srf})\n except Exception as e:\n logger = logging.getLogger('root')\n logger.error('Unable to upload file', exc_info=True, extra={\n 'exception': e,\n })\n return JsonResponse({'status': 500, 'exception': e})\n\n # uploadExcelSheet.delay(data)\n uploadExcelSheet(data)\n\n return JsonResponse({'status': 200})", "def write_to_gdocs(temp, ext_temp, humidity):\n # Account details for google docs\n email = '[email protected]'\n password = 'oknifzuxspiwyobt'\n spreadsheet = 'pitemp'\n\n # Login with your Google account\n try:\n gc = gspread.login(email, password)\n except:\n print \"Unable to log in. Check your email address/password\"\n sys.exit()\n\n # Open a worksheet from your spreadsheet using the filename\n try:\n worksheet = gc.open(spreadsheet).sheet1\n except:\n print \"Unable to open the spreadsheet. \\\n Check your filename: %s\" % spreadsheet\n sys.exit()\n\n try:\n values = [datetime.datetime.now(),\n temp,\n ext_temp,\n humidity]\n worksheet.append_row(values)\n\n #success!\n print \"Wrote a row to %s at %s\" % (spreadsheet,\n datetime.datetime.now())\n except:\n print \"Unable to append data. 
Check your connection?\"", "def _load_df(self):\n oauth_json = self.plugin_config[\"service_account_credentials\"]\n with tempfile.NamedTemporaryFile(mode=\"w+\", suffix=\".json\") as ntf:\n json.dump(oauth_json, ntf)\n ntf.seek(0)\n\n gc = gspread.service_account(filename=ntf.name)\n \n sheet_url = self.plugin_config[\"sheet_url\"]\n sheet = gc.open_by_url(sheet_url)\n self.worksheet = sheet.get_worksheet(0)\n data = self.worksheet.get_all_values()\n colnames = data.pop(0)\n\n self._df = pd.DataFrame(data, columns=colnames)", "def save_sheet(self):\n if self.data:\n self.do_save()", "def scrapper(request):\n bq_create_table()\n df = loop_req()\n csv = df.to_csv()\n upload_bucket(csv)\n return csv", "def append_result_to_spreadsheet(dataset_size: int = 15200, image_sizes: str = \"192x96px\",\n stroke_thicknesses: str = \"3\", staff_lines: str = \"60,67,74,81\",\n model_name: str = \"vgg4\", data_augmentation=\"20% zoom, 10° rotation\",\n optimizer: str = \"Adadelta\", early_stopping: int = 20, reduction_patience: int = 8,\n learning_rate_reduction_factor: float = 0.5, minibatch_size: int = 64,\n initialization: str = \"glorot_uniform\", initial_learning_rate: float = 1.0,\n accuracy: float = \"0.10\", date: str = \"24.12.9999\", use_fixed_canvas: bool = True,\n datasets: str = \"homus\", execution_time_in_seconds: int = \"0\",\n balancing_method=\"None\"):\n try:\n service, spreadsheet_id = get_service_and_spreadsheet_id()\n first_empty_line = get_first_empty_line_fast(service, spreadsheet_id)\n print(\"Uploading results to Google Spreadsheet and appending at first empty line {0}\".format(first_empty_line))\n data = [dataset_size, image_sizes, stroke_thicknesses, staff_lines, model_name, data_augmentation, optimizer,\n early_stopping, reduction_patience, learning_rate_reduction_factor, minibatch_size, initialization,\n initial_learning_rate, accuracy, date, use_fixed_canvas, datasets, execution_time_in_seconds,\n balancing_method]\n write_into_spreadsheet(service, spreadsheet_id, data, first_empty_line)\n except Exception as exception:\n print(\"Info: Results could not be uploaded to Google Spreadsheet because it is probably not configured: {0}\".format(str(exception)))\n # traceback.print_exc()", "def google_sheets_connector():\n print(\"Connecting to Google Sheets\")\n scope = ['https://www.googleapis.com/auth/drive']\n credentials = ServiceAccountCredentials.from_json_keyfile_name('key.json', scope)\n client = gspread.authorize(credentials)\n sheet = client.open('backend').sheet1\n return sheet", "def main():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'\n 'version=v4')\n service = discovery.build('sheets', 'v4', http=http,\n discoveryServiceUrl=discoveryUrl)\n\n spreadsheetId = SPREADSHEET_ID #from creds file\n rangeName = SPREADSHEET_RANGE #from creds file\n result = service.spreadsheets().values().get(\n spreadsheetId=spreadsheetId, range=rangeName).execute()\n values = result.get('values', [])\n\n\n #Adding list to hold values to hold cast to push to mongo\n cast = []\n \n if not values:\n print('No data found.')\n else:\n try:\n for row in values:\n #Adding dict to hold cast information\n cast.append(create_character_dict(row))\n \n insert_characterlist_mongo(cast)\n except Exception as e:\n print('Error has occurred: ' + str(e))", "def get_sheet(sheet, doc):\n scope = [\"https://spreadsheets.google.com/feeds\", \"https://www.googleapis.com/auth/drive\"]\n\n credentials = 
ServiceAccountCredentials.from_json_keyfile_name(SECRET_FILE, scope)\n\n gc = gspread.authorize(credentials)\n wks = gc.open(doc)\n sheet = wks.worksheet(sheet)\n data = sheet.get_all_values()\n h1 = ffill(data[0])\n\n # remove extra whitespace\n h1 = [k.strip() for k in h1]\n h2 = [k.strip() for k in data[1]]\n\n # create a multiindex\n columns = MultiIndex.from_tuples(zip(h1, h2))\n\n # populate the dataframe\n df = DataFrame(data[2:], columns=columns)\n return df", "def write_one_sheet(self, key):\n # Get sheet #\n sheet = self.writer.sheets[key]\n # Get dataframes #\n all_dfs = self.sheet_to_dfs[key]\n # Initialize #\n row = 0\n # Loop #\n for info in all_dfs:\n # Get dataframe #\n df = info['dataframe']\n # Write custom title #\n sheet.write_string(row, 0, info.get('title', ''))\n row += 2\n # Add extras #\n df.index.name = info.get('y_extra', '')\n df.columns.name = info.get('x_extra', '')\n # Add Y labels #\n title, label = info.get('y_title', ''), info.get('y_label', '')\n df = pandas.concat({title: df}, names=[label])\n # Add X labels #\n title, label = info.get('x_title', ''), info.get('x_label', '')\n df = pandas.concat({title: df}, names=[label], axis=1)\n # Write dataframe #\n df.to_excel(self.writer,\n sheet_name = key,\n startrow = row,\n startcol = self.indentation)\n # Increment #\n row += len(df.index) + self.spacing", "def upload_dataframe_to_s3(df, bucket_name, output_filename):\n temporary_filepath = tempfile.mkstemp()[1]\n df.to_csv(temporary_filepath, sep=\",\", index=False)\n upload_file_to_s3(\n bucket_name=bucket_name,\n input_filepath=temporary_filepath,\n output_filename=output_filename,\n )", "def main():\n\n\n\n\n try:\n\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'\n 'version=v4')\n service = discovery.build('sheets', 'v4', http=http,\n discoveryServiceUrl=discoveryUrl)\n\n # Specify Google Translation Spreadsheet\n spreadsheetId = 'ID' # Enter ID\n rangeName = 'surveys_locale!A2:L'\n\n result = service.spreadsheets().values().get(spreadsheetId=spreadsheetId, range=rangeName).execute()\n values = result.get('values', []) #column headers, table and specify rows\n\n #convert values into dataframe\n df = pd.DataFrame(values)\n\n #replace all non trailing blank values created by Google Sheets API with null values\n df_replace = df.replace([''], [None]) \n\n #convert back to list to insert into MySQL\n processed_dataset = df_replace.values.tolist() \n\n\n if not values:\n print('No data found.')\n else:\n with connection.cursor() as cursor: \n\n\n\n # CREATE translation table\n\n print('Creating translation_table table...')\n \n cursor.execute(\"\"\"CREATE TABLE `translation_table` (\n `tokens` varchar(255) NULL,\n `survey_group_name` varchar(255) COLLATE utf8_bin NULL,\n `en-US` varchar(255) COLLATE utf8_bin NULL default null,\n `nb-NO` varchar(255) COLLATE utf8_bin NULL default null,\n `sv-SE` varchar(255) COLLATE utf8_bin NULL default null,\n `de-DE` varchar(255) COLLATE utf8_bin NULL default null,\n `es-ES` varchar(255) COLLATE utf8_bin NULL default null,\n `pt-PT` varchar(255) COLLATE utf8_bin NULL default null,\n `fr-FR` varchar(255) COLLATE utf8_bin NULL default null,\n `da-DK` varchar(255) COLLATE utf8_bin NULL default null,\n `fi-FI` varchar(255) COLLATE utf8_bin NULL default null,\n `zh-CN` varchar(255) COLLATE utf8_bin NULL default null\n ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin;\n\n \"\"\")\n\n\n # INSERT VALUES IN TABLE\n\n 
print('Inserting records into Translation table')\n\n #Iterate through the dataframe list and insert into MySQL row by row\n for keyrow, row in enumerate(processed_dataset):\n\n insert_sql = \"\"\"INSERT INTO trybe_stats.`translation_table` (`tokens`, `survey_group_name`, `en-US`, `nb-NO`, `sv-SE`, `de-DE`, `es-ES`, `pt-PT`, `fr-FR`, `da-DK`, `fi-FI`, `zh-CN`) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\"\n cursor.execute(insert_sql, [row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10], row[11]])\n\n else:\n print('No rows found')\n\n print('Finished inserting.')\n\n\n # COUNT NUMBER OF ROWS\n\n cursor.execute(\"SELECT COUNT(*) from trybe_stats.`translation_table`\")\n result=cursor.fetchone()\n print(result.values()) #returns a dictionary, so values() gets the values which is the row count\n\n\n # connection is not autocommit by default. So you must commit to save\n # your changes.\n connection.commit()\n\n\n finally:\n connection.close()", "def rite2xl(df, file_name):\r\n print('writing dataframe to excel',)\r\n writer = pd.ExcelWriter(file_name ,engine = 'xlsxwriter')\r\n df.to_excel(writer,file_name)\r\n writer.save()\r\n print('writing to excel sheet completed')\r\n return(df)", "def update_koinex_google_sheet(self, price_data):\n credentials = getattr(self, 'credentials', self.get_credentials())\n http = credentials.authorize(httplib2.Http())\n discovery_url = ('https://sheets.googleapis.com/$discovery/rest?version=v4')\n service = discovery.build('sheets', 'v4', http=http,\n discoveryServiceUrl=discovery_url)\n spreadsheet_ids = settings.SPREADSHEET_IDS\n range_name = 'Overall!B2:B14'\n value_input_option = 'RAW'\n value_range_body = {\n \"range\": \"Overall!B2:B14\",\n \"majorDimension\": \"COLUMNS\",\n \"values\": [[price_data['BTC'], price_data['XRP'], price_data['LTC'], price_data['ETH'], price_data['BCH'],\n price_data['GNT'], price_data['OMG'], price_data['REQ'], price_data['ZRX'], price_data['BAT'], price_data['TRX'], price_data['AE']]]\n }\n for spreadsheet_id in spreadsheet_ids:\n request = service.spreadsheets().values().update(\n spreadsheetId=spreadsheet_id, range=range_name,\n valueInputOption=value_input_option, body=value_range_body)\n request.execute()", "def google_sheets_connector():\n scope = ['https://www.googleapis.com/auth/drive']\n credentials = ServiceAccountCredentials.from_json_keyfile_name('key.json', scope)\n client = gspread.authorize(credentials)\n sheet = client.open('backend').sheet1\n return sheet", "def post_cell():\n data = request.json\n attr = ('worksheetKey', 'row', 'col', 'value')\n\n if data is not None and all(key in data for key in attr):\n worksheet = data[attr[0]]\n row = data[attr[1]]\n col = data[attr[2]]\n value = data[attr[3]]\n ss.write_cell(worksheet, row, col, value)\n response.status = '201 Created'\n else:\n response.status = '400 Bad Request'\n\n return response" ]
[ "0.68912625", "0.6610926", "0.6601545", "0.63393456", "0.6315094", "0.6195817", "0.61924154", "0.61545527", "0.6118955", "0.6087999", "0.60400724", "0.60386956", "0.60276693", "0.6022702", "0.59579694", "0.58812815", "0.5848476", "0.57958424", "0.5772333", "0.5685371", "0.566473", "0.5661132", "0.5647473", "0.5630082", "0.5605386", "0.5576507", "0.55746114", "0.557119", "0.5557642", "0.55497396" ]
0.67953295
1
connect to wiktionary, get all parts of speech, join them into one string, and return here
def translate(word: str) -> str:
    global LINE_DIVIDER
    parser = WiktionaryParser()
    def_ = parser.fetch(word.lower())
    ret = ""
    for word_payload in def_:
        definitions = word_payload['definitions']
        translations = {d['partOfSpeech']: LINE_DIVIDER.join(d['text']) for d in definitions}
        ret += LINE_DIVIDER.join(f"{k}: {v}" for k, v in translations.items())
    return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getWords(speech):\r\n return speech.split()", "def make_text(chains):\n\n # your code goes here\n n_gram = tuple(choice(chains['START']))\n words = [word for word in n_gram]\n\n while n_gram in chains:\n\n next_word = choice(chains[n_gram])\n if next_word == 'EOF':\n break\n words.append(next_word)\n\n n_gram = list(n_gram)[1:]\n n_gram.append(next_word)\n n_gram = tuple(n_gram)\n \n return \" \".join(words)", "def get_speech(self, phrase):\n src = os.path.join(constants.CONFIG_PATH, self.voice)\n text = phrase\n\n def preprocess(syllables):\n temp = []\n for syllable in syllables:\n for p in self.punctuation:\n syllable = syllable.replace(p, \"\")\n if syllable.isdigit():\n syllable = atc.num2chinese(syllable)\n new_sounds = lazy_pinyin(syllable, style=pypinyin.TONE3)\n for e in new_sounds:\n temp.append(e)\n else:\n temp.append(syllable)\n return temp\n \n if not os.path.exists(src):\n logger.error('{} 合成失败: 请先下载 syllables.zip (https://sourceforge.net/projects/hantts/files/?source=navbar) 并解压到 ~/.wukong 目录下'.format(self.SLUG))\n return None\n logger.debug(\"{} 合成中...\".format(self.SLUG))\n delay = 0\n increment = 355 # milliseconds\n pause = 500 # pause for punctuation\n syllables = lazy_pinyin(text, style=pypinyin.TONE3)\n syllables = preprocess(syllables)\n \n # initialize to be complete silence, each character takes up ~500ms\n result = AudioSegment.silent(duration=500*len(text))\n for syllable in syllables:\n path = os.path.join(src, syllable+\".wav\")\n sound_file = Path(path)\n # insert 500 ms silence for punctuation marks\n if syllable in self.punctuation:\n short_silence = AudioSegment.silent(duration=pause)\n result = result.overlay(short_silence, position=delay)\n delay += increment\n continue\n # skip sound file that doesn't exist\n if not sound_file.is_file():\n continue\n segment = AudioSegment.from_wav(path)\n result = result.overlay(segment, position=delay)\n delay += increment\n\n tmpfile = ''\n with tempfile.NamedTemporaryFile() as f:\n tmpfile = f.name\n result.export(tmpfile, format=\"wav\")\n logger.info('{} 语音合成成功,合成路径:{}'.format(self.SLUG, tmpfile))\n return tmpfile", "def wiktionary(bot, trigger):\n word = trigger.group(2)\n if word is None:\n bot.reply('You must tell me what to look up!')\n return\n\n _etymology, definitions = wikt(word)\n if not definitions:\n # Cast word to lower to check in case of mismatched user input\n _etymology, definitions = wikt(word.lower())\n if not definitions:\n bot.reply(\"Couldn't get any definitions for %s.\" % word)\n return\n\n result = format(word, definitions)\n if len(result) < 300:\n result = format(word, definitions, 3)\n if len(result) < 300:\n result = format(word, definitions, 5)\n\n bot.say(result, truncation=' […]')", "def get_speech(self, word):\n posses = ['verb', 'noun', 'adj', 'adv', 'as in', 'conjunction']\n speeches = []\n\n def get_all_synonyms(word1, speech1):\n for w in Word(word1).synonyms('all', partOfSpeech=speech1):\n if not w == []:\n return w\n return []\n\n def empty_tree(input_list):\n # print(input_list)\n if type(input_list) == type([]):\n for l in input_list:\n if not empty_tree(l):\n return False\n return True\n else:\n return False\n\n for poss in posses:\n if not empty_tree(get_all_synonyms(word, poss)):\n speeches.append(poss)\n return speeches", "def song_lyrics(ans):\r\n albums = simple_album_list()\r\n for album in albums:\r\n songs = simple_songs_list(album)\r\n for song in songs:\r\n if ans == song:\r\n words = dbase()[album][0][song]\r\n words = words[2]\r\n return words", "def 
summon_text(quote_mess):\n\n # clean up to get an individual quote out of the poorly formatted soup\n # goodreads does not format their webpages in a way that is scraping-friendly\n quotes = []\n for quote in quote_mess:\n to_trim = str(quote)\n trimmed = to_trim[30:]\n this_quote = ''\n for char in trimmed:\n if char == '<':\n break\n else:\n this_quote = this_quote + char\n quotes.append(this_quote)\n\n # clean up the line breaks and unnecessary punctuation\n # without this step, we would end up with random punctuation and unreliable chaining\n quote_list = []\n for quote in quotes:\n cleaned = quote.replace(\"\\n\", '')\n purged = re.sub(r'(\\.|,|:|;|\\(|\\)|\\\"|\\?|”|“|!)', '', cleaned)\n quote_list.append(purged)\n\n # create a final clean list of all the words available\n word_list = []\n for index in range(0, len(quote_list) - 1):\n quote = quote_list[index]\n words = quote.split(' ')\n for word in words:\n # just checking if we have a first person word or not\n if word != 'I' or word[:2] != \"I'\":\n word_list.append(word.lower())\n else:\n word_list.append(word)\n\n return word_list", "def get_text(data):\n return \" \".join([item[\"words\"] for item in data])", "def get_introduction(length=128, words=None):", "def _parse_xml(self, doc):\n target_speech = []\n tops = self._get_tops(doc)\n\n for top in tops:\n speeches = self._get_speeches(top)\n\n for speech in speeches:\n if self._is_authored_by_target(speech):\n parts = self._get_parts(speech)\n\n for part in parts:\n if str(part.string) != \"None\":\n target_speech.append(str(part.string))\n\n if not target_speech:\n return None\n\n return \" \".join(target_speech)", "def get_pirate_talk(phrase):\n\n pirate_dict = {\" sir\": \"matey\",\n \"hotel\" : \"fleabag inn\",\n \"student\" : \"swabbie\",\n \"boy\" : \"matey\",\n \"madam\" : \"proud beauty\",\n \"professor\" : \"foul blaggart\",\n \"restaurant\" : \"galley\",\n \"your\" : \"yer\",\n \"excuse\" : \"arr\",\n \"students\" : \"swabbies\",\n \"are\": \"be\",\n \"lawyer\" : \"foul blaggart\",\n \"the\": \"th\\'\",\n \"restroom\" : \"head\",\n \"my\": \"me\",\n \"hello\" : \"avast\",\n \"is\": \"be\",\n \"man\": \"matey\"}\n\n # split string into list so I can iterate by words.\n phrase_list = phrase.split()\n\n pirate_words = []\n\n for word in phrase_list:\n # if the word is in pirate dictioary, replace word with its corresponding key.\n if word in pirate_dict:\n word = pirate_dict[word]\n # all all words to the new, 'translated', list.\n pirate_words.append(word)\n\n pirate_words = \" \".join(pirate_words)\n\n return pirate_words", "def print_part_of_speech(part_of_speech):\n\n print(part_of_speech)", "def text_to_speech(entry):\n text = entry.get_text()\n if text:\n subprocess.call([\"milena_say\", text])", "def get_person_text(self, uid):\n words = \"\"\n\n query = \"\"\"\nSELECT ?overview ?researchO ?label\nWHERE\n{\n <%s> <http://vivoweb.org/ontology/core#overview> ?overview .\n <%s> <http://vivoweb.org/ontology/core#researchOverview> ?researchO .\n <%s> <http://www.w3.org/2000/01/rdf-schema#label> ?label .\n}\n \"\"\" % (uid, uid, uid)\n self.setQuery(query)\n try:\n rval = self.query()\n try:\n g = rval.convert()\n except:\n pass\n words = \"%s %s %s\" % (g['results']['bindings'][0]['overview']['value'], g['results']['bindings'][0]['researchO']['value'], g['results']['bindings'][0]['label']['value'])\n except:\n print \"Select failed: %s\" % query\n\n self.setQuery(\"\"\"\nPREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\nPREFIX rdfs: 
<http://www.w3.org/2000/01/rdf-schema#>\nPREFIX vivo: <http://vivoweb.org/ontology/core#>\nPREFIX xsd: <http://www.w3.org/2001/XMLSchema#>\nSELECT ?name\nWHERE\n{\n ?auth vivo:relates <%s> .\n ?auth rdf:type vivo:Authorship .\n ?auth vivo:relates ?art .\n filter (?art!=<%s>) .\n ?art <http://vivoweb.org/ontology/core#dateTimeValue> ?date .\n ?date <http://vivoweb.org/ontology/core#dateTime> ?year .\n filter (?year>\"2009-01-01T00:00:00Z\"^^xsd:dateTime) .\n ?art rdfs:label ?name .\n}\nLIMIT 20\n\"\"\" % (uid, uid))\n try:\n rval = self.query()\n try:\n g = rval.convert()\n except:\n pass\n for t in g['results']['bindings']:\n words = words + \" \" + t['name']['value']\n\n except:\n print \"Select failed\"\n traceback.print_exc(file=sys.stdout)\n\n self.setQuery(\"\"\"\nPREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\nPREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\nPREFIX vivo: <http://vivoweb.org/ontology/core#>\nPREFIX xsd: <http://www.w3.org/2001/XMLSchema#>\n\nSELECT ?name\nWHERE\n{\n ?grant vivo:relates <%s> .\n ?grant rdf:type vivo:Grant .\n ?grant <http://vivoweb.org/ontology/core#dateTimeInterval> ?date .\n ?date <http://vivoweb.org/ontology/core#end> ?end .\n ?end <http://vivoweb.org/ontology/core#dateTime> ?year .\n filter (?year>\"2009-01-01T00:00:00Z\"^^xsd:dateTime) .\n ?grant rdfs:label ?name .\n}\n\n \"\"\" % (uid))\n try:\n rval = self.query()\n try:\n g = rval.convert()\n except:\n pass\n\n for t in g['results']['bindings']:\n words = words + \" \" + t['name']['value']\n\n except:\n print \"Select failed\"\n traceback.print_exc(file=sys.stdout)\n\n\n\n\n return words", "def wikiword(self):\r\n prefix = \"https://fr.wikipedia.org/w/api.php?action=query&prop=extracts&exintro&format=json&pageids=\"\r\n requestpageid = prefix + str(self.Pageid)\r\n\r\n request = requests.get(requestpageid)# to get json from wikipedia\r\n return_json_API = request.json() # get json file from the request\r\n\r\n try:\r\n # regular expression to remove HTML + linebreaks tags we get back from wiki\r\n wiki_content = re.sub('<[^>]+>|\\n','',return_json_API[\"query\"][\"pages\"][str(self.Pageid)][\"extract\"])\r\n except:\r\n wiki_content = 'Je n\\'ai rien trouve sur Wikipedia, enfin je veux dire ce ne me dit rien du tout :-(' \\\r\n 'Au fait n\\'oublie pas de mettre des majuscules aux noms propres !'\r\n\r\n return wiki_content", "def lyrics_by_word(ans):\r\n songs_list = \"\"\r\n ans = ans.lower()\r\n albums = simple_album_list()\r\n for album in albums:\r\n songs = simple_songs_list(album)\r\n for song in songs:\r\n x = song_lyrics(song)\r\n song = str(song)\r\n if ans in x:\r\n songs_list += song + \", \"\r\n return songs_list[:-2]", "def get_single_morpheme(query, xmlword):\n morpheme = ''\n xml_result = xmlword.findall(query)\n if xml_result:\n if query == './/{%s}mor/{%s}mw/{%s}pos/{%s}c' % (NS, NS, NS, NS): #PoS\n morpheme = xml_result[0].text\n xml_pos_subcategories = xmlword.findall('.//{%s}mor/{%s}mw/{%s}pos/{%s}s' % (NS, NS, NS, NS))\n for xml_pos_subcategory in xml_pos_subcategories:\n morpheme += \":\" + xml_pos_subcategory.text\n elif query == './/{%s}mor/{%s}mor-post' % (NS, NS): #clitic\n clitic_parts = xml_result[0].findall('.//{%s}mw' % NS)\n if clitic_parts:\n a = clitic_parts[0].findall('.//{%s}pos/{%s}c' % (NS, NS))\n b = clitic_parts[0].findall('.//{%s}stem' % NS)\n c = clitic_parts[0].findall('.//{%s}mk' % NS)\n morpheme = \" \".join([a[0].text if a else \"\", b[0].text if b else \"\", c[0].text if c else \"\"])\n else:\n morpheme = xml_result[0].text\n return 
morpheme", "def wiktionary_ety(bot, trigger):\n word = trigger.group(2)\n if word is None:\n bot.reply('You must give me a word!')\n return\n\n etymology, _definitions = wikt(word)\n if not etymology:\n bot.reply(\"Couldn't get the etymology for %s.\" % word)\n return\n\n result = \"{}: {}\".format(word, etymology)\n\n bot.say(result, truncation=' […]')", "def paren_references(article,word):\r\n all_references = ''\r\n # extract text inside parentheses containing the word\r\n pattern = r'\\(([^\\)]*\\b{}\\b.*?)\\)'.format(word)\r\n #[^5] will match any character except '5'\r\n matches = re.findall(pattern,article,re.IGNORECASE|re.DOTALL)\r\n if matches:\r\n all_references = '\\n'.join(matches)\r\n return all_references", "def synonyms_wiktionary(name, lang=\"fr\"):\n import wptools\n page = wptools.page(name, wiki='{0}.wiktionary.org'.format(\n lang), lang=lang, silent=True)\n page.get_parse()\n text = page.data['wikitext']\n syn = \"==== {{S|synonymes}} ====\"\n if syn not in text:\n return None\n text = text.split(syn)[1].split(\"====\")[0]\n reg = re.compile(\"[[]{2}(.*?)[]]{2}\")\n res = reg.findall(text)\n return res", "def phraseSound(self, toks):\n\t\tdef head(l):\n\t\t\treturn l[0] if l else None\n\t\ts = [head(self.word.get(t,[''])) for t in toks]\n\t\t#print('phraseSound(',toks,')=',s)\n\t\tif not all(s):\n\t\t\treturn []\n\t\t# nuke numbers, join into one string\n\t\tt = ' '.join([re.sub('\\d+', '', x) for x in s])\n\t\t# nuke consecutive duplicate sounds\n\t\tu = re.sub('(\\S+) \\\\1 ', '\\\\1 ', t)\n\t\tv = u.split()\n\t\t#print('phraseSound2=',v)\n\t\treturn v", "def return_wikipedia_term(res):\n rst = []\n if res['spotted']:\n for s in [s['spot'] for s in res['value']['spots']]:\n r = TagMeService.retrieve_taggings(s.encode('utf-8'), method='POST')\n if len(r['annotations']) != 0:\n for n in r['annotations']:\n if 'title' in n.keys():\n title = n['title'].replace(' ', '_') # strip whitespaces from dbpedia tag\n rst.append(title)\n else:\n print \"Cannot find title in annotations: \" + str(n)\n return rst", "def word_of_the_day():\n r = requests.get(\"http://www.urbandictionary.com\") # link is always homepage\n soup = BeautifulSoup(r.content, features=\"html.parser\") # sets up soup\n def_header = \"**\" + soup.find(\"div\", attrs={\"class\": \"def-header\"}).text.replace(\"unknown\",\n \"\") + \"**\" # header is the word we are defining\n # def_header = def_header[0:len(def_header) - 10] # header always ends in \"unknown\" this removes it\n meaning = soup.find(\"div\", attrs={\"class\": \"meaning\"}).text # gets the definition\n # formatting TODO move to controller\n for x in [1, 2, 3, 4, 5, 6, 7, 8, 9]:\n meaning = meaning.replace(str(x) + \". \", \"\\n\" + str(x) + \". 
\")\n for x in [\"v.\", \"n.\"]:\n meaning = meaning.replace(x, x.upper()[:-1])\n example = soup.find(\"div\", attrs={\"class\": \"example\"}).text # gets the example\n output = def_header + \": \" + \"```\" + meaning + \"\\nEx: \" + example + \"```\" # output string\n output = output.replace(\"&apos\", \"'\") # replaces weird formatting of ' from original\n return output # returns the word, defintion, and example", "def words(text):\n text = \" \".join(text) if text else 'We are the knights who say \"NI\"!'\n xml.words(text)\n return 0", "def peoples_speech(\n corpus_dir: Pathlike,\n output_dir: Pathlike,\n):\n prepare_peoples_speech(\n corpus_dir,\n output_dir=output_dir,\n )", "def words(self):\n return self.title + self.content", "def news_speech():\n #Fetches data from API and creates global varibles.\n news_handle(news_fetch(config_fetcher('news_region'), config_fetcher('news_key')))\n #Creates a daily breifing using varibles\n news_daily_news = Markup((f\"The top headline for today is entitled: {title_1}, and was \\\nwritten by {author_1}. Here is a second headline, entitled: {title_2}, written by {author_2}.\"))\n return news_daily_news", "def combine_genre(songs):\r\n combined = \"\"\r\n for song in songs:\r\n combined += song[\"lyrics\"]\r\n return combined", "def _get_full_vocabulary_string(self, html):\n # The kana represntation of the Jisho entry is contained in this div\n text_markup = html.select_one(\".concept_light-representation\")\n\n upper_furigana = text_markup.select_one(\".furigana\").find_all('span')\n\n # inset_furigana needs more formatting due to potential bits of kanji sticking together\n inset_furigana_list = []\n # For some reason, creating the iterator \"inset_furigana\" and then accessing it here\n # causes it to change, like observing it causes it to change. I feel like Schrodinger\n for f in text_markup.select_one(\".text\").children:\n cleaned_text = f.string.replace(\"\\n\", \"\").replace(\" \", \"\")\n if cleaned_text == \"\":\n continue\n elif len(cleaned_text) > 1:\n for s in cleaned_text:\n inset_furigana_list.append(s)\n else:\n inset_furigana_list.append(cleaned_text)\n\n children = zip_longest(upper_furigana, inset_furigana_list)\n\n full_word = []\n for c in children:\n if c[0].text != '':\n full_word.append(c[0].text)\n elif c[0].text == '' and contains_kana(c[1]):\n full_word.append(c[1])\n else:\n continue\n\n # print(''.join(full_word))\n # print(\"====\")\n return ''.join(full_word)", "def song_by_word(ans):\r\n songs_list = \"\"\r\n ans = ans.lower()\r\n albums = simple_album_list()\r\n for album in albums:\r\n songs = simple_songs_list(album)\r\n for song in songs:\r\n song = str(song)\r\n if ans in song.lower():\r\n songs_list += song + \", \"\r\n return songs_list[:-2]" ]
[ "0.62328875", "0.61990124", "0.6019341", "0.592498", "0.5885903", "0.5857776", "0.58438355", "0.57937104", "0.5765551", "0.57031536", "0.56936556", "0.5668371", "0.5629293", "0.55844367", "0.5553311", "0.5546056", "0.554421", "0.55373454", "0.55305636", "0.5502497", "0.5498724", "0.5495499", "0.54680544", "0.54610604", "0.5454486", "0.54331696", "0.5422253", "0.5408809", "0.5408517", "0.5385986" ]
0.63350236
0
Return a subset of the dataloader from the start batch index to the count specified.
def datasubset(loader, start, count, batch_size):
    # Note: start is the start index of batch, not image
    smaller_dataset = []
    end_idx = count / batch_size
    for batch_idx, (orig_images, labels) in enumerate(loader):
        if start <= batch_idx < end_idx:
            smaller_dataset.append((orig_images, labels))
        if batch_idx > end_idx:
            break
    return smaller_dataset
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_batch(self, count):\n if self.index + count < len(self.pool):\n batch = self.pool[self.index:self.index+count]\n self.index += count\n return batch\n else:\n batch = self.pool[self.index:]\n self.index = 0\n np.random.shuffle(self.pool)\n return batch + self.get_batch(count - len(batch))", "def subsampleData(self, count):\n size = 0\n for block in self.blocks: size += len(block[1])\n subset = numpy.random.permutation(size)[:count]\n subset.sort()\n\n pos = 0\n index = 0\n ret = Dataset()\n for block in self.blocks:\n while subset[index]<(pos+len(block[1])):\n loc = subset[index] - pos\n ret.add(block[0][loc,:], block[1][loc])\n index += 1\n if index==subset.shape[0]: return ret\n pos += len(block[1])\n \n return ret", "def get_batch(self, idxs):\r\n return self.data[(self.start + idxs) % self.maxlen]", "def batches(self, batch_size, count):\n entries = self.entries()\n for _ in range(count):\n yield [next(entries) for _ in range(batch_size)]", "def get_dataset_slice(\n global_params, full_dataset, start_idx, size, dataset=None):\n if size <= 0 or start_idx + size > full_dataset.num_points:\n raise IndexError\n if dataset is None:\n dataset = Dataset()\n dataset.num_points = size\n dataset.init_aux_structures(global_params)\n full_points_ref = full_dataset.get_ref('points')\n dataset_points_ref = dataset.get_ref('points')\n for i in range(size):\n for j in range(global_params.dims):\n dataset_points_ref[i][j] = full_points_ref[start_idx + i][j]\n return dataset", "def sample(self, count):\n batch = deepcopy(random.sample(self.buffer, count))\n batch = [np.array(arr) for arr in zip(*batch)]\n\n return batch", "def take(self, count: int) -> 'List':\n return List(self[:count] if count >= 0 else self[count:])", "def batch_slices(batch, sizes=False, include_ends=True):\n size = scatter_add(torch.ones_like(batch), batch)\n cumsum = torch.cumsum(size, dim=0)\n starts = cumsum - size\n ends = cumsum - 1\n\n slices = starts\n if include_ends:\n slices = torch.stack([starts, ends], dim=1).view(-1)\n\n if sizes:\n return slices, size\n return slices", "def Batch(dataset, batch_size, drop_last=False):\n\n return dataset.batch(batch_size=batch_size, drop_remainder=drop_last)", "def batch(self, data, size):\n\n return [data[x : x + size] for x in range(0, len(data), size)]", "def _Get(self, count):\n if count > MAXIMUM_RESULTS:\n count = MAXIMUM_RESULTS\n entity_list = self._Next(count)\n while len(entity_list) < count and self.__more_results:\n next_results = self._Next(count - len(entity_list))\n if not next_results:\n break\n entity_list += next_results\n return entity_list;", "def first_rows(self, n: int) -> \"SampleDataSet\":\n return SampleDataSet(self._data.iloc[:n].copy())", "def get_training_batch(self, batch_size):\n if self.current_state == 0:\n random.shuffle(self.training_indices)\n\n if (self.current_state + batch_size) > (len(self.training_indices) + 1):\n self.current_state = 0\n return self.get_training_batch(batch_size)\n else:\n self.current_state += batch_size\n batch_indices = self.training_indices[self.current_state:(self.current_state + batch_size)]\n if len(batch_indices) != batch_size:\n self.current_state = 0\n return self.get_training_batch(batch_size)\n return self.data_handler.slice_data(batch_indices)", "def sample(self, batch_size):\n if len(self._buffer) <= batch_size:\n print(\"There are only %d batches in the experience buffer.\" % len(self._buffer))\n return self._buffer\n idxes = [random.randint(0, len(self._buffer) - 1) for _ in range(batch_size)]\n return 
[self._buffer[idx] for idx in idxes]", "def sample_batch(self, batch_size):\n batch = []\n\n # Sample using prorities\n if(self.with_per):\n T = self.buffer.total() // batch_size\n #print(\"T is \",T)\n for i in range(batch_size):\n a, b = T * i, T * (i + 1)\n s = random.uniform(a, b)\n idx, error, data = self.buffer.get(s)\n #print(\"sampled data \", s, \" \",data, end=\" \")\n batch.append((*data, idx))\n\n idx = np.array([i[2] for i in batch])\n #idx in the offline buffer\n \n # Sample randomly from Buffer\n elif self.count < batch_size:\n idx = None\n batch = random.sample(self.buffer, self.count)\n else:\n idx = None\n batch = random.sample(self.buffer, batch_size)\n\n # Return a batch of experience\n names_batch = np.array([i[1] for i in batch])\n\n return names_batch, idx", "def fetch(self, count : int = 1000, offset : int = 0, viewer : M = None):\n query = Query(self.record_class).set_limit(count).set_offset(offset)\n return self.table.fetch(query)", "def split_workload(self, n: int) -> range:\n # cf. https://pytorch.org/docs/stable/data.html#torch.utils.data.IterableDataset\n worker_info = torch.utils.data.get_worker_info()\n if worker_info is None: # single-process data loading, return the full iterator\n workload = range(n)\n else:\n num_workers = worker_info.num_workers\n worker_id = worker_info.id # 1-based\n start = math.ceil(n / num_workers * worker_id)\n stop = math.ceil(n / num_workers * (worker_id + 1))\n workload = range(start, stop)\n return workload", "def fetch(self, batch_size):\n #handling case of arriving at the end of the file\n if self.buffer_pointer <= self.files_counter - 32:\n idxs = np.linspace(self.buffer_pointer, self.buffer_pointer + batch_size - 1 , batch_size)\n else:\n idxs1 = np.linspace(self.buffer_pointer, self.files_counter , self.files_counter - self.buffer_pointer + 1) \n idxs2 = np.linspace(0,(batch_size - (self.files_counter - self.buffer_pointer) - 2) ,(batch_size - (self.files_counter - self.buffer_pointer)-1))\n idxs = np.concatenate(idxs1, idxs2, axis = 0)\n \n self.buffer_pointer += batch_size\n return self.data_handler.fetch_minibatch(self.name,self.files_counter)", "def _init_al_dataset(self):\n\n self._init_dataset()\n\n train_dataset = self.datasets['train']\n\n dataset_size = len(train_dataset)\n self.budget = math.ceil(self.budget_frac*dataset_size)\n Sampler.__init__(self, config, self.budget) # TODO: Weird place to initialise this\n\n all_indices = set(np.arange(dataset_size))\n k_initial = math.ceil(len(all_indices)*self.initial_budget_frac)\n initial_indices = random.sample(list(all_indices), k=k_initial)\n\n sampler_init = data.sampler.SubsetRandomSampler(initial_indices) # need to sample from training dataset\n\n self.labelled_dataloader = data.DataLoader(train_dataset, sampler=sampler_init, batch_size=self.batch_size, drop_last=True)\n self.val_dataloader = data.DataLoader(self.datasets['valid'], batch_size=self.batch_size, drop_last=False)\n self.test_dataloader = data.DataLoader(self.datasets['test'], batch_size=self.batch_size, drop_last=False)\n\n return all_indices, initial_indices", "def get_batch(self, batch_size: int) -> jnp.ndarray:\n\n self._rng, key = jax.random.split(self._rng)\n samples = jax.random.choice(\n key,\n self.dataset.shape[0] - self.eval_batch_size,\n shape=(batch_size,),\n replace=False)\n return self.dataset[samples, ...]", "def _get_slice(series, start, length):\n return [ int(s) for s in series[start:start+length] ]", "def minibatches(dataset: List[T],\n batch_size: int,\n shuffle: bool = True) -> 
Iterator[List[T]]:\n # start indexes 0, batch_size, 2 * batch_size, ...\n batch_starts = [start for start in range(0, len(dataset),batch_size)]\n\n if shuffle: random.shuffle(batch_starts) #shuffle the butches\n\n for start in batch_starts:\n end = start + batch_size\n yield dataset[start: end]", "def get_batch(self, batch_size):\n n, _ = self.contexts.shape\n if self.buffer_s == -1:\n # use all the data\n ind = np.random.choice(range(n), batch_size)\n else:\n # use only buffer (last buffer_s observations)\n ind = np.random.choice(range(max(0, n - self.buffer_s), n), batch_size)\n return self.contexts[ind, :], self.rewards[ind, :]", "def slice_batch(x, n_gpus, part):\n sh = K.shape(x)\n L = sh[0] // n_gpus\n if part == n_gpus - 1:\n return x[part*L:]\n return x[part*L:(part+1)*L]", "def get_slice(self, limit, offset):\r\n if limit == 0:\r\n return self.objects[offset:]\r\n\r\n return self.objects[offset:offset + limit]", "def acquire(self, n, starting=0, batch_size=None):\n sl = slice(starting, starting+n)\n if self._generate_index < sl.stop:\n self.generate(sl.stop - self._generate_index, batch_size=batch_size)\n return self.get_slice(sl)", "def get_dataloaders_with_index(path=\"../../data\", batch_size=64, num_labeled=250,\n lbl_idxs=None, unlbl_idxs=None, valid_idxs=None, which_dataset='cifar10', validation=True):\n\n # Define transform to normalize data\n normalize = transforms.Normalize(\n mean=[0.4914, 0.4822, 0.4465],\n std=[0.2023, 0.1994, 0.2010],\n )\n transform = transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])\n\n if which_dataset == 'cifar10':\n train_set = CustomCIFAR10(root=path, train=True, transform=transform)\n test_set = CustomCIFAR10(root=path, train=False, transform=transform)\n elif which_dataset == 'svhn':\n train_set = datasets.SVHN(root=path, split='train', download=True, transform=transform)\n test_set = datasets.SVHN(root=path, split='test', download=True, transform=transform)\n else:\n raise Exception('Not supported yet')\n\n\n # Split indexes between labeled, unlabeled and validation\n if which_dataset == 'cifar10':\n training_labels = train_set.targets\n elif which_dataset == 'svhn':\n training_labels = train_set.labels\n else :\n training_labels = train_set.targets\n\n if validation:\n train_labeled_idxs, train_unlabeled_idxs, val_idxs = labeled_unlabeled_val_split(training_labels, int(num_labeled / 10))\n else:\n train_labeled_idxs, train_unlabeled_idxs = labeled_unlabeled_split(training_labels, int(num_labeled / 10))\n val_idxs = []\n\n # If indexes are provided, use them\n if lbl_idxs is not None:\n train_labeled_idxs = lbl_idxs\n train_unlabeled_idxs = unlbl_idxs\n val_idxs = valid_idxs\n\n # Define samplers using indexes\n train_labeled_sampler = SubsetRandomSampler(train_labeled_idxs)\n train_unlabeled_sampler = SubsetRandomSampler(train_unlabeled_idxs)\n val_sampler = SubsetRandomSampler(val_idxs)\n\n # Create data loaders\n train_labeled_loader = DataLoader(train_set, batch_size=batch_size, sampler=train_labeled_sampler, num_workers=0)\n train_unlabeled_loader = DataLoader(train_set, batch_size=batch_size, sampler=train_unlabeled_sampler, num_workers=0)\n val_loader = DataLoader(train_set, batch_size=batch_size, sampler=val_sampler, num_workers=0)\n test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=0)\n\n if not validation:\n val_loader = test_loader\n\n return train_labeled_loader, train_unlabeled_loader, val_loader, test_loader, train_labeled_idxs, train_unlabeled_idxs, val_idxs", "def 
subsample(self, dataset):\n sample_idx = np.random.choice(\n dataset.shape[0], self.sample_size, replace=True)\n sample = dataset[sample_idx,...]\n return sample", "def __getitem__(self, index):\r\n\r\n # Generate indexes of the batch\r\n indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]\r\n\r\n # Find list of IDs\r\n list_ids_temp = [self.list_IDs[k] for k in indexes]\r\n\r\n # Calls function to load batch of data into memory\r\n X, y = self.__data_generation(list_ids_temp)\r\n\r\n return X, y", "def sample(self, batch_size):\n buffer_size = len(self.buffer)\n print(\"**\",buffer_size)\n index = np.random.choice(np.arange(buffer_size), size=batch_size, replace=False)\n return [self.buffer[i] for i in index]" ]
[ "0.6776425", "0.67340755", "0.6359823", "0.60727173", "0.5961567", "0.5891969", "0.5876109", "0.5873678", "0.5870568", "0.57987577", "0.5769156", "0.5766878", "0.5749042", "0.573495", "0.57309306", "0.57300115", "0.5710676", "0.5704602", "0.57015926", "0.5698963", "0.569104", "0.5678728", "0.56717575", "0.5663599", "0.56448346", "0.56424475", "0.5623515", "0.5620486", "0.56175995", "0.55702573" ]
0.71910286
0
Builds an entry from a database row.
def FromRow(cls, row):
    return Entry(*row)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_update(meta, session, row):\n if row.type == assignment_sql.AssignmentType.USER_PROJECT:\n return build_user_project_entry(meta, session, row)\n elif row.type == assignment_sql.AssignmentType.GROUP_PROJECT:\n return build_group_project_entry(meta, session, row)\n elif row.type == assignment_sql.AssignmentType.USER_DOMAIN:\n return build_user_domain_entry(meta, session, row)\n elif row.type == assignment_sql.AssignmentType.GROUP_DOMAIN:\n return build_group_domain_entry(meta, session, row)\n # If the row type doesn't match any that we understand we drop\n # the data.", "def parse_batch_db_entry(row) -> PlantBatch:\n\n # We need to reassign to a new variable since dicts are immutable\n print(row)\n row0 = ast.literal_eval(row[0])\n row1 = ast.literal_eval(row[1])\n row2 = ast.literal_eval(row[2])\n\n plant: Plant = Plant(name=row0['name'], family_name=row0['family_name'], metadata=row0['metadata'])\n location: Location = Location(row1['name'], row1['area'], Climate(row1['climate_type']))\n tray: Tray = Tray(row2['tray_type'], row2['footprint'], row2['capacity'])\n n_trays: int = int(row[3])\n planting_time: datetime = datetime.fromisoformat(row[4])\n\n batch: PlantBatch = PlantBatch(plant, location, tray, n_trays, planting_time)\n\n return batch", "def build_row(raw_row):\n temp_row = dict()\n ### Plan\n # Add email addresses to row\n # If message == Clicked or message == Submitted data\n ## Append 'Time Clicked' to dict. Format MM/DD/YYYY | HH:mm\n ## If message == Submitted data\n ### Append Credentials Harvested: Yes to dict\n ## Else:\n ### Append Credentials Harvested: No to dict\n # Append Reported: No, Replied to Email: No, Notes: ''\n\n # Append email\n temp_row['Email Address'] = raw_row['email']\n\n if raw_row['message'] == 'Clicked Link' or raw_row['message'] == 'Submitted Data':\n # print(raw_row['time'])\n # print(arrow.get(raw_row['time'], 'YYYY-MM-DDTHH:mm:ss.SSSSSSSSS-ZZ').format('MM/DD/YYYY | HH:mm'))\n temp_row['Time Clicked'] = arrow.get(raw_row['time'], 'YYYY-MM-DDTHH:mm:ss.SSSSSSSSS-ZZ').format('MM/DD/YYYY | HH:mm')\n if raw_row['message'] == 'Submitted Data':\n temp_row['Credentials Harvested'] = 'Yes'\n else:\n temp_row['Credentials Harvested'] = 'No'\n else:\n temp_row['Time Clicked'] = 'N/A'\n temp_row['Credentials Harvested'] = 'No'\n\n temp_row.update({'Reported': '', 'Replied to Email': '', 'Notes': ''})\n return temp_row", "def create_entry(entry):\n Entry.create(**entry)\n return entry", "def create_from_esi_row(cls, data_row, entity_external_id, *args, **kwargs):\n try:\n db_object = cls._create_from_esi_row(\n data_row, entity_external_id, *args, **kwargs)\n db_object.entity = EveEntity.objects.get(\n external_id=entity_external_id)\n db_object.save()\n except Exception:\n message = (\n f\"Failed to create {cls.__name__} from ESI data row: {data_row}.\\n\"\n f\"Failed for entity {entity_external_id}\\n\"\n )\n logger.exception(message)", "def __init__(self, row):\n state = inspect(row)\n\n # Don't store the actual row, so we can serialize.\n self._model_cls = state.class_\n self._pk = state.identity\n\n self.data = Box(dict(row))", "def _convert_row(self, row) :\n\n self.row_id += 1\n data = [self.row_id]\n\n if type(row) == type({}) :\n data.extend(row.get(col, None) for col in self.cols[1:])\n elif type(row) in [type([]), type(())] :\n data.extend(row)\n elif type(row) == RowReference :\n data.extend(row.values())\n else :\n raise Exception(\n 'Don''t know how to add row from: %s ' % str(row)\n )\n\n if len(data) != len(self.cols) :\n raise 
Exception(\n 'Wrong number of values for new row with cols %s: %s' % \n (str(self.cols), str(data))\n \n )\n\n return data", "def feedDataRow(self, row, **kwargs): \n\n self.nullify()\n \n #avoid automatic call to \"read\" after _objectid changes \n self._feed = True\n self._objectid = row['objectid']\n self._feed = False\n \n self._original_values.clear()\n for cn in row:\n if cn in self._extra_sql_columns:\n self.__dict__[cn] = row[cn]\n if cn not in self._table: continue\n field = self._table[cn]\n decoded = field.val_sql2py ( row[cn] )\n self.__dict__[cn] = decoded\n self._original_values[cn] = decoded\n \n #if _astxt was not provided with the data, _reprfunc is not set,\n #get the text from database\n if '_astxt' in row:\n self._astxt = row['_astxt']\n elif self._reprfunc:\n self._astxt = self._reprfunc (self)\n else: \n self._astxt = self.TextCache[self._objectid] or \"(none)\"\n \n if self._resolve_child_ref:\n for child_handle in self._child_referencelist:\n t, f = self._table.reference_child_hash[child_handle] \n self._child_referencelist[child_handle].filter = f.name + \" = '\" + str(row['objectid']) + \"'\"\n self._child_referencelist[child_handle].reload()\n \n self._hasdata = True\n self._isnew = False\n self._ismodified = False\n self._modified_values.clear()", "def _create_from_esi_row(data_row, entity_external_id, *args, **kwargs):\n contract_id = data_row['contract_id']\n if EveContract.objects.filter(contract_id=contract_id).exists():\n contract = EveContract.objects.get(contract_id=contract_id)\n update = True\n else:\n contract = EveContract()\n update = False\n\n for key in data_row.keys():\n try:\n if type(data_row[key]) == pyswagger.primitives._time.Datetime:\n data_row[key] = data_row[key].to_json()\n setattr(contract, str(key), data_row[key])\n except AttributeError:\n logger.error(\n f\"Encountered unknown attribute {key} for EveContract\")\n\n if update:\n return contract # the other fields will already be sorted out if we're doing an update\n\n if not 'resolved_ids' in kwargs:\n logger.warning(\n \"Resolved IDs were not passed. 
Performance decrease.\")\n ids_to_resolve = [contract.acceptor_id, contract.assignee_id,\n contract.issuer_id, contract.issuer_corporation_id]\n resolved_ids = resolve_ids_with_types(ids_to_resolve)\n # todo resolve IDs\n else:\n resolved_ids = kwargs.get('resolved_ids')\n\n contracts_response = EveClient.call(\n 'get_characters_character_id_contracts_contract_id_items',\n character_id=entity_external_id, contract_id=contract.contract_id)\n\n if not contracts_response:\n items = None\n else:\n items = []\n for item in contracts_response.data:\n items.append(\"%s %s\" % (\n item['quantity'], resolve_type_id_to_type_name(item['type_id'])))\n contract.items = \"\\n\".join(items)\n\n contract.acceptor_name = resolved_ids[contract.acceptor_id]['name']\n contract.assignee_name = resolved_ids[contract.assignee_id]['name']\n contract.issuer_name = resolved_ids[contract.issuer_id]['name']\n contract.acceptor_type = resolved_ids[contract.acceptor_id]['type']\n contract.assignee_type = resolved_ids[contract.assignee_id]['type']\n contract.issuer_type = resolved_ids[contract.issuer_id]['type']\n contract.issuer_corporation_name = resolved_ids[contract.issuer_corporation_id]\n\n return contract", "def buildRows(keyValuePair):\n\n # define the rows and get the correct index to insert quanities\n id_ = keyValuePair[0][0]\n idx = setColumnValues(keyValuePair)\n\n # create and return row\n return (id_, [idx.get(i, 0) for i in range(8)])", "def dict_factory(cursor, row):\n rowdict = {}\n for idx, col in enumerate(cursor.description):\n rowdict[col[0]] = row[idx]\n return rowdict", "def fromrow(cls, row, backup_dir):\n # pylint: disable=protected-access\n idx = int(row[0])\n backup = Revoker._get_backup(backup_dir, idx, row[1])\n backup_key = Revoker._get_backup(backup_dir, idx, row[2])\n\n obj = cls(backup)\n obj.add_meta(idx, row[1], row[2], backup, backup_key)\n return obj", "def construct_row(self, ridx, row_data):\n \n # construct the row bits\n row_bits = [self.row_base[0].format(self.labels[ridx])]\n for cidx in range(self.ncols):\n data_str = self.cell_base.format(row_data[cidx])\n cell_str = self.row_base[cidx+self.col_start].format(data_str)\n row_bits.append(cell_str) \n \n # stick it all together and return\n row_str = f\"|{'|'.join(row_bits)}|\\n\"\n return row_str", "def dict_factory(cursor, row):\n fields = [column[0] for column in cursor.description]\n return {key: value for key, value in zip(fields, row)}", "def sqlite3_dict_factory(cursor, row):\n dict_row = dict()\n for idx, col in enumerate(cursor.description):\n dict_row[col[0]] = row[idx]\n dict_row[idx] = row[idx]\n return dict_row", "def dict_factory(cursor, row):\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]\n return d", "def dict_factory(cursor, row):\r\n\td = {}\r\n\tfor idx, col in enumerate(cursor.description):\r\n\t\td[col[0]] = row[idx]\r\n\treturn d", "def dict_factory(cursor, row):\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]\n return d", "def dict_factory(cursor, row):\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]\n return d", "def dict_factory(cursor, row):\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]\n return d", "def dict_factory(cursor, row):\n dic = {}\n for idx, col in enumerate(cursor.description):\n if isinstance(row[idx], unicode):\n dic[col[0]] = u.unicode_to_string(row[idx])\n else:\n dic[col[0]] = row[idx]\n return dic", "def map_row(row, mapping, model_class, 
extra_data_fields=[], cleaner=None, **kwargs):\n initial_data = kwargs.get('initial_data', None)\n model = model_class()\n\n # _log.debug(\"map_row's mappings {}\".format(mapping))\n\n # If there are any initial states we need to set prior to mapping.\n if initial_data:\n model = apply_initial_data(model, initial_data)\n\n # concat is not used as of 2016-09-14\n # concat = _set_default_concat_config(concat)\n\n for raw_field, value in row.items():\n is_extra_data = True if raw_field in extra_data_fields else False\n\n # Save the value if is is not None, keep empty fields.\n if value is not None:\n model = apply_column_value(raw_field, value, model, mapping, is_extra_data, cleaner)\n\n return model", "def _insert_table_row(self, db: str, table: str, row: Dict[str, Any]):\n pass", "def process_row(self, table, row):\n for index, column in enumerate(table.columns):\n hash_key = hash(frozenset(column.items()))\n column_type = self.column_types[hash_key] if hash_key in self.column_types else self.column_type(column)\n if row[index] == None and ('timestamp' not in column_type or not column['default']):\n row[index] = '\\N'\n elif row[index] == None and column['default']:\n if self.tz:\n row[index] = '1970-01-01T00:00:00.000000' + self.tz_offset\n else:\n row[index] = '1970-01-01 00:00:00'\n elif 'bit' in column_type:\n row[index] = bin(ord(row[index]))[2:]\n elif isinstance(row[index], (str, unicode, basestring)):\n if column_type == 'bytea':\n row[index] = Binary(row[index]).getquoted()[1:-8] if row[index] else row[index]\n elif 'text[' in column_type:\n row[index] = '{%s}' % ','.join('\"%s\"' % v.replace('\"', r'\\\"') for v in row[index].split(','))\n else:\n row[index] = row[index].replace('\\\\', r'\\\\').replace('\\n', r'\\n').replace(\n '\\t', r'\\t').replace('\\r', r'\\r').replace('\\0', '')\n elif column_type == 'boolean':\n # We got here because you used a tinyint(1), if you didn't want a bool, don't use that type\n row[index] = 't' if row[index] not in (None, 0) else 'f' if row[index] == 0 else row[index]\n elif isinstance(row[index], (date, datetime)):\n if isinstance(row[index], datetime) and self.tz:\n try:\n if row[index].tzinfo:\n row[index] = row[index].astimezone(self.tz).isoformat()\n else:\n row[index] = datetime(*row[index].timetuple()[:6], tzinfo=self.tz).isoformat()\n except Exception as e:\n print e.message\n else:\n row[index] = row[index].isoformat()\n elif isinstance(row[index], timedelta):\n row[index] = datetime.utcfromtimestamp(_get_total_seconds(row[index])).time().isoformat()\n else:\n row[index] = AsIs(row[index]).getquoted()", "def _parse_result_entry(result):\n entry = ParsedEntry()\n\n if \"content\" in result and len(result.content) > 0:\n entry.content = result.content[0].value\n # if not html, have to escape\n if result.content[0].type not in HTML_MIME_TYPES:\n entry.content = cgi.escape(entry.content)\n elif \"summary_detail\" in result:\n entry.content = result.summary_detail.value\n # if not html, have to escape\n if result.summary_detail.type not in HTML_MIME_TYPES:\n entry.content = cgi.escape(entry.content)\n else:\n entry.content = \"\"\n entry.link = result.get(\"link\", None)\n entry.title = result.get(\"title\", None)\n if \"author_detail\" in result and \"name\" in result.author_detail:\n entry.author = result.author_detail.name\n else:\n entry.author = None\n if \"updated_parsed\" in result and result.updated_parsed is not None:\n entry.date = int(calendar.timegm(result.updated_parsed))\n elif \"published_parsed\" in result and 
result.published_parsed is not None:\n entry.date = int(calendar.timegm(result.published_parsed))\n else:\n entry.date = int(time.time())\n # try to find something to use as GUID, or fall back to static string\n guid_content = result.get(\"id\", entry.title)\n if guid_content is None:\n guid_content = \"None\"\n entry.guid = hashlib.sha1(guid_content.encode('utf-8')).hexdigest()\n return entry", "def get_db_entry_from_gql_input(model, input):\n data = gql_input_to_dictionary(input)\n print(data)\n database_entry = get_entry_by_id(model, data['id'])\n return database_entry", "def make_insert_row(table_str, attribute_value_dict): #works\n#Aanpassen zodat query niet uitgevoerd wordt als pk al bestaat\n #initialize input for string formatting\n attributes_string = \"(\"\n values_list = []\n #retrieve attributes and values from dictionary and add them to the string\n for key in attribute_value_dict:\n values_list += [attribute_value_dict[key]]\n attributes_string += \"%s, \" % key\n attributes_string = attributes_string[:(len(attributes_string)-2)]\n attributes_string += ')'\n values = str(tuple(values_list))\n sql = \"\"\"INSERT INTO `%s` %s VALUES %s \"\"\" % (table_str, attributes_string, values)\n return sql", "def make_dicts(cursor, row):\n return dict((cursor.description[idx][0], value)\n for idx, value in enumerate(row))", "def make_dicts(cursor, row):\n return dict((cursor.description[idx][0], value)\n for idx, value in enumerate(row))", "def add_entry(self, entry):\n if self.get_entry(entry):\n return entry\n\n keys, values = [], []\n for i in entry:\n keys.append(\"'{}'\".format(i))\n if not isinstance(entry[i], str):\n values.append(\"'{}'\".format(str(entry[i])))\n else:\n values.append(\"'{}'\".format(entry[i]))\n\n keys.append(\"'hash'\")\n values.append(\"'{}'\".format(self._calculate_hash(entry)))\n sql = 'INSERT INTO {t_id} ({keys}) VALUES ({values})'.format(\n t_id=self.table_id, keys=','.join(keys), values=','.join(values))\n self.fusiontables.query().sql(sql=sql).execute()" ]
[ "0.64496917", "0.61219496", "0.58081305", "0.58061045", "0.57769185", "0.5762503", "0.5753291", "0.5748479", "0.5658568", "0.5652168", "0.55532646", "0.5488344", "0.548819", "0.5483393", "0.5455265", "0.54362726", "0.5413206", "0.5409367", "0.5409367", "0.5409367", "0.5401953", "0.5373751", "0.5360162", "0.5305119", "0.5295613", "0.526557", "0.52110296", "0.5209598", "0.5209598", "0.52094215" ]
0.7453001
0
Filters and sorts the entries to be prefetched for a given domain. Uses the default thresholds defined in resource_prefetch_common.cc.
def FilterAndSort(entries, domain):
    result = filter(
        lambda x: ((domain is None or x.main_page_url == domain) and
                   x.confidence > .7 and x.number_of_hits >= 2),
        entries)
    return sorted(result, key=operator.attrgetter('score'), reverse=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sort(self):\n\t\tself.servers = sorted(self.servers, key=lambda s: s.load)\n\t\tself.servers = sorted(self.servers, key=lambda s: s.distance_class)\n\t\tself.servers = sorted(self.servers, key=lambda s: s.country == self.locale_info.country, reverse=True)", "def assign_priorities_and_dp(self):\r\n taskset_copy = copy(self.taskset)\r\n tasks = taskset_copy.sorted_by_crit()", "def order_domain_values(self, var, assignment):\n # retrieve the domain for the variable\n domain = self.domains[var]\n # initialise a dictionary for sorting the values in the variable's domain\n sorting_dict = {} \n # for each of the values in the variable's domain \n for value in domain:\n # set the constraint counter to zero\n sorting_dict[value] = 0\n # for each of the neighbors of the variable\n for neighbor in self.crossword.neighbors(var):\n # retrieve the overlap indexes\n overlap = self.crossword.overlaps[(neighbor, var)]\n # for each of the overlap's possible values (the overlap's domain)\n for test in self.domains[neighbor]:\n # if the overlap letter is not the same\n if test[overlap[0]] != value[overlap[1]]:\n # this value constrains the neighbor's domain\n sorting_dict[value] += 1\n # sort the dictionary by the value of the sorting key\n sorted_vars = sorted(domain, key=lambda x: sorting_dict[x])\n return sorted_vars", "def queue_domain(event, context):\n\n domain = event['domain']\n fetch_limit = int(os.environ['PAGE_FETCH_LIMIT'])\n if 'limit' in event:\n fetch_limit = int(event['limit'])\n\n index = os.environ['CC_INDEX']\n if 'index' in event:\n index = event['index']\n\n # pull all entries for this domain from index\n indices = list(get_warc_indices_for_domain(domain, index))\n\n # sample returned indices to 'limit' (where they exceed 'limit')\n sampled_indices = indices\n if fetch_limit < len(indices):\n sampled_indices = random.sample(indices, fetch_limit)\n\n # for each sampled index, get stored page text by URL\n lambda_client = boto3.client('lambda')\n\n results = list()\n\n for index in sampled_indices:\n results.append(\n lambda_client.invoke(\n FunctionName='fetch_wet_entry',\n Payload=json.dumps(index),\n InvocationType='Event'\n )\n )\n\n return {\n \"total_index_count\": len(indices),\n \"requested_indices\": sampled_indices\n }", "def schedule_offloading(\n rp_boxes: List[np.ndarray]\n) -> List[np.ndarray]:\n return prioritize_larger_rp(rp_boxes)", "def __order_domain_values(self, var, assignment):\n values_to_inconsistencies = {}\n unassigned_vars = self.__unassigned_variables(assignment)\n unassigned_neighbors = self.__unassigned_neighbors(var, unassigned_vars)\n for value in var.domain:\n inconsistent_value_count = 0\n for unassigned_neighbor in unassigned_neighbors:\n consistent_domain_values = self.__consistent_domain_values(var, value, unassigned_neighbor)\n inconsistencies = unassigned_neighbor.domain.difference(consistent_domain_values)\n inconsistent_value_count += len(inconsistencies)\n values_to_inconsistencies[value] = inconsistent_value_count\n\n ordered_values = sorted(values_to_inconsistencies, key=values_to_inconsistencies.get)\n return ordered_values", "def priority(predicates, predicates_rules):\n return sorted(predicates, key=lambda k: keysort(k[\"name\"], predicates_rules))", "def apply_sorting(tasks, *conditions):\n return tasks.sort(conditions)", "def prepare_domain_restrictions(self):\n for index, restriction in enumerate(self._domain_restrictions):\n self.add_specific_domain_restriction(index+1, restriction)", "def normalize_prefetch_lookups(lookups, 
prefix=None):\n ret = []\n for lookup in lookups:\n if not isinstance(lookup, Prefetch):\n lookup = Prefetch(lookup)\n if prefix:\n lookup.add_prefix(prefix)\n ret.append(lookup)\n return ret", "def order_domain_values(csp, variable):\n domain = variable.domain\n returned = []\n \"\"\"\n print variable\n for a in csp.constraints[variable]:\n print a\n \"\"\"\n for x in domain:\n returned.append(conflict_count(csp, variable,x))\n\n ret = sorted(returned, key=itemgetter(1))\n rett = []\n for x in ret:\n rett.append(x[0])\n \n return rett\n # TODO implement this\n pass", "def sort_and_reduce(self):\n self.data = sorted(self.data, key=lambda item: item.pubDate)\n if len(self.data) > MAX_SIZE:\n self.data = self.data[-MAX_SIZE:]", "def search(url, domain_list):\n resp = requests.get(url)\n if not resp.json().get('hits', '').get('hits', []):\n return\n for hit in resp.json()[\"hits\"][\"hits\"]:\n domain = hit.get(\"_source\", {}).get(\"domain\", \"\")\n if not domain:\n continue\n if not domain in domain_list:\n domain_list.append(domain)\n #print(hit[\"_source\"].get(\"title\", \"\").encode(\"ascii\",\"ignore\"))\n if domain not in ALLOWED_DOMAINS:\n print(domain)", "def prefetch(self, oids):\n self.timeline.reset()\n self.timeline.start(\"prefetch\")\n fetch(oids)\n self.timeline.end(\"prefetch\")", "def items_by_domain(self, domain: str) -> List[dict]:\n if not self.connected:\n raise NotConnected(\"Please call connect first.\")\n return [value for key, value in self._states.items() if key.startswith(domain)]", "def pre_sort(self, qs):\n return qs", "def order_domain_values(self, var):\n return least_constraining_value(self, var.name)", "def by_domains(self):\n\t\t\n\t\t# TODO: use urllib instead\n\t\turl_format = r'^\\s*(?:(?P<protocol>\\w+)://)?(?P<domain>[\\w\\d\\-\\.]+)(?::(?P<port>\\d+))?/?(?P<everything_else>.*)$'\n\t\tsites = {}\n\t\tfor line in self.source.lines:\n\t\t\ttry:\n\t\t\t\tif self.filter(line):\n\t\t\t\t\tresult = re.match(url_format, line.content.url)\n\t\t\t\t\tif result.group('domain') not in sites.keys():\n\t\t\t\t\t\tsites[result.group('domain')] = 0\n\t\t\t\t\tsites[result.group('domain')] += int(line.content.size)\n\t\t\texcept AttributeError:\n\t\t\t\tpass\n\t\t\n\t\t# TODO: sort; convert to lists is even better\n\t\t\n\t\treturn sites", "def __sort_by_priority(self, input_list):\n print(\"========================Start of __sort_by_priority() Method *\")\n # temp1 = input_list.sort(key=operator.attrgetter(\"submission_time\"))\n # temp1 = temp1.sort(key=operator.attrgetter(str(\"__req_start\")))\n\n # sending one item from list at a time to be enqueued ensuring sorted-nes\n for j in range(len(input_list)):\n self.current_queue.enqueue(input_list[j])\n # print(\"Enqueued the FF item from Input list :\" + input_list[j].showFlightInfo())\n # print(\"*De-queued the FF item from Queue :\" + self.current_queue.dequeue(j).showFlightInfo())\n \"\"\"\n if input_list[i].get_reqStart <= self.current_queue.first.get_reqStart:\n if input_list[i].get_submissionTime <= self.current_queue.first.get_submissionTime:\n temp = self.current_queue.first\n self.current_queue.first = input_list[i]\n self.current_queue.first.next = temp\"\"\"\n print(\"========================End of __sort_by_priority() Method *\")", "def prune(self,domains,constraint):\n left_var = constraint.left[0]\n left_const_mult = constraint.left[1]\n left_val = constraint.left[2]\n\n right_var = constraint.right[0]\n right_const_mult = constraint.right[1]\n right_val = constraint.right[2]\n\n new_domains = 
deepcopy(domains)\n\n\n # Simple Variable-Value Labeling\n if (left_val == [0] and left_const_mult == [1]) and (right_const_mult == [0]):\n new_domains[left_var[0]] = [right_val[0]]\n \n # Simple Variable-Variable Labeling\n elif (left_val == [0] and left_const_mult == [1]) and (right_val == [0] and right_const_mult == [1]):\n new_set = set(new_domains[left_var[0]]) & set(new_domains[right_var[0]])\n new_domains[left_var[0]] = list(new_set)\n new_domains[right_var[0]] = list(new_set)\n\n else:\n l = 0\n for var,mult in zip(left_var,left_const_mult):\n l += mult*max(domains[var])\n for const in left_val:\n l += const\n\n r = 0\n for var,mult in zip(right_var,right_const_mult):\n r += mult*min(domains[var])\n for const in right_val:\n r += const\n\n # print(l,r)\n # print(new_domains)\n # print(constraint)\n\n for var,mult in zip(left_var,left_const_mult):\n max_var = max(domains[var])\n comp = (r-(l-mult*max_var)) / mult\n for elem in domains[var]:\n if elem < comp:\n new_domains[var].remove(elem)\n\n for var,mult in zip(right_var,right_const_mult):\n min_var = min(domains[var])\n comp = (l-(r-mult*min_var)) / mult\n for elem in domains[var]:\n if elem > comp:\n new_domains[var].remove(elem)\n\n # for i,domain in enumerate(new_domains):\n # if len(domain) == 0:\n # print(i,l,r)\n # print(\"Old:\",domains)\n # print(\"New:\",new_domains)\n # print(domains)\n # print(constraint)\n # print(\"------------------------\")\n # raise SystemError(\"Domain is Empty!!\")\n\n return new_domains", "def _reorder_collected(self, data):\n priority = {\n 'post': 1,\n 'get': 2,\n 'put': 2,\n 'patch': 2,\n 'head': 2,\n 'options': 2,\n 'delete': 3,\n }\n data = sorted(\n data,\n key=lambda x: priority.get(getattr(x, 'name', ''), 4))\n return data", "def read_callback(data=None):\n\n hits_by_domain = get_hits_by_domain()\n\n if not hits_by_domain:\n collectd.info('hits_by_domain not collected successfully')\n pass\n else:\n for key in hits_by_domain:\n metric = collectd.Values()\n metric.plugin = 'hits_by_domain'\n metric.type = 'count'\n metric.type_instance = key\n metric.values = [hits_by_domain[key]]\n metric.dispatch()", "def sort_valid_by_loading(self, loading_validities):\n # returns adresses with validity and loading\n sorted_addresses = sorted([(x, y[1])\n for x, y in loading_validities.items()\n if y[0]], key=lambda (x, y): y)\n self.logger.trace('sorted_addresses={}'.format(sorted_addresses))\n return sorted_addresses", "def load_conditions(self):\n self.loading = True\n m = self.get_current_measurement()\n d = {}\n for key in m.priority:\n arr1 = m.priority[key]\n for arr2 in arr1:\n val1, val2, weight = arr2\n condition = self.format_condition(key, val1, val2)\n d[condition] = weight\n \n load_table_from_dict(d, self.priorityTableWidget)\n self.loading = False", "def order_domain_values(var,assignment,csp):\n #right now it works only as just convert value and return\n #no special black magic yet\n return var.domain", "def test_health_networks_ordering(self):\n state = State.objects.get(name=\"NC\")\n hns = HealthNetwork.objects.filter(state=state)\n hns_p = {n: [hn for hn in hns if hn.priority == n] for n in PRIORITY.keys()}\n self.assertEqual(\n hns_p[0], hns[: len(hns_p[0])], msg=\"priority 0 health networks should be first\"\n )\n self.assertEqual(\n hns_p[1],\n hns[len(hns_p[0]): len(hns_p[0]) + len(hns_p[1])],\n msg=\"priority 1 health networks should be second\",\n )\n self.assertEqual(\n hns_p[2],\n hns[len(hns_p[0]) + len(hns_p[1]): len(hns_p[0]) + len(hns_p[1]) + len(hns_p[2])],\n 
msg=\"priority 2 health networks should be third\",\n )", "def print_all_dns_records():\n for domain in sorted(get_domains()):\n dns_records = get_domain_dns_records(domain)\n print(domain)\n pprint(dns_records)\n print(\"*\" * 50)\n # TODO: poor man's rate limiter. improve?\n time.sleep(2)", "def init_priority_list(self, urls):\n self.priority_store.clear()\n for u in urls:\n expire, depth = 1, 0\n data = (expire, u, self.start_delay, depth)\n self.priority_store.push(data, expire)", "def _merge_and_sort_cut(self, per_entity_prediction, threshold=0, topk=-1):\n\n def quality_method(p):\n return p.get_quality(self.quality, self.quality_aggregation)\n\n per_entity_prediction_filtered = defaultdict(list)\n for sub, per_obj_predictions in per_entity_prediction.items():\n # print([(k, p.triple[2], qaulity_method(p)) for k, p in per_obj_predictions.items()])\n merged_predictions = list(\n filter(lambda p: quality_method(p) > threshold, list(per_obj_predictions.values())))\n\n merged_predictions.sort(key=quality_method, reverse=True)\n\n include = topk if topk > 0 else len(merged_predictions)\n per_entity_prediction_filtered[sub] = merged_predictions[:include]\n\n return per_entity_prediction_filtered", "def non_deferred(self):\n\n return self.filter(priority__lt='4')" ]
[ "0.517122", "0.50378364", "0.49676886", "0.483992", "0.47872436", "0.4769185", "0.47265437", "0.47085798", "0.46775368", "0.46577793", "0.4654167", "0.4642485", "0.46311262", "0.46287495", "0.45954353", "0.45926008", "0.45721456", "0.45484707", "0.45371377", "0.45344085", "0.45265892", "0.45108804", "0.4505873", "0.45041585", "0.45001683", "0.4496703", "0.4459394", "0.4446616", "0.44431126", "0.4428566" ]
0.6398537
0
Standings page by the season year.
def standings_by_season(season): season = int(season) + 1 scoreboard = nba_py.Scoreboard(month=7, day=1, year=season) east_standings = scoreboard.east_conf_standings_by_day() west_standings = scoreboard.west_conf_standings_by_day() return render_template("standings.html", title="standings", east_standings=enumerate(east_standings, 1), west_standings=enumerate(west_standings, 1), team=CITY_TO_TEAM)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scrape():\n league_year = Config.get_property(\"league_year\")\n\n # Create table\n season_data = client.season_schedule(league_year)\n season_data = br_enum_to_string(season_data)\n return season_data", "def get_seasons_information():\n\n #getting the guidebox_id variable from show_page.html\n guidebox_id = request.args.get(\"guidebox_id\")\n\n #make API to get season information, gets back list of season information\n seasons_results = guidebox_season_info(guidebox_id)\n\n for season in seasons_results:\n date = season[\"first_airdate\"]\n year = str(date)[0:4]\n season[\"first_airdate\"] = year\n\n return jsonify(seasons_results)", "def with_season(title, season):\n videos = list(mythVideo.searchVideos(title = title, season = season))\n\n for video in videos:\n video.label = video.title + \" - \" + video.subtitle\n video.url = \"/videos/\" + video.title + \"/\" + video.hash\n\n videos = sorted(videos, key = lambda video: video.episode)\n return render_template('list.html', items = videos, page_title = title + \" Season \" + str(season))", "def distributeSeason(self):\n i = 1\n for day in self.daylist:\n if i >= monthbeg[5] and i < monthbeg[9]: #june through SEpt as per SCE\n day.season = 'summer' #https://www.sce.com/residential/rates/Time-Of-Use-Residential-Rate-Plans\n i = i + 1\n else:\n day.season = 'winter'\n i = i+1", "def season_games(year):\n\tLOG.debug('Getting season %d', year)\n\tdata = read_html(io=season_games_url(year),\n\t\t\t\t\t attrs={'id': 'games'},\n\t\t\t\t\t infer_types=False,\n\t\t\t\t\t header=0)\n\tif len(data) != 1:\n\t\traise CantFindTheRightTable\n\tdata = data.pop()\n\n\t# Cleaning.\n\tdel data[\"Unnamed: 3\"]\n\t# The code below issues \"UserWarning: \" So we catch UserWarnings.\n\twith warnings.catch_warnings():\n\t\twarnings.filterwarnings(action='ignore', category=UserWarning,\n\t\t\t\t\t\t\t\tmodule=r'pandas\\.core\\.frame',\n\t\t\t\t\t\t\t\tmessage=(r\"Boolean Series key will be reindexed\"\n\t\t\t\t\t\t\t\t\t\t r\" to match DataFrame index\\.\"))\n\t\t# These rows are mid-table header rows.\n\t\tdata = data[data.Week != \"Week\"][data.Week != \"nan\"]\n\n\tdata['week'] = (data.Week\n\t\t\t\t\t.replace(\"WildCard\", \"wild-card\")\n\t\t\t\t\t.replace(\"Division\", \"divisional\")\n\t\t\t\t\t.replace(\"ConfChamp\", \"conference\")\n\t\t\t\t\t.replace(\"SuperBowl\", \"super-bowl\")\n\t\t\t\t\t.apply(\n\t\t\t\t\t\tlambda s: (int(s)\n\t\t\t\t\t\t\t\t if all(c in '1234567890' for c in s)\n\t\t\t\t\t\t\t\t else s)))\n\tdel data['Week']\n\n\tdata['season'] = year\n\tdata['game_date'] = pd.to_datetime(\n\t\tdata.Date\n\t\t.replace(r\"$\", r\", %d\" % year, regex=True)\n\t\t.replace(r\"^(January|February) (\\d+), \\d+$\", r\"\\1 \\2, %d\" % (year + 1),\n\t\t\t\t regex=True))\n\tdel data['Date']\n\n\tfor column in \"PtsW\", \"PtsL\", \"YdsW\", \"TOW\", \"YdsL\", \"TOL\":\n\t data[column] = data[column].apply(int)\n\n\tdata['WatL'] = data['Unnamed: 5'].apply(lambda x: x == '@')\n\tdel data['Unnamed: 5']\n\tdata['hometeam'] = (~data.WatL * data['Winner/tie'] +\n\t\t\t\t\t\tdata.WatL * data['Loser/tie'])\n\tdata['awayteam'] = (data.WatL * data['Winner/tie'] +\n\t\t\t\t\t\t~data.WatL * data['Loser/tie'])\n\tdata['winner'] = data['Winner/tie']\n\tfor column in 'Winner/tie', 'Loser/tie', \"WatL\":\n\t\tdel data[column]\n\tfor column in 'hometeam', 'awayteam', 'winner':\n\t\tdata[column] = data[column].apply(lambda s: s.split()[-1].lower())\n\n\treturn data", "def getshowsbyseason(season,year, session = None):\n url = APIURL.format(season=season, year = year)\n 
headers = {\"X-CSRF-TOKEN\":session.cookies['X-CSRF-TOKEN']}\n data = alrequests.GET_json(url,session = session, headers=headers)\n return {cat:[Show(**show) for show in shows] for cat,shows in data.items()}", "def by_season(self, season):\n return self.get_queryset().filter(season=season)", "def scrape(self):\n self._validate_date_range(self.start_date, self.end_date)\n self._validate_team()\n self._cache_source()\n soup = self.season_raw_cache[self.start_date.year]\n df = self._parse_raw(soup)\n return self._apply_filters(df)", "def betting_lines(year):\n\n # MongoDB Collection\n m = mongo.Mongo()\n\n # Webapges are by dates\n all_dates = m.find('game_log', {'season': year}, {'_id': 0, 'date': 1}).distinct('date')\n\n browser = webdriver.Chrome('chromedriver')\n\n # Iterate through each date in a season\n for game_date in all_dates:\n\n # Get URL\n url = 'https://classic.sportsbookreview.com/betting-odds/nba-basketball/money-line/?date=' + datetime.strftime(game_date, '%Y%m%d')\n\n scrape_betting_page(url, browser, m, game_date)\n\n browser.close()", "def seasonNumber(self):\n return self.index", "def set_season(self, season):\n self.set_date_range(dt.date(season, 1, 1),\n dt.date(season, 12, 31))", "def current_season():\n td = datetime.datetime.today()\n if td.month > 8:\n return td.year\n return td.year - 1", "def set_season(date_obj):\n date_year = date_obj.year\n\n for key, val in SEASONS.items():\n start = datetime(year=date_year, month=val['start']['month'], day=val['start']['day'])\n end = datetime(year=date_year, month=val['end']['month'], day=val['end']['day'])\n if key == 'Winter':\n start_year = date_year - 1 if date_obj.month in [1, 2, 3] else date_year\n end_year = date_year + 1 if date_obj.month == 12 else date_year\n start = datetime(year=start_year, month=val['start']['month'], day=val['start']['day'])\n end = datetime(year=end_year, month=val['end']['month'], day=val['end']['day'])\n\n if start <= date_obj <= end:\n return key", "def getseason(data):\n ## Season key is the most reliable\n season = data.get(\"season\")\n if season:\n ## Season key is an integer formatted \"YYS\" and is 2000-based (i.e.- 171 == 2017-Winter)\n season = str(season)\n year = int(f\"20{season[:2]}\")\n ## Anichart Season key is 1-indexed\n season = int(season[2]) - 1\n ## This should normally pass; if it consistently does not, we'll have to investigate why\n try: return SeasonCharts.buildseason(season,year)\n ## If something goes wrong, we'll try another method\n except: print(f\"Failed to parse season: {data['season']}\")\n ## Next, we'll iterate over rankings to try to determine the season/year\n ## There are multiple types of rankings based on season, year, and both combined,\n ## so we'll piece it together based on whatever we come across first\n season,year = None,None\n for ranking in data.get(\"rankings\",list()):\n ## Quicker exit (without just making this loop its own function)\n if season and year: continue\n ## We'll ignore stuff we've already gotten and assume that nothing in\n ## rankings contradicts eachother\n if not season:\n ## Defaults to None one way or another if it's not supplied\n season = ranking.get(\"season\")\n if not year: year = ranking.get(\"year\")\n ## Check if we made it\n if season and year:\n ## As above, this should always work out-of-the-box\n try: return SeasonCharts.buildseason(season,year)\n except: print(season,year)\n ## Welp, we're stumped...\n return None", "def return_football_season(date=datetime.datetime.today()):\n date_aux = 
subtract_months(date, 6)\n beginning_year = str(date_aux.year)\n ending_year = date_aux.year + 1\n ending_year = str(ending_year)[-2:]\n season = ''.join([beginning_year, '-', ending_year])\n return season", "def list_chassis_per_season(start_year=1950, end_year=2020, file_changed=False):\n\n def _get_chassis_names(years):\n req = requests.get(WIKIPEDIA_F1_URL)\n soup = BeautifulSoup(req.content, 'html.parser')\n links = soup.find_all('a')\n\n for link in links:\n link_text = ['Formula', 'One', 'season', 'cars']\n if str(link.get('href')).split('_')[-4:] == link_text:\n link_season = str(WIKIPEDIA + link.get('href')).replace(' ', '')\n season = str(link.get('href')).replace(':', '_').split('_')[-5:-4][0]\n\n if int(season) in years:\n\n req = requests.get(link_season)\n soup = BeautifulSoup(req.content, 'html.parser')\n team_divs = soup.findAll('div', attrs={'class': 'mw-category-group'})\n teams_season_list = []\n\n for team_div in team_divs:\n team_links = team_div.findAll('a')\n for team_link in team_links:\n teams_season_list.append(team_link.get('title'))\n\n\n # Checking missing teams\n missing_teams = MISSING_CHASSIS.get(season)\n for missing_team in missing_teams:\n if missing_team not in teams_season_list:\n teams_season_list.append(missing_team)\n\n seasons_chassis[season] = teams_season_list\n\n save_json(seasons_chassis)\n\n return seasons_chassis\n\n seasons_chassis = load_json(F1_CHASSIS)\n\n # Check if we got the chassis names for all seasons listed\n update_list = []\n\n for year in range(start_year, end_year + 1):\n if str(year) not in seasons_chassis.keys():\n update_list.append(year)\n\n chassis = _get_chassis_names(update_list) if update_list else seasons_chassis\n\n return chassis", "def current_season() -> int:\n now = datetime.now()\n month, year = now.month, now.year\n if month < 4:\n year -= 1\n return year", "def get_pvp_season_index(self, region, namespace, **filters):\n filters['namespace'] = namespace\n return self.get_resource('data/wow/pvp-season/index', region, **filters)", "def season(self, seasonnum, order='aired'):\n if order=='aired':\n seasons = self.seasons\n elif order == 'dvd':\n seasons = self.dvd_seasons\n try:\n return seasons[seasonnum]\n except KeyError:\n raise SeasonNotFoundError(\n 'Season no %s does not exists' % seasonnum\n ), None, sys.exc_info()[2]", "def test_seasons(self):\n response = Tmdb.season(tmdb_show_id = 69740, season_number = 1)\n self.assertTrue(int(response.status_code) == 200)\n data = response.json()\n self.assertTrue(isinstance(data['episodes'], list))\n # TODO check if all the shows are in the good format (can be from_dict/to_dict)", "def seasons(years, timeout=None, concurrency=_DEFAULT_CONCURRENCY):\n\ttables, failures = [], []\n\tfor year in years:\n\t\tLOG.info('=' * 10 + ' %d ' + '=' * 10, year)\n\t\ttable, failure = season(year, timeout=timeout, concurrency=concurrency)\n\t\ttables.append(table)\n\t\tfailures.extend(failure)\n\treturn pd.concat(tables), failures", "def Seasons(year):\n mar_equinox = _FindSeasonChange(0, year, 3, 19)\n jun_solstice = _FindSeasonChange(90, year, 6, 19)\n sep_equinox = _FindSeasonChange(180, year, 9, 21)\n dec_solstice = _FindSeasonChange(270, year, 12, 20)\n return SeasonInfo(mar_equinox, jun_solstice, sep_equinox, dec_solstice)", "def get_player_games(self, year, use_local=True):", "def get_games(season, date):\n url = \"http://live.nhl.com/GameData/SeasonSchedule-\" + season + \".json\"\n response = urllib.urlopen(url)\n data = json.loads(response.read())\n games = []\n for game in 
data:\n if game[\"est\"][:8] == date:\n games.append(game)\n return games", "def year_archive(request, year):\n articles = Article.objects.filter(pub_date__year=year)\n context = { 'year': year, 'articles': articles }\n return render(request, 'news/year_archive.html', context)", "def parse(self, response):\n # gather season numbers and titles from the page\n season_numbers = response.css('select[name=\"sea\"] option::attr(value)').getall()\n season_titles = response.css('select[name=\"sea\"] option::text').getall()\n\n for season_number, season_title in zip(season_numbers, season_titles):\n # assemble URL for this season\n season_url = response.url + \"/?sea=\" + season_number\n\n # scrape the season page\n yield scrapy.Request(url=season_url,\n callback=self.parse_season,\n meta=dict(season_id=season_number,\n season_title=season_title.split(\" \")[0]))", "def get_seasons(link, debug=False):\n\n if debug:\n print(\"begin get_seasons()\")\n\n # get the BeautifulSoup data\n show_url = \"https://www.imdb.com/\" + link + \"episodes/\"\n tv_soup = bs4.BeautifulSoup(requests.get(show_url).text, features=\"html.parser\")\n\n # We are acquiring this data from a drop down, which the below line selects\n select_elem = tv_soup.select('#bySeason')\n seasons = []\n # account for the possibility of a one season show\n if len(select_elem) == 0:\n seasons.append(1)\n else:\n # get contents of drop down\n options = select_elem[0].select('option')\n\n # add each season\n for season in options:\n seasons.append(season.get('value'))\n if debug:\n print(f\"Seasons {seasons}\")\n\n return seasons", "def interpolateseasons(self):\n\n remainder = self.season - self.startseason\n f1 = 1.0 - remainder\n self.data = (self.startdata * f1) + (self.stopdata * remainder)", "def get_season_url(\n base_url: str, year: Optional[int] = None, season: Optional[str] = None\n) -> str:\n if year is None or season is None:\n return f\"{base_url}/season\"\n return f\"{base_url}/season/{year}/{season.lower()}\"", "def segment_by_season(self, dt, winter = None, summer = None):\n if winter == None:\n winter = [10, 11, 12, 1, 2, 3]\n if summer == None:\n summer = [4, 5, 6, 7, 8, 9]\n\n if dt.month in winter:\n ind = []\n for date in self.historic_data.index:\n if date.month in winter:\n ind.append(date)\n segmented_data = self.historic_data.reindex(ind)\n else:\n ind = []\n for date in self.historic_data.index:\n if date.month in summer:\n ind.append(date)\n segmented_data = self.historic_data.reindex(ind)\n\n return RollingWindow(self.name, segmented_data, self.source_type,\n self.dayahead_data)" ]
[ "0.6855133", "0.6743384", "0.6705959", "0.66812474", "0.66199046", "0.65923786", "0.6505892", "0.647663", "0.6457529", "0.62005526", "0.6138608", "0.61354065", "0.6126372", "0.6063681", "0.60521215", "0.60376775", "0.60246587", "0.60244566", "0.60032177", "0.59783655", "0.59369767", "0.5890995", "0.5888153", "0.58782065", "0.5871872", "0.5853492", "0.5847663", "0.5839573", "0.58379686", "0.5828187" ]
0.7681088
0
Standings page after using the datepicker plugin.
def standings_post_request(): date = request.form["date"] datetime_object = datetime.datetime.strptime(date, "%m-%d-%Y") scoreboard = nba_py.Scoreboard(month=datetime_object.month, day=datetime_object.day, year=datetime_object.year) east_standings = scoreboard.east_conf_standings_by_day() west_standings = scoreboard.west_conf_standings_by_day() return render_template("standings.html", title="standings", east_standings=enumerate(east_standings, 1), west_standings=enumerate(west_standings, 1), team=CITY_TO_TEAM)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def follow_workoutplan(request, pk):\n return render(request, 'workouts/starting_date_form.html')", "def schedule(request):\r\n\r\n return render(request, 'editorial/schedule.html', {})", "def schedule(request):\n return render(request, 'vaxcharts/schedule.html')", "def dashboard():", "def mainpage():\n return render_template('presence_weekday.html')", "def timesheet(request):\r\n return render(\r\n request,\r\n 'timesheet/timesheet.html'\r\n )", "def index():\n # call the datetime function\n datetime_today = datetime.datetime.now(central)\n datestring_today = datetime_today.strftime(\"%m-%d-%Y\")\n return render_score_page(\"index.html\", datestring_today, \"bballfast.com\")", "def page_dashboard(state):\n\n st.title(\":chart_with_upwards_trend: Prediction Results Dashboard\")\n\n st.markdown(\"# Select Stocks to View Results:\")\n if state.finalized_data:\n for stock_data in state.finalized_data:\n st.write(\"---\")\n st.markdown(\"## \" + stock_data[\"stock\"])\n if st.checkbox(\"View Results for \" + stock_data[\"stock\"]):\n\n ############################################\n\n st.markdown(\"### Historical Predictions:\")\n\n df2 = pd.DataFrame.from_dict(stock_data[\"prev_predictions\"])\n\n select_lbl = (\n \"Enter the names of models for \" + stock_data[\"stock\"] + \":\"\n )\n models_selections = st.multiselect(\n label=select_lbl,\n options=df2.columns,\n ) # allow users to display specific model results on dataframe graph\n\n if not models_selections: # if nothing is selected show all models!\n st.line_chart(df2)\n else:\n st.line_chart(df2[models_selections])\n\n st.markdown(\n \"*Note:* 'Prices' are the actual prices for those days. The rest are model predictions for those days.\\nPrices (in USD) are on the y-axis, the day number in the data is on the x-axis.\"\n )\n\n ############################################\n\n st.markdown(\"### Future (Next-Day) Predictions:\")\n\n df = pd.DataFrame()\n df = df.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"swing_predictions\"]]\n )\n )\n df = df.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"next_day_predictions\"]]\n )\n )\n df = df.append(\n pd.DataFrame([stock_data[\"prediction_results\"][\"model_scores\"]])\n )\n\n df.index = [\n \"Swing Predicton\",\n \"Price Prediction ($)\",\n \"Model Fit Score\",\n ]\n df = df.transpose()\n df # display chart\n\n st.markdown(\n \"- The current price of the stock is *$\"\n + str(\n round(stock_data[\"prediction_results\"][\"current_prev_close\"], 2)\n )\n + \"*.\"\n )\n\n if state.period == \"1mo\":\n st.markdown(\"- *Recommended Model (for 1mo):* SVR-RBF\")\n st.markdown(\n \"- *View the homescreen for more model & dataset size combination recommendations.*\"\n )\n elif state.period == \"6mo\":\n st.markdown(\n \"- *Recommended Model (for 6mo):* SVR-Poly (most recommended), LR, EN, or Lasso.\"\n )\n st.markdown(\n \"- *View the homescreen for more model & dataset size combination recommendations.*\"\n )\n elif state.period == \"1y\":\n st.markdown(\"- *Recommended Model (for 1yr):* SVR-Poly\")\n st.markdown(\n \"- *View the homescreen for more model & dataset size combination recommendations.*\"\n )\n else:\n st.markdown(\n \"- *Note:* View the home screen for information about the best models and training data size combinations.\"\n )\n\n ############################################\n st.markdown(\"### View Other Information:\")\n\n if st.checkbox(\n \"View \" + stock_data[\"stock\"] + \"'s Model Efficiency Timings\"\n ):\n st.markdown(\"#### Model 
Efficiencies:\")\n st.markdown(\n \"Shows the time in seconds it took models to complete specific tasks:\"\n )\n df3 = pd.DataFrame()\n df3 = df3.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"training_times\"]]\n )\n )\n df3 = df3.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"testing_times\"]]\n )\n )\n df3 = df3.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"new_predictions_times\"]]\n )\n )\n df3 = df3.append(\n pd.DataFrame(\n [stock_data[\"prediction_results\"][\"prev_predictions_times\"]]\n )\n )\n df3.index = [\n \"Training\",\n \"Testing/Scoring\",\n \"Future Predictions\",\n \"Historical Predictions\",\n ]\n df3 = df3.transpose()\n df3\n\n ############################################\n\n if st.checkbox(\"View \" + stock_data[\"stock\"] + \"'s Information\"):\n st.markdown(\"#### Company Information:\")\n for key in stock_data[\"stock_info\"].keys():\n st.write(\"*\", key + \":\", stock_data[\"stock_info\"][key])\n else:\n st.markdown(\n \"## Generate data to populate and initialize this page by going to the 'Settings' page and running the tool!\"\n )", "def get_standings(self):\n self.standings = self.soup.find('table', id='standingsTable')", "def programme(request):\n try:\n festival = Festival.objects.latest(field_name=\"date_creation\")\n except:\n return render_to_response('festival/accueil.html')\n\n dates = []\n\n for evenement in festival.evenements.all():\n dates.append(evenement.heure_passage.date())\n for activite in festival.activites.all():\n dates.append(activite.date.date())\n\n liste_dates = list(set(dates))\n liste_dates.sort()\n\n retour = {\n 'festival': festival,\n 'dates': liste_dates,\n }\n return render_to_response('festival/programme.html', RequestContext(request, retour))", "def calendarPageChanged(self, year, month):\n success = self.porker_thread.extendDates(datetime.date(year, month, 1))\n #if not success:\n # self.alertMessage(\"Failure!\",\"Unable to extend the thread's dates for some reason.\")\n #efficiency = self.porker_thread.getEfficiencyFor(self.getActiveDate())\n #self.porker_thread.sentDatesData = False", "def index(request):\n\n # Fetches the latest 3 listings published\n listings = Listing.objects.order_by('-list_date').filter(is_published=True)[:3]\n context = {\n 'listings': listings,\n 'state_choices': state_choices,\n 'bedroom_choices': bedroom_choices,\n 'price_choices': price_choices\n }\n return render(request, \"pages/index.html\", context)", "def standings():\n pass", "def standings():\n teams = Team.query.all()\n teams = list(reversed(sorted(teams, key=lambda team: team.points)))\n for team in teams:\n team.logo = url_for('static', filename='images/teams/{}'.format(team.logo_image))\n\n return render_template('standings/standings.html', teams=teams, title='Standings')", "def baron_schedule(request):\n assert isinstance(request, HttpRequest)\n\n return render(\n request,\n 'AscensionESports_Baseline/schedule.html',\n {\n 'background': getBaronBackground(),\n 'color': getBaronColor(),\n 'title':'Baron League Schedule',\n 'query_results': Baron_Match_Report_Request(request),\n 'year': datetime.now().year,\n }\n )", "def _select_date_changed(self):\n self.model.edit_traits(view=View(\n UCustom('date'),\n buttons=['OK'],\n title=u'数据生成日期选择',\n kind='panel',\n ))", "def _get_schedule_html_for_date(squadron_url: str, date_state: str) -> str:\n state = date_state.copy()# don't mutate the original\n headers = {'Content-Type': 'application/x-www-form-urlencoded'}\n state['btnViewSched'] = 
'View Schedule'\n html_string = _get_page_html(squadron_url, state, headers=headers)\n return html_string", "def daytrader():\n\t\n\tif request.method == \"POST\":\n\n\t\tif \"grab\" in request.form:\n\t\t\tdatatype = request.form.get('datatype')\n\t\t\tticker = request.form[\"ticker\"]\n\t\t\t\n\t\t\treturn redirect(url_for('datapage',datatype=datatype,ticker=ticker))\n\n\treturn render_template('daytrader.html')", "def scheduleMe(page):\n querystring_combos = request.cookies.get('course_combos')\n if not querystring_combos:\n return render_template('404.html'), 404\n combos = json.loads(querystring_combos)\n #print querystring_combos\n\n count = len(combos)\n pagination_needed = count > PER_PAGE\n this_page_combos = combos\n if pagination_needed:\n this_page_combos = getCombosForPage(page, PER_PAGE, count, combos)\n last_page = isLastPage(page, count, PER_PAGE)\n if not this_page_combos and page != 1:\n return render_template('404.html'), 404\n return render_template(\"sched.html\",\n title=\"Scheduler\",\n combos=this_page_combos,\n combo_amount=str(count),\n page=page,\n last_page=last_page,\n pagination=pagination_needed)", "def showSelectedDate(self):\n pass", "def historical(request):\n mekongBuffer = ee.FeatureCollection('ft:1LEGeqwlBCAlN61ie5ol24NdUDqB1MgpFR_sJNWQJ');\n mekongRegion = mekongBuffer.geometry();\n\n region = ee.Geometry.Rectangle([-180,-90,180,90])\n\n algorithm_selection = SelectInput(\n # display_text='Select Surface Water Algorithm:',\n name='algorithm_selection',\n multiple=False,\n options=[('Surface Water Tool', 'SWT'), ('JRC Tool', 'JRC')],\n initial=['JRC Tool'],\n )\n\n # Date Picker Options\n date_picker1 = DatePicker(name='date_picker1',\n # display_text='Start Date',\n autoclose=True,\n format='yyyy-mm-dd',\n start_date='1/1/1990',\n start_view='decade',\n today_button=True,\n initial='2000-01-01')\n\n # Date Picker Options\n date_picker2 = DatePicker(name='date_picker2',\n # display_text='End Date',\n autoclose=True,\n format='yyyy-mm-dd',\n start_date='1/1/1990',\n start_view='decade',\n today_button=True,\n initial='2015-12-31')\n\n month_slider = RangeSlider(display_text='Month',\n name='month_slider',\n min=1,\n max=12,\n initial=7,\n step=1)\n\n\n view_options = MVView(\n projection='EPSG:4326',\n center=[101.75, 16.50],\n zoom=5,\n maxZoom=18,\n minZoom=2\n )\n\n water_map = MapView(\n height='100%',\n width='100%',\n controls=['FullScreen',\n {'MousePosition': {'projection': 'EPSG:4326'}}],\n basemap='OpenSteetMap',\n view=view_options\n )\n\n update_button = Button(\n display_text='Update Map',\n name='update-button',\n icon='glyphicon glyphicon-refresh',\n style='success',\n attributes={\n 'title':'Update Map'\n }\n )\n\n context = {\n 'update_button': update_button,\n 'date_picker1': date_picker1,\n 'date_picker2': date_picker2,\n 'month_slider': month_slider,\n 'algorithm_selection': algorithm_selection,\n 'water_layer': water_layer,\n 'water_map': water_map,\n }\n\n return render(request, 'hydraviewer/historical.html', context)", "def on_btnReservatool_clicked(self, widget):\n try:\n panelactual = variables.panel.get_current_page()\n if panelactual != 1:\n variables.panel.set_current_page(1)\n funcioneshab.listadonumhab(self)\n else:\n pass\n except:\n print(\"error botón cliente barra herramientas\")", "def index():\n # return render_template('index.html', events=get_calendar_events_today(CALENDAR_URL))\n return render_template('index.html', events=get_calendar_events_limit(CALENDAR_URL), events_sorted=True)", "async def standings(self, ctx: 
commands.Context, *, search: HockeyStandings = None) -> None:\n source = {\n \"all\": StandingsPages,\n \"conference\": ConferenceStandingsPages,\n \"western\": ConferenceStandingsPages,\n \"eastern\": ConferenceStandingsPages,\n \"division\": DivisionStandingsPages,\n \"massmutual\": DivisionStandingsPages,\n \"central\": DivisionStandingsPages,\n \"discover\": DivisionStandingsPages,\n \"scotia\": DivisionStandingsPages,\n \"north\": DivisionStandingsPages,\n \"massmutual\": DivisionStandingsPages,\n \"east\": DivisionStandingsPages,\n \"honda\": DivisionStandingsPages,\n \"west\": DivisionStandingsPages,\n }\n if search is None:\n search = \"division\"\n standings, page = await Standings.get_team_standings(search.lower(), session=self.session)\n for team in TEAMS:\n if \"Team\" in team:\n source[team.replace(\"Team \", \"\").lower()] = DivisionStandingsPages\n else:\n source[team] = TeamStandingsPages\n await BaseMenu(\n source=source[search](pages=standings),\n page_start=page,\n delete_message_after=False,\n clear_reactions_after=True,\n timeout=60,\n ).start(ctx=ctx)", "def _setData(self):\n data_list = []\n results = self.query.all()\n formatter = date.getLocaleFormatter(self.request, \"date\", \"long\")\n time_formatter = date.getLocaleFormatter(self.request, \"time\", \"short\")\n for result in results:\n data = {}\n data[\"subject\"] = result.short_name\n # this tab appears in the workspace pi/ view...\n data[\"url\"] = url.set_url_context(\"../calendar/sittings/obj-%i/schedule\" %\n result.sitting_id)\n # Note: same UI is also displayed at: \n # /business/sittings/obj-%i/schedule % result.sitting_id\n data[\"items\"] = \"\"\n data[\"status\"] = misc.get_wf_state(result)\n data[\"status_date\"] = formatter.format(result.status_date)\n data[\"owner\"] = \"\"\n data[\"type\"] = result.group.type\n data[\"group\"] = u\"%s %s\" % (\n result.group.type.capitalize(), result.group.short_name)\n data[\"time_from_to\"] = (\n time_formatter.format(result.start_date),\n time_formatter.format(result.end_date))\n data[\"date\"] = formatter.format(result.start_date) \n if result.venue:\n data[\"venue\"] = _(result.venue.short_name)\n else:\n date[\"venue\"] = \"\"\n if type(result)==domain.Question:\n data[\"to\"] = result.ministry.short_name\n else:\n data[\"to\"]= \"\"\n # past, present, future\n today = datetime.datetime.today().date()\n startday = result.start_date.date()\n if today==startday:\n data[\"css_class\"] = \"present\"\n elif today>startday:\n data[\"css_class\"] = \"past\"\n else:\n data[\"css_class\"] = \"future\"\n data_list.append(data)\n self._data = data_list", "def main():\r\n yesterday = (dt.date.today() - dt.timedelta(1)).strftime(\"%Y-%m-%d\")\r\n entries = query_db(\"\"\"select TeamAway, TeamHome, spread, Predicted FROM Spreads\"\"\",\r\n one=False)\r\n results = query_db(\"\"\"SELECT Team, Opponent, spread, gameDate,\r\n Differential,\r\n Predicted, beatSpreadSLED\r\n FROM PredRes WHERE gameDate=?\"\"\",\r\n [yesterday])\r\n return render_template('main.html', entries=entries, results=results)", "def on_date_change(self):\n self.date = self.ui.calendarWidget.selectedDate()\n self.update_views()", "def calendar(self):\r\n self.cal = QCalendarWidget()\r\n self.cal.setWindowTitle(\"Get Birthday\")\r\n self.cal.show()\r\n self.cal.clicked.connect(self.dateB)", "def i_see_the_set_dates(_step):\r\n verify_date_or_time(COURSE_START_DATE_CSS, '12/20/2013')\r\n verify_date_or_time(COURSE_END_DATE_CSS, '12/26/2013')\r\n verify_date_or_time(ENROLLMENT_START_DATE_CSS, 
'12/01/2013')\r\n verify_date_or_time(ENROLLMENT_END_DATE_CSS, '12/10/2013')\r\n\r\n verify_date_or_time(COURSE_START_TIME_CSS, DUMMY_TIME)\r\n # Unset times get set to 12 AM once the corresponding date has been set.\r\n verify_date_or_time(COURSE_END_TIME_CSS, DEFAULT_TIME)\r\n verify_date_or_time(ENROLLMENT_START_TIME_CSS, DEFAULT_TIME)\r\n verify_date_or_time(ENROLLMENT_END_TIME_CSS, DUMMY_TIME)", "def admin_start(request):\n games = Game.objects.filter(is_done=False, is_closed=False)\n return render_to_response('ms/admin_start.html', RequestContext(request, {'games': games}))" ]
[ "0.6044334", "0.60336864", "0.5919623", "0.58946955", "0.58439314", "0.5783241", "0.5722915", "0.5698653", "0.56796974", "0.5668093", "0.5652515", "0.5632157", "0.56211615", "0.56042194", "0.55611235", "0.5546947", "0.5542898", "0.5529295", "0.55291355", "0.54914933", "0.54750246", "0.54635584", "0.5460354", "0.54429513", "0.5429672", "0.5412376", "0.53920275", "0.5387846", "0.53205323", "0.53143024" ]
0.65683556
0
Link for specific score pages for a certain day.
def scores(datestring): return render_score_page("scores.html", datestring, datestring)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def index():\n # call the datetime function\n datetime_today = datetime.datetime.now(central)\n datestring_today = datetime_today.strftime(\"%m-%d-%Y\")\n return render_score_page(\"index.html\", datestring_today, \"bballfast.com\")", "def scrape(self, day):\n url = '{base_url}/{month_day}'.format(\n base_url=self.url,\n month_day=day.strftime('%B_%-d')\n )\n\n response = requests.get(url)\n\n return day, response", "def menu_python_daily(self, event=None):\n self.link('http://www.pythonware.com/daily/')", "def scores_post_request():\n date = request.form[\"date\"]\n print(date)\n return render_score_page(\"scores.html\", date, date)", "def day(d):\n\t\tx = db.cquery(\"day\",d)\n\t\tprint \"Total:\", x[0]\n\t\tf = raw_input(\"[L]ist [N]ew overview or [B]ack to home \").lower()\n\t\tif f == \"l\":\n\t\t\tfor i in x[1]:\n\t\t\t\tprint ui.statsid(), i[0], i[1], \" \", ui.statstimein(), i[2], ui.statstimeout(), i[3]\n\t\t\traw_input(\"[Enter] to go back to search\")\n\t\t\thome_stats()\n\t\telif f == \"n\":\n\t\t\thome_stats()\n\t\telif f == \"b\":\n\t\t\thome()\n\t\telse:\n\t\t\tpass", "def get_link_for_matches_in_x_days(main_link, days=16):\n try:\n now = datetime.datetime.now()\n date_to_check = now + datetime.timedelta(days)\n # Here script could make it go through days until 14 day from now\n print(f\"Checking matches at: {date_to_check.date()}\")\n year = date_to_check.strftime(\"%Y\")\n month = date_to_check.strftime(\"%m\")\n day = date_to_check.strftime(\"%d\")\n link_for_checking = f\"{main_link}?year={year}&month={month}&day={day}\"\n return link_for_checking\n except:\n print(f\"There was a problem with creating link, returning input link {main_link}\")\n return main_link", "def route100days():\n return render_template(\"100days.html.j2\")", "def next_link(self, page):\n\n date_links = page.find(name='div', attrs={'id': 'prevNext'}).findAll(name='a', recursive=False)\n if len(date_links) == 2:\n data = re.search(\"<a.*league=([\\w\\d.]+)\\&.*date=([\\d]{8}).*>\", str(date_links[1])).groups()\n return \"http://soccernet-akamai.espn.go.com/scoreboard?league=%s&date=%s&cc=5739\" % (data[0], data[1])\n return False", "def lessons_for_day(self, day, schedule_id=None, eager=True):\n q = Lesson.query_current(schedule_id)\n q = q.filter(Lesson.day == day).\\\n filter(Lesson.teacher_id == self.id)\n\n if eager:\n q = q.options(eagerload('group'), eagerload('group.year'))\n\n return q.all()", "def home(request):\n today = datetime.date.today()\n return HttpResponseRedirect(\"%s/newsletter/%d/%d/%d/\" % (SUBSITE, today.year, today.month, today.day))", "def lessons_today(self, bot, update, group_name):\n week_number = self.week()\n day_number = pendulum.now('Europe/Kiev')\n\n bot.send_message(update.message.chat_id,\n text='`{}`\\n{}'.format(group_name,\n self.timetable.lessons_per_day(group_name,\n day_number,\n week_number)),\n parse_mode='Markdown')", "def view_date(request, year, month, day):\n logged_in = request.user.is_authenticated()\n\n try:\n date = datetime.date(int(year), int(month), int(day))\n except:\n return HttpResponse('bad date (somehow)')\n \n if date > datetime.date.today() and not logged_in:\n return HttpResponse('this newsletter has yet to be published')\n\n items = NewsItem.objects.all().filter(date_to_publish=date).order_by('position')\n left_sections = {\n 0: None,\n 1: None,\n 2: None,\n }\n right_sections = {\n 3: None,\n 4: None,\n 5: None,\n }\n extras = []\n \n for item in items:\n item_position = item.position\n if item.content:\n if item_position < 3:\n 
left_sections[item.position] = item\n elif item_position <= 5:\n right_sections[item.position] = item\n else:\n extras.append(item)\n\n args = {\n 'left_sections': left_sections,\n 'right_sections': right_sections,\n 'extras': extras,\n 'date': date.isoformat(),\n 'logged_in': logged_in,\n 'SUBSITE': SUBSITE,\n 'CDN': CDN, #included here for when we go live and need a place for people to view the newsletter\n }\n return render(request, 'newsletter.html', args)", "def change_date(self, date):\n self.date = date\n relative_url = \"https://www.sevenrooms.com/manager/twelvewest/reservations/day/\" + date.strftime(\"%m-%d-20%y\")\n self.driver.get(relative_url)\n self.update_html()", "async def daily_all(self, ctx):\n embed = await self.daily_embed(\n [\"psna\", \"pve\", \"pvp\", \"wvw\", \"fractals\", \"strikes\"], ctx=ctx)\n embed.set_thumbnail(\n url=\"https://wiki.guildwars2.com/images/1/14/Daily_Achievement.png\"\n )\n try:\n await ctx.send(embed=embed)\n except discord.Forbidden:\n await ctx.send(\"Need permission to embed links\")", "def results(request, year, month, day, slug, template_name=\"pollit/results.html\"):\n params = {\n 'pub_date__year': year,\n 'pub_date__month': datetime.datetime.strptime(month, '%b').month,\n 'slug': slug,\n }\n \n if MULTIPLE_SITES:\n params['sites__pk'] = settings.SITE_ID\n if day is not None:\n params['pub_date__day'] = day\n \n try:\n poll = Poll.objects.get(**params)\n except:\n raise Http404\n \n ip = get_client_ip(request)\n poll_choice_id = get_poll_choice_id_from_cookies(poll, request.COOKIES)\n poll_choice = poll.get_poll_choice(request.user, poll_choice_id, ip)\n \n return render_to_response(template_name,\n {'poll': poll,\n 'has_voted': poll_choice is not None,\n 'user_choice': poll_choice},\n context_instance=RequestContext(request))", "def appointment_stats(request):\n # TODO: Consider/Look into Django cache framework\n # Default is to load up yesterday's stats\n data = {}\n if request.GET.get('lookback'):\n data['lookback'] = request.GET.get('lookback')\n appointments = get_appointments_word(request.GET.get('lookback'))\n else:\n data['lookback'] = 'yesterday'\n appointments = get_appointments_word('yesterday')\n data.update(get_appointment_stats(appointments))\n return render(request, 'doctor/stats.html', data)", "def score_selector(request):\n\n\t# Look for the list of gymnasts entered by the user\n\tgymnast_list = request.GET.get('gymnast_list', False)\n\n\tif gymnast_list:\n\n\t\t# Get the rest of the information submitted through the form\n\t\tgymnasts = gymnast_list.split(\"\\r\\n\")\n\t\tevent = request.GET.get('event', False)\n\t\tsumstat = request.GET.get('sumstat', False)\n\t\ttime = request.GET.get('time', False)\n\n\t\t# Set the date range \n\t\tnow = datetime.datetime.now()\n\t\tif time==\"year\":\n\t\t\tdate_range = [now-relativedelta(years=1), now]\n\t\telif time == \"season\":\n\t\t\tdate_range = [datetime.date(2019, 10, 13), now] # Since last world championships\n\t\telse:\n\t\t\tdate_range = [datetime.date(2016, 8, 21), now] # Since last olympics\n\n\t\t# Get the score data for the results table\n\t\ttable_data = []\n\t\tfor gymnast in gymnasts:\n\t\t\tgymnast = Gymnast.objects.get(name=gymnast)\n\t\t\tthis_gymnast_scores = []\n\t\t\tthis_gymnast_scores.append(gymnast)\n\t\t\tif event == \"AA\":\n\t\t\t\tfor sub_event in [\"VT\", \"UB\", \"BB\", \"FX\"]:\n\t\t\t\t\tscores = Score.objects.filter(gymnast=gymnast, \n\t\t\t\t\t\tmeet__in=Meet.objects.filter(start_date__range=date_range), 
event__in=Event.objects.filter(name=sub_event), score_num=1)\n\t\t\t\t\tif scores.count() > 0:\n\t\t\t\t\t\tif sumstat == \"avg\":\n\t\t\t\t\t\t\tscores_sumstat = scores.aggregate(Avg('score'))['score__avg']\n\t\t\t\t\t\telif sumstat == \"max\":\n\t\t\t\t\t\t\tscores_sumstat = scores.aggregate(Max('score'))['score__max']\n\t\t\t\t\telse:\n\t\t\t\t\t\tscores_sumstat = \"\"\n\t\t\t\t\tthis_gymnast_scores.append(scores_sumstat)\n\t\t\t\t# Add up AA average\n\t\t\t\tif isinstance(this_gymnast_scores[1], float) and isinstance(this_gymnast_scores[2], float) and isinstance(this_gymnast_scores[3], float) and isinstance(this_gymnast_scores[4], float):\n\t\t\t\t\taa_total = float(this_gymnast_scores[1]) + float(this_gymnast_scores[2]) + float(this_gymnast_scores[3]) + float(this_gymnast_scores[4])\n\t\t\t\t\tthis_gymnast_scores.append(aa_total)\n\t\t\t\telse:\n\t\t\t\t\tthis_gymnast_scores.append(\"\")\n\t\t\telif event == \"VT\":\n\t\t\t\tfor vt_num in [1, 2]:\n\t\t\t\t\tscores = Score.objects.filter(gymnast=gymnast, \n\t\t\t\t\t\tmeet__in=Meet.objects.filter(start_date__range=date_range), event__in=Event.objects.filter(name=\"VT\"), score_num=vt_num)\n\t\t\t\t\tif scores.count() > 0:\n\t\t\t\t\t\tif sumstat == \"avg\":\n\t\t\t\t\t\t\tscores_sumstat = scores.aggregate(Avg('score'))['score__avg']\n\t\t\t\t\t\telif sumstat == \"max\":\n\t\t\t\t\t\t\tscores_sumstat = scores.aggregate(Max('score'))['score__max']\n\t\t\t\t\telse:\n\t\t\t\t\t\tscores_sumstat = \"\"\n\t\t\t\t\tthis_gymnast_scores.append(scores_sumstat)\n\t\t\t\t# Get two-vault average\n\t\t\t\tif isinstance(this_gymnast_scores[1], float) and isinstance(this_gymnast_scores[2], float):\n\t\t\t\t\tvt_avg = (float(this_gymnast_scores[1]) + float(this_gymnast_scores[2]))/2\n\t\t\t\t\tthis_gymnast_scores.append(vt_avg)\n\t\t\t\telse:\n\t\t\t\t\tthis_gymnast_scores.append(\"\")\n\t\t\telse:\n\t\t\t\tscores = Score.objects.filter(gymnast=gymnast, \n\t\t\t\t\tmeet__in=Meet.objects.filter(start_date__range=date_range), event__in=Event.objects.filter(name=event))\n\t\t\t\tif scores.count() > 0:\n\t\t\t\t\tif sumstat == \"avg\":\n\t\t\t\t\t\tscores_sumstat = scores.aggregate(Avg('score'))['score__avg']\n\t\t\t\t\telif sumstat == \"max\":\n\t\t\t\t\t\tscores_sumstat = scores.aggregate(Max('score'))['score__max']\n\t\t\t\telse:\n\t\t\t\t\tscores_sumstat = \"\"\n\t\t\t\tthis_gymnast_scores.append(scores_sumstat)\n\t\t\ttable_data.append(this_gymnast_scores)\n\telse: \n\t\tgymnast_list = \"\"\n\t\tgymnasts = []\n\t\ttable_data = []\n\t\tevent = \"AA\"\n\t\tsumstat = \"avg\"\n\t\ttime = \"year\"\n\n\tcontext = {\n\t\t'gymnast_list': gymnast_list, # Return what they entered so that it shows up again with the results of their request\n\t\t'gymnasts': gymnasts,\n\t\t'table_data': table_data,\n\t\t'event': event,\n\t\t'sumstat': sumstat,\n\t\t'time': time,\n\t}\n\treturn render(request, 'score_selector.html', context=context)", "def crawl_url(_date, page):\n\tencode = base64.b64encode(date.strftime(_date, '%Y/%m/%d').encode('utf-8'))\n\tfdate = '/fdate/' + encode.decode('utf-8')\n\tpage = '/page/' + str(page)\n\n\treturn base_url + fdate + page", "def schedule_for_day(self, day, schedule_id=None):\n lessons = self.lessons_for_day(day, schedule_id, eager=True)\n return self._process_schedule(lessons)", "def time_search(year, month, day):\n entries = []\n cur = g.db.execute(\n \"\"\"\n SELECT entries.location FROM entries\n WHERE CAST(strftime('%Y',entries.published)AS INT) = {year}\n AND CAST(strftime('%m',entries.published)AS INT) = {month}\n AND 
CAST(strftime('%d',entries.published)AS INT) = {day}\n ORDER BY entries.published DESC\n \"\"\".format(year=int(year), month=int(month), day=int(day)))\n\n for (row,) in cur.fetchall():\n if os.path.exists(row+\".md\"):\n entries.append(file_parser(row+\".md\"))\n return render_template('blog_entries.html', entries=entries)", "def add_day_to_hive_smtr_table(spark, year, month, day, data_threshold=100, langs=('en',), table='isaacj.smtr'):\n wikis = \"('\" + \"','\".join([f'{l}.wikipedia' for l in langs]) + \"')\"\n langs = \"('\" + \"','\".join(langs) + \"')\"\n\n query = f\"\"\"\n WITH potential_referrals AS (\n SELECT\n host_to_site(PARSE_URL(referer, 'HOST')) AS source,\n normalized_host.project AS lang,\n page_id,\n access_method\n FROM wmf.pageview_actor\n WHERE\n year = {year} AND month = {month} AND day = {day}\n AND is_pageview\n AND normalized_host.project_class = 'wikipedia'\n AND normalized_host.project IN {langs}\n AND namespace_id = 0\n AND agent_type = 'user'\n AND referer_class = 'external'\n AND (referer LIKE '%reddit.com%' OR\n referer LIKE '%tiktok%' OR\n referer LIKE '%facebook.com%' OR \n referer LIKE '%t.co/%' OR \n referer LIKE '%twitter.com%' OR \n referer LIKE '%youtu%')\n ),\n social_media_referrals AS (\n SELECT\n source,\n lang,\n page_id,\n SUM(IF(access_method = 'desktop', 1, 0)) as sviews_desktop,\n SUM(IF(access_method = 'mobile web', 1, 0)) as sviews_mobile\n FROM potential_referrals\n WHERE\n source IS NOT NULL\n GROUP BY\n source,\n lang,\n page_id\n HAVING COUNT(1) > {data_threshold} \n ),\n distinct_pages AS (\n SELECT DISTINCT\n lang,\n page_id\n FROM social_media_referrals\n ),\n total_pageviews AS (\n SELECT\n s.lang AS lang,\n p.page_id AS page_id,\n SUM(view_count) AS total_views\n FROM wmf.pageview_hourly p\n INNER JOIN distinct_pages s\n ON (p.project = CONCAT(s.lang, '.wikipedia') \n AND p.page_id = s.page_id)\n WHERE\n year = {year} AND month = {month} AND day = {day}\n AND namespace_id = 0\n AND agent_type = 'user'\n AND project IN {wikis}\n GROUP BY\n s.lang,\n p.page_id\n )\n INSERT OVERWRITE TABLE {table}\n PARTITION(year={year}, month={month}, day={day})\n SELECT\n source,\n s.lang,\n s.page_id,\n sviews_desktop,\n sviews_mobile,\n total_views\n FROM social_media_referrals s\n LEFT JOIN total_pageviews t\n ON (s.lang = t.lang\n AND s.page_id = t.page_id)\"\"\"\n\n print(query)\n spark.sql(query)", "def n_subimissions_per_day( url, headers ):", "def generate_cinema_schedule_url(self, site_url, thnumber, show_day):\n main_url = re.findall(r'(http://.+/)site', site_url)[0]\n url = main_url + 'schedule/pc/s0100_{thnumber}_{show_day}.html'.format(\n thnumber=thnumber, show_day=show_day)\n return url", "def score(self, urlids, wordids):\r\n\t\tself.urlids = urlids\r\n\t\tself.wordids = wordids\r\n\t\tfor urlid in self.urlids:\r\n\t\t \tsql = \"select pagerank from pagelink where urlid=%d\" % urlid\r\n\t\t\tpr = self.cur.execute(sql).fetchone()[0]\r\n\t\t\tself.scores[urlid] = pr\r\n\t\treturn self.scores", "def view_post(year, month, day, slug):\n post = Post.query.filter_by(slug=slug, pub_date=datetime.date(year, month, day)).first()\n return flask.render_template('post.html', post=post)", "def about(request, pk=None):\n if pk and get_grouptype('3') in request.user.groups.all():\n ts = get_object_or_404(TimeSlot, pk=pk)\n else:\n ts = get_timeslot()\n return render(request, \"results/about_grades.html\", {\n 'scores': CategoryAspectResult.ResultOptions,\n \"categories\": GradeCategory.objects.filter(TimeSlot=ts),\n 'ts': ts,\n })", "def 
__call__(self, date):\n for game in self._games:\n if game.datetime.year == date.year and \\\n game.datetime.month == date.month and \\\n game.datetime.day == date.day:\n return game\n raise ValueError('No games found for requested date')", "def visitAbout(self, date):\n raise NotImplementedError()", "def index(request):\n import datetime\n values = default_values(request)\n values['recent_links'] = Link.objects.all().order_by('-date_submitted')[0:10] \n values['most_popular_links'] = Link.objects.filter(date_submitted__gte=(datetime.datetime.today() - datetime.timedelta(days=1)) ).annotate(num_clicks_views=Count('stat')).order_by('-num_clicks_views')[0:10]\n return render_to_response(\n 'shortener/index.html',\n values,\n context_instance=RequestContext(request))", "def download(self, day=None, month=None, year=None):\n # Make sure we see the \"Day\" pannel\n tabactive = self.wait_n_get(By.CLASS_NAME, 'tabactive')\n if not tabactive.text == 'Day':\n self.click(id_day)\n\n # Select the right day\n if day:\n self.select_date(day, month, year)\n\n # Hover over the download button\n try:\n self.hover_over(id_hover)\n self.click(id_click)\n except Exception as e_1:\n # Check if the data is available for that day by looking for the info bubble\n try:\n el = self.wait_n_get(By.ID, id_info)\n if 'info.png' in el.get_attribute('src'):\n print('no data available for this day')\n return None\n else:\n # Not sure what just happen there\n raise(e_1)\n except Exception as e_2:\n if 'Unable to locate element' in str(e_2):\n # The info icon isn't available\n print(e_2)\n raise(e_1)\n else:\n # Not sure what just happen there\n print(e_1)\n print(e_2)\n #raise (e1, e2)\n\n # Download the data for the day\n res = self.driver.request('GET', url_data_graph)\n if res.status_code == 200:\n print('sucess')\n else:\n raise Exception('Error:', res.text)\n return res" ]
[ "0.6154567", "0.5618616", "0.5526901", "0.5496418", "0.5482043", "0.53149104", "0.52406067", "0.51252085", "0.5077746", "0.50192183", "0.5014762", "0.501392", "0.4963462", "0.4912558", "0.4900883", "0.48971343", "0.48908174", "0.48814574", "0.48384798", "0.4829943", "0.4819453", "0.4770697", "0.47548103", "0.4730427", "0.47157043", "0.470197", "0.469877", "0.46951482", "0.4693792", "0.4692303" ]
0.6060666
1
The score page after using the datepicker plugin.
def scores_post_request(): date = request.form["date"] print(date) return render_score_page("scores.html", date, date)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scores(datestring):\n return render_score_page(\"scores.html\", datestring, datestring)", "def index():\n # call the datetime function\n datetime_today = datetime.datetime.now(central)\n datestring_today = datetime_today.strftime(\"%m-%d-%Y\")\n return render_score_page(\"index.html\", datestring_today, \"bballfast.com\")", "def score_selector(request):\n\n\t# Look for the list of gymnasts entered by the user\n\tgymnast_list = request.GET.get('gymnast_list', False)\n\n\tif gymnast_list:\n\n\t\t# Get the rest of the information submitted through the form\n\t\tgymnasts = gymnast_list.split(\"\\r\\n\")\n\t\tevent = request.GET.get('event', False)\n\t\tsumstat = request.GET.get('sumstat', False)\n\t\ttime = request.GET.get('time', False)\n\n\t\t# Set the date range \n\t\tnow = datetime.datetime.now()\n\t\tif time==\"year\":\n\t\t\tdate_range = [now-relativedelta(years=1), now]\n\t\telif time == \"season\":\n\t\t\tdate_range = [datetime.date(2019, 10, 13), now] # Since last world championships\n\t\telse:\n\t\t\tdate_range = [datetime.date(2016, 8, 21), now] # Since last olympics\n\n\t\t# Get the score data for the results table\n\t\ttable_data = []\n\t\tfor gymnast in gymnasts:\n\t\t\tgymnast = Gymnast.objects.get(name=gymnast)\n\t\t\tthis_gymnast_scores = []\n\t\t\tthis_gymnast_scores.append(gymnast)\n\t\t\tif event == \"AA\":\n\t\t\t\tfor sub_event in [\"VT\", \"UB\", \"BB\", \"FX\"]:\n\t\t\t\t\tscores = Score.objects.filter(gymnast=gymnast, \n\t\t\t\t\t\tmeet__in=Meet.objects.filter(start_date__range=date_range), event__in=Event.objects.filter(name=sub_event), score_num=1)\n\t\t\t\t\tif scores.count() > 0:\n\t\t\t\t\t\tif sumstat == \"avg\":\n\t\t\t\t\t\t\tscores_sumstat = scores.aggregate(Avg('score'))['score__avg']\n\t\t\t\t\t\telif sumstat == \"max\":\n\t\t\t\t\t\t\tscores_sumstat = scores.aggregate(Max('score'))['score__max']\n\t\t\t\t\telse:\n\t\t\t\t\t\tscores_sumstat = \"\"\n\t\t\t\t\tthis_gymnast_scores.append(scores_sumstat)\n\t\t\t\t# Add up AA average\n\t\t\t\tif isinstance(this_gymnast_scores[1], float) and isinstance(this_gymnast_scores[2], float) and isinstance(this_gymnast_scores[3], float) and isinstance(this_gymnast_scores[4], float):\n\t\t\t\t\taa_total = float(this_gymnast_scores[1]) + float(this_gymnast_scores[2]) + float(this_gymnast_scores[3]) + float(this_gymnast_scores[4])\n\t\t\t\t\tthis_gymnast_scores.append(aa_total)\n\t\t\t\telse:\n\t\t\t\t\tthis_gymnast_scores.append(\"\")\n\t\t\telif event == \"VT\":\n\t\t\t\tfor vt_num in [1, 2]:\n\t\t\t\t\tscores = Score.objects.filter(gymnast=gymnast, \n\t\t\t\t\t\tmeet__in=Meet.objects.filter(start_date__range=date_range), event__in=Event.objects.filter(name=\"VT\"), score_num=vt_num)\n\t\t\t\t\tif scores.count() > 0:\n\t\t\t\t\t\tif sumstat == \"avg\":\n\t\t\t\t\t\t\tscores_sumstat = scores.aggregate(Avg('score'))['score__avg']\n\t\t\t\t\t\telif sumstat == \"max\":\n\t\t\t\t\t\t\tscores_sumstat = scores.aggregate(Max('score'))['score__max']\n\t\t\t\t\telse:\n\t\t\t\t\t\tscores_sumstat = \"\"\n\t\t\t\t\tthis_gymnast_scores.append(scores_sumstat)\n\t\t\t\t# Get two-vault average\n\t\t\t\tif isinstance(this_gymnast_scores[1], float) and isinstance(this_gymnast_scores[2], float):\n\t\t\t\t\tvt_avg = (float(this_gymnast_scores[1]) + float(this_gymnast_scores[2]))/2\n\t\t\t\t\tthis_gymnast_scores.append(vt_avg)\n\t\t\t\telse:\n\t\t\t\t\tthis_gymnast_scores.append(\"\")\n\t\t\telse:\n\t\t\t\tscores = Score.objects.filter(gymnast=gymnast, \n\t\t\t\t\tmeet__in=Meet.objects.filter(start_date__range=date_range), 
event__in=Event.objects.filter(name=event))\n\t\t\t\tif scores.count() > 0:\n\t\t\t\t\tif sumstat == \"avg\":\n\t\t\t\t\t\tscores_sumstat = scores.aggregate(Avg('score'))['score__avg']\n\t\t\t\t\telif sumstat == \"max\":\n\t\t\t\t\t\tscores_sumstat = scores.aggregate(Max('score'))['score__max']\n\t\t\t\telse:\n\t\t\t\t\tscores_sumstat = \"\"\n\t\t\t\tthis_gymnast_scores.append(scores_sumstat)\n\t\t\ttable_data.append(this_gymnast_scores)\n\telse: \n\t\tgymnast_list = \"\"\n\t\tgymnasts = []\n\t\ttable_data = []\n\t\tevent = \"AA\"\n\t\tsumstat = \"avg\"\n\t\ttime = \"year\"\n\n\tcontext = {\n\t\t'gymnast_list': gymnast_list, # Return what they entered so that it shows up again with the results of their request\n\t\t'gymnasts': gymnasts,\n\t\t'table_data': table_data,\n\t\t'event': event,\n\t\t'sumstat': sumstat,\n\t\t'time': time,\n\t}\n\treturn render(request, 'score_selector.html', context=context)", "def scoringpage (request):\n # Define views here\n context = {}\n return render(request, 'scoringPage.html', context=context)", "def analyze(self):\r\n self.current = 'score'\r\n popup = AnalyzeInterface(self.current_screen).open()", "def initUi(self):\n\n wndw_box = QtGui.QVBoxLayout()\n\n #Calendar\n wndw_box.addWidget(QtGui.QLabel(\"Enter the date of the Exam\"))\n self.cal = QtGui.QCalendarWidget()\n wndw_box.addWidget(self.cal)\n\n #Score Entry Box\n wndw_box.addWidget(QtGui.QLabel(\"Enter Scores Below\"))\n self.score_entry_box = QtGui.QTextEdit()\n wndw_box.addWidget(self.score_entry_box)\n\n #Buttons\n btn_box = QtGui.QHBoxLayout()\n btn_box.addStretch(1)\n\n self.sub_btn = QtGui.QPushButton('Submit')\n self.ccl_btn = QtGui.QPushButton('Cancel')\n self.rst_btn = QtGui.QPushButton('Reset')\n \n btn_box.addWidget(self.sub_btn)\n btn_box.addWidget(self.ccl_btn)\n btn_box.addWidget(self.rst_btn)\n wndw_box.addLayout(btn_box)\n \n self.setLayout(wndw_box)\n self.setGeometry(100, 100, 300, 550)\n self.setWindowTitle('Enter Scores')\n self.show()", "def _select_date_changed(self):\n self.model.edit_traits(view=View(\n UCustom('date'),\n buttons=['OK'],\n title=u'数据生成日期选择',\n kind='panel',\n ))", "def enter_game_scores():\n pass", "def standings_post_request():\n date = request.form[\"date\"]\n datetime_object = datetime.datetime.strptime(date, \"%m-%d-%Y\")\n\n scoreboard = nba_py.Scoreboard(month=datetime_object.month,\n day=datetime_object.day,\n year=datetime_object.year)\n east_standings = scoreboard.east_conf_standings_by_day()\n west_standings = scoreboard.west_conf_standings_by_day()\n\n return render_template(\"standings.html\",\n title=\"standings\",\n east_standings=enumerate(east_standings, 1),\n west_standings=enumerate(west_standings, 1),\n team=CITY_TO_TEAM)", "def finish_page(app: dash.Dash, data: GameData) -> html:\n score_user, score_ai = count_score(data)\n\n layout = dbc.Container([\n dbc.Row([\n dbc.Col(dbc.Card([\n dbc.CardHeader(\n 'You',\n className=\"align-items-center d-flex justify-content-center\"),\n dbc.CardBody(\n html.H1(score_user),\n className=\"align-items-center d-flex justify-content-center\")\n ]),\n width=4),\n dbc.Col(dbc.Card([\n dbc.CardHeader(\n 'AI', className=\"align-items-center d-flex justify-content-center\"),\n dbc.CardBody(\n html.H1(score_ai),\n className=\"align-items-center d-flex justify-content-center\")\n ]),\n width=4)\n ],\n justify=\"center\",\n className=\"mb-4\"),\n dbc.Row(dbc.Col(dbc.Button('Play again!',\n id=\"btn-reset\",\n block=True,\n href=\"/\",\n color=\"primary\",\n style={\n \"background-color\": COLOR_STATWORX,\n 
\"border-color\": COLOR_STATWORX\n }),\n width=4),\n justify=\"center\")\n ],\n className=\"mb-4\")\n\n return layout", "def on_date_change(self):\n self.date = self.ui.calendarWidget.selectedDate()\n self.update_views()", "def update_score():\n pass", "def post(self, request):\n form = Game1ChoiceForm(request.POST)\n roll = Roll.objects.get(game_no=1)\n scores = Scores.objects.get(player_no=1)\n #zdefiniować błędy w formularzu, tj. niedopuszczalne wybory (odwołując się dp chosen_numbers)\n if form.is_valid():\n put_aside = form.cleaned_data['put_aside']\n chosen_numbers = []\n result = 0\n if \"1\" in put_aside:\n chosen_numbers.append(roll.dice_1)\n if \"2\" in put_aside:\n chosen_numbers.append(roll.dice_2)\n if \"3\" in put_aside:\n chosen_numbers.append(roll.dice_3)\n if \"4\" in put_aside:\n chosen_numbers.append(roll.dice_4)\n if \"5\" in put_aside:\n chosen_numbers.append(roll.dice_5)\n \"\"\"Count the result\"\"\"\n if len(chosen_numbers) == 5:\n if len(set(chosen_numbers)) == 1:\n if 1 in chosen_numbers:\n result += 400\n elif 5 in chosen_numbers:\n result += 200\n else:\n result += 120\n elif len(set(chosen_numbers)) == 2:\n if chosen_numbers.count(1) == 4 and 5 in chosen_numbers:\n result += 205\n elif chosen_numbers.count(1) == 3 and 5 in chosen_numbers:\n result += 110\n elif chosen_numbers.count(1) == 2 and 5 in chosen_numbers:\n result += 70\n elif chosen_numbers.count(1) == 1 and 5 in chosen_numbers:\n result += 110\n elif chosen_numbers.count(1) == 2 and 5 not in chosen_numbers:\n result += 50\n elif chosen_numbers.count(5) == 2 and 1 not in chosen_numbers:\n result += 40\n elif chosen_numbers.count(1) == 1 and 1 not in chosen_numbers:\n result += 70\n elif chosen_numbers.count(5) == 1 and 5 not in chosen_numbers:\n result += 65\n else:\n return HttpResponseRedirect('/game_1/')#komunikat o błędzie w formularzu - jak wyświetlić???\n elif len(set(chosen_numbers)) == 3:\n if 1 in chosen_numbers and 5 in chosen_numbers:\n result += 45\n else:\n return HttpResponseRedirect('/game_1/') #komunikat o błędzie w formularzu\n elif len(set(chosen_numbers)) == 4:\n return HttpResponseRedirect('/game_1/') #komunikat o błędzie w formularzu\n elif len(set(chosen_numbers)) == 5:\n return HttpResponseRedirect('/game_1/') #komunikat o błędzie w formularzu\n elif len(chosen_numbers) == 4:\n if len(set(chosen_numbers)) == 1:\n if 1 in chosen_numbers:\n result += 200\n elif 5 in chosen_numbers:\n result += 100\n else:\n result += 60\n elif len(set(chosen_numbers)) == 2:\n if chosen_numbers.count(1) == 3 and 5 in chosen_numbers:\n result += 105\n elif chosen_numbers.count(1) == 2 and 5 in chosen_numbers:\n result += 30\n elif chosen_numbers.count(5) == 3 and 1 in chosen_numbers:\n result += 60\n elif chosen_numbers.count(5) == 4:\n result += 100\n elif 1 in chosen_numbers and 5 not in chosen_numbers:\n result += 40\n elif 5 in chosen_numbers and 1 not in chosen_numbers:\n result += 35\n else:\n return HttpResponseRedirect('/game_1/') #komunikat o błędzie w formularzu\n elif len(set(chosen_numbers)) == 3:\n return HttpResponseRedirect('/game_1/')#komunikat o błędzie w formularzu\n elif len(set(chosen_numbers)) == 4:\n return HttpResponseRedirect('/game_1/')#komunikat o błędzie w formularzu\n elif len(chosen_numbers) == 3:\n if len(set(chosen_numbers)) == 1:\n if 1 in chosen_numbers:\n result += 100\n elif 5 in chosen_numbers:\n result += 50\n else:\n result += 30\n elif len(set(chosen_numbers)) == 2:\n if chosen_numbers.count(1) == 2 and 5 in chosen_numbers:\n result += 25\n elif 
chosen_numbers.count(5) == 2 and 1 in chosen_numbers:\n result += 20\n else:\n return HttpResponseRedirect('/game_1/') #komunikat o błędzie w formularzu\n elif len(set(chosen_numbers)) == 3:\n return HttpResponseRedirect('/game_1/')#komunikat o błędzie w formularzu\n elif len(chosen_numbers) == 2:\n if len(set(chosen_numbers)) == 1:\n if 1 in chosen_numbers:\n result += 20\n elif 5 in chosen_numbers:\n result += 10\n else:\n return HttpResponseRedirect('/game_1/')#komunikat o błędzie w formularzu\n elif len(set(chosen_numbers)) == 2:\n if 1 in chosen_numbers and 5 in chosen_numbers:\n result += 15\n else:\n return HttpResponseRedirect('/game_1/')#komunikat o błędzie w formularzu\n elif len(chosen_numbers) == 1:\n if 1 in chosen_numbers:\n result += 10\n elif 5 in chosen_numbers:\n result += 5\n else:\n #komunikat o błędzie w formularzu\n return HttpResponseRedirect('/game_1/')\n scores.total += result\n scores.dices_amount -= len(chosen_numbers)\n if scores.dices_amount == 0:\n scores.dices_amount += 5\n scores.save()\n return HttpResponseRedirect('/start/')\n else: #if form is not is_valid\n return HttpResponseRedirect('/game_1/') #komunikat o błędzie w formularzu/ przez form.add_error??", "def disp_score():", "def fullleaderboard (request):\n # Define views here\n score_submit = EventEntryModel.objects.exclude(winner__isnull=True).count()\n active_players = PlayerModel.objects.all()\n\n loaded_points = list(EventEntryModel.objects.aggregate(Sum('points')).values())[0]\n awarded_points = list(EventEntryModel.objects.exclude(winner__isnull=True).aggregate(Sum('points')).values())[0]\n\n context = {\n 'score_submit': score_submit,\n 'active_players': active_players,\n 'loaded_points': loaded_points,\n 'awarded_points': awarded_points,\n }\n\n return render(request, 'fullLeaderboard.html', context=context)", "def score(self):", "def to_score(self):\n self._bottom_tab(2)\n self._goto(\"score\")", "def envoi_score_requete(pseudo):\n global score\n global nom_niveau\n data = {\"pseudo\":pseudo, \"score\":score, \"niveau\":nom_niveau}\n r = requests.post(\"http://rushhour.cf/scores.php?new_score=true\", data = data)\n if r.text ==\"ok\": #Si le serveur a envoyé une réponse favorable\n retour = Label(envoi_score, text=\"Score envoyé avec succès.\") # On informe le joueur\n else :\n retour = Label(envoi_score, text=\"Un erreur s'est produite\")\n retour.pack()\n annuler.config(text=\"Fermer\")", "def main():\r\n yesterday = (dt.date.today() - dt.timedelta(1)).strftime(\"%Y-%m-%d\")\r\n entries = query_db(\"\"\"select TeamAway, TeamHome, spread, Predicted FROM Spreads\"\"\",\r\n one=False)\r\n results = query_db(\"\"\"SELECT Team, Opponent, spread, gameDate,\r\n Differential,\r\n Predicted, beatSpreadSLED\r\n FROM PredRes WHERE gameDate=?\"\"\",\r\n [yesterday])\r\n return render_template('main.html', entries=entries, results=results)", "def to_form(self):\n return ScoreForm(user_name=self.user.get().name, won=self.won,\n date=str(self.date), bombs=self.bombs)", "def showSelectedDate(self):\n pass", "def setup_scores_frame(self):\r\n if self.score_window is not None:\r\n self.score_window.setup_scores_frame()", "def update_score_window(self):\r\n\r\n if not self.display_game:\r\n return\r\n\r\n if self.score_window is not None:\r\n self.score_window.update_window()", "def rd1leaderboard(request):\n\n #Add views\n playing_players = Rd1SlotModel.objects.filter(player_name__isnull=False)\n\n endurance_leader = Rd1SlotModel.objects.aggregate(Max('endurance_score'))\n\n #Add context\n context = {\n 
'playing_players': playing_players,\n 'endurance_leader': endurance_leader,\n }\n\n return render(request, 'rd1Leaderboard.html', context=context)", "def calendarPageChanged(self, year, month):\n success = self.porker_thread.extendDates(datetime.date(year, month, 1))\n #if not success:\n # self.alertMessage(\"Failure!\",\"Unable to extend the thread's dates for some reason.\")\n #efficiency = self.porker_thread.getEfficiencyFor(self.getActiveDate())\n #self.porker_thread.sentDatesData = False", "def setup_score_window(self, score_window):\r\n self.score_window = score_window", "def on_clicked_update(self):\n process = crawler.CrawlerProcess(\n {\n \"USER_AGENT\": \"currency scraper\",\n \"SCRAPY_SETTINGS_MODULE\": \"currency_scraper.currency_scraper.settings\",\n \"ITEM_PIPELINES\": {\n \"currency_scraper.currency_scraper.pipelines.Sqlite3Pipeline\": 300,\n }\n }\n )\n process.crawl(InvestorSpider)\n try:\n process.start()\n gui_warnings.update_notification()\n except error.ReactorNotRestartable:\n gui_warnings.warning_already_updated()", "def scoring(self):\n pass", "def show_score(self):\n self._pause = True # pause the game when you check the score\n score_list = self.get_high_score(self._filename) # get the record\n top = tk.Toplevel() # create a Toplevel\n top.title('Score Board')\n # create a text label for notification\n title = tk.Label(top, text='High Scored Player in This Level', width=70)\n title.pack(side=tk.TOP, ipady=1)\n if score_list is None: # check whether the record is empty\n tk.Label(top, text='No record in this level yet!', width=70).pack(side=tk.TOP, ipady=1)\n else: # if not empty\n for record in score_list: # shows up all the detail\n tk.Label(top, text=record[0] + ' : ' + record[1]).pack(side=tk.TOP, ipady=1)", "def main():\n# year = int(input(\"Enter year for calendar: \"))\n# first_day = first_day_of_year(year)\n\n # Loop through months 1 through 12\n # for month in range(1, NUM_MONTHS + 1):\n# first_day = print_month(first_day, month, year)\n\n canvas = make_canvas(CANVAS_WIDTH, CANVAS_HEIGHT, 'Calendar')\n # present the header, today's date\n\n top_rows(canvas)\n # present two buttons: weekly display and monthly display\n weekly_display_type = True\n date_to_present = date.today()\n #button_weekly(canvas,weekly_display_type,date_to_present)\n #button_monthly(canvas, weekly_display_type, date_to_present)\n # present weekly display\n canvas.update()\n canvas.mainloop()" ]
[ "0.66856855", "0.6264408", "0.5798715", "0.57950044", "0.5549387", "0.55020916", "0.54524344", "0.544693", "0.5446883", "0.54193795", "0.538566", "0.53187484", "0.5316336", "0.5307269", "0.52999324", "0.52608603", "0.52032787", "0.5194402", "0.5177872", "0.5137147", "0.5116508", "0.5101937", "0.5098719", "0.50945777", "0.5076276", "0.5064914", "0.5010649", "0.4981531", "0.49754763", "0.49483964" ]
0.7255501
0
Get the list of games in the daily scoreboard.
def get_games(date): scoreboard = nba_py.Scoreboard(month=date.month, day=date.day, year=date.year) line_score = scoreboard.line_score() game_header = scoreboard.game_header() games = [] current_game = {} game_sequence = 0 game_sequence_counter = 0 # Get HOME TEAM and AWAY TEAM data for each boxscore game in line_score. for i, value in enumerate(line_score): if (value["GAME_SEQUENCE"] != game_sequence): game_sequence += 1 current_game["GAME_ID"] = value["GAME_ID"] home_team_id = game_header[game_sequence - 1]["HOME_TEAM_ID"] if (home_team_id == value["TEAM_ID"]): current_game["HOME_TEAM"] = value["TEAM_ABBREVIATION"] current_game["HOME_TEAM_WINS_LOSSES"] = value["TEAM_WINS_LOSSES"] current_game["HOME_TEAM_PTS"] = value["PTS"] current_game["HOME_TEAM_ID"] = value["TEAM_ID"] if (current_game["HOME_TEAM"] in TEAM_ID_DATA): current_game["HOME_TEAM_IMG"] = TEAM_ID_DATA[current_game["HOME_TEAM"]]["img"] else: current_game["AWAY_TEAM"] = value["TEAM_ABBREVIATION"] current_game["AWAY_TEAM_WINS_LOSSES"] = value["TEAM_WINS_LOSSES"] current_game["AWAY_TEAM_PTS"] = value["PTS"] current_game["AWAY_TEAM_ID"] = value["TEAM_ID"] if (current_game["AWAY_TEAM"] in TEAM_ID_DATA): current_game["AWAY_TEAM_IMG"] = TEAM_ID_DATA[current_game["AWAY_TEAM"]]["img"] if (value["TEAM_ABBREVIATION"] in TEAMS): if (home_team_id == value["TEAM_ID"]): current_game["HOME_TEAM_FULL_NAME"] = TEAMS[value["TEAM_ABBREVIATION"]]["city"] + \ " " + TEAMS[value["TEAM_ABBREVIATION"]]["name"] else: current_game["AWAY_TEAM_FULL_NAME"] = TEAMS[value["TEAM_ABBREVIATION"]]["city"] + \ " " + TEAMS[value["TEAM_ABBREVIATION"]]["name"] game_sequence = value["GAME_SEQUENCE"] game_sequence_counter += 1 elif game_sequence_counter == 1: if ("AWAY_TEAM" in current_game): current_game["HOME_TEAM"] = value["TEAM_ABBREVIATION"] current_game["HOME_TEAM_WINS_LOSSES"] = value["TEAM_WINS_LOSSES"] current_game["HOME_TEAM_PTS"] = value["PTS"] current_game["HOME_TEAM_ID"] = value["TEAM_ID"] if (current_game["HOME_TEAM"] in TEAM_ID_DATA): current_game["HOME_TEAM_IMG"] = TEAM_ID_DATA[current_game["HOME_TEAM"]]["img"] else: current_game["AWAY_TEAM"] = value["TEAM_ABBREVIATION"] current_game["AWAY_TEAM_WINS_LOSSES"] = value["TEAM_WINS_LOSSES"] current_game["AWAY_TEAM_PTS"] = value["PTS"] current_game["AWAY_TEAM_ID"] = value["TEAM_ID"] if (current_game["AWAY_TEAM"] in TEAM_ID_DATA): current_game["AWAY_TEAM_IMG"] = TEAM_ID_DATA[current_game["AWAY_TEAM"]]["img"] if (value["TEAM_ABBREVIATION"] in TEAMS): if ("AWAY_TEAM" in current_game): current_game["HOME_TEAM_FULL_NAME"] = TEAMS[value["TEAM_ABBREVIATION"]]["city"] + \ " " + TEAMS[value["TEAM_ABBREVIATION"]]["name"] else: current_game["AWAY_TEAM_FULL_NAME"] = TEAMS[value["TEAM_ABBREVIATION"]]["city"] + \ " " + TEAMS[value["TEAM_ABBREVIATION"]]["name"] current_game["GAME_STATUS_TEXT"] = game_header[game_sequence - 1]["GAME_STATUS_TEXT"] if not game_header[game_sequence - 1]["NATL_TV_BROADCASTER_ABBREVIATION"]: current_game["BROADCASTER"] = "" else: current_game["BROADCASTER"] = game_header[game_sequence - 1]["NATL_TV_BROADCASTER_ABBREVIATION"] games.append(current_game) current_game = {} game_sequence = value["GAME_SEQUENCE"] game_sequence_counter -= 1 east_standings = scoreboard.east_conf_standings_by_day() west_standings = scoreboard.west_conf_standings_by_day() return (games, east_standings, west_standings)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_games(season, date):\n url = \"http://live.nhl.com/GameData/SeasonSchedule-\" + season + \".json\"\n response = urllib.urlopen(url)\n data = json.loads(response.read())\n games = []\n for game in data:\n if game[\"est\"][:8] == date:\n games.append(game)\n return games", "def find_games(days_ahead=0):\n headers = {\n 'Host': 'stats.nba.com',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0',\n 'Accept': 'application/json, text/plain, */*',\n 'Accept-Language': 'en-US,en;q=0.5',\n 'Referer': 'https://stats.nba.com/',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Connection': 'keep-alive',\n 'x-nba-stats-origin': 'stats',\n 'x-nba-stats-token': 'true'\n }\n board = scoreboardv2.ScoreboardV2(day_offset=days_ahead, headers=headers).get_data_frames()[0]\n board.replace(id_to_abrv, inplace=True)\n return board[['GAME_DATE_EST', 'GAME_ID', 'HOME_TEAM_ID', 'VISITOR_TEAM_ID']]", "def get_games_by_date(self, date):\n return self._db.Games.find({'date' : date})", "def get_games():\n feed = feedparser.parse(FEED_URL)\n\n games = []\n for entry in feed['entries']:\n game = Game(entry.title, entry.link)\n games.append(game)\n\n return games", "def get_all_games():\n games = brain.get_all_games()\n return games", "async def fetch_games(self):\n return await self.http.get_game_list()", "def _get_live_games(self):\n response = requests.get(self._get_score_url())\n if response.status_code == 200:\n return [g for g in response.json()['games'] if g['status']['state'] == self.desired_game_state]", "def get_all_games(season):\n url = BASE_URL.format(season)\n json_data = requests.get(url, headers=HEADERS).json()\n all_games = json_data[\"resultSets\"][0][\"rowSet\"]\n return all_games", "def scoreboard(year, month, day):\n # Get data from mlbgame library\n data = mlbgame.data.get_scoreboard(year, month, day)\n # Parse through returned data\n parsed = etree.parse(data)\n root = parsed.getroot()\n output = []\n # Loop through the list of games that are returned\n for game in root:\n if game.tag == 'data':\n return []\n # Get the Team Names\n teams = game.findall('team')\n home_name = teams[0].attrib['name']\n away_name = teams[1].attrib['name']\n # Building a dictionary\n # I am really only interested in the scores.... 
not sure if\n # game_id is actually necessary....but here it stays\n game_data = game.find('game')\n game_id = game_data.attrib['id']\n home_team_data = teams[0].find('gameteam')\n home_team = home_name\n home_team_runs = int(home_team_data.attrib['R'])\n away_team_data = teams[1].find('gameteam')\n away_team = away_name\n away_team_runs = int(away_team_data.attrib['R'])\n score = {\n 'home_team': home_team,\n 'home_team_runs': home_team_runs,\n 'away_team': away_team,\n 'away_team_runs': away_team_runs\n }\n output.append(score)\n return output", "def get_games_from_database (self):\n r = requests.get (self.url_endpoint)\n if (r.status_code != 200):\n print (\"Failed to get games:\\n\", r.text)\n return r\n \n games = json.loads (r.text)['games']\n return_list = []\n for game in games:\n return_list.append (game['game_state'])\n return return_list", "def get_games():\r\n feed = feedparser.parse(FEED_URL)\r\n games = []\r\n for entry in feed.entries:\r\n games.append(Game(title = entry['title']\r\n , link = entry['link']\r\n ))\r\n return games", "def scoreboard(year, month, day, home=None, away=None):\n # get data\n data = mlbgame.data.get_scoreboard(year, month, day)\n # parse data\n parsed = etree.parse(data)\n root = parsed.getroot()\n games = {}\n output = {}\n # loop through games\n for game in root:\n if game.tag == 'data':\n return []\n # get team names\n teams = game.findall('team')\n home_name = teams[0].attrib['name']\n away_name = teams[1].attrib['name']\n # check if teams match parameters\n if (home_name == home and home is not None) \\\n or (away_name == away and away is not None) \\\n or (away is None and home is None):\n # throw all the data into a complicated dictionary\n game_tag = game.tag\n game_data = game.find('game')\n game_id = game_data.attrib['id']\n game_league = game_data.attrib['league']\n game_status = game_data.attrib['status']\n game_start_time = game_data.attrib['start_time']\n home_team_data = teams[0].find('gameteam')\n home_team = home_name\n home_team_runs = int(home_team_data.attrib['R'])\n home_team_hits = int(home_team_data.attrib['H'])\n home_team_errors = int(home_team_data.attrib['E'])\n away_team_data = teams[1].find('gameteam')\n away_team = away_name\n away_team_runs = int(away_team_data.attrib['R'])\n away_team_hits = int(away_team_data.attrib['H'])\n away_team_errors = int(away_team_data.attrib['E'])\n # check type of game\n if game_tag == 'go_game' or game_tag == 'ig_game':\n try:\n w_pitcher_data = game.find('w_pitcher')\n w_pitcher = w_pitcher_data.find('pitcher').attrib['name']\n w_pitcher_wins = int(w_pitcher_data.attrib['wins'])\n w_pitcher_losses = int(w_pitcher_data.attrib['losses'])\n except Exception:\n w_pitcher = \"\"\n w_pitcher_wins = 0\n w_pitcher_losses = 0\n try:\n l_pitcher_data = game.find('l_pitcher')\n l_pitcher = l_pitcher_data.find('pitcher').attrib['name']\n l_pitcher_wins = int(l_pitcher_data.attrib['wins'])\n l_pitcher_losses = int(l_pitcher_data.attrib['losses'])\n except Exception:\n l_pitcher = \"\"\n l_pitcher_wins = 0\n l_pitcher_losses = 0\n try:\n sv_pitcher_data = game.find('sv_pitcher')\n sv_pitcher = sv_pitcher_data.find('pitcher').attrib['name']\n sv_pitcher_saves = int(sv_pitcher_data.attrib['saves'])\n except Exception:\n sv_pitcher = \"\"\n sv_pitcher_saves = 0\n output = {\n 'game_id': game_id,\n 'game_tag': game_tag,\n 'game_league': game_league,\n 'game_status': game_status,\n 'game_start_time': game_start_time,\n 'home_team': home_team,\n 'home_team_runs': home_team_runs,\n 'home_team_hits': 
home_team_hits,\n 'home_team_errors': home_team_errors,\n 'away_team': away_team,\n 'away_team_runs': away_team_runs,\n 'away_team_hits': away_team_hits,\n 'away_team_errors': away_team_errors,\n 'w_pitcher': w_pitcher,\n 'w_pitcher_wins': w_pitcher_wins,\n 'w_pitcher_losses': w_pitcher_losses,\n 'l_pitcher': l_pitcher,\n 'l_pitcher_wins': l_pitcher_wins,\n 'l_pitcher_losses': l_pitcher_losses,\n 'sv_pitcher': sv_pitcher,\n 'sv_pitcher_saves': sv_pitcher_saves\n }\n # games that were not played\n elif game_tag == 'sg_game':\n try:\n p_pitcher_data = game.findall('p_pitcher')\n p_pitcher_home_data = p_pitcher_data[0]\n p_pitcher_home = p_pitcher_home_data.find(\n 'pitcher').attrib['name']\n p_pitcher_home_wins = int(p_pitcher_home_data.\n attrib['wins'])\n p_pitcher_home_losses = int(p_pitcher_home_data.\n attrib['losses'])\n p_pitcher_away_data = p_pitcher_data[1]\n p_pitcher_away = p_pitcher_away_data.find(\n 'pitcher').attrib['name']\n p_pitcher_away_wins = int(p_pitcher_away_data.\n attrib['wins'])\n p_pitcher_away_losses = int(p_pitcher_away_data.\n attrib['losses'])\n except Exception:\n p_pitcher_home = ''\n p_pitcher_home_wins = 0\n p_pitcher_home_losses = 0\n p_pitcher_away = ''\n p_pitcher_away_wins = 0\n p_pitcher_away_losses = 0\n output = {\n 'game_id': game_id,\n 'game_tag': game_tag,\n 'game_league': game_league,\n 'game_status': game_status,\n 'game_start_time': game_start_time,\n 'home_team': home_team,\n 'home_team_runs': home_team_runs,\n 'home_team_hits': home_team_hits,\n 'home_team_errors': home_team_errors,\n 'away_team': away_team,\n 'away_team_runs': away_team_runs,\n 'away_team_hits': away_team_hits,\n 'away_team_errors': away_team_errors,\n 'p_pitcher_home': p_pitcher_home,\n 'p_pitcher_home_wins': p_pitcher_home_wins,\n 'p_pitcher_home_losses': p_pitcher_home_losses,\n 'p_pitcher_away': p_pitcher_away,\n 'p_pitcher_away_wins': p_pitcher_away_wins,\n 'p_pitcher_away_losses': p_pitcher_away_losses\n }\n # put this dictionary into the larger dictionary\n games[game_id] = output\n return games", "def grouped_games(self):\n # Game times are stored as UTC, need to be offset back to EST or the\n # night games overflow into the next day.\n offset = datetime.timedelta(hours=-5)\n date_grouper = lambda g: (g.start + offset).date()\n time_grouper = lambda g: g.start.time()\n for date, games1 in groupby(self.games.fetch(100), date_grouper):\n group = [(time, list(games2)) for time, games2\n in groupby(games1, time_grouper)]\n yield date, group", "def get_free_games(self) -> List[Game]:", "def retrieveGames():\n result = cs411_game.getGames()\n return prepJSON(result)", "def own_games(self):\r\n return sorted(self.games + self.finals, key=lambda g: (g.datetime, g.pitch.rank))", "def get_games(url):\n \n import urllib\n import urllib2\n import re\n\n response = urllib2.urlopen(url + 'matches')\n html = response.read()\n\n games_html = re.findall('<tr class=\"match-row custom\">(.*?)</tr', html, flags=re.MULTILINE|re.DOTALL)\n\n games = []\n\n for game_html in games_html:\n game_match = re.search('.*?<td>(.*?)</td>.*?<td class=\"align-center\">(.*?)</td>.*?<span class=\"match-win\">(.*?)</span>.*?<td class=\"align-right\">[\\r\\n\\t]*(.*?)[\\r\\n\\t]*</td>', game_html, flags=re.MULTILINE|re.DOTALL)\n \n game = {}\n \n if game_match:\n game['map'] = game_match.group(1)\n game['type'] = game_match.group(2)\n game['outcome'] = game_match.group(3)\n game['date'] = game_match.group(4)\n games.append(game)\n \n return games", "def as_games(self):\n self._assert_no_aggregate()\n\n 
self._sort_tables = [types.Game]\n ids = self._ids('game', self._sorter)\n results = []\n q = 'SELECT %s FROM game %s %s'\n with Tx(self._db) as cursor:\n q = q % (\n types.select_columns(types.Game),\n _prefix_and(_sql_pkey_in(cursor, ['gsis_id'], ids['game'])),\n self._sorter.sql(tabtype=types.Game),\n )\n cursor.execute(q)\n\n for row in cursor.fetchall():\n results.append(types.Game.from_row(self._db, row))\n return results", "def games(self, competition_id: int, season_id: int) -> DataFrame[Any]:", "def get_bga_game_list():\n result = requests.get(\"https://www.boardgamegeek.com/xmlapi2/geeklist/252354\")\n return result.text", "def get_games(self, start_game_id, end_game_id):\n games = []\n \n num_games = end_game_id - start_game_id + 1\n \n for game_id in range(start_game_id, end_game_id + 1):\n try:\n game = self.get_game(game_id)\n games.append(game)\n except:\n print ('game_id =', game_id, 'failed')\n \n time.sleep(0.4)\n \n update_progress(game_id - start_game_id + 1, num_games)\n \n return games", "def get_player_games(self, year, use_local=True):", "def get_games():\n\n return jsonify({\"games\": list(map(make_public_game, games))})", "def all_games(self):\r\n return sorted(self.games + list(g for sp in self.sub_pools for g in sp.games) + self.finals,\r\n key=lambda g: (g.datetime, g.pitch.rank))", "def get_games(msg: telebot.types.Message):\n games = Game.select()\n m = ''\n for game in games:\n m += f'{game.id}: {jsonpickle.encode(game)}\\n'\n\n bot.send_message(\n msg.from_user.id,\n m\n )", "def scrape_all_world_cup_games():\n\n def scrape_scores_year(year):\n urls = scrape_world_cup_scoreboard(year)\n scores = [scrape_fifa_game(url, 'FIFA World Cup') for url in urls]\n return scores\n\n l = []\n for year in sorted(world_cup_mapping.keys()):\n l.extend(scrape_scores_year(year))\n return l", "def game_list_full(self, uid=0):\n games = session.query(Game).all()\n return games", "def scrape_tournament_games(competition_name, tournament_id, edition_ids):\n\n l = []\n for edition_id in edition_ids:\n urls = scrape_fifa_scoreboard(tournament_id, edition_id)\n games = [scrape_fifa_game(url, competition_name) for url in urls]\n l.extend(games)\n return l", "def get_games(driver, verbose=False):\n\n games = driver.find_elements_by_class_name(GAME_CLASS)\n\n if verbose:\n print(f\"len(games): {len(games)}\")\n _ = [print(g.text) for g in games]\n\n return games", "def get_games_behind_history(self, team):\n abbr = convert_name(team, how='abbr')\n res = self._db.Teams.aggregate([{'$match': {'Tm' : abbr}},\n {'$unwind': '$Schedule'},\n {'$project':\n {'_id' : 0,\n 'Date' : '$Schedule.Date',\n 'GB': '$Schedule.GB'}}])\n hist = [x for x in list(res) if 'GB' in x.keys()]\n return hist" ]
[ "0.73755777", "0.73113036", "0.71790415", "0.69887614", "0.696558", "0.6951601", "0.6938847", "0.6931797", "0.69291914", "0.6795544", "0.6662662", "0.6488449", "0.6464402", "0.639219", "0.6384629", "0.637464", "0.6315723", "0.6276213", "0.62460154", "0.6242827", "0.6199504", "0.6156218", "0.61472005", "0.6111179", "0.6075513", "0.6028021", "0.59952986", "0.59912205", "0.5976009", "0.5953107" ]
0.74339443
0
Handle the POST request when searching for a specific player or team.
def search(): name = request.form["searchname"] if name.upper() == "YAO MING": return redirect(url_for("players", playerid="2397")) team_id = "" split_name = name.split(" ", 1) if (len(split_name) == 1): try: get_player = player.get_player(split_name[0], just_id=False) get_team = False except: get_player = False if (split_name[0].upper() in TEAMS): team_id = TEAMS[split_name[0].upper()]["id"] team_summary = team.TeamSummary(team_id) get_team = team_summary.info() else: get_team = False else: try: get_player = player.get_player(split_name[0], last_name=split_name[1], just_id=False) get_team = False except: get_player = False if (name.lower() in TEAM_NAME_TO_ID): team_id = TEAM_NAME_TO_ID[name.lower()]["id"] team_summary = team.TeamSummary(team_id) get_team = team_summary.info() else: get_team = False if get_player: return redirect(url_for("players", playerid=get_player["PERSON_ID"])) elif get_team: return redirect(url_for("teams", teamid=team_id)) else: return render_template("search.html")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post(self, request, custom_url: str) -> render:\n\n c_user = request.user\n\n context = self.get_menu_context('music', 'Музыка')\n context['matching'] = True\n if not request.POST.get('query'):\n context['matching'] = False\n context['music_list'] = c_user.profile.get_music_list()\n return render(request, 'music/search.html', context)\n query = request.POST.get('query')\n search_fields = ['title', 'artist']\n context['c_matches'] = Music.objects.filter(search_filter(search_fields, query))\n\n return render(request, 'music/search.html', context)", "def do_search(self, **criteria):\n return self.app.get(url(controller='dex_search',\n action='move_search',\n **criteria))", "def search(request):\r\n\tinput_text = request.GET.get('search-text', '')\r\n\tgames = Game.objects.filter(name__icontains=input_text)\r\n\treturn render(request, 'home.html', {'games': games, 'MEDIA_URL': settings.MEDIA_URL})", "def search(self, query):", "async def search(self, *args, **kwargs):\n pass", "def search(self, *args, **kwargs):", "def post(self):\n query = self.request.get('search')\n if query:\n self.redirect('/searchdemo/charlie?' + urllib.urlencode(\n #{'query': query}))\n {'query': query.encode('utf-8')}))\n else:\n self.redirect('/searchdemo/charlie/')", "def search(request):\n if 'find_project' in request.GET and request.GET['find_project']:\n project_name=request.GET.get('find_project')\n \n searched_project=Project.search_project(project_name)\n \n return render(request,'search_results.html',{'searched_project':searched_project})", "def search(request):\n\tif request.method == 'GET':\n\t\ttitle = request.GET.get('title')\n\t\tname = request.GET.get('person')\n\t\tif title:\n\t\t\treturn search_by_title(title)\n\t\telif name:\n\t\t\treturn search_by_person(name)\n\t\telse:\n\t\t\treturn JSONResponse({})", "def search():\n query = request.args['query']\n # find instances of the entered word in title, tags or ingredients\n results = mongo.db.places.find({\n '$or': [\n {'name': {'$regex': query, '$options': 'i'}},\n {'tags': {'$regex': query, '$options': 'i'}},\n {'city': {'$regex': query, '$options': 'i'}},\n ]\n })\n return render_template('search.html', query=query, results=results)", "def search(request):\n\n term = \"\"\n organizations = None\n memberships = None\n events = None\n persons = None\n airports = None\n training_requests = None\n comments = None\n only_result = None\n\n if request.method == \"GET\" and \"term\" in request.GET:\n form = SearchForm(request.GET)\n if form.is_valid():\n term = form.cleaned_data.get(\"term\", \"\")\n tokens = re.split(r\"\\s+\", term)\n\n organizations = Organization.objects.filter(\n Q(domain__icontains=term) | Q(fullname__icontains=term)\n ).order_by(\"fullname\")\n if len(organizations) == 1 and not only_result:\n only_result = organizations[0]\n\n memberships = Membership.objects.filter(\n registration_code__icontains=term\n ).order_by(\"-agreement_start\")\n if len(memberships) == 1 and not only_result:\n only_result = memberships[0]\n\n events = Event.objects.filter(\n Q(slug__icontains=term)\n | Q(host__domain__icontains=term)\n | Q(host__fullname__icontains=term)\n | Q(url__icontains=term)\n | Q(contact__icontains=term)\n | Q(venue__icontains=term)\n | Q(address__icontains=term)\n ).order_by(\"-slug\")\n if len(events) == 1 and not only_result:\n only_result = events[0]\n\n # if user searches for two words, assume they mean a person\n # name\n if len(tokens) == 2:\n name1, name2 = tokens\n complex_q = (\n (Q(personal__icontains=name1) & 
Q(family__icontains=name2))\n | (Q(personal__icontains=name2) & Q(family__icontains=name1))\n | Q(email__icontains=term)\n | Q(secondary_email__icontains=term)\n | Q(github__icontains=term)\n )\n persons = Person.objects.filter(complex_q)\n else:\n persons = Person.objects.filter(\n Q(personal__icontains=term)\n | Q(family__icontains=term)\n | Q(email__icontains=term)\n | Q(secondary_email__icontains=term)\n | Q(github__icontains=term)\n ).order_by(\"family\")\n\n if len(persons) == 1 and not only_result:\n only_result = persons[0]\n\n airports = Airport.objects.filter(\n Q(iata__icontains=term) | Q(fullname__icontains=term)\n ).order_by(\"iata\")\n if len(airports) == 1 and not only_result:\n only_result = airports[0]\n\n training_requests = TrainingRequest.objects.filter(\n Q(group_name__icontains=term)\n | Q(family__icontains=term)\n | Q(email__icontains=term)\n | Q(github__icontains=term)\n | Q(affiliation__icontains=term)\n | Q(location__icontains=term)\n | Q(user_notes__icontains=term)\n )\n if len(training_requests) == 1 and not only_result:\n only_result = training_requests[0]\n\n comments = Comment.objects.filter(\n Q(comment__icontains=term)\n | Q(user_name__icontains=term)\n | Q(user_email__icontains=term)\n | Q(user__personal__icontains=term)\n | Q(user__family__icontains=term)\n | Q(user__email__icontains=term)\n | Q(user__github__icontains=term)\n ).prefetch_related(\"content_object\")\n if len(comments) == 1 and not only_result:\n only_result = comments[0]\n\n # only 1 record found? Let's move to it immediately\n if only_result and not form.cleaned_data[\"no_redirect\"]:\n msg = format_html(\n \"You were moved to this page, because your search <i>{}</i> \"\n \"yields only this result.\",\n term,\n )\n if isinstance(only_result, Comment):\n messages.success(request, msg)\n return redirect(\n only_result.content_object.get_absolute_url()\n + \"#c{}\".format(only_result.id)\n )\n elif hasattr(only_result, \"get_absolute_url\"):\n messages.success(request, msg)\n return redirect(only_result.get_absolute_url())\n\n else:\n messages.error(request, \"Fix errors below.\")\n\n # if empty GET, we'll create a blank form\n else:\n form = SearchForm()\n\n context = {\n \"title\": \"Search\",\n \"form\": form,\n \"term\": term,\n \"organisations\": organizations,\n \"memberships\": memberships,\n \"events\": events,\n \"persons\": persons,\n \"airports\": airports,\n \"comments\": comments,\n \"training_requests\": training_requests,\n }\n return render(request, \"dashboard/search.html\", context)", "def search(self):\r\n # If this is a url /username/search then we need to update the search\r\n # form action to /username/results\r\n mdict = self.matchdict\r\n username = mdict.get('username', None)\r\n return {'username': username}", "def search(request):\n template = 'tracks.html'\n search_by = request.GET.get('search_by')\n if search_by != 'genres':\n search_dict = {search_by + '__icontains': request.GET.get('lookup')}\n tracks_list = Tracks.objects.filter(**search_dict)\n else:\n gen_list = [x.strip() for x in request.GET.get('lookup').split(',')]\n id_list = Genres.objects.filter(genre__in=gen_list).values_list('id', flat=True)\n tracks_list = Tracks.objects.filter(genres__in=id_list).distinct()\n context = {'track_list': tracks_list, 'call': 'search', }\n return render(request, template, context)", "def search(request):\n raise NotImplementedError", "def playerSearch(self, start, count, level, formation, position, nationality, league, team, minBid, maxBid, minBIN, maxBIN):\n 
searchstring = \"\"\n cardList = list()\n\n if level != \"\" and level != \"any\":\n searchstring += \"&lev=\" + level\n if formation != \"\" and formation != \"any\":\n searchstring += \"&form=\" + formation\n if position != \"\" and position != \"any\":\n if position == \"defense\" or position == \"midfield\" or position == \"attacker\":\n searchstring += \"&zone=\" + position\n else:\n searchstring += \"&pos=\" + position\n if nationality > 0:\n searchstring += \"&nat=\" + str(nationality)\n if league > 0:\n searchstring += \"&leag=\" + str(league)\n if team > 0:\n searchstring += \"&team=\" + str(team)\n if minBIN > 0:\n searchstring += \"&minb=\" + str(minBIN)\n if maxBIN > 0:\n searchstring += \"&maxb=\" + str(maxBIN)\n if minBid > 0:\n searchstring += \"&micr=\" + str(minBid)\n if maxBid > 0:\n searchstring += \"&macr=\" + str(maxBid)\n\n requestor = UrlRequestor(\"https://utas.fut.ea.com/ut/game/fifa13/auctionhouse?type=player&start=\" + str(start) + \"&num=\" + str(count) + searchstring, {'Content-Type': 'application/json', 'Cookie': self.EASW_KEY + \"; \" + self.EASF_SESS + \"; \" + self.FUTPHISHING + \"; \", 'X-UT-SID': self.XUT_SID, 'x-http-method-override': 'GET'}, \"\")\n requestor.open()\n lol = requestor.getReturnData().get('auctionInfo')\n\n for card in lol:\n cardList.append(Card(card, self))\n return cardList", "def PostModelsSearches(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def search():\n\tif not request.vars.search_term:\n\t\tredirect(URL('index'))\n\tterm = request.vars.search_term\n\torigterm = term\n\tterm = term.replace(' ','|')\n\tartists = db.executesql(\"select distinct(m1.id), m1.art_name, m1.artist_type, m1.country, m1.b_year,m1.b_month,m1.b_date,m1.e_year,m1.e_month,m1.e_day,ts_rank(to_tsvector(m1.art_name),to_tsquery('\"+term+\"')) rank from art_info m1 where to_tsvector('english',m1.art_name) @@ to_tsquery('\"+term+\"') order by rank desc limit 20;\")\n\talbums = db.executesql(\"select distinct(m1.id),m2.name,m1.art_id,m1.art_name,m1.rel_type,m1.count,ts_rank(to_tsvector(m2.name),to_tsquery('\"+term+\"')) rank from rel_art m1, release_name m2, release_group m3 where m3.name = m2.id and m3.id = m1.id and to_tsvector('english',m2.name) @@ to_tsquery('\"+term+\"') order by rank desc limit 20;\")\n\tsongs = db.executesql(\"select m2.id, m1.name, m3.art_id, m3.art_name, m3.rel_id, m3.rel_name from track_name m1, recording m2, rec_rel_art m3 where m1.id = m2.name and m2.id = m3.rec_id and lower(m1.name) LIKE lower('%%\"+origterm+\"%%') limit 20;\")\n\treturn dict(songs=songs, albums=albums, artists=artists)", "def search(request, is_my_list=\"False\"):\n\n search_type = request.GET.get(\"submit\")\n if search_type:\n\n # get query field\n query = ''\n if request.GET.get(search_type):\n query = request.GET.get(search_type)\n\n proj_ids = []\n cod_ids = []\n\n valid_searches = [constants.STRING_TITLE, constants.STRING_DESCRIPTION, constants.STRING_PROTOCOL,\n constants.STRING_CODER, constants.STRING_AREA, constants.STRING_WORKINGGROUP]\n\n search_in_all = True\n for v in valid_searches:\n if v in request.GET:\n search_in_all = False\n break\n\n if search_in_all or request.GET.get(constants.STRING_TITLE):\n codings = CodingProject.objects.all()\n for cod in codings:\n if query.lower() in cod.title.lower():\n cod_ids.append(cod.id)\n\n if search_in_all or request.GET.get(constants.STRING_DESCRIPTION):\n codings = 
CodingProject.objects.all()\n for cod in codings:\n if query.lower() in cod.additional_information.lower():\n cod_ids.append(cod.id)\n\n if request.GET.get(constants.STRING_PROTOCOL):\n proj_ids += ProjectContainer.objects.filter(protocol__icontains=query).values_list('id', flat=True)\n\n if search_in_all or request.GET.get(constants.STRING_CODER):\n for pr in ProjectContainer.objects.all():\n for cd in pr.codings.all():\n user = Person.objects.using('datatracker').get(id=cd.coder)\n if query.lower() in user.name.lower():\n proj_ids.append(pr.id)\n break\n\n if search_in_all or request.GET.get(constants.STRING_AREA):\n for project_container in ProjectContainer.objects.all():\n docs = []\n if not project_container.docs or project_container.docs == '':\n continue\n keys = filter(None, project_container.docs.split(';'))\n docs.extend(list(DocAlias.objects.using('datatracker').filter(name__in=keys).values_list(\n 'document__group__parent__name')))\n for doc in docs:\n if query.lower() in doc[0].lower():\n proj_ids.append(project_container.id)\n break\n # ids += ProjectContainer.objects.filter(docs__document__group__parent__name__icontains=query).values_list(\n # 'id', flat=True)\n\n if search_in_all or request.GET.get(constants.STRING_WORKINGGROUP):\n for project_container in ProjectContainer.objects.all():\n docs = []\n if not project_container.docs or project_container.docs == '':\n continue\n keys = filter(None, project_container.docs.split(';'))\n docs.extend(list(\n DocAlias.objects.using('datatracker').filter(name__in=keys).values_list('document__group__name')))\n for doc in docs:\n if query.lower() in doc[0].lower():\n proj_ids.append(project_container.id)\n break\n \n if cod_ids:\n cod_ids = list(set(cod_ids))\n proj_ids += ProjectContainer.objects.filter(codings__id__in=cod_ids).values_list('id', flat=True)\n project_containers = ProjectContainer.objects.filter(id__in=list(set(proj_ids)))\n \n request.session[constants.ALL_CODINGS] = cod_ids\n request.session[constants.ALL_PROJECTS] = project_containers\n\n request.session[constants.MAINTAIN_STATE] = True\n\n return HttpResponseRedirect(\n settings.CODESTAND_PREFIX + '/codestand/matches/show_list/' + \n is_my_list + '/{0}/'.format(constants.ATT_CREATION_DATE) + 'True')\n\n else:\n return render_page(request, constants.TEMPLATE_MATCHES_SEARCH, {\n \"form\": SearchForm()\n })", "def search_query():\n g.form.process(request.form)\n\n if g.form.submit.data and g.form.search.data:\n query = g.form.search.data\n try:\n result = search.search_code(query)\n except search.NoPostcode:\n # Pass along to search results page to process\n return redirect(url_for(\".search_results\", query=query))\n\n if isinstance(result, models.StopPoint):\n return redirect(url_for(\".stop_atco\", atco_code=result.atco_code))\n elif isinstance(result, models.Postcode):\n return redirect(url_for(\".list_near_postcode\", code=result.text))\n else:\n return redirect(url_for(\".search_results\", query=query))\n else:\n return redirect(url_for(\".search_results\"))", "def search_companies(request):\n search = request.data.get('search', None)\n if search:\n companies = Company.objects.filter(name__search=search)\n else:\n companies = Company.objects.all()\n \n context={'user_id': request.user.id}\n serializer = CompanySerializers(companies, context=context)\n print(search)\n return Response(serializer.data)", "def search(self, *args, **kwargs): # real signature unknown\n pass", "def PostSearches(self, request, context):\n 
context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def search(self, name=None):\r\n params = base.get_params(('name', ), locals())\r\n request = http.Request('GET', self.get_url(), params)\r\n\r\n return request, parsers.parse_json", "def found_specific_player(self) -> Player:\n search_question = ('Nom du joueur recherché : ',\n 'Prénom du joueur recherché : ')\n search_response = []\n for question in search_question:\n valid = self.ask_and_store_text(question)\n while not valid[0]:\n valid = self.ask_and_store_text(question)\n search_response.append(valid[1])\n\n for player in Player.PLAYERS:\n if player.name.upper() == search_response[0].upper() and \\\n player.first_name.capitalize() == search_response[1].capitalize():\n return player\n\n self.view_menu.stand_by_msg(\"Joueur introuvable !\\n\"\n \"Rechercher à nouveau ou créer le joueur\")", "def search(self, query, maxhits=100):", "def search(self, param):\n data = self._http_get(\"search\", query=param)\n return data.json()", "def search():\n pass", "def search_process():\n\n # processing search parameters common to each person\n open_now = request.args.get(\"open_now\")\n time = request.args.get(\"time\")\n limit = request.args.get(\"limit\")\n search_type = request.args.get(\"search-type\")\n\n # person 1's search parameters\n your_term = request.args.get(\"your_term\")\n your_latitude = float(request.args.get(\"your_latitude\"))\n your_longitude = float(request.args.get(\"your_longitude\"))\n your_radius = request.args.get(\"your_radius\")\n your_price = str(request.args.get(\"your_price\"))\n # person 2's search parameters\n friends_latitude = float(request.args.get(\"friends_latitude\"))\n friends_longitude = float(request.args.get(\"friends_longitude\"))\n friends_price = str(request.args.get(\"friends_price\"))\n\n # uses the Google Maps API to geocode and functions written in midpt_formula.py\n # to find the midpoint of the two given addresses\n # your_location = geocoding(st_address1, city1, state1)\n # friends_location = geocoding(st_address2, city2, state2)\n your_location = [your_latitude, your_longitude]\n friends_location = [friends_latitude, friends_longitude]\n mid_lat, mid_lng = midpt_formula(your_location, friends_location)\n\n if search_type == 'midpt':\n friends_term = request.args.get(\"friends_term\")\n friends_radius = request.args.get(\"friends_radius\")\n sort_by = request.args.get(\"sort_by\")\n # sort only works for midpt because of the sets used in venn diagram calculations\n\n params_midpt = {'term': avoid_term_duplicates(your_term, friends_term),\n 'latitude': mid_lat,\n 'longitude': mid_lng,\n 'radius': mi_to_m(stricter_radius(your_radius, friends_radius)),\n 'sort_by': sort_by,\n 'limit': limit,\n }\n\n if time:\n params_midpt['open_at'] = unix_time(time)\n elif open_now:\n params_midpt['open_now'] = open_now\n\n # results for Midpoint Formula calculation Yelp search\n responses = search_yelp(params_midpt)\n\n elif search_type == 'venn':\n # the dictionary of search parameters to submit to the Yelp API\n your_parameters = {'term': your_term,\n 'latitude': your_location[0],\n 'longitude': your_location[1],\n 'radius': mi_to_m(your_radius),\n }\n # import pdb; pdb.set_trace()\n distance_between_us = calculate_distance(tuple(your_location), tuple(friends_location))\n friends_parameters = {'term': your_term,\n 'latitude': friends_location[0],\n 'longitude': friends_location[1],\n 'radius': 
distance_between_us,\n }\n\n # adds the search parameter price if either user inputs a price\n if your_price or friends_price:\n your_parameters['price'] = avoid_price_duplicates(your_price, friends_price)\n friends_parameters['price'] = avoid_price_duplicates(your_price, friends_price)\n\n # adds the business hours parameter if they specify whether they would want\n # to go to the business now or at a future time\n if time:\n your_parameters['open_at'] = unix_time(time)\n friends_parameters['open_at'] = unix_time(time)\n elif open_now:\n your_parameters['open_now'] = open_now\n friends_parameters['open_now'] = open_now\n\n # results for Venn Diagram calculation: two separate Yelp searches for both\n your_search_results = search_yelp(your_parameters)\n friends_search_results = search_yelp(friends_parameters)\n\n\n # finding the results common to both and adding them to a dictionary\n responses = {'businesses': get_common_restaurants(your_search_results, friends_search_results)}\n\n responses['your_location'] = your_location\n responses['friends_location'] = friends_location\n return jsonify(responses)\n\n # sends the locations of each person for creating markers on the map\n\n # do a for loop for when I get more than 2 people meeting up", "def on_search(self, data: Any = None):\n raise NotImplementedError", "async def team_search(self, ctx: commands.Context, username: str):\n all_usernames = {team_id: team.username for team_id, team in self.teams.items()\n if team is not None}\n suggestions = []\n log.info(repr(fuzzywuzzy.process.extract(\n username, all_usernames, limit=5)))\n for fuzz_username, rating, fuzz_id in fuzzywuzzy.process.extract(\n username, all_usernames, limit=5):\n if rating < 50:\n break\n fuzz_team = self.teams[fuzz_id]\n suggestions.append(\n f'(ID: **{fuzz_team.team_id}**) **{fuzz_team.display_name[:40]}**'\n f' -- {len(fuzz_team.users)} registered members')\n if suggestions:\n await ctx.send('\\n'.join(suggestions))\n else:\n await ctx.send(f\"Couldn't find any teams whose usernames resembled `{username}`\")" ]
[ "0.62685007", "0.5986824", "0.5942399", "0.59355116", "0.58643496", "0.583581", "0.58182544", "0.5798136", "0.5775005", "0.5677074", "0.56644845", "0.5644946", "0.5592587", "0.55699605", "0.55672497", "0.55518734", "0.55378574", "0.5534999", "0.5517781", "0.55081433", "0.5487099", "0.54743403", "0.5442817", "0.5434605", "0.5429894", "0.5410614", "0.5407718", "0.53912425", "0.5375451", "0.5372283" ]
0.73069435
0
This function indicates whether a line should be ignored, i.e., whether it is a comment line or empty.
def shouldTheLineBeIgnored(self,line): global multi_comment_line_mode if multi_comment_line_mode: if line.find("*/") != -1: # we found the ending line multi_comment_line_mode = False return False,line[line.find("*/")+2:]+'$endOfMultiLine' else: # still searching for the end of the comment return True,'' if line == '\n': # in case it's a clean line return True,'' if line == "": return True,'' if line[0:2] == "//": return True,'' if line[0] == "/" and (line[1:3] == '**' or line[1:2] == '*'): # it's a multi line comment case if line[3:].find("*/") != -1: # in this case the multi line comment ends here. we will return the rest of the line index_for_the_rest_of_line = line[3:].find("*")+5 # starting after the - 2 for */ and 3 for the real # index if index_for_the_rest_of_line == len(line)-1: return True,'' #in this case we can ignore return False,line[index_for_the_rest_of_line:] #returnning the rest else: multi_comment_line_mode = True return True,'' else: return False,'' # it's not the kind of line we want to ignore
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_line(line):\r\n if not line.strip():\r\n return False # if the last line is blank\r\n if line.startswith(\"#\"):\r\n return False # comment line\r\n if line.startswith(\" #\"):\r\n return False # comment line\r\n return line", "def skip_line(line):\n return IGNORE_LINES.search(line) is not None", "def IgnoreLine(self, str):\n if not str.strip(): return True\n else: return str.startswith('==') or str.startswith('**')", "def isThereApartToIgnore(self,line):\n good_line = ''\n curr_line = line\n # there are 3 options: or the first of the next line is a comment, or a qoute, or a //. each time we will check\n # what is first\n global multi_comment_line_mode\n bad_line = line.find(\"//\")\n bad_part_start = line.find(\"/*\")\n if (bad_line == -1 and bad_part_start == -1 and not multi_comment_line_mode):\n # if there is no problem\n return line\n while curr_line != '':\n bad_line = curr_line.find(\"//\")\n curr_lenght_line = len(curr_line)\n bad_part_start = curr_line.find(\"/*\")\n qoutes_start = curr_line.find('\"')\n # handling the case in which bad part is first\n if bad_line==-1 and bad_part_start==-1 and qoutes_start==-1:\n good_line += ' ' + curr_line\n return good_line\n if (bad_line!=-1 and bad_part_start!= -1 and qoutes_start!=-1 and\n bad_part_start == min(bad_part_start,bad_line,qoutes_start) or (bad_part_start!=-1 and bad_line==-1\n and qoutes_start == -1) or (bad_part_start!=-1 and bad_line==-1 and qoutes_start!=-1\n and bad_part_start < qoutes_start )or\n (bad_part_start!=-1 and bad_line!=-1 and qoutes_start==-1 and\n bad_part_start < bad_line )):\n curr_bad = curr_line[bad_part_start:]\n bad_part_end = curr_bad.find(\"*/\")\n good_line += ' ' +curr_line[:bad_part_start]# adding this part to good line\n if bad_part_end != -1:\n # good_line += curr_line[:bad_part_start]\n if bad_part_start + bad_part_end + 2 == curr_lenght_line - 1:\n break\n curr_line = curr_line[bad_part_start + bad_part_end + 2:]\n continue\n else:\n # in this case there are more lines which are bad\n # global multi_comment_line_mode\n multi_comment_line_mode = True\n return good_line\n # hadling the case in which bad line is first\n elif ((bad_line!=-1 and bad_part_start!= -1 and qoutes_start!=-1 and\n bad_line == min(bad_part_start,bad_line,qoutes_start))or\n (qoutes_start == -1 and bad_line !=-1 and bad_part_start == -1) or (qoutes_start!=-1 and bad_line!=-1\n and bad_line<qoutes_start ) or (bad_line!=-1 and bad_part_start!=-1 and qoutes_start ==-1\n and bad_line<bad_part_start)):\n curr_line = curr_line[:bad_line]\n continue\n # handling the case in which quates the first\n if(bad_line!=-1 and bad_part_start!= -1 and qoutes_start!=-1 and\n qoutes_start == min(bad_part_start,bad_line,qoutes_start) or\n (qoutes_start != -1 and bad_line ==-1 and bad_part_start==-1) or\n (qoutes_start != -1 and bad_line !=-1 and bad_part_start==-1 and qoutes_start<bad_line) or\n (qoutes_start != -1 and bad_part_start !=-1 and bad_line==-1 and qoutes_start<bad_part_start)):\n end_qoutes = curr_line[qoutes_start+1:].find('\"')\n good_line+=' '+curr_line[:qoutes_start]+curr_line[qoutes_start:end_qoutes+qoutes_start+2]\n curr_line = curr_line[end_qoutes+qoutes_start+2:]\n continue\n # need???\n elif ((qoutes_start!=-1 and bad_part_start!=-1 and qoutes_start > bad_part_start) or\n (qoutes_start==-1 and bad_part_start!=-1)):\n curr_bad = curr_line[bad_part_start:]\n bad_part_end = curr_bad.find(\"*/\")\n if bad_part_end != -1:\n good_line += ' '+curr_line[:bad_part_start] # adding this part to good line\n if 
bad_part_start+bad_part_end+2 == curr_lenght_line-1:\n break\n curr_line = curr_line[bad_part_start+bad_part_end+2:]\n else:\n # in this case there are more lines which are bad\n multi_comment_line_mode = True\n return good_line\n else:\n good_line+=' '+ curr_line\n break\n return good_line", "def _is_comment_line(self):\n pattern = re.compile(r\"^(\\s)*(//)+\")\n return pattern.search(self._line)", "def _is_comment_or_blank(line):\n return re.sub(\"#.*\", \"\", line).rstrip() == \"\"", "def __clean_line_comments(self):\n self.lines = [l for l in self.lines if not l.startswith(\"//\") and len(l) != 0]", "def __clean_line_comments(self):\n self.lines = [l for l in self.lines if not l.startswith(\"//\") and len(l) != 0]", "def is_skippable(line: str) -> bool:\n return len(line) == 0 or line[0] == ';'", "def ignores_line(self, line):\n # Ignore empty lines stemming from only a line break.\n if not line.strip():\n # Yes, ignore the line if it's empty.\n return True\n # Either a `_SRE_Match` instance or `None`\n match = self._total_regex.search(line)\n return bool(match)", "def is_line(self): \n return False", "def emptyline(self):", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def emptyline(self):\n pass", "def _LineContainsRelevantDisableComment(line: str, removal_type: str) -> bool:\n if FINDER_DISABLE_COMMENT_GENERAL in line:\n return True\n if FINDER_DISABLE_COMMENT_BASE + removal_type in line:\n return True\n return False" ]
[ "0.7507151", "0.7377068", "0.73594356", "0.7340736", "0.72747046", "0.7187449", "0.71126205", "0.71126205", "0.7084072", "0.69851965", "0.69462186", "0.68963456", "0.6768147", "0.6768147", "0.6768147", "0.6768147", "0.6768147", "0.6768147", "0.6768147", "0.6768147", "0.6768147", "0.6768147", "0.6768147", "0.6768147", "0.6768147", "0.6768147", "0.6768147", "0.6768147", "0.6768147", "0.67454976" ]
0.80938894
0
creates a new token and adds it to the list of tokens.
def createToken(self,type,value):
    newToken = Token(type, value)
    self.tokensTable.append(newToken)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addToken(self, tokenType, value):\r\n\t\tself.tokens.append( Token(tokenType, value) )", "def add_token(self,token):\n\t\tif not token:\n\t\t\tlogging.error(\"Token cannot be empty!\")\n\t\t\texit()\n\n\t\tself.tokens.append(token.lower())\n\t\t#self.user_defined_token = token.lower()", "def addToken(self, token: Token, offset: int):\n self.__tokens.append(token)\n self.__tokenMap[token] = offset", "def push_token(self, tok):\n self.tokens.appendleft(tok)", "def add_token(self, token):\n token = self.process_token(token)\n self._token_count.update([token])", "def add_token(self, token):\n token = self.process_token(token)\n self._token_count.update([token])", "def _add_token(self, token_type: TokenType, literal: Any = None):\n text = self.source[self.start : self.current]\n\n self.tokens.append(\n Token(token_type=token_type, lexeme=text, literal=literal, line=self.line)\n )", "def insert_tokens(self, token_list):\n\n assert type(token_list) == list, \"Token list should be a list of tuples of the format (token_name, regex).\"\n\n self.tokens += token_list\n self.__create_regex()", "def build_tokens(self):\n self.advance()\n while self.__token != \"\":\n self.__tokens.append(self.token_type())\n self.advance()", "def AddToken(self, token, merge=False):\n token.SetLangId(self.id_)\n if not merge:\n self.tokens_.append(token)\n else:\n exists = self.MatchToken(token)\n if exists is None:\n self.tokens_.append(token)\n else:\n exists.IncrementCount(token.Count())", "def create_token(self, token_id, data):\n raise exception.NotImplemented() # pragma: no cover", "def add_tokens(self, tokens):\n self.result.extend([d for d in tokens])", "def __iadd__(self, other):\n if isinstance(other, Token):\n new = Token(self.text + other.text, self.position, self.category)\n else:\n new = Token(self.text + other, self.position, self.category)\n return new", "def add_token(\r\n self,\r\n ind: int,\r\n w: str,\r\n ) -> AddToken:\r\n self.t = self.t + 1\r\n content = self._encrypt_update(self.t, Op.ADD, ind, w)\r\n return self.sigma.add_token(content, w)", "def __add__(self, other):\n if isinstance(other, Token):\n return Token(self.text + other.text, self.position, self.category)\n else:\n return Token(self.text + other, self.position, self.category)", "def _add_non_empty_token(self, token: str):\n if token != \"\":\n self._tokens.append(token)", "def add(self, token):\n self.rpc.call(MsfRpcMethod.AuthTokenAdd, [token])", "def token(self, id):\r\n return Token(self, id)", "def token(self, value):\r\n self._token = value", "def Token(l, token):\n\n return Red(l, lambda _: token)", "def addTokenFromBuffer(self, buff):\r\n\t\t# If buffer is empty, we don't bother adding it as a token\r\n\t\tif len(buff) == 0:\r\n\t\t\treturn\r\n\r\n\t\t# Get contents of buffer as a string\r\n\t\tbufferStr = \"\".join(buff)\r\n\t\t# Check if buffer is a keyword we care about\r\n\t\tif bufferStr == \"from\":\r\n\t\t\tself.tokens.append( Token(\"from\") )\r\n\t\telif bufferStr == \"import\":\r\n\t\t\tself.tokens.append( Token(\"import\") )\r\n\t\telse:\r\n\t\t\t# Check if buffer is a valid identifier.\r\n\t\t\tif self.IDENTIFIER_REGEX.search(bufferStr):\r\n\t\t\t\ttokenType = \"identifier\"\r\n\t\t\telse:\r\n\t\t\t\ttokenType = \"other\"\r\n\t\t\t# Add token with the found type and make sure to clear the buffer\r\n\t\t\tself.tokens.append( Token(tokenType, bufferStr) )\r\n\t\t# Clear buffer\r\n\t\tdel buff[:]", "def add_tokens(self, tokens):\n if self.pad:\n tokens = [START_OF_SEQ] * self.order + tokens + [END_OF_SEQ]\n\n for 
i in range(len(tokens) - self.order):\n current_state = tuple(tokens[i:i + self.order])\n next_state = tokens[i + self.order]\n self.add_state(current_state, next_state)", "def append_position_to_token_list(token_list):\r\n return [PositionToken(value.content, value.gd, index, index+1) for (index, value) in enumerate(token_list)]", "def __init__(self):\n self.tokens = []", "def visit(self, token: tokenize.TokenInfo) -> None:\n self._lines[token.start[0]].append(token)", "def visit(self, token: tokenize.TokenInfo) -> None:\n self._lines[token.start[0]].append(token)", "def __create_list(self, tokens : List[Union[str,int]]) -> List[List[Union[str,int]]]:\n if tokens:\n return [self.__add_instruction(cp(tokens[:1+syntaxParametersDict.get(tokens[0])]))] + self.__create_list(cp(tokens[1+syntaxParametersDict.get(tokens[0]):]))\n return []", "def tokens(self):\r\n return Tokens(self)", "def add_token(self, amount):\n self.M += amount", "def _make_tokens(self, count, token_type=None):\n if token_type:\n for _ in range(count):\n yield (self._make_token(token_type), token_type)\n else:\n for _ in range(count):\n yield self._make_token(token_type)" ]
[ "0.742958", "0.72666687", "0.70169747", "0.70086443", "0.6972637", "0.6972637", "0.6870133", "0.6566164", "0.6503664", "0.6501116", "0.6451819", "0.63962394", "0.6373598", "0.62586296", "0.62049127", "0.6193795", "0.6131426", "0.60756105", "0.60682315", "0.60681415", "0.60532385", "0.6029612", "0.6023692", "0.60177064", "0.60028225", "0.60028225", "0.60021365", "0.59950197", "0.59839445", "0.59100974" ]
0.7692151
0
this function will just create a temporary xml file with all the tokens with their labels, in order to debug. this xml will not be generated in the final program
def writeToTempXml(self):
    name = self.fileToProcess.name
    all_tokens = ET.Element("tokens")
    for token in self.tokensTable:
        if token.getType() == KEYWORD:
            keyword = ET.SubElement(all_tokens, "keyword")
            keyword.text = ' '+token.getValue()+' '
        elif token.getType() == IDENTIFIER:
            identifier = ET.SubElement(all_tokens, "identifier")
            identifier.text = ' '+token.getValue()+' '
        elif token.getType() == SYMBOL:
            symbol = ET.SubElement(all_tokens, "symbol")
            symbol.text = ' '+token.getValue()+' '
        elif token.getType() == STRING_CONST:
            stringConstant = ET.SubElement(all_tokens, "stringConstant")
            stringConstant.text = ' '+token.getValue()+' '
        elif token.getType() == INT_CONST:
            integerConstant = ET.SubElement(all_tokens, "integerConstant")
            integerConstant.text = ' '+token.getValue()+' '
    tree = ET.ElementTree(all_tokens)
    tree.write(name + 'T' + '.xml')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tokens_dump(docid):\n tagged_strings = set()\n labels = get_labels()\n tagged_sequence = labels # replacing prep_inputs method. still works?\n tagged_strings.add(tuple(tagged_sequence))\n outfile = SETTINGS.XML_LOCATION + \"/\" + docid + \".xml\"\n try:\n os.remove(outfile)\n except OSError:\n pass\n appendListToXMLfile(tagged_strings,\n MODULE,\n outfile)\n if len(queue) == 0:\n return \"All done!\"\n else:\n return queue.pop(0)", "def GenerateXML(dictionary, fileName=\"labelling.xml\") : \n root = gfg.Element(\"annotation\") \n #the big section is called Annotation\n for key in dictionary:\n #for every polygon list in inside object witho subelement name and attributes and the type \"polygon\"\n objectElement = gfg.Element(\"object\") \n root.append(objectElement) \n subElement1 = gfg.SubElement(objectElement, \"name:\".strip(\":\"))\n subElement1.text = str(dictionary[key][\"name\"])\n subElement2 = gfg.SubElement(objectElement, \"attributes\".strip(\":\"))\n subElement2.text = str(dictionary[key][\"attributes\"])\n subElement3 = gfg.SubElement(objectElement, \"polygon\")\n \n for i in range(0, len(dictionary[key])-2):\n #for every vertex of the polygon list it's rounded x, y on xml\n SubInsidePolygon = gfg.SubElement(subElement3, \"pt\")\n sub_x = gfg.SubElement(SubInsidePolygon, \"x\")\n sub_y = gfg.SubElement(SubInsidePolygon, \"y\")\n sub_x.text = str(int(round(dictionary[key][\"x_y_\" + str(i)][0])))\n sub_y.text = str(int(round(dictionary[key][\"x_y_\" + str(i)][1])))\n tree = gfg.ElementTree(root) \n #create the xml tree\n with open (fileName, \"wb\") as files : \n tree.write(files) \n #if xml does not exist create one otherwise rewrite to it", "def tokenize(self):\n self.__create_tokens()\n copy_obj = rtf2xml.copy.Copy(bug_handler = self.__bug_handler)\n if self.__copy:\n copy_obj.copy_file(self.__write_to, \"tokenize.data\")\n copy_obj.rename(self.__write_to, self.__file)\n os.remove(self.__write_to)", "def create_xmlfile(images_path, txt_file,class_name):\n\n#\tsavepath = os.path.join(images_path, \"{}_annotations\".format(class_name))\n#\tprint \"savepath:{}\".format(savepath)\n#\tif not os.path.exists(savepath):\n#\t\tos.mkdir(savepath)\n\n\ttxt = open(txt_file, 'r')\n\tfor line in txt:\n#\t\tpdb.set_trace()\n\t\tprint ('line:{}'.format(line))\n\t\twords = line.split(\" \")\n\t\tword_len = len(words)\n\t\tprint('length of words:{}'.format(word_len))\n\t\tprint (\"word_len:{}\".format(word_len))\n\t\t\n\t\tif word_len >3:\n\t\t\ta,b = words[0].split('.')\n\t\t\t\n\t\t\timg_path =a+'.jpg' #words[0]\n\t\t\timg_name =img_path # os.path.basename(img_path)\n\t\t\tprint ('image Name:%s'%img_name)\n\t\t\timg = Image.open('/home/graymatics/py-faster-rcnn/data/violence/'+img_name)\n\t\t\tprint(img)\n\t\t\tw,h = img.size\n\t\t\t#create xml\n\t\t\tannotation = et.Element('annotation')\n\t\t\tet.SubElement(annotation,'folder').text = 'demo'\n\t\t\tet.SubElement(annotation,'filename').text = img_name\n\n\t\t\tsource = et.SubElement(annotation, 'source')\n\t\t\tet.SubElement(source, 'database').text = 'internet'\n\t\t\tet.SubElement(source, 'annotation').text = 'Lyushuen'\n\t\t\tet.SubElement(source, 'image').text = 'unknown'\n\n\t\t\tsize = et.SubElement(annotation, 'size')\n\t\t\tet.SubElement(size, 'width').text = str(w)\n\t\t\tet.SubElement(size, 'height').text =str(h)\n\t\t\tet.SubElement(size, 'depth').text = '3'\n\n\t\t\tet.SubElement(annotation, 'segmented').text = str(0)\n\t for i in range(word_len/4 + 1):\n print (\"I size:{}\".format(i))\n if i == 0:\n print \"Image name 
is :{}\".format(words[0])\n elif i >= 1:\n index = i - 1\n\n\t\t\t\t\tobj = et.SubElement(annotation, 'object')\n\t\t\t\t\tet.SubElement(obj, 'name').text = class_name #words[5]#class_name\n\t\t\t\t\tet.SubElement(obj, 'pose').text = 'Unspecified'\n\t\t\t\t\tet.SubElement(obj, 'truncated').text = '0'\n\t\t \t\t \tet.SubElement(obj, 'difficult').text = '0'\n\n\t\t \t\t\tbox = et.SubElement(obj, 'bndbox')\n\t\t\t \t\tet.SubElement(box, 'xmin').text = str(int(round(float(words[index*4+1]))))\n\t\t\t \t\tet.SubElement(box, 'ymin').text = str(int(round(float(words[index*4+2]))))\n\t\t\t \t\tet.SubElement(box, 'xmax').text = str(int(round(float(words[index*4+3]))))\n\t\t\t \t\tet.SubElement(box, 'ymax').text = str(int(round(float(words[index*4+4]))))\n\n\t\t #write to file\n\t\t \tname, exten = os.path.splitext(img_name)\n\t\t \tanno_path = os.path.join(src_img,name+'.xml') #path of annotation files\n\t\t\tprint \"anno_path:{}\".format(anno_path)\n\t\t \ttree = et.ElementTree(annotation)\n\t\t \ttree.write(anno_path)\n\ttxt.close()", "def tokenize(self, file_name):\n main_body = self.cast.nodes[0].body[-1]\n token_string = self.visit(main_body)\n\n variable_map = self.dump_var_map()\n value_map = self.dump_val_map()\n\n out_file = open(file_name, \"w\")\n out_file.write(f\"{token_string}\\n\")\n\n for var in variable_map:\n out_file.write(f\"{var}\\n\")\n\n for val in value_map:\n out_file.write(f\"{val}\\n\")", "def create_xml_regression(lfiles, lsbj, foxml):\n\n impl = xml.dom.minidom.getDOMImplementation()\n doc = impl.createDocument(None, \"some_tag\", None)\n top_element = doc.documentElement\n\n e = doc.createElement('subject')\n e.setAttribute('id', 'case')\n\n for i, fn in enumerate(lfiles):\n v = doc.createElement('visit')\n v.setAttribute('id', \"subj{}\".format(i))\n\n f = doc.createElement('filename')\n f.setAttribute('object_id', \"face\")\n t = doc.createTextNode(fn)\n f.appendChild(t)\n\n a = doc.createElement('age')\n x = doc.createTextNode(str(lsbj[i][\"age\"]))\n a.appendChild(x)\n\n\n v.appendChild(f)\n v.appendChild(a)\n e.appendChild(v)\n\n top_element.appendChild(e)\n\n with open(foxml, \"w\") as fo:\n fo.write(doc.toprettyxml())", "def write_tokens(self, tokenizer):\n output_file = '{}ktT.xml'.format(tokenizer.filename[:-5])\n with open(output_file, 'w') as f:\n print 'writing tokens to {}'.format(output_file)\n f.write(''.join(tokenizer.token_output))", "def __gen_tokenization_file(self):\n paula_id = '{}.{}.tok'.format(self.corpus_name, self.name)\n E, tree = gen_paula_etree(paula_id)\n self.paulamap['tokenization'] = paula_id\n\n base_paula_id = '{}.{}.text'.format(self.corpus_name, self.name)\n mlist = E('markList',\n {'type': 'tok',\n XMLBASE: base_paula_id+'.xml'})\n tok_tuples = self.dg.get_tokens()\n for (tid, onset, tlen) in get_onsets(tok_tuples):\n # even SaltNPepper still uses xpointers for string-ranges!\n xp = \"#xpointer(string-range(//body,'',{},{}))\".format(onset, tlen)\n mlist.append(E('mark', {'id': tid,\n XLINKHREF: xp}))\n tree.append(mlist)\n self.files[paula_id] = tree\n self.file2dtd[paula_id] = PaulaDTDs.mark\n return paula_id", "def print_xml(self, filename):\n\n # TODO: check what happens when input is not an xml file\n # TODO: add xmldec, processing instructions and comments\n\n xml_string = u'' # TODO: use a string buffer\n offset = 0\n stack = []\n\n for char in self.text:\n\n # any tags on the stack that can be closed?\n (stack, matching) = self._matching_closing_tags(offset, stack, [])\n for t in matching:\n xml_string += \"</%s>\" % 
t.name\n\n # any new opening tags?\n for t in self.source_tags.opening_tags.get(offset,[]):\n stack.append(t)\n xml_string += \"<%s%s>\" % (t.name, t.attributes_as_string())\n\n # any of those need to be closed immediately (non-consuming tags)?\n (stack, matching) = self._matching_closing_tags(offset, stack, [])\n for t in matching:\n xml_string += \"</%s>\" % t.name\n\n xml_string += escape(char)\n offset += 1\n\n fh = open(filename, 'w')\n fh.write(xml_string.encode('utf-8'))", "def saveSessionToXML(self, filename):\r\n xmlStr = self.createXMLStr()\r\n \r\n #Write to the file\r\n #xml.dom.ext.PrettyPrint(doc, open(filename, 'w'))\r\n xmlFile = open(filename, 'w')\r\n xmlFile.write(xmlStr)\r\n xmlFile.close()", "def writexml(file):\n OUTFILE=open(file,\"w\")\n doc = xml.dom.minidom.Document()\n\n # Create the <dec_reg_list> base element\n decl_reg_list = doc.createElement(\"decl_reg_list\")\n doc.appendChild(decl_reg_list)\n\n regname_old=\"\"\n rows.pop(0)\n for row in rows:\n (regdesc,regname,offset,default,regtype,expose_reg,depth,incsz,bitdesc,bitname,loc,bittype)= row\n if regname != regname_old:\n # Create the register element\n register = doc.createElement(\"register\")\n register.setAttribute(\"name\", regname)\n register.setAttribute(\"offset\", offset)\n if default != \"\" : register.setAttribute(\"default\", default)\n register.setAttribute(\"type\", regtype)\n if expose_reg == \"1\": register.setAttribute(\"usr\", expose_reg)\n if depth != \"\": register.setAttribute(\"size\", depth)\n if incsz != \"\": register.setAttribute(\"incsz\", incsz)\n text = doc.createTextNode(regdesc)\n register.appendChild(text)\n decl_reg_list.appendChild(register)\n \n # Create the field element\n if bitname != \"\":\n field = doc.createElement(\"field\")\n field.setAttribute(\"name\", bitname)\n if loc !=\"\": field.setAttribute(\"loc\", addcolon(loc))\n if bittype != \"\": field.setAttribute(\"type\", bittype)\n if bitdesc != \"\":\n text = doc.createTextNode(bitdesc)\n field.appendChild(text)\n register.appendChild(field)\n regname_old = regname\n\n\n # Print our newly created XML\n #print doc.toprettyxml(indent=\" \")\n #OUTFILE.write(doc.saveXML(decl_reg_list))\n OUTFILE.write(doc.toprettyxml(indent=\" \"))\n OUTFILE.close()", "def tokensToXML(tokens: Tokens) -> str:\n xml = \"<tokens>\\n\"\n\n for token in tokens:\n xml += token.toXML() + \"\\n\"\n\n xml += \"</tokens>\\n\"\n\n return xml", "def create_gen_xml(self, out_file):\n\n param_list = []\n msg = []\n msg_type = []\n dep_node = []\n for line in self.full_ed_lines:\n param_list.append(line.text())\n dep_pkg = param_list[6].split(', ')\n if dep_pkg[len(dep_pkg) - 1] == '':\n dep_pkg.pop()\n for dep in self.manager.wid.sub_list:\n dep_node.append(dep['msg_type'])\n for dep in self.manager.wid.pub_list:\n dep_node.append(dep['msg_type'])\n for dep in dep_node:\n a, b = dep.split('/')\n msg.append(a)\n msg_type.append(b)\n f = open('../genkernel/templates/package_rosgen.xml')\n o = open(out_file, 'a')\n flag = 0\n while 1:\n line = f.readline()\n if not line: break\n for i in range(6):\n line = line.replace('[{0}]'.format(i), param_list[i])\n line = line.replace('[7]', param_list[7])\n if line.find('[6]') != -1:\n for dep in dep_pkg:\n line_dep = '\\t<depend>{0}</depend>\\n'.format(dep)\n o.write(line_dep)\n flag = 1\n elif line.find('[8]') != -1:\n for dep, tp in zip(msg, msg_type):\n line_dep = '\\t\\t<depend type=\"{1}\">{0}</depend>\\n'.format(dep, tp)\n o.write(line_dep)\n flag = 1\n elif line.find('<subscribers>') != -1:\n 
o.write('\\t\\t<subscribers>\\n')\n for sub in self.manager.wid.sub_list:\n o.write('\\t\\t\\t<sub>\\n')\n o.write('\\t\\t\\t\\t<name>{0}</name>\\n'.format(sub['name']))\n o.write('\\t\\t\\t\\t<msg_type>{0}</msg_type>\\n'.format(sub['msg_type']))\n o.write('\\t\\t\\t\\t<topic_name>{0}</topic_name>\\n'.format(sub['topic_name']))\n o.write('\\t\\t\\t\\t<queue_size>{0}</queue_size>\\n'.format(sub['queue_size']))\n o.write('\\t\\t\\t</sub>\\n')\n o.write('\\t\\t</subscribers>\\n')\n flag = 1\n elif line.find('<publishers>') != -1:\n o.write('\\t\\t<publishers>\\n')\n for pub in self.manager.wid.pub_list:\n o.write('\\t\\t\\t<pub>\\n')\n o.write('\\t\\t\\t\\t<name>{0}</name>\\n'.format(pub['name']))\n o.write('\\t\\t\\t\\t<msg_type>{0}</msg_type>\\n'.format(pub['msg_type']))\n o.write('\\t\\t\\t\\t<topic_name>{0}</topic_name>\\n'.format(pub['topic_name']))\n o.write('\\t\\t\\t\\t<queue_size>{0}</queue_size>\\n'.format(pub['queue_size']))\n o.write('\\t\\t\\t</pub>\\n')\n o.write('\\t\\t</publishers>\\n')\n flag = 1\n if flag == 0:\n o.write(line)\n else:\n flag = 0\n o.close()\n f.close()\n self.changed = False", "def raw_to_xml(self):\n xmlfilename = self.logfilename.replace('.raw','.xml')\n fout = codecs.open(xmlfilename, encoding=\"utf-8\", mode=\"w\")\n for line in codecs.open(self.logfilename,encoding=\"utf-8\"):\n fout.write(sanitize(line))\n\n fout.close()\n return xmlfilename", "def create_config(self, memory: str, corpora: str, cur_dir: str, postprocessing: str = \"\") -> Tuple[str, str]:\n _, filepath = mkstemp(dir=cur_dir, suffix=\".xml\")\n memory_file = p.join(cur_dir, \"memory\"+memory)\n with open(filepath, \"w\") as file:\n file.write(TEMPLATE.format(\n memory=memory_file,\n corpora=corpora,\n postprocessing=postprocessing\n ))\n return filepath, memory_file", "def save_xunit(self,filename):\n f = open(filename,'w')\n f.write('<?xml version=\"1.0\" encoding=\"UTF-8\"?>')\n f.write('<testsuite name=\"fbtest\" tests=\"%i\" errors=\"%i\" failures=\"%i\" untested=\"%i\" skip=\"%i\">' %\n (len(self.results),self.get_error_count(),self.get_fail_count(),\n self.get_untested_count(),self.get_skipped_count()))\n for result in self.values():\n if result.outcome == Result.PASS:\n f.write('<testcase classname=\"Test\" name=\"%s\" time=\"%.3f\" />' % (\n result.id,result.get_elapsed()))\n else:\n f.write('<testcase classname=\"Test\" name=\"%s\" time=\"%.3f\">' % (\n result.id,result.get_elapsed()))\n if result.outcome == Result.ERROR:\n if result.has_key(Result.EXCEPTION):\n e = result[Result.EXCEPTION]\n exc = e[:e.find(':')]\n msg = e[e.find(':')+2:]\n exc = exc[exc.find(\"'\")+1:exc.rfind(\"'\")]\n msg = msg.lstrip()\n f.write('<error type=%s message=%s>' % (self._quoteattr(exc),\n self._quoteattr(msg)))\n f.write('</error>')\n else:\n msg = result.get_cause()\n f.write('<error type=\"error\" message=%s>' % (self._quoteattr(msg)))\n f.write('</error>')\n elif result.outcome == Result.FAIL:\n for key in ['ISQL_stripped_diff','Python_stripped_diff',\n 'ISQL_stderr_stripped_diff',\n 'Python_stderr_stripped_diff']:\n if result.has_key(key):\n cdata = as_utf8(result[key])\n f.write('<failure type=\"fail\" message=%s>' % self._quoteattr(result.get_cause()))\n f.write('<![CDATA[%s]]>' % escape_cdata(cdata))\n f.write('</failure>')\n elif result.outcome == Result.UNTESTED:\n f.write('<failure type=\"untested\" message=%s>' % self._quoteattr(result.get_cause()))\n f.write('</failure>')\n elif result.outcome == Result.SKIPPED:\n f.write('<failure type=\"skipped\" message=%s>' % 
self._quoteattr(result.get_cause()))\n f.write('</failure>')\n f.write('</testcase>')\n f.write('</testsuite>')\n f.close()", "def create_parser_file():\n lark_file = os.path.join(dirname(__file__), 'hcl2.lark')\n with open(lark_file, 'r') as lark_file, open(PARSER_FILE, 'w') as parser_file:\n lark_inst = Lark(lark_file.read(), parser=\"lalr\", lexer=\"standard\")\n\n data, memo = lark_inst.memo_serialize([TerminalDef, Rule])\n\n print(PARSER_FILE_TEMPLATE % (data, memo), file=parser_file)", "def export_gexf_termos(rotulos,similaridades,nome_arquivo,threshold,excluir_negativos):\n\n tbl = dict.fromkeys(i for i in range(sys.maxunicode) if unicodedata.category(chr(i)).startswith('P'))\n\n arquivo = codecs.open(nome_arquivo + \".gexf\",\"w\",\"utf-8\")\n arquivo.write('<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n')\n arquivo.write('<gexf xmlns=\"http://www.gexf.net/1.2draft\" version=\"1.2\">\\n')\n arquivo.write('\\t<graph mode=\"static\" defaultedgetype=\"undirected\">\\n')\n arquivo.write('\\t\\t\\t<nodes>\\n')\n arquivo.flush()\n\n cont=0\n cont2=0;\n for key in rotulos:\n arquivo.write(u\"\\t\\t\\t\\t<node id=\\\"%d\\\" label=\\\"%s\\\"/>\\n\" % (cont2,key))\n cont = cont+1\n cont2 = cont2+1\n if cont == 50:\n arquivo.flush()\n cont = 0\n\n arquivo.write('\\t\\t\\t</nodes>\\n')\n arquivo.write('\\t\\t\\t<edges>\\n')\n arquivo.flush()\n\n cont=0\n for similaridade in similaridades:\n if(excluir_negativos and (similaridade[2] < 0)):\n continue\n\n if abs(similaridade[2]) >= threshold:\n label = ' - '.join((similaridade[0],similaridade[1]))\n arquivo.write(\"\\t\\t\\t\\t<edge source=\\\"%d\\\" target=\\\"%d\\\" weight=\\\"%f\\\" label=\\\"%s\\\" />\\n\" % (rotulos.index(similaridade[0]),rotulos.index(similaridade[1]),similaridade[2],label))\n\n cont = cont+1\n if cont == 50:\n arquivo.flush()\n cont = 0\n\n arquivo.write('\\t\\t\\t</edges>\\n')\n arquivo.write('\\t</graph>\\n')\n arquivo.write('</gexf>')\n arquivo.close() # you can omit in most cases as the destructor will call it", "def saving_file(xml):\r\n\r\n xml_string = etree.tostring(xml)\r\n parsed = minidom.parseString(xml_string)\r\n with open(self.app_path + \"\\\\temp_\\\\\" + file_name + \".xml\", \"w\") as file:\r\n file.write(parsed.toprettyxml(indent=\" \"))", "def creating_feature_vector():\r\n\twordlist = []\r\n\tlabel = \"\"\r\n\tfw = open(\"feature_vector.txt\", \"w+\", encoding = \"utf-8\")\r\n\twith open(\"D:\\\\Python_Prac\\\\wordstag\\\\modules\\\\HI_EN_TRAIN.txt\", \"r\", encoding = \"utf-8\") as f:\r\n\t\tfor line in f:\r\n\t\t\twordlist.append(line)\r\n\t\tfor index, line in enumerate(wordlist):\r\n\t\t\tif line == \"\\n\":\r\n\t\t\t\tcontinue\r\n\t\t\tcontext = line.split(\"\\t\")\r\n\t\t\tlabel = context[1]\r\n\t\t\tfeature_vector = label+\" \"\r\n\t\t\tngram_vector = ngram_frequency(str(context[0]))\r\n\t\t\tfor vector in ngram_vector:\r\n\t\t\t\tfeature_vector += str(vector)+\" \"\r\n\t\t\tfeature_vector += str(is_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_hindi(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_abbr(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_hindi(context[0]))+\" \"\r\n\t\t\tbefore = [0,0,0]\r\n\t\t\tafter = [0,0,0]\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index-i) < 0 or (index-i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tbefore[2-i] = get_word_context(wordlist[index-i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index+i+1) > 
len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tafter[2-i] = get_word_context(wordlist[index+i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in before:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfor i in after:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfeature_vector += \"\\n\"\r\n\t\t\tfw.write(feature_vector)\r\n\t\t\tprint(\"Proceeding...\"+str(index+1)+\" of 16683\")\r\n\r\n\tfw.close()", "def split_file(filename):\n \n \n#tree = ElementTree.ElementTree()\n#root = ElementTree.Element(\"root\")\n#a = ElementTree.Element(\"a\")\n#a.text = \"1\"\n#root.append(a)\n#tree._setroot(root)\n#tree.write(\"sample.xml\" \n\n \n find_counter = 0\n check_counter = 0 \n tree_file = files()\n #outfile = next(tree_file)\n \n \n with open(filename,mode =\"r\") as file :\n \n for line in file :\n \n if line.startswith(\"<?xml\"):\n outfile = next(tree_file)\n outfile.write(line)", "def xml2tokens(xml_tagged_sent, tokenized_sent, raw_sent):\n raw, entities = get_entities(xml_tagged_sent)\n if re.search(r\"ENAMEX\", raw):\n print(xml_tagged_sent)\n print(raw)\n # count += 1\n\n tokens, syllables = word_tokenize(tokenized_sent, raw_sent)\n level1_syl_tags = [\"O\" for i in range(len(syllables))]\n level2_syl_tags = [\"O\" for i in range(len(syllables))]\n level3_syl_tags = [\"O\" for i in range(len(syllables))]\n\n level1_token_tags = [\"O\" for i in range(len(tokens))]\n level2_token_tags = [\"O\" for i in range(len(tokens))]\n level3_token_tags = [\"O\" for i in range(len(tokens))]\n\n flag = False\n for entity in entities:\n value = entity[\"value\"]\n start = entity[\"start\"]\n end = entity[\"end\"]\n entity_type = entity[\"type\"]\n start_syl_id, end_syl_id = find_syl_index(start, end, syllables)\n start_tok_id, end_tok_id = find_tok_index(start_syl_id, end_syl_id, tokens)\n\n if start_syl_id != None and end_syl_id != None:\n if entity[\"level\"] == 1:\n level1_syl_tags[start_syl_id] = \"B-\" + entity_type\n for i in range(start_syl_id + 1, end_syl_id):\n level1_syl_tags[i] = \"I-\" + entity_type\n elif entity[\"level\"] == 2:\n level2_syl_tags[start_syl_id] = \"B-\" + entity_type\n for i in range(start_syl_id + 1, end_syl_id):\n level2_syl_tags[i] = \"I-\" + entity_type\n else:\n level3_syl_tags[start_syl_id] = \"B-\" + entity_type\n for i in range(start_syl_id + 1, end_syl_id):\n level3_syl_tags[i] = \"I-\" + entity_type\n else:\n print(\"{},{},\\\"{}\\\" in '{}' ({})\".format(start,end,value,raw,xml_tagged_sent))\n flag = True\n\n if start_tok_id != None and end_tok_id != None:\n if entity[\"level\"] == 1:\n level1_token_tags[start_tok_id] = \"B-\" + entity_type\n for i in range(start_tok_id+1, end_tok_id):\n level1_token_tags[i] = \"I-\" + entity_type\n elif entity[\"level\"] == 2:\n level2_token_tags[start_tok_id] = \"B-\" + entity_type\n for i in range(start_tok_id + 1, end_tok_id):\n level2_token_tags[i] = \"I-\" + entity_type\n else:\n level3_token_tags[start_tok_id] = \"B-\" + entity_type\n for i in range(start_tok_id + 1, end_tok_id):\n level3_token_tags[i] = \"I-\" + entity_type\n else:\n pass\n # print(\"{},{},\\\"{}\\\" in '{}' ({})\".format(start_syl_id, end_syl_id, value, raw, xml_tagged_sent))\n\n ret_syllables = list(zip([ s.text for s in syllables], level1_syl_tags, level2_syl_tags, level3_syl_tags))\n ret_tokens = list(zip( [tk.text for tk in tokens], level1_token_tags, level2_token_tags, level3_token_tags))\n return ret_syllables, ret_tokens, raw, flag", "def _create_xml_report(self, test, xml_obj):\n xml_report_path = os.path.join(test.work_dir,\n self.XML_REPORT_PATH)\n 
with open(xml_report_path, 'w') as xml_report:\n xml_report.write(etree.tostring(xml_obj, pretty_print=True))", "def create_tags(tag_dict, o_tree):\n for i, o in tag_dict.items():\n subtag1 = o_tree.find(o[0])\n subtag2 = etree.Element(i)\n subtag1.addnext(subtag2)\n o_tree.write(f'{output_path}/ppt/presentation.xml', pretty_print=True, xml_declaration=True, encoding='UTF-8', standalone=True)\n \n return", "def prettyprint(self, _file):\n xstr = \"reg \" + self.name + \" \" + self.type.desc()\n _file.write(xstr + \"\\n\")", "def writeXmlForDET(filename,trigger,listDict,stopInfo):\n # trigger == 'variable trigger'\n # Variables == 'variables changed in the branch control logic block'\n # associated_pb = 'CDF' in case multibranch needs to be performed\n # stopInfo {'end_time': end simulation time (already stopped), 'end_ts': end time step}\n root=ET.Element('Branch_info')\n root.set(\"end_time\",str(stopInfo['end_time']))\n if \"end_ts\" in stopInfo.keys():\n root.set(\"end_ts\",str(stopInfo['end_ts']))\n triggerNode=ET.SubElement(root,\"Distribution_trigger\")\n triggerNode.set(\"name\",trigger)\n for varInfo in listDict:\n var=ET.SubElement(triggerNode,'Variable')\n var.text=varInfo['name']\n var.set('type',varInfo['type'])\n var.set('old_value',str(varInfo['old_value']))\n var.set('actual_value',str(varInfo['new_value']))\n if 'associated_pb' in varInfo.keys():\n var.set('probability',str(varInfo['associated_pb']))\n with open(filename,'w') as fileObject:\n fileObject.write(minidom.parseString(ET.tostring(root, 'utf-8')).toprettyxml(indent=\"\\t\"))", "def print_tags(self, filename):\n fh = open(filename, 'w')\n for t in self.source_tags.tags:\n fh.write(\"%d\\t%d\\t%s\" % (t.begin, t.end, t.name))\n for (attr, val) in t.attrs.items():\n fh.write(\"\\t%s=\\\"%s\\\"\" % (attr, val.replace('\"','&quot;')))\n fh.write(\"\\n\")", "def createxmlmall():\r\n\r\n root = ET.Element(\"state\")\r\n model = ET.SubElement(root, \"model\")\r\n model.text = r\"\"\r\n\r\n dataid = ET.SubElement(root, \"dataids\")\r\n application = ET.SubElement(root, \"application\")\r\n\r\n application.text = \"SIBS Configurator\"\r\n safecookie = ET.SubElement(root, \"safecookie\")\r\n steps = ET.SubElement(root, \"steps\")\r\n prev = ET.SubElement(steps, \"prev\")\r\n\r\n lastproxy = ET.SubElement(root, \"last-proxy\").text = \"tcserver0\"\r\n\r\n tree = ET.ElementTree(root) # saves tree in variable \"tree\"\r\n return tree, safecookie, steps, prev", "def _tokens(self):\n # get my renderer\n renderer = self.renderer\n # sign on\n yield \"\"\n yield renderer.commentLine(\"tokens\")\n # simple tokens\n yield from renderer.set(name=\"empty\")\n yield from renderer.set(name=\"comma\", value=\",\")\n yield from renderer.set(name=\"space\", value=\"$(empty) $(empty)\")\n\n # characters that don't render easily and make the makefile less readable\n yield from renderer.set(name=\"esc\", value='\"\\x1b\"')\n\n # all done\n return", "def __createXMLFileForClear():\r\n #description\r\n #Root\r\n clear_root = Element('clear-users-request', {'xmlns':SYMPLECTIC_XMLNS_URI,} )\r\n #Feed\r\n SubElement(clear_root, 'feed-id').text = IMPORT_USERS_FEED_ID\r\n #Convert to ElementTree and write xml version to file\r\n xml_filename = SYMPLECTIC_LOCAL_XML_FOLDER + SYMPLECTIC_LOCAL_USER_FOLDER + SYMPLECTIC_LOCAL_USER_CLEARFILE\r\n ElementTree(clear_root).write(xml_filename)\r\n #Return xml filename\r\n return xml_filename" ]
[ "0.66678095", "0.58849597", "0.5764239", "0.571944", "0.5707829", "0.5673825", "0.5667747", "0.55502224", "0.55148125", "0.5463388", "0.5434148", "0.54197377", "0.54130113", "0.5357553", "0.5313254", "0.5286458", "0.5276797", "0.5256187", "0.5254526", "0.5239966", "0.52307767", "0.5227354", "0.52146244", "0.52123326", "0.52056485", "0.5187481", "0.513619", "0.511087", "0.5107037", "0.5106376" ]
0.7522477
0
Obtain a mask for each image in the list of images query_img. The method will be determined by the passed method argument.
def bg_mask(query_imgs, method):
    print("Obtaining masks")
    segmentation_method = get_method(method)
    return [segmentation_method(img) for img in query_imgs]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def apply_mask(query_imgs, masks, method):\n resulting_imgs = []\n for img, mask in zip(query_imgs, masks):\n positions = np.where(mask == 255)\n if method == CBHS: # Special treatment for cell-based bg segmentation to mantain \n x_min, x_max, y_min, y_max = positions[0][0], positions[0][-1], positions[1][0], positions[1][-1]\n img = img[x_min:x_max, y_min:y_max]\n else:\n mask = mask == 255\n img = img[mask].reshape(-1, 3)\n\n resulting_imgs.append(img)\n \n if isDebug():\n addDebugImage(img)\n if isDebug():\n showDebugImage()\n print(\"Finished to apply masks\")\n \n return resulting_imgs", "def get_mask(self, img):\n raise NotImplementedError()", "def calcmask(self, *args, **kwargs):\n return _image.image_calcmask(self, *args, **kwargs)", "def __call__(self, image: np.ndarray) -> np.ndarray:\n # convert PIL image to numpy array\n image = np.asarray(image)\n\n # get masks, all pixels\n np_mask = np.array(np.ones(image.shape[0:2]), dtype=bool)\n\n return np_mask", "def getHitmask(self,image):\n\t\tmask = []\n\t\tfor x in range(image.get_width()):\n\t\t\tmask.append([])\n\t\t\tfor y in range(image.get_height()):\n\t\t\t\tmask[x].append(bool(image.get_at((x,y))[3]))\n\t\treturn mask", "def mask_images(self, folder_name, mask_image_name):\n\n photo_list = self.get_photo_list(folder_name)\n masked_folder_name = folder_name + '_background'\n\n try:\n print(\"Making dir \" + str(masked_folder_name) + \" for masking\")\n os.mkdir(masked_folder_name)\n except OSError:\n print(\"Folder exists, have you already done this masking??\")\n return\n\n full_mask_image = cv2.imread(mask_image_name, cv2.IMREAD_ANYDEPTH)\n\n for i, image_name in enumerate(photo_list):\n print(i)\n print (folder_name + image_name)\n img = cv2.imread(folder_name + '/' + image_name, cv2.IMREAD_ANYDEPTH)\n masked_image = img\n\n size = img.shape\n for row_pixel in range(0, size[0]):\n for column_pixel in range(0, size[1]):\n if full_mask_image[row_pixel, column_pixel] != 0:\n masked_image[row_pixel, column_pixel] = img[row_pixel, column_pixel]\n\n else:\n masked_image[row_pixel, column_pixel] = 0\n\n cv2.imwrite(masked_folder_name + '/' + image_name, masked_image.astype(np.uint16))", "def compute(image, mask_band, bits, options=None, name_all='all_masks'):\n # cast params in case they are not EE objects\n bits_dict = ee.Dictionary(bits)\n opt = ee.List(options) if options else bits_dict.keys()\n image = ee.Image(image).select(mask_band)\n\n first = ee.Image.constant(0).select([0], [name_all]) # init image\n\n # function for iterate over the options\n def for_iterate(option, ini):\n i = ee.Image(ini) # cast ini\n all = i.select([name_all])\n\n # bits relation dict contains the option?\n cond = bits_dict.contains(option)\n\n def for_true():\n ''' function to execute if condition == True '''\n # get the mask for the option\n mask = tools.compute_bits(bits_dict.get(option),\n bits_dict.get(option),\n option)(image)\n\n # name the mask\n # mask = ee.Image(mask).select([0], [option])\n newmask = all.Or(mask)\n\n # return ee.Image(all.Or(mask)).addBands(mask)\n return tools.replace(i, name_all, newmask).addBands(mask)\n\n return ee.Image(ee.Algorithms.If(cond, for_true(), i))\n\n good_pix = ee.Image(opt.iterate(for_iterate, first))\n\n # return good_pix.Not()\n return good_pix", "def mask_image(image):\n pass", "def getHitmask(image):\n mask = []\n for x in range(image.get_width()):\n mask.append([])\n for y in range(image.get_height()):\n mask[x].append(bool(image.get_at((x,y))[3]))\n return mask", "def getHitmask(image):\n mask = 
[]\n for x in range(image.get_width()):\n mask.append([])\n for y in range(image.get_height()):\n mask[x].append(bool(image.get_at((x,y))[3]))\n return mask", "def getHitmask(image):\n mask = []\n for x in range(image.get_width()):\n mask.append([])\n for y in range(image.get_height()):\n mask[x].append(bool(image.get_at((x,y))[3]))\n return mask", "def mask_the_images(working_path,set_name):\n\n file_list=glob('/media/talhassid/My Passport/haimTal/test_images_0b8afe447b5f1a2c405f41cf2fb1198e.npy')\n out_images = [] #final set of images for all patients\n for fname in file_list:\n out_images_per_patient = []\n print (\"working on file \", fname)\n imgs_to_process = np.load(fname.replace(\"lungmask\",\"images\")) # images of one patient\n masks = np.load(fname)\n for i in range(len(imgs_to_process)):\n mask = masks[i]\n img = imgs_to_process[i]\n new_size = [512,512] # we're scaling back up to the original size of the image\n img= mask*img # apply lung mask\n #\n # renormalizing the masked image (in the mask region)\n #\n new_mean = np.mean(img[mask>0])\n new_std = np.std(img[mask>0])\n #\n # Pulling the background color up to the lower end\n # of the pixel range for the lungs\n #\n old_min = np.min(img) # background color\n img[img==old_min] = new_mean-1.2*new_std # resetting backgound color\n img = img-new_mean\n img = img/new_std\n #make image bounding box (min row, min col, max row, max col)\n labels = measure.label(mask)\n regions = measure.regionprops(labels)\n #\n # Finding the global min and max row over all regions\n #\n min_row = 512\n max_row = 0\n min_col = 512\n max_col = 0\n for prop in regions:\n B = prop.bbox\n if min_row > B[0]:\n min_row = B[0]\n if min_col > B[1]:\n min_col = B[1]\n if max_row < B[2]:\n max_row = B[2]\n if max_col < B[3]:\n max_col = B[3]\n width = max_col-min_col\n height = max_row - min_row\n if width > height:\n max_row=min_row+width\n else:\n max_col = min_col+height\n #\n # cropping the image down to the bounding box for all regions\n # (there's probably an skimage command that can do this in one line)\n #\n img = img[min_row:max_row,min_col:max_col]\n mask = mask[min_row:max_row,min_col:max_col]\n if max_row-min_row <5 or max_col-min_col<5: # skipping all images with no god regions\n pass\n else:\n # moving range to -1 to 1 to accomodate the resize function\n mean = np.mean(img)\n img = img - mean\n min = np.min(img)\n max = np.max(img)\n img = img/(max-min)\n new_img = resize(img,[512,512], mode='constant')\n out_images_per_patient.append(new_img)\n\n id = re.sub(r'.*_images_(.*)\\.npy',r'\\1',fname)\n patient_images_and_id = (out_images_per_patient,id)\n out_images.append(patient_images_and_id)\n print (\"Delete files: {} \\n\\t {} \".format(fname,re.sub(\"lungmask\",\"images\",fname)))\n os.remove(fname)\n os.remove(fname.replace(\"images\",\"lungmask\")) # images of one patient\n\n\n np.save(working_path+\"{}Images.npy\".format(set_name),out_images)", "def im_detect_mask(model, im_scales, boxes):\n assert len(im_scales) == 1, \\\n 'Only single-image / single-scale batch implemented'\n\n M_HEIGHT = cfg.MRCNN.RESOLUTION_H\n M_WIDTH = cfg.MRCNN.RESOLUTION_W\n if boxes.shape[0] == 0:\n pred_masks = np.zeros((0, M, M), np.float32)\n return pred_masks\n\n inputs = {'mask_rois': _get_rois_blob(boxes, im_scales)}\n # Add multi-level rois for FPN\n if cfg.FPN.MULTILEVEL_ROIS:\n _add_multilevel_rois_for_test(inputs, 'mask_rois')\n\n for k, v in inputs.items():\n workspace.FeedBlob(core.ScopedName(k), v)\n workspace.RunNet(model.mask_net.Proto().name)\n\n 
# Fetch masks\n pred_global_masks = workspace.FetchBlob(\n core.ScopedName('mask_fcn_global_probs')\n ).squeeze()\n pred_char_masks = workspace.FetchBlob(\n core.ScopedName('mask_fcn_char_probs')\n ).squeeze()\n # pred_char_boxes = workspace.FetchBlob(\n # core.ScopedName('mask_fcn_charbox_pred')\n # ).squeeze()\n pred_global_masks = pred_global_masks.reshape([-1, 1, M_HEIGHT, M_WIDTH])\n pred_char_masks = pred_char_masks.reshape([-1, M_HEIGHT, M_WIDTH, 37])\n pred_char_masks = pred_char_masks.transpose([0,3,1,2])\n # pred_char_boxes = pred_char_boxes.reshape([-1, 4, M_HEIGHT, M_WIDTH])\n\n return pred_global_masks, pred_char_masks, None", "def __call__(cls, image, mask,\n interp_mask=DEFAULT_INTERP_MASK,\n BADPIX_INTERP=maskbits.BADPIX_INTERP,\n min_cols=DEFAULT_MINCOLS,\n max_cols=DEFAULT_MAXCOLS,\n invalid_mask=DEFAULT_INVALID_MASK,\n add_noise=DEFAULT_ADD_NOISE,\n clobber=DEFAULT_CLOBBER,\n block_size=DEFAULT_BLOCK_SIZE,\n logger=logger):\n\n # Pass the locals as kwargs\n kwargs = locals()\n image, mask = zipp.zipper_interp_rows(**kwargs)\n return image, mask", "def _load_mask(self, image_id):\n\n mask_pattern = os.path.join(self.directory, image_id, \"masks/*.png\")\n ic = ImageCollection(mask_pattern)\n\n mask = np.zeros(self.imsize, dtype='uint8')\n for lbl, indiv_mask in enumerate(ic):\n mask += ((\n 1 + lbl) * self._process(indiv_mask, True).astype('uint8'))\n\n return mask", "def get_mask(self, anno, img_info) -> np.ndarray:\n m = np.zeros((img_info[\"height\"], img_info[\"width\"]), dtype=np.float32)\n\n for obj in anno:\n if obj[\"iscrowd\"]:\n rle = pycocotools.mask.frPyObjects(obj[\"segmentation\"], img_info[\"height\"], img_info[\"width\"])\n mask = pycocotools.mask.decode(rle)\n if mask.shape != m.shape:\n logger.warning(f\"Mask shape {mask.shape} does not match image shape {m.shape} for image {img_info['file_name']}\")\n continue\n m += mask\n elif obj[\"num_keypoints\"] == 0:\n rles = pycocotools.mask.frPyObjects(obj[\"segmentation\"], img_info[\"height\"], img_info[\"width\"])\n for rle in rles:\n mask = pycocotools.mask.decode(rle)\n if mask.shape != m.shape:\n logger.warning(f\"Mask shape {mask.shape} does not match image shape {m.shape} for image {img_info['file_name']}\")\n continue\n\n m += mask\n\n return (m < 0.5).astype(np.float32)", "def load_mask(self, image_id):\n info = self.image_info[image_id]\n # Get mask directory from image path\n mask_dir = os.path.join(os.path.dirname(os.path.dirname(info['path'])), \"masks\")\n\n # Read mask files from .png image\n mask = []\n # for f in next(os.walk(mask_dir))[2]:\n m = skimage.io.imread(os.path.join(mask_dir, info['id']+'.png')).astype(np.bool)\n mask.append(m)\n # print(mask)\n mask = np.stack(mask, axis=-1)\n # Return mask, and array of class IDs of each instance. 
Since we have\n # one class ID, we return an array of ones\n return mask, np.ones([mask.shape[-1]], dtype=np.int32)", "def load_mask(self, image_id):\n # If not a balloon dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"glomerulus\":\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert polygons to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n for i, p in enumerate(info[\"polygons\"]):\n # Get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n mask[rr, cc, i] = 1\n\n # Return mask, and array of class IDs of each instance. Since we have\n # one class ID only, we return an array of 1s\n return mask, np.ones([mask.shape[-1]], dtype=np.int32)", "def load_mask(self, image_id):\n\n # If not a balloon dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"dsb\":\n return super(self.__class__, self).load_mask(image_id)\n\n path = image_info[\"dir\"]\n\n mascara = next(os.walk(path + '/masks/'))[2]\n masc = skimage.io.imread(path + '/masks/' + mascara[0])\n height, width = masc.shape\n\n mask = np.zeros((height, width, len(mascara)), dtype=np.uint8)\n\n for i, mask_file in enumerate(mascara):\n mask[:,:,i] = skimage.io.imread(path + '/masks/' + mask_file)\n\n # Return mask, and array of class IDs of each instance. Since we have\n # one class ID only, we return an array of 1s\n return mask, np.ones([mask.shape[-1]], dtype=np.int32)", "def mask_images(im_dir, wt_dir, im_masked_dir, wt_masked_dir, imtype='intbgsub', wttype='rrhr'):\n int_suff, rrhr_suff = '*-{}.fits'.format(imtype), '*-{}.fits'.format(wttype)\n int_images = sorted(glob.glob(os.path.join(im_dir, int_suff)))\n rrhr_images = sorted(glob.glob(os.path.join(wt_dir, rrhr_suff)))\n\n for i in range(len(int_images)):\n image_infile = int_images[i]\n wt_infile = rrhr_images[i]\n\n image_outfile = os.path.join(im_masked_dir, os.path.basename(image_infile))\n wt_outfile = os.path.join(wt_masked_dir, os.path.basename(wt_infile))\n\n mask_galex(image_infile, wt_infile, image_outfile, wt_outfile)", "def apply_mask(self):\n for mask, param in self.masked_parameters:\n param.mul_(mask)", "def load_mask(self, image_id):\n # If not homeobject dataset, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != 'homeobject':\n print(\n \"Warn: \\'{}\\' label not found. Processing with parent load_mask.\".format(image_info[\"source\"]))\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert polygons to a bitmap mask of shape\n class_ids = image_info['class_ids']\n info = self.image_info[image_id]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])], dtype=np.uint8)\n for i, p in enumerate(info[\"polygons\"]):\n # Get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n\n # modify dirt mask if it resides outside of image boundary\n rr[rr > mask.shape[0] - 1] = mask.shape[0] - 1\n cc[cc > mask.shape[1] - 1] = mask.shape[1] - 1\n\n mask[rr, cc, i] = 1\n # Return mask, and array of class IDs of each instance. 
Since we have\n # one class ID only, we return an array of 1s\n class_ids = np.array(class_ids, dtype=np.int32)\n # return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)\n return mask, class_ids", "def load_mask(self, image_id):\n info = self.image_info[image_id]\n mask_paths = glob.glob(info['path'].replace('images', 'masks').replace('.png', '*.png'))\n masks = []\n class_ids = []\n for mask_path in mask_paths:\n# print(mask_path)\n mask = cv2.imread(mask_path,cv2.IMREAD_GRAYSCALE) \n masks.append(mask)\n if 'normal' in mask_path:\n class_ids.append(0)\n if 'benign' in mask_path:\n class_ids.append(1)\n if 'malignant' in mask_path:\n class_ids.append(2)\n masks = np.moveaxis(masks,0,-1)\n class_ids = np.array(class_ids)\n return masks, class_ids", "def load_mask(self, image_id):\n # If not a pedestrian dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"pedestrian\":\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert polygons to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n for i, p in enumerate(info[\"polygons\"]):\n # Get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n mask[rr, cc, i] = 1\n\n # Return mask, and array of class IDs of each instance. Since we have\n # one class ID only, we return an array of 1s\n return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)", "def __getitem__(self,image_id):\n # read the image\n image_path = (os.path.join(self.dataset_dir,self.list_dir[image_id],\"images/{}.png\".format(self.list_dir[image_id])))\n image = io.imread(image_path)\n # read the mask\n mask_dir = os.path.join(self.dataset_dir,self.list_dir[image_id],'masks')\n masks_list = []\n\n for i, f in enumerate (next(os.walk(mask_dir))[2]):\n if f.endswith ('.png'):\n m = io.imread(os.path.join(mask_dir,f)).astype(np.bool)\n m = m[:,:,0]\n masks_list.append(m)\n #combine all the masks corresponding of an invidual sample image into single binary mask\n if len(masks_list) != 1:\n masks = np.logical_or(masks,masks_list[i])\n else:\n masks = masks_list[i]\n # do the transforms..\n trans_img,trans_masks = self.transform(image,masks,self.aug)\n sample = {\"image\":trans_img,\"masks\":trans_masks}\n\n return(sample)", "def get_mask(self, methods=None): \r\n plot_mask = np.ones(self.ndetects, np.bool_)\r\n rec_tr = self.rec_track\r\n\r\n for nm,method in enumerate(methods):\r\n method_mask = rec_tr[method] == 1\r\n plot_mask = np.multiply(~method_mask, plot_mask)\r\n\r\n return plot_mask", "def __call__(self, image: np.ndarray) -> np.ndarray:\n # convert PIL image to numpy array\n image = np.asarray(image)\n\n # change pure black to pure white\n imager = image[:, :, 0] == 0\n imageg = image[:, :, 1] == 0\n imageb = image[:, :, 2] == 0\n image_mask = np.expand_dims(np.logical_and(np.logical_and(imager, imageg), imageb), axis=-1)\n image = np.where(image_mask, [255,255,255], image)\n image = np.array(image, dtype=np.uint8)\n\n # convert to gray-scale\n image_grey = rgb2gray(image)\n\n # apply filter\n for pf in self.pre_filter:\n image = pf(image)\n\n # get masks, any pixel that is less than 0.8\n np_mask = np.less_equal(image_grey, self.grey_level)\n\n # apply morphological transforms\n for mt in self.morph_transform:\n np_mask = mt(np_mask)\n\n return np_mask", "def 
batch_image_mask(patch_R, patch_C):\n\n conf = configparser.ConfigParser()\n conf.read(os.path.join(current_path, \"..\", \"sys.ini\"))\n image_dir = conf.get(\"UTILS_MASK\", \"IMAGE_DIR\")\n images = glob.glob(os.path.join(image_dir, \"*.png\"))\n images = sorted(images)\n\n info_logger = get_logger(level=\"info\")\n error_logger = get_logger(level=\"error\")\n\n DEVICE = \"/gpu:1\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n with tf.device(DEVICE):\n seg_model = load_maskrcnn_model()\n for image in images:\n try:\n image_mask(image, patch_R, patch_C, seg_model)\n info_logger.info(f\"Create mask {image} success\")\n except Exception as e:\n error_logger.error(f\"Create mask {image} error\", exc_info=True)", "def image_special_func(img):\n return myimg.image_special_func(img.tolist())", "def _filter_and_extract(\n imgs,\n extraction_function,\n parameters,\n memory_level=0,\n memory=Memory(location=None),\n verbose=0,\n confounds=None,\n sample_mask=None,\n copy=True,\n dtype=None,\n):\n # Since the calling class can be any *Nifti*Masker, we look for exact type\n if verbose > 0:\n class_name = enclosing_scope_name(stack_level=10)\n\n # If we have a string (filename), we won't need to copy, as\n # there will be no side effect\n imgs = stringify_path(imgs)\n if isinstance(imgs, str):\n copy = False\n\n if verbose > 0:\n print(\n f\"[{class_name}] Loading data \"\n f\"from {_utils._repr_niimgs(imgs, shorten=False)}\"\n )\n\n # Convert input to niimg to check shape.\n # This must be repeated after the shape check because check_niimg will\n # coerce 5D data to 4D, which we don't want.\n temp_imgs = _utils.check_niimg(imgs)\n\n # Raise warning if a 3D niimg is provided.\n if temp_imgs.ndim == 3:\n warnings.warn(\n \"Starting in version 0.12, 3D images will be transformed to \"\n \"1D arrays. 
\"\n \"Until then, 3D images will be coerced to 2D arrays, with a \"\n \"singleton first dimension representing time.\",\n DeprecationWarning,\n )\n\n imgs = _utils.check_niimg(\n imgs, atleast_4d=True, ensure_ndim=4, dtype=dtype\n )\n\n target_shape = parameters.get(\"target_shape\")\n target_affine = parameters.get(\"target_affine\")\n if target_shape is not None or target_affine is not None:\n if verbose > 0:\n print(f\"[{class_name}] Resampling images\")\n imgs = cache(\n image.resample_img,\n memory,\n func_memory_level=2,\n memory_level=memory_level,\n ignore=[\"copy\"],\n )(\n imgs,\n interpolation=\"continuous\",\n target_shape=target_shape,\n target_affine=target_affine,\n copy=copy,\n )\n\n smoothing_fwhm = parameters.get(\"smoothing_fwhm\")\n if smoothing_fwhm is not None:\n if verbose > 0:\n print(f\"[{class_name}] Smoothing images\")\n imgs = cache(\n image.smooth_img,\n memory,\n func_memory_level=2,\n memory_level=memory_level,\n )(imgs, parameters[\"smoothing_fwhm\"])\n\n if verbose > 0:\n print(f\"[{class_name}] Extracting region signals\")\n region_signals, aux = cache(\n extraction_function,\n memory,\n func_memory_level=2,\n memory_level=memory_level,\n )(imgs)\n\n # Temporal\n # --------\n # Detrending (optional)\n # Filtering\n # Confounds removing (from csv file or numpy array)\n # Normalizing\n if verbose > 0:\n print(f\"[{class_name}] Cleaning extracted signals\")\n runs = parameters.get(\"runs\", None)\n region_signals = cache(\n signal.clean,\n memory=memory,\n func_memory_level=2,\n memory_level=memory_level,\n )(\n region_signals,\n detrend=parameters[\"detrend\"],\n standardize=parameters[\"standardize\"],\n standardize_confounds=parameters[\"standardize_confounds\"],\n t_r=parameters[\"t_r\"],\n low_pass=parameters[\"low_pass\"],\n high_pass=parameters[\"high_pass\"],\n confounds=confounds,\n sample_mask=sample_mask,\n runs=runs,\n **parameters[\"clean_kwargs\"],\n )\n\n return region_signals, aux" ]
[ "0.77740014", "0.66345733", "0.65263313", "0.6186163", "0.59989136", "0.59502006", "0.5948844", "0.5948451", "0.5936673", "0.5936673", "0.5936673", "0.5858566", "0.5851606", "0.5791609", "0.5766119", "0.5734559", "0.57131267", "0.57121515", "0.57069325", "0.5696809", "0.5687493", "0.56807923", "0.56792295", "0.5677336", "0.567707", "0.56744814", "0.56715196", "0.56653255", "0.562994", "0.56277084" ]
0.8243731
0
Apply mask to each image in the query set, based on the method that was applied
def apply_mask(query_imgs, masks, method):
    resulting_imgs = []
    for img, mask in zip(query_imgs, masks):
        positions = np.where(mask == 255)
        if method == CBHS: # Special treatment for cell-based bg segmentation to mantain
            x_min, x_max, y_min, y_max = positions[0][0], positions[0][-1], positions[1][0], positions[1][-1]
            img = img[x_min:x_max, y_min:y_max]
        else:
            mask = mask == 255
            img = img[mask].reshape(-1, 3)

        resulting_imgs.append(img)

        if isDebug():
            addDebugImage(img)
    if isDebug():
        showDebugImage()
    print("Finished to apply masks")

    return resulting_imgs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bg_mask(query_imgs, method):\n print(\"Obtaining masks\")\n segmentation_method = get_method(method)\n return [segmentation_method(img) for img in query_imgs]", "def apply_mask(self):\n for mask, param in self.masked_parameters:\n param.mul_(mask)", "def calcmask(self, *args, **kwargs):\n return _image.image_calcmask(self, *args, **kwargs)", "def mask_image(image):\n pass", "def mask_images(self, folder_name, mask_image_name):\n\n photo_list = self.get_photo_list(folder_name)\n masked_folder_name = folder_name + '_background'\n\n try:\n print(\"Making dir \" + str(masked_folder_name) + \" for masking\")\n os.mkdir(masked_folder_name)\n except OSError:\n print(\"Folder exists, have you already done this masking??\")\n return\n\n full_mask_image = cv2.imread(mask_image_name, cv2.IMREAD_ANYDEPTH)\n\n for i, image_name in enumerate(photo_list):\n print(i)\n print (folder_name + image_name)\n img = cv2.imread(folder_name + '/' + image_name, cv2.IMREAD_ANYDEPTH)\n masked_image = img\n\n size = img.shape\n for row_pixel in range(0, size[0]):\n for column_pixel in range(0, size[1]):\n if full_mask_image[row_pixel, column_pixel] != 0:\n masked_image[row_pixel, column_pixel] = img[row_pixel, column_pixel]\n\n else:\n masked_image[row_pixel, column_pixel] = 0\n\n cv2.imwrite(masked_folder_name + '/' + image_name, masked_image.astype(np.uint16))", "def mask_the_images(working_path,set_name):\n\n file_list=glob('/media/talhassid/My Passport/haimTal/test_images_0b8afe447b5f1a2c405f41cf2fb1198e.npy')\n out_images = [] #final set of images for all patients\n for fname in file_list:\n out_images_per_patient = []\n print (\"working on file \", fname)\n imgs_to_process = np.load(fname.replace(\"lungmask\",\"images\")) # images of one patient\n masks = np.load(fname)\n for i in range(len(imgs_to_process)):\n mask = masks[i]\n img = imgs_to_process[i]\n new_size = [512,512] # we're scaling back up to the original size of the image\n img= mask*img # apply lung mask\n #\n # renormalizing the masked image (in the mask region)\n #\n new_mean = np.mean(img[mask>0])\n new_std = np.std(img[mask>0])\n #\n # Pulling the background color up to the lower end\n # of the pixel range for the lungs\n #\n old_min = np.min(img) # background color\n img[img==old_min] = new_mean-1.2*new_std # resetting backgound color\n img = img-new_mean\n img = img/new_std\n #make image bounding box (min row, min col, max row, max col)\n labels = measure.label(mask)\n regions = measure.regionprops(labels)\n #\n # Finding the global min and max row over all regions\n #\n min_row = 512\n max_row = 0\n min_col = 512\n max_col = 0\n for prop in regions:\n B = prop.bbox\n if min_row > B[0]:\n min_row = B[0]\n if min_col > B[1]:\n min_col = B[1]\n if max_row < B[2]:\n max_row = B[2]\n if max_col < B[3]:\n max_col = B[3]\n width = max_col-min_col\n height = max_row - min_row\n if width > height:\n max_row=min_row+width\n else:\n max_col = min_col+height\n #\n # cropping the image down to the bounding box for all regions\n # (there's probably an skimage command that can do this in one line)\n #\n img = img[min_row:max_row,min_col:max_col]\n mask = mask[min_row:max_row,min_col:max_col]\n if max_row-min_row <5 or max_col-min_col<5: # skipping all images with no god regions\n pass\n else:\n # moving range to -1 to 1 to accomodate the resize function\n mean = np.mean(img)\n img = img - mean\n min = np.min(img)\n max = np.max(img)\n img = img/(max-min)\n new_img = resize(img,[512,512], mode='constant')\n out_images_per_patient.append(new_img)\n\n id 
= re.sub(r'.*_images_(.*)\\.npy',r'\\1',fname)\n patient_images_and_id = (out_images_per_patient,id)\n out_images.append(patient_images_and_id)\n print (\"Delete files: {} \\n\\t {} \".format(fname,re.sub(\"lungmask\",\"images\",fname)))\n os.remove(fname)\n os.remove(fname.replace(\"images\",\"lungmask\")) # images of one patient\n\n\n np.save(working_path+\"{}Images.npy\".format(set_name),out_images)", "def process_mask(self, image):\n image = np.array(image)\n image[image == 5] = 1 # set un-classified to undestroyed\n return Image.fromarray(image)", "def get_mask(self, img):\n raise NotImplementedError()", "def _process(self, img, mask=False):\n\n if mask:\n return preprocess_image(img, self.imsize, False, False)\n\n return preprocess_image(img, self.imsize, self.scale,\n self.invert_white_images)", "def _resize_masks(self, results):\n for key in results.get('mask_fields', []):\n if results[key] is None:\n continue\n if self.keep_ratio:\n results[key] = results[key].rescale(results['scale'])\n else:\n results[key] = results[key].resize(results['img_shape'][:2])", "def _resize_masks(self, results):\n for key in results.get('mask_fields', []):\n if results[key] is None:\n continue\n if self.keep_ratio:\n results[key] = results[key].rescale(results['scale'])\n else:\n results[key] = results[key].resize(results['img_shape'][:2])", "def apply_mask(components):\n img = components[10]\n mask = components[11]\n if mask is not None:\n img[:, :, 0] = img[:, :, 0] * mask\n img[:, :, 1] = img[:, :, 1] * mask\n img[:, :, 2] = img[:, :, 2] * mask\n img[img == 0] = 128\n return components", "def apply_mask(image, mask):\n image = image.astype(np.uint8)\n image = np.array(image)\n \n for c in range(3):\n image[:, :, c] = np.where(mask == 1,\n cv2.blur(image[:, :, c],(40,40)),\n image[:, :, c])\n return image", "def convert_masks():\n for fn in sorted(glob.glob('../input/extra_data/*/masks/*.png')):\n print(fn)\n img = skimage.io.imread(fn)\n # utils.print_stats('mask', img)\n img[img > 0] = 255\n skimage.io.imsave(fn, img)", "def __call__(self, mask):\n mapping = self.calculate_mapping(mask)\n return self.apply_mapping(mask, mapping)", "def mask(self):", "def apply_masks(masks):\n masks = ee.List(masks) if isinstance(masks, list) else masks\n first = ee.Image.constant(0)\n\n def compute(mask, first):\n first = ee.Image(first)\n return first.Or(mask)\n\n bad_pixels = ee.Image(masks.iterate(compute, first))\n good_pixels = bad_pixels.Not()\n\n return good_pixels", "def apply_filter(self, image):\n pass", "def dynamic_masking(image):\n image = img_as_float(image)\n background = gaussian_filter(median_filter(image,3),1)\n image[background > threshold_otsu(background)/5.0] = 0.0\n \n return image", "def __call__(self, image: np.ndarray) -> np.ndarray:\n # convert PIL image to numpy array\n image = np.asarray(image)\n\n # change pure black to pure white\n imager = image[:, :, 0] == 0\n imageg = image[:, :, 1] == 0\n imageb = image[:, :, 2] == 0\n image_mask = np.expand_dims(np.logical_and(np.logical_and(imager, imageg), imageb), axis=-1)\n image = np.where(image_mask, [255,255,255], image)\n image = np.array(image, dtype=np.uint8)\n\n # convert to gray-scale\n image_grey = rgb2gray(image)\n\n # apply filter\n for pf in self.pre_filter:\n image = pf(image)\n\n # get masks, any pixel that is less than 0.8\n np_mask = np.less_equal(image_grey, self.grey_level)\n\n # apply morphological transforms\n for mt in self.morph_transform:\n np_mask = mt(np_mask)\n\n return np_mask", "def batch_image_mask(patch_R, 
patch_C):\n\n conf = configparser.ConfigParser()\n conf.read(os.path.join(current_path, \"..\", \"sys.ini\"))\n image_dir = conf.get(\"UTILS_MASK\", \"IMAGE_DIR\")\n images = glob.glob(os.path.join(image_dir, \"*.png\"))\n images = sorted(images)\n\n info_logger = get_logger(level=\"info\")\n error_logger = get_logger(level=\"error\")\n\n DEVICE = \"/gpu:1\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n with tf.device(DEVICE):\n seg_model = load_maskrcnn_model()\n for image in images:\n try:\n image_mask(image, patch_R, patch_C, seg_model)\n info_logger.info(f\"Create mask {image} success\")\n except Exception as e:\n error_logger.error(f\"Create mask {image} error\", exc_info=True)", "def load_mask(self, image_id):\n # If not homeobject dataset, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != 'homeobject':\n print(\n \"Warn: \\'{}\\' label not found. Processing with parent load_mask.\".format(image_info[\"source\"]))\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert polygons to a bitmap mask of shape\n class_ids = image_info['class_ids']\n info = self.image_info[image_id]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])], dtype=np.uint8)\n for i, p in enumerate(info[\"polygons\"]):\n # Get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n\n # modify dirt mask if it resides outside of image boundary\n rr[rr > mask.shape[0] - 1] = mask.shape[0] - 1\n cc[cc > mask.shape[1] - 1] = mask.shape[1] - 1\n\n mask[rr, cc, i] = 1\n # Return mask, and array of class IDs of each instance. Since we have\n # one class ID only, we return an array of 1s\n class_ids = np.array(class_ids, dtype=np.int32)\n # return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)\n return mask, class_ids", "def _draw_mask_on_image(self, mask):\n mask = self.STANDARD_COLORS_ARRAY[mask]\n cv2.addWeighted(mask,self.config.ALPHA,self.image,1.0,0,self.image)", "def apply_masks_to_volume(root_dir):\n\n # Get the full path of the ClearImages, CoregisteredBlurryImages, and Masks directories\n clear_image_dir = join(root_dir, 'ClearImages')\n blurry_image_dir = join(root_dir, 'CoregisteredBlurryImages')\n mask_dir = join(root_dir, 'Masks')\n\n # Iterate over the entire list of images (doesn't matter if it's clear_image_dir or blurry_image_dir)\n for file_name in os.listdir(clear_image_dir):\n if file_name.endswith('.jpg') or file_name.endswith('.png'):\n # Read the clear and blurry images as grayscale images in the form of numpy arrays\n clear_image = cv2.imread(join(clear_image_dir, file_name), 0)\n blurry_image = cv2.imread(join(blurry_image_dir, file_name), 0)\n mask_image = cv2.imread(join(mask_dir, file_name), 0)\n\n if type(blurry_image) is None:\n pass\n\n # Apply the mask to the clear image AND the blurry image\n clear_image_masked = clear_image * (mask_image // 255)\n blurry_image_masked = blurry_image * (mask_image // 255)\n\n # Save the clear and blurry image back\n cv2.imwrite(filename=join(clear_image_dir, file_name), img=clear_image_masked)\n cv2.imwrite(filename=join(blurry_image_dir, file_name), img=blurry_image_masked)\n\n ''' Just logging\n # Show the clear image, clear masked image, blurry image, and blurry masked image\n logger.show_images([(\"clear_image\", clear_image),\n (\"blurry_image\", blurry_image),\n (\"clear_image_masked\", clear_image_masked),\n (\"blurry_image_masked\", blurry_image_masked)])\n '''", "def 
load_mask(self, image_id):\n # If not a balloon dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"glomerulus\":\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert polygons to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n for i, p in enumerate(info[\"polygons\"]):\n # Get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n mask[rr, cc, i] = 1\n\n # Return mask, and array of class IDs of each instance. Since we have\n # one class ID only, we return an array of 1s\n return mask, np.ones([mask.shape[-1]], dtype=np.int32)", "def unmask_images(self, folder_name, background_image_name, mask_image_name):\n # TODO add functionality to unmask the correct background for each image\n\n photo_list = self.get_photo_list(folder_name)\n unmasked_folder_name = folder_name + '_unmasked'\n\n try:\n print(\"Making dir \" + str(unmasked_folder_name) + \" for unmasking\")\n os.mkdir(unmasked_folder_name)\n except OSError:\n print(\"Folder exists, have you already done this unmasking??\")\n return\n\n full_unmask_image = cv2.imread(background_image_name, cv2.IMREAD_ANYDEPTH)\n full_mask_image = cv2.imread(mask_image_name, cv2.IMREAD_ANYDEPTH)\n\n for i, image_name in enumerate(photo_list):\n print(\"0,\" + str(i))\n print (folder_name + '/' + image_name)\n img = cv2.imread(folder_name + '/' + image_name, cv2.IMREAD_ANYDEPTH)\n unmasked_image = img\n\n size = img.shape\n for rowPixel in range(0, size[0]):\n for columnPixel in range(0, size[1]):\n if full_mask_image[rowPixel, columnPixel] != 0:\n unmasked_image[rowPixel, columnPixel] = img[rowPixel, columnPixel]\n\n elif full_mask_image[rowPixel, columnPixel] == 0:\n unmasked_image[rowPixel, columnPixel] = full_unmask_image[rowPixel, columnPixel]\n\n cv2.imwrite(unmasked_folder_name + '/' + image_name, unmasked_image.astype(np.uint16))", "def load_mask(self, image_id):\n # If not a pedestrian dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"pedestrian\":\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert polygons to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n for i, p in enumerate(info[\"polygons\"]):\n # Get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n mask[rr, cc, i] = 1\n\n # Return mask, and array of class IDs of each instance. 
Since we have\n # one class ID only, we return an array of 1s\n return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)", "def __call__(self, image: np.ndarray) -> np.ndarray:\n # convert PIL image to numpy array\n image = np.asarray(image)\n\n # get masks, all pixels\n np_mask = np.array(np.ones(image.shape[0:2]), dtype=bool)\n\n return np_mask", "def _load_mask(self, image_id):\n\n mask_pattern = os.path.join(self.directory, image_id, \"masks/*.png\")\n ic = ImageCollection(mask_pattern)\n\n mask = np.zeros(self.imsize, dtype='uint8')\n for lbl, indiv_mask in enumerate(ic):\n mask += ((\n 1 + lbl) * self._process(indiv_mask, True).astype('uint8'))\n\n return mask", "def postprocess_masks(\n self,\n masks: paddle.Tensor,\n input_size: Tuple[int, ...],\n original_size: Tuple[int, ...], ) -> paddle.Tensor:\n masks = F.interpolate(\n masks,\n (self.image_encoder.img_size, self.image_encoder.img_size),\n mode=\"bilinear\",\n align_corners=False, )\n masks = masks[..., :input_size[0], :input_size[1]]\n masks = F.interpolate(\n masks, original_size, mode=\"bilinear\", align_corners=False)\n return masks" ]
[ "0.7808987", "0.67079467", "0.639066", "0.62912345", "0.61960936", "0.61865187", "0.6183271", "0.61609346", "0.6134059", "0.6008446", "0.6008446", "0.5994534", "0.5989167", "0.5982266", "0.5970466", "0.596114", "0.5945611", "0.59365463", "0.58869183", "0.58798075", "0.58615243", "0.5853385", "0.5842189", "0.5824004", "0.57907605", "0.57845604", "0.57824796", "0.57745236", "0.5773518", "0.5767557" ]
0.78092
0
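The negatives listed in the row above all revolve around building a binary mask and applying it to an image (for example, apply_masks_to_volume multiplies the image by mask // 255). Below is a minimal sketch of that shared mask-application step, assuming a NumPy uint8 image and a 0/255 mask; the function name apply_binary_mask and the toy inputs are illustrative assumptions, not values taken from the dataset.

    import numpy as np

    def apply_binary_mask(image, mask):
        # Keep pixels where the mask is non-zero, zero out the rest.
        # image: (H, W, 3) uint8, mask: (H, W) uint8 with values 0 or 255.
        return image * (mask[..., None] // 255)

    image = np.random.randint(0, 256, size=(4, 4, 3), dtype=np.uint8)
    mask = np.zeros((4, 4), dtype=np.uint8)
    mask[1:3, 1:3] = 255
    masked = apply_binary_mask(image, mask)
    print(masked[0, 0], masked[1, 1])  # corner pixel is zeroed, centre keeps its colour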
Obtain the left, top, right and bottom positions in the celled histogram that provide the maximum change. With these positions, a rectangular matrix can be constructed.
def obtain_rectangular_segmentation(celled_hist, cells):
    celled_hist = np.array(celled_hist).reshape(cells[0]*cells[1], -1)
    scores = l1_dist(celled_hist, celled_hist)
    layers = cells[0]//2
    left, right, top, bottom = compute_positions(scores, layers, cells, "lr"), compute_positions(scores, layers, cells, "rl"), \
                               compute_positions(scores, layers, cells, "tb"), compute_positions(scores, layers, cells, "bt")
    return left, right, top, bottom
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bottom_left_tile_value(self):\n\t\treturn self.expected_cols * (self.expected_rows - 1) + 1", "def bottom_right_tile_value(self):\n\t\treturn self.expected_rows * self.expected_cols", "def find_max(self):\n\n max_x = -10\n max_y = -10\n k = len(self.__col_lista)\n for i in range(k):\n x, y = self.__col_lista[i]\n if x > max_x:\n max_x = x\n if y > max_y:\n max_y = y\n return max_x, max_y", "def bounds(self):\n return self.min_col, self.min_row, self.max_col, self.max_row", "def calculate_positions(self):\n return {cell: (cell.column, -cell.row) for cell in self.game.get_cells()}", "def get_bounds(self):\n occupied_locations = self.board.keys()\n min_x = min(p[0] for p in occupied_locations)\n max_x = max(p[0] for p in occupied_locations)\n min_y = min(p[1] for p in occupied_locations)\n max_y = max(p[1] for p in occupied_locations)\n return ((min_x, max_x), (min_y, max_y))", "def get_current_edges(self) -> Tuple[int, int, int, int]:\n top = int(self.tile_rows[0], 2)\n bottom = int(self.tile_rows[-1], 2)\n left = int(''.join([r[0] for r in self.tile_rows]), 2)\n right = int(''.join([r[-1] for r in self.tile_rows]), 2)\n\n return (top, bottom, left, right)", "def MaxColumn(I, E, d, ro):\n # http://en.wikipedia.org/wiki/Buckling\n B = 1.86635\n g = 9.81\n h = (9 * B**2 * E*I / (4*ro*g*np.pi * (d/2)**2))**(1/3)\n return h", "def get_bounds(self):\n bottom_right = np.asarray([self.coords[k][0] for k in range(self.dim)])\n upper_left = np.asarray([self.coords[k][-1] for k in range(self.dim)])\n return bottom_right, upper_left", "def get_bounds(self):\r\n left, bottom, front = 10000, 10000, 10000\r\n right, top, back = -10000, -10000, -10000\r\n for b in self.buf:\r\n for v in b.vertices:\r\n if v[0] < left:\r\n left = v[0]\r\n if v[0] > right:\r\n right = v[0]\r\n if v[1] < bottom:\r\n bottom = v[1]\r\n if v[1] > top:\r\n top = v[1]\r\n if v[2] < front:\r\n front = v[2]\r\n if v[2] > back:\r\n back = v[2]\r\n\r\n return (left, bottom, front, right, top, back)", "def calculate_min_max_tiles(self):", "def table_top_abs(self):\n table_height = np.array([0, 0, self.table_full_size[2]])\n return string_to_array(self.floor.get(\"pos\")) + table_height", "def find_max_score_location(grid, shape):", "def _getRawBound(self):\n if self._colormap is None:\n return None\n elif self._index == 0:\n return self._colormap.getVMin()\n else: # self._index == 1\n return self._colormap.getVMax()", "def get_borders(self):\r\n return (self.tiles[0][0], self.tiles[-1][-1])", "def top_right_tile_value(self):\n\t\treturn self.expected_cols", "def __getMaxUpperLeftCoordinate(self, entityNodeList):\r\n minX = sys.maxint\r\n minY = sys.maxint\r\n for node in entityNodeList:\r\n if(node.graphObject_.y < minY):\r\n minY = node.graphObject_.y\r\n if(node.graphObject_.x < minX):\r\n minX = node.graphObject_.x \r\n return (minX, minY)", "def max_area_under_histogram(histogram):\n\tif not histogram:\n\t\treturn 0\n\n\t# helper class to contain stack elements\n\tclass StackElement(object):\n\t\tdef __init__(self, index, height):\n\t\t\tself.index = index\n\t\t\tself.height = height\n\t\tdef __repr__(self):\n\t\t\treturn \"index: %s, height: %s\" % (self.index, self.height)\n\n\t# holds the best result found so far\n\tmax_area = 0\n\n\t# always growing stack\n\tstack = []\n\n\tfor index, height in enumerate(histogram, start=0):\n\t\t# unstack everything above the current bar\n\t\twhile stack and stack[-1].height >= height:\n\t\t\tpopped = stack.pop()\n\t\t\tprev_index = stack[-1].index + 1 if stack else 0\n\t\t\tpopped_area 
= popped.height * (index - prev_index)\n\t\t\tmax_area = max(max_area, popped_area)\n\n\t\t# stack the current bar\n\t\tstack.append(StackElement(index, height))\n\n\t# handle the remaining stack element that can expand all the way to the right\n\tindex += 1\n\twhile stack:\n\t\tpopped = stack.pop()\n\t\tprev_index = stack[-1].index + 1 if stack else 0\n\t\tpopped_area = popped.height * (index - prev_index)\n\t\tmax_area = max(max_area, popped_area)\n\n\treturn max_area", "def Extrema(self):\n ymin = np.min(self._corners[:, 1])\n xmin = np.min(self._corners[:, 0])\n ymax = np.max(self._corners[:, 1])\n xmax = np.max(self._corners[:, 0])\n return ymin, xmin, ymax, xmax", "def fn(i, j):\n if i == 0 and j == 0: return grid[0][0], grid[0][0]\n if i < 0 or j < 0: return -inf, inf\n if grid[i][j] == 0: return 0, 0\n mx1, mn1 = fn(i-1, j) # from top\n mx2, mn2 = fn(i, j-1) # from left \n mx, mn = max(mx1, mx2)*grid[i][j], min(mn1, mn2)*grid[i][j]\n return (mx, mn) if grid[i][j] > 0 else (mn, mx)", "def maximalRectangle(self, matrix: List[List[str]]) -> int:\n if not matrix or not matrix[0]:\n return 0\n n, m = len(matrix), len(matrix[0])\n columns_height = [0] * m\n res = 0\n for i in range(n):\n \n for j in range(m):\n if matrix[i][j] == '1':\n columns_height[j] += 1\n else:\n columns_height[j] = 0\n # then do #84 for each round\n res = max(res, self.largestRectangleArea(columns_height))\n \n return res", "def to_index(self):\r\n return (BOARD_HEIGHT - 1 - self.y) * BOARD_HEIGHT + (BOARD_WIDTH - 1 - self.x)", "def max_level(board):\n acc_board = accum_board(board)\n for row in acc_board:\n row.append(0)\n acc_board.append([0]*len(acc_board[0]))\n m, n = len(board), len(board[0])\n max_level_sum = float('-inf')\n top_left = None\n for i in range(m):\n for j in range(n):\n for k in range(min(m-i, n-j)):\n level = (acc_board[i+k][j+k] +\n acc_board[i-1][j-1] -\n acc_board[i-1][j+k] -\n acc_board[i+k][j-1])\n if level > max_level_sum:\n max_level_sum = level\n top_left = (j+1, i+1, k+1)\n return top_left", "def update_histo_frame():\n min_histo.text = str(MIN_RANGE_F) # Display the legend\n max_histo.text = str(MAX_RANGE_F)\n\n histogram = np.zeros(GRID_AXIS) # Clear histogram accumulation array\n # Collect camera data and calculate the histogram\n for _row in range(0, GRID_AXIS):\n for _col in range(0, GRID_AXIS):\n histo_index = int(map_range(GRID_DATA[_col, _row], 0, 1, 0, GRID_AXIS - 1))\n histogram[histo_index] = histogram[histo_index] + 1\n\n histo_scale = np.max(histogram) / (GRID_AXIS - 1)\n if histo_scale <= 0:\n histo_scale = 1\n\n # Display the histogram\n for _col in range(0, GRID_AXIS):\n for _row in range(0, GRID_AXIS):\n if histogram[_col] / histo_scale > GRID_AXIS - 1 - _row:\n image_group[((_row * GRID_AXIS) + _col)].fill = index_to_rgb(\n round((_col / GRID_AXIS), 3)\n )\n else:\n image_group[((_row * GRID_AXIS) + _col)].fill = BLACK", "def _getColormapRange(self):\n item = self.item()\n if item is not None and self._colormap is not None:\n return self._colormap.getColormapRange(item)\n else:\n return 1, 100 # Fallback", "def calc_grid(self):\n return int(self._posn.x / cell_size), int(self._posn.y / cell_size)", "def maxit(board):\n maxval = -2\n\n row_index = None\n col_index = None\n # if terminal board, terminate the function.\n if terminal(board) == True:\n result = utility(board)\n return (result, 0, 0) \n # for each possible move, calculate its utility, saving the maximum.\n for i in range(0, 3):\n for j in range(0, 3):\n if board[i][j] == EMPTY:\n board[i][j] = X\n (m, 
mini, minj) = minit(board)\n if m > maxval:\n maxval=m\n row_index=i\n col_index=j\n board[i][j] = EMPTY\n return (maxval, row_index, col_index)", "def find_blank_cell(self, board: list):\n cells = {}\n for i in range(9): # Iterate over rows\n for j in range(9): # Iterate over columns\n if board[i][j] == 0:\n cells[str(i) + ' ' + str(j)] = self.count_numbers(board, j, i)\n m = max(cells.values())\n for k in cells:\n if cells[k] == m:\n s = k.split()\n x, y = int(s[1]), int(s[0])\n return x, y", "def _rect_top(self):\n\treturn max(self.y, self.y + self.h)", "def _findBottom(self,col):\n min = GAME_HEIGHT\n mpos = 0\n for x in range(self.getLengthAlien()):\n if self._aliens[x][col] != None and self._aliens[x][col].y < min:\n min = self._aliens[x][col].y\n mpos = x\n return mpos" ]
[ "0.6224814", "0.6038693", "0.6004788", "0.59364384", "0.5898767", "0.58359027", "0.5803097", "0.57720894", "0.5764861", "0.5733206", "0.57220376", "0.57121265", "0.56905895", "0.56598204", "0.5611036", "0.5577255", "0.557451", "0.55710006", "0.55709165", "0.5556517", "0.5552331", "0.55206215", "0.54967415", "0.5474349", "0.54707366", "0.5458701", "0.5442257", "0.54212445", "0.5413915", "0.5413407" ]
0.61352736
1
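The document in the row above calls two helpers, l1_dist and compute_positions, that are not part of the row. The snippet below is a minimal sketch of the pairwise-L1 step it assumes; the l1_dist implementation and the toy input are assumptions, and compute_positions (the actual "maximum change" search) is deliberately left out because its logic is not shown here.

    import numpy as np

    def l1_dist(a, b):
        # Pairwise L1 distance between row vectors: out[i, j] = sum(|a[i] - b[j]|).
        # a: (n, d), b: (m, d) -> out: (n, m)
        return np.abs(a[:, None, :] - b[None, :, :]).sum(axis=-1)

    cells = [4, 4]
    celled_hist = np.random.rand(cells[0] * cells[1], 8)   # toy cell descriptors
    scores = l1_dist(celled_hist, celled_hist)             # (16, 16), zero diagonal
    print(scores.shape, np.allclose(np.diag(scores), 0))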
Compute a cell matrix histogram. If cells=[32,32], returns a 32x32 matrix where each position contains a 2D color histogram (image descriptor).
def obtain_celled_histograms(img, cells, w_ranges, h_ranges):
    results = []
    img = cv2.cvtColor(img, cv2.COLOR_RGB2Lab)
    histogram_matrix = []
    for i in range(cells[0]):
        row = []
        for j in range(cells[1]):
            img_part = img[w_ranges[i]:w_ranges[i+1], h_ranges[j]:h_ranges[j+1]]
            cr = img_part[:, :, 1].reshape(-1)
            cb = img_part[:, :, 2].reshape(-1)
            vals = np.histogram2d(cr, cb, bins=(np.arange(42, 226, 10), np.arange(20, 223, 10)))[0]
            normalized_hist = vals/vals.sum()
            vals2 = np.histogram(img_part[:, :, 0], bins=(np.arange(0, 255, 20)))[0]
            normalized_hist2 = vals2/(vals2.sum()+1e-10)
            row.append(np.concatenate([normalized_hist.reshape(-1), normalized_hist2.reshape(-1)]))
        histogram_matrix.append(row)
    return histogram_matrix
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cell_based_hist_segmentation(img, cells=[32, 32]):\n descriptor = []\n w,h = img.shape[:2]\n w_ranges = [(i*w)//cells[0] for i in range(cells[0])]+[-1]\n h_ranges = [(i*h)//cells[1] for i in range(cells[1])]+[-1]\n\n celled_hist = obtain_celled_histograms(img, cells, w_ranges, h_ranges)\n\n rectangular_segm = obtain_rectangular_segmentation(celled_hist, cells)\n mask = create_mask(rectangular_segm, img, cells)\n return mask", "def compute_histogram(self, image):\n hist = [0] * 256\n [h, w] = image.shape\n print(h,w)\n i = 0\n while i < 256:\n for row in range(h):\n for col in range(w):\n if image[row, col] == i:\n hist[i] += 1\n #print(hist[i])\n i += 1\n\n return hist", "def compute_histogram(self, image):\n\n hist = [0] * 256\n x, y = image.shape[:2]\n #print(image.shape)\n for i in range(x):\n for j in range(y):\n hist[image[i, j]] += 1\n\n return hist", "def gen_single_img_histogram(img_descriptors_codebook_pair):\n img_descriptors, codebook = img_descriptors_codebook_pair\n\n # Initially, each image will have a count of 0 for each codeword.\n histogram_of_codewords = [0 for _ in range(len(codebook))]\n\n # Keep track of which descriptor idxs map to which code word.\n descriptor_to_codeword_map = [[] for _ in range(len(codebook))]\n map_descriptor = lambda word_idx, descriptor_idx : \\\n descriptor_to_codeword_map[word_idx].append(descriptor_idx)\n\n for idx, descriptor in enumerate(img_descriptors):\n # Step 3.1\n closest_cluster_idx = hp.get_idx_of_1_NN(descriptor, codebook, dist_func=hp.euclidean_distance)\n histogram_of_codewords[closest_cluster_idx] += 1\n\n map_descriptor(closest_cluster_idx, idx)\n\n return histogram_of_codewords, descriptor_to_codeword_map", "def calc_histogram(self, img_data):\n\n histogram = [0] * self.color_depth\n\n for w in range(img_data.shape[0]):\n for h in range(img_data.shape[1]):\n pixel = img_data[w][h]\n histogram[pixel] += 1\n\n return histogram", "def _fast_hist_2d(data, bin_edges):\n # Yes, I've tested this against histogramdd().\n xassign = np.digitize(data[:,0], bin_edges[1:-1]) \n yassign = np.digitize(data[:,1], bin_edges[1:-1])\n nbins = len(bin_edges) - 1\n flatcount = np.bincount(xassign + yassign * nbins, minlength=nbins*nbins)\n return flatcount.reshape((nbins, nbins))", "def color_hist(im, col_bins):\n assert im.ndim == 3 and im.shape[2] == 3, \"image should be rgb\"\n arr = np.concatenate((im, color.rgb2lab(im)), axis=2).reshape((-1, 6))\n desc = np.zeros((col_bins * 6,), dtype=np.float)\n for i in range(3):\n desc[i * col_bins:(i + 1) * col_bins], _ = np.histogram(\n arr[:, i], bins=col_bins, range=(0, 255))\n desc[i * col_bins:(i + 1) * col_bins] /= np.sum(\n desc[i * col_bins:(i + 1) * col_bins]) + (\n np.sum(desc[i * col_bins:(i + 1) * col_bins]) < 1e-4)\n\n # noinspection PyUnboundLocalVariable\n i += 1\n desc[i * col_bins:(i + 1) * col_bins], _ = np.histogram(\n arr[:, i], bins=col_bins, range=(0, 100))\n desc[i * col_bins:(i + 1) * col_bins] /= np.sum(\n desc[i * col_bins:(i + 1) * col_bins]) + (\n np.sum(desc[i * col_bins:(i + 1) * col_bins]) < 1e-4)\n for i in range(4, 6):\n desc[i * col_bins:(i + 1) * col_bins], _ = np.histogram(\n arr[:, i], bins=col_bins, range=(-128, 127))\n desc[i * col_bins:(i + 1) * col_bins] /= np.sum(\n desc[i * col_bins:(i + 1) * col_bins]) + (\n np.sum(desc[i * col_bins:(i + 1) * col_bins]) < 1e-4)\n return desc", "def compute_histogram(im, block_factor=3, color_space='HSV'):\n\n # Shape = rows and columns\n remainder_rows = im.shape[0] % block_factor\n remainder_cols = im.shape[1] % 
block_factor\n\n im_block = cv2.copyMakeBorder(im, block_factor - remainder_rows, 0, block_factor - remainder_cols, 0,\n cv2.BORDER_CONSTANT)\n\n windowsize_r = int(im_block.shape[0] / block_factor)\n windowsize_c = int(im_block.shape[1] / block_factor)\n\n # print(im_block.shape)\n # print(str(windowsize_r)+' '+str(windowsize_c))\n # cv2.imshow(\"fullImg\", im_block)\n\n hist = []\n for r in range(0, im_block.shape[0], windowsize_r):\n for c in range(0, im_block.shape[1], windowsize_c):\n hist_blocks = []\n window = im_block[r:r + windowsize_r, c:c + windowsize_c]\n if color_space == 'GRAY':\n window_gray = cv2.cvtColor(window, cv2.COLOR_BGR2GRAY)\n hist_block = cv2.calcHist([window_gray], [0], None, [256], [0, 256])\n cv2.normalize(hist_block, hist_block, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)\n hist_blocks.append(hist_block)\n elif color_space == 'RGB':\n hist_block = cv2.calcHist([window], [0], None, [256], [0, 256])\n cv2.normalize(hist_block, hist_block, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)\n hist_blocks.append(hist_block)\n hist_block = cv2.calcHist([window], [1], None, [256], [0, 256])\n cv2.normalize(hist_block, hist_block, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)\n hist_blocks.append(hist_block)\n hist_block = cv2.calcHist([window], [2], None, [256], [0, 256])\n cv2.normalize(hist_block, hist_block, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)\n hist_blocks.append(hist_block)\n elif color_space == 'HSV':\n window = cv2.cvtColor(window, cv2.COLOR_BGR2HSV)\n hist_block = cv2.calcHist([window], [0], None, [256], [0, 256])\n cv2.normalize(hist_block, hist_block, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)\n hist_blocks.append(hist_block)\n hist_block = cv2.calcHist([window], [1], None, [256], [0, 256])\n cv2.normalize(hist_block, hist_block, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)\n hist_blocks.append(hist_block)\n hist_block = cv2.calcHist([window], [2], None, [256], [0, 256])\n cv2.normalize(hist_block, hist_block, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)\n hist_blocks.append(hist_block)\n \n hist.append(hist_blocks)\n\n return hist", "def getCellCount(self, idx = None, cell = 1, verbose = 0):\n\n if idx is None: idx = np.arange(self.atoms.shape[0])\n if isinstance(idx, (int, np.integer)): idx = [idx]\n\n areas = self.getAreas(idx = idx, cell = cell)\n if cell == 1:\n base_area = np.abs(np.linalg.det(self.base_1[:2, :2]))\n elif cell == 2:\n base_area = np.abs(np.linalg.det(self.base_2[:2, :2]))\n\n count = areas / base_area\n\n if verbose > 0:\n string = \"Cell count for cell %i, with %i index, max deviation: %.4E\"\\\n % (cell, len(count), np.max(count - np.round(count, 0)))\n ut.infoPrint(string)\n\n return count", "def compute_histogram(self, image):\n\n # in-built function to calculate histogram\n print(\"size of image: \", np.shape(image))\n print(\"number of pixels: \", np.shape(image)[0] * np.shape(image)[1])\n # hist1 = np.ravel(cv2.calcHist([image], [0], None, [256], [0, 256]))\n # hist = np.ravel(cv2.calcHist([image], [0], None, [256], [0, 256]))\n\n # created function to calculate histogram\n hist = np.zeros(256)\n [rows, columns] = np.shape(image)\n for k in range(256):\n count = 0\n for i in range(rows):\n for j in range(columns):\n if image[i, j] == k:\n count = count + 1\n hist[k] = count\n\n # print(\"Check if histogram is same: \", np.array_equal(hist, hist1))\n\n return hist", "def hog_feature(im):\n # convert rgb to grayscale if needed\n if im.ndim == 3:\n image = rgb2gray(im)\n else:\n image = np.at_least_2d(im)\n\n sx, sy = image.shape # 
image size\n orientations = 9 # number of gradient bins\n cx, cy = (8, 8) # pixels per cell\n\n gx = np.zeros(image.shape)\n gy = np.zeros(image.shape)\n gx[:, :-1] = np.diff(image, n=1, axis=1) # compute gradient on x-direction\n gy[:-1, :] = np.diff(image, n=1, axis=0) # compute gradient on y-direction\n grad_mag = np.sqrt(gx**2 + gy**2) # gradient magnitude\n grad_ori = np.arctan2(\n gx, (gy + 1e-15)) * (180 / np.pi) + 90 # gradient orientation\n\n n_cellsx = int(np.floor(sx / cx)) # number of cells in x\n n_cellsy = int(np.floor(sy / cy)) # number of cells in y\n orientation_histogram = np.zeros((n_cellsx, n_cellsy, orientations))\n for i in range(orientations):\n temp_ori = np.where(grad_ori < (180 / orientations) * (i + 1),\n grad_ori, 0)\n temp_ori = np.where(temp_ori >= (180 / orientations) * i, temp_ori, 0)\n temp_mag = np.where(temp_ori > 0, grad_mag, 0)\n orientation_histogram[:, :, i] = uniform_filter(\n temp_mag, size=(cx, cy))[round(cx / 2)::cx,\n round(cy / 2)::cy].T\n\n return orientation_histogram.ravel()", "def compute_histogram(image, n_bins, color_space=\"RGB\"):\n\n n_channels = 1 if color_space == \"GRAY\" else image.shape[2]\n\n hist_channels = list(range(n_channels))\n hist_bins = [n_bins,]*n_channels\n hist_range = [0, 256]*n_channels\n\n hist = cv.calcHist([image], hist_channels, None, hist_bins,\n hist_range)\n hist = cv.normalize(hist, hist, alpha=0, beta=1,\n norm_type=cv.NORM_MINMAX).flatten() # change histogram range from [0,256] to [0,1]\n return hist", "def histeq( im, nbr_bins = 256):\n\t# get image histogram \n\timhist, bins = histogram( im.flatten(), nbr_bins, normed = True) \n\tcdf = imhist.cumsum() \n\t# cumulative distribution function cdf = 255 * cdf / cdf[-1] \n\t# normalize \n\t# use linear interpolation of cdf to find new pixel values \n\tim2 = interp( im.flatten(), bins[:-1], cdf) \n\treturn im2.reshape( im.shape), cdf", "def histeq(im,nbr_bins=256):\r\n # Calculate histogram of images\r\n imhist,bins = histogram(im.flatten(),nbr_bins,normed=True)\r\n cdf = imhist.cumsum() # cumulative distribution function\r\n cdf = 255 * cdf / cdf[-1] # 归一化\r\n # Using the linear interpolation of cumulative distribution function, the new pixel value is calculated.\r\n im2 = interp(im.flatten(),bins[:-1],cdf)\r\n return im2.reshape(im.shape), cdf", "def numpy_gw_hist(data, bins, scale):\n data = np.atleast_1d(data)\n bins = np.atleast_1d(bins)\n nbins, ndata = bins.size, data.size\n\n scale = np.zeros(ndata) + scale\n\n logsm_bin_matrix = np.repeat(\n bins, ndata).reshape((nbins, ndata)).astype('f4')\n data_matrix = np.tile(data, nbins).reshape((nbins, ndata)).astype('f4')\n smoothing_kernel_matrix = np.tile(\n scale, nbins).reshape((nbins, ndata)).astype('f4')\n\n cdf_matrix = norm.cdf(\n logsm_bin_matrix, loc=data_matrix, scale=smoothing_kernel_matrix)\n\n prob_bin_member = np.diff(cdf_matrix, axis=0) # Shape (nbins-1, ndata)\n\n total_num_bin_members = np.sum(\n prob_bin_member, axis=1) # Shape (nbins-1, )\n\n return total_num_bin_members", "def histeq(im, nbr_bins=256):\n # get image histogram\n imhist, bins = np.histogram(im.flatten(), nbr_bins, normed=True)\n cdf = imhist.cumsum() # cumulative distribution function\n cdf = 255 * cdf / cdf[-1] # normalize\n # use linear interpolation of cdf to find new pixel values\n im2 = np.interp(im.flatten(), bins[:-1], cdf)\n return im2.reshape(im.shape), cdf", "def calculate_histogram(img, channel):\n\n # histogram arrays for each channel\n hist_gs_or_red = np.zeros((256, 1), dtype=np.int32)\n hist_green = 
np.zeros((256, 1), dtype=np.int32)\n hist_blue = np.zeros((256, 1), dtype=np.int32)\n\n # Calculate the histogram for red channel for RGB images\n # or the the first channel for gray-scale of shape (M, N, 1) images.\n if channel == [0]:\n # one-dimensional array\n if img.ndim == 1:\n raise Exception('Cannot calculate the hist of one-dimensional array.')\n\n # if there is one channel, or in case of gray-scale images, it's OK!\n elif img.ndim == 2:\n for pixel in np.ceil(img.flatten()).astype(np.int):\n hist_gs_or_red[pixel] = hist_gs_or_red[pixel] + 1\n\n # an RGB image\n elif img.ndim == 3:\n for pixel in np.ceil(img[:, :, 0:1].flatten()).astype(np.int):\n hist_gs_or_red[pixel] = hist_gs_or_red[pixel] + 1\n\n # more than 3 dimensions\n else:\n raise Exception('Cannot calculate the hist of more than 3-dimensional array.')\n\n return hist_gs_or_red\n\n # Calculate the histogram of green channel for RGB images\n elif channel == [1]:\n # Not 3-D array that represent the image with 3 color channels.\n if img.ndim <= 2:\n raise Exception('Cannot calculate the hist of green channel for non-rgb images/ 3-D array')\n\n # If it's a 3-D array of 3 color channels\n elif img.ndim == 3:\n for pixel in np.ceil(img[:, :, 1:2].flatten()).astype(np.int):\n hist_green[pixel] = hist_green[pixel] + 1\n\n # more than 3 dimensions\n else:\n raise Exception('Cannot calculate the hist of more than 3-dimensional array.')\n return hist_green\n\n # Calculate the histogram of green channel for RGB images\n elif channel == [2]:\n if img.ndim <= 2:\n raise Exception('Cannot calculate the hist of blue channel for non-rgb images/ 3-D array')\n elif img.ndim == 3:\n for pixel in np.ceil(img[:, :, 2:].flatten()).astype(np.int):\n hist_blue[pixel] = hist_blue[pixel] + 1\n return hist_blue\n\n # Invalid value of channel parameter\n else:\n raise Exception('ValueError: only [0], [1], [2] are possible as value for the channel parameter.')", "def histogram(img):\n BINS = 8\n RANGE = np.tile(np.array([0, 255]), (3, 1))\n\n # histogram of the first image\n r = np.ravel(img[:, :, 0])\n g = np.ravel(img[:, :, 1])\n b = np.ravel(img[:, :, 2])\n hist, endpoints = np.histogramdd([r, g, b], bins = BINS, range = RANGE)\n\n # normalize the images\n return hist/np.sum(hist)", "def get_n_cell_atoms(p_state, idx_image=-1, idx_chain=-1):\n return int(_Get_N_Cell_Atoms(ctypes.c_void_p(p_state), ctypes.c_int(idx_image), ctypes.c_int(idx_chain)))", "def entropycell(self):\n cells = [0] * self.k\n for i in range(self.width):\n cells[int(self.config[self.t, i])] += 1\n\n \"\"\"Calculates the Shannon entropy and the the average entropy so far.\"\"\"\n shannon = 0\n for i in range(self.k):\n if(cells[i] != 0):\n probability = cells[i] / self.width\n shannon -= probability * np.log2(probability)\n self.average_cell = (self.average_cell * self.t + shannon) / (self.t + 1)", "def histograms(probs, actual, bins=100):\n actual = actual.astype(np.bool)\n edges, step = np.linspace(0., 1., bins, retstep=True, endpoint=False)\n idx = np.digitize(probs, edges) - 1\n top = np.bincount(idx, weights=actual, minlength=bins)\n bot = np.bincount(idx, weights=(~actual), minlength=bins)\n return top, bot, edges, step", "def component_histograms(image, transform=None):\n if transform == None:\n transform = lambda x: x\n\n shape = image.shape\n assert len(shape) == 3\n _, _, num_comp = shape\n return [np.histogram(transform(image[:, :, [idx]]).flatten(),\n bins=NUM_HIST_BINS, range=(0, 256))[0]\n for idx in xrange(0, num_comp)]", "def count_cells(fpath):\n cells = []\n 
for i in range(40):\n fname = f\"{fpath}/Mesh2d_{i}.vtu\"\n if not os.path.exists(fname):\n print(f\"File {fname} does not exist.\")\n break\n mesh = meshio.read(fname)\n for cell_block in mesh.cells:\n if cell_block.type in (\"triangle\"):\n num_cells = len(cell_block)\n print(f\"{i:2d}: {num_cells:6d} elements, {len(mesh.points):6d} vertices\")\n cells.append(num_cells)\n continue\n return cells", "def compute_histogram_blocks(image, text_box, n_bins, color_space=\"RGB\", block_size=16):\n\n image = cv.cvtColor(image, OPENCV_COLOR_SPACES[color_space])\n\n # image_id = int(ntpath.basename(image_path.replace('.jpg', '')))\n # boxes = pickle.load(open(os.path.join(boxes_path), 'rb'))[image_id][0]\n\n if text_box:\n tlx_init = text_box[0]\n tly_init = text_box[1]\n brx_init = text_box[2]\n bry_init = text_box[3]\n\n sizeX = image.shape[1]\n sizeY = image.shape[0]\n\n hist_concat = None\n \n for i in range(0,block_size):\n for j in range(0, block_size):\n # Image block\n img_cell = image[int(i*sizeY/block_size):int(i*sizeY/block_size) + int(sizeY/block_size) ,int(j*sizeX/block_size):int(j*sizeX/block_size) + int(sizeX/block_size)]\n\n if not text_box:\n hist = compute_histogram(img_cell, n_bins, color_space)\n\n # If there's a text bounding box ignore the pixels inside it\n else:\n tlx = tlx_init-int(j*sizeX/block_size)\n tly = tly_init-int(i*sizeY/block_size)\n brx = brx_init-int(j*sizeX/block_size)\n bry = bry_init-int(i*sizeY/block_size)\n\n img_cell_vector = []\n\n for x in range(img_cell.shape[1]-1):\n for y in range(img_cell.shape[0]-1):\n if not (tlx < x < brx and tly < y < bry):\n img_cell_vector.append(img_cell[y,x,:])\n\n img_cell_vector = np.asarray(img_cell_vector)\n\n n_channels = 1 if color_space == \"GRAY\" else image.shape[2]\n # Using 3D histograms --> total_bins = n_bins_per_channel ^ n_channels\n hist=np.zeros(n_bins**n_channels,dtype=np.float32)\n\n if img_cell_vector.size!=0:\n img_cell_matrix = np.reshape(img_cell_vector,(img_cell_vector.shape[0],1,-1))\n hist = compute_histogram(img_cell_matrix, n_bins, color_space)\n\n if hist_concat is None:\n hist_concat = hist\n else:\n hist_concat = cv.hconcat([hist_concat, hist])\n\n return hist_concat", "def _make_histogram(values, bins):\n values = values.reshape(-1)\n counts, limits = np.histogram(values, bins=bins)\n limits = limits[1:]\n\n sum_sq = values.dot(values)\n return HistogramProto(min=values.min(),\n max=values.max(),\n num=len(values),\n sum=values.sum(),\n sum_squares=sum_sq,\n bucket_limit=limits,\n bucket=counts)", "def equalise_hist(image, bin_count=256):\n # TODO: your histogram equalization code\n #define arrays\n image = img_as_ubyte(image)\n row,col = image.shape\n new_image = np.zeros((row,col),dtype='uint8') \n\n # compute the value of each grayscale,and save in image_hist \n image_hist = np.bincount(image.flatten(), minlength=(bin_count))\n\n # normalise n[]\n norm_arr = (np.cumsum(image_hist)/(image.size))*(bin_count-1)\n norm_arr = norm_arr.astype('uint8')\n \n #Compute a normalized cumulative histogram\n for x in range(row):\n for y in range(col):\n new_image[x,y] = norm_arr[image[x,y]]\n \n return new_image", "def count_cells(rule, n=500):\n ca = Cell1D(rule, n)\n ca.start_single()\n\n res = []\n for i in range(1, n):\n cells = np.sum(ca.array)\n res.append((i, i**2, cells))\n ca.step()\n\n return res", "def histogram(self, mask=None, extrema=None):\r\n uni, counts = self._getcolors()\r\n return [l for l in counts]", "def color_histogram_descriptors(bbdd_paintings, color_channel=0):\n 
X=np.empty((0))\n for painting in bbdd_paintings:\n painting = cv2.cvtColor(painting,cv2.COLOR_BGR2HSV)\n if color_channel==0 or color_channel==1 or color_channel==2:\n col_hist = cv2.calcHist(painting,[color_channel], None, [20], [0,256])\n elif color_channel==-1:\n col1_hist = cv2.calcHist(painting,[0], None, [256], [0,256])\n col2_hist = cv2.calcHist(painting,[1], None, [256], [0,256])\n col3_hist = cv2.calcHist(painting,[2], None, [256], [0,256])\n col_hist = np.concatenate([col1_hist, col2_hist, col3_hist])\n\n if X.size>0:\n X = np.vstack((X, col_hist.T))\n else:\n X = col_hist.T\n return X", "def __get_color_histogram(self, image, seed, hist_res):\n \n L=[]\n N=len(seed)\n for i in range(N):\n \n L.append(image[seed[i][1],seed[i][0]])\n image_part=np.array(L)\n \n \n hist, bins= np.histogramdd(image_part,bins=hist_res,range=((0,255),(0,255),(0,255)) )\n #hist= ndimage.gaussian_filter(hist,sigma=7) # Gaussian smoothing\n\n return hist /np.linalg.norm(hist)" ]
[ "0.71106833", "0.6293851", "0.6281191", "0.61843", "0.61735624", "0.60875905", "0.59519565", "0.5949663", "0.5861934", "0.5823168", "0.57868886", "0.57169867", "0.570334", "0.5700379", "0.56820166", "0.56749356", "0.56629336", "0.5627494", "0.5611744", "0.559957", "0.5583366", "0.55659294", "0.554491", "0.5517866", "0.5517856", "0.5501054", "0.55007666", "0.5499086", "0.54906434", "0.54716736" ]
0.7789359
0
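The top-ranked negative in the row above (cell_based_hist_segmentation) shows how the w_ranges/h_ranges arguments of the document's function are built before the call. A short usage sketch follows, assuming obtain_celled_histograms from this row is in scope and that numpy and opencv-python are installed; the random stand-in image is an assumption.

    import numpy as np

    cells = [32, 32]
    img = np.random.randint(0, 256, size=(256, 256, 3), dtype=np.uint8)  # stand-in RGB image

    w, h = img.shape[:2]
    w_ranges = [(i * w) // cells[0] for i in range(cells[0])] + [-1]
    h_ranges = [(i * h) // cells[1] for i in range(cells[1])] + [-1]

    # 32x32 grid; each entry concatenates a Cb/Cr 2D histogram with an L-channel histogram.
    celled_hist = obtain_celled_histograms(img, cells, w_ranges, h_ranges)
    print(len(celled_hist), len(celled_hist[0]), celled_hist[0][0].shape)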
Update finding with new id and updatedAt fields.
def update_finding_id(finding, new_id, updated_at=None):
    finding["Id"] = new_id
    if updated_at:
        finding["UpdatedAt"] = updated_at
    return finding
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self, table, id, **kwargs):\n pairs = [column + ' = %s' for column in kwargs.keys()]\n values = kwargs.values()\n if 'changed_by' in self.get_columns(table):\n pairs += ['changed_by = %s', 'date_changed = now()']\n values += [self.user_id]\n self.db.execute(\n 'update %s set %s where %s = %%s' %\n (table, ', '.join(pairs), table + '_id'), *(values + [id]))\n return id", "def update(self, commit=True, **kwargs):\n # Prevent changing IDS\n kwargs.pop('id', None)\n for attr, value in kwargs.iteritems():\n # Flask-restful makes everything None by default\n if value is not None:\n setattr(self, attr, value)\n return commit and self.save() or self", "def update(self, **kwargs):\n expr = self.model.__table__.update().where(self.query).values(**kwargs)\n return self._runquery(expr)", "def update(self, async=True):\n\n self.update_element(\n condition=QueryBuilder.build_pk_clause(self.__table__, **self.get_pk_fields()),\n async=async, **self.get_non_pk_fields(filtered=True)\n )", "def update(self, id: str, **kwargs: dict):\n kwargs = self._preprocess(**kwargs)\n j = self._jsonify(kwargs)\n\n if isinstance(id, uuid.UUID):\n id = str(id)\n\n with rconnect() as conn:\n query = self.q.get(id).update(j, return_changes=True)\n rv = query.run(conn)\n if len(rv['changes']):\n return self.__model__(rv['changes'][0]['new_val'])\n else:\n return self.get(id)", "def update(self):\n values = {}\n for field in self.fields():\n attr = object.__getattribute__(self, field)\n if not attr.auto_value and attr._updated:\n values[field] = getattr(self, field)\n object.__setattr__(attr, '_updated', False)\n result = self.where({self.__class__.get_primary():self.primary})\n if len(values) == 0:\n logging.warning('update() called on model with no changed fields.')\n return None\n return result.update(values)[0]", "def updated_by_id(self, updated_by_id):\n self._updated_by_id = updated_by_id", "def update(self, document_id, update_spec, namespace, timestamp):\n\n index, doc_type = self._index_and_mapping(namespace)\n document = self.BulkBuffer.get_from_sources(index,doc_type,u(document_id))\n if document:\n updated = self.apply_update(document, update_spec)\n # _id is immutable in MongoDB, so won't have changed in update\n updated['_id'] = document_id\n self.upsert(updated, namespace, timestamp)\n else:\n updated = {\"_id\": document_id}\n self.upsert(updated, namespace, timestamp, update_spec)\n # upsert() strips metadata, so only _id + fields in _source still here\n return updated", "def update(self, dt):\n for obj in self.objects:\n obj.update(dt)", "def update(self, **kwargs):\n return self.manager.update(self, **kwargs)", "def update(self, **kwargs):\n return self.manager.update(self, **kwargs)", "def update(self, **kwargs):\n return self.manager.update(self, **kwargs)", "def update(self, request, pk=None):\n\n return Response({'http_method':'PUT'})", "def update(self, request, pk=None): #update a specific object\n return Response({'http_method': 'PUT'})", "def update(cls, query_filter, query_update):\n if query_update.get('$set') and not query_update.get('$set').get('updated'):\n query_update['$set']['updated'] = datetime.datetime.utcnow()\n\n return mongo_db[cls.__collection__].update_one(\n query_filter,\n query_update\n )", "def update(self, request, pk=None):\n return Response({'http_method': 'PUT'})", "def update(self, request, pk=None):\n\n return Response({'http_method': 'PUT'})", "def update(self, record):\n record = dict_for_mongo(record)\n id_dict = {'_id': self.record['_id']}\n 
self.collection.update(id_dict, {'$set': record})\n\n # Set record to the latest record from the database\n self.record = self.__class__.collection.find_one(id_dict)", "def patch_record(\n self, id_: str, fields: Dict[str, Union[str, list, None]]\n ) -> None:\n instance = self._get(id_)\n instance.update(fields)\n self.db.session.commit()", "def record_update_for_project_by_id(record_id, values):\n values['updated_at'] = datetime.datetime.utcnow()\n\n session = get_session()\n with session.begin():\n record_ref = get_project_record_by_id(record_id, session=session)\n record_ref.update(values)\n record_ref.save(session=session)\n\n return record_ref", "def update_db_record(self, update_body: dict):\n for attribute, value in update_body.items():\n if attribute in self._update_allowed_fields:\n setattr(self, attribute, value)\n self.updated_at = datetime.now()\n self.save()", "def update(cls, row_id, **kwargs):\n cls.delete(row_id)\n # obj = cls.query.filter_by(id=row_id).first()\n # for k, v in kwargs.items():\n # obj[k] = v\n # obj = cls.query.filter_by(id=row_id).update(kwargs)\n kwargs[\"id\"] = row_id\n obj = cls(**kwargs)\n #print(\"the type of updated object is\", type(obj))\n return commit(obj)", "def updated_at(self, updated_at: \"datetime\"):\n self._attrs[\"updatedAt\"] = updated_at", "def updated_at(self, updated_at: \"datetime\"):\n self._attrs[\"updatedAt\"] = updated_at", "def updated_at(self, updated_at: \"datetime\"):\n self._attrs[\"updatedAt\"] = updated_at", "def update(\n self,\n *args: Union[dict, Mapping],\n session: Optional[ClientSession] = None\n ):\n self.set_session(session=session)\n return (\n self.UpdateQueryType(\n document_model=self.document_model,\n find_query=self.get_filter_query(),\n )\n .update(*args)\n .set_session(session=self.session)\n )", "def update(self, **kwargs):\n return self._object.update(meta=kwargs)", "def save(self):\n self.updated_at = datetime.now()", "def update(self,request,pk = None):\n return Response({'http_method':'PUT'})", "def update(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"update\"), kwargs)" ]
[ "0.61465585", "0.61105394", "0.609029", "0.60817885", "0.59753263", "0.5965248", "0.5913162", "0.58955085", "0.58839965", "0.58768344", "0.58768344", "0.58768344", "0.58369416", "0.5829934", "0.5824282", "0.58113456", "0.58023703", "0.57954216", "0.57895267", "0.5767917", "0.5765524", "0.57544273", "0.57542574", "0.57542574", "0.57542574", "0.5739307", "0.57017523", "0.5685842", "0.56827223", "0.56061476" ]
0.7418794
0
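A minimal usage sketch for the document in the row above, assuming update_finding_id from the row is in scope; the finding contents and the ISO-8601 timestamp are assumptions (the function itself only touches the Id and UpdatedAt keys).

    from datetime import datetime, timezone

    finding = {"Id": "finding/original-001", "UpdatedAt": "2023-01-01T00:00:00Z"}

    updated = update_finding_id(
        finding,
        new_id="finding/copy-001",
        updated_at=datetime.now(timezone.utc).isoformat(),
    )
    print(updated["Id"], updated["UpdatedAt"])  # same dict, mutated in place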
For each depth, generate a polygon describing the cut on the "north" side.
def generate_cuts(depths, side=SIDE_LENGTH):
    for num, den in depths:
        ad = num * side / den
        poly = Polygon([(0, 0), (side, 0), (side, ad), (0, ad)])
        yield poly
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generatePolygons():", "def regular_polygon(sides, radius, height):\n global _cmds\n _cmds = \"}\\n\\n\" + _cmds\n for wedge in range(sides):\n p1 = _cart(radius, wedge*360/sides)\n p2 = _cart(radius, (wedge+1)*360/sides)\n triangle([0, 0], p1, p2, height)\n _cmds = \"union(){\\n\" + _cmds", "def get_dancefloor_area(self):\n cords = []\n\n x1 = self.coordinates[0]\n y1 = self.coordinates[1]\n x2 = self.coordinates[0] + self.width\n y2 = self.coordinates[1] + self.height\n if self.width <= 0:\n x1, x2 = x2, x1\n if self.height <= 0:\n y1, y2 = y2, y1\n\n for x in range(x1, x2):\n for y in range(y1, y2):\n if x % 20 == 0 and y % 20 == 0:\n cords.append([x, y])\n return cords", "def _crop_region(polygons, left, bottom, right, top, precision):\n cropped_polygons = []\n for p in polygons:\n clipped_polys = clipper._chop(p, [top, bottom], 1, 1 / precision)\n # polygon, [cuts], axis, scale\n for cp in clipped_polys[1]:\n result = clipper._chop(cp, [left, right], 0, 1 / precision)\n cropped_polygons += list(result[1])\n return cropped_polygons", "def copy_mean_depths_to_subgrid_outside_polygon(self,polygon_shp,overwrite=True,cells=True,edges=True):\n\n subgrid_regions = self.read_polygon_shp(polygon_shp)\n\n if cells:\n area = self.cells_area()\n depth = self.cells['depth_mean']\n # funny indexing to add unit dimension,\n # and zip to make these into tuples like ([area[0]],[depth[0]])\n for c in range(self.Ncells()):\n if overwrite or self.cells['subgrid'][c]==0:\n cell_nodes = self.cells['nodes'][c]\n print( cell_nodes )\n print( self.nodes['x'][cell_nodes] )\n cell_poly = geometry.Polygon(np.asarray(self.nodes['x'][cell_nodes]))\n # check for intersection (boolean)\n intersect = self.check_for_intersection(cell_poly,subgrid_regions)\n # reset depths OUTSIDE of intersection region\n if not intersect:\n self.cells['subgrid'][c] = (area[c,None],depth[c,None])\n\n if edges:\n length = self.edges_length()\n depth = self.edges['depth_mean']\n # funny indexing to add unit dimension,\n # and zip to make these into tuples like ([area[0]],[depth[0]])\n for j in range(self.Nedges()):\n if overwrite or self.edges['subgrid'][j]==0:\n edge_nodes = self.edges['nodes'][j]\n edge_line = geometry.LineString(self.nodes['x'][edge_nodes])\n # check for intersection (boolean)\n intersect = self.check_for_intersection(edge_line,subgrid_regions)\n # reset depths OUTSIDE of intersection region\n if not intersect:\n self.edges['subgrid'][j] = (length[j,None],depth[j,None])", "def draw_initial_polygon( sides = 6, radius = 1.0, center = Vector((0,0,0)) ):\n\n points = []\n edges = []\n step = ( 2.0 / sides )\n i = 0\n\n for i in range( sides ):\n t = ( i * step )\n x1 = cos( t * pi ) * radius\n y1 = sin( t * pi ) * radius\n\n points.append( center + Vector(( x1, y1, 0 )) )\n\n for i in range( len(points) ):\n edge = []\n\n if i == len( points ) - 1:\n edge.append( i )\n edge.append( 0 )\n else:\n edge.append( i )\n edge.append( i + 1)\n\n edges.append( tuple( edge ) )\n\n return { 'verts' : points, 'edges' : edges }", "def makeup_polygons(\n draw: ImageDraw,\n num_cells: int,\n width: int,\n height: int,\n rgb_im: Image,\n random: bool,\n):\n voronoi, points = generate_voronoi_diagram(num_cells, width, height)\n for point, index in zip(points, voronoi.point_region):\n # Getting the region of the given point\n region = voronoi.regions[index]\n # Getting the points in arrays\n polygon = list()\n for i in region:\n # If vector is out of plot do not add\n if i != -1:\n polygon.append(voronoi.vertices[i])\n # Make tuples 
of the points\n polygon_tuples = list()\n for l in polygon:\n polygon_tuples.append(tuple(l))\n rgb = (0, 0, 0)\n if random:\n # Get random color\n rgb = random_color()\n else:\n # Get colors of the middle point\n rgb = get_color_of_point(point, rgb_im, width, height)\n # Draw the calculated polygon with the color of the middle point\n if polygon and polygon_tuples:\n draw.polygon(polygon_tuples, rgb)", "def test_polygons_with_inner_rings(self):\n\n # Define two (closed) outer rings - clock wise direction\n outer_rings = [numpy.array([[106.79, -6.233],\n [106.80, -6.24],\n [106.78, -6.23],\n [106.77, -6.21],\n [106.79, -6.233]]),\n numpy.array([[106.76, -6.23],\n [106.72, -6.23],\n [106.72, -6.22],\n [106.72, -6.21],\n [106.76, -6.23]])]\n\n tmp_filename = unique_filename(suffix='.shp')\n\n # Do outer rings first (use default geometry type polygon)\n v_ref = Vector(geometry=outer_rings)\n assert v_ref.is_polygon_data\n\n v_ref.write_to_file(tmp_filename)\n v_file = read_layer(tmp_filename)\n assert v_file == v_ref\n assert v_file.is_polygon_data\n\n # Do it again but with (closed) inner rings as well\n\n # Define inner rings (counter clock wise)\n inner_rings = [\n # 2 rings for feature 0\n [numpy.array([[106.77827, -6.2252],\n [106.77775, -6.22378],\n [106.78, -6.22311],\n [106.78017, -6.22530],\n [106.77827, -6.2252]])[::-1],\n numpy.array([[106.78652, -6.23215],\n [106.78642, -6.23075],\n [106.78746, -6.23143],\n [106.78831, -6.23307],\n [106.78652, -6.23215]])[::-1]],\n # 1 ring for feature 1\n [numpy.array([[106.73709, -6.22752],\n [106.73911, -6.22585],\n [106.74265, -6.22814],\n [106.73971, -6.22926],\n [106.73709, -6.22752]])[::-1]]]\n\n polygons = []\n for i, outer_ring in enumerate(outer_rings):\n p = Polygon(outer_ring=outer_ring, inner_rings=inner_rings[i])\n polygons.append(p)\n\n v_ref = Vector(geometry=polygons)\n assert v_ref.is_polygon_data\n data_bbox = v_ref.get_bounding_box()\n\n # Check data from Vector object\n geometry = v_ref.get_geometry(as_geometry_objects=True)\n for i, g in enumerate(geometry):\n assert numpy.allclose(g.outer_ring, outer_rings[i])\n if i == 0:\n assert len(g.inner_rings) == 2\n else:\n assert len(g.inner_rings) == 1\n\n for j, ring in enumerate(inner_rings[i]):\n assert numpy.allclose(ring, g.inner_rings[j])\n\n # Write to file and read again\n v_ref.write_to_file(tmp_filename)\n #print 'With inner rings, written to ', tmp_filename\n v_file = read_layer(tmp_filename)\n assert v_file == v_ref\n assert v_file.is_polygon_data\n assert numpy.allclose(v_file.get_bounding_box(), data_bbox,\n rtol=1.0e-12, atol=1.0e-12)\n\n # Check data from file\n geometry = v_file.get_geometry(as_geometry_objects=True)\n for i, g in enumerate(geometry):\n assert numpy.allclose(g.outer_ring, outer_rings[i])\n if i == 0:\n assert len(g.inner_rings) == 2\n else:\n assert len(g.inner_rings) == 1\n\n for j, ring in enumerate(inner_rings[i]):\n assert numpy.allclose(ring, g.inner_rings[j])", "def viewpoly(self, depth: Number) -> np.ndarray:\n cy = self.imgsz[1] / 2 + self.c[1]\n uv = np.array([(0, cy), (self.imgsz[0], cy)])\n xyz = self.uv_to_xyz(uv, directions=False, depth=depth)\n return np.row_stack([self.xyz, xyz, self.xyz])", "def add_floor_corners(mesh, tile):\n corner_directions = [[BmeshFactory.W], [BmeshFactory.W, BmeshFactory.N], [BmeshFactory.N]]\n tile_below = tile.get_tile_in_direction([], -1)\n ceiling_below = False\n if tile_below is not None:\n ceiling_below = True\n for d in Direction:\n corner_directions[1][1] = d\n corner_directions[2][0] = d\n l = 
len(mesh.verts)\n add_corner = False\n try:\n if tile.terrain.extend_to:\n for e in corner_directions:\n neighbor_tile = tile.get_tile_in_direction(e)\n if neighbor_tile is None or (neighbor_tile.terrain.extend_to and not tile.terrain.make_edges_to):\n add_corner = True\n neighbor_tile = tile.get_tile_in_direction([d])\n if neighbor_tile is None or neighbor_tile.terrain.make_edges_to:\n mesh.from_object(bpy.data.objects['FLOOR_Cen'], bpy.context.scene)\n # for tiles that do not get extended to but help connect diagonals\n if tile.terrain.connect_diag and tile.terrain.make_edges_to:\n neighbor_tile1 = tile.get_tile_in_direction(corner_directions[0])\n neighbor_tile2 = tile.get_tile_in_direction(corner_directions[2])\n if neighbor_tile1.terrain.extend_to and neighbor_tile2.terrain.extend_to and \\\n not neighbor_tile1.terrain.make_edges_to and not neighbor_tile2.terrain.make_edges_to:\n add_corner = True\n mesh.from_object(bpy.data.objects['FLOOR_OD'], bpy.context.scene)\n except AttributeError:\n pass\n if add_corner:\n num_walls = 0\n for e in corner_directions:\n neighbor_tile = tile.get_tile_in_direction(e)\n if neighbor_tile is not None and neighbor_tile.terrain.terrain_type == TerrainType.WALL:\n num_walls += 1\n if num_walls < 3:\n mesh.from_object(bpy.data.objects['FLOOR_CORNER'], bpy.context.scene)\n if ceiling_below:\n BmeshFactory.add_ceiling_single_corner(mesh, tile_below, corner_directions, True)\n try:\n neighbor_tile = tile.get_tile_in_direction(corner_directions[0])\n diag_tile = tile.get_tile_in_direction(corner_directions[1])\n if neighbor_tile is None or neighbor_tile.terrain.make_edges_to:\n if diag_tile is None or not (diag_tile.terrain.extend_to and neighbor_tile.terrain.connect_diag)\\\n or not neighbor_tile.terrain.connect_diag or not diag_tile.terrain.connect_diag:\n mesh.from_object(bpy.data.objects['FLOOR_Cor0'], bpy.context.scene)\n neighbor_tile = tile.get_tile_in_direction(corner_directions[2])\n if neighbor_tile is None or neighbor_tile.terrain.make_edges_to:\n if diag_tile is None or not (diag_tile.terrain.extend_to and neighbor_tile.terrain.connect_diag)\\\n or not neighbor_tile.terrain.connect_diag or not diag_tile.terrain.connect_diag:\n mesh.from_object(bpy.data.objects['FLOOR_Cor2'], bpy.context.scene)\n except AttributeError:\n print(\"unexpected None Type Attribute Error\")\n elif tile.terrain.extend_to:\n mesh.from_object(bpy.data.objects['FLOOR_ID'], bpy.context.scene)\n bmesh.ops.rotate(mesh, verts=mesh.verts[l:len(mesh.verts)], cent=BmeshFactory.center, matrix=BmeshFactory.rot_dict[d])\n corner_directions[0][0] = d\n corner_directions[1][0] = d", "def fill_depth(depth):\n ht, wd = depth.shape\n x, y = np.meshgrid(np.arange(wd), np.arange(ht))\n xx = x[depth > 0].astype(np.float32)\n yy = y[depth > 0].astype(np.float32)\n zz = depth[depth > 0].ravel()\n return interpolate.griddata((xx, yy), zz, (x, y), method='nearest')", "def findPolygons(self):\n # perform marching cubes algorithm\n for x in range(self.worldSize - 1):\n for y in range(self.worldSize - 1):\n for z in range(self.worldSize - 1):\n # format values for entry\n values = [self.world[x][y][z], self.world[x + 1][y][z], self.world[x + 1][y + 1][z],\n self.world[x][y + 1][z],\n self.world[x][y][z + 1], self.world[x + 1][y][z + 1], self.world[x + 1][y + 1][z + 1],\n self.world[x][y + 1][z + 1]]\n # perform marchine cubes\n self.polygons[x][y][z] = marchingCubesPolygons(values, self.worldThreshold)", "def createEntrances(self):\n\t\tfor x in range(self.width):\n\t\t\tif self.isFloor(x, 
1):\n\t\t\t\tself.setFloor(x, 0)\n\t\t\t\tbreak\n\t\tfor x in range(self.width - 1, 0, -1):\n\t\t\tif self.isFloor(x, self.height - 2):\n\t\t\t\tself.setFloor(x, self.height - 1)\n\t\t\t\tbreak", "def convert_lane_boundaries_to_polygon(right_lane_bounds: np.ndarray, left_lane_bounds: np.ndarray) -> np.ndarray:\n assert right_lane_bounds.shape[0] == left_lane_bounds.shape[0]\n polygon = np.vstack([right_lane_bounds, left_lane_bounds[::-1]])\n polygon = np.vstack([polygon, right_lane_bounds[0]])\n return polygon", "def polygon(n,r):\n \n window = turtle.Screen()\n\n david = turtle.Turtle()\n david.pensize(2)\n\n a = float(360 / n) \t\t #this is the angle the turtle will turn each time\n l = 2 * (math.sin(math.radians(a / 2)) * r) #this is the length of the sides\n\n david.penup()\n david.speed(0)\n david.right(90)\n david.forward(r * math.cos(math.radians(a / 2)))\n david.right(90)\n david.forward(l / 2)\n david.left(180)\n david.pendown()\n david.speed(1/2)\n\n for x in range(n):\n david.forward(l)\n david.left(a)", "def draw_pyramid(self):\n for item in self.subdivision_list:\n glBegin(GL_POLYGON)\n glColor3f(0.5, 0.5, 0.5)\n glVertex3f(item[0].x, item[0].y, item[0].z)\n glVertex3f(item[1].x, item[1].y, item[1].z)\n glVertex3f(item[2].x, item[2].y, item[2].z)\n glEnd()", "def optimal_polygon(y, w=0.5, debug=False):\n # Make sure that we use numpy array\n y = np.array(y)\n x = np.arange(len(y))\n\n # Initialization\n y = np.round(y, 6)\n p_plus = (x[0], y[0] + w)\n l_plus = (x[0], y[0] + w)\n r_plus = (x[1], y[1] + w)\n s_plus = {(x[0], y[0] + w): (x[1], y[1] + w)}\n t_plus = {(x[1], y[1] + w): (x[0], y[0] + w)}\n p_minus = (x[0], y[0] - w)\n l_minus = (x[0], y[0] - w)\n r_minus = (x[1], y[1] - w)\n s_minus = {(x[0], y[0] - w): (x[1], y[1] - w)}\n t_minus = {(x[1], y[1] - w): (x[0], y[0] - w)}\n q = []\n i = 2\n\n while i < len(y):\n # Updating CH_plus (convex hull) and CH_minus\n p = (x[i - 1], y[i - 1] + w)\n p_i_plus = (x[i], y[i] + w)\n while (p != p_plus) and _angle(p_i_plus, p, t_plus[p], '+') > np.pi:\n p = t_plus[p]\n s_plus[p] = p_i_plus\n t_plus[p_i_plus] = p\n\n p = (x[i - 1], y[i - 1] - w)\n p_i_minus = (x[i], y[i] - w)\n while (p != p_minus) and _angle(p_i_minus, p, t_minus[p], '-') > np.pi:\n p = t_minus[p]\n s_minus[p] = p_i_minus\n t_minus[p_i_minus] = p\n\n # Check if CH_plus and CH_minus intersect\n if _angle(p_i_plus, l_plus, r_minus, '+') < np.pi:\n q.append((_intersect(l_plus, r_minus, p_plus, p_minus), l_plus, r_minus, p_plus, p_minus))\n p_minus = r_minus\n p_plus = _intersect(l_plus, r_minus, (x[i - 1], y[i - 1] + w), p_i_plus)\n s_plus[p_plus] = p_i_plus\n t_plus[p_i_plus] = p_plus\n r_plus = p_i_plus\n r_minus = p_i_minus\n l_plus = p_plus\n l_minus = p_minus\n while _angle(l_minus, r_plus, s_minus[l_minus], '-') < np.pi:\n l_minus = s_minus[l_minus]\n elif _angle(p_i_minus, l_minus, r_plus, '-') < np.pi:\n q.append((_intersect(l_minus, r_plus, p_minus, p_plus), l_minus, r_plus, p_minus, p_plus))\n p_plus = r_plus\n p_minus = _intersect(l_minus, r_plus, (x[i - 1], y[i - 1] - w), p_i_minus)\n s_minus[p_minus] = p_i_minus\n t_minus[p_i_minus] = p_minus\n r_minus = p_i_minus\n r_plus = p_i_plus\n l_minus = p_minus\n l_plus = p_plus\n while _angle(l_plus, r_minus, s_plus[l_plus], '+') < np.pi:\n l_plus = s_plus[l_plus]\n else:\n # Updating the two seperating and supporting lines\n if _angle(p_i_plus, l_minus, r_plus, '+') < np.pi:\n r_plus = p_i_plus\n while _angle(p_i_plus, l_minus, s_minus[l_minus], '+') < np.pi:\n l_minus = s_minus[l_minus]\n\n if _angle(p_i_minus, 
l_plus, r_minus, '-') < np.pi:\n r_minus = p_i_minus\n while _angle(p_i_minus, l_plus, s_plus[l_plus], '-') < np.pi:\n l_plus = s_plus[l_plus]\n i += 1\n\n # Add last change point\n a = _intersect(l_plus, r_minus, p_plus, p_minus)\n b = _intersect(l_minus, r_plus, p_minus, p_plus)\n p = ((a[0] + b[0]) / 2, (a[1] + b[1]) / 2)\n q.append((p, r_minus, r_plus, p_minus, p_plus))\n\n end_a = _intersect(p, r_plus, p_i_minus, p_i_plus)\n end_b = _intersect(p, r_minus, p_i_minus, p_i_plus)\n end = ((end_a[0] + end_b[0]) / 2, (end_a[1] + end_b[1]) / 2)\n q.append((end, (None, None), (None, None), p_i_minus, p_i_plus))\n\n if debug:\n return np.array(q)\n else:\n return np.array([o[0] for o in q])", "def polygonpts(nSides, radius=1.0):\n\treturn [[cos(theta)*radius, sin(theta)*radius] for theta in frange(0, twopi, nSides+1)[:-1] ]", "def boundaries_and_initialize():\n greenLower = (29, 86, 6) # define the lower and upper boundaries of the \"green\"\n greenUpper = (64, 255, 255)\n pts = [((200,300),(255,255,255), 0)]\n blanks = []\n linecolor = (0,0,0)\n counter = 1\n radius = 11\n return greenLower, greenUpper, pts, linecolor, counter, blanks, radius", "def produce_polygon(polygon_ordered_coordinates: List, zoom: int, plot_polygon: bool = False) -> Path:\n polygon_tile_points = []\n for item in polygon_ordered_coordinates:\n polygon_tile_points += [Utility.get_tile(*item, zoom)]\n polygon_tile_points += [polygon_tile_points[0]]\n polygon = Path(polygon_tile_points)\n if plot_polygon:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n patch = patches.PathPatch(polygon, facecolor='orange', lw=2)\n ax.add_patch(patch)\n ax.set_xlim(min(polygon_tile_points, key = lambda item: item[0])[0], max(polygon_tile_points, key = lambda item: item[0])[0])\n ax.set_ylim(min(polygon_tile_points, key = lambda item: item[1])[1], max(polygon_tile_points, key = lambda item: item[1])[1])\n plt.show()\n return polygon", "def render_fill_2d(self, **kwds):\n poly = [polygon2d(self.coordinates_of(p), **kwds) \n for p in self.polygons]\n return sum(poly)", "def _createpoly(self):\n return self.cv.create_polygon((0, 0, 0, 0, 0, 0), fill=\"\", outline=\"\")", "def get_polygonsets(self, depth=None):\n if not isinstance(self.ref_cell, Cell):\n return []\n if self.rotation is not None:\n ct = numpy.cos(self.rotation * numpy.pi / 180.0)\n st = numpy.sin(self.rotation * numpy.pi / 180.0) * _mpone\n if self.x_reflection:\n xrefl = numpy.array((1, -1))\n if self.magnification is not None:\n mag = numpy.array((self.magnification, self.magnification), dtype=float)\n if self.origin is not None:\n orgn = numpy.array(self.origin)\n polygonsets = self.ref_cell.get_polygonsets(depth=depth)\n for ps in polygonsets:\n for ii in range(len(ps.polygons)):\n if self.x_reflection:\n ps.polygons[ii] = ps.polygons[ii] * xrefl\n if self.magnification is not None:\n ps.polygons[ii] = ps.polygons[ii] * mag\n if self.rotation is not None:\n ps.polygons[ii] = (\n ps.polygons[ii] * ct + ps.polygons[ii][:, ::-1] * st\n )\n if self.origin is not None:\n ps.polygons[ii] = ps.polygons[ii] + orgn\n return polygonsets", "def get_polygonsets(self, depth=None):\n if not isinstance(self.ref_cell, Cell):\n return []\n if self.rotation is not None:\n ct = numpy.cos(self.rotation * numpy.pi / 180.0)\n st = numpy.sin(self.rotation * numpy.pi / 180.0) * _mpone\n if self.x_reflection:\n xrefl = numpy.array((1, -1))\n if self.magnification is not None:\n mag = numpy.array((self.magnification, self.magnification), dtype=float)\n if self.origin is not None:\n orgn = 
numpy.array(self.origin)\n polygonsets = self.ref_cell.get_polygonsets(depth=depth)\n array = []\n for i in range(self.columns):\n for j in range(self.rows):\n spc = numpy.array([self.spacing[0] * i, self.spacing[1] * j])\n for polygonset in polygonsets:\n ps = libcopy.deepcopy(polygonset)\n for ii in range(len(ps.polygons)):\n if self.magnification is not None:\n ps.polygons[ii] = ps.polygons[ii] * mag + spc\n else:\n ps.polygons[ii] = ps.polygons[ii] + spc\n if self.x_reflection:\n ps.polygons[ii] = ps.polygons[ii] * xrefl\n if self.rotation is not None:\n ps.polygons[ii] = (\n ps.polygons[ii] * ct + ps.polygons[ii][:, ::-1] * st\n )\n if self.origin is not None:\n ps.polygons[ii] = ps.polygons[ii] + orgn\n array.append(ps)\n return array", "def expanded_boundaries(self):\n width = self._points[0][3][0] - self._points[0][1][0]\n height = self._points[0][3][1] - self._points[0][1][1]\n factor = np.multiply((width, height), Window.BORDER)\n return (\n np.subtract(self._points[0][1], factor),\n np.add(self._points[0][3], factor))", "def make_polygon(*coords):\n global GEOMETRY_SURF, POLYGONS,col\n if len(coords) < 3:\n print(\"Warning: Invalid polygon passed, ignoring...\")\n return\n start = coords[0]\n prev = coords[0]\n for coord in coords:\n POLYGONS |= {coord}\n line = Boundary(prev[0],prev[1],coord[0],coord[1]) #add segment to WALL list\n prev = coord\n line = Boundary(prev[0], prev[1],start[0],start[1])\n #now draw poly\n pygame.draw.polygon(GEOMETRY_SURF,col[\"SHAPECOL\"], coords)\n return", "def depth_conversion(point_depth, w, h, f):\n i_c = np.float(h) / 2 - 1\n j_c = np.float(w) / 2 - 1\n columns, rows = np.meshgrid(np.linspace(0, w - 1, num=w), np.linspace(0, h - 1, num=h))\n distance_from_center = ((rows - i_c) ** 2 + (columns - j_c) ** 2) ** 0.5\n return point_depth / (1 + (distance_from_center / f) ** 2) ** 0.5", "def make_boundary_wall(self, height, width) -> None:\n for x in range(0, width):\n Wall(self, x, 0)\n Wall(self, x, height - 1)\n for y in range(1, height - 1):\n Wall(self, 0, y)\n Wall(self, width - 1, y)", "def test_clip_points_by_polygons_with_holes0(self):\n\n # Define an outer ring\n outer_ring = numpy.array([[106.79, -6.233],\n [106.80, -6.24],\n [106.78, -6.23],\n [106.77, -6.21],\n [106.79, -6.233]])\n\n # Define inner rings\n inner_rings = [numpy.array([[106.77827, -6.2252],\n [106.77775, -6.22378],\n [106.78, -6.22311],\n [106.78017, -6.22530],\n [106.77827, -6.2252]])[::-1],\n numpy.array([[106.78652, -6.23215],\n [106.78642, -6.23075],\n [106.78746, -6.23143],\n [106.78831, -6.23307],\n [106.78652, -6.23215]])[::-1]]\n\n v = Vector(geometry=[Polygon(outer_ring=outer_ring,\n inner_rings=inner_rings)])\n assert v.is_polygon_data\n\n # Write it to file\n tmp_filename = unique_filename(suffix='.shp')\n v.write_to_file(tmp_filename)\n\n # Read polygon it back\n L = read_layer(tmp_filename)\n P = L.get_geometry(as_geometry_objects=True)[0]\n\n outer_ring = P.outer_ring\n inner_ring0 = P.inner_rings[0]\n inner_ring1 = P.inner_rings[1]\n\n # Make some test points\n points = generate_random_points_in_bbox(outer_ring, 1000, seed=13)\n\n # Clip to outer ring, excluding holes\n indices = inside_polygon(points, P.outer_ring, holes=P.inner_rings)\n\n # Sanity\n for point in points[indices, :]:\n # Must be inside outer ring\n assert is_inside_polygon(point, outer_ring)\n\n # But not in any of the inner rings\n assert not is_inside_polygon(point, inner_ring0)\n assert not is_inside_polygon(point, inner_ring1)\n\n if False:\n # Store for visual check\n pol = 
Vector(geometry=[P])\n tmp_filename = unique_filename(suffix='.shp')\n pol.write_to_file(tmp_filename)\n print 'Polygon with holes written to %s' % tmp_filename\n\n pts = Vector(geometry=points[indices, :])\n tmp_filename = unique_filename(suffix='.shp')\n pts.write_to_file(tmp_filename)\n print 'Clipped points written to %s' % tmp_filename", "def cutout(self, centre, radius):" ]
[ "0.6352084", "0.5903977", "0.57167846", "0.57037365", "0.56498015", "0.5617988", "0.5543349", "0.55307436", "0.549076", "0.5488289", "0.54753643", "0.547494", "0.54746133", "0.5447459", "0.54430604", "0.5431998", "0.54062235", "0.5366982", "0.53622246", "0.53498185", "0.5343422", "0.53354263", "0.5328841", "0.53280383", "0.53143", "0.53131765", "0.5308632", "0.53008187", "0.52808297", "0.52777725" ]
0.81020045
0
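Each row's metadata declares a triplet objective over "query", "document", and "negatives", so a consumer typically flattens a row into one (query, positive, negatives) training example. The sketch below is one way to do that; it assumes the rows are exported as JSON lines and that the on-disk keys match the field names declared in the metadata objective — the file name triplets.jsonl and the TripletExample wrapper are illustrative, not part of the dataset.

import json
from dataclasses import dataclass
from typing import List

@dataclass
class TripletExample:
    query: str        # natural-language query, e.g. a docstring
    positive: str     # the matching code snippet ("document" field)
    negatives: List[str]  # hard-negative snippets

def load_triplets(path: str) -> List[TripletExample]:
    # One row per line; field names follow the metadata objective
    # (query / document / negatives). The JSONL layout is an assumption.
    examples = []
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            if not line.strip():
                continue
            row = json.loads(line)
            examples.append(
                TripletExample(
                    query=row["query"],
                    positive=row["document"],
                    negatives=list(row["negatives"]),
                )
            )
    return examples

if __name__ == "__main__":
    # Hypothetical file name; adjust to wherever the rows are stored.
    for ex in load_triplets("triplets.jsonl")[:3]:
        print(ex.query[:60], "| negatives:", len(ex.negatives))

Under these assumptions, each loaded example feeds directly into a contrastive or triplet-loss trainer: the query is the anchor, the document is the positive, and the 30 negatives supply the in-example hard negatives.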
Creates file for each NBA team containing game info from last season
def create_game_logs_file(team_id): # team game log path = os.path.join(TEAM_BASE_PATH, TEAM_DICT[team_id] + '.json') if not os.path.exists(path): print("Retrieving team " + TEAM_DICT[team_id] + " game log, season stats ... Please wait.") game_logs = team.TeamGameLogs(team_id, '2016-17').json with open(path, 'w') as outfile: json.dump(game_logs, outfile) # playoff game log playoff_path = os.path.join(TEAM_PLAYOFF_PATH, TEAM_DICT[team_id] + '.json') if not os.path.exists(playoff_path): playoff_games = team.TeamGameLogs(team_id, '2016-17', constants.SeasonType.Playoffs).json if len(playoff_games['resultSets'][0]['rowSet']): with open(playoff_path, 'w') as playoff_files: json.dump(playoff_games, playoff_files) # season stats season_path = os.path.join(TEAM_SEASON_PATH, TEAM_DICT[team_id] + '.json') if not os.path.exists(season_path): season_stats = team.TeamSeasons(team_id).json with open(season_path, 'w') as season_files: json.dump(season_stats, season_files)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init():\n for team_id in TEAM_DICT.keys():\n create_game_logs_file(team_id)", "def write_league_players(understat_path, season): \n loop = asyncio.get_event_loop()\n players = loop.run_until_complete(get_league_players(season))\n player = pd.DataFrame.from_dict(players) # Equivalent of players_raw.csv\n player.to_csv(understat_path + 'understat_players.csv', index = False)\n return player", "def create_table_per_team(self) -> None:\n for team_id in self.teams:\n home = self.file.loc[(self.file[\"IdHomeTeam\"] == team_id)].reset_index(drop=True)\n home = home.rename(columns={\"FTHG\": \"FTG_asH\", \"FTR\": \"FT_RESULT\", \"HTHG\": \"HTG_asH\",\n \"HTR\": \"HT_RESULT\", \"HS\": \"Shoot_asH\", \"HST\": \"ShootTarget_asH\",\n \"HF\": \"Fouls_asH\", \"HC\": \"Corner_asH\", \"HY\": \"YellowC_asH\", \"HR\": \"RedC_asH\",\n \"FTAG\": \"FT_against_H\", \"HTAG\": \"HT_against_H\",\n \"AS\": \"Shoot_against_H\", \"AST\": \"ShootTarget_against_H\",\n \"AF\": \"Fouls_against_H\",\n \"AC\": \"Corner_against_H\", \"AR\": \"RedC_againts_H\", \"AY\": \"YellowC_against_H\"})\n home[\"nWeekHome\"] = home.index + 1\n home.drop([\"nWeekAway\", \"IdAwayTeam\"], axis=1,\n inplace=True)\n home.loc[(home[\"FT_RESULT\"] == \"H\"), \"FT_RESULT\"] = \"Winn\"\n home.loc[(home[\"FT_RESULT\"] == \"D\"), \"FT_RESULT\"] = \"Draw\"\n home.loc[(home[\"FT_RESULT\"] == \"A\"), \"FT_RESULT\"] = \"Lost\"\n home.loc[(home[\"HT_RESULT\"] == \"H\"), \"HT_RESULT\"] = \"Winn\"\n home.loc[(home[\"HT_RESULT\"] == \"D\"), \"HT_RESULT\"] = \"Draw\"\n home.loc[(home[\"HT_RESULT\"] == \"A\"), \"HT_RESULT\"] = \"Lost\"\n\n away = self.file.loc[(self.file[\"IdAwayTeam\"] == team_id)].reset_index(drop=True)\n away = away.rename(columns={\"FTAG\": \"FTG_asA\", \"FTR\": \"FT_RESULT\", \"HTAG\": \"HTG_asA\",\n \"HTR\": \"HT_RESULT\", \"AS\": \"Shoot_asA\", \"AST\": \"ShootTarget_asA\",\n \"AF\": \"Fouls_asA\", \"AC\": \"Corner_asA\", \"AY\": \"YellowC_asA\", \"AR\": \"RedC_asA\",\n \"FTHG\": \"FT_against_A\", \"HTHG\": \"HT_against_A\",\n \"HS\": \"Shoot_against_A\", \"HST\": \"ShootTarget_against_A\",\n \"HF\": \"Fouls_against_A\",\n \"HC\": \"Corner_against_A\", \"HR\": \"RedC_againts_A\", \"HY\": \"YellowC_against_A\"})\n away[\"nWeekAway\"] = away.index + 1\n away.drop([\"nWeekHome\", \"IdHomeTeam\"], axis=1,\n inplace=True)\n away.loc[(away[\"FT_RESULT\"] == \"H\"), \"FT_RESULT\"] = \"Lost\"\n away.loc[(away[\"FT_RESULT\"] == \"D\"), \"FT_RESULT\"] = \"Draw\"\n away.loc[(away[\"FT_RESULT\"] == \"A\"), \"FT_RESULT\"] = \"Winn\"\n away.loc[(away[\"HT_RESULT\"] == \"H\"), \"HT_RESULT\"] = \"Lost\"\n away.loc[(away[\"HT_RESULT\"] == \"D\"), \"HT_RESULT\"] = \"Draw\"\n away.loc[(away[\"HT_RESULT\"] == \"A\"), \"HT_RESULT\"] = \"Winn\"\n\n self.home = self.home.append(home, ignore_index=True)\n self.away = self.away.append(away, ignore_index=True)\n\n self.home[\"Date\"] = pd.to_datetime(self.home[\"Date\"], format=\"%Y-%m-%d\")\n self.away[\"Date\"] = pd.to_datetime(self.away[\"Date\"], format=\"%Y-%m-%d\")\n self.home.reset_index(drop=True)\n self.away.reset_index(drop=True)", "def season_game_logs(team, year):\n\n # Check year value\n if year > 2019 or year < 1950:\n raise ValueError('Year Value Incorrect')\n\n # Rename teams that moved\n team = scrape_utils.rename_team(team, year)\n\n # Get HTML content\n url = 'http://www.basketball-reference.com/teams/%s/%s/gamelog' % (team, year)\n r = requests.get(url)\n soup = BeautifulSoup(r.content, \"html.parser\")\n season_stats = soup.find(id='tgl_basic')\n games = 
season_stats.find('tbody')\n\n # MongoDB Collection\n m = mongo.Mongo()\n\n # To find opponent statistics\n opponent = re.compile('^opp_.*$')\n\n # Loop through every game in a team's season\n for game in games.find_all('tr', {'class': None}):\n\n curr_team = {'team': team}\n opp_team = {}\n\n # Loop through each stat\n for stat in game.find_all('td'):\n\n stat_name = stat['data-stat']\n\n # These are opponent stats\n if re.match(opponent, stat_name):\n opp_team[stat_name[4:]] = scrape_utils.stat_parse(stat_name, stat.string)\n else:\n curr_team[stat_name] = scrape_utils.stat_parse(stat_name, stat.string)\n\n # Remove unnecessary information\n del curr_team['game_season']\n del curr_team['x']\n\n # Rename relocated teams\n curr_team['team'] = scrape_utils.rename_team(team)\n opp_team['team'] = scrape_utils.rename_team(opp_team.pop('id'))\n\n # Use the same ID as basketball reference\n result = {'date': datetime.strptime(curr_team.pop('date_game'), \"%Y-%m-%d\"),\n 'season': year,\n 'result': scrape_utils.determine_home_win(curr_team['game_location'], curr_team.pop('game_result')),\n '_id': game.find('a')['href'][-17:-5]}\n\n # Place the teams in the correct spot depending on who is the home team\n if curr_team.pop('game_location') == 0:\n result['home'] = curr_team\n result['away'] = opp_team\n else:\n result['home'] = opp_team\n result['away'] = curr_team\n\n # Insert into database\n m.insert('game_log', result)", "def save_games(season, logging_level=logging.INFO):\n logger.info('Starting the download of games...')\n\n if season.season == get_current_season():\n current_game_events_ids = season.get_current_game_events_ids()\n game_ids_list = list(current_game_events_ids.values())\n else:\n game_ids_list = season.get_game_ids()\n\n\n n_checkpoints = 4\n checkpoints = [round(i * float(len(game_ids_list)) / n_checkpoints) for i in range(n_checkpoints + 1)]\n for i in range(len(game_ids_list)):\n game_id = int(game_ids_list[i]) % 1000\n url2 = BASE_URL + \"/fichas/LACB{}.php\".format(game_ids_list[i])\n filename = os.path.join(season.GAMES_PATH, str(game_id)+\"-\" +str(game_ids_list[i]) + '.html')\n\n open_or_download(file_path=filename, url=url2)\n if i in checkpoints:\n logger.info('{}% already downloaded'.format(round(float(i * 100) / len(game_ids_list))))\n\n logger.info('Download finished! 
(new {} games in {})\\n'.format(len(game_ids_list), season.GAMES_PATH))", "def all_seasons_save():\n year_arr = [str(f'{i}-{str((i + 1))[-2:]}') for i in reversed(range(1950, 2021))]\n for year in year_arr:\n c_name = f'season_player_game_logs_{year.replace(\"-\", \"_\")}'\n try:\n coll = db[c_name]\n season_game_log_save(year, coll)\n print(f'{c_name} successfully saved')\n time.sleep(1)\n except OperationFailure as e:\n print(f'{c_name} not saved!!')\n print(e)", "def g_n():\n for gname in os.listdir(sroot):\n if gname != 's1-league1-game1':\n continue\n if gname.startswith('s1'):\n p0 = os.path.join(sroot, gname)\n p1 = os.path.join(p0, 'commitment', 'jperret')\n p2 = os.path.join(p0, 'commitment', 'sa')\n if os.path.isdir(p1) and os.path.isdir(p2):\n for fname in os.listdir(p1):\n if fname.endswith('.aa'):\n bname = fname[:-3]\n #~ if bname == 's1-league1-game2_07':\n #~ continue\n a = ad.Annotations(os.path.join(p1, fname))\n a.load_text(os.path.join(p0, 'unannotated', bname+'.ac'))\n a.gen_full_struct()\n a.commitments = list(u for u in a.units if u.type == 'Commitment')\n a2 = ad.Annotations(os.path.join(p2, fname))\n a2.load_text(os.path.join(p0, 'unannotated', bname+'.ac'))\n a2.gen_full_struct()\n a2.commitments = list(u for u in a2.units if u.type == 'Commitment')\n yield bname, (a, a2)", "def get_games(date):\n scoreboard = nba_py.Scoreboard(month=date.month,\n day=date.day,\n year=date.year)\n line_score = scoreboard.line_score()\n game_header = scoreboard.game_header()\n\n games = []\n current_game = {}\n game_sequence = 0\n game_sequence_counter = 0\n\n # Get HOME TEAM and AWAY TEAM data for each boxscore game in line_score.\n for i, value in enumerate(line_score):\n if (value[\"GAME_SEQUENCE\"] != game_sequence):\n game_sequence += 1\n\n current_game[\"GAME_ID\"] = value[\"GAME_ID\"]\n home_team_id = game_header[game_sequence - 1][\"HOME_TEAM_ID\"]\n\n if (home_team_id == value[\"TEAM_ID\"]):\n current_game[\"HOME_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"HOME_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n current_game[\"HOME_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"HOME_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"HOME_TEAM\"] in TEAM_ID_DATA):\n current_game[\"HOME_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"HOME_TEAM\"]][\"img\"]\n else:\n current_game[\"AWAY_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"AWAY_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n current_game[\"AWAY_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"AWAY_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"AWAY_TEAM\"] in TEAM_ID_DATA):\n current_game[\"AWAY_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"AWAY_TEAM\"]][\"img\"]\n\n if (value[\"TEAM_ABBREVIATION\"] in TEAMS):\n if (home_team_id == value[\"TEAM_ID\"]):\n current_game[\"HOME_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n else:\n current_game[\"AWAY_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n \n game_sequence = value[\"GAME_SEQUENCE\"]\n game_sequence_counter += 1\n elif game_sequence_counter == 1:\n if (\"AWAY_TEAM\" in current_game):\n current_game[\"HOME_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"HOME_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n current_game[\"HOME_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"HOME_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"HOME_TEAM\"] in TEAM_ID_DATA):\n 
current_game[\"HOME_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"HOME_TEAM\"]][\"img\"]\n else:\n current_game[\"AWAY_TEAM\"] = value[\"TEAM_ABBREVIATION\"]\n current_game[\"AWAY_TEAM_WINS_LOSSES\"] = value[\"TEAM_WINS_LOSSES\"]\n current_game[\"AWAY_TEAM_PTS\"] = value[\"PTS\"]\n current_game[\"AWAY_TEAM_ID\"] = value[\"TEAM_ID\"]\n if (current_game[\"AWAY_TEAM\"] in TEAM_ID_DATA):\n current_game[\"AWAY_TEAM_IMG\"] = TEAM_ID_DATA[current_game[\"AWAY_TEAM\"]][\"img\"]\n\n if (value[\"TEAM_ABBREVIATION\"] in TEAMS):\n if (\"AWAY_TEAM\" in current_game):\n current_game[\"HOME_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n else:\n current_game[\"AWAY_TEAM_FULL_NAME\"] = TEAMS[value[\"TEAM_ABBREVIATION\"]][\"city\"] + \\\n \" \" + TEAMS[value[\"TEAM_ABBREVIATION\"]][\"name\"]\n\n current_game[\"GAME_STATUS_TEXT\"] = game_header[game_sequence - 1][\"GAME_STATUS_TEXT\"]\n if not game_header[game_sequence - 1][\"NATL_TV_BROADCASTER_ABBREVIATION\"]:\n current_game[\"BROADCASTER\"] = \"\"\n else:\n current_game[\"BROADCASTER\"] = game_header[game_sequence - 1][\"NATL_TV_BROADCASTER_ABBREVIATION\"]\n\n games.append(current_game)\n\n current_game = {}\n\n game_sequence = value[\"GAME_SEQUENCE\"]\n game_sequence_counter -= 1\n\n east_standings = scoreboard.east_conf_standings_by_day()\n west_standings = scoreboard.west_conf_standings_by_day()\n\n return (games, east_standings, west_standings)", "def save(self):\n file_name = common.RANK_FILE % (self.week.season.name, self.week.num)\n with open(file_name, 'w') as rank_file:\n for team, score in self.score.items():\n rank_file.write('%s,%s\\n' % (team, score))", "def _create_games(self):\n\n ''''''", "def scrape_teams_write_tojson() -> None:\r\n # Create a dictionary of Team objects by scraping TCS and Overbuff\r\n teams = TCS.get_teams()\r\n # Save this data to a json file named teams.json\r\n TCS.write_tojson(teams, \"teams.json\")", "def create_teams(a_season):\n\n team_a = Team(name=\"Team A\", season=a_season)\n team_a.save()\n for p in create_players(7, 0):\n team_a.players.add(p)\n team_a.save()\n team_b = Team(name=\"Team B\", season=a_season)\n team_b.save()\n for p in create_players(7, 16):\n team_b.players.add(p)\n team_b.save()", "def process_team(this_team, nodes, file_obj_out):\n for i, player_one in enumerate(this_team):\n for j, player_two in enumerate(this_team):\n if j > i and nodes[player_one] != nodes[player_two]:\n # write the source id and target id to file\n print(nodes[player_one], nodes[player_two],\n player_one + \" - \" + player_two,\n sep=',', file=file_obj_out)", "def _create_teams(self):\n\t\tself.teamsDict = {}\n\t\tself.teamNamesList = []\n\t\tfor team in range(self.numberOfTeams):\n\t\t\tname = 'TEAM_'+str(team+1)\n\t\t\tself.teamNamesList.append(name)\n\t\t\tself.teamsDict[name] = app.game.team.Team(sport_type=self.gameData['sportType'])", "def get_single_game_team_data(game, grouped_shot_data, pp_sit_data):\n game_stat_lines = list()\n game_id = game['game_id']\n home_id = game['home_id']\n road_id = game['road_id']\n game_type = get_game_type_from_season_type(game)\n\n home_stats_src_path = os.path.join(\n CONFIG['base_data_dir'], 'game_team_stats',\n str(game['season']), str(game_type), \"%d_%d.json\" % (game_id, home_id))\n road_stats_src_path = os.path.join(\n CONFIG['base_data_dir'], 'game_team_stats',\n str(game['season']), str(game_type), \"%d_%d.json\" % (game_id, road_id))\n\n # loading raw team game stats (if available)\n raw_stats 
= dict()\n if os.path.isfile(home_stats_src_path):\n raw_stats['home'] = json.loads(open(home_stats_src_path).read())\n else:\n raw_stats['home'] = dict()\n if os.path.isfile(road_stats_src_path):\n raw_stats['road'] = json.loads(open(road_stats_src_path).read())\n else:\n raw_stats['road'] = dict()\n\n # counting penalties per team\n penalty_counts = get_penalty_counts(game)\n\n for key in ['home', 'road']:\n opp_key = 'road' if key == 'home' else 'home'\n game_stat_line = dict()\n # basic game information\n game_stat_line['game_date'] = game['date']\n game_stat_line['weekday'] = game['weekday']\n game_stat_line['season'] = game['season']\n game_stat_line['season_type'] = game['season_type']\n game_stat_line['round'] = game['round']\n game_stat_line['game_id'] = game_id\n game_stat_line['team_id'] = game[\"%s_id\" % key]\n game_stat_line['team'] = game[\"%s_abbr\" % key]\n game_stat_line['opp_team_id'] = game[\"%s_id\" % opp_key]\n game_stat_line['opp_team'] = game[\"%s_abbr\" % opp_key]\n # identifying team's and opposing team's division (if\n # applicable for current season and season type)\n if (game['season'], game['season_type']) in divisions:\n current_divisions = divisions[game['season'], game['season_type']]\n game_stat_line['division'] = current_divisions[game_stat_line['team']]\n game_stat_line['opp_division'] = current_divisions[game_stat_line['opp_team']]\n # TODO: reactivate when schedule game id is available again\n # game_stat_line['schedule_game_id'] = game['schedule_game_id']\n game_stat_line['arena'] = correct_name(game['arena'])\n game_stat_line['attendance'] = game['attendance']\n if game_stat_line['arena'] in capacities:\n game_stat_line['capacity'] = capacities[game_stat_line['arena']]\n else:\n print(\n \"\\t+ Unable to retrieve capacity \" +\n \"for '%s'\" % game_stat_line['arena'])\n game_stat_line['capacity'] = 0\n # coaches and referees\n if \"%s_coach\" % key in game:\n game_stat_line['coach'] = correct_name(\n game[\"%s_coach\" % key], game['date'])\n if game_stat_line['coach'] not in coaches:\n print(\"+ Unknown coach '%s'\" % game_stat_line['coach'])\n else:\n print(\"\\t+ No coach information found for %s in game %d\" % (\n game_stat_line['team'], game_id))\n game_stat_line['coach'] = correct_name(\n \"%d_%s\" % (game_id, game_stat_line['team']))\n print(\"\\t+ Adjusted to '%s'\" % game_stat_line['coach'])\n if \"%s_coach\" % opp_key in game:\n game_stat_line['opp_coach'] = correct_name(\n game[\"%s_coach\" % opp_key], game['date'])\n if game_stat_line['opp_coach'] not in coaches:\n print(\"+ Unknown coach '%s'\" % game_stat_line['opp_coach'])\n else:\n print(\n \"\\t+ No opposition coach information found \" +\n \"for %s in game %d\" % (game_stat_line['opp_team'], game_id))\n game_stat_line['opp_coach'] = correct_name(\n \"%d_%s\" % (game_id, game_stat_line['opp_team']))\n print(\"\\t+ Adjusted to '%s'\" % game_stat_line['opp_coach'])\n game_stat_line['ref_1'] = correct_name(game['referee_1'])\n game_stat_line['ref_2'] = correct_name(game['referee_2'])\n game_stat_line['lma_1'] = correct_name(game['linesman_1'])\n game_stat_line['lma_2'] = correct_name(game['linesman_2'])\n # outcomes\n game_stat_line['games_played'] = 1\n game_stat_line['home_road'] = key\n game_stat_line['score'] = game[\"%s_score\" % key]\n game_stat_line['goals'] = game[\"%s_score\" % key]\n game_stat_line['opp_score'] = game[\"%s_score\" % opp_key]\n game_stat_line['opp_goals'] = game[\"%s_score\" % opp_key]\n # optionally correcting game scores\n if game_id in 
game_score_corrections:\n for team_abbr in game_score_corrections[game_id]:\n if game_stat_line['team'] == team_abbr:\n game_stat_line['score'] = game_score_corrections[game_id][team_abbr]\n if game_stat_line['opp_team'] == team_abbr:\n game_stat_line['opp_score'] = game_score_corrections[game_id][team_abbr]\n if game['shootout_game']:\n game_stat_line['game_type'] = 'SO'\n elif game['overtime_game']:\n game_stat_line['game_type'] = 'OT'\n else:\n game_stat_line['game_type'] = ''\n for gsl_key in ['w', 'rw', 'ow', 'sw', 'l', 'rl', 'ol', 'sl']:\n game_stat_line[gsl_key] = 0\n if game_stat_line['score'] > game_stat_line['opp_score']:\n game_stat_line['w'] += 1\n if game['shootout_game']:\n game_stat_line['sw'] += 1\n game_stat_line['goals'] -= 1\n elif game['overtime_game']:\n game_stat_line['ow'] += 1\n else:\n game_stat_line['rw'] += 1\n else:\n game_stat_line['l'] += 1\n if game['shootout_game']:\n game_stat_line['sl'] += 1\n game_stat_line['opp_goals'] -= 1\n elif game['overtime_game']:\n game_stat_line['ol'] += 1\n else:\n game_stat_line['rl'] += 1\n game_stat_line['points'] = (\n game_stat_line['rw'] * 3 + game_stat_line['ow'] * 2 +\n game_stat_line['sw'] * 2 + game_stat_line['sl'] * 1 +\n game_stat_line['ol'] * 1)\n # per-period goals\n for period in [1, 2, 3]:\n game_stat_line[\"goals_%d\" % period] = game[\n \"%s_goals_%d\" % (key, period)]\n game_stat_line[\"opp_goals_%d\" % period] = game[\n \"%s_goals_%d\" % (opp_key, period)]\n # empty-net and extra-attacker goals\n game_stat_line['en_goals'] = game[\"%s_en_goals\" % key]\n game_stat_line['ea_goals'] = game[\"%s_ea_goals\" % key]\n game_stat_line['opp_en_goals'] = game[\"%s_en_goals\" % opp_key]\n game_stat_line['opp_ea_goals'] = game[\"%s_ea_goals\" % opp_key]\n # situation after 20 and 40 minutes respectively\n for situation in [\n 'tied20', 'lead20', 'trail20', 'tied40', 'lead40', 'trail40'\n ]:\n game_stat_line[situation] = False\n if game_stat_line['goals_1'] == game_stat_line['opp_goals_1']:\n game_stat_line['tied20'] = True\n elif game_stat_line['goals_1'] > game_stat_line['opp_goals_1']:\n game_stat_line['lead20'] = True\n else:\n game_stat_line['trail20'] = True\n goals40 = game_stat_line['goals_1'] + game_stat_line['goals_2']\n opp_goals40 = (\n game_stat_line['opp_goals_1'] + game_stat_line['opp_goals_2'])\n if goals40 == opp_goals40:\n game_stat_line['tied40'] = True\n elif goals40 > opp_goals40:\n game_stat_line['lead40'] = True\n else:\n game_stat_line['trail40'] = True\n # scored first?\n if game['first_goal'] == game_stat_line['team']:\n game_stat_line['scored_first'] = True\n game_stat_line['trailed_first'] = False\n elif game['first_goal'] == game_stat_line['opp_team']:\n game_stat_line['scored_first'] = False\n game_stat_line['trailed_first'] = True\n # one-goal, two-goal, three-goal, four-goal-game?\n for goal_game in ['one_goal', 'two_goal', 'three_goal', 'four_goal']:\n game_stat_line[goal_game] = False\n score_diff = abs(\n (game_stat_line['score'] - game_stat_line['en_goals']) -\n (game_stat_line['opp_score'] - game_stat_line['opp_en_goals']))\n # in case the right amount of empty-net goals have been scored, we\n # may end up with a score differential of zero, see game between STR\n # and ING on Mar 3, 2019\n if not score_diff:\n game_stat_line['zero_goal'] = True\n if score_diff == 1:\n game_stat_line['one_goal'] = True\n elif score_diff == 2:\n game_stat_line['two_goal'] = True\n elif score_diff == 3:\n game_stat_line['three_goal'] = True\n elif score_diff > 3:\n game_stat_line['four_goal'] = True\n\n # 
retrieving score state time spans for current team\n game_stat_line['time_played'] = game['time_played']\n game_stat_line['tied'] = game['tied']\n game_stat_line['tied_pctg'] = round(\n game['tied'] / game['time_played'] * 100, 2)\n if key == 'home':\n game_stat_line['leading'] = game['home_leading']\n game_stat_line['trailing'] = game['road_leading']\n else:\n game_stat_line['leading'] = game['road_leading']\n game_stat_line['trailing'] = game['home_leading']\n game_stat_line['leading_pctg'] = round(\n game_stat_line['leading'] / game['time_played'] * 100, 2)\n game_stat_line['trailing_pctg'] = round(\n game_stat_line['trailing'] / game['time_played'] * 100, 2)\n\n # retrieving raw stats for team and opposing team\n for category, raw_category in RAW_STATS_MAPPING:\n game_stat_line[category] = raw_stats[key].get(raw_category, None)\n game_stat_line[\"opp_%s\" % category] = raw_stats[opp_key].get(raw_category, None)\n\n # checking number of power play goals retrieved from team stats with those registered in event data\n game_stat_line = check_pp_goals(game, key, opp_key, game_stat_line)\n\n # calculating shooting percentages\n if game_stat_line['shots_on_goal']:\n game_stat_line['shot_pctg'] = round(\n game_stat_line['goals'] /\n game_stat_line['shots_on_goal'] * 100., 2)\n else:\n game_stat_line['shot_pctg'] = None\n if game_stat_line['opp_shots_on_goal']:\n game_stat_line['opp_shot_pctg'] = round(\n game_stat_line['opp_goals'] /\n game_stat_line['opp_shots_on_goal'] * 100., 2)\n else:\n game_stat_line['opp_shot_pctg'] = None\n # calculating save percentages\n if game_stat_line['opp_shots_on_goal']:\n game_stat_line['save_pctg'] = round(\n 100 - game_stat_line['opp_goals'] /\n game_stat_line['opp_shots_on_goal'] * 100., 2)\n else:\n game_stat_line['save_pctg'] = None\n if game_stat_line['shots_on_goal']:\n game_stat_line['opp_save_pctg'] = round(\n 100 - game_stat_line['goals'] /\n game_stat_line['shots_on_goal'] * 100., 2)\n else:\n game_stat_line['opp_save_pctg'] = None\n # calculating pdo values\n if (\n game_stat_line['shot_pctg'] is not None and\n game_stat_line['save_pctg'] is not None\n ):\n game_stat_line['pdo'] = round((\n game_stat_line['shot_pctg'] +\n game_stat_line['save_pctg']), 1)\n game_stat_line['opp_pdo'] = round((\n game_stat_line['opp_shot_pctg'] +\n game_stat_line['opp_save_pctg']), 1)\n # calculating power play percentages\n if game_stat_line['pp_opps']:\n game_stat_line['pp_pctg'] = round((\n game_stat_line['pp_goals'] /\n game_stat_line['pp_opps']) * 100., 1)\n else:\n game_stat_line['pp_pctg'] = 0\n if game_stat_line['opp_pp_opps']:\n game_stat_line['opp_pp_pctg'] = round((\n game_stat_line['opp_pp_goals'] /\n game_stat_line['opp_pp_opps']) * 100., 1)\n else:\n game_stat_line['opp_pp_pctg'] = 0\n # calculating penalty killing percentages\n if game_stat_line['sh_opps']:\n game_stat_line['pk_pctg'] = round(\n 100 - game_stat_line['opp_pp_goals'] /\n game_stat_line['sh_opps'] * 100., 1)\n else:\n game_stat_line['pk_pctg'] = 0\n if game_stat_line['opp_sh_opps']:\n game_stat_line['opp_pk_pctg'] = round(\n 100 - game_stat_line['pp_goals'] /\n game_stat_line['opp_sh_opps'] * 100., 1)\n else:\n game_stat_line['opp_pk_pctg'] = 0\n game_stat_line['ev_goals'] = (\n game_stat_line['goals'] -\n game_stat_line['pp_goals'] -\n game_stat_line['sh_goals'])\n game_stat_line['opp_ev_goals'] = (\n game_stat_line['opp_goals'] -\n game_stat_line['opp_pp_goals'] -\n game_stat_line['opp_sh_goals'])\n # faceoffs are treated separately since each of the team game stats\n # datasets only 
contains the number of won faceoffs and sometimes this\n # one is stored as a string (wtf?)\n game_stat_line['faceoffs_won'] = int(\n raw_stats[key].get('faceOffsWon', 0))\n game_stat_line['faceoffs_lost'] = int(\n raw_stats[opp_key].get('faceOffsWon', 0))\n # calculating overall number of faceoffs and faceoff percentage\n game_stat_line['faceoffs'] = (\n game_stat_line['faceoffs_won'] + game_stat_line['faceoffs_lost'])\n if game_stat_line['faceoffs']:\n game_stat_line['faceoff_pctg'] = round(\n game_stat_line['faceoffs_won'] /\n game_stat_line['faceoffs'] * 100., 1)\n else:\n game_stat_line['faceoff_pctg'] = 0.\n # best players\n game_stat_line['best_plr_id'] = game.get(\n \"%s_best_player_id\" % key, None)\n game_stat_line['best_plr'] = game.get(\"%s_best_player\" % key, None)\n game_stat_line['opp_best_plr_id'] = game.get(\n \"%s_best_player_id\" % opp_key, None)\n game_stat_line['opp_best_plr'] = game.get(\n \"%s_best_player\" % opp_key, None)\n # game-winning-goal\n game_stat_line['gw_goal_team'] = game['gw_goal']\n game_stat_line['gw_goal_player_id'] = game['gw_goal_player_id']\n game_stat_line['gw_goal_first_name'] = game['gw_goal_first_name']\n game_stat_line['gw_goal_last_name'] = game['gw_goal_last_name']\n\n shot_zones_to_retain = ['slot', 'left', 'right', 'blue_line']\n shot_situations_to_retain = [\n 'shots_ev', 'shots_5v5', 'shots_pp', 'shots_sh', 'shots_unblocked',\n 'shots_unblocked_ev', 'shots_unblocked_5v5', 'shots_unblocked_pp',\n 'shots_unblocked_sh', 'shots_on_goal_ev', 'shots_on_goal_5v5',\n 'shots_on_goal_pp', 'shots_on_goal_sh', 'goals_5v5', 'hit_post']\n\n # retrieving shot data for current game and team\n shot_data = grouped_shot_data.get(\n (game_id, game_stat_line['team']), list())\n for item in shot_data:\n if item.startswith(tuple(shot_zones_to_retain)):\n abbr_item = item\n for zone_key, replacement in SHOT_ZONE_ABBREVIATIONS.items():\n abbr_item = abbr_item.replace(zone_key, replacement)\n game_stat_line[abbr_item] = shot_data[item]\n elif item in shot_situations_to_retain:\n game_stat_line[item] = shot_data[item]\n\n # retrieving shots against data for current game and team\n shot_against_data = grouped_shot_data.get(\n (game_id, game_stat_line['opp_team']), list())\n for item in shot_against_data:\n if item.startswith(tuple(shot_zones_to_retain)):\n abbr_item = item\n for zone_key, replacement in SHOT_ZONE_ABBREVIATIONS.items():\n abbr_item = abbr_item.replace(zone_key, replacement)\n game_stat_line[\"%s_a\" % abbr_item] = shot_against_data[item]\n elif item in shot_situations_to_retain:\n game_stat_line[\"opp_%s\" % item] = shot_against_data[item]\n\n try:\n game_stat_line['ev_cf_pctg'] = round(\n game_stat_line['shots_ev'] / (game_stat_line['shots_ev'] + game_stat_line['opp_shots_ev']) * 100, 2)\n except KeyError:\n print(\"\\t+Unable to calculate even strength shots for percentage\")\n game_stat_line['ev_cf_pctg'] = None\n\n for penalty_duration in [2, 5, 10, 20]:\n if penalty_counts[key] and penalty_duration in penalty_counts[key]:\n game_stat_line[\"penalty_%d\" % penalty_duration] = (\n penalty_counts[key][penalty_duration])\n else:\n game_stat_line[\"penalty_%d\" % penalty_duration] = 0\n\n game_stat_line['pp_5v4'] = pp_sit_data[key]['pp_sits']['5v4']\n game_stat_line['pp_5v3'] = pp_sit_data[key]['pp_sits']['5v3']\n game_stat_line['pp_4v3'] = pp_sit_data[key]['pp_sits']['4v3']\n game_stat_line['ppg_5v4'] = pp_sit_data[key]['pp_goals']['5v4']\n game_stat_line['ppg_5v3'] = pp_sit_data[key]['pp_goals']['5v3']\n game_stat_line['ppg_4v3'] = 
pp_sit_data[key]['pp_goals']['4v3']\n game_stat_line['opp_pp_5v4'] = pp_sit_data[opp_key]['pp_sits']['5v4']\n game_stat_line['opp_pp_5v3'] = pp_sit_data[opp_key]['pp_sits']['5v3']\n game_stat_line['opp_pp_4v3'] = pp_sit_data[opp_key]['pp_sits']['4v3']\n game_stat_line['opp_ppg_5v4'] = pp_sit_data[opp_key]['pp_goals']['5v4']\n game_stat_line['opp_ppg_5v3'] = pp_sit_data[opp_key]['pp_goals']['5v3']\n game_stat_line['opp_ppg_4v3'] = pp_sit_data[opp_key]['pp_goals']['4v3']\n\n # opp_diff = game_stat_line['pp_opps'] - (\n # game_stat_line['pp_5v4'] +\n # game_stat_line['pp_5v3'] +\n # game_stat_line['pp_4v3']\n # )\n # if opp_diff:\n # print(\"\\tpp opp discrepancy of %d for %s\" % (opp_diff, key))\n\n # registering shootout stats (if applicable)\n shootout_stats = get_shootout_stats(game, key, opp_key)\n if shootout_stats:\n game_stat_line = {**game_stat_line, **shootout_stats}\n\n game_stat_lines.append(game_stat_line)\n\n return game_stat_lines", "def print_league(teams):\n league_report = \"Raptors: {} \\n\\nDragons: {}\\n\\nSharks: {}\"\n\n try:\n with open('league.txt', 'w') as league_file:\n league_file.write(league_report.format(teams[0],\n teams[1],\n teams[2]))\n except Exception as error:\n print('There was an error writing the league report file: {}'\n .format(error))", "def _extract_games_data(season, season_type):\n nflscrapr.run(\n 'games',\n season=season,\n season_type=season_type\n )\n nflscrapr_output = etl_tools.extract_from_csv(config.GAMES_DUMP_CSV_PATH)\n return nflscrapr_output", "def get_all_player_history(understat_path, season): \n\n start_date, end_date = set_season_time(season)\n players = write_league_players(understat_path, season) # get all league players\n for i in range(len(players)):\n loop = asyncio.get_event_loop() \n result = loop.run_until_complete(get_player_history(int(players.loc[i][0])))\n name = players.loc[i][1]\n individuals = pd.DataFrame.from_dict(result)\n individuals['date'] = pd.to_datetime(individuals['date'])\n individuals = individuals[(individuals.date >= start_date)]\n individuals = individuals[(individuals.date <= end_date)]\n individuals['player_name'] = name\n individuals.to_csv(understat_path + \"{}_data.csv\".format(name), index = False) \n if i == 0:\n all_players = individuals\n else:\n all_players = all_players.append(individuals)\n all_players.to_csv(understat_path + 'all_understat_players.csv', index = False)", "def Get_Player_Historic_Data(data_path, player_history_path): \n players = os.listdir(player_history_path) # Lists All The Player Folders in the Dir\n players_data = pd.read_csv(data_path + 'players_raw.csv')\n for ind in pbar(players_data.index): # ind in [0:693:1]\n # Get the Seasonal History\n player_path = players_data['first_name'][ind] + '_' + players_data['second_name'][ind] + '_' + str(players_data['id'][ind]) # Create player_history_path\n if player_path not in players: # If the player (read from players_raw.csv) is not within the existing directory, continue: \n json = Access_URL(url = \"https://fantasy.premierleague.com/api/element-summary/{}/\".format(str(players_data['id'][ind]))) # Feed in Player ID\n # print(json.keys())\n history_df = pd.DataFrame(json['history_past']) # Extract history\n if not history_df.empty: # If history returned\n os.makedirs(player_history_path + player_path, exist_ok = True) # Create a new path for the player \n history_df.to_csv(player_history_path + player_path + '/history.csv', encoding='utf-8', index = False) # And write his syeasonal history\n else: # However, if the player is 
within the existing directory\n if not os.path.isfile(player_history_path + player_path + \"/history.csv\"): # And a history file does not exist\n json = Access_URL(url = \"https://fantasy.premierleague.com/api/element-summary/{}/\".format(str(players_data['id'][ind]))) # Feed in Player ID\n history_df = pd.DataFrame(json['history_past']) # Extract history\n if not history_df.empty: # If history returned\n history_df.to_csv(player_history_path + player_path + '/history.csv', encoding='utf-8', index = False) # And write his seasonal history\n # Get the Gameweek History\n json = Access_URL(url = \"https://fantasy.premierleague.com/api/element-summary/{}/\".format(str(players_data['id'][ind]))) # Feed in Player ID \n history_df_gw = pd.DataFrame(json['history']) # Extract Gameweek History\n if not history_df_gw.empty: # If history returned\n if player_path not in players: # If the player (read from players_raw.csv) is not within the existing directory, continue: \n os.makedirs(player_history_path + player_path, exist_ok = True) # Create the directory, exit\n history_df_gw.to_csv(player_history_path + player_path + '/gw.csv', encoding='utf-8', index = False) # Write the CSV", "def writing_get_game(file_name, title):\n result = str(reports.get_game(file_name, title))\n with open (\"report_for_judy_part2.txt\", \"+a\") as f:\n f.write(result)\n f.write(\"\\n\")", "def build_teams(self):\n # get all nations\n all_nations = self.games.get_all_nations()\n\n # build teams for all participating nations in FIFA World Cup 2018\n bt = BuildTeams(self.squad_size, self.selected_attrs)\n bt.read_data()\n\n # a dict with a nations' name as a key and players' data as value\n self.teams = {}\n\n # build squad for every nation\n for nation in all_nations:\n team = bt.build_team(nation)\n # if we got enough players, add team\n if team.shape[0] >= bt.squad_size:\n #print(team)\n # convert pandas dataframe to matrix and flatten it\n self.teams[nation] = team.as_matrix().flatten()", "def getTeamStat(self, year = 2014):\r\n \r\n year_next = (year % 100) + 1\r\n season = str(year) + '-' + str(year_next)\r\n \r\n stat_url = 'http://stats.nba.com/stats/leaguedashteamstats?Conference=&'\\\r\n 'DateFrom=&DateTo=&Division=&GameScope=&GameSegment=&'\\\r\n 'LastNGames=0&LeagueID=00&Location=&MeasureType=Base&'\\\r\n 'Month=0&OpponentTeamID=0&Outcome=&PORound=0&PaceAdjust=N&'\\\r\n 'PerMode=PerGame&Period=0&PlayerExperience=&PlayerPosition=&'\\\r\n 'PlusMinus=N&Rank=N&Season=' + season + '&SeasonSegment=&'\\\r\n 'SeasonType=Regular+Season&ShotClockRange=&StarterBench=&'\\\r\n 'TeamID=0&VsConference=&VsDivision='\r\n \r\n response = requests.get(stat_url)\r\n data = json.loads(response.text)\r\n \r\n headers = data['resultSets'][0]['headers']\r\n stat_data = data['resultSets'][0]['rowSet']\r\n df = pd.DataFrame(stat_data,columns=headers) \r\n \r\n team_df = df[[\"TEAM_ID\",\"TEAM_NAME\",\"GP\",\"W\",\"L\",\"W_PCT\",\"MIN\",\"FGM\",\r\n \"FGA\",\"FG_PCT\",\"FG3M\",\"FG3A\",\"FG3_PCT\",\"FTM\",\"FTA\",\"FT_PCT\",\r\n \"OREB\",\"DREB\",\"REB\",\"AST\",\"TOV\",\"STL\",\"BLK\",\"BLKA\",\"PF\",\r\n \"PFD\",\"PTS\",\"PLUS_MINUS\"]]\r\n \r\n return team_df", "def create_plays(pk, f):\n game = bmodels.Game.objects.get(pk=pk)\n game.playbyplay_set.all().delete()\n top_play_players = []\n for bline in f.readlines():\n play_dict = {}\n top_play_players = []\n line = bline.decode().split(',')\n\n # parse time\n time_split = line[0].split(':')\n if len(time_split) == 3:\n play_dict['time'] = datetime.time(\n int(time_split[0]), 
int(time_split[1]), int(time_split[2]))\n else:\n play_dict['time'] = datetime.time(\n 0, int(time_split[0]), int(time_split[1]))\n\n # primary play\n for play_type in bmodels.PRIMARY_PLAY:\n if play_type[1].lower() == line[1].lower():\n play_dict['primary_play'] = play_type[0]\n break\n\n # primary player\n play_dict['primary_player'] = bmodels.Player.objects.get(first_name=line[\n 2])\n\n # secondary play\n if len(line[3].strip()) > 0:\n for play_type in bmodels.SECONDARY_PLAY:\n if play_type[1].lower() == line[3].lower():\n play_dict['secondary_play'] = play_type[0]\n break\n\n # seconday player\n play_dict['secondary_player'] = bmodels.Player.objects.get(first_name=line[\n 4])\n\n # assist play\n if len(line[5].strip()) > 0:\n for play_type in bmodels.ASSIST_PLAY:\n if play_type[1].lower() == line[5].lower():\n play_dict['assist'] = play_type[0]\n break\n\n # assist player\n play_dict['assist_player'] = bmodels.Player.objects.get(\n first_name=line[6].strip())\n\n # Top play rank\n if len(line) > 7:\n if len(line[7].strip()) > 0:\n for choice in bmodels.RANKS:\n if choice[1].lower() == line[7].lower():\n play_dict['top_play_rank'] = choice[0]\n\n # players involved(added after mode is saved cause of M2M)\n top_players_list = [player.strip()\n for player in line[8].strip().split('.')]\n top_play_players = bmodels.Player.objects.filter(\n first_name__in=top_players_list)\n\n # description\n play_dict['description'] = line[9].strip()\n\n play = bmodels.PlayByPlay.objects.create(game=game, **play_dict)\n play.top_play_players = top_play_players\n play.save()", "def team_season_stats(team):\n\n # Get HTML Content\n url = 'http://www.basketball-reference.com/teams/%s/stats_per_game_totals.html' % team\n r = requests.get(url)\n soup = BeautifulSoup(r.content, \"html.parser\")\n\n # MongoDB Collection\n m = mongo.Mongo()\n\n # Team's yearly stats are displayed in a table\n season_stats = soup.find(id='stats').find('tbody')\n\n # Iterate through each year\n for year in season_stats.find_all('tr', {'class': None}):\n\n season_year = year.find('th').text[0:4]\n season_year = int(season_year) + 1\n season = {'year': season_year}\n\n # Loop through each stat\n for stat in year.find_all('td'):\n season[stat['data-stat']] = stat.string\n\n # Rename relocated teams\n season['team_id'] = scrape_utils.rename_team(season['team_id'])\n season['_id'] = season['team_id'] + '_' + str(season_year)\n\n # Remove unwanted stats\n to_remove = ['rank_team', 'foo', 'g', 'mp_per_g']\n for k in to_remove:\n season.pop(k, None)\n\n # Add to MongoDB\n m.insert('team_season', season)", "def emit_list_episodes_orgmode(self, filename):\n\n fileObj = open(filename, 'w')\n \n for ep in self.episodes:\n fileObj.write('***' + \n ' Season:' + str(ep.season))\n fileObj.write(' Episode:' + str(ep.number))\n fileObj.write('\\tAired:' + ep.aired) \n fileObj.write('\\tRating:' + str(ep.rating))\n fileObj.write('\\n')\n fileObj.write(ep.description)\n fileObj.write('\\n')\n\n fileObj.close()", "def write_pending_bet(self, mode: str, team: TeamDict = 0, strategy: str = 0, line: str = 0, when: datetime = 0) -> None:\n look_report = {'e1': ['10:00'], 'e2': ['escanteios'], 'e3': ['escanteios'],\n 'e4': ['escanteios', 'opções'], 'h1': ['handicap', 'asiático']}\n\n #there are 12 collumns in self.pending_bets \n if mode == 'bet': #this appends 1 bet\n with io.open(f'{self.username}_pending_bets.txt', 'a+', encoding='utf-8') as bets:\n bet = f\"{team['name']}, {when.year}, {when.month}, {when.day}, {when.isoformat(' ').split(' ')[1][:8]}, 
\\\n {strategy}, {line}, {team['appm']}, {team['cg']}, {team['rend']}, {team['balan']}, {look_report[strategy]}\"\n bets.write(\"\\n\")\n bets.write(bet)\n self.pending_bets.append([item.strip(' ') for item in bet[:12].split(',')] + [[item.strip('\\'[] ') for item in bet[12:].split(',')]])\n \n if mode == 'report': #this overwrites with all pending_bets\n with io.open(f'{self.username}_pending_bets.txt', 'w+', encoding='utf-8') as bets:\n bets.write('Team Names, ano, mes, dia, horario, estrategia, linha, appm, cg, rend, balan, Betting Type$')\n for bet in self.pending_bets:\n text = \", \".join(bet[:12]).lower() + \", \" + \", \".join(bet[12]).lower()\n bets.write(\"\\n\"+text)", "def generate_tournament(self, date, surface, n_players=256):\n self.date = date\n self.surface = surface\n players = self._get_players(date, n_players)\n oppositions = self._generate_oppositions(players)\n\n self._debug(\"Generated oppositions: {}\".format(oppositions))\n oppositions.to_csv(\"oppositions_{}_{}_{}\".format(surface, n_players, date), index=False)\n\n return oppositions", "def saveStatsFile(self):\n if not os.path.exists(\"stats\"):\n os.mkdir(\"stats\")\n now = datetime.datetime.now()\n parts = [now.year, now.month, now.day]\n parts = [\"%02d\"%x for x in parts]\n todaysFileName = \"-\".join(parts)+\".txt\" \n timeStamp = time.strftime(\"%y%m%d%H%M\", time.localtime())\n log = \",\".join(self.logLinesStats)\n fname = \"stats/\"+todaysFileName\n with open(fname, 'a') as f:\n f.write(timeStamp+\",\"+log+\"\\n\")\n self.log(\"wrote \"+fname)", "def main():\r\n\r\n directory = 'D:\\\\Profession\\\\Intern\\\\Assignments\\\\Codes\\\\Assignement Codes\\\\Part 2\\\\data_dumps'\r\n path = os.path.join(directory, 'dump_3')\r\n if not (os.path.exists(path)):\r\n os.mkdir(path)\r\n\r\n for date in range(1, 31):\r\n # date-month-year\r\n # file_name1 = path + '\\\\' + str(date) + '-8-2020' + '_file1.txt'\r\n\r\n # year-month-date\r\n # file_name1 = path + '\\\\' + '2020-08-' + str(date) + '_file3.txt'\r\n\r\n # month_year_date\r\n file_name1 = path + '\\\\' + 'Aug_2020_' + str(date) + '_file5.txt'\r\n\r\n # date-month-year\r\n # file_name2 = path + '\\\\' + str(date) + '-8-2020' + '_file2.txt'\r\n\r\n # year-month-date\r\n # file_name2 = path + '\\\\' + '2020-08-' + str(date) + '_file4.txt'\r\n\r\n # month_year_date\r\n file_name2 = path + '\\\\' + 'Aug_2020_' + str(date) + '_file6.txt'\r\n\r\n rows = []\r\n for row in range(100):\r\n string = 'asddfgfhgkhjghkweoriuywoipywbnxvnmznvnmbatr'\r\n rows.append(string)\r\n with open(file_name1, 'w') as f1, open(file_name2, 'w') as f2:\r\n f1.writelines(rows)\r\n f2.writelines(rows)", "def season_series(game_id, pref_team, other_team, last_season=False):\n\n # Init empty dictionaries and lists\n games_against = list()\n pref_toi = dict()\n pref_goals = dict()\n pref_assists = dict()\n pref_points = dict()\n pref_record = {\"wins\": 0, \"losses\": 0, \"ot\": 0}\n roster_player = True\n\n # If this is the first game of the season, we can set the 'last_season' flag to enable the\n # season series function to check last year's season series between the two teams.\n if not last_season:\n season_start = str(game_id)[0:4]\n season_end = str(int(season_start) + 1)\n yesterday = datetime.now() - timedelta(days=1)\n # yesterday = datetime.now() + timedelta(days=50)\n # schedule_url = (\n # f\"/schedule?teamId={pref_team.team_id}\"\n # f\"&expand=schedule.broadcasts,schedule.teams&startDate=\"\n # f\"{season_start}-08-01&endDate={yesterday:%Y-%m-%d}\"\n # )\n schedule_url = (\n 
f\"/schedule?teamId={pref_team.team_id}\"\n f\"&expand=schedule.broadcasts,schedule.teams\"\n f\"&season={season_start}{season_end}\"\n )\n else:\n season_start = int(str(game_id)[0:4]) - 1\n season_end = str(int(season_start) + 1)\n yesterday = datetime.now() - timedelta(days=1)\n # yesterday = datetime.now() + timedelta(days=50)\n # schedule_url = (\n # f\"/schedule?teamId={pref_team.team_id}\"\n # f\"&expand=schedule.broadcasts,schedule.teams&startDate=\"\n # f\"{season_start}-08-01&endDate={season_end}-06-01\"\n # )\n schedule_url = (\n f\"/schedule?teamId={pref_team.team_id}\"\n f\"&expand=schedule.broadcasts,schedule.teams\"\n f\"&season={season_start}{season_end}\"\n )\n\n schedule = api.nhl_api(schedule_url).json()\n dates = schedule[\"dates\"]\n\n # Loop through scheduled to get previously played games against\n for date in dates:\n game = date[\"games\"][0]\n game_type = game[\"gameType\"]\n game_id = game[\"gamePk\"]\n game_team_home = game[\"teams\"][\"home\"][\"team\"][\"name\"]\n game_team_away = game[\"teams\"][\"away\"][\"team\"][\"name\"]\n teams = [game_team_away, game_team_home]\n game_status = game[\"status\"][\"abstractGameState\"]\n if game_type == \"R\" and game_status == \"Final\" and other_team.team_name in teams:\n game_feed = f\"/game/{game_id}/feed/live\"\n games_against.append(game_feed)\n\n # If the two teams haven't played yet, just exit this function\n if not games_against:\n return None, None, None\n\n # Loop through newly created games_against list to get each stats\n for feed in games_against:\n game = api.nhl_api(feed).json()\n game_data = game[\"gameData\"]\n home_team_name = game_data[\"teams\"][\"home\"][\"name\"]\n pref_homeaway = \"home\" if home_team_name == pref_team.team_name else \"away\"\n other_homeaway = \"away\" if home_team_name == pref_team.team_name else \"home\"\n\n # Get season series\n end_period = game[\"liveData\"][\"linescore\"][\"currentPeriod\"]\n extra_time = True if end_period > 3 else False\n pref_score = game[\"liveData\"][\"linescore\"][\"teams\"][pref_homeaway][\"goals\"]\n other_score = game[\"liveData\"][\"linescore\"][\"teams\"][other_homeaway][\"goals\"]\n if pref_score > other_score:\n pref_record[\"wins\"] += 1\n elif other_score > pref_score and extra_time:\n pref_record[\"ot\"] += 1\n else:\n pref_record[\"losses\"] += 1\n\n season_series_str = f\"Series: {pref_record['wins']}-\" f\"{pref_record['losses']}-{pref_record['ot']}\"\n\n # Get stats leaders\n # pref_teamstats = game[\"liveData\"][\"boxscore\"][\"teams\"][pref_homeaway][\"teamStats\"]\n pref_playerstats = game[\"liveData\"][\"boxscore\"][\"teams\"][pref_homeaway][\"players\"]\n for id, player in pref_playerstats.items():\n try:\n # Calculate TOI\n player_toi_str = player[\"stats\"][\"skaterStats\"][\"timeOnIce\"]\n player_toi_minutes = int(player_toi_str.split(\":\")[0])\n player_toi_seconds = int(player_toi_str.split(\":\")[1])\n player_toi = (player_toi_minutes * 60) + player_toi_seconds\n pref_toi[id] = pref_toi.get(id, 0) + player_toi\n\n # Point Totals\n player_goal_str = player[\"stats\"][\"skaterStats\"][\"goals\"]\n pref_goals[id] = pref_goals.get(id, 0) + int(player_goal_str)\n player_assist_str = player[\"stats\"][\"skaterStats\"][\"assists\"]\n pref_assists[id] = pref_assists.get(id, 0) + int(player_assist_str)\n player_points = int(player_goal_str) + int(player_assist_str)\n pref_points[id] = pref_points.get(id, 0) + int(player_points)\n\n except KeyError:\n pass\n\n # Calculate Stats Leaders\n sorted_toi = sorted(pref_toi.values(), 
reverse=True)\n leader_toi = sorted_toi[0]\n\n sorted_points = sorted(pref_points.values(), reverse=True)\n leader_points = sorted_points[0]\n\n # Get TOI leader\n for id in pref_toi.keys():\n if pref_toi[id] == leader_toi:\n player_name = roster.player_attr_by_id(pref_team.roster, id, \"fullName\")\n if player_name is None:\n roster_player = False\n player_id_only = id.replace(\"ID\", \"\")\n player_name = roster.nonroster_player_attr_by_id(player_id_only, \"fullName\")\n leader_toi_avg = leader_toi / len(games_against)\n m, s = divmod(leader_toi_avg, 60)\n toi_m = int(m)\n toi_s = int(s)\n toi_s = \"0{}\".format(toi_s) if toi_s < 10 else toi_s\n toi_avg = \"{}:{}\".format(toi_m, toi_s)\n player_short_name = f\"{player_name[0]}. {' '.join(player_name.split()[1:])}\"\n toi_leader_str = \"TOI Leader: {} with {} / game.\".format(player_short_name, toi_avg)\n\n # Handle tied points leaders\n point_leaders = list()\n for id in pref_points.keys():\n if pref_points[id] == leader_points:\n point_leaders.append(id)\n\n if leader_points == 0:\n points_leader_str = \"Points Leader: None (all players have 0 points).\"\n\n elif len(point_leaders) == 1:\n leader = point_leaders[0]\n player_name = roster.player_attr_by_id(pref_team.roster, leader, \"fullName\")\n # If the player is no longer on the team, get their information (change string here?)\n if player_name is None:\n roster_player = False\n player_id_only = leader.replace(\"ID\", \"\")\n player_name = roster.nonroster_player_attr_by_id(player_id_only, \"fullName\")\n player_goals = pref_goals[leader]\n player_assists = pref_assists[leader]\n if not roster_player:\n points_leader_str = (\n f\"Points Leader: {player_name} with {leader_points} points \"\n f\"({player_goals}G {player_assists}A) \"\n )\n else:\n points_leader_str = \"Points Leader: {} with {} ({}G {}A).\".format(\n player_name, leader_points, player_goals, player_assists\n )\n\n elif len(point_leaders) > 3:\n point_leaders_with_attrs = list()\n for leader in point_leaders:\n player_name = roster.player_attr_by_id(pref_team.roster, leader, \"fullName\")\n if player_name is None:\n player_id_only = leader.replace(\"ID\", \"\")\n player_name = roster.nonroster_player_attr_by_id(player_id_only, \"fullName\")\n player_goals = pref_goals[leader]\n player_assists = pref_assists[leader]\n player_short_name = f\"{player_name[0]}. {' '.join(player_name.split()[1:])}\"\n point_leaders_with_attrs.append(player_short_name)\n\n point_leaders_joined = \", \".join(point_leaders_with_attrs[0:3])\n leftover_leaders = len(point_leaders) - 3\n points_leader_str = (\n f\"Points Leaders: {point_leaders_joined} & {leftover_leaders} others ({leader_points} each).\"\n )\n\n else:\n point_leaders_with_attrs = list()\n for leader in point_leaders:\n player_name = roster.player_attr_by_id(pref_team.roster, leader, \"fullName\")\n if player_name is None:\n player_id_only = leader.replace(\"ID\", \"\")\n player_name = roster.nonroster_player_attr_by_id(player_id_only, \"fullName\")\n player_goals = pref_goals[leader]\n player_assists = pref_assists[leader]\n player_short_name = f\"{player_name[0]}. 
{' '.join(player_name.split()[1:])}\"\n player_str = f\"{player_short_name} ({player_goals}G {player_assists}A)\"\n point_leaders_with_attrs.append(player_str)\n\n point_leaders_joined = (\n f\", \".join(point_leaders_with_attrs[:-1]) + f\" & {point_leaders_with_attrs[-1]}\"\n )\n points_leader_str = \"Points Leaders: {} with {} each.\".format(point_leaders_joined, leader_points)\n\n return season_series_str, points_leader_str, toi_leader_str" ]
[ "0.63666624", "0.6253113", "0.624497", "0.6097944", "0.6094452", "0.6092984", "0.6072868", "0.5952969", "0.593158", "0.5925691", "0.5921844", "0.58187026", "0.5802706", "0.578666", "0.57782906", "0.5777222", "0.5764585", "0.5715087", "0.5709732", "0.57018524", "0.56826544", "0.56780076", "0.56776935", "0.5625534", "0.55806273", "0.5579672", "0.55460346", "0.5530044", "0.5496095", "0.54721975" ]
0.68948334
0
Test the CLI convert command.
def test_cli_convert(app, bed_path, zarr_tmp_path): # noqa result = runner.invoke(app, ["convert", f"{bed_path}", f"{zarr_tmp_path}"]) assert result.exit_code == 0 assert zarr_tmp_path.is_dir()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_cli_conversion(self):\n output = main('coloredlogs', '--convert', 'coloredlogs', '--demo', capture=True)\n # Make sure the output is encoded as HTML.\n assert '<span' in output", "def test_convert():", "def test_convert(self):\n out_text = StringIO()\n with redirect_stdout(out_text):\n main([\"-id\", indir, \"-od\", compdir, \"-if\", \"ttl\", \"-of\", \"json-ld\"])\n self.assertEqual(\"\"\"Total=3 Successful=3\"\"\", out_text.getvalue().strip())", "def test_convert(convert_parameters):\n test_input = convert_parameters[0]\n expected_output = convert_parameters[1]\n assert geojson2fromto.convert(test_input) == expected_output", "def _test_converter(testname, fail_expected, error_text=None, format=\"yaml\"):\n # Let's start every test afresh\n wipe_etcd(get_ip())\n testdata = data[testname]\n\n # Convert data to V3 API using the tool under test\n rc = calicoctl(\"convert -o %s\" % format, data=testdata, format=format)\n if not fail_expected:\n logger.debug(\"Trying to convert manifest from V1 to V3\")\n rc.assert_no_error()\n if format == \"yaml\":\n parsed_output = yaml.safe_load(rc.output)\n else:\n parsed_output = json.loads(rc.output)\n # Get the converted data and clean it up (remove fields we don't care about)\n converted_data = clean_calico_data(parsed_output)\n original_resource = rc\n\n # Apply the converted data\n rc = calicoctl(\"create\", data=original_resource.output, format=format)\n logger.debug(\"Trying to create resource using converted manifest\")\n rc.assert_no_error()\n rc = calicoctl(\"get %s %s -o yaml\" % (converted_data['kind'], name(converted_data)))\n\n # Comparison here needs to be against cleaned versions of data to remove Creation Timestamp\n logger.debug(\"Comparing 'get'ted output with original converted yaml\")\n cleaned_output = yaml.safe_dump(\n clean_calico_data(\n yaml.safe_load(rc.output),\n extra_keys_to_remove=['projectcalico.org/orchestrator']\n )\n )\n original_resource.assert_data(cleaned_output, format=format)\n else:\n rc.assert_error(error_text)", "def test_translate_command(command, expected):\n assert translate_command(command) == expected", "def test_bclconvert(self):\n self.assertEqual(bcl2fastq.bclconvert(\n '/runs/150107_NB123000_0001_ABCX',\n '/output/bclconvert').command_line,\n ['bcl-convert',\n '--bcl-input-directory',\n '/runs/150107_NB123000_0001_ABCX',\n '--output-dir','/output/bclconvert'])\n self.assertEqual(bcl2fastq.bclconvert(\n '/runs/150107_NB123000_0001_ABCX',\n '/output/bclconvert',\n sample_sheet='SampleSheet.csv').command_line,\n ['bcl-convert',\n '--bcl-input-directory',\n '/runs/150107_NB123000_0001_ABCX',\n '--output-dir','/output/bclconvert',\n '--sample-sheet','SampleSheet.csv'])\n self.assertEqual(bcl2fastq.bclconvert(\n '/runs/150107_NB123000_0001_ABCX',\n '/output/bclconvert',\n no_lane_splitting=True).command_line,\n ['bcl-convert',\n '--bcl-input-directory',\n '/runs/150107_NB123000_0001_ABCX',\n '--output-dir','/output/bclconvert',\n '--no-lane-splitting','true'])\n self.assertEqual(bcl2fastq.bclconvert(\n '/runs/150107_NB123000_0001_ABCX',\n '/output/bclconvert',\n sampleproject_subdirectories=True).command_line,\n ['bcl-convert',\n '--bcl-input-directory',\n '/runs/150107_NB123000_0001_ABCX',\n '--output-dir','/output/bclconvert',\n '--bcl-sampleproject-subdirectories','true'])", "def test_convert_noun():\n result = convert(\"noun\")\n assert result == \"ounnay\"", "async def test_command(self, parser_mock):\n test_cases = (\n (),\n (15, ),\n (MockTextChannel(),),\n (MockTextChannel(), 15),\n )\n\n 
ctx = MockContext()\n parser_mock.return_value = (ctx.channel, 10)\n\n for case in test_cases:\n with self.subTest(\"Test command converters\", args=case):\n await self.cog.silence.callback(self.cog, ctx, *case)\n\n try:\n first_arg = case[0]\n except IndexError:\n # Default value when the first argument is not passed\n first_arg = None\n\n try:\n second_arg = case[1]\n except IndexError:\n # Default value when the second argument is not passed\n second_arg = 10\n\n parser_mock.assert_called_with(ctx, first_arg, second_arg)", "def test_canConvert(string, cast, expected):\n assert canConvert(string, cast) == expected", "def test_cli_string():\n cmd = get_cli_string()\n assert \"pytest\" in cmd", "def cli_main():\n\n\n if len(sys.argv) > 1 and sys.argv[1].endswith('.xmind'):\n xmind_file = sys.argv[1]\n xmind_file = get_absolute_path(xmind_file)\n logging.info('Start to convert XMind file: %s', xmind_file)\n\n if len(sys.argv) == 3 and sys.argv[2] == '-json':\n testlink_json_file = xmind_testcase_to_json_file(xmind_file)\n logging.info('Convert XMind file to testcase json file successfully: %s', testlink_json_file)\n elif len(sys.argv) == 3 and sys.argv[2] == '-xml':\n testlink_xml_file = xmind_to_testlink_xml_file(xmind_file)\n logging.info('Convert XMind file to testlink xml files successfully: %s', testlink_xml_file)\n elif len(sys.argv) == 3 and sys.argv[2] == '-csv':\n zentao_csv_file = xmind_to_zentao_csv_file(xmind_file)\n logging.info('Convert XMind file to zentao csv file successfully: %s', zentao_csv_file)\n elif len(sys.argv) == 3 and sys.argv[2] == '-xlsx':\n excel_xlsx_file = xmind_to_xlsx_file(xmind_file)\n logging.info('Convert XMind file to zentao csv file successfully: %s', excel_xlsx_file)\n else:\n testlink_json_file = xmind_testcase_to_json_file(xmind_file)\n testlink_xml_file = xmind_to_testlink_xml_file(xmind_file)\n zentao_csv_file = xmind_to_zentao_csv_file(xmind_file)\n logging.info('Convert XMind file successfully: \\n'\n '1、 testcase json file(%s)\\n'\n '2、 testlink xml file(%s)\\n'\n '3、 zentao csv file(%s)',\n testlink_json_file,\n testlink_xml_file,\n zentao_csv_file)\n\n\n else:\n print(__doc__)\n logging.error('%s', __doc__)", "def cli() -> None:", "def cli() -> None:", "def test_command(self):\n out = io.StringIO()\n management.call_command('import_data', stdout=out)\n self.assertIn(\"Successfully imported\", out.getvalue())", "def test_convert_adjective():\n result = convert(\"adjective\")\n assert result == \"adjectiveway\"", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():" ]
[ "0.74902606", "0.7343261", "0.72497123", "0.6713388", "0.6565135", "0.65025544", "0.64929765", "0.6471126", "0.6387078", "0.63310874", "0.6328124", "0.63148046", "0.627495", "0.627495", "0.62226474", "0.61437595", "0.6133579", "0.6133579", "0.6133579", "0.6133579", "0.6133579", "0.6133579", "0.6133579", "0.6133579", "0.6133579", "0.6133579", "0.6133579", "0.6133579", "0.6133579", "0.6133579" ]
0.7578506
0
export AmpliconSet as TSV
def export_amplicon_set_tsv(self, params): logging.info('start exporting amplicon set object') amplicon_set_ref = params.get('input_ref') amplicon_set_df = self._amplicon_set_to_df(amplicon_set_ref) result_dir = os.path.join(self.scratch, str(uuid.uuid4())) self._mkdir_p(result_dir) self._df_to_tsv(amplicon_set_df, result_dir, amplicon_set_ref) package_details = self.dfu.package_for_download({ 'file_path': result_dir, 'ws_refs': [amplicon_set_ref] }) return {'shock_id': package_details['shock_id']}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def export_tsv(self):\n outputfile = tkinter.filedialog.asksaveasfilename(\n defaultextension=\".tsv\",\n filetypes=((\"tab seperated values\", \"*.tsv\"),\n (\"All Files\", \"*.*\")))\n if outputfile:\n tabledata = self.tabs.window.aistracker.create_table_data()\n export.write_csv_file(tabledata, outputfile, dialect='excel-tab')\n else:\n raise ExportAborted('Export cancelled by user.')", "def dump_gazettes_as_csv(self):\n # TODO: dump_gazettes_as_csv\n pass", "def test_semmeddb_csv_to_tsv():\n t = PandasTransformer()\n nodes_file = os.path.join(resource_dir, \"semmed/semmeddb_test_nodes.csv\")\n edges_file = os.path.join(resource_dir, \"semmed/semmeddb_test_edges.csv\")\n output = os.path.join(target_dir, \"semmeddb_test_tsv_export\")\n\n t.parse(nodes_file)\n t.parse(edges_file)\n\n # save output as TSV in a tar archive\n t.save(output, extension='tsv')", "def write_tsv(self, filename):\n f = open(filename,'wb')\n wr = csv.writer(f,delimiter='\\t',quoting=csv.QUOTE_ALL)\n colrow = []\n for col in self.cols:\n colrow.append('<undefined>' if len(col) == 0 else unicode(iter(col).next()).encode('unicode-escape'))\n wr.writerow(colrow)\n for row in self.data:\n strrow = []\n for cell in row:\n strrow.append('' if cell is None else unicode(cell).encode('unicode-escape'))\n wr.writerow(strrow)\n f.close()", "def export_set(dataset):\n return to_xml(dataset.dict)", "def to_tsv(obj: ConfiguredBaseModel, file: str) -> str:\n\n # Extract headers and rows from object\n if isinstance(obj, Entity):\n headers = obj.dict().keys()\n rows = [list(obj.dict().values())]\n elif isinstance(obj, (AssociationCountList, HistoPheno, Results)):\n if not obj.items:\n headers = get_headers_from_obj(obj)\n rows = []\n else:\n headers = obj.items[0].dict().keys()\n rows = [list(item.dict().values()) for item in obj.items]\n else:\n raise TypeError(FMT_INPUT_ERROR_MSG)\n\n fh = open(file, \"w\") if file else sys.stdout\n writer = csv.writer(fh, delimiter=\"\\t\")\n writer.writerow(headers)\n for row in rows:\n writer.writerow(list(row))\n if file:\n fh.close()\n console.print(f\"\\nOutput written to {file}\\n\")\n\n return", "def to_abivars(self):", "def convert_quick_table(result):\n headline = result.split('\\n',1)[0]\n names, converters = MastCasJobs.get_converters(headline, delimiter=',')\n tab = ascii.read(MastCasJobs.replacenull(result,delimiter=','),\n guess=False,fast_reader=False,format='csv',\n names=names,converters=converters)\n return tab", "def tsv_value(self):\n return self.tsv_file.getvalue()", "def save_tsv_file(parsed_data):\n result_file.write('\\t'.join(parsed_data) + '\\n')", "def getSets():", "def _tab(content):\n response = _data_frame(content).to_csv(index=False,sep='\\t')\n return response", "def export_dataset(self):\n raise NotImplementedError", "def write_tsv_fast(self, filename):\n # TODO (without quotation marks)\n with open(filename, 'wb') as f:\n colnames = ['<undefined>' if len(col) == 0 else unicode(iter(col).next()).encode('unicode-escape') for col in self.cols]\n f.write('\\t'.join(colnames)+'\\n')\n for row in self.data:\n f.write('\\t'.join(['' if cell is None else unicode(cell).encode('unicode-escape') for cell in row])+'\\n')", "def _read_tsv(cls, input_file, quotechar=None):\n with tf.gfile.Open(input_file,\"r\") as f:\n reader = csv.reader(f,delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n lines.append(line)\n return lines", "def table(self):\n return self.t", "def mult_tab(self):\n raise NotImplementedError", "def _read_tsv(cls, input_file, 
quotechar='\"'):\n with tf.gfile.Open(input_file, \"r\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n lines.append(line)\n return lines", "def tsv_lines(self):\n return self.tsv_value.splitlines()", "def _read_tsv(cls, input_file, quotechar=None):\n with tf.gfile.Open(input_file, \"r\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n lines.append(line)\n return lines", "def test_TimeSeries_repr():", "def dataset(options):\n pass", "def fa2tab(filename):\n from Bio import SeqIO\n fo = open(filename,\"r\")\n fout = open(filename+\".tab\",\"w\")\n for seq in SeqIO.parse(fo, \"fasta\"):\n SeqIO.write(seq,fout,\"tab\")\n fo.close()\n fout.close()", "def readdata(self, fname):\n\t\treturn self.__readtsv(fname)", "def outputData(tname):\n\n table = pd.read_sql(\"SELECT * FROM {0}\".format(tname), ENGINE)\n table.to_csv(\"data/{0}.csv\".format(tname), sep=\",\", header=True, index=False, quoting=csv.QUOTE_NONNUMERIC)", "def parse_table_to_tracy_file(latname: str, df: pd.DataFrame, filename: str) -> None:\n save_string(parse_table_to_tracy_string(latname, df), filename)", "def autogen_dataset():\n return TabularDataset.autogen('tests/data/dummy_tabular/train.csv',\n seed=42,\n sep=',')", "def tsv_generator(file):\n for line in fileinput.input(file):\n article, summary = line.strip().split(\"\\t\")\n yield (article, summary)", "def tree2OTU_table(mvp_tree):\n series = []\n for terminal in mvp_tree.feature_tree.get_terminals():\n try:\n series.append(terminal.sample_series)\n except:\n print('there is no sample series in tree2OTU ')\n df = pd.dataframe(series)\n return df", "def create_tsv(df, filename=None):\n table = df.to_string()\n lines = table.splitlines()\n index_name = lines.pop(1).strip()\n lines[0] = index_name + lines[0][len(index_name):]\n table = '\\n'.join(lines)\n if filename is not None:\n with open(filename, 'w') as f:\n f.write(table)\n else:\n return table" ]
[ "0.59755164", "0.57235795", "0.5657693", "0.56393194", "0.56188923", "0.55969584", "0.5545065", "0.54538196", "0.5319591", "0.53152305", "0.527546", "0.5226011", "0.51641214", "0.5163705", "0.5158124", "0.5129905", "0.50534207", "0.50507843", "0.5033587", "0.50324833", "0.50033134", "0.4997099", "0.49861172", "0.49817425", "0.49753922", "0.49693528", "0.4956901", "0.49542722", "0.49520868", "0.49520448" ]
0.7254079
0
Test the _delete_sheets() method
def test_delete_sheet(self): self.workbook.add_worksheet('Sheet1') self.workbook.add_worksheet('Sheet2') self.workbook._write_sheets() exp = """<sheets><sheet name="Sheet1" sheetId="1" r:id="rId1"/><sheet name="Sheet2" sheetId="2" r:id="rId2"/></sheets>""" got = self.fh.getvalue() self.assertEqual(got, exp) self.fh.close() self.fh = StringIO() self.workbook._set_filehandle(self.fh) self.workbook._remove_sheet('Sheet1') self.workbook._write_sheets() exp = """<sheets><sheet name="Sheet2" sheetId="1" r:id="rId1"/></sheets>""" got = self.fh.getvalue() self.assertEqual(got, exp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_delete_buckets(self):\n pass", "def test_delete_bucket(self):\n pass", "def test_delete_run(self):\n pass", "def test_aws_service_api_vm_workshift_delete(self):\n pass", "def test_delete(self):\r\n course = CourseFactory.create(org='edX', course='999')\r\n with self.assertRaises(ValueError):\r\n tabs.primitive_delete(course, 0)\r\n with self.assertRaises(ValueError):\r\n tabs.primitive_delete(course, 1)\r\n with self.assertRaises(IndexError):\r\n tabs.primitive_delete(course, 6)\r\n tabs.primitive_delete(course, 2)\r\n self.assertFalse({u'type': u'textbooks'} in course.tabs)\r\n # Check that discussion has shifted up\r\n self.assertEquals(course.tabs[2], {'type': 'discussion', 'name': 'Discussion'})", "def test_delete7(self):\n pass", "def test_generate_sample_sheet(self):\n pass", "def test_delete_occurrence(self):\n pass", "def test_dashboards_v2_delete(self):\n pass", "def test_delete(self):\n pass", "def test_delete_deployment(self):\n pass", "def test_delete_book(self):\n\n delete_books()\n\n book = create_book(\"title one\")[\"book\"]\n\n self.assertEqual(\n read_book(book[\"id\"]),\n {\n \"status\": \"success\",\n \"book\": book\n }\n )\n\n with test_client.delete(\"/book/{}/\".format(book[\"id\"])) as response:\n\n self.assertEqual(\n json.loads(\n response.get_data(as_text=True)\n ),\n {\n \"status\": \"success\",\n \"book\": book\n }\n )\n\n self.assertEqual(\n read_book(book[\"id\"]),\n {\n \"status\": \"error\"\n }\n )\n\n with test_client.delete(\"/book/{}/\".format(book[\"id\"])) as response:\n self.assertEqual(\n json.loads(\n response.get_data(as_text=True)\n ),\n {\n \"status\": \"error\"\n }\n )\n\n \"\"\"\n clear the table, create several books and list them, remove one and list them again, remove another one \n and list them again\n \"\"\"\n\n delete_books()\n\n book_one = create_book(\"title one\")[\"book\"]\n book_two = create_book(\"title two\")[\"book\"]\n\n self.assertEqual(\n list_books(),\n {\n \"status\": \"success\",\n \"books\": [\n book_one,\n book_two\n ]\n }\n )\n\n with test_client.delete(\"/book/{}/\".format(book_two[\"id\"])) as response:\n\n self.assertEqual(\n json.loads(\n response.get_data(as_text=True)\n ),\n {\n \"status\": \"success\",\n \"book\": book_two\n }\n )\n\n self.assertEqual(\n list_books(),\n {\n \"status\": \"success\",\n \"books\": [\n book_one\n ]\n }\n )\n\n with test_client.delete(\"/book/{}/\".format(book_one[\"id\"])) as response:\n\n self.assertEqual(\n json.loads(\n response.get_data(as_text=True)\n ),\n {\n \"status\": \"success\",\n \"book\": book_one\n }\n )\n\n self.assertEqual(\n list_books(),\n {\n \"status\": \"success\",\n \"books\": []\n }\n )", "def tearDown(self):\n with contextlib.suppress(FileNotFoundError):\n Path(\"test.xlsx\").absolute().unlink()", "def test_delete_case(self):\n pass", "def test_delete_records(self):\n pass", "def test_delete(self):\n # login as library manager\n self.authenticate(self.user)\n\n # check there are 3 works\n self.assertEqual(Work.objects.count(), 3)\n\n self.assertNotEqual(self.work1.song_set.count(), 0)\n\n # prune works\n response = self.client.delete(self.url)\n\n # check http status\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # check the response\n self.assertDictEqual(response.data, {\"deleted_count\": 2})\n\n # check there are only 1 work remaining\n self.assertEqual(Work.objects.count(), 1)\n\n # check artists with songs remains\n self.assertEqual(Work.objects.filter(pk=self.work2.pk).count(), 0)\n 
self.assertEqual(Work.objects.filter(pk=self.work3.pk).count(), 0)", "def test_delete_deployment_run(self):\n pass", "def test_delete_shelf(self, *_):\n request = self.factory.post(\"\")\n request.user = self.local_user\n shelf_id = self.shelf.id\n\n views.delete_shelf(request, shelf_id)\n\n self.assertFalse(models.Shelf.objects.filter(id=shelf_id).exists())", "def test_delete_on_background_response_descriptor_subscriptions_subscription_subscription_resource_spaces(self):\n pass", "def test_delete_book(self):\n response = self.client.delete(self.book.get_absolute_url()) \n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(Book.objects.count(), 0)", "def test_delete_book(session, client, book1_dict, book2_dict, book3_dict, expect_book2_dict, expect_book3_dict):\n json_data = json.dumps(book1_dict)\n post_response = client.post(\"/books\", data=json_data, headers={\"Content-Type\": \"application/json\"})\n assert 201 == post_response.status_code\n\n json_data = json.dumps(book2_dict)\n post_response = client.post(\"/books\", data=json_data, headers={\"Content-Type\": \"application/json\"})\n assert 201 == post_response.status_code\n\n json_data = json.dumps(book3_dict)\n post_response = client.post(\"/books\", data=json_data, headers={\"Content-Type\": \"application/json\"})\n assert 201 == post_response.status_code\n\n \"\"\"delete on non-existant resource\"\"\"\n get_response = client.delete(\"/books/20\")\n assert 404 == get_response.status_code\n\n \"\"\"delete on invalid id\"\"\"\n get_response = client.delete(\"/books/L\")\n assert 400 == get_response.status_code\n\n \"\"\"Valid delete\"\"\"\n get_response = client.delete('/books/1')\n assert 204 == get_response.status_code\n\n \"\"\"test state of db after valid delete\"\"\"\n expected_payload = []\n expected_payload.append(expect_book2_dict)\n expected_payload.append(expect_book3_dict)\n get_response = client.get(\"/books\")\n assert 200 == get_response.status_code\n payload = get_response.get_json()\n assert expected_payload == payload\n\n \"\"\"test re-deleting resource\"\"\"\n get_response = client.delete(\"/books/1\")\n assert 404 == get_response.status_code", "def test_export_spreadsheet(self):\r\n client = self.getClient()\r\n if client:\r\n exp = [['#SampleID', 'DOB'],\r\n ['#Example mapping file for the QIIME analysis package. 
'\r\n 'These 9 samples are from a study of the effects of exercise '\r\n 'and diet on mouse cardiac physiology (Crawford, et al, '\r\n 'PNAS, 2009).'], ['PC.354', '20061218'],\r\n ['PC.355', '20061218'], ['PC.356', '20061126'],\r\n ['PC.481', '20070314'], ['PC.593', '20071210'],\r\n ['PC.607', '20071112'], ['PC.634', '20080116'],\r\n ['PC.635', '20080116'], ['PC.636', '20080116']]\r\n obs = _export_spreadsheet(client, self.spreadsheet_key,\r\n self.worksheet_id, ['#SampleID', 'DOB'])\r\n self.assertEqual(obs, exp)\r\n else:\r\n raise GoogleSpreadsheetConnectionError(\"Cannot execute test \"\r\n \"without an active Internet connection.\")", "def test_delete_on_background_response_descriptor_projects_release_release_resource_spaces(self):\n pass", "def test_delete1(self):\n pass", "def deleteShards():\n os.popen('rm *_shard')", "def test_employee_deletion(self):\n res = self.client().delete(service_url_emp, json={\"id_emp\": 1})\n self.assertEqual(res.status_code, 204)\n # Test to see if it exists, should return a 400\n result = self.client().get(service_url_emp+'/1')\n self.assertEqual(result.status_code, 400)", "def test_client_document_delete(self):\n pass", "def test_delete_book(self):\n\n\t\t# create book\n\t\tadd_book = {\n\t\t\t'title': 'Hello Books',\n\t\t\t'isbn': '5698745124'\n\t\t}\n\n\t\tlogin_data = self.login_test_user()\n\t\ttoken = login_data['auth_token']\n\t\tres = self.client.post(\n\t\t\tf'{URL_BOOKS}',\n\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\tcontent_type='application/json',\n\t\t\tdata=json.dumps(add_book)\n\t\t)\n\n\t\t# delete book\n\t\tdel_book = self.client.delete(\n\t\t\tf'{URL_BOOKS}/1',\n\t\t\theaders=dict(Authorization=f'Bearer {token}')\n\t\t)\n\n\t\tres3 = json.loads(del_book.data.decode())\n\t\tself.assertTrue(res3['message'] == 'book with id 1 has been deleted')", "def test_delete_Student(self):\n school_ids = self.create_School(2,20)\n ids=[]\n url = '/students'\n data = {'first_name': 'Poompatai', 'last_name': 'Puntitpong','age': 20, 'nationality': 'Thailand', 'school': school_ids[0]}\n\n for i in range(3):\n response = self.client.post(url, data, format='json')\n ids.append(str(response.data['id']))\n\n self.assertEqual(Student.objects.count(), 3)\n\n for i in range(3):\n url = '/students/' + ids[i]\n response = self.client.delete(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(Student.objects.count(), 2 - i)\n\n url = '/students/aaaa'\n response = self.client.delete(url, format='json')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def test_delete(self):\n responses.add(\n responses.Response(\n method='DELETE',\n url='https://connection.keboola.com/v2/storage/buckets/1?force=False&async=False',\n json={}\n )\n )\n bucket_id = '1'\n deleted_detail = self.buckets.delete(bucket_id, asynchronous=False)\n assert deleted_detail is None" ]
[ "0.64588976", "0.59919167", "0.5933634", "0.58989644", "0.58353966", "0.5820436", "0.57939655", "0.5754813", "0.57505316", "0.5708442", "0.5697343", "0.56962657", "0.5692305", "0.56788445", "0.56646174", "0.5659868", "0.5630166", "0.5594766", "0.55886954", "0.55800515", "0.55416936", "0.5523753", "0.55162346", "0.55152464", "0.54938245", "0.5471314", "0.5465997", "0.54624546", "0.54618806", "0.54586124" ]
0.7621504
0
Sends a reminder email based on the arguments for easy scripting
def send_reminder(): name = config["email"]["name"] user = config["email"]["user"] subject = "REMINDER: %s" % sys.argv[1] body = sys.argv[2] if len(sys.argv) > 2 else "" email_helper.send(user, name, user, subject, body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def task_send_reminder_email():\n send_reminder_email()\n logger.info(\"Sent reminder email\")", "def send_reminder(self):\n pass", "def exec(self): \r\n emails = self.args[0].split(',')\r\n for email in emails:\r\n send_mail(self.args[1], self.args[2], email)\r\n return_text = \"Sent Mail To :: \" + self.args[0] +\"\\n\" + self.args[1] + \":\\n\" + self.args[2]\r\n return return_text", "def send_reminder(self):\n message_contents = \"This is a reminder that your event: \" + self.event_title + \" takes place on \" + self.event_date + \" in \" + self.event_location\n subject = \"Event Reminder\"\n attendees = self.gameplanuser_set.all()\n for attendee in attendees:\n remindermessage = Message.objects.create(sender=self.event_manager, recipient=attendee, contents=message_contents)\n remindermessage.save()", "def send_reminder(self, url):\n variables = {\"url\": url, \"username\": self.contact.user.alias}\n send_template_email(recipients=[self.identifier],\n subject=\"Reminder from Rmnd.in!\",\n from_address=\"[email protected]\",\n variables=variables,\n template=\"email/reminder_email\")", "def handle(self, *args, **options):\n\n candidates_with_email = [candidate for candidate in Candidate.objects.all()\n if candidate.contact_address and candidate.participating]\n\n\n print 'sending e-mails'\n conn = get_connection()\n for c in candidates_with_email:\n if c.should_send_reminder():\n\n print 'emailing', c\n # store timestamp for reminder email so that they don't get another one for <REMINDER_TIME_PERIOD> days\n c.last_reminder_sent = timezone.now()\n c.save()\n msg = make_email(c)\n conn.send_messages([msg])\n conn.close()", "def send_feedback_email_task(subject, message, sender, reciever):\n logger.info(\"Reminder email\")\n return send_reminder_mail(subject, message, sender, reciever)", "def main():\n args = parse_argv()\n server = get_server(args.hostname, args.sender.split(\"@\")[0])\n send_mail(server, args.sender, args.recipients, args.message)", "def send_email():\n send_mail(\"You've got some problem.\", 'REPAIR IT', '[email protected]',\n ['[email protected]'], fail_silently=False,)", "def send_created_email(self):\n if settings.NOTIFY_NEW_REG:\n to = settings.NOTIFY_NEW_REG\n message = \"\"\"\\\nGreetings,<br><br>\n\nA new vehicle registration has been submitted by %s.<br><br>\n\nGo here to view or edit the request: <br>\n<a href=\"%s\">%s</a>\n<br><br>\nSincerely,<br><br>\nThe Janelia Parking Permit Program\n \"\"\" % (self.user_display_name(), self.get_edit_url(True), self.get_edit_url(True))\n subject = 'A new parking permit request has been entered'\n from_email = '[email protected]'\n text_content = re.sub(r'<[^>]+>','',message)\n html_content = message\n msg = EmailMultiAlternatives(subject, text_content, from_email, to)\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()", "def main(arguments, emailer):\n emailer.read_config()\n print(\"Config read.\")\n emailer.setup_config(pages=arguments.pages,\n email_list=arguments.email_list,\n items_range=arguments.range,\n config=arguments.config,\n database=arguments.database,\n file=arguments.file,\n email_address=arguments.email_address,\n email_password=arguments.email_password,\n send_time=arguments.time,\n frequency=arguments.frequency)\n emailer.write_config()\n \n emailer.setup_database()\n if emailer.pull_items_search() != 'bot':\n print(\"Items retrieved\")\n else:\n return\n \n emailer.items_to_xls()\n print(\"xls file created.\")\n emailer.items_to_csv()\n print(\"csv file created\")\n\n print(\"Sending 
emails.\")\n emailer.send_email()", "def main_email(name, total, answered, not_answered, declines, remaining):\n\n start = smtplib.SMTP(host=HOST, port=PORT)\n start.starttls()\n start.login(ADDRESS, PASSWORD)\n\n date = datetime.datetime.now()\n date_now = date.strftime(\"%m-%d-%Y\")\n\n print_list, email_dict = simple_contacts('contacts.txt')\n\n emails = get_emails(print_list, email_dict)\n\n message_template = read_template()\n\n for mail in emails:\n pretty_print(f\"Sending email to {mail}\", \"!\")\n msg = MIMEMultipart()\n\n message = message_template.substitute(PERSON_NAME=name, DATE=date_now, TOTAL_CALLED=total, ANSWERED=answered, NOT_ANSWERED=not_answered, DECLINES=declines, REMAINING=remaining)\n\n msg['From'] = ADDRESS\n msg['To'] = mail\n msg['Subject'] = f\"{name} - Calling Campaign Summary - {date_now}\"\n\n msg.attach(MIMEText(message, 'plain'))\n start.send_message(msg)\n pretty_print(f\"Mail sent to {mail}\", \"!\")\n\n del msg\n\n start.quit()", "def execute(self):\n return LOGGER.info(f\"{datetime.datetime.now()} - Sending EMail to the configured email list\")", "def test_email_reminders(self, mock_tz):\n mock_tz.now.return_value = datetime(\n 2015, 2, 10, 19, 0, tzinfo=dt_timezone.utc\n )\n\n # cancellation period starts 2015/2/11 18:00\n event = baker.make_recipe(\n 'booking.future_EV',\n date=datetime(2015, 2, 12, 18, 0, tzinfo=dt_timezone.utc),\n payment_open=True,\n cost=10,\n cancellation_period=24)\n # cancellation period starts 2015/2/12 18:00\n event1 = baker.make_recipe(\n 'booking.future_EV',\n date=datetime(2015, 2, 13, 18, 0, tzinfo=dt_timezone.utc),\n payment_open=True,\n cost=10,\n cancellation_period=24)\n baker.make_recipe(\n 'booking.booking', event=event, _quantity=5,\n )\n baker.make_recipe(\n 'booking.booking', event=event1, _quantity=5,\n )\n # add user emails\n _add_user_email_addresses(Booking)\n\n management.call_command('email_reminders')\n # emails are only sent for event1\n self.assertEqual(len(mail.outbox), 5)", "def send_email(name, receiver, summary, cmd, label=None):\n py_version = '%d.%d' % (sys.version_info.major, sys.version_info.minor)\n\n # Send the email.\n sender = 'noreply@' + socket.gethostname()\n if summary is None:\n # We passed, no need to send an email!\n return True\n label = label if label else \"some reason\"\n smtp_msg = MSG_FMT.format(sender=sender, name=name, cmd=cmd,\n content=summary, receiver=receiver,\n version=py_version, label=label)\n smtp = smtplib.SMTP(HOST, port=PORT)\n smtp.sendmail(sender, [receiver], smtp_msg)\n return True", "def main():\n parser = argparse.ArgumentParser(description='this function can be used '\n 'to notify after an event occured via mail and console')\n\n parser.add_argument(\"-e\", \"--event\", action=\"store\", dest=\"event\",\n help=\"summery of the event (e.g. -e 'Exception caught!')\")\n\n parser.add_argument(\"-d\", \"--event_details\", action=\"store\", dest=\"event_details\",\n help=\"event details (e.g. -d 'NullPointerException ...')\")\n\n parser.add_argument(\"-u\", \"--mail_user\", action=\"store\", dest=\"mail_user\",\n help=\"email user which is equal to email itself (e.g. -u [email protected])\")\n\n parser.add_argument(\"-p\", \"--mail_password\", action=\"store\", dest=\"mail_password\",\n help=\"email password (e.g. -p 123456)\")\n\n parser.add_argument(\"-o\", \"--host_name\", action=\"store\", dest=\"host_name\",\n help=\"host name that the issue occured on (e.g. 
-o storage-ge4-test.scl.lab.tlv.redhat.com)\")\n\n parser.add_argument(\"-t\", \"--test_name\", action=\"store\", dest=\"test_name\",\n help=\"name of the test (e.g. -e 'TestCase18145')\")\n\n parser.add_argument(\"-x\", \"--target_mail\", action=\"store\", dest=\"target_mail\",\n help=\"email target address (e.g. -u '[email protected]')\")\n\n parser.add_argument(\"-l\", \"--log_path\", action=\"store\", dest=\"log_path\",\n help=\"the path of the log directory (e.g -l '/tmp/bug_hunter_logs')\")\n\n options = parser.parse_args()\n\n notify_via_mail_and_console(\n options.event, options.event_details, options.target_mail, options.mail_user, options.mail_password,\n options.host_name, options.test_name, options.log_path\n )", "def replyMessage(_email, _name):\n\n _mailer = app.config['MAIL_USERNAME']\n mesg = Message(\"Message Received\", sender=('iSOLveIT Contact', f'{_mailer}'), recipients=[_email])\n mesg.body = f'''Hello {_name},\nThe message you sent to Randy has been received. \nRandy will contact you within 24 hours.\nThank you.\n\nRegards,\nRandy\n\nDate Sent: {dt.now(tz=GMT_tz).strftime('%B %d, %Y, %H:%M ') + 'GMT'}\n'''\n mail.send(mesg)\n return 'OK'", "def recs():\n click.echo(\"Emailing recommendations to destination...\")\n dio_dir: DioDir = DioDir()\n sched: ScheduleABC = DefaultSchedule()\n today: datetime.date = datetime.datetime.now().date()\n res: Optional[List[Person]] = get_recs(dio_dir, sched, today)\n next_day: datetime.date = sched.next_emailing_day(today)\n message: str = recs_to_message(res, next_day)\n settings: Optional[Settings] = dio_dir.get_settings()\n assert settings is not None, \"Have to setup diogenes to get emails. Run `dio setupemail`\"\n send_message(message, today, settings)\n click.echo(\"Recommendations emailed!\")", "def test_sending_mail(self):\n\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n reminders.Patient.objects.filter(\n pk__in=[self.test_patient.pk, self.other_patient.pk]\n ).update(next_visit=appt_date)\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n self.startRouter()\n self.router.logger.setLevel(logging.DEBUG)\n\n # run email job\n from afrims.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertTrue(self.test_contact.email in message.to)\n self.stopRouter()", "def task_rescheduled_notify(name, attempts, last_error, date_time, task_name, task_params):\n body = loader.render_to_string(\n 'notification/email/notify_rescheduled_task.html', {\n 'name': name,\n 'attempts': attempts,\n 'last_error': last_error,\n 'date_time': date_time.strftime(\"%Y-%m-%d %H:%M:%S\"),\n 'task_name': task_name,\n 'task_params': task_params,\n 'signature': settings.EMAIL_SIGNATURE\n })\n subject = name + \" has been rescheduled\"\n mail_admins(subject, body, settings.DEFAULT_FROM_EMAIL)", "def generate_email(mail, env):\n race, results, standings = get_last_results_and_standings()\n next_race = get_next_race()\n\n subject = f\"Race digest - F1 2021 | Round {race.round} | {race.name}\"\n body = (f\"Results:\\n{results}\\n\\nCurrent standings:\\n\"\n f\"{standings}\\n\\nNext race: {next_race}\")\n\n login_info = env['EMAIL_ADDRESS'], env['EMAIL_PASSWORD']\n\n subs = update_db_and_get_subs(mail, (env['EMAIL_ADDRESS'], env['EMAIL_PASSWORD']))\n\n for sub in subs:\n send_email(subject, body, sub, login_info)", "def email_body_meeting_reminder():\n\tmsg = '<table cellspacing=\"0\" 
cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">Drats. <a href=\"#\" style=\"color:#1488CC\">{insert seller name} cancelled your appointment</a>.<br><br>'\n\tmsg = msg + '\\t\\t\\t <a href=\"#\" style=\"color:#1488CC\">Reschedule</a> or you can send a message to inquire about the cancellation. <br><br>'\n\tmsg = msg + '\\t\\t\\t And, don\\'t worry! You won\\'t be charged, promise. </font><br><br>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg", "def send_reminder(self, url):\n redditor = praw.models.Redditor(self.client, name=self.identifier)\n redditor.message(\"Reminder from Rmnd.in!\", url)", "def send_confirmation(send_to, apply_info):\n msg = \"\"\"Hello,\n\nThis is a friendly confirmation for your Simply Apply application for position '{job_title}' at {job_company}.\n\nThank you,\nThe Simply Hired Team\"\"\".format(**apply_info)\n\n send_email('Simply Apply <[email protected]>', send_to, 'Simply Apply Confirmation', msg)", "def notification_email(self, sender, subject, body):\n\n\t\tts = str(int(time.time())*1000)\n\t\tparts = [sender, 
body, ts, subject]\n\t\tself._send_message(\"NOTIFICATION\", self._pack_message_data(0, parts))", "def post(self):\n return send_email(request.args)", "def delegate_last_day():\n\n regs = Registration.objects.all()\n\n template = 'notifications/last_day_mail.html'\n\n for reg in regs:\n subject = 'SciPy.in 2011: Schedule and other details'\n message = loader.render_to_string(\n template, dictionary={'name': reg.registrant.username})\n\n reg.registrant.email_user(subject=subject, message=message,\n from_email='[email protected]')", "def send_emails():\n\n cmd = \"sendmail -f [email protected]\"\n for msg in EMAIL_MESSAGES:\n for rec in RECIPIENTS:\n call(\"echo '%s' | %s %s\" % (msg, cmd, rec), None, True)", "def remind():\n ntftion.notify('reminder', f\"{self.notification}:\\n{self.work_name}\\n{self.work_datetime.hour}: \"\n f\"{self.work_datetime.minute} \", app_icon='reminder.ico', timeout=3)", "def send_email(msg):\n\tprint(\"sendEmail: \" + msg)" ]
[ "0.74956924", "0.7179477", "0.70684314", "0.6713392", "0.6654129", "0.6575302", "0.65031403", "0.64966327", "0.6465705", "0.6435539", "0.6431049", "0.64269364", "0.6388207", "0.6252947", "0.62377954", "0.6234091", "0.62230086", "0.62125534", "0.61743337", "0.6134789", "0.6124602", "0.61239123", "0.61095107", "0.6085106", "0.6026115", "0.60239965", "0.6021992", "0.60213006", "0.60208726", "0.6017339" ]
0.8920202
0
Take a diff string and convert it to syntax highlighted HTML. This takes a diff string and runs it through vim's TOhtml script to generate HTML that shows a rendered diff with syntax highlighting.
def _generate_html_diff(diff_output): diff_output_file = tempfile.NamedTemporaryFile(delete=False) with open(diff_output_file.name, 'w') as t: t.write(diff_output) # Use the default colorscheme on a light background for the # generated html diff_colorize_cmd = shlex.split( 'vim ' '-c "set bg=light" ' '-c "colo default" ' '-c "let g:html_no_progress=1" ' '-c "let g:html_number_lines=1" ' '-c "let g:html_prevent_copy=\\"n\\"" ' '-c "let g:html_ignore_folding=1" ' '-c "TOhtml" ' '-c "wqa" ' '-- {diff_output_file}'.format(diff_output_file=diff_output_file.name)) subprocess.call(diff_colorize_cmd) with open('{0}.html'.format(diff_output_file.name)) as o: html_diff_output = o.read() # Remove the temporary files os.unlink(diff_output_file.name) os.unlink(diff_output_file.name + '.html') return html_diff_output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _style_diff(self, diff):\n # NOTE: Django wraps the contents in a <p>, but browsers will\n # be sad about that, because it contains a <pre>. Chrome,\n # for instance, will move it out into its own node. Be\n # consistent and just make that happen for them.\n return format_html(\n '</p>{0}<p>',\n mark_safe(highlight(diff, DiffLexer(), HtmlFormatter())))", "def _generate_side_by_side_html_diff(\n string1_output, string2_output, string3_output=None,\n string4_output=None):\n string_output_files = []\n for string_output in [\n string1_output, string2_output, string3_output,\n string4_output]:\n if not string_output:\n break\n temp_file = tempfile.NamedTemporaryFile(delete=False)\n with open(temp_file.name, 'w') as t:\n t.write(string_output)\n string_output_files.append(temp_file.name)\n \n side_by_side_html_output_file = tempfile.NamedTemporaryFile(delete=False)\n vim_side_by_side_cmd = shlex.split(\n 'vim -O '\n '-c \"set bg=light\" '\n '-c \"colo default\" '\n '-c \"windo diffthis\" '\n '-c \"let g:html_number_lines=1\" '\n '-c \"let g:html_no_progress=1\" '\n '-c \"let g:html_prevent_copy=\\\\\"n\\\\\"\" '\n '-c \"let g:html_ignore_folding=1\" '\n '-c \"TOhtml\" '\n '-c \"w! {side_by_side_html_output_file}\" '\n '-c \"qa!\" '\n '-- {string_output_files}'.format(\n side_by_side_html_output_file=side_by_side_html_output_file.name,\n string_output_files=' '.join(string_output_files)))\n \n subprocess.call(vim_side_by_side_cmd)\n \n with open(side_by_side_html_output_file.name) as t:\n side_by_side_html_output = t.read()\n \n # Remove temp files\n os.unlink(side_by_side_html_output_file.name)\n for string_output_file in string_output_files:\n os.unlink(string_output_file)\n\n return side_by_side_html_output", "def mdhtml_to_html(data_str):\n mdrenderer = mistune.Renderer()\n markdown = mistune.Markdown(renderer=mdrenderer)\n return markdown(data_str)", "def htmlFormat(self, text):\n txt_blocks = self._parser_block(lex_block(text))\n\n #XXX: Maybe there is a better solution, but I doubt\n #The problem is nested escapestyles\n escape_d = {}\n escapes = re.compile('\\[escapestyle\\] \\s* (?P<inner>(.|\\s)*?) 
\\s* \\[/escapestyle\\]', re.VERBOSE)\n def rem(mo):\n h_code = hash(mo.group(0))\n escape_d[h_code] = mo.group('inner')\n return '(<!%s!>)' % h_code\n txt_blocks = escapes.sub(rem, txt_blocks)\n\n txt_style = parser_style(lex_style(txt_blocks))\n\n eess = re.compile('\\(<!(-?\\d+)!>\\)')\n def back(mo):\n val = int(mo.group(1))\n if escape_d.has_key(val):\n return escape_d[val]\n return mo.group(0)\n txt_style = eess.sub(back, txt_style)\n\n return txt_style", "def test_gen_diff_html(mock_diff):\n from_title = \"from_title_content\"\n from_lines = \"left content here\"\n to_title = \"to_title_content\"\n to_lines = \"different content on the right here\"\n mock_diff.return_value.make_table.return_value = \"<t>{} {}</t>\".format(\n from_lines, to_lines\n )\n\n html = cmds._gen_diff_html(from_title, [from_lines], to_title, [to_lines])\n\n assert html.count(from_title) == 2\n assert html.count(from_lines) == 1\n assert html.count(to_title) == 2\n assert html.count(to_lines) == 1", "def markdown_to_html(s):\n return markdown(s)", "def htmlize(text):\n htmlized = markdown.markdown(\n text,\n output_format=\"xhtml5\", safe_mode=\"escape\",\n )\n htmlversion = htmltemplate.format(body=htmlized)\n return htmlversion", "def htmlise(s):\n return '<div><pre class=\"tablecell\">' + html.escape(s) + '</pre></div>'", "def convert_html():\n return", "def render_udiff(udiff):\n change_pattern = re.compile(r'@@ -(\\d+)(?:,(\\d+))? \\+(\\d+)(?:,(\\d+))? @@')\n\n lines = udiff.splitlines()\n line_iter = iter(lines)\n\n mods = {}\n\n try:\n line = line_iter.next()\n mods['old'] = line[4:]\n line = line_iter.next()\n mods['new'] = line[4:]\n\n mods['lines'] = []\n\n line = line_iter.next()\n\n while True:\n match = change_pattern.match(line)\n if match is not None:\n old_line, old_end, new_line, new_end = [int(x or 1) for x in match.groups()]\n old_line -= 1\n new_line -= 1\n old_end += old_line\n new_end += new_line\n\n line = line_iter.next()\n\n while old_line < old_end or new_line < new_end:\n old_change = new_change = False\n command, content = line[0], line[1:]\n\n if command == ' ':\n old_change = new_change = True\n action = 'none'\n elif command == '-':\n old_change = True\n action = 'del'\n elif command == '+':\n new_change = True\n action = 'add'\n\n old_line += old_change\n new_line += new_change\n\n mods['lines'].append({\n 'old_line': old_change and old_line or u'',\n 'new_line': new_change and new_line or u'',\n 'action': action,\n 'content': content\n })\n\n line = line_iter.next()\n except StopIteration:\n pass\n\n return mods", "def md2html(template,filepath):\n content=''\n s = string.Template(template) \n try:\n content=markdown2.markdown_path(filepath)\n except:\n logger.warning('md2html:markdown convertion failed... Trying safe mode ')\n try:\n content=markdown2.markdown_path(filepath,safe_mode=True)\n except:\n logger.error('md2html:markdown convertion failed for %s. Use raw text.' %filepath)\n import codecs\n try:\n content=codecs.open(filepath,'r','utf-8').read()\n except:\n logger.error('md2html:invalid file? %s ' %filepath)\n # print 'error processing markdown. Read raw file...' \n html=''\n try:\n html=s.substitute(content=content)\n except:\n logger.warning('md2html()::string.Template substitute failed... 
Trying safe mode ')\n try:\n html=s.safe_substitute(content=content) \n except:\n logger.error('md2html()::string.Template conversion failed for : %s ' %filepath)\n return html", "def txt_to_html(in_str):\n replace_list = {\n \">\": \"&gt;\",\n \"<\": \"&lt;\",\n \"\\n\": \"<br/>\",\n }\n for i in replace_list:\n in_str = re.sub(i, replace_list[i], in_str)\n return in_str", "def bugs_to_html(string):\n string = string.strip()\n string = escape(string)\n index = 0\n string = BUG_REGEX.sub('<a href=\"%s\\\\3\">\\\\1</a>' % BUG_URL, string)\n match = DATE_REGEX.search(string, index)\n if match:\n start = match.start()\n next = DATE_REGEX.search(string, match.end() + 1)\n if next:\n end = next.start() - 1\n else:\n end = len(string)\n substring = string[start:end]\n html = '<span class=\"change\"><span class=\"date\">%s</span>%s</span>' % \\\n (substring[:11], substring[11:])\n (string, index) = mstring.replace_sub(string[:end+1], html, start, end)\n string = GENTOO_DEV.sub('(<a href=\"%s\\\\2\">\\\\1</a>)' % CIA_URL, string)\n return '<span class=\"change\">%s\\n\\n</span>' % string", "def highlight(contents, lexer):\n formatter = formatters.HtmlFormatter(\n linenos=\"table\", lineanchors=\"loc\", anchorlinenos=True\n )\n return pygments.highlight(contents, lexer, formatter)", "def diff(self):\n # Split new and existing content in lines\n current_content = self.current_content.splitlines(1)\n new_content = self.content.splitlines(1)\n\n # Call difflib\n diff = ''.join(difflib.unified_diff(current_content, new_content))\n print highlight(diff, DiffLexer(), Formatter())\n\n return diff", "def htmlForMarkdown(md):\n return mdProcessor.convert(md)", "def make_wikipage_diff(self, group, old_ver_num, new_ver_num):\n old_content = self.get_version_content(group, old_ver_num)\n new_content = self.get_version_content(group, new_ver_num)\n d = difflib.HtmlDiff()\n diff_table = d.make_table(old_content.splitlines(), new_content.splitlines())\n diff_table = diff_table.replace('&nbsp;', ' ').replace(' nowrap=\"nowrap\"', '')\n return diff_table", "def convert(md_text):\n # separate by line\n md_text = md_text.split('\\n')\n\n # save the html content for return\n html_text = ''\n\n # begin looping from the first line\n index = -1\n while index < len(md_text) - 1:\n index += 1\n line = md_text[index]\n\n # code segment\n if len(line) >= 3 and line[:3] == '```':\n html_line = \"\"\n language = line[3:].replace(' ', '')\n if len(language) == 0:\n language = False\n order_index = index + 1\n find_end = False\n while order_index < len(md_text):\n if md_text[order_index][:3] == '```':\n find_end = True\n break\n else:\n temp_line = md_text[order_index]\n temp_line = temp_line.replace('<', '&lt;')\n temp_line = temp_line.replace('>', '&gt;')\n temp_line = temp_line.replace(' ', '&nbsp;')\n html_line += temp_line + '<br />'\n order_index += 1\n\n if find_end:\n # if language is not False:\n # html_text += ('<pre><code class=\"' + language + '\">' + html_line + '</code></pre>')\n # else:\n html_text += ('<code>' + html_line + '</code>')\n # print(language)\n index = order_index\n continue\n\n # inline code\n\n\n # header\n is_header, html_line = check_header(line)\n if is_header:\n html_text = html_text + html_line\n continue\n\n # horizontal rule\n is_horizontal_rule, html_line = check_horizontal_rule(line)\n if is_horizontal_rule:\n html_text = html_text + html_line\n continue\n\n # paragraph\n line = check_paragraph(line)\n\n # deal with ordered list\n if len(line.split('.')) != 0 and '1.' 
== line[:2]:\n html_line = '<ol>'\n order_index = index\n while order_index < len(md_text)\\\n and len(md_text[order_index].split('.')) != 0\\\n and (str(order_index - index + 1) == md_text[order_index].split('.')[0]\n or '1' == md_text[order_index].split('.')[0]):\n to_replace = [str(order_index - index + 1) + '.', '1.']\n for replace_content in to_replace:\n md_text[order_index] = md_text[order_index].replace(replace_content, '')\n html_line = html_line + '<li>' + md_text[order_index] + '</li>'\n\n order_index += 1\n index = order_index - 1\n html_line = html_line + '</ol>'\n line = html_line\n\n # deal with unordered list\n is_unordered_list, html_line = check_unordered_list(line)\n if is_unordered_list:\n line = html_line\n\n # deal with strong\n line = strong(line)\n\n # Scratch\n line = scratch(line)\n\n # italics\n line = italics(line)\n\n # image\n while len(re.match(r'((?P<pre_text>.*)!\\[(?P<alt_text>.*)\\]\\((?P<link>.*)\\)(?P<after_text>.*))*', line).group())\\\n != 0:\n match = re.match(r'((?P<pre_text>.*)!\\[(?P<alt_text>.*)\\]\\((?P<link>.*)\\)(?P<after_text>.*))*', line)\n pre_text = match.group('pre_text')\n alt_text = match.group('alt_text')\n link = match.group('link')\n after_text = match.group('after_text')\n img_html = '<img src=\"' + link + '\" alt=\"' + alt_text + '\">'\n line = pre_text + img_html + after_text\n\n # link\n while len(re.match(r'((?P<pre_text>.*)\\[(?P<alt_text>.*)\\]\\((?P<link>.*)\\)(?P<after_text>.*))*', line).group())\\\n != 0:\n match = re.match(r'((?P<pre_text>.*)\\[(?P<alt_text>.*)\\]\\((?P<link>.*)\\)(?P<after_text>.*))*', line)\n pre_text = match.group('pre_text')\n alt_text = match.group('alt_text')\n link = match.group('link')\n after_text = match.group('after_text')\n img_html = '<a href=\"' + link + '\">' + alt_text + '</a>'\n line = pre_text + img_html + after_text\n\n html_text = html_text + line\n if not is_unordered_list:\n html_text = html_text + '<br>'\n\n return html_text", "def _buildDiff(self):\n outputList = []\n for tag, alo, ahi, blo, bhi in self.cruncher.get_opcodes():\n if tag == 'replace':\n # Text replaced = deletion + insertion\n outputList.append(self.delTag % u\" \".join(self.source[alo:ahi]))\n outputList.append(self.insTag % u\" \".join(self.target[blo:bhi]))\n self.replaceCount += 1\n elif tag == 'delete':\n # Text deleted\n outputList.append(self.delTag % u\" \".join(self.source[alo:ahi]))\n self.deleteCount += 1\n elif tag == 'insert':\n # Text inserted\n outputList.append(self.insTag % u\" \".join(self.target[blo:bhi]))\n self.insertCount += 1\n diffText = u\" \".join(outputList)\n #diffText = \" \".join(diffText.split())\n self.diffText = diffText.replace(self.nl, u\"\\n\")", "def generate_html_diff(self, base_path: str, to_view: bool = True) -> str:\n from run import log\n\n file_ok = os.path.join(base_path, self.expected + self.regression_test_output.correct_extension)\n file_fail = os.path.join(base_path, self.got + self.regression_test_output.correct_extension)\n log.debug(f\"Generate diff for {file_ok} vs {file_fail}\")\n lines_ok = self.read_lines(file_ok)\n lines_fail = self.read_lines(file_fail)\n\n return diff.get_html_diff(lines_ok, lines_fail, to_view)", "def render_diff_report():\n if nori.core.cfg['action'] == 'diff':\n diff_report = ' Diff Report '\n elif nori.core.cfg['action'] == 'sync':\n diff_report = ' Diff / Sync Report '\n diff_report = ('#' * len(diff_report) + '\\n' +\n diff_report + '\\n' +\n '#' * len(diff_report) + '\\n\\n')\n if nori.core.cfg['report_order'] == 'template':\n for 
template_index in diff_dict:\n template = nori.core.cfg['templates'][template_index]\n section_header = ('Template {0} ({1}):' .\n format(template_index,\n nori.pps(template[T_NAME_KEY])))\n section_header += '\\n' + ('-' * len(section_header)) + '\\n\\n'\n diff_report += section_header\n for diff_t in diff_dict[template_index]:\n exists_in_source = diff_t[0]\n source_row = diff_t[1]\n exists_in_dest = diff_t[2]\n dest_row = diff_t[3]\n has_been_changed = diff_t[4]\n if exists_in_source:\n source_str = nori.pps(source_row[1])\n elif exists_in_source is None:\n source_str = '[no value match in source database]'\n else:\n source_str = '[no key match in source database]'\n if exists_in_dest:\n dest_str = nori.pps(dest_row[1])\n elif exists_in_dest is None:\n dest_str = '[no value match in destination database]'\n else:\n dest_str = '[no key match in destination database]'\n if has_been_changed is None:\n changed_str = 'unchanged'\n elif not has_been_changed:\n changed_str = (\n 'partially changed - action may be needed!'\n )\n else:\n changed_str = 'changed'\n diff_report += (\n 'Source: {0}\\nDest: {1}\\nStatus: {2}\\n\\n' .\n format(source_str, dest_str, changed_str)\n )\n diff_report += '\\n'\n elif nori.core.cfg['report_order'] == 'keys':\n for key_str in diff_dict:\n section_header = ('Key tuple {0}:' .\n format(nori.pps(key_str)))\n section_header += '\\n' + ('-' * len(section_header)) + '\\n\\n'\n diff_report += section_header\n for diff_t in diff_dict[key_str]:\n template_index = diff_t[0]\n exists_in_source = diff_t[1]\n source_row = diff_t[2]\n exists_in_dest = diff_t[3]\n dest_row = diff_t[4]\n has_been_changed = diff_t[5]\n template = nori.core.cfg['templates'][template_index]\n if exists_in_source:\n num_keys = source_row[0]\n source_data = source_row[1]\n source_str = nori.pps(source_data[num_keys:])\n elif exists_in_source is None:\n source_str = '[no value match in source database]'\n else:\n source_str = '[no key match in source database]'\n if exists_in_dest:\n num_keys = dest_row[0]\n dest_data = dest_row[1]\n dest_str = nori.pps(dest_data[num_keys:])\n elif exists_in_dest is None:\n dest_str = '[no value match in destination database]'\n else:\n dest_str = '[no key match in destination database]'\n if has_been_changed is None:\n changed_str = 'unchanged'\n elif not has_been_changed:\n changed_str = (\n 'partially changed - action may be needed!'\n )\n else:\n changed_str = 'changed'\n diff_report += (\n 'Template: {0}\\nSource: {1}\\nDest: {2}\\n'\n 'Status: {3}\\n\\n' .\n format(template[T_NAME_KEY], source_str, dest_str,\n changed_str)\n )\n diff_report += '\\n'\n return diff_report.strip()", "def _format_html(self, file_content):\n old_string = r\"<!-- INSERT JUMP BOX HERE -->\"\n new_string = self._getJumpBoxHtml()\n file_content = string.replace(file_content, old_string, new_string) \n\n additional_head_string = ''' \n<link media=\"screen\" href=\"dataTableMedia/css/demo_table.css\" type=\"text/css\" rel=\"stylesheet\"/>\n<link media=\"screen\" href=\"dataTableMedia/css/TableTools.css\" type=\"text/css\" rel=\"stylesheet\"/>\n<script src=\"util.js\" type=\"text/javascript\"></script>\n<script src=\"jquery.js\" type=\"text/javascript\"></script>\n<script src=\"customTables.js\" type=\"text/javascript\"></script>\n<script src=\"dataTableMedia/js/jquery.dataTables.js\" type=\"text/javascript\"></script>\n<script src=\"dataTableMedia/js/TableTools.js\" type=\"text/javascript\"></script>\n<script src=\"dataTableMedia/js/jquery.dataTables.select.filtering.js\" 
type=\"text/javascript\" ></script>\n '''\n old_string = r\"<!-- INSERT ADDITIONAL HEAD STRING HERE -->\" \n file_content = string.replace(file_content, old_string, additional_head_string) \n new_string = '''\n <table id=\"dataTables-summaryArchive\" class=\"display\" cellspacing=\"0\" cellpadding=\"0\" border=\"0\"> \n <thead>\n <tr> \n '''\n #Write headers: 'name', 'rog', 'distance_count', 'cs_count', 'chothia_class', 'chain_count', 'res_count'\n for i,_header in enumerate(summaryHeaderList):\n new_string += '\\t<th title=\"{help}\">{header}</th>\\n'.format(header = summaryHeader2List[i],\n help = summaryHeaderTitleList[i])\n # end for \n new_string += '''\n </tr> \n </thead>\n </table>\n '''\n old_string = r\"<!-- INSERT NEW RESULT STRING HERE -->\" \n file_content = string.replace(file_content, old_string, new_string)\n return file_content", "def __html__(self, file_path:str) -> str:\n with open(f\"{file_path}\", \"r\") as mdfile: # Parse markdown file\n text = mdfile.read()\n html = self.md.convert(text) # Convert the markdown content text to hmtl\n return html", "def highlight_source(source):\n return highlight(source, PythonLexer(), HtmlFormatter())", "def write_html_diff(self, name, original, transformed):\n html = name + \".html\"\n fromlines = original.split(\"\\n\")\n tolines = transformed.split(\"\\n\")\n\n diff = difflib.HtmlDiff().make_file(\n fromlines, tolines, name + \".\" + config.FILE_EXT, name + \".py\"\n )\n with open(html, \"w\") as the_file:\n the_file.write(diff)\n print(\"Diff file writen to\", html)", "def to_markdown(html_string, safe_tags=None, safe_attrs=None):\n # out = StringIO()\n # for f in parse_fragments(html_string, safe_tags=None, safe_attrs=None):\n # handlers.process_tag_events(f, out)\n # return normalize(out.getvalue())\n return handlers.render(*parse_fragments(html_string, safe_tags))", "def update_html(shell=False):\n\n if shell: tell.info(\"Rendering the HTML.\")\n html.generate()\n if shell: tell.done(\"Updated `html/index.html`.\")", "def vhdl2html(expression):\n rules = (\n (r'\\b([a-zA-Z_][a-zA-Z0-9_]*[0-9]+)\\b',\n lambda match: '<span class=\"vhdlsig\">{0}</span>'.format(match.group(1))),\n (r'\\b(and|or|xor|not|AND|OR|XOR|NOT)\\b',\n lambda match: '<span class=\"vhdlop\">{0}</span>'.format(match.group(1).lower())),\n )\n for pattern, repl in rules:\n expression = re.sub(pattern, repl, expression)\n return expression", "def _run_diff(oldfile, newfile):\n # TODO: It may be nicer to use the internal diff engine for this.\n # For one, this would use the correct colors set up for hg\n # diff rather than the colors set up for colordiff. 
It's not\n # clear to me how this can be done though, and if it is\n # worth the bother.\n _call_subprocesses(\"diff or colordiff\",\n [\"colordiff\", \"-u\", oldfile, newfile],\n [\"diff\", \"-u\", oldfile, newfile])", "def makediff(s1, s2):\n import difflib\n differ = difflib.SequenceMatcher()\n differ.set_seqs(s1, s2)\n #debug = False\n s1new = [ ]\n s2new = [ ]\n previousOp = None\n for op, i1, i2, j1, j2 in differ.get_opcodes():\n #if debug: print \"top\"\n #if debug: print op, i1, i2, j1, j2, '->'\n #if debug: print s1, s2\n if op == 'equal':\n #if i2-i1 < 4 and len(s1new) > 1 and previousOp == \"replace\":\n # s1new[-2] += escape(s1[i1:i2])\n # s2new[-2] += escape(s2[j1:j2])\n #else:\n s1new.append(escape(s1[i1:i2]))\n s2new.append(escape(s2[j1:j2]))\n elif op == 'insert':\n s2new.extend(('<b>', escape(s2[j1:j2]), '</b>'))\n elif op == \"delete\":\n s1new.extend(('<b><strike>', escape(s1[i1:i2]), '</strike></b>'))\n elif op == 'replace':\n s1new.extend(('<b><strike>', escape(s1[i1:i2]), '</strike></b>'))\n s2new.extend(('<b>', escape(s2[j1:j2]), '</b>'))\n previousOp = op\n #if debug: print s1, s2\n #if debug: print \"bottom\"\n #if debug: print \"done\"\n return ''.join(s1new), ''.join(s2new)" ]
[ "0.6462075", "0.60662234", "0.59270734", "0.5817898", "0.5745568", "0.5704598", "0.5586395", "0.55667764", "0.553908", "0.55132574", "0.5492292", "0.5484959", "0.54815155", "0.5458665", "0.5453541", "0.5408362", "0.5382663", "0.5380105", "0.53666836", "0.5363882", "0.5358014", "0.53344476", "0.53208655", "0.5298947", "0.5261481", "0.52442527", "0.5229257", "0.522447", "0.5214516", "0.52028835" ]
0.71012473
0
Generate an HTML rendering of a side-by-side diff. This takes two to four strings and writes them to temporary files. It then runs vim to open all the files in a vertical split and uses the TOhtml vim script to convert that view into an HTML file, which is written to another temp file. The content of that file is read into a variable, all temp files are deleted, and the resulting string is returned to the caller.
def _generate_side_by_side_html_diff( string1_output, string2_output, string3_output=None, string4_output=None): string_output_files = [] for string_output in [ string1_output, string2_output, string3_output, string4_output]: if not string_output: break temp_file = tempfile.NamedTemporaryFile(delete=False) with open(temp_file.name, 'w') as t: t.write(string_output) string_output_files.append(temp_file.name) side_by_side_html_output_file = tempfile.NamedTemporaryFile(delete=False) vim_side_by_side_cmd = shlex.split( 'vim -O ' '-c "set bg=light" ' '-c "colo default" ' '-c "windo diffthis" ' '-c "let g:html_number_lines=1" ' '-c "let g:html_no_progress=1" ' '-c "let g:html_prevent_copy=\\"n\\"" ' '-c "let g:html_ignore_folding=1" ' '-c "TOhtml" ' '-c "w! {side_by_side_html_output_file}" ' '-c "qa!" ' '-- {string_output_files}'.format( side_by_side_html_output_file=side_by_side_html_output_file.name, string_output_files=' '.join(string_output_files))) subprocess.call(vim_side_by_side_cmd) with open(side_by_side_html_output_file.name) as t: side_by_side_html_output = t.read() # Remove temp files os.unlink(side_by_side_html_output_file.name) for string_output_file in string_output_files: os.unlink(string_output_file) return side_by_side_html_output
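A minimal usage sketch for the helper above. Everything in it is an assumption for illustration only: it presumes the function is importable from this module, that the module-level imports it relies on (tempfile, shlex, subprocess, os) are present, and that a vim binary with the bundled TOhtml script is available on PATH.

# Hypothetical caller: render two small text variants side by side and save the page.
left_text = "alpha\nbravo\ncharlie\n"
right_text = "alpha\nbravo\ndelta\n"

html_page = _generate_side_by_side_html_diff(left_text, right_text)

# Persist the generated page so it can be opened in a browser.
with open('side_by_side_diff.html', 'w') as out_file:
    out_file.write(html_page)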
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generate_html_diff(diff_output):\n diff_output_file = tempfile.NamedTemporaryFile(delete=False)\n with open(diff_output_file.name, 'w') as t:\n t.write(diff_output)\n\n # Use the default colorscheme on a light background for the\n # generated html\n diff_colorize_cmd = shlex.split(\n 'vim '\n '-c \"set bg=light\" '\n '-c \"colo default\" '\n '-c \"let g:html_no_progress=1\" '\n '-c \"let g:html_number_lines=1\" '\n '-c \"let g:html_prevent_copy=\\\\\"n\\\\\"\" '\n '-c \"let g:html_ignore_folding=1\" '\n '-c \"TOhtml\" '\n '-c \"wqa\" '\n '-- {diff_output_file}'.format(diff_output_file=diff_output_file.name))\n subprocess.call(diff_colorize_cmd)\n\n with open('{0}.html'.format(diff_output_file.name)) as o:\n html_diff_output = o.read()\n\n # Remove the temporary files\n os.unlink(diff_output_file.name)\n os.unlink(diff_output_file.name + '.html')\n\n return html_diff_output", "def html_file_diff(lhs, rhs):\n with open(lhs, encoding='utf-8') as lhs_file:\n lhs_text = lhs_file.read()\n with open(rhs, encoding='utf-8') as rhs_file:\n rhs_text = rhs_file.read()\n return html_diff(lhs, lhs_text, rhs, rhs_text)", "def test_gen_diff_html(mock_diff):\n from_title = \"from_title_content\"\n from_lines = \"left content here\"\n to_title = \"to_title_content\"\n to_lines = \"different content on the right here\"\n mock_diff.return_value.make_table.return_value = \"<t>{} {}</t>\".format(\n from_lines, to_lines\n )\n\n html = cmds._gen_diff_html(from_title, [from_lines], to_title, [to_lines])\n\n assert html.count(from_title) == 2\n assert html.count(from_lines) == 1\n assert html.count(to_title) == 2\n assert html.count(to_lines) == 1", "def saveCompareHTML(outputDir,chemkinPath1,speciesDictPath1,chemkinPath2,speciesDictPath2,readComments1=True,readComments2=True):\n model1 = ReactionModel()\n model1.species, model1.reactions = loadChemkinFile(chemkinPath1, speciesDictPath1, readComments = readComments1)\n model2 = ReactionModel()\n model2.species, model2.reactions = loadChemkinFile(chemkinPath2, speciesDictPath2, readComments = readComments2)\n commonReactions, uniqueReactions1, uniqueReactions2 = compareModelReactions(model1, model2)\n commonSpecies, uniqueSpecies1, uniqueSpecies2 = compareModelSpecies(model1, model2)\n \n outputPath = outputDir + 'diff.html' \n saveDiffHTML(outputPath, commonSpecies, uniqueSpecies1, uniqueSpecies2, commonReactions, uniqueReactions1, uniqueReactions2)", "def generate_diff(file_path1: str, file_path2: str, format=\"stylish\"):\n\n file1, extension1 = _read_file(file_path1)\n file2, extension2 = _read_file(file_path2)\n data1 = parse(file1, extension1)\n data2 = parse(file2, extension2)\n diff = build_tree(data1, data2)\n formatted_diff = render_diff(diff, format)\n return formatted_diff", "def make_diff(self, file_path_1, file_path_2, path_in):\n hash_ = hash(path_in)\n\n with open(file_path_1) as file_1:\n with open(file_path_2) as file_2:\n d = difflib.Differ()\n\n diff = list(d.compare(file_1.readlines(), file_2.readlines()))\n _diff = []\n\n for i in range(len(diff)-1):\n if(diff[i][0] == '+' or diff[i][0] == '-'):\n if(diff[i].find('Copyright') == -1 and\n diff[i].find(\"generated by:\") == -1):\n _diff.append(diff[i])\n\n if _diff:\n _diff = ''.join(_diff)\n try:\n f = open('./diff/' + str(hash_) + '.diff', 'w')\n except:\n os.mkdir('./diff')\n f = open('./diff/' + str(hash_) + '.diff', 'w')\n f.write(_diff)\n f.close()\n return str(hash_)+'.diff'\n return None", "def generate_html_diff(self, base_path: str, to_view: bool = True) -> str:\n from run 
import log\n\n file_ok = os.path.join(base_path, self.expected + self.regression_test_output.correct_extension)\n file_fail = os.path.join(base_path, self.got + self.regression_test_output.correct_extension)\n log.debug(f\"Generate diff for {file_ok} vs {file_fail}\")\n lines_ok = self.read_lines(file_ok)\n lines_fail = self.read_lines(file_fail)\n\n return diff.get_html_diff(lines_ok, lines_fail, to_view)", "def render_diff_report():\n if nori.core.cfg['action'] == 'diff':\n diff_report = ' Diff Report '\n elif nori.core.cfg['action'] == 'sync':\n diff_report = ' Diff / Sync Report '\n diff_report = ('#' * len(diff_report) + '\\n' +\n diff_report + '\\n' +\n '#' * len(diff_report) + '\\n\\n')\n if nori.core.cfg['report_order'] == 'template':\n for template_index in diff_dict:\n template = nori.core.cfg['templates'][template_index]\n section_header = ('Template {0} ({1}):' .\n format(template_index,\n nori.pps(template[T_NAME_KEY])))\n section_header += '\\n' + ('-' * len(section_header)) + '\\n\\n'\n diff_report += section_header\n for diff_t in diff_dict[template_index]:\n exists_in_source = diff_t[0]\n source_row = diff_t[1]\n exists_in_dest = diff_t[2]\n dest_row = diff_t[3]\n has_been_changed = diff_t[4]\n if exists_in_source:\n source_str = nori.pps(source_row[1])\n elif exists_in_source is None:\n source_str = '[no value match in source database]'\n else:\n source_str = '[no key match in source database]'\n if exists_in_dest:\n dest_str = nori.pps(dest_row[1])\n elif exists_in_dest is None:\n dest_str = '[no value match in destination database]'\n else:\n dest_str = '[no key match in destination database]'\n if has_been_changed is None:\n changed_str = 'unchanged'\n elif not has_been_changed:\n changed_str = (\n 'partially changed - action may be needed!'\n )\n else:\n changed_str = 'changed'\n diff_report += (\n 'Source: {0}\\nDest: {1}\\nStatus: {2}\\n\\n' .\n format(source_str, dest_str, changed_str)\n )\n diff_report += '\\n'\n elif nori.core.cfg['report_order'] == 'keys':\n for key_str in diff_dict:\n section_header = ('Key tuple {0}:' .\n format(nori.pps(key_str)))\n section_header += '\\n' + ('-' * len(section_header)) + '\\n\\n'\n diff_report += section_header\n for diff_t in diff_dict[key_str]:\n template_index = diff_t[0]\n exists_in_source = diff_t[1]\n source_row = diff_t[2]\n exists_in_dest = diff_t[3]\n dest_row = diff_t[4]\n has_been_changed = diff_t[5]\n template = nori.core.cfg['templates'][template_index]\n if exists_in_source:\n num_keys = source_row[0]\n source_data = source_row[1]\n source_str = nori.pps(source_data[num_keys:])\n elif exists_in_source is None:\n source_str = '[no value match in source database]'\n else:\n source_str = '[no key match in source database]'\n if exists_in_dest:\n num_keys = dest_row[0]\n dest_data = dest_row[1]\n dest_str = nori.pps(dest_data[num_keys:])\n elif exists_in_dest is None:\n dest_str = '[no value match in destination database]'\n else:\n dest_str = '[no key match in destination database]'\n if has_been_changed is None:\n changed_str = 'unchanged'\n elif not has_been_changed:\n changed_str = (\n 'partially changed - action may be needed!'\n )\n else:\n changed_str = 'changed'\n diff_report += (\n 'Template: {0}\\nSource: {1}\\nDest: {2}\\n'\n 'Status: {3}\\n\\n' .\n format(template[T_NAME_KEY], source_str, dest_str,\n changed_str)\n )\n diff_report += '\\n'\n return diff_report.strip()", "def diff2(request, ps_left_id, ps_right_id, patch_filename):\n context = _get_context_for_user(request)\n column_width = 
_get_column_width_for_user(request)\n tab_spaces = _get_tab_spaces_for_user(request)\n\n ps_right = models.PatchSet.get_by_id(\n int(ps_right_id), parent=request.issue.key)\n patch_right = None\n\n if ps_right:\n patch_right = models.Patch.query(\n models.Patch.filename == patch_filename,\n ancestor=ps_right.key).get()\n\n if patch_right:\n patch_id = patch_right.key.id()\n elif patch_filename.isdigit():\n # Perhaps it's an ID that's passed in, based on the old URL scheme.\n patch_id = int(patch_filename)\n else: # patch doesn't exist in this patchset\n patch_id = None\n\n data = _get_diff2_data(request, ps_left_id, ps_right_id, patch_id, context,\n column_width, tab_spaces, patch_filename)\n if isinstance(data, HttpResponse) and data.status_code != 302:\n return data\n\n patchsets = list(request.issue.patchsets)\n\n if data[\"patch_right\"]:\n _add_next_prev2(data[\"ps_left\"], data[\"ps_right\"], data[\"patch_right\"])\n return respond(request, 'diff2.html',\n {'issue': request.issue,\n 'ps_left': data[\"ps_left\"],\n 'patch_left': data[\"patch_left\"],\n 'ps_right': data[\"ps_right\"],\n 'patch_right': data[\"patch_right\"],\n 'rows': data[\"rows\"],\n 'patch_id': patch_id,\n 'context': context,\n 'context_values': models.CONTEXT_CHOICES,\n 'column_width': column_width,\n 'tab_spaces': tab_spaces,\n 'patchsets': patchsets,\n 'filename': patch_filename,\n })", "def get_resulting_diffs():\n diff_dirpath = application.join_abs_path(\n EMPTY_TEST_DIR, application.OUTPUT_DIR_NAME)\n diffleft_filename = application.join_abs_path(\n diff_dirpath, application.OUTPUT_DIFF_LEFT_FILENAME)\n diffright_filename = application.join_abs_path(\n diff_dirpath, application.OUTPUT_DIFF_RIGHT_FILENAME)\n\n diff_left = read_gzip_file_lines_into_set(diffleft_filename)\n diff_right = read_gzip_file_lines_into_set(diffright_filename)\n\n return diff_left, diff_right", "def _buildDiff(self):\n outputList = []\n for tag, alo, ahi, blo, bhi in self.cruncher.get_opcodes():\n if tag == 'replace':\n # Text replaced = deletion + insertion\n outputList.append(self.delTag % u\" \".join(self.source[alo:ahi]))\n outputList.append(self.insTag % u\" \".join(self.target[blo:bhi]))\n self.replaceCount += 1\n elif tag == 'delete':\n # Text deleted\n outputList.append(self.delTag % u\" \".join(self.source[alo:ahi]))\n self.deleteCount += 1\n elif tag == 'insert':\n # Text inserted\n outputList.append(self.insTag % u\" \".join(self.target[blo:bhi]))\n self.insertCount += 1\n diffText = u\" \".join(outputList)\n #diffText = \" \".join(diffText.split())\n self.diffText = diffText.replace(self.nl, u\"\\n\")", "def vennDiagram(bed1File, bed2File, only1Output=None, only2Output=None, bothOutput=None):\n\n bed1 = readJunctionsFromBed(bed1File, True)\n bed2 = readJunctionsFromBed(bed2File, True)\n\n count1 = 0\n count2 = 0\n countBoth = 0\n\n out1 = None\n if only1Output:\n out1 = open(only1Output, \"w\")\n out2 = None\n if only2Output:\n out2 = open(only2Output, \"w\")\n both = None\n if bothOutput:\n both = open(bothOutput, \"w\")\n\n for chr, chrJunct in bed1.iteritems():\n for (start,stop) in chrJunct:\n if bed2.has_key(chr):\n if bed2[chr].has_key( (start, stop) ):\n if both:\n for line in bed1[chr][(start,stop)]:\n both.write(line)\n both.write(\"\\n\")\n del bed2[chr][(start,stop)]\n countBoth += 1\n else:\n count1 += 1\n if out1:\n line = bed1[chr][(start,stop)][0]\n pieces = line.split()\n bedVals = [chr, start-10, stop+10, pieces[3], pieces[4], pieces[5], start-10, stop+10, pieces[8], pieces[9],\n \"10,10\", 
\"0,%s\"%(stop-start+10)]\n out1.write(\"\\t\".join(str(x) for x in bedVals))\n out1.write(\"\\n\")\n #for line in bed1[chr][(start, stop)]:\n # out1.write(line)\n # out1.write(\"\\n\")\n else:\n count1 += 1\n if out1:\n line = bed1[chr][(start,stop)][0]\n pieces = line.split()\n bedVals = [chr, start-10, stop+10, pieces[3], pieces[4], pieces[5], start-10, stop+10, pieces[8], \"2\",\n \"10,10\", \"0,%s\"%(stop-start+10)]\n out1.write(\"\\t\".join(str(x) for x in bedVals))\n out1.write(\"\\n\")\n #for line in bed1[chr][(start, stop)]:\n # out1.write(line)\n # out1.write(\"\\n\")\n\n #print\n #print\n #print\n\n count2 = sum( len(chrJunct) for chrJunct in bed2.values())\n if out2:\n for chr, chrJunct in bed2.iteritems():\n for (start,stop) in chrJunct:\n line = bed2[chr][(start,stop)][0]\n pieces = line.split()\n bedVals = [chr, start-10, stop+10, pieces[3], pieces[4], pieces[5], start-10, stop+10, pieces[8], \"2\",\n \"10,10\", \"0,%s\"%(stop-start+10)]\n out2.write(\"\\t\".join(str(x) for x in bedVals))\n out2.write(\"\\n\")\n #for line in bed2[chr][(start, stop)]:\n # out2.write(line)\n # out2.write(\"\\n\")\n\n print \"There were %s in both, %s in the first one and %s in the second one\" % (countBoth, count1, count2)", "def write_html_diff(self, name, original, transformed):\n html = name + \".html\"\n fromlines = original.split(\"\\n\")\n tolines = transformed.split(\"\\n\")\n\n diff = difflib.HtmlDiff().make_file(\n fromlines, tolines, name + \".\" + config.FILE_EXT, name + \".py\"\n )\n with open(html, \"w\") as the_file:\n the_file.write(diff)\n print(\"Diff file writen to\", html)", "def diff2(request, ps_left_id, ps_right_id, patch_filename):\n context = _get_context_for_user(request)\n column_width = _get_column_width_for_user(request)\n\n ps_right = models.PatchSet.get_by_id(\n int(ps_right_id), parent=request.issue.key)\n patch_right = None\n\n if ps_right:\n patch_right = models.Patch.query(\n models.Patch.patchset_key == ps_right.key,\n models.Patch.filename == patch_filename).get()\n\n if patch_right:\n patch_id = patch_right.key.id()\n elif patch_filename.isdigit():\n # Perhaps it's an ID that's passed in, based on the old URL scheme.\n patch_id = int(patch_filename)\n else: # patch doesn't exist in this patchset\n patch_id = None\n\n data = _get_diff2_data(request, ps_left_id, ps_right_id, patch_id, context,\n column_width, patch_filename)\n if isinstance(data, HttpResponse) and data.status_code != 302:\n return data\n\n patchsets = list(request.issue.patchsets)\n\n if data[\"patch_right\"]:\n _add_next_prev2(data[\"ps_left\"], data[\"ps_right\"], data[\"patch_right\"])\n return respond(request, 'diff2.html',\n {'issue': request.issue,\n 'ps_left': data[\"ps_left\"],\n 'patch_left': data[\"patch_left\"],\n 'ps_right': data[\"ps_right\"],\n 'patch_right': data[\"patch_right\"],\n 'rows': data[\"rows\"],\n 'patch_id': patch_id,\n 'context': context,\n 'context_values': models.CONTEXT_CHOICES,\n 'column_width': column_width,\n 'patchsets': patchsets,\n 'filename': patch_filename,\n })", "def _run_diff(oldfile, newfile):\n # TODO: It may be nicer to use the internal diff engine for this.\n # For one, this would use the correct colors set up for hg\n # diff rather than the colors set up for colordiff. 
It's not\n # clear to me how this can be done though, and if it is\n # worth the bother.\n _call_subprocesses(\"diff or colordiff\",\n [\"colordiff\", \"-u\", oldfile, newfile],\n [\"diff\", \"-u\", oldfile, newfile])", "def make_wikipage_diff(self, group, old_ver_num, new_ver_num):\n old_content = self.get_version_content(group, old_ver_num)\n new_content = self.get_version_content(group, new_ver_num)\n d = difflib.HtmlDiff()\n diff_table = d.make_table(old_content.splitlines(), new_content.splitlines())\n diff_table = diff_table.replace('&nbsp;', ' ').replace(' nowrap=\"nowrap\"', '')\n return diff_table", "def pydiff(text1, text2, text1_name='text1', text2_name='text2',\n prefix_diff_files='tmp_diff', n=3):\n if text1 == text2:\n return False\n\n # Else:\n import difflib, time, os\n\n text1_lines = text1.splitlines()\n text2_lines = text2.splitlines()\n\n diff_html = difflib.HtmlDiff().make_file(\n text1_lines, text2_lines, text1_name, text2_name,\n context=True, numlines=n)\n diff_plain = difflib.unified_diff(\n text1_lines, text2_lines, text1_name, text2_name, n=n)\n filename_plain = prefix_diff_files + '.txt'\n filename_html = prefix_diff_files + '.html'\n\n f = open(filename_plain, 'w')\n # Need to add newlines despite doc saying that trailing newlines are\n # inserted...\n diff_plain = [line + '\\n' for line in diff_plain]\n f.writelines(diff_plain)\n f.close()\n\n f = open(filename_html, 'w')\n f.writelines(diff_html)\n f.close()\n return True", "def do_diff(self, line):\n with tempfile.NamedTemporaryFile(suffix='.revu.diff') as temp:\n with open(temp.name, 'w') as fd:\n fd.write(self.review.diff())\n subprocess.call(['vimdiff', temp.name])", "def html(name, options='', split=False):\n if name.endswith('.do.txt'):\n name = name.replace('.do.txt', '')\n\n # Compile source\n cmd = 'doconce format html %(name)s %(options)s ' % vars()\n system(cmd)\n\n\n cmd = u\"doconce replace 'Figure' 'Рис.' %(name)s.html\".encode('utf-8') % vars()\n system(cmd)\n\n cmd = u\"doconce replace 'figure' 'рис.' 
%(name)s.html\".encode('utf-8') % vars()\n system(cmd)\n\n cmd = u\"doconce replace 'width=responsive' 'class=\\\"img-responsive\\\" style=\\\"max-width:600px; width:100%%;\\\"' %(name)s.html\".encode('utf-8') % vars()\n system(cmd)\n\n if split:\n cmd = 'doconce split_html %(name)s' % vars()\n system(cmd)\n \n for filename in glob.glob(\"._%(name)s*.html\" % vars()):\n if '000' not in filename:\n cmd = u\"doconce replace '&larr; Prev' '&larr; Предыдущая глава' %s\".encode('utf-8') % filename\n system(cmd)\n\n cmd = u\"doconce replace 'Next &rarr;' ' Следующая глава &rarr;' %s\".encode('utf-8') % filename\n system(cmd)\n\n for filename in [name, '._%s000' % name]:\n print(filename)\n cmd = u\"doconce replace 'Read' 'Перейти к первой главе' %s.html\".encode('utf-8') % filename \n system(cmd)\n\n cmd = u\"doconce subst '.*Next.*' '' %s.html\".encode('utf-8') % filename \n system(cmd)", "def diffch(dir1,dir2,outfile=None):\n for ff in sorted(os.listdir(dir1)):\n if re.search('.c$',ff) or re.search('.h$',ff):\n f1 = dir1 + ff\n f2 = dir2 + ff\n if outfile is None:\n print 'start diff ',f1,f2\n os.system('diff %s %s' % (f1,f2))\n print 'end diff ',f1,f2\n else:\n ofp = open(outfile,'a')\n ofp.write('start diff %s %s\\n' % (f1,f2))\n ofp.close()\n os.system('diff %s %s >> %s' % (f1,f2,outfile))\n ofp = open(outfile,'a')\n ofp.write('end diff %s %s\\n' % (f1,f2))\n ofp.close()", "def diff(self):\n # Split new and existing content in lines\n current_content = self.current_content.splitlines(1)\n new_content = self.content.splitlines(1)\n\n # Call difflib\n diff = ''.join(difflib.unified_diff(current_content, new_content))\n print highlight(diff, DiffLexer(), Formatter())\n\n return diff", "def makediff(s1, s2):\n import difflib\n differ = difflib.SequenceMatcher()\n differ.set_seqs(s1, s2)\n #debug = False\n s1new = [ ]\n s2new = [ ]\n previousOp = None\n for op, i1, i2, j1, j2 in differ.get_opcodes():\n #if debug: print \"top\"\n #if debug: print op, i1, i2, j1, j2, '->'\n #if debug: print s1, s2\n if op == 'equal':\n #if i2-i1 < 4 and len(s1new) > 1 and previousOp == \"replace\":\n # s1new[-2] += escape(s1[i1:i2])\n # s2new[-2] += escape(s2[j1:j2])\n #else:\n s1new.append(escape(s1[i1:i2]))\n s2new.append(escape(s2[j1:j2]))\n elif op == 'insert':\n s2new.extend(('<b>', escape(s2[j1:j2]), '</b>'))\n elif op == \"delete\":\n s1new.extend(('<b><strike>', escape(s1[i1:i2]), '</strike></b>'))\n elif op == 'replace':\n s1new.extend(('<b><strike>', escape(s1[i1:i2]), '</strike></b>'))\n s2new.extend(('<b>', escape(s2[j1:j2]), '</b>'))\n previousOp = op\n #if debug: print s1, s2\n #if debug: print \"bottom\"\n #if debug: print \"done\"\n return ''.join(s1new), ''.join(s2new)", "def generate_diff(file_path1: str,\n file_path2: str,\n format=STYLISH) -> str:\n diff = make_diff(file_path1, file_path2)\n if format == PLAIN:\n return get_plain(diff)\n elif format == JSON:\n return get_json(diff)\n return get_stylish(diff)", "def _get_diff2_data(request, ps_left_id, ps_right_id, patch_id, context,\n column_width, tab_spaces, patch_filename=None):\n ps_left = models.PatchSet.get_by_id(int(ps_left_id), parent=request.issue.key)\n if ps_left is None:\n return HttpTextResponse(\n 'No patch set exists with that id (%s)' % ps_left_id, status=404)\n ps_left.issue_key = request.issue.key\n ps_right = models.PatchSet.get_by_id(\n int(ps_right_id), parent=request.issue.key)\n if ps_right is None:\n return HttpTextResponse(\n 'No patch set exists with that id (%s)' % ps_right_id, status=404)\n ps_right.issue_key = 
request.issue.key\n if patch_id is not None:\n patch_right = models.Patch.get_by_id(int(patch_id), parent=ps_right.key)\n else:\n patch_right = None\n if patch_right is not None:\n patch_right.patchset_key = ps_right.key\n if patch_filename is None:\n patch_filename = patch_right.filename\n # Now find the corresponding patch in ps_left\n patch_left = models.Patch.query(\n models.Patch.filename == patch_filename,\n ancestor=ps_left.key).get()\n\n if patch_left:\n try:\n new_content_left = patch_left.get_patched_content()\n except FetchError as err:\n return HttpTextResponse(str(err), status=404)\n lines_left = new_content_left.lines\n elif patch_right:\n lines_left = patch_right.get_content().lines\n else:\n lines_left = []\n\n if patch_right:\n try:\n new_content_right = patch_right.get_patched_content()\n except FetchError as err:\n return HttpTextResponse(str(err), status=404)\n lines_right = new_content_right.lines\n elif patch_left:\n lines_right = patch_left.get_content().lines\n else:\n lines_right = []\n\n rows = engine.RenderDiff2TableRows(request,\n lines_left, patch_left,\n lines_right, patch_right,\n context=context,\n colwidth=column_width,\n tabspaces=tab_spaces)\n rows = list(rows)\n if rows and rows[-1] is None:\n del rows[-1]\n\n return dict(patch_left=patch_left, patch_right=patch_right,\n ps_left=ps_left, ps_right=ps_right, rows=rows)", "def cmpfile(file_left, file_right):\n nobv.visual_comparefile(file_left, file_right)", "def compareFiles(baseFile_path, testTempFile):\n baseFile = open(baseFile_path, \"r\")\n testTempFile.seek(0) \n## only lines that have changed\n testoutput = []\n testTempFile.seek(0) \n baseFile.seek(0)\n m_base = baseFile.readlines()\n clean_base = []\n m_temp = testTempFile.readlines() \n clean_temp = []\n ignore_chars = '\\n\\t '\n for line in m_base:\n if not line == '\\n':\n clean_base += [line.strip(ignore_chars)]\n for line in m_temp: \n if not line == '\\n':\n clean_temp += [line.strip(ignore_chars)] \t\n for line in difflib.context_diff(clean_base, clean_temp):\n testoutput += [line] \n \n## all lines diff \n# diff = difflib.ndiff(baseFile.readlines(), testTempFile.readlines())\n# print ''.join(diff)\n baseFile.close() \n diffFile_name = baseFile_path.replace(\"_Base.output\",\".diff\")\n diffFile = open(diffFile_name, \"w\")\n \n if len(testoutput) > 1:\n for line in difflib.context_diff(m_base, m_temp):\n print line\n diffFile.write(line)\n diffFile.close() \n assert ( len(testoutput) == 1 )", "async def do_diff_computation(request):\n session = await get_session(request)\n\n # users upload directory\n upload_dir_path = os.path.join(request.app[\"upload_dir\"], session['uid'])\n\n template_context = {}\n template_context[\"data\"] = session[\"session_data\"]\n upload_resource = request.app.router['uploads']\n\n # get both files - and if both dont exists then we are done\n try:\n # take care of the left and right images\n code, script_and_div = render_sideby_side(session['uid'], upload_resource, session[\"session_data\"])\n if code != 0:\n # cant render plots for side by side image\n return web.HTTPFound('/diff') # go back to diff\n\n # add the side by side images plots to the context\n template_context[\"image_display\"] = script_and_div\n\n # ####################################################\n # now let compute the differences between those images\n\n left_image = os.path.join(upload_dir_path,session['session_data'][\"left_image\"][\"filename\"] )\n right_image = os.path.join(upload_dir_path, 
session['session_data'][\"right_image\"][\"filename\"])\n\n # ########################################################################################################\n # Computing the diff takes a long time - pass it to the a process pool executor\n # ########################################################################################################\n\n process_pool_executor = request.app[\"process_pool_executor\"]\n\n # get the event loop -- get it from app or asyncio\n loop = request.app.loop\n # do i need a partial?\n future1 = loop.run_in_executor(process_pool_executor, image_ops.workon_images, left_image, right_image, upload_dir_path)\n\n code, result = await future1\n\n # code, result = image_ops.workon_images(left_image, right_image, upload_dir_path)\n if code == 0:\n # TODO add the result to the data session and render the page\n\n \"\"\"\n { \"ssim_score\":\n \"diff\": <diff image name> optional \n \"thresh\" <thresh image name> optional\n \"marked_l\" <marked left image name> optional\n \"marked_r\" <marked right iamge name> optional\n \"histogram\" <dual histogram plot {\"div\",\"script\"} for left image and right image>\n \"diff_histogram\" {\"div\",\"script\"} <single histogram plot \n } \n \"\"\"\n # create a diffresult\n diff_result = {}\n template_context[\"diff_result\"] = diff_result\n\n # populate it\n if \"histogram\" in result:\n diff_result[\"histogram\"] = result[\"histogram\"]\n if \"diff_histogram\" in result:\n diff_result[\"diff_histogram\"] = result[\"diff_histogram\"]\n\n\n if \"marked_l\" in result and \"marked_r\" in result:\n code, script_and_div = render_sideby_side2(result[\"marked_l\"],result[\"marked_r\"], upload_resource, session['uid'])\n if code == 0:\n diff_result[\"diff_image_display\"] = script_and_div\n\n\n response = aiohttp_jinja2.render_template('base_html.jinja2', request, template_context)\n return response\n\n else:\n # we had an error\n return web.HTTPFound('/diff')\n\n except KeyError:\n logging.debug(\"Both files don't exists - we shouldnt get here\")\n return web.HTTPFound('/diff')\n\n return web.HTTPFound('/diff')", "def snippet_diff(request, template_name='dpaste/snippet_diff.html'):\n if request.GET.get('a') and request.GET.get('a').isdigit() \\\n and request.GET.get('b') and request.GET.get('b').isdigit():\n try:\n fileA = Snippet.objects.get(pk=int(request.GET.get('a')))\n fileB = Snippet.objects.get(pk=int(request.GET.get('b')))\n except ObjectDoesNotExist:\n return HttpResponseBadRequest(u'Selected file(s) does not exist.')\n else:\n return HttpResponseBadRequest(u'You must select two snippets.')\n\n class DiffText(object):\n pass\n\n diff = DiffText()\n\n if fileA.content != fileB.content:\n d = difflib.unified_diff(\n fileA.content.splitlines(),\n fileB.content.splitlines(),\n 'Original',\n 'Current',\n lineterm=''\n )\n\n diff.content = '\\n'.join(d).strip()\n diff.lexer = 'diff'\n else:\n diff.content = _(u'No changes were made between this two files.')\n diff.lexer = 'text'\n\n template_context = {\n 'snippet': diff,\n 'fileA': fileA,\n 'fileB': fileB,\n }\n\n return render_to_response(\n template_name,\n template_context,\n RequestContext(request)\n )", "def diff_files(self, added_deleted, path=(), pars=()):\n body_diffs = self.selector.flat_diff(added_deleted + \" terminal\",\n path = path + ((self.filename,)), pars = pars + (self.diff_name,))\n for diff in body_diffs:\n diff.instrument = self.instrument\n diff.filekind = self.filekind\n return body_diffs", "def diff(options):\n if len(options.args) < 2:\n raise 
TelemacException(\\\n '\\nThe code \"diff\" uses a minimum of '\n '3 argumensts, aside from the options\\n')\n slf_files = options.args[0:len(options.args)-1]\n out_file = options.args[len(options.args)-1]\n\n slfs = Selafins()\n print('\\n\\nDifferences into {}\\n{}\\n'.format(path.basename(out_file),\n '~'*72))\n for slf_file in slf_files:\n slf_file = path.realpath(slf_file)\n if not path.exists(slf_file):\n raise TelemacException(\\\n '\\nCould not find '\n 'the file named: {}'.format(slf_file))\n slfs.add(slf_file)\n slfs.put_content(out_file)" ]
[ "0.6935427", "0.61285514", "0.6108246", "0.5999014", "0.58274055", "0.58048826", "0.5680434", "0.5638912", "0.5617471", "0.55919796", "0.55733824", "0.5559977", "0.55554956", "0.55301815", "0.5490199", "0.5481587", "0.5448345", "0.5435468", "0.5404115", "0.5347791", "0.53248686", "0.5321444", "0.5291031", "0.5235956", "0.5235581", "0.52182823", "0.518983", "0.5180493", "0.51784086", "0.5161389" ]
0.8523384
0
Parse the diff associated with a commit. The diff associated with a commit is normally the difference between the state of the tree in that commit and the state of the tree in one or more of its parent commits. This method parses that diff and returns the set of lines that correspond to added and/or deleted lines of code.
def _parse_diff(commit_sha1): class DiffState(object): START = 0 DIFF_BLOCK_LINE = 1 INDEX_LINE = 2 A_LINE = 3 B_LINE = 4 AT_LINE = 5 DIFF_LINES = 6 diff_cmd = shlex.split('git show {commit_sha1}'.format( commit_sha1=commit_sha1)) diff_output = subprocess.check_output(diff_cmd) diff_lines = set() state = DiffState.START for line in diff_output.splitlines(): if state in [DiffState.START, DiffState.DIFF_LINES] and line.startswith('diff '): state = DiffState.DIFF_BLOCK_LINE continue if state == DiffState.DIFF_BLOCK_LINE and line.startswith('index '): state = DiffState.INDEX_LINE continue if state == DiffState.INDEX_LINE and line.startswith('--- '): state = DiffState.A_LINE continue if state == DiffState.A_LINE and line.startswith('+++ '): state = DiffState.B_LINE continue if state in [DiffState.B_LINE, DiffState.DIFF_LINES] and line.startswith('@@ '): state = DiffState.AT_LINE continue if state in [DiffState.AT_LINE, DiffState.DIFF_LINES] and ( line.startswith(('+', '-', ' '))): state = DiffState.DIFF_LINES if line.startswith(' '): continue diff_lines.add(line) continue state = DiffState.START return diff_lines
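A brief usage sketch for the parser above, offered as an assumption rather than part of the record: it presumes the code runs inside a git working copy, that shlex and subprocess are imported at module level (the record does not show the imports), and that 'HEAD' resolves to a valid commit.

# Hypothetical example: list the added and removed lines introduced by the current HEAD commit.
changed_lines = _parse_diff('HEAD')

for entry in sorted(changed_lines):
    # Each entry keeps its diff marker: '+' for an added line, '-' for a removed one.
    marker, text = entry[0], entry[1:]
    label = 'added' if marker == '+' else 'removed'
    print('%s: %s' % (label, text))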
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parseCommit() -> str:\n cmd_tag = f\"git --no-pager diff --diff-filter=ACMR --name-only HEAD~1 HEAD\"\n print(f\"COMMAND: {cmd_tag}\")\n print(\"\", flush=True)\n fileList = subprocess.check_output(cmd_tag, shell=True)\n return fileList.decode('utf-8').splitlines()", "def dump_commit_diff(commit):\n\n for file in commit:\n if file[4] == \"\" or \".\" not in file[4]:\n sys.stdout.flush()\n print((\"Index: \" + file[3] + \" deleted\\r\"))\n sys.stdout.flush()\n else:\n subprocess.call([\n \"cvs\",\n \"-d\",\n file[8],\n \"rdiff\",\n \"-u\",\n \"-r\",\n PostsaiCommitViewer.calculate_previous_cvs_revision(file[4]),\n \"-r\",\n file[4],\n file[3]])", "def get_changed_files(self, old_commit, new_commit):\n if old_commit is not None and not self.pygit.descendant_of(\n new_commit, old_commit\n ):\n raise ValueError(\"Second commit must be a descendant of first commit\")\n\n old_index = pygit2.Index()\n new_index = pygit2.Index()\n if old_commit is not None:\n old_tree = self.pygit.get(old_commit).tree\n old_index.read_tree(old_tree)\n else:\n # This is a special hash that represents an empty tree\n old_tree = self.pygit.get(\"4b825dc642cb6eb9a060e54bf8d69288fbee4904\")\n\n new_tree = self.pygit.get(new_commit).tree\n new_index.read_tree(new_tree)\n\n for patch in self.pygit.diff(old_tree, new_tree):\n if patch.delta.status_char() != \"M\":\n continue\n\n if not patch.delta.new_file.path.startswith(\"locales/\"):\n continue\n\n old_file_oid = old_index[patch.delta.old_file.path].oid\n new_file_oid = new_index[patch.delta.new_file.path].oid\n old_file = self.pygit.get(old_file_oid)\n new_file = self.pygit.get(new_file_oid)\n yield patch.delta.new_file.path, old_file.data, new_file.data", "def check_diffs():\n process = Popen([\"git\", \"diff\", \"HEAD^\", \"--name-only\"], stdout=PIPE)\n\n diff, stderr = process.communicate()\n\n if process.returncode !=0:\n raise Exception(\"Unable to do git diff\")\n return diff.splitlines(False)", "def lint(self, commit):\n LOG.debug(\"Linting commit %s\", commit.sha or \"[SHA UNKNOWN]\")\n LOG.debug(\"Commit Object\\n\" + str(commit))\n\n # Ensure the Deprecation class has a reference to the config currently being used\n Deprecation.config = self.config\n\n # Apply config rules\n for rule in self.configuration_rules:\n rule.apply(self.config, commit)\n\n # Skip linting if this is a special commit type that is configured to be ignored\n ignore_commit_types = [\"merge\", \"squash\", \"fixup\", \"fixup_amend\", \"revert\"]\n for commit_type in ignore_commit_types:\n if getattr(commit, f\"is_{commit_type}_commit\") and getattr(self.config, f\"ignore_{commit_type}_commits\"):\n return []\n\n violations = []\n # determine violations by applying all rules\n violations.extend(self._apply_line_rules([commit.message.title], commit, self.title_line_rules, 1))\n violations.extend(self._apply_line_rules(commit.message.body, commit, self.body_line_rules, 2))\n violations.extend(self._apply_commit_rules(self.commit_rules, commit))\n\n # Sort violations by line number and rule_id. If there's no line nr specified (=common certain commit rules),\n # we replace None with -1 so that it always get's placed first. 
Note that we need this to do this to support\n # python 3, as None is not allowed in a list that is being sorted.\n violations.sort(key=lambda v: (-1 if v.line_nr is None else v.line_nr, v.rule_id))\n return violations", "def _parse_commit_log(base_commit, tip_commit):\n\n class LogState(object):\n SEPARATOR_LINE = 0\n COMMIT_SHA1_LINE = 1\n MERGE_LINE = 2\n AUTHOR_LINE = 3\n COMMITTER_LINE = 4\n MIDDLE_SEPARATOR_LINE = 5\n TITLE_LINE = 6\n BLANK_LINE = 7\n BODY_LINES = 8\n\n commit_info = {}\n check_churn = True\n check_move = True\n\n git_log_cmd = shlex.split(\n 'git log --format=full --reverse {base_commit}..{tip_commit}'.format(\n base_commit=base_commit, tip_commit=tip_commit))\n git_log_output = subprocess.check_output(git_log_cmd)\n\n log_line_state = LogState.SEPARATOR_LINE\n commit_sha1 = None\n merge = None\n author = None\n committer = None\n title = None\n separator = None\n body = []\n git_log_output_lines = git_log_output.splitlines()\n for idx, line in enumerate(git_log_output_lines, 1):\n # commit line\n if (\n log_line_state == LogState.SEPARATOR_LINE and\n line.startswith('commit ')):\n commit_sha1 = line.split(' ')[1]\n log_line_state = LogState.COMMIT_SHA1_LINE\n continue\n\n # Merge: line\n if (\n log_line_state == LogState.COMMIT_SHA1_LINE and\n line.startswith('Merge: ')):\n merge = line.split(' ', 1)[1]\n log_line_state = LogState.MERGE_LINE\n continue\n\n # Author: line\n if (\n log_line_state in [\n LogState.COMMIT_SHA1_LINE, LogState.MERGE_LINE] and\n line.startswith('Author: ')):\n author = line.split(' ', 1)[1]\n log_line_state = LogState.AUTHOR_LINE\n continue\n\n # Commit: line\n if log_line_state == LogState.AUTHOR_LINE and line.startswith('Commit: '):\n committer = line.split(' ', 1)[1]\n log_line_state = LogState.COMMITTER_LINE\n continue\n\n # empty line after Commit: line\n if log_line_state == LogState.COMMITTER_LINE and line == '':\n log_line_state = LogState.MIDDLE_SEPARATOR_LINE\n continue\n\n # Title line of commit message\n if (\n log_line_state == LogState.MIDDLE_SEPARATOR_LINE and\n line.startswith(' ')):\n title = line.lstrip(' ')\n log_line_state = LogState.TITLE_LINE\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # Blank line between title and body (still contains 4 space prefix)\n if log_line_state == LogState.TITLE_LINE and line.startswith(' '):\n separator = line.lstrip(' ')\n log_line_state = LogState.BLANK_LINE\n\n if idx < len(git_log_output_lines):\n 
continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # Body lines\n if (\n log_line_state in [LogState.BLANK_LINE, LogState.BODY_LINES] and\n line.startswith(' ')):\n body.append(line.lstrip(' '))\n log_line_state = LogState.BODY_LINES\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # End of commit message\n if (\n log_line_state in [\n LogState.TITLE_LINE, LogState.BLANK_LINE,\n LogState.BODY_LINES] and\n line == ''):\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n 
commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n\n log_line_state = LogState.SEPARATOR_LINE\n commit_sha1 = None\n merge = None\n author = None\n committer = None\n title = None\n separator = None\n body = []\n\n return commit_info", "def commit_history(cli):\n result = []\n record = OrderedDict()\n for line in cli.splitlines():\n r = re.search(' ([A-Z][a-z]+(?: ID)?): (.*?) +([A-Z][a-z]+): (.*)', line)\n if not r:\n continue\n record[r.group(1)] = r.group(2)\n record[r.group(3)] = r.group(4)\n if r.group(3) == 'Comment':\n result.append(record)\n record = OrderedDict()\n return result", "def test_with_commit_history(self):\n\t\treview_request = self.create_review_request(create_repository=True, publish=True)\n\t\tdiffset = self.create_diffset(review_request=review_request)\n\t\tself.create_diffcommit(diffset=diffset, commit_id=\"r1\", parent_id=\"r0\", diff_contents=(b\"diff --git a/ABC b/ABC\\n\" b\"index 94bdd3e..197009f 100644\\n\" b\"--- ABC\\n\" b\"+++ ABC\\n\" b\"@@ -1,1 +1,1 @@\\n\" b\"-line!\\n\" b\"+line..\\n\"))\n\t\tself.create_diffcommit(diffset=diffset, commit_id=\"r2\", parent_id=\"r1\", diff_contents=(b\"diff --git a/README b/README\\n\" b\"index 94bdd3e..197009f 100644\\n\" b\"--- README\\n\" b\"+++ README\\n\" b\"@@ -1,1 +1,1 @@\\n\" b\"-Hello, world!\\n\" b\"+Hi, world!\\n\"))\n\t\tself.create_diffcommit(diffset=diffset, commit_id=\"r4\", parent_id=\"r3\", diff_contents=(b\"diff --git a/README b/README\\n\" b\"index 197009f..87abad9 100644\\n\" b\"--- README\\n\" b\"+++ README\\n\" b\"@@ -1,1 +1,1 @@\\n\" b\"-Hi, world!\\n\" b\"+Yo, world.\\n\"))\n\t\tcumulative_diff = b\"diff --git a/ABC b/ABC\\n\" b\"index 94bdd3e..197009f 100644\\n\" b\"--- ABC\\n\" b\"+++ ABC\\n\" b\"@@ -1,1 +1,1 @@\\n\" b\"-line!\\n\" b\"+line..\\n\" b\"diff --git a/README b/README\\n\" b\"index 94bdd3e..87abad9 100644\\n\" b\"--- README\\n\" b\"+++ README\\n\" b\"@@ -1,1 +1,1 @@\\n\" b\"-Hello, world!\\n\" b\"+Yo, world.\\n\"\n\t\tdiffset.finalize_commit_series(cumulative_diff=cumulative_diff, validation_info=None, validate=False, save=True)\n\t\tresponse = self.client.get(\"/r/%d/diff/raw/\" % review_request.pk)\n\t\tself.assertEqual(response.content, cumulative_diff)", "def getModifiedBlockList(self):\n if self.binary:\n return []\n block_list = []\n for child in self.children:\n old_line_list = [line.strip() for line, color in child.getOldCodeList()\n if line is not None and color in (MODIFIED_DIFF_COLOR,\n DELETED_DIFF_COLOR)]\n new_line_list = [line.strip() for line, color in child.getNewCodeList()\n if line is not None and color in (MODIFIED_DIFF_COLOR,\n ADDITION_DIFF_COLOR)]\n if old_line_list or new_line_list:\n block_list.append((child,(old_line_list, new_line_list)))\n return block_list", "def commit_detail(self, commit):\n\n files_changes = {\n diff.a_path for diff in commit.diff()\n }\n\n return {\n 'id': commit.hexsha,\n 'date': time.strftime(\n \"%a %b %d %H:%M:%S %Y\",\n time.gmtime(commit.committed_date)\n ),\n 'message': commit.message,\n 'author_name': commit.author.name,\n 'author_email': commit.author.email,\n 'files_change_number': len(files_changes)\n }", "def clean_diff(diff):\n res = []\n skip = True\n for line in diff.split('\\n'):\n if line.startswith('diff --git'):\n skip = True\n if line.startswith('@@ '):\n skip = False\n if not skip:\n res.append(line)\n return '\\n'.join(res)", "def _check_diff_add_delete(commit_sha1, head_sha1):\n commit_info = {}\n branch_sha1s = []\n\n # Get list of commits 
between this one and the branch head\n git_log_cmd = shlex.split(\n 'git log --oneline --no-abbrev --reverse '\n '{commit_sha1}..{head_sha1}'.format(\n commit_sha1=commit_sha1, head_sha1=head_sha1))\n git_log_output = subprocess.check_output(git_log_cmd)\n\n for git_log_line in git_log_output.splitlines():\n if git_log_line == '':\n continue\n\n branch_sha1, _ = git_log_line.split(' ', 1)\n branch_sha1s.append(branch_sha1)\n\n # If there are no commits to check then just return an empty dict\n # and empty list tuple\n if branch_sha1s == []:\n return commit_info, branch_sha1s\n\n diff_lines = _parse_diff(commit_sha1)\n\n context = 'diff-add-delete-check'\n for diff_line in diff_lines:\n line_type, line = diff_line[0], diff_line[1:]\n\n # Skip blank lines\n if line == '':\n continue\n\n # Use the -S parameter of git log to check whether an added line\n # was removed or duplicated in a later commit, or whether a\n # removed line was re-added or also removed elsewhere in a later\n # commit\n\n # Escape double-quotes\n line = re.sub(r'\"', r'\\\\\\\"', line)\n git_log_s_str = (\n 'git log --oneline --no-abbrev --reverse -S\"{line}\" '\n '{commit_sha1}..{head_sha1}'.format(\n line=line, commit_sha1=commit_sha1, head_sha1=head_sha1))\n try:\n git_log_s_cmd = shlex.split(git_log_s_str)\n print 'Running git log -S\"{line}\"'.format(line=line)\n print 'git_log_s_cmd: {git_log_s_cmd}'.format(\n git_log_s_cmd=git_log_s_cmd)\n\n git_log_s_output = subprocess.check_output(git_log_s_cmd)\n print 'git_log_s_output: {git_log_s_output}'.format(\n git_log_s_output=git_log_s_output)\n except (subprocess.CalledProcessError, ValueError) as e:\n print 'Exception when running git log -S\"{line}\"'.format(line=line)\n print 'Exception was {e}'.format(e=e)\n try:\n print 'git_log_s_cmd: {git_log_s_cmd}'.format(\n git_log_s_cmd=git_log_s_cmd)\n except Exception as ex:\n print 'git_log_s_cmd not defined: {ex}'.format(ex=ex)\n print (\n 'Failed to run shlex.split on {git_log_s_str}'.format(\n git_log_s_str=git_log_s_str))\n git_log_s_output = ''\n pass\n\n for git_log_s_line in git_log_s_output.splitlines():\n sha1_s, _ = git_log_s_line.split(' ', 1)\n\n if sha1_s not in commit_info.keys():\n message = None\n if line_type == '+':\n description = (\n 'Adds or removes lines matching a line added in '\n '{commit_sha1}'.format(commit_sha1=commit_sha1))\n message = context, description\n elif line_type == '-':\n description = (\n 'Adds or removes lines matching a line removed in '\n '{commit_sha1}'.format(commit_sha1=commit_sha1))\n message = context, description\n else:\n print (\n 'Got line_type \"{line_type}\" instead of '\n '\"-\" or \"+\" in _check_diff_add_delete'.format(\n line_type=line_type))\n\n commit_info[sha1_s] = [message]\n\n # Remove this sha1 from branch_sha1s\n if sha1_s in branch_sha1s:\n branch_sha1s.remove(sha1_s)\n\n # If we have already marked all the existing commits in the\n # branch, then break out of the loop\n if branch_sha1s == []:\n return commit_info, branch_sha1s\n\n return commit_info, branch_sha1s", "def changes(self, files=[], rev=None, change=None, text=False,\n reverse=False, ignore_all_space=False, ignore_space_change=False,\n ignore_blank_lines=False, context=None, subrepos=False,\n include=None, exclude=None): \n return diffparser.parse(self.diff(files=files, rev=rev, change=change,\n text=text, git=True, reverse=reverse,\n ignore_all_space=ignore_all_space,\n ignore_space_change=ignore_space_change,\n ignore_blank_lines=ignore_blank_lines,\n unified=context, subrepos=subrepos,\n 
include=include, exclude=exclude))", "def _diff_and_commit(self, commit_msg=''):\n if not commit_msg:\n if 'commit_msg' not in self.data:\n # Ask until we get a non-empty commit message.\n while not commit_msg:\n commit_msg = utils.get_input(\n \"What is the commit message? \")\n else:\n commit_msg = self.data['commit_msg']\n\n diff_cmd = self.vcs.cmd_diff()\n diff = execute_command(diff_cmd)\n if sys.version.startswith('2.6.2'):\n # python2.6.2 bug... http://bugs.python.org/issue5170 This is the\n # spot it can surface as we show a part of the changelog which can\n # contain every kind of character. The rest is mostly ascii.\n print(\"Diff results:\")\n print(diff)\n else:\n # Common case\n logger.info(\"The '%s':\\n\\n%s\\n\", diff_cmd, diff)\n if utils.ask(\"OK to commit this\"):\n msg = commit_msg % self.data\n msg = self.update_commit_message(msg)\n commit_cmd = self.vcs.cmd_commit(msg)\n commit = execute_command(commit_cmd)\n logger.info(commit)", "def get_tree(gitdir=\".\"):\n\n cmd = [\"git\", \"log\", \"--all\", \"--branches\", '--pretty=format:{ \"commit\": \"%H\", \"abbreviated_commit\": \"%h\", \"tree\": \"%T\", \"abbreviated_tree\": \"%t\", \"parent\": \"%P\", \"abbreviated_parent\": \"%p\", \"refs\": \"%d\", \"encoding\": \"%e\", \"subject\": \"%s\", \"sanitized_subject_line\": \"%f\", \"commit_notes\": \"\", \"author\": { \"name\": \"%aN\", \"email\": \"%aE\", \"date\": \"%ai\" }, \"commiter\": { \"name\": \"%cN\", \"email\": \"%cE\", \"date\": \"%ci\" }},']\n\n output = run(cmd)\n lines = output.split(\"\\n\")\n\n content = \"\"\n history = []\n for l in lines:\n try:\n revisedcontent = content + l\n if revisedcontent.count('\"') % 2 == 0:\n j = json.loads(revisedcontent[:-1])\n if \"Notes added by\" in j['subject']:\n content = \"\"\n continue\n history.append(j)\n content = \"\"\n else:\n content = revisedcontent\n except Exception as e:\n print(\"Error while parsing record\")\n print(revisedcontent)\n content = \"\"\n\n # Order by time. 
First commit first...\n history.reverse()\n\n #\n changes = get_change()\n\n for i in range(len(history)):\n abbrev_commit = history[i]['abbreviated_commit']\n if abbrev_commit not in changes:\n raise Exception(\"Missing changes for \" + abbrev_commit)\n\n history[i]['changes'] = changes[abbrev_commit]['changes']\n\n\n return history", "def commits(self):\n p = Popen(['git', 'rev-list', '--all', '--timestamp', '--parents'], \n cwd=self.path, stdout=PIPE)\n for line in p.stdout:\n commit_info = line.split()\n if len(commit_info) < 2:\n print >> sys.stderr, \"error: bad line: %r\" % line\n continue\n timestamp = int(commit_info.pop(0))\n commit_info = map(CommitId, commit_info)\n commit_id = commit_info.pop(0)\n yield (timestamp, commit_id, commit_info)", "def check_commit_problems(self, commit, diff):\n\n # Initialise\n self._license_problem = False\n self._commit_problem = False\n self._commit_notes = defaultdict(list)\n\n # Unsafe regex checks...\n unsafe_matches = list()\n unsafe_matches.append( r\"\\b(KRun::runCommand|K3?ShellProcess|setUseShell|setShellCommand)\\b\\s*[\\(\\r\\n]\" )\n unsafe_matches.append( r\"\\b(system|popen|mktemp|mkstemp|tmpnam|gets|syslog|strptime)\\b\\s*[\\(\\r\\n]\" )\n unsafe_matches.append( r\"(scanf)\\b\\s*[\\(\\r\\n]\" )\n valid_filename_regex = r\"\\.(cpp|cc|cxx|C|c\\+\\+|c|l|y||h|H|hh|hxx|hpp|h\\+\\+|qml)$\"\n\n # Retrieve the diff and do the problem checks...\n filename = unicode(\"\")\n filediff = list()\n for line in diff:\n file_change = re.match( \"^diff --(cc |git a\\/.+ b\\/)(.+)$\", line )\n if file_change:\n # Are we changing file? If so, we have the full diff, so do a license check....\n if filename != \"\" and commit.files_changed[ filename ][\"change\"] in ['A'] and re.search(valid_filename_regex, filename):\n self.check_commit_license(filename, ''.join(filediff))\n\n filediff = list()\n filename = file_change.group(2)\n continue\n\n # Diff headers are bogus\n if re.match(\"@@ -\\d+,\\d+ \\+\\d+ @@\", line):\n filediff = list()\n continue\n\n # Do an incremental check for *.desktop syntax errors....\n if re.search(\"\\.desktop$\", filename) and re.search(\"[^=]+=.*[ \\t]$\", line) and line.startswith(\"+\") and not re.match(\"^\\+#\", line):\n self._commit_notes[filename].append( \"[TRAILING SPACE] **\" )\n self._commit_problem = True\n\n # Check for things which are unsafe...\n for safety_match in unsafe_matches:\n match = re.match(safety_match, line)\n if match:\n note = \"[POSSIBLY UNSAFE: {0}] **\".format( match.group(1) )\n self._commit_notes[filename].append(note)\n self._commit_problem = True\n\n # Store the diff....\n filediff.append(line)\n\n if filename != \"\" and commit.files_changed[ filename ][\"change\"] in ['A'] and re.search(valid_filename_regex, filename):\n self.check_commit_license(filename, ''.join(filediff))", "def diff(self, top_repo_path):\n p = Popen(\n [\"git\", \"diff\", \"--numstat\"], stdout=PIPE, stderr=PIPE, cwd=top_repo_path\n )\n my_output, my_error = p.communicate()\n if p.returncode == 0:\n result = []\n line_array = my_output.decode(\"utf-8\").splitlines()\n for line in line_array:\n linesplit = line.split()\n result.append(\n {\n \"insertions\": linesplit[0],\n \"deletions\": linesplit[1],\n \"filename\": linesplit[2],\n }\n )\n return {\"code\": p.returncode, \"result\": result}\n else:\n return {\"code\": p.returncode, \"message\": my_error.decode(\"utf-8\")}", "def get_diff_as_json(filename):\n with open(filename, encoding=\"utf-8\") as f:\n lines = f.readlines()\n\n lines = [l.strip() for l in lines]\n 
changes ={}\n i = 0\n n = len(lines)\n\n current_file_name = None\n current_file_additions = []\n current_file_deletions = []\n\n while i < n:\n l = lines[i]\n if l.startswith(\"diff \"):\n if current_file_name:\n changes[current_file_name] = {}\n changes[current_file_name]['addition'] = current_file_additions\n changes[current_file_name]['deletion'] = current_file_deletions\n current_file_additions = []\n current_file_deletions = []\n current_file_name = l.split(' ')[3][2:]\n i+=1\n elif l.startswith('@@'):\n line_info = l.split('@@')[1].split()\n for info in line_info:\n key = info[0]\n info = info[1:]\n if ',' in info:\n line_num, n_lines = [int(li) for li in info.split(',')]\n else:\n n_lines = 1\n line_num = int(info)\n if key == '+':\n insert_index = line_num\n n_append = n_lines\n elif key == '-':\n delete_index = line_num\n n_delete = n_lines\n i+=1\n j=0\n while j<n_delete and lines[i].startswith('-'):\n current_file_deletions.append(delete_index+j)\n j+=1\n i+=1\n assert n_delete == j\n while i<n and lines[i].startswith('\\\\'):\n i+=1\n j=0\n while j<n_append and lines[i].startswith('+'):\n current_file_additions.append(insert_index+j)\n j+=1\n i+=1\n assert n_append == j\n else:\n print(lines[i])\n i+=1\n\n if current_file_name:\n changes[current_file_name] = {}\n changes[current_file_name]['addition'] = current_file_additions\n changes[current_file_name]['deletion'] = current_file_deletions\n\n return changes", "def deal_lines(self, lines, conf):\n if lines == ['']:\n print \"NO new %s commit!\" % conf\n else:\n for line in lines:\n if re.search('\\d+ files? changed', line) is None:\n pos = line.find(' ')\n if pos != -1:\n try:\n parts = line.split(' ', 2)\n commit_id = parts[0]\n self.current_commit = commit_id\n stamp = int(parts[1])\n ti = datetime.datetime.fromtimestamp(float(stamp))\n s_time = datetime.datetime.fromtimestamp(float(0))\n if self.start_date == s_time:\n self.start_date = ti\n elif self.start_date > ti:\n self.start_date = ti\n author, mail = parts[2].split('<', 1)\n message = mail.split('> ', 1)[1]\n mail = mail.split('>', 1)[0]\n if re.search(': ', message) is not None:\n messagetype = message.split(': ', 1)[0]\n if messagetype not in CLASSIFICATION:\n messagetype = 'OTR'\n else:\n messagetype = 'OTR'\n if commit_id not in self.commit_dictionary:\n self.commit_dictionary[commit_id]\\\n = [commit_id, mail,\n stamp, messagetype,\n messagetype, 0, 0, 0, 0]\n # [files, inserted, deleted, total_lines]\n if mail not in self.author_dictionary:\n self.author_dictionary[mail] = [author,\n mail, 0, 0,\n 0, 0, 1,\n stamp]\n # [files,inserted,deleted,total_lines,commit,stamp]\n else:\n self.author_dictionary[mail][6] += 1\n if stamp > self.author_dictionary[mail][7]:\n self.author_dictionary[mail][7] = stamp\n self.total_patches += 1\n except:\n print 'Warning: unexpected line \"%s\"' % line\n else:\n if conf == 'no_merges':\n try:\n commit_id = self.current_commit\n numbers = self.getstatsummarycounts(line)\n if len(numbers) == 3:\n (files, inserted, deleted) = \\\n map(lambda el: int(el), numbers)\n total_lines = inserted - deleted\n self.commit_dictionary[commit_id][5] = files\n self.commit_dictionary[commit_id][6] = inserted\n self.commit_dictionary[commit_id][7] = deleted\n self.commit_dictionary[commit_id][8] = total_lines\n self.author_dictionary[mail][2] += files\n self.author_dictionary[mail][3] += inserted\n self.author_dictionary[mail][4] += deleted\n self.author_dictionary[mail][5] += total_lines\n self.total_lines_inserted += inserted\n 
self.total_lines_deleted += deleted\n self.total_lines += total_lines\n self.current_commit = None\n except:\n print 'Warning: unexpected line \"%s\"' % line", "def diff(self):\n # Split new and existing content in lines\n current_content = self.current_content.splitlines(1)\n new_content = self.content.splitlines(1)\n\n # Call difflib\n diff = ''.join(difflib.unified_diff(current_content, new_content))\n print highlight(diff, DiffLexer(), Formatter())\n\n return diff", "def parse_hunks(diff: str) -> list[Hunk]:\n diff_pattern = (\n r\"diff --git a/.* b/(.*)\\n\" # capture file name\n r\"(?:\\w+ file mode \\d+\\n)?\" # maybe 'new file mode 100644' or similar\n r\"index .*\\n\"\n r\"--- .*\\n\"\n r\"\\+\\+\\+ .*\\n\"\n )\n\n # capture line number and length from header\n hunk_header_pattern = r\"@@ -\\d+,\\d+ \\+(\\d+),(\\d+) @@.*\\n\"\n\n # ignore initial empty match\n raw_per_file_hunks = re.split(diff_pattern, diff)[1:]\n\n parsed_hunks = []\n\n for file, raw_hunks in batch(raw_per_file_hunks, 2):\n # ignore initial empty match\n hunks = re.split(hunk_header_pattern, raw_hunks, re.MULTILINE)[1:]\n for start, length, body in batch(hunks, 3):\n lines = body.split(\"\\n\")\n lines = lines if lines[-1] else lines[:-1] # trim empty line\n parsed_hunks.append(Hunk(file, int(start), int(length), lines))\n\n return parsed_hunks", "def get_commits(git_path):\n\n proc = subprocess.Popen(\n [\"git\", \"--git-dir=%s\" % git_path, \"log\", \"--full-history\",\n \"--format=NEW COMMIT%n%ct%n%aN%n%aE\", \"--numstat\"],\n stdout=subprocess.PIPE)\n line_stack = []\n\n def peek_line():\n if not line_stack:\n line_stack.append(proc.stdout.readline())\n return line_stack[-1]\n\n def pop_line():\n if line_stack:\n return line_stack.pop()\n return proc.stdout.readline()\n\n def push_line(line):\n line_stack.append(line)\n\n def read_commit():\n while peek_line() and not peek_line().strip():\n pop_line()\n if not peek_line(): return None\n assert peek_line().strip() == \"NEW COMMIT\"\n pop_line()\n\n date = int(pop_line())\n name = pop_line().strip()\n email = pop_line().strip()\n author = sanitize_author(name, email)\n\n if peek_line().strip() == \"NEW COMMIT\":\n return date, author, 0, 0, 0\n\n pop_line()\n insertion_count = 0\n deletion_count = 0\n file_count = 0\n while peek_line().strip() and peek_line().strip() != \"NEW COMMIT\":\n insertions, deletions, path = pop_line().strip().split(None, 2)\n if insertions == \"-\": insertions = 0\n if deletions == \"-\": deletions = 0\n insertion_count += int(insertions)\n deletion_count += int(deletions)\n file_count += 1\n\n return date, author, insertion_count, deletion_count, file_count\n\n while True:\n commit = read_commit()\n if commit is None:\n break\n yield commit", "def test_diff_git_line_without_a_b_and_spaces(self):\n diff = (\n b'diff --git foo bar1 foo bar1\\n'\n b'deleted file mode 100644\\n'\n b'index 612544e4343bf04967eb5ea80257f6c64d6f42c7..'\n b'0000000000000000000000000000000000000000\\n'\n )\n\n parsed_files = self.tool.get_parser(diff).parse()\n self.assertEqual(len(parsed_files), 1)\n\n self.assert_parsed_diff_file(\n parsed_files[0],\n orig_filename=b'foo bar1',\n orig_file_details=b'612544e4343bf04967eb5ea80257f6c64d6f42c7',\n modified_filename=b'foo bar1',\n modified_file_details=b'0000000000000000000000000000000000000000',\n old_unix_mode='100644',\n deleted=True,\n data=diff)", "def commits_parsing(query):\n logging.info(\"GET request commit parsing is working\")\n results = {}\n list_of_commits = []\n clear_list_message = []\n 
clear_list_committer = []\n json_commits = {}\n json_all = {}\n for single_query in query:\n list_of_commits += {single_query[:-6]}\n\n try:\n results = requests.get(single_query[:-6])\n except requests.ConnectionError as exception:\n return f'{exception}'\n\n json_all = results.json()[0]\n\n json_commits = json_all['commit']\n clear_list_message += {json_commits['message']}\n clear_list_committer += {json_commits['committer']['name']}\n\n return clear_list_message, clear_list_committer", "def test_diff_git_line_without_a_b_and_spaces_changed(self):\n diff = (b'diff --git foo bar1 foo bar2\\n'\n b'deleted file mode 100644\\n'\n b'index 612544e4343bf04967eb5ea80257f6c64d6f42c7..'\n b'0000000000000000000000000000000000000000\\n')\n\n with self.assertRaises(DiffParserError) as cm:\n self.tool.get_parser(diff).parse()\n\n self.assertTrue(str(cm.exception).startswith(\n 'Unable to parse the \"diff --git\" line'))", "def get_chunks(diff):\n diff = clean_diff(diff)\n chunk = []\n chunks = []\n for line in diff.split('\\n'):\n if not line:\n continue\n if line.startswith('@@ '):\n if chunk:\n chunks.append('\\n'.join(chunk) + '\\n')\n chunk = [line]\n else:\n chunk.append(line)\n if chunk:\n chunks.append('\\n'.join(chunk) + '\\n')\n return chunks", "def get_changed_files_from(old_commit_sha, new_commit_sha):\n return check_output(\n \"git diff-tree --no-commit-id --name-only -r {0}..{1}\".format(\n old_commit_sha,\n new_commit_sha\n ).split(\" \")\n ).decode('utf-8').strip()", "def _commit_tree(commit):\n return {\n \"commit\": commit.hexsha,\n \"parents\": [_commit_tree(c) for c in commit.parents],\n \"tree\": commit.tree.hexsha,\n \"author\": str(commit.author),\n \"authored_date\": commit.authored_date,\n \"committer\": str(commit.committer),\n \"committed_date\": commit.committed_date,\n \"message\": commit.message\n }", "def _check_diff_move(commit_sha1, head_sha1):\n commit_info = {}\n branch_sha1s = []\n\n # Get list of commits between this one and the branch head\n git_log_cmd = shlex.split(\n 'git log --oneline --no-abbrev --reverse '\n '{commit_sha1}..{head_sha1}'.format(\n commit_sha1=commit_sha1, head_sha1=head_sha1))\n\n git_log_output = subprocess.check_output(git_log_cmd)\n\n for git_log_line in git_log_output.splitlines():\n if git_log_line == '':\n continue\n\n branch_sha1, _ = git_log_line.split(' ', 1)\n branch_sha1s.append(branch_sha1)\n\n # If there are no commits to check then just return an empty dict\n # and empty list tuple\n if branch_sha1s == []:\n return commit_info, branch_sha1s\n\n diff_lines = _parse_diff(commit_sha1)\n\n context = 'diff-move-check'\n for diff_line in diff_lines:\n line_type, line = diff_line[0], diff_line[1:]\n\n # Skip blank lines\n if line == '':\n continue\n\n # Use the -G parameter of git log to check whether an added or\n # deleted line was moved in a later commit\n\n # Escape regex meta-characters\n line = re.sub(r'([].^$*+?{}\\\\[|()\"])', r'\\\\\\1', line)\n\n git_log_g_str = (\n 'git log --oneline --no-abbrev --reverse -G\"^{line}$\" '\n '{commit_sha1}..{head_sha1}'.format(\n line=line, commit_sha1=commit_sha1, head_sha1=head_sha1))\n try:\n git_log_g_cmd = shlex.split(git_log_g_str)\n print 'Running git log -G\"^{line}$\"'.format(line=line)\n print 'git_log_g_cmd: {git_log_g_cmd}'.format(\n git_log_g_cmd=git_log_g_cmd)\n git_log_g_output = subprocess.check_output(git_log_g_cmd)\n print 'git_log_g_output: {git_log_g_output}'.format(\n git_log_g_output=git_log_g_output)\n\n except (subprocess.CalledProcessError, ValueError) as e:\n print 
'Exception when running git log -G\"^{line}$\"'.format(line=line)\n print 'Exception was {e}'.format(e=e)\n try:\n print 'git_log_g_cmd: {git_log_g_cmd}'.format(\n git_log_g_cmd=git_log_g_cmd)\n except Exception as ex:\n print 'git_log_g_cmd not defined: {ex}'.format(ex=ex)\n print (\n 'Failed to run shlex.split on {git_log_g_str}'.format(\n git_log_g_str=git_log_g_str))\n git_log_g_output = ''\n pass\n\n for git_log_g_line in git_log_g_output.splitlines():\n sha1_g, _ = git_log_g_line.split(' ', 1)\n\n if sha1_g not in commit_info.keys():\n message = None\n if line_type == '+':\n description = (\n 'Removes a line matching a line added in '\n '{commit_sha1}'.format(commit_sha1=commit_sha1))\n message = context, description\n elif line_type == '-':\n description = (\n 'Re-adds a line matching a line removed in '\n '{commit_sha1}'.format(commit_sha1=commit_sha1))\n message = context, description\n else:\n print (\n 'Got line_type \"{line_type}\" instead of '\n '\"-\" or \"+\" in _check_diff_move'.format(line_type=line_type))\n\n commit_info[sha1_g] = [message]\n\n # Remove this sha1 from branch_sha1s\n if sha1_g in branch_sha1s:\n branch_sha1s.remove(sha1_g)\n\n # If we have already marked all the existing commits in the\n # branch, then break out of the loop\n if branch_sha1s == []:\n return commit_info, branch_sha1s\n\n return commit_info, branch_sha1s" ]
[ "0.67668235", "0.6617173", "0.615786", "0.61474323", "0.61232686", "0.61045563", "0.6047123", "0.60470784", "0.6023137", "0.59735805", "0.59698206", "0.5967609", "0.5944944", "0.5932438", "0.588417", "0.58626026", "0.5787998", "0.5774946", "0.5749417", "0.57234246", "0.57083136", "0.56782067", "0.56462246", "0.564311", "0.56080705", "0.5599191", "0.55443996", "0.553298", "0.55205315", "0.5472923" ]
0.7781374
0
Check added code is not removed and vice versa. We want to determine whether later commits in the same branch remove or duplicate code that was added by an earlier commit in the branch, or whether removed code is re-added or removed in another location. This can be done by passing a line of the commit's associated diff to the -S parameter of git log.
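For illustration only, here is a minimal sketch of the core `git log -S` lookup this describes, kept separate from the full implementation recorded below; the revision range and the example line are placeholder assumptions, not values from this dataset. Passing the arguments as a list to subprocess also avoids the quote-escaping the original command string needs.

# Sketch: list later commits in commit_sha1..head_sha1 that change the number
# of occurrences of `line` (git log -S, the "pickaxe" search).
# commit_sha1, head_sha1 and the example line are placeholders.
import subprocess

def commits_touching_line(line, commit_sha1, head_sha1):
    cmd = ['git', 'log', '--oneline', '--no-abbrev', '--reverse',
           '-S' + line, '{0}..{1}'.format(commit_sha1, head_sha1)]
    output = subprocess.check_output(cmd).decode('utf-8', 'replace')
    # Each output line is "<sha1> <subject>"; keep only the sha1s
    return [entry.split(' ', 1)[0] for entry in output.splitlines() if entry]

if __name__ == '__main__':
    print(commits_touching_line('some added line', 'abc1234', 'HEAD'))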
def _check_diff_add_delete(commit_sha1, head_sha1): commit_info = {} branch_sha1s = [] # Get list of commits between this one and the branch head git_log_cmd = shlex.split( 'git log --oneline --no-abbrev --reverse ' '{commit_sha1}..{head_sha1}'.format( commit_sha1=commit_sha1, head_sha1=head_sha1)) git_log_output = subprocess.check_output(git_log_cmd) for git_log_line in git_log_output.splitlines(): if git_log_line == '': continue branch_sha1, _ = git_log_line.split(' ', 1) branch_sha1s.append(branch_sha1) # If there are no commits to check then just return an empty dict # and empty list tuple if branch_sha1s == []: return commit_info, branch_sha1s diff_lines = _parse_diff(commit_sha1) context = 'diff-add-delete-check' for diff_line in diff_lines: line_type, line = diff_line[0], diff_line[1:] # Skip blank lines if line == '': continue # Use the -S parameter of git log to check whether an added line # was removed or duplicated in a later commit, or whether a # removed line was re-added or also removed elsewhere in a later # commit # Escape double-quotes line = re.sub(r'"', r'\\\"', line) git_log_s_str = ( 'git log --oneline --no-abbrev --reverse -S"{line}" ' '{commit_sha1}..{head_sha1}'.format( line=line, commit_sha1=commit_sha1, head_sha1=head_sha1)) try: git_log_s_cmd = shlex.split(git_log_s_str) print 'Running git log -S"{line}"'.format(line=line) print 'git_log_s_cmd: {git_log_s_cmd}'.format( git_log_s_cmd=git_log_s_cmd) git_log_s_output = subprocess.check_output(git_log_s_cmd) print 'git_log_s_output: {git_log_s_output}'.format( git_log_s_output=git_log_s_output) except (subprocess.CalledProcessError, ValueError) as e: print 'Exception when running git log -S"{line}"'.format(line=line) print 'Exception was {e}'.format(e=e) try: print 'git_log_s_cmd: {git_log_s_cmd}'.format( git_log_s_cmd=git_log_s_cmd) except Exception as ex: print 'git_log_s_cmd not defined: {ex}'.format(ex=ex) print ( 'Failed to run shlex.split on {git_log_s_str}'.format( git_log_s_str=git_log_s_str)) git_log_s_output = '' pass for git_log_s_line in git_log_s_output.splitlines(): sha1_s, _ = git_log_s_line.split(' ', 1) if sha1_s not in commit_info.keys(): message = None if line_type == '+': description = ( 'Adds or removes lines matching a line added in ' '{commit_sha1}'.format(commit_sha1=commit_sha1)) message = context, description elif line_type == '-': description = ( 'Adds or removes lines matching a line removed in ' '{commit_sha1}'.format(commit_sha1=commit_sha1)) message = context, description else: print ( 'Got line_type "{line_type}" instead of ' '"-" or "+" in _check_diff_add_delete'.format( line_type=line_type)) commit_info[sha1_s] = [message] # Remove this sha1 from branch_sha1s if sha1_s in branch_sha1s: branch_sha1s.remove(sha1_s) # If we have already marked all the existing commits in the # branch, then break out of the loop if branch_sha1s == []: return commit_info, branch_sha1s return commit_info, branch_sha1s
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_diff_move(commit_sha1, head_sha1):\n commit_info = {}\n branch_sha1s = []\n\n # Get list of commits between this one and the branch head\n git_log_cmd = shlex.split(\n 'git log --oneline --no-abbrev --reverse '\n '{commit_sha1}..{head_sha1}'.format(\n commit_sha1=commit_sha1, head_sha1=head_sha1))\n\n git_log_output = subprocess.check_output(git_log_cmd)\n\n for git_log_line in git_log_output.splitlines():\n if git_log_line == '':\n continue\n\n branch_sha1, _ = git_log_line.split(' ', 1)\n branch_sha1s.append(branch_sha1)\n\n # If there are no commits to check then just return an empty dict\n # and empty list tuple\n if branch_sha1s == []:\n return commit_info, branch_sha1s\n\n diff_lines = _parse_diff(commit_sha1)\n\n context = 'diff-move-check'\n for diff_line in diff_lines:\n line_type, line = diff_line[0], diff_line[1:]\n\n # Skip blank lines\n if line == '':\n continue\n\n # Use the -G parameter of git log to check whether an added or\n # deleted line was moved in a later commit\n\n # Escape regex meta-characters\n line = re.sub(r'([].^$*+?{}\\\\[|()\"])', r'\\\\\\1', line)\n\n git_log_g_str = (\n 'git log --oneline --no-abbrev --reverse -G\"^{line}$\" '\n '{commit_sha1}..{head_sha1}'.format(\n line=line, commit_sha1=commit_sha1, head_sha1=head_sha1))\n try:\n git_log_g_cmd = shlex.split(git_log_g_str)\n print 'Running git log -G\"^{line}$\"'.format(line=line)\n print 'git_log_g_cmd: {git_log_g_cmd}'.format(\n git_log_g_cmd=git_log_g_cmd)\n git_log_g_output = subprocess.check_output(git_log_g_cmd)\n print 'git_log_g_output: {git_log_g_output}'.format(\n git_log_g_output=git_log_g_output)\n\n except (subprocess.CalledProcessError, ValueError) as e:\n print 'Exception when running git log -G\"^{line}$\"'.format(line=line)\n print 'Exception was {e}'.format(e=e)\n try:\n print 'git_log_g_cmd: {git_log_g_cmd}'.format(\n git_log_g_cmd=git_log_g_cmd)\n except Exception as ex:\n print 'git_log_g_cmd not defined: {ex}'.format(ex=ex)\n print (\n 'Failed to run shlex.split on {git_log_g_str}'.format(\n git_log_g_str=git_log_g_str))\n git_log_g_output = ''\n pass\n\n for git_log_g_line in git_log_g_output.splitlines():\n sha1_g, _ = git_log_g_line.split(' ', 1)\n\n if sha1_g not in commit_info.keys():\n message = None\n if line_type == '+':\n description = (\n 'Removes a line matching a line added in '\n '{commit_sha1}'.format(commit_sha1=commit_sha1))\n message = context, description\n elif line_type == '-':\n description = (\n 'Re-adds a line matching a line removed in '\n '{commit_sha1}'.format(commit_sha1=commit_sha1))\n message = context, description\n else:\n print (\n 'Got line_type \"{line_type}\" instead of '\n '\"-\" or \"+\" in _check_diff_move'.format(line_type=line_type))\n\n commit_info[sha1_g] = [message]\n\n # Remove this sha1 from branch_sha1s\n if sha1_g in branch_sha1s:\n branch_sha1s.remove(sha1_g)\n\n # If we have already marked all the existing commits in the\n # branch, then break out of the loop\n if branch_sha1s == []:\n return commit_info, branch_sha1s\n\n return commit_info, branch_sha1s", "def main():\n smart_commit_msg_filename = SMART_COMMIT_MSG_FILENAME\n paths = get_staged_paths()\n if not len(paths):\n raise Exception(\"did you even add anything to staging\")\n paths += [smart_commit_msg_filename]\n mr_edited_file = max(paths, key=lambda k: os.path.getmtime(k))\n if mr_edited_file == smart_commit_msg_filename:\n print(git_commit())\n else:\n print(\"Update the patch notes!\")", "def path_touched(*paths, commit_range):\n return check_output([\n 
'git', 'diff', '--name-only', commit_range, '--', *paths\n ]).decode('utf-8').strip() != ''", "def local_changes():\n result, output = popen('git status', False, False)\n try:\n return not output[-1].startswith(\"nothing to commit\")\n except IndexError:\n return True", "def is_rev_dirty(ctx: \"PlanemoCliContext\", directory: str) -> bool:\n return io.shell([\"git\", \"diff\", \"--quiet\"], cwd=directory) != 0", "def test_diff_git_line_without_a_b_and_spaces(self):\n diff = (\n b'diff --git foo bar1 foo bar1\\n'\n b'deleted file mode 100644\\n'\n b'index 612544e4343bf04967eb5ea80257f6c64d6f42c7..'\n b'0000000000000000000000000000000000000000\\n'\n )\n\n parsed_files = self.tool.get_parser(diff).parse()\n self.assertEqual(len(parsed_files), 1)\n\n self.assert_parsed_diff_file(\n parsed_files[0],\n orig_filename=b'foo bar1',\n orig_file_details=b'612544e4343bf04967eb5ea80257f6c64d6f42c7',\n modified_filename=b'foo bar1',\n modified_file_details=b'0000000000000000000000000000000000000000',\n old_unix_mode='100644',\n deleted=True,\n data=diff)", "def test_diff_git_line_without_a_b(self):\n diff = (\n b'diff --git foo foo\\n'\n b'deleted file mode 100644\\n'\n b'index 612544e4343bf04967eb5ea80257f6c64d6f42c7..'\n b'0000000000000000000000000000000000000000\\n'\n )\n\n parsed_files = self.tool.get_parser(diff).parse()\n self.assertEqual(len(parsed_files), 1)\n\n self.assert_parsed_diff_file(\n parsed_files[0],\n orig_filename=b'foo',\n orig_file_details=b'612544e4343bf04967eb5ea80257f6c64d6f42c7',\n modified_filename=b'foo',\n modified_file_details=b'0000000000000000000000000000000000000000',\n old_unix_mode='100644',\n deleted=True,\n data=diff)", "def check_commit_problems(self, commit, diff):\n\n # Initialise\n self._license_problem = False\n self._commit_problem = False\n self._commit_notes = defaultdict(list)\n\n # Unsafe regex checks...\n unsafe_matches = list()\n unsafe_matches.append( r\"\\b(KRun::runCommand|K3?ShellProcess|setUseShell|setShellCommand)\\b\\s*[\\(\\r\\n]\" )\n unsafe_matches.append( r\"\\b(system|popen|mktemp|mkstemp|tmpnam|gets|syslog|strptime)\\b\\s*[\\(\\r\\n]\" )\n unsafe_matches.append( r\"(scanf)\\b\\s*[\\(\\r\\n]\" )\n valid_filename_regex = r\"\\.(cpp|cc|cxx|C|c\\+\\+|c|l|y||h|H|hh|hxx|hpp|h\\+\\+|qml)$\"\n\n # Retrieve the diff and do the problem checks...\n filename = unicode(\"\")\n filediff = list()\n for line in diff:\n file_change = re.match( \"^diff --(cc |git a\\/.+ b\\/)(.+)$\", line )\n if file_change:\n # Are we changing file? 
If so, we have the full diff, so do a license check....\n if filename != \"\" and commit.files_changed[ filename ][\"change\"] in ['A'] and re.search(valid_filename_regex, filename):\n self.check_commit_license(filename, ''.join(filediff))\n\n filediff = list()\n filename = file_change.group(2)\n continue\n\n # Diff headers are bogus\n if re.match(\"@@ -\\d+,\\d+ \\+\\d+ @@\", line):\n filediff = list()\n continue\n\n # Do an incremental check for *.desktop syntax errors....\n if re.search(\"\\.desktop$\", filename) and re.search(\"[^=]+=.*[ \\t]$\", line) and line.startswith(\"+\") and not re.match(\"^\\+#\", line):\n self._commit_notes[filename].append( \"[TRAILING SPACE] **\" )\n self._commit_problem = True\n\n # Check for things which are unsafe...\n for safety_match in unsafe_matches:\n match = re.match(safety_match, line)\n if match:\n note = \"[POSSIBLY UNSAFE: {0}] **\".format( match.group(1) )\n self._commit_notes[filename].append(note)\n self._commit_problem = True\n\n # Store the diff....\n filediff.append(line)\n\n if filename != \"\" and commit.files_changed[ filename ][\"change\"] in ['A'] and re.search(valid_filename_regex, filename):\n self.check_commit_license(filename, ''.join(filediff))", "def check_diff(src, dst):\n result = _subprocess(['git', '--no-pager', 'log', '--graph', '--abbrev-commit', '--pretty=oneline',\n '--no-merges', \"--\", f\"{src}\", f\"^{dst}\"])\n\n if result:\n print(f\"Warning: the following commits are present on {dst} but not on {src}: \\n{result}\")\n if args.force:\n print(f\"Warning: they will be overwritten on {dst} and discarded.\")\n else:\n print(f\"Warning: run with --force to overwrite and discard these commits from {dst}\")\n exit(1)", "def changed_in_diff(diff: PatchedFile, line_n: int):\n for hunk in diff:\n hunk: Hunk\n for line_change in hunk:\n line_change: Line\n if line_change.is_added and line_change.target_line_no == line_n:\n return True\n return False", "def line_part_of_commit(file, line, commit):\n if line == '0': return False\n\n line_val = git(\"blame\", \"-l\", \"-L{0},{0}\".format(line), file)\n return line_val.split(\" \", 1)[0] == commit", "def check_diffs():\n process = Popen([\"git\", \"diff\", \"HEAD^\", \"--name-only\"], stdout=PIPE)\n\n diff, stderr = process.communicate()\n\n if process.returncode !=0:\n raise Exception(\"Unable to do git diff\")\n return diff.splitlines(False)", "def prevent_duplicate_commits(oldrev, newrev, refname):\n try:\n commit_list = subprocess.check_output([\n \"git\", \"rev-list\", newrev, \"-n\", GIT_COMMIT_LIST_LENGTH\n ])\n except Exception as e:\n print(\"Exception: %s\" % e)\n pass\n commit_list = commit_list.split(\"\\n\")\n commit_list = [item for item in commit_list if len(item) > 0]\n\n # For each of the first GIT_COMMIT_LIST_LENGTH pairs, check diff\n for i in range(len(commit_list) - 1):\n first = commit_list[i]\n second = commit_list[i + 1]\n\n rev1 = get_svn_revision(first)\n rev2 = get_svn_revision(second)\n if rev1 and (rev1 == rev2):\n diff = subprocess.check_output([\"git\", \"diff\", first, second])\n # If the diff of two commits is empty, means they are the same.\n # i.e duplicate\n if not diff:\n print(ERROR_MSG % (first, second))\n sys.exit(1)\n return", "def audit_eol(self):\n\n # Regex's....\n re_commit = re.compile(\"^\\xff(.+)\\xff$\")\n re_filename = re.compile(\"^diff --(cc |git a\\/.+ b\\/)(.+)$\")\n blocked_eol = re.compile(r\"(?:\\r\\n|\\n\\r|\\r)$\")\n\n # Bool to allow special files such as vcards to bypass the check\n eol_allowed = False\n\n\n # Do EOL 
audit!\n process = get_change_diff( self.repository, [\"-p\"] )\n for line in process.stdout:\n commit_change = re.match( re_commit, line )\n if commit_change:\n commit = commit_change.group(1)\n continue\n\n file_change = re.match( re_filename, line )\n if file_change:\n filename = file_change.group(2)\n eol_violation = False\n eol_allowed = False\n\n # Check if it's an allowed mimetype\n # First - check with the mimetypes system, to see if it can tell\n guessed_type, _ = mimetypes.guess_type(filename)\n if guessed_type in self.ALLOWED_EOL_MIMETYPES:\n eol_allowed = True\n continue\n\n # Second check: by file extension\n # NOTE: This uses the FIRST dot as extension\n splitted_filename = filename.split(os.extsep)\n # Check if there's an extension or not\n # NOTE This assumes that files use dots for extensions only!\n if len(splitted_filename) > 1:\n extension = splitted_filename[1]\n if extension in self.ALLOWED_EOL_EXTENSIONS:\n eol_allowed = True\n\n continue\n\n # Unless they added it, ignore it\n if not line.startswith(\"+\"):\n continue\n\n if re.search( blocked_eol, line ) and not eol_violation:\n # Is this an allowed filename?\n if eol_allowed:\n continue\n\n # Failure has been found... handle it\n eol_violation = True\n self.__log_failure(commit, \"End of Line Style (non-Unix): \" + filename);", "def is_release_notes_changed(self):\n # there exists a difference between origin/master and current branch\n if self.master_diff:\n diff_releases = self.master_diff.split('##')\n unreleased_section = diff_releases[1]\n unreleased_section_lines = unreleased_section.split('\\n')\n\n adds_in_diff = 0\n removes_in_diff = 0\n\n for line in unreleased_section_lines:\n if line.startswith('+'):\n adds_in_diff += 1\n elif line.startswith('-') and not re.match(r'- *$', line):\n removes_in_diff += 1\n\n # means that at least one new line was added\n if adds_in_diff - removes_in_diff > 0:\n return True\n\n print_error(F'No new comment has been added in the release notes file: {self.release_notes_path}')\n return False", "def test_diff_git_line_without_a_b_and_spaces_quotes(self):\n diff = (\n b'diff --git \"foo bar1\" \"foo bar1\"\\n'\n b'deleted file mode 100644\\n'\n b'index 612544e4343bf04967eb5ea80257f6c64d6f42c7..'\n b'0000000000000000000000000000000000000000\\n'\n )\n\n parsed_files = self.tool.get_parser(diff).parse()\n self.assertEqual(len(parsed_files), 1)\n\n self.assert_parsed_diff_file(\n parsed_files[0],\n orig_filename=b'foo bar1',\n orig_file_details=b'612544e4343bf04967eb5ea80257f6c64d6f42c7',\n modified_filename=b'foo bar1',\n modified_file_details=b'0000000000000000000000000000000000000000',\n old_unix_mode='100644',\n deleted=True,\n data=diff)", "def has_staged_changes(repo):\n return subprocess.call(['git', 'diff-index', '--cached', '--quiet', 'HEAD'],\n cwd=repo) != 0", "def test_diff_git_line_without_a_b_quotes(self):\n diff = (\n b'diff --git \"foo\" \"foo\"\\n'\n b'deleted file mode 100644\\n'\n b'index 612544e4343bf04967eb5ea80257f6c64d6f42c7..'\n b'0000000000000000000000000000000000000000\\n'\n )\n\n parsed_files = self.tool.get_parser(diff).parse()\n self.assertEqual(len(parsed_files), 1)\n\n self.assert_parsed_diff_file(\n parsed_files[0],\n orig_filename=b'foo',\n orig_file_details=b'612544e4343bf04967eb5ea80257f6c64d6f42c7',\n modified_filename=b'foo',\n modified_file_details=b'0000000000000000000000000000000000000000',\n old_unix_mode='100644',\n deleted=True,\n data=diff)", "def changelog_updated(target_branch):\n\n output = subprocess.getoutput(['git diff HEAD 
origin/{}'.format(target_branch)])\n return 'a/changelog.md b/changelog.md' in output.lower()", "def test_noChangeFromTrunk(self):\n runCommand([\"git\", \"checkout\", \"-b\", \"mypatch\"], cwd=self.repo.path)\n\n logs = []\n\n with self.assertRaises(SystemExit) as e:\n CheckNewsfragmentScript(logs.append).main([self.repo.path])\n\n self.assertEqual(e.exception.args, (0,))\n self.assertEqual(\n logs[-1], \"On trunk or no diffs from trunk; no need to look at this.\"\n )", "def index_is_dirty():\n result, output = popen('git diff --cached', False, False)\n return len(output) > 0", "def svn_diff_contains_conflicts(diff):\n return _diff.svn_diff_contains_conflicts(diff)", "def clean_diff(diff):\n res = []\n skip = True\n for line in diff.split('\\n'):\n if line.startswith('diff --git'):\n skip = True\n if line.startswith('@@ '):\n skip = False\n if not skip:\n res.append(line)\n return '\\n'.join(res)", "def git_removed_files(self):\n\n etc_tracked = self.repo.tracked_files('etc-tmp')\n for rpath in etc_tracked:\n etc_file = os.path.join(self.root_dir, rpath)\n if not os.path.lexists(etc_file):\n self.etc_commits.removed.rpaths.append(rpath)\n self.etc_commits.removed.commit()\n\n master_tracked = self.repo.tracked_files('master-tmp')\n for rpath in master_tracked:\n etc_file = os.path.join(self.root_dir, rpath)\n if not os.path.lexists(etc_file):\n self.master_commits.removed.rpaths.append(rpath)\n self.master_commits.removed.commit()", "def test_with_commit_history(self):\n\t\treview_request = self.create_review_request(create_repository=True, publish=True)\n\t\tdiffset = self.create_diffset(review_request=review_request)\n\t\tself.create_diffcommit(diffset=diffset, commit_id=\"r1\", parent_id=\"r0\", diff_contents=(b\"diff --git a/ABC b/ABC\\n\" b\"index 94bdd3e..197009f 100644\\n\" b\"--- ABC\\n\" b\"+++ ABC\\n\" b\"@@ -1,1 +1,1 @@\\n\" b\"-line!\\n\" b\"+line..\\n\"))\n\t\tself.create_diffcommit(diffset=diffset, commit_id=\"r2\", parent_id=\"r1\", diff_contents=(b\"diff --git a/README b/README\\n\" b\"index 94bdd3e..197009f 100644\\n\" b\"--- README\\n\" b\"+++ README\\n\" b\"@@ -1,1 +1,1 @@\\n\" b\"-Hello, world!\\n\" b\"+Hi, world!\\n\"))\n\t\tself.create_diffcommit(diffset=diffset, commit_id=\"r4\", parent_id=\"r3\", diff_contents=(b\"diff --git a/README b/README\\n\" b\"index 197009f..87abad9 100644\\n\" b\"--- README\\n\" b\"+++ README\\n\" b\"@@ -1,1 +1,1 @@\\n\" b\"-Hi, world!\\n\" b\"+Yo, world.\\n\"))\n\t\tcumulative_diff = b\"diff --git a/ABC b/ABC\\n\" b\"index 94bdd3e..197009f 100644\\n\" b\"--- ABC\\n\" b\"+++ ABC\\n\" b\"@@ -1,1 +1,1 @@\\n\" b\"-line!\\n\" b\"+line..\\n\" b\"diff --git a/README b/README\\n\" b\"index 94bdd3e..87abad9 100644\\n\" b\"--- README\\n\" b\"+++ README\\n\" b\"@@ -1,1 +1,1 @@\\n\" b\"-Hello, world!\\n\" b\"+Yo, world.\\n\"\n\t\tdiffset.finalize_commit_series(cumulative_diff=cumulative_diff, validation_info=None, validate=False, save=True)\n\t\tresponse = self.client.get(\"/r/%d/diff/raw/\" % review_request.pk)\n\t\tself.assertEqual(response.content, cumulative_diff)", "def test_diffFromTrunkNoNewsfragments(self):\n runCommand([\"git\", \"checkout\", \"-b\", \"mypatch\"], cwd=self.repo.path)\n somefile = self.repo.child(\"somefile\")\n somefile.setContent(b\"change\")\n\n runCommand([\"git\", \"add\", somefile.path, somefile.path], cwd=self.repo.path)\n runCommand([\"git\", \"commit\", \"-m\", \"some file\"], cwd=self.repo.path)\n\n logs = []\n\n with self.assertRaises(SystemExit) as e:\n 
CheckNewsfragmentScript(logs.append).main([self.repo.path])\n\n self.assertEqual(e.exception.args, (1,))\n self.assertEqual(logs[-1], \"No newsfragment found. Have you committed it?\")", "def deal_lines(self, lines, conf):\n if lines == ['']:\n print \"NO new %s commit!\" % conf\n else:\n for line in lines:\n if re.search('\\d+ files? changed', line) is None:\n pos = line.find(' ')\n if pos != -1:\n try:\n parts = line.split(' ', 2)\n commit_id = parts[0]\n self.current_commit = commit_id\n stamp = int(parts[1])\n ti = datetime.datetime.fromtimestamp(float(stamp))\n s_time = datetime.datetime.fromtimestamp(float(0))\n if self.start_date == s_time:\n self.start_date = ti\n elif self.start_date > ti:\n self.start_date = ti\n author, mail = parts[2].split('<', 1)\n message = mail.split('> ', 1)[1]\n mail = mail.split('>', 1)[0]\n if re.search(': ', message) is not None:\n messagetype = message.split(': ', 1)[0]\n if messagetype not in CLASSIFICATION:\n messagetype = 'OTR'\n else:\n messagetype = 'OTR'\n if commit_id not in self.commit_dictionary:\n self.commit_dictionary[commit_id]\\\n = [commit_id, mail,\n stamp, messagetype,\n messagetype, 0, 0, 0, 0]\n # [files, inserted, deleted, total_lines]\n if mail not in self.author_dictionary:\n self.author_dictionary[mail] = [author,\n mail, 0, 0,\n 0, 0, 1,\n stamp]\n # [files,inserted,deleted,total_lines,commit,stamp]\n else:\n self.author_dictionary[mail][6] += 1\n if stamp > self.author_dictionary[mail][7]:\n self.author_dictionary[mail][7] = stamp\n self.total_patches += 1\n except:\n print 'Warning: unexpected line \"%s\"' % line\n else:\n if conf == 'no_merges':\n try:\n commit_id = self.current_commit\n numbers = self.getstatsummarycounts(line)\n if len(numbers) == 3:\n (files, inserted, deleted) = \\\n map(lambda el: int(el), numbers)\n total_lines = inserted - deleted\n self.commit_dictionary[commit_id][5] = files\n self.commit_dictionary[commit_id][6] = inserted\n self.commit_dictionary[commit_id][7] = deleted\n self.commit_dictionary[commit_id][8] = total_lines\n self.author_dictionary[mail][2] += files\n self.author_dictionary[mail][3] += inserted\n self.author_dictionary[mail][4] += deleted\n self.author_dictionary[mail][5] += total_lines\n self.total_lines_inserted += inserted\n self.total_lines_deleted += deleted\n self.total_lines += total_lines\n self.current_commit = None\n except:\n print 'Warning: unexpected line \"%s\"' % line", "def _already_copied_commit(self, commit_sha1, branch_id):\n if not self.already_copied_commit_runner:\n return False\n return self.already_copied_commit_runner.already_copied_commit(\n commit_sha1, branch_id)", "def _gitline_comparator(self, a, b):\n if a.startswith('!'):\n return -1\n elif b.startswith('!'):\n return 1\n else:\n return a == b", "def _gitline_comparator(self, a, b):\n if a.startswith('!'):\n return -1\n elif b.startswith('!'):\n return 1\n else:\n return a == b" ]
[ "0.6234145", "0.59902537", "0.59753424", "0.59653366", "0.5901322", "0.5837043", "0.58310616", "0.5809848", "0.58078736", "0.5804549", "0.5749114", "0.5715498", "0.56908053", "0.5660887", "0.56248826", "0.55297625", "0.55249697", "0.54839456", "0.5470019", "0.54518175", "0.5445477", "0.53842133", "0.5379666", "0.53725904", "0.53307176", "0.53242326", "0.5321662", "0.53024065", "0.5286769", "0.5286769" ]
0.6817716
0
Check added or changed code has not been moved. We want to determine whether later commits in the same branch move code that was updated in an earlier commit.
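As an illustrative aside (a sketch assuming exact-line matching is what is wanted, not the project's own code), the move check boils down to `git log -G` with the line escaped and anchored as a whole-line regex; the sha1s here are placeholders.

# Sketch: list later commits whose diffs add or remove a line exactly equal to
# `line`, using git log -G (regex pickaxe). All arguments are placeholders.
import re
import subprocess

def commits_moving_line(line, commit_sha1, head_sha1):
    # -G takes a regex, so escape the literal line and anchor it to whole lines
    pattern = '^{0}$'.format(re.escape(line))
    cmd = ['git', 'log', '--oneline', '--no-abbrev', '--reverse',
           '-G' + pattern, '{0}..{1}'.format(commit_sha1, head_sha1)]
    output = subprocess.check_output(cmd).decode('utf-8', 'replace')
    return [entry.split(' ', 1)[0] for entry in output.splitlines() if entry]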
def _check_diff_move(commit_sha1, head_sha1): commit_info = {} branch_sha1s = [] # Get list of commits between this one and the branch head git_log_cmd = shlex.split( 'git log --oneline --no-abbrev --reverse ' '{commit_sha1}..{head_sha1}'.format( commit_sha1=commit_sha1, head_sha1=head_sha1)) git_log_output = subprocess.check_output(git_log_cmd) for git_log_line in git_log_output.splitlines(): if git_log_line == '': continue branch_sha1, _ = git_log_line.split(' ', 1) branch_sha1s.append(branch_sha1) # If there are no commits to check then just return an empty dict # and empty list tuple if branch_sha1s == []: return commit_info, branch_sha1s diff_lines = _parse_diff(commit_sha1) context = 'diff-move-check' for diff_line in diff_lines: line_type, line = diff_line[0], diff_line[1:] # Skip blank lines if line == '': continue # Use the -G parameter of git log to check whether an added or # deleted line was moved in a later commit # Escape regex meta-characters line = re.sub(r'([].^$*+?{}\\[|()"])', r'\\\1', line) git_log_g_str = ( 'git log --oneline --no-abbrev --reverse -G"^{line}$" ' '{commit_sha1}..{head_sha1}'.format( line=line, commit_sha1=commit_sha1, head_sha1=head_sha1)) try: git_log_g_cmd = shlex.split(git_log_g_str) print 'Running git log -G"^{line}$"'.format(line=line) print 'git_log_g_cmd: {git_log_g_cmd}'.format( git_log_g_cmd=git_log_g_cmd) git_log_g_output = subprocess.check_output(git_log_g_cmd) print 'git_log_g_output: {git_log_g_output}'.format( git_log_g_output=git_log_g_output) except (subprocess.CalledProcessError, ValueError) as e: print 'Exception when running git log -G"^{line}$"'.format(line=line) print 'Exception was {e}'.format(e=e) try: print 'git_log_g_cmd: {git_log_g_cmd}'.format( git_log_g_cmd=git_log_g_cmd) except Exception as ex: print 'git_log_g_cmd not defined: {ex}'.format(ex=ex) print ( 'Failed to run shlex.split on {git_log_g_str}'.format( git_log_g_str=git_log_g_str)) git_log_g_output = '' pass for git_log_g_line in git_log_g_output.splitlines(): sha1_g, _ = git_log_g_line.split(' ', 1) if sha1_g not in commit_info.keys(): message = None if line_type == '+': description = ( 'Removes a line matching a line added in ' '{commit_sha1}'.format(commit_sha1=commit_sha1)) message = context, description elif line_type == '-': description = ( 'Re-adds a line matching a line removed in ' '{commit_sha1}'.format(commit_sha1=commit_sha1)) message = context, description else: print ( 'Got line_type "{line_type}" instead of ' '"-" or "+" in _check_diff_move'.format(line_type=line_type)) commit_info[sha1_g] = [message] # Remove this sha1 from branch_sha1s if sha1_g in branch_sha1s: branch_sha1s.remove(sha1_g) # If we have already marked all the existing commits in the # branch, then break out of the loop if branch_sha1s == []: return commit_info, branch_sha1s return commit_info, branch_sha1s
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def local_changes():\n result, output = popen('git status', False, False)\n try:\n return not output[-1].startswith(\"nothing to commit\")\n except IndexError:\n return True", "def check_move(self, move):\n\n if str(move) in self.moves_made:\n return False\n return True", "def move_check(self):\r\n \r\n if not self.run:\r\n return False\r\n \r\n if self.get_num_legal_moves() == 0:\r\n SlTrace.lg(\"NO more legal moves!\", \"nolegalmoves\")\r\n ###return False \r\n \r\n if self.new_move:\r\n self.announce_player(\"start_move\")\r\n if SlTrace.trace(\"selected\"):\r\n self.list_selected(\"After start_move\")\r\n self.new_move = False\r\n player = self.get_player()\r\n if player is None:\r\n return False\r\n \r\n return True", "def check_unstaged_changes(self):\n pass", "def has_moved(self):\n return self.move_count > 0", "def _already_copied_commit(self, commit_sha1, branch_id):\n if not self.already_copied_commit_runner:\n return False\n return self.already_copied_commit_runner.already_copied_commit(\n commit_sha1, branch_id)", "def has_changes(directory=None):\n out = check_output('git status', shell=True, cwd=directory)\n if 'nothing to commit (working directory clean)' in out:\n return False\n if 'nothing to commit, working directory clean' in out:\n return False\n if 'nothing to commit, working tree clean' in out:\n return False\n if 'nothing added to commit' in out:\n return False\n return True", "def has_moved(self):\n return bool(self.rename_phases)", "def has_changes(self):\n if self.repo_is_empty:\n return True\n\n tree = self.repo.get(self.index.write_tree(self.repo))\n diff = tree.diff_to_tree(self.repo.get(self.repo.head.target).tree)\n return bool(diff)", "def is_rev_dirty(ctx: \"PlanemoCliContext\", directory: str) -> bool:\n return io.shell([\"git\", \"diff\", \"--quiet\"], cwd=directory) != 0", "def op_move_preconditions(self):\n\n if(self.next_move != self.FREE):\n return False\n\n return True", "def _can_checkout(wit_path) -> bool:\n\n current_id = _get_head(wit_path)\n changes_to_be_committed = _return_as_string(_get_changes_to_be_committed, wit_path, current_id)\n changes_not_staged_for_commit = _return_as_string(_get_changes_not_staged_for_commit, wit_path)\n if changes_to_be_committed + changes_not_staged_for_commit == '':\n return True\n logging.error(FileNotSavedError('Some files are not saved. Try \"status\" command to view them.'))\n return False", "def _check_previous_func_code(self, stacklevel=2):\r\n # First check if our function is in the in-memory store.\r\n # Using the in-memory store not only makes things faster, but it\r\n # also renders us robust to variations of the files when the\r\n # in-memory version of the code does not vary\r\n try:\r\n if self.func in _FUNCTION_HASHES:\r\n # We use as an identifier the id of the function and its\r\n # hash. This is more likely to falsely change than have hash\r\n # collisions, thus we are on the safe side.\r\n func_hash = self._hash_func()\r\n if func_hash == _FUNCTION_HASHES[self.func]:\r\n return True\r\n except TypeError:\r\n # Some callables are not hashable\r\n pass\r\n\r\n # Here, we go through some effort to be robust to dynamically\r\n # changing code and collision. 
We cannot inspect.getsource\r\n # because it is not reliable when using IPython's magic \"%run\".\r\n func_code, source_file, first_line = get_func_code(self.func)\r\n func_dir = self._get_func_dir()\r\n func_code_file = os.path.join(func_dir, 'func_code.py')\r\n\r\n try:\r\n with open(func_code_file) as infile:\r\n old_func_code, old_first_line = \\\r\n extract_first_line(infile.read())\r\n except IOError:\r\n self._write_func_code(func_code_file, func_code, first_line)\r\n return False\r\n if old_func_code == func_code:\r\n return True\r\n\r\n # We have differing code, is this because we are referring to\r\n # different functions, or because the function we are referring to has\r\n # changed?\r\n\r\n _, func_name = get_func_name(self.func, resolv_alias=False,\r\n win_characters=False)\r\n if old_first_line == first_line == -1 or func_name == '<lambda>':\r\n if not first_line == -1:\r\n func_description = '%s (%s:%i)' % (func_name,\r\n source_file, first_line)\r\n else:\r\n func_description = func_name\r\n warnings.warn(JobLibCollisionWarning(\r\n \"Cannot detect name collisions for function '%s'\"\r\n % func_description), stacklevel=stacklevel)\r\n\r\n # Fetch the code at the old location and compare it. If it is the\r\n # same than the code store, we have a collision: the code in the\r\n # file has not changed, but the name we have is pointing to a new\r\n # code block.\r\n if not old_first_line == first_line and source_file is not None:\r\n possible_collision = False\r\n if os.path.exists(source_file):\r\n _, func_name = get_func_name(self.func, resolv_alias=False)\r\n num_lines = len(func_code.split('\\n'))\r\n with open(source_file) as f:\r\n on_disk_func_code = f.readlines()[\r\n old_first_line - 1:old_first_line - 1 + num_lines - 1]\r\n on_disk_func_code = ''.join(on_disk_func_code)\r\n possible_collision = (on_disk_func_code.rstrip()\r\n == old_func_code.rstrip())\r\n else:\r\n possible_collision = source_file.startswith('<doctest ')\r\n if possible_collision:\r\n warnings.warn(JobLibCollisionWarning(\r\n 'Possible name collisions between functions '\r\n \"'%s' (%s:%i) and '%s' (%s:%i)\" %\r\n (func_name, source_file, old_first_line,\r\n func_name, source_file, first_line)),\r\n stacklevel=stacklevel)\r\n\r\n # The function has changed, wipe the cache directory.\r\n # XXX: Should be using warnings, and giving stacklevel\r\n if self._verbose > 10:\r\n _, func_name = get_func_name(self.func, resolv_alias=False)\r\n self.warn(\"Function %s (stored in %s) has changed.\" %\r\n (func_name, func_dir))\r\n self.clear(warn=True)\r\n return False", "def hasMoved(self):\r\n if BLENDER_MODE == 'BGE':\r\n world_tranform = self.obj.worldTransform.copy()\r\n elif BLENDER_MODE == 'BPY':\r\n world_tranform = self.obj.matrix_world.copy()\r\n\r\n # if objed has not yet been checked\r\n if not self.old_worldTransform:\r\n self.old_worldTransform = world_tranform\r\n return True\r\n\r\n elif self._areDifferent_Mat44(world_tranform, self.old_worldTransform, self.moveThresholdLoc, self.moveThresholdRot):\r\n # moved since last check\r\n self.old_worldTransform = world_tranform\r\n return True\r\n else:\r\n # did not move since last check\r\n return False", "def has_unstaged_changes(repo):\n subprocess.check_call(['git', 'update-index', '-q', '--ignore-submodules',\n '--refresh'], cwd=repo)\n return subprocess.call(['git', 'diff-index', '--quiet', 'HEAD'],\n cwd=repo) != 0", "def is_new_move(my_board, x, y):\n return my_board[x, y] == CLOSED", "def checks(self, event):\n if 
ListenerContainer.is_syncing:\n if ListenerContainer.move_to_folder:\n try:\n ListenerContainer.client.delete_folder(ListenerContainer.move_to_folder)\n except error_perm or error_reply: #TODO\n reset()\n # pass # nothing to delete\n # except error_reply:\n # reset()\n ListenerContainer.move_to_folder = None\n if ListenerContainer.move_to_file:\n try:\n ListenerContainer.client.delete_file(ListenerContainer.move_to_file)\n except error_perm or error_reply: # TODO\n reset()\n # pass # nothing to delete\n # except error_reply:\n # reset()\n ListenerContainer.move_to_file = None\n if event.pathname[-1] == '~': # Temp file\n return False\n else:\n return True\n else:\n timer = now()\n if ListenerContainer.move_to_folder:\n x = [timer, 'DELFOLDER', event.pathname]\n ListenerContainer.sync_db.quick_push(x)\n ListenerContainer.move_to_folder = None\n if ListenerContainer.move_to_file:\n x = [timer, 'DELFILE', event.pathname]\n ListenerContainer.sync_db.quick_push(x)\n ListenerContainer.move_to_file = None\n if event.pathname[-1] == '~': # Temp file\n return False\n else:\n return True", "def has_state_changed(self) -> bool:\r\n ...", "def move(self) -> bool:\n pass", "def test_verify_move(self):\n self._verify([self.applied_commands['move']])", "def _maybe_move(self, source_chunk, target_chunk, path_index, move_func):\n if len(source_chunk.paths) <= 1:\n return False\n\n move_time = source_chunk.paths[path_index].time\n\n new_source_badness = self._badness(source_chunk.time - move_time)\n new_target_badness = self._badness(target_chunk.time + move_time)\n\n delta_badness = ((new_source_badness + new_target_badness) -\n (source_chunk.badness + target_chunk.badness))\n if delta_badness < 0:\n move_func()\n return True\n\n return False", "def move_valid(move):\n return True", "def is_release_notes_changed(self):\n # there exists a difference between origin/master and current branch\n if self.master_diff:\n diff_releases = self.master_diff.split('##')\n unreleased_section = diff_releases[1]\n unreleased_section_lines = unreleased_section.split('\\n')\n\n adds_in_diff = 0\n removes_in_diff = 0\n\n for line in unreleased_section_lines:\n if line.startswith('+'):\n adds_in_diff += 1\n elif line.startswith('-') and not re.match(r'- *$', line):\n removes_in_diff += 1\n\n # means that at least one new line was added\n if adds_in_diff - removes_in_diff > 0:\n return True\n\n print_error(F'No new comment has been added in the release notes file: {self.release_notes_path}')\n return False", "def _check_for_added_blocks(old_components, new_components):\n for new_component_name, new_component in new_components.items():\n if new_component_name not in old_components and len(new_component.blocks) != 0:\n return True\n return False", "def check_move_states(self, player, depth):\n\n if depth >= self.look_ahead:\n return True\n\n for move in gen_moves(player, self.__state.board, self.checker):\n self.__state.push(move)\n winner = self.checker.check_game_over(self.__pid, self.__opponent)\n if winner == self.__opponent:\n return False\n worker = move['xy2']\n if not self.check_build_states(player, worker, depth):\n return False\n self.__state.pop()\n return True", "def status_change(previous, current):\n if previous in look_for:\n return current not in look_for\n elif current in look_for:\n return previous not in look_for", "def checkValidMove(self, move):\n boardCopy = copy.deepcopy(self)\n tilesChange = False\n if move == Move.UP:\n boardCopy.moveUp()\n elif move == Move.DOWN:\n boardCopy.moveDown()\n elif move == 
Move.LEFT:\n boardCopy.moveLeft()\n elif move == Move.RIGHT:\n boardCopy.moveRight()\n else:\n raise ValueError('Invalid Move was input')\n \n for i in range(4):\n for j in range(4):\n if boardCopy.getTile(i,j) != self.getTile(i,j):\n tilesChange = True\n del(boardCopy)\n return tilesChange", "def is_changed(self, include_md: bool = True) -> bool:\n current = self.calculate_hash(include_md=include_md)\n stored = self.hash if include_md else self.stub_hash\n log.trace(f\"changed = {self.hash != current} | Stored: {stored} | Current: {current}\")\n return stored != current", "def repo_has_uncommitted():\n buff = subprocess.check_output(['hg', 'status'])\n\n if len(buff):\n print('Dirty / uncommitted changes in repository!')\n return True\n\n return False", "def has_staged_changes(repo):\n return subprocess.call(['git', 'diff-index', '--cached', '--quiet', 'HEAD'],\n cwd=repo) != 0" ]
[ "0.6227778", "0.6157627", "0.604345", "0.60423243", "0.5937032", "0.59338146", "0.5875718", "0.58480257", "0.5824385", "0.57581633", "0.5725834", "0.5703709", "0.5675813", "0.5660672", "0.5636747", "0.5629766", "0.560405", "0.5584103", "0.5574079", "0.5573388", "0.55449444", "0.5530109", "0.55168825", "0.5466938", "0.5458058", "0.54422027", "0.5439355", "0.5430207", "0.542779", "0.5411668" ]
0.61819804
1
Check the commit message and commit diff. The commit message is checked for the following: Title is 50 characters or less; Title is in imperative mood; Title begins with a capital letter; Title begins with a verb (see commit_title_start_words); Title does not end in punctuation or whitespace; There is a blank line separating the title and body; The commit message body lines do not exceed 72 characters; The commit title doesn't start with fixup! or squash!. The commit diff is checked to verify that it doesn't introduce trailing whitespace or extra blank lines at the end of the file.
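For orientation only, a minimal sketch of a few of the title rules listed above; the verb list is a made-up placeholder, not the real commit_title_start_words, and the full checker recorded below is the authoritative version.

# Sketch of some commit-title checks; START_WORDS is a placeholder verb list.
import re

START_WORDS = {'Add', 'Fix', 'Remove', 'Update'}

def check_title(title):
    errors = []
    first_word = title.split(' ', 1)[0]
    if len(title) > 50:
        errors.append('Commit title longer than 50 characters')
    if re.match(r'^[^A-Z]', first_word):
        errors.append('Commit title is not capitalized')
    if first_word not in START_WORDS:
        errors.append('Commit title does not begin with a known verb')
    if re.search(r'[\s\W]$', title):
        errors.append('Commit title ends in whitespace or punctuation')
    if re.match(r'^(fixup|squash)!', title):
        errors.append('Commit title starts with fixup! or squash!')
    return errors

print(check_title('fixup! add stuff.'))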
def _validate_commit( commit_sha1, merge, author, committer, title, separator, body): errors = [] # List of words a commit title can start with commit_title_start_words = filter( lambda x: x, COMMIT_TITLE_START_WORDS.splitlines()) author_errors = _validate_email(author, 'Author') committer_errors = _validate_email(committer, 'Committer') if author_errors: errors.extend(author_errors) if committer_errors: errors.extend(committer_errors) title_words = title.split(' ', 1) # Check if in imperative tense if re.search(r'(ed|ing|s)$', title_words[0]): errors.append(( 'title-imperative-tense-check', 'Commit title is not in imperative tense')) # Check if first word is capitalized if re.match(r'^[^A-Z]', title_words[0]): errors.append(( 'title-capitalization-check', 'Commit title is not capitalized')) # Check if title begins with known start word if title_words[0] not in commit_title_start_words: errors.append(( 'title-verb-check', 'Commit title does not begin with a verb')) # Check if this is a fixup! commit if re.match(r'^fixup!', title_words[0]): errors.append(( 'title-fixup-check', 'Commit title starts with fixup! ')) # Check if this is a squash! commit if re.match(r'^squash!', title_words[0]): errors.append(( 'title-squash-check', 'Commit title starts with squash! ')) # Check if the commit title ends in whitespace or punctuation if len(title_words) > 1 and re.search(r'[\s\W]$', title_words[1]): errors.append(( 'title-whitespace-punctuation-check', 'Commit title ends in whitespace or punctuation')) # Check if the title is greater than 50 characters in length if len(title) > 50: errors.append(( 'title-length-check', 'Commit title longer than 50 characters')) # Check if separator line (between title and body) is empty if separator is not None and separator != '': errors.append(( 'message-separator-check', 'Missing blank line between title and body')) # Check if the commit message has a body if body == []: errors.append(( 'body-check', 'Missing commit message body')) # Check if any line in the body is greater than 72 characters in legnth for body_line in body: if len(body_line) <= 72: continue errors.append(( 'body-length-check', 'Commit message body line > 72 characters')) break # Check if commit is a merge commit if merge is not None: errors.append(( 'commit-merge-check', 'Commit is a merge commit')) # Check commit diff for whitespace errors git_diff_cmd = shlex.split( 'git show --check {commit_sha1}'.format( commit_sha1=commit_sha1)) has_whitespace_issue = None f, _ = tempfile.mkstemp() has_whitespace_issue = subprocess.call(git_diff_cmd, stdout=f, stderr=f, close_fds=True) os.close(f) if has_whitespace_issue: errors.append(( 'diff-whitespace-check', 'Commit diff has whitespace issues')) return errors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_commit_problems(self, commit, diff):\n\n # Initialise\n self._license_problem = False\n self._commit_problem = False\n self._commit_notes = defaultdict(list)\n\n # Unsafe regex checks...\n unsafe_matches = list()\n unsafe_matches.append( r\"\\b(KRun::runCommand|K3?ShellProcess|setUseShell|setShellCommand)\\b\\s*[\\(\\r\\n]\" )\n unsafe_matches.append( r\"\\b(system|popen|mktemp|mkstemp|tmpnam|gets|syslog|strptime)\\b\\s*[\\(\\r\\n]\" )\n unsafe_matches.append( r\"(scanf)\\b\\s*[\\(\\r\\n]\" )\n valid_filename_regex = r\"\\.(cpp|cc|cxx|C|c\\+\\+|c|l|y||h|H|hh|hxx|hpp|h\\+\\+|qml)$\"\n\n # Retrieve the diff and do the problem checks...\n filename = unicode(\"\")\n filediff = list()\n for line in diff:\n file_change = re.match( \"^diff --(cc |git a\\/.+ b\\/)(.+)$\", line )\n if file_change:\n # Are we changing file? If so, we have the full diff, so do a license check....\n if filename != \"\" and commit.files_changed[ filename ][\"change\"] in ['A'] and re.search(valid_filename_regex, filename):\n self.check_commit_license(filename, ''.join(filediff))\n\n filediff = list()\n filename = file_change.group(2)\n continue\n\n # Diff headers are bogus\n if re.match(\"@@ -\\d+,\\d+ \\+\\d+ @@\", line):\n filediff = list()\n continue\n\n # Do an incremental check for *.desktop syntax errors....\n if re.search(\"\\.desktop$\", filename) and re.search(\"[^=]+=.*[ \\t]$\", line) and line.startswith(\"+\") and not re.match(\"^\\+#\", line):\n self._commit_notes[filename].append( \"[TRAILING SPACE] **\" )\n self._commit_problem = True\n\n # Check for things which are unsafe...\n for safety_match in unsafe_matches:\n match = re.match(safety_match, line)\n if match:\n note = \"[POSSIBLY UNSAFE: {0}] **\".format( match.group(1) )\n self._commit_notes[filename].append(note)\n self._commit_problem = True\n\n # Store the diff....\n filediff.append(line)\n\n if filename != \"\" and commit.files_changed[ filename ][\"change\"] in ['A'] and re.search(valid_filename_regex, filename):\n self.check_commit_license(filename, ''.join(filediff))", "def check_commit_msg(commitish):\n\n hdr = CommitSubHeader()\n line_list = dump_raw_body(commitish)\n\n if COMMIT_MESSAGE_CHECK and line_list[1] != \"\":\n if line_list[1].find('REF: ') == -1:\n add_error(\"Summary field must have just one line in %s\" % commitish)\n else:\n add_error(\"No empty line after Summary field in %s\" % commitish)\n\n if COMMIT_MESSAGE_CHECK and len(line_list[0]) < 5 or len(line_list[0]) > 78:\n add_error(\"Wrong size (%d) of Summary field in %s\" % (len(line_list[0]), commitish))\n\n while len(line_list) != 0:\n line = line_list.pop(0)\n\n if line.find('REF: ') == 0:\n if hdr.ref == None:\n hdr.ref = 1 # Not None\n elif COMMIT_MESSAGE_CHECK:\n add_error(\"Field 'REF:' must be once in %s\" % commitish)\n continue\n\n if COMMIT_MESSAGE_CHECK and not Commit.rt_header_fields['REF: '].match(line[len('REF: '):]):\n add_error(\"Wrong field 'REF:' in %s\" % commitish)\n else:\n hdr.ref = line[len('REF: '):]\n\n elif line.find('Signed-off-by: ') == 0:\n if hdr.signed == None:\n hdr.signed = 1 # Not None\n elif COMMIT_MESSAGE_CHECK:\n add_error(\"Field 'Signed-off-by:' must be once in %s\" % commitish)\n continue\n\n if COMMIT_MESSAGE_CHECK and not Commit.rt_header_fields['Signed-off-by: '].match(line[len('Signed-off-by: '):]):\n add_error(\"Wrong field 'Signed-off-by:' in %s\" % commitish)\n else:\n hdr.signed = line[len('Signed-off-by: '):]\n\n elif len(line) != 0:\n hdr.desc = 1\n if COMMIT_MESSAGE_CHECK and len(line) > 78:\n 
add_error(\"Wrong size (%d) of field 'Description' in %s\" % (len(line), commitish))\n\n if COMMIT_MESSAGE_CHECK and hdr.ref == None:\n add_error(\"No field 'REF:' in %s\" % commitish)\n if COMMIT_MESSAGE_CHECK and hdr.desc == None:\n add_error(\"No field 'Description' in %s\" % commitish)\n if COMMIT_MESSAGE_CHECK and hdr.signed == None:\n add_error(\"No field 'Signed-off-by:' in %s\" % commitish)\n\n return hdr", "def _diff_and_commit(self, commit_msg=''):\n if not commit_msg:\n if 'commit_msg' not in self.data:\n # Ask until we get a non-empty commit message.\n while not commit_msg:\n commit_msg = utils.get_input(\n \"What is the commit message? \")\n else:\n commit_msg = self.data['commit_msg']\n\n diff_cmd = self.vcs.cmd_diff()\n diff = execute_command(diff_cmd)\n if sys.version.startswith('2.6.2'):\n # python2.6.2 bug... http://bugs.python.org/issue5170 This is the\n # spot it can surface as we show a part of the changelog which can\n # contain every kind of character. The rest is mostly ascii.\n print(\"Diff results:\")\n print(diff)\n else:\n # Common case\n logger.info(\"The '%s':\\n\\n%s\\n\", diff_cmd, diff)\n if utils.ask(\"OK to commit this\"):\n msg = commit_msg % self.data\n msg = self.update_commit_message(msg)\n commit_cmd = self.vcs.cmd_commit(msg)\n commit = execute_command(commit_cmd)\n logger.info(commit)", "def _validate_commits(pull_request):\n commits = github.get_commits(pull_request[\"commits_url\"])\n analyzed = []\n\n for commit_wrapper in commits:\n commit = {\n \"sha\": commit_wrapper[\"sha\"],\n \"message\": commit_wrapper[\"commit\"][\"message\"],\n }\n\n commit[\"standard\"] = _validate_title(commit[\"message\"])\n analyzed.append(commit)\n\n result = all(commit[\"standard\"] for commit in analyzed)\n return analyzed, result", "def lint_commit_message(commit):\n success = True\n lines = commit.message.splitlines()\n\n # Check length of summary line.\n summary_line_len = len(lines[0])\n if summary_line_len > COMMIT_MSG_MAX_SUMMARY_LEN:\n error(\n \"The summary line in the commit message is %d characters long; \"\n \"only %d characters are allowed.\" %\n (summary_line_len, COMMIT_MSG_MAX_SUMMARY_LEN), commit)\n success = False\n\n # Check that summary line does not end with a period\n if lines[0].endswith('.'):\n error(\"The summary line must not end with a period.\", commit)\n success = False\n\n # Check that we don't have any fixups.\n if lines[0].startswith('fixup!'):\n error(\"Fixup commits are not allowed. Please resolve by rebasing.\",\n commit)\n success = False\n\n # Try to determine whether we got an area prefix in the commit message:\n summary_line_split = lines[0].split(':')\n summary_line_split_len = len(summary_line_split)\n\n # We didn't get an area prefix, so just make sure the message started with a\n # capital letter.\n if summary_line_split_len == 1:\n if not re.match(r'[A-Z]', lines[0]):\n error(\"The summary line must start with a capital letter.\", commit)\n success = False\n # The user specified an area on which she worked.\n elif summary_line_split_len == 2:\n if not re.match(r'[a-z_A-Z\\-]*(/[a-z_A-Z\\-]+)*', summary_line_split[0]):\n error(\n 'The area specifier is mal-formed. Only letters,'\n 'underscores and hyphens are allowed. 
Different areas must be'\n 'separated by a slash.', commit)\n success = False\n # Check the second part of the commit message.\n if not summary_line_split[1].startswith(' '):\n error(\"The area must be separated by a single space.\", commit)\n success = False\n if not re.match(r'\\s[A-Z]', summary_line_split[1]):\n error(\n \"The summary line after the colon must start with a capital letter.\",\n commit)\n success = False\n # We do not allow more than one area i.e., colon.\n else:\n error(\"Only one colon is allowed to specify the area of changes.\",\n commit)\n success = False\n\n # Check for an empty line separating the summary line from the long\n # description.\n if len(lines) > 1 and lines[1] != \"\":\n error(\n \"The second line of a commit message must be empty, as it \"\n \"separates the summary from the long description.\", commit)\n success = False\n\n return success", "def validate(self, title, _commit):\n gitmojis = requests.get(\n \"https://raw.githubusercontent.com/carloscuesta/gitmoji/master/packages/gitmojis/src/gitmojis.json\"\n ).json()[\"gitmojis\"]\n emojis = [item[\"emoji\"] for item in gitmojis]\n pattern = r\"^({:s})\\(.*\\)\\s[a-z].*$\".format(\"|\".join(emojis))\n if not re.search(pattern, title):\n violation_msg = 'Title does not match regex \"<gitmoji>(<scope>) <subject>\"'\n return [RuleViolation(self.id, violation_msg, title)]", "def test_with_commit_history(self):\n\t\treview_request = self.create_review_request(create_repository=True, publish=True)\n\t\tdiffset = self.create_diffset(review_request=review_request)\n\t\tself.create_diffcommit(diffset=diffset, commit_id=\"r1\", parent_id=\"r0\", diff_contents=(b\"diff --git a/ABC b/ABC\\n\" b\"index 94bdd3e..197009f 100644\\n\" b\"--- ABC\\n\" b\"+++ ABC\\n\" b\"@@ -1,1 +1,1 @@\\n\" b\"-line!\\n\" b\"+line..\\n\"))\n\t\tself.create_diffcommit(diffset=diffset, commit_id=\"r2\", parent_id=\"r1\", diff_contents=(b\"diff --git a/README b/README\\n\" b\"index 94bdd3e..197009f 100644\\n\" b\"--- README\\n\" b\"+++ README\\n\" b\"@@ -1,1 +1,1 @@\\n\" b\"-Hello, world!\\n\" b\"+Hi, world!\\n\"))\n\t\tself.create_diffcommit(diffset=diffset, commit_id=\"r4\", parent_id=\"r3\", diff_contents=(b\"diff --git a/README b/README\\n\" b\"index 197009f..87abad9 100644\\n\" b\"--- README\\n\" b\"+++ README\\n\" b\"@@ -1,1 +1,1 @@\\n\" b\"-Hi, world!\\n\" b\"+Yo, world.\\n\"))\n\t\tcumulative_diff = b\"diff --git a/ABC b/ABC\\n\" b\"index 94bdd3e..197009f 100644\\n\" b\"--- ABC\\n\" b\"+++ ABC\\n\" b\"@@ -1,1 +1,1 @@\\n\" b\"-line!\\n\" b\"+line..\\n\" b\"diff --git a/README b/README\\n\" b\"index 94bdd3e..87abad9 100644\\n\" b\"--- README\\n\" b\"+++ README\\n\" b\"@@ -1,1 +1,1 @@\\n\" b\"-Hello, world!\\n\" b\"+Yo, world.\\n\"\n\t\tdiffset.finalize_commit_series(cumulative_diff=cumulative_diff, validation_info=None, validate=False, save=True)\n\t\tresponse = self.client.get(\"/r/%d/diff/raw/\" % review_request.pk)\n\t\tself.assertEqual(response.content, cumulative_diff)", "def check_pr_details(self, pr_number):\n pr = self.repo.get_pull(pr_number)\n email_pattern = re.compile(r'^.*@suse\\.(com|cz|de)$')\n\n for commit in pr.get_commits():\n sha = commit.sha\n author = commit.author\n title = message = commit.commit.message\n # Not sure why we need to use the nested commit for the email\n email = commit.commit.author.email\n user_id = f'{author.login}({email})'\n body = ''\n\n # This could be probably smarter but commit contains something like the following\n # message=\"$commit_title\\n\\n$long_commit_message\" and as such maybe 
we can split it and\n # check for the following limits: title max 50 chars, body max 72 chars per line and at\n # least as long as the commit title to avoid commit message bodies full of whitespaces\n try:\n title, body = message.split('\\n\\n', 1)\n except ValueError:\n print('No commit body was detected')\n\n print(f'Checking commit \"{sha}: {title}\"')\n\n if not email_pattern.fullmatch(email):\n print(f'Checking if {user_id} is part of the SUSE organization...')\n\n if self.org.has_in_members(commit.author):\n print(f'{user_id} is part of SUSE organization but a SUSE e-mail address was not used for commit: {sha}')\n sys.exit(1)\n\n # replace case-insensitive \"(bsc#)\" (or []) and surrounding spaces\n # with a single space, then prune leading/trailing spaces\n title = re.sub(r'\\s*[([]\\s*(?i:bsc)#\\d+\\s*[)\\]]\\s*', ' ', title).strip()\n if len(title) > 50:\n print('Commit message title should be less than 50 characters (excluding the bsc# reference)')\n sys.exit(1)\n\n # No body detected. Nothing else to do here.\n if not body:\n continue\n\n if len(body) < len(title):\n print('Commit message body is too short')\n sys.exit(1)\n\n # strip multi-line '```code```' blocks & lines starting w\\ `code`\n code_pattern = re.compile(\n r'''\n ((?m:^)\\s*```) # multi-line beginning, 0-more whitespace, ```\n (?s:.*?) # non-greedy, zero or more chars, including \\n\n \\1 # whatever matched at the beginning\n | # or...\n (?m:^)\\s*` # start of line, optional whitespace, backtick\n [^`]+ # oneor more non-backtick chars\n `\\s*(?m:$) # and a backtick at the end of the line\n ''',\n re.VERBOSE\n )\n for body_line in re.sub(code_pattern, '', body).splitlines():\n if len(body_line) > 72:\n print('Each line in the commit body should be less than 72 characters')\n sys.exit(1)\n\n print(f'PR-{pr_number} commits verified.')", "def test_get_git_commit(self):\n git_commit = get_git_commit()\n # output format: ['fafdb957049917ede565cebc58b29899f597fb5a', 'Fri Mar 29 11:09:50 2019 -0400']\n self.assertEqual(len(git_commit[0]), 40)\n self.assertEqual(len(git_commit[1].split()), 6)", "def verify_git_clean(path):\n\n sys.stdout.write(\" - Checking for uncommitted changes:\")\n result = run_in_component(path, ['git', 'status', '--porcelain=v1'])\n\n lines = [x for x in result.splitlines() if len(x) > 0]\n\n if len(lines) == 0:\n print(\" OKAY\")\n return\n\n print(\" FAILED\")\n\n raise GenericError(\"There are uncommitted changes in the component, please commit or stash them\")", "def _check_diff_move(commit_sha1, head_sha1):\n commit_info = {}\n branch_sha1s = []\n\n # Get list of commits between this one and the branch head\n git_log_cmd = shlex.split(\n 'git log --oneline --no-abbrev --reverse '\n '{commit_sha1}..{head_sha1}'.format(\n commit_sha1=commit_sha1, head_sha1=head_sha1))\n\n git_log_output = subprocess.check_output(git_log_cmd)\n\n for git_log_line in git_log_output.splitlines():\n if git_log_line == '':\n continue\n\n branch_sha1, _ = git_log_line.split(' ', 1)\n branch_sha1s.append(branch_sha1)\n\n # If there are no commits to check then just return an empty dict\n # and empty list tuple\n if branch_sha1s == []:\n return commit_info, branch_sha1s\n\n diff_lines = _parse_diff(commit_sha1)\n\n context = 'diff-move-check'\n for diff_line in diff_lines:\n line_type, line = diff_line[0], diff_line[1:]\n\n # Skip blank lines\n if line == '':\n continue\n\n # Use the -G parameter of git log to check whether an added or\n # deleted line was moved in a later commit\n\n # Escape regex meta-characters\n 
line = re.sub(r'([].^$*+?{}\\\\[|()\"])', r'\\\\\\1', line)\n\n git_log_g_str = (\n 'git log --oneline --no-abbrev --reverse -G\"^{line}$\" '\n '{commit_sha1}..{head_sha1}'.format(\n line=line, commit_sha1=commit_sha1, head_sha1=head_sha1))\n try:\n git_log_g_cmd = shlex.split(git_log_g_str)\n print 'Running git log -G\"^{line}$\"'.format(line=line)\n print 'git_log_g_cmd: {git_log_g_cmd}'.format(\n git_log_g_cmd=git_log_g_cmd)\n git_log_g_output = subprocess.check_output(git_log_g_cmd)\n print 'git_log_g_output: {git_log_g_output}'.format(\n git_log_g_output=git_log_g_output)\n\n except (subprocess.CalledProcessError, ValueError) as e:\n print 'Exception when running git log -G\"^{line}$\"'.format(line=line)\n print 'Exception was {e}'.format(e=e)\n try:\n print 'git_log_g_cmd: {git_log_g_cmd}'.format(\n git_log_g_cmd=git_log_g_cmd)\n except Exception as ex:\n print 'git_log_g_cmd not defined: {ex}'.format(ex=ex)\n print (\n 'Failed to run shlex.split on {git_log_g_str}'.format(\n git_log_g_str=git_log_g_str))\n git_log_g_output = ''\n pass\n\n for git_log_g_line in git_log_g_output.splitlines():\n sha1_g, _ = git_log_g_line.split(' ', 1)\n\n if sha1_g not in commit_info.keys():\n message = None\n if line_type == '+':\n description = (\n 'Removes a line matching a line added in '\n '{commit_sha1}'.format(commit_sha1=commit_sha1))\n message = context, description\n elif line_type == '-':\n description = (\n 'Re-adds a line matching a line removed in '\n '{commit_sha1}'.format(commit_sha1=commit_sha1))\n message = context, description\n else:\n print (\n 'Got line_type \"{line_type}\" instead of '\n '\"-\" or \"+\" in _check_diff_move'.format(line_type=line_type))\n\n commit_info[sha1_g] = [message]\n\n # Remove this sha1 from branch_sha1s\n if sha1_g in branch_sha1s:\n branch_sha1s.remove(sha1_g)\n\n # If we have already marked all the existing commits in the\n # branch, then break out of the loop\n if branch_sha1s == []:\n return commit_info, branch_sha1s\n\n return commit_info, branch_sha1s", "def main():\n smart_commit_msg_filename = SMART_COMMIT_MSG_FILENAME\n paths = get_staged_paths()\n if not len(paths):\n raise Exception(\"did you even add anything to staging\")\n paths += [smart_commit_msg_filename]\n mr_edited_file = max(paths, key=lambda k: os.path.getmtime(k))\n if mr_edited_file == smart_commit_msg_filename:\n print(git_commit())\n else:\n print(\"Update the patch notes!\")", "def check_clang_format(project, commit, _desc, diff, options=None):\n tool = get_helper_path('clang-format.py')\n clang_format = options.tool_path('clang-format')\n git_clang_format = options.tool_path('git-clang-format')\n tool_args = (['--clang-format', clang_format, '--git-clang-format',\n git_clang_format] +\n options.args(('--style', 'file', '--commit', commit), diff))\n cmd = [tool] + tool_args\n fixup_func = _fixup_func_caller([tool, '--fix'] + tool_args)\n return _check_cmd('clang-format', project, commit, cmd,\n fixup_func=fixup_func)", "def test_git_commits(self):\n event_id = dog.Event.create(title=\"Testing git commits\", text=\"\"\"$$$\n eac54655 * Merge pull request #2 from DataDog/alq-add-arg-validation ([email protected])\n |\\\n 760735ef | * origin/alq-add-arg-validation Simple typecheck between metric and metrics ([email protected])\n |/\n f7a5a23d * missed version number in docs ([email protected])\n $$$\"\"\", event_type=\"commit\", source_type_name=\"git\", event_object=\"0xdeadbeef\")['event']['id']\n event = self.get_event_with_retry(event_id)\n 
self.assertEqual(event['event']['title'], \"Testing git commits\")", "def something_to_commit():\n\n # Procelain returns nothing if there's nothing to commit\n ret = subprocess.check_output([\"git\", \"status\", \"--porcelain\"])\n\n if (len(ret) > 0):\n return True\n\n return False", "def test_diff_git_line_without_a_b_and_spaces(self):\n diff = (\n b'diff --git foo bar1 foo bar1\\n'\n b'deleted file mode 100644\\n'\n b'index 612544e4343bf04967eb5ea80257f6c64d6f42c7..'\n b'0000000000000000000000000000000000000000\\n'\n )\n\n parsed_files = self.tool.get_parser(diff).parse()\n self.assertEqual(len(parsed_files), 1)\n\n self.assert_parsed_diff_file(\n parsed_files[0],\n orig_filename=b'foo bar1',\n orig_file_details=b'612544e4343bf04967eb5ea80257f6c64d6f42c7',\n modified_filename=b'foo bar1',\n modified_file_details=b'0000000000000000000000000000000000000000',\n old_unix_mode='100644',\n deleted=True,\n data=diff)", "def _parse_commit_log(base_commit, tip_commit):\n\n class LogState(object):\n SEPARATOR_LINE = 0\n COMMIT_SHA1_LINE = 1\n MERGE_LINE = 2\n AUTHOR_LINE = 3\n COMMITTER_LINE = 4\n MIDDLE_SEPARATOR_LINE = 5\n TITLE_LINE = 6\n BLANK_LINE = 7\n BODY_LINES = 8\n\n commit_info = {}\n check_churn = True\n check_move = True\n\n git_log_cmd = shlex.split(\n 'git log --format=full --reverse {base_commit}..{tip_commit}'.format(\n base_commit=base_commit, tip_commit=tip_commit))\n git_log_output = subprocess.check_output(git_log_cmd)\n\n log_line_state = LogState.SEPARATOR_LINE\n commit_sha1 = None\n merge = None\n author = None\n committer = None\n title = None\n separator = None\n body = []\n git_log_output_lines = git_log_output.splitlines()\n for idx, line in enumerate(git_log_output_lines, 1):\n # commit line\n if (\n log_line_state == LogState.SEPARATOR_LINE and\n line.startswith('commit ')):\n commit_sha1 = line.split(' ')[1]\n log_line_state = LogState.COMMIT_SHA1_LINE\n continue\n\n # Merge: line\n if (\n log_line_state == LogState.COMMIT_SHA1_LINE and\n line.startswith('Merge: ')):\n merge = line.split(' ', 1)[1]\n log_line_state = LogState.MERGE_LINE\n continue\n\n # Author: line\n if (\n log_line_state in [\n LogState.COMMIT_SHA1_LINE, LogState.MERGE_LINE] and\n line.startswith('Author: ')):\n author = line.split(' ', 1)[1]\n log_line_state = LogState.AUTHOR_LINE\n continue\n\n # Commit: line\n if log_line_state == LogState.AUTHOR_LINE and line.startswith('Commit: '):\n committer = line.split(' ', 1)[1]\n log_line_state = LogState.COMMITTER_LINE\n continue\n\n # empty line after Commit: line\n if log_line_state == LogState.COMMITTER_LINE and line == '':\n log_line_state = LogState.MIDDLE_SEPARATOR_LINE\n continue\n\n # Title line of commit message\n if (\n log_line_state == LogState.MIDDLE_SEPARATOR_LINE and\n line.startswith(' ')):\n title = line.lstrip(' ')\n log_line_state = LogState.TITLE_LINE\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n 
check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # Blank line between title and body (still contains 4 space prefix)\n if log_line_state == LogState.TITLE_LINE and line.startswith(' '):\n separator = line.lstrip(' ')\n log_line_state = LogState.BLANK_LINE\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # Body lines\n if (\n log_line_state in [LogState.BLANK_LINE, LogState.BODY_LINES] and\n line.startswith(' ')):\n body.append(line.lstrip(' '))\n log_line_state = LogState.BODY_LINES\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # End of commit message\n if (\n log_line_state in [\n LogState.TITLE_LINE, LogState.BLANK_LINE,\n LogState.BODY_LINES] and\n line == ''):\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n 
commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n\n log_line_state = LogState.SEPARATOR_LINE\n commit_sha1 = None\n merge = None\n author = None\n committer = None\n title = None\n separator = None\n body = []\n\n return commit_info", "def check_diffs():\n process = Popen([\"git\", \"diff\", \"HEAD^\", \"--name-only\"], stdout=PIPE)\n\n diff, stderr = process.communicate()\n\n if process.returncode !=0:\n raise Exception(\"Unable to do git diff\")\n return diff.splitlines(False)", "def validate_commit(ctx, sha, **_):\n\n gh = ctx.obj.github\n ci_provider = ctx.obj.ci_provider\n\n sha = sha or (ci_provider.sha if ci_provider else None)\n\n def _pre_issue():\n log.echo('Commit references an issue...', break_line=False)\n\n def _post_issue():\n log.checkmark()\n\n def _pre_label():\n log.echo('Issue is labeled with a release label...', break_line=False)\n\n def _post_label():\n log.checkmark()\n\n log.echo('Validating commit', add=True)\n\n try:\n gh.validate_commit(sha=sha,\n hooks={\n 'pre_issue': _pre_issue,\n 'pre_label': _pre_label,\n 'post_issue': _post_issue,\n 'post_label': _post_label\n })\n except exceptions.ReleaseValidationFailedException as e:\n log.xmark()\n log.sub()\n tb = sys.exc_info()[2]\n utils.raise_with_traceback(e, tb)\n log.sub()\n\n log.echo('Validation passed')", "def validate_change(ticket):\n # First ensure topic line mentions tickets, and pull them out.\n topic = COMMIT_MSG.split('\\n', 1)[0]\n fix_tickets = re.findall(\"[A-Z]{2,5}-[0-9]{1,6}\", topic)\n if len(fix_tickets) == 0:\n print \"\\n\\n\\n\\n\\n*********\\nERROR: commit message does not name a ticket!\"\n return False\n\n # Now get list of approved tickets from master ticket, and ensure\n # all \"fixed\" tickets are approved.\n approved_tickets = get_approved_tickets(ticket)\n for tick in fix_tickets:\n if not tick in approved_tickets:\n print \"\\n\\n\\n\\n\\n*********\\nERROR: ticket {} is not approved (see approval ticket {})\".format(\n tick, ticket)\n return False\n return True", "def test_diff_git_line_without_a_b_and_spaces_changed(self):\n diff = (b'diff --git foo bar1 foo bar2\\n'\n b'deleted file mode 100644\\n'\n b'index 612544e4343bf04967eb5ea80257f6c64d6f42c7..'\n b'0000000000000000000000000000000000000000\\n')\n\n with self.assertRaises(DiffParserError) as cm:\n self.tool.get_parser(diff).parse()\n\n self.assertTrue(str(cm.exception).startswith(\n 'Unable to parse the \"diff --git\" line'))", "def test_commit_message_default(repository: Repository) -> None:\n (repository.path / \"a\").touch()\n\n repository.commit()\n\n head = repository.head.commit\n assert \"\" == head.message", "def commit(self, commit_msg, top_repo_path):\n my_output = subprocess.check_output(\n [\"git\", \"commit\", \"-m\", commit_msg], cwd=top_repo_path\n )\n return 
my_output", "def deal_lines(self, lines, conf):\n if lines == ['']:\n print \"NO new %s commit!\" % conf\n else:\n for line in lines:\n if re.search('\\d+ files? changed', line) is None:\n pos = line.find(' ')\n if pos != -1:\n try:\n parts = line.split(' ', 2)\n commit_id = parts[0]\n self.current_commit = commit_id\n stamp = int(parts[1])\n ti = datetime.datetime.fromtimestamp(float(stamp))\n s_time = datetime.datetime.fromtimestamp(float(0))\n if self.start_date == s_time:\n self.start_date = ti\n elif self.start_date > ti:\n self.start_date = ti\n author, mail = parts[2].split('<', 1)\n message = mail.split('> ', 1)[1]\n mail = mail.split('>', 1)[0]\n if re.search(': ', message) is not None:\n messagetype = message.split(': ', 1)[0]\n if messagetype not in CLASSIFICATION:\n messagetype = 'OTR'\n else:\n messagetype = 'OTR'\n if commit_id not in self.commit_dictionary:\n self.commit_dictionary[commit_id]\\\n = [commit_id, mail,\n stamp, messagetype,\n messagetype, 0, 0, 0, 0]\n # [files, inserted, deleted, total_lines]\n if mail not in self.author_dictionary:\n self.author_dictionary[mail] = [author,\n mail, 0, 0,\n 0, 0, 1,\n stamp]\n # [files,inserted,deleted,total_lines,commit,stamp]\n else:\n self.author_dictionary[mail][6] += 1\n if stamp > self.author_dictionary[mail][7]:\n self.author_dictionary[mail][7] = stamp\n self.total_patches += 1\n except:\n print 'Warning: unexpected line \"%s\"' % line\n else:\n if conf == 'no_merges':\n try:\n commit_id = self.current_commit\n numbers = self.getstatsummarycounts(line)\n if len(numbers) == 3:\n (files, inserted, deleted) = \\\n map(lambda el: int(el), numbers)\n total_lines = inserted - deleted\n self.commit_dictionary[commit_id][5] = files\n self.commit_dictionary[commit_id][6] = inserted\n self.commit_dictionary[commit_id][7] = deleted\n self.commit_dictionary[commit_id][8] = total_lines\n self.author_dictionary[mail][2] += files\n self.author_dictionary[mail][3] += inserted\n self.author_dictionary[mail][4] += deleted\n self.author_dictionary[mail][5] += total_lines\n self.total_lines_inserted += inserted\n self.total_lines_deleted += deleted\n self.total_lines += total_lines\n self.current_commit = None\n except:\n print 'Warning: unexpected line \"%s\"' % line", "def test_diff_git_line_without_a_b_and_spaces_quotes(self):\n diff = (\n b'diff --git \"foo bar1\" \"foo bar1\"\\n'\n b'deleted file mode 100644\\n'\n b'index 612544e4343bf04967eb5ea80257f6c64d6f42c7..'\n b'0000000000000000000000000000000000000000\\n'\n )\n\n parsed_files = self.tool.get_parser(diff).parse()\n self.assertEqual(len(parsed_files), 1)\n\n self.assert_parsed_diff_file(\n parsed_files[0],\n orig_filename=b'foo bar1',\n orig_file_details=b'612544e4343bf04967eb5ea80257f6c64d6f42c7',\n modified_filename=b'foo bar1',\n modified_file_details=b'0000000000000000000000000000000000000000',\n old_unix_mode='100644',\n deleted=True,\n data=diff)", "def test_diff_git_line_without_a_b_and_spaces_quotes_changed(self):\n diff1 = (\n b'diff --git \"foo bar1\" \"foo bar2\"\\n'\n b'deleted file mode 100644\\n'\n b'index 612544e4343bf04967eb5ea80257f6c64d6f42c7..'\n b'0000000000000000000000000000000000000000\\n'\n )\n diff2 = (\n b'diff --git \"foo bar1\" foo\\n'\n b'deleted file mode 100644\\n'\n b'index 612544e4343bf04967eb5ea80257f6c64d6f42c7..'\n b'0000000000000000000000000000000000000000\\n'\n )\n diff3 = (\n b'diff --git foo \"foo bar1\"\\n'\n b'deleted file mode 100644\\n'\n b'index 612544e4343bf04967eb5ea80257f6c64d6f42c7..'\n 
b'0000000000000000000000000000000000000000\\n'\n )\n diff = diff1 + diff2 + diff3\n\n parsed_files = self.tool.get_parser(diff).parse()\n self.assertEqual(len(parsed_files), 3)\n\n self.assert_parsed_diff_file(\n parsed_files[0],\n orig_filename=b'foo bar1',\n orig_file_details=b'612544e4343bf04967eb5ea80257f6c64d6f42c7',\n modified_filename=b'foo bar2',\n modified_file_details=b'0000000000000000000000000000000000000000',\n old_unix_mode='100644',\n deleted=True,\n data=diff1)\n\n self.assert_parsed_diff_file(\n parsed_files[1],\n orig_filename=b'foo bar1',\n orig_file_details=b'612544e4343bf04967eb5ea80257f6c64d6f42c7',\n modified_filename=b'foo',\n modified_file_details=b'0000000000000000000000000000000000000000',\n old_unix_mode='100644',\n deleted=True,\n data=diff2)\n\n self.assert_parsed_diff_file(\n parsed_files[2],\n orig_filename=b'foo',\n orig_file_details=b'612544e4343bf04967eb5ea80257f6c64d6f42c7',\n modified_filename=b'foo bar1',\n modified_file_details=b'0000000000000000000000000000000000000000',\n old_unix_mode='100644',\n deleted=True,\n data=diff3)", "def test_message_truncated_correctly_commit_log_entry(self):\n commit = collection_models.CollectionCommitLogEntryModel.create(\n 'b', 0, 'committer_id', 'a', 'a' * 400, [{}],\n constants.ACTIVITY_STATUS_PUBLIC, False)\n commit.collection_id = 'b'\n commit.update_timestamps()\n commit.put()\n self._run_one_off_job()\n self.assertEqual(\n len(\n collection_models.CollectionCommitLogEntryModel.get_by_id(\n commit.id).commit_message),\n 375)\n\n # Ensure nothing happens to messages of proper length.\n self._run_one_off_job()\n self.assertEqual(\n len(\n collection_models.CollectionCommitLogEntryModel.get_by_id(\n commit.id).commit_message),\n 375)", "def _check_diff_add_delete(commit_sha1, head_sha1):\n commit_info = {}\n branch_sha1s = []\n\n # Get list of commits between this one and the branch head\n git_log_cmd = shlex.split(\n 'git log --oneline --no-abbrev --reverse '\n '{commit_sha1}..{head_sha1}'.format(\n commit_sha1=commit_sha1, head_sha1=head_sha1))\n git_log_output = subprocess.check_output(git_log_cmd)\n\n for git_log_line in git_log_output.splitlines():\n if git_log_line == '':\n continue\n\n branch_sha1, _ = git_log_line.split(' ', 1)\n branch_sha1s.append(branch_sha1)\n\n # If there are no commits to check then just return an empty dict\n # and empty list tuple\n if branch_sha1s == []:\n return commit_info, branch_sha1s\n\n diff_lines = _parse_diff(commit_sha1)\n\n context = 'diff-add-delete-check'\n for diff_line in diff_lines:\n line_type, line = diff_line[0], diff_line[1:]\n\n # Skip blank lines\n if line == '':\n continue\n\n # Use the -S parameter of git log to check whether an added line\n # was removed or duplicated in a later commit, or whether a\n # removed line was re-added or also removed elsewhere in a later\n # commit\n\n # Escape double-quotes\n line = re.sub(r'\"', r'\\\\\\\"', line)\n git_log_s_str = (\n 'git log --oneline --no-abbrev --reverse -S\"{line}\" '\n '{commit_sha1}..{head_sha1}'.format(\n line=line, commit_sha1=commit_sha1, head_sha1=head_sha1))\n try:\n git_log_s_cmd = shlex.split(git_log_s_str)\n print 'Running git log -S\"{line}\"'.format(line=line)\n print 'git_log_s_cmd: {git_log_s_cmd}'.format(\n git_log_s_cmd=git_log_s_cmd)\n\n git_log_s_output = subprocess.check_output(git_log_s_cmd)\n print 'git_log_s_output: {git_log_s_output}'.format(\n git_log_s_output=git_log_s_output)\n except (subprocess.CalledProcessError, ValueError) as e:\n print 'Exception when running git log 
-S\"{line}\"'.format(line=line)\n print 'Exception was {e}'.format(e=e)\n try:\n print 'git_log_s_cmd: {git_log_s_cmd}'.format(\n git_log_s_cmd=git_log_s_cmd)\n except Exception as ex:\n print 'git_log_s_cmd not defined: {ex}'.format(ex=ex)\n print (\n 'Failed to run shlex.split on {git_log_s_str}'.format(\n git_log_s_str=git_log_s_str))\n git_log_s_output = ''\n pass\n\n for git_log_s_line in git_log_s_output.splitlines():\n sha1_s, _ = git_log_s_line.split(' ', 1)\n\n if sha1_s not in commit_info.keys():\n message = None\n if line_type == '+':\n description = (\n 'Adds or removes lines matching a line added in '\n '{commit_sha1}'.format(commit_sha1=commit_sha1))\n message = context, description\n elif line_type == '-':\n description = (\n 'Adds or removes lines matching a line removed in '\n '{commit_sha1}'.format(commit_sha1=commit_sha1))\n message = context, description\n else:\n print (\n 'Got line_type \"{line_type}\" instead of '\n '\"-\" or \"+\" in _check_diff_add_delete'.format(\n line_type=line_type))\n\n commit_info[sha1_s] = [message]\n\n # Remove this sha1 from branch_sha1s\n if sha1_s in branch_sha1s:\n branch_sha1s.remove(sha1_s)\n\n # If we have already marked all the existing commits in the\n # branch, then break out of the loop\n if branch_sha1s == []:\n return commit_info, branch_sha1s\n\n return commit_info, branch_sha1s", "def test_no_body_smart_require_min_body_lines_option_ignored(self, custom_config):\n del custom_config['body']['smart_require']['min_changes']\n check = CommitMessagesCheck(CheckConfig('whatever', 'error', **custom_config))\n result = check.run(\n {\n 'commits': [\n {\n 'stats': {'total': 2000},\n 'message': 'xxxxx',\n 'sha': 'aa',\n 'url': '',\n }\n ]\n }\n )[0]\n assert result.success is True", "def commit_check(ctx):\n result = ctx.run(f\"{VENV_PREFIX} cz check --rev-range master..\", warn=True)\n if result.exited == 3: # NO_COMMIT_FOUND\n exit(0)\n else:\n exit(result.exited)" ]
[ "0.7362382", "0.70552653", "0.701963", "0.6680221", "0.6538131", "0.62699616", "0.6244798", "0.6007464", "0.60052216", "0.59986544", "0.5935543", "0.5911222", "0.5903283", "0.58931595", "0.5861587", "0.5839806", "0.5837703", "0.58372337", "0.5822277", "0.5812173", "0.578061", "0.57590044", "0.5756618", "0.5716239", "0.57142645", "0.57072717", "0.57041764", "0.57023007", "0.56428546", "0.56354165" ]
0.7532964
0
Parse the output of git log --format=full. This parses the output of git log --format=full, extracts the commit sha1, author, committer, commit title, commit separator, and commit message body values, and passes them to other methods to validate their format.
def _parse_commit_log(base_commit, tip_commit):

    class LogState(object):
        SEPARATOR_LINE = 0
        COMMIT_SHA1_LINE = 1
        MERGE_LINE = 2
        AUTHOR_LINE = 3
        COMMITTER_LINE = 4
        MIDDLE_SEPARATOR_LINE = 5
        TITLE_LINE = 6
        BLANK_LINE = 7
        BODY_LINES = 8

    commit_info = {}
    check_churn = True
    check_move = True

    git_log_cmd = shlex.split(
        'git log --format=full --reverse {base_commit}..{tip_commit}'.format(
            base_commit=base_commit, tip_commit=tip_commit))
    git_log_output = subprocess.check_output(git_log_cmd)

    log_line_state = LogState.SEPARATOR_LINE
    commit_sha1 = None
    merge = None
    author = None
    committer = None
    title = None
    separator = None
    body = []
    git_log_output_lines = git_log_output.splitlines()
    for idx, line in enumerate(git_log_output_lines, 1):
        # commit line
        if (
                log_line_state == LogState.SEPARATOR_LINE and
                line.startswith('commit ')):
            commit_sha1 = line.split(' ')[1]
            log_line_state = LogState.COMMIT_SHA1_LINE
            continue

        # Merge: line
        if (
                log_line_state == LogState.COMMIT_SHA1_LINE and
                line.startswith('Merge: ')):
            merge = line.split(' ', 1)[1]
            log_line_state = LogState.MERGE_LINE
            continue

        # Author: line
        if (
                log_line_state in [
                    LogState.COMMIT_SHA1_LINE, LogState.MERGE_LINE] and
                line.startswith('Author: ')):
            author = line.split(' ', 1)[1]
            log_line_state = LogState.AUTHOR_LINE
            continue

        # Commit: line
        if log_line_state == LogState.AUTHOR_LINE and line.startswith('Commit: '):
            committer = line.split(' ', 1)[1]
            log_line_state = LogState.COMMITTER_LINE
            continue

        # empty line after Commit: line
        if log_line_state == LogState.COMMITTER_LINE and line == '':
            log_line_state = LogState.MIDDLE_SEPARATOR_LINE
            continue

        # Title line of commit message
        if (
                log_line_state == LogState.MIDDLE_SEPARATOR_LINE and
                line.startswith(' ')):
            title = line.lstrip(' ')
            log_line_state = LogState.TITLE_LINE

            if idx < len(git_log_output_lines):
                continue

            commit_status = _validate_commit(
                commit_sha1, merge, author, committer, title, separator, body)

            if commit_sha1 not in commit_info.keys():
                commit_info[commit_sha1] = commit_status
            else:
                commit_info[commit_sha1].extend(commit_status)

            if check_churn:
                commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(
                    commit_sha1, tip_commit)

                for commit_churn_sha1 in commit_churn_info.keys():
                    if commit_churn_sha1 not in commit_info.keys():
                        commit_info[commit_churn_sha1] = commit_churn_info[
                            commit_churn_sha1]
                    else:
                        commit_info[commit_churn_sha1].extend(
                            commit_churn_info[commit_churn_sha1])

                check_churn = bool(branch_churn_sha1s)

            if check_move:
                commit_move_info, branch_move_sha1s = _check_diff_move(
                    commit_sha1, tip_commit)

                for commit_move_sha1 in commit_move_info.keys():
                    if commit_move_sha1 not in commit_info.keys():
                        commit_info[commit_move_sha1] = commit_move_info[
                            commit_move_sha1]
                    else:
                        commit_info[commit_move_sha1].extend(
                            commit_move_info[commit_move_sha1])

                check_move = bool(branch_move_sha1s)
            break

        # Blank line between title and body (still contains 4 space prefix)
        if log_line_state == LogState.TITLE_LINE and line.startswith(' '):
            separator = line.lstrip(' ')
            log_line_state = LogState.BLANK_LINE

            if idx < len(git_log_output_lines):
                continue

            commit_status = _validate_commit(
                commit_sha1, merge, author, committer, title, separator, body)

            if commit_sha1 not in commit_info.keys():
                commit_info[commit_sha1] = commit_status
            else:
                commit_info[commit_sha1].extend(commit_status)

            if check_churn:
                commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(
                    commit_sha1, tip_commit)

                for commit_churn_sha1 in commit_churn_info.keys():
                    if commit_churn_sha1 not in commit_info.keys():
                        commit_info[commit_churn_sha1] = commit_churn_info[
                            commit_churn_sha1]
                    else:
                        commit_info[commit_churn_sha1].extend(
                            commit_churn_info[commit_churn_sha1])

                check_churn = bool(branch_churn_sha1s)

            if check_move:
                commit_move_info, branch_move_sha1s = _check_diff_move(
                    commit_sha1, tip_commit)

                for commit_move_sha1 in commit_move_info.keys():
                    if commit_move_sha1 not in commit_info.keys():
                        commit_info[commit_move_sha1] = commit_move_info[
                            commit_move_sha1]
                    else:
                        commit_info[commit_move_sha1].extend(
                            commit_move_info[commit_move_sha1])

                check_move = bool(branch_move_sha1s)
            break

        # Body lines
        if (
                log_line_state in [LogState.BLANK_LINE, LogState.BODY_LINES] and
                line.startswith(' ')):
            body.append(line.lstrip(' '))
            log_line_state = LogState.BODY_LINES

            if idx < len(git_log_output_lines):
                continue

            commit_status = _validate_commit(
                commit_sha1, merge, author, committer, title, separator, body)

            if commit_sha1 not in commit_info.keys():
                commit_info[commit_sha1] = commit_status
            else:
                commit_info[commit_sha1].extend(commit_status)

            if check_churn:
                commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(
                    commit_sha1, tip_commit)

                for commit_churn_sha1 in commit_churn_info.keys():
                    if commit_churn_sha1 not in commit_info.keys():
                        commit_info[commit_churn_sha1] = commit_churn_info[
                            commit_churn_sha1]
                    else:
                        commit_info[commit_churn_sha1].extend(
                            commit_churn_info[commit_churn_sha1])

                check_churn = bool(branch_churn_sha1s)

            if check_move:
                commit_move_info, branch_move_sha1s = _check_diff_move(
                    commit_sha1, tip_commit)

                for commit_move_sha1 in commit_move_info.keys():
                    if commit_move_sha1 not in commit_info.keys():
                        commit_info[commit_move_sha1] = commit_move_info[
                            commit_move_sha1]
                    else:
                        commit_info[commit_move_sha1].extend(
                            commit_move_info[commit_move_sha1])

                check_move = bool(branch_move_sha1s)
            break

        # End of commit message
        if (
                log_line_state in [
                    LogState.TITLE_LINE, LogState.BLANK_LINE,
                    LogState.BODY_LINES] and
                line == ''):

            commit_status = _validate_commit(
                commit_sha1, merge, author, committer, title, separator, body)

            if commit_sha1 not in commit_info.keys():
                commit_info[commit_sha1] = commit_status
            else:
                commit_info[commit_sha1].extend(commit_status)

            if check_churn:
                commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(
                    commit_sha1, tip_commit)

                for commit_churn_sha1 in commit_churn_info.keys():
                    if commit_churn_sha1 not in commit_info.keys():
                        commit_info[commit_churn_sha1] = commit_churn_info[
                            commit_churn_sha1]
                    else:
                        commit_info[commit_churn_sha1].extend(
                            commit_churn_info[commit_churn_sha1])

                check_churn = bool(branch_churn_sha1s)

            if check_move:
                commit_move_info, branch_move_sha1s = _check_diff_move(
                    commit_sha1, tip_commit)

                for commit_move_sha1 in commit_move_info.keys():
                    if commit_move_sha1 not in commit_info.keys():
                        commit_info[commit_move_sha1] = commit_move_info[
                            commit_move_sha1]
                    else:
                        commit_info[commit_move_sha1].extend(
                            commit_move_info[commit_move_sha1])

                check_move = bool(branch_move_sha1s)

            log_line_state = LogState.SEPARATOR_LINE
            commit_sha1 = None
            merge = None
            author = None
            committer = None
            title = None
            separator = None
            body = []

    return commit_info
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_git_log(cls, repo_path, commit=None, pkgs=False, verbosity=-1):\n cmd = shlex.split(cls._git_cmd)\n # custom git log format, see the \"PRETTY FORMATS\" section of the git\n # log man page for details\n format_lines = [\n '# BEGIN COMMIT',\n '%h', # abbreviated commit hash\n '%cd', # commit date\n '%an <%ae>', # Author Name <[email protected]>\n '%cn <%ce>', # Committer Name <[email protected]>\n '%B', # commit message\n '# END MESSAGE BODY',\n ]\n format_str = '%n'.join(format_lines)\n cmd.append(f'--pretty=tformat:{format_str}')\n\n if commit:\n if '..' in commit:\n cmd.append(commit)\n else:\n cmd.append(f'{commit}..origin/HEAD')\n else:\n cmd.append('origin/HEAD')\n\n git_log = subprocess.Popen(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=repo_path)\n line = git_log.stdout.readline().decode().strip()\n if git_log.poll():\n error = git_log.stderr.read().decode().strip()\n logger.warning('skipping git checks: %s', error)\n return\n\n count = 1\n with base.ProgressManager(verbosity=verbosity) as progress:\n while line:\n hash = git_log.stdout.readline().decode().strip()\n commit_date = git_log.stdout.readline().decode().strip()\n author = git_log.stdout.readline().decode('utf-8', 'replace').strip()\n committer = git_log.stdout.readline().decode('utf-8', 'replace').strip()\n\n message = []\n while True:\n line = git_log.stdout.readline().decode('utf-8', 'replace').strip('\\n')\n if line == '# END MESSAGE BODY':\n # drop trailing newline if it exists\n if not message[-1]:\n message.pop()\n break\n message.append(line)\n\n # update progress output\n progress(f'{hash} commit #{count}, {commit_date}')\n count += 1\n\n commit = GitCommit(hash, commit_date, author, committer, message)\n if not pkgs:\n yield commit\n\n # file changes\n while True:\n line = git_log.stdout.readline().decode()\n if line == '# BEGIN COMMIT\\n' or not line:\n break\n if pkgs:\n parsed = cls._parse_file_line(line.strip())\n if parsed is not None:\n atom, status = parsed\n yield GitPkgChange(atom, status, commit)", "def parse(self, text):\n \n self.clear()\n lines = text.split(\"\\n\")\n self.logger.info(\"Parsing Git history\")\n \n for line in lines:\n if len(line) == 0:\n # Line is a spacer\n pass\n \n elif line[0] == ' ':\n # Line is part of a commit message\n pass\n \n else:\n # Line is part of a commit header\n spaceIdx = line.find(' ')\n if spaceIdx == -1:\n self.logger.warn(\"Skipping unrecognizable history line: \" + line)\n continue\n \n keyword = line[:spaceIdx]\n content = line[spaceIdx+1:]\n self.logger.debug(\"Found key-value pair: {0} {1}\".format(keyword, content))\n \n self._handleKeyValue(keyword, content)\n \n # Grab the last commit\n self._commits[self._currentCommit.hashKey] = self._currentCommit\n self._currentCommit = None\n \n # Finalize the commit tree\n self._resolveCommits()", "def detailed_log(self, selected_hash, current_path):\n p = Popen(\n [\"git\", \"log\", \"-1\", \"--stat\", \"--numstat\", \"--oneline\", selected_hash],\n stdout=PIPE,\n stderr=PIPE,\n cwd=os.path.join(self.root_dir, current_path),\n )\n my_output, my_error = p.communicate()\n if p.returncode == 0:\n result = []\n note = [0] * 3\n count = 0\n temp = \"\"\n line_array = my_output.decode(\"utf-8\").splitlines()\n length = len(line_array)\n INSERTION_INDEX = 0\n DELETION_INDEX = 1\n MODIFIED_FILE_PATH_INDEX = 2\n if length > 1:\n temp = line_array[length - 1]\n words = temp.split()\n for i in range(0, len(words)):\n if words[i].isdigit():\n note[count] = words[i]\n count += 1\n for num in 
range(1, int(length / 2)):\n line_info = line_array[num].split()\n words = line_info[2].split(\"/\")\n length = len(words)\n result.append(\n {\n \"modified_file_path\": line_info[MODIFIED_FILE_PATH_INDEX],\n \"modified_file_name\": words[length - 1],\n \"insertion\": line_info[INSERTION_INDEX],\n \"deletion\": line_info[DELETION_INDEX],\n }\n )\n\n if note[2] == 0 and length > 1:\n if \"-\" in temp:\n exchange = note[1]\n note[1] = note[2]\n note[2] = exchange\n\n return {\n \"code\": p.returncode,\n \"modified_file_note\": temp,\n \"modified_files_count\": note[0],\n \"number_of_insertions\": note[1],\n \"number_of_deletions\": note[2],\n \"modified_files\": result,\n }\n else:\n return {\n \"code\": p.returncode,\n \"command\": \"git log_1\",\n \"message\": my_error.decode(\"utf-8\"),\n }", "def last_commit_short_log():\n subprocess.check_output('git log -1 --pretty=format:%h:%s'.split()).decode()", "def parse(self):\n i = 0\n while i < len(self.__lines):\n line = self.__lines[i]\n dt = re.match(r\"(\\d{4}-\\d{1,2}-\\d{1,2}\\s\\d{1,2}:\\d{1,2}:\\d{1,2})\", line)\n if not dt:\n i += 1\n continue\n log = {\n \"datetime\": dt.group()\n }\n line = line[dt.end()+1:].rstrip(\"\\n\")[::-1]\n qq_flag = line.find(\"(\")\n log[\"qq\"] = line[qq_flag-1:0:-1]\n log[\"name\"] = line[:qq_flag:-1].strip(\" \")\n i += 1\n log[\"content\"] = self.__lines[i].rstrip(\"\\n\")\n while self.__lines[i+1] != \"\\n\":\n i += 1\n log[\"content\"] += \" \" + self.__lines[i].rstrip(\"\\n\")\n self.__logs.append(log)\n i += 2", "def extract_commit_data(repo, fields, result_format, log=LOG):\n results = []\n commits = list(repo.iter_commits())\n if not commits:\n msg = \"No commits found\"\n log.error(msg, commits=commits)\n raise GitToolException(msg)\n\n log = log.bind(total_commits=len(commits))\n log.debug(\"Filtering commits\", fields=fields)\n\n for c in commits:\n commit_metadata = dict() if result_format == 'dict' else []\n for f in fields:\n data = c\n try:\n for part in f.split('.'):\n data = getattr(data, part)\n if isinstance(data, six.string_types):\n data = data.strip()\n except AttributeError as e:\n msg = 'Commit missing an attribute'\n members = [x[0] for x in inspect.getmembers(data) if not x[0].startswith('_')]\n log.exception(msg, commit=c, attribute=f, data_obj=data, members=members, exc_info=e)\n raise GitToolException(msg)\n\n if result_format == 'dict':\n commit_metadata[f] = data\n else:\n commit_metadata.append(data)\n if result_format == 'flat_list':\n assert isinstance(commit_metadata, list)\n [results.append(x) for x in commit_metadata]\n else:\n results.append(commit_metadata)\n return results", "def parseCommit() -> str:\n cmd_tag = f\"git --no-pager diff --diff-filter=ACMR --name-only HEAD~1 HEAD\"\n print(f\"COMMAND: {cmd_tag}\")\n print(\"\", flush=True)\n fileList = subprocess.check_output(cmd_tag, shell=True)\n return fileList.decode('utf-8').splitlines()", "def log(self, current_path):\n p = Popen(\n [\"git\", \"log\", \"--pretty=format:%H%n%an%n%ar%n%s\", \"-10\"],\n stdout=PIPE,\n stderr=PIPE,\n cwd=os.path.join(self.root_dir, current_path),\n )\n my_output, my_error = p.communicate()\n if p.returncode == 0:\n result = []\n line_array = my_output.decode(\"utf-8\").splitlines()\n i = 0\n PREVIOUS_COMMIT_OFFSET = 4\n while i < len(line_array):\n if i + PREVIOUS_COMMIT_OFFSET < len(line_array):\n result.append(\n {\n \"commit\": line_array[i],\n \"author\": line_array[i + 1],\n \"date\": line_array[i + 2],\n \"commit_msg\": line_array[i + 3],\n \"pre_commit\": line_array[i + 
PREVIOUS_COMMIT_OFFSET],\n }\n )\n else:\n result.append(\n {\n \"commit\": line_array[i],\n \"author\": line_array[i + 1],\n \"date\": line_array[i + 2],\n \"commit_msg\": line_array[i + 3],\n \"pre_commit\": \"\",\n }\n )\n i += PREVIOUS_COMMIT_OFFSET\n return {\"code\": p.returncode, \"commits\": result}\n else:\n return {\"code\": p.returncode, \"message\": my_error.decode(\"utf-8\")}", "def check_commit_msg(commitish):\n\n hdr = CommitSubHeader()\n line_list = dump_raw_body(commitish)\n\n if COMMIT_MESSAGE_CHECK and line_list[1] != \"\":\n if line_list[1].find('REF: ') == -1:\n add_error(\"Summary field must have just one line in %s\" % commitish)\n else:\n add_error(\"No empty line after Summary field in %s\" % commitish)\n\n if COMMIT_MESSAGE_CHECK and len(line_list[0]) < 5 or len(line_list[0]) > 78:\n add_error(\"Wrong size (%d) of Summary field in %s\" % (len(line_list[0]), commitish))\n\n while len(line_list) != 0:\n line = line_list.pop(0)\n\n if line.find('REF: ') == 0:\n if hdr.ref == None:\n hdr.ref = 1 # Not None\n elif COMMIT_MESSAGE_CHECK:\n add_error(\"Field 'REF:' must be once in %s\" % commitish)\n continue\n\n if COMMIT_MESSAGE_CHECK and not Commit.rt_header_fields['REF: '].match(line[len('REF: '):]):\n add_error(\"Wrong field 'REF:' in %s\" % commitish)\n else:\n hdr.ref = line[len('REF: '):]\n\n elif line.find('Signed-off-by: ') == 0:\n if hdr.signed == None:\n hdr.signed = 1 # Not None\n elif COMMIT_MESSAGE_CHECK:\n add_error(\"Field 'Signed-off-by:' must be once in %s\" % commitish)\n continue\n\n if COMMIT_MESSAGE_CHECK and not Commit.rt_header_fields['Signed-off-by: '].match(line[len('Signed-off-by: '):]):\n add_error(\"Wrong field 'Signed-off-by:' in %s\" % commitish)\n else:\n hdr.signed = line[len('Signed-off-by: '):]\n\n elif len(line) != 0:\n hdr.desc = 1\n if COMMIT_MESSAGE_CHECK and len(line) > 78:\n add_error(\"Wrong size (%d) of field 'Description' in %s\" % (len(line), commitish))\n\n if COMMIT_MESSAGE_CHECK and hdr.ref == None:\n add_error(\"No field 'REF:' in %s\" % commitish)\n if COMMIT_MESSAGE_CHECK and hdr.desc == None:\n add_error(\"No field 'Description' in %s\" % commitish)\n if COMMIT_MESSAGE_CHECK and hdr.signed == None:\n add_error(\"No field 'Signed-off-by:' in %s\" % commitish)\n\n return hdr", "def get_changelog(self, commit_sha):\n\n url = 'https://{}/{}/{}/' + commit_sha + '/CHANGELOG'\n url = url.format(HOST_GITHUB_RAW, self.repo, self.product)\n\n req = requests.get(url)\n lines = req.text\n\n first = self.latest_tags[self.num_comparisons - 1][VERS]\n last = self.latest_tags[self.num_comparisons - 2][VERS]\n flag = False\n\n log = ''\n for line in lines.splitlines():\n if first in line:\n flag = True\n if last in line:\n flag = False\n if flag:\n log += line + '\\n'\n return log", "def parse(self, filename):\n def invalid_line(line, reason):\n stats.count_lines_invalid.increment()\n if config.options.debug >= 2:\n logging.debug('Invalid line detected (%s): %s' % (reason, line))\n\n if filename == '-':\n filename = '(stdin)'\n file = sys.stdin\n else:\n if not os.path.exists(filename):\n print >> sys.stderr, \"\\n=====> Warning: File %s does not exist <=====\" % filename\n return\n else:\n if filename.endswith('.bz2'):\n open_func = bz2.BZ2File\n elif filename.endswith('.gz'):\n open_func = gzip.open\n else:\n open_func = open\n file = open_func(filename, 'r')\n\n if config.options.show_progress:\n print 'Parsing log %s...' 
% filename\n\n if config.format:\n # The format was explicitely specified.\n format = config.format\n\n if isinstance(format, W3cExtendedFormat):\n format.create_regex(file)\n\n if format.regex is None:\n return fatal_error(\n \"File is not in the correct format, is there a '#Fields:' line? \"\n \"If not, use the --w3c-fields option.\"\n )\n else:\n # If the file is empty, don't bother.\n data = file.read(100)\n if len(data.strip()) == 0:\n return\n try:\n file.seek(0)\n except IOError:\n pass\n\n format = self.detect_format(file)\n if format is None:\n return fatal_error(\n 'Cannot guess the logs format. Please give one using '\n 'either the --log-format-name or --log-format-regex option'\n )\n # Make sure the format is compatible with the resolver.\n\n if config.options.dump_log_regex:\n logging.info(\"Using format '%s'.\" % format.name)\n if format.regex:\n logging.info(\"Regex being used: %s\" % format.regex.pattern)\n else:\n logging.info(\"Format %s does not use a regex to parse log lines.\" % format.name)\n logging.info(\"--dump-log-regex option used, aborting log import.\")\n os._exit(0)\n\n hits = []\n for lineno, line in enumerate(file):\n try:\n line = line.decode(config.options.encoding)\n except UnicodeDecodeError:\n invalid_line(line, 'invalid encoding')\n continue\n\n stats.count_lines_parsed.increment()\n if stats.count_lines_parsed.value <= config.options.skip:\n continue\n\n match = format.match(line)\n if not match:\n invalid_line(line, 'line did not match')\n continue\n\n hit = Hit(\n filename=filename,\n lineno=lineno,\n status=format.get('status'),\n full_path=format.get('path'),\n is_download=False,\n is_robot=False,\n is_error=False,\n is_redirect=False,\n args={},\n )\n\n if config.options.regex_group_to_page_cvars_map:\n self._add_custom_vars_from_regex_groups(hit, format, config.options.regex_group_to_page_cvars_map, True)\n\n if config.options.regex_group_to_visit_cvars_map:\n self._add_custom_vars_from_regex_groups(hit, format, config.options.regex_group_to_visit_cvars_map, False)\n\n if config.options.regex_groups_to_ignore:\n format.remove_ignored_groups(config.options.regex_groups_to_ignore)\n\n try:\n hit.query_string = format.get('query_string')\n hit.path = hit.full_path\n except BaseFormatException:\n hit.path, _, hit.query_string = hit.full_path.partition(config.options.query_string_delimiter)\n\n # W3cExtendedFormat detaults to - when there is no query string, but we want empty string\n if hit.query_string == '-':\n hit.query_string = ''\n\n hit.extension = hit.path.rsplit('.')[-1].lower()\n\n try:\n hit.referrer = format.get('referrer')\n\n if hit.referrer.startswith('\"'):\n hit.referrer = hit.referrer[1:-1]\n except BaseFormatException:\n hit.referrer = ''\n if hit.referrer == '-':\n hit.referrer = ''\n\n try:\n hit.user_agent = format.get('user_agent')\n\n # in case a format parser included enclosing quotes, remove them so they are not\n # sent to Piwik\n if hit.user_agent.startswith('\"'):\n hit.user_agent = hit.user_agent[1:-1]\n except BaseFormatException:\n hit.user_agent = ''\n\n hit.ip = format.get('ip')\n try:\n hit.length = int(format.get('length'))\n except (ValueError, BaseFormatException):\n # Some lines or formats don't have a length (e.g. 
304 redirects, W3C logs)\n hit.length = 0\n\n try:\n hit.generation_time_milli = float(format.get('generation_time_milli'))\n except BaseFormatException:\n try:\n hit.generation_time_milli = float(format.get('generation_time_micro')) / 1000\n except BaseFormatException:\n try:\n hit.generation_time_milli = float(format.get('generation_time_secs')) * 1000\n except BaseFormatException:\n hit.generation_time_milli = 0\n\n if config.options.log_hostname:\n hit.host = config.options.log_hostname\n else:\n try:\n hit.host = format.get('host').lower().strip('.')\n\n if hit.host.startswith('\"'):\n hit.host = hit.host[1:-1]\n except BaseFormatException:\n # Some formats have no host.\n pass\n\n # Add userid\n try:\n hit.userid = None\n\n userid = format.get('userid')\n if userid != '-':\n hit.args['uid'] = hit.userid = userid\n except:\n pass\n\n # add event info\n try:\n hit.event_category = hit.event_action = hit.event_name = None\n\n hit.event_category = format.get('event_category')\n hit.event_action = format.get('event_action')\n\n hit.event_name = format.get('event_name')\n if hit.event_name == '-':\n hit.event_name = None\n except:\n pass\n\n # add session time\n try:\n hit.session_time = None\n\n session_time = format.get('session_time')\n hit.session_time = int(session_time)\n except:\n pass\n\n # Check if the hit must be excluded.\n if not all((method(hit) for method in self.check_methods)):\n continue\n\n # Parse date.\n # We parse it after calling check_methods as it's quite CPU hungry, and\n # we want to avoid that cost for excluded hits.\n date_string = format.get('date')\n try:\n hit.date = datetime.datetime.strptime(date_string, format.date_format)\n except ValueError:\n invalid_line(line, 'invalid date')\n continue\n\n # Parse timezone and substract its value from the date\n try:\n timezone = float(format.get('timezone'))\n except BaseFormatException:\n timezone = 0\n except ValueError:\n invalid_line(line, 'invalid timezone')\n continue\n\n if timezone:\n hit.date -= datetime.timedelta(hours=timezone/100)\n\n if config.options.replay_tracking:\n # we need a query string and we only consider requests with piwik.php\n if not hit.query_string or not hit.path.lower().endswith(config.options.replay_tracking_expected_tracker_file):\n invalid_line(line, 'no query string, or ' + hit.path.lower() + ' does not end with piwik.php')\n continue\n\n query_arguments = urlparse.parse_qs(hit.query_string)\n if not \"idsite\" in query_arguments:\n invalid_line(line, 'missing idsite')\n continue\n\n try:\n hit.args.update((k, v.pop().encode('raw_unicode_escape').decode(config.options.encoding)) for k, v in query_arguments.iteritems())\n except UnicodeDecodeError:\n invalid_line(line, 'invalid encoding')\n continue\n\n hits.append(hit)\n if len(hits) >= config.options.recorder_max_payload_size * len(Recorder.recorders):\n Recorder.add_hits(hits)\n hits = []\n if len(hits) > 0:\n Recorder.add_hits(hits)", "def commits_parsing(query):\n logging.info(\"GET request commit parsing is working\")\n results = {}\n list_of_commits = []\n clear_list_message = []\n clear_list_committer = []\n json_commits = {}\n json_all = {}\n for single_query in query:\n list_of_commits += {single_query[:-6]}\n\n try:\n results = requests.get(single_query[:-6])\n except requests.ConnectionError as exception:\n return f'{exception}'\n\n json_all = results.json()[0]\n\n json_commits = json_all['commit']\n clear_list_message += {json_commits['message']}\n clear_list_committer += {json_commits['committer']['name']}\n\n return 
clear_list_message, clear_list_committer", "def _parse_diff(commit_sha1):\n class DiffState(object):\n START = 0\n DIFF_BLOCK_LINE = 1\n INDEX_LINE = 2\n A_LINE = 3\n B_LINE = 4\n AT_LINE = 5\n DIFF_LINES = 6\n\n diff_cmd = shlex.split('git show {commit_sha1}'.format(\n commit_sha1=commit_sha1))\n diff_output = subprocess.check_output(diff_cmd)\n\n diff_lines = set()\n state = DiffState.START\n for line in diff_output.splitlines():\n if state in [DiffState.START, DiffState.DIFF_LINES] and line.startswith('diff '):\n state = DiffState.DIFF_BLOCK_LINE\n continue\n\n if state == DiffState.DIFF_BLOCK_LINE and line.startswith('index '):\n state = DiffState.INDEX_LINE\n continue\n\n if state == DiffState.INDEX_LINE and line.startswith('--- '):\n state = DiffState.A_LINE\n continue\n\n if state == DiffState.A_LINE and line.startswith('+++ '):\n state = DiffState.B_LINE\n continue\n\n if state in [DiffState.B_LINE, DiffState.DIFF_LINES] and line.startswith('@@ '):\n state = DiffState.AT_LINE\n continue\n\n if state in [DiffState.AT_LINE, DiffState.DIFF_LINES] and (\n line.startswith(('+', '-', ' '))):\n state = DiffState.DIFF_LINES\n\n if line.startswith(' '):\n continue\n diff_lines.add(line)\n continue\n\n state = DiffState.START\n return diff_lines", "def gitLogValue(format,directory):\n return subprocess.check_output([\"git\",\"log\",\"-1\",\"--pretty=format:%\"+format],cwd=directory).strip()", "def _get_commit_info(commit: git.Commit, pretty_format: str) -> str:\n try:\n return commit.repo.git.show(commit.hexsha, pretty=f\"format:{pretty_format}\")\n except git.GitCommandError as error:\n raise PackitException(\n f\"Cannot find commit {commit.hexsha!r} to check its signature.\", error\n )", "def parse(args):\n # create the parser with the default help formatter\n gg_parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n\n # do not display the welcome message\n # CORRECT WHEN: always, only changes output on screen\n gg_parser.add_argument(\n \"--nowelcome\", help=\"do not display the welcome message\", action=\"store_true\"\n )\n\n # output reports in JSON\n # CORRECT WHEN: always, only changes report output\n gg_parser.add_argument(\"--json\", help=\"print reports in JSON\", action=\"store_true\")\n\n # Top-Level Arguments {{{\n\n # specify a check for the number of commits in the Git repository\n # CORRECT WHEN: user provides this argument but not any other main arguments\n gg_parser.add_argument(\"--commits\", type=int, help=\"minimum number of git commits\")\n\n # specify a single file and a single directory\n # CORRECT WHEN: user provides both of these\n gg_parser.add_argument(\n \"--directory\", type=str, metavar=\"DIR\", help=\"directory with file for checking\"\n )\n gg_parser.add_argument(\"--file\", type=str, help=\"file for checking\")\n\n # Ancillary Arguments for File and Directory {{{\n\n # do not display the welcome message\n # CORRECT WHEN: user provides file and directory along with this argument\n gg_parser.add_argument(\n \"--exists\", help=\"does a file in a directory exist\", action=\"store_true\"\n )\n\n # specify a check on single-line comments\n # CORRECT WHEN: user provides file and directory along with this argument\n gg_parser.add_argument(\n \"--single\", type=int, metavar=\"COUNT\", help=\"minimum number of single comments\"\n )\n\n # specify a check on multiple-line comments\n # CORRECT WHEN: user provides file and directory along with this argument\n gg_parser.add_argument(\n \"--multiple\", type=int, metavar=\"COUNT\", 
help=\"minimum number of multi comments\"\n )\n\n # specify that the comment checks are for a certain language\n # CORRECT WHEN: user provides file and directory along with this argument\n gg_parser.add_argument(\n \"--language\",\n type=str,\n choices=[JAVA, PYTHON],\n help=\"language for the single comments\",\n )\n\n # specify a check on paragraphs\n # CORRECT WHEN: user provides file and directory along with this argument\n gg_parser.add_argument(\n \"--paragraphs\", metavar=\"COUNT\", type=int, help=\"minimum number of paragraphs\"\n )\n\n # specify a check on words\n # note that sentences are no longer supported so, a \"dest\" given\n # CORRECT WHEN: user provides file and directory along with this argument\n gg_parser.add_argument(\n \"--words\", type=int, help=\"minimum number of words in paragraphs\"\n )\n\n # }}}\n\n # specify a command to run for checking\n # CORRECT WHEN: user provides this argument but not a file or directory\n gg_parser.add_argument(\"--command\", type=str, help=\"command to run\")\n\n # execute the specified command\n # CORRECT WHEN: user provides a command to run along with this argument\n gg_parser.add_argument(\n \"--executes\", help=\"does a command execute without error\", action=\"store_true\"\n )\n\n # Ancillary Arguments for File or Commands {{{\n\n # specify a check on fragments\n # CORRECT WHEN: user provides file and directory along with this argument\n # or\n # CORRECT WHEN: user provides a command along with this argument\n gg_parser.add_argument(\n \"--fragment\", type=str, help=\"fragment that exists in code or output\"\n )\n\n # specify a check on fragments\n # CORRECT WHEN: user provides file and directory along with this argument\n # or\n # CORRECT WHEN: user provides a command along with this argument\n gg_parser.add_argument(\n \"--count\", type=int, metavar=\"COUNT\", help=\"how many of an entity should exist\"\n )\n\n # }}}\n\n # Ancillary Arguments for Any Counting of Entities {{{\n\n # perform exact checking for entity counting (i.e,. 
\"==\" instead of \">=\")\n # CORRECT WHEN: user provides file and directory along with this argument\n gg_parser.add_argument(\n \"--exact\", help=\"equals instead of a minimum number\", action=\"store_true\"\n )\n\n # }}}\n\n # call argparse's parse_args function and return result\n gg_arguments_finished = gg_parser.parse_args(args)\n return gg_arguments_finished", "def parse_raw_entry(raw_entry):\n entry_start = raw_entry[0]\n\n # get the timestamp\n ts_len = 23\n ts = entry_start[:ts_len]\n # get the IP, if there is one\n idx = entry_start.find(' ', ts_len+1)\n ip = entry_start[ts_len+1:idx]\n # get the database, if there is one\n consumed = idx\n idx = entry_start.find(' ', consumed+1)\n db = entry_start[consumed+1:idx]\n # get the log type\n consumed = idx\n idx = entry_start.find(' ', consumed+1)\n type = entry_start[consumed+1:idx]\n # finally, combined the message\n consumed = idx\n remaining = entry_start[consumed+1:]\n foo = [remaining]\n foo.extend(raw_entry[1:])\n msg = ''.join(foo).strip()\n\n return Entry(ts, ip, db, type, msg)", "def version_check():\n try:\n with open('git.json', 'r') as fp:\n git_md = json.loads(fp.read())\n except IOError:\n # In the event that there is no git metadata, just print null values\n # twice.\n print \"null\"\n print \"null\"\n return\n\n if git_md['GitHub']:\n if git_md['GitHubUser'] is not None and git_md[\n 'GitHubRepo'] is not None:\n latest_release = json.loads(\n urllib2.urlopen(\n \"https://api.github.com/repos/%s/%s/releases/latest\" % (\n git_md['GitHubUser'], git_md['GitHubRepo'])).read())\n latest_tag = latest_release['tag_name']\n\n # Go through all of the tags to see if this commit matches a tag.\n tags = json.loads(\n urllib2.urlopen(\n \"https://api.github.com/repos/%s/%s/git/refs/tags\" % (\n git_md['GitHubUser'], git_md['GitHubRepo'])).read())\n\n current_tag = \"Unreleased\"\n for tag in tags:\n if tag['object']['sha'] == git_md['GitSHA']:\n current_tag = tag['ref'].split('/')[-1]\n\n print current_tag\n print latest_tag\n else:\n print \"MissingGitHubDetails\"\n print \"MissingGitHubDetails\"\n else:\n # In the event that there is a git file, but it doesn't indicate GitHub\n # then just print some stuff indicating that.\n print \"NonGitHub\"\n print \"NonGitHub\"", "def get_commit_message():\n return shell_output('git log HEAD -1 --pretty=%B')", "def rev_parse(commit_ish, short=False):\n args = [\"--short\"] if short else []\n return (\n subprocess.check_output([\"git\", \"rev-parse\"] + args + [commit_ish])\n .decode()\n .strip()\n )", "def _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body):\n errors = []\n\n # List of words a commit title can start with\n commit_title_start_words = filter(\n lambda x: x, COMMIT_TITLE_START_WORDS.splitlines())\n\n author_errors = _validate_email(author, 'Author')\n committer_errors = _validate_email(committer, 'Committer')\n\n if author_errors:\n errors.extend(author_errors)\n if committer_errors:\n errors.extend(committer_errors)\n\n title_words = title.split(' ', 1)\n\n # Check if in imperative tense\n if re.search(r'(ed|ing|s)$', title_words[0]):\n errors.append((\n 'title-imperative-tense-check',\n 'Commit title is not in imperative tense'))\n\n # Check if first word is capitalized\n if re.match(r'^[^A-Z]', title_words[0]):\n errors.append((\n 'title-capitalization-check',\n 'Commit title is not capitalized'))\n\n # Check if title begins with known start word\n if title_words[0] not in commit_title_start_words:\n errors.append((\n 'title-verb-check',\n 
'Commit title does not begin with a verb'))\n\n # Check if this is a fixup! commit\n if re.match(r'^fixup!', title_words[0]):\n errors.append((\n 'title-fixup-check',\n 'Commit title starts with fixup! '))\n\n # Check if this is a squash! commit\n if re.match(r'^squash!', title_words[0]):\n errors.append((\n 'title-squash-check',\n 'Commit title starts with squash! '))\n\n # Check if the commit title ends in whitespace or punctuation\n if len(title_words) > 1 and re.search(r'[\\s\\W]$', title_words[1]):\n errors.append((\n 'title-whitespace-punctuation-check',\n 'Commit title ends in whitespace or punctuation'))\n\n # Check if the title is greater than 50 characters in length\n if len(title) > 50:\n errors.append((\n 'title-length-check',\n 'Commit title longer than 50 characters'))\n\n # Check if separator line (between title and body) is empty\n if separator is not None and separator != '':\n errors.append((\n 'message-separator-check',\n 'Missing blank line between title and body'))\n\n # Check if the commit message has a body\n if body == []:\n errors.append((\n 'body-check',\n 'Missing commit message body'))\n\n # Check if any line in the body is greater than 72 characters in legnth\n for body_line in body:\n if len(body_line) <= 72:\n continue\n errors.append((\n 'body-length-check',\n 'Commit message body line > 72 characters'))\n break\n\n # Check if commit is a merge commit\n if merge is not None:\n errors.append((\n 'commit-merge-check',\n 'Commit is a merge commit'))\n\n # Check commit diff for whitespace errors\n git_diff_cmd = shlex.split(\n 'git show --check {commit_sha1}'.format(\n commit_sha1=commit_sha1))\n\n has_whitespace_issue = None\n f, _ = tempfile.mkstemp()\n has_whitespace_issue = subprocess.call(git_diff_cmd,\n stdout=f, stderr=f, close_fds=True)\n os.close(f)\n\n if has_whitespace_issue:\n errors.append((\n 'diff-whitespace-check',\n 'Commit diff has whitespace issues'))\n\n return errors", "def read_commit(self, form):\n\n db = PostsaiDB(self.config)\n db.connect()\n sql = \"\"\"SELECT repositories.repository, checkins.ci_when, people.who,\n trim(leading '/' from concat(concat(dirs.dir, '/'), files.file)),\n revision, descs.description, commitids.hash, commitids.co_when, repository_url\n FROM checkins\n JOIN descs ON checkins.descid = descs.id\n JOIN dirs ON checkins.dirid = dirs.id\n JOIN files ON checkins.fileid = files.id\n JOIN people ON checkins.whoid = people.id\n JOIN repositories ON checkins.repositoryid = repositories.id\n JOIN commitids ON checkins.commitid = commitids.id\n WHERE repositories.repository = %s AND commitids.hash = %s \"\"\"\n data = [form.getfirst(\"repository\", \"\"), form.getfirst(\"commit\", \"\")]\n result = db.query(sql, data)\n db.disconnect()\n return result", "def aggregate_git_log(path, progress_callback=lambda progress: None):\n versions = list()\n\n current_version, current_commits = None, list()\n\n log_data = git_log_hash(path)\n log_length = len(log_data)\n progress_step = max(1, log_length / 100)\n \n for idx, (rev_hash, date, msg) in enumerate(log_data):\n if idx % progress_step == 0:\n progress_callback(float(idx) / log_length)\n \n current_commits.append(msg)\n if git_checkout(path=path, revision_hash=rev_hash):\n version = get_package_metadata(path=path, field_name='Version')\n if version != current_version:\n # memorize it\n versions.insert(0,\n dict(version=version,\n date=datetime.strptime(date.rsplit(' ', 1)[0], '%Y-%m-%d %H:%M:%S'),\n sections=[dict(notes='',\n items=list(reversed(current_commits)))]))\n\n 
current_version, current_commits = version, list()\n\n if current_commits:\n versions.insert(0,\n dict(version='newest',\n date=None,\n sections=[dict(notes='',\n items=list(reversed(current_commits)))]))\n\n return versions", "def html_message_formatter(mode, name, build, results, master_status):\n result = Results[results]\n\n limit_lines = 80\n text = list()\n text.append(u'<h4>Build status: %s</h4>' % result.upper())\n text.append(u'<table cellspacing=\"10\"><tr>')\n text.append(u\"<td>Buildslave for this Build:</td><td><b>%s</b></td></tr>\" % build.getSlavename())\n if master_status.getURLForThing(build):\n text.append(u'<tr><td>Complete logs for all build steps:</td><td><a href=\"%s\">%s</a></td></tr>'\n % (master_status.getURLForThing(build),\n master_status.getURLForThing(build))\n )\n text.append(u'<tr><td>Build Reason:</td><td>%s</td></tr>' % build.getReason())\n source = u\"\"\n for ss in build.getSourceStamps():\n if ss.codebase:\n source += u'%s: ' % ss.codebase\n if ss.branch:\n source += u\"[branch %s] \" % ss.branch\n if ss.revision:\n source += ss.revision\n else:\n source += u\"HEAD\"\n if ss.patch:\n source += u\" (plus patch)\"\n if ss.patch_info: # add patch comment\n source += u\" (%s)\" % ss.patch_info[1]\n text.append(u\"<tr><td>Build Source Stamp:</td><td><b>%s</b></td></tr>\" % source)\n text.append(u\"<tr><td>Blamelist:</td><td>%s</td></tr>\" % \",\".join(build.getResponsibleUsers()))\n text.append(u'</table>')\n if ss.changes:\n text.append(u'<h4>Recent Changes:</h4>')\n for c in ss.changes:\n cd = c.asDict()\n when = datetime.datetime.fromtimestamp(cd['when'] ).ctime()\n text.append(u'<table cellspacing=\"10\">')\n text.append(u'<tr><td>Repository:</td><td>%s</td></tr>' % cd['repository'] )\n text.append(u'<tr><td>Project:</td><td>%s</td></tr>' % cd['project'] )\n text.append(u'<tr><td>Time:</td><td>%s</td></tr>' % when)\n text.append(u'<tr><td>Changed by:</td><td>%s</td></tr>' % cd['who'] )\n text.append(u'<tr><td>Comments:</td><td>%s</td></tr>' % cd['comments'] )\n text.append(u'</table>')\n files = cd['files']\n if files:\n text.append(u'<table cellspacing=\"10\"><tr><th align=\"left\">Files</th></tr>')\n for file in files:\n text.append(u'<tr><td>%s:</td></tr>' % file['name'] )\n text.append(u'</table>')\n text.append(u'<br>')\n # get all the steps in build in reversed order\n rev_steps = reversed(build.getSteps())\n # find the last step that finished\n for step in rev_steps:\n if step.isFinished():\n break\n # get logs for the last finished step\n if step.isFinished():\n logs = step.getLogs()\n # No step finished, loop just exhausted itself; so as a special case we fetch all logs\n else:\n logs = build.getLogs()\n # logs within a step are in reverse order. 
Search back until we find stdio\n for log in reversed(logs):\n if log.getName() == 'stdio':\n break\n name = \"%s.%s\" % (log.getStep().getName(), log.getName())\n status, dummy = log.getStep().getResults()\n content = log.getText().splitlines() # Note: can be VERY LARGE\n url = u'%s/steps/%s/logs/%s' % (master_status.getURLForThing(build),\n log.getStep().getName(),\n log.getName())\n\n text.append(u'<i>Detailed log of last build step:</i> <a href=\"%s\">%s</a>'\n % (url, url))\n text.append(u'<br>')\n text.append(u'<h4>Last %d lines of \"%s\"</h4>' % (limit_lines, name))\n unilist = list()\n for line in content[len(content)-limit_lines:]:\n unilist.append(cgi.escape(unicode(line,'utf-8')))\n text.append(u'<pre>')\n text.extend(unilist)\n text.append(u'</pre>')\n text.append(u'<br><br>')\n text.append(u'<b>-The Buildbot</b>')\n return {\n 'body': u\"\\n\".join(text),\n 'type': 'html'\n }", "def get_commit_change_stats(self, commit_url='', full_name='', commit_sha=''):\n if commit_url == '' and (commit_sha == '' and full_name == ''):\n raise BaseException('commit url could not be generated. Commit url, commit sha and full name not set')\n return None\n url = commit_url\n if url == '':\n url = COMMIT_DETAILS.format(commit_sha=commit_sha, full_name=full_name)\n url = self.get_full_url(url)\n\n json_data = loads(self.get_from_net(url))\n stats = {'additions': 0, 'deletions': 0}\n if 'stats' in json_data:\n stats['additions'] = json_data['stats']['additions']\n stats['deletions'] = json_data['stats']['deletions']\n\n return stats", "def getCommitsSinceLastRelease(self):\n f = open(self.last_released, 'r')\n old_rev = f.read().replace('\\n', '')\n f.close()\n new_rev = commands.getoutput('cd '+self.proj_dir+' && git log -1 --format=%H')\n cmd = 'cd '+self.proj_dir+' && git log --no-merges --pretty=format:\"%s\" '+old_rev+'..'+new_rev\n unreleased_commits = commands.getoutput(cmd) \n print 'Commits since last release:'\n print unreleased_commits\n unreleased_commits = unreleased_commits.split('\\n')\n self.commit_msgs = unreleased_commits\n self.new_rev = new_rev", "def get_commits(git_path):\n\n proc = subprocess.Popen(\n [\"git\", \"--git-dir=%s\" % git_path, \"log\", \"--full-history\",\n \"--format=NEW COMMIT%n%ct%n%aN%n%aE\", \"--numstat\"],\n stdout=subprocess.PIPE)\n line_stack = []\n\n def peek_line():\n if not line_stack:\n line_stack.append(proc.stdout.readline())\n return line_stack[-1]\n\n def pop_line():\n if line_stack:\n return line_stack.pop()\n return proc.stdout.readline()\n\n def push_line(line):\n line_stack.append(line)\n\n def read_commit():\n while peek_line() and not peek_line().strip():\n pop_line()\n if not peek_line(): return None\n assert peek_line().strip() == \"NEW COMMIT\"\n pop_line()\n\n date = int(pop_line())\n name = pop_line().strip()\n email = pop_line().strip()\n author = sanitize_author(name, email)\n\n if peek_line().strip() == \"NEW COMMIT\":\n return date, author, 0, 0, 0\n\n pop_line()\n insertion_count = 0\n deletion_count = 0\n file_count = 0\n while peek_line().strip() and peek_line().strip() != \"NEW COMMIT\":\n insertions, deletions, path = pop_line().strip().split(None, 2)\n if insertions == \"-\": insertions = 0\n if deletions == \"-\": deletions = 0\n insertion_count += int(insertions)\n deletion_count += int(deletions)\n file_count += 1\n\n return date, author, insertion_count, deletion_count, file_count\n\n while True:\n commit = read_commit()\n if commit is None:\n break\n yield commit", "def test_get_git_commit(self):\n git_commit = 
get_git_commit()\n # output format: ['fafdb957049917ede565cebc58b29899f597fb5a', 'Fri Mar 29 11:09:50 2019 -0400']\n self.assertEqual(len(git_commit[0]), 40)\n self.assertEqual(len(git_commit[1].split()), 6)", "def get_contact(path, sha):\n contact = None\n email = None\n\n cmd = ['git', '-C', path, 'log', '--format=%B', '-n', '1', sha]\n commit_message = subprocess.check_output(\n cmd, encoding='utf-8', errors='ignore')\n tags = 'Signed-off-by|Commit-Queue|Tested-by'\n domains = 'chromium.org|google.com|collabora.com'\n m = '^(?:%s): (.*) <(.*@(?:%s))>$' % (tags, domains)\n emails = re.findall(m, commit_message, re.M)\n if emails:\n contact, email = emails[0]\n else:\n tags = 'Reviewed-by'\n m = '^(?:%s): (.*) <(.*@(?:%s))>$' % (tags, domains)\n emails = re.findall(m, commit_message, re.M)\n if emails:\n contact, email = emails[-1]\n return contact, email", "def lint_commit_message(commit):\n success = True\n lines = commit.message.splitlines()\n\n # Check length of summary line.\n summary_line_len = len(lines[0])\n if summary_line_len > COMMIT_MSG_MAX_SUMMARY_LEN:\n error(\n \"The summary line in the commit message is %d characters long; \"\n \"only %d characters are allowed.\" %\n (summary_line_len, COMMIT_MSG_MAX_SUMMARY_LEN), commit)\n success = False\n\n # Check that summary line does not end with a period\n if lines[0].endswith('.'):\n error(\"The summary line must not end with a period.\", commit)\n success = False\n\n # Check that we don't have any fixups.\n if lines[0].startswith('fixup!'):\n error(\"Fixup commits are not allowed. Please resolve by rebasing.\",\n commit)\n success = False\n\n # Try to determine whether we got an area prefix in the commit message:\n summary_line_split = lines[0].split(':')\n summary_line_split_len = len(summary_line_split)\n\n # We didn't get an area prefix, so just make sure the message started with a\n # capital letter.\n if summary_line_split_len == 1:\n if not re.match(r'[A-Z]', lines[0]):\n error(\"The summary line must start with a capital letter.\", commit)\n success = False\n # The user specified an area on which she worked.\n elif summary_line_split_len == 2:\n if not re.match(r'[a-z_A-Z\\-]*(/[a-z_A-Z\\-]+)*', summary_line_split[0]):\n error(\n 'The area specifier is mal-formed. Only letters,'\n 'underscores and hyphens are allowed. Different areas must be'\n 'separated by a slash.', commit)\n success = False\n # Check the second part of the commit message.\n if not summary_line_split[1].startswith(' '):\n error(\"The area must be separated by a single space.\", commit)\n success = False\n if not re.match(r'\\s[A-Z]', summary_line_split[1]):\n error(\n \"The summary line after the colon must start with a capital letter.\",\n commit)\n success = False\n # We do not allow more than one area i.e., colon.\n else:\n error(\"Only one colon is allowed to specify the area of changes.\",\n commit)\n success = False\n\n # Check for an empty line separating the summary line from the long\n # description.\n if len(lines) > 1 and lines[1] != \"\":\n error(\n \"The second line of a commit message must be empty, as it \"\n \"separates the summary from the long description.\", commit)\n success = False\n\n return success" ]
[ "0.6421075", "0.5869288", "0.5712835", "0.57056826", "0.5663615", "0.5649697", "0.56157106", "0.55999523", "0.55940133", "0.547705", "0.5421677", "0.5412499", "0.5387538", "0.530713", "0.52561617", "0.5245139", "0.5184921", "0.51770186", "0.51596844", "0.51574826", "0.51415545", "0.513402", "0.5129565", "0.5126661", "0.5117708", "0.5115702", "0.51151145", "0.51037455", "0.5096013", "0.5087188" ]
0.6594322
0
Check the SQL database version and print it to the console.
def version(self): self.cursor.execute("SELECT VERSION()") # Fetch a single row using fetchone() method. data = self.cursor.fetchone() print("Database version : %s " % data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check():\n conn = None\n try:\n # read connection parameters\n params = config()\n \n # connect to the PostgreSQL server\n print('Connecting to the PostgreSQL database...')\n conn = psycopg2.connect(**params)\n \n # create a cursor\n cur = conn.cursor()\n \n # execute a statement\n click.secho('PostgreSQL database version:', fg='yellow')\n cur.execute('SELECT version()')\n \n # display the PostgreSQL database server version\n db_version = cur.fetchone()\n print(db_version)\n \n # close the communication with the PostgreSQL\n cur.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\n click.secho('Database connection closed.', fg='yellow')", "def inspect(self):\n self.db.connect()\n result = None\n try:\n jambi_versions = JambiModel.select().limit(1)\n if any(jambi_versions):\n field = jambi_versions[0].ref\n try:\n result = int(field)\n except ValueError:\n self.logger.error('Database current version \"{}\" is not '\n 'valid'.format(jambi_versions[0].ref))\n self.logger.info('Your database is at version '\n '{}'.format(field))\n else:\n self.logger.info('This database hasn\\'t been migrated yet')\n except ProgrammingError:\n self.logger.info('Run \"init\" to create a jambi version table')\n finally:\n self.db.close()\n return result", "def version(self):\r\n print migration.db_version()", "def test_version_01(self):\n\n version = self.sqlbak([\"--version\"])\n self.assertTrue(\"sqlbak v\" in version)", "def db_version():\n return IMPL.db_version()", "def database_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database_version\")", "def sql_version(connection):\n cursor = connection.cursor()\n cursor.execute(\"SELECT ecs.versionTable.version FROM ecs.versionTable;\")\n for ver in cursor.fetchone():\n version = ver\n cursor.close()\n return version", "def detect_version(conn):\n try:\n with conn.begin():\n db_version = conn.scalar(text(\n \"SELECT version FROM configuration\"))\n except exc.ProgrammingError:\n with conn.begin():\n packages_exists = bool(conn.scalar(text(\n \"SELECT 1 FROM pg_catalog.pg_tables \"\n \"WHERE schemaname = 'public' AND tablename = 'packages'\")))\n with conn.begin():\n statistics_exists = bool(conn.scalar(text(\n \"SELECT 1 FROM pg_catalog.pg_views \"\n \"WHERE schemaname = 'public' AND viewname = 'statistics'\")))\n with conn.begin():\n files_exists = bool(conn.scalar(text(\n \"SELECT 1 FROM pg_catalog.pg_tables \"\n \"WHERE schemaname = 'public' AND tablename = 'files'\")))\n if not packages_exists:\n # Database is uninitialized\n return None\n elif not files_exists:\n # Database is too ancient to upgrade\n raise RuntimeError(\"Database version older than 0.4; cannot upgrade\")\n elif not statistics_exists:\n return \"0.4\"\n else:\n return \"0.5\"\n else:\n return db_version", "def test_db_connection(self):\n try:\n database = Database()\n database.get_server_version()\n except (Exception) as error:\n logging.error(\"\\n\\nConnection to postgresql\"\n \" failed with error: {}\\n\\n\".format(error))\n assert(False)", "def db_version():\n\n headers = {\n 'accept': 'text/plain',\n }\n\n try:\n response = requests.get('https://reactome.org/AnalysisService/database/version', headers=headers)\n except ConnectionError as e:\n print(e)\n\n if response.status_code == 200:\n return response.text\n else:\n print('Status code returned a value of %s' % response.status_code)", "def confirm_schema_match():\n\n db_version = get_db_version()\n if not isinstance(db_version, int) or 
db_version > CURRENT_DATABASE_VERSION:\n logging.error('The stored db schema version of %s is incompatible with required version %s',\n str(db_version), CURRENT_DATABASE_VERSION)\n sys.exit(43)\n elif db_version < CURRENT_DATABASE_VERSION:\n sys.exit(42)\n else:\n sys.exit(0)", "def database_version(self) -> str:\n return pulumi.get(self, \"database_version\")", "def version(verbose: bool) -> None:\n print(Fore.BLUE + '==' * 15)\n print(\n Fore.YELLOW + 'Raven ' + Fore.CYAN + '0.1-dev'\n )\n print(Fore.BLUE + '==' * 15)\n if verbose:\n print(f'[DB]: {db.engine}')\n print(Style.RESET_ALL)", "def db_version(engine):\n return IMPL.db_version(engine)", "def test1_version(self):\n lVersion = rdbhdb.__version__.split('.')\n nVersion = need_version.split('.')\n self.assert_(lVersion >= nVersion, rdbhdb.__version__)", "def __get_db_version_int(self):\r\n query = QtSql.QSqlQuery('PRAGMA user_version')\r\n query.first()\r\n return query.value(0).toInt()[0]", "def database_installed_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database_installed_version\")", "def query_version(self):\n return self.connection.cursor().execute('SELECT version()').fetchone()[0]", "def test(self):\r\n self.log.debug(\"connection test using version query with adapter %s...\", self.adapter)\r\n try:\r\n res = self.get('SELECT * FROM Package WHERE PackageID=\\'179\\'') # 'SELECT Family FROM Version'\r\n if not res:\r\n self.log.critical('no results, database problem.')\r\n return False\r\n else:\r\n self.log.info('connection successful.')\r\n return True\r\n except:\r\n self.log.critical('connection not possible, check host/user/pwd configuration')\r\n return False", "def database_version(self) -> Optional[pulumi.Input['InstanceDatabaseVersion']]:\n return pulumi.get(self, \"database_version\")", "def print_postgresql_status():\n exit_code = 0\n result = check_postgres()\n\n\n if result == False:\n print('[{red}-{white}] Could not connect to PostgreSQL, please check if database is running'\\\n .format(red=Fore.RED, white=Fore.WHITE))\n exit_code = 1\n return exit_code\n elif result == None:\n print('[{red}-{white}] Database not initialized. 
Execute: faraday-manage initdb'\\\n .format(red=Fore.RED, white=Fore.WHITE))\n exit_code = 1\n return exit_code\n elif int(result[1][0])<90400:\n print('[{red}-{white}] PostgreSQL is running, but needs to be 9.4 or newer, please update PostgreSQL'.\\\n format(red=Fore.RED, white=Fore.WHITE))\n elif result:\n print('[{green}+{white}] PostgreSQL is running and up to date'.\\\n format(green=Fore.GREEN, white=Fore.WHITE))\n return exit_code", "def print_version():\n print(FULMAR_VERSION_STR)\n return", "def show_version(self):\n from goulash import __version__\n print 'goulash=={0}'.format(__version__)", "def database_installed_version(self) -> str:\n return pulumi.get(self, \"database_installed_version\")", "def test_print_database():\n db_conn = conn_to_db('optwrf.db')\n print_database(db_conn)\n close_conn_to_db(db_conn)", "def test_show_version():\n result = runner.invoke(app, [\"--version\"])\n assert result.exit_code == 0\n assert \"Confluence poster version\" in result.stdout", "def print_version():\n print(\"1.0\")", "def __get_dbms_version(self, make_connection=True):\r\n if not self.connection and make_connection:\r\n self.connect()\r\n return self.connection.adoConnProperties.get('DBMS Version', '') if self.connection else ''", "def update_db_version():\n print(\"Checking Database states...\")\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"ADSM.settings\")\n try:\n call_command('migrate', database='scenario_db', interactive=False, fake_initial=True)\n call_command('migrate', database='default', interactive=False, fake_initial=True)\n except:\n print(\"Error: Migration failed.\")\n print('Done migrating databases.')", "def get_tgis_db_version():\n global tgis_db_version\n return tgis_db_version" ]
[ "0.74608433", "0.745182", "0.72382915", "0.7159295", "0.7049766", "0.70265293", "0.6929252", "0.68626547", "0.68565637", "0.679402", "0.6749475", "0.670352", "0.66886044", "0.6532294", "0.64263856", "0.64192784", "0.6319371", "0.6301346", "0.62995845", "0.6246262", "0.6200538", "0.61629504", "0.61368036", "0.613507", "0.61000216", "0.609855", "0.6082422", "0.6080542", "0.6069946", "0.6042596" ]
0.7676812
0
Insert a record into the MySQL Database.
def insert(self, sql): try: # Execute the SQL command self.cursor.execute(sql) # Commit your changes in the database self.db.commit() except: # Rollback in case there is any error self.db.rollback()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insert(self):\n self.getDbRecord().insert()\n\n return", "def insert_record(self, record, session):\n try:\n session.add(record)\n session.commit()\n session.close()\n return True\n except:\n\n logging.exception(\"http record cannot be added to db \" \":Time: \" + str(datetime.datetime.now()))\n return False", "def insert_record(self):\r\n try:\r\n db.session.add(self)\r\n db.session.commit()\r\n return {\"error\": False, \"id\": self.id}\r\n except exc.SQLAlchemyError as e: # pragma: no cover\r\n # print(e)\r\n # print(sys.exc_info())\r\n db.session.rollback()\r\n return {\"error\": True}\r\n finally:\r\n db.session.close()", "def insert(self):\n sql = u'INSERT INTO %s' % self.table()\n keys = []\n values = []\n format_values = []\n for field in self.fields():\n attr = object.__getattribute__(self, field)\n if attr.auto_value:\n continue\n keys.append(field)\n format_values.append(attr.format)\n values.append(attr._value)\n keys_str = u'( %s )' % u', '.join(keys)\n values_str = u'VALUES( %s )' % u', '.join(format_values)\n sql = '%s %s %s;' % (sql, keys_str, values_str)\n connection.execute(sql, values)\n primary_k = self.__class__.get_primary()\n primary = object.__getattribute__(self, primary_k)\n primary.value = connection.connection.insert_id()", "def insert(self, query):\n try:\n self.cursor.execute(query)\n self.connection.commit()\n\n except MySQLdb.Error as e:\n self.connection.rollback()\n try:\n print(\"MySQL Error {}: {}\".format(e.args[0], e.args[1]))\n except IndexError:\n print(\"MySQL Error: {}\".format(str(e)))", "def add_record(self, record):\n logging.debug('Adding new entry to table')\n if not self._dbconnect or not self._cursor:\n raise Exception('Invalid call to Context Manager method!')\n\n date = record.get('date', '')\n time = record.get('time', '')\n location = record.get('location', '')\n node_id = record.get('nodeID', '')\n\n if '' in (date, time, node_id, location):\n raise Exception('Invalid SecuritySystemDB record!')\n\n self._cursor.execute(\"insert into {} values(?, ?, ?, ?)\".format(self._name),\n (date, time, location, node_id))", "def insert(self):\n db.session.add(self)\n db.session.commit()", "def insert(self):\n db.session.add(self)\n db.session.commit()", "def insert(self):\n db.session.add(self)\n db.session.commit()", "def add_record(self, table_name, **kwargs):\n\n if not self.open:\n print(\"Not currently connected to a DB.\")\n return False\n\n\n fields = \", \".join([str(f) for f in kwargs.keys()])\n values = \", \".join([str(v) for v in kwargs.values()])\n q = \"INSERT INTO {tn}({columns}) VALUES ({values})\"\n self.query = q.format(tn=table_name,\n columns=fields,\n values=values)\n\n # try:\n self.cursor.execute(self.query)\n print(\"{}\\n inserted into {} table.\".format(values, table_name))\n return True\n # except Exception as error:\n # print(\"Failed to add {} to {} table.\".format(values, table_name))\n # print(\"SQL Query: \\n{}\\n\".format(self.query))\n # print(\"Exception: \\n{}\".format(error))\n\n # return False", "def insert_to_db(self, query):\n try:\n q = self.connection.execute(query)\n except Exception:\n self.print_std_error()", "def execute_insert(self,insert):\n try:\n self.cursor.execute(insert)\n self.connection.commit()\n except Exception as error:\n self.connection.rollback()\n raise error", "def insert_to_db(self) -> None:\n query = \"\"\"INSERT INTO Users(Username, Password, Firstname, Surname, Currency_id,\n Has_First_Sign_In, Account_Created, Last_Sign_In)\n VALUES(?,?,?,?,?,?,?,?);\"\"\"\n 
self.db.commit(query, values=self.to_tuple())", "def insert(self, table_name, fields):\n LOGGER.debug(\"%r: Inserting %r\" % (table_name, fields))\n return self.db.table(table_name).insert(fields)", "def sql_insert(self, sqlstr):\n get_connection().insert_raw(sqlstr)\n return 1", "def insert_row(self, tablename, fields):\n insert_params = \"(\" + \",\".join(['?' for x in fields]) + \")\"\n self.cursor.execute(\"insert into \" + tablename + \" values \" +\n insert_params, fields)", "def insert_to_db(self, query):\n self.cursor.execute(query)\n self.conn.commit()\n return self", "def insert_db(table, schema, value):\n cursor.execute(schema, value)\n db.commit()\n print(cursor.rowcount, \"record inserted into db: \" + table)", "def insert(title, author, year, isbn,shelf,raw):\n\n conn_obj = mysql.connector.connect(host='localhost',database='mydb',user='root',password='kks')\n cur_obj = conn_obj.cursor()\n sql=\"INSERT INTO book (title, author, year, isbn,shelf,raw) VALUES(%s, %s, %s, %s, %s, %s)\"\n cur_obj.execute(sql,(title, author, year, isbn,shelf,raw))\n conn_obj.commit()\n conn_obj.close()", "def do_insert_data(self, *args):\n print(\"Provide data to insert\")\n self.connection_obj.insert_into_table(**self.__class__.populate_data())\n print(\"Data Insertion Successful\")", "def add_entry_to_db(entry):\n db.session.add(entry)\n db.session.commit()", "def insert(q, *params):\n db = Database()\n db.cur.execute(q, *params)\n ret_id = db.cur.lastrowid\n db.con.close()\n return ret_id", "def insert(self, fields, values):\n sql = self.generate_insert_sql(fields, values)\n self.sqlhistory.append(sql)\n return self.sql_insert(sql)", "def insert_data(self, row, table_fields_names, table_fields_types):\n\n\t\tquery = ''\n\n\t\ttry:\t\t\t\t\n\t\t\tquery = self.form_insert_query(TABLE_NAME, row, table_fields_names, table_fields_types)\n\t\t\t# print query\n\t\t\tself.execute_query(query)\t\t\t\n\t\texcept Exception, e:\t\t\t\t\n\t\t\tprint '[e] Exeption: %s' % (str(e))\n\t\t\tprint '\\t[q] Query that caused exception \\n %s' % (query)\n\t\t\treturn False\n\n\t\treturn True", "def insert(db, table, name, row):\n\n # Build insert prepared statement\n columns = [name for name, _ in table.items()]\n insert = INSERT_ROW.format(table=name, columns=\", \".join(columns), values=(\"?, \" * len(columns))[:-2])\n\n try:\n db.execute(insert, values(table, row, columns))\n except Exception as ex:\n print(\"Error inserting row: {}\".format(row), ex)", "def insert_to_db(self) -> None:\n query = '''INSERT INTO ESLReceipts(Transaction_Number, Date, Description, Memo,\n Amount_Debit, Amount_Credit, Balance, Check_Number, \n Fees, Card_Type, Is_Payment, Is_Transaction, User_id)\n VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?);'''\n self.db.commit(query, values=self.to_tuple())\n\n if self.is_transaction \\\n and self.transaction is not None \\\n and not self.transaction.exists_in_db():\n self.transaction.insert_to_db()", "def Insert(self):\n sql = 'INSERT INTO %s ( %s ) VALUES ( %s )' % (\n self.table_name,\n ', '.join(self.values),\n ', '.join(['?' 
for _ in self.values])\n )\n return Database().Execute(sql, tuple(self.values.values()))", "def insert(self,table,values):\n self.connect.execute(self.insert_disc[table],values)\n self.connect.commit()", "def singleInsert(self, table_name, fields, field_values, field_types=[]):\n if not self.checkTable(table_name):\n self.createTable(table_name, fields, field_types)\n self.transactionInsert(table_name, fields, field_values)\n self.transactionEnd()", "def _query_insert(self, sql, data=None):\n\n conn = psycopg2.connect(self.connect_args)\n cur = conn.cursor()\n cur.execute(sql, data)\n conn.commit()\n cur.close()\n conn.close()" ]
[ "0.72778225", "0.71292144", "0.6977028", "0.6974775", "0.6912995", "0.6867108", "0.6768567", "0.6768567", "0.6768567", "0.6734239", "0.6724782", "0.6591992", "0.656104", "0.6560686", "0.65567183", "0.65319073", "0.65109533", "0.6503445", "0.6499452", "0.6496004", "0.64883584", "0.6461727", "0.6441742", "0.641306", "0.63717586", "0.6365816", "0.63467056", "0.63420385", "0.63334227", "0.63310647" ]
0.7278151
0
Internal init method. Please use DHTProtocol.create coroutine to spawn new protocol instances
def __init__(self, *, _initialized_with_create=False): assert _initialized_with_create, "Please use DHTProtocol.create coroutine to spawn new protocol instances" super().__init__()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, *args, **kwargs):\n self._initialize_protocols()\n super().__init__(*args, **kwargs)", "def __init__(self, protocol):\r\n self._protocol = protocol", "def __init__(self, protocol, timeout):\r\n self._protocol = protocol\r\n self._timeout = timeout\r\n\r\n # Set of _IncompleteMessage instances\r\n self._incompleteMsgs = set()\r\n\r\n # Dictionary with binary UID as key and the binary as value\r\n self._binaries = {}\r\n\r\n # Setup repeated calling of the clean up method\r\n self._cleaner = LoopingCall(self._cleanUp)", "def create_protocol(self_key=None):\n\n if self_key is None:\n self_key = 'selfkey'\n\n bucket_tree = mock.Mock()\n value_store = mock.Mock()\n\n protocol = DHTProtocol(self_key, bucket_tree, value_store, 1234)\n transport = mock.Mock()\n protocol.transport = transport\n\n return protocol, transport, bucket_tree, value_store", "def __init__(self, host, port, initialized=None, uuid=None, debug=False, no_mine=False, benchmark=False, neighbors=[]):\n\n m = sha1()\n m.update(host.encode())\n m.update(str(port).encode())\n\n self.metadata = {}\n self.metadata['done'] = initialized\n self.metadata['host'] = host\n self.metadata['port'] = port\n self.metadata['uuid'] = str(m.hexdigest()) if uuid is None else uuid\n self.metadata['debug'] = debug\n self.metadata['no_mine'] = no_mine\n self.metadata['benchmark'] = benchmark\n self.metadata['resolve_requests'] = set()\n self.metadata['resolve_lock'] = Lock()\n\n if benchmark:\n from threading import Semaphore\n self.metadata['benchmark_lock'] = Semaphore(0)\n\n if self.metadata['uuid'] == 'SYSTEM':\n raise InvalidID\n\n initialize_log(self.metadata['uuid'], debug)\n\n # Create the Blockchain object.\n self.metadata['blockchain'] = Blockchain()\n self.metadata['history'] = History(self.metadata['uuid'])\n\n # Create the Network Handler object.\n self.nh = NetworkHandler(self.metadata, neighbors)\n\n # Start the Network Handler main loop.\n self.nh.event_loop()", "def __init__(self, listen_connection, bootstrap_connection = ('router.bittorrent.com', 6881),\n\t\t\tuser_setup = {}, user_router = None):\n\t\tsetup = {'discover_t': 180, 'check_t': 30, 'check_N': 10}\n\t\tsetup.update(user_setup)\n\t\tself._log = logging.getLogger(self.__class__.__name__ + '.%s.%d' % listen_connection)\n\t\tself._log.info('Starting DHT node with bootstrap connection %s:%d' % bootstrap_connection)\n\t\tlisten_connection = (socket.gethostbyname(listen_connection[0]), listen_connection[1])\n\t\t# Generate key for token generation\n\t\tself._token_key = os.urandom(20)\n\t\t# Start KRPC server process and Routing table\n\t\tself._krpc = KRPCPeer(listen_connection, self._handle_query)\n\t\tif not user_router:\n\t\t\tuser_router = DHT_Router('%s.%d' % listen_connection, setup)\n\t\tself._nodes = user_router\n\t\tself._node = DHT_Node(listen_connection, os.urandom(20))\n\t\tself._node_lock = threading.RLock()\n\t\t# Start bootstrap process\n\t\ttry:\n\t\t\ttmp = self.ping(bootstrap_connection, sender_id = self._node.id).get_result(timeout = 1)\n\t\texcept Exception:\n\t\t\traise\n\t\t\ttmp = {b'ip': encode_connection(listen_connection), b'r': {b'id': self._node.id}}\n\t\tself._node.connection = decode_connection(tmp[b'ip'])\n\t\tself._bootstrap_node = self._nodes.register_node(bootstrap_connection, tmp[b'r'][b'id'])\n\t\t# BEP #0042 Enable security extension\n\t\tlocal_id = bytearray(self._node.id)\n\t\tbep42_value = encode_uint32(bep42_prefix(self._node.connection[0], local_id[-1], local_id[0]))\n\t\tself._node.set_id(bep42_value[:3] + 
self._node.id[3:])\n\t\tassert(valid_id(self._node.id, self._node.connection))\n\t\tself._nodes.protect_nodes([self._node.id])\n\n\t\t# Start maintainance threads\n\t\tself._threads = ThreadManager(self._log.getChild('maintainance'))\n\n\t\t# Periodically ping nodes in the routing table\n\t\tdef _check_nodes(N, last_ping = 15 * 60, timeout = 5):\n\t\t\tdef get_unpinged(n):\n\t\t\t\treturn time.time() - n.last_ping > last_ping\n\t\t\tcheck_nodes = list(self._nodes.get_nodes(N, expression = get_unpinged))\n\t\t\tif not check_nodes:\n\t\t\t\treturn\n\t\t\tself._log.debug('Starting cleanup of known nodes')\n\t\t\tnode_result_list = []\n\t\t\tfor node in check_nodes:\n\t\t\t\tnode.last_ping = time.time()\n\t\t\t\tnode_result_list.append((node, node.id, self.ping(node.connection, self._node.id)))\n\t\t\tt_end = time.time() + timeout\n\t\t\tfor (node, node_id, async_result) in node_result_list:\n\t\t\t\tresult = self._eval_dht_response(node, async_result, timeout = max(0, t_end - time.time()))\n\t\t\t\tif result and (node.id != result.get(b'id')): # remove nodes with changing identities\n\t\t\t\t\tself._nodes.remove_node(node, force = True)\n\t\tself._threads.start_continuous_thread(_check_nodes, thread_interval = setup['check_t'], N = setup['check_N'])\n\n\t\t# Try to discover a random node to populate routing table\n\t\tdef _discover_nodes():\n\t\t\tself._log.debug('Starting discovery of random node')\n\t\t\tfor idx, entry in enumerate(self.dht_find_node(os.urandom(20), timeout = 1)):\n\t\t\t\tif idx > 10:\n\t\t\t\t\tbreak\n\t\tself._threads.start_continuous_thread(_discover_nodes, thread_interval = setup['discover_t'])", "def __init__(self, protocol):\n\n # TODO: add client dictionary\n self._name = protocol['name']\n self._mode = protocol['mode']\n\n try:\n from minicps import __file__\n index = __file__.rfind('minicps')\n self._minicps_path = __file__[:index+7] + '/'\n\n except Exception as error:\n print('ERROR Protocol __init__ set _minicps_path: ', error)\n\n if self._mode > 0:\n # TODO: update server dict field: log\n self._server = protocol['server']\n else:\n self._server = {}", "def initialize(self) -> None:\n conn = self.optionally_wrap_socket(self.client.connection)\n conn.setblocking(False)\n self.client = TcpClientConnection(conn=conn, addr=self.addr)\n if b'ProtocolHandlerPlugin' in self.config.plugins:\n for klass in self.config.plugins[b'ProtocolHandlerPlugin']:\n instance = klass(self.config, self.client, self.request)\n self.plugins[instance.name()] = instance", "def __init__(self, protocol: str):\n self.protocol = protocol\n\n # initalize context for pva\n self.context = None\n if protocol == \"pva\":\n self.context = Context(\"pva\")", "def __init__(self, protocol_instance, root_path):\n\n self.protocol = protocol_instance\n self.root_path = root_path", "def __init__(self):\n\n # For now, we'll connect to the target via the Apollo debug controller.\n # This should be replaced by a high-speed USB link soon; but for now\n # we'll use the slow debug connection.\n self._debugger = ApolloDebugger()\n self._serial = self._find_serial_connection()", "def __init__(self):\n CRTPDriver.__init__(self)\n self._radio_manager = None\n self.uri = ''\n self.link_error_callback = None\n self.link_quality_callback = None\n self.in_queue = None\n self.out_queue = None\n self._thread = None\n self.needs_resending = True", "def __init__(self):\n log.msg(\"Initializing Twitch parser.\")\n\n # initialize our data members\n self.streams = tuple()\n self.crc32 = 0", "def __init__(self, url: 
java.net.URL, protocolHandler: ghidra.framework.protocol.ghidra.GhidraProtocolHandler):\n ...", "def __init__(\n self, config: interface.BaseConfig, session_manager: ClientSessionManager\n ):\n super().__init__(max_calls=1) # To StateProducer via interface.AppleTV\n self._config = config\n self._session_manager = session_manager\n self._protocols_to_setup: Queue[SetupData] = Queue()\n self._protocol_handlers: Dict[Protocol, SetupData] = {}\n self._push_updates = FacadePushUpdater()\n self._features = FacadeFeatures(self._push_updates)\n self._pending_tasks: Optional[set] = None\n self._device_info = interface.DeviceInfo({})\n self._interfaces = {\n interface.Features: self._features,\n interface.RemoteControl: FacadeRemoteControl(),\n interface.Metadata: FacadeMetadata(),\n interface.Power: FacadePower(),\n interface.PushUpdater: self._push_updates,\n interface.Stream: FacadeStream(self._features),\n interface.Apps: FacadeApps(),\n interface.Audio: FacadeAudio(),\n }", "def _build_protocol(self):\n self._protocol = SBE19Protocol(Prompt, NEWLINE, self._driver_event)", "def __init__(self):\n\n self._mh = MasterHead.get_head()", "def __init__(self, hsp):\n raise NotImplementedError", "def __init__(self, params={}):\n self.lt_ses = lt.session() # pylint: disable=no-member\n self.lt_ses.listen_on(6881, 6891)\n\n self.params = params\n self.queue = deque()\n self.stream_thread = None\n self.handle = None", "def __init__(self, chain_instance, *args, **kwargs):\n protocol_logger('Intializing protocol processor')\n self.chain_instance = chain_instance", "def __init__(self, gym_env: gym.Env) -> None:\n super().__init__()\n self._queue: Queue = Queue()\n self._action_counter: int = 0\n self.gym_address = str(GYM_CONNECTION_PUBLIC_ID)\n self._agent = ProxyAgent(\n name=\"proxy\", gym_env=gym_env, proxy_env_queue=self._queue\n )\n self._agent_thread = Thread(target=self._agent.start)\n self._active_dialogue = None # type: Optional[GymDialogue]\n self.gym_skill = \"fetchai/gym:0.1.0\"\n self.gym_dialogues = GymDialogues(self.gym_skill, role_from_first_message)", "def init(self) -> None:\n ...", "def __init__(self, proto):\n self.proto = proto", "def _init_objects(self) -> None:\n self.position = selectors.get_position(self.exchange, self.symbol)\n self.broker = Broker(self.position, self.exchange, self.symbol, self.timeframe)\n\n if self.hp is None and len(self.hyperparameters()) > 0:\n self.hp = {}\n for dna in self.hyperparameters():\n self.hp[dna['name']] = dna['default']", "def init(self) -> None:", "def __init__(self, h, inp, sent, nll):\n self.h, self.inp, self.sent, self.nll = h, inp, sent, nll", "def __init__(self, transport, protocol):\n self._promises = {}\n self._send_msg = transport\n self.protocol = protocol\n\n for p in self.protocol:\n assert 'procedure' in p, \\\n \"\"\n\n setattr(self, p['procedure'], self._make_protocol_method(p))", "def __init__(self, fetcher, sender, sleep_time=1, command_interpreter=None, port=16981):\n self.__fetcher = fetcher\n self.__sender = sender\n self.sleep_time = sleep_time\n #if port:\n # self.__external_server = StreamServer((\"127.0.0.0\", port), self.__listen_external)\n #else:\n # self.__external_server = None\n\n self.__ticket_counter = 1\n #self.__ticket_counter_lock = Semaphore()\n\n self.interpreter = command_interpreter\n #self.__default_interpreter = PytheasCommandInterpreter(self)", "def __init__(self):\n\n # labjack connection handle (default: None. 
If connected: labjack handler instance)\n self.connection_handle = None\n\n # labjack connection state (default: None, connection_error: False, connected: True)\n self.connection_state = False\n\n # try to connect\n self.connect()", "def _build_protocol(self):\n self._protocol = Protocol(MENU, Prompt, NEWLINE, self._driver_event)" ]
[ "0.7248141", "0.702848", "0.66360265", "0.65688103", "0.6516263", "0.64979625", "0.6491149", "0.64696556", "0.64560825", "0.63274443", "0.6293979", "0.6292233", "0.62528545", "0.6223429", "0.6187743", "0.61605954", "0.6160218", "0.61589706", "0.61360425", "0.61326575", "0.61255383", "0.6123858", "0.6123196", "0.6117159", "0.6113117", "0.6101537", "0.61003464", "0.6099494", "0.6089976", "0.60854554" ]
0.8525663
0