Dataset fields:
  query            string  (lengths 9 to 9.05k)
  document         string  (lengths 10 to 222k)
  metadata         dict
  negatives        list    (length 30)
  negative_scores  list    (length 30)
  document_score   string  (lengths 4 to 10)
  document_rank    string  (2 distinct values)
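The fields above describe one training record: a natural-language query, the matching code document, 30 mined negative documents with their similarity scores, the positive document's score, and its rank. As a hedged illustration only (the repository id below is a placeholder, not taken from this card), a record could be loaded and inspected with the Hugging Face datasets library:

from datasets import load_dataset

# Placeholder repository id -- substitute the actual dataset path.
ds = load_dataset("user/code-retrieval-triplets", split="train")
example = ds[0]

print(example["query"])                # natural-language docstring
print(example["document"][:200])       # positive code snippet (truncated)
print(len(example["negatives"]))       # 30 hard negatives
print(example["negative_scores"][:3])  # scores of the first few negatives
print(example["document_score"], example["document_rank"])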
Get a random system quality attribute. An alias for system_quality_attribute().
def ility(self) -> str:
    return self.system_quality_attribute()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def system_quality_attribute(self) -> str:\n return self.random.choice(SYSTEM_QUALITY_ATTRIBUTES)", "def quality(self):\n try:\n qid = int((self.tool_metadata or {}).get(\"quality\", 0))\n except:\n qid = 0\n\n # We might be able to get the quality strings from the item's tags\n internal_name, name = \"normal\", \"Normal\"\n if self.tags:\n tags = {x.get('category'): x for x in self.tags}\n if 'Quality' in tags:\n internal_name, name = tags['Quality'].get('internal_name'), tags['Quality'].get('name')\n\n return qid, internal_name, name", "def genQuality(self):\n return np.clip(np.random.normal(self.qavgs, self.qstdevs), 0, 40)", "def get_random(self):\n self.random_range = list(np.array(self.friendly_range) * self.conversion)\n return np.random.uniform(self.random_range[0], self.random_range[1], 1)[0]", "def get_random(self):\n base_genom = \"1\" * sum(self._size_var)\n return utils.randomise_a_string(base_genom)", "def get_multiplier(quality):\n\n if quality == \"low\":\n return 5\n elif quality == \"medium\":\n return 6\n elif quality == \"good\":\n return 7\n elif quality == \"high\":\n return 8\n return 6", "def getRandomRarity():\n r = random.randint(1,100)\n if r <= Rarities.IMPOSIBIL:\n return \"IMPOSIBIL\"\n elif r <= Rarities.LEGENDAR:\n return \"LEGENDAR\"\n elif r <= Rarities.EPIC:\n return \"EPIC\"\n else:\n return \"COMUN\"", "def __getattr__(self, name):\n\n return getattr(self._random, name)", "def get_random(self):\n return self._get_random()", "def random(self):\n return self._random", "def RandomLoadUnit(self):\n\t\treturn self._get_attribute('randomLoadUnit')", "def test_sample_quality(self):\r\n self.assertEqual(self.test_sample.quality, 'medium')", "def getConstant(self):\n return _libsbml.QualitativeSpecies_getConstant(self)", "def _get_random_returns(self): \n return self.asset_process.distrib.random()", "def __getattr__(self, key):\n return random.choice([\"world\", math.pi])", "def random(self):\n\n return self._random", "def get_random_question(self):\n available_qs = self.available_qs\n if available_qs.exists():\n return random.choice(available_qs)", "def quality(self):\n return self.plays * self.number", "def test_sample_one_quality(self):\r\n self.assertEqual(self.test_sample.quality, 'medium')", "def __getattribute__(self, name):\n if name in [\"sampling_function\", \"env\", \"fit_dist\", \"reset\"]:\n return object.__getattribute__(self, name)\n\n else:\n return getattr(self.env, name)", "def mineral_attr(attribute):\n return attribute[0]", "def get_printer_quality(self):\n return self.parent.printer.get_quality()", "def quality(self) -> int:\n return self._quality", "def _get_random_value(self):\r\n return random.randint(1, 10)", "def _random_weight(self):\n return random.uniform(MIN_WEIGHT, MAX_WEIGHT)", "def get_system_value(name: str):\n return Config.objects.first().__dict__[name]", "def random(self):\n return self._randomize()", "def random():\n pars = dict(\n scale=10**np.random.uniform(1, 3),\n gamma=np.random.uniform(0, 6),\n q_0=10**np.random.uniform(-3, -1),\n )\n return pars", "def _cim_quality():\n return {\n 'type' : 'class',\n 'name' : 'cim_quality',\n 'base' : None,\n 'is_abstract' : False,\n 'is_entity' : True,\n 'doc' : 'The starting point for a quality record. It can contain any number of issues and reports. An issue is an open-ended description of some issue about a CIM instance. 
A record is a prescribed description of some specific quantitative measure that has been applied to a CIM instance.',\n 'properties' : [\n ('meta', 'shared.doc_meta_info', '1.1', None),\n ('reports', 'quality.report', '0.N', None),\n ],\n 'decodings' : [\n ('meta', 'self::cim:cIM_Quality'),\n ('reports', 'child::cim:report'),\n ]\n }", "def get_attr(self, attr_name, ds_name=None):\n if self.science_product:\n return self.__nc_attr(attr_name, ds_name)\n\n return self.__h5_attr(attr_name, ds_name)" ]
[ "0.87941146", "0.61170155", "0.6050746", "0.5851535", "0.5762336", "0.5579478", "0.55139697", "0.5418243", "0.5411513", "0.535925", "0.53295076", "0.52709144", "0.52343696", "0.520349", "0.51900476", "0.5188331", "0.5184759", "0.5182861", "0.5176378", "0.51667213", "0.5160519", "0.5117905", "0.5108728", "0.50853467", "0.5080183", "0.5080049", "0.50744367", "0.5070408", "0.50702107", "0.5068036" ]
0.6239924
1
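The `objective` metadata above flags each record for triplet-style contrastive training over (query, document, negatives). As a minimal sketch (not part of the original card), one record can be expanded into plain (anchor, positive, negative) triplets like this:

def to_triplets(example):
    """Expand one dataset record into (query, positive, negative) triplets,
    following the `objective.triplet` pairing declared in the metadata."""
    return [
        (example["query"], example["document"], negative)
        for negative in example["negatives"]
    ]

# Each record yields 30 triplets, one per mined negative.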
Fit scaler and transform input data. Winsorise `X` at `quantile` and `1 - quantile`, then scale each variable (unless it is binary, in which case it is already a rule).
def fit_transform(self, X, y=None):
    self.scale = np.ones(X.shape[1])
    self.lower = np.percentile(X, self.quantile*100, axis=0)
    self.upper = np.percentile(X, (1-self.quantile)*100, axis=0)
    # Winsorize at `self.quantile`
    winX = X.copy()
    is_lower = (winX < self.lower)
    is_higher = (winX > self.upper)
    for col in range(X.shape[1]):
        winX[is_lower[:, col], col] = self.lower[col]
        winX[is_higher[:, col], col] = self.upper[col]
        num_uniq = np.unique(X[:, col]).size
        if num_uniq > 2:  # Don't scale binary vars
            self.scale[col] = 0.4/(1e-12 + np.std(winX[:, col]))
    large_scale = np.where(self.scale > 1e3)[0]
    if large_scale.size > 0:
        warnings.warn('Scales of {} are larger than 1e3!'.format(large_scale))
    return winX*self.scale
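For context, here is a minimal standalone sketch of the winsorise-then-scale behaviour the docstring describes, assuming only numpy; it illustrates the idea and is not the class's own implementation:

import numpy as np

def winsorise_and_scale(X, quantile=0.025):
    """Clip each column at the `quantile` and `1 - quantile` percentiles,
    then rescale non-binary columns by 0.4 / std (binary columns untouched)."""
    X = np.asarray(X, dtype=float)
    lower = np.percentile(X, quantile * 100, axis=0)
    upper = np.percentile(X, (1 - quantile) * 100, axis=0)
    winX = np.clip(X, lower, upper)                # winsorise
    scale = np.ones(X.shape[1])
    for col in range(X.shape[1]):
        if np.unique(X[:, col]).size > 2:          # leave binary columns alone
            scale[col] = 0.4 / (1e-12 + np.std(winX[:, col]))
    return winX * scale

# Example: two continuous columns plus one binary column
rng = np.random.default_rng(0)
X = np.column_stack([rng.normal(size=100),
                     rng.exponential(size=100),
                     rng.integers(0, 2, size=100)])
print(winsorise_and_scale(X).std(axis=0))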
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fit(self, X):\n q_min, q_max = self.quantile_range\n self.center_ = np.nanmedian(X, axis=0) if self.with_centering else None\n \n if self.with_scaling:\n quantiles = []\n for feature_idx in range(X.shape[1]):\n column_data = X[:, feature_idx]\n quantiles.append(np.nanpercentile(column_data, self.quantile_range))\n\n quantiles = np.transpose(quantiles)\n self.scale_ = quantiles[1] - quantiles[0]\n else:\n self.scale_ = None\n \n return self", "def quantile_transform(X, *, axis=..., n_quantiles=..., output_distribution=..., ignore_implicit_zeros=..., subsample=..., random_state=..., copy=...):\n ...", "def fit(self, x, y=None):\n x = pd.DataFrame(x)\n q1 = x.quantile(0.25)\n q3 = x.quantile(0.75)\n iqr = q3 - q1\n self.lower_bound = q1 - (1.5 * iqr)\n self.upper_bound = q3 + (1.5 * iqr)\n self.imputer.fit(\n x.where(~((x < self.lower_bound) | (x > self.upper_bound)), np.nan)\n )\n return self", "def robust_scale(X, *, axis=..., with_centering=..., with_scaling=..., quantile_range=..., copy=..., unit_variance=...):\n ...", "def fit(self, X, y=None):\n self.scaler_ = MaxAbsScaler(copy=True)\n self.columns_to_transform_ = get_numerical_columns(\n data_frame=X,\n ignore_columns=self.ignore_columns,\n uniqueness_thresshold=self.uniqueness_thresshold,\n )\n self.scaler_.fit(X[self.columns_to_transform_])\n return self", "def fit(self, X):\n data_min = np.nanmin(X, axis=0)\n data_max = np.nanmax(X, axis=0)\n \n data_range = (data_max - data_min)\n \n self.scale_ = (self.feature_range[1] - self.feature_range[0]) / data_range\n self.min_ = self.feature_range[0] - data_min * self.scale_\n self.data_range_ = data_range\n self.data_min_ = data_min\n self.data_max_ = data_max", "def fit(self, X, y=None):\n feature_range = self.feature_range\n data_min = np.min(X, axis=0)\n data_range = np.max(X, axis=0) - data_min\n self.scale_ = (feature_range[1] - feature_range[0]) / data_range\n self.min_ = feature_range[0] - data_min * self.scale_\n return self", "def normalize_X(X):\n scaler = preprocessing.StandardScaler()\n X = scaler.fit_transform(X)\n return X", "def standardize(X):\n\n scaler = StandardScaler()\n X_scaled = scaler.fit_transform(X)\n return X_scaled", "def fit_transform(self, X):\n X = np.asarray(X, dtype=np.float64)\n self.fit(X)\n return self.transform(X)", "def fit_transform(self, X):\n X = np.asarray(X, dtype=np.float64)\n \n self.fit(X)\n return self.transform(X)", "def percentile_normalization(data: np.ndarray, percentile: int = 1) -> np.ndarray:\n\n min_percentile = np.percentile(data, percentile)\n max_percentile = np.percentile(data, 100 - percentile)\n\n # limit maximum intensity of data by max_percentile\n data[data >= max_percentile] = max_percentile\n\n # limit minimum intensity of data by min_percentile\n data[data <= min_percentile] = min_percentile\n\n return data", "def fit(self, X, y):\n self.model = Pipeline([\n ('scalar', RobustScaler()),\n ('classifier', SVC(probability=True, gamma='scale')),\n ])\n self.model.fit(X, y)", "def scale(X, *, axis=..., with_mean=..., with_std=..., copy=...):\n ...", "def fit(self, X, y=None):\n continuous_features = self._get_relevant_features(X)\n self._feature_mask_ = continuous_features\n\n if self.with_mean:\n means = X.loc[:, self._feature_mask_].mean(skipna=self.ignore_nans)\n else:\n means = pd.Series(0, index=continuous_features)\n self.mean_ = means\n\n if self.with_std:\n scales = X.loc[:, self._feature_mask_].std(skipna=self.ignore_nans)\n else:\n scales = pd.Series(1, index=continuous_features)\n self.scale_ = scales\n\n 
return self", "def rescale(data, perc_mini=1, perc_maxi=99, \n out_mini=0, out_maxi=1, \n cutoff_mini=True, cutoff_maxi=True, \n return_extrema=False):\n \n mini = np.percentile(data, perc_mini)\n maxi = np.percentile(data, perc_maxi)\n if out_mini is None:\n out_mini = mini\n if out_maxi is None:\n out_maxi = maxi\n data_out = data - mini\n data_out = data_out * (out_maxi-out_mini) / (maxi-mini)\n data_out = data_out + out_mini\n if cutoff_mini:\n data_out[data_out<out_mini] = out_mini\n if cutoff_maxi:\n data_out[data_out>out_maxi] = out_maxi\n if return_extrema:\n return data_out, mini, maxi\n else:\n return data_out", "def transform(self, X, y=None):\n data_subframe = X[self.columns_to_transform_]\n X[self.columns_to_transform_] = self.scaler_.transform(data_subframe)\n return X.copy()", "def scale_data(self, train_data):\n\n # Fit on training data only.\n # scaler = StandardScaler().fit(train_data[self.feature_names])\n scaler = QuantileTransformer().fit(train_data[self.feature_names])\n self.scaler = scaler\n scaled_train_data = scaler.transform(train_data[self.feature_names])\n\n scaled_train_data_df = pd.DataFrame(data=scaled_train_data, columns=self.feature_names)\n scaled_train_data_df.index = train_data.index\n scaled_train_data_df[self.outcome_name] = train_data[self.outcome_name]\n\n return scaled_train_data_df", "def fit(self, X, y=None):\n feature_mask = self._get_relevant_features(X)\n self._feature_mask_ = feature_mask\n\n self.min_ = X.min(skipna=self.ignore_nans)[feature_mask]\n self.max_ = X.max(skipna=self.ignore_nans)[feature_mask]\n self.scale_ = self.max_ - self.min_\n\n # if feature_mask.size != X.shape[1]:\n # self.scale_[~feature_mask] = 1\n # self.min_[~feature_mask] = 0\n # self.max_[~feature_mask] = 1\n\n return self", "def transform(self, x, y=None):\n x = pd.DataFrame(x)\n x.where(~((x < self.lower_bound) | (x > self.upper_bound)),\n np.nan,\n inplace=True)\n return self.imputer.transform(x)", "def scale_X(X_train, X_test):\n \n scaler = MinMaxScaler()\n scaler.fit(X_train)\n X_train_scaled = scaler.transform(X_train)\n X_test_scaled = scaler.transform(X_test)\n print(\"scaling done\")\n \n return X_train_scaled, X_test_scaled", "def ds_preprocessing(x, error_threshold, min_val=0, max_val=1):\n # Scale in range [min_val, max_val]\n scaler = MinMaxScaler((min_val, max_val))\n processed = scaler.fit_transform(x)\n\n # Quantization\n bins = np.arange(min_val, max_val, 2 * error_threshold)\n digitized = np.digitize(processed, bins)\n quantized = (digitized - 1) * (2 * error_threshold) + error_threshold\n\n return quantized, scaler", "def fit(self, df, method='min_max_scaling', per_col_scaler=False):\n # Does df contain multiple columns ?\n if df.size == len(df) or per_col_scaler is True:\n # df contains multiple columns\n lbl_list = df.columns.values\n for lbl in lbl_list:\n try:\n min_val = float(np.amin(df[lbl]))\n max_val = float(np.amax(df[lbl]))\n mean_val = float(np.mean(df[lbl]))\n std_val = float(np.std(df[lbl]))\n # TODO Validate/Debug Robust Scaler\n q1_val = float(np.percentile(df[lbl], 25))\n q3_val = float(np.percentile(df[lbl], 75))\n except TypeError:\n raise Exception(\"[ERROR] TypeError in normalization fit\")\n scaler = self.Scaler(min_val=min_val, max_val=max_val,\n mean_val=mean_val, std_val=std_val,\n q1=q1_val, q3=q3_val,\n method=method)\n self.dict_scalers[lbl] = scaler\n else:\n # df contains one single column or scaling is applied\n # independently for each feature/column\n try:\n min_val = float(np.amin(df))\n max_val = 
float(np.amax(df))\n mean_val = float(np.mean(df))\n std_val = float(np.std(df))\n # TODO Validate/Debug Robust Scaler\n q1_val = float(np.percentile(df, 25))\n q3_val = float(np.percentile(df, 75))\n except TypeError:\n raise Exception(\"[ERROR] TypeError in normalization fit\")\n scaler = self.Scaler(min_val=min_val, max_val=max_val,\n mean_val=mean_val, std_val=std_val,\n q1=q1_val, q3=q3_val,\n method=method)\n self.dict_scalers['OneForAll'] = scaler", "def transform(self, X, y=None):\n if isinstance(X, DataFrame):\n is_df = True # Serves no purpose \n \n X = check_array(X) # Validate input data\n \n X = self.ext_scaler.transform(X) # Scale and centre features\n if self.linear_features:\n X_scale = self._scaler.transform(X) # Scale linear features to give same a priori weight as rules\n return hstack([X_scale, self._one_hot_encoder.transform(self.base_estimator.apply(X).reshape(-1, self.n_estimators))])\n else:\n return self._one_hot_encoder.transform(self.base_estimator.apply(X).reshape(-1, self.n_estimators))", "def scale(df, missing=\"zeros\", scaler=\"robust\", **kwargs):\n \n scalers = {'standard':'StandardScaler', 'minmax':'MinMaxScaler', 'maxabs':'MaxAbsScaler',\\\n 'robust':'RobustScaler', 'quantile':'QuantileTransformer'}\n \n s = getattr(preprocessing, scalers[scaler])\n s = s(**kwargs)\n \n df = fillna(df, method=missing)\n df = pd.DataFrame(s.fit_transform(df), index=df.index, columns=df.columns)\n \n return df", "def fit(self, data):\n self.column_min_value, self.column_max_value = self._get_min_max_value(data)\n self.scale_column_idx = self._get_scale_column_idx(data)\n self.header = self._get_header(data)\n\n self.column_range = []\n for i in range(len(self.column_max_value)):\n scale = self.column_max_value[i] - self.column_min_value[i]\n if scale < 0:\n raise ValueError(\"scale value should large than 0\")\n elif np.abs(scale - 0) < 1e-6:\n scale = 1\n self.column_range.append(scale)\n\n f = functools.partial(MinMaxScale.__scale, max_value_list=self.column_max_value,\n min_value_list=self.column_min_value, scale_value_list=self.column_range,\n process_cols_list=self.scale_column_idx)\n fit_data = data.mapValues(f)\n\n return fit_data", "def fit_transform(self, X, y=...):\n ...", "def nscale_forward(self, x_1x, scales):\n assert 1.0 in scales, 'expected 1.0 to be the target scale'\n # Lower resolution provides attention for higher rez predictions,\n # so we evaluate in order: high to low\n scales = sorted(scales, reverse=True)\n\n pred = None\n\n for s in scales:\n x = nn.functional.interpolate(\n x_1x,\n scale_factor=s,\n align_corners=self.align_corners,\n mode='bilinear')\n outs = self.single_scale_forward(x)\n\n cls_out = outs['cls_out']\n attn_out = outs['logit_attn']\n\n if pred is None:\n pred = cls_out\n elif s >= 1.0:\n # downscale previous\n pred = scale_as(pred, cls_out, self.align_corners)\n pred = cls_out * attn_out + pred * (1 - attn_out)\n else:\n # s < 1.0: upscale current\n cls_out = cls_out * attn_out\n\n cls_out = scale_as(cls_out, pred, self.align_corners)\n attn_out = scale_as(attn_out, pred, self.align_corners)\n\n pred = cls_out + pred * (1 - attn_out)\n\n return [pred]", "def transform(self, X: FEATURES, y: TARGET | None = None) -> DATAFRAME:\n check_is_fitted(self)\n X, y = self._prepare_input(X, y, columns=self.feature_names_in_)\n\n self.log(\"Scaling features...\", 1)\n X_transformed = self._estimator.transform(X[self._num_cols])\n\n # If all columns were transformed, just swap sets\n if len(self._num_cols) != X.shape[1]:\n # Replace the 
numerical columns with the transformed values\n for i, col in enumerate(self._num_cols):\n X[col] = X_transformed[:, i]\n else:\n X = to_df(X_transformed, X.index, X.columns)\n\n return X", "def fit(self, X, y):\n\n # est = KBinsDiscretizer(\n # n_bins=self.bins, encode='ordinal', strategy='kmeans')\n # Xt = est.fit_transform(X, y)\n # Xt = pd.DataFrame(Xt, columns=X.columns)\n\n Xt = self.discretize_dframe(X, y)\n self._x_transformed = Xt\n return self" ]
[ "0.6935344", "0.6514156", "0.6485185", "0.6348961", "0.62394345", "0.606469", "0.5963839", "0.591956", "0.5840398", "0.58116454", "0.58023864", "0.5767737", "0.5724162", "0.5704402", "0.5686381", "0.5667723", "0.56520194", "0.5641679", "0.5634941", "0.5634244", "0.562591", "0.5605802", "0.5598885", "0.5574371", "0.55738074", "0.55320454", "0.55313945", "0.5524987", "0.54730403", "0.5469715" ]
0.7883213
0
Transform data into modified features (before being passed to the penalised regression step). If `linear_features=True`, this is the scaled linear features followed by the one-hot encoding signifying which rules are "on"; otherwise it is just the one-hot encoding signifying which rules are "on".
def transform(self, X, y=None):
    if isinstance(X, DataFrame):
        is_df = True  # Serves no purpose
    X = check_array(X)  # Validate input data
    X = self.ext_scaler.transform(X)  # Scale and centre features
    if self.linear_features:
        X_scale = self._scaler.transform(X)  # Scale linear features to give same a priori weight as rules
        return hstack([X_scale, self._one_hot_encoder.transform(self.base_estimator.apply(X).reshape(-1, self.n_estimators))])
    else:
        return self._one_hot_encoder.transform(self.base_estimator.apply(X).reshape(-1, self.n_estimators))
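To illustrate the encoding the docstring refers to, here is a hedged scikit-learn sketch (not the original pipeline): every sample is mapped to the leaf it reaches in each tree, and those leaf indices are one-hot encoded into binary "rule is on" features that a penalised linear model can then weight.

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import OneHotEncoder

X, y = make_classification(n_samples=200, n_features=5, random_state=0)
forest = RandomForestClassifier(n_estimators=10, max_depth=3, random_state=0).fit(X, y)

leaves = forest.apply(X)                          # (n_samples, n_trees) leaf indices
rules_on = OneHotEncoder().fit_transform(leaves)  # sparse one-hot "rule on/off" matrix
print(rules_on.shape)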
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_linear_transform(self):\n \n with tf.variable_scope(\"linear_transform\"):\n \n # feature scales/weights\n self.w = tf.get_variable(\"weights\", shape=[self.dim_input], \n initializer= tf.contrib.layers.xavier_initializer())\n #self.B = tf.get_variable(\"biases\", shape=[self.dim_input], \n # initializer= tf.contrib.layers.xavier_initializer())\n \n # diagonalize and matmul\n self.W = tf.diag(self.w)\n #self.W = tf.get_variable(\"weights\", shape=[self.dim_input, self.dim_input], \n # initializer= tf.contrib.layers.xavier_initializer())\n \n #self.X_transformed = tf.add(tf.matmul(self.X_input, self.W), self.B) \n self.X_transformed = tf.matmul(self.X_input, self.W)", "def preprocess_feature(df):", "def fit(self, X: pd.DataFrame):\n self.numeric_features = [\n x for x in X.columns\n if 'lag_' in x\n or 'rolling_' in x\n or 'price' in x\n ]\n\n self.feature_transformer = ColumnTransformer(\n [('numeric', 'passthrough', self.numeric_features),\n ('categorical', OneHotEncoder(sparse=False, drop='first'),\n self.categorical_features)]\n )\n\n self.feature_transformer.fit(X)\n\n return self.feature_transformer", "def transform(self, X):\n\n X = X.copy() # type: pd.DataFrame\n X.loc[:, self._feature_mask_] -= self.min_\n X.loc[:, self._feature_mask_] /= self.scale_\n return X", "def transform(self, X):\n \n # Check is fit had been called\n check_is_fitted(self, ['encoder_dict_'])\n \n # Check that the input is of the same shape as the one passed\n # during fit.\n if X.shape[1] != self.input_shape_[1]:\n raise ValueError('Number of columns in dataset is different from training set used to fit the encoder')\n \n X = X.copy()\n for feature in self.variables:\n X[feature] = np.where(X[feature].isin(self.encoder_dict_[feature]), X[feature], 'Rare')\n \n return X", "def transform(self, X: FEATURES, y: TARGET | None = None) -> DATAFRAME:\n check_is_fitted(self)\n X, y = self._prepare_input(X, y, columns=self.feature_names_in_)\n\n self.log(\"Normalizing features...\", 1)\n X_transformed = self._estimator.transform(X[self._num_cols])\n\n # If all columns were transformed, just swap sets\n if len(self._num_cols) != X.shape[1]:\n # Replace the numerical columns with the transformed values\n for i, col in enumerate(self._num_cols):\n X[col] = X_transformed[:, i]\n else:\n X = to_df(X_transformed, X.index, X.columns)\n\n return X", "def transform(self, X):\n \n # Check is fit had been called\n check_is_fitted(self, ['encoder_dict_'])\n \n # Check that the input is of the same shape as the one passed\n # during fit.\n if X.shape[1] != self.input_shape_[1]:\n raise ValueError('Number of columns in dataset is different from training set used to fit the encoder')\n \n X = X.copy()\n for feature in self.variables:\n for category in self.encoder_dict_[feature]:\n X[str(feature) + '_' + str(category)] = np.where(X[feature] == category, 1, 0)\n \n # drop the original non-encoded variables.\n X.drop(labels=self.variables, axis=1, inplace=True)\n \n return X", "def transform(self, X: FEATURES, y: TARGET | None = None) -> DATAFRAME:\n check_is_fitted(self)\n X, y = self._prepare_input(X, y, columns=self.feature_names_in_)\n\n self.log(\"Scaling features...\", 1)\n X_transformed = self._estimator.transform(X[self._num_cols])\n\n # If all columns were transformed, just swap sets\n if len(self._num_cols) != X.shape[1]:\n # Replace the numerical columns with the transformed values\n for i, col in enumerate(self._num_cols):\n X[col] = X_transformed[:, i]\n else:\n X = to_df(X_transformed, X.index, X.columns)\n\n 
return X", "def preprocess(old_df, label_name, category_features, non_category_features):\n old_df['fraud'] = old_df[label_name].apply(lambda x: x[0] == 'f')\n\n # Creating a new dataframe with a subset of features.\n new_df = old_df[['fraud'] + non_category_features]\n\n # For categorical features, we make dummy variables,\n # and merge them into new_df.\n for feature in category_features:\n dummy_df = pd.get_dummies(old_df[feature], prefix=feature,\n dummy_na=True)\n # Since dummy_na=True, the last column will be for null values.\n dummy_df.drop(dummy_df.columns[-1], axis=1, inplace=True)\n new_df = pd.concat([new_df, dummy_df], axis=1)\n return new_df", "def preprocess_features(X):\n\t# Initialize new output DataFrame\n\toutput = pd.DataFrame(index = X.index)\n\n\t# Investigate new output DataFrame\n\tfor col, col_data in X.iteritems():\n\t\t# If data type is categorical, convert to dummy variables\n\t\tif col_data.dtype == object:\n\t\t\tcol_data = pd.get_dummies(col_data, prefix = col)\n\n\t\t\t# Collect the revised columns\n\t\t\toutput - output.join(col_data)\n\treturn output", "def transform(self, X):\n for i,f in enumerate(self.features):\n X[f] = self._label_encoders_[i].transform(X[f])\n return X", "def transform(self, X: FEATURES, y: TARGET | None = None) -> DATAFRAME:\n X, y = self._prepare_input(X, y, columns=self.feature_names_in_)\n\n self.log(\"Binning the features...\", 1)\n\n for col in self._num_cols:\n if self.strategy.lower() == \"custom\":\n X[col] = self._discretizers[col].transform(X[col])\n else:\n X[col] = self._discretizers[col].transform(X[[col]])[:, 0]\n\n # Replace cluster values with labels\n for i, label in enumerate(self._labels[col]):\n X[col] = X[col].replace(i, label)\n\n self.log(f\" --> Discretizing feature {col} in {X[col].nunique()} bins.\", 2)\n\n return X", "def transform(self, X, y=None):\n\n check_is_fitted(self, ('n_features_', ))\n X = check_array(X, accept_sparse=True)\n\n if X.shape[1] != self.n_features_:\n raise ValueError('num_features differ between fit and transform!')\n\n return X # dummy pass-through, doing nothing except for shape checks.", "def transform(self, data: Dict) -> Dict:\n\n for c in data.columns:\n if c in self.featBin:\n data[c] = data[c].astype(int)\n if data[c].max() > 1:\n data.loc[data[c] > 1, c] = 1\n elif data[c].min() < 0:\n data.loc[data[c] < 0] = 1\n else:\n pass\n elif c in self.featNum:\n data[c] = np.abs(data[c])\n\n else:\n pass\n\n return data", "def transform(self, X: pd.DataFrame):\n return self.feature_transformer.transform(X)", "def onehot_features(data):\n\n# Binary Features\n columns = ['Weekend', 'Revenue']\n for col in columns:\n data[col] = data[col].apply(lambda x: float(1) if x else float(0))\n\n columns = ['Month', 'OperatingSystems', 'Browser', 'Region', 'TrafficType',\n 'VisitorType']\n for col in columns:\n enc = OneHotEncoder()\n data_array = enc.fit_transform(data[[col]]).toarray()\n enc_data = pd.DataFrame(data_array)\n enc_data.columns = list(enc.get_feature_names([col]))\n data = data.join(enc_data)\n\n data = data.drop(columns={'Month', 'Month_May', 'OperatingSystems',\n 'OperatingSystems_2', 'Browser', 'Browser_2',\n 'Region', 'Region_1.0', 'TrafficType',\n 'TrafficType_2', 'VisitorType',\n 'VisitorType_Returning_Visitor'})\n return data", "def fit_transform(self, X, y=...):\n ...", "def inverse_transform(self, X):\n # No warning for y, since there's no y variable.\n # This correpsonds to function signature in scikit-learn's code base\n X = X.copy() # type: pd.DataFrame\n X.loc[:, 
self._feature_mask_] *= self.scale_\n X.loc[:, self._feature_mask_] += self.min_\n return X", "def _transform(self, X, y=None):\n # Check input of feature calculators, i.e list of functions to be\n # applied to time-series\n features = _check_features(self.features)\n X = convert_to(X, \"numpy3D\")\n\n # Check that the input is of the same shape as the one passed\n # during fit.\n if X.shape[1] != self.input_shape_[1]:\n raise ValueError(\n \"Number of columns of input is different from what was seen in `fit`\"\n )\n # Input validation\n # if not all([np.array_equal(fit_idx, trans_idx) for trans_idx,\n # fit_idx in zip(check_equal_index(X),\n # raise ValueError('Indexes of input time-series are different\n # from what was seen in `fit`')\n\n n_instances, _, _ = X.shape\n n_features = len(features)\n\n intervals = self.intervals_\n n_intervals = len(intervals)\n\n # Compute features on intervals.\n Xt = np.zeros((n_instances, n_features * n_intervals)) # Allocate output array\n # for transformed data\n columns = []\n\n i = 0\n drop_list = []\n for func in features:\n # TODO generalise to series-to-series functions and function kwargs\n for start, end in intervals:\n interval = X[:, :, start:end]\n\n # Try to use optimised computations over axis if possible,\n # otherwise iterate over rows.\n try:\n Xt[:, i] = func(interval, axis=-1).squeeze()\n except TypeError as e:\n if (\n str(e) == f\"{func.__name__}() got an unexpected \"\n f\"keyword argument 'axis'\"\n ):\n Xt[:, i] = np.apply_along_axis(\n func, axis=2, arr=interval\n ).squeeze()\n else:\n raise\n new_col_name = f\"{start}_{end}_{func.__name__}\"\n if new_col_name in columns:\n drop_list += [i]\n else:\n columns = columns + [new_col_name]\n i += 1\n\n Xt = pd.DataFrame(Xt)\n Xt = Xt.drop(columns=Xt.columns[drop_list])\n Xt.columns = columns\n\n return Xt", "def fit_transform(self, data: np.ndarray) -> np.ndarray:\n for i in range(self.n_layers):\n if self.verbose_training:\n print(\"Fitting layer %d with output width %d\" % (i+1, self.layers[i]))\n new_data = np.nan_to_num(data)\n new_data = self.ica_list[i].fit_transform(X=new_data)\n if i != self.n_layers - 1:\n self.power_list[i].fit(new_data)\n new_data = self.power_list[i].inverse_transform(new_data)\n data = new_data\n return data", "def fit_transform(self, data: pd.DataFrame):\n return self.fit(data).transform(data)", "def dummify_features(df):\n colnames = df.columns\n le_dict = {}\n for col in colnames:\n le_dict[col] = preprocessing.LabelEncoder()\n le_dict[col].fit(df[col])\n df.loc[:, col] = le_dict[col].transform(df[col])\n\n enc = preprocessing.OneHotEncoder()\n enc.fit(df)\n X = enc.transform(df)\n\n dummy_colnames = [cv + '_' + str(modality) for cv in colnames for modality in le_dict[cv].classes_]\n # for cv in colnames:\n # for modality in le_dict[cv].classes_:\n # dummy_colnames.append(cv + '_' + modality)\n\n return X, dummy_colnames, enc", "def preprocess_features(features):\r\n rowsum = np.array(features.sum(1))\r\n r_inv = np.power(rowsum, -1).flatten()\r\n r_inv[np.isinf(r_inv)] = 0.\r\n r_mat_inv = np.diag(r_inv)\r\n features = r_mat_inv.dot(features)\r\n return features", "def fit_transform(self, X, y=None):\n if isinstance(X, pd.DataFrame):\n self.input_feature_names = list(X.columns.values)\n else:\n self.input_feature_names = range(X.shape[1])\n\n try:\n X_t = self._component_obj.fit_transform(X, y)\n except AttributeError:\n raise RuntimeError(\"Transformer requires a fit_transform method or a component_obj that implements fit_transform\")\n if not 
isinstance(X_t, pd.DataFrame) and isinstance(X, pd.DataFrame):\n X_dtypes = X.dtypes.to_dict()\n selected_col_names = self.get_names()\n col_types = {key: X_dtypes[key] for key in selected_col_names}\n return pd.DataFrame(X_t, columns=selected_col_names, index=X.index).astype(col_types)\n else:\n return pd.DataFrame(X_t)", "def scale_data(data_matrix):\n scaler = StandardScaler() \n # Don't cheat - fit only on training data\n scaler.fit(data_matrix) \n X_train = scaler.transform(data_matrix) \n \n return X_train", "def de_normalize_data(self, df):\n if len(df) == 0:\n return df\n result = df.copy()\n for feature_name in self.continuous_feature_names:\n max_value = self.permitted_range[feature_name][1]\n min_value = self.permitted_range[feature_name][0]\n result[feature_name] = (\n df[feature_name]*(max_value - min_value)) + min_value\n return result", "def transform(self, data: np.ndarray) -> np.ndarray:\n for i in range(self.n_layers):\n new_data = np.nan_to_num(data)\n new_data = self.ica_list[i].transform(X=new_data)\n if i != self.n_layers - 1:\n new_data = self.power_list[i].inverse_transform(new_data)\n data = new_data\n return data", "def fit_transform(self, X):\n X_sparse = X.copy().astype(np.float64)\n self.X_sparse = X_sparse\n self._fit()\n return self.sample_weights, self.s, self.feature_weights", "def preprocess_features(features):\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n return features.todense()", "def transform(self, X):\r\n return LabelBinarizer().fit_transform(X)" ]
[ "0.64672863", "0.6376041", "0.620645", "0.6029789", "0.59679097", "0.59625727", "0.5958583", "0.5933087", "0.59234154", "0.5873284", "0.57592976", "0.57417256", "0.5729975", "0.57214034", "0.571568", "0.57002", "0.56780124", "0.56722736", "0.5665581", "0.5648128", "0.56347185", "0.5633068", "0.5621569", "0.56141645", "0.56119233", "0.55987704", "0.5592536", "0.55805373", "0.55556244", "0.55459577" ]
0.65656894
0
Extract the rule set from a single decision tree according to the `XGBClassifier` dump format.
def __extract_xgb_dt_rules__(self, dt):
    md = self.max_depth + 1  # upper limit of max_depth?
    rules = []
    levels = np.zeros((md, 3))  # Stores: (feature name, threshold, next node id)
    path = []

    # Extract feature numbers and thresholds for all nodes
    feat_thresh_l = re.findall(r'\[f([0-9]+)<([-]?[0-9]+\.?[0-9]*)\]', dt)

    _id = 0
    prune = -1
    for line in dt.split('\n')[:-1]:
        # Separate node id and rest of line
        _id, rest = line.split(':')

        # Count number of tabs at start of line to get level (and then remove)
        level = Counter(_id)['\t']
        _id = _id.lstrip()

        if prune > 0:
            # If we were last at a leaf, prune the path
            path = path[:-1+(level-prune)]
        # Add current node to path
        path.append(int(_id))

        if 'leaf' in rest:
            prune = level  # Store where we are so we can prune when we backtrack
            rules.append(levels[:level, (0, 2, 1)].copy())  # Add rules
            rules[-1][:, 1] = rules[-1][:, 1] == np.array(path[1:])  # Convert path to geq/leq operators
        else:
            # Extract (feature name, threshold, next node id)
            levels[level, :] = re.findall(r'\[f([0-9]+)<([-]?[0-9]+\.?[0-9]*)\].*yes=([0-9]+)', line)[0]
            # Don't prune
            prune = -1

    return rules
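For reference, a hedged sketch of the kind of XGBoost text dump this parser walks (the tree below is invented for illustration); the first regular expression in the method pulls the feature index and threshold out of every split node:

import re

dump = (
    "0:[f2<2.45] yes=1,no=2,missing=1\n"
    "\t1:leaf=0.42\n"
    "\t2:[f3<1.75] yes=3,no=4,missing=3\n"
    "\t\t3:leaf=-0.11\n"
    "\t\t4:leaf=0.37\n"
)
print(re.findall(r'\[f([0-9]+)<([-]?[0-9]+\.?[0-9]*)\]', dump))
# [('2', '2.45'), ('3', '1.75')]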
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_xgboost_dump(model):\n trees= []\n for tree_string in model._Booster.get_dump():\n nodes = [feature_regex.search('t' + node).groupdict() if '[' in node else leaf_regex.search('t' +node).groupdict() for node in tree_string.split('\\n')[:-1]]\n trees.append(nodes)\n return trees", "def get_XGBmodel(depth = 5, lr = 0.08, n_est = 100):\n XGBCla = XGBClassifier(\n # Maximum depth of each tree.\n max_depth = depth,\n # Learning rate.\n learning_rate = lr, \n # Number of trees in forest to fit.\n n_estimators=n_est, \n verbosity=0, \n objective='binary:logistic', \n # Booster to use: gbtree, gblinear or dart.\n booster='gbtree', \n # Number of parallel threads used to run xgboost.\n n_jobs=12, \n nthread=None, \n gamma=0, \n min_child_weight=1, \n max_delta_step=0, \n # subsample: The % of rows taken to build tree. \n # (should not be to low, recommended to be 0.8-1)\n subsample=1,\n colsample_bytree=1, \n colsample_bylevel=1, \n reg_alpha=0, \n reg_lambda=1, \n scale_pos_weight=1, \n base_score=0.5, \n random_state=0, \n seed=None, \n missing=None\n )\n return XGBCla", "def fit_transform(self, X, y, sample_weight=None):\n # Instantiate rule ensemble generator and set parameters\n if isinstance(self.base_estimator, XGBClassifier):\n self.base_estimator.set_params(n_estimators=self.n_estimators, silent=(self.verbose>0),\n max_depth=self.max_depth, n_jobs=self.n_jobs)\n elif isinstance(self.base_estimator, RandomForestClassifier):\n warnings.warn('This base_estimator implementation has not been tested in a while!')\n self.base_estimator.set_params(n_estimators=self.n_estimators, verbose=self.verbose,\n max_depth=self.max_depth, n_jobs=self.n_jobs)\n elif isinstance(self.base_estimator, GradientBoostingClassifier):\n warnings.warn('This base_estimator implementation has not been tested in a while!')\n self.base_estimator.set_params(n_estimators=self.n_estimators, verbose=self.verbose,\n max_depth=self.max_depth, n_jobs=self.n_jobs)\n else:\n raise NotImplementedError\n \n # Name features\n if isinstance(X, DataFrame):\n self.features = X.columns.values\n else:\n self.features = ['f'+str(i) for i in range(X.shape[1])]\n \n # Check input\n X = check_array(X)\n \n # Generate and extract rules\n if not self.rand_tree_size:\n self.base_estimator.fit(X, y, sample_weight=sample_weight)\n if isinstance(self.base_estimator, XGBClassifier):\n self._rule_dump = self.base_estimator._Booster.get_dump()\n else:\n NotImplementedError() # TODO: work out how to incrementally train XGB\n \n if self.verbose > 0:\n print('fitting trees')\n \n # For each tree: get leaf numbers and map them to [0, num leaves]\n # before one-hot encoding them\n n_values = \"auto\"\n leaves_l = []\n for tree_i in self._rule_dump:\n leaves = [int(i) for i in re.findall(r'([0-9]+):leaf=', tree_i)]\n leaves_l.append(leaves)\n self._one_hot_encoder = LabelOneHotEncoder(leaves_l)\n \n if self.verbose > 0:\n print('setup encoding')\n \n # Scale and centre linear features\n X = self.ext_scaler.fit_transform(X)\n \n if self.linear_features:\n # Linear features must be scaled to have same weighting as an average rule\n self._scaler = FriedScaler(quantile=self.linear_feature_quantile)\n X_scale = self._scaler.fit_transform(X)\n X_transform = hstack([X_scale, self._one_hot_encoder.fit_transform(self.base_estimator.apply(X).reshape(-1, self.n_estimators))])\n else:\n X_transform = self._one_hot_encoder.fit_transform(self.base_estimator.apply(X).reshape(-1, self.n_estimators))\n \n if self.verbose > 0:\n print('encoded')\n \n # Fit sparse 
linear model to rules (and optionally linear features)\n self.LR = LogisticRegression(C=self.C, penalty=self.penalty, class_weight=self.class_weight,\n warm_start=self.warm_start, solver='saga', verbose=self.verbose)\n self.LR.fit(X_transform, y, sample_weight=sample_weight)\n \n if self.verbose > 0:\n print('fitted')\n \n # Mask features with zero co-efficients\n # self.feature_mask_ = np.arange(self.LR.coef_.size)\n self.feature_mask_ = self.LR.coef_.nonzero()[1]\n \n self.coef_ = self.LR.coef_[0, self.feature_mask_]\n self.intercept_ = self.LR.intercept_\n self.get_feature_names()\n assert self.features_.size == self.feature_mask_.size\n return X_transform", "def xgboost_model(features, df):\n X= features\n y = df['Severity'].values\n\n xg_model = XGBClassifier(subsample= .7, reg_lambda = 5, n_estimators=900, min_child_weight=1, max_depth=20,\n learning_rate=.01, gamma = .5, colsample_bytree = .6, colsample_bylevel=.7)\n xg_model.fit(X, y)\n y_pred = xg_model.predict(X)\n \n return classification_report(y, y_pred, target_names=['Non-Severe', 'Severe'])", "def xgb(x_train, y_train, x_test):\n\n model = XGBClassifier()\n # y_train = np.reshape(y_train, (len(y_train), 1))\n # data = np.concatenate((x_train, y_train), axis=1)\n # for train, test in kfold.split(data):\n # # print(\"reached here\")\n # x_tr = data[train, :-1]\n # y_tr = data[train, -1]\n # x_va = data[test, :-1]\n # y_va = data[test, -1]\n\n # model.fit(x_tr, y_tr)\n # y_pred = model.predict(x_va)\n # predictions = [round(value) for value in y_pred]\n # f1 = f1_score(y_va, predictions)\n # print(f1)\n model.fit(x_train, y_train)\n y_predict = model.predict(x_test)\n y_predict = [round(value) for value in y_predict]\n return y_predict", "def fit_decision_tree(model, x_train, y_train):\r\n model.fit(x_train, y_train)\r\n score = model.score(x_train, y_train)\r\n importance = model.feature_importances_\r\n return score, importance", "def extract_trees_from_xgb(model, unique_labels, features):\n trees = parse_xgboost_dump(model)\n num_of_labels = len(unique_labels)\n feature_dict = {v:k for k,v in enumerate(features)}\n conjunction_sets = {}\n for i,t in enumerate(trees): #i stands for the corresponding class index\n indexed_tree = {int(v['node_index']): v for v in t}\n conjunction_sets[i] = extract_conjunction_from_xgboost_base_tree(indexed_tree, i, 0, feature_dict, unique_labels, i % num_of_labels, output_type='logit')\n return list(conjunction_sets.values())", "def xgboost_cv(self, nsplits: int = 5) -> (float, float, float):\r\n x_train, x_test, y_train, y_test = train_test_split(self.x, self.y, test_size=0.2)\r\n params = {\r\n \"max_depth\": [2, 3, 5, 8],\r\n \"eta\": [0.01, 0.05, 0.1, 0.15, 0.2],\r\n \"objective\": ['binary:logistic'],\r\n \"sumsample\": [0.5, 0.7, 1],\r\n \"colsample_bytree\": [0.5, 0.7, 1],\r\n \"n_estimators\": [50, 100, 200, 500],\r\n }\r\n \"\"\"\r\n fit_params = {\r\n \"early_stopping_rounds\": 20,\r\n \"eval_metric\": \"error\",\r\n \"eval_set\": [(x_test, y_test)]\r\n }\r\n \"\"\"\r\n model = xgb.XGBClassifier()\r\n gridcv = GridSearchCV(model, params, cv=nsplits)\r\n gridcv.fit(x_train, y_train) # , **fit_params)\r\n best_params = gridcv.best_params_\r\n cv = KFold(n_splits=nsplits)\r\n acc_result = []\r\n for train, test in cv.split(self.x):\r\n x_train = self.x[train, :]\r\n x_test = self.x[test, :]\r\n y_train = self.y[train]\r\n y_test = self.y[test]\r\n model = xgb.XGBClassifier(**best_params).fit(x_train, y_train)\r\n \"\"\"\r\n x_t, x_v, y_t, y_v = train_test_split(x_train, y_train, 
test_size=0.2)\r\n model = xgb.XGBClassifier(**best_params).fit(x_t, y_t, eval_metric=\"error\", eval_set=[(x_v, y_v)],\r\n early_stopping_rounds=20)\r\n \"\"\"\r\n y_predict = model.predict(x_test)\r\n acc_result.append(binary_acc(y_test, y_predict))\r\n return np.mean(acc_result), np.std(acc_result), best_params", "def get_xy(preprocessor, target='Coding:Level1'):\n set_names = ['train', 'valid']\n dfs = [pd.read_csv(f'data/roatan_{s}.csv') for s in set_names]\n\n # fit preprocessor with training set\n preprocessor.fit(dfs[0]['message'])\n # transform all data sets\n xs = [preprocessor.transform(df['message']) for df in dfs]\n\n # encode labels as integers 0 ... n-1 using training set\n le = LabelEncoder().fit(dfs[0][target])\n # transform labels for all data sets\n ys = [le.transform(df[target]) for df in dfs]\n\n classes = le.classes_\n data_sets = list(zip(xs, ys))\n return classes, data_sets, set_names", "def decision_tree(df, variables, test_size):\n from sklearn.model_selection import train_test_split\n from sklearn import tree\n\n # Define input\n X = encoding_df(df, variables)\n\n # Set validation\n y = df['target']\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42)\n\n clf = tree.DecisionTreeRegressor()\n clf = clf.fit(X_train, y_train)\n\n print(compute_rmse(y_test, clf.predict(X_test)))\n return clf.predict(X_test), y_test", "def select_model():\r\n from sklearn import tree\r\n import graphviz\r\n\r\n ValidationSetAndLabels = AllSets[1]\r\n ValLabels = ValidationSetAndLabels[:, [-1]] # extract labels (last column)\r\n ValSet = np.delete(ValidationSetAndLabels, -1, axis=1) # delete labels\r\n\r\n TrainingSetAndLabels = AllSets[2]\r\n TrainLabels = TrainingSetAndLabels[:, [-1]] # extract labels (last column)\r\n TrainSet = np.delete(TrainingSetAndLabels, -1, axis=1) # delete labels\r\n\r\n \"\"\"\r\n This is the code to select the best hyperparameter (part b)\r\n\r\n for SplitCriterion in ['entropy', 'gini']:\r\n print \"Criterion: \" + SplitCriterion + '\\n'\r\n\r\n for MaxDepth in [int(depth) for depth in np.linspace(1, np.log2(TrainSet.shape[1]), 5)]:\r\n print \"max_depth: \" + str(MaxDepth) + '\\n'\r\n\r\n MyTree = tree.DecisionTreeClassifier(criterion=SplitCriterion, max_depth=MaxDepth)\r\n MyTree = MyTree.fit(TrainSet, TrainLabels)\r\n\r\n Predictions = MyTree.predict(ValSet)\r\n Result = np.abs(Predictions - ValLabels.flatten())\r\n\r\n Accuracy = 100 * float(np.count_nonzero(Result == 0)) / Predictions.shape[0]\r\n\r\n print \"Accuracy for this test is: %f %%\" %Accuracy\r\n print '\\n'\r\n\r\n print '\\n'\r\n \"\"\"\r\n\r\n MyTree = tree.DecisionTreeClassifier(criterion='entropy', max_depth=12)\r\n\r\n MyTree = MyTree.fit(TrainSet, TrainLabels)\r\n\r\n Predictions = MyTree.predict(ValSet)\r\n Result = np.abs(Predictions - ValLabels.flatten())\r\n\r\n Accuracy = 100 * float(np.count_nonzero(Result == 0)) / Predictions.shape[0]\r\n\r\n dot_data = tree.export_graphviz(MyTree, out_file=None, max_depth=2,\r\n feature_names=AllSets[3], filled=True, rounded=True, special_characters=True,\r\n class_names=TrainLabels.flatten().astype(str))\r\n graph = graphviz.Source(dot_data)\r\n graph.render(\"output\")", "def train_decision_tree():\n train_model(DecisionTreeRegressor(max_depth=3, random_state=42),\n dataset_file_name=DECISION_TREE_DEFAULT_DATASET,\n model_file_name=DECISION_TREE_DEFAULT_MODEL)", "def decision_tree(original_training_data,call_depth):\n\n ''' Checking the stopping criterion. 
If yes then it returns the majority class (Muffin or CupCake) '''\n if check_if_stopping_criterion_is_met(original_training_data.values) or call_depth > 10:\n majority = classification(original_training_data)\n return majority\n\n else:\n ''' Each time we split the data and go deeper, we increment the depth of the tree '''\n call_depth += 1\n\n ''' Finding the best attribute, best threshold to split data, best minimum entropy '''\n best_split_index, best_attribute, best_threshold, best_minimum_entropy = find_best_attribute_threshold_entropy(original_training_data)\n original_training_data_values = original_training_data.values\n\n best_split_values = original_training_data_values[:,best_split_index]\n\n less_than_threshold = original_training_data[best_split_values <= best_threshold]\n more_than_threshold = original_training_data[best_split_values > best_threshold]\n\n ''' Initializing a variable called as condition which stores the format of the key for the resulting decision tree dictionary '''\n condition = original_training_data.columns[best_split_index] + \" <= \" + str(best_threshold)\n\n ''' Initializing a dictionary where key is condition and value is a list. This is the basic data structure in which the\n resulting decision tree is stored '''\n sub_tree = {condition: []}\n\n ''' Calling the decision tree recursively '''\n left_tree = decision_tree(less_than_threshold, call_depth)\n right_tree = decision_tree(more_than_threshold, call_depth)\n\n ''' For removing edge cases where on either split, the resulting decision tree gives the same result '''\n if left_tree == right_tree:\n sub_tree = left_tree\n else:\n ''' Appending the smaller trees in the final decision tree '''\n sub_tree[condition].append(left_tree)\n sub_tree[condition].append(right_tree)\n\n return sub_tree", "def create_XGBoost_model():\n model = sklearn.ensemble.GradientBoostingRegressor(n_estimators=300, learning_rate=0.05)\n return sklearn.multioutput.RegressorChain(model)", "def decision_tree(data_frame, filename=0):\n\tprint \"Building decision tree...\"\n\tr = robjects.r\n\trpart = importr(\"rpart\")\n\tfit = rpart.rpart(\"category~bpm+speechiness+time_sig+key+duration+loudness+\\\n\t\t\tend_of_fade_in+start_of_fade_out+bpm_range+\\\n\t\t\tmax_bpm_spike+num_keys\", data=data_frame, method=\"class\", \n\t\t\tna_action='na.rpart', control='rpart.control(cp = .0001)')\n\trpart.printcp(fit)\n\tr.plot(fit, uniform=True, main=\"Classification Tree for Genre\")\n\tr.text(fit, use_n=True, all=True, cex=.8)\n\tif filename != 0:\n\t\trpart.post(fit, file=filename, title=\"Classification Tree for Genre\")\n\traw_input(\"> Press enter to continue.\")\n\treturn fit", "def extract_rules(self, labels=None):\n # Extract flat list of rules in array form\n if isinstance(self.base_estimator, RandomForestClassifier):\n rules = list(it.chain(*[self.__extract_dt_rules__(dt) for dt in self.base_estimator.estimators_]))\n elif isinstance(self.base_estimator, GradientBoostingClassifier):\n rules = list(it.chain(*[self.__extract_dt_rules(__dt) for dt in self.base_estimator.estimators_.ravel()]))\n elif isinstance(self.base_estimator, XGBClassifier):\n rules = list(it.chain(*[self.__extract_xgb_dt_rules__(dt) for dt in self._rule_dump]))\n \n # Convert each sub-rule into text, join together with '&' and then add to rules\n self.rules = np.array([' & '.join(self.__convert_rule__(r, labels=labels, scaler=self.ext_scaler)) for r in rules])\n \n return self.rules", "def xgb_train(X_train, y_train, write=False):\n model_xgb = 
xgb.XGBClassifier(max_depth=7,\n min_child_weight=1,\n learning_rate=0.01,\n n_estimators=5000,\n gamma=0.8,\n subsample=0.95,\n colsample_bytree=0.6,\n reg_alpha=0.0025,\n objective='binary:logistic',\n nthread=4,\n scale_pos_weight=1,\n seed=123)\n model_xgb.fit(X_train, y_train)\n if write:\n pickle.dump(model_xgb, open(obj_save_path+'model_xgb.p', 'wb'))\n #model_xgb = pickle.load(open(obj_save_path+'model_xgb.p', 'rb'))\n plot_importance(model_xgb)\n plt.show()\n return model_xgb", "def train_xgb(params, X_train, y_train, cv, scorer='neg_mean_squared_error', seed=42):\n\n n_estimators = int(params[\"n_estimators\"])\n max_depth= int(params[\"max_depth\"])\n\n try:\n model = xgb.XGBRegressor(n_estimators=n_estimators,\n max_depth=max_depth,\n learning_rate=params[\"learning_rate\"],\n subsample=params[\"subsample\"], \n seed=seed)\n\n \n #result = model.fit(X_train,\n # y_train.values.ravel(),\n # eval_set=[(X_train, y_train.values.ravel())],\n # early_stopping_rounds=50,\n # verbose=False)\n\n fit_params = {\n 'eval_set': [(X_train, y_train.values.ravel())],\n 'early_stopping_rounds': 50,\n 'verbose': False\n }\n\n return_estimator = False\n cv_score = cross_validate(\n model,\n X_train, y_train.values.ravel(),\n cv=cv,\n scoring=scorer,\n return_estimator=return_estimator,\n fit_params=fit_params\n )\n\n scores = np.abs(np.array(cv_score['test_score']))\n avg_score = np.mean(scores)\n return {\n \"loss\": avg_score,\n \"scores\": scores,\n \"status\": STATUS_OK,\n #\"models\": cv_score['estimator']\n }\n\n except ValueError as ex:\n return {\n \"error\": ex,\n \"status\": STATUS_FAIL\n }", "def tree(self):\n\n tree_parameters = [{'min_samples_leaf': list(range(2, 10, 1)),\n 'criterion': ['mae', 'mse'],\n 'random_state': [1]}]\n tree_grid = GridSearchCV(estimator=DecisionTreeRegressor(),\n param_grid=tree_parameters,\n scoring=self.scorer, cv=5, n_jobs=-1,\n iid=False)\n tree_grid_result = tree_grid.fit(self.X_train, self.y_train)\n best_tree_parameters = tree_grid_result.best_params_\n tree_score = tree_grid_result.best_score_\n print('Best tree params: ' + str(best_tree_parameters))\n print('Tree score: ' + str(tree_score))\n return DecisionTreeRegressor(\n min_samples_leaf=best_tree_parameters['min_samples_leaf'],\n criterion=best_tree_parameters['criterion'],\n random_state=1)", "def try_ada_boost_decision_tree():\n\n print(\"AdaBoost to Decision Tree\")\n from sklearn.tree import DecisionTreeClassifier\n from sklearn.ensemble import AdaBoostClassifier\n from sklearn.grid_search import GridSearchCV\n\n param_grid = {\"base_estimator__criterion\" : [\"gini\", \"entropy\"],\n \"base_estimator__splitter\" : [\"best\", \"random\"],\n \"n_estimators\": [10, 30]\n }\n\n DTC = DecisionTreeClassifier(random_state = 11, max_features = \"auto\", class_weight = \"balanced\",max_depth = None)\n\n ABC = AdaBoostClassifier(base_estimator = DTC)\n\n grid_search_ABC = GridSearchCV(ABC, param_grid=param_grid, scoring = 'roc_auc')\n\n grid_search_ABC.fit(features_train,labels_train)\n\n pred = grid_search_ABC.predict(features_test)\n accuracy = accuracy_score(labels_test, pred)\n precision = precision_score(labels_test, pred)\n recall = recall_score(labels_test, pred)\n\n print(\"DecisionTree after applying AdaBoost and GridSearchCV:\")\n print(\"accuracy AdaBoost: \", accuracy)\n print(\"precision: \", precision)\n print(\"recall: \", recall)\n print_separator_line()\n dict_results = { \"classifier\": \"AdaBoost decision tree\", \"accuracy\": accuracy, \"precision\": precision, \"recall\": recall 
}\n return dict_results, grid_search_ABC", "def __extract_dt_rules__(self, dt): \n t = dt.tree_ # Get tree object\n rules = []\n\n stack = [(0, -1, -1)] # (node id, parent depth, true[<=thresh]/false[>thresh] arm)\n path = [(0, -1, -1)] # Begin path at root\n while len(stack) > 0: # While nodes to visit is not empty\n nid, pd, op = stack.pop() # Get next node id, path depth, operator\n\n if (pd > path[-1][1]): # Going deeper\n path.append((nid, pd, op))\n elif pd == -1: # ROOT\n pass\n else: # Back-track\n [path.pop() for _ in range(path[-1][1]-pd+1)]\n path.append((nid, pd, op))\n\n if t.children_left[nid] > 0: # If not leaf, add children onto stack\n stack.append((t.children_left[nid], pd + 1, 1))\n stack.append((t.children_right[nid], pd + 1, 0))\n else: # If leaf append rule\n rules.append(np.array([(t.feature[path[i][0]], path[i+1][2], t.threshold[path[i][0]]) for i in range(len(path)-1)]))\n\n return rules", "def prune(tree, testSet, res, technique):\n assert technique in [\"reduced_error\"]\n if technique == \"reduced_error\":\n tbSet = testSet[testSet[tree.col] >= tree.value] #find which test observations belong to this tree's true branch\n fbSet = testSet[testSet[tree.col] < tree.value] #find which test observations belong to this tree's false branch\n \n if tree.tb.results is None: #Check if the true branch of this sub-tree is a leaf\n ptb = prune(tree.tb, tbSet, res, technique) #If not, recursively travel down the true branch and prune it.\n else:\n ptb = tree.tb #If the true branch is a leaf, then the true branch has--in essence--already been pruned.\n if tree.fb.results is None: #Check if the false branch of this sub-tree is a leaf\n pfb = prune(tree.fb, fbSet, res, technique) #If not, recursively travel down the false branch and prune it.\n else:\n pfb = tree.fb #If the false branch is a leaf, then the false branch has--in essence--already been pruned.\n \n #Sum the number of misclassifications of the test data at each of the leaves of this node\n wrong_in_leaves = __deep_count_errors(ptb, tbSet, res) + __deep_count_errors(pfb, fbSet, res)\n \n #Count the number of misclassificationsof the test data that would occur if this node were treated as a leaf\n wrong_at_node = __count_errors(tree, testSet, res)\n \n #Assess whether or not treating the node as a leaf improves the accuracy on the test set\n if wrong_at_node <= wrong_in_leaves: \n #NOTE:The following line of code seems slightly redundant since count_errors(tree, testSet, res) had to call \n #__get_results(tree). I should set up some way to save the output of that function call instead of calling it twice.\n return decisionNode(results = __get_results(tree)) #If so, return a decisionNode where the node is a leaf\n else:\n #If not, return a decisionNode where the node splits on the same column and value as before, but the \n #true and false branches are the pruned-versions of the original true and false branches. 
See above for\n #definition of ptb and pfb\n return decisionNode(col = tree.col, value = tree.value, tb = ptb, fb = pfb)", "def decision_tree_classifier(features,target):\r\n clf = DecisionTreeClassifier()\r\n clf.fit(features, target)\r\n return clf", "def build_decision_tree(baseline=False):\r\n if baseline:\r\n model = DecisionTreeClassifier()\r\n else:\r\n model = DecisionTreeClassifier(criterion='entropy',\r\n splitter='best',\r\n max_depth=25)\r\n\r\n return model", "def summarize_model(clf_, X_tr, X_te, y_tr, y_te, tree=False):\n \n import sklearn.metrics as metrics\n import matplotlib.pyplot as plt\n import pandas as pd\n \n y_hat_tr, y_hat_te = fit_n_pred(clf_, X_tr, X_te, y_tr)\n print('Classification Report:')\n print(metrics.classification_report(y_te, y_hat_te))\n \n if tree:\n fig, ax = plt.subplots(figsize=(10,5), nrows=2)\n\n metrics.plot_confusion_matrix(clf_,X_te,y_te,cmap=\"YlOrRd\", normalize='true',\n ax=ax[0])\n ax[0].set(title='Confusion Matrix')\n ax[0].grid(False)\n\n plot_importance(clf_, X_tr, ax=ax[1])\n plt.tight_layout()\n \n else:\n clf_coef = pd.Series(clf_.coef_[0], index=X_tr.columns, name='Normal')\n abs_coef = pd.Series(abs(clf_.coef_[0]), index=X_tr.columns, name='Absolute')\n posi_coef = pd.Series((clf_coef > 0), name='Positive')\n coef_all = pd.concat([clf_coef, abs_coef, posi_coef], axis=1)\n coef_all.sort_values('Absolute', ascending=True, inplace=True)\n coef_all.tail(20)['Normal'].plot(kind='barh', color=coef_all['Positive'].map({True:'b',False:'r'})\n\n metrics.plot_confusion_matrix(clf_,X_te,y_te,cmap=\"YlOrRd\", normalize='true')\n plt.title('Confusion Matrix')\n plt.grid(False)\n plt.tight_layout()\n\ndef grid_searcher(clf_, params, X_tr, X_te, y_tr, y_te, cv=None, keep_t=False, train_score=True):\n \n \"\"\"Takes any classifier, train/test data for X/y, and dict of parameters to\n iterate over. Optional parameters select for cross-validation tuning, keeping\n time for running the gridsearch, and returning training scores when done.\n Default parameters only return the fitted grid search object. MUST HAVE Timer\n class imported.\"\"\"\n \n from sklearn.model_selection import GridSearchCV\n import numpy as np\n \n ## Instantiate obj. 
with our targets\n grid_s = GridSearchCV(clf_, params, cv=cv, return_train_score=train_score)\n \n ## Time and fit run the 'search'\n time = Timer()\n time.start()\n grid_s.fit(X_tr, y_tr)\n time.stop()\n \n ## Display results\n tr_score = np.mean(grid_s.cv_results_['mean_train_score'])\n te_score = grid_s.score(X_te, y_te)\n print(f'Mean Training Score: {tr_score :.2%}')\n print(f'Mean Test Score: {te_score :.2%}')\n print('Best Parameters:')\n print(grid_s.best_params_)\n \n ## Time keeping and grid obj\n if keep_t:\n lap = time.record().total_seconds()\n print('**********All done!**********')\n return grid_s, lap\n else:\n return grid_s", "def crossValidation(data, output_variable_name):\r\n X, xt, y, yt = train_test_split(\r\n data.drop(output_variable_name, axis=1), data[output_variable_name], test_size=0.01, random_state=SEED)\r\n\r\n model = pickle.load(open(\"models/lasso.sav\", 'rb'))\r\n lassoCV = -mean(cross_val_score(model, X, y, cv=5, scoring='neg_root_mean_squared_error'))\r\n\r\n model = pickle.load(open(\"models/ridge.sav\", 'rb'))\r\n ridgeCV = -mean(cross_val_score(model, X, y, cv=5, scoring='neg_root_mean_squared_error'))\r\n\r\n model = pickle.load(open(\"models/decisionTree.sav\", 'rb'))\r\n decTreeCV = -mean(cross_val_score(model, X, y, cv=5, scoring='neg_root_mean_squared_error'))\r\n\r\n param = {\r\n 'max_depth': 15,\r\n 'eta': 0.1,\r\n 'objective': 'reg:squarederror',\r\n 'nthread': 16,\r\n \"subsample\": 0.5,\r\n \"colsample_bytree\": 0.5,\r\n 'eval_metric': 'rmse'\r\n }\r\n num_round = XGB_EPOCH_NR\r\n\r\n dtrain = xgb.DMatrix(X, label=y)\r\n xgbCV = xgb.cv(\r\n param,\r\n dtrain,\r\n num_boost_round=num_round,\r\n seed=SEED,\r\n nfold=5,\r\n metrics={'rmse'}\r\n )[\"test-rmse-mean\"][-1:]\r\n\r\n param = {\r\n \"iterations\": 400,\r\n \"learning_rate\": 0.02,\r\n \"depth\": 12,\r\n \"eval_metric\": 'RMSE',\r\n \"random_seed\": 23,\r\n \"bagging_temperature\": 0.2,\r\n \"od_type\": 'Iter',\r\n \"metric_period\": 75,\r\n \"od_wait\": 100\r\n }\r\n\r\n catBoostCV = cv(data, param, fold_count=5, plot=True)\r\n\r\n return lassoCV, ridgeCV, decTreeCV, xgbCV, catBoostCV", "def save_XGB(X, y):\n XG_final_model = XGBClassifier()\n XG_final_model.fit(X, y)\n joblib.dump(XG_final_model, XG_filename) \n print('Final XG model trained and saved to ' + XG_filename)", "def predict_all():\n\n # need train dir to list category names\n cfg = configparser.ConfigParser()\n cfg.read(sys.argv[1])\n base = os.environ['DATA_ROOT']\n eval_type = cfg.get('args', 'eval_type')\n train_xml_dir = os.path.join(base, cfg.get('data', 'train_xml_dir'))\n\n if eval_type == 'sparse':\n predict_sparse(train_xml_dir)\n else:\n predict_dense(train_xml_dir)", "def decision_tree(df):\n features = df[['Temperature(F)', 'Humidity(%)', 'Visibility(mi)', 'Wind_Speed(mph)',\n 'Precipitation(in)', 'Amenity', 'Bump', 'Crossing', 'Give_Way',\n 'Junction', 'No_Exit', 'Railway', 'Roundabout', 'Station', 'Stop',\n 'Traffic_Calming', 'Traffic_Signal', 'Civil_Twilight', 'Rush Hour', 'Weekend',\n 'Side_R', 'Season_Spring', 'Season_Summer',\n 'Season_Winter', 'Weather_Condition_Clear', 'Weather_Condition_Fog',\n 'Weather_Condition_Other', 'Weather_Condition_Rain',\n 'Weather_Condition_Snow', 'Weather_Condition_Thunderstorm']]\n X= features\n y = df['Severity']\n clf = DecisionTreeClassifier(min_samples_split=6, min_samples_leaf=2, max_depth=3, \n criterion = 'gini', random_state=42)\n clf.fit(X, y)\n\n plt.figure(figsize=(25,10))\n a = plot_tree(clf, \n feature_names=X.columns.to_list(), \n filled=True, \n 
rounded=True, \n fontsize=14)\n plt.savefig(\"../Images/rockies_decision_tree.png\")\n plt.show()", "def gridSearch_XGB(gridnum=3):\n n_est_list = np.array([1, 5, 10, 50, 100, 500, 1000])\n max_dep_list = list(range(1,3))\n if gridnum==1:\n grid = {'n_estimators': n_est_list, 'max_depth': np.array(max_dep_list)}\n elif gridnum==2:\n grid = {'max_depth': np.array(max_dep_list+[50])}\n else:\n grid = {'n_estimators': np.array([1000,5000])}\n XGBCla = get_XGBmodel()\n GSxgbCla = GridSearchCV(\n XGBCla, \n grid, \n verbose=2, \n cv=StratifiedKFold(n_splits=5, shuffle=True)\n )\n print(GSxgbCla.best_params_)" ]
[ "0.5865318", "0.5536741", "0.54987574", "0.5455966", "0.54210675", "0.5409109", "0.5405483", "0.53791", "0.53362054", "0.53147733", "0.53034365", "0.52629757", "0.51678437", "0.51664716", "0.51662976", "0.5051097", "0.50148183", "0.49448028", "0.49409705", "0.49294248", "0.49025938", "0.4878625", "0.48769754", "0.48744574", "0.4858999", "0.4850327", "0.48470944", "0.4837806", "0.48308575", "0.4828053" ]
0.63547754
0
Extract the rule set from a single decision tree according to sklearn's binary tree format
def __extract_dt_rules__(self, dt): t = dt.tree_ # Get tree object rules = [] stack = [(0, -1, -1)] # (node id, parent depth, true[<=thresh]/false[>thresh] arm) path = [(0, -1, -1)] # Begin path at root while len(stack) > 0: # While nodes to visit is not empty nid, pd, op = stack.pop() # Get next node id, path depth, operator if (pd > path[-1][1]): # Going deeper path.append((nid, pd, op)) elif pd == -1: # ROOT pass else: # Back-track [path.pop() for _ in range(path[-1][1]-pd+1)] path.append((nid, pd, op)) if t.children_left[nid] > 0: # If not leaf, add children onto stack stack.append((t.children_left[nid], pd + 1, 1)) stack.append((t.children_right[nid], pd + 1, 0)) else: # If leaf append rule rules.append(np.array([(t.feature[path[i][0]], path[i+1][2], t.threshold[path[i][0]]) for i in range(len(path)-1)])) return rules
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __extract_xgb_dt_rules__(self, dt): \n md = self.max_depth + 1 # upper limit of max_depth?\n rules = []\n levels = np.zeros((md, 3)) # Stores: (feature name, threshold, next node id)\n path = []\n\n # Extract feature numbers and thresholds for all nodes\n feat_thresh_l = re.findall(r'\\[f([0-9]+)<([-]?[0-9]+\\.?[0-9]*)\\]', dt)\n\n _id = 0\n prune = -1\n for line in dt.split('\\n')[:-1]:\n # Separate node id and rest of line\n _id, rest = line.split(':')\n\n # Count number of tabs at start of line to get level (and then remove)\n level = Counter(_id)['\\t']\n _id = _id.lstrip()\n\n if prune > 0:\n # If we were last at a leaf, prune the path\n path = path[:-1+(level-prune)]\n # Add current node to path\n path.append(int(_id))\n\n if 'leaf' in rest:\n prune = level # Store where we are so we can prune when we backtrack\n rules.append(levels[:level, (0, 2, 1)].copy()) # Add rules\n rules[-1][:, 1] = rules[-1][:, 1] == np.array(path[1:]) # Convert path to geq/leq operators\n else:\n # Extract (feature name, threshold, next node id)\n levels[level, :] = re.findall(r'\\[f([0-9]+)<([-]?[0-9]+\\.?[0-9]*)\\].*yes=([0-9]+)', line)[0]\n # Don't prune\n prune = -1\n\n return rules", "def decision_tree(original_training_data,call_depth):\n\n ''' Checking the stopping criterion. If yes then it returns the majority class (Muffin or CupCake) '''\n if check_if_stopping_criterion_is_met(original_training_data.values) or call_depth > 10:\n majority = classification(original_training_data)\n return majority\n\n else:\n ''' Each time we split the data and go deeper, we increment the depth of the tree '''\n call_depth += 1\n\n ''' Finding the best attribute, best threshold to split data, best minimum entropy '''\n best_split_index, best_attribute, best_threshold, best_minimum_entropy = find_best_attribute_threshold_entropy(original_training_data)\n original_training_data_values = original_training_data.values\n\n best_split_values = original_training_data_values[:,best_split_index]\n\n less_than_threshold = original_training_data[best_split_values <= best_threshold]\n more_than_threshold = original_training_data[best_split_values > best_threshold]\n\n ''' Initializing a variable called as condition which stores the format of the key for the resulting decision tree dictionary '''\n condition = original_training_data.columns[best_split_index] + \" <= \" + str(best_threshold)\n\n ''' Initializing a dictionary where key is condition and value is a list. 
This is the basic data structure in which the\n resulting decision tree is stored '''\n sub_tree = {condition: []}\n\n ''' Calling the decision tree recursively '''\n left_tree = decision_tree(less_than_threshold, call_depth)\n right_tree = decision_tree(more_than_threshold, call_depth)\n\n ''' For removing edge cases where on either split, the resulting decision tree gives the same result '''\n if left_tree == right_tree:\n sub_tree = left_tree\n else:\n ''' Appending the smaller trees in the final decision tree '''\n sub_tree[condition].append(left_tree)\n sub_tree[condition].append(right_tree)\n\n return sub_tree", "def train_decision_tree():\n train_model(DecisionTreeRegressor(max_depth=3, random_state=42),\n dataset_file_name=DECISION_TREE_DEFAULT_DATASET,\n model_file_name=DECISION_TREE_DEFAULT_MODEL)", "def decision_tree(df, variables, test_size):\n from sklearn.model_selection import train_test_split\n from sklearn import tree\n\n # Define input\n X = encoding_df(df, variables)\n\n # Set validation\n y = df['target']\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42)\n\n clf = tree.DecisionTreeRegressor()\n clf = clf.fit(X_train, y_train)\n\n print(compute_rmse(y_test, clf.predict(X_test)))\n return clf.predict(X_test), y_test", "def build_decision_tree():\n\n decision_tree_root = None\n decision_tree_root = DecisionNode(None,None,lambda feature:feature[0]==1)\n decision_tree_root.left = DecisionNode(None,None,None,1)\n decision_tree_root.right = DecisionNode(None,None,lambda feature:feature[3]==1)\n decision_tree_root.right.left = DecisionNode(None,None,lambda feature:feature[1]==0)\n decision_tree_root.right.right = DecisionNode(None,None,lambda feature:feature[2]==1)\n decision_tree_root.right.left.left = DecisionNode(None,None,None,1)\n decision_tree_root.right.left.right = DecisionNode(None,None,None,0)\n decision_tree_root.right.right.left = DecisionNode(None,None,None,0)\n decision_tree_root.right.right.right = DecisionNode(None,None,None,1)\n return decision_tree_root", "def decision_tree(data_frame, filename=0):\n\tprint \"Building decision tree...\"\n\tr = robjects.r\n\trpart = importr(\"rpart\")\n\tfit = rpart.rpart(\"category~bpm+speechiness+time_sig+key+duration+loudness+\\\n\t\t\tend_of_fade_in+start_of_fade_out+bpm_range+\\\n\t\t\tmax_bpm_spike+num_keys\", data=data_frame, method=\"class\", \n\t\t\tna_action='na.rpart', control='rpart.control(cp = .0001)')\n\trpart.printcp(fit)\n\tr.plot(fit, uniform=True, main=\"Classification Tree for Genre\")\n\tr.text(fit, use_n=True, all=True, cex=.8)\n\tif filename != 0:\n\t\trpart.post(fit, file=filename, title=\"Classification Tree for Genre\")\n\traw_input(\"> Press enter to continue.\")\n\treturn fit", "def main():\n\n ''' Reading the training data file '''\n original_training_data = pd.read_csv(\"DT_Data_CakeVsMuffin_v012_TRAIN.csv\")\n\n ''' Storing the final decision tree '''\n final_tree = decision_tree(original_training_data,0)\n\n ''' Printing the final decision tree '''\n print(\"This is the resulting decision tree: \\n\")\n print(final_tree)\n\n ''' Iterating through the dictionary by using the key values '''\n for key in final_tree.keys():\n ''' Parent = Flour <= 5.1636'''\n parent = key\n ''' left_child = [{'Oils <= 3.1265': [{'Flour <= 2.7291': [{'Proteins <= 2.6527': ['Muffin', 'CupCake']}, 'Muffin']}, 'CupCake']}'''\n left_child = final_tree[parent][0]\n ''' right_child = {'Oils <= 7.7793': ['Muffin', {'Flour <= 8.2225': ['CupCake', 'Muffin']}]}]'''\n right_child 
= final_tree[parent][1]\n\n ''' Writing a file which generates code for classification '''\n file = open('HW06_Parchand_Nihal_Classifier.py','w+')\n file.write(\"'''Importing libraries''' \"\n \"\\n\\nimport pandas as pd \\n\\ndef main():\"\n \"\\n\\tdata_df = pd.read_csv('DT_Data_CakeVsMuffin_v012_TEST.csv')\"\n \"\\n\\tresult = []\"\n \"\\n\\tfor row in range(0,len(data_df)):\"\n \"\\n\\t\\tFlour = data_df.loc[row][0]\"\n \"\\n\\t\\tSugar = data_df.loc[row][1]\"\n \"\\n\\t\\tOils = data_df.loc[row][2]\"\n \"\\n\\t\\tProteins = data_df.loc[row][3]\"\n \"\\n\\t\\tif {}:\\n\".format(parent))\n\n ''' Iterating through the left_tree '''\n for key in left_child.keys():\n file.write(\"\\t\\t\\tif {}:\\n\".format(key))\n\n ''' Iterating through the inner left_tree '''\n for inner_key in left_child[key][0].keys():\n file.write(\"\\t\\t\\t\\tif {}:\\n\".format(inner_key))\n\n for inner_inner_key in ((left_child[key][0])[inner_key])[0]:\n file.write(\"\\t\\t\\t\\t\\tif {}:\\n\".format(inner_inner_key))\n file.write(\"\\t\\t\\t\\t\\t\\tresult.append(0)\\n\")\n file.write(\"\\t\\t\\t\\t\\telse:\\n\".format(inner_inner_key))\n file.write(\"\\t\\t\\t\\t\\t\\tresult.append(1)\\n\")\n\n file.write(\"\\t\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\t\\tresult.append(0)\\n\")\n file.write(\"\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\tresult.append(1)\\n\")\n file.write(\"\\t\\telse:\\n\")\n\n ''' Iterating through the right_tree '''\n for key in right_child.keys():\n file.write(\"\\t\\t\\tif {}:\\n\".format(key))\n file.write(\"\\t\\t\\t\\tresult.append(0)\\n\")\n for inner_key in right_child[key][1].keys():\n file.write(\"\\t\\t\\telif {}:\\n\".format(inner_key))\n file.write(\"\\t\\t\\t\\tresult.append(1)\\n\")\n file.write(\"\\t\\t\\telse:\\n\")\n file.write(\"\\t\\t\\t\\tresult.append(0)\\n\\n\")\n\n ''' Writing the results of classifier to a csv file '''\n file.write(\n \"\\twith open('HW06_Parchand_Nihal_MyClassifications.csv', 'w+') as file2:\\n\"\n \"\\t\\tfor value in result:\\n\"\n \"\\t\\t\\tfile2.write(str(value))\\n\"\n \"\\t\\t\\tfile2.write('\\\\n')\\n\\n\"\n \"main()\")", "def buildTree(rows, maxDepth = None, scoref=entropy, depth = 0):\n #A base condition for the recursion. 
Check if this branch of a split has no data\n if len(rows)==0:\n return decisionNode( )\n newDepth = depth + 1 #Calculate the depth of the next split.\n #Check if the depth at the next split is greater than a maximum specified depth\n if (maxDepth == 0 or maxDepth) and (newDepth > maxDepth): \n return decisionNode(results=__uniqueCounts(rows)) #If so, stop splitting.\n current_score=scoref(rows) #Calculate the current value of the score function.\n # Set up some variables to track the best criteria\n best_gain=0.0 #Initialize a value for the best gain from all possible splits\n best_criteria=None #Initialize a variable for the best column to split on\n best_sets=None #Initialize a variable for the best split's true and false data.\n\n #Count the number of columns in the row, minus the results column \n column_count=len(rows[0])-1\n for col in range(0,column_count): #Iterate over all the columns of the data\n #Generate the list of different values in this column\n column_values={} #Initialize a dictionary to store the column values\n for row in rows: \n #Iterate over each row, adding a key in the dict for each observed value\n column_values[row[col]]=1\n # Divide the dataset on each value in this column.\n for value in column_values.keys( ):\n (set1,set2)=__divideset(rows,col,value)\n #Calculate the fraction of data in the true branch\n p=float(len(set1))/len(rows) \n #Calculate the gain on the chosen score function using this split.\n gain=current_score-p*scoref(set1)-(1-p)*scoref(set2) \n #Check if this split provides a better gain than the best previous split\n if gain>best_gain and len(set1)>0 and len(set2)>0:\n best_gain=gain\n best_criteria=(col,value)\n best_sets=(set1,set2)\n # Recursively create the subbranches\n if best_gain>0:\n trueBranch=buildTree(best_sets[0], maxDepth = maxDepth, depth = newDepth)\n falseBranch=buildTree(best_sets[1], maxDepth = maxDepth, depth = newDepth)\n return decisionNode(col=best_criteria[0],value=best_criteria[1],\n tb=trueBranch,fb=falseBranch)\n else:\n return decisionNode(results=__uniqueCounts(rows))", "def predict_decision_tree(input_data=rand_input, tree=dtr_full):\n return y_scaler.inverse_transform(tree.predict(input_data))", "def get_decision_tree(log, net, initial_marking, final_marking, decision_point=None, attributes=None, parameters=None):\n from sklearn import tree\n\n if parameters is None:\n parameters = {}\n log = log_converter.apply(log, parameters=parameters)\n X, y, targets = apply(log, net, initial_marking, final_marking, decision_point=decision_point,\n attributes=attributes, parameters=parameters)\n dt = tree.DecisionTreeClassifier()\n dt = dt.fit(X, y)\n return dt, list(X.columns.values.tolist()), targets", "def tree2pc(tree, X, y, ncat, learnspn=np.Inf, max_height=100000,\n thr=0.01, minstd=1, smoothing=1e-6, return_pc=True):\n\n scope = np.array([i for i in range(X.shape[1]+1)]).astype(int)\n data = np.concatenate([X, np.expand_dims(y, axis=1)], axis=1)\n lp = np.sum(np.where(ncat==1, 0, ncat)) * smoothing # LaPlace counts\n classcol = len(ncat)-1\n\n # Recursively parse decision tree nodes to PC nodes.\n def recurse(node, node_ind, depth, data, upper, lower):\n value = tree_.value[node_ind][0]\n counts = np.bincount(data[:, -1].astype(int), minlength=int(ncat[-1]))\n # If split node\n if tree_.feature[node_ind] != _tree.TREE_UNDEFINED:\n split_var = feature_name[node_ind]\n split_value = np.array([tree_.threshold[node_ind]], dtype=np.float64)\n sumnode = SumNode(scope=scope, n=data.shape[0]+lp)\n if node is not None:\n 
node.add_child(sumnode)\n # Parse left node <=\n upper1 = upper.copy()\n lower1 = lower.copy()\n upper1[split_var] = min(split_value, upper1[split_var])\n split1 = data[np.where(data[:, split_var] <= split_value)]\n p1 = ProdNode(scope=scope, n=split1.shape[0]+lp)\n sumnode.add_child(p1)\n ind1 = Leaf(scope=np.array([split_var]), n=split1.shape[0]+lp, value=split_value, comparison=3) # Comparison <=\n p1.add_child(ind1)\n recurse(p1, tree_.children_left[node_ind], depth + 1, split1.copy(), upper1, lower1)\n # Parse right node >\n upper2 = upper.copy()\n lower2 = lower.copy()\n lower2[split_var] = max(split_value, lower2[split_var])\n split2 = data[np.where(data[:, split_var] > split_value)]\n p2 = ProdNode(scope=scope, n=split2.shape[0]+lp)\n sumnode.add_child(p2)\n ind2 = Leaf(scope=np.array([split_var]), n=split2.shape[0]+lp, value=split_value, comparison=4) # Comparison >\n p2.add_child(ind2)\n recurse(p2, tree_.children_right[node_ind], depth + 1, split2.copy(), upper2, lower2)\n return sumnode\n # Leaf node\n else:\n assert node is not None, \"Tree has no splits.\"\n if data.shape[0] >= learnspn:\n learner = LearnSPN(ncat, thr, 2, max_height, None)\n fit(learner, data, node)\n else:\n for var in scope:\n if ncat[var] > 1: # Categorical variable\n leaf = MultinomialLeaf(scope=np.array([var]), n=data.shape[0]+lp)\n node.add_child(leaf)\n fit_multinomial(leaf, data, int(ncat[var]), smoothing)\n else: # Continuous variable\n leaf = GaussianLeaf(scope=np.array([var]), n=data.shape[0]+lp)\n node.add_child(leaf)\n fit_gaussian(leaf, data, upper[var], lower[var], minstd)\n return None\n\n upper = ncat.copy().astype(float)\n upper[upper == 1] = np.Inf\n lower = ncat.copy().astype(float)\n lower[ncat == 1] = -np.Inf\n\n feature_names = [i for i in range(X.shape[1])]\n tree_ = tree.tree_\n feature_name = [\n feature_names[i] if i != _tree.TREE_UNDEFINED else \"undefined!\"\n for i in tree_.feature\n ]\n root = recurse(None, 0, 1, data, upper, lower)\n if return_pc:\n pc = PC(ncat)\n pc.root = root\n return pc\n return root", "def buildDecisionTree(self, data):\n self.data = data\n self.decisionTree = self.buildTree(self.data, self.listAttributes)\n with open(\"decision_tree_model\", \"wb\") as f:\n pickle.dump(self.decisionTree, f, pickle.HIGHEST_PROTOCOL)\n return self.decisionTree", "def fit_decision_tree(model, x_train, y_train):\r\n model.fit(x_train, y_train)\r\n score = model.score(x_train, y_train)\r\n importance = model.feature_importances_\r\n return score, importance", "def fetchNodes(tree):\n if tree.results is None: #Check if the node is a branch\n condItems = {} #Initialize a container for the node conditions from lower branches\n v = [\"true\", \"false\"] #\"Veracity values\"\n for branch, veracity in [(tree.tb, v[0]), (tree.fb, v[1])]: #iterate over this node's true and false child nodes\n lower_results = fetchNodes(branch)\n if len(lower_results) == 1: #Check if child node is actually a leaf. 
If so,\n lower_results.insert(0, (tree.col, tree.value, veracity))\n condItems[veracity] = [lower_results] #Initialize the condition needed to reach that leaf\n else:\n condItems[veracity] = [] #If the child is not a leaf, initialize an empty list to contain its updated conditions\n for item in lower_results: #Iterate over each set of node conditions that stem from this branch\n new_descriptor = deepcopy(item) #make a deep copy of the list of node conditions from the lower level nodes\n #insert this node's condition at the beginning of each of the node conditions from the lower levels\n new_descriptor.insert(0, (tree.col, tree.value, veracity)) \n condItems[veracity].append(new_descriptor) #append the updated set of node conditions to the branches items\n node_conditions = deepcopy(condItems[v[0]]) #Initialize the complete list of node conditions that stem from this node\n node_conditions.extend(deepcopy(condItems[v[1]])) #Add the node conditions from the second branch of this node\n return node_conditions #Send the full set of node conditions from this node up to the higher nodes.\n else: #If the node is a leaf, return the dictionary of results\n return [tree.results]", "def prune(tree, testSet, res, technique):\n assert technique in [\"reduced_error\"]\n if technique == \"reduced_error\":\n tbSet = testSet[testSet[tree.col] >= tree.value] #find which test observations belong to this tree's true branch\n fbSet = testSet[testSet[tree.col] < tree.value] #find which test observations belong to this tree's false branch\n \n if tree.tb.results is None: #Check if the true branch of this sub-tree is a leaf\n ptb = prune(tree.tb, tbSet, res, technique) #If not, recursively travel down the true branch and prune it.\n else:\n ptb = tree.tb #If the true branch is a leaf, then the true branch has--in essence--already been pruned.\n if tree.fb.results is None: #Check if the false branch of this sub-tree is a leaf\n pfb = prune(tree.fb, fbSet, res, technique) #If not, recursively travel down the false branch and prune it.\n else:\n pfb = tree.fb #If the false branch is a leaf, then the false branch has--in essence--already been pruned.\n \n #Sum the number of misclassifications of the test data at each of the leaves of this node\n wrong_in_leaves = __deep_count_errors(ptb, tbSet, res) + __deep_count_errors(pfb, fbSet, res)\n \n #Count the number of misclassificationsof the test data that would occur if this node were treated as a leaf\n wrong_at_node = __count_errors(tree, testSet, res)\n \n #Assess whether or not treating the node as a leaf improves the accuracy on the test set\n if wrong_at_node <= wrong_in_leaves: \n #NOTE:The following line of code seems slightly redundant since count_errors(tree, testSet, res) had to call \n #__get_results(tree). I should set up some way to save the output of that function call instead of calling it twice.\n return decisionNode(results = __get_results(tree)) #If so, return a decisionNode where the node is a leaf\n else:\n #If not, return a decisionNode where the node splits on the same column and value as before, but the \n #true and false branches are the pruned-versions of the original true and false branches. 
See above for\n #definition of ptb and pfb\n return decisionNode(col = tree.col, value = tree.value, tb = ptb, fb = pfb)", "def decision_tree_classifier(features,target):\r\n clf = DecisionTreeClassifier()\r\n clf.fit(features, target)\r\n return clf", "def __build_tree__(self, features, classes, depth=0):\n\n # TODO: finish this.\n root = None\n if (len(set(classes)) <= 1) and (len(classes) != 0) :\n return DecisionNode(None,None,None,classes[0])\n elif (len(classes) == 0):\n return DecisionNode(None,None,None,2)\n elif depth == self.depth_limit:\n return DecisionNode(None,None,None,max(set(classes), key=list(classes).count))\n else:\n# if depth == 0:\n features = np.array(features)\n classes = np.array(classes).reshape(-1,1)\n feat_shape = features.shape\n sample_list = range(feat_shape[0])\n gains = np.zeros((feat_shape[1]))\n indices = np.zeros((feat_shape[1]))\n for i in range(feat_shape[1]):\n attribute = features[:,i]\n for j in range(20):\n split_indx = int(np.random.choice(sample_list, replace=False))\n idx_above = np.where(attribute > attribute[split_indx])[0]\n idx_below = np.where(attribute < attribute[split_indx])[0]\n classes_below = classes[idx_below,:].reshape(1,-1)[0]\n classes_above = classes[idx_above,:].reshape(1,-1)[0]\n gain = gini_gain(list(classes.reshape(1,-1)[0]),[list(classes_below),list(classes_above)])\n if gain > gains[i]:\n gains[i] = gain\n indices[i] = split_indx\n indx = np.argmax(gains)\n split_indx = int(indices[indx])\n attribute = features[:,indx]\n idx_above = np.where(attribute > attribute[split_indx])[0]\n idx_below = np.where(attribute < attribute[split_indx])[0] \n features_below = features[idx_below,:]\n features_above = features[idx_above,:]\n classes_below = classes[idx_below,:].reshape(1,-1)[0]\n classes_above = classes[idx_above,:].reshape(1,-1)[0]\n if (len(classes_below) != 0) and (len(classes_above) != 0):\n root = DecisionNode(None,None,lambda feat:feat[indx] > features[split_indx,indx])\n root.left = self.__build_tree__(features_above, classes_above, depth+1)\n root.right = self.__build_tree__(features_below, classes_below, depth+1)\n return root\n elif (len(classes_below) == 0) and (len(classes_above) != 0):\n return DecisionNode(None,None,None,max(set(classes_above), key=list(classes_above).count))\n elif (len(classes_above) == 0) and (len(classes_below) !=0):\n return DecisionNode(None,None,None,max(set(classes_below), key=list(classes_below).count))\n else:\n return DecisionNode(None,None,None,2)", "def select_model():\r\n from sklearn import tree\r\n import graphviz\r\n\r\n ValidationSetAndLabels = AllSets[1]\r\n ValLabels = ValidationSetAndLabels[:, [-1]] # extract labels (last column)\r\n ValSet = np.delete(ValidationSetAndLabels, -1, axis=1) # delete labels\r\n\r\n TrainingSetAndLabels = AllSets[2]\r\n TrainLabels = TrainingSetAndLabels[:, [-1]] # extract labels (last column)\r\n TrainSet = np.delete(TrainingSetAndLabels, -1, axis=1) # delete labels\r\n\r\n \"\"\"\r\n This is the code to select the best hyperparameter (part b)\r\n\r\n for SplitCriterion in ['entropy', 'gini']:\r\n print \"Criterion: \" + SplitCriterion + '\\n'\r\n\r\n for MaxDepth in [int(depth) for depth in np.linspace(1, np.log2(TrainSet.shape[1]), 5)]:\r\n print \"max_depth: \" + str(MaxDepth) + '\\n'\r\n\r\n MyTree = tree.DecisionTreeClassifier(criterion=SplitCriterion, max_depth=MaxDepth)\r\n MyTree = MyTree.fit(TrainSet, TrainLabels)\r\n\r\n Predictions = MyTree.predict(ValSet)\r\n Result = np.abs(Predictions - ValLabels.flatten())\r\n\r\n Accuracy = 100 * 
float(np.count_nonzero(Result == 0)) / Predictions.shape[0]\r\n\r\n print \"Accuracy for this test is: %f %%\" %Accuracy\r\n print '\\n'\r\n\r\n print '\\n'\r\n \"\"\"\r\n\r\n MyTree = tree.DecisionTreeClassifier(criterion='entropy', max_depth=12)\r\n\r\n MyTree = MyTree.fit(TrainSet, TrainLabels)\r\n\r\n Predictions = MyTree.predict(ValSet)\r\n Result = np.abs(Predictions - ValLabels.flatten())\r\n\r\n Accuracy = 100 * float(np.count_nonzero(Result == 0)) / Predictions.shape[0]\r\n\r\n dot_data = tree.export_graphviz(MyTree, out_file=None, max_depth=2,\r\n feature_names=AllSets[3], filled=True, rounded=True, special_characters=True,\r\n class_names=TrainLabels.flatten().astype(str))\r\n graph = graphviz.Source(dot_data)\r\n graph.render(\"output\")", "def extract_rules(self, labels=None):\n # Extract flat list of rules in array form\n if isinstance(self.base_estimator, RandomForestClassifier):\n rules = list(it.chain(*[self.__extract_dt_rules__(dt) for dt in self.base_estimator.estimators_]))\n elif isinstance(self.base_estimator, GradientBoostingClassifier):\n rules = list(it.chain(*[self.__extract_dt_rules(__dt) for dt in self.base_estimator.estimators_.ravel()]))\n elif isinstance(self.base_estimator, XGBClassifier):\n rules = list(it.chain(*[self.__extract_xgb_dt_rules__(dt) for dt in self._rule_dump]))\n \n # Convert each sub-rule into text, join together with '&' and then add to rules\n self.rules = np.array([' & '.join(self.__convert_rule__(r, labels=labels, scaler=self.ext_scaler)) for r in rules])\n \n return self.rules", "def trainDecisionTree(inputDf, outputDf):\n clf = DecisionTreeRegressor(random_state=0)\n clf.fit(inputDf, outputDf)\n return clf", "def classifier(decision_tree,data):\n dt = copy.deepcopy(decision_tree) # copy to maintain original decision tree\n cur_attr = list(dt)[0] # 'cur_attr' is first selected attribute\n \n while True:\n dt = dt[cur_attr] # 'dt' is sub decision tree \n value = data[cur_attr] # 'value' is data's attribute value\n\n # if there is no dictionary type instance, dt[value] is class label\n if not isinstance(dt[value],dict): \n return dt[value]\n\n dt = dt[value] # 'dt' is branches of value\n cur_attr = list(dt)[0] # update cur_attr", "def decision_tree_prediction(example, root, attributes):\n # If reached a leaf node, return the label\n if isinstance(root, str):\n return root\n\n # Attribute that was split on\n attribute = root.attribute\n # Column of the attribute that was split on\n i = get_index(attribute, attributes)\n testValue = example[i]\n # Check every child to see what path the example must take in the decision tree\n for child in root.children:\n if isinstance(child.branch, int):\n if int(testValue) <= child.branch:\n return decision_tree_prediction(example, child.nextTree, attributes)\n elif isinstance(child.branch, float):\n if int(testValue) > child.branch:\n return decision_tree_prediction(example, child.nextTree, attributes)\n# -----------------------------------------------Naive Bayes-------------------------------------------------\n # Naive bayes\n elif child.branch == \"Naive\":\n yes_probability = child.histogram[0]\n no_probability = child.histogram[2]\n i = 0\n for feature in example:\n if feature == \"yes\" or feature == \"no\":\n continue\n if i == 0 or i == 2 or i == 4 or i == 10 or i == 11 or i == 12:\n j = 0\n # Its a float so check\n for key in child.histogram[1][attribute_index[i]]:\n if float(feature) <= float(key) and j == 0:\n yes_probability = yes_probability * child.histogram[1][attribute_index[i]][key]\n 
elif j == 1:\n yes_probability = yes_probability * child.histogram[1][attribute_index[i]][key]\n j += 1\n for key in child.histogram[1][attribute_index[i]]:\n if float(feature) <= float(key) and j == 0:\n no_probability = no_probability * child.histogram[1][attribute_index[i]][key]\n elif j == 1:\n no_probability = no_probability * child.histogram[1][attribute_index[i]][key]\n j += 1\n else:\n yes_probability = yes_probability * child.histogram[1][attribute_index[i]][feature]\n no_probability = no_probability * child.histogram[3][attribute_index[i]][feature]\n i += 1\n if yes_probability > no_probability:\n return \"yes\"\n elif no_probability >= yes_probability:\n return \"no\"\n# -----------------------------------------------End Naive Bayes-------------------------------------------------\n else:\n if child.branch == testValue:\n return decision_tree_prediction(example, child.nextTree, attributes)", "def tree_model(feature_train, help_rank_train, model_name):\n decision_tree = DecisionTreeClassifier()\n decision_tree = decision_tree.fit(feature_train, help_rank_train)\n tree_model = open(model_name,'wb')\n dump(decision_tree, tree_model, -1)\n return", "def classify(series, tree):\n feature = tree[0]\n subtree = tree[1]\n\n answer = series[feature]\n response = subtree[answer]\n\n if type(response) != list: #base case\n return subtree[answer]\n else:\n return classify(series, response) #recursive case", "def generateRules(singleCovering, decisions):\n tempCovering = tupleToDict(singleCovering)\n tempDecisions = tupleToDict(decisions)\n\n coverDF = pd.DataFrame(tempCovering)\n decisionsDF = pd.DataFrame(tempDecisions)\n\n combinedDF = pd.concat([coverDF, decisionsDF], axis=1)\n\n ruleDF = combinedDF[combinedDF.iloc[:,-1] != 'madhu']\n # ruleDF = ruleDF.drop_duplicates()\n conceptblockDF = ruleDF.copy(deep=True)\n del conceptblockDF['class']\n\n ruleDict = conceptblockDF.T.to_dict().values()\n ruleTuple = dictToTuple(ruleDict)\n\n\n ruleset = set(ruleDF.index.values)\n\n for i in range(len(ruleTuple)):\n listofsets = []\n count = 0\n\n for j in range(len(ruleTuple[i])):\n # collect the cases that are satisfying a rule from the ruleTuple\n listofsets.append(set(combinedDF[combinedDF[ruleTuple[i][j][0]] == ruleTuple[i][j][1]].index.values))\n\n for m in range(len(listofsets)):\n if (len(listofsets) > 1):\n # drop the first condition from the rule\n appendlast = listofsets.pop(0)\n\n # compute the case Numbers thar are satifying the ruleTUple\n u = set.intersection(*listofsets)\n\n if (not u.issubset(ruleset)):\n # Check whether the remaining attributes satisfy the cases\n # if not append the condition to the attribute list\n listofsets.append(appendlast)\n elif(len(ruleTuple[i]) > 1):\n # if yes remove the dropped attribute from the list\n ruleTuple[i].pop(m-count)\n count = count + 1\n\n return list(set([tuple(i) for i in ruleTuple]))", "def tree(self):\n\n tree_parameters = [{'min_samples_leaf': list(range(2, 10, 1)),\n 'criterion': ['mae', 'mse'],\n 'random_state': [1]}]\n tree_grid = GridSearchCV(estimator=DecisionTreeRegressor(),\n param_grid=tree_parameters,\n scoring=self.scorer, cv=5, n_jobs=-1,\n iid=False)\n tree_grid_result = tree_grid.fit(self.X_train, self.y_train)\n best_tree_parameters = tree_grid_result.best_params_\n tree_score = tree_grid_result.best_score_\n print('Best tree params: ' + str(best_tree_parameters))\n print('Tree score: ' + str(tree_score))\n return DecisionTreeRegressor(\n min_samples_leaf=best_tree_parameters['min_samples_leaf'],\n 
criterion=best_tree_parameters['criterion'],\n random_state=1)", "def predict(tree, dataSet):\n\n\tcount = 0 #used for tracking how many times we've correctly classified our data\n\tfor index in range(len(dataSet)):\n\t\tdataPoint = dataSet[index]\n\t\tprint \"Current dataPoint: \", dataPoint.retrieve('id').getValue()\n\t\tnode = 0\n\t\tfor i in tree.fields[tree.nType].keys():\n\t\t\tif NodeType.ROOT == tree.getNodeType(i):\n\t\t\t\tnode = i #basically an index\n\t\t\t\tprint \"root node: \", node\n\t\t\t\tbreak\n\t\t\t#keep going down the tree until no children exist, then get output classification\n\n\t\tprint \"node type\", tree.getNodeType(node)\n\n\t\twhile tree.getNodeType(node) != NodeType.LEAF:\n\t\t\tsplitVal = tree.getSplitValue(node)\n\t\t\tprint \"tree split value: \", splitVal\n\t\t\tsplitAttribute = tree.getSplitAtribute(node)\n\t\t\tprint \"tree split attribute: \", splitAttribute\n\t\t\tval = dataPoint.retrieve(splitAttribute).getValue()\n\t\t\tif val == None:\t\t\n\t\t\t\tval = np.median(retrieveDataFromColumn(dataSet, splitAttribute))\n\n\t\t\tprint \"data point value for split attribute: \", val\n\t\t\tif FeatureType.CONTINUOUS == tree.getSplitType(node): \n\t\t\t\tif val >= splitVal:\n\t\t\t\t\tnode = tree.getChild0(node)\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\t\t\t\tprint \"greater than\", \"going to next node\", node\n\t\t\t\telse:\n\t\t\t\t\tnode = tree.getChild1(node)\n\t\t\t\t\tprint \"lesser than\", \"going to next node\", node\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\t\telif FeatureType.DISCRETE == tree.getSplitType(node):\n\t\t\t\tif val != splitVal:\n\t\t\t\t\tnode = tree.getChild0(node)\n\t\t\t\t\tprint \"not equal\", \" going to next node\", node\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\t\t\telse:\n\t\t\t\t\tnode = tree.getChild1(node)\n\t\t\t\t\tprint \"equal\", \"goint to next node\", node\n\t\t\t\t\tprint \"node type\", tree.getNodeType(node)\n\t\tleafClass = tree.getMajorityClassification(node)\n\t\tprint \"leaf classification: \", leafClass\n\t\tleafAttribute = tree.getSplitAtribute(node)\n\t\tprint \"leaf attribute: \", leafAttribute\n\t\t\n\t\t# Need to fill the last column (which is the same column as leafAttribute) with the \n\t\t# value of the leaf (i.e. 
classify as winner or not)\n\t\tdataPoint.retrieve(leafAttribute).addValue(leafClass)\n\t\tprint \"prediction is: \", dataPoint.retrieve(leafAttribute).getValue()\n\n\tcreateFileCSV(dataSet)\n\treturn dataSet", "def prep_decisiontree(tp_vcf, fp_vcf, name, metrics, format_metrics):\n out_decision = \"%s-decisiontree-%s.graphviz\"\n #metrics = ['FS', 'MFE', 'NBQ', 'ReadPosEndDist']\n #format_metrics = [\"AD\", \"PL\", \"QUAL\"]\n extras = []\n depth = 2\n with open(tp_vcf) as in_handle:\n df_tp = read_vcf_metrics(in_handle, metrics, format_metrics, 1)\n with open(fp_vcf) as in_handle:\n df_fp = read_vcf_metrics(in_handle, metrics, format_metrics, -1)\n df = pandas.concat([df_tp, df_fp])\n for val, vartype in [(0, \"snp\"), (1, \"indel\"), (None, \"all\")]:\n if val is None:\n cur_df = df\n else:\n cur_df = df[df[\"indel\"] == val]\n explore_ml_decisiontree(cur_df,\n metrics + format_metrics + extras, depth,\n out_decision % (name, vartype))\n #print df_tp.describe()\n #print df_fp.describe()", "def get_trembl_arr(arr):\n # creating an empty list, that will (hopefully) contain the results\n trembls = []\n\n # looping through the array, and collecting trembl nodes\n for line in arr:\n is_swissprot = line[2]\n if is_swissprot == 0:\n trembls.append(line)\n\n # returning the trembl nodes\n return trembls", "def fit(self, dataSet, prune=False, validSet=None):\n\t\t\n\t\tmodel_args = self._model_complexity_args.copy()\n\t\tif prune:\n\t\t\tif type(validSet).__name__ != 'ndarray':\n\t\t\t\traise AttributeError(\"To make pruning, validation set accept 'ndarray'\\\n\t\t\t\t\t, cannot be {}!\".format(type(validSet).__name__))\n\t\t\t# get a fully-grown tree\n\t\t\tmodel_args['min_impurity_decrease'] = 0\n\t\t\tmodel_args['min_samples_split'] = 2\n\t\t\n\t\tif self._treeType == 'reg':\n\t\t\timpurity_crit = DecisionTree._MSE\n\t\telif self._treeType == 'clf':\n\t\t\timpurity_crit = DecisionTree._Gini\n\n\n\t\telse:\n\t\t\traise ValueError(\"Argument 'treeType' accept 'clf' or 'reg' only\")\n\t\tself._root = DecisionTree._createTree(dataSet, impurity_crit=impurity_crit,\n\t\t\t\t\t\t\t\t\t\t\t**model_args)\n\n\t\tprint(\"Decision Tree Generated!\")\n\n\t\tif prune:\n\t\t\tprint(\"Pruning...\")\n\t\t\ttreeSeq = {'tree':[self._root], 'alpha':[0], 'num_leaves': [self._root.leaves()]} \n\t\t\tpruned_tree = DecisionTree._prune(deepcopy(self._root), impurity_crit, dataSet, treeSeq)\n\t\t\tprint('Pruning Done: %d pruned sub tree got' % len(treeSeq['tree']))\n\t\t\tprint('choosing best subtree through validation set...')\n\t\t\tbestSubtree, error_score = DecisionTree._bestSubtree(treeSeq, impurity_crit, validSet)\n\t\t\tprint('best subtree selected with error score: {}'.format(error_score))\n\n\t\t\tself._root = bestSubtree" ]
[ "0.63081384", "0.5973033", "0.5891241", "0.5791572", "0.57859606", "0.5680792", "0.5676548", "0.56683165", "0.5648141", "0.5642845", "0.56323576", "0.562277", "0.5616259", "0.5608892", "0.5596876", "0.55703026", "0.55625194", "0.5539325", "0.54848295", "0.5470803", "0.54429305", "0.5439835", "0.54281974", "0.54267716", "0.5357908", "0.5350742", "0.5344968", "0.5341187", "0.5325358", "0.5322204" ]
0.63386476
0
Extract rules from `base_estimator`
def extract_rules(self, labels=None): # Extract flat list of rules in array form if isinstance(self.base_estimator, RandomForestClassifier): rules = list(it.chain(*[self.__extract_dt_rules__(dt) for dt in self.base_estimator.estimators_])) elif isinstance(self.base_estimator, GradientBoostingClassifier): rules = list(it.chain(*[self.__extract_dt_rules__(dt) for dt in self.base_estimator.estimators_.ravel()])) elif isinstance(self.base_estimator, XGBClassifier): rules = list(it.chain(*[self.__extract_xgb_dt_rules__(dt) for dt in self._rule_dump])) # Convert each sub-rule into text, join together with '&' and then add to rules self.rules = np.array([' & '.join(self.__convert_rule__(r, labels=labels, scaler=self.ext_scaler)) for r in rules]) return self.rules
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _TP_estimator_requirements(estimator):\n if estimator == 'Natural':\n do_DD = True\n do_DR = False\n do_RR = True\n elif estimator == 'Davis-Peebles':\n do_DD = True\n do_DR = True\n do_RR = False\n elif estimator == 'Hewett':\n do_DD = True\n do_DR = True\n do_RR = True\n elif estimator == 'Hamilton':\n do_DD = True\n do_DR = True\n do_RR = True\n elif estimator == 'Landy-Szalay':\n do_DD = True\n do_DR = True\n do_RR = True\n else:\n available_estimators = _list_estimators()\n if estimator not in available_estimators:\n msg = (\"Input `estimator` must be one of the following:{0}\".format(available_estimators))\n raise HalotoolsError(msg)\n\n return do_DD, do_DR, do_RR", "def get_rules(cls):\n raise NotImplementedError()", "def _set_cv_params(self):\n _base_estimator = self.init_params['base_estimator'] \n ada = {'n_estimators': randint(10, 1000),\n 'learning_rate': _uniform(0.01, 0.1)} \n \n if isinstance(_base_estimator, DecisionTreeClassifier().__class__):\n base = {\n 'base_estimator__criterion': ('gini', 'entropy'),\n 'base_estimator__max_depth': randint(1, 8), \n 'base_estimator__min_samples_leaf': randint(2, 20),\n 'base_estimator__max_features': (0.1, 'auto', 'log2'),\n 'base_estimator__class_weight': ('balanced', None) } \n \n elif isinstance(_base_estimator, LogisticRegression().__class__): \n base = {\n 'base_estimator__C': uniform(0, 1000),\n 'base_estimator__fit_intercept': (True, False),\n 'base_estimator__penalty': ('l1', 'l2') } \n else:\n base = {} \n ada.update(base) \n \n return [ada]", "def _validate_estimator(self, default=DecisionTreeClassifier()):\n if not isinstance(self.n_estimators, (numbers.Integral, np.integer)):\n raise ValueError(\n f\"n_estimators must be an integer, \" f\"got {type(self.n_estimators)}.\"\n )\n\n if self.n_estimators <= 0:\n raise ValueError(\n f\"n_estimators must be greater than zero, \" f\"got {self.n_estimators}.\"\n )\n\n if self.base_estimator is not None:\n base_estimator = clone(self.base_estimator)\n else:\n base_estimator = clone(default)\n \n # validate sampler and sampler_kwargs\n # validated sampler stored in self.base_sampler_\n try:\n self.base_sampler_ = clone(self.base_sampler)\n except Exception as e:\n e_args = list(e.args)\n e_args[0] = \"Exception occurs when trying to validate\" + \\\n \" base_sampler: \" + e_args[0]\n e.args = tuple(e_args)\n raise e\n\n if self.base_sampler_._sampling_type != \"bypass\":\n self.base_sampler_.set_params(sampling_strategy=self._sampling_strategy)\n self.base_sampler_.set_params(**self.sampler_kwargs_)\n\n self.base_estimator_ = Pipeline(\n [\n (\"sampler\", self.base_sampler_),\n (\"classifier\", base_estimator),\n ]\n )", "def get_params(self, deep=True):\n return super(AveragingRegressor, self)._get_params('estimators', deep=deep)", "def test_valid_estimator(strategy: str) -> None:\n mapie = MapieRegressor(estimator=DummyRegressor(), **STRATEGIES[strategy])\n mapie.fit(X_toy, y_toy)\n assert isinstance(mapie.single_estimator_, DummyRegressor)\n for estimator in mapie.estimators_:\n assert isinstance(estimator, DummyRegressor)", "def test_rules():", "def get_estimator(res_df, test_type, mode='mean_cv'):\n if mode == 'mean_cv':\n # choose best test score out of top 20 best validation scores\n best_res = res_df[res_df.test_type == '[' + str(test_type) + ']'].sort_values(['mean_test_score'],\n ascending=False).head(1)\n # best_res = best_res.sort_values(['best_estimator_test_score'], ascending=False).head(1)\n\n best_estimator = svm.SVC(C=best_res['param_C'].values.tolist()[0], 
kernel='linear')\n\n return best_res, best_estimator\n\n elif mode == 'all_splits':\n results = []\n estimators = []\n\n for split in range(4):\n # choose best test score out of top 20 best validation scores\n best_res = res_df[res_df.test_type == '[' + str(test_type) + ']'] \\\n .sort_values(['split' + str(split) + '_test_score'], ascending=False).head(1)\n\n results.append(best_res)\n estimators.append(svm.SVC(C=best_res['param_C'].values.tolist()[0], kernel='linear'))\n\n return results, estimators\n\n else:\n raise Exception('Unknown mode.')", "def vrules(self):\n ...", "def test_check_estimator(estimator):\n check_estimator(estimator)", "def __init__(self,estimator, param = None):\n self.estimator=estimator", "def init_estimator(self):\n raise NotImplementedError()", "def test_valid_prefit_estimator(estimator: RegressorMixin) -> None:\n estimator.fit(X_toy, y_toy)\n mapie = MapieRegressor(estimator=estimator, cv=\"prefit\")\n mapie.fit(X_toy, y_toy)\n if isinstance(estimator, Pipeline):\n check_is_fitted(mapie.single_estimator_[-1])\n else:\n check_is_fitted(mapie.single_estimator_)\n check_is_fitted(\n mapie,\n [\n \"n_features_in_\",\n \"single_estimator_\",\n \"estimators_\",\n \"k_\",\n \"residuals_\"\n ]\n )\n assert mapie.n_features_in_ == 1", "def test_sklearn_compatible_estimator(estimator: Any, check: Any) -> None:\n check(estimator)", "def _list_estimators():\n estimators = ['Natural', 'Davis-Peebles', 'Hewett', 'Hamilton', 'Landy-Szalay']\n return estimators", "def test_valid_estimator(strategy: str) -> None:\n clf = LogisticRegression().fit(X_toy, y_toy)\n mapie = MapieClassifier(\n estimator=clf,\n **STRATEGIES[strategy]\n )\n mapie.fit(X_toy, y_toy)\n assert isinstance(mapie.single_estimator_, LogisticRegression)", "def getAllDecisionRules(self):\n\n #check this shit lol?\n thetas = self.getAllTheta()\n human_actions = self.getAllHumanActions()\n return [list(zip(thetas, item)) for item in itertools.product(human_actions, repeat=len(thetas))]", "def get_params(self, deep=True):\n return super()._get_params('estimators', deep=deep)", "def _get_evaluators(self):\n if self._evaluator_overrides is not None:\n return self._evaluator_overrides\n return self._create_evaluators()", "def _validate_estimator(self):\n\n if self.smote is not None:\n if isinstance(self.smote, SMOTE):\n self.smote_ = self.smote\n else:\n raise ValueError('smote needs to be a SMOTE object.'\n 'Got {} instead.'.format(type(self.smote)))\n else:\n self.smote_ = SMOTE(ratio=self.ratio, k_neighbors=3,\n random_state=self.random_state)\n\n if self.tomek is not None:\n if isinstance(self.tomek, TomekLinks):\n self.tomek_ = self.tomek\n else:\n raise ValueError('tomek needs to be a TomekLinks object.'\n 'Got {} instead.'.format(type(self.tomek)))\n else:\n self.tomek_ = TomekLinks(ratio=\"all\",\n random_state=self.random_state)", "def hrules(self):\n ...", "def rules(self):\n self.rule1 = min(self.location_is_lessDemand, self.area_is_small, self.unfunishing)\n self.rule2 = min(self.location_is_lessDemand, max(self.area_is_small, self.area_is_average), self.access_is_good)\n self.rule3 = min(self.location_is_veryHighDemand, self.area_is_average, self.fac_is_low, self.access_is_average)\n self.rule4 = min(self.location_is_veryLessDemand, self.area_is_verysmall, self.fully_funishing)\n self.rule5 = min(self.location_is_lessDemand, self.fac_is_average, max(self.area_is_small, self.area_is_average))\n self.rule6 = min(max(self.location_is_lessDemand, self.location_is_averageDemand), self.access_is_good)\n 
self.rule7 = min(self.location_is_lessDemand, self.access_is_good, self.area_is_large, self.partially_funishing)\n self.rule8 = min(self.location_is_highDemand, self.access_is_good, max(self.bed_is_less, self.bath_is_average))\n self.rule9 = min(self.location_is_veryHighDemand, self.area_is_large, self.unfunishing)\n self.rule10 = min(self.access_is_good, self.area_is_average, (1 - self.unfunishing))\n self.rule11 = min(self.access_is_good, self.area_is_large, self.partially_funishing, self.bed_is_more, self.bath_is_more)", "def test(self):\n for data_tier in self.data_tiers:\n tot = len(self.preprocessed_data[data_tier]['features'])\n p = int(math.floor(tot*0.2))\n test_features = np.array(self.preprocessed_data[data_tier]['features'][p:])\n trend_test_classifications = np.array(self.preprocessed_data[data_tier]['trend_classifications'][p:])\n avg_test_classifications = np.array(self.preprocessed_data[data_tier]['avg_classifications'][p:])\n accuracy_trend = self.clf_trend[data_tier].score(test_features, trend_test_classifications)\n accuracy_avg = self.clf_avg[data_tier].score(test_features, avg_test_classifications)\n self.logger.info('The accuracy of %s trend classifier for data tier %s is %.3f', self.name, data_tier, accuracy_trend)\n self.logger.info('The accuracy of %s avg regressor for data tier %s is %.3f', self.name, data_tier, accuracy_avg)", "def _generate_evaluaters(self):\n evaluators = []\n for para_key in self.parameter[1]:\n for value in self.parameter[1][para_key]:\n evaluators.append(evaluaterSearch.evaluaterSearch(self.parameter[2], [para_key, value]))\n self.evaluators = evaluators", "def get_estimator(arguments):\n \n numerical_indices = [1, 2, 4, 5,6,7,8,9,10,11,12,13,14]\n categorical_indices = [0]\n original_indices = list(set(range(59))-set(numerical_indices)-set(categorical_indices))\n \n p1 = make_pipeline(my_module.PositionalSelector(categorical_indices),OneHotEncoder())\n p2 = make_pipeline(my_module.PositionalSelector(numerical_indices),StandardScaler())\n p3 = make_pipeline(my_module.PositionalSelector(original_indices))\n \n feats = FeatureUnion([('categoricals', p1),\n ('numericals', p2),\n ('originals', p3),])\n \n # tolerance and C are expected to be passed as\n # command line argument to task.py\n pipeline = Pipeline([('pre', feats),\n ('estimator', linear_model.LogisticRegression(penalty=\"l2\",\n tol=arguments.tol,\n C = arguments.C,\n solver='lbfgs',\n max_iter=10000))])\n \n # tolerance and C are expected to be passed as\n # command line argument to task.py\n #classifier = linear_model.LogisticRegression(\n # penalty=\"l2\",\n # tol=arguments.tol,\n # C = arguments.C,\n # solver='lbfgs',\n # max_iter=1000\n #)\n \n return pipeline", "def evaluate(self, Estimator, params):\n assert hasattr(Estimator, 'fit'),\\\n \"Estimator must implement the fit method\"\n assert hasattr(Estimator, 'predict'),\\\n \"Estimator must implement the predict method\"\n # Initialize Estimators\n models = [Estimator(param) for param in params]\n ac = list()\n for idx, (search, hold_out) in enumerate(self.cv):\n if idx >= self.max_outer:\n break\n cv = StratifiedKFold(y=self.b[search], n_folds=self.k_folds-1)\n for jdx, (train, test) in enumerate(cv):\n if jdx >= self.max_inner:\n break\n scores = [self._score(model, train, test) for model in models]\n ac.append(self._score(models[np.argmax(scores)], search, hold_out))\n return np.mean(ac)", "def test_fit_with_pipelines_as_base_estimators(self) -> type(None):\n X, y = get_dataset_for_regression()\n rgr = StackingRegressor(\n 
base_estimators_types=[Pipeline, Pipeline],\n base_estimators_params=[\n {\n 'steps': [('lin_reg', LinearRegression())]\n },\n {\n 'steps': [('neighbors', KNeighborsRegressor())],\n 'neighbors__n_neighbors': 1\n }\n ],\n keep_meta_X=True\n )\n rgr.fit(X, y)\n true_meta_X_ = np.array(\n [[6.69395712, 15.0],\n [10.76647173, 15.0],\n [14.83898635, 15.0],\n [18.91150097, 21.0],\n [22.98401559, 23.0],\n [9.74141049, 13.0],\n [13.70235081, 13.0],\n [17.66329114, 13.0],\n [21.62423146, 13.0],\n [15.94394213, 21.0],\n [19.8032967, 15.0],\n [23.92527473, 19.0],\n [28.04725275, 23.0],\n [32.16923077, 23.0],\n [11.94542125, 8.0]]\n )\n np.testing.assert_allclose(rgr.meta_X_, true_meta_X_)\n true_coefs_of_base_lr = np.array([1.05304994, 2.97421767])\n np.testing.assert_allclose(\n rgr.base_estimators_[0].named_steps.lin_reg.coef_,\n true_coefs_of_base_lr\n )\n true_coefs_of_meta_estimator = np.array([1.01168028, -0.04313311])\n np.testing.assert_allclose(\n rgr.meta_estimator_.coef_,\n true_coefs_of_meta_estimator\n )", "def _cross_validate(self, fit_params={}):\n\n # Flatten the true labels for the training data\n y_train = self.y_train.values if self.y_train.shape[1] > 1 else self.y_train.values.ravel()\n\n if self.model.estimator_type == \"classifier\":\n\n # Get unique labels for classification\n labels = np.unique(y_train)\n\n # Set up a dictionary for the scoring metrics\n scoring = {'accuracy':'accuracy'}\n\n # Prepare arguments for the scorers\n metric_args = self.model.metric_args\n \n if 'average' in metric_args and metric_args['average'] is not None:\n # If the score is being averaged over classes a single scorer per metric is sufficient\n scoring['precision'] = metrics.make_scorer(metrics.precision_score, **metric_args)\n scoring['recall'] = metrics.make_scorer(metrics.recall_score, **metric_args)\n scoring['fscore'] = metrics.make_scorer(metrics.f1_score, **metric_args)\n\n output_format = \"clf_overall\"\n else:\n # If there is no averaging we will need multiple scorers; one for each class\n for label in labels:\n metric_args['pos_label'] = label\n metric_args['labels'] = [label]\n scoring['precision_'+str(label)] = metrics.make_scorer(metrics.precision_score, **metric_args)\n scoring['recall_'+str(label)] = metrics.make_scorer(metrics.recall_score, **metric_args)\n scoring['fscore_'+str(label)] = metrics.make_scorer(metrics.f1_score, **metric_args)\n \n output_format = \"clf_classes\"\n\n elif self.model.estimator_type == \"regressor\":\n scoring = ['r2', 'neg_mean_squared_error', 'neg_mean_absolute_error', 'neg_median_absolute_error', 'explained_variance']\n \n # Perform cross validation using the training data and the model pipeline\n scores = cross_validate(self.model.pipe, self.X_train, y_train, scoring=scoring, cv=self.model.cv, fit_params=fit_params, return_train_score=False)\n\n # Prepare the metrics data frame according to the output format\n if self.model.estimator_type == \"classifier\": \n # Get cross validation predictions for the confusion matrix\n y_pred = cross_val_predict(self.model.pipe, self.X_train, y_train, cv=self.model.cv, fit_params=fit_params)\n\n # Prepare the confusion matrix and add it to the model\n self._prep_confusion_matrix(y_train, y_pred, labels)\n\n # Create an empty data frame to set the structure\n metrics_df = pd.DataFrame(columns=[\"class\", \"accuracy\", \"accuracy_std\", \"precision\", \"precision_std\", \"recall\",\\\n \"recall_std\", \"fscore\", \"fscore_std\"])\n\n if output_format == \"clf_overall\": \n # Add the overall metrics to the data 
frame\n metrics_df.loc[0] = [\"overall\", np.average(scores[\"test_accuracy\"]), np.std(scores[\"test_accuracy\"]),\\\n np.average(scores[\"test_precision\"]), np.std(scores[\"test_precision\"]),\\\n np.average(scores[\"test_recall\"]), np.std(scores[\"test_recall\"]),\\\n np.average(scores[\"test_fscore\"]), np.std(scores[\"test_fscore\"])]\n\n elif output_format == \"clf_classes\":\n # Add accuracy which is calculated at an overall level\n metrics_df.loc[0] = [\"overall\", np.average(scores[\"test_accuracy\"]), np.std(scores[\"test_accuracy\"]),\\\n np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN]\n\n # Add the metrics for each class to the data frame\n for i, label in enumerate(labels):\n metrics_df.loc[i+1] = [label, np.NaN, np.NaN, np.average(scores[\"test_precision_\"+str(label)]),\\\n np.std(scores[\"test_precision_\"+str(label)]), np.average(scores[\"test_recall_\"+str(label)]),\\\n np.std(scores[\"test_recall_\"+str(label)]), np.average(scores[\"test_fscore_\"+str(label)]),\\\n np.std(scores[\"test_fscore_\"+str(label)])]\n \n # Finalize the structure of the result DataFrame\n metrics_df.loc[:,\"model_name\"] = self.model.name\n metrics_df = metrics_df.loc[:,[\"model_name\", \"class\", \"accuracy\", \"accuracy_std\", \"precision\", \"precision_std\", \"recall\",\\\n \"recall_std\", \"fscore\", \"fscore_std\"]]\n\n # Add the score to the model\n self.model.score = metrics_df[\"accuracy\"].values[0]\n\n elif self.model.estimator_type == \"regressor\":\n # Create an empty data frame to set the structure\n metrics_df = pd.DataFrame(columns=[\"r2_score\", \"r2_score_std\", \"mean_squared_error\", \"mean_squared_error_std\",\\\n \"mean_absolute_error\", \"mean_absolute_error_std\", \"median_absolute_error\", \"median_absolute_error_std\",\\\n \"explained_variance_score\", \"explained_variance_score_std\"])\n \n # Add the overall metrics to the data frame\n metrics_df.loc[0] = [np.average(scores[\"test_r2\"]), np.std(scores[\"test_r2\"]),\\\n np.average(scores[\"test_neg_mean_squared_error\"]), np.std(scores[\"test_neg_mean_squared_error\"]),\\\n np.average(scores[\"test_neg_mean_absolute_error\"]), np.std(scores[\"test_neg_mean_absolute_error\"]),\\\n np.average(scores[\"test_neg_median_absolute_error\"]), np.std(scores[\"test_neg_median_absolute_error\"]),\\\n np.average(scores[\"test_explained_variance\"]), np.std(scores[\"test_explained_variance\"])]\n \n # Finalize the structure of the result DataFrame\n metrics_df.loc[:,\"model_name\"] = self.model.name\n metrics_df = metrics_df.loc[:,[\"model_name\", \"r2_score\", \"r2_score_std\", \"mean_squared_error\", \"mean_squared_error_std\",\\\n \"mean_absolute_error\", \"mean_absolute_error_std\", \"median_absolute_error\", \"median_absolute_error_std\",\\\n \"explained_variance_score\", \"explained_variance_score_std\"]]\n\n # Add the score to the model\n self.model.score = metrics_df[\"r2_score\"].values[0]\n\n # Save the metrics_df to the model\n self.model.metrics_df = metrics_df", "def evaluate(self, X, y, hypes={}, n_splits=5, shuffle=True, standardize=True, groups=None):\n \n ### SET HYPERPARAMETERS ###\n model = clone(self.estimator) # Gotta do this otherwise funky things happen\n model.set_params(**hypes)\n \n ### INITIALIZE SCORING DATAFRAME ###\n fractions = ['train', 'val']\n scoring_metrics = ['mae', 'mape', 'medape', 'pearson', 'spearman']\n score_columns = pd.MultiIndex.from_product([fractions, scoring_metrics]) # This sets up a heirarchical index for the results dataframe\n score = pd.DataFrame(columns=score_columns)\n\n 
### SET UP X-VALIDATION ###\n \n if groups is not None:\n cv = model_selection.LeaveOneGroupOut()\n splitter = enumerate(cv.split(X,y,groups))\n else:\n cv = model_selection.KFold(n_splits=n_splits, shuffle=shuffle)\n splitter = enumerate(cv.split(X,y))\n\n ### RUN CV AND SCORE MODEL ###\n last_splits = [] # Keep track of split indices for forensics\n for idx, (train, val) in splitter:\n\n X_train = X.iloc[train,:]; y_train = y.iloc[train]\n X_val = X.iloc[val,:]; y_val = y.iloc[val]\n \n if standardize:\n std = preprocessing.StandardScaler()\n std.fit(X_train)\n X_train, X_val = std.transform(X_train), std.transform(X_val)\n\n # if idx==0:\n # for v in ['X_train','y_train','X_val','y_val']:\n # print('{} shape: {}'.format(v, eval('{}.shape'.format(v))))\n\n ### INSTANTIATE AND FIT MODEL ###\n last_splits.append((train, val))\n model.fit(X_train, y_train)\n\n for frac in ['train','val']:\n \n # y_true will either be y_train or y_val depending on what 'frac' is. Kind of hacky.\n y_true = eval('y_'+frac)\n y_pred = model.predict(eval('X_'+frac))\n \n # Calculate MAE\n score.loc[idx, (frac,'mae')] = \\\n metrics.mean_absolute_error(y_true, y_pred)\n \n # Calculate MAPE\n score.loc[idx, (frac,'mape')] = \\\n mean_absolute_percentage_error(y_true, y_pred)\n \n # Calculate MedAPE\n score.loc[idx, (frac,'medape')] = \\\n median_absolute_percentage_error(y_true, y_pred)\n\n # Calculate pearson\n score.loc[idx, (frac,'pearson')] = \\\n stats.pearsonr(y_true, y_pred)[0]\n\n # Calculate spearman\n score.loc[idx, (frac,'spearman')] = \\\n stats.spearmanr(y_true, y_pred)[0]\n\n self.estimator = model\n self.last_scores = score\n self.last_hypes = hypes\n self.last_splits = last_splits\n\n return score", "def get_default_estimator():\n return LogisticRegression()" ]
[ "0.5808173", "0.5672008", "0.56038344", "0.55043817", "0.54182005", "0.5397512", "0.5376288", "0.53144646", "0.5294726", "0.52705055", "0.5173132", "0.5156924", "0.5128409", "0.5119125", "0.5093487", "0.5083402", "0.50692993", "0.50663245", "0.50592685", "0.5044347", "0.503868", "0.50303763", "0.5016243", "0.49951872", "0.4991269", "0.49473724", "0.4936268", "0.4921238", "0.4910883", "0.49101874" ]
0.6383667
0
Returns the index of the section, or pseudosection, for the symbol.
def getSectionIndex(self) -> int: ...
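A minimal sketch of how a class could back this stub, assuming the symbol simply stores the index of its (pseudo)section; the class name and attribute are hypothetical.

class Symbol:
    def __init__(self, section_index: int) -> None:
        # Index of the section (or pseudosection) this symbol belongs to
        self._section_index = section_index

    def getSectionIndex(self) -> int:
        # Return the stored section index
        return self._section_index

For example, Symbol(3).getSectionIndex() would return 3.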
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def state_index_for_symbol(self, symbol):\n for idx, state in enumerate(self):\n if state.symbol == symbol:\n return idx\n if value in self.symbol_synonyms:\n return self.index(self.symbol_synonyms[value])\n raise Exception(\"State with symbol of '%s' not defined\" % symbol)", "def get_index_from_section(section):\n return section.rsplit(\"(\", 1)[1].rstrip(\")\")", "def section(self, idx: int) -> int:\n if self.sections >= (idx + 1):\n return int(RE_DIGIT.match(self.string.split(\".\")[idx]).group(1))\n return 0", "def find_index(segmentation, stroke_id):\n for i, symbol in enumerate(segmentation):\n for sid in symbol:\n if sid == stroke_id:\n return i\n return -1", "def indexOf(self, aName):\n if aName in self.subroutineTable:\n tup = self.subroutineTable[aName]\n elif aName in self.classTable:\n tup = self.classTable[aName]\n else:\n return None\n\n # DEBUG\n if self.DEBUG:\n print(\"DEBUG(SymbolTable): INDEX OF '{}': {}\".format(aName, tup[2]))\n\n # Extract the index from the tuple\n return tup[2]", "def _find_index(string):\n if string[0] == 'X':\n return 0\n elif string == 'D':\n return 1\n else:\n return np.where(sym == string)[0][0]", "def get(self, symbol):\n if symbol not in self.symbol_map:\n self.symbol_map[symbol] = self.symbol_counter\n self.symbol_counter += 1\n return self.symbol_map[symbol]", "def isect_index(self):\n return self._lazy_isect_index()", "def idx(self):\n return int(self.__ph.get('idx', 0))", "def findRepIdx(self, rep, cell = 1):\n\n match = self.findRep(rep = rep, cell = cell)\n return np.arange(self.atoms.shape[0])[match]", "def _get_index(self, character):\n OFFSET = 65 # ascii value of 'A' since the first element should be 'A'\n index = ord(character) - OFFSET\n return index", "def getSymbolValue(self) -> int:\n ...", "def find_section_state(line, current_section, section_order, content, highlight_content):\n for section, pattern in SEC_PAT_DICT.items():\n if pattern.match(line):\n section_order.append(section)\n content[section] = []\n highlight_content[section] = []\n return section, 1\n\n if current_section is None:\n raise InvalidDataError(\"Could not identify section from line: {}\".format(line))\n else:\n return current_section, 1", "def address(self, symbol):\r\n return self.s_table[symbol]", "def find_special_token_index(identified_concepts: IdentifiedConcepts, special_token: str):\n for i in range(len(identified_concepts.ordered_concepts)):\n concept = identified_concepts.ordered_concepts[i]\n if concept.name == special_token:\n return i\n return -1", "def _section_index(self, chapter_index, title):\r\n\r\n # This is a hideous CSS selector that means:\r\n # Get the links containing the section titles in `chapter_index`.\r\n # The link text is the section title.\r\n section_css = 'div.chapters>section:nth-of-type({0}) div.sections div h3 a'.format(chapter_index)\r\n section_titles = self.q(css=section_css).map(lambda el: el.text.lower().strip()).results\r\n\r\n # The section titles also contain \"n of m possible points\" on the second line\r\n # We have to remove this to find the right title\r\n section_titles = [t.split('\\n')[0] for t in section_titles]\r\n\r\n # Some links are blank, so remove them\r\n section_titles = [t for t in section_titles if t]\r\n\r\n try:\r\n # CSS indices are 1-indexed, so add one to the list index\r\n return section_titles.index(title.lower()) + 1\r\n except ValueError:\r\n self.warning(\"Could not find section '{0}'\".format(title))\r\n return None", "def index_for_location(self, location):\r\n if location == 
'_begin':\r\n i = 0\r\n elif location == '_end':\r\n i = None\r\n elif location.startswith('<') or location.startswith('>'):\r\n i = self.index(location[1:])\r\n if location.startswith('>'):\r\n if i >= len(self):\r\n # last item\r\n i = None\r\n else:\r\n i += 1\r\n else:\r\n raise ValueError('Not a valid location: \"%s\". Location key '\r\n 'must start with a \">\" or \"<\".' % location)\r\n return i", "def next_symbol(self):\r\n try:\r\n return self.rule.rightside[self.position]\r\n except IndexError:\r\n return None", "def getOhcNameIndx( self, name ):\n \n if not self.ohcNames:\n self.getOhcNames( )\n\n if name in self.ohcNames:\n return self.ohcNames[ name ]\n elif name in self.ohcNames.values():\n return name\n else:\n return -1", "def index(self, atom):\n return self.atom_list.index(atom)", "def get_section(self, sy: int) -> numpy.ndarray:\n if sy not in self._sections:\n self.create_section(sy)\n return self._sections[sy]", "def _find_start_or_end_non_code(\n cls, segments: Sequence[BaseSegment]\n ) -> Optional[int]:\n if segments:\n for idx in [0, -1]:\n if not cls._is_code_or_meta(segments[idx]):\n return idx\n return None", "def instrumentLookup(instrument_df,symbol):\r\n try:\r\n return instrument_df[instrument_df.tradingsymbol==symbol].instrument_token.values[0]\r\n except:\r\n return -1", "def _get_charindex(self, x, y):\r\n verts = self.shapes[0].buf[0].vertices\r\n x = x - self.x + verts[2][0]\r\n y = y - self.y + verts[0][1]\r\n nv = len(verts)\r\n for i in range(0, nv, 4):\r\n vtr = verts[i] # top right\r\n vbl = verts[i + 2] # bottom left\r\n if x >= vbl[0] and x < vtr[0] and y >= vbl[1] and y < vtr[1]:\r\n i = int(i / 4)\r\n c_i = self.c_lookup[i]\r\n if c_i == (len(self.txt) - 1) or self.c_lookup[i + 1] > c_i + 1:\r\n if (vtr[0] - x) < (x - vbl[0]):\r\n c_i += 1\r\n return c_i\r\n return len(self.txt)", "def instrumentLookup(instrument_df,symbol):\n try:\n return instrument_df[instrument_df.tradingsymbol==symbol].instrument_token.values[0]\n except:\n return -1", "def index(self, word):\n return self.tokens.index(word)", "def getIndex(condition='', component=''):\n if component == 'IC2' or component == 'IC14':\n index = '.nii[0]'\n elif component == 'IC7' or component == 'IC29':\n index = '.nii[1]'\n elif component == 'IC25':\n index = '.nii[2]'\n elif component == 'IC31':\n index = '.nii[3]'\n elif component == 'IC39':\n index = '.nii[4]'\n else:\n index = '.nii'\n\n return index", "def get_stimulus_index(data, stim_name):\n for i_stim, stim_data in enumerate(data['stimuli']):\n if stim_name in stim_data['stim_path']:\n return i_stim\n\n raise KeyError('Stimulus with stim_name={} not found!'.format(stim_name))", "def find1symbols(symbol, reel):\n for i in range(len(reel)):\n if reel[i] == symbol:\n return i", "def find_symbol(self, op):\n for ii in self.__symbols:\n if ii.get_name() == op:\n return ii\n return None" ]
[ "0.6868311", "0.6411154", "0.6275179", "0.6243905", "0.59100974", "0.5822145", "0.5802371", "0.57700807", "0.5695456", "0.56313735", "0.55781096", "0.557163", "0.5570825", "0.55552155", "0.55312073", "0.5525112", "0.5520313", "0.5517731", "0.5509883", "0.55041385", "0.5454695", "0.5451267", "0.54445356", "0.5436226", "0.54354495", "0.5434317", "0.5429792", "0.5429344", "0.542666", "0.5416419" ]
0.6920975
0
Read a logfile and collect the profiles that were written.
def read_log(prefix):
    l = []
    with open('%s.log' % prefix) as F:
        for line in F:
            # Only keep lines that report a written profile
            if 'profile written' not in line:
                continue
            else:
                # Store the first token of the matching line
                l.append(line.split()[0])
    return l
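A brief usage sketch for read_log above; the 'mytrack' prefix and the exact wording of the log lines are hypothetical — the only assumption the code relies on is that matching lines contain the phrase 'profile written'.

# Assuming mytrack.log contains lines such as:
#   profile12 profile written for model 850
# read_log returns the first token of every matching line.
written = read_log('mytrack')
print('%d profiles written' % len(written))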
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_linelog():", "def log(length, file):\n\n if user_init.check_pre_init() and user_utility.check_drive_init() == 'True':\n \n data = user_utility.read_log(length, file)\n\n for log in data:\n print(log)\n\n\n else:\n user_utility.print_error(\"Sink folder not Found! Initialise folder first or reset your configuration!\")", "def read_log_dir(self, read_track=False, read_all_profiles=False):\n self.track_data = TrackData(\n file_name='/'.join([self.log_path, self.history_file]),\n read_file=read_track\n )\n self.profile_index = ProfileIndex(\n file_name='/'.join([self.log_path, self.profile_index_file])\n )\n \n for i, m in enumerate(\n self.profile_index.data[ProfileIndex.index_column_names[0]]):\n # For each model number\n self.profile[str(m)] = ProfileData(\n file_name='{0}/{1}{2}.{3}'.format(self.log_path,\n self.profile_prefix, i+1,\n self.profile_suffix),\n read_file=read_all_profiles)", "def read_logs(self):\n for system, filenames in SmokeTests.INPUT_FILES.items():\n input_file = filenames[\"logs\"]\n with open(input_file) as fin:\n self._logs[system] = fin.read()", "def logs():\n puts(yellow(\"[Reading log-file]\"))\n run(\"cat %s\" % REMOTE_ERR_FILE)\n run(\"cat %s\" % REMOTE_LOG_FILE)", "def _read_log(self):\n\n line_regex = compile(r\"\\[I\\]\\s*\\(\\d+ms\\)[^\\d]+(?P<counter>\\d+)\"\n r\"[^\\d]+(?P<timestamp>\\d+(\\.\\d+)?)[^\\d]+\"\n r\"(?P<acceleration>\\d+);\")\n values = []\n with open(self.filepath) as file:\n for line in file:\n match = line_regex.match(line)\n if match:\n values.append({\n 'counter':\n int(match['counter']),\n 'timestamp':\n int(float(match['timestamp']) * 1000),\n 'acceleration':\n int(match['acceleration'])\n })\n\n self.values = values", "def logs():\n with open(configs.LOG_PATH) as f:\n return f.read()", "def getLogs():", "def getLogs():", "def parselog(filen, progress=0):\n\n # Process a file and return a populated logfile object\n #\n # Maximum size of text buffer to use\n bufsize = 50\n # Initial size of chunks to process\n chunksize = 50\n # Regular expression object\n regex = patternmatch()\n # Buffer objects\n buff = buffer(bufsize)\n tablebuff = tablebuffer()\n linecount = 0\n # New (empty) logfile object\n log = logfile(filen)\n prog = False\n summary = None\n # Open the file for reading\n f = open(filen, \"r\")\n # Read line-by-line\n for line in f:\n linecount += 1\n # Progress indicator (if requested)\n # Report reaching \"progress\" number of lines\n if progress:\n if not linecount % progress:\n print(\"Processed \" + str(linecount) + \" lines\")\n # Append line to buffers\n buff.append(line)\n tablebuff.append(line)\n # Get a chunk of text to process\n bufftext = buff.tail(chunksize)\n # Test the line for matches\n #\n # Data line i.e. CCP4 program keywords\n result = regex.isdataline(line)\n if result:\n if not prog or not prog.isprogram():\n # Found a data line outside the context\n # of a program\n # Assume that we are now inside a program\n prog = log.addprogram()\n # Set the start line to be immediately\n # after the previous fragment\n try:\n previous_fragment = log.fragment(log.nfragments() - 2)\n start = previous_fragment.get_endline() + 1\n except IndexError:\n # Failed to get end line of previous\n # fragment\n start = 0\n log.set_fragment_start(start)\n # Remove any html tags and store\n data_line = strip_logfile_html(result[\"data_line\"])\n prog.addkeyword(data_line)\n # File opening report line i.e. 
logical name/filename pairs\n result = regex.isfileopen(line)\n if result:\n if not prog or not prog.isprogram():\n # Found a file opening report outside the context\n # of a program\n # Assume that we are now inside a program\n prog = log.addprogram()\n # Set the start line to be immediately\n # after the previous fragment\n try:\n previous_fragment = log.fragment(log.nfragments() - 2)\n start = previous_fragment.get_endline() + 1\n except IndexError:\n # Failed to get end line of previous\n # fragment\n start = 0\n log.set_fragment_start(start)\n # Store the logical name/filename pair\n prog.addlogicalname(result[\"logical_name\"], result[\"filename\"])\n # Start of a summary block i.e. <!--SUMMARY_BEGIN-->\n result = regex.issummary_begin(line)\n if result:\n summary = log.addsummary(linecount)\n # End of a summary block i.e. <!--SUMMARY_END-->\n result = regex.issummary_end(line)\n if result:\n if not summary:\n # Make a new summary with no start\n summary = log.addsummary()\n # Close out the current summary\n summary.set_end(linecount)\n # Test the buffer for matches\n #\n # CCP4 program banner\n result = regex.isccp4banner(bufftext)\n if result:\n ##print \"Found CCP4 program banner\"\n ##print \"Result = \"+str(result)\n prog = log.addprogram()\n prog.set_isccp4(True)\n prog.set_attributes_from_dictionary(result)\n log.set_fragment_start(linecount)\n buff.clear()\n tablebuff.clear()\n continue\n # SHELX program banner\n result = regex.isshelxbanner(bufftext)\n if result:\n ##print \"Found SHELX program banner\"\n ##print \"Result = \"+str(result)\n prog = log.addprogram()\n prog.set_attributes_from_dictionary(result)\n log.set_fragment_start(linecount)\n buff.clear()\n tablebuff.clear()\n continue\n # CCP4 program termination\n result = regex.isccp4termination(bufftext)\n if result:\n ##print \"Found CCP4 program termination\"\n ##print \"Result = \"+str(result)\n if not prog:\n # Outside the context of any fragment, and\n # found the end of a program before its start\n log.set_fragment_end(offsetline(linecount, result))\n prog = log.addprogram()\n elif not prog.isprogram():\n # Within the context of a fragment which\n # is not a program and found the end of a\n # program before its start\n log.set_fragment_end(offsetline(linecount, result))\n prog = log.addprogram()\n prog.set_attributes_from_dictionary(result)\n log.set_fragment_end(linecount)\n prog.set_termination(True)\n # Clear the current pointer\n prog = False\n buff.clear()\n tablebuff.clear()\n continue\n # SHELX program termination\n result = regex.isshelxtermination(bufftext)\n if result:\n ##print \"Found SHELX program termination\"\n ##print \"Result = \"+str(result)\n if not prog:\n # Found the end of a program before its start\n prog = log.addprogram()\n prog.set_attributes_from_dictionary(result)\n log.set_fragment_end(linecount)\n prog.set_termination(True)\n # Clear the current pointer\n prog = False\n buff.clear()\n tablebuff.clear()\n continue\n # CCP4 table\n if tablebuff.complete():\n if not prog:\n # Found a table outside the context of a program\n ##print \"Adding table as a fragment\"\n prog = log.newfragment()\n log.set_fragment_start(linecount)\n table_error = False\n table = prog.addtable(tablebuff.all())\n if not table:\n print(\"*** Failed to extract table data ***\")\n table_error = True\n elif table.parse_error():\n print(\"*** Failed to parse table data ***\")\n table_error = True\n if table_error:\n print(\"\\tLogfile: \" + str(log.filename()))\n print(\"\\tTable start: L\" + str(linecount - 
len(tablebuff) + 1))\n print(\"\\tTable end : L\" + str(linecount))\n # Add the table to the log, regardless of status\n log.addtable(table)\n # clear the buffers\n buff.clear()\n tablebuff.clear()\n continue\n # CCP4 keytext message\n result = regex.isccp4keytext(bufftext)\n if result:\n ##print \"Found CCP4 keytext\"\n ##print \"Result = \"+str(result)\n if not prog:\n # Found a message outside the context of a program\n ##print \"Adding keytext as a fragment\"\n prog = log.newfragment()\n log.set_fragment_start(linecount)\n keytext = prog.addkeytext(\n result[\"name\"], result[\"junk_text\"], result[\"message\"]\n )\n log.addkeytext(keytext)\n buff.clear()\n tablebuff.clear()\n continue\n # CCP4i header\n result = regex.isccp4iheader(bufftext)\n if result:\n ##print \"Found CCP4i header\"\n ##print \"Result = \"+str(result)\n log.append_ccp4i_header(result)\n buff.clear()\n continue\n # CCP4i tail\n result = regex.isccp4itail(bufftext)\n if result:\n ##print \"Found CCP4i tail\"\n ##print \"Result = \"+str(result)\n log.append_ccp4i_tail(result)\n buff.clear()\n tablebuff.clear()\n continue\n # CCP4i information\n result = regex.isccp4i_information(bufftext)\n if result:\n ##print \"Found CCP4i information\"\n ##print \"Result = \"+str(result)\n # Make a new fragment - these messages shouldn't\n # appear inside the context of another program\n prog = log.addccp4i_info()\n prog.set_attributes_from_dictionary(result)\n log.set_fragment_start(linecount)\n log.set_fragment_end(linecount)\n # Clear the current context\n prog = False\n buff.clear()\n tablebuff.clear()\n continue\n # Ensure that the endline of the last fragment\n # is assigned\n log.set_fragment_end(linecount)\n # Close the file\n f.close()\n return log", "def read_game_logs(file_path):\n\n if os.path.isfile(file_path):\n with open(file_path, \"r\") as read_file:\n log = json.load(read_file)\n # event_type = set([e[\"event\"] for e in log ])\n # the event types: command, text_message, set_attribute, join\n # print(\"event types\", event_type)\n\n # sort all messages chronologically\n log.sort(key=lambda x: x[\"date_modified\"])\n\n start = None\n end = None\n real_end = None # WHen The came master says COngrats or you die, because rest of the messages looks like bugs...\n episode_list = []\n length = len(log)\n game_finished = False\n # Episode are being searched between 2 starts commands\n # only the one where the command done has been issued is kept\n for i, l in enumerate(log):\n if \"command\" in l.keys():\n if l[\"command\"] == \"start\":\n if start == None:\n start = i\n elif end == None:\n end = i\n if l[\"command\"] == \"done\":\n game_finished = True\n\n if l[\"user\"][\"id\"] == 1 and l[\"event\"] == \"text_message\" and type(l[\"message\"]) is str and (\n l[\"message\"].startswith(\"Congrats\") or l[\"message\"].startswith(\n \"The rescue robot has not reached you\")):\n real_end = i + 1 # +1 because we want to include this message in the log slice...\n if start is not None and end is not None:\n if game_finished:\n episode_list.append(log[start:real_end])\n start = end\n end = None\n real_end = None\n game_finished = False\n\n if i + 1 == length:\n if start is not None and end is None and game_finished:\n episode_list.append(log[start:real_end])\n\n score_list = {}\n for i, e in enumerate(episode_list):\n # the number of answers the avatar utters gives us the number of question asked\n # num_questions = sum(\n # [1 for m in e if m[\"user\"][\"name\"] == \"Avatar\" and m[\"event\"] == \"text_message\"])\n\n # Just 
sum every messages ending with a question mark issueed by the user...\n num_questions = sum([1 for m in e if m[\"user\"][\"name\"] != \"Avatar\" and m[\"user\"][\"id\"] != 1 and m[\n \"event\"] == \"text_message\" and type(m[\"message\"]) is str and m[\"message\"].endswith(\"?\")])\n\n # user id 1 is alway the game master, we are looping here on the messages of the \"real\" player\n # when we tell the avatar to change location, we don't get an answer, this is why the substraction gives the number of orders\n # this does not include the order \"done\"\n # num_orders = sum(\n # [1 for m in e if m[\"user\"][\"name\"] != \"Avatar\" and m[\"user\"][\"id\"] != 1 and m[\n # \"event\"] == \"text_message\"]) - num_questions\n\n # Just sum every order of type \"go west\". Describe orders are not counted.\n num_orders = sum([1 for m in e if m[\"user\"][\"name\"] != \"Avatar\" and m[\"user\"][\"id\"] != 1 and m[\n \"event\"] == \"text_message\" and type(m[\"message\"]) is str and (\n \"east\" in m[\"message\"].lower() or \"north\" in m[\"message\"].lower() or \"west\" in m[\n \"message\"].lower() or \"south\" in m[\"message\"].lower() or \"back\" in m[\"message\"].lower())])\n\n game_won = sum([1 for m in e if m[\"user\"][\"id\"] == 1 and m[\n \"event\"] == \"text_message\" and type(m[\"message\"]) is str and m[\"message\"].startswith(\"Congrats\")]) > 0\n\n # Work-Around - the final reward giving +1.0 on success and -1.0 on loss happens after the messages\n # Saying \"congratulations\" or \"you die horribly\" just repeating the message when the game starts.\n # We had to exclude that message to segment finished games but this is why we have to add these rewards here manually...\n\n final_reward = -1.0\n if game_won:\n final_reward = 1.0\n score_list[i] = {\"score\": sum([m[\"message\"][\"observation\"][\"reward\"] for m in e if\n \"message\" in m.keys() and type(m[\"message\"]) is dict])+final_reward,\n \"num_questions\": num_questions, \"num_orders\": num_orders, \"game_session\": e,\n \"game_won\": game_won}\n\n return score_list\n\n else:\n raise Exception(f\"{file_path} is not a correct file path.\")", "def load(logFile):\n pass #TODO", "def process_log_file(cur, filepath):\n \n # open log file\n df = pd.read_json(filepath,lines=True)\n\n # filter by NextSong action - i.e. 
get only listening music events from the logs\n df = df[(df.page == \"NextSong\")]\n\n # insert time records\n __insert_time_data(cur, df)\n \n # insert user records\n __insert_user_data(cur, df)\n \n # insert songplay records\n __insert_songplay_data(cur, df)\n \n # erase dataframe\n df = df.iloc[0:0]", "def save_to_base(log_file, logons_err=None):\n source_file = open(log_file)\n\n if logons_err:\n if os.path.isfile(logons_err):\n os.remove(logons_err)\n output_err = open(logons_err, \"a\", encoding='utf-8')\n else:\n output_err = open(logons_err, \"a\", encoding='utf-8')\n \n lines = source_file.readlines()\n \n #get max last logon date from ADLogonFromComputer\n last_logon_date = ADLogonFromComputer.objects.all().aggregate(Max('logon_date'))['logon_date__max']\n if last_logon_date:\n begin_date = last_logon_date-datetime.timedelta(2)\n else:\n begin_date = None\n import_err = []\n for line in lines:\n try:\n s_line = line.split('; ')\n if len(s_line) == 7:\n l_date = datetime.datetime.strptime(s_line[0], \"%d.%m.%Y\").date()\n \n if begin_date == None:\n #l_time = datetime.datetime.strptime(s_line[1], \"%H:%M:%S,%f\").time()\n l_time = datetime.datetime.strptime(s_line[1], \"%H:%M:%S\").time()\n comp = s_line[3]\n login = s_line[4]\n #l_time = datetime.datetime.strptime(s_line[1][-3], \"%H:%M:%S\").time()\n \n print('_____________________')\n print(s_line)\n #print('Дата: %s' %(s_line[0]))\n print('Дата: %s' %(l_date))\n #print('Время: %s' %(s_line[1]))\n print('Время: %s' %(l_time))\n print('Компьютер: %s' % (comp))\n print('Логин: %s' % (login))\n \n try:\n ad_logon = ADLogonFromComputer.objects.get(logon_date=l_date, logon_time=l_time, computer_name=comp, login_name=login)\n except:\n ad_logon = ADLogonFromComputer(logon_date=l_date, logon_time=l_time, computer_name=comp, login_name=login)\n ad_logon.save()\n \n try:\n ldap_obj=LdapInfo.objects.get(samaccountname__iexact=ad_logon.login_name)\n ad_logon.person=ldap_obj.person\n ad_logon.save()\n except:\n import_err.append(ad_logon.login_name)\n\n elif l_date > begin_date:\n l_time = datetime.datetime.strptime(s_line[1], \"%H:%M:%S\").time()\n comp = s_line[3]\n login = s_line[4]\n #l_time = datetime.datetime.strptime(s_line[1][-3], \"%H:%M:%S\").time()\n \n print('_____________________')\n print(s_line)\n #print('Дата: %s' %(s_line[0]))\n print('Дата: %s' %(l_date))\n #print('Время: %s' %(s_line[1]))\n print('Время: %s' %(l_time))\n print('Компьютер: %s' % (comp))\n print('Логин: %s' % (login))\n \n try:\n ad_logon = ADLogonFromComputer.objects.get(logon_date=l_date, logon_time=l_time, computer_name=comp, login_name=login)\n except:\n ad_logon = ADLogonFromComputer(logon_date=l_date, logon_time=l_time, computer_name=comp, login_name=login)\n ad_logon.save()\n \n try:\n ldap_obj=LdapInfo.objects.get(samaccountname__iexact=ad_logon.login_name)\n ad_logon.person=ldap_obj.person\n ad_logon.save()\n except:\n import_err.append(ad_logon.login_name)\n elif logons_err:\n output_err.write(line)\n except:\n if logons_err:\n output_err.write(line)\n if logons_err:\n output_err.close()\n source_file.close()\n print('Ошибка поиска ADLogonFromComputer.login_name в LdapInfo.samaccountname: ',import_err)", "def parse_log(lg_dns, testID):\n\n url = 'http://' + lg_dns + '/log?name=test.' 
+ str(testID) + '.log'\n webpage = urlopen(url)\n text = webpage.readlines()\n with open('log_lg.txt', 'w') as log:\n for line in text:\n if line.startswith('Your submission'):\n continue\n log.write(line)\n\n total = 0\n conf = ConfigParser.ConfigParser()\n conf.read('log_lg.txt')\n sections = conf.sections()\n last_section = ''\n for item in sections:\n last_section = item\n for opt in conf.options(last_section):\n num = conf.get(last_section, opt)\n total += float(num)\n print 'Total = ' + str(total)\n return total", "def _forward_log(self):\n\n if self.log is None:\n return\n\n fd = None\n try:\n fd = os.open(\"%s.out\" % self.vm_log_path, os.O_RDONLY)\n data = \"\"\n while True:\n new_data = os.read(fd, 4096)\n if new_data == \"\":\n self._log_to_file(data)\n return\n\n data += new_data\n lines = data.split(\"\\n\")\n for line in lines[:-1]:\n self._log_to_file(line)\n data = lines[-1]\n\n finally:\n if fd is not None:\n os.close(fd)", "def read_file(log_file):\n\t\tfile = open(log_file, 'r')\n\t\tresult = []\n\t\twhile 1:\n\t\t\tcontent = file.readline()\n\t\t\tif not content:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tdata = content.split(\"\\003\")\n\t\t\t\tif len(data) == 13:\n\t\t\t\t\ttrack = {\n\t\t\t\t\t\t\t 'device_no' : long(data[0][3:]), 'sim' : data[1], 'type':int(data[2]), 'gps_time' : MongoTrack.time_trans(data[3]),\n\t\t\t\t\t\t\t 'valid' : data[4], 'loc':{'long' : float(data[5]), 'lat' : float(data[6]) }, 'altitude' : float(data[7]),\n\t\t\t\t\t\t\t 'speed' : float(data[8]), 'course' : float(data[9]), 'km' : float(data[10]), 'para' : float(data[11]),\n\t\t\t\t\t\t\t 'rtime' : MongoTrack.time_trans(data[12].strip())\n\t\t\t\t\t\t\t}\n\t\t\t\t\tresult.append(track)\n\t\tfile.close()\n\t\treturn result", "def _process_logs_download(self, logfile):\r\n\r\n print 'Downloading PCU logs'\r\n command = 'robot --outputdir \"C:\\Robot Framework\\Output\\PCU_logs\" {}.robot'.format(self.name)\r\n\r\n return self._run_command(command, logfile)", "def collect_logs(self):\n logs = glob.glob(f\"{self.production.rundir}/*.err\") #+ glob.glob(f\"{self.production.rundir}/*/logs/*\")\n logs += glob.glob(f\"{self.production.rundir}/*.out\")\n messages = {}\n for log in logs:\n with open(log, \"r\") as log_f:\n message = log_f.read()\n messages[log.split(\"/\")[-1]] = message\n return messages", "def _read_log(self, **kwargs):\n\n log_file = find_log_file()\n\n if not log_file:\n raise RequestProcessingError(\n \"Error attempting to retrieve logs - unable to determine log filename. \"\n \"Please verify that the plugin is writing to a log file.\"\n )\n\n try:\n return read_log_file(log_file=log_file, **kwargs)\n except IOError as e:\n raise RequestProcessingError(\n \"Error attempting to retrieve logs - unable to read log file at {0}. 
\"\n \"Root cause I/O error {1}: {2}\".format(log_file, e.errno, e.strerror)\n )", "def _readin_syslog(file, time_offset='+0000'):\n\tf = open(file, 'r')\n\tcounter = 0\n\tcontent = []\n\tsources = []\n\tp = re.compile(r'^(\\D{3}\\s+\\d+\\s\\d{2}:\\d{2}:\\d{2})\\s(\\S+)\\s([^\\][:]+)(\\[\\d+\\]){0,1}([^:])*:\\s(.*)$')\n\tp2 = re.compile(r'^.*---\\slast\\smessage\\srepeated\\s\\d+\\stime[s]{0,1}\\s---$')\n\tprecise_date = re.compile(r'^(\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}.\\d{1,6}\\+\\d{2}:\\d{2})\\s(\\S+)\\s([^\\][:]+)(\\[\\d+\\]){0,1}([^:])*:\\s(.*)$')\n\n\tfor x in f.readlines():\n\t\tcounter+=1\n\t\tm = p.search(x)\n\t\t_print_progress(counter)\n\t\tif m:\n\t\t\t# default syslog line was read, herre we assign the year 2017 to all timestamps\n\t\t\tformatted_date = datetime.datetime.strptime('2017 ' + m.group(1)+ time_offset,\"%Y %b %d %H:%M:%S%z\")\n\t\t\tcontent.append(logfile_entry(counter, file, m.group(6), m.group(0), formatted_date, m.group(2),m.group(3)))\n\t\t\tif not m.group(3) in sources:\n\t\t\t\tsources.append(m.group(3))\n\t\telif p2.search(x):\n\t\t\t# a message syaing \"last message repeated x times\" was read, here we simply ignore such lines\n\t\t\tcounter -= 1\n\t\telse:\n\t\t\tm3 = precise_date.search(x)\n\t\t\tif m3:\n\t\t\t\t# precise timestamps are detected\n\t\t\t\tunformatted_date = m3.group(1)\n\t\t\t\tunformatted_date = unformatted_date[:-3]+unformatted_date[-2:]\n\t\t\t\t# this hack around is not needed in Python 3.7, see https://bugs.python.org/issue15873\n\t\t\t\tformatted_date = datetime.datetime.strptime(unformatted_date,\"%Y-%m-%dT%H:%M:%S.%f%z\")\n\t\t\t\tcontent.append(logfile_entry(counter, file, m3.group(6), m3.group(0), formatted_date, m3.group(2), m3.group(3)))\n\t\t\t\tif not m3.group(3) in sources:\n\t\t\t\t\tsources.append(m3.group(3))\n\t\t\telse:\n\t\t\t\t# in case no prior regex matches, the line is added to the line read before\n\t\t\t\tif len(content) > 0:\n\t\t\t\t\tcontent[-1].message += x\n\t\t\t\t\tcontent[-1].structured_data += x\n\t\t\t\t\tcounter -= 1\n\t\t\t\telse:\n\t\t\t\t\tcounter -= 1\n\t\t\t\t\tpass\n\tf.close()\n\t_delete_print()\n\tlf = logfile(file, counter, 'syslog', content,sources)\n\treturn lf", "def collect_log(self):\n path = 'cluster_test_%d/*.log' % self.address[1]\n src = \"%s@%s:%s\" % (self.user_name, self.address[0], path)\n dest = console_config._log_path\n self._rsync(src, dest)", "def parse_file(self):\n with open(self.file_name, 'r', errors='ignore') as log_file:\n for line in log_file:\n self.process_line(line)", "def load_profile(dir):\n filename = \"profile.pkl\"\n with open(osp.join(dir, filename), \"rb\") as f:\n logger = pickle.load(f)\n return logger", "def parse_log_file(self, compute_stress=False):\n output_filename = self.node.get_option('output_filename')\n output_txt = self.retrieved.get_object_content(output_filename)\n try:\n output_data = read_log_file(output_txt, compute_stress=compute_stress)\n except Exception:\n traceback.print_exc()\n return None, self.exit_codes.ERROR_LOG_PARSING\n return output_data, None", "def reads(err_log):\n # Initialise variables\n num_reads = 0\n paired_reads = 0\n # Open the log file\n with open(err_log, 'r') as error_log:\n # Extract the necessary information\n for line in error_log:\n if 'Pairs:' in line:\n num_reads = line.split('\\t')[-1].rstrip()\n elif 'Joined:' in line:\n paired_reads = line.split('\\t')[-2].rstrip()\n return num_reads, paired_reads", "def open_logfile(self):\r\n if self.output_option == 2:\r\n self.ER_file = open(self.result_filename, 
'w')", "def readLog(self, offset, length):\r\n self._update('readLog')\r\n\r\n logfile = self.supervisord.options.logfile\r\n\r\n if logfile is None or not os.path.exists(logfile):\r\n raise RPCError(Faults.NO_FILE, logfile)\r\n\r\n try:\r\n return as_string(readFile(logfile, int(offset), int(length)))\r\n except ValueError as inst:\r\n why = inst.args[0]\r\n raise RPCError(getattr(Faults, why))", "def process_log_file(cur, filepath):\r\n df=pd.read_json(filepath,lines=True)\r\n df2=df\r\n df=df[df['page']=='NextSong']\r\n ser=pd.to_datetime(df['ts'],unit='ms')\r\n times=[]\r\n for i in ser:\r\n times.append([i,i.hour,i.day,i.week,i.month,i.year,i.day_name()])\r\n for i in times:\r\n cur.execute(time_table_insert,i)\r\n df=df[['userId','firstName','lastName','gender','level']]\r\n for i,row in df.iterrows():\r\n cur.execute(users_table_insert,list(row))\r\n for i, row in df2.iterrows():\r\n cur.execute(song_select, (row.song, row.artist, row.length))\r\n res = cur.fetchone()\r\n if res:\r\n song_id, artist_id = res\r\n else:\r\n song_id, artist_id = None, None\r\n\r\n songplay_data = (\r\n i, pd.to_datetime(row.ts, unit='ms'),int(row.userId), row.level, song_id, artist_id, row.sessionId,\r\n row.location, row.userAgent)\r\n cur.execute(songplays_table_insert, songplay_data)", "def read_agent_logfile(self):\n server = self.get_agent()\n assert server.logfile.exists(), \"don't have logfile?\"\n return server.logfile.read_text(errors=\"backslashreplace\")" ]
[ "0.64771223", "0.6366919", "0.6224004", "0.6189014", "0.6156716", "0.589416", "0.56880486", "0.5653552", "0.5653552", "0.5563616", "0.55355364", "0.55350137", "0.55187297", "0.5504658", "0.5428539", "0.54203796", "0.54149693", "0.5402457", "0.53929543", "0.53881913", "0.5377459", "0.53773975", "0.53667355", "0.5360907", "0.534167", "0.5305604", "0.5296174", "0.5275214", "0.5270657", "0.52653825" ]
0.66682994
0
Execute a command line with subprocess.
def executeLine(line): pl = Popen(line, shell=True, stderr=PIPE, stdout=PIPE) o, e = pl.communicate() return o, e
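A short usage sketch for executeLine above; the echoed command is arbitrary. Note that communicate() returns bytes, so decoding is left to the caller.

out, err = executeLine('echo hello world')
print(out.decode().strip())          # -> hello world
if err:
    print('stderr:', err.decode().strip())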
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subproc(self,line):\n self.set_stdout()\n proc = subprocess.Popen(line.split(),stdout=self.stdout)\n proc.wait() #ensures that the subprocess executes and terminates before returning to the shell", "def do_shell(self, line):\n os.system(line)", "def do_shell(self, line):\n subprocess.call(line, shell=True)", "def do_shell(self, line):\n # print(\"running shell command:\", line)\n sub_cmd = subprocess.Popen(line, shell=True, stdout=subprocess.PIPE)\n output = sub_cmd.communicate()[0].decode('utf-8')\n print(output)\n self.last_output = output", "def do_shell(self, line):\n print 'Running shell command:', line\n output = os.popen(line).read()\n print output\n self.last_output = output", "def do_shell(self, line):\n eval(line)", "def do(self, line): \n self.interface.onecmd(line)", "def remote_execute(lines):", "def Run(command_line):\n print >> sys.stderr, command_line\n return subprocess.check_output(command_line, shell=True)", "def call_command_line(string, **kwargs):\n return subprocess.run(string.split(\" \"), **kwargs)", "def do_shell(self, line):\n print(\"Shell command:\", line)\n\n output = os.popen(line).read()\n\n print_info(\"Shell output: \", output)\n\n self.last_output = output", "def run_process(self, inp=\"\"):\n return subprocess.run(self.binary,\n input=inp,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True)", "def cmd(commandLine, choice, verbose = False):\n\tif verbose:\n stdout=None\n\telse:\n stdout=subprocess.PIPE\n\n\tlCmd = shlex.split(commandLine)\n\ttry:\n\t run = subprocess.call(lCmd, \n\t\t\t shell=choice,\n stdout=stdout,\n\t\t\t stderr=subprocess.PIPE)\n\texcept subprocess.CalledProcessError as err:\n\t sys.stderr.write(str(err))", "def send_to_cmdline(string):\n retval=subprocess.check_output(string, shell=True)\n retval=str(retval) # Convert from byte string", "def execute(cmd) :\n return os.system( cmd )", "def output_shell(line):\n try:\n shell_command = subprocess.Popen(\n line, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n except OSError:\n return None\n except ValueError:\n return None\n\n (output, err) = shell_command.communicate()\n shell_command.wait()\n if shell_command.returncode != 0:\n print(\"Shell command failed to execute\")\n print(line)\n return None\n\n return output", "def run_command(command_line: str, **kwargs) -> subprocess.CompletedProcess:\n # kwargs = {'check', 'True'}\n logging.info(command_line)\n if 'check' not in kwargs:\n kwargs['check'] = True\n parsed_commands = parse_arguments(command_line, **kwargs)\n # pylint: disable=subprocess-run-check\n return subprocess.run(**parsed_commands)", "def _subexec(command):\n lcwd = fabric.state.env.get('lcwd', None) or None #sets lcwd to None if it bools to false as well\n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=lcwd)\n out, err = process.communicate()\n print \"command : %s \" % command\n print \"out: %s\" % out\n print \"err: %s\" % err", "def Executingbysubprocess(command):\n result = subprocess.Popen(command, shell=True, stdout=PIPE).stdout\n output = result.read()\n print output", "def _callOnCommandLine(self, cmd=[]):\n\t\tp = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE)\n\t\tstdout, stderr = p.communicate()\n\t\treturn stdout, stderr", "def execCMD(self, cmd, arg):\n result = subprocess.check_output([cmd, arg])\n return result", "def run_cmdline(self, line):\n\n try:\n result = self.run_argv(shlex.split(line))\n except Exception as e:\n 
log.warn(messages.generic.error_parsing_command_s % str(e))\n return\n\n if result not in (None, ''):\n log.info(utilities.stringify(result))\n\n # Data is returned for the testing of _cmdline calls\n return result", "def execute(command):\n process = subprocess.Popen(command, stdout=subprocess.PIPE)\n return process.communicate()", "def sub_process(path, student_name, course_name, block_id) :\n\t\n\tcommand = ['python', '../lib/python2.7/site-packages/eyeGaze.py', path, student_name, course_name, block_id]\n\tprocess_call = subprocess.call(command)", "def run_subprocess(text, args):\n proc = subprocess.run(\n args,\n input=text,\n encoding='utf-8',\n stdout=subprocess.PIPE)\n return proc.stdout.strip()", "def _run_command(args):\n subprocess.run(args, check=True)", "def execute(self):\n self.process = subprocess.Popen(self.command_text_list)\n self.process.wait()", "def execute(cmd_string):\n pass", "def execute(self, *params):\n if not self.running:\n raise ValueError(\"ExifTool instance not running.\")\n self._process.stdin.write(b\"\\n\".join(params + (b\"-execute\\n\",)))\n self._process.stdin.flush()\n output = b\"\"\n fd = self._process.stdout.fileno()\n while not output[-32:].strip().endswith(sentinel):\n output += os.read(fd, block_size)\n return output.strip()[:-len(sentinel)]", "def exe(self, inp):\n try:\n spl = shlex.split(inp)\n except:\n self.err_print('Mismatched quotations.')\n self.command_event.set()\n return\n\n if not spl:\n self.err_print(\"\")\n elif spl[0] in self.commands:\n self.err_print(\"\")\n self.commands[spl[0]](spl[1:])\n else:\n self.err_print('Invalid command: ' + spl[0])\n\n self.command_event.set()" ]
[ "0.748457", "0.7471307", "0.7392303", "0.71741265", "0.7088497", "0.69453466", "0.68881345", "0.6846201", "0.6814815", "0.67655087", "0.6763409", "0.6605997", "0.65672106", "0.6551925", "0.6531274", "0.65091294", "0.64287466", "0.6407803", "0.6388115", "0.63674563", "0.63492435", "0.63303185", "0.62574553", "0.6255583", "0.6231966", "0.62155247", "0.6175664", "0.61476624", "0.61246186", "0.6100108" ]
0.78371656
0
Read the bim/fam files from a plink fileset.
def read_BimFam(prefix):
    # Column names for the plink .bim file
    Bnames = ['CHR', 'SNP', 'cM', 'BP', 'A1', 'A2']
    bim = pd.read_table('%s.bim' % (prefix), delim_whitespace=True, header=None,
                        names=Bnames)
    # Column names for the plink .fam file (sample information)
    Fnames = ['FID', 'IID', 'father', 'mother', 'Sex', 'Phenotype']
    fam = pd.read_table('%s.fam' % (prefix), delim_whitespace=True, header=None,
                        names=Fnames)
    return bim, fam
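A brief usage sketch for read_BimFam above; the 'mydata' prefix is hypothetical and is assumed to point at mydata.bim / mydata.fam as produced by plink, with pandas imported as pd by the surrounding module.

bim, fam = read_BimFam('mydata')
print('%d variants, %d samples' % (bim.shape[0], fam.shape[0]))
print(bim[['CHR', 'SNP', 'BP']].head())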
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_plink(file_prefix, verbose=True):\n\n fn = {s: \"%s.%s\" % (file_prefix, s) for s in ['bed', 'bim', 'fam']}\n\n with TimeIt(\"Reading %s...\" % fn['bim'], not verbose):\n bim = _read_bim(fn['bim'])\n nmarkers = bim.shape[0]\n\n with TimeIt(\"Reading %s...\" % fn['fam'], not verbose):\n fam = _read_fam(fn['fam'])\n nsamples = fam.shape[0]\n\n with TimeIt(\"Reading %s...\" % fn['bed'], not verbose):\n bed = _read_bed(fn['bed'], nsamples, nmarkers)\n\n return (bim, fam, bed)", "def load_plink_bed_bim_fam_dataset(path_dataset, snp_ids=None,\n subject_ids=None, count_A1=True):\n\n # Load the metadata, without loading the genotypes\n snp_data = Bed(path_dataset, count_A1=count_A1)\n\n # If requested, filter on snp ids\n if snp_ids is not None:\n snp_ids = set(snp_ids)\n snp_bool_indexes = [(s in snp_ids) for s in snp_data.sid]\n snp_data = snp_data[:, snp_bool_indexes]\n\n # If requested, filter on subject ids\n if subject_ids is not None:\n subject_ids = set(subject_ids)\n subject_bool_indexes = [(s in subject_ids) for s in snp_data.iid[:, 1]]\n snp_data = snp_data[subject_bool_indexes, :]\n\n # Load the genotypes from the Plink dataset\n snp_data = snp_data.read()\n\n return snp_data", "def read_binned(run, bin_scheme):\n\n fname=get_binned_file(run,bin_scheme)\n print(\"reading:\",fname)\n return fitsio.read(fname)", "def readFastaFile(filename):", "def read_match_binned(lens_run, rand_run, bin_scheme):\n\n fname=get_match_binned_file(lens_run, rand_run, bin_scheme)\n print(\"reading:\",fname)\n return fitsio.read(fname)", "def read_relations(db, openfile):\n pass", "def fea_rank_read(select = None):\n\n if select is None:\n select = ['all']\n\n select = [name.lower() for name in select]\n\n # read data file paths\n with open(feature_ranking_file_path, \"r\") as result_file:\n paths = [p.strip('/|\\n| ') for p in result_file.readlines() if len(p.strip('/|\\n| ')) > 0]\n\n # crete output file name\n new_paths = [p.strip('/|\\n| ').split('data')[-1] for p in paths]\n\n have_read = []\n for path in new_paths:\n\n if path in have_read:\n continue\n\n have_read.append(path)\n\n file_paths = get_filepath_in_folders(path)\n for file_path in file_paths:\n\n flag = False\n for filename in select:\n if filename.lower() in file_path.lower():\n flag = True\n\n if 'all' in select:\n flag = True\n\n if flag == False:\n continue\n\n print('read : ', file_path)\n feature_order_table = pd.read_csv(file_path, index_col='index name')\n # print(feature_order_table.values)\n yield feature_order_table, file_path", "def LoadTroikaRefFile(ref_fl):\n refdata = sp.io.loadmat(ref_fl)['BPM0']\n return refdata[2:]", "def read_files(self):\n for f in self.filenames:\n self.games.extend(pgn.loads(open(f).read()))", "def readFT(self,file=\"out__1.ft\"):", "def read_ptbtagged(ptbtagged_path: str) -> Iterator[Tuple[TokenSeq, PosSeq]]:\n #do this immediately (first)\n #start generating feature matrices\n \n #read file into an array \n with open(ptbtagged_path) as f:\n file_array = f.readlines()\n file_array.append(\"\\n\")\n array_of_tuples = create_tuples(file_array)\n\n return generator(array_of_tuples)", "def readPFM(file):\n file = open(file, 'rb')\n\n color = None\n width = None\n height = None\n scale = None\n endian = None\n\n header = file.readline().rstrip()\n if header == b'PF':\n color = True\n elif header == b'Pf':\n color = False\n else:\n raise Exception('Not a PFM file.')\n\n dims = file.readline()\n try:\n width, height = list(map(int, dims.split()))\n except:\n raise Exception('Malformed PFM 
header.')\n\n scale = float(file.readline().rstrip())\n if scale < 0: # little-endian\n endian = '<'\n scale = -scale\n else:\n endian = '>' # big-endian\n\n data = np.fromfile(file, endian + 'f')\n shape = (height, width, 3) if color else (height, width, 1)\n\n data = np.reshape(data, shape)\n data = np.flipud(data)\n return data, scale", "def _fromfile(self, fh):\r\n fh.seek(0)\r\n data = fh.read(4096)\r\n if (len(data) < 7) or not (b'0' < data[1:2] < b'8'):\r\n raise ValueError(\"Not a Netpbm file:\\n%s\" % data[:32])\r\n try:\r\n self._read_pam_header(data)\r\n except Exception:\r\n try:\r\n self._read_pnm_header(data)\r\n except Exception:\r\n raise ValueError(\"Not a Netpbm file:\\n%s\" % data[:32])", "def readMaf( options, data ):\n regex = 's\\s+([\\w\\d\\-]+?)\\.([\\w\\d\\.\\+\\-]+?)\\s+(\\d+)\\s+(\\d+)\\s+([-+])\\s+(\\d+)\\s+([\\-actgurykmswbdhvnACTGURYKMSWBDHVN]+)'\n pat = re.compile( regex )\n mf = open( options.maf )\n mafLineList = []\n order = -1\n hplList = []\n hpl = ''\n five = ''\n three = ''\n for line in mf:\n if line.startswith('#HPL'):\n d = line.split(' ')\n # example line: \"#HPL=12049 5=1 3=1 SPL=123412 S5=0 S3=12\"\n # there will be one hpl line per options.other line\n # in blocks that contain the options.ref\n hpl = int( d[0][5:] ) # comment at start of this field\n hFive = int( d[1][2] )\n hThree = int( d[2][2] )\n spl = int( d[3][4:] ) # no comment at start of this field\n hplList.append( { 'hpl': hpl, 'hFive': hFive, \n 'hThree': hThree, 'spl': spl } )\n continue\n if line.startswith('s'):\n line = line.strip()\n ml, order = extractMafLine( line, order, pat, options, data )\n if ml is None:\n sys.stderr.write( 'regexp fail on file %s line: \\'%s\\'\\n'\n 'Regex: \\'%s\\'\\n' % ( options.maf, line, regex ) )\n sys.exit( 1 )\n if ml == 'notOurGenome':\n continue\n if ml.length != len( ml.sequence ):\n sys.stderr.write( 'Error while working on file %s :\\n '\n 'printed sequence length (%d) not equal to actual sequence '\n 'length (%d) ref genome:%s other genome:%s line below:\\n%s\\n' % \n ( options.maf, ml.length, len( ml.sequence ), options.ref, options.other, line ) )\n sys.exit( 1 )\n mafLineList.append( ml )\n else:\n # end of the block\n if len( mafLineList ) > 0:\n extractBlockPairs( mafLineList, hplList, options, data )\n mafLineList = []\n order = -1\n hplList = []\n hpl = ''\n five = ''\n three = ''\n if len( mafLineList ) > 0:\n extractBlockPairs( mafLineList, hplList, options, data )", "def read_pfm_image(stream) -> HdrImage:\n magic = _read_line(stream)\n if magic != \"PF\":\n raise InvalidPfmFileFormat(\"invalid magic in PFM file\")\n\n img_size = _read_line(stream)\n (width, height) = _parse_img_size(img_size)\n\n endianness_line = _read_line(stream)\n endianness = _parse_endianness(endianness_line)\n\n result = HdrImage(width=width, height=height)\n for y in range(height - 1, -1, -1):\n for x in range(width):\n (r, g, b) = [_read_float(stream, endianness) for i in range(3)]\n result.set_pixel(x, y, Color(r, g, b))\n\n return result", "def readPtfm(self):\n\n fname = self.ptfm_file\n print \"reading platform file from \", fname\n try:\n fh = open(fname,'r')\n self.lines_ptfm = fh.readlines()\n fh.close()\n except:\n sys.stdout.write (\"Error opening {:}\\n\".format(fname))\n return 0\n\n for ln in self.lines_ptfm:\n ln = ln.split()\n if (len(ln) > 1 and ln[1] == \"WAMITFile\"):\n self.wamit_path = fix_path(ln[0][1:-1])", "def _read_files(self) -> MMD:\n\t\theaders = []\n\t\tbodies = []\n\t\tif self.config.file_type == FileType.CSV:\n\t\t\tif 
self.config.source_uris.endswith('.zip'):\n\t\t\t\twith ZipFile(self.config.source_uris) as zf:\n\t\t\t\t\tfor item in zf.namelist():\n\t\t\t\t\t\tif item.endswith('.csv'):\n\t\t\t\t\t\t\t# with zf.open(item, 'r') as infile:\n\t\t\t\t\t\t\tcsv_reader = csv.reader(TextIOWrapper(zf.open(item, 'r'), 'utf-8'))\n\t\t\t\t\t\t\theaders.append(next(csv_reader))\n\t\t\t\t\t\t\t# need to find a more efficient way, the csv reader is a generator that can only be used once\n\t\t\t\t\t\t\tbodies.append(list(csv_reader))\n\t\t\telif self.config.source_uris.endswith('.csv'):\n\t\t\t\tfor uri in self.config.source_uris:\n\t\t\t\t\tif uri.endswith('.csv'):\n\t\t\t\t\t\tcsv_reader = csv.reader(open(uri, newline='', encoding='utf-8'))\n\t\t\t\t\t\theaders.append(next(csv_reader))\n\t\t\t\t\t\tbodies.append(list(csv_reader))\n\t\telif self.config.file_type == FileType.CNSCHEMA:\n\t\t\theader = ['@id', 'label_@language', 'label_@value']\n\t\t\tbody = []\n\t\t\twith open(self.config.source_uris, 'r') as load_f:\n\t\t\t\tload_dict = json.load(load_f)\n\t\t\t\theader.extend(load_dict['@context'].keys())\n\t\t\t\theader = [h for h in header if h not in ['label', 'range', 'domain', 'subClassOf']]\n\t\t\t\ttmp_h = [h for h in header if h not in ['@id', '@language', '@value']]\n\t\t\t\tfor item in load_dict['@graph']:\n\t\t\t\t\tif item['@id'].split('/')[-2] == 'resource':\n\t\t\t\t\t\trow = [item['@id'], item['label']['@language'], item['label']['@value']]\n\t\t\t\t\t\tfor h in tmp_h:\n\t\t\t\t\t\t\tif h in item:\n\t\t\t\t\t\t\t\trow.append(item[h])\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\trow.append(None)\n\t\t\t\t\t\tbody.append(tuple(row))\n\t\t\theaders.append(tuple(header))\n\t\t\tbodies.append(body)\n\t\telif self.config.file_type == FileType.OPENBASE:\n\t\t\theader = []\n\t\t\tbody = []\n\t\t\twith open(self.config.source_uris, 'r') as load_f:\n\t\t\t\tfor line in load_f:\n\t\t\t\t\trow = []\n\t\t\t\t\tflat_line = flatten_json(json.loads(line))\n\t\t\t\t\tfor key in flat_line:\n\t\t\t\t\t\tif key not in header:\n\t\t\t\t\t\t\theader.append(key)\n\t\t\t\t\tfor h in header:\n\t\t\t\t\t\tif h in flat_line:\n\t\t\t\t\t\t\trow.append(flat_line[h])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\trow.append(None)\n\t\t\t\t\tbody.append(row)\n\t\t\tfor item in body:\n\t\t\t\tif len(item) < len(header):\n\t\t\t\t\titem.extend([None for i in range(len(header) - len(item))])\n\t\t\theaders.append(tuple(header))\n\t\t\tbodies.append(tuple([tuple(item) for item in body]))\n\t\telif self.config.file_type == FileType.OPENKS:\n\t\t\t# knowledge graph dataset loading \n\t\t\tif os.path.exists(self.config.source_uris + '/entities') and os.path.exists(self.config.source_uris + '/triples'):\n\t\t\t\theaders = [['entities'], ['triples']]\n\t\t\t\tfor file in ['entities', 'triples']:\n\t\t\t\t\ttmp = []\n\t\t\t\t\twith open(self.config.source_uris + '/' + file, 'r') as load_f:\n\t\t\t\t\t\tfor line in load_f:\n\t\t\t\t\t\t\ttmp.append(tuple([item.strip() for item in line.split('\\t')]))\n\t\t\t\t\t\tbodies.append(tuple(tmp))\n\t\t\t# general text dataset loading\n\t\t\telif os.path.exists(self.config.source_uris + '/train') and os.path.exists(self.config.source_uris + '/valid'):\n\t\t\t\theaders = [['train'], ['valid']]\n\t\t\t\tfor file in ['train', 'valid']:\n\t\t\t\t\ttmp = []\n\t\t\t\t\twith open(self.config.source_uris + '/' + file, 'r') as load_f:\n\t\t\t\t\t\tfor line in load_f:\n\t\t\t\t\t\t\ttmp.append(tuple([item.strip() for item in line.split('@@')]))\n\t\t\t\t\t\tbodies.append(tuple(tmp))\n\t\t\telse:\n\t\t\t\tlogger.warn('Only allows 
loading with entities and triples for now!')\n\t\t\t\traise IOError\n\t\telif self.config.file_type == FileType.NERO:\n\t\t\theaders = [['unlabeled_data'], ['predict'], ['pattern']]\n\t\t\tfor file in ['unlabeled_data', 'predict', 'pattern']:\n\t\t\t\ttmp = []\n\t\t\t\twith open(self.config.source_uris + '/' + file + '.json', 'r') as load_f:\n\t\t\t\t\tfor line in load_f:\n\t\t\t\t\t\ttmp.append(line.strip())\n\t\t\t\t\tbodies.append(tuple(tmp))\n\n\t\tmmd.name = self.config.data_name\n\t\tmmd.headers = headers\n\t\tmmd.bodies = bodies\n\t\treturn mmd", "def read_file(path_to_file):\n 8", "def readFasta(self, fp):\n\t\t\n\t\tfor head, seq in self.parseFasta(fp):\n\t\t\t#analyzing the sequence\n\t\t\tself.analyzeSequence(seq)\n\t\t\t#saving the header\n\t\t\tif head == '':\n\t\t\t\tcontinue\n\t\t\telse:\t\n\t\t\t\tself.header.append(head)", "def _read_rmf(file):\n\n with fits.open(file) as hdul:\n data = hdul[2].data\n\n return data['energ_lo'], data['energ_hi'], data['n_grp'], data['f_chan'], data['n_chan'], data['matrix']", "def read(path):", "def readfile(file, sub_im, cr):\n\n root, ext = os.path.splitext(file)\n\n if ext == '.tif':\n print('Reading tiff image:', file)\n par = readpar(root + '.mli.par')\n data = readtiff(file, sub_im, cr)\n\n else: # must be GAMMA flat binary float format\n print('Reading flat binary image', file)\n par = readpar(root + ext + '.par')\n data = readmli(file, par, sub_im, cr)\n\n # extract relevant metadata\n rho_r = float(par['range_pixel_spacing'].split()[0])\n rho_a = float(par['azimuth_pixel_spacing'].split()[0])\n theta = float(par['incidence_angle'].split()[0])\n\n return data, rho_r, rho_a, theta", "def read_pfeatures(namefile):\n db = shelve.open(namefile)\n hashes = db['hashes']\n nif = db['nif']\n year = db['year']\n pfeatures = db['pfeatures']\n methodvalues = db['methodvalues']\n db.close()\n return hashes, nif, year, pfeatures, methodvalues", "def _read_local(self):\n\n self.attributions = np.genfromtxt(\n self.attributions_path, dtype=float, delimiter=\",\", skip_header=1\n )\n\n with open(self.attributions_path) as attribution_file:\n self.feature_labels = next(csv.reader(attribution_file))", "def read_files(filenames, gram_size=1):\n assert isinstance(filenames, list), \"filenames argument must be a list\"\n parser = MorParser()\n for fn in filenames:\n for uid, speaker, ngram in generate_chunks(parser.parse(fn), gram_size):\n yield fn, uid, speaker, ngram", "def read_data(feature_file, label_file):", "def bbl_file(self, base_file):\n bbl_path = os.path.abspath(os.path.splitext(base_file)[0]) + '.bbl'\n return self.open_encode_safe(bbl_path).readlines()", "def read_data_set():\n # shapes of datasets -- [] means expanded form:\n # - X: J\n # - net.R: J [x J x 1]\n # - F_DIST: J x J x num_features\n # - F_DIST_w1: J x J x num_features\n # - w['except_first'][-1]: (last weights) J x num_features [x 1]\n # - w['except_first'][1:-1]: (second to last weights) J x J x num_features\n # - first weights **were** also J x J x num_features\n # - w['first_for_r']: J x 1 x num_features\n\n read_X()\n read_weights(read_FDIST())", "def cfdReadFacesFile(self): \r\n\r\n with open(self.facesFile,\"r\") as fpid:\r\n print('Reading faces file ...')\r\n self.faceNodes=[]\r\n \r\n for linecount, tline in enumerate(fpid):\r\n \r\n if not io.cfdSkipEmptyLines(tline):\r\n continue\r\n \r\n if not io.cfdSkipMacroComments(tline):\r\n continue\r\n \r\n if \"FoamFile\" in tline:\r\n dictionary=io.cfdReadCfdDictionary(fpid)\r\n continue\r\n \r\n if len(tline.split()) 
==1:\r\n if \"(\" in tline:\r\n continue\r\n if \")\" in tline:\r\n continue\r\n else:\r\n \r\n self.numberOfFaces = int(tline.split()[0])\r\n continue\r\n \r\n tline=tline.replace(\"(\",\" \")\r\n tline=tline.replace(\")\",\"\")\r\n faceNodesi=[]\r\n for count, node in enumerate(tline.split()):\r\n if count == 0:\r\n continue\r\n #faceNodesi.append(int(node))\r\n else:\r\n faceNodesi.append(float(node))\r\n \r\n self.faceNodes.append(faceNodesi)\r\n \r\n ## (array) with the nodes for each face\r\n self.faceNodes=np.asarray(self.faceNodes)\r\n print(self.faceNodes)", "def read_dir():\n file_list=[]\n title_list = []\n for filename in os.listdir(\"alignments/\"):\n if filename.endswith(\".aln\"): #Retrieve only alignment files.\n file_list.append(filename)\n with open (\"genID.txt\",'r') as x: #The genID.txt file contains relevant gene names.\n while True:\n rule = x.readline()\n if len(rule) > 0: #If the rule is empty, the program does not use it.\n if rule[0] == \"B\": #Only fetch gen names.\n title_list.append(rule) #The title_list is used to create the variant files in a later stadium\n else:\n break\n return file_list,title_list" ]
[ "0.64510775", "0.5825708", "0.5510868", "0.55018467", "0.5465175", "0.5433269", "0.54157674", "0.53745013", "0.52349013", "0.5129814", "0.5121936", "0.51169163", "0.50746495", "0.5056231", "0.5055647", "0.50515175", "0.5049878", "0.5017783", "0.5001219", "0.4999236", "0.49899185", "0.49884793", "0.49653384", "0.49646822", "0.49520463", "0.49380532", "0.49328682", "0.4932302", "0.49197373", "0.49171862" ]
0.6117494
1
Generate and read frequency files and filter based on threshold
def read_freq(bfile, plinkexe, freq_threshold=0.1, maxmem=1700, threads=1):
    high = 1 - freq_threshold
    low = freq_threshold
    if not os.path.isfile('%s.frq.gz' % bfile):
        nname = os.path.split(bfile)[-1]
        frq = ('%s --bfile %s --freq gz --keep-allele-order --out %s --memory '
               '%d --threads %d')
        line = frq % (plinkexe, bfile, nname, maxmem, threads)
        o, e = executeLine(line)
        frq = pd.read_table('%s.frq.gz' % nname, delim_whitespace=True)
    else:
        frq = pd.read_table('%s.frq.gz' % bfile, delim_whitespace=True)
    # Keep only variants with freq_threshold < MAF < 1 - freq_threshold
    return frq[(frq.MAF < high) & (frq.MAF > low)]
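A hedged usage sketch for read_freq above; the bfile prefix and plink executable path are hypothetical, and the MAF column name follows standard plink --freq output.

common = read_freq('genos/mydata', 'plink', freq_threshold=0.05,
                   maxmem=4000, threads=4)
print('variants with 0.05 < MAF < 0.95: %d' % common.shape[0])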
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_frequencies(self, args):\n\n for file in args.frequencies:\n self._check_valid_file(file[0])", "def automatic_checking(files):\n for i in range(10):\n fft_checking(files[i])", "def update_freq_dist(filename):\r\n pass", "def test_filtered_scan(self):\n self.run_scan(self.tempdir, self.root_fcount + self.nest_fcount, ext=\".txt\")", "def read_file_simple(self,filename):\n\n freqlim = config.cutoff*self.cutoff\n exceed_freqlim = False\n freqfile = open(filename)\n freqfile.readline() # skip head\n mode_temp = []\n for line in freqfile:\n line = line.strip()\n columns = line.split()\n n = int(columns[1])\n freq = utilities.to_float(columns[2])\n # remove frequencies above AIMS_configure.cutoff*nu_{cut-off}\n if (freq > freqlim):\n exceed_freqlim = True\n continue\n if (config.npositive and (n < 0)): continue # remove g-modes if need be\n mode_temp.append((n,int(columns[0]),freq,utilities.to_float(columns[4])))\n freqfile.close()\n self.modes = np.array(mode_temp,dtype=modetype)\n\n return exceed_freqlim", "def rough_frequency_samples(m1, m2, flow, fmax, df_min):\n kmin = int(flow / df_min)\n kmax = int(fmax / df_min)\n k = kmin\n ksamples = []\n while k < kmax:\n ksamples.append(k)\n k += int(1.0 / rough_time_estimate(m1, m2, k * df_min) / df_min)\n ksamples.append(kmax)\n return numpy.array(ksamples)", "def main():\n glob_pattern = \"{root}/{child}/*.xml\".format(root=MANCHESTER_ROOT, child=TARGET_CHILD)\n corpus_files = glob(glob_pattern)\n for filename in corpus_files:\n print(filename)\n to_csv(filtered_parent_freq_count([filename], 2))", "def source_freq(self) -> int:", "def test_1d_freq():\n \n dic,data = ng.pipe.read(\"common_data/1d_pipe/test.ft\")\n assert data.shape == (4096,)\n assert data.dtype == 'float32'\n assert round(data[0],2) == -63789.66\n assert round(data[1],2) == -63159.88\n assert round(data[100],2) == -29308.34\n write_readback(dic,data)\n check_ppm_limits(dic,data,0,[297.92, -99.82])", "def test_inspect_freq_filter(tmp_path, l_freq, h_freq):\n pytest.importorskip(\"matplotlib\")\n bids_root = setup_bids_test_dir(tmp_path)\n bids_path = _bids_path.copy().update(root=bids_root)\n inspect_dataset(bids_path, l_freq=l_freq, h_freq=h_freq, find_flat=False)", "def grep_data(cutoff, files):\n res = {}\n for file in files:\n if is_gaussian(file) and is_fluorescence(file):\n res, name = update_dict_with_name(file, res)\n res, root = find_root(file, res, name)\n res = find_spectral_data(file, res, name, root, cutoff)\n return res", "def cache_counts(counts, sorted=False, file_ingredients='../data/ingredients-teste.txt',\n frequency_threshold=30): #frequency_threshold=750\n if sorted:\n # another save procedure. 
counts is not a dictionary\n with open(file_ingredients, 'w') as f:\n for index in range(0, len(counts)):\n name = counts[index][0]\n frequency = counts[index][1]\n if frequency > frequency_threshold:\n f.write(name + ';' + str(frequency) + '\\n')\n else:\n # Not sorted\n with open(file_ingredients, 'w') as f:\n for more_freq in counts.keys():\n if counts[more_freq] > frequency_threshold:\n f.write(more_freq + ';' + str(counts[more_freq]) + '\\n')", "def count(handle, extractor, sample_size, threshold, use_freq=False):\n barcodes = defaultdict(int)\n\n for i, record in enumerate(SeqIO.parse(handle, guess_file_format(handle))):\n if i > sample_size:\n break\n barcodes[extractor.get(record)] += 1\n\n if use_freq:\n return filter(lambda x: barcodes[x] >= threshold, barcodes)\n return sorted(barcodes, key=barcodes.get, reverse=True)[:threshold]", "def generate_file_list(self, threshold=16):\n def check_mask(cls, imgId):\n # Set cache to False to avoid out of memory\n label = np.array(self.get_label(cls, imgId, cache=False))\n if np.count_nonzero(255 - label) < threshold:\n # two small background\n return False\n elif np.count_nonzero(label) < threshold:\n # too small foreground\n return False\n else:\n return True\n\n print(\"No sample List Found. Generating now...\")\n sample_by_class = {}\n all_count = 0\n waste_count = 0\n for split in cv_split:\n for cls in split:\n sample_by_class['%d' % cls] = []\n all_sample = self.coco.getImgIds(catIds=cls)\n all_count += len(all_sample)\n tqdm_gen = tqdm.tqdm(all_sample, leave=False)\n for pic in tqdm_gen:\n if check_mask(cls, pic):\n sample_by_class['%d' % cls].append(pic)\n else:\n waste_count += 1\n print(waste_count, \"samples are removed.\")\n return sample_by_class", "def bandpass_filter(files, lowpass_freq=0.1, highpass_freq=0.01, tr=2):\n import os\n\n import nibabel as nb\n import numpy as np\n from nipype.utils.filemanip import (\n filename_to_list,\n list_to_filename,\n split_filename\n )\n\n fs = 1./tr\n\n out_files = []\n for filename in filename_to_list(files):\n path, name, ext = split_filename(filename)\n out_file = os.path.join(os.getcwd(), name + '_bandpassed' + ext)\n\n img = nb.load(filename)\n timepoints = img.shape[-1]\n F = np.zeros((timepoints))\n\n lowidx = int(timepoints / 2) + 1\n if lowpass_freq > 0:\n lowidx = np.round(float(lowpass_freq) / fs * timepoints)\n\n highidx = 0\n if highpass_freq > 0:\n highidx = np.round(float(highpass_freq) / fs * timepoints)\n F[int(highidx):int(lowidx)] = 1\n F = ((F + F[::-1]) > 0).astype(int)\n data = img.get_data()\n if np.all(F == 1):\n filtered_data = data\n else:\n filtered_data = np.real(np.fft.ifftn(np.fft.fftn(data) * F))\n img_out = nb.Nifti1Image(filtered_data, img.affine, img.header)\n img_out.to_filename(out_file)\n out_files.append(out_file)\n\n return list_to_filename(out_files)", "def filter_by_freq(self, low=0.5, high=40):\n self.epochs.load_data()\n self.epochs.filter(l_freq=low, h_freq=high, picks = 'all')\n return self.epochs", "def main():\n filter_freq = 1.e4\n re_sample_freq = 1.e5\n glob_search = '*.log'\n\n # parse the command line arguments\n parser = argparse.ArgumentParser(description=\"Filters files in a directory based on a file extension.\")\n parser.add_argument('-d', '--directory', type=str, nargs=1,\n help=\"directory of files to filter. Default is the current directory.\")\n parser.add_argument('-ff', '--filter-freq', type=float, nargs=1,\n help=\"low-pass filter frequency cutoff. 
Default is {0} Hz\".format(filter_freq))\n parser.add_argument('-osr', '--out-sample-rate', type=float, nargs=1,\n help=\"output sample rate. Default is {0} Hz\".format(re_sample_freq))\n parser.add_argument('-g', '--glob', type=str, nargs=1,\n help=\"Unix pattern to search for files in the directory. Default is \\'*.log\\', which finds all\"\n \" files with a '.log' extension. Must surround with quotes.\")\n parser.add_argument('-r', '--recursive', action='store_true',\n help=\"search for files recursively.\")\n args = parser.parse_args()\n\n directory = '.'\n # Use the command line arguments to set our variables, if necessary.\n if args.directory is not None:\n directory = args.directory[0]\n\n if args.filter_freq is not None:\n filter_freq = args.filter_freq[0]\n\n if args.out_sample_rate is not None:\n re_sample_freq = args.out_sample_rate[0]\n\n if args.glob is not None:\n glob_search = args.glob[0]\n print glob_search\n\n # find all of the files in the current directory with .log extension.\n files = []\n for root, dirname, filenames in os.walk(directory):\n for filename in fnmatch.filter(filenames, glob_search):\n files.append(os.path.join(root, filename))\n # Only do top level directory, unless recursive is specified.\n if not args.recursive:\n break\n\n print \"Filter frequency: {0} Hz\".format(filter_freq)\n print \"Output sample frequency: {0} Hz\".format(re_sample_freq)\n print \"Glob search: {0}\".format(glob_search)\n print \"Recursive: {0}\".format(args.recursive)\n print \"Filtering these files:\", files\n print \"\\n----------------------------\\n\"\n\n p = Pool()\n\n # add the file names and filter frequency and output sample rate to a tuple to pass in multiprocessing\n pool_args = []\n for filename in files:\n tup = (filename, filter_freq, re_sample_freq)\n pool_args.append(tup)\n\n # filter each file\n output_file_names = p.map(_filter_wrap, pool_args)\n\n print \"\\n----------------------------\\n\"\n print \"Output files:\", output_file_names", "def process_audio_multiprocess(file_paths_arr,\n filt_type, filt_cutoff_freq, filt_order,\n trim_margin_left, trim_margin_right, trim_top_db, trim_window_length, trim_hop_length, trim_ref, trim_preemphasis_strength,\n SAMPLE_RATE=48000, MIN_SAMPLE_RATE=15999, BIT_DEPTH=2,\n ignore_dirs=[\"Noise samples\",\"_Noisy_\",\"_Very Noisy_\"], skip_existing=False,\n in_ext_=None, out_ext=\".wav\", use_tqdm=True, dump_sample_rates=True\n ):\n import soundfile as sf\n import scipy\n from scipy import signal\n \n if dump_sample_rates:\n sample_rates = {} # array of dicts. 
e.g: [{path 0: sample_rate 0}, {path 1: sample_rate 1}, {path 2: sample_rate 2}, ...]\n \n skip = 0\n prev_sr = 0\n iterator = tqdm(file_paths_arr, smoothing=0.0) if use_tqdm else file_paths_arr\n for file_path in iterator: # recursive directory search\n in_ext = in_ext_ if (in_ext_ is not None) else os.path.splitext(os.path.split(file_path)[-1])[-1] # get ext from file_path or use override.\n out_path = file_path.replace(in_ext,out_ext)\n if skip_existing and os.path.exists(out_path):\n continue\n if any([filter_dir in file_path for filter_dir in ignore_dirs]):\n continue\n \n # VCTK cleanup\n #if file_path.endswith(f\"_mic1{in_ext}\"):\n # os.rename(file_path, file_path.replace(f\"_mic1{in_ext}\",in_ext))\n #if file_path.endswith(f\"_mic2{in_ext}\"):\n # continue\n try:\n native_sound, native_SR = sf.read(file_path, always_2d=True)\n except RuntimeError as ex:\n print(f'\"{os.path.split(file_path)[-1]}\" failed to load and has been deleted.\\nDELETED PATH: \"{file_path}\"')\n os.unlink(file_path)\n #raise RuntimeError(ex)\n native_sound = native_sound[:,0]# take first channel (either mono or left audio channel)\n native_sound = np.asfortranarray(native_sound).astype('float64') # and ensure the audio is contiguous\n \n if native_SR < MIN_SAMPLE_RATE: # skip any files with native_SR below the minimum\n continue\n if native_SR != SAMPLE_RATE: # ensure all audio is same Sample Rate\n try:\n sound = librosa.core.resample(native_sound, native_SR, SAMPLE_RATE)\n except ValueError as ex:\n print(ex, file_path, native_SR, len(native_sound), sep=\"\\n\")\n raise ValueError(ex)\n else:\n sound = native_sound\n \n if dump_sample_rates:\n sample_rates[os.path.abspath(out_path)] = native_SR\n \n # 24 bit -> 16 bit, 32 bit -> 16 bit\n if max(np.amax(native_sound), -np.amin(native_sound)) > (2**23): # if samples exceed values possible at 24 bit\n sound = (sound / 2**(31-15))#.astype('int16') # change bit depth from 32 bit to 16 bit\n elif max(np.amax(native_sound), -np.amin(native_sound)) > (2**15): # if samples exceed values possible at 16 bit\n sound = (sound / 2**(23-15))#.astype('int16') # change bit depth from 24 bit to 16 bit\n \n # apply audio filters\n for type_, freq_, order_ in zip(filt_type, filt_cutoff_freq, filt_order): # eg[ ['lp'], [40], [10] ] # i.e [type, freq, strength]\n sos = signal.butter(order_, freq_, type_, fs=SAMPLE_RATE, output='sos') # calcuate filter somethings\n sound = signal.sosfilt(sos, sound) # apply filter\n \n # apply audio trimming\n for i, (margin_left_, margin_right_, top_db_, window_length_, hop_length_, ref_, preemphasis_strength_) in enumerate(zip(trim_margin_left, trim_margin_right, trim_top_db, trim_window_length, trim_hop_length, trim_ref, trim_preemphasis_strength)):\n if preemphasis_strength_:\n sound_filt = librosa.effects.preemphasis(sound, coef=preemphasis_strength_)\n _, index = librosa.effects.trim(sound_filt, top_db=top_db_, frame_length=window_length_, hop_length=hop_length_, ref=ref_) # gonna be a little messed up for different sampling rates\n else:\n _, index = librosa.effects.trim(sound, top_db=top_db_, frame_length=window_length_, hop_length=hop_length_, ref=ref_) # gonna be a little messed up for different sampling rates\n try:\n sound = sound[int(max(index[0]-margin_left_, 0)):int(index[1]+margin_right_)]\n except TypeError:\n print(f'Slice Left:\\n{max(index[0]-margin_left_, 0)}\\nSlice Right:\\n{index[1]+margin_right_}')\n assert len(sound), f\"Audio trimmed to 0 length by pass {i+1}\\nconfig = {[margin_left_, margin_right_, top_db_, 
window_length_, hop_length_, ref_]}\\nFile_Path = '{file_path}'\"\n \n # write updated audio to file\n if os.path.exists(out_path):\n os.unlink(out_path) # using unlink incase the out_path object is a symlink\n sf.write(out_path, sound, SAMPLE_RATE)\n \n if dump_sample_rates:\n return sample_rates", "def detect_freqs(self):\n n_fft_bins = self._config[\"audio_config\"][\"N_FFT_BINS\"]\n channel_avgs = []\n differences = []\n \n for i in range(n_fft_bins):\n channel_avgs.append(sum(self.freq_channels[i])/len(self.freq_channels[i]))\n differences.append(((self.freq_channels[i][0]-channel_avgs[i])*100)//channel_avgs[i])\n for i in [\"beat\", \"low\", \"mid\", \"high\"]:\n if any(differences[j] >= self.min_percent_diff[i]\\\n and self.freq_channels[j][0] >= self.min_detect_amplitude[i]\\\n for j in range(*self.detection_ranges[i]))\\\n and (time.time() - self.prev_freq_detects[i] > 0.2)\\\n and len(self.freq_channels[0]) == self.freq_channel_history:\n self.prev_freq_detects[i] = time.time()\n self.current_freq_detects[i] = True\n else:\n self.current_freq_detects[i] = False", "def filter_sff_file(flowgrams, header, filter_list, out_fh):\r\n\r\n write_sff_header(header, out_fh)\r\n\r\n l = 0\r\n for f in flowgrams:\r\n passed = True\r\n for filter in filter_list:\r\n passed = passed and filter(f)\r\n if not passed:\r\n # bail out\r\n break\r\n if (passed):\r\n out_fh.write(f.createFlowHeader() + \"\\n\")\r\n l += 1\r\n return l", "def filtered_parent_freq_count(filenames, gram_size):\n counts = Counter()\n vocab = list(read_files(filenames, gram_size=gram_size))\n parent_list = parent_shared_ngrams(filenames, gram_size=gram_size)\n for _, _, speaker, ngram in vocab:\n if speaker == \"MOT\" and ngram in parent_list:\n counts[ngram] += 1\n return counts.most_common(10)", "def test_2d_freq():\n dic,data = ng.pipe.read(\"common_data/2d_pipe/test.ft2\")\n assert data.shape == (2048, 4096)\n assert data.dtype == 'float32'\n assert round(data[0,1],2) == 1601.83\n assert round(data[10,22],2) == 3079.44\n write_readback(dic,data)\n check_ppm_limits(dic,data,0,[174.84, 65.21])\n check_ppm_limits(dic,data,1,[253.90, -143.80])", "def threshold_and_combine_files(files: list, rf_threshold: float, rf_threshold_tolerance: float):\n\n headers = []\n thresholded = None\n fs = constants.FILE_FS\n frame_numbers = []\n\n # read all the RF files\n for filename in files:\n print(\"processing {}\".format(filename))\n header, rows = read_rf_file(filename)\n headers.append(header)\n\n new_thresholded = np.zeros((3, len(rows[\"frame_num\"]), headers[0][\"nfft\"]), dtype=np.uint8)\n\n new_thresholded[0, :, :] = np.squeeze(np.where(rows[\"fft_bins\"] >= rf_threshold-rf_threshold_tolerance, 1, 0))\n new_thresholded[1, :, :] = np.squeeze(np.where(rows[\"fft_bins\"] >= rf_threshold, 1, 0))\n new_thresholded[2, :, :] = np.squeeze(np.where(rows[\"fft_bins\"] >= rf_threshold+rf_threshold_tolerance, 1, 0))\n\n if thresholded is None:\n thresholded = new_thresholded\n else:\n thresholded = np.maximum(thresholded, new_thresholded)\n\n frame_numbers = rows[\"frame_num\"]\n\n thresholded = thresholded.astype('uint8')\n\n LOGGER.info(\"dims: %i, %i, %i\", 3, frame_numbers[-1]+1, thresholded.shape[2])\n\n zero_filled = np.zeros((3, frame_numbers[-1]+1, thresholded.shape[2]), dtype=np.uint8)\n\n for ind, frame_num in enumerate(frame_numbers):\n zero_filled[:, frame_num, :] = thresholded[:, ind, :]\n\n return headers, zero_filled", "def filter(args):\n p = OptionParser(filter.__doc__)\n p.add_option(\n \"--less\",\n default=False,\n 
action=\"store_true\",\n help=\"filter the sizes < certain cutoff [default: >=]\",\n )\n p.set_outfile()\n\n opts, args = p.parse_args(args)\n\n if len(args) != 2:\n sys.exit(not p.print_help())\n\n fastafile, cutoff = args\n try:\n cutoff = int(cutoff)\n except ValueError:\n sys.exit(not p.print_help())\n\n f = Fasta(fastafile, lazy=True)\n\n fw = must_open(opts.outfile, \"w\")\n for name, rec in f.iteritems_ordered():\n\n if opts.less and len(rec) >= cutoff:\n continue\n\n if (not opts.less) and len(rec) < cutoff:\n continue\n\n SeqIO.write([rec], fw, \"fasta\")\n fw.flush()\n\n return fw.name", "def test_filter_sff_file(self):\r\n\r\n try:\r\n fh = open(self.tiny_test)\r\n except IOError:\r\n self.fail(\r\n \"Could not open test file %s. Skipping test\" %\r\n self.tiny_test)\r\n\r\n # With no filters all flowgram should be in out file\r\n flowgrams, header = lazy_parse_sff_handle(fh)\r\n filter_list = []\r\n fd, out_file_name = mkstemp(\r\n prefix=\"test_filter_sff_file\",\r\n suffix=\".sff.txt\")\r\n close(fd)\r\n out_fh = open(out_file_name, \"w\")\r\n l = filter_sff_file(flowgrams, header, filter_list, out_fh)\r\n remove(out_file_name)\r\n fh.close()\r\n self.assertEqual(l, 114)\r\n\r\n # With good filters some should survive\r\n fh = open(self.tiny_test)\r\n flowgrams, header = lazy_parse_sff_handle(fh)\r\n filter_list = [lambda f:within_length(f, 100, 300)]\r\n fd, out_file_name = mkstemp(\r\n prefix=\"test_filter_sff_file\",\r\n suffix=\".sff.txt\")\r\n close(fd)\r\n out_fh = open(out_file_name, \"w\")\r\n l = filter_sff_file(flowgrams, header, filter_list, out_fh)\r\n remove(out_file_name)\r\n fh.close()\r\n self.assertEqual(l, 112)\r\n\r\n # With strong filters nothing should be in\r\n fh = open(self.tiny_test)\r\n flowgrams, header = lazy_parse_sff_handle(fh)\r\n filter_list = [lambda f:within_length(f, 0, 0)]\r\n fd, out_file_name = mkstemp(\r\n prefix=\"test_filter_sff_file\",\r\n suffix=\".sff.txt\")\r\n close(fd)\r\n out_fh = open(out_file_name, \"w\")\r\n l = filter_sff_file(flowgrams, header, filter_list, out_fh)\r\n remove(out_file_name)\r\n self.assertEqual(l, 0)", "def main():\n # Create a new instance of a high pass filter, using the default constructor\n hpf = GRT.HighPassFilter()\n\n # Set the cutoff frequency of the filter to 2.0Hz\n hpf.setCutoffFrequency(2, 1.0 / 1000.0)\n\n # Create some variables to help generate the signal data\n num_seconds = 6 # The number of seconds of data we want to generate\n t = 0 # This keeps track of the time\n t_step = 1.0 / 1000.0 # This is how much the time will be updated at each iteration in the for loop\n\n # Add the freq rates\n # The first value is the time in seconds and the second value is the frequency that should be set at that time\n freq_rates = {0: 0.1, 1: 0.5, 2: 1, 3: 2, 4: 4, 5: 8, 6: 16}\n\n # Generate the signal and filter the data\n for i in range(num_seconds * 1000):\n # Check to see if we should update the freq rate to the next value\n # Set the new frequency value\n freq = [v for (k, v) in freq_rates.items() if k > (i / 1000)][0]\n\n # Generate the signal\n signal = math.sin(t * math.tau * freq)\n\n # Filter the signal\n filtered_value = hpf.filter(signal)\n\n # Print the signal and the filtered data\n print(\"%.3f %.3f %.3f\" % (freq, signal, filtered_value))\n\n # Update the t\n t += t_step\n\n # Save the HighPassFilter settings to a file\n hpf.save(\"HighPassFilterSettings.grt\")\n\n # We can then load the settings later if needed\n hpf.load(\"HighPassFilterSettings.grt\")", "def read_results(results_file, 
threshold=0.0):\n patterns = []\n with open(results_file, 'r') as input_file:\n patterns_reader = csv.reader(input_file)\n next(input_file)\n for line in patterns_reader:\n #if float(line[6]) >= threshold:\n patterns.append(line[0])\n print(\"Read {:d} patterns.\".format(len(patterns)))\n return patterns", "def clean_spec(input_filepath, output_filepath):\n file_list = glob.glob(input_filepath + '/*')\n file_list.sort()\n features_set = []\n with ShadyBar(f\"Extracting features {input_filepath}...\", max=len(file_list)) as bar:\n for f in file_list:\n interim_data = np.loadtxt(f, delimiter=',', skiprows=1)\n features_set.append(linear_int(interim_data[:, 0], interim_data[:, 1]))\n\n bar.next()\n\n save_feat_files(np.array(features_set), os.path.join(output_filepath, \"peaks_features.pkl\"))", "def frequency(self):\n inigen = IniGen()\n fields = algorithm_fields.algorithms['frequency']\n\n output_uuid_map = {}\n\n # set up global parameters\n algorithm_path = fields['path']\n enabled = \"True\"\n inigen.emit_global(algorithm_path, enabled)\n\n for label in self.uuid_map:\n if label == 'LSTATE':\n distillate_label = label\n else:\n distillate_label = get_distillate_label([label])\n if 'ANG' not in distillate_label:\n continue\n\n # header\n inigen.emit_run_header(label, CHUNKING, MINTIME, MAXTIME)\n\n # body\n dep_label = label\n dep_name = fields['deps'][0]\n dep_uuid = self.uuid_map[label]\n deps = [[dep_label, dep_name, dep_uuid]]\n\n param_section_name = fields['params'][0]\n param_section_value = \"Production/{0}/{1}/{2}\".format(self.location, self.name, distillate_label)\n param_name_name = fields['params'][1]\n param_name_value = \"FREQ\"\n params = [[param_section_name, param_section_value],\n [param_name_name, param_name_value]]\n\n outputs = fields['outputs']\n\n emitted = inigen.emit_run_body(deps, params, outputs)\n\n output_uuid_map[label+\"_1-SEC\"] = emitted[-3][-36:]\n output_uuid_map[label+\"_C37\"] = emitted[-2][-36:]\n\n filename = \"{0}/FREQ_{1}.ini\".format(self.dirname, self.name)\n inigen.generate_file(filename)\n return output_uuid_map", "def collect_to_file(sensor):\n temperature_settings = settings.SENSORS.get(\"TEMPERATURE\")\n\n frequency = float(temperature_settings[1][1])\n period = float( temperature_settings[2][1])\n last_collection_time = temperature_settings[4][1]\n\n while 1: \n s = []\n count = 0 \n logger.info(\"collecting\")\n \n while(count <= period):\n s.append(os.path.join(time.strftime(\"%Y_%j_%H_%M_%S_\"),str(sensor.readTemperature())))\n time.sleep(1)\n count = count + 1\n print count\n \n write_to_file(s)\n logger.info(\"done counting\")\n last_collection_time = datetime.datetime.utcnow()\n logger.info( last_collection_time)\n time.sleep(frequency)\n\n return True" ]
[ "0.6244121", "0.615341", "0.60893506", "0.6055009", "0.5997631", "0.58850914", "0.58467615", "0.5836657", "0.5833301", "0.5826538", "0.5804678", "0.5772488", "0.5745384", "0.5713081", "0.5678326", "0.56347597", "0.5622866", "0.5610701", "0.56093115", "0.55927515", "0.5584923", "0.5583831", "0.5547366", "0.548933", "0.54763305", "0.54644006", "0.5449495", "0.544901", "0.54463154", "0.54180276" ]
0.6175935
1
Parse and sort clumped file
def parse_sort_clump(fn, allsnps):
    # make sure allsnps is a series
    allsnps = pd.Series(allsnps)
    try:
        df = pd.read_table(fn, delim_whitespace=True)
    except FileNotFoundError:
        spl = fn.split('.')
        if spl[0] == '':
            idx = 1
        else:
            idx = 0
        fn = '.'.join(np.array(spl)[[idx, 1 + idx, -1]])
        if idx == 1:
            fn = '.%s' % fn
        df = pd.read_table(fn, delim_whitespace=True)
    SNPs = df.loc[:, 'SP2']
    tail = [x.split('(')[0] for y in SNPs for x in y.split(',')
            if x.split('(')[0] != 'NONE']
    full = pd.DataFrame(df.SNP.tolist() + tail, columns=['SNP'])
    full = full[full.SNP.isin(allsnps)]
    rest = allsnps[~allsnps.isin(full.SNP)]
    df = pd.concat((full.SNP, rest)).reset_index(drop=False)
    df.rename(columns={'index': 'Index'}, inplace=True)
    return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _out_order(self, fname):\r\n # t = 1\r\n orderDict = {}\r\n order = []\r\n readWells = False\r\n lastBlock = False\r\n addOrder = False\r\n with open(fname, \"r\") as fp:\r\n for line in fp:\r\n item = line.split()\r\n if readWells:\r\n if lastBlock:\r\n line = line.split('++')[0]\r\n addOrder = True\r\n lastBlock = False\r\n item = list(map(str.strip, line.split('+')))\r\n item = [e.split() for e in list(filter(None, item))]\r\n order.extend([w[1] for w in item])\r\n readWells = False\r\n if addOrder:\r\n orderDict[t] = order\r\n order = []\r\n addOrder = False\r\n # t += 1\r\n elif len(item) > 0:\r\n head = ''.join(item[2:])\r\n if 'GEMFIELDSUMMARY' in head:\r\n t = item[1]\r\n\r\n elif 'No.' in line and 'Name' in line and '+' in line:\r\n if '++' in line:\r\n lastBlock = True\r\n readWells = True\r\n next(fp)\r\n continue\r\n return orderDict", "def post_process_output_file():\n parsed_data = []\n unparseable_data = []\n\n with open('../output/part-00000', 'r') as input_file:\n for line in input_file:\n line = line.strip()\n try:\n csv_splits = line.split(',')\n csv_splits[0] = int(csv_splits[0])\n # parsed_data is a list of lists\n parsed_data.append(csv_splits)\n except ValueError:\n unparseable_data.append(line)\n parsed_data.sort()\n\n with open('../output/titanic_test_data.csv', 'w') as output_file:\n # start with lines that couldn't be parsed\n # hopefully this will only be the original header\n for line in unparseable_data:\n output_file.write(\"%s\\n\" % line)\n for line in parsed_data:\n output_file.write(\"%d,%s\\n\" % (line[0], line[1]))", "def process(filename):\r\n x = open(filename, \"r\")\r\n words_from_songs=[]\r\n for line in x:\r\n array =line.split(\":\")\r\n songid= array[0]\r\n lyrics=array[1]\r\n lyrics=lyrics.replace(\"\\n\", \"\")\r\n lyrics=lyrics.split(\" \")\r\n for i in range(len(lyrics)):\r\n words_from_songs.append((lyrics[i],songid))\r\n words_from_songs=radixSortNumbers(words_from_songs)\r\n max1 = longestWord(words_from_songs)\r\n counting = []\r\n for _ in range(max1+1):\r\n counting.append([])\r\n for k in range(len(words_from_songs)-1,0,-1):\r\n counting[len(words_from_songs[k][0])].append(words_from_songs[k])\r\n new_list = []\r\n # for i in range(len(counting)-1,0,-1):\r\n # for k in range(len(counting[i])):\r\n # new_list.insert(0,counting[i][k])\r\n # for i in range(len(counting) - 1, 0, -1):\r\n # new_list = countingSort(new_list, i - 1)\r\n\r\n for i in range(len(counting)-1,0,-1):\r\n for k in range(len(counting[i])):\r\n new_list.insert(0,counting[i][k])\r\n new_list = countingSort(new_list,i-1)\r\n y = open(\"sorted_words.txt\",\"w\")\r\n for i in range(len(new_list)):\r\n y.write(str(new_list[i][0])+\":\"+str(new_list[i][1]+\"\\n\"))", "def sort(self):\n \n ct=[]\n rt=[]\n wr=[]\n # search for tags that aren't in the right position\n for i in range(len(self.contigs)):\n c = self.contigs[i]\n if c.wa:\n if not self.wa:\n self.wa=[]\n self.wa.extend(c.wa)\n if c.ct:\n newcts=[ct_tag for ct_tag in c.ct if ct_tag.name!=c.name]\n map(self.contigs[i].ct.remove,newcts)\n ct.extend(newcts)\n for j in range(len(c.reads)):\n r = c.reads[j]\n if r.rt:\n newrts=[rt_tag for rt_tag in r.rt if rt_tag.name!=r.rd.name]\n map(self.contigs[i].reads[j].rt.remove,newrts)\n rt.extend(newrts)\n if r.wr:\n newwrs=[wr_tag for wr_tag in r.wr if wr_tag.name!=r.rd.name]\n map(self.contigs[i].reads[j].wr.remove,newwrs)\n wr.extend(newwrs)\n # now sort them into their proper place\n for i in range(len(self.contigs)):\n c = self.contigs[i]\n for ct_tag in ct:\n if 
ct_tag.name==c.name:\n if self.contigs[i].ct is None:\n self.contigs[i].ct=[]\n self.contigs[i].ct.append(ct_tag)\n if rt or wr:\n for j in range(len(c.reads)):\n r = c.reads[j]\n for rt_tag in rt:\n if rt_tag.name==r.rd.name:\n if self.contigs[i].reads[j].rt is None:\n self.contigs[i].reads[j].rt=[]\n self.contigs[i].reads[j].rt.append(rt_tag)\n for wr_tag in wr:\n if wr_tag.name==r.rd.name:\n if self.contigs[i].reads[j].wr is None:\n self.contigs[i].reads[j].wr=[]\n self.contigs[i].reads[j].wr.append(wr_tag)", "def sort_music_data(sort_by = None):\n for lists in read_file():\n print(lists)\n pass", "def mergeAllSortedFiles():\n entries = os.listdir('output/Temp/input')\n for entry in entries:\n arr = []\n with open(\"output/Temp/input/\" + entry) as file:\n for line in file:\n line = int(line.strip())\n arr.append(line)\n mergeSortedToFile(arr)", "def parse(self):\n count = [] #count for trainset_size\n with open(self.file) as f:\n for line in f:\n data = line.split(\" \")[0]\n filename = data[:-1]\n id = data[-1:]\n if (filename not in count):\n count.append(filename)\n\n acid = \"\"\n structure = \"\"\n with open(self.directory+\"/\"+filename+\".dssp\") as dssp:\n for i in range(28): #skip lines we don't need\n next(dssp)\n for line in dssp:\n if (line[9] != \" \" and line[10] == \" \" and line[11] == id and line[13] not in (\"*\",\"!\",\"B\",\"Z\",\"X\")):\n #amino acid sequence\n if (line[13].islower()):\n acid += \"C\"\n else:\n acid += line[13]\n\n #sequence of the structure\n if (line[16] in (\"H\",\"G\",\"I\")):\n structure += \"H\"\n elif (line[16] in (\"E\",\"B\")):\n structure += \"E\"\n else:\n structure += \"C\"\n\n if (len(count) > self.trainset_size):\n self.testset.append((acid,structure))\n else:\n self.trainset.append((acid,structure))", "def sort(file):\n fileHandle = open(file, 'r')\n lines = fileHandle.readlines()\n fileHandle.close()\n lines.sort()\n fileHandle = open(file, 'w')\n for line in lines:\n fileHandle.write(line)\n fileHandle.close()", "def preprocessBed(fname):\n res = {}\n iter = parseBed(fname)\n for i in iter:\n res.setdefault(i.chr,[])\n res[i.chr].append(i)\n for k in res.keys():\n res[k].sort()\n return res", "def main():\n try:\n fname = sys.argv[1]\n f = open(fname, 'r')\n except IndexError:\n f = sys.stdin\n\n reader = Reader()\n for line in f:\n reader.getline(line)\n for key in sorted(reader.d.keys(), key=str.lower):\n sys.stdout.writelines(reader.diffsort(key))", "def TurboSort(input_folder, output_file):\r\n\r\n atom_dict = {}\r\n for linelist in os.listdir(input_folder):\r\n file_line = 1\r\n with open(os.path.join(input_folder, linelist), \"r\") as fin:\r\n lines = fin.readlines()\r\n while file_line < len(lines):\r\n line_index = file_line - 1\r\n header, atomic_sym = lines[line_index], lines[line_index + 1]\r\n atomic_lines = int(header.split()[4])\r\n start = line_index + 2\r\n end = start + atomic_lines\r\n splice = lines[start: end]\r\n file_line = end + 1\r\n if atomic_sym in atom_dict.keys():\r\n atomic_lines_previous = int(atom_dict[atomic_sym][0].split()[4])\r\n atomic_lines += atomic_lines_previous\r\n start_line, end_line_previous = atom_dict[atomic_sym][0][:27], atom_dict[atomic_sym][0][27:]\r\n end_line_updated = end_line_previous.replace(str(atomic_lines_previous), str(atomic_lines))\r\n if len(end_line_updated) > 10:\r\n diff = len(end_line_updated) - 10\r\n end_line_updated = end_line_updated[diff:]\r\n atom_dict[atomic_sym][0] = start_line + end_line_updated\r\n elif len(end_line_updated) < 10:\r\n diff = 10 - 
len(end_line_updated)\r\n atom_dict[atomic_sym][0] = start_line + \" \"*diff + end_line_updated\r\n else:\r\n atom_dict[atomic_sym][0] = start_line + end_line_updated\r\n # Sorts each element by wavelength\r\n atom_dict[atomic_sym].extend(splice)\r\n temp = atom_dict[atomic_sym][2:]\r\n temp.sort()\r\n atom_dict[atomic_sym] = atom_dict[atomic_sym][:2]\r\n atom_dict[atomic_sym].extend(temp)\r\n else:\r\n header = [header, atomic_sym]\r\n header.extend(splice)\r\n atom_dict[atomic_sym] = header\r\n\r\n # Sorts each element block by atomic number\r\n vals = list(atom_dict.values())\r\n for val in vals:\r\n \"\\n\".join(val)\r\n vals.sort()\r\n lines = []\r\n for val in vals:\r\n lines.extend(val)\r\n\r\n with open(output_file, \"w\") as fout:\r\n for line in lines:\r\n fout.write(line)", "def parse_file():\r\n if len(sys.argv) < 2:\r\n print(\"Need a file\")\r\n sys.exit(1)\r\n\r\n data_input = open(sys.argv[1])\r\n\r\n data = []\r\n for line in data_input: #for each of these lines\r\n if(len(line) == 0): pass #skip empty lines\r\n split_within_line = line.split(\"\\t\") #split by tabs\r\n new_datum = Datum(split_within_line[0], split_within_line[1], split_within_line[2]) #feed splits into a Datum object\r\n data.append(new_datum) #add Datum to list of data\r\n\r\n #make a list of characters representing the issues\r\n for i in range(len(data[0].dat_votes)-1): #from 0 to the end of the list of issues from the first datum\r\n original_issues.append(chr(i+97))\r\n\r\n\r\n i = 0\r\n tuning_set = []\r\n training_set = []\r\n num_reps = len(data)\r\n for i in range(0, num_reps-1):\r\n if (i % 4 == 0):\r\n tuning_set.append(data[i])\r\n else:\r\n training_set.append(data[i])\r\n\r\n pair = _count_parties(training_set)\r\n\r\n unpruned = induce_node_tree(training_set, original_issues,\"D\",-1)\r\n # print(\"\\n#### UNPRUNED TREE ####\\n\")\r\n # print(unpruned)\r\n\r\n unprune_acc = calc_accuracy(unpruned, tuning_set)\r\n\r\n pruned = prune_tree(unpruned, tuning_set)\r\n print(\"\\n#### PRUNED TREE ####\\n\")\r\n print(pruned)\r\n\r\n acc = calc_accuracy(pruned, training_set)\r\n\r\n # print(\"Accuracy of unpruned tree with tuning_set: \" + str(unprune_acc))\r\n print(\"Accuracy of pruned tree with tuning_set: \" + str(acc))\r\n leave_one_out_cross_validation(data)", "def sort_data(input_file): \r\n with open(input_file,\"r\") as input_file: \r\n data = input_file.readlines() \r\n row_data = [] \r\n for line in data: \r\n row_data.append(line.split(', ')) \r\n return row_data", "def ReadInLASTFile(FileName):\n FinalGroups = []\n with open(FileName, 'r') as f:\n SmalllList = []\n for line in f:\n if line.startswith('#'):\n pass\n else:\n CleanLine = line.strip().split()\n if len(CleanLine) != 0 :\n SmalllList.append(CleanLine)\n else:\n FinalGroups.append(SmalllList)\n SmalllList = []\n\n return FinalGroups", "def sort(self):\n\t\twith self.AutoSplitlines():\n\t\t\tself.lines = sorted(self.lines)", "def gff2sort2(gff, pathgff, pathsort):\n outFileName = pathsort + gff[:gff.rfind('.')] + '.sort2'\n inputFile = open(pathgff + gff, 'r')\n open(outFileName, 'w').close()\n outputFile = open(outFileName, 'w')\n for line in inputFile:\n # grab gene info from each line if it's longest and mRNA strand and output to sort2 file\n if 'mRNA' in line and 'longest=1' in line:\n lineInList = line.split()\n parserList = lineInList[-1].split(';')\n lineOutputList = [parserList[1].replace('Name=',''), lineInList[0].replace('-', 'S'), lineInList[3],\n lineInList[4]]\n outputFile.write('%s %s %s %s\\n' % 
tuple(lineOutputList))\n\n inputFile.close()\n outputFile.close()", "def main(iterator):\n\n entries = OrderedDict()\n for line in iterator:\n\n if \"START\" in line:\n entries.update({\"start_time\":int(re.search(r'\\d+', line).group())})\n if \"STOP\" in line:\n entries.update({\"end_time\":int(re.search(r'\\d+', line).group())})\n if \"NUMERIC SORT\" in line and \"Done with\" not in line:\n #print(float(re.search(r'\\d+', line).group()))\n entries.update({\"numeric_sort\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n entries.update({\"numeric_sort_abs_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"numeric_sort_rel_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"numeric_sort_num_runs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"numeric_sort_num_arrs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"numeric_sort_arr_size\":int(re.search(r'\\d+', line).group())})\n\n if \"STRING SORT\" in line and \"Done with\" not in line:\n #print(float(re.search(r'\\d+', line).group()))\n entries.update({\"string_sort\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"string_sort_abs_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"string_sort_rel_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"string_sort_num_runs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"string_sort_num_arrs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"string_sort_arr_size\":int(re.search(r'\\d+', line).group())})\n\n if \"STRING SORT\" in line and \"Done with\" not in line:\n #print(float(re.search(r'\\d+', line).group()))\n entries.update({\"string_sort\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"string_sort_abs_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"string_sort_rel_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"string_sort_num_runs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"string_sort_num_arrs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"string_sort_arr_size\":int(re.search(r'\\d+', line).group())})\n\n if \"BITFIELD\" in line and \"Done with\" not in line:\n #print(float(re.search(r'\\d+', line).group()))\n entries.update({\"bitfield\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"bitfield_abs_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"bitfield_rel_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n 
entries.update({\"bitfield_num_runs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"bitfield_ops_arr_size\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"bitfield_arr_size\":int(re.search(r'\\d+', line).group())})\n\n if \"FP EMULATION\" in line and \"Done with\" not in line:\n #print(float(re.search(r'\\d+', line).group()))\n entries.update({\"fp_emul\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"fp_emul_abs_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"fp_emul_rel_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"fp_emul_num_runs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"fp_emul_num_loops\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"fp_emul_arr_size\":int(re.search(r'\\d+', line).group())})\n\n if \"FOURIER\" in line and \"Done with\" not in line:\n #print(float(re.search(r'\\d+', line).group()))\n entries.update({\"fourier\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"fourier_abs_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"fourier_rel_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"fourier_num_runs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"fourier_num_coef\":int(re.search(r'\\d+', line).group())})\n\n if \"ASSIGNMENT\" in line and \"Done with\" not in line:\n #print(float(re.search(r'\\d+', line).group()))\n entries.update({\"assignment\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n entries.update({\"assignment_abs_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"assignment_rel_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"assignment_num_runs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"assignment_num_arrs\":int(re.search(r'\\d+', line).group())})\n\n if \"IDEA\" in line and \"Done with\" not in line:\n #print(float(re.search(r'\\d+', line).group()))\n entries.update({\"idea\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"idea_abs_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"idea_rel_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"idea_num_runs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"idea_arr_size\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"idea_num_loops\":int(re.search(r'\\d+', line).group())})\n \n if \"HUFFMAN\" in line and \"Done with\" not in line:\n #print(float(re.search(r'\\d+', 
line).group()))\n entries.update({\"huffman\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"huffman_abs_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"huffman_rel_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"huffman_num_runs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"huffman_arr_size\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"huffman_num_loops\":int(re.search(r'\\d+', line).group())})\n\n\n if \"NEURAL NET\" in line and \"Done with\" not in line:\n #print(float(re.search(r'\\d+', line).group()))\n entries.update({\"nnet\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"nnet_abs_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"nnet_rel_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"nnet_num_runs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"nnet_num_loops\":int(re.search(r'\\d+', line).group())})\n\n if \"LU DECOMPOSITION\" in line and \"Done with\" not in line:\n #print(float(re.search(r'\\d+', line).group()))\n entries.update({\"lu_decomp\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"lu_decomp_abs_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"lu_decomp_rel_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"lu_decomp_num_runs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"lu_decomp_num_arrs\":int(re.search(r'\\d+', line).group())})\n\n if \"libc\" in line and \"Baseline\" not in line and \"*\" not in line:\n line = next(iterator)\n \n entries.update({\"memory_index\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"integer_index\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"float_index\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n\n #print(entries)\n return entries", "def parse_file():\n\tfile_lines = []\n\n\t## For each line in the file, if it's not empty, store it\n\tfor line in fileinput.input():\n\t\tif len(line) > 1:\n\t\t\tfile_lines.append(line.strip())\n\t\n\trun_algorithms(file_lines)", "def _readAndCombine(inputBed, withinBp):\n junct = {}\n\n # collapse a \n count = 0\n for line in open(inputBed):\n count += 1\n #if count % 100000==0: \n # print count \n if line.startswith(\"track\"):\n #out.write(line.strip()) \n #out.write(\" useScore=1\\n\") \n continue\n\n [chr, start, stop, name, score, strand, thStart, thStop, rgb, blockCount, blockSizes, blockStarts] = line.split(\"\\t\")\n score = float(score)\n if not junct.has_key(chr):\n junct[chr] = {}\n\n if int(blockCount) != 2:\n #print \"Illegal line does not 
have 2 blocks\" \n #print line \n continue\n\n start = int(start)\n stop = int(stop)\n [size1, size2] = [int(x) for x in blockSizes.split(\",\")[:2]]\n [start1, start2] = [int(x) for x in blockStarts.split(\",\")[:2]]\n leftEdge = start + size1\n rightEdge = start + start2 # start2 is relative to chr start \n intronLength = rightEdge - leftEdge\n\n toCombine = []\n for (other) in junct[chr].keys():\n (otherMinLeft, otherMaxLeft, otherMinRight, otherMaxRight, otherLength) = other\n if otherLength != intronLength:\n continue\n\n if otherMaxLeft < (leftEdge-withinBp) or otherMinLeft > (leftEdge+withinBp):\n continue\n\n if otherMaxRight < (rightEdge-withinBp) or otherMinRight > (rightEdge+withinBp):\n continue\n\n toCombine.append(other)\n\n allLines = [ (score, line, leftEdge, rightEdge) ]\n minLeft = maxLeft = leftEdge\n minRight = maxRight = rightEdge\n for (other) in toCombine:\n (otherMinLeft, otherMaxLeft, otherMinRight, otherMaxRight, intronLength) = other\n minLeft = min(minLeft, otherMinLeft)\n maxLeft = max(maxLeft, otherMaxLeft)\n minRight = min(minRight, otherMinRight)\n maxRight = max(maxRight, otherMaxRight)\n\n allLines.extend(junct[chr][other])\n del junct[chr][other]\n\n junct[chr][ (minLeft, maxLeft, minRight, maxRight, intronLength) ] = allLines\n\n return junct", "def process(fileglob):\n\n filepaths = glob.glob(fileglob)\n\n for filepath in filepaths:\n datum_list = []\n aggregated_data = {'user_id': None, 'n': 0, 'sum': 0, 'min': 0, 'max': 0}\n\n for parsed_row in extract_csv_data(filepath):\n\n if aggregated_data['user_id'] is None:\n aggregated_data['user_id'] = parsed_row['user_id']\n\n if aggregated_data['user_id'] != parsed_row['user_id']:\n # We want earliest 'date' datum first.\n sorted_datum = sorted(datum_list, key=lambda k: k['date'])\n\n for datum in sorted_datum:\n aggregated_data = update_aggregated_data(aggregated_data, datum)\n\n aggregated_data = finalize_aggregated_data(aggregated_data)\n\n # Dump current stack of user info to output file.\n dump_aggregated_data(aggregated_data, output_filepath(filepath))\n\n # Re-initialize\n datum_list = []\n aggregated_data = {'user_id': parsed_row['user_id'], 'n': 0, 'sum': 0, 'min': 0, 'max': 0}\n\n \"\"\"\n We are still on same user_id so just append to datum_list.\n \"\"\"\n datum_list.append(parsed_row)\n\n\n \"\"\"\n At end of csv file, roll-up and dump last chunk of user_data.\n \"\"\"\n\n sorted_datum = sorted(datum_list, key=lambda k: k['date'])\n\n for datum in sorted_datum:\n aggregated_data = update_aggregated_data(aggregated_data, datum)\n\n aggregated_data = finalize_aggregated_data(aggregated_data)\n\n dump_aggregated_data(aggregated_data, output_filepath(filepath))", "def get_sorted_sequences(filename):\n f = open(filename, \"r\", encoding = \"utf-8\")\n messages = []\n index = 0\n for line in f:\n # if line starts with a date, append it to list of messages\n if re.match(r\"\\d+/\\d+/\\d+, \\d+:\\d+\", line):\n messages.append(line)\n index += 1\n # otherwise, the line is a result of typing a new line and \n # it is therefore appended to the last message\n else:\n messages[index - 1] += line\n # sort messages by time of receival\n messages.sort(key = get_date_from_msg)\n\n return messages", "def sort_files(file_list, set_name, time_freq, normalise):\n \n out_dict = {}\n order = []\n \n if file_list:\n\tfor item in file_list:\n key = tuple(item[0:3])\n window = int(item[2])\n out_dict[key] = nio.InputData(item[0], item[1], runave=window, normalise=normalise)\n out_dict[key].tag = item[3]\n 
out_dict[key].window = window\n out_dict[key].set = set_name\n out_dict[key].datetimes = runave_time_correction(out_dict[key].datetime_axis()[:], time_freq)\n order.append(key) \n else:\n outdict = None\n\torder = None\n\n return out_dict, order", "def mergeSortedToFile(arr):\n # list the elements of sorted text file\n # print(arr)\n sortedFileList = []\n with open(OUTPUT_SORTED_FILE) as file:\n for line in file:\n line = int(line.strip())\n sortedFileList.append(line)\n l1 = len(arr)\n l2 = len(sortedFileList)\n l3 = l1 + l2\n m = 0\n i = 0\n j = 0\n out2 = [0] * l3\n while (i < l1 and j < l2):\n if (arr[i] < sortedFileList[j]):\n out2[m] = arr[i]\n m += 1\n i += 1\n else:\n out2[m] = sortedFileList[j]\n m += 1\n j += 1\n while (i < l1):\n out2[m] = arr[i]\n m += 1\n i += 1\n while (j < l2):\n out2[m] = sortedFileList[j]\n m += 1\n j += 1\n # writing merged sorted output list to tht output file\n with open(OUTPUT_SORTED_FILE, \"w\") as file:\n for item in out2:\n file.write('%s\\n' % item)", "def parse_trace_file(filename):\n f = open(filename, 'r')\n trace_data = f.read()\n\n messages = parse_atm_messages(trace_data) + parse_host_messages(trace_data)\n f.close()\n messages.sort()\n\n return messages", "def sortClassificationReport(classificationReport):\n tmp = classificationReport.split(\"\\n\")\n sortedReport = \"\\n\".join(tmp[:2]) + \"\\n\"\n catValues = []\n for line in tmp[2:-5]:\n items = re.split(r'(\\s+)', line)\n newList = [''.join(items[:-8]), ''.join(items[-8:-6]),\n ''.join(items[-6:-4]), ''.join(items[-4:-2]),\n ''.join(items[-2:])]\n\n catValues.append(newList)\n\n catValues = sorted(catValues, key=lambda v: int(v[4]), reverse=True)\n\n for repList in catValues:\n sortedReport += (''.join(repList) + \"\\n\")\n sortedReport += \"\\n\".join(tmp[-5:])\n \n return sortedReport", "def readGrouping(infile, abschnitt=None):\n\n def readItems(cfg, section):\n for num, text in cfg.items(section):\n num = pat_number.findall(num)\n text = ' '.join(text.splitlines())\n num = map(lambda x: x.upper(), num)\n yield num, text\n\n cfg = ConfigParser()\n cfg.readfp(open(infile))\n\n if abschnitt:\n sections = [abschnitt]\n else:\n sections = cfg.sections()\n\n for section in sections:\n yield None, section\n for num, text in sorted(readItems(cfg, section)):\n num = map(lambda n: n.lstrip('0'), num)\n yield num, text", "def read_grp(fname):\n global DAYS\n uint_types = [DAYS,\n 'Current crop type', \n 'Current residue on ground type', \n 'Previous residue on ground type', \n 'Old residue on ground type', \n 'Current dead root type', \n 'Previous dead root type', \n 'Old dead root type']\n\n meta = {}\n data = None\n header = []\n\n meta['fname'] = fname\n meta['id'] = ''.join([L for L in fname if L in '0123456789'])\n \n fid = open(fname, 'rb')\n for i, line in enumerate(fid.readlines()):\n line_as_list = line.strip().split()\n\n if len(line_as_list) == 0:\n continue\n\n elif line_as_list[0][0] == '#':\n continue\n\n elif line_as_list[0] == 'int':\n try:\n meta[line[1]] = int(line[2])\n except:\n pass\n \n elif line_as_list[0] == 'float':\n try:\n meta[line[1]] = float(line[2])\n except:\n pass\n\n elif line_as_list[0] == 'char':\n continue\n\n elif line_as_list[0][0] == '{':\n cname = line.strip()[1:-1].replace(r'kg/m', r'kg*m**-1') \\\n .replace(r'kg/m**2', r'kg*m**-2') \\\n .replace(r'kg/m**3', r'kg*m**-3') \\\n .replace(r'kg/m**4', r'kg*m**-4') \\\n .replace(r'mm/hr', r'mm*hr**-1') \\\n .replace(r'mm/h', r'mm*hr**-1') \\\n .replace(r'm/day', r'm*day**-1') \\\n .replace(r'g/cc', 
r'g*cc**-1') \\\n .replace(r'kg-s/m**4', r'kg-s*m**-4') \\\n .replace(r's/m', r's*m**-1') \\\n .replace(r'Irrigation_volume_supplied/unit_area',\n r'Irrigation_volume_supplied*unit_area**-1')\n header.append(cname)\n\n else:\n if len(header) == len(line_as_list):\n \n # if we are here and data == None we need to initialize the data dictionary\n if data == None:\n data = {}\n for cname in header:\n typecode = ('f', 'h')[any([cname==s for s in uint_types])]\n data[cname] = array.array(typecode)\n\n for (cname, string) in zip(header, line_as_list):\n if any([cname==s for s in uint_types]):\n value = int(string)\n else:\n value = float(string)\n\n if cname == DAYS:\n\n if value in set(data[DAYS]):\n break\n\n data[cname].append(value)\n\n else:\n raise Exception('Failed to parse line %i, unexpected number of columns.'%(i+1))\n \n fid.close()\n\n # pack the table data into numpy arrays\n for (cname, v) in data.items():\n dtype = (np.float32, np.int16)[any([cname==s for s in uint_types])]\n data[cname] = np.array(v, dtype=dtype)\n\n return (meta, data)", "def stochastic_filesort(stochastic_file_csv, taw_tup, var_list, model_dates, runs, output_root):\n\n print 'doing a file sort on the csv created by stochastic file finder'\n\n main_dictionary = {}\n\n taw_list = make_taw_list(taw_tup)\n\n open_read = time.time()\n rzsm_lst = []\n ro_lst = []\n eta_lst = []\n infil_lst = []\n print 'opening'\n with open(stochastic_file_csv, 'r') as rfile:\n print 'iterating on lines'\n line_start = time.time()\n\n for j, line in enumerate(rfile):\n line_item = line.split(',')\n\n numpy_path = line_item[0]\n string_date = line_item[1][:-1]\n numpy_date = datetime.strptime(string_date, '%Y-%m-%d')\n\n numpy_filename = os.path.split(numpy_path)[1]\n # print numpy_filename\n # print j, line\n if 'rzsm' in numpy_filename:\n rzsm_lst.append((numpy_path, numpy_date))\n elif 'ro' in numpy_filename:\n ro_lst.append((numpy_path, numpy_date))\n elif 'eta' in numpy_filename:\n eta_lst.append((numpy_path, numpy_date))\n elif 'infil' in numpy_filename:\n infil_lst.append((numpy_path, numpy_date))\n\n # if j > 1000000:\n # break\n if not j%10000:\n print j\n print('file line count {}'.format(j))\n line_end = (time.time() - line_start)\n print 'line time elapsed {}'.format(line_end)\n elapsed = (time.time() - open_read)\n print 'time elapsed to parse {}'.format(elapsed)\n\n # TODO now use sorted(list5, key=lambda vertex: (degree(vertex), vertex)) (firstkey, secondkey) tuple to sort by seed then TAW\n\n # sorting by a tuple of first, second and third criteria (seed, taw, date)\n def keyfunc(x):\n return os.path.split(x[0])[1].split('_')[6], os.path.split(x[0])[1].split('_')[4], x[1]\n\n rzsm_lst.sort(key=keyfunc)\n ro_lst.sort(key=keyfunc)\n eta_lst.sort(key=keyfunc)\n infil_lst.sort(key=keyfunc)\n\n print 'starting the taw sort'\n sort_start = time.time()\n ro_taw_sorted = taw_sort(ro_lst, runs, taw_list)\n sort_elapsed = (time.time() - sort_start)\n print 'sort elapsed {}'.format(sort_elapsed)\n\n eta_taw_sorted = taw_sort(eta_lst, runs, taw_list)\n sort_elapsed = (time.time() - sort_start)\n print 'sort elapsed {}'.format(sort_elapsed)\n\n infil_taw_sorted = taw_sort(infil_lst, runs, taw_list)\n sort_elapsed = (time.time() - sort_start)\n print 'sort elapsed {}'.format(sort_elapsed)\n\n rzsm_taw_sorted = taw_sort(rzsm_lst, runs, taw_list)\n sort_elapsed = (time.time() - sort_start)\n print 'sort elapsed {}'.format(sort_elapsed)\n\n # outname = '{}.csv'.format()\n\n list_output(taw_list, ro_taw_sorted, output_root, 
outname='ro_taw_{}.csv')\n list_output(taw_list, eta_taw_sorted, output_root, outname='eta_taw_{}.csv')\n list_output(taw_list, infil_taw_sorted, output_root, outname='infil_taw_{}.csv')\n list_output(taw_list, rzsm_taw_sorted, output_root, outname='rzsm_taw_{}.csv')\n\n # todo - finish out this so you can extract the value by loading the array and multiplying through each seed by each taw.", "def parse(input_file):\n clasa = []\n adiacente = []\n suparati = []\n start = None\n final = None\n _before_suparati = True # inaintea liniei care separa clasa de copiii suparati\n with open(input_file) as f:\n lines = list(f.readlines())\n for line in lines: # Procesam fiecare linie\n l = line.replace(\"\\n\", \"\").split()\n if _before_suparati:\n if l[0] == \"suparati\":\n _before_suparati = False\n continue\n clasa.append(l)\n else:\n if l[0] == \"mesaj:\":\n start = l[1]\n final = l[3]\n else:\n suparati.append((l[0], l[1]))\n\n ## Construim adiacentele\n ##\n ## len(clasa) = numarul de randuri din clasa. \n ## 6 copii pe fiecare rand => numarul de copii = 6 * len(clasa)\n adiacente = list([0] * (6 * len(clasa)) for _ in range(6 * len(clasa)))\n\n def _nesuparati(copil1, copil2):\n return (copil1, copil2) not in suparati and (copil2, copil1) not in suparati\n\n ## coloana de la stanga\n for i in range(len(clasa)):\n for j in range(6):\n\n if j % 2 == 0: ## drumuri orizontale pe cele 3 coloane\n \n if _nesuparati(clasa[i][j], clasa[i][j+1]) and\\\n clasa[i][j] != \"liber\" and clasa[i][j+1] != \"liber\":\n adiacente[i * 6 + j][i * 6 + j + 1] = 1\n adiacente[i * 6 + j + 1][i * 6 + j] = 1\n \n if i < len(clasa) - 1: # drumuri verticale de la primul rand pana la ultimul rand - 1\n\n if clasa[i][j] != \"liber\" and clasa[i+1][j] != \"liber\" and\\\n _nesuparati(clasa[i][j], clasa[i+1][j]):\n adiacente[i * 6 + j][(i + 1) * 6 + j] = 1\n adiacente[(i + 1) * 6 + j][i * 6 + j] = 1\n \n if (j == 1 or j == 3) and (i >= len(clasa) - 2): # transferul intre ultimele si penultimele banci\n\n if _nesuparati(clasa[i][j], clasa[i][j+1]) and\\\n clasa[i][j] != \"liber\" and clasa[i][j+1] != \"liber\":\n adiacente[i * 6 + j][i * 6 + j + 1] = 1\n adiacente[i * 6 + j + 1][i * 6 + j] = 1\n\n\n ## Vector de copii\n copii = reduce(lambda x, y: x + y, clasa, []) ## pastram locurile libere ca sa putem potrivi indicii\n\n if copii == [] or start is None or final is None: ## Fisierul e gol sau formatul gresit. Bail out\n raise MalformedInputException(\"Malformed input file. 
Bailing.\")\n \n start_index = copii.index(start)\n final_index = copii.index(final)\n \n if sum(adiacente[start_index]) < 1 or sum(adiacente[final_index]) < 1:\n raise EarlyNoSolution(\"Nu poate exista o solutie.\")\n\n return start, final, copii, adiacente", "def merge_files(\n files: List[TextIOWrapper],\n ) -> Generator[Tuple[List[TextIOWrapper]], str, None]:\n\n result = []\n\n for index, file in enumerate(files):\n try:\n iterator = iter(file)\n value = next(iterator)\n\n heapq.heappush(\n result, ((sorting_key(value), index, value, iterator, file))\n )\n except StopIteration:\n file.close()\n\n previous = None\n comment_count = 0\n max_comment_count = 2\n\n while result:\n ignore = False\n\n _, index, value, iterator, file = heapq.heappop(result)\n\n if remove_duplicates and value == previous:\n ignore = True\n\n if (\n write_header\n and comment_count < max_comment_count\n and value[0] == \"#\"\n ):\n ignore = True\n max_comment_count += 1\n\n if not ignore:\n yield value\n previous = value\n\n try:\n value = next(iterator)\n\n heapq.heappush(\n result, ((sorting_key(value), index, value, iterator, file))\n )\n except StopIteration:\n file.close()" ]
[ "0.6187829", "0.6099515", "0.6068014", "0.6001136", "0.59910196", "0.5986808", "0.58804303", "0.5873911", "0.57860065", "0.5770826", "0.5711253", "0.57094413", "0.5662024", "0.5638028", "0.5623567", "0.5581438", "0.553245", "0.5521676", "0.55184275", "0.5508633", "0.55056983", "0.55055803", "0.5502771", "0.5502601", "0.54713005", "0.545557", "0.5441592", "0.5431663", "0.54229325", "0.54226005" ]
0.62394416
0
Generate qrange file to be used with plink qrange
def gen_qrange(prefix, nsnps, prunestep, every=False, qrangefn=None):
    order = ['label', 'Min', 'Max']
    # dtype = {'label': object, 'Min': float, 'Max': float}
    if qrangefn is None:
        # Define the number of snps per percentage point and generate the range
        percentages = set_first_step(nsnps, prunestep, every=every)
        snps = np.around((percentages * nsnps) / 100).astype(int)
        try:
            # Check if there are repeats in ths set of SNPS
            assert sorted(snps) == sorted(set(snps))
        except AssertionError:
            snps = ((percentages * nsnps) / 100).astype(int)
            assert sorted(snps) == sorted(set(snps))
        labels = ['%.2f' % x for x in percentages]
        if float(labels[-1]) > 100.:
            labels[-1] = '100.00'
        if snps[-1] != nsnps:
            snps[-1] = nsnps
        assert snps[-1] == nsnps
        assert labels[-1] == '100.00'
        # Generate the qrange file
        qrange = '%s.qrange' % prefix
        qr = pd.DataFrame({'label': labels, 'Min': np.zeros(len(percentages)),
                           'Max': snps}).loc[:, order]
        qr.to_csv(qrange, header=False, index=False, sep=' ')
    else:
        qrange = qrangefn
        qr = pd.read_csv(qrange, sep=' ', header=None, names=order)  # , dtype=dtype)
    return qr, qrange
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generate_qubits(self):\n return cq.LineQubit.range(4)", "def process_qrange_file(filename):\n\n f = open(filename, 'r')\n q_ranges = yaml.load(f)\n\n return q_ranges", "def write_q_table_file(q_table, q_file=\"Q_Table.txt\"):\n file = open(q_file, \"w+\")\n rows = len(q_table)\n cols = len(q_table[0])\n file.write(str(rows) + \"x\" + str(cols) + \"\\n\")\n for i in range(len(q_table)):\n file.write(str(i) + \"-\" + \"24\\n\") # TODO: deshardcodear el objetivo del juego\n file.write(\"UP\\n\")\n file.write(\"RIGHT\\n\")\n file.write(\"DOWN\\n\")\n file.write(\"LEFT\\n\")\n for row in q_table:\n for col in row:\n file.write(str(col) + \"\\n\")\n file.close()", "def _build_range(self) -> str:\n return build_sequence(filter(None, (self.uids, self.sequence)))", "def generate_rpn_on_range(\n args,\n dataset_name,\n proposal_file,\n output_dir,\n ind_range=None,\n gpu_id=0\n):\n assert cfg.MODEL.RPN_ONLY or cfg.MODEL.FASTER_RCNN\n\n roidb, start_ind, end_ind, total_num_images = get_roidb(\n dataset_name, ind_range\n )\n logger.info(\n 'Output will be saved to: {:s}'.format(os.path.abspath(output_dir))\n )\n\n model = initialize_model_from_cfg(args, gpu_id=gpu_id)\n\n boxes, scores, ids = generate_proposals_on_roidb(\n model,\n roidb,\n start_ind=start_ind,\n end_ind=end_ind,\n total_num_images=total_num_images\n )\n\n cfg_yaml = yaml.dump(cfg)\n if ind_range is not None:\n rpn_name = 'rpn_proposals_range_{}_{}.pkl'.format(ind_range[0], ind_range[1])\n else:\n rpn_name = 'rpn_proposals.pkl'\n rpn_file = os.path.join(output_dir, rpn_name)\n save_object(\n dict(boxes=boxes, scores=scores, ids=ids, cfg=cfg_yaml), rpn_file\n )\n logger.info('Wrote RPN proposals to {}'.format(os.path.abspath(rpn_file)))\n return boxes, scores, ids, rpn_file", "def dqxx_write_to_file(dqcr, dqch, dqid, runnumber, outfilename=None):\n if outfilename is None:\n outfilename = \"PMT_DQXX_%i.ratdb\" % (runnumber)\n # RAT has an issue with reading in the dqch integer array,\n # therefore, we are manually writing out the file for now:\n runrange = \"run_range: [%i, %i],\" % (runnumber, runnumber)\n f = open(outfilename, 'w')\n f.write(' {\\n type: \"PMT_DQXX\",\\n ')\n f.write('version: 1,\\n')\n f.write( runrange )\n # These variables are taking only default values for now\n f.write('pass: 0,\\n')\n f.write('timestamp: \\\"\\\",\\n')\n f.write('comment: \\\"\\\",\\n')\n f.write(' production: true,\\n')\n # The following variables are zero by default for now? 
(Freija)\n f.write(' cratestatus_n100: 0,\\n cratestatus_n20: 0, \\n cratestatus_esumL: 0, ')\n f.write(' \\n cratestatus_esumH: 0,\\n cratestatus_owlN: 0, \\n cratestatus_owlEL: 0, ')\n f.write(' \\n cratestatus_owlEH: 0,')\n f.write('\\n\\n dqid : [ ')\n for x in range(0, 19 * 96):\n f.write(str(dqid[x]))\n f.write(', ')\n f.write('],\\n')\n f.write('\\n dqch : [ ')\n for x in range(0, 19 * 16 * 32):\n f.write(str(hex(dqch[x])))\n f.write(', ')\n f.write('],\\n ')\n f.write('\\n dqcr : [ ')\n for x in range(0, 19 * 16):\n f.write(str(hex(dqcr[x])))\n f.write(', ')\n f.write('],\\n }')", "def lrange(self, name, start, end):\r\n return self.format_inline('LRANGE', name, start, end)", "def generateDRQFor(self, domain):\n block = PatternRangeQuery.generateBaseDRQ(self, domain)\n head = [block[0]]\n tail = block[1:]\n shuffle(tail) # Shuffle the list to remove information about the order of the queries.\n block = head + tail\n return block", "def generateDRQFor(self, domain):\n block = BasicRangeQuery.generateBaseDRQ(self, domain)\n head = [block[0]]\n tail = block[1:]\n shuffle(tail) # Shuffle the list to remove information about the order of the queries\n block = head + tail\n return block", "def generateDRQFor(self, domain):\n block = PatternRangeQuery.generateBaseDRQ(self, domain)\n query = set()\n for set_of_queries in block: # Put the contents of all blocks into one big block\n query.update(set_of_queries)\n return query", "def writeQrels(qrelList, fileName):\n with open(fileName, 'w') as f:\n for e in qrelList:\n f.write(qrelEntry2Str(e))\n f.write('\\n')", "def get_range_info(self):\n with open(self.range_path, 'r') as _file:\n for line in _file.readlines():\n list0 = line.strip().split('-')\n range_dict = {\n 'min': int(list0[0], 16),\n 'max': int(list0[1], 16),\n 'max_offset': int(list0[1], 16) - int(list0[0], 16),\n }\n self.ranges.append(range_dict)", "def _range_expression(self):\n # using filter expression to define the time range of the query\n # In influx2, range query is in the format\n # range(start:2018-05-22T23:30:00Z, stop: 2018-05-23T00:00:00Z) or\n # range(start: -12h, stop: -15m)\n # with stop parameter being optional\n if self.filter is None:\n return u''\n exp = (self._sql_where_expression(self.filter)).replace('AND',',').split(',')\n return u'|> range({})'.format(u' , '.join([(i.replace('\"','').replace(\"'\",'')) for i in exp if \"start\" in i or \"stop\" in i]))", "def generateDRQFor(self, domain):\n block = PatternRangeQuery.generateBaseDRQ(self, domain)\n head = block[0] # First Set of Queries\n tail = set() # Remaining Queries\n for set_of_queries in block[1:]: # Add all elements from the tailing query blocks to big query block\n tail.update(set_of_queries)\n return (head, tail)", "def zrange(self, name, start, end, desc=False, withscores=False):\r\n if desc:\r\n return self.zrevrange(name, start, end, withscores)\r\n pieces = ['ZRANGE', name, start, end]\r\n if withscores:\r\n pieces.append('withscores')\r\n return self.format_inline(*pieces, **{'withscores': withscores})", "def _generate_qubits(self) -> Sequence[cirq.Qid]:\n return cirq.LineQubit.range(openfermion.count_qubits(self.hamiltonian))", "def generateDRQFor(self, domain):\n block = BasicRangeQuery.generateBaseDRQ(self, domain)\n head = block[0] # First Set of Queries\n tail = set() # Remaining Queries\n for set_of_queries in block[1:]: # Add all elements from the tailing query blocks to big query block\n tail.update(set_of_queries)\n return (head, tail)", "def range_table(self):\n raise 
NotImplementedError('Abstract method.')", "def zrevrange(self, name, start, num, withscores=False):\r\n pieces = ['ZREVRANGE', name, start, num]\r\n if withscores:\r\n pieces.append('withscores')\r\n return self.format_inline(*pieces, **{'withscores': withscores})", "def create(self, range):\n raise NotImplementedError", "def range(self) -> str:\n return f\"{self.name}!A:F\"", "def write_google_map_range_header(outfile: TextIO, map_name: str) -> None:\n outfile.write(\" var range_map = new google.maps.Map(document.getElementById(\\\"range_map_canvas\\\"),\"\n \"mapOptions);\\n\")\n outfile.write(\" var range_layer = new google.maps.KmlLayer(\\\"\" + init_data().site_url() + \"/maps/\" +\n rangemap_name(map_name) + \".kmz\\\",{suppressInfoWindows: true});\\n\")\n outfile.write(\" range_layer.setMap(range_map);\\n\")", "def transpile_qasm(input, outf='default', verbose=False, mapping=None):\n\n if os.path.exists(input):\n file_name = input\n l = [line.rstrip('\\n') for line in open(input)][2:]\n else:\n file_name = \"dummy\"\n l = [line.rstrip('\\n') for line in io.StringIO(input)][2:]\n output = []\n qubit_names = []\n\n global custom_gates\n on_custom = False\n curr_custom = []\n\n for line in l:\n\n # if on_custom and ('}' not in line):\n # curr_custom.append(line)\n # elif on_custom and ('}' in line):\n # index = np.argwhere(np.array([ch for ch in line]) == '}')[0][0]\n # curr_custom.append(line[:index])\n # on_custom = False\n if line[:7] == \"include\" or line[:8] == \"OPENQASM\":\n pass\n\n elif line[:4] == 'qreg':\n # qregister line format are ike \"qreg q[1]\" The number of qubits\n # register is given in the bracket. Sometime, the qubit name is\n # not a single character. Added a regex search. The regex will\n # search for a digit inside bracker []\n # Add string of qubit name to list of qubits we may draw from?\n\n # How many qubits are we considering\n n_qubits = int(re.search(r\"\\[([0-9]+)\\]\", line).group(1))\n\n # Constructing the dictionnary of qubits names\n if (mapping is None):\n mapping = {i: i for i in range(n_qubits)}\n\n for i in range(n_qubits):\n q_name = \"Q\" + str(mapping[i])\n qubit_names.append(q_name)\n\n elif line[:4] == 'creg':\n # Simply pass if the input to the qpu does not\n # need to keep track of classical registers\n pass\n\n elif line[:4] == 'gate':\n # Parse things inside the brackets to list of gates,\n # add to dict of prebuilt gate names\n gate_name, rotations = parse_custom_gate(line[5:])\n custom_gates[gate_name] = rotations\n pass\n\n elif line[:7] == 'measure':\n # Do not have to handle measurement\n pass\n\n elif line[:7] == 'barrier':\n output.append('New Cycle')\n pass\n\n elif line == '':\n pass\n\n else:\n # It's a gate operation!\n q_name, gates = parse_gate_and_q(line[:- 1], mapping)\n\n for gate in gates:\n # first check if it's an entanglement gate\n if len(q_name) == 2:\n\n if gate == 'CNOT':\n output.append(f'CR/C{q_name[0][1]}T{q_name[1][1]}')\n\n # TODO: in our configuration, we cannot make CNOT in both direction...\n # We need to add some local gate to make this happen\n elif gate == 'swap':\n output.extend( \\\n ['{},{}/CNOT'.format(q_name[0].upper(), q_name[1].upper()), \\\n '{},{}/CNOT'.format(q_name[1].upper(), q_name[0].upper())])\n else:\n output.append(q_name[1].upper() + '/' + gate)\n else:\n output.append(q_name[0].upper() + '/' + gate)\n # print(output)\n if verbose:\n print(\"---------------\")\n print(output)\n if outf:\n fname = (outf == 'default') and file_name[:len(file_name) - 5] or outf\n with 
open('{}_qtrl.txt'.format(fname), 'w') as f:\n for item in output:\n f.write(\"%s\\n\" % item)\n if verbose:\n print(\"Output saved!\")\n return output", "def genrange(gen, *args):\n log = logging.getLogger(\"hepfab.util.genrange\")\n\n start, stop, step = 1, None, 1\n arglen = len(args)\n if arglen == 3:\n start, stop, step = args\n elif arglen == 2:\n start, stop = args\n else:\n (stop,) = args\n stop += 1\n\n log.debug(\"genrange(%r, %r, %r, %r)\", gen, start, stop, step)\n spec = \"%sn%%0%d.d\" % (gen, digits(stop))\n log.debug(\"Produced spec %r\", spec)\n\n for i in xrange(start, stop, step):\n yield spec % i", "def write_qsub_script(self, filename, echo=False):\n\n buf = ['#!/usr/bin/env qsub', '# Written using SGE module']\n\n for option, value in self.args.__dict__.items():\n if value is True:\n value = ''\n\n if option not in ['command', 'command_args', 'xterm_args']:\n if isinstance(value, list):\n val = ' '.join(value)\n else:\n val = str(value)\n\n buf.append(' '.join(['#', '-' + option, val]))\n\n args = getattr(self.args, 'command_args', [])\n args = getattr(self.args, 'xterm_args', args)\n\n buf.append(' '.join([self.args.command] + args))\n\n if echo:\n print('\\n'.join(buf))\n\n f = open(filename, 'w')\n f.write('\\n'.join(buf))\n f.close()", "def url_generator(cls, from_range: int):\n for i in range(from_range, from_range + cls.RANGE):\n for j in cls.COURTS:\n yield cls.URL.format(id=i, sid=j)", "def write_trunc_qual(trunc_qual_scores,\r\n qual_out_fp,\r\n seq_order):\r\n\r\n qual_line_size = 60\r\n\r\n qual_out = open(qual_out_fp, \"w\")\r\n\r\n for label in seq_order:\r\n trunc_label = label.split()[0].strip()\r\n current_trunc_qual_scores = trunc_qual_scores[trunc_label]\r\n qual_out.write(\">%s\\n\" % label)\r\n current_qual_scores_lines = []\r\n # Quality score format is a string of 60 base calls, followed by a\r\n # newline, until the last N bases are written\r\n for slice in range(0, len(trunc_qual_scores[trunc_label]),\r\n qual_line_size):\r\n # current_segment = map(str,\r\n # current_trunc_qual_scores[slice:slice + qual_line_size])\r\n current_segment = current_trunc_qual_scores[\r\n slice:slice +\r\n qual_line_size]\r\n current_qual_scores_lines.append(\" \".join(current_segment))\r\n\r\n qual_out.write('\\n'.join(current_qual_scores_lines))\r\n qual_out.write('\\n')", "def main(pathToseq, start, end, pathToOutputFile):\n infile = open(pathToseq)\n outfile = open(pathToOutputFile, 'w')\n fasta = infile.readline().replace(\"\\n\",\"\")\n sequence =\"\"\n for line in infile:\n sequence+=line.replace(\"\\n\",\"\")\n infile.close()\n outfile.write(\"Region \"+start +\" - \"+ end + \" of \"+fasta)\n\n outfile.write(\"\\n\"+sequence[int(start): int(end)])\n outfile.close()", "def do_range_forloop(self, line):\n self.E_str = \"do_range_forloop\"\n\n # Get the range parameters\n line = line.replace(\" \", \"\")\n line = line[line.find(\"range\")+5:]\n range_str, _ = gen_parse.get_str_between_delims(line, \"(\", \")\")\n words = range_str.split(\",\")\n\n if len(words) == 1:\n start, step = 0, 1\n end = int(words[0])\n elif len(words) == 2:\n step = 1\n start, end = [int(i) for i in words]\n else:\n start, end, step = [int(i) for i in words]\n\n return range(start, end, step)", "def computerange(lyrindex):\n for i in range(len(lyrindex)):\n if i != len(lyrindex) - 1:\n if lyrindex[i][0].find('.') > 0: # special case where inventory files have two records\n lyrindex[i].append( 'range=%s-%s' %( lyrindex[i][1], int(lyrindex[i+2][1]) - 1) )\n else:\n lyrindex[i].append( 
'range=%s-%s' %( lyrindex[i][1], int(lyrindex[i+1][1]) - 1) )\n else:\n lyrindex[-1].append( 'range=%s' % ( lyrindex[-1][1] ) ) \n return lyrindex" ]
[ "0.60668355", "0.56891936", "0.5675259", "0.56339264", "0.55886894", "0.5510636", "0.5363446", "0.535094", "0.53416795", "0.5267484", "0.5214582", "0.5211727", "0.51969045", "0.5183751", "0.51725626", "0.5170478", "0.51103914", "0.5081725", "0.5069747", "0.50608844", "0.5060245", "0.505794", "0.5036236", "0.5010028", "0.50029814", "0.49907354", "0.49828687", "0.49805164", "0.49754068", "0.49724373" ]
0.6948805
0
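For context on the gen_qrange record above: the short sketch below is not part of the dataset rows; the file names, the toy nsnps/prunestep values, and the commented plink invocation are illustrative assumptions. It only shows the three-column qrange layout the function writes and the kind of plink --q-score-range call such a file is meant to feed.

    import numpy as np
    import pandas as pd

    # Toy inputs standing in for a real GWAS: 1000 SNPs, pruned in 10% steps.
    nsnps, prunestep = 1000, 10
    percentages = np.arange(prunestep, 100 + prunestep, prunestep, dtype=float)
    snps = np.around((percentages * nsnps) / 100).astype(int)   # upper SNP count per bin
    labels = ['%.2f' % p for p in percentages]                  # bin labels, e.g. '10.00'

    # Same layout gen_qrange writes: label, Min, Max, space-separated, no header.
    qr = pd.DataFrame({'label': labels,
                       'Min': np.zeros(len(percentages)),
                       'Max': snps}).loc[:, ['label', 'Min', 'Max']]
    qr.to_csv('toy.qrange', header=False, index=False, sep=' ')

    # plink would then consume it roughly as in the scoring record that follows:
    # plink --bfile mydata --score gwas_sumstats.txt sum \
    #       --q-score-range toy.qrange ranks.qfile --out toy_scores

Each qrange label then corresponds to a per-threshold .profile output, which is what the scoring helper in the next record reads back per label.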
Helper function to parallelize score_qfiles
def single_score_plink(prefix, qr, tup, plinkexe, gwasfn, qrange, frac_snps, maxmem, threads): qfile, phenofile, bfile = tup suf = qfile[qfile.find('_') + 1: qfile.rfind('.')] ou = '%s_%s' % (prefix, suf) # score = ('%s --bfile %s --score %s 2 4 7 header --q-score-range %s %s ' # '--allow-no-sex --keep-allele-order --pheno %s --out %s ' # '--memory %d --threads %d') score = ( '%s --bfile %s --score %s sum --q-score-range %s %s --allow-no-sex ' '--keep-allele-order --pheno %s --out %s --memory %d --threads %d') score = score % (plinkexe, bfile, gwasfn, qrange, qfile, phenofile, ou, maxmem, threads) o, e = executeLine(score) profs = read_log(ou) df = pd.DataFrame([read_scored_qr('%s.%s.profile' % (ou, x.label), phenofile, suf, round(float(x.label) * frac_snps), profs) for x in qr.itertuples()]) # frames.append(df) with tarfile.open('Profiles_%s.tar.gz' % ou, mode='w:gz') as t: for fn in glob('%s*.profile' % ou): if os.path.isfile(fn): t.add(fn) os.remove(fn) return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def task3(dataset,writepickle=False,pfilename=None,usepickle=True):\n model,bitext = task1(dataset,printoutput = False,writepickle=writepickle,pfile = pfilename,usepickle=usepickle)\n phrases = extract_phrases(bitext,model)\n scored_phrases = phrase_scoring_ranking(phrases,model,dataset,bitext)\n print_output_task3(scored_phrases,dataset)", "def main(paths, minscore, outfile, is_matrix):\n \n # Get references and lengths from first BAM file.\n # We need these to print them in the output.\n # Might as well do it before spawning all those processes.\n firstfile = pysam.AlignmentFile(paths[0], \"rb\")\n references = firstfile.references\n lengths = firstfile.lengths\n \n if not len(references) == len(lengths):\n raise ValueError('Could not parse headers of first bam-file')\n \n # Spawn independent processed to calculate RPKM for each of the BAM files\n processresults = list()\n processes_done = 0\n \n # This is just to print to terminal when a process finishes. Not necessary.\n def callback(result, totalps=len(paths)):\n \"Generator yielding processed\"\n nonlocal processes_done\n processes_done += 1\n print('Files processed: {}/{}'.format(processes_done, totalps))\n return None\n\n # Queue all the processes\n with multiprocessing.Pool(processes=args.processors) as pool:\n for fileno, path in enumerate(paths):\n arguments = (fileno, path, args.minscore)\n processresults.append(pool.apply_async(get_contig_rpkms, arguments,\n callback=callback, error_callback=callback))\n \n # For some reason, this is needed.\n pool.close()\n pool.join()\n \n print('All processes finished. Checking outputs')\n sample_rpkms = list()\n \n for processresult in processresults:\n if processresult.successful():\n sample_rpkms.append(processresult.get())\n \n else:\n raise multiprocessing.ProcessError\n \n # sample_rpkms now contain (identifier, sample_rpkms) tuples, in the order\n # they were returned from the pool. We want to sort them by identifier,\n # so that we know which RPKMs belong to which BAM file\n sample_rpkms.sort()\n \n # Now we can discard the identifiers\n sample_rpkms = [i[1] for i in sample_rpkms]\n \n # Each BAM file MUST contain the same headers\n if not all(len(rpkms) == len(lengths) for rpkms in sample_rpkms):\n raise ValueError('Not all BAM files contain the same amount of headers.')\n \n print('Outputs alright. 
Printing table.')\n \n with open(outfile, 'w') as filehandle:\n # Print header if asked\n if not is_matrix:\n print('#contig\\tcontiglength', '\\t'.join(paths), sep='\\t', file=filehandle)\n \n # Print the actual output\n for fields in zip(references, lengths, *sample_rpkms):\n numbers = '\\t'.join([str(round(i, 3)) for i in fields[2:]])\n \n if not is_matrix:\n print(fields[0], fields[1], sep='\\t', end='\\t', file=filehandle)\n \n print(numbers, file=filehandle)", "def main():\n verbose = False\n \n try:\n opts,args = getopt.getopt(sys.argv[1:], \"v?\")\n except:\n usage(os.path.basename(sys.argv[0]))\n for opt,arg in opts:\n if opt == \"-v\": # verbose\n verbose = True\n else:\n usage(os.path.basename(sys.argv[0]))\n\n if len(args) != 2:\n usage(os.path.basename(sys.argv[0]))\n\n posfilename = args[0]\n negfilename = args[1]\n\n (namelist, slrtabfilelist) = zip(*[line.split('\\t') for line in sys.stdin]) # trciky use of zip and * to unzip list\n slrtabfilelist = [x[:-1] for x in slrtabfilelist] # remove newlines on end\n\n posfile_fh = open(posfilename, \"w\")\n negfile_fh = open(negfilename, \"w\")\n\n posscores = [] # list of lists: each list is scores for each method in pos class\n negscores = [] # similarly for negative class scores\n for slrtabfile in slrtabfilelist:\n if verbose:\n sys.stderr.write(\"Reading results from file %s...\" % slrtabfile)\n slrlist = list(iter_slrtab(open(slrtabfile))) # (score,label) list\n posscores.append([sl[0] for sl in slrlist if sl[1] == 1])\n negscores.append([sl[0] for sl in slrlist if sl[1] == 0])\n assert(len(posscores[-1]) + len(negscores[-1]) == len(slrlist))\n if verbose:\n sys.stderr.write(\" %d entries (%d pos, %d neg)\\n\" % (len(slrlist),len(posscores[-1]),len(negscores[-1])))\n \n if verbose:\n sys.stderr.write(\"writing output to %s and %s...\" %(posfilename, negfilename))\n \n posfile_fh.write('\\t'.join(namelist) + '\\n')\n negfile_fh.write('\\t'.join(namelist) + '\\n')\n\n numpos = len(posscores[0]) # FIXME may be different lengths\n for i in xrange(numpos):\n for j in xrange(len(namelist)):\n posfile_fh.write(str(posscores[j][i]))\n if j < len(posscores) - 1:\n posfile_fh.write('\\t')\n posfile_fh.write('\\n')\n\n numneg = len(negscores[0]) # FIXME may be different lengths\n for i in xrange(numneg):\n for j in xrange(len(namelist)):\n negfile_fh.write(str(negscores[j][i]))\n if j < len(negscores) - 1:\n negfile_fh.write('\\t')\n negfile_fh.write('\\n')\n\n\n posfile_fh.close()\n negfile_fh.close()\n if verbose:\n sys.stderr.write(\"done\\n\")", "def dqp(q, p=1, m='QL'):\n st = time.time()\n nodesfile = os.path.realpath(os.path.join(os.path.dirname(__file__), getattr(user, \"dqp_nodes_file\", \"local.nodes\")))\n nodes = open(nodesfile).read().strip().splitlines()\n\n indexwords = open(os.path.realpath(os.path.join(os.path.dirname(__file__), '..', 'data', 'algorithmForGettingIndex'))).read().strip().splitlines() \n indexnodes = []\n for ttype,terms in q.iteritems():\n for term in terms:\n ind = findIndex(indexwords, term)\n if ind != -1 and ind not in indexnodes:\n indexnodes.append(ind)\n \n args = []\n for ind in indexnodes:\n args.append(('PYROLOC://' + nodes[ind] + '/dqp', q, (p * 10), m))\n \n total = 0\n combined_result = []\n if len(args) > 0:\n pool = Pool(processes=len(args))\n result = pool.map(do_search, args)\n \n # Merge results\n totalInDocs=set([])\n totalExDocs=set([])\n rdict = {}\n for (rcount,r,indocids,exdocids) in result:\n totalExDocs.update(exdocids)\n if len(totalInDocs) == 0 and len(indocids) > 0:\n 
totalInDocs = set(indocids)\n elif len(indocids) > 0:\n totalInDocs = totalInDocs.intersection(set(indocids))\n for rec in r:\n if rec['docid'] in rdict:\n rdict[rec['docid']]['score'] = rdict[rec['docid']]['score'] + rec['score']\n else:\n rdict[rec['docid']] = rec\n\n badDocs=set([])\n for docID in rdict.keys():\n if(docID in totalExDocs):\n badDocs.update([docID])\n if(len(totalInDocs)!=0 and docID not in totalInDocs):\n badDocs.update([docID])\n for docID in badDocs:\n del rdict[docID]\n \n results = rdict.values()\n combined_result = sorted(results, key=operator.itemgetter('score'), reverse=True)\n combined_result = combined_result[(p - 1) * 10:(p * 10)]\n total = len(results)\n \n return {'count': total, 'time': (time.time() - st), 'records': combined_result}", "def alignScore():\n matrix = mapMatrix(\"BLOSUM62\")\n \n path = \"./data/\"\n for file in os.listdir(path):\n if file.endswith(\".fa\") or file.endswith(\".fasta\"):\n sequences = []\n input_sequences = SeqIO.parse(path + file, \"fasta\", \\\n IUPAC.protein)\n\n for record in input_sequences:\n seq = str(record.seq)\n sequences.append(seq) \n \n SumOfPairs = 0\n for pair in combinations(sequences, 2): \n SumOfPairs += pairwiseScore(pair[0], pair[1], matrix)\n \n print SumOfPairs", "def fastq_qc(demultiplex_result, out_dir, config):\n\n pigz_cores = int(config['fastqTrim']['pigz_cores'])\n cutadapt_cores = int(config['fastqTrim']['cutadapt_cores'])\n\n r1_adapter = config['fastqTrim']['r1_adapter']\n r2_adapter = config['fastqTrim']['r1_adapter']\n length_threshold = config['fastqTrim']['length_threshold']\n quality_threshold = config['fastqTrim']['quality_threshold']\n r1_left_cut = config['fastqTrim']['r1_left_cut']\n r1_right_cut = config['fastqTrim']['r1_right_cut']\n r2_left_cut = config['fastqTrim']['r2_left_cut']\n r2_right_cut = config['fastqTrim']['r2_right_cut']\n overlap = config['fastqTrim']['overlap']\n total_reads_threshold = int(config['fastqTrim']['total_reads_threshold'])\n\n results = []\n for (uid, index_name), sub_df in demultiplex_result.groupby(['uid', 'index_name']):\n sample_demultiplex_total = sub_df['Trimmed'].sum()\n if sample_demultiplex_total < total_reads_threshold:\n log.info(f'In uid {uid}: index {index_name} skipped '\n f'due to too less reads: {sample_demultiplex_total}')\n continue\n # process R1\n r1_path_pattern = f'{out_dir}/{uid}_L*_{index_name}_R1.fq.gz'\n r1_out = f'{out_dir}/{uid}_{index_name}_R1.trimed.fq.gz'\n r1_cmd = f'pigz -cd -p {pigz_cores} {r1_path_pattern} | ' \\\n f'cutadapt -j {cutadapt_cores} --report=minimal -O {overlap} ' \\\n f'-q {quality_threshold} -u {r1_left_cut} ' \\\n f'-u -{r1_right_cut} -m {length_threshold} ' \\\n f'-a {r1_adapter} -o {r1_out} -'\n r1_result = subprocess.run(r1_cmd, stdout=subprocess.PIPE,\n encoding='utf8', shell=True, check=True)\n\n # get R1 result stat\n lines = []\n for line in r1_result.stdout.split('\\n'):\n ll = line.split('\\t')\n if len(ll) > 1:\n lines.append(ll)\n s = pd.Series({name: number for name, number in zip(*lines)})\n s['uid'] = uid\n s['index_name'] = index_name\n s['read_type'] = 'R1'\n results.append(s)\n\n # process R2\n r2_path_pattern = f'{out_dir}/{uid}_L*_{index_name}_R2.fq.gz'\n r2_out = f'{out_dir}/{uid}_{index_name}_R2.trimed.fq.gz'\n r2_cmd = f'pigz -cd -p {pigz_cores} {r2_path_pattern} | ' \\\n f'cutadapt -j {cutadapt_cores} --report=minimal -O {overlap} ' \\\n f'-q {quality_threshold} -u {r2_left_cut} ' \\\n f'-u -{r2_right_cut} -m {length_threshold} ' \\\n f'-a {r2_adapter} -o {r2_out} -'\n r2_result = 
subprocess.run(r2_cmd, stdout=subprocess.PIPE,\n encoding='utf8', shell=True, check=True)\n # get R2 result stat\n lines = []\n for line in r2_result.stdout.split('\\n'):\n ll = line.split('\\t')\n if len(ll) > 1:\n lines.append(ll)\n s = pd.Series({name: number for name, number in zip(*lines)})\n s['uid'] = uid\n s['index_name'] = index_name\n s['read_type'] = 'R2'\n results.append(s)\n\n fastq_final_result = pd.DataFrame(results)\n if len(results) == 0:\n # all sample skipped\n return fastq_final_result\n fastq_final_result['out_reads_rate'] = \\\n fastq_final_result['out_reads'].astype(int) / fastq_final_result['in_reads'].astype(int)\n fastq_final_result['out_bp_rate'] = \\\n fastq_final_result['out_reads'].astype(int) / fastq_final_result['in_reads'].astype(int)\n\n # clean up\n for (uid, index_name), sub_df in demultiplex_result.groupby(['uid', 'index_name']):\n r_path_pattern = f'{out_dir}/{uid}_L*_{index_name}_R*.fq.gz'\n r_rm_cmd = f'rm -f {r_path_pattern}'\n subprocess.run(r_rm_cmd, shell=True)\n # remove unknown reads\n r_path_pattern = f'{out_dir}/{uid}_L*_unknown_R*.fq.gz'\n r_rm_cmd = f'rm -f {r_path_pattern}'\n subprocess.run(r_rm_cmd, shell=True)\n\n return fastq_final_result", "def read_data_split_and_search():\n\n\n\n dataReader = Movielens10MReader()\n dataset = dataReader.load_data()\n\n URM_train, URM_test = split_train_in_two_percentage_global_sample(dataset.get_URM_all(), train_percentage = 0.80)\n URM_train, URM_validation = split_train_in_two_percentage_global_sample(URM_train, train_percentage = 0.80)\n\n output_folder_path = \"result_experiments/\"\n\n\n # If directory does not exist, create\n if not os.path.exists(output_folder_path):\n os.makedirs(output_folder_path)\n\n\n\n\n\n\n\n collaborative_algorithm_list = [\n Random,\n TopPop,\n P3alphaRecommender,\n RP3betaRecommender,\n ItemKNNCFRecommender,\n UserKNNCFRecommender,\n MatrixFactorization_BPR_Cython,\n MatrixFactorization_FunkSVD_Cython,\n PureSVDRecommender,\n SLIM_BPR_Cython,\n SLIMElasticNetRecommender\n ]\n\n\n\n\n from Base.Evaluation.Evaluator import EvaluatorHoldout\n\n evaluator_validation = EvaluatorHoldout(URM_validation, cutoff_list=[5])\n evaluator_test = EvaluatorHoldout(URM_test, cutoff_list=[5, 10])\n\n\n runParameterSearch_Collaborative_partial = partial(runParameterSearch_Collaborative,\n URM_train = URM_train,\n metric_to_optimize = \"MAP\",\n n_cases = 10,\n evaluator_validation_earlystopping = evaluator_validation,\n evaluator_validation = evaluator_validation,\n evaluator_test = evaluator_test,\n output_folder_path = output_folder_path,\n similarity_type_list = [\"cosine\"],\n parallelizeKNN = False)\n\n\n\n\n\n pool = multiprocessing.Pool(processes=int(multiprocessing.cpu_count()), maxtasksperchild=1)\n pool.map(runParameterSearch_Collaborative_partial, collaborative_algorithm_list)\n\n #\n #\n # for recommender_class in collaborative_algorithm_list:\n #\n # try:\n #\n # runParameterSearch_Collaborative_partial(recommender_class)\n #\n # except Exception as e:\n #\n # print(\"On recommender {} Exception {}\".format(recommender_class, str(e)))\n # traceback.print_exc()\n #", "def __init__(self, numWorkers, numTasks, ratings_path):\n\n\t\tself.numWorkers = numWorkers\n\t\tself.numTasks = numTasks\n\t\t\n\t\tself.f = {}\n\t\tself.data = read_file(ratings_path)\n\t\tfor i in xrange(self.numTasks):\n\t\t\tif np.random.rand() > 0.5:\n\t\t\t\tself.f[i] = 1\n\t\t\telse:\n\t\t\t\tself.f[i] = 0\n\n\t\tself.p = np.array([[0.6,0.4],[0.4,0.6]])\n\t\t\n\t\tself.current_truths = 
np.array(map(self.mapping_func, range(numTasks)))\n\t\tself.bucket2i = {}\n\t\tself.m2bucket = {}\n\t\tself.sorted_bucket = []", "def main():\n global collection\n #args = argparse.ArgumentParser()\n #args.add_argument('directory', help='Directory in which the files'\n #'are stored.')\n #args.add_argument('collection', help='The collection to use.')\n #parser = args.parse_args()\n collection = get_collection()\n #documents = glob.glob('*.asm')\n documents = collection.find()\n num_cores = multiprocessing.cpu_count()\n print('Running code on %d processors' % num_cores)\n Parallel(n_jobs=num_cores)(\\\n delayed(save_comments)(doc) for doc in documents)", "def compute_metrics_from_files(p_path_to_reference_file,\r\n p_path_to_candidate_file,\r\n p_max_bleu_order):\r\n\r\n reference_dictionary, reference_no_answer_query_ids = \\\r\n load_file(p_path_to_reference_file)\r\n candidate_dictionary, candidate_no_answer_query_ids = load_file(p_path_to_candidate_file)\r\n query_id_answerable = set(reference_dictionary.keys())-reference_no_answer_query_ids\r\n query_id_answerable_candidate = set(candidate_dictionary.keys())-candidate_no_answer_query_ids\r\n \r\n true_positives = len(query_id_answerable_candidate.intersection(query_id_answerable))\r\n false_negatives = len(query_id_answerable)-true_positives\r\n true_negatives = len(candidate_no_answer_query_ids.intersection(reference_no_answer_query_ids))\r\n false_positives = len(reference_no_answer_query_ids)-true_negatives\r\n precision = float(true_positives)/(true_positives+false_positives) if (true_positives+false_positives)>0 else 1.\r\n recall = float(true_positives)/(true_positives+false_negatives) if (true_positives+false_negatives)>0 else 1.\r\n F1 = 2 *((precision*recall)/(precision+recall))\r\n filtered_reference_dictionary = \\\r\n {key: value for key, value in reference_dictionary.items() \\\r\n if key not in reference_no_answer_query_ids}\r\n\r\n filtered_candidate_dictionary = \\\r\n {key: value for key, value in candidate_dictionary.items() \\\r\n if key not in reference_no_answer_query_ids}\r\n\r\n for query_id, answers in filtered_candidate_dictionary.items():\r\n assert \\\r\n len(answers) <= 1, \\\r\n 'query_id %d contains more than 1 answer \\\"%s\\\" in candidate file' % \\\r\n (query_id, str(answers))\r\n\r\n reference_query_ids = set(filtered_reference_dictionary.keys())\r\n candidate_query_ids = set(filtered_candidate_dictionary.keys())\r\n common_query_ids = reference_query_ids.intersection(candidate_query_ids)\r\n assert (len(common_query_ids) == len(reference_query_ids)) and \\\r\n (len(common_query_ids) == len(candidate_query_ids)), \\\r\n 'Reference and candidate files must share same query ids'\r\n\r\n all_scores = {}\r\n bleu_scores, _ = \\\r\n Bleu(p_max_bleu_order).compute_score(filtered_reference_dictionary, \\\r\n filtered_candidate_dictionary)\r\n for i, bleu_score in enumerate(bleu_scores):\r\n all_scores['bleu_%d' % (i+1)] = bleu_score\r\n\r\n rouge_score, _ = Rouge().compute_score(filtered_reference_dictionary, \\\r\n filtered_candidate_dictionary)\r\n all_scores['rouge_l'] = rouge_score\r\n all_scores['F1'] = F1\r\n similarity = 0\r\n for key in filtered_reference_dictionary:\r\n candidate_answer = nlp(filtered_candidate_dictionary[key][0])\r\n reference_answer = filtered_reference_dictionary[key]\r\n answersimilarity = 0\r\n for answer in reference_answer:\r\n answersimilarity += candidate_answer.similarity(nlp(answer))\r\n similarity += answersimilarity/len(reference_answer)\r\n semantic_similarity = 
similarity/len(filtered_reference_dictionary)\r\n all_scores['Semantic_Similarity'] = semantic_similarity\r\n return all_scores", "def worker(nums, out_q):\n outdict = {}\n print(threading.current_thread().name)\n print (\"pid:\", os.getpid())\n print (\"data size:\", nums)\n for n in nums:\n outdict[n] = factorize_naive(n)\n out_q.put(outdict)", "def calc_metrics(metric_scores_list):\n\n N_split, N_miss, N_add, Q_P, Q_R, Q_F, N_gt, N_pred = [], [], [], [], [], [], [], []\n Q_rand, Q_jaccard, Q_aggregated_jaccard, Q_ctc, Q_piou = [], [], [], [], []\n tp, fp, fn = [], [], []\n\n for score in metric_scores_list:\n N_split.append(score['N_split']), N_miss.append(score['N_miss']), N_add.append(score['N_add'])\n Q_P.append(score['Q_P']), Q_R.append(score['Q_R']), Q_F.append(score['Q_F'])\n Q_rand.append(score['Q_rand']), Q_jaccard.append(score['Q_jaccard'])\n Q_aggregated_jaccard.append(score['Q_aggregated_jaccard'])\n if \"Q_ctc\" in score:\n Q_ctc.append(score['Q_ctc']), \n Q_piou.append(score['Q_piou'])\n N_gt.append(score['N_gt']), N_pred.append(score['N_pred'])\n tp.append(score['tp']), fp.append(score['fp']), fn.append(score['fn'])\n\n N_split, N_miss, N_add = np.array(N_split), np.array(N_miss), np.array(N_add)\n N_gt, N_pred = np.array(N_gt), np.array(N_pred)\n tp, fp, fn = np.array(tp), np.array(fp), np.array(fn)\n Q_P_macro, Q_R_macro, Q_F_macro = np.mean(np.array(Q_P)), np.mean(np.array(Q_R)), np.mean(np.array(Q_F))\n Q_P_micro = np.sum(tp) / (np.sum(tp) + np.sum(fp)) if (np.sum(tp) + np.sum(fp)) > 0 else 0\n Q_R_micro = np.sum(tp) / (np.sum(tp) + np.sum(fn)) if (np.sum(tp) + np.sum(fn)) > 0 else 0\n Q_rand_macro, Q_jaccard_macro = np.mean(np.array(Q_rand)), np.mean(np.array(Q_jaccard))\n Q_aggregated_jaccard_macro = np.mean(np.array(Q_aggregated_jaccard))\n Q_ctc_macro, Q_piou_macro = np.mean(np.array(Q_ctc)), np.mean(np.array(Q_piou))\n\n metrics = {\n 'Q_split_micro': float(np.sum(N_split) / np.sum(N_gt)),\n 'Q_split_macro': float(np.mean(N_split / N_gt)),\n 'Q_miss_micro': float(np.sum(N_miss) / np.sum(N_gt)),\n 'Q_miss_macro': float(np.mean(N_miss / N_gt)),\n 'Q_add_micro': float(np.sum(N_add) / np.sum(N_gt)),\n 'Q_add_macro': float(np.mean(N_add / N_gt)),\n 'N_gt': int(np.sum(N_gt)),\n 'N_pred': int(np.sum(N_pred)),\n 'Q_rand_macro': float(Q_rand_macro),\n 'Q_jaccard_macro': float(Q_jaccard_macro),\n 'Q_aggregated_jaccard_macro': float(Q_aggregated_jaccard_macro),\n 'Q_ctc_macro': float(Q_ctc_macro),\n 'Q_piou_macro': float(Q_piou_macro),\n 'Q_P_micro': float(Q_P_micro),\n 'Q_P_macro': float(Q_P_macro),\n 'Q_R_micro': float(Q_R_micro),\n 'Q_R_macro': float(Q_R_macro),\n 'Q_F_macro': float(Q_F_macro),\n 'Q_F_micro': float(2 * Q_P_micro * Q_R_micro / (Q_P_micro + Q_R_micro)) if (Q_P_micro + Q_R_micro) > 0 else 0\n }\n return metrics", "def sub_processor(lock, pid, video_list):\r\n text = 'processor %d' % pid\r\n with lock:\r\n progress = tqdm.tqdm(\r\n total=len(video_list),\r\n position=pid,\r\n desc=text\r\n )\r\n for i in range(len(video_list)):\r\n video_name = video_list[i]\r\n \"\"\" Read result csv file \"\"\"\r\n df = pd.read_csv(os.path.join(config.post_csv_load_dir, video_name + \".csv\"))\r\n \"\"\" Calculate final score of proposals \"\"\"\r\n df['score'] = df.iou.values[:] * df.start.values[:] * df.end.values[:]\r\n if len(df) > 1:\r\n df = softNMS(df)\r\n df = df.sort_values(by=\"score\", ascending=False)\r\n video_info = video_dict[video_name]\r\n video_duration = video_info[\"duration_second\"]\r\n proposal_list = []\r\n\r\n for j in range(min(top_number, 
len(df))):\r\n tmp_proposal = {}\r\n tmp_proposal[\"score\"] = df.score.values[j]\r\n tmp_proposal[\"segment\"] = [max(0, df.xmin.values[j]) * video_duration,\r\n min(1, df.xmax.values[j]) * video_duration]\r\n tmp_proposal[\"label\"] = \"行走\"\r\n # tmp_proposal[\"label\"] = \"Fun sliding down\"\r\n proposal_list.append(tmp_proposal)\r\n result_dict[video_name] = proposal_list\r\n with lock:\r\n progress.update(1)\r\n\r\n with lock:\r\n progress.close()", "def calculateScore(self, queue):\n for song in queue:\n if song['explicit']:\n song['score'] = 3 * song['age'] + 2 * song['upvotes'] - 2 * song['downvotes']\n else:\n song['score'] = -1 * song['downvotes']", "def painting_matching_ml(imgs, db_imgs, method_list, text_masks, author_text, gt_text, metrics, weights, splits=30, max_rank=10):\n descriptor_extractors = [get_descriptor_extractor(method_name) for method_name in method_list]\n tmp_img_format = []\n tmp_mask_format = []\n tmp_text_format = []\n for i in range(len(imgs)):\n for j in range(len(imgs[i])):\n tmp_img_format.append(imgs[i][j])\n tmp_mask_format.append(text_masks[i][j])\n tmp_text_format.append(author_text[i][j])\n\n #db_imgs = [img[0] for img in db_imgs]\n db_img_splits = [i*len(db_imgs)//splits for i in range(splits-1)]\n \n scores = []\n query_descriptors = extract_descriptors(tmp_img_format, descriptor_extractors, method_list, tmp_text_format, tmp_mask_format) \n #np.array([extract_descriptors(img, matching_methods, mask) for img, mask in zip(tmp_img_format, tmp_mask_format)])\n print(\"Starting db extraction + matching\")\n for split in tqdm(range(splits-2)):\n db_descriptors = extract_descriptors(db_imgs[db_img_splits[split]:db_img_splits[split+1]], descriptor_extractors, method_list, gt_text[db_img_splits[split]:db_img_splits[split+1]], None) #np.array([mrhm(db_img) for db_img in db_imgs[db_img_splits[split]:db_img_splits[split+1]]])\n scores.append(compare_descriptors(query_descriptors, db_descriptors, metrics, method_list, weights))\n # compare_descriptors(query_descriptors, db_descriptors, descriptor_comp_methods, descriptor_names, weights)\n db_descriptors = extract_descriptors(db_imgs[db_img_splits[-1]:], descriptor_extractors, method_list, gt_text[db_img_splits[-1]:], None)\n scores.append(compare_descriptors(query_descriptors, db_descriptors, metrics, method_list, weights))\n \n # concatenate all the results\n scores = np.concatenate(scores, 1)\n \n top_k_matches = np.argpartition(scores, list(range(max_rank)))[:, :max_rank]\n return top_k_matches", "def CalculateRoc2(dataArray,prefix,readsize,uniquehits,mappedreads,filename):\r\n starttime= time.time()\r\n uniquehits = float(uniquehits)\r\n readsize = float(readsize)\r\n \r\n \r\n entries = len(dataArray)\r\n \r\n\r\n resultmatrix = np.arange(entries*2)\r\n resultmatrix = resultmatrix.reshape(2,entries)\r\n \r\n maxrq = max(x.rq for x in dataArray)\r\n maxnm = max(x.nm[0] for x in dataArray)\r\n maxGaps= max(x.gaps[0] for x in dataArray)\r\n maxMism= max(x.mism[0] for x in dataArray)\r\n \r\n \r\n minrq = min(x.rq for x in dataArray)\r\n minnm = min(x.nm[0] for x in dataArray)\r\n minmq= min(x.mq[0] for x in dataArray)\r\n minGaps= min(x.gaps[0] for x in dataArray) \r\n minMism= min(x.mism[0] for x in dataArray) \r\n \r\n \r\n # adjust stepsize for rq since the score behaves the other way\r\n quants = [1,2,3,4,5]\r\n tempa = maxrq-minrq\r\n stepsize = tempa/5\r\n \r\n rqQuants = [round(minrq+(i-1)*stepsize,3) for i in quants]\r\n rqQuants.reverse()\r\n rqQuants[-1] =0 # last entry is rounded bigger than the 
smallest in the dataset\r\n \r\n nmQuants = [i*maxnm/5 for i in quants]\r\n GapsQuants = [i*maxGaps/5 for i in quants]\r\n MismQuants = [i*maxMism/5 for i in quants]\r\n\r\n rocvector = []\r\n \r\n # i = NM,l = RQ, k = MQ\r\n for l in quants: # RQ\r\n for k in quants: # GAPS\r\n for j in quants: # MISMATCH\r\n temparray = [m for m in dataArray if m.gaps[0] <= GapsQuants[k-1] and m.mism[0] <= MismQuants[j-1] and m.rq >=rqQuants[l-1]]\r\n \r\n\r\n tempids = [m.id for m in temparray]\r\n uniquereads = {}\r\n for i in xrange(0,len(tempids)):\r\n uniquereads[tempids[i]] = \"\"\r\n\r\n mappedreads = len(uniquereads)\r\n \r\n \r\n \r\n templength = len(temparray)\r\n \r\n if templength == 0:\r\n continue\r\n else:\r\n tempTP = sum(x.mr[0] for x in temparray)\r\n tempFP =templength-tempTP\r\n F = round((float(mappedreads)/ readsize) ,3)\r\n sens = round((tempTP/ uniquehits) * F,3)\r\n if tempFP == 0:\r\n spec = 0\r\n else:\r\n spec = round((tempFP / uniquehits) * F,3) \r\n \r\n rocvector.append([rqQuants[l-1],GapsQuants[k-1],MismQuants[j-1],tempTP,tempFP,templength,sens,spec,F])\r\n \r\n #print (\"%d\\t%d\\t%d\\t\" % (templength,tempTP,tempFP))\r\n\r\n #0 = NM 4 = TP 7 = sens\r\n #1 = RQ 5 = FP 8 = 1-spec\r\n #2 = GAPS 6 = P 9 = F\r\n #append needed for last entry in AUC calculation\r\n rocvector.append([0,0,0,0,0,0,0,0,0]) \r\n nproc = np.array(rocvector)\r\n \r\n #write the sens and specificity values from nproc according to the enumeration in line 149. \r\n #specificity is in cell -2\r\n # sensitivity is in cell -3\r\n sens = [i[-3] for i in nproc]\r\n spez = [i[-2] for i in nproc]\r\n \r\n # adjust ROC curve. It is necessary that it the 1-specificity ends in 1.\r\n # for the last record copy the predecessor in sens to it\r\n # and write 1 to specificity \r\n spez[-1] = 1\r\n sens[-1] = sens[-2]\r\n \r\n\r\n rocarray1 = np.array([sens,spez])\r\n rocarray1 = rocarray1.flatten('F')\r\n rocarray1= rocarray1.reshape((len(spez),2))\r\n \r\n rocarray = np.array([sens,spez])\r\n rocarray = rocarray.flatten('F')\r\n rocarray = rocarray.reshape((len(spez),2))\r\n rocarray = np.sort(rocarray.view('float,float'), order=['f0','f1'], axis=0).view(np.float)\r\n \r\n rocarrayCorrected = rocarray\r\n \r\n #print rocarrayCorrected\r\n # project points where...\r\n for m in range(len(rocarrayCorrected)-2,-1,-1):\r\n if (rocarrayCorrected[m,1] >= rocarrayCorrected[m+1,1]):\r\n rocarrayCorrected[m,1] = rocarrayCorrected[m+1,1]\r\n\r\n \r\n #print rocarrayCorrected \r\n plt.hold(True)\r\n plt.figure()\r\n plt.subplot(111)\r\n #plt.scatter(spez, sens, c='b', marker='o', facecolor='red')\r\n #plt.plot(rocarray[:,1], rocarray[:,0]\r\n plt.plot(rocarrayCorrected[:,1],rocarrayCorrected[:,0], marker='o', markersize=7,linestyle='--', color='r', label='projected')\r\n plt.plot(rocarray1[:,1], rocarray1[:,0], linestyle=\"None\",label='real',marker='.',color='g')\r\n plt.xlabel('1-specificity')\r\n plt.ylabel('sensitivity')\r\n plt.title(r'ROC:'+filename)\r\n plt.axis([-0.1,1.1,-0.1,1.1])\r\n plt.grid(True)\r\n plt.legend(loc='lower right')\r\n plt.tight_layout()\r\n plt.savefig(prefix + \"_ROC.pdf\",format='pdf')\r\n plt.clf \r\n \r\n \r\n AUC = trapezoidal_rule(rocarrayCorrected[:,1], rocarrayCorrected[:,0])\r\n \r\n fobj = open(prefix+\"_roctable.txt\",\"w\")\r\n fobj.write(\"RQ\\tGAPS\\tMM\\tPTP\\tFP\\tP\\tSn\\t1-Sp\\tF\\r\\n\")\r\n for i in xrange(0,len(rocvector),1):\r\n temp = [str(k) for k in rocvector[i]]\r\n tempstr = \"\\t\".join(temp)\r\n fobj.write(tempstr+\"\\r\\n\")\r\n\r\n endtime= time.time()\r\n 
return(round(AUC,3))", "def score_func(\n log_path: str,\n dictionaries: List[str],\n score_options: ConfigDict,\n lat_paths: Dict[str, str],\n rescored_lat_paths: Dict[str, str],\n carpa_rescored_lat_paths: Dict[str, str],\n words_paths: Dict[str, str],\n tra_paths: Dict[str, str],\n) -> None:\n with open(log_path, \"w\", encoding=\"utf8\") as log_file:\n for dict_name in dictionaries:\n language_model_weight = score_options[\"language_model_weight\"]\n word_insertion_penalty = score_options[\"word_insertion_penalty\"]\n carpa_rescored_lat_path = carpa_rescored_lat_paths[dict_name]\n rescored_lat_path = rescored_lat_paths[dict_name]\n lat_path = lat_paths[dict_name]\n words_path = words_paths[dict_name]\n tra_path = tra_paths[dict_name]\n if os.path.exists(carpa_rescored_lat_path):\n lat_path = carpa_rescored_lat_path\n elif os.path.exists(rescored_lat_path):\n lat_path = rescored_lat_path\n scale_proc = subprocess.Popen(\n [\n thirdparty_binary(\"lattice-scale\"),\n f\"--inv-acoustic-scale={language_model_weight}\",\n f\"ark:{lat_path}\",\n \"ark:-\",\n ],\n stdout=subprocess.PIPE,\n stderr=log_file,\n env=os.environ,\n )\n penalty_proc = subprocess.Popen(\n [\n thirdparty_binary(\"lattice-add-penalty\"),\n f\"--word-ins-penalty={word_insertion_penalty}\",\n \"ark:-\",\n \"ark:-\",\n ],\n stdin=scale_proc.stdout,\n stdout=subprocess.PIPE,\n stderr=log_file,\n env=os.environ,\n )\n best_path_proc = subprocess.Popen(\n [\n thirdparty_binary(\"lattice-best-path\"),\n f\"--word-symbol-table={words_path}\",\n \"ark:-\",\n f\"ark,t:{tra_path}\",\n ],\n stdin=penalty_proc.stdout,\n stderr=log_file,\n env=os.environ,\n )\n best_path_proc.communicate()", "def run(input_folder, H5_FILEPATH, precomputed_artist_ranking, precomputed_album_ranking, precomputed_track_ranking):\t\t\n\n\t# def _dump_to_dict(dump_filepath):\n\t# \t\"\"\"\n\t# \tConvert a numpy array in the form (('k1', v1), ('k2', v2), ... , ('kn', vn)) to a dictionary. It also deletes an empty key (''), and the dictionary is converted to a collection and is ordered by value\n\t# \t\"\"\"\n\t# \twith open(dump_filepath, 'rb') as handle:\n\t# \t\tf = cPickle.load(handle)\n\t# \tt0 = time.time()\n\t# \td = {k : v for k, v in f}; del f\t\n\t# \tprint '{0} secs for creating dict from dump {1}'.format(int(time.time() - t0), dump_filepath),\n\t# \t# do not consider empty MBID's\n\t# \tif d.has_key(''): d.pop('', None) \n\t# \t# return sorted ranking by value\n\t# \treturn collections.OrderedDict(sorted(d.items(), key=lambda t: t[1])) \n\n\n\n\tglobal size\n\tglobal rank\n\n\t# Generating ordered dictionaries of the rankings\n\tt0 = time.time()\n\toverall_ranking_artist = GVM_classes.dump_to_dict(precomputed_artist_ranking)\n\t# if rank == 0: print ' size: {0}'. format(sys.getsizeof(overall_ranking_artist))\n\n\toverall_ranking_album = GVM_classes.dump_to_dict(precomputed_album_ranking)\n\t# if rank == 0: print ' size: {0}'. format(sys.getsizeof(overall_ranking_album))\n\n\toverall_ranking_track = GVM_classes.dump_to_dict(precomputed_track_ranking)\n\t# if rank == 0: print ' size: {0}'. 
format(sys.getsizeof(overall_ranking_track))\n\tprint 'Rank', rank, 'features in', str(int(time.time() - t0)), 'secs'\n\n\t# ##########################################################\n\t# Iterate over all files in a TAR, searching for all MBIDs\n\t# ##########################################################\n\n\tfile_list = [] # List of all files in input_folder\n\tfor root, subFolders, files in os.walk(input_folder):\n\t\tfor f in files:\n\t\t\tif f.split('/')[-1].startswith('.'):\n\t\t\t\tcontinue\n\t\t\tfile_list.append('/'.join([root,f]))\n\n\t# print 'RANK:', rank, '\\nFILE_LIST:', file_list\n\t# print 'FILE: ', file_list[size * int(factor) + rank]\n\ttar_object = tarfile.open('/'.join([file_list[size * int(factor) + rank]]))\n\ttar_object.extractall(TEMP_FOLDER)\n\n\t# print size * int(factor) + rank, file_list[size * int(factor) + rank]\n\n\t#list with dictionaries of aggregated features\n\tlist_of_dict_agg_feat= []\n\n\n\n\tfor file_in_tar in GVM_classes.folder_iterator(TEMP_FOLDER)[:]:\n\t\tlistening_features = Features.ListeningFeatures(file_in_tar) \n\t\ttry:\n\t\t\t# Metadata\n\t\t\t\n\t\t\t\n\t\t\t# Feature Extraction\n\t\t\tcollected_features = dict()\n\n\t\t\tcollected_features['metadata'] = listening_features.metadata_dict()\n\t\t\tcollected_features['mainstreamness'] = listening_features.mainstreamness(overall_ranking_artist, overall_ranking_album, overall_ranking_track)\n\n\t\t\t\n\n\t\t\tlist_of_dict_agg_feat.append(collected_features)\n\n\t\t\t# print \"In file {0}, there are {1} extracted users\".format(file_list[size * int(factor) + rank], len(list_of_dict_agg_feat))\n\n\t\texcept:\n\t\t\tprint file_list[size * int(factor) + rank].split('/')[-1], file_in_tar.split('/')[-1], sys.exc_info()\n\n\treturn list_of_dict_agg_feat", "def generateScore(self):\n totalFreq = self.lazySum(key=None)\n for file in self._candidate_files:\n filename = os.path.basename(file)\n score_file = os.path.join(self._score_dir, filename)\n with open(score_file, 'w') as ofd:\n with open(file) as ifd:\n for line in ifd:\n words = line.strip().split('\\t')\n if len(words) < 2 or any(map(lambda word:len(word)<2, words)):\n continue\n\n XFreq = self.lazySum(words[0])\n YFreq = self.lazySum(words[1])\n XYFreq = self.lazySum(line.strip())\n # frequences filter\n #if XYFreq < 2 or XYFreq > 24:\n # continue\n if YFreq == 0 or XFreq == 0 or XYFreq == 0:\n # because when generating grams, we last last words' frequency\n continue\n PX = XFreq * 1.0 / totalFreq\n PY = YFreq * 1.0 / totalFreq\n PXY = XYFreq * 1.0 / totalFreq\n score = math.log(PXY/PX/PY, 2) * XYFreq\n #print \"Freq:\", XFreq, YFreq, XYFreq\n result = \"{0}\\t{1:.2f}\\n\".format(line.strip(), score)\n ofd.write(result)", "async def get_file_text_segments_and_parallels(\n file_name: str,\n active_segment: str = \"none\",\n score: int = 0,\n par_length: int = 0,\n co_occ: int = 0,\n limit_collection: List[str] = Query([]),\n multi_lingual: List[str] = Query([]),\n):\n #parallel_ids_type = \"parallel_ids_limited\"\n parallel_ids_type = \"parallel_ids\"\n # when the limit_collection filter is active,\n # we have to fetch all possible parallels.\n if len(limit_collection) > 0:\n parallel_ids_type = \"parallel_ids\"\n start_int = 0\n if active_segment != \"none\":\n active_segment = unquote(active_segment)\n try:\n text_segment_count_query_result = get_db().AQLQuery(\n query=main_queries.QUERY_SEGMENT_COUNT,\n bindVars={\"segmentnr\": active_segment},\n )\n start_int = text_segment_count_query_result.result[0] - 400\n except DocumentNotFoundError 
as error:\n print(error)\n raise HTTPException(status_code=404, detail=\"Item not found\") from error\n except AQLQueryError as error:\n print(\"AQLQueryError: \", error)\n raise HTTPException(status_code=400, detail=error.errors) from error\n except KeyError as error:\n print(\"KeyError: \", error)\n raise HTTPException(status_code=400) from error\n if start_int < 0:\n start_int = 0\n limitcollection_positive, limitcollection_negative = get_collection_files_regex(\n limit_collection, get_language_from_filename(file_name)\n )\n current_bind_vars ={\n \"parallel_ids_type\": parallel_ids_type,\n \"filename\": file_name,\n \"limit\": 800,\n \"startint\": start_int,\n \"score\": score,\n \"parlength\": par_length,\n \"coocc\": co_occ,\n \"multi_lingual\": multi_lingual,\n \"limitcollection_positive\": limitcollection_positive,\n \"limitcollection_negative\": limitcollection_negative,\n }\n try:\n text_segments_query_result = get_db().AQLQuery(\n query=main_queries.QUERY_TEXT_AND_PARALLELS,\n bindVars=current_bind_vars,\n )\n if start_int == 0:\n add_source_information(file_name,text_segments_query_result.result[0])\n return text_segments_query_result.result[0]\n\n except DocumentNotFoundError as error:\n print(error)\n raise HTTPException(status_code=404, detail=\"Item not found\") from error\n except AQLQueryError as error:\n print(\"AQLQueryError: \", error)\n raise HTTPException(status_code=400, detail=error.errors) from error\n except KeyError as error:\n print(\"KeyError: \", error)\n raise HTTPException(status_code=400) from error", "def readDataForPhasingScoreComputation(options,phase):\n filename=options.output_directory+\"/\"+options.input_filename+\"_bowtie1.bwt\"\n fhr=open(filename,\"r\")\n score={}\n readcount={}\n readseq={}\n for line in fhr:\n read_id, strand, chromosome, coordinate, alignment, quality, mapped_times = line.strip().split()\n coordinate=int(coordinate)\n mapped_times=int(mapped_times)+1\n length=len(alignment)\n if length!=phase:continue\n if strand=='-':\n coordinate+=2\n seq=str(Seq(alignment).reverse_complement())\n else:\n seq=alignment\n if 'x' in read_id.split(\"_\")[-1]:\n count=int(read_id.split(\"_\")[-1][1:])\n else:\n count=int(read_id.split(\"_\")[-1])\n \n if chromosome not in score:\n score[chromosome]={}\n if coordinate not in score[chromosome]:\n score[chromosome][coordinate]=0\n score[chromosome][coordinate]+=count\n \n if chromosome not in readcount:\n readcount[chromosome]={}\n if coordinate not in readcount[chromosome]:\n readcount[chromosome][coordinate]={}\n if strand not in readcount[chromosome][coordinate]:\n readcount[chromosome][coordinate][strand]=count\n \n if chromosome not in readseq:\n readseq[chromosome]={}\n if coordinate not in readseq[chromosome]:\n readseq[chromosome][coordinate]={}\n if strand not in readseq[chromosome][coordinate]:\n readseq[chromosome][coordinate][strand]=seq\n return score,readcount,readseq", "def import_musicxml_file_idea(scorePath, museScoreFile):\n\n myScore = m.converter.parse(scorePath+'/'+museScoreFile, format='musicxml')\n num_parts=get_number_of_parts(myScore)\n print(\"number_of_parts:\",num_parts)\n \n # Limit max Parts that can be processed \n if num_parts > 2:\n sys.exit(\"Error: this program can only process max 2 parts input musicxml file!\\nProgram aborted.\") \n\n # loop over Parts\n part_cnt=0\n music_info=dict()\n key=''\n for p in myScore.recurse().parts:\n for e in p.recurse().getElementsByClass('TimeSignature'): # meter.timeSignature:\n print(\"time signature score: \", e)\n 
used_time_signature = e # Because of grant staff only use the last\n key='time_signature'+str(part_cnt)\n print('key:', key)\n music_info[key]=used_time_signature\n print('music_info[key]:',music_info[key])\n\n for e in myScore.recurse().getElementsByClass('KeySignature'): # meter.timeSignature:\n print(\"key signature score: \", e)\n used_key_signature = e # Because of grant staff only use the last\n key='key_signature'+str(part_cnt)\n print('key:', key)\n music_info[key]=used_key_signature\n print('music_info[key]:',music_info[key])\n\n time_list = []\n note_property_list=[]\n smallest_quarterlength=sys.float_info.max\n\n for element in myScore.recurse().notes:\n # Encoding X\n # Fill time\n time_list.append(element.measureNumber) \n time_list.append(element.offset) \n #print(\"Time_list iter:\", time_list)\n \n # Encoding Y \n # Fill note properties\n note_property_list.append(nc.getNoteValue(element.name))\n note_property_list.append(element.octave)\n note_property_list.append(element.duration.quarterLength)\n # search smallest quarterlength\n if element.duration.quarterLength < smallest_quarterlength:\n smallest_quarterlength = element.duration.quarterLength\n #print(\"Note_property_list iter:\", note_property_list)\n \n \n used_smallest_quarterlength = smallest_quarterlength \n key='smallest_quarterlength'+str(part_cnt)\n print('key:', key)\n music_info[key]=used_smallest_quarterlength \n print('music_info[key]:',music_info[key])\n\n # Create 2 dimensional array for the time list with 2 elements per row\n # First index -1 creates dynamically an amount off rows based on the size of the time list\n X = np.array(time_list).reshape(-1, 2)\n #print(\"X.shape\",X.shape)\n #print(X)\n\n # put in music_info\n used_X = X \n key='X'+str(part_cnt)\n print('key:', key)\n music_info[key]=used_X\n print('music_info[key]:',music_info[key])\n \n # Create 2 dimension array for the note property list with 3 elements per row\n # First index -1 creates dynamically an amount off rows based on the size of the note list\n Y = np.array(note_property_list).reshape(-1, 3)\n #print(\"Y.shape\",Y.shape)\n #print(Y)\n\n used_Y = Y \n key='Y'+str(part_cnt)\n print('key:', key)\n music_info[key]=used_Y\n print('music_info[key]:',music_info[key])\n\n part_cnt=part_cnt+1\n\n '''\n # Get used TimeSignature of input file\n for e in myScore.recurse().getElementsByClass('TimeSignature'): # meter.timeSignature:\n print(\"time signature score: \", e)\n used_time_signature = e # Because of grant staff only use the last\n ''' \n \n '''\n # Get used KeySignature of input file\n for e in myScore.recurse().getElementsByClass('KeySignature'): # meter.timeSignature:\n print(\"key signature score: \", e)\n used_key_signature = e # Because of grant staff only use the last\n '''\n\n ''' \n time_list = []\n note_property_list=[]\n smallest_quarterlength=sys.float_info.max\n '''\n \n '''\n for element in myScore.recurse().notes:\n # Encoding X\n # Fill time\n time_list.append(element.measureNumber) \n time_list.append(element.offset) \n #print(\"Time_list iter:\", time_list)\n \n # Encoding Y \n # Fill note properties\n note_property_list.append(nc.getNoteValue(element.name))\n note_property_list.append(element.octave)\n note_property_list.append(element.duration.quarterLength)\n # search smallest quarterlength\n if element.duration.quarterLength < smallest_quarterlength:\n smallest_quarterlength = element.duration.quarterLength\n #print(\"Note_property_list iter:\", note_property_list)\n \n # Create 2 dimensional array for the time 
list with 2 elements per row\n # First index -1 creates dynamically an amount off rows based on the size of the time list\n X = np.array(time_list).reshape(-1, 2)\n #print(\"X.shape\",X.shape)\n #print(X)\n \n # Create 2 dimension array for the note property list with 3 elements per row\n # First index -1 creates dynamically an amount off rows based on the size of the note list\n Y = np.array(note_property_list).reshape(-1, 3)\n #print(\"Y.shape\",Y.shape)\n #print(Y)\n '''\n \n '''\n return(X, Y, used_time_signature, used_key_signature, smallest_quarterlength) # import_musicxml_file_idea \n '''\n return(music_info) # import_musicxml_file_idea ", "def parse_varQs(files):\n\n bestKs = []\n for file in files:\n handle = open(file,'r')\n Q = np.array([list(map(float,line.strip().split())) for line in handle])\n Q = Q/utils.insum(Q,[1])\n handle.close()\n\n N = Q.shape[0]\n C = np.cumsum(np.sort(Q.sum(0))[::-1])\n bestKs.append(np.sum(C<N-1)+1)\n\n return bestKs", "def process_quasar(folder, set_type, doc_size):\n print(\"def process_quasar(folder, set_type, doc_size) ...\")\n\n # create counter for enumeration of batch-files\n counter = 0\n\n # Question File and Path\n question_file = set_type + \"_questions.json\"\n question_file_path = Path(\"/\".join([folder, \"questions\", question_file]))\n\n # Contexts File and Path\n context_file = set_type + \"_contexts.json\"\n context_file_path = Path(\"/\".join([folder, \"contexts\", doc_size, context_file]))\n\n with open(question_file_path, \"r\") as qf, open(context_file_path, \"r\") as cf:\n question_id_list = list()\n data_dict = dict()\n batches_data = list()\n\n # Parse each line separate to avoid memory issues\n for line in qf:\n parsed_question = json.loads(line)\n question_id = parsed_question[\"uid\"]\n question_id_list.append(question_id)\n data_dict[question_id] = {\"answer\": parsed_question[\"answer\"]}\n data_dict[question_id].update({\"question\": parsed_question[\"question\"]})\n\n # in order to create batches with the size of 30 and to avoid Memory Errors\n if len(data_dict) == 30:\n contexts_counter = 0\n for line2 in cf:\n parsed_answer = json.loads(line2)\n # Answer ID should have a corresponding question ID\n answer_id = parsed_answer[\"uid\"]\n if answer_id in question_id_list:\n contexts_counter += 1\n # List of contexts with retrieval scores, contexts are sorted from highest to lowest score\n answer_contexts = parsed_answer[\"contexts\"]\n # remove scores of contexts\n cleaned_answer_contexts = [ls_elem[1] for ls_elem in answer_contexts]\n data_dict[answer_id].update({\"contexts\": cleaned_answer_contexts})\n if contexts_counter == 30:\n contexts_counter = 0\n break\n\n # add information where answer in context is\n answers_list, questions_list, contexts_list = add_end_idx(data_dict)\n\n # create the batch-encodings\n batches_data.append(create_encodings(answers_list, questions_list, contexts_list))\n data_dict.clear()\n question_id_list.clear()\n # if len(batches_data) % 1000 == 0:\n\n print(\"\\n length batches_data \" + str(len(batches_data)) + \" \" + str(counter))\n\n if len(batches_data) == 2000:\n counter += 1\n save_batch_files(\"/local/anasbori/bert_odqa/ODQA_Bert_Project/batch_output\", batches_data,\n counter)\n\n batches_data.clear()\n\n counter += 1\n save_batch_files(Path(\"/local/anasbori/bert_odqa/ODQA_Bert_Project/batch_output\"), batches_data, counter)", "def compute(self, result_file_dict):\r\n for part in self.parts:\r\n #=====================Need to change, temporal=========================\r\n if part 
== 'train':\r\n continue # because the train not have the label\r\n #=======================================================================\r\n gt = self.gt_dict[part]\r\n result_file = result_file_dict[part]\r\n # import ipdb; ipdb.set_trace()\r\n for key, item in result_file.items():\r\n self._result_name = item\r\n # score_records, num_videos = self.load_results(result_file)\r\n score_records, num_videos = self.load_results(item)\r\n logger.info(f'Compute Metric of {item}')\r\n assert num_videos == len(gt), f'the number of saved videos does not match the ground truth, {num_videos} != {len(gt)}'\r\n temp_result = self.eval_method(score_records, gt, str(key))\r\n if temp_result > self.optimal_resulst:\r\n self.optimal_resulst = temp_result\r\n \r\n return self.optimal_resulst", "def __call__(self, query, texts, multilabel=True, workers=0):\n\n scores = []\n for q in [query] if isinstance(query, str) else query:\n # Pass (query, text) pairs to model\n result = self.pipeline([{\"text\": q, \"text_pair\": t} for t in texts], top_k=None, function_to_apply=\"none\", num_workers=workers)\n\n # Apply score transform function\n scores.append(self.function([r[0][\"score\"] for r in result], multilabel))\n\n # Build list of (id, score) per query sorted by highest score\n scores = [sorted(enumerate(row), key=lambda x: x[1], reverse=True) for row in scores]\n\n return scores[0] if isinstance(query, str) else scores", "def main_predefined_split():\n\n average_performance = []\n fold_num = 'predefined'\n output_file_folder = \"output/{}\".format(args.experiment_name)\n output_file_name = \"{}/lnnel_{}.csv\".format(output_file_folder, fold_num)\n Path(output_file_folder).mkdir(parents=True, exist_ok=True)\n args.output_file_name = output_file_name\n\n if args.use_blink:\n df_train = pd.read_csv(\"./data/lcquad/blink/lcquad_train_sorted.csv\")\n df_test = pd.read_csv(\"./data/lcquad/blink/lcquad_test_sorted.csv\")\n else:\n df_train = pd.read_csv(\"./data/lcquad/dbpedia/lcquad_train_sorted.csv\")\n df_test = pd.read_csv(\"./data/lcquad/dbpedia/lcquad_test_sorted.csv\")\n\n # filter out the questions with single positive or many negatives in trianing set\n filtered_question_mentions = []\n for qm in df_train.QuestionMention.unique():\n df_ = df_train[df_train.QuestionMention == qm]\n if df_.Label.sum() == 0:\n filtered_question_mentions.append(qm)\n if df_.Label.sum() == 1 and df_.shape[0] == 1:\n filtered_question_mentions.append(qm)\n # print(df_.Label.values)\n df_train_split_filtered = df_train[~df_train.QuestionMention.isin(filtered_question_mentions)]\n df_train_split_filtered = df_train_split_filtered.sort_values(by=['QuestionMention', 'Label'])\n df_train = df_train_split_filtered\n\n # train\n features_train = np.array(\n [np.fromstring(s[1:-1], dtype=np.float, sep=', ') for s in df_train.Features.values])\n x_train = torch.from_numpy(features_train).float()\n y_train = torch.from_numpy(df_train.Label.values).float().reshape(-1, 1)\n m_labels_train = df_train.Mention_label.values\n ques_train = df_train.Question.values\n\n # test\n features_test = np.array(\n [np.fromstring(s[1:-1], dtype=np.float, sep=', ') for s in df_test.Features.values])\n x_test = torch.from_numpy(features_test).float()\n y_test = torch.from_numpy(df_test.Label.values).float().reshape(-1, 1)\n m_labels_test = df_test.Mention_label.values\n ques_test = df_test.Question.values\n\n # train model and evaluate\n model = pick_model(args.model_name, args.alpha)\n model = model.to(device)\n\n # move to gpu\n x_train, y_train = 
x_train.to(device), y_train.to(device)\n x_test, y_test = x_test.to(device), y_test.to(device)\n\n print(model)\n\n print(\"model: \", args.model_name, args.alpha)\n print(model(x_train, m_labels_train))\n\n print(\"y_train sum\", sum(y_train), sum(y_train) / len(y_train))\n print(\"y_test sum\", sum(y_test), sum(y_test) / len(y_test))\n\n # aggregate the data into train, val, and test\n train_data = (x_train, y_train, m_labels_train, ques_train)\n print(\"train:\", x_train.shape, y_train.shape, m_labels_train.shape, ques_train.shape)\n test_data = (x_test, y_test, m_labels_test, ques_test)\n print(\"test:\", x_test.shape, y_test.shape, m_labels_test.shape, ques_test.shape)\n\n # check class distribution\n print(\"y_train sum\", sum(y_train), sum(y_train) / len(y_train))\n print(\"y_test sum\", sum(y_test), sum(y_test) / len(y_test))\n\n train(model, train_data, test_data, test_data, args.checkpoint_name, args.num_epoch, args.margin,\n args.learning_rate)\n test_pred, best_scores = test(x_test, m_labels_test, ques_test, args.alpha, args.checkpoint_name,\n args.model_name,\n args.output_file_name)\n with open(args.log_file_name, 'a') as f:\n f.write(\n \"model={}; use_fixed_threshold={}; alpha={}; p={}; r={}; f1={}; lr={}; margin={}\\n\".format(\n args.model_name,\n args.use_fixed_threshold,\n args.alpha,\n best_scores[\n 'precision'],\n best_scores[\n 'recall'],\n best_scores['f1'],\n args.learning_rate,\n args.margin))\n print(\"model={}; use_fixed_threshold={}; alpha={}; p={}; r={}; f1={}\\n\".format(args.model_name,\n args.use_fixed_threshold,\n args.alpha,\n best_scores['precision'],\n best_scores['recall'],\n best_scores['f1']))\n average_performance.append([best_scores['precision'], best_scores['recall'], best_scores['f1']])\n\n average_performance = np.array(average_performance)\n print(\"Avg performance is prec - rec - f1: \", average_performance.mean(0))", "def load_scores(score_dir):\n score_files = fi.find_files(score_dir, 'sc')\n scores = {\n get_target_name(f):\n pd.read_csv(f, delimiter='\\s*', index_col='description',\n engine='python')\n for f in score_files\n }\n # If duplicate structures present, remove all but first.\n for x, y in scores.items():\n scores[x] = y.loc[~y.index.duplicated(keep='first')]\n return scores", "def retrieve_scores(self, filenames, batches = []):\n \n def get_batch_id(filename):\n return int(filename[filename.rfind(\"_\") + 1:])\n \n data = []\n \n # Filenames have to be sorted to ensure correct batch is extracted\n filenames = sorted(filenames, key = get_batch_id)\n \n if not batches: \n for filename in filenames:\n data.append(self.load_batch_scores(filename))\n else:\n for entry in batches:\n try:\n data.append(self.load_batch_scores(filenames[entry]))\n except IndexError:\n print (\"Attempted to access filename of index\", entry)\n return data", "def test_score_ddp(preds, targets, exact_match, f1):\n world_size = 2\n mp.spawn(_test_score_ddp_fn, args=(world_size, preds, targets, exact_match, f1), nprocs=world_size, join=False)" ]
[ "0.575768", "0.56732047", "0.5492514", "0.548684", "0.54390764", "0.5437585", "0.54353154", "0.5411575", "0.53655994", "0.5360998", "0.53581184", "0.535637", "0.5347568", "0.5334679", "0.5334244", "0.53258014", "0.53248626", "0.53238755", "0.5297921", "0.5297799", "0.52928126", "0.5278666", "0.52774155", "0.5277309", "0.5260846", "0.5255373", "0.5252253", "0.5241049", "0.52344024", "0.52332723" ]
0.5791598
0
Return a list of strings of METAR meteorological data for the specified station on the specified date.
def get_met_data(self, stn, ignore_errors, retries, **kwargs): # Validate the common station name and convert it to the # corresponding official station ID try: stn = self.stns[stn] except: raise UnknownStationError, stn # Process the date components in the keyword args into # instance attribute values for kw in kwargs: if kw in ('year', 'month', 'day'): self.__dict__[kw] = kwargs[kw] else: raise UnknownParameterError, (kw, kwargs[kw]) # Get the list of METARs try: self.data = self._get_metars(stn, retries) except: raise # Validate and clean up the METAR data try: self._clean_data(stn, ignore_errors) except: raise return self.data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def metar_data(station, begin, end, ignore_errors, retries):\n\n def _parse_date(date_str):\n \"\"\"Minimal date parser.\"\"\"\n yr, mo, day = [int(x) for x in date_str.split('-')]\n try:\n return date(yr, mo, day)\n except ValueError:\n raise InvalidDateError, begin\n \n metar = METARdata()\n # Validate the beginning and end dates\n if not begin:\n return metar.get_met_data(station, ignore_errors, retries)\n else:\n date1 = _parse_date(begin)\n if not end:\n date2 = (datetime.today() - timedelta(days=1)).date()\n else:\n date2 = _parse_date(end)\n if date1 > date2:\n raise EndDateBeforeBeginError, (begin, end)\n # Retrieve the METAR data for the date range\n metars = []\n while date1 <= date2:\n metars.extend(metar.get_met_data(station, ignore_errors, retries,\n year=date1.year, month=date1.month,\n day=date1.day))\n date1 += timedelta(days=1)\n return metars", "def meteo(station='caqc0177'):\r\n long=getLongForecast(station)\r\n return dict(\r\n title= long[0] + \" - \" + station,\r\n message=datetime.now(),\r\n year=datetime.now().year,\r\n longTerm=long[1],\r\n shortTerm=getShortForecast(station)\r\n )", "def read_metar_ZA(metar_url, date_as_ISO_text=False):\n\n \n metar_list = [] # The list of dictionaries that will be returned, containing METAR data\n \n # Regular expressions to extract the wind\n re_wind_no_gust = re.compile(r'(?P<direction>[0-9]{3,3})(?P<spd>[0-9]{2,2})KT') # 10005KT\n re_wind_gust = re.compile(r'(?P<direction>[0-9]{3,3})(?P<spd>[0-9]{2,2})G(?P<gust>[0-9]{2,2})KT') # 10005G15KT\n re_wind_variable = re.compile(r'(?P<direction>VRB)(?P<spd>[0-9]{2,2})KT') # VRB05KT\n re_no_data = re.compile(r'No Data For (?P<missing>[A-Z,a-z]{4,4})', re.IGNORECASE) # No data for FAGC\n re_temp = re.compile(r' (?P<temp>[M]?[0-9]{2,2})+/(?P<dewpt>[M]?[0-9]{2,2}) ') #temp in format 20/12 or 20/M02 or M03/M10 etc. \n re_qnh = re.compile(r'Q(?P<qnh>[0-9]{3,4})')\n \n \n # Retrieve the webpage containing METAR data\n try:\n r = requests.get(metar_url, verify=False)\n except:\n current_app.logger.error(f\"Error retrieving METAR - failed at REQUESTS call\")\n return None\n \n \n # If error retrieving page, return None\n if r.status_code != 200: \n current_app.logger.error(f\"Error retrieving METAR: URL = {metar_url}: {r.status_code} - {r.reason}\")\n return None\n \n # Setup Beautiful Soup, and extract all the \"PRE\" tags - these are where the METAR data is stored\n soup = BeautifulSoup(r.text, 'html.parser')\n mets = soup.find_all('pre')\n \n #Connect to DB\n sess = sqa_session()\n \n # Loop through the individual METAR\n for met in mets:\n \n # Get just the text. Sould be: similar to: 'View DecodedMETAR FAOR 100530Z 19015KT CAVOK 15/M03 Q1020 NOSIG='\n met_string = str(met.text)\n \n is_speci = False # Is this a SPECI and not a METAR - default to False\n is_correction = False #Is this METAR a correction of an earlier (i.e. 'METAR COR xxxxxxxxx')\n \n # Determine if this is a METAR, a SPECI, or a line to be ignored\n s = met_string.find('METAR') # Is it a METAR?\n \n # If text not found, this is not a METAR - is it a SPECI?\n if s < 0:\n s = met_string.find('SPECI') # Is it a SPECI\n\n if s >= 0: # It is a speci\n is_speci = True\n \n else: # It's not a SPECI either, so continue to the next element\n continue\n\n s += 5 # 5 is the length of the text METAR and SPECI - we want to remove this.\n # Remove METAR/SPECI text - we should now have the raw METAR/SPECI only (eg. 
'FAOR 100530Z 19015KT CAVOK 15/M03 Q1020 NOSIG=')\n met_string = met_string[s:].strip()\n \n # If this METAR is a Correction, then flag and remove the 'COR ' (eg: METAR COR FAHS 011200Z AUTO 30009KT 34/02 Q1017=\n if met_string[:4] == 'COR ':\n is_correction = True\n met_string = met_string[4:]\n \n # Extract aerodrome name\n aerodrome = met_string[:4]\n # Get aerodrome NavPoint - contains coordinates\n aero_point = sess.query(NavPoint).filter(NavPoint.ICAO_Code == aerodrome).first()\n \n # If aerdrome not found, this is a non-aerodrome station - ignore it (May implement later)\n if not aero_point:\n continue\n \n # Get the date and time\n day = int(met_string[5:7])\n hr = int(met_string[7:9])\n mn = int(met_string[9:11])\n \n met_date = calc_metar_taf_date(day, hr, mn)\n \n #Get the winds\n wind_variable = False # Wind defaults to not light and variable\n wind_gust = 0 # Gust defaults to 0\n no_wind = False #Is there no wind data avail (i.e. /////KT)\n \n \n #Check whether there is now wind specified (i.e. /////KT)\n if met_string.find('///KT') > 0:\n no_wind = True\n wind_dir = 0\n wind_spd = 0\n else:\n \n # Use regular expression to try to extract non-gusting wind (eg. 10010KT)\n tmp = re_wind_no_gust.search(met_string)\n if tmp:\n try:\n wind_dir = tmp.group('direction')\n wind_spd = tmp.group('spd')\n except:\n current_app.logger.error(f\"Error passing METAR winds: {met_string}\")\n \n # Use regular expression to try to extract gusting wind (eg. 10010G15KT)\n elif re_wind_gust.search(met_string):\n tmp = re_wind_gust.search(met_string)\n try:\n wind_dir = tmp.group('direction')\n wind_spd = tmp.group('spd')\n wind_gust = tmp.group('gust')\n except:\n current_app.logger.error(f\"Error passing METAR wind GUSTING: {met_string}\")\n \n # Use regular expression to try to extract variable wind (eg. VRB02KT)\n elif re_wind_variable.search(met_string):\n tmp = re_wind_variable.search(met_string)\n try:\n wind_dir = -1\n wind_spd = tmp.group('spd')\n wind_variable = True\n except:\n current_app.logger.error(f\"Error passing METAR wind VARIABLE: {met_string}\")\n\n # Use regular expression to try to extract Temp and Dewpoint (eg. 25/M02)\n temperature = 0\n dew_point = 0\n\n tmp = re_temp.search(met_string)\n if tmp:\n try:\n temperature = int(tmp.group('temp').replace('M','-'))\n dew_point = int(tmp.group('dewpt').replace('M','-'))\n except:\n current_app.logger.error(f\"Error passing METAR temperature: {met_string}\")\n\n\n # Use regular expression to try to extract QNH (eg. 
Q1025)\n qnh = 1013\n \n tmp = re_qnh.search(met_string)\n if tmp:\n try:\n qnh = tmp.group('qnh')\n except:\n current_app.logger.error(f\"Error passing METAR QNH: {met_string}\")\n \n if date_as_ISO_text == True:\n met_date = datetime.isoformat(met_date)\n \n met_dict = {'aerodrome': aerodrome , 'coords': (aero_point.Longitude, aero_point.Latitude), \n 'has_no_data': False , 'is_speci': is_speci, 'is_correction': is_correction, 'time': met_date, \n 'wind': {'no_wind_data': no_wind, 'direction': wind_dir, 'speed': wind_spd, 'gusting': wind_gust, 'is_variable': wind_variable}, #(wind_dir, wind_spd, wind_gust, wind_variable) , \n 'temperature': temperature, 'dew_point': dew_point,\n 'qnh': qnh,\n 'body': met_string}\n \n metar_list.append(met_dict)\n \n # Check for any stations with no data - search the whole page\n aero_no_datas = re_no_data.findall(soup.text)\n # If there are stations with no data, iterate through them\n if aero_no_datas:\n for aerodrome in aero_no_datas:\n # Get aerodrome NavPoint - contains coordinates\n aero_point = sess.query(NavPoint).filter(NavPoint.ICAO_Code == aerodrome).first()\n \n # If aerdrome not found, this is a non-aerodrome station - ignore it (May implement later)\n if not aero_point:\n continue\n \n # Add a disctionary item\n met_dict = {'aerodrome': aerodrome , 'coords': (aero_point.Longitude, aero_point.Latitude) , \n 'has_no_data': True, 'body': f'No data for {aerodrome}'}\n \n metar_list.append(met_dict)\n\n return metar_list", "def station_measurement(self, station_id):\n try:\n params = {\n 'id': station_id\n }\n measurements_api = requests.get(url=self._measurement_url, params=params)\n measurements = []\n for measurement in measurements_api.json():\n measurement_array = measurement.split(';')\n measurements.append(measurement_array)\n\n return measurements\n\n except (RequestException, KeyError) as exc:\n LOG.error('could not read from api: %s', exc)\n raise SlfError('could not read from api: %s' % exc) from None", "def get_mars_ephemeris(timedate):\n t = Time(timedate)\n with solar_system_ephemeris.set('builtin'):\n mars = get_body('mars', t) \n return mars", "def test():\n temp_data = fetch_temp_data(\n (\"https://opendata-download-metobs.smhi.se/api/version/\" +\n \"latest/parameter/1/station/52350/period/latest-day/data.json\"))\n data = temp_series(temp_data)\n print(data)", "def temp_monthly():\n # Calculate the date 1 year ago from last date in database\n prev_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)\n results = session.query(Measurement.tobs).\\\n filter(Measurement.station == 'USC00519281').\\\n filter(Measurement.date >= prev_year).all()\n # Unravel results into a ID array and convert to a list\n temps = list(np.ravel(results))\n \n # Return the results\n return jsonify(temps)", "def getStationData(self):\n dtime = datetime.strptime(self.refTime, \"%y%m%d/%H%M\")\n trange = TimeRange()\n trange.setStart(dtime)\n trange.setEnd(dtime)\n dataTime = DataTime(refTime=dtime, validPeriod=trange)\n req = StationDataRequest()\n req.setPluginName(self.pluginName)\n req.setStationId(self.stationId)\n req.setRefTime(dataTime)\n req.setParmList(self.parmList)\n req.setPartNumber(self.partNumber)\n resp = self.client.sendRequest(req)\n\n for i, rec in enumerate(resp):\n resp[i] = {\n key.decode() if isinstance(key, bytes) else key:\n val.decode() if isinstance(val, bytes) else val\n for key, val in rec.items()\n }\n\n return resp", "def stations():\n\n return station_list", "def _interpolate_meteorological_data(dset, data, rundate):\n 
rundate = datetime(rundate.year, rundate.month, rundate.day)\n for field, station in [(f, f[4:]) for f in data.keys() if f.startswith(\"met_\")]:\n log.debug(f\"Meteorological data available for station {station}\")\n\n met_time = data[field].pop(\"met_time\")\n flat_list = [item for sublist in met_time for item in sublist]\n met_time_float = np.array([(flat_list[i] - rundate).total_seconds() for i in range(0, len(flat_list))])\n met_time_unique, met_index = np.unique(met_time_float, return_index=True)\n\n diff = len(met_time_float) - len(met_time_unique)\n if diff > 0:\n log.dev(f\"Removed duplicate met data for station {station}\")\n log.dev(\"Do this for the actual obs data also!\")\n if len(met_time_unique) == 1:\n for met_type in data[field].keys():\n data[field][met_type] = np.repeat(data[field][met_type][0], dset.num_obs)\n continue\n\n # Extrapolation one month before/after\n # (this is overkill, most of these values will be removed later when taking the diagonal)\n min_time = min(met_time_unique) - 31 * 86400\n max_time = max(met_time_unique) + 31 * 86400\n met_time_unique = np.hstack((np.array(min_time), met_time_unique, np.array(max_time)))\n\n for met_type in data[field].keys():\n met_data_array = data[field][met_type]\n flat_list = [item for sublist in met_data_array for item in sublist]\n met_data_array = np.array([flat_list[i] for i in met_index])\n met_data_array = np.hstack((met_data_array[0], met_data_array, met_data_array[-1]))\n data[field][met_type] = interpolation.interpolate(\n met_time_unique, met_data_array, dset.obs_time, kind=\"cubic\"\n )\n\n return data", "def parse_station(station):\n if not station:\n return pd.DataFrame()\n header = get_header(station[0])\n header['ftime'] = get_fntime(station[1], station[2], header) \n df = get_rows(header, station)\n return df", "async def stations():\n with open(\"/data/station_list.json\") as j:\n data = json.load(j)\n return data", "def get_cycling_timeseries_2017(station: str):\n\n # Load data\n cycling_df = pd.read_csv(\"src/Helsingin_pyorailijamaarat.csv\", sep=\";\")\n\n # Drop rows and columns with only null values\n cycling_df = cycling_df \\\n .dropna(axis=0, how=\"all\") \\\n .dropna(axis=1, how=\"all\")\n\n # Create Date column and reindex dataset\n cycling_df[\"Date\"] = create_date_column(cycling_df[\"Päivämäärä\"])\n cycling_df = cycling_df.set_index(\"Date\")\n\n # Drop redundan\n cycling_df.drop([\"Päivämäärä\"], axis=\"columns\", inplace=True)\n\n cycling_df = cycling_df.loc['2017', station]\n\n cycling_df = cycling_df \\\n .groupby(cycling_df.index.date) \\\n .sum()\n\n return cycling_df", "async def stations_data():\n with open(\"/data/station_data.json\") as j:\n data = json.load(j)\n return data", "def gatherStationData():\n flist = list_files()\n station_dics = {}\n print(\"Reading in csv data...\")\n for f_in in flist:\n start,end = find_timespan(f_in)\n station = station_name(f=f_in)\n print(\"File: {0} Station: {1} {2}--{3}\".format(f_in, \n station, start, end))\n station_dics[station] = read_precip(fname=f_in, \n label=station, start_year=start, end_year=end)\n data_list = []\n for s in station_dics:\n data_list.append(station_dics[s]) \n return pd.concat(data_list,axis=1)", "def stations():\n # Query all stations before a given date 2017\n results = session.query(Measurement.date, Measurement.tobs).filter(func.strftime(\"%Y\", Measurement.date) >= \"2017\").all()\n all_results = list(np.ravel(results))\n \n return jsonify(all_results)", "def pacMare(date, estac):\n monthList = [\"JAN\", 
\"FEV\", \"MAR\", \"ABR\", \"MAI\", \"JUN\", \"JUL\",\n \"AGO\", \"SET\", \"OUT\", \"NOV\", \"DEZ\"]\n an = date.year\n Mesl = date.month\n strmes = monthList[Mesl-1]\n di = date.day\n data1 = \"%s/%s/%s\" %(di, Mesl, an)\n\n DT = 1\n HI = -3\n d0 = 1\n\n estacoes = Estacao()\n constantes = Constantes()\n cadastro = Cadastro()\n combinacoes = Combinacoes()\n\n f = estacoes.data['name'].index(estac)\n Cod = estacoes.data['ID'][f]\n LA1 = estacoes.data['latG'][f]\n LA2 = estacoes.data['latM'][f]\n LO1 = estacoes.data['lonG'][f]\n LO2 = estacoes.data['lonM'][f]\n nc = estacoes.data['ncomp'][f]\n NM = estacoes.data['nm'][f]\n fu = estacoes.data['fuso'][f]\n ca = estacoes.data['carta'][f]\n hemlat = estacoes.data['hemlat'][f]\n hemlon = estacoes.data['hemlon'][f]\n \n infoList = []\n lat = base10Tobase60(lat=base60Tobase10(LA1, LA2, hemlat))\n lon = base10Tobase60(lon=base60Tobase10(LO1, LO2, hemlon))\n latSTR = u\"Lat: %s\" % lat\n lonSTR = u\"Lon: %s\" % lon\n ncSTR = u\"Componentes: %s\" %(nc)\n nmSTR = u\"Nível Médio: %s cm\" %(int(NM))\n fuSTR = u\"Fuso: - %sh\" %(int(fu))\n caSTR = u\"Número Carta: %s\" %(ca)\n\n infoList.append(latSTR)\n infoList.append(lonSTR)\n infoList.append(ncSTR)\n infoList.append(nmSTR)\n infoList.append(fuSTR)\n infoList.append(caSTR)\n\n f = constantes.data['ID'].index(Cod)\n ai = constantes.data['const'][ f:f+nc ]\n h = constantes.data['amp'][ f:f+nc ]\n G = constantes.data['phase'][ f:f+nc ]\n HH = h[:]\n GG = G[:]\n\n MK, constID = [],[]\n for k in range(nc):\n f = cadastro.data['const'].index(ai[k])\n MK.append(cadastro.data['M'][f])\n constID.append(cadastro.data['cod'][f])\n MK = str2int(MK)\n constID = str2int(constID)\n\n BB, CC = [],[]\n for k in range(nc):\n f = combinacoes.data['ID'].index(constID[k])\n aux = combinacoes.data['subs'][ f: f+MK[k] ]\n aux = str2float(aux)\n BB.append(aux)\n aux = combinacoes.data['comb'][ f: f+MK[k] ]\n aux = str2float(aux)\n CC.append(aux)\n\n cdat = open(web2pyPath + \"modules/data/Vdata.txt\")\n V = []\n for line in cdat.readlines():\n line2 = line.strip('\\r\\n').split(',')\n line2 = str2float(line2)\n V.append(line2)\n\n D = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n n = 30\n\n # calculo dos elementos astronomicos\n MB = float(an % 4)\n MC = float(an % 100)\n MD = float(an % 400)\n dd = float(di)\n\n if MB == 0 and MC != 0 or MD == 0:\n D[2] = 29\n\n i1 = float(an / 100)\n i2 = i1 - 19\n if i2 != 0:\n t1 = i2\n j1 = abs(i2)\n c3 = j1 / i2\n t2 = t1 * t1 * c3\n c1 = int(j1 * 0.75 + 0.5) * c3\n else:\n t1 = 0.\n t2 = 0.\n c1 = 0.\n\n s0 = 277.0224 + 307.8831 * t1 - 0.0011 * t2 - 13.1764 * c1\n h0 = 280.1895 + 0.7689 * t1 + 0.0003 * t2 - 0.9856 * c1\n p0 = 334.3853 + 109.034 * t1 - 0.0103 * t2 - 0.1114 * c1\n nl = 100.7902 + 134.142 * t1 - 0.0021 * t2 - 0.053 * c1\n P1 = 281.2208 + 1.7192 * t1 + 0.00045 * t2 - 0.000047 * c1\n\n for i in range(Mesl):\n di = float(di + D[i])\n\n # bug de 2001\n if an <= 2000:\n di = di - 1 \n\n IA = i1 * 100\n BI = an - IA\n\n AI = int((BI - 1) * 0.25); AI = float(AI)\n if MD == 0: AI = AI + 1\n AD = AI + di\n N2 = n * DT * 0.5\n AV = N2\n SN = AV / 10000\n b = [None]\n b.append( s0 + 129.38481 * BI + 13.1764 * AD )\n b.append( h0 - 0.23872 * BI + 0.98565 * AD )\n b.append( p0 + 40.66249 * BI + 0.1114 * AD )\n b.append(None)\n b.append( nl + 19.32818 * BI + 0.05295 * AD )\n b.append( P1 + 0.01718 * BI + 0.000047 * AD )\n b[0] = b[2] - b[1]\n b[4] = 90.\n b.append( b[3] + N2 * 0.00464183 )\n b.append( b[5] + N2 * 0.00220641 )\n b.append( b[6] + N2 * 0.00000196 )\n\n a = [ 
[0.,1.,0.], [0.,2.,0.], [0.,3.,0.], [0.,0.,2.], [0.,1.,2.], [1.,0.,-1.], \n [2.,-1.,-1.], [2.,-1.,0.], [2.,-1.,1.], [2.,0.,0.], [2.,1.,0.], \n [2.,2.,0.], [2.,3.,0.] ]\n\n b[0] = b[0] + HI * 14.49205211\n b[1] = b[1] + HI * 0.54902653\n b[2] = b[2] + HI * 0.0410686\n b[3] = b[3] + HI * 0.00464183\n b[5] = b[5] + HI * 0.00220641\n b[6] = b[6] + HI * 0.00000196\n\n z, Q = [], []\n for i in range(13):\n s = 0.\n for J in range(3):\n s = s + a[i][J] * b[J + 7]\n \n XX = s * 0.017453\n z.append(np.cos(XX))\n Q.append(np.sin(XX))\n\n W = []\n for i in range(37):\n WQ = 0.\n for J in range(5):\n WQ = WQ + V[i][J] * b[J]\n \n if i == 13 or i == 30:\n W.append( WQ + b[9] )\n elif i == 17 or i == 32:\n W.append( WQ - b[9] )\n else:\n W.append(WQ)\n\n F, U = [], []\n for k in range(38):\n F.append(None) # apenas para facilitar a copia do codigo em VB\n U.append(None) # depois, ambos serao popped-up\n z.insert(0, None) # idem\n Q.insert(0, None) # idem\n\n F[1] = 1\n F[2] = 1\n F[3] = 1 - 0.0307 * z[1] + 0.0007 * z[2] - 0.0534 * z[10] - 0.0218 * z[11] - 0.0059 * z[12]\n F[4] = 1 + 0.4142 * z[1] + 0.0377 * z[2] - 0.0008 * z[3] - 0.0028 * z[8] + 0.0431 * z[10] - 0.0023 * z[11]\n F[5] = 1 + 0.4141 * z[1] + 0.0384 * z[2] - 0.003 * z[7] - 0.003 * z[9] + 0.0179 * z[10] - 0.004 * z[12] - 0.0017 * z[13]\n F[6] = 1 + 0.1885 * z[1] - 0.0063 * z[2] - 0.0063 * z[12]\n F[7] = 1 + 0.1884 * z[1] - 0.0061 * z[2] - 0.0087 * z[10]\n F[8] = 1 + 0.1884 * z[1] - 0.0057 * z[2] + 0.0007 * z[6] - 0.0028 * z[10] - 0.0039 * z[12] - 0.0007 * z[13]\n F[9] = 1 + 0.1881 * z[1] - 0.0058 * z[2] - 0.0576 * z[10] + 0.0175 * z[11]\n F[10] = 1 + 0.1885 * z[1] - 0.0058 * z[2] + 0.0001 * z[8] - 0.0054 * z[10] - 0.001 * z[11]\n F[11] = 1 - 0.2454 * z[1] - 0.0142 * z[2] + 0.0445 * z[10]\n F[12] = 1 + 0.1714 * z[1] - 0.0054 * z[2] + 0.3596 * z[10] + 0.0664 * z[11] - 0.0057 * z[12]\n F[13] = 1 + 0.1905 * z[1]\n F[14] = 1 - 0.0078 * z[1]\n F[15] = 1 - 0.0112 * z[1] + 0.0007 * z[2] - 0.0004 * z[4] - 0.0015 * z[10] - 0.0003 * z[11]\n F[16] = 1\n F[17] = 1 + 0.1158 * z[1] - 0.0029 * z[2] + 0.0001 * z[11]\n F[18] = 1 + 0.019 * z[1]\n F[19] = 1 - 0.0384 * z[1] - 0.0185 * z[2] + 0.0132 * z[4] + 0.0105 * z[8] + 0.0344 * z[10]\n F[20] = 1 + 0.1676 * z[1] + 0.03 * z[11]\n F[21] = 1 + 0.1685 * z[1] - 0.0047 * z[2] - 0.0152 * z[10] - 0.0098 * z[11] - 0.0057 * z[12]\n F[22] = 1 + 0.6398 * z[1] + 0.1342 * z[2] + 0.008500001 * z[3] + 0.0296 * z[8] + 0.1496 * z[10] - 0.0037 * z[11]\n F[23] = 1 - 0.0337 * z[1]\n F[24] = 1 - 0.0374 * z[1] - 0.061 * z[12]\n F[25] = 1 - 0.0375 * z[1]\n F[26] = 1 - 0.0373 * z[1] + 0.0004 * z[2] + 0.0007 * z[6] - 0.0039 * z[12]\n F[27] = 1 - 0.0373 * z[1] + 0.0042 * z[10] - 0.0036 * z[11]\n F[28] = 1 - 0.0373 * z[1] + 0.0004 * z[2] + 0.0005 * z[10] - 0.0001 * z[11]\n F[29] = 1 - 0.0448 * z[1]\n F[30] = 1 - 0.0367 * z[1] + 0.0047 * z[8] - 0.2505 * z[10] - 0.1102 * z[11] - 0.0156 * z[12]\n F[31] = 1\n F[32] = 1 - 0.0022 * z[1]\n F[33] = 1 - 0.2535 * z[4] + 0.0141 * z[5]\n F[34] = 1 + 0.2852 * z[1] + 0.0324 * z[2]\n F[35] = 1 + 0.4389 * z[1] + 0.0487 * z[2] + 0.0487 * z[10] + 0.065 * z[11]\n F[36] = 1 + 0.4168 * z[1] + 0.0466 * z[2] - 0.078 * z[10]\n F[37] = 1 - 0.0564 * z[1]\n\n U[1] = 0\n U[2] = 0\n U[3] = 0.0007 * Q[1] - 0.0008 * Q[2] - 0.0534 * Q[10] - 0.0218 * Q[11] - 0.0059 * Q[12]\n U[4] = 0.4142 * Q[1] + 0.0377 * Q[2] - 0.0008 * Q[3] + 0.0027 * Q[8] - 0.0432 * Q[10] + 0.0022 * Q[11]\n U[5] = 0.4142 * Q[1] + 0.0384 * Q[2] + 0.003 * Q[7] + 0.003 * Q[9] - 0.018 * Q[10] - 0.004 * Q[12] - 0.0017 * Q[13]\n U[6] = -0.1885 * Q[1] + 
0.0062 * Q[2] + 0.0062 * Q[12]\n U[7] = -0.1884 * Q[1] + 0.006 * Q[2] - 0.0087 * Q[10]\n U[8] = -0.1884 * Q[1] + 0.0057 * Q[2] - 0.0008 * Q[6] - 0.0028 * Q[10] + 0.0039 * Q[12] + 0.0007 * Q[13]\n U[9] = -0.1882 * Q[1] + 0.0057 * Q[2] - 0.0576 * Q[10] + 0.0175 * Q[11]\n U[10] = -0.1885 * Q[1] + 0.0057 * Q[2] + 0.0001 * Q[8] - 0.0064 * Q[10] - 0.001 * Q[11]\n U[11] = -0.1886 * Q[1] - 0.0142 * Q[2] - 0.0446 * Q[10]\n U[12] = -0.2294 * Q[1] - 0.3596 * Q[10] - 0.0665 * Q[11] + 0.0057 * Q[12]\n U[13] = 0.246 * Q[1]\n U[14] = 0.0077 * Q[1]\n U[15] = 0.0111 * Q[1] - 0.0008 * Q[2] - 0.0004 * Q[4] - 0.0015 * Q[10] - 0.0003 * Q[11]\n U[16] = 0\n U[17] = 0.1554 * Q[1] - 0.003 * Q[2] - 0.0002 * Q[11]\n U[18] = 0.019 * Q[1]\n U[19] = -0.0384 * Q[1] - 0.0185 * Q[2] - 0.0132 * Q[4] - 0.0106 * Q[8] - 0.0344 * Q[10]\n U[20] = 0.231 * Q[1] - 0.03 * Q[11]\n U[21] = 0.2274 * Q[1] - 0.0047 * Q[2] - 0.0152 * Q[10] - 0.0098 * Q[11] - 0.0057 * Q[12]\n U[22] = 0.6398 * Q[1] + 0.1342 * Q[2] - 0.0296 * Q[8] - 0.1497 * Q[10] + 0.0037 * Q[11]\n U[23] = 0.0373 * Q[1]\n U[24] = 0.0373 * Q[1] + 0.006 * Q[12]\n U[25] = 0.0373 * Q[1] - 0.0005 * Q[2] - 0.0008 * Q[6] + 0.0039 * Q[12]\n U[26] = 0.0373 * Q[1] - 0.0005 * Q[2] - 0.0008 * Q[6] + 0.0039 * Q[12]\n U[27] = 0.0373 * Q[1] + 0.0042 * Q[10] + 0.0036 * Q[11]\n U[28] = 0.0373 * Q[1] - 0.0005 * Q[2] + 0.0005 * Q[9] + 0.0001 * Q[11]\n U[29] = 0.0487 * Q[1]\n U[30] = 0.0366 * Q[1] + 0.0047 * Q[8] - 0.2505 * Q[9] - 0.1102 * Q[11]\n U[31] = 0\n U[32] = -0.0022 * Q[1]\n U[33] = -0.2535 * Q[4] + 0.0141 * Q[5]\n U[34] = 0.3108 * Q[1] + 0.0324 * Q[2]\n U[35] = 0.4389 * Q[1] + 0.0487 * Q[2] - 0.0488 * Q[9] - 0.065 * Q[11]\n U[36] = 0.4542 * Q[1] + 0.0466 * Q[2] - 0.0078 * Q[10]\n U[37] = 0.0563 * Q[1]\n\n z.pop(0)\n Q.pop(0)\n F.pop(0)\n U.pop(0)\n AV = n * DT * 0.5\n\n for i in range(37):\n XX = F[i]\n YY = U[i]\n F[i] = np.sqrt( XX ** 2 + YY ** 2 )\n U[i] = W[i] + np.arctan(YY / XX) * 57.29578\n U[i] = U[i] - int(U[i] / 360) * 360\n if U[i] < 0: U[i] = U[i] + 360\n\n\n # calculo das alturas\n HC, GC = [],[]\n for k in range(110):\n HC.append(0)\n GC.append(0)\n\n for i in range(nc):\n s = 0.\n WQ = 0.\n T = 1.\n\n for J in range(MK[i]):\n jj = int(BB[i][J])\n kk = CC[i][J]\n T = T * F[jj-1] ** abs(kk)\n s = s + U[jj-1] * kk\n WQ = WQ + V[jj-1][5] * kk\n ZQ = s\n \n h[i] = T * h[i]\n s = s - G[i]\n if s < 0: s = s + 360.\n G[i] = s\n try: \n W[i] = WQ * DT\n except IndexError:\n W.append( WQ * DT )\n HC[i] = T * HC[i]\n ZQ = ZQ - GC[i]\n if ZQ < 0: ZQ = ZQ + 360.\n GC[i] = ZQ\n\n x, Y2, y = [],[],[]\n MM = 0\n for i in range(n):\n s = 0.\n ZQ = 0.\n\n for j in range(nc):\n AA = G[j] * 0.017453\n s = s + h[j] * np.cos(AA)\n G[j] = G[j] + W[j]\n AC = GC[j] * 0.017453\n ZQ = ZQ + HC[j] * np.cos(AC)\n GC[j] = GC[j] + W[j]\n\n x.append(s + NM)\n Y2.append(x[i])\n y.append(ZQ + MM)\n\n x = np.array(x, dtype=np.float32)\n x = x/100.\n h = x[3:-3]\n hours = np.arange(24)\n years, months, days = 0*hours+an, 0*hours+Mesl, 0*hours+int(dd)\n time = []\n for year, month, day, hour in zip(years, months, days, hours):\n time.append( dt.datetime(year, month, day, hour) )\n\n time = mpldates.date2num(time)\n time2 = np.linspace(time[0], time[-1], 500)\n\n interp = interp1d(time, h, kind='cubic')\n h2 = interp(time2)\n\n dh = np.gradient(h2)\n dhSign = dh > 0\n # gathering pairs\n pairs = []\n for k in range(len(dh)-1):\n pairs.append([dhSign[k], dhSign[k+1]])\n\n f = []\n for k in range(len(pairs)):\n if pairs[k] == [True, False] or pairs[k] == [False, True]:\n f.append(k)\n\n datas = 
mpldates.num2date(time2[f])\n hora = []\n for data in datas:\n hora.append(\"%02i:%02i\" %(data.hour, data.minute))\n altura = h2[f]\n altura = ['%.1f' % a for a in altura]\n\n return infoList, hora, altura, time2, h2", "def get_weather_data(weather_station):\n now = datetime.datetime.now()\n then = now - datetime.timedelta(days=7)\n\n query_date_start = (\"%d%02d%02d\" % (then.year, then.month, then.day))\n query_date_end = (\"%d%02d%02d\" % (now.year, now.month, now.day))\n\n api_key = '/api/%s' % WUNDERGROUND_KEY\n history_key = '/history_%s%s/lang:EN/units:english/bestfct:1/v:2.0' % (query_date_start, query_date_end)\n query = '/q/%s.json?showObs=0&ttl=120' % weather_station\n\n weather_url = (\"%s%s%s%s\" % (WUNDERGROUND_HOST, api_key, history_key, query))\n\n logger.info('Weather URL: %s', weather_url)\n response = requests.get(weather_url).text\n\n max_temp_avg = json.loads(response)['history']['summary']['max_temperature_avg']\n sum_precip = json.loads(response)['history']['summary']['precip_sum']\n\n return max_temp_avg, sum_precip", "def metar_extract( now ):\n acursor.execute(\"\"\"\n SELECT metar from t%s WHERE valid BETWEEN '%s+00' and '%s+00' \n and metar is not null\n \"\"\" % (now.year, \n (now - min10).strftime(\"%Y-%m-%d %H:%M\"),\n (now + min10).strftime(\"%Y-%m-%d %H:%M\")))\n output = open('metar.txt', 'w')\n output.write(\"\\x01\\r\\r\\n\")\n output.write(\"000 \\r\\r\\n\")\n output.write(\"SAUS99 KISU %s\\r\\r\\n\" % (now.strftime(\"%d%H%M\"),))\n output.write(\"METAR\\r\\r\\n\")\n for row in acursor:\n output.write(row[0]+\"=\\r\\r\\n\")\n output.write(\"\\x03\\r\\r\\n\")\n output.close()", "def temperatures():\n\n return station_9281", "def get_obsdate():\n\n#\n#--- read sot data\n#\n f = open(sot_directory, 'r')\n data = [line.strip() for line in f.readlines()]\n f.close()\n\n obsid_list = []\n start_date = []\n index_date = []\n for ent in data:\n temp = re.split('\\^', ent)\n obsid = temp[1]\n#\n#--- check the data are valid\n#\n try:\n atemp = re.split('\\s+', temp[13])\n mon = atemp[0]\n date = atemp[1]\n year = atemp[2][2] + atemp[2][3]\n except:\n continue\n#\n#--- convert month in letter into digit\n#\n for i in range(0, 12):\n if mon == month_list[i]:\n mon = i + 1\n break\n#\n#--- two forms of starting date: 05/23/14 and 20140523\n#\n lmon = str(mon)\n if int(mon) < 10:\n lmon = '0' + lmon\n ldate = str(date)\n if int(date) < 10:\n ldate = '0' + ldate\n\n dline = lmon + '/' + ldate + '/' + year\n iline = atemp[2] + lmon + ldate\n\n obsid_list.append(int(obsid))\n start_date.append(dline)\n index_date.append(iline)\n\n return (obsid_list, start_date, index_date)", "def tobs():\n # query for the last day\n\n # Create our session (link) from Python to the DB\n session = Session(engine)\n \n last_day = session.query(Measurement.date).order_by(Measurement.date.desc()).first()[0]\n len_months = 12\n # convert result to datetime format\n last_day = datetime.datetime.strptime(last_day, \"%Y-%m-%d\")\n # calculate start day\n start_day = last_day - datetime.timedelta(days=365)\n start_day = \"{:%Y-%m-%d}\".format(start_day)\n\n # Design a query to retrieve the last 12 months of temperature data and plot the results\n results = session.query(Measurement.date, Measurement.tobs, Measurement.station).\\\n filter(Measurement.date >= start_day ).\\\n order_by(Measurement.date).all()\n\n session.close()\n \n temps = []\n for result in results:\n temp_dict = {}\n temp_dict[\"date\"] = result.date\n temp_dict[\"tobs\"] = result.tobs\n temp_dict[\"station\"] = 
result.station\n temps.append(temp_dict)\n \n return jsonify(temps)", "def station_list() -> List[Dict]:\n return STATIONS", "def JupiterMoons(time):\n infolist = []\n for (mu, al0, al1, a, l, z, zeta) in _JupiterMoonModel:\n infolist.append(_CalcJupiterMoon(time, mu, al0, al1, a, l, z, zeta))\n return JupiterMoonsInfo(infolist)", "def forecastdata_print_command(station_id, forecast_date):\n try:\n t = datetime.strptime(forecast_date, '%Y-%m-%d %H:%M').timestamp()\n except ValueError:\n t = datetime.now().timestamp()\n except TypeError:\n t = datetime.now().timestamp()\n\n forecast = forecasts.get_forecast(station_id, t)\n print(forecast)", "def get_results():\n _, body = API.measurements(city='Los Angeles', parameter='pm25', limit=100)\n result = []\n for dict in body['results']:\n date = dict['date']['utc']\n value = dict['value']\n result.append((date, value))\n return result", "def temp_series(smhi_data):\n consumable_data = {\n \"station\": smhi_data[\"station\"][\"name\"],\n \"temp\": [],\n \"from\": smhi_data[\"value\"][0][\"date\"],\n \"to\": smhi_data[\"value\"][-1][\"date\"]\n }\n for temp_post in smhi_data[\"value\"]:\n consumable_data[\"temp\"].append(float(temp_post[\"value\"]))\n return consumable_data", "def get_station_data(self, station_id, time='daily'):\n prov = self.get_province(station_id, time)\n\n # Download and read the file into a dataframe, and strip white space from headings\n df = pandas.read_csv(\n urlretrieve(self.build_url(prov, time, station_id))[0]\n ).rename(columns=lambda x: x.strip())\n\n return df", "def get_data(last):\n Table = \"ServerRoom\"\n filter = \"\"\n if last == \"lastone\":\n data = request_meteodata(\"SELECT * from `ServerRoom` ORDER BY id DESC LIMIT 1 \")\n if len(data) == 0:\n return [SensorData(datetime.datetime.now(), 0, 0)]\n res = []\n for d in data:\n res.append(SensorData(d[1], d[2], d[3]))\n return res\n if last != \"All\":\n limit = datetime.datetime.now().astimezone(utz)\n if last == \"24hours\":\n limit -= datetime.timedelta(hours=24)\n else:\n limit = limit.replace(hour=0, minute=0, second=0, microsecond=0)\n if last == \"3days\":\n limit -= datetime.timedelta(days=3)\n elif last == \"7days\":\n limit -= datetime.timedelta(days=7)\n elif last == \"month\":\n limit = limit.replace(day=1)\n elif last == \"30days\":\n limit -= datetime.timedelta(days=30)\n elif last == \"year\":\n limit = limit.replace(day=1, month=1)\n filter = \" WHERE `date` > '\" + str(limit) + \"'\"\n order = \" ORDER BY `date` ASC\"\n req = \"SELECT * FROM `\" + Table + \"`\" + filter + order\n data = request_meteodata(req)\n if len(data) == 0:\n print(\"no data: get all\")\n req = \"SELECT * FROM `\" + Table + \"`\" + order\n data = request_meteodata(req)\n res = []\n for d in data:\n res.append(SensorData(d[1], d[2], d[3]))\n return res", "def getstationaryobslist(self):\n\n stationaryobslist = [self.__tablecm]\n return stationaryobslist" ]
[ "0.6079679", "0.60203874", "0.59128946", "0.5794843", "0.57113194", "0.5494066", "0.5489716", "0.5488133", "0.5465871", "0.53732324", "0.53613657", "0.5346025", "0.5334262", "0.5303991", "0.5268003", "0.5259103", "0.523938", "0.52334833", "0.52326477", "0.5229239", "0.5220071", "0.521653", "0.52141017", "0.52078706", "0.51960796", "0.5185002", "0.5182093", "0.51766515", "0.5147213", "0.51333237" ]
0.6702449
0
Return the METAR data for the specified station and date range.
def metar_data(station, begin, end, ignore_errors, retries): def _parse_date(date_str): """Minimal date parser.""" yr, mo, day = [int(x) for x in date_str.split('-')] try: return date(yr, mo, day) except ValueError: raise InvalidDateError, begin metar = METARdata() # Validate the beginning and end dates if not begin: return metar.get_met_data(station, ignore_errors, retries) else: date1 = _parse_date(begin) if not end: date2 = (datetime.today() - timedelta(days=1)).date() else: date2 = _parse_date(end) if date1 > date2: raise EndDateBeforeBeginError, (begin, end) # Retrieve the METAR data for the date range metars = [] while date1 <= date2: metars.extend(metar.get_met_data(station, ignore_errors, retries, year=date1.year, month=date1.month, day=date1.day)) date1 += timedelta(days=1) return metars
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_met_data(self, stn, ignore_errors, retries, **kwargs):\n # Validate the common station name and convert it to the\n # corresponding official station ID\n try:\n stn = self.stns[stn]\n except:\n raise UnknownStationError, stn\n # Process the date components in the keyword args into\n # instance attribute values\n for kw in kwargs:\n if kw in ('year', 'month', 'day'):\n self.__dict__[kw] = kwargs[kw]\n else:\n raise UnknownParameterError, (kw, kwargs[kw])\n # Get the list of METARs\n try:\n self.data = self._get_metars(stn, retries)\n except:\n raise\n # Validate and clean up the METAR data\n try:\n self._clean_data(stn, ignore_errors)\n except:\n raise\n return self.data", "def getStationData(self):\n dtime = datetime.strptime(self.refTime, \"%y%m%d/%H%M\")\n trange = TimeRange()\n trange.setStart(dtime)\n trange.setEnd(dtime)\n dataTime = DataTime(refTime=dtime, validPeriod=trange)\n req = StationDataRequest()\n req.setPluginName(self.pluginName)\n req.setStationId(self.stationId)\n req.setRefTime(dataTime)\n req.setParmList(self.parmList)\n req.setPartNumber(self.partNumber)\n resp = self.client.sendRequest(req)\n\n for i, rec in enumerate(resp):\n resp[i] = {\n key.decode() if isinstance(key, bytes) else key:\n val.decode() if isinstance(val, bytes) else val\n for key, val in rec.items()\n }\n\n return resp", "def get_spatial(date, spatial_index,dataStruct,interval):\n from lon_to_m import lon_to_m # Function to turn longitude degrees into metres\n from lat_to_m import lat_to_m # Function to turn latitude degrees into metres\n import numpy as np\n\n available_indices = [\"NDVI\", \"NDWI\",\"MNDWI_SW1\",\"MNDWI_SW2\"]\n lonData, latData, spatialData = [], [], []\n \n id = dataStruct['id']\n lon = dataStruct['longitude']\n lat = dataStruct['latitude']\n\n full_month = {'lonData':[],'latData':[],'spatialData':[]}\n\n if interval == \"daily\":\n for i in range(len(id)):\n if date == int(id[i][12:]):\n \n # Appending longitude and latitude data \n lonData.append(lon_to_m(lon[i]))\n latData.append(lat_to_m(lat[i]))\n \n # Finding appropriate index data to append\n spatialData.append(dataStruct[spatial_index][i])\n elif interval == \"monthly\":\n # Getting Spatial data\n monthly_ids = []\n \n for each_id in id:\n if str(date)[:6] == each_id[12:18]:\n monthly_ids.append(each_id)\n\n for this_month in monthly_ids:\n holding_spatialData = []\n for i in range(len(id)):\n if this_month[12:] == id[i][12]:\n holding_spatialData.append(dataStruct[spatial_index][i])\n full_month['spatialData'].append(holding_spatialData)\n\n\n for j in range(len(full_month['spatialData'][0])): # Iterating through each point within each month (~1560)\n averaging = []\n for jj in range(len(full_month['spatialData'])): # iterating through each stored month (~4)\n \n averaging.append(full_month['spatialData'][jj][j])\n spatialData.append(np.nanmean(averaging))\n\n # Getting lat/lon\n if date == int(id[i][12:]):\n \n # Appending longitude and latitude data \n lonData.append(lon_to_m(lon[i]))\n latData.append(lat_to_m(lat[i]))\n\n\n \n return np.array(lonData), np.array(latData), np.array(spatialData)", "def read_metar_ZA(metar_url, date_as_ISO_text=False):\n\n \n metar_list = [] # The list of dictionaries that will be returned, containing METAR data\n \n # Regular expressions to extract the wind\n re_wind_no_gust = re.compile(r'(?P<direction>[0-9]{3,3})(?P<spd>[0-9]{2,2})KT') # 10005KT\n re_wind_gust = re.compile(r'(?P<direction>[0-9]{3,3})(?P<spd>[0-9]{2,2})G(?P<gust>[0-9]{2,2})KT') # 10005G15KT\n 
re_wind_variable = re.compile(r'(?P<direction>VRB)(?P<spd>[0-9]{2,2})KT') # VRB05KT\n re_no_data = re.compile(r'No Data For (?P<missing>[A-Z,a-z]{4,4})', re.IGNORECASE) # No data for FAGC\n re_temp = re.compile(r' (?P<temp>[M]?[0-9]{2,2})+/(?P<dewpt>[M]?[0-9]{2,2}) ') #temp in format 20/12 or 20/M02 or M03/M10 etc. \n re_qnh = re.compile(r'Q(?P<qnh>[0-9]{3,4})')\n \n \n # Retrieve the webpage containing METAR data\n try:\n r = requests.get(metar_url, verify=False)\n except:\n current_app.logger.error(f\"Error retrieving METAR - failed at REQUESTS call\")\n return None\n \n \n # If error retrieving page, return None\n if r.status_code != 200: \n current_app.logger.error(f\"Error retrieving METAR: URL = {metar_url}: {r.status_code} - {r.reason}\")\n return None\n \n # Setup Beautiful Soup, and extract all the \"PRE\" tags - these are where the METAR data is stored\n soup = BeautifulSoup(r.text, 'html.parser')\n mets = soup.find_all('pre')\n \n #Connect to DB\n sess = sqa_session()\n \n # Loop through the individual METAR\n for met in mets:\n \n # Get just the text. Sould be: similar to: 'View DecodedMETAR FAOR 100530Z 19015KT CAVOK 15/M03 Q1020 NOSIG='\n met_string = str(met.text)\n \n is_speci = False # Is this a SPECI and not a METAR - default to False\n is_correction = False #Is this METAR a correction of an earlier (i.e. 'METAR COR xxxxxxxxx')\n \n # Determine if this is a METAR, a SPECI, or a line to be ignored\n s = met_string.find('METAR') # Is it a METAR?\n \n # If text not found, this is not a METAR - is it a SPECI?\n if s < 0:\n s = met_string.find('SPECI') # Is it a SPECI\n\n if s >= 0: # It is a speci\n is_speci = True\n \n else: # It's not a SPECI either, so continue to the next element\n continue\n\n s += 5 # 5 is the length of the text METAR and SPECI - we want to remove this.\n # Remove METAR/SPECI text - we should now have the raw METAR/SPECI only (eg. 'FAOR 100530Z 19015KT CAVOK 15/M03 Q1020 NOSIG=')\n met_string = met_string[s:].strip()\n \n # If this METAR is a Correction, then flag and remove the 'COR ' (eg: METAR COR FAHS 011200Z AUTO 30009KT 34/02 Q1017=\n if met_string[:4] == 'COR ':\n is_correction = True\n met_string = met_string[4:]\n \n # Extract aerodrome name\n aerodrome = met_string[:4]\n # Get aerodrome NavPoint - contains coordinates\n aero_point = sess.query(NavPoint).filter(NavPoint.ICAO_Code == aerodrome).first()\n \n # If aerdrome not found, this is a non-aerodrome station - ignore it (May implement later)\n if not aero_point:\n continue\n \n # Get the date and time\n day = int(met_string[5:7])\n hr = int(met_string[7:9])\n mn = int(met_string[9:11])\n \n met_date = calc_metar_taf_date(day, hr, mn)\n \n #Get the winds\n wind_variable = False # Wind defaults to not light and variable\n wind_gust = 0 # Gust defaults to 0\n no_wind = False #Is there no wind data avail (i.e. /////KT)\n \n \n #Check whether there is now wind specified (i.e. /////KT)\n if met_string.find('///KT') > 0:\n no_wind = True\n wind_dir = 0\n wind_spd = 0\n else:\n \n # Use regular expression to try to extract non-gusting wind (eg. 10010KT)\n tmp = re_wind_no_gust.search(met_string)\n if tmp:\n try:\n wind_dir = tmp.group('direction')\n wind_spd = tmp.group('spd')\n except:\n current_app.logger.error(f\"Error passing METAR winds: {met_string}\")\n \n # Use regular expression to try to extract gusting wind (eg. 
10010G15KT)\n elif re_wind_gust.search(met_string):\n tmp = re_wind_gust.search(met_string)\n try:\n wind_dir = tmp.group('direction')\n wind_spd = tmp.group('spd')\n wind_gust = tmp.group('gust')\n except:\n current_app.logger.error(f\"Error passing METAR wind GUSTING: {met_string}\")\n \n # Use regular expression to try to extract variable wind (eg. VRB02KT)\n elif re_wind_variable.search(met_string):\n tmp = re_wind_variable.search(met_string)\n try:\n wind_dir = -1\n wind_spd = tmp.group('spd')\n wind_variable = True\n except:\n current_app.logger.error(f\"Error passing METAR wind VARIABLE: {met_string}\")\n\n # Use regular expression to try to extract Temp and Dewpoint (eg. 25/M02)\n temperature = 0\n dew_point = 0\n\n tmp = re_temp.search(met_string)\n if tmp:\n try:\n temperature = int(tmp.group('temp').replace('M','-'))\n dew_point = int(tmp.group('dewpt').replace('M','-'))\n except:\n current_app.logger.error(f\"Error passing METAR temperature: {met_string}\")\n\n\n # Use regular expression to try to extract QNH (eg. Q1025)\n qnh = 1013\n \n tmp = re_qnh.search(met_string)\n if tmp:\n try:\n qnh = tmp.group('qnh')\n except:\n current_app.logger.error(f\"Error passing METAR QNH: {met_string}\")\n \n if date_as_ISO_text == True:\n met_date = datetime.isoformat(met_date)\n \n met_dict = {'aerodrome': aerodrome , 'coords': (aero_point.Longitude, aero_point.Latitude), \n 'has_no_data': False , 'is_speci': is_speci, 'is_correction': is_correction, 'time': met_date, \n 'wind': {'no_wind_data': no_wind, 'direction': wind_dir, 'speed': wind_spd, 'gusting': wind_gust, 'is_variable': wind_variable}, #(wind_dir, wind_spd, wind_gust, wind_variable) , \n 'temperature': temperature, 'dew_point': dew_point,\n 'qnh': qnh,\n 'body': met_string}\n \n metar_list.append(met_dict)\n \n # Check for any stations with no data - search the whole page\n aero_no_datas = re_no_data.findall(soup.text)\n # If there are stations with no data, iterate through them\n if aero_no_datas:\n for aerodrome in aero_no_datas:\n # Get aerodrome NavPoint - contains coordinates\n aero_point = sess.query(NavPoint).filter(NavPoint.ICAO_Code == aerodrome).first()\n \n # If aerdrome not found, this is a non-aerodrome station - ignore it (May implement later)\n if not aero_point:\n continue\n \n # Add a disctionary item\n met_dict = {'aerodrome': aerodrome , 'coords': (aero_point.Longitude, aero_point.Latitude) , \n 'has_no_data': True, 'body': f'No data for {aerodrome}'}\n \n metar_list.append(met_dict)\n\n return metar_list", "def meteo(station='caqc0177'):\r\n long=getLongForecast(station)\r\n return dict(\r\n title= long[0] + \" - \" + station,\r\n message=datetime.now(),\r\n year=datetime.now().year,\r\n longTerm=long[1],\r\n shortTerm=getShortForecast(station)\r\n )", "def get_meter_data_for_time_slice(apt_no, start_time, end_time):\n if apt_no in ['102A', 102]:\n apt_no = '102A'\n\n logger.debug(\"sMap: Getting meter data for %s between %s and %s\", apt_no, start_time, end_time)\n\n query = (\"select data in ('\" + str(start_time) + \"','\" + str(end_time) + \"') \"\n \"limit 200000 \"\n \"where Metadata/LoadLocation/FlatNumber ='\" + str(apt_no) + \"' and \"\n \"Metadata/Extra/PhysicalParameter='Power'\")\n\n r = requests.post(url, data=query)\n # logger.debug (\"%s\",r)\n payload = r.json()\n # logger.debug(\"Payload:%s\", payload)\n\n if apt_no in ['102A', 102]:\n apt_no = 102\n meters = retrieve_meter_info(apt_no)\n logger.debug(\"Meters: %s\", meters)\n\n streams = []\n meter_type = []\n l_meters = range(0, 
len(meters))\n for i in l_meters:\n uuid = payload[i]['uuid']\n\n # Get meter type based on uuid\n for meter in meters:\n if meter['uuid'] == uuid:\n m_type = meter['type']\n # logger.debug (uuid, m_type)\n\n meter_type.append(m_type)\n streams.append(np.array(payload[i]['Readings']))\n # logger.debug(\"Streams: %s\", streams)\n\n if len(streams[0]) > 0:\n\n df = [pd.DataFrame({'time': readings[:, 0] / 1000, 'power': readings[:, 1],\n 'type': [meter_type[i]] * len(readings)},\n columns=['time', 'power', 'type']) for i, readings in enumerate(streams)]\n else:\n df = []\n\n return df", "def filter_meteo_data(self, startdate, enddate):\n self.all_meteo_data.columns.values[0]='Datum-tijd'\n self.all_meteo_data['datetime']=pd.to_datetime(self.all_meteo_data['Datum-tijd'], format='%Y-%m-%dT%H:%M:%SZ')\n self.all_meteo_data.drop(['Datum-tijd'],axis=1, inplace=True)\n mask = (self.all_meteo_data['datetime'] > startdate) & (self.all_meteo_data['datetime'] <= enddate)\n meteodata = self.all_meteo_data.loc[mask].copy()\n meteodata.set_index('datetime',inplace=True)\n return meteodata", "def get_weather_data(weather_station):\n now = datetime.datetime.now()\n then = now - datetime.timedelta(days=7)\n\n query_date_start = (\"%d%02d%02d\" % (then.year, then.month, then.day))\n query_date_end = (\"%d%02d%02d\" % (now.year, now.month, now.day))\n\n api_key = '/api/%s' % WUNDERGROUND_KEY\n history_key = '/history_%s%s/lang:EN/units:english/bestfct:1/v:2.0' % (query_date_start, query_date_end)\n query = '/q/%s.json?showObs=0&ttl=120' % weather_station\n\n weather_url = (\"%s%s%s%s\" % (WUNDERGROUND_HOST, api_key, history_key, query))\n\n logger.info('Weather URL: %s', weather_url)\n response = requests.get(weather_url).text\n\n max_temp_avg = json.loads(response)['history']['summary']['max_temperature_avg']\n sum_precip = json.loads(response)['history']['summary']['precip_sum']\n\n return max_temp_avg, sum_precip", "def satReader(directory,month,latmin,latmax,lonmin,lonmax):\n \n ### Enter filename\n filename = 'cs2icesat_regrid_mar_20042015.nc' \n \n ### Month/Years extracted\n dateyr = now.year \n datemo = datetime.date(dateyr,month+1,1).strftime('%B')\n \n ### Retrieve data\n data = Dataset(directory + filename)\n lat = data.variables['lat'][:]\n lon = data.variables['lon'][:]\n thkn = data.variables['thick'][:]\n data.close()\n \n ### Calculate lat/lon region\n xmask = (lat > latmin) & (lat < latmax)\n ymask = (lon > lonmin) & (lon < lonmax)\n \n mask = xmask[:] & ymask[:]\n latvals = np.where(mask == True)[0]\n lonvals = np.where(mask == True)[1]\n latvals = np.unique(latvals)\n lonvals = np.unique(lonvals)\n \n thk = thkn[:,latvals,:]\n thk = thk[:,:,lonvals]\n \n lat = lat[latvals,:]\n lat = lat[:,lonvals]\n lon = lon[latvals,:]\n lon = lon[:,lonvals]\n\n grid = '---> [[%s to %s N, %s to %s E]]' % (latmin,latmax,lonmin,lonmax)\n print 'Completed: Satellite data read (%s)!' 
% datemo, grid\n \n return lat,lon,thk", "def read300yrh(period):\n directory300 = '/seley/ypeings/simu/PAMIP-1.1-QBO-300yr/monthly/'\n file300 = 'U10_1700-2000.nc'\n filename = directory300 + file300\n \n data = Dataset(filename)\n lat = data.variables['latitude'][:]\n lon = data.variables['longitude'][:]\n u10q = data.variables['U10'][:]\n data.close()\n \n ### Reshape in year/month\n u10n = np.reshape(u10q,(u10q.shape[0]//12,12,lat.shape[0],lon.shape[0]))\n \n ### Calculate over particular months\n u10 = UT.calcDecJanFeb(u10n,lat,lon,'surface',1)\n \n ### Slice U10 at 65N\n latq = np.where((lat >= 64.5) & (lat <= 65.5))[0]\n lat = lat[latq].squeeze()\n u10 = u10[:,latq,:].squeeze()\n \n ### Take zonal mean \n u10z = np.nanmean(u10,axis=1)\n \n ### Remove missing data\n mask = np.where(u10z > -1e5)[0]\n \n ### Detrend\n u10zdt = sss.detrend(u10z[mask],type='linear')\n \n return lat,lon,u10zdt", "def _obtain_data(self):\n (self.data_df, self.column_df, self.station_name, self.log_file, self.station_lat, self.station_lon,\n self.station_elev, self.ws_anemometer_height, self.missing_fill_value, self.script_mode,\n self.auto_mode, self.fill_mode, self.metadata_mode, self.generate_bokeh, self.metadata_df,\n metadata_series) = input_functions.obtain_data(self.config_path, self.metadata_path)\n\n if self.script_mode == 1: # correcting data\n self.mc_iterations = 1000 # Number of iters for MC simulation of thornton running solar radiation gen\n else:\n self.mc_iterations = 50 # if we're not correcting data then only do a few iterations to save time\n\n print(\"\\nSystem: Raw data successfully extracted from station file.\")\n\n # Extract individual variables from data frame back into to numpy arrays.\n self.data_year = np.array(self.data_df.year)\n self.data_month = np.array(self.data_df.month)\n self.data_day = np.array(self.data_df.day)\n self.data_tavg = np.array(self.data_df.tavg)\n self.data_tmax = np.array(self.data_df.tmax)\n self.data_tmin = np.array(self.data_df.tmin)\n self.data_tdew = np.array(self.data_df.tdew)\n self.data_ea = np.array(self.data_df.ea)\n self.data_rhavg = np.array(self.data_df.rhavg)\n self.data_rhmax = np.array(self.data_df.rhmax)\n self.data_rhmin = np.array(self.data_df.rhmin)\n self.data_rs = np.array(self.data_df.rs)\n self.data_ws = np.array(self.data_df.ws)\n self.data_precip = np.array(self.data_df.precip)\n\n self.output_file_path = \"correction_files/\" + self.station_name + \"_output\" + \".xlsx\"", "def get_daily(Data, Y, M, D):\n start = datetime(year=Y, month=M, day=D, hour=0, minute=0)\n end = datetime(year=Y, month=M, day=D, hour=23, minute=59, second=59)\n return Data[start:end][\"clouds\"].map(value_by_cloud)", "def get_data(\n begin_date, end_date, stationid, product, datum=None, bin_num=None,\n interval=None, units='metric', time_zone='gmt'):\n # Convert dates to datetime objects so deltas can be calculated\n begin_datetime = parse_known_date_formats(begin_date)\n end_datetime = parse_known_date_formats(end_date)\n delta = end_datetime - begin_datetime\n\n # If the length of our data request is less or equal to 31 days,\n # we can pull the data from API in one request\n if delta.days <= 31:\n data_url = build_query_url(\n begin_datetime.strftime(\"%Y%m%d %H:%M\"),\n end_datetime.strftime(\"%Y%m%d %H:%M\"),\n stationid, product, datum, bin_num, interval, units, time_zone)\n\n df = url2pandas(data_url, product, num_request_blocks=1)\n\n # If the length of the user specified data request is less than 365 days\n # AND the product is hourly_height or 
high_low, we can pull data directly\n # from the API in one request\n elif delta.days <= 365 and (\n product == 'hourly_height' or product == 'high_low'):\n data_url = build_query_url(\n begin_date, end_date, stationid, product, datum, bin_num, interval,\n units, time_zone)\n\n df = url2pandas(data_url, product, num_request_blocks=1)\n\n # If the length of the user specified data request is greater than 365 days\n # AND the product is hourly_height or high_low, we need to load data from\n # the API in365 day blocks.\n elif product == 'hourly_height' or product == 'high_low':\n # Find the number of 365 day blocks in our desired period,\n # constrain the upper limit of index in the for loop to follow\n num_365day_blocks = int(math.floor(delta.days / 365))\n\n df = pd.DataFrame([]) # Empty dataframe for data from API requests\n\n # Loop through in 365 day blocks,\n # adjust the begin_datetime and end_datetime accordingly,\n # make a request to the NOAA CO-OPS API\n for i in range(num_365day_blocks + 1):\n begin_datetime_loop = begin_datetime + timedelta(days=(i * 365))\n end_datetime_loop = begin_datetime_loop + timedelta(days=365)\n\n # If end_datetime_loop of the current 365 day block is greater\n # than end_datetime specified by user, use end_datetime\n if end_datetime_loop > end_datetime:\n end_datetime_loop = end_datetime\n\n # Build url for each API request as we proceed through the loop\n data_url = build_query_url(\n begin_datetime_loop.strftime('%Y%m%d'),\n end_datetime_loop.strftime('%Y%m%d'),\n stationid, product, datum, bin_num, interval, units, time_zone)\n \n df_new = url2pandas(data_url, product, num_365day_blocks) # Get dataframe for block\n df = df.append(df_new) # Append to existing dataframe\n \n # If the length of the user specified data request is greater than 31 days\n # for any other products, we need to load data from the API in 31 day\n # blocks\n else:\n # Find the number of 31 day blocks in our desired period,\n # constrain the upper limit of index in the for loop to follow\n num_31day_blocks = int(math.floor(delta.days / 31))\n\n df = pd.DataFrame([]) # Empty dataframe for data from API requests\n\n # Loop through in 31 day blocks,\n # adjust the begin_datetime and end_datetime accordingly,\n # make a request to the NOAA CO-OPS API\n for i in range(num_31day_blocks + 1):\n begin_datetime_loop = begin_datetime + timedelta(days=(i * 31))\n end_datetime_loop = begin_datetime_loop + timedelta(days=31)\n\n # If end_datetime_loop of the current 31 day block is greater\n # than end_datetime specified by user, use end_datetime\n if end_datetime_loop > end_datetime:\n end_datetime_loop = end_datetime\n\n # Build URL for each API request as we proceed through the loop\n data_url = build_query_url(\n begin_datetime_loop.strftime('%Y%m%d'),\n end_datetime_loop.strftime('%Y%m%d'),\n stationid, product, datum, bin_num, interval, units, time_zone)\n \n df_new = url2pandas(data_url, product, num_31day_blocks) # Get dataframe for block\n df = df.append(df_new) # Append to existing dataframe\n \n # Rename output dataframe columns based on requested product\n # and convert to useable data types\n if product == 'water_level':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 'q': 'QC', 's': 'sigma',\n 't': 'date_time', 'v': 'water_level'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['flags', 'QC', 'date_time'])\n df[data_cols] = df[data_cols].apply(\n pd.to_numeric, axis=1, errors='coerce')\n\n # Convert date & time strings to 
datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'hourly_height':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 's': 'sigma',\n 't': 'date_time', 'v': 'water_level'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['flags', 'date_time'])\n df[data_cols] = df[data_cols].apply(\n pd.to_numeric, axis=1, errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'high_low':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 'ty': 'high_low',\n 't': 'date_time', 'v': 'water_level'},\n inplace=True)\n\n # Separate to high and low dataframes\n df_HH = df[df['high_low'] == \"HH\"].copy()\n df_HH.rename(columns={'date_time': 'date_time_HH',\n 'water_level': 'HH_water_level'},\n inplace=True)\n\n df_H = df[df['high_low'] == \"H \"].copy()\n df_H.rename(columns={'date_time': 'date_time_H',\n 'water_level': 'H_water_level'},\n inplace=True)\n\n df_L = df[df['high_low'].str.contains(\"L \")].copy()\n df_L.rename(columns={'date_time': 'date_time_L',\n 'water_level': 'L_water_level'},\n inplace=True)\n\n df_LL = df[df['high_low'].str.contains(\"LL\")].copy()\n df_LL.rename(columns={'date_time': 'date_time_LL',\n 'water_level': 'LL_water_level'},\n inplace=True)\n\n # Extract dates (without time) for each entry\n dates_HH = [x.date() for x in pd.to_datetime(df_HH['date_time_HH'])]\n dates_H = [x.date() for x in pd.to_datetime(df_H['date_time_H'])]\n dates_L = [x.date() for x in pd.to_datetime(df_L['date_time_L'])]\n dates_LL = [x.date() for x in pd.to_datetime(df_LL['date_time_LL'])]\n\n # Set indices to datetime\n df_HH['date_time'] = dates_HH\n df_HH.index = df_HH['date_time']\n df_H['date_time'] = dates_H\n df_H.index = df_H['date_time']\n df_L['date_time'] = dates_L\n df_L.index = df_L['date_time']\n df_LL['date_time'] = dates_LL\n df_LL.index = df_LL['date_time']\n\n # Remove flags and combine to single dataframe\n df_HH = df_HH.drop(\n columns=['flags', 'high_low'])\n df_H = df_H.drop(columns=['flags', 'high_low',\n 'date_time'])\n df_L = df_L.drop(columns=['flags', 'high_low',\n 'date_time'])\n df_LL = df_LL.drop(columns=['flags', 'high_low',\n 'date_time'])\n\n # Keep only one instance per date (based on max/min)\n maxes = df_HH.groupby(df_HH.index).HH_water_level.transform(max)\n df_HH = df_HH.loc[df_HH.HH_water_level == maxes]\n maxes = df_H.groupby(df_H.index).H_water_level.transform(max)\n df_H = df_H.loc[df_H.H_water_level == maxes]\n mins = df_L.groupby(df_L.index).L_water_level.transform(max)\n df_L = df_L.loc[df_L.L_water_level == mins]\n mins = df_LL.groupby(df_LL.index).LL_water_level.transform(max)\n df_LL = df_LL.loc[df_LL.LL_water_level == mins]\n\n df = df_HH.join(df_H, how='outer')\n df = df.join(df_L, how='outer')\n df = df.join(df_LL, how='outer')\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(\n ['date_time', 'date_time_HH', 'date_time_H', 'date_time_L',\n 'date_time_LL'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df.index)\n df['date_time_HH'] = pd.to_datetime(df['date_time_HH'])\n df['date_time_H'] = pd.to_datetime(df['date_time_H'])\n df['date_time_L'] = pd.to_datetime(df['date_time_L'])\n df['date_time_LL'] = pd.to_datetime(df['date_time_LL'])\n\n elif product == 'predictions':\n if interval == 'h':\n # Rename columns for clarity\n 
df.rename(columns={'t': 'date_time', 'v': 'predicted_wl'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time'])\n\n elif interval == 'hilo':\n # Rename columns for clarity\n df.rename(columns={'t': 'date_time', 'v': 'predicted_wl',\n 'type': 'hi_lo'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time', 'hi_lo'])\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'currents':\n # Rename columns for clarity\n df.rename(columns={'b': 'bin', 'd': 'direction',\n 's': 'speed', 't': 'date_time'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'wind':\n # Rename columns for clarity\n df.rename(columns={'d': 'dir', 'dr': 'compass',\n 'f': 'flags', 'g': 'gust_spd',\n 's': 'spd', 't': 'date_time'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time', 'flags', 'compass'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'air_pressure':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 't': 'date_time', 'v': 'air_press'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time', 'flags'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'air_temperature':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 't': 'date_time', 'v': 'air_temp'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time', 'flags'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'water_temperature':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 't': 'date_time', 'v': 'water_temp'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time', 'flags'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n # Set datetime to index (for use in resampling)\n df.index = df['date_time']\n df = df.drop(columns=['date_time'])\n\n # Handle hourly requests for water_level and currents data\n if (product == 'water_level') | (product == 'currents') & (\n interval == 'h'):\n df = df.resample('H').first() # Only return the hourly data\n\n return df", "def get(self, request, unit_id):\n start_date = get_start_date(request)\n end_date = get_end_date(request)\n months = {}\n while(start_date <= end_date):\n first_of_month = start_date.replace(day=1)\n last_of_month = start_date.replace(\n day=1) + relativedelta(months=1) - relativedelta(days=1)\n readings_sum = MeterReading.objects.filter(\n date__gte=first_of_month,\n date__lte=last_of_month,\n unit=unit_id,\n reading_type='GAS'\n ).aggregate(Sum('usage'))\n 
months[start_date.strftime(\n \"%Y-%m-%d\")] = round(readings_sum['usage__sum']*.06, 0)\n start_date += relativedelta(months=1)\n return JsonResponse(months, safe=False)", "def read300yr(period):\n directory300 = '/seley/ypeings/simu/PAMIP-1.1-QBO-300yr/monthly/'\n file300 = 'U10_1700-2000.nc'\n filename = directory300 + file300\n \n data = Dataset(filename)\n lat = data.variables['latitude'][:]\n lon = data.variables['longitude'][:]\n u10q = data.variables['U10'][:]\n data.close()\n \n ### Reshape in year/month\n u10n = np.reshape(u10q,(u10q.shape[0]//12,12,lat.shape[0],lon.shape[0]))\n \n ### Calculate over particular months\n u10 = UT.calcDecJanFeb(u10n,lat,lon,'surface',1)\n \n ### Slice U10 at 65N\n latq = np.where((lat >= 64.5) & (lat <= 65.5))[0]\n lat = lat[latq].squeeze()\n u10 = u10[:,latq,:].squeeze()\n \n ### Take zonal mean \n u10z = np.nanmean(u10,axis=1)\n \n ### Remove missing data\n mask = np.where(u10z > -1e5)[0]\n \n ### Detrend\n u10zdt = sss.detrend(u10z[mask],type='linear')\n \n return lat,lon,u10zdt", "def get_time_series_data():\r\n # Grab the requested years and columns from the query arguments\r\n ls_year = [int(year) for year in request.args.getlist(\"n\")]\r\n ls_col = request.args.getlist(\"m\")\r\n\r\n # Generate a list of all the months we need to get\r\n all_years = [str(year) for year in range(min(ls_year), max(ls_year) + 1)]\r\n\r\n # Grab all of the wanted months by filtering for the ones we want\r\n wanted_months = reduce(\r\n lambda a, b: a | b, (app.df[\"month\"].str.contains(year) for year in all_years)\r\n )\r\n\r\n # Create a new dataframe from the one that\r\n df_new = app.df[wanted_months][[\"month\"] + ls_col]\r\n\r\n # Convert all string dates into datetime objects and then sort them\r\n df_new[\"month\"] = pd.to_datetime(df_new[\"month\"])\r\n df_new = df_new.sort_values(by=[\"month\"])\r\n\r\n # Return the dataframe as json\r\n return df_new.to_json(), 200", "def momm(data, date_from: str = '', date_to: str = ''):\n if isinstance(data, pd.Series):\n momm_data = data.to_frame()\n else:\n momm_data = data.copy()\n sliced_data = utils.slice_data(momm_data, date_from, date_to)\n output = _mean_of_monthly_means_basic_method(sliced_data)\n if output.shape == (1, 1):\n return output.values[0][0]\n return output", "def get_time_series(this_lat, this_lon, case, varnames):\n\n cesmdir = '/gpfs/fs1/collections/cdg/data/cesmLE/CESM-CAM5-BGC-LE/atm/proc/tseries/monthly'\n\n if 'LE' in case:\n\n from observational_large_ensemble.params import karen_params_cesm\n\n mode_lag = karen_params_cesm.mode_lag\n cvdp_loc = karen_params_cesm.cvdp_loc\n AMO_cutoff_freq = karen_params_cesm.AMO_cutoff_freq\n\n name_conversion = {'tas': 'TREFHT', 'pr': 'PRECC', 'slp': 'PSL'}\n cesm_names = [name_conversion[v] for v in varnames]\n this_member = int((case).split('-')[-1])\n cvdp_file = '%s/CESM1-CAM5-BGC-LE_#%i.cvdp_data.1920-2018.nc' % (cvdp_loc, this_member)\n\n # Historical filenames for CESM. 
Will need to append part of RCP8.5 to get full period\n filenames = []\n for var in cesm_names:\n file_str = '%s/%s/b.e11.B20TRC5CNBDRD.f09_g16.%03d.cam.h0.%s.??????-200512.nc' % (cesmdir, var,\n this_member, var)\n this_file = glob(file_str)[0]\n filenames.append(this_file)\n\n daX, df_shifted, _ = get_obs(case, varnames[0], filenames,\n karen_params_cesm.valid_years, mode_lag,\n cvdp_file, AMO_cutoff_freq, name_conversion)\n\n this_ts = daX.sel({'lat': this_lat, 'lon': this_lon}, method='nearest')\n\n else:\n\n from observational_large_ensemble.params import karen_params_obs\n\n mode_lag = karen_params_obs.mode_lag\n cvdp_loc = karen_params_obs.cvdp_loc\n AMO_cutoff_freq = karen_params_obs.AMO_cutoff_freq\n\n tas_dir = karen_params_obs.tas_dir\n pr_dir = karen_params_obs.pr_dir\n slp_dir = karen_params_obs.slp_dir\n cvdp_file = '%s/HadISST.cvdp_data.1920-2018.nc' % cvdp_loc\n file_dict = {'tas': '%s/Complete_TAVG_LatLong1.nc' % tas_dir,\n 'pr': '%s/full_data_monthly_v2020.nc' % pr_dir,\n 'slp': '%s/prmsl.mon.mean.nc' % slp_dir}\n\n filenames = []\n for var in varnames:\n filenames.append(file_dict[var])\n\n name_conversion = {'tas': 'temperature', 'pr': 'precip', 'slp': 'prmsl'}\n\n daX, df_shifted, _ = get_obs(case, varnames[0], filenames[0],\n karen_params_obs.valid_years, mode_lag,\n cvdp_file, AMO_cutoff_freq, name_conversion)\n\n this_ts = daX.sel({'lat': this_lat, 'lon': this_lon}, method='nearest')\n\n return this_ts, df_shifted", "def test():\n temp_data = fetch_temp_data(\n (\"https://opendata-download-metobs.smhi.se/api/version/\" +\n \"latest/parameter/1/station/52350/period/latest-day/data.json\"))\n data = temp_series(temp_data)\n print(data)", "def generate_weather_data(self):\n months = pd.to_datetime(self.output['Local Time']).dt.month\n self.output['Month'] = months # set month values for later joins\n\n # merge output data frame with historical data to get ranges\n keys = ['Location', 'Month']\n m = pd.merge(self.output, self.histdata, how='left',\n left_on=keys, right_on=keys)\n\n # uniformly select random pressure, temperature\n # and humidity values between the historical max and min ranges\n r = np.random.rand(m.shape[0])\n m['Temperature'] = ((m['Tmean_high'] - m['Tmean_low']\n ) * r + m['Tmean_low']).round(1)\n m['Pressure'] = ((m['Pmax'] - m['Pmin']) * r + m['Pmin']).round(1)\n m['Humidity'] = ((m['Hmax'] - m['Hmin']) * r + m['Hmin']).astype(int)\n\n # drop redundant columns and assign to output\n dcols = ['Month', 'Timezone', 'Pmax', 'Pmin',\n 'Hmax', 'Hmin', 'Tmean_high', 'Tmean_low']\n m.drop(columns=dcols, inplace=True)\n self.output = m", "def get_data_for_day(i,t0):\n t0 = UTCDateTime(t0)\n\n # open clients\n client = FDSNClient(\"GEONET\")\n client_nrt = FDSNClient('https://service-nrt.geonet.org.nz')\n \n daysec = 24*3600\n data_streams = [[2, 5], [4.5, 8], [8,16]]\n names = ['rsam','mf','hf']\n\n # download data\n datas = []\n try:\n site = client.get_stations(starttime=t0+i*daysec, endtime=t0 + (i+1)*daysec, station='WIZ', level=\"response\", channel=\"HHZ\")\n except FDSNNoDataException:\n pass\n\n try:\n WIZ = client.get_waveforms('NZ','WIZ', \"10\", \"HHZ\", t0+i*daysec, t0 + (i+1)*daysec)\n \n # if less than 1 day of data, try different client\n if len(WIZ.traces[0].data) < 600*100:\n raise FDSNNoDataException('')\n except ObsPyMSEEDFilesizeTooSmallError:\n return\n except FDSNNoDataException:\n try:\n WIZ = client_nrt.get_waveforms('NZ','WIZ', \"10\", \"HHZ\", t0+i*daysec, t0 + (i+1)*daysec)\n except FDSNNoDataException:\n return\n\n # process 
frequency bands\n WIZ.remove_sensitivity(inventory=site)\n data = WIZ.traces[0].data\n ti = WIZ.traces[0].meta['starttime']\n # round start time to nearest 10 min increment\n tiday = UTCDateTime(\"{:d}-{:02d}-{:02d} 00:00:00\".format(ti.year, ti.month, ti.day))\n ti = tiday+int(np.round((ti-tiday)/600))*600\n N = 600*100 # 10 minute windows in seconds\n Nm = int(N*np.floor(len(data)/N))\n for data_stream, name in zip(data_streams, names):\n filtered_data = bandpass(data, data_stream[0], data_stream[1], 100)\n filtered_data = abs(filtered_data[:Nm])\n datas.append(filtered_data.reshape(-1,N).mean(axis=-1)*1.e9)\n\n # compute dsar\n data = cumtrapz(data, dx=1./100, initial=0)\n data -= np.mean(data)\n j = names.index('mf')\n mfd = bandpass(data, data_streams[j][0], data_streams[j][1], 100)\n mfd = abs(mfd[:Nm])\n mfd = mfd.reshape(-1,N).mean(axis=-1)\n j = names.index('hf')\n hfd = bandpass(data, data_streams[j][0], data_streams[j][1], 100)\n hfd = abs(hfd[:Nm])\n hfd = hfd.reshape(-1,N).mean(axis=-1)\n dsar = mfd/hfd\n datas.append(dsar)\n names.append('dsar')\n\n # write out temporary file\n datas = np.array(datas)\n time = [(ti+j*600).datetime for j in range(datas.shape[1])]\n df = pd.DataFrame(zip(*datas), columns=names, index=pd.Series(time))\n df.to_csv('_tmp/_tmp_fl_{:05d}.dat'.format(i), index=True, index_label='time')", "def get(self, request, unit_id):\n start_date = get_start_date(request)\n end_date = get_end_date(request)\n months = {}\n while(start_date <= end_date):\n first_of_month = start_date.replace(day=1)\n last_of_month = start_date.replace(\n day=1) + relativedelta(months=1) - relativedelta(days=1)\n readings_sum = MeterReading.objects.filter(\n date__gte=first_of_month,\n date__lte=last_of_month,\n unit=unit_id\n ).aggregate(Sum('usage'))\n months[start_date.strftime(\n \"%Y-%m-%d\")] = round(readings_sum['usage__sum']*.06, 0)\n start_date += relativedelta(months=1)\n return JsonResponse(months, safe=False)", "def get_data(station,starttime,endtime,activity=False,\n rep='/GNOMEDrive/gnome/serverdata/',resample=None):\n setname = \"MagneticFields\"\n dstr = ['%Y','%m','%d','%H','%M']\n dsplit = '-'.join(dstr[:starttime.count('-')+1])\n start = datetime.strptime(starttime,dsplit)\n starttime = construct_utc_from_metadata(start.strftime(\"%Y/%m/%d\"),\n start.strftime(\"%H:%M:%S.%d\"))\n dsplit = '-'.join(dstr[:endtime.count('-')+1])\n end = datetime.strptime(endtime,dsplit)\n endtime = construct_utc_from_metadata(end.strftime(\"%Y/%m/%d\"),\n end.strftime(\"%H:%M:%S.%d\"))\n dataset = []\n for date in numpy.arange(start,end,timedelta(minutes=1)):\n date = date.astype(datetime)\n path1 = rep+station+'/'+date.strftime(\"%Y/%m/%d/\")\n path2 = station+'_'+date.strftime(\"%Y%m%d_%H%M*.hdf5\")\n fullpath = os.path.join(path1,path2)\n dataset += glob.glob(fullpath)\n if len(dataset)==0:\n print \"ERROR: No data files were found...\"\n quit()\n file_order,data_order = {},{}\n for fname in dataset:\n hfile = h5py.File(fname, \"r\")\n segfile = file_to_segment(hfile,setname)\n file_order[segfile] = fname\n data_order[segfile] = hfile\n # Extract sample rate from metadata of last read data file\n sample_rate = hfile[setname].attrs[\"SamplingRate(Hz)\"]\n # Estimate full segment activity list\n activity = create_activity_list(station,data_order)\n # Generate an ASCII representation of the GPS timestamped\n # segments of time covered by the input data\n seglist = segmentlist(data_order.keys())\n # Sort the segment list\n seglist.sort()\n # Create list of time series from every 
segment\n ts_list = generate_timeseries(file_order,setname)\n # Retrieve channel data for all the segments\n full_data = numpy.hstack([retrieve_channel_data(data_order[seg],setname)\n for seg in seglist])\n new_sample_rate = sample_rate if resample==None else resample\n new_data_length = len(full_data)*new_sample_rate/float(sample_rate)\n full_data = scipy.signal.resample(full_data,int(new_data_length))\n # Models a time series consisting of uniformly sampled scalar values\n ts_data = types.TimeSeries(full_data,delta_t=1./new_sample_rate,\n epoch=seglist[0][0])\n for v in data_order.values():\n v.close()\n return ts_data,ts_list,activity,int(starttime),int(endtime)", "def get_data(last):\n Table = \"ServerRoom\"\n filter = \"\"\n if last == \"lastone\":\n data = request_meteodata(\"SELECT * from `ServerRoom` ORDER BY id DESC LIMIT 1 \")\n if len(data) == 0:\n return [SensorData(datetime.datetime.now(), 0, 0)]\n res = []\n for d in data:\n res.append(SensorData(d[1], d[2], d[3]))\n return res\n if last != \"All\":\n limit = datetime.datetime.now().astimezone(utz)\n if last == \"24hours\":\n limit -= datetime.timedelta(hours=24)\n else:\n limit = limit.replace(hour=0, minute=0, second=0, microsecond=0)\n if last == \"3days\":\n limit -= datetime.timedelta(days=3)\n elif last == \"7days\":\n limit -= datetime.timedelta(days=7)\n elif last == \"month\":\n limit = limit.replace(day=1)\n elif last == \"30days\":\n limit -= datetime.timedelta(days=30)\n elif last == \"year\":\n limit = limit.replace(day=1, month=1)\n filter = \" WHERE `date` > '\" + str(limit) + \"'\"\n order = \" ORDER BY `date` ASC\"\n req = \"SELECT * FROM `\" + Table + \"`\" + filter + order\n data = request_meteodata(req)\n if len(data) == 0:\n print(\"no data: get all\")\n req = \"SELECT * FROM `\" + Table + \"`\" + order\n data = request_meteodata(req)\n res = []\n for d in data:\n res.append(SensorData(d[1], d[2], d[3]))\n return res", "def get(self, request, unit_id):\n start_date = get_start_date(request)\n end_date = get_end_date(request)\n readings = MeterReading.objects.filter(\n unit=unit_id,\n date__gte=start_date,\n date__lte=end_date,\n reading_type='GAS'\n )\n serializer = MeterReadingSerializer(readings, many=True)\n return Response(serializer.data)", "def query_radar_data(station,product,start,\n minute_delta=0,hour_delta=0,day_delta=0):\n \n end = start+timedelta(days=day_delta, minutes=minute_delta, hours=hour_delta)\n \n print(f\"query start time:{start}\")\n print(f\"query end time:{end}\")\n rs = RadarServer('http://thredds-aws.unidata.ucar.edu/thredds/radarServer/nexrad/level2/S3/')\n query = rs.query()\n rs.validate_query(query)\n print(rs.stations[station])\n\n query.stations(station).time_range(start,end).variables(product)\n catalog = rs.get_catalog(query)\n file_station = str(catalog.datasets[0])\n file_station = file_station[0:4]\n \n file_list = list(catalog.datasets.values())\n for t in file_list: print(t)\n LatLonBox = [rs.stations[station].longitude-3,rs.stations[station].longitude+3,\n rs.stations[station].latitude-2,rs.stations[station].latitude+2]\n \n return file_list,LatLonBox", "def get(self, request, unit_id):\n start_date = get_start_date(request)\n end_date = get_end_date(request)\n months = {}\n while(start_date <= end_date):\n first_of_month = start_date.replace(day=1)\n last_of_month = start_date.replace(\n day=1) + relativedelta(months=1) - relativedelta(days=1)\n readings_sum = MeterReading.objects.filter(\n date__gte=first_of_month,\n date__lte=last_of_month,\n unit=unit_id,\n 
reading_type='ELECTRICITY'\n ).aggregate(Sum('usage'))\n months[start_date.strftime(\n \"%Y-%m-%d\")] = round(readings_sum['usage__sum']*.06, 0)\n start_date += relativedelta(months=1)\n return JsonResponse(months, safe=False)", "def get_alpaca_data(self,ticker_list,start,end, timeframe = \"1D\"):\n s = pd.Timestamp(start,tz = \"America/New_York\").isoformat()\n e = pd.Timestamp(end,tz = \"America/New_York\").isoformat()\n \n df = api.get_barset(\n ticker_list,\n timeframe,\n start = s,\n end = e\n\n ).df\n return df", "def retrieve_data_timeseries(hfile, setname):\n dset = hfile[setname]\n sample_rate = dset.attrs[\"SamplingRate(Hz)\"]\n gps_epoch = construct_utc_from_metadata(dset.attrs[\"Date\"], dset.attrs[\"t0\"])\n data = retrieve_channel_data(hfile, setname)\n ts_data = TimeSeries(data, sample_rate=sample_rate, epoch=gps_epoch)\n return ts_data", "def temp_monthly():\n # Calculate the date 1 year ago from last date in database\n prev_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)\n results = session.query(Measurement.tobs).\\\n filter(Measurement.station == 'USC00519281').\\\n filter(Measurement.date >= prev_year).all()\n # Unravel results into a ID array and convert to a list\n temps = list(np.ravel(results))\n \n # Return the results\n return jsonify(temps)" ]
[ "0.7159997", "0.5764372", "0.5731595", "0.5731141", "0.56950647", "0.5617954", "0.5477128", "0.5425443", "0.542119", "0.540292", "0.5396697", "0.5381608", "0.5376013", "0.536097", "0.53551173", "0.53006876", "0.5294632", "0.52827334", "0.5266694", "0.52493864", "0.524286", "0.5226539", "0.5221424", "0.520972", "0.5206583", "0.5203294", "0.5195351", "0.5192514", "0.51864916", "0.51814735" ]
0.7412664
0
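The tide-gauge retrieval snippets above all share one pattern: when a request exceeds the API's per-call limit (31 days for most products, 365 days for hourly_height and high_low), the date range is walked in fixed-size blocks and the per-block results are concatenated. A minimal, stdlib-only sketch of that chunking step follows; the helper name, block size argument, and example dates are illustrative assumptions, not part of the NOAA CO-OPS client.

```python
# Illustrative sketch of the block-splitting idea used by the snippets above.
# Mirrors their "floor(days / block) + 1" loop; names are assumptions.
from datetime import datetime, timedelta

def split_date_range(begin: datetime, end: datetime, block_days: int = 31):
    """Yield (block_start, block_end) pairs covering [begin, end] in fixed-size blocks."""
    num_blocks = (end - begin).days // block_days  # number of full blocks
    for i in range(num_blocks + 1):
        block_start = begin + timedelta(days=i * block_days)
        block_end = min(block_start + timedelta(days=block_days), end)
        yield block_start, block_end

# Example: a 100-day request becomes four blocks of at most 31 days each.
blocks = list(split_date_range(datetime(2020, 1, 1), datetime(2020, 4, 10)))
```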
Add trick to the dog. This function illustrates the mistaken use of the mutable class variable tricks (see below).
def add_trick(self, trick): self.tricks.append(trick)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_class_and_instance_variables():\n\n # pylint: disable=too-few-public-methods\n class Dog:\n \"\"\"Dog class example\"\"\"\n\n kind = \"canine\" # Class variable shared by all instances.\n\n def __init__(self, name):\n self.name = name # Instance variable unique to each instance.\n\n fido = Dog(\"Fido\")\n buddy = Dog(\"Buddy\")\n\n # Shared by all dogs.\n assert fido.kind == \"canine\"\n assert buddy.kind == \"canine\"\n\n # Unique to fido.\n assert fido.name == \"Fido\"\n\n # Unique to buddy.\n assert buddy.name == \"Buddy\"\n\n # Shared data can have possibly surprising effects with involving mutable objects such as lists\n # and dictionaries. For example, the tricks list in the following code should not be used as a\n # class variable because just a single list would be shared by all Dog instances.\n\n # pylint: disable=too-few-public-methods\n class DogWithSharedTricks:\n \"\"\"Dog class example with wrong shared variable usage\"\"\"\n\n tricks = [] # Mistaken use of a class variable (see below) for mutable objects.\n\n def __init__(self, name):\n self.name = name # Instance variable unique to each instance.\n\n def add_trick(self, trick):\n \"\"\"Add trick to the dog\n\n This function illustrate mistaken use of mutable class variable tricks (see below).\n \"\"\"\n self.tricks.append(trick)\n\n fido = DogWithSharedTricks(\"Fido\")\n buddy = DogWithSharedTricks(\"Buddy\")\n\n fido.add_trick(\"roll over\")\n buddy.add_trick(\"play dead\")\n\n assert fido.tricks == [\"roll over\", \"play dead\"] # unexpectedly shared by all dogs\n assert buddy.tricks == [\"roll over\", \"play dead\"] # unexpectedly shared by all dogs\n\n # Correct design of the class should use an instance variable instead:\n\n # pylint: disable=too-few-public-methods\n class DogWithTricks:\n \"\"\"Dog class example\"\"\"\n\n def __init__(self, name):\n self.name = name # Instance variable unique to each instance.\n self.tricks = [] # creates a new empty list for each dog\n\n def add_trick(self, trick):\n \"\"\"Add trick to the dog\n\n This function illustrate mistaken use of mutable class variable tricks (see below).\n \"\"\"\n self.tricks.append(trick)\n\n fido = DogWithTricks(\"Fido\")\n buddy = DogWithTricks(\"Buddy\")\n\n fido.add_trick(\"roll over\")\n buddy.add_trick(\"play dead\")\n\n assert fido.tricks == [\"roll over\"]\n assert buddy.tricks == [\"play dead\"]", "def make_mutable(obj):\n _mutable_objs.append(obj)", "def __init__(self, val):\n self.lst = []\n self.val = val\n MyClass._spam += 1", "def test_setter_shadowing(self):\n class Test(pyperry.Base):\n\n def get_foo(self):\n return self['foo']\n\n def set_foo(self, val):\n self['foo'] = \"Mine\"\n\n foo = property(get_foo, set_foo)\n Test.attributes('foo')\n\n test = Test({'foo': 1})\n\n self.assertEqual(test.foo, 1)\n test.foo = 'Test'\n self.assertEqual(test.foo, 'Mine')", "def __setattr__(self, ???):", "def test_oldclass_and_direct_set(self):\n global setVal\n class OldStyle:\n def __setitem__(self, index, value):\n global setVal\n setVal = index, value\n\n class OldStyleWithLen:\n def __setitem__(self, index, value):\n global setVal\n setVal = index, value\n def __len__(self):\n return 10\n\n class NewStyle(object):\n def __setitem__(self, index, value):\n global setVal\n setVal = index, value\n\n class OldStyleWithLenAndGetSlice:\n def __setitem__(self, index, value):\n global setVal\n setVal = index, value\n def __len__(self):\n return 10\n def __setslice__(self, start, stop, value):\n global setVal\n setVal = start, stop, value\n\n # 
slice object should pass through unmodified if constructed explicitly.\n NewStyle()[slice(None, -1, None)] = 123\n self.assertEqual(setVal, (slice(None, -1, None), 123))\n OldStyleWithLen()[slice(None, -1, None)] = 123\n self.assertEqual(setVal, (slice(None, -1, None), 123))\n OldStyle()[slice(None, -1, None)] = 123\n self.assertEqual(setVal, (slice(None, -1, None), 123))\n OldStyleWithLenAndGetSlice()[slice(None, -1, None)] = 123\n self.assertEqual(setVal, (slice(None, -1, None), 123))\n\n # using the slice syntax\n NewStyle()[:-1] = 123\n self.assertEqual(setVal, (slice(None, -1, None), 123))\n OldStyleWithLen()[:-1] = 123\n self.assertEqual(setVal, (slice(None, -1, None), 123))\n OldStyleWithLenAndGetSlice()[:-1] = 123\n self.assertEqual(setVal, (slice(None, -1), 123))\n OldStyle()[:-1:1] = 123\n self.assertEqual(setVal, (slice(None, -1, 1), 123))\n OldStyle()[:-1] = 123\n self.assertEqual(setVal, (slice(-1), 123))\n OldStyle()[-1:] = 123\n self.assertEqual(setVal, (slice(-1, None), 123))\n OldStyle()[:-1:None] = 123\n self.assertEqual(setVal, (slice(None, -1, None), 123))\n OldStyle()[-1::None] = 123\n self.assertEqual(setVal, (slice(-1, None, None), 123))\n OldStyle()[:-1:] = 123\n self.assertEqual(setVal, (slice(None, -1, None), 123))\n OldStyle()[-1::] = 123\n self.assertEqual(setVal, (slice(-1, None, None), 123))", "def teach_trick(self, trick_name):\r\n # TODO\r\n pass", "def patchit(ofs, aname):\n def _getter(self, index):\n \"\"\"herp\"\"\"\n return self._vec[index]\n\n def _setter(self, index, value):\n \"\"\"derp\"\"\"\n self._vec[index] = value\n\n setattr(Vector3, aname, property(\n lambda s: _getter(s, ofs),\n lambda s,v: _setter(s, ofs, v)))", "def frozen(setfun):\n def set_attr(self,name,value):\n if hasattr(self,name): #If attribute already exists, simply set it\n setfun(self,name,value)\n return\n elif sys._getframe(1).f_code.co_name == '__init__': #Allow __setattr__ calls in __init__ calls of proper object types\n for k,v in sys._getframe(1).f_locals.items():\n if k==\"self\" and isinstance(v, self.__class__):\n setfun(self,name,value)\n return\n raise AttributeError(\"You cannot add attributes to %s\" % self)\n return set_attr", "def __setattr__(self, attr, value):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def __setattr__(*args):", "def _add_variable_proxy_methods(var, proxy_tensor):\n proxy_tensor.read_value = lambda: tf.identity(proxy_tensor)\n proxy_tensor.assign_sub = var.assign_sub\n proxy_tensor.assign = var.assign\n proxy_tensor.initialized_value = var.initialized_value" ]
[ "0.62275213", "0.54250777", "0.5253472", "0.5125945", "0.50684893", "0.50618595", "0.50537896", "0.50460774", "0.50388473", "0.5032443", "0.5029293", "0.5029293", "0.5029293", "0.5029293", "0.5029293", "0.5029293", "0.5029293", "0.5029293", "0.5029293", "0.5029293", "0.5029293", "0.5029293", "0.5029293", "0.5029293", "0.5029293", "0.5029293", "0.5029293", "0.5029293", "0.5029293", "0.5029053" ]
0.5709575
1
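This row (and its first negative, which spells the point out in full) concerns the classic mutable-class-attribute pitfall: when tricks is declared at class level, self.tricks.append(...) mutates a single list shared by every instance. A compact sketch of the broken and corrected versions is given below; the class names are illustrative.

```python
# Sketch of the pitfall the add_trick example refers to.
class DogShared:
    tricks = []                      # class attribute: one list shared by all instances

    def add_trick(self, trick):
        self.tricks.append(trick)    # mutates the shared list

class DogFixed:
    def __init__(self):
        self.tricks = []             # instance attribute: a fresh list per dog

    def add_trick(self, trick):
        self.tricks.append(trick)

a, b = DogShared(), DogShared()
a.add_trick("roll over")
assert b.tricks == ["roll over"]     # surprising: b sees a's trick

c, d = DogFixed(), DogFixed()
c.add_trick("roll over")
assert d.tricks == []                # each dog keeps its own list
```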
This method registers the book in the books table, but first checks whether the book is already registered. I decided to take the barcode as a string, so both bar code parameters can be used. The stock defaults to 0, so if the user doesn't pass a stock value, the quantity is already set to 0
def register_book(self, title: str, author: str, price: float, barcode: str, stock=0): try: if not self.verify_register(barcode): self.db.cursor.execute('INSERT INTO books (title, author, price, bar_code, stock) VALUES (%s, %s, %s, ' '%s, %s)', (title, author, round(price, 2), barcode, stock)) self.db.con.commit() self.db.con.close() print('Registered Successfully!') else: print('Book already registered!') except Exception as error: print(error)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_book(self, data):\n exists = self.check_if_exists(data['isbn'])\n\n if exists:\n query = f\"\"\"UPDATE {TABLE} SET quantity = quantity + 10 WHERE bookID = '{data[\"isbn\"]}'\"\"\"\n else:\n query = f\"\"\"INSERT INTO {TABLE}(bookID, title, authors, avg_rating, ratings_count,\n lang_code, num_pages, text_reviews, pub_date, publisher) values(\n \"{data['isbn']}\",\n \"{data['title']}\",\n \"{data['authors']}\",\n {float(data['average_rating'])},\n {int(data['ratings_count'])},\n \"{data['language_code']}\",\n {int(data[' num_pages'])},\n {int(data['text_reviews_count'])},\n \"{data['publication_date']}\",\n \"{data['publisher']}\"\n );\"\"\"\n\n try:\n self.cursor.execute(query)\n self.conn.commit()\n except Error as e:\n print(e)", "def verify_register(self, barcode: str):\n try:\n test = []\n self.db.cursor.execute(f'SELECT * FROM books where bar_code = {barcode}')\n for i in self.db.cursor.fetchall():\n test.append(i)\n except Exception as error:\n print(error)\n else:\n if len(test) >= 1:\n return True\n else:\n return False", "def add_book(code: str, name: str, author: str, quantity: int):\n pass", "def consult_books(self, bar_code: str):\n try:\n book_data = []\n self.db.cursor.execute('SELECT * from books WHERE bar_code = %s', (bar_code,))\n for i in self.db.cursor.fetchall():\n book_data.append(i)\n except Exception as error:\n print(error)\n else:\n print(f\"ID BOOK: {book_data[0][0]}\\n\"\n f\"TITLE: {book_data[0][1]}\\n\"\n f\"AUTHOR: {book_data[0][2]}\\n\"\n f\"PRICE: R$:{book_data[0][3]}\\n\"\n f\"BAR CODE: {book_data[0][4]}\\n\"\n f\"STOCK: {book_data[0][5]}\")", "def save(self, *args, **kwargs):\n if not self.id:\n while True:\n code = ''.join(random.choice('0123456789ABCDEF') for i in range(16))\n if not Book.objects.filter(unique_code=code).exists():\n self.unique_code = code\n break\n return super(Book, self).save(*args, **kwargs)", "def create(self, book_info, destroy):\n self.connect()\n bid = book_info[0].get()\n title = book_info[1].get()\n author = book_info[2].get()\n status = book_info[3].get()\n status = status.lower()\n\n q = \"insert into {} values ('{}','{}','{}','{}')\"\n addbook_query = q.format(self.book_table, bid, title, author, status)\n try:\n self.cur.execute(addbook_query)\n self.con.commit()\n messagebox.showinfo('Success', \"Book added successfully\")\n except MySQLError as err:\n messagebox.showinfo(\"Error\", \"Can't add data into Database\")\n print(err)\n destroy()", "def restock_book(self, isbn, quantity):\n self.cursor.execute(\"\"\"SELECT COUNT(*) FROM book WHERE ISBN=%s\"\"\", (isbn,))\n if self.cursor.fetchone()[0]:\n self.cursor.execute(\"\"\"UPDATE book set stock=stock+%s WHERE ISBN=%s\"\"\", (quantity, isbn))\n self.db.commit()\n return True\n return False", "def add_book() -> None:\r\n title = input('Enter Book Title :')\r\n author = input('Enter Book Author : ')\r\n pages = input('Enter Book Pages : ')\r\n price = input('Enter Book Price : ')\r\n book_status = \"\"\r\n if title.lower() not in books_di:\r\n books_di[title.lower()] = [author, pages, price, book_status]\r\n else:\r\n print(f'\\n Student is already existed with that name{title}')\r\n logging.warning(f'\\n Student is already existed with that name{title}')\r\n print(f'\\n\\nNew Book {title} added successfully')\r\n logging.info(f'\\n\\nNew Book {title} added successfully')\r\n wait = input('\\n\\n\\n Press any key to continue....')", "def add_book(self):\r\n self.addbook = input(\"\\nInput the name of book:\")\r\n if self.addbook in self.books:\r\n 
print(f\"{self.addbook} is already in the list\")\r\n else:\r\n books = self.books.append(self.addbook)\r\n print(f\"The book {self.addbook} added successfully\")", "def add_book(self, book):\n\n try:\n with self._db as db:\n cur = db.cursor()\n cur.execute('INSERT INTO books values (?, ?, ?)', (book.title, book.author, book.read))\n book.id = cur.lastrowid\n except sqlite3.IntegrityError:\n raise BookError('This book is already in the database')\n except sqlite3.Error as e:\n raise BookError(f'Error adding book {book}') from e", "def create_books_data():\n pass", "def populate_tables(self, data_book, data_author, datafile_name, initial_stock=20):\n\n print(\"\\nPopulating book table with input data from\", datafile_name, \"...\", end='')\n count = 0\n failed_books = []\n for book in data_book:\n try:\n date = datetime.datetime.strptime(book[7], '%m/%d/%Y').date()\n t = (book[0], book[1], book[8], book[3], date,\n int(book[4]), initial_stock, book[9])\n self.cursor.execute(\n \"\"\"INSERT INTO book (ISBN, title, publisher, lang, publicationDate, pageCount, stock, price) \n VALUES (%s, %s, %s, %s, %s, %s, %s, %s)\"\"\", t)\n except Exception as e:\n count = count + 1\n failed_books.append(t[1])\n if failed_books:\n print(\"\\nSome books were not added to the database because they had an invalid format:\")\n for book in failed_books:\n print(book)\n print(\"\\nTotal books not included in database: \", count)\n self.cursor.execute(\n \"\"\"SELECT COUNT(*)\n FROM book\"\"\")\n num_successful = self.cursor.fetchall()\n print(num_successful[0][0], \"books successfully inserted into table \\\"Book\\\".\")\n self.db.commit()\n print(\"done\")\n # Now we populate authors. First need to get all ISBNs of books that were added to the book table\n print(\"\\nAdding authors to \\\"Author\\\" table...\", end='')\n self.cursor.execute(\"SELECT ISBN FROM Book\")\n list_books = [book[0] for book in self.cursor.fetchall()]\n\n for author in data_author:\n self.cursor.execute(\"INSERT INTO author (name) VALUES (%s)\", (author,))\n self.db.commit()\n for book in data_author[author]:\n if book in list_books:\n self.cursor.execute(\"SELECT ID FROM author WHERE name = %s\", (author,))\n auth_id = self.cursor.fetchone()[0]\n self.cursor.execute(\"INSERT IGNORE INTO wrote VALUES (%s,%s)\", (auth_id, book))\n self.db.commit()\n print(\"done\")\n # # Finally, populate HasKeyword table. For now just add words in title and author names\n # print(\"\\nGenerating keywords for \\\"HasKeyword\\\" table...\", end='')\n # for book in list_books:\n # self.cursor.execute(\"SELECT title from book WHERE ISBN = %s\", (book,))\n # keywords = [i[0].split(' ') for i in self.cursor.fetchall()]\n # self.cursor.execute(\"SELECT name FROM author A, wrote W WHERE A.ID = W.authorID AND W.ISBN = %s\", (book,))\n # authors = [i[0].split(' ') for i in self.cursor.fetchall()]\n #\n # keywords.extend(authors)\n # for word_subset in keywords:\n # for word in word_subset:\n # if not word.isspace() and word:\n # self.cursor.execute(\"INSERT IGNORE INTO HasKeyword VALUES(%s,%s)\", (book, word))\n # self.db.commit()\n # print(\"done\")", "def create_book(self, title, isbn):\n isbn_list = [book.get_isbn() for book in self.books.keys()]\n if isbn in isbn_list:\n print(\"ISBN {isbn} already exists. 
Please provide a unique ISBN.\".format(isbn=isbn))\n else:\n return Book(title, isbn)", "def insert_book(self, title, author, year, isbn):\n self.cursor.execute(\"INSERT INTO Book VALUES(NULL, ?, ?, ?, ?)\",\n (title, author, year, isbn))\n self.connection.commit()", "def delete_book(self, barcode):\n try:\n self.db.cursor.execute('DELETE FROM books where id_books = %s', (barcode,))\n except Exception as error:\n print(error)\n else:\n self.db.con.commit()\n self.db.con.close()\n print('Deleted Successfully!')", "def order_book(self, order_details):\n order_date = datetime.date.today()\n self.cursor.execute(\"INSERT INTO orderlog (loginID, orderDate) VALUES (%s, %s)\",\n (order_details['loginID'], order_date))\n order_id = self.cursor.lastrowid\n for i in range(len(order_details['ISBN'])):\n self.cursor.execute(\"INSERT INTO productof Values (%s, %s, %s)\",\n (order_details['ISBN'][i], order_id, order_details['quantity'][i]))\n self.cursor.execute(\"UPDATE book SET stock=stock-%s WHERE ISBN=%s\",\n (order_details['quantity'][i], order_details['ISBN'][i]))\n self.db.commit()\n return order_id", "def sqlite_save(barcodes):\n\n # quick and dirty - don't save if there is no data.\n if not len(barcodes):\n # skip this if there are no barcodes\n return \"No data to save... continue...\"\n\n # reformat each list item to a tuple for sqlite3 executemany\n barcode_tuples = [(x,) for x in barcodes]\n\n conn = sqlite3.connect(db_file)\n c = conn.cursor()\n\n c.executemany(\"INSERT INTO barcode VALUES (?)\", barcode_tuples)\n\n c.execute(\"SELECT COUNT(*) FROM barcode\")\n BOOK_COUNT = c.fetchall()\n\n conn.commit()\n conn.close()\n \n return \"Save seemed successful. {} total books have been entered.\".format(BOOK_COUNT[0][0],)", "def barcode(self, barcode):\n sample_sql = \"\"\"UPDATE barcodes.sample\n SET barcode = %s\n WHERE sample_id = %s\n \"\"\"\n barcode_sql = \"\"\"UPDATE barcodes.barcode\n SET assigned_on = NOW()\n WHERE barcode = %s\n \"\"\"\n with pm.sql.TRN:\n if pm.util.check_barcode_assigned(barcode):\n raise ValueError(\"Barcode %s already assigned\" % barcode)\n if self.barcode is not None:\n raise pm.exceptions.AssignError(\n 'Barcode already assigned to this sample')\n pm.sql.TRN.add(sample_sql, [barcode, self.id])\n pm.sql.TRN.add(barcode_sql, [barcode])\n pm.sql.TRN.execute()", "def rent_book(self, user, book):\n if book.in_stock > 0:\n # check if the user has the book\n try:\n r = self.get(rented_by=user, book=book, returned_on=None)\n # if there is a rental by the user, raise a custom exception\n raise RentalExists(\"Book %s is already rented by %s\" % (book.title, user.username))\n except Rental.DoesNotExist:\n # if the user doesn't have the book\n r = self.create(book=book, rented_by=user)\n r.save()\n # remove the reservation if it exists\n Reservation.objects.remove_reservation(user=user, book=book)\n book.in_stock -= 1\n book.save()\n else:\n # if the book isn't in stock raise a custom exception\n raise BookNotInStock(\"Book %s is out of stock!\" % book.title)", "def update(self, book_info, destroy):\n self.connect()\n is_issue = len(book_info) == 2\n\n bid = book_info[0].get()\n if is_issue:\n issue_to = book_info[1].get()\n\n if is_issue:\n extract_bid = f\"select bid from {self.book_table}\"\n else:\n extract_bid = f\"select bid from {self.issued_table}\"\n\n status = False\n try:\n self.cur.execute(extract_bid)\n self.con.commit()\n for i in self.cur:\n self.all_bid.append(i[0])\n\n if bid in self.all_bid:\n check_avail = f\"select status from {self.book_table} where \" 
\\\n f\"bid = '{bid}'\"\n self.cur.execute(check_avail)\n self.con.commit()\n check = None\n for i in self.cur:\n check = i[0]\n\n if (is_issue and check == 'avail'\n or not is_issue and check == 'issued'):\n status = True\n else:\n status = False\n else:\n messagebox.showinfo(\"Error\", \"Book ID not present\")\n except MySQLError as err:\n messagebox.showinfo(\"Error\", \"Can't fetch Book IDs\")\n print(err)\n\n if is_issue:\n issue_sql = f\"insert into {self.issued_table} values ('{bid}',\" \\\n f\"'{issue_to}')\"\n up_status = f\"update {self.book_table} set status = 'issued' \" \\\n f\"where bid = '{bid}'\"\n else:\n issue_sql = f\"delete from {self.issued_table} where bid = '{bid}'\"\n up_status = f\"update {self.book_table} set status = 'avail' \" \\\n f\"where bid = '{bid}'\"\n\n try:\n if bid in self.all_bid and status:\n self.cur.execute(issue_sql)\n self.con.commit()\n self.cur.execute(up_status)\n self.con.commit()\n if is_issue:\n msg = \"Book Issued Successfully\"\n else:\n msg = \"Book Returned Successfully\"\n state = 'Success'\n else:\n if is_issue:\n msg = \"Book Already Issued\"\n else:\n msg = \"Please check the book ID\"\n state = \"Message\"\n messagebox.showinfo(state, msg)\n except MySQLError as err:\n messagebox.showinfo(\n \"Search Error\", \"The value entered is wrong, Try again\"\n )\n print(err)\n self.all_bid.clear()\n destroy()", "def update_price_books(self, barcode, new_price):\n try:\n self.db.cursor.execute('UPDATE books SET price = %s where id_books = %s', (round(new_price, 2), barcode))\n except Exception as error:\n print(error)\n else:\n self.db.con.commit()\n self.db.con.close()\n print('Updated Successfully!')", "def _insert_billcode(self):\n # Insert\n if db_billcode.idx_billcode_exists(1) is False:\n record = Billcode(\n code=general.encode(self.reserved),\n name=general.encode(self.reserved))\n database = db.Database()\n database.add(record, 1104)", "def valid_book(self, info):\n self.cursor.execute(\"SELECT ISBN, title, price, stock FROM book WHERE ISBN=%s\", (info['ISBN'],))\n for book in self.cursor.fetchall():\n return True, float(book[2]), book[1], book[3]\n return False, 0, 0, 0", "def receive_book_start(request, uniqname):\n if not Permissions.can_process_bookswap(request.user):\n request.session['error_message'] = messages.BOOKSWAP_NO_PERM\n return get_previous_page(request, alternate='bookswap:admin_index')\n if not BookSwapStatus.can_receive(AcademicTerm.get_current_term()):\n request.session['error_message'] = 'Book receiving not enabled'\n return get_previous_page(request, alternate='bookswap:admin_index')\n form = BookSearchForm(request.POST or None)\n if request.method == 'POST':\n if form.is_valid():\n barcode = form.cleaned_data.get('book_barcode','')\n book_type = BookType.objects.filter(isbn=barcode)\n if book_type.exists():\n # TODO: If multiple give choice?\n book_type = book_type[0]\n request.session['success_message'] = ('Book found, please '\n 'enter sale details.')\n return redirect('bookswap:receive_book',\n uniqname=uniqname,\n book_type_id=book_type.id)\n\n else:\n request.session['warning_message'] = ('Book not found, please '\n 'enter details.')\n request.session['uniqname'] = uniqname\n request.session['isbn'] = barcode\n return redirect('bookswap:create_book_type')\n\n else:\n request.session['error_message'] = messages.GENERIC_SUBMIT_ERROR\n template = loader.get_template('generic_form.html')\n context_dict = {\n 'form': form,\n 'subnav': 'admin',\n 'has_files': False,\n 'submit_name': 'Search for book by ISBN',\n 
'form_title': 'Search for a book in the system',\n 'help_text': ('You can search for a book by its ISBN, which is the '\n '13 digit code scanned by the barcode.'),\n 'base': 'bookswap/base_bookswap.html',\n }\n context_dict.update(get_permissions(request.user))\n context_dict.update(get_common_context(request))\n context = RequestContext(request, context_dict)\n return HttpResponse(template.render(context))", "def addBooks(request):\n if request.method == 'POST':\n userHash = request.data['hash']\n email = request.data['email']\n\n User = UserInformation.objects.get(loginSessionHash=userHash)\n\n title = request.data['title']\n pages = request.data['pages']\n author = request.data['author']\n url = request.data['url']\n if User.email == email:\n book = Book(title=title, holder=User, pages=pages, author=author, url=url)\n book.save()\n return Response({'status': 'successfull'})", "def insert_book(title, author, year):\n try:\n cursor = conn.cursor()\n cursor.execute(\"\"\"\n INSERT INTO books(title, author, year)\n VALUES(?,?,?)\n \"\"\", (title, author, year))\n conn.commit()\n except Exception as e:\n logging.error(e)\n return False\n\n return True", "def lend_book(self):\r\n self.name = input(\"Please enter your name: \")\r\n lend_input = input(\"Enter the name of the book you want to lend:\")\r\n self.lend_data =dict()\r\n for book in self.books:\r\n if book.lower() == lend_input.lower():\r\n self.availablity_data[book] = \"Unavailable\"\r\n if self.lend_data is None:\r\n self.lend_data[book] = self.name\r\n else:\r\n self.lend_data.update({book: self.name})\r\n self.books.remove(book)\r\n return print(f\"{book} is lend to {self.name}\")\r\n elif lend_input not in self.books:\r\n print(\"book is not in the library\")\r\n break", "def return_book(self, user, book):\n r = self.get(rented_by=user, book=book, returned_on=None)\n r.returned_on = datetime.now()\n r.save()\n r.book.in_stock += 1\n r.book.save()", "def test_add_duplicate_book(self):\n create_admin()\n response = self.client().post('/api/v1/login', json=self.test_admin)\n json_data = json.loads(response.data)\n access_token = json_data.get('access_token')\n self.client().post('/api/v1/products',\n headers={\"Authorization\": \"Bearer \" + access_token}, json=self.test_book)\n response = self.client().post('/api/v1/products',\n headers={\"Authorization\": \"Bearer \" + access_token}, json=self.test_book)\n json_data = json.loads(response.data)\n self.assertTrue(json_data.get('Error'))\n self.assertEqual(json_data.get('Error'), \"Book already exists\")\n self.assertEqual(response.status_code, 409)", "def save_book(self):\n db.session.add(self)\n db.session.commit()" ]
[ "0.7018125", "0.67015535", "0.6207142", "0.6084916", "0.60626495", "0.60456854", "0.59645706", "0.591887", "0.5900992", "0.5860568", "0.5826839", "0.57534194", "0.5710244", "0.57028556", "0.5676682", "0.56455344", "0.56382316", "0.5637503", "0.5599727", "0.5594511", "0.55896395", "0.5574478", "0.5514177", "0.5493709", "0.5492868", "0.5381653", "0.53711087", "0.5367783", "0.5358385", "0.53416896" ]
0.824485
0
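The register_book row above pairs the INSERT with a duplicate check on the barcode and lets stock default to 0. A self-contained sketch of that check-then-insert flow is shown below using sqlite3 rather than the MySQL-style connector in the original; the table and column names mirror the snippet, and the sample data is made up.

```python
# Runnable sketch of the register_book flow: check barcode, insert if new, stock defaults to 0.
import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE books (id_books INTEGER PRIMARY KEY, title TEXT, "
            "author TEXT, price REAL, bar_code TEXT, stock INTEGER)")

def register_book(title, author, price, barcode, stock=0):
    already = con.execute("SELECT 1 FROM books WHERE bar_code = ?", (barcode,)).fetchone()
    if already:
        return "Book already registered!"
    con.execute("INSERT INTO books (title, author, price, bar_code, stock) VALUES (?, ?, ?, ?, ?)",
                (title, author, round(price, 2), barcode, stock))
    con.commit()
    return "Registered Successfully!"

print(register_book("Dune", "Frank Herbert", 39.9, "7891234567890"))  # Registered Successfully!
print(register_book("Dune", "Frank Herbert", 39.9, "7891234567890"))  # Book already registered!
```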
This method updates the price of a book, identified by its barcode.
def update_price_books(self, barcode, new_price): try: self.db.cursor.execute('UPDATE books SET price = %s where id_books = %s', (round(new_price, 2), barcode)) except Exception as error: print(error) else: self.db.con.commit() self.db.con.close() print('Updated Successfully!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __on_update_bookticker(self, action, bookticker):\n self.best_bid_price = float(bookticker['b'])\n self.best_ask_price = float(bookticker['a'])", "def update(self, price, volume):\r\n if price > self.hig:\r\n self.hig = price\r\n if price < self.low:\r\n self.low = price\r\n self.cls = price\r\n self.vol += volume", "def _update_total_bid(self, volume, price):\r\n self.total_bid += \\\r\n self.gox.base2float(volume) * self.gox.quote2float(price)", "def updatePrice(self, isinkey, field, data, qtype):\r\n isin = isinkey[0:12]\r\n bond = regsToBondName[isin]\r\n if qtype == BloombergQuery.BID:\r\n # 1/ WE CACHE THE OLD PRICE\r\n self.updateCell(bond, 'OLDBID', self.df.at[bond, 'BID'])\r\n self.updateCell(bond, 'OLDASK', self.df.at[bond, 'ASK'])\r\n # 2/ WE CHECK IF PRICE CHANGED\r\n if bond in self.rfbonds:\r\n self.blptsAnalytics.get(isin + '@CBBT' + ' Corp', self.bbgPriceRFQuery)\r\n else:\r\n self.blptsPriceOnly.get(isin + BBGHand + ' Corp', self.bbgPriceOnlyQuery)\r\n elif qtype == BloombergQuery.PRICEONLY:\r\n data = data.astype(float)\r\n # for item, value in data.iteritems():\r\n # self.updateCell(bond,bbgToBdmDic[item],value)\r\n self.lock.acquire()\r\n for item, value in data.iteritems():\r\n self.df.at[bond, bbgToBdmDic[item]] = value\r\n self.lock.release()\r\n if (data['BID'] != self.df.at[bond, 'OLDBID']) or (data['ASK'] != self.df.at[bond, 'OLDASK']):\r\n if bond in SPECIALBONDS:\r\n self.blptsAnalytics.get(isin + BBGHand + ' Corp', self.bbgPriceSpecialQuery)\r\n else:\r\n self.blptsAnalytics.get(isin + BBGHand + ' Corp', self.bbgPriceQuery)\r\n # try:\r\n # self.blptsAnalytics.get(isin + BBGHand + ' Corp', self.bbgPriceQuery)\r\n # except:\r\n # print 'error asking analytics for ' + bond\r\n else:\r\n # print 'Update event without a price change for ' + bond\r\n pub.sendMessage('BOND_PRICE_UPDATE', message=MessageContainer(self.df.loc[bond]))\r\n elif qtype == BloombergQuery.RTGACC:\r\n for item, value in data.iteritems():\r\n self.updateCell(bond,bbgToBdmDic[item],value)\r\n else:#'ANALYTICS' or 'FIRSTPASS'\r\n data = data.astype(float)\r\n # try:\r\n # for item, value in data.iteritems():\r\n # self.updateCell(bond,bbgToBdmDic[item],value)\r\n # except:\r\n # print data\r\n self.lock.acquire()\r\n try:\r\n for item, value in data.iteritems():\r\n self.df.at[bond, bbgToBdmDic[item]] = value\r\n except:\r\n self.lock.release()\r\n print data\r\n self.lock.release()\r\n if bond in SINKABLEBONDS:\r\n #self.bbgSinkRequest.fillRequest(isin + ' Corp', ['YAS_ZSPREAD'], strOverrideField='YAS_BOND_PX', strOverrideValue=data['BID'])\r\n self.bbgSinkRequest.fillRequest(isin + ' Corp', ['YAS_ZSPREAD'], strOverrideField='YAS_BOND_PX', strOverrideValue=self.df.at[bond, 'BID'])\r\n self.bbgSinkRequest.get()\r\n self.updateCell(bond, 'ZB', float(self.bbgSinkRequest.output.values[0,0]))\r\n #self.bbgSinkRequest.fillRequest(isin + ' Corp', ['YAS_ZSPREAD'], strOverrideField='YAS_BOND_PX', strOverrideValue=data['ASK'])\r\n # self.bbgSinkRequest.fillRequest(isin + ' Corp', ['YAS_ZSPREAD'], strOverrideField='YAS_BOND_PX', strOverrideValue=self.df.at[bond, 'ASK'])\r\n # self.bbgSinkRequest.get() \r\n # self.updateCell(bond, 'ZA', float(self.bbgSinkRequest.output.values[0,0]))\r\n if qtype == BloombergQuery.ANALYTICS:\r\n self.updateStaticAnalytics(bond)", "def change_price(self, value): \n value = self.price", "def sales_price(book):\n book = copy(book)\n book.price = round(book.price - book.price * .2, 2)\n return book", "def sales_price(book):\n book = copy(book)\n book.price = 
round(book.price-book.price*.2, 2)\n return book", "async def on_symbol_price_updated(self, price: MetatraderSymbolPrice):\n self._pricesBySymbol[price['symbol']] = price\n positions = list(filter(lambda p: p['symbol'] == price['symbol'], self._positions))\n orders = list(filter(lambda o: o['symbol'] == price['symbol'], self._orders))\n specification = self.specification(price['symbol'])\n if specification:\n for position in positions:\n if 'unrealizedProfit' not in position or 'realizedProfit' not in position:\n position['unrealizedProfit'] = (1 if (position['type'] == 'POSITION_TYPE_BUY') else -1) * \\\n (position['currentPrice'] - position['openPrice']) * \\\n position['currentTickValue'] * position['volume'] / specification['tickSize']\n position['realizedProfit'] = position['profit'] - position['unrealizedProfit']\n new_position_price = price['bid'] if (position['type'] == 'POSITION_TYPE_BUY') else price['ask']\n is_profitable = (1 if (position['type'] == 'POSITION_TYPE_BUY') else -1) * (new_position_price -\n position['openPrice'])\n current_tick_value = price['profitTickValue'] if (is_profitable > 0) else price['lossTickValue']\n unrealized_profit = (1 if (position['type'] == 'POSITION_TYPE_BUY') else -1) * \\\n (new_position_price - position['openPrice']) * current_tick_value * position['volume'] / \\\n specification['tickSize']\n position['unrealizedProfit'] = unrealized_profit\n position['profit'] = position['unrealizedProfit'] + position['realizedProfit']\n position['currentPrice'] = new_position_price\n position['currentTickValue'] = current_tick_value\n for order in orders:\n order['currentPrice'] = price['ask'] if (order['type'] == 'ORDER_TYPE_BUY_LIMIT' or\n order['type'] == 'ORDER_TYPE_BUY_STOP' or\n order['type'] == 'ORDER_TYPE_BUY_STOP_LIMIT') else price['bid']\n if self._accountInformation:\n self._accountInformation['equity'] = self._accountInformation['balance'] + \\\n functools.reduce(lambda a, b: a + b['profit'], self._positions, 0)", "def update(self, book_info, destroy):\n self.connect()\n is_issue = len(book_info) == 2\n\n bid = book_info[0].get()\n if is_issue:\n issue_to = book_info[1].get()\n\n if is_issue:\n extract_bid = f\"select bid from {self.book_table}\"\n else:\n extract_bid = f\"select bid from {self.issued_table}\"\n\n status = False\n try:\n self.cur.execute(extract_bid)\n self.con.commit()\n for i in self.cur:\n self.all_bid.append(i[0])\n\n if bid in self.all_bid:\n check_avail = f\"select status from {self.book_table} where \" \\\n f\"bid = '{bid}'\"\n self.cur.execute(check_avail)\n self.con.commit()\n check = None\n for i in self.cur:\n check = i[0]\n\n if (is_issue and check == 'avail'\n or not is_issue and check == 'issued'):\n status = True\n else:\n status = False\n else:\n messagebox.showinfo(\"Error\", \"Book ID not present\")\n except MySQLError as err:\n messagebox.showinfo(\"Error\", \"Can't fetch Book IDs\")\n print(err)\n\n if is_issue:\n issue_sql = f\"insert into {self.issued_table} values ('{bid}',\" \\\n f\"'{issue_to}')\"\n up_status = f\"update {self.book_table} set status = 'issued' \" \\\n f\"where bid = '{bid}'\"\n else:\n issue_sql = f\"delete from {self.issued_table} where bid = '{bid}'\"\n up_status = f\"update {self.book_table} set status = 'avail' \" \\\n f\"where bid = '{bid}'\"\n\n try:\n if bid in self.all_bid and status:\n self.cur.execute(issue_sql)\n self.con.commit()\n self.cur.execute(up_status)\n self.con.commit()\n if is_issue:\n msg = \"Book Issued Successfully\"\n else:\n msg = \"Book Returned 
Successfully\"\n state = 'Success'\n else:\n if is_issue:\n msg = \"Book Already Issued\"\n else:\n msg = \"Please check the book ID\"\n state = \"Message\"\n messagebox.showinfo(state, msg)\n except MySQLError as err:\n messagebox.showinfo(\n \"Search Error\", \"The value entered is wrong, Try again\"\n )\n print(err)\n self.all_bid.clear()\n destroy()", "def update(self, context, data):\n self.context = context\n self.data = data\n\n dt = get_datetime()\n\n for tkt, bo in self._d_orders['trades'].items():\n price = self.data[bo.symbol].price\n bo.update(price, dt)", "def add_book(self, data):\n exists = self.check_if_exists(data['isbn'])\n\n if exists:\n query = f\"\"\"UPDATE {TABLE} SET quantity = quantity + 10 WHERE bookID = '{data[\"isbn\"]}'\"\"\"\n else:\n query = f\"\"\"INSERT INTO {TABLE}(bookID, title, authors, avg_rating, ratings_count,\n lang_code, num_pages, text_reviews, pub_date, publisher) values(\n \"{data['isbn']}\",\n \"{data['title']}\",\n \"{data['authors']}\",\n {float(data['average_rating'])},\n {int(data['ratings_count'])},\n \"{data['language_code']}\",\n {int(data[' num_pages'])},\n {int(data['text_reviews_count'])},\n \"{data['publication_date']}\",\n \"{data['publisher']}\"\n );\"\"\"\n\n try:\n self.cursor.execute(query)\n self.conn.commit()\n except Error as e:\n print(e)", "def update_book(isbn):\n put_req = request.get_json()\n if not (Book.replace_book(isbn, put_req['name'], put_req['price'])):\n invalid_book_object_error_msg = {\n \"error\": \"Invalid book object update passed in PUT request\",\n \"helpString\": \"Valid data format is {'name': 'bookname', 'price': 7.9, 'isbn': 12345678}\"\n }\n # Because invalidBookObjectErrorMsg is a dictionary, need to convert it into a json object.\n # Set Header info for location (location of endpoint in request)\n return Response(json.dumps(invalid_book_object_error_msg), status=406, mimetype='application/json')\n # See https://www.flaskapi.org/api-guide/status-codes/ for flask API\n # response codes\n response = Response(\"\", 204, mimetype='application/json')\n response.headers['Location'] = \"/books/\" + str(isbn)\n return response", "def updatePrices(self,dd):\n for key in dd:\n self.DoS[key].updatePrice(dd[key])", "def price(self, price):\n\n self._price = price", "def price(self, price):\n\n self._price = price", "def price(self, price):\n\n self._price = price", "def price(self, price):\n\n self._price = price", "def rent_book(self, bookID):\n query = f\"\"\"UPDATE {TABLE} set quantity = quantity - 1 where bookID = '{bookID}';\"\"\"\n\n try:\n self.cursor.execute(query)\n self.conn.commit()\n except Error as e:\n print(e)", "def _onchange_price(self):\n self.price_subtotal = self.price", "def set_isbn(self, new_isbn):\n old_isbn = self.isbn\n self.isbn = new_isbn\n print(\n f\"The isbn for the book '{self.title}' has been updated from '{old_isbn}' to '{self.isbn}'.\")", "def setPrice(self, val):\n self.price = val", "def updatePriceOfExistingStocks(self):\n currentDate = str(datetime.now().strftime(\"%Y-%m-%d\"))\n listOfStocks = self.retrieveAllDistinctStocks()\n isPercentageChangeUpdated = self.checkIfPercentageChangesUpdated(\n currentDate)\n\n if not isPercentageChangeUpdated:\n print(\"PercentageChanges not updated. 
Commencing update now...\")\n for stockSymbol, stockExchange in listOfStocks:\n latestPercentageChange = self.getLatestPercentageChange(\n stockSymbol, stockExchange, currentDate)\n if latestPercentageChange is not None:\n self.storeLatestPercentageChange(\n stockSymbol, stockExchange, currentDate, latestPercentageChange)\n print(\"{} updated.\".format(stockSymbol))\n print(\"Update complete.\")\n else:\n print(\"Prices are already up to date.\")", "def _update_book(self, typ, price, total_vol):\r\n (lst, index, level) = self._find_level(typ, price)\r\n if total_vol == 0:\r\n if level == None:\r\n return False\r\n else:\r\n voldiff = -level.volume\r\n lst.pop(index)\r\n else:\r\n if level == None:\r\n voldiff = total_vol\r\n level = Level(price, total_vol)\r\n lst.insert(index, level)\r\n else:\r\n voldiff = total_vol - level.volume\r\n if voldiff == 0:\r\n return False\r\n level.volume = total_vol\r\n\r\n # now keep all the other stuff in sync with it\r\n self.last_change_type = typ\r\n self.last_change_price = price\r\n self.last_change_volume = voldiff\r\n if typ == \"ask\":\r\n self._update_total_ask(voldiff)\r\n if len(self.asks):\r\n self.ask = self.asks[0].price\r\n self._valid_ask_cache = min(self._valid_ask_cache, index - 1)\r\n else:\r\n self._update_total_bid(voldiff, price)\r\n if len(self.bids):\r\n self.bid = self.bids[0].price\r\n self._valid_bid_cache = min(self._valid_bid_cache, index - 1)\r\n\r\n return True", "def update(self, price, dt):\n log.info(\"Update bo feature '%s' for bo#%s with price=%s dt=%s\" % (self.name, self.bo.ticket, price, dt))", "def set_price(self, request, pk):\n return Response('20$')", "def set_price(self, request, pk):\n return Response('20$')", "def update_cursor_and_price(self, new_quant):\n self.quant = new_quant\n self.quantity_cursor.change_count(self.quant)\n self.cost_surf = \\\n self.text_maker.get_surface(str(self.item.sell_price * self.quant))", "def update_product_details(book_id, stock_delta, updated_cost):\n\n response = {}\n\n if stock_delta and stock_delta != 0:\n curr_stock_details = query_db('select stock from books where id='+str(book_id))\n app.logfile.info('select stock from books where id='+str(book_id))\n\n updated_stock_count = curr_stock_details[0]['stock'] + stock_delta\n update_stock_details = update_db('update books set stock='+str(updated_stock_count)+' where id='+str(book_id))\n app.logfile.info('update books set stock='+str(updated_stock_count)+' where id='+str(book_id))\n response['stock_updated'] = update_stock_details\n\n if updated_cost:\n update_cost_details = update_db('update books set cost='+str(updated_cost)+' where id='+str(book_id))\n app.logfile.info('update books set stock='+str(updated_cost)+' where id='+str(book_id))\n response['cost_updated'] = update_cost_details\n \n return(jsonify(response))", "def update_book_scores(self):\n self.cursor.execute(\"\"\"UPDATE book SET avg_rating=NULL, total_rating_score=0, num_ratings=0\"\"\")\n self.db.commit()\n self.cursor.execute(\"\"\"SELECT * FROM comment\"\"\")\n for comment in self.cursor.fetchall():\n self.cursor.execute(\"\"\"UPDATE book SET total_rating_score=total_rating_score+%s,\n num_ratings=num_ratings+1 WHERE ISBN=%s\"\"\", (comment[3], comment[1]))\n self.db.commit()\n self.update_average_book_rating(comment[1])", "def price(self, price: float):\n\n self._price = price" ]
[ "0.6417695", "0.62173057", "0.61378425", "0.603132", "0.5936183", "0.5868149", "0.58071566", "0.5730834", "0.57285744", "0.5673982", "0.5657812", "0.56183827", "0.5603246", "0.55851513", "0.55851513", "0.55851513", "0.55851513", "0.55597377", "0.555364", "0.55461675", "0.5521341", "0.55077475", "0.54911333", "0.54520166", "0.54210126", "0.54210126", "0.54209465", "0.53816885", "0.53778505", "0.5374694" ]
0.8498672
0
This method deletes books already registered in the database, by the barcode.
def delete_book(self, barcode): try: self.db.cursor.execute('DELETE FROM books where id_books = %s', (barcode,)) except Exception as error: print(error) else: self.db.con.commit() self.db.con.close() print('Deleted Successfully!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete(self, book_info, destroy):\n self.connect()\n bid = book_info[0].get()\n\n delete_sql = f\"delete from {self.book_table} where bid = '{bid}'\"\n delete_issue = f\"delete from {self.issued_table} where bid = '{bid}'\"\n try:\n self.cur.execute(delete_sql)\n self.con.commit()\n self.cur.execute(delete_issue)\n self.con.commit()\n messagebox.showinfo('Success', \"Book Record Deleted Successfully\")\n book_info[0].delete(0, END)\n except MySQLError as err:\n messagebox.showinfo(\"Please check Book ID\")\n print(err)\n destroy()", "def delete_book(code: str):\n pass", "def delete_book(self, book):\n try:\n with self._db as db:\n cur = db.cursor()\n cur.execute('DELETE FROM books WHERE rowid = ?', (book.id, ))\n if not cur.rowcount:\n raise BookError('Tried to delete book that doesn\\'t exist')\n except sqlite3.Error as e:\n raise BookError('Error deleting book') from e", "def delete_orderbooks(self):\n counter = 0 \n orderbooksListlen = 0 \n if self.stored_query:\n queryInstruments = self.db_ops.get_instruments_from_stored_query(self.stored_query)\n else:\n logger.LOG(\"If deleting all order books on all instruments, please write and quiery for that. You should be sure of what you are doing.\")\n \n if queryInstruments:\n logger.DLOG(\"Deleting order books for instruments in market segment <%s> in the stored query <%s>\"%(self.market_segment, self.stored_query))\n \n orderbooksList = [] \n if self.market_segment and self.market_place:\n for ob in acm.FOrderBook.Select(\"marketPlace='%s' \"%(self.market_place)):#instrument, marketPlace, currency, externalType are indexes that can be used, the Oid also, but it s unique key index\n for gmp in ob.GroupMaps():#check if there is a leaf on this orderbook \n if gmp.Group().Name() == self.market_segment: \n orderbooksList.append(ob)\n orderbooksListlen =len(orderbooksList)\n if not orderbooksList:\n logger.LOG(\"No OrderBooks on Segment:'%s' and Market:'%s'\"%(self.market_segment, self.market_place)) \n else:\n for each_orderbook in orderbooksList: \n if queryInstruments.Includes(each_orderbook.Instrument()): \n isDeleted = self.db_ops.Delete_SingleOrderBookWithReference(each_orderbook, self.market_segment)\n if isDeleted: \n counter=counter+1\n \n logger.DLOG(\"**%s order books** were deleted for the following including '%s' instruments: %s\"%(str(counter), str(orderbooksListlen), queryInstruments))", "def delete_all_books(self):\n try:\n with self._db as db:\n cur = db.cursor()\n cur.execute('DELETE FROM books')\n except sqlite3.Error as e:\n raise BookError('Error deleting all books') from e", "def clear(self):\n cursor = self._dbcon.cursor()\n cursor.execute(u\"delete from books\")\n self._dbcon.commit()\n cursor.close()", "def del_book(username, book_id):\n data = db_books.get_by_id(username, book_id)\n if data['front'] != None:\n try:\n remove(data['front'])\n except FileNotFoundError:\n print(\"No cover to delete\")\n db_books.delete_by_id(username, book_id)\n return 0", "def delete(request):\n if request.method == \"POST\":\n Books.objects.get(isbn=request.POST['delete_book']).delete()\n return redirect('libros:home')", "def delete_orderbooks(self):\n logger.DLOG(\"Deleting all FX order books for instruments in market segment <%s>\"%(self.market_segment))\n counter = 0 \n orderbooksList = [] \n if self.market_segment and self.market_place:\n for ob in acm.FOrderBook.Select(\"marketPlace='%s' \"%(self.market_place)):#instrument, marketPlace, currency, externalType are indexes that can be used, the Oid also, but it s unique key 
index\n for gmp in ob.GroupMaps():#check if there is a leaf on this orderbook \n if gmp.Group().Name() == self.market_segment:\n orderbooksList.append(ob)\n if not orderbooksList:\n logger.LOG(\"No OrderBooks on Segment:'%s' and Market:'%s'\"%(self.market_segment, self.market_place)) \n else:\n for each_orderbook in orderbooksList: \n isDeleted = self.db_ops.Delete_SingleOrderBookWithReference(each_orderbook, self.market_segment)\n if isDeleted: \n counter=counter+1\n \n logger.DLOG(\"**%s order books** were deleted fron the market segment leaf: %s\"%(str(counter), self.market_segment))", "def delete(self, book_id):\n self.curr.execute(\n \"\"\"DELETE FROM library WHERE book_id={}\"\"\".format(book_id))\n self.conn.commit()\n self.curr.close()", "def __del__(self):\n Library.functions.delete_(self._book)", "def delete_all_book(request):\n all_books = Book.objects.all()\n for book in all_books:\n book.pdf.delete()\n book.cover.delete()\n book.delete()\n return redirect('book_list')", "def delete(self, book_id):\n a_book = query_book_by_id(book_id)\n if a_book is None:\n return 'Book does not exit', 404\n db.session.delete(a_book)\n db.session.commit()\n return \"book has been deleted\", 200", "def test_delete_book(self):\n\n\t\t# create book\n\t\tadd_book = {\n\t\t\t'title': 'Hello Books',\n\t\t\t'isbn': '5698745124'\n\t\t}\n\n\t\tlogin_data = self.login_test_user()\n\t\ttoken = login_data['auth_token']\n\t\tres = self.client.post(\n\t\t\tf'{URL_BOOKS}',\n\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\tcontent_type='application/json',\n\t\t\tdata=json.dumps(add_book)\n\t\t)\n\n\t\t# delete book\n\t\tdel_book = self.client.delete(\n\t\t\tf'{URL_BOOKS}/1',\n\t\t\theaders=dict(Authorization=f'Bearer {token}')\n\t\t)\n\n\t\tres3 = json.loads(del_book.data.decode())\n\t\tself.assertTrue(res3['message'] == 'book with id 1 has been deleted')", "def del_all_books(username):\n db_books.drop(username)\n db_sql.init_books(username)\n try:\n rmtree('static/covers/' + username + '_front/')\n except FileNotFoundError:\n print(\"No cover to delete\")\n return 0", "def delete(d):\n conn_obj = mysql.connector.connect(host='localhost',database='mydb',user='root',password='kks')\n cur_obj = conn_obj.cursor()\n cur_obj.execute(\"DELETE FROM book WHERE isbn = %s\",(d,))\n conn_obj.commit()\n conn_obj.close()", "def delete_book(book_name, user_id):\n book = session.query(Book).filter(Book.book_name == book_name).first()\n if book:\n session.delete(book)\n # auto increment id from 1\n books = get_user_books(user_id)\n auto_increment(books)\n session.commit()\n return True", "def delete_audiobook(_id):\r\n Audiobook.query.filter_by(id=_id).delete()\r\n # filter audio book by id and delete\r\n db.session.commit() # commiting the new change to our database\r", "def delete_book():\n try:\n key = list(request.args.keys())[0]\n if key is None:\n return render_template(\"error.html\", message=\"Please enter a correct key\"), 400\n val = request.args[key].strip('\"')\n except IndexError:\n queryVal = request.form.to_dict()\n key = list(queryVal.keys())[0]\n val = queryVal[key].strip('\"')\n entry = mongo.db.Books\n elem_to_delete = entry.find_one({key: val})\n if elem_to_delete is None:\n return render_template('error.html', message='No entry was found that matches query'), 400\n mongo.db.Books.delete_one(elem_to_delete)\n return render_template('deleted_book.html', message=\"Book Has been Deleted\")", "def remove_book(request, slug):\n\n user = CustomUser.objects.get(\n id=request.user.id\n )\n book_name = 
Book.objects.get(\n slug=slug\n )\n book = get_object_or_404(\n Book,\n customuser=user,\n book_name=book_name,\n )\n book.delete()\n\n return redirect('favorite')", "def remove_book() -> None:\r\n globstatus = status_check()\r\n print(f\"\\n Below students are carrying books {globstatus['reserved_students']}\")\r\n global_removar(globstatus)", "def test_delete_book(self):\n\n delete_books()\n\n book = create_book(\"title one\")[\"book\"]\n\n self.assertEqual(\n read_book(book[\"id\"]),\n {\n \"status\": \"success\",\n \"book\": book\n }\n )\n\n with test_client.delete(\"/book/{}/\".format(book[\"id\"])) as response:\n\n self.assertEqual(\n json.loads(\n response.get_data(as_text=True)\n ),\n {\n \"status\": \"success\",\n \"book\": book\n }\n )\n\n self.assertEqual(\n read_book(book[\"id\"]),\n {\n \"status\": \"error\"\n }\n )\n\n with test_client.delete(\"/book/{}/\".format(book[\"id\"])) as response:\n self.assertEqual(\n json.loads(\n response.get_data(as_text=True)\n ),\n {\n \"status\": \"error\"\n }\n )\n\n \"\"\"\n clear the table, create several books and list them, remove one and list them again, remove another one \n and list them again\n \"\"\"\n\n delete_books()\n\n book_one = create_book(\"title one\")[\"book\"]\n book_two = create_book(\"title two\")[\"book\"]\n\n self.assertEqual(\n list_books(),\n {\n \"status\": \"success\",\n \"books\": [\n book_one,\n book_two\n ]\n }\n )\n\n with test_client.delete(\"/book/{}/\".format(book_two[\"id\"])) as response:\n\n self.assertEqual(\n json.loads(\n response.get_data(as_text=True)\n ),\n {\n \"status\": \"success\",\n \"book\": book_two\n }\n )\n\n self.assertEqual(\n list_books(),\n {\n \"status\": \"success\",\n \"books\": [\n book_one\n ]\n }\n )\n\n with test_client.delete(\"/book/{}/\".format(book_one[\"id\"])) as response:\n\n self.assertEqual(\n json.loads(\n response.get_data(as_text=True)\n ),\n {\n \"status\": \"success\",\n \"book\": book_one\n }\n )\n\n self.assertEqual(\n list_books(),\n {\n \"status\": \"success\",\n \"books\": []\n }\n )", "def RemoveFromBorrowList(request): \n \n BookDetails =request.POST.getlist('deletefrommylist')\n booksSelected =getSelectedbooks(request,BookDetails)\n username =request.user.username\n myObj={}\n message=\"\"\n withinlimt=False\n inventory=[]\n #gets the books that are not 0-0 in location\n allbooks=get_valid_Books()\n avialble=False\n #gets the library member object\n libmember = models.Libmem.objects.get(cwid_id=username) \n book_ids=allbooks.keys()\n for id,bookval in booksSelected.items():\n inventory.append(models.Invt.objects.get(i_id_id=id))\n #decreasing qty available\n alreadytook=False\n #get the Inventory object based on ID\n thisobj =models.Invt.objects.get(i_id_id=id) \n try:\n \n #get count for user \n count=0\n #get the books count per user\n bowedobj=models.Bowed.objects.filter(cwid_id=libmember.pk)\n for f in bowedobj:\n if f.b_id_id == id:\n reqbook=models.Bowed.objects.get(id=f.pk)\n reqbook.delete() \n thisobj.qty=thisobj.qty+1\n message=\"sucess\"\n thisobj.save()\n except Exception as e:\n print(e)\n pass\n return render(\n request,\n 'app/about.html',\n {\n 'title':'Books',\n 'message':message,\n 'year':datetime.now().year,\n }\n )", "def delete(self, id):\n self.cursor.execute(\"DELETE FROM Book WHERE Id = ?\", (id,))\n self.connection.commit()", "def remove_all_books(self):\n for book in self.books:\n self.remove_book(book)\n return self", "def test_remove_book(self):\n\n first_book_list = BookList()\n first_book = Book()\n\n first_book.create_book({\n 
\"title\": \"First Man\",\n \"author\": \"James R. Hansen\",\n \"year\": 2005,\n \"publisher_name\": \"Simon & Schuster\",\n \"publication_date\": \"01/01/2018\",\n \"num_copies\": 1\n })\n\n first_book_list.add_book(first_book)\n\n assert first_book_list.remove(\"title\", \"First Man\") == True\n assert first_book_list.count() == 0", "def delete():", "def delete():\n add_book_tk = DeleteBookDialog()\n entries_args = [\n (\"Book ID : \", 0.5),\n ]\n add_book_tk.create_components(entries_args)\n add_book_tk.mainloop()", "def delete(self):\n ...", "def cart_remove(request, book_id):\r\n cart = Cart(request)\r\n book = get_object_or_404(Book, id=book_id)\r\n cart.remove(book)\r\n\r\n return redirect('cart_detail')" ]
[ "0.72892725", "0.7267948", "0.70717716", "0.6985194", "0.6930456", "0.6827797", "0.6826523", "0.6772539", "0.67015284", "0.6700609", "0.6663117", "0.66131103", "0.6519632", "0.6362935", "0.63454336", "0.63371575", "0.6292271", "0.62690663", "0.6228916", "0.6208259", "0.61425465", "0.6096902", "0.60028976", "0.5943", "0.5937974", "0.5852619", "0.57883435", "0.5769685", "0.57564753", "0.57470745" ]
0.8566365
0
This method returns the specifications of the books, consulting the database by barcode
def consult_books(self, bar_code: str): try: book_data = [] self.db.cursor.execute('SELECT * from books WHERE bar_code = %s', (bar_code,)) for i in self.db.cursor.fetchall(): book_data.append(i) except Exception as error: print(error) else: print(f"ID BOOK: {book_data[0][0]}\n" f"TITLE: {book_data[0][1]}\n" f"AUTHOR: {book_data[0][2]}\n" f"PRICE: R$:{book_data[0][3]}\n" f"BAR CODE: {book_data[0][4]}\n" f"STOCK: {book_data[0][5]}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_book(code: str) -> Dict:\n pass", "def get_all_books() -> List[Dict]:\n pass", "def search_for_redbooks(book_codes: tuple):\n\n book_dict = {}\n\n global setup\n\n for book_code in book_codes:\n URI_string = build_URI_string(book_code)\n search_web_page = requests.get(URI_string)\n if search_web_page.status_code != 200:\n print(\"Book with code {} not found! Continuing...\".format(book_code))\n continue\n web_page_content = search_web_page.content\n soup = BS(web_page_content, 'html.parser')\n book_name = soup.find('h1',{'class':'ibm-h1','id':'ibm-pagetitle-h1'}).text\n book_dict[book_code] = book_name\n\n return book_dict", "def getBooks(self):\n srcIds = set([srcId for srcId,altId in self.libMap.values()])\n altIds = set([altId for srcId,altId in self.libMap.values()])\n factory = {'BOOK':Book}\n for modName in mwIniFile.loadOrder:\n print modName\n fileRep = FileRep(modInfos[modName],False)\n fileRep.load(keepTypes=None,factory=factory)\n for record in fileRep.records:\n if record.name == 'BOOK':\n bookId = record.getId()\n if bookId in srcIds:\n print '',bookId\n self.srcBooks[bookId] = (record,modName)\n elif bookId in altIds:\n print '',bookId\n self.altBooks[bookId] = (record,modName)", "def get_book_infos(url):\n response = requests.get(url)\n if response.status_code == 200:\n # We get the link without the \\..\n link = response.url\n soup = BeautifulSoup(response.content, 'html.parser')\n search_img = soup.find('div', {\"class\": \"item active\"}).find('img')[\"src\"]\n image_link = requests.get(f\"http://books.toscrape.com/{search_img}\").url\n # Product info are in balise tr\n trs = soup.findAll('tr')\n # Stocking the info in a dictionnary\n dict_tr = {}\n for tr in trs:\n th = tr.find('th').text\n td = tr.find('td').text\n dict_tr[th] = td\n # All the informations of the book that we need\n return {'product_page_url': link,\n 'universal_ product_code (upc)': dict_tr['UPC'],\n 'title': soup.find('h1').text,\n 'price_including_tax': dict_tr['Price (incl. tax)'],\n 'price_excluding_tax': dict_tr['Price (excl. 
tax)'],\n 'number_available': dict_tr['Availability'],\n 'product_description': soup.findAll('meta')[2][\"content\"],\n 'category': soup.findAll('li')[2].find('a').text,\n 'review_rating': soup.findAll('p')[2][\"class\"][1],\n 'image_url': image_link}", "def get_books_data():\n entry = mongo.db.Books\n output = list()\n look_up_type = None\n if 'title' in request.args:\n look_up_type = 'title'\n if len(request.args['title']) <= 2:\n return render_template('error.html', message=\"Must enter characters\"), 400\n value = request.args['title'].strip('\"')\n title = entry.find({'title': {'$regex': value}})\n if title:\n for book in title:\n output.append({'title': book['title']})\n elif 'related_books' in request.args:\n look_up_type = 'similar_books'\n if len(request.args['related_books']) <= 2:\n return render_template('error.html', message=\"Must enter characters\"), 400\n value = request.args['related_books'].strip('\"')\n related_books = entry.find(\n {'similar_books': {'$regex': value}})\n if related_books:\n for related in related_books:\n for link in related['similar_books']:\n if value in link:\n output.append(({'similar_books': link}))\n elif 'author' in request.args:\n look_up_type = 'author'\n if len(request.args['author']) <= 2:\n return render_template('error.html', message=\"Must enter characters\"), 400\n value = request.args['author'].strip('\"')\n authors = entry.find({'author': {'$regex': value}})\n if authors:\n for name in authors:\n output.append({'author': name['author']})\n if len(output) == 0:\n return render_template('error.html', message=\"No Entries Found\"), 400\n return render_template('gottenBooks.html', output=output, look_up_type=look_up_type), 200", "def create_books_data():\n pass", "def get_books_from_api(request, url='https://www.googleapis.com/books/v1/volumes?q=Hobbit'):\n response = requests.get(url)\n data = response.json()\n items = data.get('items')\n if items is None:\n items = []\n for item in items:\n book = item.get('volumeInfo')\n title = book.get('title', '--')\n authors = book.get('authors', ['unknown'])\n publishedDate = book.get('publishedDate')\n isbns = book.get('industryIdentifiers', [])\n pages = book.get('pageCount')\n cover_url = book.get('imageLinks')\n if cover_url:\n cover_url = cover_url.get('thumbnail')\n language = book.get('language')\n authors_list = []\n for author in authors:\n auth = get_author_object(author)\n authors_list.append(auth)\n isbn_10 = None\n isbn_13 = None\n for isbn in isbns:\n if isbn['type'] == 'ISBN_10':\n isbn_10 = isbn['identifier']\n elif isbn['type'] == 'ISBN_13':\n isbn_13 = isbn['identifier']\n lang = get_language_object(language)\n try:\n published = datetime.strptime(publishedDate, '%Y-%m-%d')\n except ValueError:\n year = int(publishedDate[:4])\n month = None\n day = None\n except TypeError:\n year = None\n month = None\n day = None\n else:\n year = published.year\n month = published.month\n day = published.day\n try:\n book = get_object_or_404(Book, title=title, publishedYear=year, publishedMonth=month, publishedDay=day,\n language=lang, pages=pages, cover=cover_url, isbn_10=isbn_10, isbn_13=isbn_13)\n for name in book.authors.all():\n if name not in authors_list:\n raise Http404\n except Http404:\n book = Book.objects.create(title=title, publishedYear=year, publishedMonth=month, publishedDay=day,\n language=lang, pages=pages, cover=cover_url, isbn_10=isbn_10, isbn_13=isbn_13)\n book.authors.set(authors_list)\n return redirect('all-books')", "def demo_get_all_books(self):\n results = []\n 
self.cursor.execute(\"\"\"SELECT ISBN FROM book\"\"\")\n for book in self.cursor.fetchall():\n results.append(book[0])\n return results", "def get_data_from_book(book):\n src_img = book.find(\"img\").get(\"src\")\n src_img = src_img.replace(\"../\", \"\")\n image = \"http://books.toscrape.com/\" + src_img\n\n in_stock = False\n in_stock_or_not = book.find(\"p\", {\"class\", \"instock\"}).text\n if \"In stock\" in in_stock_or_not:\n in_stock = True\n\n name = book.find(\"h3\").find(\"a\").text\n\n price = book.find(\"p\", {\"class\", \"price_color\"}).text\n price = price.replace(\"Â\", \"\")\n\n rating = book.find(\"p\", {\"class\", \"star-rating\"}).get(\"class\")[1]\n rating = w2n.word_to_num(rating)\n\n return {\n \"image\": image,\n \"in_stock\": in_stock,\n \"name\": name,\n \"price\": price,\n \"rating\": rating,\n }", "def get_book_info(self, book):\n request_url = \"%s?q=%s\" % (self.API_URL, book)\n json_data = self.make_request(request_url)\n if not json_data:\n return []\n books_info = []\n for book in json_data['docs']:\n info = {'title': book['title']}\n if 'publisher' in book:\n info.update({'publisher': book['publisher']})\n if 'publish_year' in book:\n info.update({'publish_year': book['publish_year']})\n if 'language' in book:\n info.update({'language': book['language']})\n books_info.append(info)\n return books_info", "def valid_book(self, info):\n self.cursor.execute(\"SELECT ISBN, title, price, stock FROM book WHERE ISBN=%s\", (info['ISBN'],))\n for book in self.cursor.fetchall():\n return True, float(book[2]), book[1], book[3]\n return False, 0, 0, 0", "def make_books_dicts(xml, book_list):\n\n books_response = xml.GoodreadsResponse.reviews.review\n for book in books_response:\n a_book = {}\n a_book['title'] = book.book.title.cdata.encode('utf8')\n a_book['author_name'] = book.book.authors.author.name.cdata.encode('utf8')\n a_book['author_gr_id'] = int(book.book.authors.author.id.cdata.encode('utf8'))\n a_book['gr_work_id'] = int(book.book.work.id.cdata.encode('utf8'))\n a_book['description'] = book.book.description.cdata\n\n a_book['edition'] = {}\n a_book['edition']['isbn'] = valid_isbn(book.book.isbn.cdata.encode('utf8'))\n a_book['edition']['format_id'] = get_format_id(book.book.format.cdata.encode('utf8'))\n a_book['edition']['pic_url'] = book.book.image_url.cdata.encode('utf8')\n a_book['edition']['publisher'] = book.book.publisher.cdata.encode('utf8')\n a_book['edition']['gr_url'] = book.book.link.cdata.encode('utf8')\n a_book['edition']['gr_id'] = int(book.book.id.cdata.encode('utf8'))\n year = date_is_valid(book.book.publication_year.cdata.encode(\"utf8\"))\n month = date_is_valid(book.book.publication_month.cdata.encode(\"utf8\"))\n day = date_is_valid(book.book.publication_day.cdata.encode(\"utf8\"))\n a_book['edition']['date'] = datetime.date(year, month, day)\n a_book['edition']['num_pages'] = valid_page_count(book.book.num_pages.cdata.encode('utf8'))\n book_list.append(a_book)\n\n print \"*******THERE ARE \" + str(len(book_list)) + \" ON THIS SHELF*******\"\n\n return book_list", "def get_single_book_info(self, isbn):\n self.cursor.execute(\"SELECT * FROM book WHERE ISBN=%s\", (isbn,))\n books = self.cursor.fetchall()\n for book in books:\n authors = []\n self.cursor.execute(\"\"\"SELECT name FROM Author A, Wrote W, Book B WHERE A.ID = W.authorID AND\n W.ISBN = B.ISBN AND B.ISBN = %s\"\"\", (isbn,))\n for auth in self.cursor.fetchall():\n authors.append(auth[0])\n return book, authors", "def display_books(self, results):\n # Check if result is blank\n if 
not results:\n print(\"\\nNo Books found!!\")\n return\n # construct table and print\n book_schema = LMSLibraryDatabase.book_schema\n table = PrettyTable()\n table.field_names = book_schema\n for result in results:\n table.add_row(result)\n print(\"\\n{}\".format(table))\n self.prompt_borrow_book()", "def show_books():\n result = {'books': query.get_book_list()}\n return json.dumps(result, ensure_ascii=False)", "def search_for_books(self, query):\n books = []\n book = Book(self.db)\n for row in self.db.cursor().execute('SELECT genre_id FROM genres WHERE ' + query):\n books.extend(self.get_books(row[0]))\n\n return books", "def get_book_details(book_id, key):\n\n # call goodreads search method with book id here\n payload = {\"key\": key}\n\n query = requests.get(\"https://www.goodreads.com/book/show/{}.json\".format(book_id), params=payload)\n # parse response to get data needed to create a book object\n\n doc = untangle.parse(query.content)\n book_data = doc.GoodreadsResponse.book\n book = {}\n\n # create dictionary of book object data, subdictionary of edition data\n\n # book info\n #==========\n book[\"title\"] = book_data.title.cdata.encode(\"utf8\")\n book[\"author_name\"], book[\"author_gr_id\"] = get_author_data(book_data.authors)\n book['work_id'] = int(book_data.work.id.cdata.encode('utf8'))\n book[\"description\"] = book_data.description.cdata\n\n # edition info\n #=============\n book[\"edition\"] = {}\n book[\"edition\"][\"isbn\"] = valid_isbn(book_data.isbn.cdata.encode(\"utf8\"))\n book[\"edition\"][\"format_id\"] = get_format_id(book_data.format.cdata.encode(\"utf8\"))\n book[\"edition\"][\"pic_url\"] = book_data.image_url.cdata.encode(\"utf8\")\n book[\"edition\"][\"publisher\"] = book_data.publisher.cdata.encode(\"utf8\")\n book[\"edition\"][\"num_pages\"] = valid_page_count(book_data.num_pages.cdata.encode(\"utf8\"))\n year = date_is_valid(book_data.work.original_publication_year.cdata.encode(\"utf8\"))\n month = date_is_valid(book_data.work.original_publication_month.cdata.encode(\"utf8\"))\n day = date_is_valid(book_data.work.original_publication_day.cdata.encode(\"utf8\"))\n book[\"edition\"][\"date\"] = datetime.date(year, month, day)\n book[\"edition\"][\"gr_url\"] = book_data.url.cdata.encode(\"utf8\")\n book[\"edition\"][\"gr_id\"] = int(book_data.id.cdata.encode(\"utf8\"))\n\n return book", "def get_book_details(self):\n\n try:\n # gives response for the request from the API url\n response = requests.get(self.book_url)\n\n \n # using ElementTree to store the response content in a tree\n root = ET.fromstring(response.content)\n book = root.find('book')\n\n # getting the required details\n self.book_details[\"title\"] = book.find('title').text\n self.book_details[\"average_rating\"] = book.find('average_rating').text\n self.book_details[\"ratings_count\"] = book.find('ratings_count').text\n self.book_details[\"num_pages\"] = book.find('num_pages').text\n self.book_details[\"image_url\"] = book.find('image_url').text\n self.book_details[\"publication_year\"] = book.find('publication_year').text\n\n # getting list of all the authors\n authors = book.find('authors')\n if authors:\n author_names_list = []\n for author in authors.iter('author'):\n author_names_list.append(author.find('name').text)\n author_names_sentence = \", \".join(author_names_list)\n self.book_details[\"authors\"] = author_names_sentence\n except:\n raise Exception(\"invalid XML response\")", "def show_books():\n# need + or %20 for spaces in author (set encoding?)\n\n args = request.args\n 
column_names = get_column_names()\n\n sql_cmd = [\"SELECT title, author FROM books\"]\n if len(args) > 0:\n for j, arg in enumerate(args):\n if arg not in column_names: # return empty list\n sql_cmd = []\n break\n else:\n if not \" WHERE \" in sql_cmd:\n sql_cmd.append(\" WHERE \")\n sql_cmd.append(\"%s='%s'\" % (arg, args[arg]))\n if j+1 < len(args):\n sql_cmd.append(\" AND \")\n sql_cmd.append(\";\")\n sql_cmd = \"\".join(sql_cmd)\n# print('sql_cmd: ', sql_cmd)\n\n books = []\n if len(sql_cmd) > 1:\n cur = g.db.cursor()\n cur.execute(sql_cmd)\n if cur:\n books = [dict(title=row[0], author=row[1]) for row in cur.fetchall()]\n# return jsonify({'results': books})\n return json_dumps({'results': books}, indent=4)", "def get_book_data(isbn: int):\n try:\n book = next(iter(core.Book.search(('isbn', 'eq', isbn))))\n except StopIteration:\n pass # actually, I could put the whole rest of the function here\n else:\n data = core.Book.view_str(book.id)\n del data['id'], data['status'], data['return_date'], data['borrowed_by']\n del data['borrowed_by_id'], data['__str__']\n return data\n\n try:\n r = requests.get('https://portal.dnb.de/opac.htm?query=isbn%3D'\n + str(isbn) + '&method=simpleSearch&cqlMode=true')\n r.raise_for_status()\n except requests.exceptions.RequestException:\n raise core.BuchSchlossError('no_connection', 'no_connection')\n\n person_re = re.compile(r'(\\w*, \\w*) \\((\\w*)\\)')\n results = {'concerned_people': []}\n\n page = bs4.BeautifulSoup(r.text)\n table = page.select_one('#fullRecordTable')\n if table is None:\n # see if we got multiple results\n link_to_first = page.select_one('#recordLink_0')\n if link_to_first is None:\n raise core.BuchSchlossError(\n 'Book_not_found', 'Book_with_ISBN_{}_not_in_DNB', isbn)\n r = requests.get('https://portal.dnb.de'+link_to_first['href'])\n page = bs4.BeautifulSoup(r.text)\n table = page.select_one('#fullRecordTable')\n\n for tr in table.select('tr'):\n td = [x.get_text('\\n').strip() for x in tr.select('td')]\n if len(td) == 2:\n if td[0] == 'Titel':\n results['title'] = td[1].split('/')[0].strip()\n elif td[0] == 'Person(en)':\n for p in td[1].split('\\n'):\n g = person_re.search(p)\n if g is None:\n continue\n g = g.groups()\n if g[1] == 'Verfasser':\n results['author'] = g[0]\n else:\n results['concerned_people'].append(g[1]+': '+g[0])\n elif td[0] == 'Verlag':\n results['publisher'] = td[1].split(':')[1].strip()\n elif td[0] == 'Zeitliche Einordnung':\n results['year'] = td[1].split(':')[1].strip()\n elif td[0] == 'Sprache(n)':\n results['language'] = td[1].split(',')[0].split()[0].strip()\n\n results['concerned_people'] = '; '.join(results['concerned_people'])\n return results", "def _bio_sample(self, barcode):\n search = self._transaction.getSearchService()\n criteria = SearchCriteria()\n barcode_match = SearchCriteria.MatchClause.createAttributeMatch(\n SearchCriteria.MatchClauseAttribute.CODE, barcode\n )\n criteria.addMatchClause(barcode_match)\n samples = search.searchForSamples(criteria)\n if len(samples) > 1:\n raise RuntimeError(\n \"Found more than one sample for barcode %s.\" % barcode\n )\n if not samples:\n raise ValueError(\n \"Could not find a sample for barcode %s\" % barcode\n )\n sample = samples[0]\n return sample.getSpace(), self.barcode[:5], sample", "def load_book_info(val) -> Book:\n val = val.replace(\" \", \"%20\") # Replace spaces with '%20'\n data = requests.get(\n \"https://www.googleapis.com/books/v1/volumes?q={}\".format(\n val)\n ).json()\n\n if data['totalItems']:\n try:\n description = 
data['items'][0]['volumeInfo']['description']\n except KeyError:\n description = \"No description available.\"\n book = {\n 'isbn': data['items'][0]['volumeInfo']\n ['industryIdentifiers'][0]['identifier'],\n 'title': data['items'][0]['volumeInfo']['title'],\n 'author': ', '.join(data['items'][0]['volumeInfo']['authors']),\n 'description': description,\n 'img_url':\n data['items'][0]['volumeInfo']['imageLinks']['thumbnail'],\n }\n return Book(\n isbn=book['isbn'],\n title=book['title'],\n author=book['author'],\n description=book['description'],\n img_url=book['img_url'],\n )\n else:\n raise BookAPI.BookDoesNotExist(\"The book doesn't exists\")", "def _get_book_summary(self):\n self._get_book_prices()\n for price in self.ask_prices:\n volume = 0\n for k in self.ask_snapshot.keys():\n if self.ask_snapshot[k].price == price:\n volume += self.ask_snapshot[k].volume\n self.ask_volumes.append(volume)\n for price in self.bid_prices:\n volume = 0\n for k in self.bid_snapshot.keys():\n if self.bid_snapshot[k].price == price:\n volume += self.bid_snapshot[k].volume\n self.bid_volumes.append(volume)", "def test_get_specific_book_method(self):\n # When book id is int\n book_id = 1\n result = self.book.get_book(book_id)\n self.assertEqual(result, [{\"Title\": \"Harry Potter and Chamber of Secrets\",\n \"Author\": \"J.K Rowling\",\n \"Copies\": 2}])", "def get_ebooks(self, book):\n request_url = \"%s?q=%s\" % (self.API_URL, book)\n json_data = self.make_request(request_url)\n if not json_data:\n return []\n ebooks = []\n for book in json_data['docs']:\n if book['ebook_count_i'] >= 1:\n ebooks.append({'title': book['title'], 'ebook_count': book['ebook_count_i']})\n return ebooks", "def get_data(number_books):\r\n for i in range(number_books):\r\n print(\"----------You can enter information of book.----------\")\r\n title = input(\" Enter title : \")\r\n author = input(\" Enter author : \")\r\n Publish_year = int(input(\" Enter publish_year :\"))\r\n pages = int(input(\" Enter pages :\"))\r\n Language = input(\" Enter language :\")\r\n Price = float(input(\" Enter price : \"))\r\n book = Book(title, author, Publish_year, pages, Language, Price)", "def read_book(url,book_num):\n\t#calls open_url function to open the url\n\tbook_contents = open_url(url)\n\tif book_contents != None:\n\t\t#calls filter data function to clean the data\n\t\tclean_data = filter_data(book_contents)\n\t\t#create dictionary for all the words in this book with 0's filling for count in all the books\n\t\tcreate_dict(clean_data)\n\t\treturn clean_data\n\telse:\n\t\treturn []", "def get_books(self):\n # Implemented from template for\n # osid.resource.BinLookupSession.get_bins_template\n # NOTE: This implementation currently ignores plenary view\n if self._catalog_session is not None:\n return self._catalog_session.get_catalogs()\n collection = JSONClientValidated('commenting',\n collection='Book',\n runtime=self._runtime)\n result = collection.find().sort('_id', DESCENDING)\n\n return objects.BookList(result, runtime=self._runtime, proxy=self._proxy)", "def get_recommended_books(self, orderNumber, loginID):\n invalid_isbn_list = []\n books_in_order = []\n possible_isbn_list = []\n self.cursor.execute(\"\"\"SELECT orderNumber FROM orderlog WHERE loginID=%s\"\"\", (loginID,))\n for order in self.cursor.fetchall():\n self.cursor.execute(\"\"\"SELECT ISBN FROM productof WHERE orderNumber=%s\"\"\", (order[0],))\n for ISBN in self.cursor.fetchall():\n invalid_isbn_list.append(ISBN[0])\n self.cursor.execute(\"\"\"SELECT ISBN FROM productof 
WHERE orderNumber=%s\"\"\", (orderNumber,))\n for ISBN in self.cursor.fetchall():\n books_in_order.append(ISBN[0])\n self.cursor.execute(\"\"\"SELECT P.ISBN FROM productof P WHERE EXISTS \n (SELECT orderNumber FROM productof P2 WHERE ISBN = %s AND P2.orderNumber = P.orderNumber)\"\"\", (ISBN[0],))\n for valid_isbn in self.cursor.fetchall():\n possible_isbn_list.append(valid_isbn[0])\n valid_isbn_list = [i for i in possible_isbn_list if i not in invalid_isbn_list]\n return_list = []\n for book in valid_isbn_list:\n book, author = self.get_single_book_info(book)\n return_list.append([book, author])\n return return_list" ]
[ "0.6242606", "0.6134235", "0.61072266", "0.6074063", "0.601683", "0.6006033", "0.5984732", "0.59228015", "0.59210134", "0.59080076", "0.5892924", "0.5887974", "0.5790417", "0.577142", "0.57639205", "0.57627887", "0.5726483", "0.5719359", "0.57189673", "0.5704592", "0.5703736", "0.570264", "0.5686726", "0.56712115", "0.56482875", "0.56312686", "0.55990684", "0.5526035", "0.55179787", "0.550961" ]
0.7258699
0
This method checks if the book is already registered in the database, by barcode.
def verify_register(self, barcode: str): try: test = [] self.db.cursor.execute(f'SELECT * FROM books where bar_code = {barcode}') for i in self.db.cursor.fetchall(): test.append(i) except Exception as error: print(error) else: if len(test) >= 1: return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def register_book(self, title: str, author: str, price: float, barcode: str, stock=0):\n try:\n if not self.verify_register(barcode):\n self.db.cursor.execute('INSERT INTO books (title, author, price, bar_code, stock) VALUES (%s, %s, %s, '\n '%s, %s)', (title, author, round(price, 2), barcode, stock))\n self.db.con.commit()\n self.db.con.close()\n print('Registered Successfully!')\n else:\n print('Book already registered!')\n except Exception as error:\n print(error)", "def check_if_exists(self, bookID):\n query = f\"\"\"SELECT * from {TABLE} WHERE bookID = '{bookID}';\"\"\"\n res = self.cursor.execute(query)\n\n if self.cursor.fetchall():\n return True\n else:\n return False", "def is_book_exist(self, book_info):\n for type, link in book_info.links.items():\n try:\n bookfile = BookFile.objects.get( link_hash = md5(link).hexdigest() )\n books = bookfile.book_set.all()\n if books:\n return True, books[0]\n except BookFile.DoesNotExist:\n continue\n try:\n book = Book.objects.get(author__name=book_info.authors, title=book_info.title)\n return True, book\n except Book.DoesNotExist:\n continue\n return False, None", "def has_book(self, book):\n return self.books.filter(users_books.c.book_id == book.id).count() > 0", "def has_book(self, book):\n return self.books.filter(lists_books.c.book_id == book.id).count() > 0", "def save(self, *args, **kwargs):\n if not self.id:\n while True:\n code = ''.join(random.choice('0123456789ABCDEF') for i in range(16))\n if not Book.objects.filter(unique_code=code).exists():\n self.unique_code = code\n break\n return super(Book, self).save(*args, **kwargs)", "def has_been_provided(self, barcode):\n count = self._bc2cnt(barcode)\n return count < self.current()", "def validate_bookid(self,book_id):\r\n if int(book_id) in [i.book_id for i in self.issued_books]:\r\n return True\r\n else:\r\n return False", "def is_book_available(self, book):\n request_url = \"%s?q=%s\" % (self.API_URL, book)\n json_data = self.make_request(request_url)\n if json_data and len(json_data['docs']) >= 1:\n return True\n return False", "def consult_books(self, bar_code: str):\n try:\n book_data = []\n self.db.cursor.execute('SELECT * from books WHERE bar_code = %s', (bar_code,))\n for i in self.db.cursor.fetchall():\n book_data.append(i)\n except Exception as error:\n print(error)\n else:\n print(f\"ID BOOK: {book_data[0][0]}\\n\"\n f\"TITLE: {book_data[0][1]}\\n\"\n f\"AUTHOR: {book_data[0][2]}\\n\"\n f\"PRICE: R$:{book_data[0][3]}\\n\"\n f\"BAR CODE: {book_data[0][4]}\\n\"\n f\"STOCK: {book_data[0][5]}\")", "def book_exist(author, title, edition):\n book = Book.query.filter_by(\n author=author,\n book_title=title,\n edition=edition).first()\n if book:\n return True\n return False", "def available_book(rentalList, idBook):\n for rent in reversed(rentalList):\n if idBook == rent.get_idBook():\n if rent.get_flag() == \"1\":\n raise RepositoryExceptionRent (\"\\n The book is already rented. 
\\n\".upper())\n else:\n break", "def valid_book(self, info):\n self.cursor.execute(\"SELECT ISBN, title, price, stock FROM book WHERE ISBN=%s\", (info['ISBN'],))\n for book in self.cursor.fetchall():\n return True, float(book[2]), book[1], book[3]\n return False, 0, 0, 0", "def is_valid_book(current_author, inputed_name, availale_books):\n\tbook_info = []\n\tauthor_book = {}\n\n\tfor book in availale_books:\n\t\tauthor = book.author.username\n\t\tauthor_book[author] = book.book_name\n\t\tbook_info.append(author_book)\n\t\tauthor_book = {}\n\n\tfor book in book_info:\n\t\tfor author, book_name in book.items():\n\t\t\tif book_name == inputed_name and author == current_author:\n\t\t\t\treturn False\n\n\treturn True", "def delete_book(self, barcode):\n try:\n self.db.cursor.execute('DELETE FROM books where id_books = %s', (barcode,))\n except Exception as error:\n print(error)\n else:\n self.db.con.commit()\n self.db.con.close()\n print('Deleted Successfully!')", "def _do_check(self):\n try:\n #breakpoint()\n ApplicationsItem.objects.exists()\n #print (\"Checking\")\n return True\n\n except Exception:\n client.captureException()\n return False", "def check_book(book_info, user_id):\n book = session.query(Book).filter(or_(Book.id == book_info,\n Book.book_name == book_info)).filter(Book.user_id == user_id).first()\n if book:\n return book", "def check_code_and_rent(main_page, book_code):\n\n with open('rented.csv', 'r') as rented_base:\n rented_reader = csv.reader(rented_base)\n next(rented_reader)\n\n rented_book_data = []\n check_if_available(main_page, rented_reader, book_code,\n rented_book_data)\n\n if rented_book_data == []:\n print(\"There is no book with this code\")\n return 1", "def borrow_book(self, author, title, publisher, edition, email, book_id):\n for book in self.books_list:\n if book['book_id'] != str(book_id):\n return 'book does not exist'\n continue\n else: \n book = {\n 'author' : author,\n 'title' : title,\n 'publisher' : publisher,\n 'edition' : edition,\n 'email' : email\n }\n self.borrowed_books.append(book)\n return book", "def create_book(self, title, isbn):\n isbn_list = [book.get_isbn() for book in self.books.keys()]\n if isbn in isbn_list:\n print(\"ISBN {isbn} already exists. Please provide a unique ISBN.\".format(isbn=isbn))\n else:\n return Book(title, isbn)", "def add_book(self):\r\n self.addbook = input(\"\\nInput the name of book:\")\r\n if self.addbook in self.books:\r\n print(f\"{self.addbook} is already in the list\")\r\n else:\r\n books = self.books.append(self.addbook)\r\n print(f\"The book {self.addbook} added successfully\")", "def exists_in_db(self) -> bool:\n query = '''SELECT * \n FROM ESLReceipts \n WHERE Transaction_Number=? AND Date=? AND Description=? \n AND Memo=? AND Amount_Debit=? \n AND Amount_Credit=? AND Balance=? \n AND Check_Number=? AND Fees=? \n AND Card_Type=? AND Is_Payment=? \n AND Is_Transaction=? 
AND User_id=?;'''\n return len(self.db.fetchall(query, values=self.to_tuple())) > 0", "def view_books():\r\n flag = 0\r\n for book in LibraryDB.book_list:\r\n if book.availability:\r\n book.student_display()\r\n flag = 1\r\n if not flag:\r\n print(\"No books are available in the catalogue!\")\r\n return 0\r\n else:\r\n return 1", "def check_if_available(main_page,rented_reader, book_code,\n rented_book_data):\n\n for line in rented_reader:\n if line[0] == book_code:\n if line[-2] == 'FALSE':\n print('Books is unavailable')\n return\n else:\n rented_book_data = line\n change_books_status(main_page,book_code,\n rented_book_data)\n print(\"Congratulations, you've rented a book!\")\n return", "def verify_barcodes(self):\r\n adata_parent = sc.read(self.parent)\r\n subset_inds = self.get_subset_inds(adata_parent)\r\n barcodes = adata_parent[subset_inds].obs_names.values\r\n if set(barcodes)!=set(self.adata.obs_names.values):\r\n raise ValueError('Subset differs from existing subset.')\r\n return", "def test_available_book():\n rep = RentRepository()\n rep.store( '23','12', '1', '1')\n try:\n\n idBook = '12'\n idCustomer = '22'\n flag = '1'\n id = '1'\n Validator.available_book(rep.get_all(), idBook)\n\n assert False\n\n except RepositoryExceptionRent as msg:\n assert True", "def book(self) -> bool:\n # Check for space\n if self.reservation.is_free():\n self.button.click()\n logging.info('Class registered: {}'.format(self))\n check = True\n else:\n logging.info('No space at the moment')\n check = False\n\n return check", "def sqlite_save(barcodes):\n\n # quick and dirty - don't save if there is no data.\n if not len(barcodes):\n # skip this if there are no barcodes\n return \"No data to save... continue...\"\n\n # reformat each list item to a tuple for sqlite3 executemany\n barcode_tuples = [(x,) for x in barcodes]\n\n conn = sqlite3.connect(db_file)\n c = conn.cursor()\n\n c.executemany(\"INSERT INTO barcode VALUES (?)\", barcode_tuples)\n\n c.execute(\"SELECT COUNT(*) FROM barcode\")\n BOOK_COUNT = c.fetchall()\n\n conn.commit()\n conn.close()\n \n return \"Save seemed successful. {} total books have been entered.\".format(BOOK_COUNT[0][0],)", "def valid(self):\n raise BookInfoNotImplementedError('valid', self.__class__.__name__)", "def addBooks(request):\n if request.method == 'POST':\n userHash = request.data['hash']\n email = request.data['email']\n\n User = UserInformation.objects.get(loginSessionHash=userHash)\n\n title = request.data['title']\n pages = request.data['pages']\n author = request.data['author']\n url = request.data['url']\n if User.email == email:\n book = Book(title=title, holder=User, pages=pages, author=author, url=url)\n book.save()\n return Response({'status': 'successfull'})" ]
[ "0.7017784", "0.6627457", "0.6388775", "0.6382595", "0.6364732", "0.6298332", "0.6274584", "0.6257029", "0.620658", "0.6175331", "0.61634517", "0.6128748", "0.6127025", "0.6117657", "0.60925686", "0.6036167", "0.59862727", "0.5967401", "0.59655285", "0.5958692", "0.5928705", "0.5753345", "0.56920356", "0.5690977", "0.56590736", "0.5658347", "0.5643462", "0.5641896", "0.56401634", "0.5631592" ]
0.8429786
0
A method to generate a nonce to send to the validation server. As specified by the protocol, the nonce must be between 16 and 40 alphanumeric characters long with random unique data.
def generate_nonce(): return uuid4().hex
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def nonce():\n return random.randint(0, 4294967295)", "def nonce():\n return random.randint(0, 4294967295)", "def _generate_nonce(self):\n return str(random.randrange(100000, 999999))", "def gen_nonce(self, length=32):\n if(length < 32):\n res = {\"message\": 'Invalid nonce length'}, 400\n else:\n nonce = secrets.token_hex(floor(length))\n nonces_file = \"client-generate-nonces.txt\"\n res = self.check_nonce(nonce, nonces_file, length)\n return res", "def generateNonce():\r\n hash = hashlib.sha1()\r\n hash.update(str(time.time()).encode('utf-8'))\r\n return int.from_bytes(hash.digest()[:2], byteorder=sys.byteorder)", "def nonce(length=40, prefix=\"access_token\"):\n rbytes = os.urandom(length)\n return \"{}_{}\".format(prefix, str(hashlib.sha1(rbytes).hexdigest()))", "def generate_nonce():\n return str(int(round(time.time() * 1000)))", "def generateNonce():\n hash = hashlib.sha1()\n hash.update(str(time.time()).encode('utf-8'))\n return int.from_bytes(hash.digest()[:2], byteorder=sys.byteorder)", "def generateNonce():\n hash = hashlib.sha1()\n hash.update(str(time.time()).encode('utf-8'))\n return int.from_bytes(hash.digest()[:2], byteorder=sys.byteorder)", "def _get_nonce():\n return uuid.uuid4().get_hex()", "def _nonce():\n return str(round(100000 * time.time()) * 2)", "def generate_nonce(length=8):\n return ''.join([str(random.randint(0, 9)) for i in range(length)])", "def get_nonce(length=16):\n characters = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'\n charlen = len(characters)\n return \"\".join([characters[SystemRandom().randint(0, charlen - 1)] for _ in range(0, length)])", "def GetCspNonce():\n NONCE_LENGTH = 16\n return base64.b64encode(os.urandom(NONCE_LENGTH))", "def create_nonce():\n default_seed = 'ifh2847fhsn\"lqOEYd@#Djh(&'\n hash = sha.new(default_seed)\n hash.update(str(datetime.utcnow()))\n return hash.hexdigest()", "def _nonce(self):\n return str(int(round(time.time() * 10000)))", "def make_nonce():\n time_format = '%Y-%m-%dT%H:%M:%SZ'\n time_component = time.strftime(time_format, time.gmtime())\n valid_chars = ''\n\n # iterate over all the aschii characters for a list of all alpha-numeric characters\n for char_index in range(0, 128):\n if chr(char_index).isalpha() or chr(char_index).isalnum():\n valid_chars += chr(char_index)\n\n random_str = ''\n random_chr = random.SystemRandom()\n for i in range(0, 6):\n random_str += random_chr.choice(valid_chars)\n\n return '001{time_str}{random_str}'.format(time_str=time_component,\n random_str=random_str)", "def make_nonce (self, request):\r\n ip = request.channel.server.ip\r\n now = str(long(time.time()))\r\n if now[-1:] == 'L':\r\n now = now[:-1]\r\n private_key = str (id (self))\r\n nonce = ':'.join([ip, now, private_key])\r\n return self.apply_hash (nonce)", "def _GetCspNonce():\n nonce_length = constants.NONCE_LENGTH\n return base64.b64encode(os.urandom(nonce_length * 2))[:nonce_length]", "def get_nonce() -> int:\n return int(time.time() * FACTOR)", "def _oauth_nonce_generate(self):\n\t\traw_data = random.getrandbits(32 * 8)\n\t\traw_str = ''\n\t\tfor i in range(32):\n\t\t\tnew_part = raw_data % 256\n\t\t\traw_data /= 256\n\t\t\traw_str += chr(new_part)\n\t\n\t\tencoded = base64.b64encode(raw_str) \n\t\treturn encoded.rstrip('=').replace('+', 'A').replace('/', 'B')", "def dirty_nonce(rev, NONCE_LEN=5, **kwargs):\n import uuid\n return '%s-%s' % (rev, uuid.uuid4().hex[:NONCE_LEN])", "def _build_new_nonce(self):\n seqno = self.new_sequence_number()\n\n partial_iv = seqno.to_bytes(5, 'big')\n\n return 
(self._construct_nonce(partial_iv, self.sender_id), partial_iv.lstrip(b'\\0') or b'\\0')", "def get_initial_nonce(self):\n\n #First we will initiate the nonce with the prng.\n bit_nonce = int_to_bitstr(self.prng, 16)\n\n \"\"\" Then we generate the second part by taking only \n the last 16 bits until we have 32 bits in total. \"\"\"\n for i in range(16):\n bit_nonce += self.prng_feedback(bit_nonce[i:i+16])\n\n \"\"\" The new state of the prng will be the last 16 bits\n of the nonce, because we discarded 16 bits during the\n feedback loop. The initial nonce has 32 bits now. \"\"\"\n bit_prng = bit_nonce[16:]\n\n self.prng = bitstr_to_int(bit_prng)\n self.nonce = bitstr_to_int(bit_nonce)\n\n return self.nonce", "def gen_oauth_nonce():\n\trandom = os.urandom(32)\n\tencoded = base64.b64encode(random)\n\twords = re.sub('[^\\w]', '', str(encoded))\n\treturn words", "def nonceRFC6979(privKey, inHash):\n # Truncate private key if too long.\n if len(privKey) > 32:\n privKey = privKey[:32]\n\n q = Curve.N\n x = privKey\n\n qlen = q.bit_length()\n holen = SHA256_SIZE\n rolen = (qlen + 7) >> 3\n bx = int2octets(x, rolen) + bits2octets(inHash, rolen)\n\n # Step B\n v = ByteArray(bytearray([1] * holen))\n\n # Step C (Go zeroes the all allocated memory)\n k = ByteArray(0, length=holen)\n\n # Step D\n k = mac(k, v + ByteArray(0x00, length=1) + bx)\n\n # Step E\n v = mac(k, v)\n\n # Step F\n k = mac(k, v + 0x01 + bx)\n\n # Step G\n v = mac(k, v)\n\n # Step H\n while True:\n # Step H1\n t = ByteArray(b\"\")\n\n # Step H2\n while len(t) * 8 < qlen:\n v = mac(k, v)\n t += v\n\n # Step H3\n secret = hashToInt(t)\n if secret >= 1 and secret < q:\n return secret\n\n k = mac(k, v + 0x00)\n v = mac(k, v)", "def set_nonce(self, nonce=None):\n if nonce is None:\n nonce = os.urandom(32)\n self.nonce = nonce", "def _nonce(self):\n # Note: if we use multithreading for a single exchange, this may\n # cause an issue.\n delta = datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)\n return int(delta.total_seconds() * 1000)", "def get_nonce(self, address):\n nonce = self.web3_object.eth.getTransactionCount(address)\n return nonce", "def create_new_nonce(self):\n\n self.nonce_action_auth = util.create_id_task() # create a new random auth string\n self.nonce_action_auth_valid_uses = 1\n return self.nonce_action_auth" ]
[ "0.8042163", "0.8042163", "0.8010228", "0.7921566", "0.79005414", "0.78887206", "0.78473306", "0.78447676", "0.78447676", "0.77639616", "0.75877625", "0.75856775", "0.748053", "0.74550116", "0.7397726", "0.73679745", "0.73569447", "0.7110729", "0.70935357", "0.7075415", "0.7051455", "0.6972825", "0.6750428", "0.65229195", "0.6338321", "0.6337487", "0.62989247", "0.6127821", "0.6022267", "0.5982109" ]
0.8238167
0
Check that the response is a valid response to our request; that is, that the otp that was returned is the otp we sent originally, that the nonce that was sent was the nonce we had originally, and that the signature (if C{self.api_key} is not C{None}) is valid
def _verify_response(self, text_response, orig_otp, orig_nonce): response_dict = dict([line.strip(' ').split('=', 1) for line in re.split(r'\r\n', text_response) if line.strip()]) if 'otp' in response_dict and response_dict['otp'] != orig_otp: raise YubiKeyVerificationError( "Received response that does not match the OTP that was " "sent to be verified.") if 'nonce' in response_dict and response_dict['nonce'] != orig_nonce: raise YubiKeyVerificationError( "Received response that does not match the OTP that was " "sent to be verified.") if self.api_key is not None: sig = sign_query(response_dict, self.api_key) if response_dict['h'].decode('base64') != sig.decode('base64'): raise YubiKeyVerificationError( "Received a response whose signature is invalid") return response_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_response(self, response):\n pass", "def _check_response(self, res: requests.Response, token: str) -> None:\n return", "def verify_response_dict(api_key, response):\n LOGGER.debug('Verifying WSAPI response signature')\n\n # Remove signature from the response\n r = dict(response)\n del r['h']\n\n # Convert to HTML query as that is used by Yubico to sign the response\n query = sorted_urlencode(list(r.iteritems()))\n\n # We unquote it because it's not the HTTP quoted version\n query = urllib.unquote_plus(query)\n\n status = sign(api_key, query) == response['h']\n LOGGER.debug('Signature result ' + str(status))\n return status", "def _check_response(self, response, request):\n\n if (response.status_code == 401 or\n response.status_code == 403):\n login_request = (\"https://\" + self.gateway_address +\n \":\" + self.gateway_port + \"/api/login\")\n r = requests.get(login_request,\n auth=(self.sio_user, self.sio_pass),\n verify=False)\n token = r.json()\n self.sio_token = token\n # Repeat request with valid token.\n response = requests.get(request,\n auth=(self.sio_user, self.sio_token),\n verify=False)\n\n return response", "def _validate_response(self, response):\n # Check for unexpected response - all should be JSON dicts that have\n # already been deserialised\n if not isinstance(response, types.DictionaryType):\n self.message(\n \"\\t\\t[!] ERROR - Unexpected value returned from the API: '%s'\" %\n (response))\n return False\n\n # Check for valid errors\n if \"error\" in response and \"msg\" in response:\n self.message(\n \"\\t\\t[!] ERROR - %s (%s)\" %\n (response[\"msg\"], response[\"timestamp\"]))\n return False\n\n # Is this a valid response message\n if \"msg\" in response:\n return True\n\n # Catch all...dictionary returned but does not contain expected keys?\n # Who know's what's going on here?!\n else:\n self.message(\n \"\\t\\t[!] 
ERROR - Unexpected dictionary response returned from the API: '%s'\" %\n (response))\n return False", "def _check_200(self, response):\n if response.code != 200:\n raise YubiKeyVerificationError(\n \"Received {0} response.\".format(response.code))\n return response", "def is_valid_response(self, response):\r\n if response.status_code in VALID_CODES:\r\n return True\r\n return False", "def _check_response(self, res: requests.Response, token: str) -> None:\n raise RuntimeError('Cannot use _check_response of this abstract class.')", "def validate_response(response):\n assert response.ok\n rpcdict = response.json()\n assert rpcdict['jsonrpc'] == '2.0'\n assert rpcdict['id']\n assert 'error' in rpcdict.keys() or 'result' in rpcdict.keys()", "def _check_token_response(self, response, *args, **kwargs):\n raise NotImplementedError('Subclasses must implement this method.')", "def validate_token(self):\n r = requests.get(urljoin(self._url, Client._token_resource),\n params={\"tokenid\": self._token_id})\n\n if r.status_code == requests.status_codes.codes.unauthorized:\n raise ClientUnauthorized()\n elif r.status_code != requests.status_codes.codes.ok:\n error_messages = self._parse_invalid_request(r.text)\n raise ClientException(r.status_code, error_messages)\n\n try:\n type_, value = r.text.split(\"=\")\n value = value.strip(\" \\r\\n\")\n except Exception, e:\n raise ClientException(r.status_code,\n \"Some error has ocurred getting the result value from %s\"\n % r.text)\n\n return value == \"true\"", "def validate(self, response):\n return response[\"status_code\"] == 1", "def check_response_errors(self, resp):\n return True", "def validate_connection(self):\n __method_name = inspect.currentframe().f_code.co_name\n res = self.pull(\n url=self.base_url + consts.OAUTH2_ENDPOINT,\n auth=HTTPBasicAuth(self.client_id, self.client_secretkey),\n data={\"grant_type\": \"client_credentials\"},\n method=\"POST\",\n )\n if res and res.get(\"access_token\"):\n self.session.headers[\"Authorization\"] = \"bearer {}\".format(\n res.get(\"access_token\")\n )\n self.applogger.info(\n \"{}(method={}) : {} : Validation successful.\".format(\n consts.LOGS_STARTS_WITH, __method_name, self.function_name\n )\n )\n return\n self.applogger.error(\n \"{}(method={}) : {} : Error occurred while fetching the access token from the response. \"\n 'Key \"access_token\" was not found in the API response.'.format(\n consts.LOGS_STARTS_WITH, __method_name, self.function_name\n )\n )\n raise Exception(\n \"Error occurred while fetching the access token from the response. 
\"\n 'Key \"access_token\" was not found in the API response.'\n )", "def validate_response(response: json):\n if \"error\" in response:\n print(\"ERROR: Request returned error\")\n print_request_response(response)\n exit(1)", "def parse_response(self, response: requests.Response) -> APIResponse:\n if response.status_code != 200:\n if self.raise_exception:\n if response.status_code in HTTP_ERROR_CSOB_EXCEPTIONS.keys():\n raise HTTP_ERROR_CSOB_EXCEPTIONS[response.status_code](response)\n response.raise_for_status()\n else:\n return APIResponse(response, is_verified=None)\n\n is_verified = self.verify_signature(response.json())\n if is_verified is False and self.raise_exception:\n raise GatewaySignatureInvalid(response)\n\n return APIResponse(response, is_verified=is_verified)", "def is_response_valid(response):\n valid_keys = ('action', 'time', 'data', 'code', 'address')\n if all(key in response for key in valid_keys):\n return True\n raise ValueError", "def _check_response(self, response):\n if response.status_code == requests.codes.ok:\n # Since the ZenHub REST API does not send back 204 when there is\n # no content, we have to check the Content-Length for 0 :(\n if int(response.headers['Content-Length']):\n return response.json()\n elif response.status_code == requests.codes.not_found:\n return None\n else:\n return response.raise_for_status()", "def check_oauth(self):\n resp = dict(self.__httprequest.POST.dict())\n orderedresp = OrderedDict(sorted(resp.items(), key=lambda t: t[0]))\n query_string = urllib.urlencode(orderedresp)\n oauth_headers = dict(signature.collect_parameters(query_string, exclude_oauth_signature=False))\n sig = oauth_headers.pop('oauth_signature')\n consumer_secret = self.get_oauthsecret_for_key(orderedresp.get('oauth_consumer_key'))\n\n oauthrequest = Oauthrequest()\n oauthrequest.params = oauth_headers.items()\n oauthrequest.uri = unicode(urllib.unquote(self.__httprequest.build_absolute_uri()))\n oauthrequest.http_method = unicode('POST')\n oauthrequest.signature = sig\n if signature.verify_hmac_sha1(request=oauthrequest, client_secret=unicode(consumer_secret)):\n return True\n return False", "def check_response(self, challenge, response):\n if challenge is not None:\n expected_response = challenge.identifier + self.secret + challenge.challenge\n expected_response_hashed = hashlib.sha1(expected_response)\n if expected_response_hashed == response.response_hash:\n return True\n else:\n return False\n else:\n raise Exception", "def verify(self, response):", "def _validate_jwt_token(self):\n # force https so that we don't send around tokens unsecurely\n url = 'https://{}/api/token/verify'.format(urlparse(self.base_url).netloc)\n \n # paranoid: check again that we only send the token to https\n if urlparse(url).scheme != \"https\":\n msg = 'This should not happen, please file a bug report.'\n raise Exception(msg)\n\n if not self.jwt_access_token:\n raise FDSNUnauthorizedException(\"Unauthorized, authentication \"\n \"required.\", )\n\n # convert to json\n data = json.dumps({\"token\": self.jwt_access_token})\n # encode\n data = bytes(data, \"utf-8\")\n headers = {\"Content-Type\": \"application/json\"}\n html = urllib_request.Request(url, data=data, headers=headers)\n # decode('utf-8')\n try:\n result = urllib_request.urlopen(html).read().decode(\"utf-8\")\n dic = json.loads(result)\n valid = not bool(dic)\n if self.debug:\n print('Valid token : {}'.format(valid))\n return valid\n except urllib_error.HTTPError as e:\n return False", "def 
error_invalid_response(self):\r\n return self.type() == 0x00", "def validate_response(self, response):\n crypted = response[-0x100:]\n # check that not all values are the same\n if all(v == crypted[0] for v in crypted):\n return False\n # return if chunks of 0x10 repeat\n return (len([True for i in range(0x10, len(crypted), 0x10)\n if crypted[:0x10] == crypted[i:i+0x10]])) == 0xf", "def __CheckResponse(self, response):\n\n status = response.status\n if (status == httplib.OK or status == httplib.CREATED\n or status == httplib.NO_CONTENT):\n return\n elif (status == httplib.UNAUTHORIZED):\n raise BadCredentialsException\n elif (status == httplib.SERVICE_UNAVAILABLE):\n raise ServerBusyException\n elif (status == httplib.BAD_REQUEST\n or status == httplib.UNPROCESSABLE_ENTITY):\n raise BadArgumentsException\n elif (status == httplib.NOT_FOUND):\n raise NotFoundException\n else:\n raise BadOperationException", "def check(self, request, consumer, token, signature):\r\n built = self.sign(request, consumer, token)\r\n return built == signature", "def response_check(response):\n print(response)\n print(response.text)\n return response.status_code == 201", "def check_valid_cancel_response(response: HTTPResponse) -> bool:\n return response.status_code == 204", "def checkResponseOK(response):\n assert response['result'] == 'OK'", "def _validate_otp(self, otp):\n try:\n if self.ykval_client.verify(otp):\n return True\n return False\n except Exception as err:\n logger.error('OTP Validation failed: %r', err)\n return False" ]
[ "0.6813327", "0.6812169", "0.67820585", "0.65645856", "0.6504918", "0.6386462", "0.63724524", "0.6230743", "0.622919", "0.6145281", "0.6137439", "0.60897577", "0.608821", "0.6076044", "0.607178", "0.6069999", "0.60647804", "0.60482323", "0.6047397", "0.60116434", "0.59915906", "0.5940029", "0.59316415", "0.59143734", "0.5899624", "0.5859947", "0.5850863", "0.58272713", "0.5823491", "0.5822322" ]
0.7006299
0
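The negative passages in the record above share one pattern: inspect an HTTP response's status code and decoded payload before trusting it. A minimal sketch of that pattern, using the `requests` calls several of those passages already rely on (`raise_for_status()`, `.json()`); the required key names here are assumptions made purely for illustration and are not taken from any passage:

```python
import requests

REQUIRED_KEYS = ("status", "data")  # hypothetical keys, chosen for illustration only

def validate_json_response(response: requests.Response) -> dict:
    # Reject 4xx/5xx responses first, mirroring the raise_for_status() pattern above.
    response.raise_for_status()
    payload = response.json()
    missing = [key for key in REQUIRED_KEYS if key not in payload]
    if missing:
        raise ValueError(f"response is missing expected keys: {missing}")
    return payload
```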
Verifies an OTP against the validation servers provided to the verifier. It queries all servers in parallel and waits for answers. A server will not respond positively until it has synchronized the new OTP counter with the other servers, and the verifier waits until it has received one valid response (HTTP 200, matching otp and nonce, correct signature), whether positive (i.e., the OTP is valid) or negative (i.e., the OTP is replayed). Note that signature validation errors may occur, due to implementation details of the Yubico validation servers, if invalid parameters are passed, e.g. an OTP whose characters fall outside the ModHex alphabet.
def verify(self, otp, timestamp=None, sl=None, timeout=None):
    query_dict = {
        'id': self.verifier_id,
        'otp': otp,
        'nonce': self.generate_nonce()
    }
    if timestamp is not None:
        query_dict['timestamp'] = int(bool(timestamp))
    if sl is not None:
        query_dict['sl'] = max(0, min(100, int(sl)))
    if timeout is not None:
        query_dict['timeout'] = int(timeout)
    if _otp_re.search(otp) is None:
        return fail(YubiKeyVerificationError(
            "OTP needs to be between 32 and 48 characters long"))
    if _nonce_re.search(query_dict['nonce']) is None:
        return fail(YubiKeyVerificationError(
            "Nonce generator produced an invalid nonce"))
    if self.api_key is not None:
        query_dict['h'] = sign_query(query_dict, self.api_key)
    return self._request_from_all_servers(query_dict)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verify_otp(request: Request, body: VerifyOTPIn, db: Session = Depends(get_db)):\n mgr = LoginManager()\n mgr.verify_otp(db, body.identifier, body.code)\n request.session[\"access_token\"] = secrets.token_hex(16)\n return {\"status\": \"OK\"}", "def sync_remote(self, otp_params, local_params, server_nonce, required_answers, timeout=1):\n # Construct URLs\n responses = []\n dqueue = queue.Queue()\n for row in self.db.get_queue(otp_params['modified'], server_nonce):\n url = '%(server)s?otp=%(otp)s&modified=%(modified)s' % row\n url += '&' + row['info'].split(',')[0]\n _thread = threading.Thread(target=self._fetch_remote,\n args=(dqueue, row['server'], url, timeout))\n _thread.daemon = True\n _thread.start()\n loop_start = time.time()\n while len(responses) < required_answers and time.time() < loop_start + timeout * 1.5:\n try:\n resp = dqueue.get(timeout=0.2)\n responses.append(resp)\n # Delete entry from table\n self.db.remove_from_queue(resp['server'], otp_params['modified'], server_nonce)\n except queue.Empty:\n pass\n\n answers = len(responses)\n # Parse response\n valid_answers = 0\n for resp in responses:\n resp_params = resp['params']\n logger.debug('[%s] local DB contains %s',\n otp_params['yk_publicname'], local_params)\n logger.debug('[%s] response contains %s',\n otp_params['yk_publicname'], resp_params)\n logger.debug('[%s] OTP contains %s',\n otp_params['yk_publicname'], otp_params)\n # Update Internal DB (conditional)\n self.db.update_db_counters(resp_params)\n # Check for Warnings\n # https://developers.yubico.com/yubikey-val/doc/ServerReplicationProtocol.html\n # NOTE: We use local_params for validationParams comparison since they are actually\n # the same in this situation and we have them at hand.\n if counters_gt(local_params, resp_params):\n logger.warning('[%(yk_publicname)s] Remote server out of sync', otp_params)\n if counters_gt(resp_params, local_params):\n logger.warning('[%(yk_publicname)s] Local server out of sync', otp_params)\n if counters_eq(resp_params, local_params) \\\n and resp_params['nonce'] != local_params['nonce']:\n logger.warning('[%(yk_publicname)s] Servers out of sync. '\n 'Nonce differs.', otp_params)\n if counters_eq(resp_params, local_params) \\\n and resp_params['modified'] != local_params['modified']:\n logger.warning('[%(yk_publicname)s] Servers out of sync. '\n 'Modified differs.', otp_params)\n if counters_gt(resp_params, otp_params):\n logger.warning('[%(yk_publicname)s] OTP is replayed. '\n 'Sync response counters higher than OTP counters.', otp_params)\n elif counters_eq(resp_params, otp_params) \\\n and resp_params['nonce'] != otp_params['nonce']:\n logger.warning('[%(yk_publicname)s] OTP is replayed. 
Sync '\n 'response counters equal to OTP counters and nonce '\n 'differs.', otp_params)\n else:\n # The answer is ok since a REPLAY was not indicated\n valid_answers += 1\n if required_answers == valid_answers:\n break\n\n # NULL queued_time for remaining entries in queue, to allow\n # daemon to take care of them as soon as possible.\n self.db.null_queue(server_nonce)\n return {'answers': answers, 'valid_answers': valid_answers}", "def test_valid_otp(self, client, valid_otp_data):\n resp = client.post(self.url, json=valid_otp_data)\n assert resp.status_code == 200\n assert resp.json()[\"status\"] == \"OK\"", "def test_successful_verification(self):\n for i in (-2, -1, 0, 1, 2):\n\n description = \"TOTP not verified for `i={0}`\".format(i)\n calculated = self.algorithm.calculate(self.device.secret, drift=i)\n confirmed = self.relate.verify(calculated, save=False)\n\n self.assertTrue(confirmed, description)\n\n self.relate.confirm = False", "def otp_verify(request):\n phone = request.GET.get('phone', None)\n otp = request.GET.get('otp', None)\n verified, user_exists, auth_token, user_id = UserAuth(phone).verify_otp(otp)\n return Response(\n {\n 'phone': phone,\n 'success': verified,\n 'is_registered': user_exists,\n 'auth_token': auth_token,\n 'user_id': user_id\n }\n )", "def telegram_check():\n hotp = pyotp.HOTP('base32secret3232')\n random_seed = random.randint(9999, 99999)\n tkinter.messagebox.showinfo(\"\", \"Ga naar: http://t.me/BevFietsBot\" + \"\\nen stuur deze code: \" + hotp.at(random_seed)\n + \"\\nGa na versturen verder.\")\n telegram_output = telegram_read()\n\n if hotp.verify(telegram_output, random_seed):\n return 1\n else:\n tkinter.messagebox.showinfo(\"\", \"Inlog gegevens niet correct\")\n return 0", "def authenticate():\n mobile_number = input(\"Enter mobile number : \")\n response = post_request(\n url=BASE_URL + GENERATE_OTP_URL,\n body={\n \"mobile\": mobile_number\n }\n )\n if response.status_code == 200:\n \"\"\"\n Check if OTP sent within last few minutes\n \"\"\"\n while True:\n if response.text == \"OTP Already Sent\":\n print(\"OTP Already Sent\")\n wait_no_of_seconds()\n response = post_request(\n url=BASE_URL + GENERATE_OTP_URL,\n body={\n \"mobile\": mobile_number\n }\n )\n else:\n return json.loads(response.text)['txnId']\n else:\n print(get_error_message(str(response.status_code)))\n exit(1)", "def _validate_otp(self, otp):\n try:\n if self.ykval_client.verify(otp):\n return True\n return False\n except Exception as err:\n logger.error('OTP Validation failed: %r', err)\n return False", "async def verify(self, data, signature):\n\t\tsignature_struct = NTLMSSP_MESSAGE_SIGNATURE.from_bytes(signature)\n\t\tcalc_sig = self.MAC(self.crypthandle_server.encrypt, self.SignKey_server, signature_struct.SeqNum, data)\n\t\t#print('server signature : %s' % signature)\n\t\t#print('calculates signature: %s' % calc_sig)\n\t\treturn signature == calc_sig", "def verify():\n if flask.request.method == 'POST':\n req = flask.request.get_json(force=True)\n phone = req.get('phone')\n code = req['code']\n return check_verification(phone, code)", "def verify_token(vial_http: urllib3.connectionpool.ConnectionPool) -> bool:\n verify_resp = vial_http.request(\"GET\", \"/api/verifyToken\")\n return verify_resp.status == 200", "def verify(self, timeout=15):\n processed_host = (self.host.replace('sftp://', '')\n .replace('ftp://', '')\n #.replace('www.', '')\n .replace('https://', '')\n .replace('http://', '')\n .strip())\n protocol = self.protocol\n if protocol in ('ftp', 'ftps'):\n f = 
self._verify_ftp\n elif protocol == 'sftp':\n f = self._verify_sftp\n else:\n f = self._verify_spurious\n\n self.verified, self.verification_message = f(processed_host, timeout)\n self.last_verified = timezone.now()\n self.save(update_fields=['verified', 'verification_message',\n 'last_verified'])", "def test_unsuccessful_verification(self):\n for i in (-4, -3, 3, 4):\n description = \"TOTP verified for `i={0}`\".format(i)\n calculated = self.algorithm.calculate(self.device.secret, drift=i)\n confirmed = self.relate.verify(calculated, save=False)\n\n self.assertFalse(confirmed, description)\n\n self.relate.confirm = False", "def check_otp(email, otp_code):\n\tprint \"Inside check_otp\"\n\totp_key, qrcode_data = get_otp_key(email)\n\tprint \"DEBUG qrcode_data: \", qrcode_data\n\ttotp = pyotp.TOTP(otp_key)\n\n\tprint \"otp_code = \", otp_code\n\tprint \"otp_key = \", otp_key\n\tprint \"totp.now() = \", totp.now()\n\tprint \"TOTP provisioning_uri = \", totp.provisioning_uri(email) \n\n\tif totp.verify(otp_code):\n\t\tprint \"totp.verify() = True\"\n\t\treturn True\n\tprint \"totp.verify() = False\"\n\treturn False", "def nexmo_verify(request):\n number = request.validated[\"querystring\"][\"number\"]\n\n sender_id = nexmo_conf(request, \"sender_id\")\n params = {\n \"api_key\": nexmo_conf(request, \"api_key\"),\n \"api_secret\": nexmo_conf(request, \"api_secret\"),\n \"sender_id\": sender_id,\n \"code_length\": nexmo_conf(request, \"code_length\"),\n \"pin_expiry\": nexmo_conf(request, \"state_ttl_seconds\"),\n \"number\": number,\n \"brand\": nexmo_conf(request, \"brand\"),\n }\n\n verify_url = \"{}/verify/json\".format(\n nexmo_conf(request, \"api_endpoint\").rstrip(\"/\")\n )\n\n try:\n resp = requests.get(verify_url, params=params)\n except requests.exceptions.ConnectionError:\n logger.exception(\n \"A connection error occured when starting the nexmo auth process\"\n )\n error_msg = \"The Nexmo API is not ready, please retry later.\"\n return http_error(\n httpexceptions.HTTPServiceUnavailable(),\n errno=ERRORS.BACKEND,\n message=error_msg,\n )\n\n try:\n resp.raise_for_status()\n except requests.exceptions.HTTPError:\n logger.exception(\"An error occured when starting the auth process\")\n error_msg = \"The Nexmo API is not ready, please retry later.\"\n return http_error(\n httpexceptions.HTTPServiceUnavailable(),\n errno=ERRORS.BACKEND,\n message=error_msg,\n )\n\n data = resp.json()\n\n if data[\"status\"] == \"10\":\n description = (\n f\"An authentication request is already in progress for this number. 
\"\n f\"{data['error_text']}\"\n )\n error_details = {\n \"name\": \"number\",\n \"location\": \"querystring\",\n \"description\": description,\n }\n raise_invalid(request, **error_details)\n elif data[\"status\"] != \"0\":\n if data[\"status\"] in [\"6\", \"16\", \"19\"]: # pragma: no cover\n logger.info(\"Nexmo Verify Request failed: {}\".format(data))\n else:\n logger.error(\"Nexmo Verify Request failed: {}\".format(data))\n description = \"Something went wrong when trying to authenticate this number.\"\n error_details = {\n \"name\": \"number\",\n \"location\": \"querystring\",\n \"description\": description,\n }\n raise_invalid(request, **error_details)\n\n state = persist_state(request, {\"request_id\": data[\"request_id\"], \"number\": number})\n\n return {\"state\": state, \"sender_id\": sender_id}", "def verify(self):\n token = \"mytoken\" # set from wx server\n ll = []\n signature = self.get_argument(\"signature\", \"<none>\")\n ll.append(self.get_argument(\"timestamp\", \"<none>\"))\n ll.append(self.get_argument(\"nonce\", \"<none>\"))\n ll.append(token)\n ll.sort()\n m = hashlib.sha1()\n m.update(\"\".join(ll).encode(\"ascii\"))\n digest = m.hexdigest()\n\n if signature != digest:\n print(\"signature not match, discard this msg!\")\n return False\n else:\n print(\"signature match, got a wechat msg!\")\n return True", "def verify_otp(self, session, identifier, code):\n attempt = session.query(LoginAttempt).filter_by(identifier=identifier).first()\n conditions = [\n attempt,\n attempt.is_valid(),\n TOTPManager(attempt.user).verify(code),\n ]\n if not all(conditions):\n raise InvalidOTP\n return True", "def verify(self, ids):\n self._request('torrent-verify', {}, ids, True)", "def _verify_response(self, text_response, orig_otp, orig_nonce):\n response_dict = dict([line.strip(' ').split('=', 1) for line in\n re.split(r'\\r\\n', text_response)\n if line.strip()])\n\n if 'otp' in response_dict and response_dict['otp'] != orig_otp:\n raise YubiKeyVerificationError(\n \"Received response that does not match the OTP that was \"\n \"sent to be verified.\")\n\n if 'nonce' in response_dict and response_dict['nonce'] != orig_nonce:\n raise YubiKeyVerificationError(\n \"Received response that does not match the OTP that was \"\n \"sent to be verified.\")\n\n if self.api_key is not None:\n sig = sign_query(response_dict, self.api_key)\n if response_dict['h'].decode('base64') != sig.decode('base64'):\n raise YubiKeyVerificationError(\n \"Received a response whose signature is invalid\")\n\n return response_dict", "def verify_that_the_trust_secret_succeeded(driver):\n assert 'RPC calls succeeded' in results['output'], results['output']\n time.sleep(1)", "def verify():", "def verify_authenticaion_validation(self, provider_id, old_validation_details, authtypes_to_verify):\n def validated(old, new):\n \"\"\" Returns True if the validation timestamp, valid or invalid, is different\n from the old validation timestamp, False otherwise\n \"\"\"\n return ((old.get('last_valid_on'), old.get('last_invalid_on')) !=\n (new.get('last_valid_on'), new.get('last_invalid_on')))\n\n for i in range(ManageIQProvider.ITERATIONS):\n new_validation_details = self.auths_validation_details(provider_id)\n\n validations_done = True\n all_done_valid = \"Valid\" # Out of the (re)validated ones.\n details = {}\n for t in authtypes_to_verify:\n old = old_validation_details.get(t, {})\n new = new_validation_details.get(t, {})\n if not validated(old, new):\n details[t] = \"Validation didn't complete\"\n validations_done = 
False\n else:\n details[t] = (new.get('status'), new.get('status_details'))\n if new.get('status') != 'Valid':\n all_done_valid = \"Invalid\"\n\n if validations_done:\n return all_done_valid, details\n time.sleep(ManageIQProvider.WAIT_TIME)\n\n return \"Timed out\", details", "def mitm(self):\n DEBUG = GLOBAL_DEBUG and True\n if DEBUG: print \"mitm()\"\n\n auth_token_num = struct.unpack(\"<L\", self.state[\"auth_token\"])[0]\n req_dst = self._next_expected_dst()\n resp_dst = self._next_expected_dst()\n\n # The CLIENT and SERVER have gone about their negotiation; now it's \n # computation (mitm) time.\n\n #### Anticipate REQUEST contents from CLIENT\n x = struct.unpack(\"<L\", self._prng_get_bytes(SZ_UINT32_T))[0]\n y = struct.unpack(\"<L\", self._prng_get_bytes(SZ_UINT32_T))[0]\n true_op = struct.unpack(\"B\", self._prng_get_bytes(SZ_UINT8_T))[0]\n if DEBUG: print \"true_op = %s\" % true_op \n if OP_MOD == true_op: true_op = OP_ADD\n true_op %= 4\n true_req = Msg(\n dst = req_dst,\n x = x, \n y = y, \n op = true_op, \n result = 0)\n\n # Read & verify REQUEST from the TAP.\n if DEBUG: true_req.dump()\n self.read(length=SZ_MSG_BEFORE_GAP, expect=str(true_req)[:SZ_MSG_BEFORE_GAP])\n self.read(length=SZ_MSG_GAP) # bury GAP\n self.read(length=SZ_MSG_AFTER_GAP, expect=str(true_req)[-SZ_MSG_AFTER_GAP:])\n\n # Compute the true RESPONSE.\n true_answer = do_op(true_req)\n if DEBUG: print \"true_answer = %d\" % true_answer\n true_result_pt = struct.pack(\"<Q\", (uint64_t(true_answer << 32) | auth_token_num))\n true_result_ct = self._encrypt_block(v=true_result_pt, k=self.state[\"enckey\"])\n if DEBUG: print \"[D] true_result_pt = 0x%016lx; true_result_ct = 0x%016lx\" % \\\n (struct.unpack(\"<Q\", true_result_pt)[0], struct.unpack(\"<Q\", true_result_ct)[0])\n true_resp = Msg(\n dst = resp_dst,\n x = 0,\n y = 0,\n op = true_req.op,\n result = struct.unpack(\"<Q\", true_result_ct)[0])\n\n # Decide if we're going to modify the REQUEST to the SERVER.\n if randint(0, 1):\n # We're not going to modify. 
This one is easy.\n if DEBUG: print \"[D] passing REQUEST verbatim\"\n self.write(str(true_req))\n self.read(length=SZ_MSG_BEFORE_GAP, expect=str(true_resp)[:SZ_MSG_BEFORE_GAP])\n self.read(length=SZ_MSG_GAP) # bury GAP\n self.read(length=SZ_MSG_AFTER_GAP, expect=str(true_resp)[-SZ_MSG_AFTER_GAP:])\n self.write(str(true_resp))\n\n else:\n # Compute the modified REQUEST.\n mod_req = modify_req(true_req)\n\n # Compute the modified RESPOSNE.\n mod_answer = do_op(mod_req)\n mod_result_pt = struct.pack(\"<Q\", (uint64_t(mod_answer << 32) | auth_token_num))\n mod_result_ct = self._encrypt_block(v=mod_result_pt, k=self.state[\"enckey\"])\n if DEBUG: print \"[D] mod_result_pt = 0x%016lx; mod_result_ct = 0x%016lx\" % \\\n (struct.unpack(\"<Q\", mod_result_pt)[0], struct.unpack(\"<Q\", mod_result_ct)[0])\n mod_resp = Msg(\n dst = resp_dst,\n x = 0,\n y = 0,\n op = mod_req.op,\n result = struct.unpack(\"<Q\", mod_result_ct)[0])\n\n if DEBUG: print \"[D] REQUEST modified\"\n self.write(str(mod_req))\n self.read(length=SZ_MSG_BEFORE_GAP, expect=str(mod_resp)[:SZ_MSG_BEFORE_GAP])\n self.read(length=SZ_MSG_GAP) # bury GAP\n self.read(length=SZ_MSG_AFTER_GAP, expect=str(mod_resp)[-SZ_MSG_AFTER_GAP:])\n self.write(str(true_resp)) # CLIENT still expected true response.", "def handshakeServer(self, verifierDB=None,\r\n certChain=None, privateKey=None, reqCert=False,\r\n sessionCache=None, settings=None, checker=None,\r\n reqCAs = None, \r\n tacks=None, activationFlags=0,\r\n nextProtos=None, anon=False):\r\n for result in self.handshakeServerAsync(verifierDB,\r\n certChain, privateKey, reqCert, sessionCache, settings,\r\n checker, reqCAs, \r\n tacks=tacks, activationFlags=activationFlags, \r\n nextProtos=nextProtos, anon=anon):\r\n pass", "def verify(verification_code):\n verification.verify(verification_code)", "def do_mfa_verify(mfa_info):\n headers = {\n \"Content-Type\": \"application/json\",\n \"Origin\": \"https://%s.auth0.com\"%TENANT,\n \"Authorization\": \"Bearer %s\"%mfa_info[\"requestToken\"],\n \"x-global-tracking-id\": mfa_info[\"globalTrackingId\"]\n }\n request = urllib.request.Request(\n \"%s/api/start-flow\"%mfa_info[\"mfaServerUrl\"],\n data=json.dumps({ \"state_transport\": \"polling\" }).encode(),\n method=\"POST\",\n headers=headers)\n try:\n response = urllib.request.urlopen(request)\n result = response.read().decode()\n except urllib.error.HTTPError as e:\n error = e.read().decode()\n raise RuntimeError(\"MFA start flow error: %s\"%error) from None\n mfa_flow_info = json.loads(result)\n mfa_transaction_token = mfa_flow_info[\"transaction_token\"]\n # print(mfa_flow_info)\n # print(mfa_transaction_token)\n\n mfa_code = input(\"Please enter your MFA verification code: \")\n mfa_payload = {\n \"code\": mfa_code,\n \"type\": \"manual_input\"\n }\n mfa_payload_json = json.dumps(mfa_payload).encode()\n headers = {\n \"Content-Type\": \"application/json\",\n \"Origin\": \"https://%s.auth0.com\"%TENANT,\n \"Authorization\": \"Bearer %s\"%mfa_transaction_token,\n \"x-global-tracking-id\": mfa_info[\"globalTrackingId\"]\n }\n request = urllib.request.Request(\n \"%s/api/verify-otp\"%mfa_info[\"mfaServerUrl\"],\n data=mfa_payload_json,\n method=\"POST\",\n headers=headers)\n try:\n response = urllib.request.urlopen(request)\n result = response.read().decode()\n except urllib.error.HTTPError as e:\n error = e.read().decode()\n raise RuntimeError(\"MFA verify error: %s\"%error) from None\n # print(result)\n\n headers = {\n \"Origin\": \"https://%s.auth0.com\"%TENANT,\n \"Authorization\": \"Bearer 
%s\"%mfa_transaction_token,\n \"x-global-tracking-id\": mfa_info[\"globalTrackingId\"]\n }\n request = urllib.request.Request(\n \"%s/api/transaction-state\"%mfa_info[\"mfaServerUrl\"],\n method=\"POST\",\n headers=headers)\n try:\n response = urllib.request.urlopen(request)\n result = response.read().decode()\n except urllib.error.HTTPError as e:\n error = e.read().decode()\n raise RuntimeError(\"Get MFA result error: %s\"%error) from None\n mfa_result = json.loads(result)\n if mfa_result[\"state\"] != \"accepted\":\n raise RuntimeError(\"MFA verification is not accepted: %s\"%result)\n # print(mfa_result)\n\n return mfa_result", "def verify(self, request, times=None):\n data = {\n 'httpRequest': request.dict()\n }\n if times:\n data['times'] = vars(times)\n else:\n data['times'] = {\n 'count': 1,\n 'exact': True\n }\n req = requests.put('{}/verify'.format(self._get_url()),\n headers=self.headers,\n data=json.dumps(data))\n resp = {\n 'status': 'OK',\n 'reason': req.content.decode('utf-8'),\n 'found': None\n }\n if req.status_code == 202:\n resp['reason'] = None\n resp['found'] = True\n elif req.status_code == 406:\n resp['found'] = False\n else:\n resp['status'] = 'ERROR'\n\n return resp", "def handshakeServer(self, verifierDB=None,\n certChain=None, privateKey=None, reqCert=False,\n sessionCache=None, settings=None, checker=None,\n reqCAs = None, \n tacks=None, activationFlags=0,\n nextProtos=None, anon=False, alpn=None, sni=None):\n for result in self.handshakeServerAsync(verifierDB,\n certChain, privateKey, reqCert, sessionCache, settings,\n checker, reqCAs,\n tacks=tacks, activationFlags=activationFlags,\n nextProtos=nextProtos, anon=anon, alpn=alpn, sni=sni):\n pass", "def verify(r, s, message, G, Q, timing_list = None):\r\n\r\n if timing_list == None:\r\n return _verify(r, s, long(sha256(message).hexdigest(), 16), G, Q)\r\n\r\n begin_time = clock()\r\n result = _verify(r, s, message, G, Q)\r\n timing_list.append(clock() - begin_time)\r\n\r\n return result", "def verify_auth_token(shared_key, eppn, token, nonce, timestamp, generator=sha256):\n # check timestamp to make sure it is within 300 seconds from now\n logger.debug(\"Trying to authenticate user {!r} with auth token {!r}\".format(eppn, token))\n # check timestamp to make sure it is within -300..900 seconds from now\n now = int(time.time())\n ts = int(timestamp, 16)\n if (ts < now - 300) or (ts > now + 900):\n logger.debug(\"Auth token timestamp {!r} out of bounds ({!s} seconds from {!s})\".format(\n timestamp, ts - now, now))\n raise HTTPForbidden(_('Login token expired, please await confirmation e-mail to log in.'))\n # verify there is a long enough nonce\n if len(nonce) < 16:\n logger.debug(\"Auth token nonce {!r} too short\".format(nonce))\n raise HTTPForbidden(_('Login token invalid'))\n\n expected = generator(\"{0}|{1}|{2}|{3}\".format(\n shared_key, eppn, nonce, timestamp)).hexdigest()\n # constant time comparision of the hash, courtesy of\n # http://rdist.root.org/2009/05/28/timing-attack-in-google-keyczar-library/\n if len(expected) != len(token):\n logger.debug(\"Auth token bad length\")\n raise HTTPForbidden(_('Login token invalid'))\n result = 0\n for x, y in zip(expected, token):\n result |= ord(x) ^ ord(y)\n logger.debug(\"Auth token match result: {!r}\".format(result == 0))\n return result == 0" ]
[ "0.5752108", "0.5737716", "0.56494606", "0.5645329", "0.5643826", "0.5574296", "0.5451564", "0.5326181", "0.53075504", "0.5299279", "0.52365446", "0.52112764", "0.5197768", "0.51941586", "0.51802087", "0.51605034", "0.514498", "0.51412606", "0.5097546", "0.508744", "0.5070752", "0.5061602", "0.50147873", "0.49973905", "0.49526533", "0.49477452", "0.49415675", "0.49325415", "0.49150696", "0.48899513" ]
0.68441504
0
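The positive passage above builds the verification query and signs it with `sign_query(query_dict, self.api_key)`, and its query text warns that signature validation can fail. A sketch of how such a Yubico-style signature is commonly computed — HMAC-SHA1 over the sorted `key=value` pairs, base64-encoded, as the `sorted_urlencode`/`sign` negatives suggest. This is an assumption about what `sign_query` does, not its actual implementation:

```python
import base64
import hmac
from hashlib import sha1

def sign_query(query_dict: dict, api_key: bytes) -> bytes:
    # Sort the parameters, skip any existing signature field, join as key=value pairs.
    message = "&".join(f"{k}={query_dict[k]}" for k in sorted(query_dict) if k != "h")
    # HMAC-SHA1 with the shared API key, then base64-encode the digest.
    digest = hmac.new(api_key, message.encode("utf-8"), sha1).digest()
    return base64.b64encode(digest)
```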
r"""Makes a gif using a list of images.
def make_gif(image_list, gif_name):
    if not gif_name.endswith(".gif"):
        gif_name += ".gif"
    imageio.mimsave(gif_name, [imageio.imread(x) for x in image_list])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_gif():\n anim_file = 'sample/training.gif'\n\n with imageio.get_writer(anim_file, mode='I') as writer:\n filenames = glob.glob('sample/*.jpg')\n filenames = sorted(filenames, key=lambda filename: int(filename[11:-4]))\n for filename in filenames:\n image = imageio.imread(filename)\n writer.append_data(image)\n image = imageio.imread(filename)\n writer.append_data(image)", "def create_gif(base_folder):\n img_list = []\n search_path = glob.glob(os.path.join(base_folder, '*.png'))\n search_path.sort()\n for f in search_path:\n im = Image.open(f)\n img_list.append(im)\n save_file = os.path.join(base_folder, 'animated_gif.gif')\n img_list[0].save(save_file,\n save_all=True, append_images=img_list[1:], optimize=False, duration=180, loop=0)", "def compose_in_gif(images, output_file, delay):\n images[0].save(\n output_file, \n format='GIF', append_images=images[1:], \n save_all=True, duration=delay, loop=0,\n )", "def create_gif(trajectory):\r\n gif = []\r\n for i in range(len(trajectory)):\r\n p, s = trajectory[i][0]\r\n filename = 'images/car{}.jpeg'.format(i)\r\n save_caronthehill_image(p, s, filename)\r\n img = imageio.imread(filename)\r\n height, width, layers = img.shape\r\n gif.append(img)\r\n \r\n \r\n imageio.mimsave(\"visualization.gif\", gif, 'GIF')", "def generate_gif(directory: (\"Folder name\", \"positional\"),\n image_format: ('Image format', 'positional') = '.png',\n print_file_names=False):\n import imageio\n from glob import glob\n from natsort import natsorted\n\n images = []\n # Create a list of file names in the specified directory\n filenames = glob(directory + '/*' + image_format)\n\n filenames = natsorted(filenames, key=lambda y: y.lower())\n # Sort the list 'filenames' using the traditional method.\n # Traditional method -\n # isolate the entire first number in the string, then sort by that number\n # If this step is not included,\n # files will be sorted like so: 0, 100, 110, 200, 3, 420, etc...\n\n if print_file_names: # For troubleshooting\n for i in filenames:\n print(i)\n\n for filename in filenames:\n images.append(imageio.imread(filename))\n # Append each file to the list that will become the gif\n\n imageio.mimsave(directory + '.gif', images)\n # Save the gif as the name of the directory\n # that the images were generated from\n return", "def create_gifs(folder, time_per_image=0.1):\n # Retrieve images paths\n images_dict = defaultdict(list)\n folders_sorting_key = lambda s: int(s.split(\"_\")[-1])\n obs_folders = [f for f in os.listdir(folder) if f.split(\"_\")[0] == \"observations\"]\n obs_folders = sorted(obs_folders, key=folders_sorting_key)\n for obs_folder in obs_folders:\n for f in os.listdir(os.path.join(folder, obs_folder)):\n image_name = \"_\".join(f.split(\"_\")[:-1])\n images_dict[image_name].append(os.path.join(folder, obs_folder, f))\n # Create gifs\n for name in images_dict:\n target = os.path.join(folder, name + \".gif\")\n LungsLoader._create_gif(images_dict[name], target, time_per_image)", "def save_gif(frames):\n print(\"Saving gif images!\")\n for i in range(len(frames)):\n im_out_path = \"gif/gif_emilie_will_\" + str(i) + \".png\"\n plt.imsave(im_out_path, frames[i])", "def animated_gif(folder_with_images, gif_filename, loop_duration, size):\r\n\r\n\tos.chdir(folder_with_images) # changes directory to the folder with the images\r\n\r\n\tpng_files = []\r\n\r\n\t# get list of png files in folder\r\n\tfor fn in os.listdir(folder_with_images):\r\n\t\tif 
fn.endswith('.png'):\r\n\t\t\tpng_files.append(fn)\r\n\r\n\tsort_nicely(png_files)\r\n\r\n\tprint(png_files)\r\n\r\n\t# number of png_files\r\n\tnum_pngs = len(png_files)\r\n\tpng_time = float(loop_duration)/ float(num_pngs)\r\n\r\n\timages = [Image.open(fn) for fn in png_files]\r\n\tdim = (size, size) # change sizes for the image file dimension\r\n\t#for im in images:\r\n\t#\tim.thumbnail(dim, Image.ANTIALIAS)\r\n\r\n\toutput_file = os.path.join(folder_with_images, gif_filename) # path for output file\r\n\twriteGif(output_file, images, png_time) # writes out GIF\r", "def make_gifs_test(title, sort, path):\n images = os.listdir(path)\n generated_images = []\n\n for i in range(len(images)):\n file = os.path.join(path, '%s_%s_Results_%03d.png' % (title, sort, i+1))\n generated_images.append(imageio.imread(file))\n\n imageio.mimsave(path + '{}_{}_Test_Results.gif'.format(sort, title), generated_images, fps=2)\n print(\"{} gif file is generated.\".format(title))", "def make_gif():\n if MIGRATION:\n import imageio\n for n, JPG_DIR in enumerate(JPG_DIRS):\n images, image_file_names = [], []\n for file_name in os.listdir(JPG_DIR):\n if file_name.endswith('.jpg'):\n image_file_names.append(file_name) \n sorted_files = sorted(image_file_names, key=lambda y: int(y.split('_')[1]))\n for i in range(len(sorted_files)): \n file_path = os.path.join(JPG_DIR, sorted_files[i])\n images.append(imageio.imread(file_path))\n imageio.mimsave(FNAME.rsplit('.', 1)[0] + '_migration' + str(n) + '.gif', images, 'GIF', loop=1, fps=FPS)", "def gif(filename, array, fps=10, scale=1.0):\n # ensure that the file has the .gif extension\n filename = filename + '.gif'\n\n # copy into the color dimension if the images are black and white\n if array.ndim == 3:\n array = array[..., np.newaxis] * np.ones(3)\n\n # make the moviepy clip\n clip = ImageSequenceClip(list(array), fps=fps).resize(scale)\n clip.write_gif(filename, fps=fps)\n return True", "def create_gif(self, number_of_images=80, duration=0.1, output_filename=\"plot.gif\"):\n if self.quick_plot is None:\n self.quick_plot = pybamm.QuickPlot(self._solution)\n\n self.quick_plot.create_gif(\n number_of_images=number_of_images,\n duration=duration,\n output_filename=output_filename,\n )", "def make_GIF(image_path: Union[Path, str]) -> None:\n import imageio\n from pygifsicle import optimize\n\n if isinstance(image_path, str):\n image_path = Path(image_path)\n\n image_dir = image_path.parent\n image_file = image_path.stem\n gif_path = image_dir / f\"{image_file}.gif\"\n gif_path = Path(\"./xxxx.gif\")\n with imageio.get_writer(gif_path, mode='I') as writer:\n img_files = sorted((img_file for img_file in image_dir.glob('*.png')))\n for img_file in img_files:\n writer.append_data(imageio.imread(img_file))\n print(f\"{len(img_files)} images loaded from {image_path}\")\n try:\n optimize(gif_path)\n except Exception:\n print(\"gifsicle not installed\")", "def make_gif(im_dir, out_file, pattern='*.png', fps=10):\n im_files = glob.glob(os.path.join(im_dir, pattern))\n if len(im_files) == 0:\n raise ValueError(f'No images found in {im_dir}!')\n \n writer = imageio.get_writer(out_file, mode='I', fps=fps)\n for im_file in im_files:\n im = imageio.imread(im_file)\n writer.append_data(im)\n writer.close()", "def makeGif(imgPath):\r\n import imageio\r\n filenames = os.listdir(imgPath)\r\n filenames.sort()\r\n images = []\r\n for filename in filenames:\r\n images.append(imageio.imread(os.path.join(imgPath, filename)))\r\n imageio.mimsave(os.path.join(imgPath, \"sharpVid.gif\"), images, 
duration=0.2)", "def make_gifs_train(title, path):\n images = os.listdir(path)\n generated_images = []\n\n for i in range(len(images)):\n file = os.path.join(path, '%s_Samples_Epoch_%03d.png' % (title, i+1))\n generated_images.append(imageio.imread(file))\n\n imageio.mimsave(path + '{}_Train_Results.gif'.format(title), generated_images, fps=2)\n print(\"{} gif file is generated.\".format(title))", "def build_list_gif(self, pathgif, nocv2 = True):\n dsize = (self.size, self.size)\n gif = mimread(pathgif)\n # convert form RGB to BGR\n listcv2 = [cv2.cvtColor(img, cv2.COLOR_RGB2BGR) for img in gif]\n listgif = []\n for img in listcv2:\n listgif.append(cv2.resize(img, dsize))\n if nocv2:\n return self.convert_list_images(listgif)\n else:\n return listgif", "def write_gifs(self, clip, gifs_dir, **kwargs):\n for start, end, _, _ in self:\n name = \"%s/%08d_%08d.gif\" % (gifs_dir, 100 * start, 100 * end)\n clip.subclip(start, end).write_gif(name, **kwargs)", "def gif_generation(orig_label_path, bound_data_path):\n for sample in os.listdir(bound_data_path):\n if not sample.startswith('.') and osp.isdir(osp.join(bound_data_path, sample)):\n sample_path = osp.join(bound_data_path, sample)\n for artery in os.listdir(sample_path):\n orig_label_pick_path = osp.join(orig_label_path, sample, artery, 'data.pkl')\n bound_pick_path = osp.join(bound_data_path, sample, artery, 'data.pkl')\n\n # function to save result of each artery into gif\n save_gif_artery(orig_label_pick_path, bound_pick_path)", "def animate(images):\n images = np.array(images)\n converted_images = np.clip(images * 255, 0, 255).astype(np.uint8)\n imageio.mimsave('./animation.gif', converted_images)\n return embed.embed_file('./animation.gif')", "def to_gif(diagram, *diagrams, **params): # pragma: no cover\n path = params.get(\"path\", None)\n timestep = params.get(\"timestep\", 500)\n loop = params.get(\"loop\", False)\n steps, frames = (diagram, ) + diagrams, []\n path = path or os.path.basename(NamedTemporaryFile(\n suffix='.gif', prefix='tmp_', dir='.').name)\n with TemporaryDirectory() as directory:\n for i, _diagram in enumerate(steps):\n tmp_path = os.path.join(directory, '{}.png'.format(i))\n _diagram.draw(path=tmp_path, **params)\n frames.append(Image.open(tmp_path))\n if loop:\n frames = frames + frames[::-1]\n frames[0].save(path, format='GIF', append_images=frames[1:],\n save_all=True, duration=timestep,\n **{'loop': 0} if loop else {})\n try:\n from IPython.display import HTML\n return HTML('<img src=\"{}\">'.format(path))\n except ImportError:\n return '<img src=\"{}\">'.format(path)", "def generate_gif(frames, reward, path, number=None, evaluation=False):\n for i, frame in enumerate(frames):\n frames[i] = resize(frame, (420, 320, 3),\n order=0, preserve_range=True).astype(np.uint8)\n if evaluation:\n path += '/atari-step-{}-reward-{}.gif'.format(number, reward)\n else:\n path += '/atari-play-reward-{}.gif'.format(reward)\n imageio.mimsave(path, frames, duration=1/30)", "def animated_gif(file):\n\n\timport os.path\n\timport Image\n\tfrom conf import *\n\tfrom util_errors import gen_error\n\tANIMGIF_TAGID = 2\n\n\tfilepath = os.path.join(PROBATION_DIR, file[\"filename\"])\n\ttry:\n\t\timg = Image.open(filepath)\n\t\ttry:\n\t\t\timg.seek(1)\n\t\texcept:\n\t\t\tpass\n\t\telse:\n\t\t\tdel(img)\n\t\t\treturn [ANIMGIF_TAGID]\n\texcept Exception, data:\n\t\tgen_error('GENERIC', \"File couldn't be operated on, check perms -- \" + str(data))\n\n\tdel(img)\n\treturn []", "def writeGif(filename, images, duration=0.1, loops=0, 
dither=1):\n \n if PIL is None:\n raise RuntimeError(\"Need PIL to write animated gif files.\")\n \n AD = Image.ADAPTIVE\n images2 = []\n \n # convert to PIL\n for im in images:\n \n if isinstance(im,Image.Image):\n images2.append( im.convert('P', palette=AD, dither=dither) )\n \n elif np and isinstance(im, np.ndarray):\n if im.dtype == np.uint8:\n pass\n elif im.dtype in [np.float32, np.float64]:\n im = (im*255).astype(np.uint8)\n else:\n im = im.astype(np.uint8)\n # convert\n if len(im.shape)==3 and im.shape[2]==3:\n im = Image.fromarray(im,'RGB').convert('P', palette=AD, dither=dither)\n elif len(im.shape)==2:\n im = Image.fromarray(im,'L').convert('P', palette=AD, dither=dither)\n else:\n raise ValueError(\"Array has invalid shape to be an image.\")\n images2.append(im)\n \n else:\n raise ValueError(\"Unknown image type.\")\n \n # check duration\n if hasattr(duration, '__len__'):\n if len(duration) == len(images2):\n durations = [d for d in duration]\n else:\n raise ValueError(\"len(duration) doesn't match amount of images.\")\n else:\n durations = [duration for im in images2]\n \n \n # open file\n fp = open(filename, 'wb')\n \n # write\n try:\n n = _writeGifToFile(fp, images2, durations, loops)\n print n, 'frames written'\n finally:\n fp.close()", "def display_images(filenames):\n for filename in filenames:\n display(Image(filename))", "def saveGIFBatch(directory, path, name=''):\n # for each frame in batch\n images = []\n for filename in directory:\n print(filename)\n images.append(imageio.imread(filename))\n\n name_gif = path + '/' + name + '.gif'\n imageio.mimsave(name_gif, images)", "def gif(self, num_games, slow_mult=2, delete_pics=True,\n kill_limit_per_game=1000):\n slow_mult = int(slow_mult)\n gif_name = \"gifs\\\\\"+self.name\n\n try:\n os.remove(gif_name+'.gif')\n except Exception:\n pass\n\n kill_limit = kill_limit_per_game * num_games\n\n c = 0\n e = 0\n while c < kill_limit and e < num_games:\n self.env.reset()\n game_over = False\n # get initial input\n input_t = self.env.observe()\n\n plt.imshow(self.env.draw_state(),\n interpolation='none', cmap='gray')\n plt.savefig(\"gifs\\\\%d.png\" % c)\n plt.close()\n c += 1\n while not game_over and c < kill_limit:\n input_tm1 = input_t\n\n # get next action\n q = self.model.predict(input_tm1)\n action = np.argmax(q[0])\n\n # apply action, get rewards and new state\n input_t, reward, game_over = self.env.act(action)\n\n plt.imshow(self.env.draw_state(),\n interpolation='none', cmap='gray')\n plt.savefig(\"gifs\\\\%d.png\" % c)\n plt.close()\n c += 1\n\n e += 1\n\n # Making a temporary gif and slowing it down seems to be the only way I\n # can make a slower gif. 
For some reason the command works in cmd but\n # not here so i guess I am stuck with fast gifs.\n \"\"\"\n call1 = ['ffmpeg', '-i', '%d.png', gif_name+'_temp.gif']\n subprocess.call(call1)\n call2 = ['ffmpeg', '-i', gif_name+'_temp.gif', '-filter:v',\n '\"setpts={}.0*PTS\"'.format(slow_mult), gif_name+'.gif']\n subprocess.call(call2, shell=True)\n # ffmpeg -i catch_small_model.gif -filter:v \"setpts=3.0*PTS\" catch_small_model_slow.gif\n print(call2)\n try:\n os.remove(gif_name+'_temp.gif')\n except Exception as e:\n print(e)\n \"\"\"\n subprocess.call(['ffmpeg', '-i', 'gifs\\\\%d.png', gif_name+'.gif'])\n\n if delete_pics:\n for i in range(c):\n try:\n os.remove(\"gifs\\\\%d.png\" % i)\n except Exception as e:\n print(e)", "def display_frames_as_gif(frames):\n fig=e.cube.show_layout(frames[0]) \n print(\"Drawn\")\n def animate(i):\n return e.cube.update_plot(frames[i])\n anim = animation.FuncAnimation(fig, animate, frames = len(frames), interval=50,blit=True)", "def _writeGifToFile(fp, images, durations, loops):\n \n # Obtain palette for all images and count each occurance\n palettes, occur = [], []\n for im in images: \n palettes.append( getheader(im)[1] )\n for palette in palettes: \n occur.append( palettes.count( palette ) )\n \n # Select most-used palette as the global one (or first in case no max)\n globalPalette = palettes[ occur.index(max(occur)) ]\n \n # Init\n frames = 0\n firstFrame = True\n \n \n for im, palette in zip(images, palettes):\n \n if firstFrame:\n # Write header\n \n # Gather info\n header = getheaderAnim(im)\n appext = getAppExt(loops)\n \n # Write\n fp.write(header)\n fp.write(globalPalette)\n fp.write(appext)\n \n # Next frame is not the first\n firstFrame = False\n \n if True:\n # Write palette and image data\n \n # Gather info\n data = getdata(im) \n imdes, data = data[0], data[1:] \n graphext = getGraphicsControlExt(durations[frames])\n # Make image descriptor suitable for using 256 local color palette\n lid = getImageDescriptor(im) \n \n # Write local header\n if palette != globalPalette:\n # Use local color palette\n fp.write(graphext)\n fp.write(lid) # write suitable image descriptor\n fp.write(palette) # write local color table\n fp.write('\\x08') # LZW minimum size code\n else:\n # Use global color palette\n fp.write(graphext)\n fp.write(imdes) # write suitable image descriptor\n \n # Write image data\n for d in data:\n fp.write(d)\n \n # Prepare for next round\n frames = frames + 1\n \n fp.write(\";\") # end gif\n return frames", "def save_GIF(ht, name=\"trajectory\"):\n # Generation of images\n counter = 0\n images = []\n for e in range(0, len(ht), 3):\n p = ht[e][0]\n s = ht[e][1]\n save_caronthehill_image(p, s, \"image\\\\state\" + str(counter) + \".png\")\n images.append(imageio.imread(\"image\\\\state\" + str(counter) + \".png\"))\n counter += 1\n imageio.mimsave(\"{}.gif\".format(name), images)" ]
[ "0.78579974", "0.7659993", "0.7557394", "0.75059664", "0.72135156", "0.7159185", "0.7140775", "0.7111952", "0.69703233", "0.6958537", "0.694323", "0.68651026", "0.68095165", "0.68038476", "0.6797115", "0.6718196", "0.6681492", "0.6675865", "0.66499966", "0.66303796", "0.6558839", "0.6546823", "0.65313834", "0.6452449", "0.63927597", "0.6367029", "0.63311577", "0.6307722", "0.6303455", "0.6215578" ]
0.818792
0
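A usage sketch for the positive passage above: collect frames, sort them, and let `imageio` assemble the GIF exactly as `make_gif` does. The directory and file names are hypothetical, and `imageio` must be installed:

```python
from glob import glob
import imageio

# Zero-padded names keep the lexicographic sort in frame order, e.g. frames/000.png, frames/001.png, ...
frames = sorted(glob("frames/*.png"))
imageio.mimsave("training.gif", [imageio.imread(f) for f in frames])
```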
wrapper function for starting a net.Server connected to `pipe`
async def net_server(pipe):
    server = await net.Server(pipe, host="0.0.0.0", port=8080)
    return await server.wait_closed()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def net_proc(pipe):\n asyncio.run(net_server(pipe))", "def new_server(self, name, pipeline, port=None):\n if port is None:\n port = self.next_port\n self.next_port += 1\n\n self.servers[name] = port\n\n args = [\"owl-server\",\"--port\", str(port)] + pipeline.split()\n proc = subprocess.Popen(args)\n self.processes[port] = proc\n return port", "def setup() -> socket:\n # Create a TCP/IP socket\n server = socket(AF_INET, SOCK_STREAM)\n server.setblocking(False)\n\n def sigint_handler(sig, frame):\n \"\"\"\n Catches a SIGINT and cleans up\n \"\"\"\n print(\"[i] Caught SIGINT, cleaning up...\")\n server.close()\n exit(0)\n\n signal(SIGINT, sigint_handler)\n\n # Parse arguments\n if len(argv) != 2:\n print(f\"Usage\\n\\t{argv[0]} <port>\")\n exit(1)\n\n try:\n server_address = ('', int(argv[1]))\n print(f'starting up on port {server_address[1]}', file=stderr)\n server.bind(server_address)\n except ValueError as e:\n print(f\"Error while trying to parse arguments {e}\")\n exit(1)\n except OSError as e:\n print(f\"Error while trying to bind to {argv[1]}: {e.strerror}\")\n exit(1)\n\n # Listen for incoming connections\n server.listen(5)\n\n register_functions()\n\n return server", "def _start_server(cls, cmd_path, address='localhost:502',\n tags=(20, 20, 20, 20), mode=1):\n\n try:\n cmd = ModbusProtocol._start_server_cmd(cmd_path, address, tags, mode)\n server = subprocess.Popen(cmd, shell=False)\n\n return server\n\n except Exception as error:\n print('ERROR modbus _start_server: ', error)", "def main():\n s = start_server()\n accept_connection(s)", "def net( pipe, address ):\n\n # create a session queue\n queue = session.SessionQueue()\n\n # set the maximum backlog for new connections (5 is often max)\n backlog = 5\n\n # set the maximum request payload size\n max_request_size = 2048\n\n # create and configure the server socket\n sock = socket.socket( socket.AF_INET, socket.SOCK_STREAM )\n sock.bind( address )\n sock.listen( backlog )\n\n # list of input connections to poll\n poll = [ sock, pipe ]\n\n # loop execution flag\n is_running = True\n\n # daemon loop\n while is_running == True:\n\n # select next connection with available data\n try:\n inputs, outputs, excepts = select.select( poll, [], [] )\n\n # select errors\n except select.error as e:\n\n # select was interrupted by system call (SIGINT)\n if e.args[ 0 ] == errno.EINTR:\n for s in poll:\n if ( s != sock ) and ( s != pipe ):\n s.close()\n is_running = False\n break\n\n # process shut down by interactive input or application exit\n except ( KeyboardInterrupt, SystemExit ):\n for s in poll:\n if ( s != sock ) and ( s != pipe ):\n s.close()\n is_running = False\n break\n\n # loop through all new inputs\n for ready in inputs:\n\n # handle parent process messages\n if ready == pipe:\n\n # fetch the message from the pipe\n message = pipe.recv()\n\n # check for daemon shutdown message\n if message.mid == Message.QUIT:\n is_running = False\n\n # check for response data message\n elif message.mid == Message.DATA:\n\n # remove the session from the queue\n sess = queue.remove( message.sid )\n\n # send the response data to the socket\n sess[ 'sock' ].send( message.data )\n\n # close the socket\n sess[ 'sock' ].close()\n\n # handle a new connection with a network client\n elif ready == sock:\n\n # accept the new connection\n connection, address = ready.accept()\n\n # add the connection to the input polling list\n poll.append( connection )\n\n # handle data from all other connections\n else:\n\n # load the request data from the socket\n payload, 
address = ready.recvfrom( max_request_size )\n\n # data is available if the payload is not an empty string\n if len( payload ) > 0:\n\n # add request to session queue\n sid = queue.add( address = address, sock = ready )\n\n # send request to parent\n pipe.send( Message( sid = sid, data = payload ) )\n\n # remove the socket from select polling\n poll.remove( ready )\n\n # no data in payload (empty string)\n else:\n\n # close the socket\n ready.close()\n\n # remove the socket from select polling\n poll.remove( ready )\n\n # shut down the listen socket\n sock.close()\n\n # return exit code\n return 0", "def main( argv ):\n\n address = ( '', 9999 )\n\n # test client accepts a string to send and prints the response\n if len( argv ) > 1:\n sock = socket.socket( socket.AF_INET, socket.SOCK_STREAM )\n sock.connect( ( 'localhost', address[ 1 ] ) )\n sock.sendall( argv[ 1 ] )\n print sock.recv( 128 )\n sock.close()\n return 0\n\n # test server echos messages in all caps... real mature, server\n\n import multiprocessing\n\n ( p_pipe, c_pipe ) = multiprocessing.Pipe( True )\n netd = multiprocessing.Process(\n target = net,\n args = ( c_pipe, address ),\n name = 'netd'\n )\n netd.start()\n\n print 'server started, listening on port %d' % address[ 1 ]\n\n while True:\n try:\n message = p_pipe.recv()\n message.data = message.data.upper()\n p_pipe.send( message )\n except:\n break\n\n p_pipe.send( QUIT )\n\n print 'server shutting down'\n\n netd.join()\n\n # return success\n return 0", "def __run_server(self):\n os.chdir(os.path.dirname(self.server_path))\n self.server_process = subprocess.Popen([self.server_path, \\\n \"{}:{}\".format(self.args.ip, self.args.port)])", "def _start_server(cls, address, tags):\n\n try:\n cmd = EnipProtocol._start_server_cmd(address, tags)\n server = subprocess.Popen(cmd, shell=False)\n\n return server\n\n except Exception as error:\n print('ERROR enip _start_server: ', error)", "def main():\n if len(sys.argv) != 2:\n sys.exit(\"Usage: python server-python.py [Server Port]\")\n server_port = int(sys.argv[1])\n server(server_port)", "def main():\n if len(sys.argv) != 2:\n sys.exit(\"Usage: python server-python.py [Server Port]\")\n server_port = int(sys.argv[1])\n server(server_port)", "def run_server(port, create):\r\n host = '' # all available network interfaces\r\n # create an internet socket for TCP protocol\r\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n s.bind((host, port))\r\n s.listen(True) # listen for incoming connections\r\n print 'listening on port', port\r\n while True:\r\n conn, addr = s.accept() # accept a connection\r\n new_connection = HandleConnection(conn, addr, create)\r\n new_connection.start()\r\n # handle_connection(conn, addr, create)\r\n s.close() # can't get here\r", "def main():\n print(\"Starting python server...\")\n\n # Set address to localhost\n address = \"tcp://127.0.0.1:\" + parse_port()\n\n # Start server with class API as \n server = zerorpc.Server(API.API())\n server.bind(address)\n\n print(\"Server started running on {}\".format(address))\n\n # Blocking command. 
Keeps server running\n server.run()", "def __init__(self, *args, **kwargs):\n mp.Process.__init__(self)\n self._args = args\n self._kwargs = kwargs\n self._host_conn, self._proc_conn = mp.Pipe()\n self.daemon = True\n self.start()\n reply = self._host_conn.recv()\n if isinstance(reply, Exception):\n raise reply", "def setup_for_run(self):\n self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.server.bind((self.ip_address, self.port))\n self.server.listen(100)", "def start_server(host, port, handler):\n httpd = socketserver.TCPServer((host, port), handler)\n print_server_message(host, port, handler)\n httpd.serve_forever() # This is stopped by using the handler", "def __init__(self, type, host, port):\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n if type == \"server\":\n self._socket.bind((host, port))\n self._socket.listen(1)\n self._socket.setblocking(0)", "def main():\n return run_server(**parse_server_args())", "def main_with_fork():\n\n\t# Make sure we got enough arguments\n\tif len(sys.argv) < 2:\n\t\tprint(\"Usage: %s <port number>\" %sys.argv[0], file=sys.stderr)\n\t\texit(1)\n\n\t# Validate port number\n\ttry:\n\t\tport = int(sys.argv[1])\n\t\tif port < 1 or port > 65535:\n\t\t\traise ValueError()\n\texcept ValueError:\n\t\tprint(\"Invalid port\")\n\t\texit(1)\n\n\tserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\tserver.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\tserver.bind((\"\", port))\n\tserver.listen(10)\n\n\t# Catch SIGINT so we can exit cleanly\n\ttry:\n\t\twhile True:\n\t\t\tconn, addr = server.accept()\n\n\t\t\t# Fork a child process to handle an incoming connection\n\t\t\tpid = os.fork()\n\n\t\t\tif pid == 0:\n\t\t\t\t# We are in the child process\n\n\t\t\t\t# Print client address\n\t\t\t\tprint(\"Connection from: %s\" %str(addr))\n\n\t\t\t\t# Handle the connection\n\t\t\t\thandle_connection(conn)\n\n\t\t\t\t# Close the file descriptor and exit\n\t\t\t\tconn.close()\n\t\t\t\texit(0)\n\t\t\telse:\n\t\t\t\t# We are in the parent process\n\n\t\t\t\t# Clost the file descriptor as the\n\t\t\t\t# child handles the connection\n\t\t\t\tconn.close()\n\n\t\t\t\t# Continue to handle new connections\n\t\t\t\tcontinue\n\n\texcept KeyboardInterrupt:\n\t\t# Close the server socket and exit\n\t\tserver.close()\n\t\texit(0)", "def setup_server(port=0, verbose=False):\r\n\r\n host = gethostname()\r\n sock = socket(AF_INET, SOCK_STREAM)\r\n try:\r\n sock.bind((host, port))\r\n except error as msg:\r\n raise error(\"Could not open Socket on server: \" + str(msg))\r\n sock.listen(5) # max num of queued connections usually [1..5]\r\n if verbose:\r\n print \"Server listening on %s\" % str(sock.getsockname())\r\n return sock", "def start_server():\n server.bind(constants.ADDRESS)\n server.listen()\n print(\"Server listening on: \" + constants.HOST + \" on port \" + str(constants.PORT) + \"...\")", "def _start_server():\n args = [sys.executable] + sys.argv\n args.insert(args.index('wserver'), 'server')\n args.remove('wserver')\n pid = os.spawnv(os.P_NOWAIT, sys.executable, args)\n return pid", "def create_server_socket(local_port, verbose):\r\n \r\n socket_in = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n socket_in.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # deal with a socket in TIME_WAIT state\r\n\r\n if verbose:\r\n print(' binding on local port %d to accept a remote 
connection' % local_port)\r\n \r\n try:\r\n socket_in.bind(('', local_port))\r\n except:\r\n raise IOError('local port %d already in use by your group or the referee' % local_port)\r\n socket_in.listen(1)\r\n \r\n if verbose:\r\n print(' done -> can now accept a remote connection on local port %d\\n' % local_port)\r\n \r\n return socket_in", "def init(self, args):\n self.log_manager = LogManager(args.logdir, args.idle, args.trust_blindly)\n if is_a_socket(sys.stdin.fileno()):\n sock = socket.fromfd(sys.stdin.fileno(), socket.AF_UNIX, socket.SOCK_STREAM)\n sys.stdin.close()\n else:\n sock = args.socket\n self.server = Server(self.log_manager, sock)", "def run(self):\n server = TCPServer((self.host, self.port), TCPHandler)\n server.lymphocytes_getter = self.lymphocytes_getter\n\n #runs forever - so make this thread daemon\n server.serve_forever()", "def socket_pipe():\n\n # Create read0end acceptor.\n read_acceptor = socket.socket()\n read_acceptor.bind(('localhost', 0))\n read_acceptor.listen(10)\n read_acceptor.setblocking(False)\n\n # Create writer and connect it\n writer = socket.socket()\n writer.setblocking(True)\n writer.connect(read_acceptor.getsockname())\n\n # Wait for connection from the right socket\n for _ in xrange(10):\n reader, writer_address = read_acceptor.accept()\n reader.setblocking(True)\n if writer_address != writer.getsockname():\n sys.stderr.write(__name__ + \".socket_pipe: Waring: port \"\n \"scanning detected.\\n\")\n reader.close()\n continue\n break\n else:\n raise RuntimeError(\"socket_pipe: did not receive writer connection.\")\n\n read_acceptor.close()\n\n # Verify, that the connected socket is really the right one.\n test_message = str(random.random())\n writer.sendall(test_message)\n while test_message:\n test_chunk = reader.recv(len(test_message))\n if not test_chunk or not test_message.startswith(test_chunk):\n raise RuntimeError(\"socket_pipe: invalid test data received.\")\n test_message = test_message[len(test_chunk):]\n\n return reader, writer", "def runWithProtocol(klass, masterIP, port):\r\n fd = sys.stdin.fileno()\r\n oldSettings = termios.tcgetattr(fd)\r\n tty.setcbreak(fd)\r\n\r\n try:\r\n p = ServerProtocol(klass, masterIP, port)\r\n stdio.StandardIO(p)\r\n reactor.run() #@UndefinedVariable\r\n finally:\r\n termios.tcsetattr(fd, termios.TCSANOW, oldSettings)\r\n os.write(fd, \"\\r\\x1bc\\r\")", "def startServer(self):\n processor = ThinService.Processor(self.serverLogic)\n serverSocket = TSocket.TServerSocket(Constants.SERVER_HOST, Constants.SERVER_PORT)\n transportFactory = TTransport.TBufferedTransportFactory()\n protocolFactory = TBinaryProtocol.TBinaryProtocolFactory()\n\n server = TServer.TSimpleServer(processor, serverSocket, transportFactory, protocolFactory)\n server.serve()", "def server(host, port, func):\n def handler(conn):\n try:\n yield func(conn)\n finally:\n conn.close()\n\n listener = Listener(host, port)\n try:\n while True:\n conn = yield listener.accept()\n yield spawn(handler(conn))\n except KeyboardInterrupt:\n pass\n finally:\n listener.close()", "def __init__(self, server, intf, port=3, rawmgr=None):\n #Name of interface\n self.intf = intf\n\n #Create client connection\n self.sock = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)\n self.sock.bind((intf,port))\n\n #Create socket manager\n self.mgr = rawmgr \n if (self.mgr == None):\n self.mgr = rawsocketmgr(server)\n server.recv.addconnection(self.sock, self.mgr)\n\n ##Cleanup\n server.register_cleanup(self)" ]
[ "0.7584677", "0.6351429", "0.6300604", "0.617805", "0.60826844", "0.5990248", "0.5973056", "0.5972735", "0.5965008", "0.5957159", "0.5957159", "0.5944811", "0.58804023", "0.5860215", "0.58540183", "0.58201164", "0.58175886", "0.57931465", "0.57761294", "0.5772888", "0.57713604", "0.57686996", "0.57681984", "0.5760051", "0.57522005", "0.5741501", "0.5731543", "0.5710181", "0.5699525", "0.5691277" ]
0.8183566
0
wrapper for running net_server on its own thread/process
def net_proc(pipe):
    asyncio.run(net_server(pipe))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self):\n server = TCPServer((self.host, self.port), TCPHandler)\n server.lymphocytes_getter = self.lymphocytes_getter\n\n #runs forever - so make this thread daemon\n server.serve_forever()", "async def net_server(pipe):\n server = await net.Server(pipe, host=\"0.0.0.0\", port=8080)\n return await server.wait_closed()", "def server():", "def server():", "def main():\n s = start_server()\n accept_connection(s)", "def main():\n server = ThreadedServer(MasterControllerService, port=5000)\n server.start()", "def _run_server(port):\n\n # Configure allow_reuse_address to make re-runs of the script less painful -\n # if this is not True then waiting for the address to be freed after the\n # last run can block a subsequent run\n SocketServer.TCPServer.allow_reuse_address = True\n\n # Create the server instance\n server = ThreadingServer(\n ('', port),\n SimpleHTTPServer.SimpleHTTPRequestHandler\n )\n\n # Print out before actually running the server (cheeky / optimistic, however\n # you want to look at it)\n print('Your images are at http://127.0.0.1:%d/%s' % (\n port,\n args.index_file_name\n ))\n\n # Try to run the server\n try:\n # Run it - this call blocks until the server is killed\n server.serve_forever()\n except KeyboardInterrupt:\n # This is the expected way of the server being killed, since imageMe is\n # intended for ad-hoc running from command line\n print('User interrupted, stopping')\n except Exception as exptn:\n # Catch everything else - this will handle shutdowns via other signals\n # and faults actually starting the server in the first place\n print(exptn)\n print('Unhandled exception in server, stopping')", "def __run_server(self):\n os.chdir(os.path.dirname(self.server_path))\n self.server_process = subprocess.Popen([self.server_path, \\\n \"{}:{}\".format(self.args.ip, self.args.port)])", "def server_main(args=None):\n\n server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_socket.bind((HOST, PORT))\n server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n backlog = 0\n # server_socket.listen(backlog)\n\n games = GamesHandler(args)\n global shouldRunning\n threads = []\n # handle links with thread\n t = threading.Thread(target=handle_link_backs, args=(games,)).start()\n threads.append(t)\n\n server_socket.listen(backlog)\n while True: # grand loop of the server\n try:\n client_socket, client_addr = server_socket.accept()\n t = threading.Thread(target=client_thread, args=(client_socket, client_addr, games)).start()\n threads.append(t)\n except KeyboardInterrupt as e:\n shouldRunning = False\n break\n\n # clean-ups\n for thread in threads:\n thread.join()\n server_socket.close()", "def main():\n return run_server(**parse_server_args())", "def start(self):\n threading.Thread(target=self.serve_forever).start()", "def run():\n server = current_server()\n server._auto_stop = True\n return start()", "def main():\n signal(SIGINT, shutdown_handler)\n parser = ArgumentParser()\n parser.add_argument(\"-p\", \"--port\", help=\"The port to run the server on (Default: 8080)\",\n type=int, default=8080, required=False)\n parser.add_argument(\"-d\", \"--dir\", help=\"The directory to host (Default: current directory)\",\n type=str, default=os.getcwd(), required=False)\n parser.add_argument(\n \"-6\", \"--ipv6\", help=\"Use IPv6 instead of IPv4\", action='store_true')\n args = parser.parse_args()\n\n httpd = get_threaded_server(\n port=args.port, serve_path=args.dir, ipv6=args.ipv6)\n\n logging.info(\"Serving %s at localhost:%d via IPv%d...\" %\n 
(args.dir, args.port, 6 if args.ipv6 else 4))\n Thread(target=run_server, name=\"threaded_http_server\", kwargs={\n \"server\": httpd, \"sema\": runSema}).start()\n while not runSema.acquire(True, 0.25):\n pass\n logging.info(\"Shutting down\")\n httpd.server_close()", "def _HTTPServerProcess(conn, dirname, port, server_kwargs):\n try:\n os.chdir(dirname)\n httpd = PluggableHTTPServer(('', port), PluggableHTTPRequestHandler,\n **server_kwargs)\n except socket.error as e:\n sys.stderr.write('Error creating HTTPServer: %s\\n' % e)\n sys.exit(1)\n\n try:\n conn.send(httpd.server_address[1]) # the chosen port number\n httpd.timeout = 0.5 # seconds\n while httpd.running:\n # Flush output for MSVS Add-In.\n sys.stdout.flush()\n sys.stderr.flush()\n httpd.handle_request()\n if conn.poll():\n httpd.running = conn.recv()\n except KeyboardInterrupt:\n pass\n finally:\n conn.send(httpd.result)\n conn.close()", "def run(self):\n self._server = self._get_server()\n self._server.serve_forever()", "def serve(self):\n\t\timport thread\n\t\tthread.start_new_thread(self._server_thread, tuple())", "def main():\n host = ''\n port = 8088\n HTTPServer((host, port), HandleRequests).serve_forever()", "def workerProcess(self):\r\n\r\n if self.postForkCallback:\r\n self.postForkCallback()\r\n\r\n while self.isRunning.value == True:\r\n try:\r\n client = self.serverTransport.accept()\r\n self.serveClient(client)\r\n except (KeyboardInterrupt, SystemExit):\r\n return 0\r\n except Exception, x:\r\n logging.exception(x)", "def start_server():\n host = 'localhost'\n port = 8080\n listener = socket.socket(socket.AF_INET)\n listener.bind((host, port))\n print 'Serving on {0}:{1}.'.format(host, port)\n listener.listen(0)\n while 1:\n connection, address = listener.accept()\n print 'Got connection from {}'.format(address)\n threading.Thread(\n target=Proxy, args=(connection, )).run()", "def server(host, port, debug):\n run_server(host, port, debug)", "def serve(self):\r\n\r\n #this is a shared state that can tell the workers to exit when set as false\r\n self.isRunning.value = True\r\n\r\n #first bind and listen to the port\r\n self.serverTransport.listen()\r\n\r\n #fork the children\r\n for i in range(self.numWorkers):\r\n try:\r\n w = Process(target=self.workerProcess)\r\n w.daemon = True\r\n w.start()\r\n self.workers.append(w)\r\n except Exception, x:\r\n logging.exception(x)\r\n\r\n #wait until the condition is set by stop()\r\n\r\n while True:\r\n\r\n self.stopCondition.acquire()\r\n try:\r\n self.stopCondition.wait()\r\n break\r\n except (SystemExit, KeyboardInterrupt):\r\n\t\tbreak\r\n except Exception, x:\r\n logging.exception(x)\r\n\r\n self.isRunning.value = False", "def _serverThreadRunner(self):\n args = self._argsForSubprocess()\n logging.info(\"Test server popen() args: %s\" % str.join(\" \", args))\n self._server_process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n with self._has_launched_cv:\n self._has_launched = True\n self._has_launched_cv.notify_all()\n stdout, stderr = self._server_process.communicate()\n logging.info(\"Process stdout: %s\", stdout.decode(\"utf-8\"))\n logging.info(\"Process stderr: %s\", stderr.decode(\"utf-8\"))\n return stdout, stderr", "def run(self):\n try:\n # Initialize signal handler to be able to have a graceful shutdown.\n ServiceShutdownHandling.initServiceShutdownHandling()\n\n httpd = None\n # The HTTP server thread - our HTTP interface\n if self._port != None:\n httpd = RaspendHTTPServerThread(self._shutdownFlag, self._dataLock, 
self._sharedDict, self._cmdMap, self._port)\n # Start our threads.\n httpd.start()\n\n for worker in self._workers:\n worker.start()\n\n # Keep primary thread or main loop alive.\n while True:\n time.sleep(0.5)\n\n except ServiceShutdownHandling.ServiceShutdownException:\n # Signal the shutdown flag, so the threads can quit their work.\n self._shutdownFlag.set()\n\n # Wait for all threads to end.\n for worker in self._workers:\n worker.join()\n\n if httpd:\n httpd.join()\n\n except Exception as e:\n print (\"An unexpected error occured. Error: {}\".format(e))\n\n finally:\n pass\n\n return", "def image_server():\n yield from http_server_thread(ImageHandler)", "async def server_main(loop, proxy_config, server_config):\n\n controller = Controller(\n MessageProxy(proxy_config),\n hostname=server_config['listen']['addr'],\n port=server_config['listen']['port'],\n )\n controller.start()", "def serve(self):\n if self._server_thread is not None:\n return\n if self._port is None:\n self._port = portpicker.pick_unused_port()\n started = threading.Event()\n self._stopped = threading.Event()\n self._stopping = threading.Event()\n\n def build_server(started, stopped, stopping):\n \"\"\"Closure to build the server function to be passed to the thread.\n\n Args:\n started: Threading event to notify when started.\n stopped: Threading event to notify when stopped.\n stopping: Threading event to notify when stopping.\n Returns:\n A function that function that takes a port and WSGI app and notifies\n about its status via the threading events provided.\n \"\"\"\n\n def server(port, wsgi_app):\n \"\"\"Serve a WSGI application until stopped.\n\n Args:\n port: Port number to serve on.\n wsgi_app: WSGI application to serve.\n \"\"\"\n try:\n httpd = wsgiref.simple_server.make_server(self._host, port, wsgi_app)\n except socket.error:\n # Try IPv6\n httpd = wsgiref.simple_server.make_server(\n self._host, port, wsgi_app, server_class=WsgiServerIpv6)\n started.set()\n httpd.timeout = 30\n while not stopping.is_set():\n httpd.handle_request()\n stopped.set()\n\n return server\n\n server = build_server(started, self._stopped, self._stopping)\n server_thread = threading.Thread(\n target=server, args=(self._port, self._app))\n self._server_thread = server_thread\n\n server_thread.start()\n started.wait()", "def run_while_true(server_class=BaseHTTPServer.HTTPServer,\n handler_class=BaseHTTPServer.BaseHTTPRequestHandler):\n server_address = ('localhost', 8080)\n httpd = server_class(server_address, handler_class)\n while keep_running():\n httpd.handle_request()", "def main():\n if len(sys.argv) != 2:\n sys.exit(\"Usage: python server-python.py [Server Port]\")\n server_port = int(sys.argv[1])\n server(server_port)", "def main():\n if len(sys.argv) != 2:\n sys.exit(\"Usage: python server-python.py [Server Port]\")\n server_port = int(sys.argv[1])\n server(server_port)", "def _start_server():\n args = [sys.executable] + sys.argv\n args.insert(args.index('wserver'), 'server')\n args.remove('wserver')\n pid = os.spawnv(os.P_NOWAIT, sys.executable, args)\n return pid" ]
[ "0.7307538", "0.68068635", "0.6780022", "0.6780022", "0.6734137", "0.6704307", "0.668211", "0.66599005", "0.6595216", "0.65842754", "0.6539571", "0.65383536", "0.6500156", "0.6459733", "0.64578015", "0.6419993", "0.6366262", "0.6355513", "0.63487566", "0.6294894", "0.62633705", "0.6249347", "0.624153", "0.62409264", "0.6236663", "0.6226553", "0.6204705", "0.6203908", "0.6203908", "0.61755973" ]
0.7150661
1
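For context on the pair above: a wrapper like net_proc exists so that an asyncio server can own the event loop of a dedicated child process while talking to the parent over a Pipe. The sketch below is illustrative only — net_server here is a stand-in stub, not the real coroutine from the source repository — and simply shows the launch pattern.

    import asyncio
    import multiprocessing as mp

    async def net_server(pipe):                  # stand-in for the real server coroutine
        pipe.send("server ready")                # the real code would serve until closed
        await asyncio.sleep(0)

    def net_proc(pipe):
        asyncio.run(net_server(pipe))            # the child process gets its own event loop

    if __name__ == "__main__":
        parent_conn, child_conn = mp.Pipe()
        proc = mp.Process(target=net_proc, args=(child_conn,), daemon=True)
        proc.start()
        print(parent_conn.recv())                # -> "server ready"
        proc.join()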
Dynamic import of CVXOPT dense interface.
def get_cvxopt_dense_intf():
    import cvxpy.interface.cvxopt_interface.dense_matrix_interface as dmi
    return dmi.DenseMatrixInterface()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cvxopt_sparse_intf():\n import cvxpy.interface.cvxopt_interface.sparse_matrix_interface as smi\n return smi.SparseMatrixInterface()", "def dense2cvxopt(value):\n import cvxopt\n return cvxopt.matrix(value, tc='d')", "def test_import_type_dense():\n x = np.random.rand(7, 11)\n export_data('/tmp/test.dense', x)\n assert x.dtype == import_data('/tmp/test.dense').dtype", "def init_dense(self, layer):\n pass", "def cvxopt2dense(value):\n return np.array(value)", "def test_import_values_dense():\n x = np.random.rand(7, 11)\n export_data('/tmp/test.dense', x)\n assert np.array_equal(x, import_data('/tmp/test.dense'))", "def compile(self):\n for layer in self.layers:\n layer._Dense__load()", "def available_sparse_deep_model():\n return ['fast-text-char']", "def dense_nn(x):\n dense_1 = tf.nn.dropout(tf.nn.relu(dense(x, input_dim, n_l1, 'dense_1')), keep_prob=0.25)\n dense_2 = tf.nn.dropout(tf.nn.relu(dense(dense_1, n_l1, n_l2, 'dense_2')), keep_prob=0.25)\n dense_3 = dense(dense_2, n_l2, n_labels, 'dense_3')\n return dense_3", "def convert_to_dense_graph(self) -> cjg.Dense:\n N = len(self.indices)\n ising_int = self.ising_interactions()\n\n # cxxjij.graph.dense\n cxx_dense_ising = cjg.Dense(N)\n for i in range(N):\n if ising_int[i,i] != 0.0:\n cxx_dense_ising[i,i] = ising_int[i,i]\n for j in range(i+1, N):\n if ising_int[i,j] != 0.0:\n cxx_dense_ising[i,j] = ising_int[i,j]\n \n return cxx_dense_ising", "def test_import_dense_type_mat():\n x = np.random.rand(3, 2)\n export_data('/tmp/test.mat', x)\n assert x.dtype == import_data('/tmp/test.mat').dtype", "def test_sparse_with_dense():\n\n def test_func(df):\n df[\"new column\"] = 1 # Create dense column\n return df\n\n atom = ATOMClassifier(X_text, y10, random_state=1)\n atom.apply(test_func)\n atom.vectorize(strategy=\"BOW\", return_sparse=False)\n assert all(not pd.api.types.is_sparse(atom.X[c]) for c in atom.features)", "def dense(name, x, w=None, output_dim=128, initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0,\n bias=0.0, activation=None, batchnorm_enabled=False, dropout_keep_prob=-1, is_training=True):\n with tf.variable_scope(name) as scope:\n dense_o_b = dense_p(scope, x=x, w=w, output_dim=output_dim, initializer=initializer,\n l2_strength=l2_strength, bias=bias)\n\n if batchnorm_enabled:\n dense_o_bn = tf.layers.batch_normalization(dense_o_b, training=is_training)\n else:\n dense_o_bn = dense_o_b\n\n if activation is None:\n dense_a = dense_o_bn\n else:\n dense_a = activation(dense_o_bn)\n\n if dropout_keep_prob != -1:\n dense_o_dr = tf.nn.dropout(dense_a, keep_prob=dropout_keep_prob)\n else:\n dense_o_dr = dense_a\n\n dense_o = dense_o_dr\n\n return dense_o", "def _read_csv_dense(\n data: pd.DataFrame,\n argvals: npt.NDArray[np.float64]\n) -> DenseFunctionalData:\n argvals_ = DenseArgvals({'input_dim_0': argvals})\n values = DenseValues(np.array(data))\n return DenseFunctionalData(argvals_, values)", "def testGetDenseFeature(self):\n op = ops.get_dense_feature(tf.constant([1, 2], dtype=tf.int64), [0, 1], [2, 3], 3)\n with tf.Session() as sess:\n dense_features = sess.run(op)\n self.assertAllClose([[2.4, 3.6], [2.4, 3.6]], dense_features[0])\n self.assertAllClose([[4.5, 6.7, 8.9], [4.5, 6.7, 8.9]],\n dense_features[1])", "def test_import_dense_values_mat():\n x = np.random.rand(3, 2)\n export_data('/tmp/test.mat', x)\n assert np.array_equal(x, import_data('/tmp/test.mat'))", "def _to_dense(self: QGTOnTheFlyT) -> jnp.ndarray:\n Npars = nkjax.tree_size(self._params)\n I = jax.numpy.eye(Npars)\n\n if 
self._chunking:\n # the linear_call in mat_vec_chunked does currently not have a jax batching rule,\n # so it cannot be vmapped but we can use scan\n # which is better for reducing the memory consumption anyway\n _, out = jax.lax.scan(lambda _, x: (None, self @ x), None, I)\n else:\n out = jax.vmap(lambda x: self @ x, in_axes=0)(I)\n\n if jnp.iscomplexobj(out):\n out = out.T\n\n return out", "def test_import_type_sparse():\n x = sps.csr_matrix(np.random.rand(7, 11))\n export_data('/tmp/test.sparse', x)\n assert x.dtype == import_data('/tmp/test.sparse').dtype", "def dense_col(self):\n if not self.col_name_mapping or \"dense_col\" not in self.col_name_mapping:\n return EmptyFeature\n return Feature(\n name=list(self.col_name_mapping[\"dense_col\"].keys()),\n index=list(self.col_name_mapping[\"dense_col\"].values()),\n )", "def dense(input, out_dims=None, dropout_keep_prob=1.0, nonlin=True, trainable=True):\n input = tf.nn.dropout(input, dropout_keep_prob)\n # Initial number of values for each residue\n in_dims = input.get_shape()[-1].value\n out_dims = in_dims if out_dims is None else out_dims\n # Weights of the dense layer\n W = tf.Variable(initializer(\"he\", [in_dims, out_dims]), name=\"w\", trainable=trainable)\n b = tf.Variable(initializer(\"zero\", [out_dims]), name=\"b\", trainable=trainable)\n # Operation performed in the dense layer\n Z = tf.matmul(input, W) + b\n if (nonlin):\n nonlin = nonlinearity(\"relu\")\n Z = nonlin(Z)\n Z = tf.nn.dropout(Z, dropout_keep_prob)\n return Z", "def load_scipy_linalg_interface_gen(finder, module):\n module.IgnoreName(\"pre\")", "def dense(in_layer):\n return Dense(neurons,\n kernel_initializer=initializer())(in_layer)", "def denseFeature(self, feat):\n return {'feat': feat}", "def is_dense(self, rel_name):\n return self._declaration[rel_name].dense", "def is_dense(x: Any, backend=None) -> bool:\r\n\r\n module = get_module(backend)\r\n return module.is_dense(x)", "def item_dense_col(self):\n if not self.col_name_mapping or \"item_dense_col\" not in self.col_name_mapping:\n return EmptyFeature\n return Feature(\n name=list(self.col_name_mapping[\"item_dense_col\"].keys()),\n index=list(self.col_name_mapping[\"item_dense_col\"].values()),\n )", "def __init__(\n self,\n state_dense_dim: int,\n action_dense_dim: int,\n dense_sizes: List[int],\n dense_activations: List[str],\n overall_sizes: List[int],\n overall_activations: List[str],\n embedding_bag_collection: EmbeddingBagCollection,\n use_batch_norm: bool = False,\n use_layer_norm: bool = False,\n ):\n super().__init__()\n self.validate_parameters(\n dense_sizes,\n dense_activations,\n overall_sizes,\n overall_activations,\n embedding_bag_collection,\n )\n\n self.state_dense_arch = create_dense_arch(\n state_dense_dim,\n dense_sizes,\n dense_activations,\n use_batch_norm,\n use_layer_norm,\n )\n self.action_dense_arch = create_dense_arch(\n action_dense_dim,\n dense_sizes,\n dense_activations,\n use_batch_norm,\n use_layer_norm,\n )\n # sparse arch will be shared for state sparse features and action sparse features\n self.sparse_arch = SparseArch(embedding_bag_collection)\n\n # Overall arch\n F = sum(\n [\n len(conf.feature_names)\n for conf in embedding_bag_collection.embedding_bag_configs()\n ]\n )\n D = dense_sizes[-1]\n self.F = F\n self.D = D\n sparse_feature_names = []\n for conf in embedding_bag_collection.embedding_bag_configs():\n sparse_feature_names.extend(conf.feature_names)\n\n try:\n self.inter_arch_sparse_and_state_dense = InteractionArch(\n F,\n )\n 
self.inter_arch_sparse_and_action_dense = InteractionArch(\n F,\n )\n except TypeError:\n # HACK: in torchrec OSS version (0.1.0), InteractionArch\n # only accepts a list of sparse feature names as the input\n # pyre-ignore\n self.inter_arch_sparse_and_state_dense = InteractionArch(\n sparse_feature_names=sparse_feature_names\n )\n # pyre-ignore\n self.inter_arch_sparse_and_action_dense = InteractionArch(\n sparse_feature_names=sparse_feature_names\n )\n\n interaction_output_dim = 2 * D + 2 * F + F * (F - 1) // 2\n self.overall_arch = create_dense_arch(\n interaction_output_dim,\n overall_sizes,\n overall_activations,\n use_batch_norm,\n use_layer_norm,\n )", "def get_dense_layer(self, layer_i=0):\n dense_layers = [layer for layer in self.model.layers if layer.name.startswith('dense')]\n return dense_layers[layer_i]", "def get_feature_vector(cc, img, quiet=False):\n savefilename = config.get_classifier_featvect_name(cc.d.images[img]) \n if os.path.isfile(savefilename):\n print 'load feat_vect %s'%(cc.d.images[img].name)\n feat_vect = cPickle.load(open(savefilename,'r'))\n else:\n feat_vect = compute_feature_vector(cc, img, quiet=quiet)\n cPickle.dump(feat_vect, open(savefilename,'w'))\n return feat_vect", "def from_dense(cls, dense: Float[Array, \"N N\"]) -> \"ConstantDiagonalLinearOperator\":\n return ConstantDiagonalLinearOperator(\n value=jnp.atleast_1d(dense[0, 0]), size=dense.shape[0]\n )" ]
[ "0.6213993", "0.6188066", "0.59842515", "0.59137076", "0.58391494", "0.55679363", "0.5485139", "0.5233388", "0.5186878", "0.5140729", "0.5139248", "0.5135993", "0.5131671", "0.5050744", "0.5043737", "0.5000771", "0.4989075", "0.49857393", "0.49512193", "0.4937559", "0.49297774", "0.49089834", "0.48659822", "0.48642376", "0.4835894", "0.48228914", "0.48187447", "0.48093432", "0.47611797", "0.47514337" ]
0.76370394
0
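The reason get_cvxopt_dense_intf above defers its import into the function body is to keep CVXOPT an optional dependency: the module is only loaded if a caller actually asks for that interface. A minimal, hypothetical sketch of the same lazy-import pattern (the function name and fallback below are illustrative, not CVXPY API):

    def pick_matrix_backend():
        # Import lazily so the optional solver package is only loaded on demand.
        try:
            import cvxopt  # noqa: F401 -- only probing availability here
            return "cvxopt"
        except ImportError:
            return "numpy"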
Dynamic import of CVXOPT sparse interface.
def get_cvxopt_sparse_intf():
    import cvxpy.interface.cvxopt_interface.sparse_matrix_interface as smi
    return smi.SparseMatrixInterface()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_import_type_sparse():\n x = sps.csr_matrix(np.random.rand(7, 11))\n export_data('/tmp/test.sparse', x)\n assert x.dtype == import_data('/tmp/test.sparse').dtype", "def test_import_values_sparse():\n x = sps.csr_matrix(np.random.rand(7, 11))\n export_data('/tmp/test.sparse', x)\n assert np.array_equal(x.toarray(), import_data('/tmp/test.sparse').toarray())", "def ipu_sparse_ops(scope=\"session\"):\n build_path = Path(\n public_examples_dir,\n \"applications\",\n \"tensorflow\",\n \"dynamic_sparsity\"\n )\n completed = run(['python3-config', '--extension-suffix'], stdout=PIPE)\n extension = completed.stdout.decode().replace('\\n', '')\n shared_libs = [f'host_utils{extension}', 'libsparse_matmul.so']\n paths = [Path(build_path, \"ipu_sparse_ops\", f) for f in shared_libs]\n\n # Use exclusive lockfile to avoid race conditions on the build:\n lock_path = Path(build_path, \".ipu_sparse_ops.pytest.build.lockfile\")\n\n @ExecuteOncePerFS(lockfile=lock_path, file_list=paths, timeout=120, retries=20)\n def build_dynsparse():\n run(['make', 'clean'], cwd=build_path)\n run(['make', '-j'], cwd=build_path)\n\n build_dynsparse()", "def get_cvxopt_dense_intf():\n import cvxpy.interface.cvxopt_interface.valuerix_interface as dmi\n return dmi.DenseMatrixInterface()", "def test_import_sparse_type_mat():\n x = sps.csr_matrix(np.random.rand(3, 2))\n export_data('/tmp/test.mat', x)\n assert x.dtype == import_data('/tmp/test.mat').dtype", "def sparse2cvxopt(value):\n import cvxopt\n if isinstance(value, (np.ndarray, np.matrix)):\n return cvxopt.sparse(cvxopt.matrix(value.astype('float64')), tc='d')\n # Convert scipy sparse matrices to coo form first.\n elif sp.issparse(value):\n value = value.tocoo()\n return cvxopt.spmatrix(value.data.tolist(), value.row.tolist(),\n value.col.tolist(), size=value.shape, tc='d')", "def test_import_sparse_values_mat():\n x = sps.csr_matrix(np.random.rand(3, 2))\n export_data('/tmp/test.mat', x)\n assert np.array_equal(x.toarray(), import_data('/tmp/test.mat').toarray())", "def coregionalization_sparse(optimize=True, plot=True):\r\n #fetch the data from the non sparse examples\r\n m = coregionalization_toy2(optimize=False, plot=False)\r\n X, Y = m.X, m.likelihood.Y\r\n\r\n #construct a model\r\n m = GPy.models.SparseGPRegression(X,Y)\r\n m.constrain_fixed('iip_\\d+_1') # don't optimize the inducing input indexes\r\n\r\n if optimize:\r\n m.optimize('bfgs', max_iters=100, messages=1)\r\n\r\n if plot:\r\n m.plot(fixed_inputs=[(1,0)])\r\n m.plot(fixed_inputs=[(1,1)], ax=pb.gca())\r\n\r\n return m", "def isspsparse(var):\n if 'theano.sparse' in sys.modules:\n return (sp.sparse.issparse(var)\n or isinstance(var, sys.modules['theano.sparse'].basic.SparseVariable))\n else:\n return sp.sparse.issparse(var)", "def use_sparse_routines(self):\n return self._use_sparse_routines", "def make_sparse(sparse_mx, args):\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n\n indices = tensor(np.vstack((sparse_mx.row, sparse_mx.col)), args, torch.long)\n values = tensor(sparse_mx.data, args)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)", "def __init__(self, idxbase=0):\n if idxbase not in (0, 1):\n raise ValueError(\"Invalid index base\")\n\n self.api = cuSparse()\n self.idxbase = (CUSPARSE_INDEX_BASE_ZERO,\n CUSPARSE_INDEX_BASE_ONE)[idxbase]", "def pyoptsparse_installed():\n # type: () -> bool\n try:\n from openmdao.api import pyOptSparseDriver\n except ImportError:\n print(PyOptSparseImportError().msg)\n return False\n return True", "def 
test_import_type_sparsetxt():\n x = sps.csr_matrix(np.random.rand(3, 2))\n export_data('/tmp/test.sparsetxt', x)\n assert x.dtype == import_data('/tmp/test.sparsetxt').dtype", "def is_sparse(x: Any, backend=None) -> bool:\r\n module = get_module(backend)\r\n return module.is_sparse(x)", "def set_sparsity(self,use_sparse):\n \n if hasattr(self.problem,'sparse_jac'):\n self.use_sparse = use_sparse\n else:\n raise KINSOL_Exception(\"The problem must have implemented a method 'sparse_jac' for sparsity to by used.\")", "def to_sparse(self):\n from divisi2.sparse import SparseVector\n return SparseVector(self, self.labels)", "def available_sparse_deep_model():\n return ['fast-text-char']", "def train_clustermodel_sparse(self):\n\n print('Clustering using: ' + self.algorithm)\n uniquesegments_df, sparse_matrix = self.create_sparse_matrix(self.data)\n\n clusterer = self.clustering_algorithms[self.algorithm]\n self.clustering_model = clusterer.fit(sparse_matrix)\n \n clusters_df = pd.DataFrame(self.clustering_model.labels_, columns = ['cluster_sparse'])\n clusters_df['segmentskey'] = clusters_df.index\n clusters_df = clusters_df.reset_index(drop=True)\n self.clusters_df_final = pd.merge(uniquesegments_df, clusters_df, on=['segmentskey'])\n self.clusters_df_final['cluster_sparse'].value_counts()\n \n today = datetime.date.today()\n filename = self.algorithm + '_sparse_cluster_model_' + today.strftime('%Y%m%d') + '.pkl'\n joblib.dump(self.clustering_model, filename)\n \n print('Stored ' + filename)\n \n return self.clustering_model, self.clusters_df_final[['segment_id','cluster_sparse']]", "def test_return_sparse():\n X = Vectorizer(strategy=\"bow\", return_sparse=True).fit_transform(X_text, y10)\n assert all(pd.api.types.is_sparse(X[c]) for c in X.columns)", "def __init__(self, sparse_args=None, solve=True):\n self.solved = False\n self.sparse_args = sparse_args\n self.solved = False\n if solve: self.solve()", "def as_sparse_variable(x, name=None):\r\n\r\n # TODO\r\n # Verify that sp is sufficiently sparse, and raise a\r\n # warning if it is not\r\n\r\n if isinstance(x, gof.Apply):\r\n if len(x.outputs) != 1:\r\n raise ValueError(\"It is ambiguous which output of a \"\r\n \"multi-output Op has to be fetched.\", x)\r\n else:\r\n x = x.outputs[0]\r\n if isinstance(x, gof.Variable):\r\n if not isinstance(x.type, SparseType):\r\n raise TypeError(\"Variable type field must be a SparseType.\", x,\r\n x.type)\r\n return x\r\n try:\r\n return constant(x, name=name)\r\n except TypeError:\r\n raise TypeError(\"Cannot convert %s to SparseType\" % x, type(x))", "def get_sparse_backend():\n backend = biom_config['python_code_sparse_backend']\n if backend is None:\n backend = 'CSMat'\n\n if backend not in sparse_backends:\n raise InvalidSparseBackendException(\"Unrecognized sparse backend \"\n \"'%s'. 
Choose from %s.\" % (backend,\n ', '.join(sparse_backends)))\n\n valid_backend = False\n if backend == 'ScipySparseMat':\n try:\n from biom.backends.scipysparse import ScipySparseMat, to_scipy, \\\n dict_to_scipy, list_dict_to_scipy, list_nparray_to_scipy, \\\n nparray_to_scipy, list_list_to_scipy\n SparseObj = ScipySparseMat\n to_sparse = to_scipy\n dict_to_sparseobj = dict_to_scipy\n list_dict_to_sparseobj = list_dict_to_scipy\n list_nparray_to_sparseobj = list_nparray_to_scipy\n nparray_to_sparseobj = nparray_to_scipy\n list_list_to_sparseobj = list_list_to_scipy\n valid_backend = True\n except ImportError:\n valid_backend = False\n stderr.write(\"Cannot load ScipySparseMat (requires that scipy is \"\n \"installed). Using CSMat sparse backend.\\n\")\n\n if backend == 'CSMat' or (not valid_backend):\n try:\n from biom.backends.csmat import CSMat, to_csmat, dict_to_csmat, \\\n list_dict_to_csmat, list_nparray_to_csmat, nparray_to_csmat, \\\n list_list_to_csmat\n SparseObj = CSMat\n to_sparse = to_csmat\n dict_to_sparseobj = dict_to_csmat\n list_dict_to_sparseobj = list_dict_to_csmat\n list_nparray_to_sparseobj = list_nparray_to_csmat\n nparray_to_sparseobj = nparray_to_csmat\n list_list_to_sparseobj = list_list_to_csmat\n valid_backend = True\n except ImportError:\n valid_backend = False\n stderr.write('Cannot load CSMat sparse backend.\\n')\n\n if not valid_backend:\n raise InvalidSparseBackendException(\"The sparse matrix backend '%s' \"\n \"could not be loaded. Please check your biom-format \"\n \"installation.\" % backend)\n\n return SparseObj, to_sparse, dict_to_sparseobj, list_dict_to_sparseobj, \\\n list_nparray_to_sparseobj, nparray_to_sparseobj, \\\n list_list_to_sparseobj", "def create_sparseDB():\n datas = data.Kmercount_to_matrix()\n datas.run()\n print('***Sparse matrix created***')", "def to_sparse(self):\n if self.rep.fmt == 'sparse':\n return self\n\n return self.from_rep(self.rep.to_sdm())", "def set_sparse_backend(sparse_backend, warn=True):\n if 'biom.table' in modules:\n if warn:\n print (\"Warning: biom.table has already been loaded. This call to \"\n \"biom.set_sparse_backend() has no effect. It must be \"\n \"called before biom.table is imported for the first time.\")\n else:\n biom_config['python_code_sparse_backend'] = sparse_backend", "def load_sparse(fname):\n E = np.loadtxt(open(fname, \"rb\"), delimiter=\",\")\n H = E[0, :]\n n = int(H[0])\n d = int(H[1])\n E = E[1:, :]\n S = sparse.coo_matrix((E[:, 2], (E[:, 0] - 1, E[:, 1] - 1)), shape=(n, d))\n S = S.todense()\n\n return S", "def test_import_values_sparsetxt():\n x = sps.csr_matrix(np.random.rand(3, 2))\n export_data('/tmp/test.sparsetxt', x)\n assert np.array_equal(x.toarray(), import_data('/tmp/test.sparsetxt').toarray())", "def sparseFeature(feat_name, feat_num, embed_dim=4):\n return {'feat_name': feat_name, 'feat_num': feat_num, 'embed_dim': embed_dim}", "def sparse(cls, a_ndarray, i_ndarray, shape, bigdl_type=\"float\"):\n if a_ndarray is None:\n return None\n invalidInputError(isinstance(a_ndarray, np.ndarray),\n f\"input should be a np.ndarray, not ${type(a_ndarray)}\")\n invalidInputError(isinstance(i_ndarray, np.ndarray),\n f\"indices should be a np.ndarray, not ${type(i_ndarray)}\")\n invalidInputError(i_ndarray.size == a_ndarray.size * shape.size,\n f\"size of values ${a_ndarray.size * shape.size} and\"\n f\" indices ${i_ndarray.size} should match\")\n return cls(a_ndarray,\n shape,\n bigdl_type,\n i_ndarray)" ]
[ "0.6678251", "0.6325364", "0.6168615", "0.61634254", "0.6101723", "0.6002239", "0.59741104", "0.5861257", "0.58531886", "0.5842924", "0.5654546", "0.56046325", "0.5598766", "0.559268", "0.557955", "0.5525964", "0.5518082", "0.550019", "0.5469042", "0.5434541", "0.5425496", "0.5406998", "0.5397187", "0.5389922", "0.535959", "0.53296584", "0.53186536", "0.52898276", "0.52876604", "0.5270214" ]
0.76998776
0
Converts a SciPy sparse matrix to a CVXOPT sparse matrix.
def sparse2cvxopt(value):
    import cvxopt
    if isinstance(value, (np.ndarray, np.matrix)):
        return cvxopt.sparse(cvxopt.matrix(value.astype('float64')), tc='d')
    # Convert scipy sparse matrices to coo form first.
    elif sp.issparse(value):
        value = value.tocoo()
        return cvxopt.spmatrix(value.data.tolist(), value.row.tolist(),
                               value.col.tolist(), size=value.shape, tc='d')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_sparse(self):\n from divisi2.sparse import SparseMatrix\n return SparseMatrix(self, self.row_labels, self.col_labels)", "def makesparse(matrix):\n n = matrix[0].size\n elements = []\n for i in range(n):\n for j in range(n):\n if matrix[i][j] != 0 :\n temp = MatrixElement(i, j, matrix[i][j])\n elements.append(temp)\n return SparseMatrix(n, elements)", "def make_sparse(sparse_mx, args):\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n\n indices = tensor(np.vstack((sparse_mx.row, sparse_mx.col)), args, torch.long)\n values = tensor(sparse_mx.data, args)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)", "def to_sparse(self):\n from divisi2.sparse import SparseVector\n return SparseVector(self, self.labels)", "def convert_sparse_to_coo(s_mat):\n return np.vstack(find(s_mat)).T", "def ZSparseMatrix2Scipy(matrix):\n data = np.ndarray(matrix.get_value_size(), dtype=float);\n outer_idx = np.ndarray(matrix.get_outer_size(), dtype=np.int32);\n inner_idx = np.ndarray(matrix.get_inner_size(), dtype=np.int32);\n\n matrix.get_values(data);\n matrix.get_outer_indices(outer_idx);\n matrix.get_inner_indices(inner_idx);\n\n return scipy.sparse.csc_matrix((data, inner_idx, outer_idx),\n shape = (matrix.num_rows(), matrix.num_cols()),\n dtype = float);", "def scipy_sparse_to_spmatrix(A):\n coo = A.tocoo()\n SP = spmatrix(coo.data.tolist(), coo.row.tolist(), coo.col.tolist(), size=A.shape)\n return SP", "def sparse_matrix(data, stype=\"csr\", dtype=complex):\n return _SPARSE_CONSTRUCTORS[stype](data, dtype=dtype)", "def to_csc(self):\n return sparse.csc_matrix((self.data, (self.col, self.row)),\n shape=(self.nrows, self.ncols))", "def _dict_to_sparse(matrix_dict):\n return scipy.sparse.coo_matrix(\n (matrix_dict['data'], (matrix_dict['row'], matrix_dict['col'])),\n shape=matrix_dict['shape'])", "def get_cvxopt_sparse_intf():\n import cvxpy.interface.cvxopt_interface.sparse_matrix_interface as smi\n return smi.SparseMatrixInterface()", "def to_sparse(self):\n if self.rep.fmt == 'sparse':\n return self\n\n return self.from_rep(self.rep.to_sdm())", "def to_csr(self):\n return sparse.csr_matrix((self.data, (self.col, self.row)),\n shape=(self.nrows, self.ncols))", "def sparse_matlab(i, j, v, m, n):\n return csr_matrix((v, (i, j)), shape=(m, n))", "def j_sparse_vector_wrapper_to_scipy_spmatrix(j_obj: JavaObject):\n indices = np.frombuffer(j_obj.getIndicesBytes(), dtype=\"<i4\")\n values = np.frombuffer(j_obj.getValuesBytes(), dtype=\"<f8\")\n size = j_obj.getSize()\n indptr = np.array([0, indices.shape[0]], dtype=np.int32)\n return csr_matrix((values, indices, indptr), shape=(1, size), dtype=np.float64).todok()", "def to_sparse(x):\n x_typename = torch.typename(x).split('.')[-1]\n sparse_tensortype = getattr(torch.sparse, x_typename)\n\n indices = torch.nonzero(x)\n if len(indices.shape) == 0: # if all elements are zeros\n return sparse_tensortype(*x.shape)\n indices = indices.t()\n values = x[tuple(indices[i] for i in range(indices.shape[0]))]\n return sparse_tensortype(indices, values, x.size())", "def sparse_mx_to_torch_sparse_tensor(sparse_mx):\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64)\n )\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)", "def sparse_mx_to_torch_sparse_tensor(sparse_mx):\n \n # sparse_mx = sparse_mx.astype(np.float32)\n indices = 
torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)", "def sparse_mx_to_torch_sparse_tensor(sparse_mx):\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)", "def sparse_mx_to_torch_sparse_tensor(sparse_mx):\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)", "def sparse_mx_to_torch_sparse_tensor(sparse_mx):\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)", "def sparse_mx_to_torch_sparse_tensor(sparse_mx):\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)", "def sparse_mx_to_torch_sparse_tensor(sparse_mx):\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)", "def sparse_mx_to_torch_sparse_tensor(sparse_mx):\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)", "def sparse_mx_to_torch_sparse_tensor(sparse_mx):\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)", "def sparse_mx_to_torch_sparse_tensor(sparse_mx):\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)", "def sparse_mx_to_torch_sparse_tensor(sparse_mx):\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)", "def sparse_mx_to_torch_sparse_tensor(sparse_mx):\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, 
values, shape)", "def sparse_mx_to_torch_sparse_tensor(sparse_mx):\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)", "def sparse_mx_to_torch_sparse_tensor(sparse_mx):\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)" ]
[ "0.74020207", "0.7093455", "0.70311135", "0.6989213", "0.69172007", "0.68352145", "0.6795465", "0.67651546", "0.6756018", "0.6749724", "0.6736512", "0.67179906", "0.66992825", "0.6644021", "0.6587182", "0.65845096", "0.6561744", "0.6558976", "0.65500927", "0.65500927", "0.65500927", "0.65500927", "0.65500927", "0.65500927", "0.65500927", "0.65500927", "0.65500927", "0.65500927", "0.65500927", "0.65500927" ]
0.80747503
0
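A usage sketch for a converter like sparse2cvxopt above, assuming both scipy and cvxopt are installed; it goes through COO triplets exactly as the function does:

    import numpy as np
    import scipy.sparse as sp
    import cvxopt

    A = sp.csr_matrix(np.array([[1.0, 0.0], [0.0, 2.0]]))
    coo = A.tocoo()                      # cvxopt.spmatrix wants (values, rows, cols)
    A_cvx = cvxopt.spmatrix(coo.data.tolist(), coo.row.tolist(),
                            coo.col.tolist(), size=coo.shape, tc='d')
    print(A_cvx)                         # 2x2 double-precision sparse matrix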
Converts a NumPy matrix to a CVXOPT matrix.
def dense2cvxopt(value):
    import cvxopt
    return cvxopt.matrix(value, tc='d')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cvxopt2dense(value):\n return np.array(value)", "def sparse2cvxopt(value):\n import cvxopt\n if isinstance(value, (np.ndarray, np.matrix)):\n return cvxopt.sparse(cvxopt.matrix(value.astype('float64')), tc='d')\n # Convert scipy sparse matrices to coo form first.\n elif sp.issparse(value):\n value = value.tocoo()\n return cvxopt.spmatrix(value.data.tolist(), value.row.tolist(),\n value.col.tolist(), size=value.shape, tc='d')", "def _conv_array_to_sparse(arr):\n if has_scipy():\n from scipy.sparse import isspmatrix as scipy_sparse_isspmatrix\n else:\n from cuml.internals.import_utils import (\n dummy_function_always_false as scipy_sparse_isspmatrix,\n )\n if scipy_sparse_isspmatrix(arr):\n ret = cupyx.scipy.sparse.csr_matrix(arr.tocsr())\n elif cupyx.scipy.sparse.isspmatrix(arr):\n ret = arr\n elif isinstance(arr, cudf.DataFrame):\n ret = _conv_df_to_sparse(arr)\n elif isinstance(arr, np.ndarray):\n cupy_ary = rmm_cupy_ary(cp.asarray, arr, dtype=arr.dtype)\n ret = cupyx.scipy.sparse.csr_matrix(cupy_ary)\n\n elif isinstance(arr, cp.ndarray):\n ret = cupyx.scipy.sparse.csr_matrix(arr)\n else:\n raise ValueError(\"Unexpected input type %s\" % type(arr))\n return ret", "def to_coo_matrix(self):\n if self.E > 0:\n i, j = self.edges.T\n sm = coo_matrix((self.weights, (i, j)), shape=(self.V, self.V))\n else:\n sm = coo_matrix((self.V, self.V))\n return sm", "def to_coo_matrix(self):\n if self.E > 0:\n i, j = self.edges.T\n sm = coo_matrix((np.ones(self.E), (i, j)),\n shape=(self.V, self.V))\n else:\n sm = coo_matrix((self.V, self.V))\n return sm", "def conv_matrix(matrix, kernel):", "def const_to_matrix(self, value, convert_scalars=False):\n # Lists and 1D arrays become column vectors.\n if isinstance(value, list) or \\\n isinstance(value, np.ndarray) and value.ndim == 1:\n value = np.asmatrix(value, dtype='float64').T\n # First convert sparse to dense.\n elif sp.issparse(value):\n value = value.todense()\n return np.asmatrix(value, dtype='float64')", "def matrixToVector(self,mx):\n return FreeCAD.Base.Vector(mx[0]/1000,mx[1]/1000,mx[2]/1000)", "def to_matrix(self, normalize: bool = True) -> jnp.ndarray:\n return NotImplemented # pragma: no cover", "def csr2tensor(self, matrix: sp.csr_matrix):\n matrix = matrix.tocoo()\n x = torch.sparse.FloatTensor(\n torch.LongTensor(np.array([matrix.row, matrix.col])),\n torch.FloatTensor(matrix.data.astype(np.float32)),\n matrix.shape,\n ).to(self.device)\n return x", "def _mat2vec(self, mat):\n return mat.flatten('F')", "def to_matrix(array):\n return Matrix(array.tolist())", "def to_coo_matrix(self):\n row_indices, column_indices, nonzero_elements = self.to_ijv()\n return coo_matrix((nonzero_elements, (row_indices, column_indices)),\n shape=(self.size, self.size))", "def par_dot_csr_matvec(A, x):\n y = np.empty(x.size, common_type(A, x))\n _dot_csr_matvec_prange(A.data, A.indptr, A.indices, x.ravel(), y)\n y.shape = x.shape\n if isinstance(x, qarray):\n y = qarray(y)\n return y", "def to_matrix(self):\n return numpy.array([[1, 0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 1, 0, 0, 0, 0]], dtype=complex)", "def make_project_matrix(X):\n X = np.mat(X)\n return np.eye(X.shape[0]) - (X*(np.linalg.inv(X.T*X)*X.T))", "def to_matrix(self):\n return self.to_operator().data", "def cudamat_to_cudandarray(x):\r\n if not isinstance(x, cudamat.CUDAMatrix):\r\n raise ValueError(\"We can transfer only 
cudamat.CUDAMatrix to CudaNdarray\")\r\n # elif x.dtype != \"float32\":\r\n # raise ValueError(\"CudaNdarray support only float32\")\r\n # We don't need this, because cudamat is always float32.\r\n else:\r\n strides = [1]\r\n for i in x.shape[::-1][:-1]:\r\n strides.append(strides[-1]*i)\r\n strides = tuple(strides[::-1])\r\n\r\n import ctypes\r\n ptr_long = long(ctypes.cast(x.mat.data_device, ctypes.c_void_p).value)\r\n\r\n\r\n # seems legit.\r\n z = cuda.from_gpu_pointer(ptr_long, x.shape, strides, x)\r\n return z", "def convert_sparse_to_coo(s_mat):\n return np.vstack(find(s_mat)).T", "def apply(self, problem):\n data, inv_data = super(CVXOPT, self).apply(problem)\n # Convert A, b, G, h, c to CVXOPT matrices.\n if data[s.A] is not None:\n data[s.A] = intf.sparse2cvxopt(data[s.A])\n if data[s.G] is not None:\n data[s.G] = intf.sparse2cvxopt(data[s.G])\n if data[s.B] is not None:\n data[s.B] = intf.dense2cvxopt(data[s.B])\n if data[s.H] is not None:\n data[s.H] = intf.dense2cvxopt(data[s.H])\n if data[s.C] is not None:\n data[s.C] = intf.dense2cvxopt(data[s.C])\n return data, inv_data", "def to_matrix(self, rows=1, cropping=True, outer_offset=None, approximation_precision=0):\n import numpy as np\n return np.array(self.value)", "def to_csc(self):\n return sparse.csc_matrix((self.data, (self.col, self.row)),\n shape=(self.nrows, self.ncols))", "def set_connectivity_matrix(self, arr):\n self._validate_connectivity_matrix_shape(arr)\n self.create_dataset(\n 'connectivity_matrix', data=arr, dtype=np.float32, compression=5\n )\n\n # Ensure attributes are up to date.\n self.set_no_ser_neurons(arr.shape[0])\n self.set_no_gaba_neurons(arr.shape[1])", "def to_matrix(self):\n return numpy.array([[1, 0],\n [0, 1]], dtype=complex)", "def to_col_vec(x):\n assert x.ndim == 1\n return jnp.expand_dims(x, 1)", "def as_matrix(self) -> types.Matrix:", "def to_tensor(mtx):\n\n if not isinstance(mtx, coo_matrix):\n raise ValueError(\"Only coo_matrix is supported.\")\n\n rows = mtx.row\n cols = mtx.col\n\n rows = rows.astype(np.int64, copy=True)\n cols = cols.astype(np.int64, copy=True)\n\n return CooTensor(mtx.shape, mtx.data, [rows, cols], copy=False)", "def to_mat(self) -> np.matrix:\n raise NotImplementedError", "def vandermonde_matrix(x):\n m = size(x) \n n = m+1\n V = ones((m, n))\n for j in range(1, n):\n for i in range(0, m):\n V[i,j] = pow(x[i],j) \n return V", "def make_design_matrix(array):\n return sm.add_constant(make_col_vector(array), prepend=False)" ]
[ "0.6312579", "0.6233762", "0.58347297", "0.5797177", "0.56139076", "0.5566805", "0.54767096", "0.5372099", "0.53622454", "0.535068", "0.5341642", "0.52930886", "0.52172667", "0.5217212", "0.51898384", "0.51890403", "0.5079698", "0.50713885", "0.5059637", "0.5054085", "0.5043916", "0.5037607", "0.50307554", "0.5023365", "0.5022121", "0.5011836", "0.49993807", "0.49974146", "0.49732688", "0.49476847" ]
0.66287154
0
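The dense counterpart above is a one-liner in use; again a sketch assuming cvxopt is available:

    import numpy as np
    import cvxopt

    B = cvxopt.matrix(np.eye(2), tc='d')   # typecode 'd' = double-precision real
    print(B.size)                          # (2, 2)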
Is the constant a sparse matrix?
def is_sparse(constant) -> bool:
    return sp.issparse(constant)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_sparse(A):\n if isinstance(A, torch.Tensor):\n return A.layout == torch.sparse_coo\n raise TypeError(\"expected Tensor but got %s\" % (type(A).__name__))", "def is_sparse(tensor):\n return isinstance(tensor, sparse_tensor.SparseTensor)", "def is_sparse(x: Any, backend=None) -> bool:\r\n module = get_module(backend)\r\n return module.is_sparse(x)", "def _is_allowed_sparse_format(matrix):\n if _spsparse.isspmatrix(matrix):\n return _spsparse.isspmatrix_csr(matrix) or _spsparse.isspmatrix_csc(matrix) or _spsparse.isspmatrix_bsr(matrix)\n else:\n return True", "def isspsparse(var):\n if 'theano.sparse' in sys.modules:\n return (sp.sparse.issparse(var)\n or isinstance(var, sys.modules['theano.sparse'].basic.SparseVariable))\n else:\n return sp.sparse.issparse(var)", "def test_return_sparse():\n X = Vectorizer(strategy=\"bow\", return_sparse=True).fit_transform(X_text, y10)\n assert all(pd.api.types.is_sparse(X[c]) for c in X.columns)", "def test_import_sparse_type_mat():\n x = sps.csr_matrix(np.random.rand(3, 2))\n export_data('/tmp/test.mat', x)\n assert x.dtype == import_data('/tmp/test.mat').dtype", "def sparse_matrix(data, stype=\"csr\", dtype=complex):\n return _SPARSE_CONSTRUCTORS[stype](data, dtype=dtype)", "def test_check_sparse(self):\n x, x_rand, s = self.create_testdata()\n task = mmRDTR()\n #check that a dense array x is passed thru unchanged\n check = task.check_sparse(x)\n self.assertEqual(np.all(check==x),True)\n #check that a sparse matrix s is converted to a numpy array\n check = task.check_sparse(s)\n self.assertIsInstance(check,np.ndarray)\n self.assertEqual(np.all(check==s.todense()),True)", "def test_import_type_sparse():\n x = sps.csr_matrix(np.random.rand(7, 11))\n export_data('/tmp/test.sparse', x)\n assert x.dtype == import_data('/tmp/test.sparse').dtype", "def sparse_matrix(shape, integer=False):\n dtype = numpy.int_ if integer else numpy.float_\n return scipy.sparse.lil_matrix(shape, dtype=dtype)", "def _identity_sparse(d, stype=\"csr\", dtype=complex):\n return sp.eye(d, dtype=dtype, format=stype)", "def test_build_classification_matrix_sparse():\n import scnym\n\n # generate a sparse matrix with ~10% of elements filled\n B = np.zeros((100, 10))\n ridx = np.random.choice(B.size, size=100, replace=True)\n B.flat[ridx] = 1\n B = sparse.csr_matrix(B)\n\n # create dummy gene names where the order of\n # genes in `B` is permuted\n A_genes = np.arange(10)\n B_genes = np.random.permutation(A_genes)\n\n # build the classification matrix\n X = scnym.utils.build_classification_matrix(\n X=B,\n model_genes=A_genes,\n sample_genes=B_genes,\n )\n assert sparse.issparse(X)\n\n # X should have the genes of B in the order of A\n for i, g in enumerate(A_genes):\n j = int(np.where(B_genes == g)[0])\n assert np.all(X[:, i].toarray() == B[:, j].toarray())\n return", "def makesparse(matrix):\n n = matrix[0].size\n elements = []\n for i in range(n):\n for j in range(n):\n if matrix[i][j] != 0 :\n temp = MatrixElement(i, j, matrix[i][j])\n elements.append(temp)\n return SparseMatrix(n, elements)", "def test_import_sparse_values_mat():\n x = sps.csr_matrix(np.random.rand(3, 2))\n export_data('/tmp/test.mat', x)\n assert np.array_equal(x.toarray(), import_data('/tmp/test.mat').toarray())", "def test_import_values_sparse():\n x = sps.csr_matrix(np.random.rand(7, 11))\n export_data('/tmp/test.sparse', x)\n assert np.array_equal(x.toarray(), import_data('/tmp/test.sparse').toarray())", "def isdense(qob):\n return isinstance(qob, np.ndarray)", "def sparse_matrix (base_type=float):\n 
return defaultdict (lambda: sparse_vector (base_type))", "def _check_scipy_index_typing(sparse_matrix):\n\n int_max = np.iinfo(MKL.MKL_INT_NUMPY).max\n if (sparse_matrix.nnz > int_max) or (max(sparse_matrix.shape) > int_max):\n msg = \"MKL interface is {t} and cannot hold matrix {m}\\n\".format(m=repr(sparse_matrix), t=MKL.MKL_INT_NUMPY)\n msg += \"Try changing MKL to int64 with the environment variable MKL_INTERFACE_LAYER=ILP64\"\n raise ValueError(msg)\n\n # Cast indexes to MKL_INT type\n if sparse_matrix.indptr.dtype != MKL.MKL_INT_NUMPY:\n sparse_matrix.indptr = sparse_matrix.indptr.astype(MKL.MKL_INT_NUMPY)\n if sparse_matrix.indices.dtype != MKL.MKL_INT_NUMPY:\n sparse_matrix.indices = sparse_matrix.indices.astype(MKL.MKL_INT_NUMPY)", "def is_sparse(number):\n\n if number == 0:\n return True\n if number == 1:\n # edge case. List explicitly for clarity. Define to be True\n return True\n else:\n bits = bits_list(number)\n # start power_of_2 at 1 so previous_bit index won't be out of list range\n for power_of_2 in range(1, len(bits)):\n current_bit = bits[power_of_2]\n previous_bit = bits[power_of_2 - 1]\n if ((current_bit == 1) and (previous_bit == 1)):\n # number has two consecutive 1s\n return False\n return True", "def test_00_create_sparse_1d_array(self):\n ncells = 100\n sparsity = 3.0 # 1 / density\n _, err = _iquery(\"create array SPARSE <v:int64>[i=0:{0}:0:5]\".format(\n ncells - 1))\n assert not err, err\n self._array_cleanups.append('SPARSE')\n _, err = _iquery(\"\"\"\n insert(\n redimension(\n apply(\n build(<i:int64>[fud=0:{0}], {1}*fud),\n (v, 1)),\n SPARSE),\n SPARSE)\"\"\".format(int(ncells / sparsity) - 1,\n int(sparsity)))\n assert not err, err\n check_v_sum('SPARSE')\n nchunks = chunk_count(vaid_of('SPARSE'))\n prt(\"SPARSE has\", nchunks, \"chunks\")", "def sparse_matlab(i, j, v, m, n):\n return csr_matrix((v, (i, j)), shape=(m, n))", "def is_dense(x: Any, backend=None) -> bool:\r\n\r\n module = get_module(backend)\r\n return module.is_dense(x)", "def test__csr_matrix_equal(self):\n\n matrix_a = sparse.csr_matrix(([1.0], ([0], [0])), shape=(2, 2))\n matrix_b = sparse.csr_matrix(([1.0], ([0], [0])), shape=(2, 2))\n matrix_c = sparse.csr_matrix(([1.0], ([1], [0])), shape=(2, 2))\n self.assertEqual(decaydata._csr_matrix_equal(matrix_a, matrix_b), True)\n self.assertEqual(decaydata._csr_matrix_equal(matrix_a, matrix_c), False)", "def test_sparse_with_dense():\n\n def test_func(df):\n df[\"new column\"] = 1 # Create dense column\n return df\n\n atom = ATOMClassifier(X_text, y10, random_state=1)\n atom.apply(test_func)\n atom.vectorize(strategy=\"BOW\", return_sparse=False)\n assert all(not pd.api.types.is_sparse(atom.X[c]) for c in atom.features)", "def _schema_has_sparse_features(schema: schema_pb2.Schema) -> bool:\n\n def _has_sparse_features(\n feature_container: Iterable[schema_pb2.Feature]\n ) -> bool:\n \"\"\"Helper function used to determine whether there are sparse features.\"\"\"\n for f in feature_container:\n if isinstance(f, schema_pb2.SparseFeature):\n return True\n if f.type == schema_pb2.STRUCT:\n if f.struct_domain.sparse_feature:\n return True\n return _has_sparse_features(f.struct_domain.feature)\n return False\n\n if schema.sparse_feature:\n return True\n return _has_sparse_features(schema.feature)", "def _build_sparse_matrix(L):\n shape = L.shape\n i = torch.LongTensor(np.vstack((L.row, L.col)).astype(int))\n v = torch.FloatTensor(L.data)\n return torch.sparse.FloatTensor(i, v, torch.Size(shape))", "def sparse(cls, a_ndarray, i_ndarray, shape, 
bigdl_type=\"float\"):\n if a_ndarray is None:\n return None\n invalidInputError(isinstance(a_ndarray, np.ndarray),\n f\"input should be a np.ndarray, not ${type(a_ndarray)}\")\n invalidInputError(isinstance(i_ndarray, np.ndarray),\n f\"indices should be a np.ndarray, not ${type(i_ndarray)}\")\n invalidInputError(i_ndarray.size == a_ndarray.size * shape.size,\n f\"size of values ${a_ndarray.size * shape.size} and\"\n f\" indices ${i_ndarray.size} should match\")\n return cls(a_ndarray,\n shape,\n bigdl_type,\n i_ndarray)", "def _is_supported_matrix(data):\n return (\n spsp.isspmatrix_csc(data)\n or spsp.isspmatrix_csr(data)\n or spsp.isspmatrix_bsr(data)\n or spsp.isspmatrix_dia(data)\n )", "def precondition_sparse_matrix(A: lil_matrix) -> linalg.LinearOperator:\n ilu = linalg.spilu(A)\n Mx = ilu.solve\n return linalg.LinearOperator(A.shape, Mx)" ]
[ "0.76833653", "0.7436849", "0.74230564", "0.72037745", "0.7105081", "0.6872479", "0.6814387", "0.67973125", "0.67928153", "0.67843324", "0.66110086", "0.6596236", "0.6586943", "0.65716755", "0.6568378", "0.64620703", "0.641447", "0.6402279", "0.6368499", "0.62777996", "0.6246264", "0.6240724", "0.62037283", "0.6187758", "0.61535823", "0.6116627", "0.6109262", "0.61028725", "0.60685414", "0.6066004" ]
0.7981942
0
Check if a matrix is Hermitian and/or symmetric.
def is_hermitian(constant) -> bool:
    complex_type = np.iscomplexobj(constant)
    if complex_type:
        # TODO catch complex symmetric but not Hermitian?
        is_symm = False
        if sp.issparse(constant):
            is_herm = is_sparse_symmetric(constant, complex=True)
        else:
            is_herm = np.allclose(constant, np.conj(constant.T))
    else:
        if sp.issparse(constant):
            is_symm = is_sparse_symmetric(constant, complex=False)
        else:
            is_symm = np.allclose(constant, constant.T)
        is_herm = is_symm
    return is_symm, is_herm
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_Hermitian(q_1: Qs) -> bool:\n\n hc = Hermitian_conj(q_1, q_1.rows, q_1.columns)\n\n return equals(q_1, hc)", "def is_symmetric(mat):\n return np.allclose(mat.T, mat)", "def IsHermitian(self):\n \n Hermitian=True\n for Ind in self.IndList():\n Q=tuple(-x for x in Ind)\n \n X = self[Ind].conj().T-self[Q]\n\n A=amax(list(abs(X.flatten())))\n\n \n if A > 1e-9:\n Hermitian=False\n \n return Hermitian", "def symmetric(matrix):\n return sp.allclose(matrix, matrix.T)", "def check_hermitian(self):\n adjoint = self.mat.conj().T\n return np.allclose(self.mat, adjoint)", "def hermitian(matrix):\n return sp.allclose(matrix, sp.conj(matrix.T))", "def is_diagonal(self):\n return self.is_upper() and self.is_lower()", "def is_symmetric(self):\n return self.args[0].is_symmetric()", "def is_symmetric(self):\n return self.all_equal(self.transpose())", "def is_hermitian(self, eps = 0):\n return (self - self.hc()).absmax() <= eps", "def is_symmetric(mat, eps=None):\n if eps is None:\n eps = np.finfo(mat.dtype).eps\n\n assert mat.ndim == 2\n if mat.shape[0] != mat.shape[1]:\n return False\n\n return np.allclose(mat, mat.T, atol=eps)", "def chk_hor_sym(self):\n for row in self.rows:\n rrow = copy(row)\n rrow.reverse()\n for i in xrange(int(round(len(row)/2))):\n if row[i] == rrow[i]:\n continue\n else:\n return False\n return True", "def test_is_symmetric_and_hollow(self):\r\n self.assertTrue(is_symmetric_and_hollow(array([[0, 1], [1, 0]])))\r\n self.assertTrue(is_symmetric_and_hollow(matrix([[0, 1], [1, 0]])))\r\n self.assertTrue(is_symmetric_and_hollow(matrix([[0.0, 0], [0.0, 0]])))\r\n self.assertTrue(not is_symmetric_and_hollow(\r\n array([[0.001, 1], [1, 0]])))\r\n self.assertTrue(not is_symmetric_and_hollow(\r\n array([[0, 1.1], [1, 0]])))\r\n self.assertTrue(not is_symmetric_and_hollow(\r\n array([[0.5, 1.1], [1, 0]])))", "def is_hermitian(operator):\n # Handle FermionOperator, BosonOperator, and InteractionOperator\n if isinstance(operator,\n (FermionOperator, BosonOperator, InteractionOperator)):\n return (normal_ordered(operator) == normal_ordered(\n hermitian_conjugated(operator)))\n\n # Handle QubitOperator and QuadOperator\n if isinstance(operator, (QubitOperator, QuadOperator)):\n return operator == hermitian_conjugated(operator)\n\n # Handle sparse matrix\n elif isinstance(operator, spmatrix):\n difference = operator - hermitian_conjugated(operator)\n discrepancy = 0.\n if difference.nnz:\n discrepancy = max(abs(difference.data))\n return discrepancy < EQ_TOLERANCE\n\n # Handle numpy array\n elif isinstance(operator, numpy.ndarray):\n difference = operator - hermitian_conjugated(operator)\n discrepancy = numpy.amax(abs(difference))\n return discrepancy < EQ_TOLERANCE\n\n # Unsupported type\n else:\n raise TypeError('Checking whether a {} is hermitian is not '\n 'supported.'.format(type(operator).__name__))", "def is_hermitian_num(A):\n eps=1e-3\n if(npl.norm(A-np.conj(A.T),'fro')<eps):\n return True\n else:\n return False", "def is_symmetric(self):\n _is_sym = self._is_sym\n if _is_sym is not None:\n return _is_sym\n\n n = self.degree\n if n >= 8:\n if self.is_transitive():\n _is_alt_sym = self._eval_is_alt_sym_monte_carlo()\n if _is_alt_sym:\n if any(g.is_odd for g in self.generators):\n self._is_sym, self._is_alt = True, False\n return True\n\n self._is_sym, self._is_alt = False, True\n return False\n\n return self._eval_is_alt_sym_naive(only_sym=True)\n\n self._is_sym, self._is_alt = False, False\n return False\n\n return self._eval_is_alt_sym_naive(only_sym=True)", "def ishomog(tr):\n 
\n return tr.shape == (4, 4)", "def is_hermitian(self):\n return False # by default, solvers are non-Hermitian", "def check_matrix_symmetric_positive_definite(matrix):\n try:\n if len(matrix.shape) != 2 or matrix.shape[0] != matrix.shape[1]:\n # Not 2-dimensional or square, so not simmetric.\n return False\n\n np.linalg.cholesky(matrix)\n return True\n\n except np.linalg.LinAlgError:\n return False", "def is_symmetric(t):\n return t is None or equal_mirror(t.left, t.right)", "def is_hadamard(matrix):\n m = np.array(matrix)\n order = m.shape[0]\n return np.array_equal(m.dot(m.T), order * np.identity(order))", "def is_skew_symmetric(self):\n return self.all_equal(-self.transpose())", "def _check_real_symmetric(A: np.array) -> bool:\n return np.allclose(A, A.T, atol=1e-9)", "def is_hom(self) -> bool:\n if self.is_null():\n return False\n if self.allele1 == -1 or self.allele2 == -1:\n return True\n return self.allele1 == self.allele2", "def symmetric2dTest(matrix2d):\n \n # is the matrix 2-d?\n if len(np.shape(matrix2d)) != 2:\n raise ValueError(\"Matrix dimensions are not equal to 2.\")\n matrix2d = np.array(matrix2d)\n\n # create boolean for whether 2-d matrix = its transpose\n symmBool = (matrix2d == matrix2d.T).all()\n \n\n if symmBool == False:\n print(\"Matrix not symmetric.\")\n print(\"Max assymetry = \",np.max(matrix2d-matrix2d.T))\n\n return symmBool", "def is_symmetric_transform(self) -> bool:\n\n # If the kernel is made stochastic, it looses the symmetry, if symmetric_kernel\n # is set to True, then apply the the symmetry transformation\n return self.is_stochastic and self.is_symmetric", "def is_invertible(matrix: np.ndarray) -> bool:\n return matrix.shape[0] == matrix.shape[1] and np.linalg.det(matrix) != 0", "def is_hermitian(self):\n return self.args[0].is_hermitian()", "def isToeplitz(mat):\n for j in range(row):\n if not checkDiag(mat, 0, j):\n return False\n for i in range(1, col):\n if not checkDiag(mat, i, 0):\n return False\n return True", "def is_diagonal(row, col):\n return 1 if row == col else 0" ]
[ "0.7288421", "0.70511174", "0.695801", "0.660251", "0.6573264", "0.6567614", "0.6554783", "0.6513599", "0.65056306", "0.64851743", "0.64565355", "0.63981193", "0.6385611", "0.63054913", "0.62993294", "0.62919945", "0.6259792", "0.6143758", "0.6118125", "0.6030024", "0.60240436", "0.60190153", "0.601533", "0.6002776", "0.599455", "0.59832585", "0.5981847", "0.5975365", "0.59264594", "0.5845991" ]
0.71756566
1
Walks through the full state trie, yielding one missing node hash/prefix at a time. The yielded node info is wrapped in a TrackedRequest. The hash is marked as active until it is explicitly marked for review again. The hash/prefix will be marked for review asking a peer for the data. Will exit when all known node hashes are already actively being requested, or if there are no more missing nodes.
async def _missing_trie_hashes(self) -> AsyncIterator[TrackedRequest]: # For each account, when we have asked for all known storage and bytecode # hashes, but some are still not present, we "pause" the account so we can look # for neighboring nodes. # This is a list of paused accounts, using the path to the leaf node, # because that's how the account tracker is indexed. exhausted_account_leaves: Tuple[Nibbles, ...] = () starting_root_hash = self._next_trie_root_hash try: while self.manager.is_running: # Get the next account # We have to rebuild the account iterator every time because... # something about an exception during a manual __anext__()? account_iterator = self._request_tracking_trie_items( self._account_tracker, starting_root_hash, ) try: next_account_info = await account_iterator.__anext__() except trie_exceptions.MissingTraversalNode as exc: # Found a missing trie node while looking for the next account yield self._account_tracker.generate_request( exc.missing_node_hash, exc.nibbles_traversed, ) continue except StopAsyncIteration: # Finished iterating over all available accounts break # Decode account path_to_leaf, address_hash_nibbles, encoded_account = next_account_info account = rlp.decode(encoded_account, sedes=Account) # Iterate over all missing hashes of subcomponents (storage & bytecode) subcomponent_hashes_iterator = self._missing_subcomponent_hashes( address_hash_nibbles, account, starting_root_hash, ) async for node_request in subcomponent_hashes_iterator: yield node_request # Check if account is fully downloaded account_components_complete = self._are_account_components_complete( address_hash_nibbles, account, ) if account_components_complete: # Mark fully downloaded accounts as complete, and do some cleanup self._mark_account_complete(path_to_leaf, address_hash_nibbles) else: # Pause accounts that are not fully downloaded, and track the account # to resume when the generator exits. self._account_tracker.pause_review(path_to_leaf) exhausted_account_leaves += (path_to_leaf, ) except GeneratorExit: # As the generator is exiting, we want to resume any paused accounts. This # allows us to find missing storage/bytecode on the next iteration. for path_to_leaf in exhausted_account_leaves: self._account_tracker.mark_for_review(path_to_leaf) raise else: # If we pause a few accounts and then run out of nodes to ask for, then we # still need to resume the paused accounts to prepare for the next iteration. for path_to_leaf in exhausted_account_leaves: self._account_tracker.mark_for_review(path_to_leaf) # Possible scenarios: # 1. We have completed backfill # 2. We have iterated the available nodes, and all known hashes are being requested. # For example: if 0 nodes are available, and we walk to the root and request # the root from a peer, we do not have any available information to ask for # more nodes, and exit cleanly. # # In response to these situations, we might like to: # 1. Log and celebrate that the full state has been downloaded # 2. Exit this search and sleep a bit, waiting for new trie nodes to arrive # # 1 and 2 are a little more cleanly handled outside this iterator, so we just # exit and let the caller deal with it, using a _check_complete() check. return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def _request_tracking_trie_items(\n self,\n request_tracker: TrieNodeRequestTracker,\n root_hash: Hash32) -> AsyncIterator[Tuple[Nibbles, Nibbles, bytes]]:\n if self._next_trie_root_hash is None:\n # We haven't started beam syncing, so don't know which root to start at\n return\n trie = HexaryTrie(self._db, root_hash)\n\n starting_index = bytes_to_nibbles(root_hash)\n\n while self.manager.is_running:\n try:\n path_to_node = request_tracker.next_path_to_explore(starting_index)\n except trie_exceptions.PerfectVisibility:\n # This doesn't necessarily mean we are finished.\n # Any active prefixes might still be hiding some significant portion of the trie\n # But it's all we're able to explore for now, until more node data arrives\n return\n\n try:\n cached_node, uncached_key = request_tracker.get_cached_parent(path_to_node)\n except KeyError:\n cached_node = None\n node_getter = partial(trie.traverse, path_to_node)\n else:\n node_getter = partial(trie.traverse_from, cached_node, uncached_key)\n\n try:\n node = node_getter()\n except trie_exceptions.MissingTraversalNode as exc:\n # Found missing account trie node\n if path_to_node == exc.nibbles_traversed:\n raise\n elif cached_node is None:\n # The path and nibbles traversed should always match in a non-cached traversal\n raise RuntimeError(\n f\"Unexpected: on a non-cached traversal to {path_to_node}, the\"\n f\" exception only claimed to traverse {exc.nibbles_traversed} -- {exc}\"\n ) from exc\n else:\n # We need to re-raise a version of the exception that includes the whole path\n # from the root node (when using cached nodes, we only have the path from\n # the parent node to the child node)\n # We could always raise this re-wrapped version, but skipping it (probably?)\n # improves performance.\n missing_hash = exc.missing_node_hash\n raise trie_exceptions.MissingTraversalNode(missing_hash, path_to_node) from exc\n except trie_exceptions.TraversedPartialPath as exc:\n node = exc.simulated_node\n\n if node.value:\n full_key_nibbles = path_to_node + node.suffix\n\n if len(node.sub_segments):\n # It shouldn't be a problem to skip handling this case, because all keys are\n # hashed 32 bytes.\n raise NotImplementedError(\n \"The state backfiller doesn't handle keys of different lengths, where\"\n f\" one key is a prefix of another. But found {node} in trie with\"\n f\" {root_hash!r}\"\n )\n\n yield path_to_node, full_key_nibbles, node.value\n # Note that we do not mark value nodes as completed. It is up to the caller\n # to do that when it is ready. For example, the storage iterator will\n # immediately treat the key as completed. 
The account iterator will\n # not treat the key as completed until all of its storage and bytecode\n # are also marked as complete.\n else:\n # If this is just an intermediate node, then we can mark it as confirmed.\n request_tracker.confirm_prefix(path_to_node, node)", "async def _missing_storage_hashes(\n self,\n address_hash_nibbles: Nibbles,\n storage_root: Hash32,\n starting_main_root: Hash32) -> AsyncIterator[TrackedRequest]:\n\n if storage_root == BLANK_NODE_HASH:\n # Nothing to do if the storage has an empty root\n return\n\n storage_tracker = self._get_storage_tracker(address_hash_nibbles)\n while self.manager.is_running:\n storage_iterator = self._request_tracking_trie_items(\n storage_tracker,\n storage_root,\n )\n try:\n async for path_to_leaf, hashed_key, _storage_value in storage_iterator:\n # We don't actually care to look at the storage keys/values during backfill\n storage_tracker.confirm_leaf(path_to_leaf)\n\n except trie_exceptions.MissingTraversalNode as exc:\n yield storage_tracker.generate_request(\n exc.missing_node_hash,\n exc.nibbles_traversed,\n )\n else:\n # Possible scenarios:\n # 1. We have completed backfilling this account's storage\n # 2. We have iterated the available nodes, and only their children are missing,\n # for example: if 0 nodes are available, and we walk to the root and request\n # the root from a peer, we do not have any available information to ask for\n # more nodes.\n #\n # In response to these situations, we might like to:\n # 1. Debug log?\n # 2. Look for more missing nodes in neighboring accounts and their storage, etc.\n #\n # 1 and 2 are a little more cleanly handled outside this iterator, so we just\n # exit and let the caller deal with it.\n return", "async def _match_urgent_node_requests_to_peers(self) -> None:\n while self.manager.is_running:\n urgent_batch_id, urgent_hashes = await self._node_tasks.get(\n eth_constants.MAX_STATE_FETCH\n )\n\n # Get best peer, by GetNodeData speed\n queen = await self._queen_tracker.get_queen_peer()\n\n queen_is_requesting = queen.eth_api.get_node_data.is_requesting\n\n if queen_is_requesting:\n # Our best peer for node data has an in-flight GetNodeData request\n # Probably, backfill is asking this peer for data\n # This is right in the critical path, so we'd prefer this never happen\n self.logger.debug(\n \"Want to download urgent data, but %s is locked on other request\",\n queen,\n )\n # Don't do anything different, allow the request lock to handle the situation\n\n self._num_urgent_requests_by_peer[queen] += 1\n self._urgent_requests += 1\n\n await self._find_urgent_nodes(\n queen,\n urgent_hashes,\n urgent_batch_id,\n )", "async def _match_predictive_node_requests_to_peers(self) -> None:\n # If self._queen_tracker terminates we need to exit as well, so check that on every\n # iteration.\n while self.manager.is_running and self._queen_tracker.get_manager().is_running:\n try:\n batch_id, hashes = await asyncio.wait_for(\n self._maybe_useful_nodes.get(eth_constants.MAX_STATE_FETCH),\n timeout=TOO_LONG_PREDICTIVE_PEER_DELAY,\n )\n except asyncio.TimeoutError:\n # Reduce the number of predictive peers, we seem to have plenty\n if self._min_predictive_peers > 0:\n self._min_predictive_peers -= 1\n self.logger.debug(\n \"Decremented predictive peers to %d\",\n self._min_predictive_peers,\n )\n # Re-attempt\n continue\n\n # Find any hashes that were discovered through other means, like urgent requests:\n existing_hashes = await asyncio.get_event_loop().run_in_executor(\n None,\n 
self._get_unique_present_hashes,\n hashes,\n )\n # If any hashes are already found, clear them out and retry\n if existing_hashes:\n # Wake up any paused preview threads\n await self._wakeup_preview_waiters(existing_hashes)\n # Clear out any tasks that are no longer necessary\n await self._maybe_useful_nodes.complete(batch_id, tuple(existing_hashes))\n # Restart from the top\n continue\n\n try:\n peer = await asyncio.wait_for(\n self._queen_tracker.pop_fastest_peasant(),\n timeout=TOO_LONG_PREDICTIVE_PEER_DELAY,\n )\n except asyncio.TimeoutError:\n # Increase the minimum number of predictive peers, we seem to not have enough\n new_predictive_peers = min(\n self._min_predictive_peers + 1,\n # Don't reserve more than half the peers for prediction\n self._num_peers // 2,\n )\n if new_predictive_peers != self._min_predictive_peers:\n self.logger.debug(\n \"Updating predictive peer count from %d to %d\",\n self._min_predictive_peers,\n new_predictive_peers,\n )\n self._min_predictive_peers = new_predictive_peers\n\n cancel_attempt = True\n else:\n if peer.eth_api.get_node_data.is_requesting:\n self.logger.debug(\n \"Want predictive nodes from %s, but it has an active request, skipping...\",\n peer,\n )\n self._queen_tracker.insert_peer(peer, NON_IDEAL_RESPONSE_PENALTY)\n cancel_attempt = True\n else:\n cancel_attempt = False\n\n if cancel_attempt:\n # Prepare to restart\n await self._maybe_useful_nodes.complete(batch_id, ())\n continue\n\n self._num_predictive_requests_by_peer[peer] += 1\n self._predictive_requests += 1\n\n self.manager.run_task(\n self._get_predictive_nodes_from_peer,\n peer,\n hashes,\n batch_id,\n )\n\n if self.manager.is_running and not self._queen_tracker.get_manager().is_running:\n self.logger.info(\n \"Backfill is complete, halting predictive downloads...\"\n )", "async def _missing_bytecode_hashes(\n self,\n address_hash_nibbles: Nibbles,\n code_hash: Hash32,\n starting_main_root: Hash32) -> AsyncIterator[TrackedRequest]:\n\n if code_hash == EMPTY_SHA3:\n # Nothing to do if the bytecode is for the empty hash\n return\n\n bytecode_tracker = self._get_bytecode_tracker(address_hash_nibbles)\n if bytecode_tracker.is_complete:\n # All bytecode has been collected\n return\n\n # If there is an active request (for now, there can only be one), then skip\n # any database checks until the active request is resolved.\n if not bytecode_tracker.has_active_requests:\n if code_hash not in self._db:\n # The bytecode isn't present, so we ask for it.\n # A bit hacky here, since there is no trie, we just treat it as\n # if it were a leaf node at the root.\n yield bytecode_tracker.generate_request(code_hash, prefix=())\n else:\n # The bytecode is already present, but the tracker isn't marked\n # as completed yet, so finish it off.\n bytecode_tracker.confirm_leaf(path_to_leaf=())", "def find_reachable_nodes(self):\n # find all reachable nodes down from the goal\n found = {}\n found[id(self.root)] = self.root\n queue = [self.root]\n #print >>sys.stderr, '---'\n while queue:\n node = queue.pop(0)\n if hasattr(node, 'dead'):\n if node.dead:\n #print >>sys.stderr, 'dead', node\n continue\n assert not node.dead\n for edge in node.incoming:\n for tailnode in edge.tail:\n #print >>sys.stderr, tailnode\n if id(tailnode) not in found:\n found[id(tailnode)] = tailnode\n queue.append(tailnode)\n tailnode.nout = 0\n tailnode.nout += 1\n # save for sanity check\n self.found = found", "async def _node_hashes_present(self, node_hashes: Set[Hash32], urgent: bool) -> int:\n remaining_hashes = 
node_hashes.copy()\n timeout = BLOCK_IMPORT_MISSING_STATE_TIMEOUT\n\n start_time = time.monotonic()\n if not urgent:\n wait_event = asyncio.Event()\n self._preview_events[wait_event] = node_hashes\n while remaining_hashes and time.monotonic() - start_time < timeout:\n if urgent:\n await self._new_data_event.wait()\n self._new_data_event.clear()\n else:\n try:\n await asyncio.wait_for(\n wait_event.wait(),\n timeout=CHECK_PREVIEW_STATE_TIMEOUT,\n )\n except asyncio.TimeoutError:\n # Check if the data showed up due to an urgent import\n preview_timeout = True\n pass\n else:\n preview_timeout = False\n finally:\n wait_event.clear()\n\n found_hashes = await self._run_preview_in_thread(\n urgent,\n self._get_unique_present_hashes,\n remaining_hashes,\n )\n\n if not urgent:\n if preview_timeout:\n self._predictive_found_nodes_during_timeout += len(found_hashes)\n else:\n self._predictive_found_nodes_woke_up += len(found_hashes)\n\n if found_hashes:\n remaining_hashes -= found_hashes\n if not urgent and remaining_hashes:\n self._preview_events[wait_event] = remaining_hashes\n\n if not urgent:\n del self._preview_events[wait_event]\n\n if remaining_hashes:\n if urgent:\n logger = self.logger.error\n else:\n logger = self.logger.warning\n logger(\n \"Could not collect node data for %d %s hashes %r within %.0f seconds (took %.1fs)\",\n len(remaining_hashes),\n \"urgent\" if urgent else \"preview\",\n list(remaining_hashes)[0:2],\n timeout,\n time.monotonic() - start_time,\n )\n\n return len(node_hashes) - len(remaining_hashes)", "def breadth_first_search(initial_state):\n list_of_processed_nodes = []\n num_unprocessed_nodes = 0#\n num_unconsidered_children = 0#\n\n initial_node = Node(state=initial_state)\n node_deque = collections.deque()\n node_deque.append(initial_node)\n goal_state_found = False\n goal_node = None\n\n while len(node_deque) > 0 and not goal_state_found:\n e = node_deque.popleft()\n #pdb.set_trace()\n if e in list_of_processed_nodes:\n num_unprocessed_nodes += 1\n continue\n else:\n list_of_processed_nodes.append(e)\n\n list_of_children_nodes, num_unconsidered_children = generate_children_nodes(\n curr_node=e, list_of_processed_nodes=list_of_processed_nodes,\n running_count_of_children_dups=num_unconsidered_children#\n )\n \n for child_node in list_of_children_nodes:\n #print 'Node {0} with goal status {1}'.format(child_node.index, child_node.state.snake_ate_food)\n if child_node.state.goal_state_reached():\n #print \"Goal state reached with node index {0}\".format(child_node.index)\n goal_state_found = True\n goal_node = child_node\n break\n else:\n #print \"Adding to deque node index {0}\".format(child_node.index)\n node_deque.append(child_node)\n\n if len(node_deque) == 0 and not goal_state_found:\n print '*'*40\n print 'NO SOLUTION PATH FOUND'\n print '*'*40\n sys.exit(0)\n\n #pdb.set_trace()#\n # Summary & results\n #print '{0} nodes processed!'.format(len(list_of_processed_nodes))\n #print '{0} nodes already visited, skipped!'.format(num_unprocessed_nodes)\n #print '{0} node children skipped!'.format(num_unconsidered_children)\n #os.system('say -v \"Victoria\" \"done\"')\n\n return goal_node, list_of_processed_nodes", "async def _wait_for_nodes(\n self,\n node_hashes: Iterable[Hash32],\n urgent: bool) -> int:\n missing_nodes = await self._run_preview_in_thread(\n urgent,\n self._get_unique_missing_hashes,\n node_hashes,\n )\n\n if urgent:\n queue = self._node_tasks\n else:\n queue = self._maybe_useful_nodes\n\n unrequested_nodes = tuple(\n node_hash for node_hash in 
missing_nodes if node_hash not in queue\n )\n if missing_nodes:\n if unrequested_nodes:\n await queue.add(unrequested_nodes)\n return await self._node_hashes_present(missing_nodes, urgent)\n else:\n return 0", "def _iterate_single_node(self, path):\n self.fuzz_node = self.nodes[path[-1].dst]\n # Loop through and yield all possible mutations of the fuzz node.\n # Note: when mutate() returns False, the node has been reverted to the default (valid) state.\n while self.fuzz_node.mutate():\n self.total_mutant_index += 1\n yield (path,)\n\n if self._skip_current_node_after_current_test_case:\n self._skip_current_node_after_current_test_case = False\n break\n elif self._skip_current_element_after_current_test_case:\n self._skip_current_element_after_current_test_case = False\n self.fuzz_node.skip_element()\n self.fuzz_node.reset()", "def _account_review(\n self,\n account_address_hashes: Iterable[Hash32],\n root_hash: Hash32) -> Tuple[Set[Hash32], Dict[Hash32, bytes]]:\n need_nodes = set()\n completed_accounts = {}\n with self._trie_db.at_root(root_hash) as snapshot:\n for account_hash in account_address_hashes:\n try:\n account_rlp = snapshot[account_hash]\n except MissingTrieNode as exc:\n need_nodes.add(exc.missing_node_hash)\n else:\n completed_accounts[account_hash] = account_rlp\n\n return need_nodes, completed_accounts", "def get_state():\n\tif node.id < 0:\n\t\treactor.callLater(0, get_state)\n\t\treturn\n\t\n\tnode.send(node.id, generate_start_graph, None)\n\tnode.target_filename = target_filename\n\tnode.roots = {}\n\tif DO_PROBLEMS:\n\t\ttarget_msg = msg_get_dirty_connections\n\telse:\n\t\ttarget_msg = msg_get_connections\n\tfor i in range(0, node.id):\n\t\tnode.send(i, target_msg, node.id)\n\tnode.send(node.id, wait_full_state, 0)", "def test_lookup_none_pending_all_contacted(self):\n lookup = Lookup(FindValue, self.target, self.node, self.event_loop)\n # Put the lookup object in the state to test.\n lookup.pending_requests = {}\n for contact in lookup.shortlist:\n lookup.contacted.add(contact)\n self.node.send_find.call_count = 0\n # Re-run _lookup and test\n lookup._lookup()\n self.assertEqual(self.node.send_find.call_count, 0)", "def start_one_step(self):\r\n new_infected_list = []\r\n old_infected_list = copy.deepcopy(self.infected_list)\r\n new_recovered_list = []\r\n old_recovered_list = copy.deepcopy(self.recovered_list)\r\n # For each infected node\r\n for infected_nid in old_infected_list:\r\n infected_node = self.node_dict[infected_nid]\r\n # For each neighbor\r\n for dst_nid in infected_node.get_dst_nid_list(self.graph):\r\n dst_node = self.node_dict[dst_nid]\r\n # Infect susceptible nodes with probability [p]\r\n if dst_node.state is NodeState.SUSCEPTIBLE and random.random() < self.p:\r\n dst_node.infected(self.i)\r\n new_infected_list.append(dst_nid)\r\n\r\n # Minus 1 turn of (remaining) infected days for all infected nodes\r\n infected_node.minus_one_state_day()\r\n # If infected node is recovered\r\n if infected_node.check_finish_infection():\r\n # Infected node get recovered\r\n infected_node.recovered(self.r)\r\n # Remove from infected list\r\n self.infected_list.remove(infected_nid)\r\n # Append to recovered list\r\n new_recovered_list.append(infected_nid)\r\n\r\n # Add newly infected nodes into infected list\r\n self.infected_list += new_infected_list\r\n\r\n # For each recovered node\r\n for recovered_nid in old_recovered_list:\r\n recovered_node = self.node_dict[recovered_nid]\r\n # Minus 1 turn of (remaining) recovered days for all recovered nodes\r\n 
recovered_node.minus_one_state_day()\r\n # If infected node is recovered\r\n if recovered_node.check_finish_recovery():\r\n # Recovered node get recovered\r\n recovered_node.susceptible()\r\n # Remove from recovered list\r\n self.recovered_list.remove(recovered_nid)\r\n\r\n # Add newly recovered nodes into recovered list\r\n self.recovered_list += new_recovered_list", "def _get_hash_prefixes(self):\n \n client_state = None\n\n self._get_threats_update()", "def test_lookup_all_pending(self):\n lookup = Lookup(FindValue, self.target, self.node, self.event_loop)\n # Sanity check that ALPHA slots are full.\n self.assertEqual(self.node.send_find.call_count, ALPHA)\n self.assertEqual(len(lookup.pending_requests), ALPHA)\n self.assertEqual(len(lookup.contacted), ALPHA)\n self.assertEqual(len(lookup.shortlist), K)\n # Re-run _lookup and ensure no further network calls have been made.\n lookup._lookup()\n self.assertEqual(self.node.send_find.call_count, ALPHA)", "def test_heuristic_abort(self):\n graph = {}\n for u in self.complete:\n graph[u] = set()\n for v in self.complete[u]:\n if u != v: # ignore self-loop\n graph[u].add(v)\n next_node = min_fill_in_heuristic(graph)\n if next_node is None:\n pass\n else:\n assert False", "def build_tree(self):\n resolved_nodes = []\n for nodeh in self.unresolved_nodes:\n sibling, left_sibl = self.find_sibling(nodeh)\n\n if nodeh in resolved_nodes:\n continue\n\n if sibling:\n self.nodes[nodeh]['sibling'] = sibling\n self.nodes[nodeh]['left'] = not left_sibl\n self.nodes[sibling]['left'] = left_sibl\n self.nodes[sibling]['sibling'] = nodeh\n\n if left_sibl:\n parent = do_hash(sibling + nodeh, self.algo)\n else:\n parent = do_hash(nodeh + sibling, self.algo)\n self.nodes[sibling]['parent'] = parent\n self.nodes[nodeh]['parent'] = parent\n resolved_nodes.append(nodeh)\n resolved_nodes.append(sibling)\n\n paddress = parent_address(self.nodes[nodeh]['address'])\n self.nodes[parent] = {'address': paddress}\n self.addresses[paddress] = parent\n if paddress != '':\n self.unresolved_nodes.append(parent)\n else:\n self.nodes[nodeh]['address'] = parent_address(\n self.nodes[nodeh]['address'])\n self.unresolved_nodes = list(\n set(self.unresolved_nodes) - set(resolved_nodes))", "def search(start):\n\n '''\n Create a class named nodeClass which contains 4 elements: \n state: The puzzle object containing the puzzle board at the node \n misplaced: num of misplaced tiles\n depth: depth of the node in the tree \n prev: parent node\n '''\n nodeClass = namedtuple('nodeClass', 'state, misplaced, depth, prev')\n\n #instantiate object from class creating the root node\n node = nodeClass(start, 0, 0, None)\n\n #stores the nodes that are going to be explored. 
\n #the node with lower f-score is explored first\n frontier = q.PriorityQueue()\n frontier.put((0,node))\n\n # frontier_set keep track of the nodes in the frontier queue\n frontier_set = {node}\n #contains the board states already explored\n explored_states = set()\n for ite in range(1,max_iterations+2):#while True:\n #Retrieve the node in the frontier with lowest value\n node = frontier.get()[1]\n\n #get the puzzle board obj from the node object\n state = node.state\n\n #Check if the game has ben solved\n if state.solved or ite==max_iterations:\n Result = namedtuple('Result', 'board, depth, nodesExpanded, max_depth, isSolved')\n return Result(state, node.depth, ite, max(no.depth for no in frontier_set), state.solved)\n\n # expanded nodes are added to explored set\n explored_states.add(state)\n\n #EXPANDING\n for mov in state.possible_moves:\n new_state=state.move(mov)\n new_node = nodeClass(new_state, new_state.score,\n node.depth + 1, node)\n\n #compute f-score of the node\n f_score=new_state.score + new_node.depth\n\n if new_state not in explored_states and new_node not in frontier_set:\n frontier.put((f_score,new_node))\n frontier_set.add(new_node)", "def uninformed_search(start, end, graph):\n\n class SearchNode():\n def __init__(self, step_cost, name, predecessor):\n self.path_cost = predecessor.path_cost + step_cost if predecessor is not None else 0\n self.step_cost = step_cost\n self.name = name\n self.predecessor = predecessor\n def __repr__(self):\n return self.predecessor.name + \"->\" + self.name + \"=\" + self.path_cost\n\n class Problem():\n def __init__(self, start, end, graph, goal_predicate):\n self.start = start\n self.end = end\n self.graph = graph\n self.is_goal = goal_predicate\n self.visited_nodes = []\n\n nodes_expanded = 0\n nodes_generated = 0\n max_nodes_in_memory = 0\n\n def tree_search(problem, fringe):\n nonlocal nodes_generated\n nonlocal nodes_expanded\n nonlocal max_nodes_in_memory\n\n # create the initial node\n nodes_generated = 1\n fringe = [SearchNode(0, problem.start, None)]\n\n while len(fringe) > 0:\n # keep track of some metrics\n max_nodes_in_memory = max(max_nodes_in_memory, len(fringe))\n nodes_expanded += 1\n\n node = fringe.pop(0)\n while node.name in problem.visited_nodes:\n # ran out of nodes in the fringe\n if len(fringe) == 0:\n return None\n\n node = fringe.pop(0)\n\n if problem.is_goal(node):\n return node\n \n # make sure we never visit this node again, since we'll be expanding it\n problem.visited_nodes.append(node.name)\n\n # keep the fringe sorted by the path cost\n fringe.extend(expand(node, problem))\n fringe = sorted(\n fringe, \n key=lambda node: node.path_cost\n )\n\n return None\n\n def expand(node, problem):\n nonlocal nodes_generated\n nodes = []\n for edge in problem.graph.edges(node.name):\n nodes.append(SearchNode(edge.weight, edge.destination, node))\n \n nodes_generated += len(nodes)\n return nodes\n\n initial_problem = Problem(start, end, graph, lambda x: x.name == end)\n result = tree_search(initial_problem, [])\n\n # convert the resulting nested structure into an actual path of (start, end, cost)\n def walk(node):\n pred = node.predecessor\n if pred is None:\n return []\n \n path = walk(pred)\n path.append((pred.name, node.name, node.step_cost))\n return path\n\n path = walk(result) if result is not None else None\n return (path, nodes_expanded, nodes_generated, max_nodes_in_memory)", "def dfs_loop(graph_dict, nodes, track):\n\n for node in nodes:\n if node not in track.explored:\n track.current_source = node\n 
dfs(graph_dict, node, track)", "def test_traverse_on_empty_trie(empty_trie):\n assert list(empty_trie.traversal()) == []", "def depthFirstSearch(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n node = problem.getStartState()\r\n if (problem.isGoalState(node)):\r\n return [] # no need to make any moves of the start state is goal\r\n start = (node, 'NoDirection',0)\r\n\r\n frontier_queue = Stack() # queue for frontier\r\n frontier_queue.push(start) # frontier consists of only the start state\r\n\r\n explored_nodes = set()\r\n explored_track = {start:None} # keep a track of parent, parent of root node is None\r\n\r\n while not frontier_queue.isEmpty():\r\n state = frontier_queue.pop() # pop the top element from the queue \r\n explored_nodes.add(state)\r\n\r\n if problem.isGoalState(state[0]):\r\n return get_track(explored_track, state)\r\n\r\n neighbors_state = problem.getSuccessors(state[0])\r\n for neighbor in neighbors_state: # neighbor will be something like this ((34, 15), 'South', 1)\r\n if neighbor not in frontier_queue.list and neighbor not in explored_nodes:\r\n frontier_queue.push(neighbor)\r\n explored_track[neighbor] = state\r\n\r\n\r\n def get_track(explored_track, state):\r\n from game import Directions\r\n track_history = [state]\r\n track_history_direction = []\r\n leaf = state\r\n while (explored_track[leaf]) != start:\r\n track_history.append(explored_track[leaf])\r\n leaf = explored_track[leaf]\r\n\r\n for j in range (len(track_history),-1,-1):\r\n this_step = track_history[j-1]\r\n this_step = this_step[1]\r\n track_history_direction.append(this_step)\r\n return track_history_direction[:-1]", "def search(self):\r\n #get the initial state\r\n initialState = State()\r\n \r\n #create root node\r\n rootNode = Node(initialState)\r\n \r\n #show the search tree explored so far\r\n treeplot = TreePlot()\r\n treeplot.generateDiagram(rootNode, rootNode)\r\n \r\n #perform search from root node\r\n self.performBacktrackSearch(rootNode, rootNode)\r\n \r\n rootNode.printTree()", "def search(state):\n init_key = state.key()\n visited = set(state.key())\n queue = state.moves()\n heapify(queue)\n total_states = 1\n while queue:\n score, key, saved, path = heappop(queue)\n if key in visited:\n continue\n state.restore(saved, path)\n empty = state.empty_node()\n print(\"[{}] score:{} goal:{} empty:{} moves:{} (queue size {})\".format(\n total_states, score, state.goal, empty, len(path), len(queue)))\n if state.done():\n history = list(state.history)\n break\n visited.add(key)\n total_states += 1\n for move in state.moves():\n heappush(queue, move)\n if total_states > 100000:\n history = []\n break\n\n state.restore(init_key, [])\n return history", "def run(self):\r\n if not self.s or not self.t:\r\n return False\r\n while self.unvisited: # not empty\r\n self.search_space_size += 1\r\n _, v = self.getHighestPriorityNode()\r\n self.search_space.append((self.preds[v], [v]))\r\n if v in self.closed_set:\r\n continue\r\n elif v == self.t: # or self.graph.getGeoCoords(v) == self.graph.getGeoCoords(self.t):\r\n return True\r\n self.closed_set.add(v)\r\n self.relaxVertex(v)\r\n return False # if no valid path has been found (some node inaccessible before t\r", "def solve(self):\r\n queue = collections.deque([Node(self.start)])\r\n seen = set()\r\n seen.add(queue[0].state)\r\n while queue:\r\n queue = collections.deque(sorted(list(queue), key=lambda node: node.f))\r\n node = queue.popleft()\r\n if node.solved:\r\n return node.path\r\n\r\n for move, action in node.actions:\r\n child = Node(move(), node, 
action)\r\n\r\n if child.state not in seen:\r\n queue.appendleft(child)\r\n seen.add(child.state)", "def graph_search(initial_state):\n path = [initial_state]\n current_node = copy.deepcopy(initial_state)\n while True:\n count = len(path)\n result = expand(current_node)\n for i in result:\n if i[1][1] == 0:\n path.append(i)\n break\n if len(path) > count:\n break\n else:\n current_node = result[-1]\n path.append(result[-1])\n return path", "def extra(maze):\n # TODO: Write your code here\n heuristic_lookup = {} \n objs = maze.getObjectives()\n corner_list = maze.getObjectives()\n start = maze.getStart()\n path = []\n dim = maze.getDimensions()\n visited = {}\n lookup_table = {}\n p_queue = []\n edgeset = []\n mintree = {}\n start_heuristic = 0 + multi_dot_heuristic_query(maze, start, objs, edgeset, mintree) * 2\n heuristic_lookup[(start, tuple(objs))] = start_heuristic\n start_state = state(start, corner_list)\n lookup_table[state(start, corner_list)] = (start_heuristic, 0, state((-2, -2)))\n p_queue.append((start_heuristic, state(start, corner_list)))\n while p_queue:\n pair = p_queue.pop(0)\n visited[pair[1]] = lookup_table.get(pair[1])[2]\n if not pair[1].getlist():\n current_state = pair[1]\n while current_state != start_state:\n path.append(current_state.getpos())\n current_state = visited.get(current_state)\n path.append(start)\n path.reverse()\n return path\n else: \n list_of_neighbors = maze.getNeighbors(pair[1].getpos()[0], pair[1].getpos()[1])\n for coordinates in list_of_neighbors:\n current_state = state(coordinates)\n if coordinates in pair[1].getlist():\n new_list = copy.copy(pair[1].getlist())\n new_list.remove(coordinates)\n current_state = state(coordinates, new_list)\n else:\n current_state = state(coordinates, pair[1].getlist()) \n if current_state in visited:\n continue\n if current_state in lookup_table:\n if (lookup_table.get(current_state)[0], current_state) in p_queue:\n cost = lookup_table.get(pair[1])[1] + 1\n queried_heuristic = 0\n if (current_state.getpos(), tuple(current_state.getlist())) in heuristic_lookup:\n queried_heuristic = heuristic_lookup.get((current_state.getpos(), tuple(current_state.getlist())))\n else:\n queried_heuristic = multi_dot_heuristic_query(maze, current_state.getpos(), current_state.getlist(), edgeset, mintree) * 2\n heuristic_lookup[(current_state.getpos(), tuple(current_state.getlist()))] = queried_heuristic\n heuristic = queried_heuristic + cost\n old_heuristic = lookup_table.get(current_state)[0]\n if heuristic < lookup_table.get(current_state)[0]:\n lookup_table[current_state] = (heuristic, cost, pair[1])\n p_queue.remove((old_heuristic, current_state))\n bisect.insort(p_queue, (heuristic, current_state))\n else:\n cost = lookup_table.get(pair[1])[1] + 1\n queried_heuristic = 0\n if (current_state.getpos(), tuple(current_state.getlist())) in heuristic_lookup:\n queried_heuristic = heuristic_lookup.get((current_state.getpos(), tuple(current_state.getlist()))) \n else:\n queried_heuristic = multi_dot_heuristic_query(maze, current_state.getpos(), current_state.getlist(), edgeset, mintree) * 2\n heuristic_lookup[(current_state.getpos(), tuple(current_state.getlist()))] = queried_heuristic\n heuristic = queried_heuristic + cost\n lookup_table[current_state] = (heuristic, cost, pair[1])\n bisect.insort(p_queue, (heuristic, current_state))\n\n return []", "def allsuccessors(obsstore, nodes, ignoreflags=0):\n remaining = set(nodes)\n seen = set(remaining)\n while remaining:\n current = remaining.pop()\n yield current\n for mark in 
obsstore.successors.get(current, ()):\n # ignore marker flagged with with specified flag\n if mark[2] & ignoreflags:\n continue\n for suc in mark[1]:\n if suc not in seen:\n seen.add(suc)\n remaining.add(suc)" ]
[ "0.6850051", "0.6614492", "0.5646459", "0.5422282", "0.53472066", "0.5297117", "0.52896756", "0.5159195", "0.50123775", "0.4973791", "0.49718073", "0.4961415", "0.49293295", "0.49292937", "0.4928424", "0.4927427", "0.49176684", "0.4904762", "0.48756814", "0.4864487", "0.48493743", "0.48378667", "0.47916695", "0.47730234", "0.47419956", "0.4736022", "0.47168776", "0.46846908", "0.4682893", "0.46786666" ]
0.7028946
0
Walk through the supplied trie, yielding the request tracker and node request for any missing trie nodes.
async def _request_tracking_trie_items( self, request_tracker: TrieNodeRequestTracker, root_hash: Hash32) -> AsyncIterator[Tuple[Nibbles, Nibbles, bytes]]: if self._next_trie_root_hash is None: # We haven't started beam syncing, so don't know which root to start at return trie = HexaryTrie(self._db, root_hash) starting_index = bytes_to_nibbles(root_hash) while self.manager.is_running: try: path_to_node = request_tracker.next_path_to_explore(starting_index) except trie_exceptions.PerfectVisibility: # This doesn't necessarily mean we are finished. # Any active prefixes might still be hiding some significant portion of the trie # But it's all we're able to explore for now, until more node data arrives return try: cached_node, uncached_key = request_tracker.get_cached_parent(path_to_node) except KeyError: cached_node = None node_getter = partial(trie.traverse, path_to_node) else: node_getter = partial(trie.traverse_from, cached_node, uncached_key) try: node = node_getter() except trie_exceptions.MissingTraversalNode as exc: # Found missing account trie node if path_to_node == exc.nibbles_traversed: raise elif cached_node is None: # The path and nibbles traversed should always match in a non-cached traversal raise RuntimeError( f"Unexpected: on a non-cached traversal to {path_to_node}, the" f" exception only claimed to traverse {exc.nibbles_traversed} -- {exc}" ) from exc else: # We need to re-raise a version of the exception that includes the whole path # from the root node (when using cached nodes, we only have the path from # the parent node to the child node) # We could always raise this re-wrapped version, but skipping it (probably?) # improves performance. missing_hash = exc.missing_node_hash raise trie_exceptions.MissingTraversalNode(missing_hash, path_to_node) from exc except trie_exceptions.TraversedPartialPath as exc: node = exc.simulated_node if node.value: full_key_nibbles = path_to_node + node.suffix if len(node.sub_segments): # It shouldn't be a problem to skip handling this case, because all keys are # hashed 32 bytes. raise NotImplementedError( "The state backfiller doesn't handle keys of different lengths, where" f" one key is a prefix of another. But found {node} in trie with" f" {root_hash!r}" ) yield path_to_node, full_key_nibbles, node.value # Note that we do not mark value nodes as completed. It is up to the caller # to do that when it is ready. For example, the storage iterator will # immediately treat the key as completed. The account iterator will # not treat the key as completed until all of its storage and bytecode # are also marked as complete. else: # If this is just an intermediate node, then we can mark it as confirmed. request_tracker.confirm_prefix(path_to_node, node)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def _missing_trie_hashes(self) -> AsyncIterator[TrackedRequest]:\n # For each account, when we have asked for all known storage and bytecode\n # hashes, but some are still not present, we \"pause\" the account so we can look\n # for neighboring nodes.\n # This is a list of paused accounts, using the path to the leaf node,\n # because that's how the account tracker is indexed.\n exhausted_account_leaves: Tuple[Nibbles, ...] = ()\n\n starting_root_hash = self._next_trie_root_hash\n\n try:\n while self.manager.is_running:\n # Get the next account\n\n # We have to rebuild the account iterator every time because...\n # something about an exception during a manual __anext__()?\n account_iterator = self._request_tracking_trie_items(\n self._account_tracker,\n starting_root_hash,\n )\n try:\n next_account_info = await account_iterator.__anext__()\n except trie_exceptions.MissingTraversalNode as exc:\n # Found a missing trie node while looking for the next account\n yield self._account_tracker.generate_request(\n exc.missing_node_hash,\n exc.nibbles_traversed,\n )\n continue\n except StopAsyncIteration:\n # Finished iterating over all available accounts\n break\n\n # Decode account\n path_to_leaf, address_hash_nibbles, encoded_account = next_account_info\n account = rlp.decode(encoded_account, sedes=Account)\n\n # Iterate over all missing hashes of subcomponents (storage & bytecode)\n subcomponent_hashes_iterator = self._missing_subcomponent_hashes(\n address_hash_nibbles,\n account,\n starting_root_hash,\n )\n async for node_request in subcomponent_hashes_iterator:\n yield node_request\n\n # Check if account is fully downloaded\n account_components_complete = self._are_account_components_complete(\n address_hash_nibbles,\n account,\n )\n if account_components_complete:\n # Mark fully downloaded accounts as complete, and do some cleanup\n self._mark_account_complete(path_to_leaf, address_hash_nibbles)\n else:\n # Pause accounts that are not fully downloaded, and track the account\n # to resume when the generator exits.\n self._account_tracker.pause_review(path_to_leaf)\n exhausted_account_leaves += (path_to_leaf, )\n\n except GeneratorExit:\n # As the generator is exiting, we want to resume any paused accounts. This\n # allows us to find missing storage/bytecode on the next iteration.\n for path_to_leaf in exhausted_account_leaves:\n self._account_tracker.mark_for_review(path_to_leaf)\n raise\n else:\n # If we pause a few accounts and then run out of nodes to ask for, then we\n # still need to resume the paused accounts to prepare for the next iteration.\n for path_to_leaf in exhausted_account_leaves:\n self._account_tracker.mark_for_review(path_to_leaf)\n\n # Possible scenarios:\n # 1. We have completed backfill\n # 2. We have iterated the available nodes, and all known hashes are being requested.\n # For example: if 0 nodes are available, and we walk to the root and request\n # the root from a peer, we do not have any available information to ask for\n # more nodes, and exit cleanly.\n #\n # In response to these situations, we might like to:\n # 1. Log and celebrate that the full state has been downloaded\n # 2. 
Exit this search and sleep a bit, waiting for new trie nodes to arrive\n #\n # 1 and 2 are a little more cleanly handled outside this iterator, so we just\n # exit and let the caller deal with it, using a _check_complete() check.\n return", "def _crawl_trie(subtrie, prefix):\n for key in subtrie.keys():\n if key == \"NAME\":\n result.extend(subtrie.get(key))\n else:\n _crawl_trie(subtrie[key], prefix + key)", "def traversal_test_trie():\n from trie import Trie\n trie = Trie()\n trie.insert('alpha')\n trie.insert('alpaca')\n trie.insert('boy')\n return trie", "def unpacktrie(x): ###\n t = trie()\n t.nodes = []\n ind = 0\n rlut = {}\n if progress != None:\n pr = progress(len(x), \"unpacking\")\n while ind < len(x):\n rlut[ind] = len(t.nodes)\n node, ind = unpacknode(x, ind)\n t.nodes.append(node)\n nnodes = len(t.nodes)\n if nnodes % 5000 == 0 and progress != None:\n pr.update(ind, \"(%d nodes)\" % nnodes)\n for node in t.nodes:\n node.parent = rlut[node.parent]\n for key, val in list(node.children.items()):\n node.children[key] = rlut[val]\n return t", "def trie_recurse(wordinds, charinds, prefix, probs, cumul, trie, model, new_inp):\n num = 0\n for let in charinds.keys():\n new_inp[0][-1] = eye[charinds[let]]\n keys = trie.keys(prefix+let)\n num = len(trie.keys(prefix+let))\n if num == 1:\n final_probs[0][wordinds[keys[0]]] = np.multiply(cumul, probs[0][charinds[let]])\n elif num > 1:\n probs = model.predict(new_inp)\n new_inp = np.roll(new_inp, -1, 1)\n \n cumul = np.multiply(cumul, probs[0][charinds[let]])\n trie_recurse(wordinds, charinds, prefix+let, probs, cumul, trie, model, new_inp)", "def test_traversal_on_test_trie_from_root(traversal_test_trie):\n gen = traversal_test_trie.traversal()\n assert list(gen) == ['a', 'l', 'p', 'h', 'a', 'a', 'c', 'a', 'b', 'o', 'y']", "def __init__(self):\r\n self.trie = {}", "def test_traverse_on_empty_trie(empty_trie):\n assert list(empty_trie.traversal()) == []", "def __init__(self):\n self.trie = {}", "def __init__(self):\n self.trie = {}", "def __init__(self):\n self.trie = {}", "def __init__(self):\n self.trie = {}", "def create_trie(self, genome):\r\n i = 0\r\n while i <= len(genome): # O(N)\r\n current = self.root\r\n self.create_trie_aux(current, genome, i) # O(N)\r\n i += 1", "def __init__(self):\n self.trie = Node()", "def trie_walk_yielding(root, yieldfunc, seen=[], preceder=[], level=1, level_keys=[]):\n level_keys.append(list(root.keys()))\n subtrees = [root.get(k) for k in root.keys()]\n # yield subtrees\n for i, subtree in enumerate(subtrees):\n sk = list(root.keys())[i]\n seen.append(sk)\n if subtree == {None: None}:\n # the subtree is a leaf\n yield from yieldfunc(preceder, seen, level)\n gone = seen.pop() # leaf will not be remembered (after being shown)\n if i == len(subtrees) - 1:\n popped = seen.pop()\n preceder.pop()\n level_keys.pop()\n level -= 1\n if i == len(subtrees) - 1:\n if level_keys[len(preceder)][0] is None:\n while (\n level_keys[len(preceder)][0] is None\n and popped == level_keys[len(preceder)][-1]\n ):\n popped = seen.pop()\n preceder.pop()\n level_keys.pop()\n level -= 1\n elif popped == level_keys[len(preceder)][-1]:\n while popped == level_keys[len(preceder)][-1]:\n popped = seen.pop()\n preceder.pop()\n level_keys.pop()\n level -= 1\n continue\n elif subtree is None:\n # the 'subtree' is a 'null child' indicating the parent is 'also a leaf'\n popped = seen.pop() # leaf will not be remembered (nor shown at all)\n yield from yieldfunc(preceder, seen, level)\n continue\n subtree_keys = list(subtree.keys())\n 
preceder.append(sk)\n yield from trie_walk_yielding(\n subtree, yieldfunc, seen, preceder, level + 1, level_keys\n )", "def __init__(self):\n self.trie = TrieNode()", "def __init__(self):\n self.trie = TrieNode()", "def __init__(self):\n # {\n # 'a':{\n # 'p':{\n # 'p':{\n # 'l':{\n # 'e':{'\\0':{}}\n # },\n # '\\0':{}\n # }\n # }\n # }\n # }\n self.trie = dict()", "def test_traversal_on_input_word_with_no_following_letters(full_trie):\n full_trie.insert(\"yo\")\n assert list(full_trie.traversal(\"yo\")) == []", "def tricky_trie():\n from trie import Trie\n trie = Trie()\n trie.insert('bbbbbbb')\n trie.insert('bbb')\n trie.insert('lololololol')\n trie.insert('blololololol')\n return trie", "def test_traverse_string_on_empty_trie(empty_trie):\n assert list(empty_trie.traversal('hello')) == []", "def test_traversal_with_no_input_string_returns_trie(full_trie):\n assert list(full_trie.traversal()) == ['h', 'e', 'y']\n assert list(full_trie.traversal('')) == ['h', 'e', 'y']", "def test_traversal_on_word_with_no_following_letters(full_trie):\n assert list(full_trie.traversal(\"hey\")) == []", "def test_traversal_with_string_not_in_trie(full_trie):\n assert list(full_trie.traversal(\"goodbye\")) == []", "def __init__(self):\r\n self.trie = Trie()", "def __init__(self):\n self.trie = dict()", "def __init__(self):\n self.trie = dict()", "def __init__(self):\n self.trie = Trie()", "def test_traversal_on_test_trie_from_middle(traversal_test_trie):\n gen = traversal_test_trie.traversal('alp')\n assert list(gen) == ['h', 'a', 'a', 'c', 'a']", "def __init__(self):\n self.trie = {}\n self.end = \"**\"" ]
[ "0.6288311", "0.56712145", "0.56443197", "0.5567986", "0.55481374", "0.54169506", "0.5246231", "0.5213265", "0.51985264", "0.51985264", "0.51985264", "0.51985264", "0.5185277", "0.5173819", "0.5107165", "0.50870496", "0.50870496", "0.50827366", "0.50448364", "0.50392336", "0.50298274", "0.5028435", "0.50262207", "0.50178415", "0.5000329", "0.49546996", "0.49546996", "0.49259967", "0.49029493", "0.4881592" ]
0.6903136
0
Walks through the storage trie at the given root, yielding one missing storage node hash/prefix at a time. The yielded node info is wrapped in a ``TrackedRequest``. The hash is marked as active until it is explicitly marked for review again. The hash/prefix will be marked for review asking a peer for the data. Will exit when all known node hashes are already actively being requested, or if there are no more missing nodes.
async def _missing_storage_hashes( self, address_hash_nibbles: Nibbles, storage_root: Hash32, starting_main_root: Hash32) -> AsyncIterator[TrackedRequest]: if storage_root == BLANK_NODE_HASH: # Nothing to do if the storage has an empty root return storage_tracker = self._get_storage_tracker(address_hash_nibbles) while self.manager.is_running: storage_iterator = self._request_tracking_trie_items( storage_tracker, storage_root, ) try: async for path_to_leaf, hashed_key, _storage_value in storage_iterator: # We don't actually care to look at the storage keys/values during backfill storage_tracker.confirm_leaf(path_to_leaf) except trie_exceptions.MissingTraversalNode as exc: yield storage_tracker.generate_request( exc.missing_node_hash, exc.nibbles_traversed, ) else: # Possible scenarios: # 1. We have completed backfilling this account's storage # 2. We have iterated the available nodes, and only their children are missing, # for example: if 0 nodes are available, and we walk to the root and request # the root from a peer, we do not have any available information to ask for # more nodes. # # In response to these situations, we might like to: # 1. Debug log? # 2. Look for more missing nodes in neighboring accounts and their storage, etc. # # 1 and 2 are a little more cleanly handled outside this iterator, so we just # exit and let the caller deal with it. return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def _request_tracking_trie_items(\n self,\n request_tracker: TrieNodeRequestTracker,\n root_hash: Hash32) -> AsyncIterator[Tuple[Nibbles, Nibbles, bytes]]:\n if self._next_trie_root_hash is None:\n # We haven't started beam syncing, so don't know which root to start at\n return\n trie = HexaryTrie(self._db, root_hash)\n\n starting_index = bytes_to_nibbles(root_hash)\n\n while self.manager.is_running:\n try:\n path_to_node = request_tracker.next_path_to_explore(starting_index)\n except trie_exceptions.PerfectVisibility:\n # This doesn't necessarily mean we are finished.\n # Any active prefixes might still be hiding some significant portion of the trie\n # But it's all we're able to explore for now, until more node data arrives\n return\n\n try:\n cached_node, uncached_key = request_tracker.get_cached_parent(path_to_node)\n except KeyError:\n cached_node = None\n node_getter = partial(trie.traverse, path_to_node)\n else:\n node_getter = partial(trie.traverse_from, cached_node, uncached_key)\n\n try:\n node = node_getter()\n except trie_exceptions.MissingTraversalNode as exc:\n # Found missing account trie node\n if path_to_node == exc.nibbles_traversed:\n raise\n elif cached_node is None:\n # The path and nibbles traversed should always match in a non-cached traversal\n raise RuntimeError(\n f\"Unexpected: on a non-cached traversal to {path_to_node}, the\"\n f\" exception only claimed to traverse {exc.nibbles_traversed} -- {exc}\"\n ) from exc\n else:\n # We need to re-raise a version of the exception that includes the whole path\n # from the root node (when using cached nodes, we only have the path from\n # the parent node to the child node)\n # We could always raise this re-wrapped version, but skipping it (probably?)\n # improves performance.\n missing_hash = exc.missing_node_hash\n raise trie_exceptions.MissingTraversalNode(missing_hash, path_to_node) from exc\n except trie_exceptions.TraversedPartialPath as exc:\n node = exc.simulated_node\n\n if node.value:\n full_key_nibbles = path_to_node + node.suffix\n\n if len(node.sub_segments):\n # It shouldn't be a problem to skip handling this case, because all keys are\n # hashed 32 bytes.\n raise NotImplementedError(\n \"The state backfiller doesn't handle keys of different lengths, where\"\n f\" one key is a prefix of another. But found {node} in trie with\"\n f\" {root_hash!r}\"\n )\n\n yield path_to_node, full_key_nibbles, node.value\n # Note that we do not mark value nodes as completed. It is up to the caller\n # to do that when it is ready. For example, the storage iterator will\n # immediately treat the key as completed. The account iterator will\n # not treat the key as completed until all of its storage and bytecode\n # are also marked as complete.\n else:\n # If this is just an intermediate node, then we can mark it as confirmed.\n request_tracker.confirm_prefix(path_to_node, node)", "async def _missing_trie_hashes(self) -> AsyncIterator[TrackedRequest]:\n # For each account, when we have asked for all known storage and bytecode\n # hashes, but some are still not present, we \"pause\" the account so we can look\n # for neighboring nodes.\n # This is a list of paused accounts, using the path to the leaf node,\n # because that's how the account tracker is indexed.\n exhausted_account_leaves: Tuple[Nibbles, ...] 
= ()\n\n starting_root_hash = self._next_trie_root_hash\n\n try:\n while self.manager.is_running:\n # Get the next account\n\n # We have to rebuild the account iterator every time because...\n # something about an exception during a manual __anext__()?\n account_iterator = self._request_tracking_trie_items(\n self._account_tracker,\n starting_root_hash,\n )\n try:\n next_account_info = await account_iterator.__anext__()\n except trie_exceptions.MissingTraversalNode as exc:\n # Found a missing trie node while looking for the next account\n yield self._account_tracker.generate_request(\n exc.missing_node_hash,\n exc.nibbles_traversed,\n )\n continue\n except StopAsyncIteration:\n # Finished iterating over all available accounts\n break\n\n # Decode account\n path_to_leaf, address_hash_nibbles, encoded_account = next_account_info\n account = rlp.decode(encoded_account, sedes=Account)\n\n # Iterate over all missing hashes of subcomponents (storage & bytecode)\n subcomponent_hashes_iterator = self._missing_subcomponent_hashes(\n address_hash_nibbles,\n account,\n starting_root_hash,\n )\n async for node_request in subcomponent_hashes_iterator:\n yield node_request\n\n # Check if account is fully downloaded\n account_components_complete = self._are_account_components_complete(\n address_hash_nibbles,\n account,\n )\n if account_components_complete:\n # Mark fully downloaded accounts as complete, and do some cleanup\n self._mark_account_complete(path_to_leaf, address_hash_nibbles)\n else:\n # Pause accounts that are not fully downloaded, and track the account\n # to resume when the generator exits.\n self._account_tracker.pause_review(path_to_leaf)\n exhausted_account_leaves += (path_to_leaf, )\n\n except GeneratorExit:\n # As the generator is exiting, we want to resume any paused accounts. This\n # allows us to find missing storage/bytecode on the next iteration.\n for path_to_leaf in exhausted_account_leaves:\n self._account_tracker.mark_for_review(path_to_leaf)\n raise\n else:\n # If we pause a few accounts and then run out of nodes to ask for, then we\n # still need to resume the paused accounts to prepare for the next iteration.\n for path_to_leaf in exhausted_account_leaves:\n self._account_tracker.mark_for_review(path_to_leaf)\n\n # Possible scenarios:\n # 1. We have completed backfill\n # 2. We have iterated the available nodes, and all known hashes are being requested.\n # For example: if 0 nodes are available, and we walk to the root and request\n # the root from a peer, we do not have any available information to ask for\n # more nodes, and exit cleanly.\n #\n # In response to these situations, we might like to:\n # 1. Log and celebrate that the full state has been downloaded\n # 2. 
Exit this search and sleep a bit, waiting for new trie nodes to arrive\n #\n # 1 and 2 are a little more cleanly handled outside this iterator, so we just\n # exit and let the caller deal with it, using a _check_complete() check.\n return", "def _storage_review(\n self,\n storage_key: Hash32,\n storage_root_hash: Hash32) -> Set[Hash32]:\n with self._trie_db.at_root(storage_root_hash) as snapshot:\n try:\n # request the data just to see which part is missing\n snapshot[storage_key]\n except MissingTrieNode as exc:\n return {exc.missing_node_hash}\n else:\n return set()", "def test_create_empty_trie(empty_trie):\n assert empty_trie.root.children == {}\n assert empty_trie._size == 0", "def _account_review(\n self,\n account_address_hashes: Iterable[Hash32],\n root_hash: Hash32) -> Tuple[Set[Hash32], Dict[Hash32, bytes]]:\n need_nodes = set()\n completed_accounts = {}\n with self._trie_db.at_root(root_hash) as snapshot:\n for account_hash in account_address_hashes:\n try:\n account_rlp = snapshot[account_hash]\n except MissingTrieNode as exc:\n need_nodes.add(exc.missing_node_hash)\n else:\n completed_accounts[account_hash] = account_rlp\n\n return need_nodes, completed_accounts", "async def _missing_bytecode_hashes(\n self,\n address_hash_nibbles: Nibbles,\n code_hash: Hash32,\n starting_main_root: Hash32) -> AsyncIterator[TrackedRequest]:\n\n if code_hash == EMPTY_SHA3:\n # Nothing to do if the bytecode is for the empty hash\n return\n\n bytecode_tracker = self._get_bytecode_tracker(address_hash_nibbles)\n if bytecode_tracker.is_complete:\n # All bytecode has been collected\n return\n\n # If there is an active request (for now, there can only be one), then skip\n # any database checks until the active request is resolved.\n if not bytecode_tracker.has_active_requests:\n if code_hash not in self._db:\n # The bytecode isn't present, so we ask for it.\n # A bit hacky here, since there is no trie, we just treat it as\n # if it were a leaf node at the root.\n yield bytecode_tracker.generate_request(code_hash, prefix=())\n else:\n # The bytecode is already present, but the tracker isn't marked\n # as completed yet, so finish it off.\n bytecode_tracker.confirm_leaf(path_to_leaf=())", "async def _match_urgent_node_requests_to_peers(self) -> None:\n while self.manager.is_running:\n urgent_batch_id, urgent_hashes = await self._node_tasks.get(\n eth_constants.MAX_STATE_FETCH\n )\n\n # Get best peer, by GetNodeData speed\n queen = await self._queen_tracker.get_queen_peer()\n\n queen_is_requesting = queen.eth_api.get_node_data.is_requesting\n\n if queen_is_requesting:\n # Our best peer for node data has an in-flight GetNodeData request\n # Probably, backfill is asking this peer for data\n # This is right in the critical path, so we'd prefer this never happen\n self.logger.debug(\n \"Want to download urgent data, but %s is locked on other request\",\n queen,\n )\n # Don't do anything different, allow the request lock to handle the situation\n\n self._num_urgent_requests_by_peer[queen] += 1\n self._urgent_requests += 1\n\n await self._find_urgent_nodes(\n queen,\n urgent_hashes,\n urgent_batch_id,\n )", "def auto_create_filesystem(self):\n\n key = self.km.gpg_key['fingerprint']\n root = yield BuddyNode.get_node(self.start_port, self.known_ip,\n self.known_port).get_root(key)\n\n if root:\n self.tree.register_root_inode(root)\n else:\n logger.info('Did not find existing root inode pointer.'\n ' Generating new root inode pointer.')\n self.tree.generate_root_inode()", "async def 
_match_predictive_node_requests_to_peers(self) -> None:\n # If self._queen_tracker terminates we need to exit as well, so check that on every\n # iteration.\n while self.manager.is_running and self._queen_tracker.get_manager().is_running:\n try:\n batch_id, hashes = await asyncio.wait_for(\n self._maybe_useful_nodes.get(eth_constants.MAX_STATE_FETCH),\n timeout=TOO_LONG_PREDICTIVE_PEER_DELAY,\n )\n except asyncio.TimeoutError:\n # Reduce the number of predictive peers, we seem to have plenty\n if self._min_predictive_peers > 0:\n self._min_predictive_peers -= 1\n self.logger.debug(\n \"Decremented predictive peers to %d\",\n self._min_predictive_peers,\n )\n # Re-attempt\n continue\n\n # Find any hashes that were discovered through other means, like urgent requests:\n existing_hashes = await asyncio.get_event_loop().run_in_executor(\n None,\n self._get_unique_present_hashes,\n hashes,\n )\n # If any hashes are already found, clear them out and retry\n if existing_hashes:\n # Wake up any paused preview threads\n await self._wakeup_preview_waiters(existing_hashes)\n # Clear out any tasks that are no longer necessary\n await self._maybe_useful_nodes.complete(batch_id, tuple(existing_hashes))\n # Restart from the top\n continue\n\n try:\n peer = await asyncio.wait_for(\n self._queen_tracker.pop_fastest_peasant(),\n timeout=TOO_LONG_PREDICTIVE_PEER_DELAY,\n )\n except asyncio.TimeoutError:\n # Increase the minimum number of predictive peers, we seem to not have enough\n new_predictive_peers = min(\n self._min_predictive_peers + 1,\n # Don't reserve more than half the peers for prediction\n self._num_peers // 2,\n )\n if new_predictive_peers != self._min_predictive_peers:\n self.logger.debug(\n \"Updating predictive peer count from %d to %d\",\n self._min_predictive_peers,\n new_predictive_peers,\n )\n self._min_predictive_peers = new_predictive_peers\n\n cancel_attempt = True\n else:\n if peer.eth_api.get_node_data.is_requesting:\n self.logger.debug(\n \"Want predictive nodes from %s, but it has an active request, skipping...\",\n peer,\n )\n self._queen_tracker.insert_peer(peer, NON_IDEAL_RESPONSE_PENALTY)\n cancel_attempt = True\n else:\n cancel_attempt = False\n\n if cancel_attempt:\n # Prepare to restart\n await self._maybe_useful_nodes.complete(batch_id, ())\n continue\n\n self._num_predictive_requests_by_peer[peer] += 1\n self._predictive_requests += 1\n\n self.manager.run_task(\n self._get_predictive_nodes_from_peer,\n peer,\n hashes,\n batch_id,\n )\n\n if self.manager.is_running and not self._queen_tracker.get_manager().is_running:\n self.logger.info(\n \"Backfill is complete, halting predictive downloads...\"\n )", "async def download_storage(\n self,\n storage_key: Hash32,\n storage_root_hash: Hash32,\n account: Address,\n block_number: BlockNumber,\n urgent: bool = True) -> int:\n # should never take more than 64 attempts to get a full account\n for num_downloads_required in range(64):\n need_nodes = await self._run_preview_in_thread(\n urgent,\n self._storage_review,\n storage_key,\n storage_root_hash,\n )\n if need_nodes:\n await self.ensure_nodes_present(need_nodes, block_number, urgent)\n else:\n # Account is fully available within the trie\n return num_downloads_required\n else:\n raise Exception(\n f\"State Downloader failed to download storage 0x{storage_key.hex()} in \"\n f\"{to_checksum_address(account)} at storage root 0x{storage_root_hash.hex()} \"\n f\"in 64 runs.\"\n )", "def test_init_skips_touch_bucket_if_local_network_id_is_key(self):\n 
self.node.routing_table.touch_bucket = mock.MagicMock()\n Lookup(FindNode, self.node.network_id, self.node, self.event_loop)\n self.assertEqual(self.node.routing_table.touch_bucket.call_count, 0)", "def recoverTree(self, root: TreeNode) -> None:\n self.nodes = {}\n nodes = self.DFS(root)\n \n wrong, i = [], 1\n while i <= len(nodes)-1 and len(wrong) < 2:\n if nodes[i].val < nodes[i-1].val:\n wrong.append((i-1, i))\n i += 1\n\n if len(wrong) == 1: # case I: i-1, i switch\n a, b = wrong[0]\n elif len(wrong) == 2: # case II: i, j switch, j >= i+1\n a, b = wrong[0][0], wrong[1][1]\n \n nodes[a].val, nodes[b].val = nodes[b].val, nodes[a].val\n \n return root", "async def _node_hashes_present(self, node_hashes: Set[Hash32], urgent: bool) -> int:\n remaining_hashes = node_hashes.copy()\n timeout = BLOCK_IMPORT_MISSING_STATE_TIMEOUT\n\n start_time = time.monotonic()\n if not urgent:\n wait_event = asyncio.Event()\n self._preview_events[wait_event] = node_hashes\n while remaining_hashes and time.monotonic() - start_time < timeout:\n if urgent:\n await self._new_data_event.wait()\n self._new_data_event.clear()\n else:\n try:\n await asyncio.wait_for(\n wait_event.wait(),\n timeout=CHECK_PREVIEW_STATE_TIMEOUT,\n )\n except asyncio.TimeoutError:\n # Check if the data showed up due to an urgent import\n preview_timeout = True\n pass\n else:\n preview_timeout = False\n finally:\n wait_event.clear()\n\n found_hashes = await self._run_preview_in_thread(\n urgent,\n self._get_unique_present_hashes,\n remaining_hashes,\n )\n\n if not urgent:\n if preview_timeout:\n self._predictive_found_nodes_during_timeout += len(found_hashes)\n else:\n self._predictive_found_nodes_woke_up += len(found_hashes)\n\n if found_hashes:\n remaining_hashes -= found_hashes\n if not urgent and remaining_hashes:\n self._preview_events[wait_event] = remaining_hashes\n\n if not urgent:\n del self._preview_events[wait_event]\n\n if remaining_hashes:\n if urgent:\n logger = self.logger.error\n else:\n logger = self.logger.warning\n logger(\n \"Could not collect node data for %d %s hashes %r within %.0f seconds (took %.1fs)\",\n len(remaining_hashes),\n \"urgent\" if urgent else \"preview\",\n list(remaining_hashes)[0:2],\n timeout,\n time.monotonic() - start_time,\n )\n\n return len(node_hashes) - len(remaining_hashes)", "def walkTree(self, root, default_action):\n node = root\n while True:\n actionToWeight = node.getActionToWeightMap()\n if actionToWeight == {}:\n return node\n action = util.selectRandomKey(actionToWeight, default_action)\n nextNode = node.getSuccInTree(action)\n if nextNode == None:\n return node\n node = nextNode", "def test_traverse_on_empty_trie(empty_trie):\n assert list(empty_trie.traversal()) == []", "def find_reachable_nodes(self):\n # find all reachable nodes down from the goal\n found = {}\n found[id(self.root)] = self.root\n queue = [self.root]\n #print >>sys.stderr, '---'\n while queue:\n node = queue.pop(0)\n if hasattr(node, 'dead'):\n if node.dead:\n #print >>sys.stderr, 'dead', node\n continue\n assert not node.dead\n for edge in node.incoming:\n for tailnode in edge.tail:\n #print >>sys.stderr, tailnode\n if id(tailnode) not in found:\n found[id(tailnode)] = tailnode\n queue.append(tailnode)\n tailnode.nout = 0\n tailnode.nout += 1\n # save for sanity check\n self.found = found", "def empty_trie():\n from trie import Trie\n trie = Trie()\n return trie", "def unpacktrie(x): ###\n t = trie()\n t.nodes = []\n ind = 0\n rlut = {}\n if progress != None:\n pr = progress(len(x), \"unpacking\")\n while ind < 
len(x):\n rlut[ind] = len(t.nodes)\n node, ind = unpacknode(x, ind)\n t.nodes.append(node)\n nnodes = len(t.nodes)\n if nnodes % 5000 == 0 and progress != None:\n pr.update(ind, \"(%d nodes)\" % nnodes)\n for node in t.nodes:\n node.parent = rlut[node.parent]\n for key, val in list(node.children.items()):\n node.children[key] = rlut[val]\n return t", "def resolve(self): # HashMap.resolve\n prevCount = self.allFiles.count_deleted()\n\n # no need to resolve uniques, so remove them from the HashMap\n deleteList=[]\n for hashval, list in self.contentHash.iteritems():\n if len(list) == 1:\n deleteList.append(hashval)\n for e in deleteList:\n del self.contentHash[e]\n\n # delete the directories first, in order of\n # increasing depth\n if verbose:\n print '# checking candidates from depth ' + str(self.minDepth) + ' through ' + str(self.maxDepth)\n for currentDepth in xrange(self.minDepth-1,self.maxDepth+1):\n for hashval, list in self.contentHash.iteritems():\n example = list[0]\n if isinstance(example, DirObj):\n winner, losers = resolve_candidates(list, currentDepth)\n if losers != None:\n for loser in losers:\n if not loser.deleted:\n if verbose:\n print '# dir \"' + loser.pathname + '\" covered by \"' + winner.pathname + '\"'\n self.delete(loser)\n loser.winner = winner\n self.prune()\n\n for hashval, list in self.contentHash.iteritems():\n example = list[0] \n if isinstance(example, FileObj):\n winner, losers = resolve_candidates(list)\n for loser in losers:\n if not loser.deleted:\n if verbose:\n print '# file \"' + loser.pathname + '\" covered by \"' + winner.pathname + '\"'\n self.delete(loser)\n loser.winner = winner\n\n return self.allFiles.count_deleted() - prevCount", "def empty_trie():\n trie = Trie()\n return trie", "def test_traversal_on_test_trie_from_root(traversal_test_trie):\n gen = traversal_test_trie.traversal()\n assert list(gen) == ['a', 'l', 'p', 'h', 'a', 'a', 'c', 'a', 'b', 'o', 'y']", "def recur(self, tyme):\n super(GetDoer, self).recur(tyme)\n while len(self.queue) > 0:\n log.debug(f\"Processing `recur` for GetDoer; queue len={len(self.queue)}.\")\n callback, identifier, key = self.queue.popleft()\n result = LookupValues(identifier, key, callback)\n self.node.get(dht.InfoHash.get(key), get_cb=result.get_cb, done_cb=result.done_cb)", "def __init__(self):\n self.root = TrieNode()\n # self.root = {}", "def process(introspection_data):\n unprocessed_data = copy.deepcopy(introspection_data)\n failures = []\n _run_pre_hooks(introspection_data, failures)\n node_info = _find_node_info(introspection_data, failures)\n if node_info:\n # Locking is already done in find_node() but may be not done in a\n # node_not_found hook\n node_info.acquire_lock()\n if failures or node_info is None:\n msg = _('The following failures happened during running '\n 'pre-processing hooks:\\n%s') % '\\n'.join(failures)\n if node_info is not None:\n node_info.finished(istate.Events.error, error='\\n'.join(failures))\n _store_logs(introspection_data, node_info)\n raise utils.Error(msg, node_info=node_info, data=introspection_data)\n LOG.info('Matching node is %s', node_info.uuid,\n node_info=node_info, data=introspection_data)\n\n if node_info.finished_at is not None:\n # race condition or introspection canceled\n raise utils.Error(_('Node processing already finished with '\n 'error: %s') % node_info.error,\n node_info=node_info, code=400)\n # NOTE(TheJulia): this was previously called as a background\n # process, but we can't do that with sqlite.\n _store_unprocessed_data(node_info.uuid, 
unprocessed_data)\n\n try:\n node = node_info.node()\n except ir_utils.NotFound as exc:\n with excutils.save_and_reraise_exception():\n node_info.finished(istate.Events.error, error=str(exc))\n _store_logs(introspection_data, node_info)\n\n try:\n result = _process_node(node_info, node, introspection_data)\n except utils.Error as exc:\n node_info.finished(istate.Events.error, error=str(exc))\n with excutils.save_and_reraise_exception():\n _store_logs(introspection_data, node_info)\n except Exception as exc:\n LOG.exception('Unexpected exception during processing')\n msg = _('Unexpected exception %(exc_class)s during processing: '\n '%(error)s') % {'exc_class': exc.__class__.__name__,\n 'error': exc}\n node_info.finished(istate.Events.error, error=msg)\n _store_logs(introspection_data, node_info)\n raise utils.Error(msg, node_info=node_info, data=introspection_data,\n code=500)\n\n if CONF.processing.always_store_ramdisk_logs:\n _store_logs(introspection_data, node_info)\n return result", "async def start_watching_roots(self):\n db.clear_visits(self.db_conn)\n for root in self.config.roots:\n await self.watch_tree(root)\n\n for path in db.get_unvisited_files(self.db_conn):\n print(path)\n await self.process_change(path, None)", "def test_empty_tree_size(empty_trie):\n assert empty_trie.size == 0", "async def create_checkpoint_store_if_not_exists_async(self):", "async def calc_nodes(height, balances, zero_nodes_lookup, root_index, hash_func):\n if len(balances) == 0:\n return {root_index: zero_nodes_lookup[height]}\n if height == 0:\n assert len(balances) == 1\n _, vault_data = balances[-1]\n balance = int(vault_data['amount'])\n # A node with balance=0 is considered uninitialized.\n if balance == 0:\n return {root_index: zero_nodes_lookup[0]}\n stark_key = int(vault_data['stark_key'])\n token_id = int(vault_data['token_id'])\n return {root_index: await vault_hash_async(stark_key, token_id, balance, hash_func)}\n mid = 2 ** (height - 1)\n left_balances = [(i, data) for i, data in balances if i < mid]\n right_balances = [(i - mid, data) for i, data in balances if i >= mid]\n left, right = await asyncio.gather(\n calc_nodes(height - 1, left_balances, zero_nodes_lookup, 2 * root_index, hash_func),\n calc_nodes(height - 1, right_balances, zero_nodes_lookup, 2 * root_index + 1, hash_func))\n nodes = {root_index: await hash_func(left[2 * root_index], right[2 * root_index + 1])}\n nodes.update(left)\n nodes.update(right)\n return nodes", "def test_scan_recursive(self):\n self.run_scan(self.tempdir, self.root_fcount + self.nest_fcount + 1)", "def create_trie_aux(self, current, key, i):\r\n if i > len(key):\r\n return\r\n else:\r\n # Calculate index\r\n # $ = 0, A = 1, B = 2, C=3, D=4\r\n if i == len(key):\r\n index = 0\r\n else:\r\n index = ord(key[i]) - 65 + 1\r\n data = key[i]\r\n # If path exist\r\n if current.link[index] is not None:\r\n current = current.link[index]\r\n current.all_index.append(i)\r\n\r\n # If path doesn't exist\r\n else:\r\n current.link[index] = Node_2()\r\n current = current.link[index]\r\n current.all_index.append(i)\r\n\r\n # Increments the frequency of occurrence\r\n i += 1\r\n # recur\r\n self.create_trie_aux(current, key, i)" ]
[ "0.70497036", "0.6926985", "0.5630191", "0.48730353", "0.48023686", "0.47926912", "0.4764393", "0.46911123", "0.4685286", "0.4670354", "0.46662426", "0.4628357", "0.46207827", "0.45691854", "0.45436504", "0.45341483", "0.44921196", "0.4481057", "0.44659668", "0.4446959", "0.4409526", "0.4406358", "0.43713477", "0.4330247", "0.43084478", "0.42874962", "0.42822623", "0.4276372", "0.42547506", "0.42471585" ]
0.766844
0
Checks if this bytecode is missing. If so, yield it and then exit. If not, then exit immediately. This may seem like overkill, and it is right now. But... Code merkelization is coming (theoretically), and the other account and storage trie iterators work similarly to this, so in some ways it's easier to do this "overgeneralized" solution now. It makes request tracking a bit easier too, to have the same TrackedRequest result mechanism.
async def _missing_bytecode_hashes( self, address_hash_nibbles: Nibbles, code_hash: Hash32, starting_main_root: Hash32) -> AsyncIterator[TrackedRequest]: if code_hash == EMPTY_SHA3: # Nothing to do if the bytecode is for the empty hash return bytecode_tracker = self._get_bytecode_tracker(address_hash_nibbles) if bytecode_tracker.is_complete: # All bytecode has been collected return # If there is an active request (for now, there can only be one), then skip # any database checks until the active request is resolved. if not bytecode_tracker.has_active_requests: if code_hash not in self._db: # The bytecode isn't present, so we ask for it. # A bit hacky here, since there is no trie, we just treat it as # if it were a leaf node at the root. yield bytecode_tracker.generate_request(code_hash, prefix=()) else: # The bytecode is already present, but the tracker isn't marked # as completed yet, so finish it off. bytecode_tracker.confirm_leaf(path_to_leaf=())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def disable_bytecode_generation():\n sentinel, sys.dont_write_bytecode = sys.dont_write_bytecode, True\n\n try:\n yield\n finally:\n sys.dont_write_bytecode = sentinel", "async def _missing_trie_hashes(self) -> AsyncIterator[TrackedRequest]:\n # For each account, when we have asked for all known storage and bytecode\n # hashes, but some are still not present, we \"pause\" the account so we can look\n # for neighboring nodes.\n # This is a list of paused accounts, using the path to the leaf node,\n # because that's how the account tracker is indexed.\n exhausted_account_leaves: Tuple[Nibbles, ...] = ()\n\n starting_root_hash = self._next_trie_root_hash\n\n try:\n while self.manager.is_running:\n # Get the next account\n\n # We have to rebuild the account iterator every time because...\n # something about an exception during a manual __anext__()?\n account_iterator = self._request_tracking_trie_items(\n self._account_tracker,\n starting_root_hash,\n )\n try:\n next_account_info = await account_iterator.__anext__()\n except trie_exceptions.MissingTraversalNode as exc:\n # Found a missing trie node while looking for the next account\n yield self._account_tracker.generate_request(\n exc.missing_node_hash,\n exc.nibbles_traversed,\n )\n continue\n except StopAsyncIteration:\n # Finished iterating over all available accounts\n break\n\n # Decode account\n path_to_leaf, address_hash_nibbles, encoded_account = next_account_info\n account = rlp.decode(encoded_account, sedes=Account)\n\n # Iterate over all missing hashes of subcomponents (storage & bytecode)\n subcomponent_hashes_iterator = self._missing_subcomponent_hashes(\n address_hash_nibbles,\n account,\n starting_root_hash,\n )\n async for node_request in subcomponent_hashes_iterator:\n yield node_request\n\n # Check if account is fully downloaded\n account_components_complete = self._are_account_components_complete(\n address_hash_nibbles,\n account,\n )\n if account_components_complete:\n # Mark fully downloaded accounts as complete, and do some cleanup\n self._mark_account_complete(path_to_leaf, address_hash_nibbles)\n else:\n # Pause accounts that are not fully downloaded, and track the account\n # to resume when the generator exits.\n self._account_tracker.pause_review(path_to_leaf)\n exhausted_account_leaves += (path_to_leaf, )\n\n except GeneratorExit:\n # As the generator is exiting, we want to resume any paused accounts. This\n # allows us to find missing storage/bytecode on the next iteration.\n for path_to_leaf in exhausted_account_leaves:\n self._account_tracker.mark_for_review(path_to_leaf)\n raise\n else:\n # If we pause a few accounts and then run out of nodes to ask for, then we\n # still need to resume the paused accounts to prepare for the next iteration.\n for path_to_leaf in exhausted_account_leaves:\n self._account_tracker.mark_for_review(path_to_leaf)\n\n # Possible scenarios:\n # 1. We have completed backfill\n # 2. We have iterated the available nodes, and all known hashes are being requested.\n # For example: if 0 nodes are available, and we walk to the root and request\n # the root from a peer, we do not have any available information to ask for\n # more nodes, and exit cleanly.\n #\n # In response to these situations, we might like to:\n # 1. Log and celebrate that the full state has been downloaded\n # 2. 
Exit this search and sleep a bit, waiting for new trie nodes to arrive\n #\n # 1 and 2 are a little more cleanly handled outside this iterator, so we just\n # exit and let the caller deal with it, using a _check_complete() check.\n return", "def _enable_scan_single_bytecode(code, name):\n bc = bytecode.Bytecode.from_code(code)\n Instr = bytecode.Instr\n\n # Updates LOAD_GLOBAL to LOAD_FAST when arg is name\n for instr in bc:\n if isinstance(instr, Instr) \\\n and instr.name == \"LOAD_GLOBAL\" and instr.arg == name:\n instr.set(\"LOAD_FAST\", name)\n\n # Some needed information from the first/main FOR_ITER and the heading\n # \"filter\" part of the generator expression or list/set comprehension\n for_idx = next(idx for idx, instr in enumerate(bc)\n if getattr(instr, \"name\", None) == \"FOR_ITER\")\n for_instr = bc[for_idx]\n begin_label_idx = for_idx - 1\n try:\n filter_last_idx = last(idx for idx, instr in enumerate(bc)\n if isinstance(instr, Instr)\n and instr.is_cond_jump()\n and instr.arg == begin_label_idx)\n except StopIteration:\n filter_last_idx = for_idx\n\n # Adds the block before the loop (i.e., first label) to append/add/yield\n # the first input directly from FOR_ITER and save the first \"prev\"\n # accordingly\n heading_instructions = [(\"DUP_TOP\",),\n (\"STORE_FAST\", name)] + {\n \"<listcomp>\": [(\"LIST_APPEND\", 2)],\n \"<setcomp>\": [(\"SET_ADD\", 2)],\n \"<genexpr>\": [(\"YIELD_VALUE\",),\n (\"POP_TOP\",)]\n }[bc.name]\n bc[begin_label_idx:begin_label_idx] = (\n [instr.copy() for instr in bc[for_idx:filter_last_idx + 1]] +\n [Instr(*args) for args in heading_instructions]\n )\n\n # Adds ending block that stores the result to prev before a new iteration\n loop_instructions = [\"SET_ADD\", \"LIST_APPEND\", \"YIELD_VALUE\"]\n ending_idx = next(-idx for idx, instr in enumerate(reversed(bc), 1)\n if isinstance(instr, Instr)\n and instr.name in loop_instructions)\n ending_instructions = [(\"DUP_TOP\",),\n (\"STORE_FAST\", name)]\n bc[ending_idx:ending_idx] = \\\n [Instr(*args) for args in ending_instructions]\n\n return bc.to_code()", "def is_incomplete(source, filename, symbol):\n try:\n code = self.compile(source, filename, symbol)\n except (OverflowError, SyntaxError, ValueError):\n return False\n if code is None:\n return True\n return False", "def Next():\n return CheckForError(lib.Generators_Get_Next())", "def irgen_skip(stmt, builder, table):\n pass", "def skip_gzip_check(self):\r\n _read_eof = gzip._GzipReader._read_eof\r\n gzip.GzipFile._read_eof = lambda *args, **kwargs: None\r\n yield\r\n gzip.GzipFile._read_eof = _read_eof", "async def unhandled_response(self, pkt, source):\n if False:\n yield None", "def _next_exhausted(self):\n\n raise StopIteration() from None", "def test_iterator(self):\n with open(get_test_file('example-iana.org-chunked.warc'), 'rb') as fh:\n with closing(ArchiveIterator(fh)) as a:\n for record in a:\n assert record.rec_type == 'warcinfo'\n break\n\n record = next(a)\n assert record.rec_type == 'response'\n\n for record in a:\n assert record.rec_type == 'request'\n break\n\n with pytest.raises(StopIteration):\n record = next(a)\n\n assert a.record == None\n assert a.reader == None\n assert a.read_to_end() == None", "def missing_tiles(mbtiles, required_tiles):\n for tile in required_tiles:\n if not mbtiles.tile_exists(tile.x, tile.y, tile.z):\n yield tile", "def __call__(self, input=None): # pragma: no cover\n while False:\n yield None", "async def extra_make_response(self, pkt, source):\n if False:\n yield None", "def 
no_builtin_verification():\n current_space().skip_builtin_verification = True\n yield\n current_space().skip_builtin_verification = False", "def has_next():", "def __emptygen():\n if False:\n yield", "def cache_code(self):\n\n # Generate the prologue\n self._synthesize_prologue()\n\n # Don't have a real epilogue.\n self.add(spu.stop(0x2000))\n # self._check_alignment(self._code, 'spu code')\n\n # self.exec_module.make_executable(self._code.buffer_info()[0], len(self._code))\n\n # Append our instructions to the prologue's, first making sure the alignment is correct.\n if len(self._prologue._code) % 2 == 1: # Odd number of instructions\n self._prologue.add(spu.lnop(0))\n\n self._prologue._code.extend(self._code)\n self._prologue._check_alignment(self._prologue._code, 'spu prologue')\n \n self._epilogue = self \n self._cached = True\n return", "def cache(self, code, *args, **kwargs):\n try:\n compiled = memoized_parse_block(code)\n except CoconutException:\n logger.display_exc()\n return None\n else:\n return super(CoconutCompiler, self).cache(compiled, *args, **kwargs)", "def dummy_code_block() -> CodeBlock:\n return make_dummy_code_block()", "def check_cachable(self, codelib):\n return not codelib.has_dynamic_globals", "def get_code(self, fullname):\n\t\tsource_path = self.get_filename(fullname)\n\t\tsource_mtime = None\n\t\ttry:\n\t\t\tbytecode_path = cache_from_source(source_path)\n\t\texcept NotImplementedError:\n\t\t\tbytecode_path = None\n\t\telse:\n\t\t\ttry:\n\t\t\t\tst = self.path_stats(source_path)\n\t\t\texcept NotImplementedError:\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tsource_mtime = int(st['mtime'])\n\t\t\t\ttry:\n\t\t\t\t\tdata = self.get_data(bytecode_path)\n\t\t\t\texcept IOError:\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tbytes_data = self._bytes_from_bytecode(fullname, data,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t bytecode_path,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t st)\n\t\t\t\t\texcept (ImportError, EOFError):\n\t\t\t\t\t\tpass\n\t\t\t\t\telse:\n\t\t\t\t\t\t_verbose_message('{} matches {}', bytecode_path,\n\t\t\t\t\t\t\t\t\t\tsource_path)\n\t\t\t\t\t\tfound = marshal.loads(bytes_data)\n\t\t\t\t\t\tif isinstance(found, _code_type):\n\t\t\t\t\t\t\t_imp._fix_co_filename(found, source_path)\n\t\t\t\t\t\t\t_verbose_message('code object from {}',\n\t\t\t\t\t\t\t\t\t\t\tbytecode_path)\n\t\t\t\t\t\t\treturn found\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tmsg = \"Non-code object in {}\"\n\t\t\t\t\t\t\traise ImportError(msg.format(bytecode_path),\n\t\t\t\t\t\t\t\t\t\t\t name=fullname, path=bytecode_path)\n\t\tsource_bytes = self.get_data(source_path)\n\t\tcode_object = self.source_to_code(source_bytes, source_path)\n\t\t_verbose_message('code object from {}', source_path)\n\t\tif (not sys.dont_write_bytecode and bytecode_path is not None and\n\t\t\tsource_mtime is not None):\n\t\t\tdata = bytearray(_MAGIC_BYTES)\n\t\t\tdata.extend(_w_long(source_mtime))\n\t\t\tdata.extend(_w_long(len(source_bytes)))\n\t\t\tdata.extend(marshal.dumps(code_object))\n\t\t\ttry:\n\t\t\t\tself._cache_bytecode(source_path, bytecode_path, data)\n\t\t\t\t_verbose_message('wrote {!r}', bytecode_path)\n\t\t\texcept NotImplementedError:\n\t\t\t\tpass\n\t\treturn code_object", "def get_next_as_optional(self):\n raise NotImplementedError(\"Iterator.get_next_as_optional()\")", "def getPossiblyUnreachableBlock(self):\n return self._unreachableBlock", "def irgen_continue(stmt, builder, table):\n tmp = builder.unreachable() \n if stmt.label:\n table.conts[tmp] = (builder.block, table[stmt.label])\n else:\n table.conts[tmp] 
= (builder.block, None)", "def _find_exit_only_bbs_x86(self, xbb):\n # Exit block\n # 000000000109218e^5 lock.atomic nop\n # 0000000001092193 jne 109217e # exit address\n if len(xbb.insts) != 2 or len(xbb.ins) != 2:\n return\n nop = xbb.insts[0]\n if nop.op != \"nop\" or len(nop.prefix) == 0:\n return\n if nop.prefix[0] != \"lock.atomic\":\n return\n jne = xbb.insts[1]\n if jne.op != \"jne\":\n return\n\n # Exit-only block from cmpxchg\n # 000000000109218e^2 lock.atomic.je.unlikely mov %rdx,0x0(%rbx)\n # 000000000109218e^3 lock.atomic.je.unlikely jmp 000000000109218e^5\n for xo_bb in xbb.ins:\n for inst in xo_bb.insts:\n if len(inst.prefix) == 0:\n xo_bb = None\n break\n if inst.prefix[0] != \"lock.atomic.je.unlikely\":\n xo_bb = None\n break\n if xo_bb != None:\n yield xo_bb", "async def _missing_storage_hashes(\n self,\n address_hash_nibbles: Nibbles,\n storage_root: Hash32,\n starting_main_root: Hash32) -> AsyncIterator[TrackedRequest]:\n\n if storage_root == BLANK_NODE_HASH:\n # Nothing to do if the storage has an empty root\n return\n\n storage_tracker = self._get_storage_tracker(address_hash_nibbles)\n while self.manager.is_running:\n storage_iterator = self._request_tracking_trie_items(\n storage_tracker,\n storage_root,\n )\n try:\n async for path_to_leaf, hashed_key, _storage_value in storage_iterator:\n # We don't actually care to look at the storage keys/values during backfill\n storage_tracker.confirm_leaf(path_to_leaf)\n\n except trie_exceptions.MissingTraversalNode as exc:\n yield storage_tracker.generate_request(\n exc.missing_node_hash,\n exc.nibbles_traversed,\n )\n else:\n # Possible scenarios:\n # 1. We have completed backfilling this account's storage\n # 2. We have iterated the available nodes, and only their children are missing,\n # for example: if 0 nodes are available, and we walk to the root and request\n # the root from a peer, we do not have any available information to ask for\n # more nodes.\n #\n # In response to these situations, we might like to:\n # 1. Debug log?\n # 2. Look for more missing nodes in neighboring accounts and their storage, etc.\n #\n # 1 and 2 are a little more cleanly handled outside this iterator, so we just\n # exit and let the caller deal with it.\n return", "def is_codegen(self):\r\n return self.has_label('codegen')", "def test_fork_missing_predecessor(self):\n bvh = self.BlockValidationHandler()\n\n root = self.btm.chain_head\n\n # generate candidate chain 3 long off the current head.\n new_block = self.btm.generate_chain(root, 3,\n {'add_to_cache': True})\n # remove one of the new blocks\n del self.btm.block_cache[new_block[1].identifier]\n\n bv = self.create_block_validator(new_block[-1], bvh.on_block_validated)\n bv.run()\n\n self.assertTrue(bvh.has_result())\n self.assertTrue(new_block[-1].status == BlockStatus.Invalid)\n self.assertFalse(bvh.result[\"commit_new_block\"])", "def next(self) -> Optional[Chainable]:\n return None", "def has_next():\n\n return True" ]
[ "0.5561266", "0.5535464", "0.5518014", "0.49592614", "0.4936246", "0.4916298", "0.48616886", "0.470336", "0.46492574", "0.4646029", "0.46459916", "0.46368012", "0.4590937", "0.45884967", "0.4579382", "0.4562117", "0.45618558", "0.45422032", "0.4526873", "0.4470122", "0.446246", "0.44544232", "0.44365343", "0.44359633", "0.4432314", "0.44144693", "0.441354", "0.44125953", "0.44118905", "0.4409639" ]
0.713805
0
Estimate the completed fraction of the trie that is contiguous with the current index (which rotates every 32 blocks) It will be probably be quite noticeable that it will get "stuck" when downloading a lot of storage, because we'll have to blow it up to more than a percentage to see any significant change within 32 blocks. (when the index will change again anyway)
def _contiguous_accounts_complete_fraction(self) -> float: starting_index = bytes_to_nibbles(self._next_trie_root_hash) unknown_prefixes = self._account_tracker._trie_fog._unexplored_prefixes if len(unknown_prefixes) == 0: return 1 # find the nearest unknown prefix (typically, on the right) nearest_index = unknown_prefixes.bisect(starting_index) # Get the nearest unknown prefix to the left if nearest_index == 0: left_prefix = (0, ) * 64 else: left_prefix = unknown_prefixes[nearest_index - 1] if key_starts_with(starting_index, left_prefix): # The prefix of the starting index is unknown, so the index # itself is unknown. return 0 # Get the nearest unknown prefix to the right if len(unknown_prefixes) == nearest_index: right_prefix = (0xf, ) * 64 else: right_prefix = unknown_prefixes[nearest_index] # Use the space between the unknown prefixes to estimate the completed contiguous fraction # At the base, every gap in the first nibble is a full 1/16th of the state complete known_first_nibbles = right_prefix[0] - left_prefix[0] - 1 completed_fraction_base = (1 / 16) * known_first_nibbles # Underneath, you can count completed subtrees on the right, each child 1/16 of the parent right_side_completed = sum( nibble * (1 / 16) ** nibble_depth for nibble_depth, nibble in enumerate(right_prefix[1:], 2) ) # Do the same on the left left_side_completed = sum( (0xf - nibble) * (1 / 16) ** nibble_depth for nibble_depth, nibble in enumerate(left_prefix[1:], 2) ) # Add up all completed areas return left_side_completed + completed_fraction_base + right_side_completed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fraction_completed(self):\n return sum(self._chunk_done.values()) / len(self.chunks)", "def get_utilization(self):\n child_prefixes = Prefix.objects.filter(prefix__net_contained_or_equal=str(self.prefix))\n # Remove overlapping prefixes from list of children\n networks = cidr_merge([c.prefix for c in child_prefixes])\n children_size = float(0)\n for p in networks:\n children_size += p.size\n return int(children_size / self.prefix.size * 100)", "def test_zPartialCurrents(self):\n # node 15 (ring=2, position=3), axial=3, group=3, j=1 (z-plus)\n iNode, iz, ig, j = 14, 2, 2, 0\n self.assertAlmostEqual(\n self.nhf.partialCurrentsZ[iNode, iz, j, ig] / 1.6928521e06, 1.0\n )", "def fractionPassing(self):\n return self.cut.entries / self.entries", "def _perc_up(self, cur_idx):\n while (cur_idx - 1) // 2 >= 0:\n parent_idx = (cur_idx - 1) // 2\n if self._heap[cur_idx] < self._heap[parent_idx]:\n self._heap[cur_idx], self._heap[parent_idx] = (\n self._heap[parent_idx],\n self._heap[cur_idx],\n )\n cur_idx = parent_idx", "def fifteen():\r\n\r\n currentcell = 1.0\r\n cellpaths = 2.0\r\n \r\n while currentcell < 20.0:\r\n currentcell += 1.0\r\n cellpaths = cellpaths * (4.0 - 2.0/currentcell)\r\n \r\n return cellpaths", "def modularity():\n\n q = 0.0\n for idx in range(0, node_count):\n if _tot[idx] > 0.0:\n q += (_in[idx] / m - math.pow(_tot[idx] / m, 2))\n return q", "def measure_gcd_success():\n for size in range(2,16):\n print(\"--------- samplesize = %d\" % size)\n d = dict()\n for _ in range(1000):\n q = findpoly(size)\n d.setdefault(q,0)\n d[q] += 1\n for k,v in sorted(d.items(), key=lambda x: x[1]):\n print(\"%5d: %8s\" % (v, k))", "def gc(self) -> float:\n g = self.count(\"G\")\n c = self.count(\"C\")\n return (g + c) / len(self) * 100", "def calc_GC(filepath):\n liste=['small.exon.piRNA_2.fa', 'small.exon.piRNA_1.fa', 'small.exon.piRNA_3.fa']\n \n length=list(range(0,34))\n d={}\n for i in length:\n d[i]={'A':0, 'G':0, 'T':0, 'C':0}\n for i in liste:\n with open(filepath+'/'+i, 'r') as f:\n for line in f:\n #fasta header starts with >\n if line.startswith('>'):\n pass\n else:\n line_l=list(line)\n for el in range(len(line_l)):\n if line_l[el]=='A':\n d[el]['A']+=1\n elif line_l[el]=='T':\n d[el]['T']+=1\n elif line_l[el]== 'G':\n d[el]['G']+=1\n elif line_l[el]== 'C':\n d[el]['C']+=1\n\n df=pd.DataFrame.from_dict(d)\n df=df.transpose()\n df.index = np.arange(1, len(df) + 1)\n \n\n df['A [%]']=df['A']/(df['A'].sum()+df['G'].sum()+df['C'].sum()+df['T'].sum())*100\n df['G [%]']=df['G']/(df['A'].sum()+df['G'].sum()+df['C'].sum()+df['T'].sum())*100\n df['T [%]']=df['T']/(df['A'].sum()+df['G'].sum()+df['C'].sum()+df['T'].sum())*100\n df['C [%]']=df['C']/(df['A'].sum()+df['G'].sum()+df['C'].sum()+df['T'].sum())*100", "def gc(self):\n g = self.seq.count('G')\n g += self.seq.count('g')\n c = self.seq.count('C')\n c += self.seq.count('c')\n return (g + c) / len(self.seq)", "def reduce(self) -> float:\n # Note: Reduction over segments not supported/needed for now.\n return self._tree[1]", "def total_chunks(self) -> global___Expression:", "def get_value(self):\r\n if len(self.walk) == 0:\r\n return 0\r\n value = 0\r\n start = 0\r\n end = len(self.walk) - 1\r\n while start < end:\r\n i_segment = self.get_segment(start+1)\r\n if i_segment.value == 'RUNG':\r\n break\r\n start += 2\r\n while end >= 2:\r\n i_segment = self.get_segment(end-1)\r\n if i_segment.value == 'RUNG':\r\n break\r\n end -= 2\r\n j = start\r\n while j < end:\r\n j_node = self.get_node(j)\r\n j += 1\r\n j_segment = 
self.get_segment(j)\r\n j += 1\r\n if j_segment.value != 'RUNG':\r\n # if the node connector is not critical, try to find and skip a loop\r\n k = j\r\n while k < end:\r\n k_node = self.get_node(k)\r\n k += 1\r\n k_segment = self.get_segment(k)\r\n k += 1\r\n if k_segment.value == 'RUNG':\r\n break\r\n if k_node == j_node:\r\n # Only skippable nodes existed before returned to original node, so skip that loop.\r\n value += (k - j) * 10\r\n j = k\r\n j_node = k_node\r\n j_segment = k_segment\r\n break\r\n if j_segment.value == 'SCAFFOLD':\r\n value -= j_segment.a.distance_sq(j_segment.b)\r\n elif j_segment.value == 'RUNG':\r\n value -= j_segment.a.distance_sq(j_segment.b)\r\n return value", "def load_factor(self):\n return round(self._n / self._size, 2)", "def gc_rate(dna: str, percent=False):\n c = Counter(dna)\n result = (c[\"G\"] + c[\"C\"]) / len(dna)\n return result * 100 if percent else result", "def overall_reduction(self):\n return 84", "def gc_content(seq):\n result = float(str(seq).count('G') + str(seq).count('C'))/len(seq) *100\n return result", "def percentage(self):\n return sum(self.chunk_percentage) / self.total_steps", "def _load_factor(self):\n return self.size / len(self.buckets)", "def get_next(current):\n return 0.5 * (current + n / current)", "def get_expected_compression_ratio_pct(self) -> int:\n return 100", "def _continued_fraction_kv(v, z, output_log_space=False):\n dtype = dtype_util.common_dtype([v, z], tf.float32)\n tol = tf.cast(np.finfo(dtype_util.as_numpy_dtype(\n dtype)).eps, dtype=dtype)\n max_iterations = 1000\n\n # Use Steed's algorithm to evaluate the confluent hypergeometric\n # function continued fraction in a numerically stable manner.\n def steeds_algorithm(\n should_stop,\n index,\n partial_numerator,\n partial_denominator,\n denominator_ratio,\n convergent_difference,\n hypergeometric_ratio,\n # Terms for recurrence in 6.7.36 in [3].\n k_0,\n k_1,\n # Intermediate coefficient in 6.7.30 in [3].\n c,\n # Intermediate sum in 6.7.35 in [3].\n q,\n hypergeometric_sum):\n # The numerator is v**2 - (index - 0.5) ** 2\n partial_numerator = partial_numerator - 2. * (index - 1.)\n c = tf.where(should_stop, c, -c * partial_numerator / index)\n next_k = (k_0 - partial_denominator * k_1) / partial_numerator\n k_0 = tf.where(should_stop, k_0, k_1)\n k_1 = tf.where(should_stop, k_1, next_k)\n q = tf.where(should_stop, q, q + c * next_k)\n partial_denominator = partial_denominator + 2.\n denominator_ratio = 1. / (\n partial_denominator + partial_numerator * denominator_ratio)\n convergent_difference = tf.where(\n should_stop, convergent_difference,\n convergent_difference * (\n partial_denominator * denominator_ratio - 1.))\n hypergeometric_ratio = tf.where(\n should_stop,\n hypergeometric_ratio,\n hypergeometric_ratio + convergent_difference)\n hypergeometric_sum = tf.where(\n should_stop,\n hypergeometric_sum,\n hypergeometric_sum + q * convergent_difference)\n index = index + 1\n should_stop = (tf.math.abs(q * convergent_difference) <\n tf.math.abs(hypergeometric_sum) * tol) | (\n index > max_iterations)\n return (should_stop,\n index,\n partial_numerator,\n partial_denominator,\n denominator_ratio,\n convergent_difference,\n hypergeometric_ratio,\n k_0, k_1, c, q, hypergeometric_sum)\n\n initial_numerator = tf.math.square(v) - 0.25\n initial_denominator = 2 * (z + 1.)\n initial_ratio = 1. 
/ initial_denominator + tf.zeros_like(v)\n initial_seq = -initial_numerator + tf.zeros_like(z)\n\n (_, _, _, _, _, _, hypergeometric_ratio,\n _, _, _, _, hypergeometric_sum) = tf.while_loop(\n cond=lambda stop, *_: tf.reduce_any(~stop),\n body=steeds_algorithm,\n loop_vars=(\n tf.zeros_like(v + z, dtype=tf.bool),\n tf.cast(2., dtype=dtype),\n initial_numerator,\n initial_denominator,\n initial_ratio,\n initial_ratio,\n initial_ratio,\n tf.zeros_like(v + z),\n tf.ones_like(v + z),\n initial_seq,\n initial_seq,\n 1 - initial_numerator * initial_ratio))\n\n log_kve = 0.5 * tf.math.log(np.pi / (2 * z)) - tf.math.log(hypergeometric_sum)\n log_kvp1e = (\n log_kve + tf.math.log1p(\n 2 * (v + z + initial_numerator * hypergeometric_ratio))\n - tf.math.log(z) - dtype_util.as_numpy_dtype(dtype)(np.log(2.)))\n if output_log_space:\n return log_kve, log_kvp1e\n return tf.math.exp(log_kve), tf.math.exp(log_kvp1e)", "def load_factor(self) -> float:\n return self.filled_count / self.table_size", "def leaf_nodes_to_search_percent(self) -> Optional[int]:\n return pulumi.get(self, \"leaf_nodes_to_search_percent\")", "def correct_fraction():\n with open(os.path.join(ocr.settings.BASE_DIR,\n 'training_set.json')) as file:\n training_set = json.load(file)\n correct = 0\n for letter in training_set['list']:\n print(letter['letter'])\n for _ in range(REPETITIONS):\n if ocr.basic_nn.tools.recognize_symbol(letter['inputs']) \\\n == letter['letter']:\n correct += 1 / REPETITIONS\n fraction = correct / len(training_set['list'])\n print(fraction)\n return fraction", "def backUp(self, value):\n return value / len(self.children)", "def _reduce(self) -> None:\n divisor = self._gcd(self._numerator, self._denominator)\n self._numerator = self._numerator // divisor\n self._denominator = self._denominator // divisor", "def problem_086(limit,verbose):\n\n # Three routes:\n # *------F Sides labeled A, B, C, routes clockwise from S\n # | /| R1^2 = (A + C)^2 + B^2\n # | / n R2^2 = (B + C)^2 + A^2\n # +-----+------+-----F R3^2 = (A + B)^2 + C^2\n # | | / | . `|\n # | A / .|` / |\n # | |/. ` a-n / |\n # +-C---S-b-B--+-----+\n # | ` . |\n # | `|\n # *------+\n # | |\n # | |\n # | |\n # +------F\n \n # Genreate all triples up to perimeter 3M + sqrt((M + M)^2 + M^2)\n # Which is is 3M + sqrt(5M^2)\n\n total_found = 0\n cuboids = defaultdict(set)\n triples = set()\n under_length = []\n \n for batch in count():\n size = (batch + 1) * 500\n max_triple_perimeter = int(3 * size + sqrt(5 * size**2)) + 1\n all_triples = set(generate_triples(max_triple_perimeter))\n this_loop = all_triples - triples\n triples = all_triples\n \n with click.progressbar(this_loop, label=\"{}\".format(total_found)) as bar:\n new_cuboids = (c for t in bar for c in generate_cuboids(t))\n new_cuboids = (c for c in new_cuboids if c.a > 0)\n new_cuboids = (c for c in new_cuboids if is_shortest_route_integral(c))\n for cuboid in new_cuboids:\n cuboids[cuboid.c].add(cuboid)\n \n for i in range(batch * 500, batch * 500 + 500):\n \n total_found += len(cuboids[i])\n if total_found >= limit:\n click.echo(total_found)\n click.echo(i)\n return", "def _perc_down(self, cur_idx):\n while 2 * cur_idx + 1 < len(self._heap):\n min_child_idx = self._get_min_child(cur_idx)\n if self._heap[cur_idx] > self._heap[min_child_idx]:\n self._heap[cur_idx], self._heap[min_child_idx] = (\n self._heap[min_child_idx],\n self._heap[cur_idx],\n )\n else:\n return\n cur_idx = min_child_idx" ]
[ "0.57871044", "0.57394934", "0.5700969", "0.557595", "0.55153626", "0.5491407", "0.54715765", "0.54007584", "0.53748786", "0.5336496", "0.5328934", "0.5310399", "0.5259188", "0.52554494", "0.5247512", "0.5241603", "0.5217785", "0.51893973", "0.51840824", "0.5177834", "0.5167148", "0.51589537", "0.51565206", "0.5141999", "0.5114801", "0.51116115", "0.5084546", "0.5079915", "0.50679106", "0.5065548" ]
0.764008
0
Return the Trie Fog that can be searched, ignoring any nodes that are currently being requested.
def _get_eligible_fog(self) -> fog.HexaryTrieFog: return self._trie_fog.mark_all_complete(self._active_prefixes)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def empty_trie():\n trie = Trie()\n return trie", "def empty_trie():\n from trie import Trie\n trie = Trie()\n return trie", "def traversal_test_trie():\n from trie import Trie\n trie = Trie()\n trie.insert('alpha')\n trie.insert('alpaca')\n trie.insert('boy')\n return trie", "def tricky_trie():\n from trie import Trie\n trie = Trie()\n trie.insert('bbbbbbb')\n trie.insert('bbb')\n trie.insert('lololololol')\n trie.insert('blololololol')\n return trie", "def cc_trie (cls) :\n return Trie (cls.cc_map)", "def full_trie():\n trie = Trie()\n trie.insert(\"hey\")\n return trie", "def find_fiq_and_vq(self):\n start = self._query.get_start()\n finish = self._query.get_finish()\n feature_vector = self._query.get_preference()\n filtering_vector = self._query.get_teta()\n\n fiq = {}\n vq = set()\n for node in self.list_of_node_features:\n for feature_index in range(len(self.list_of_node_features[node])):\n # Check the following conditions:\n # 1. Feature value of node at position h equals zero\n # 2. Feature value of node at position h is less than feature value in filtering vector\n # at position h\n # 3. node is node a start or finish node\n # If 3rd condition and either 1 or 2 do not add node to FIQ\n if (self.list_of_node_features.get(node)[feature_index] == 0 or\n self.list_of_node_features.get(node)[feature_index] < filtering_vector[feature_index])\\\n and (node != start and node != finish):\n continue\n\n # Add node to FIQ and VQ\n if fiq.get(feature_index):\n element = (node, self.list_of_node_features.get(node)[feature_index])\n fiq.get(feature_index).append(element)\n else:\n fiq[feature_index] = [(node, self.list_of_node_features.get(node)[feature_index])]\n vq.add(node)\n\n # remove features with zero values in feature vector\n for feature_index in range(len(feature_vector)):\n if feature_vector[feature_index] == 0:\n del(fiq[feature_index])\n\n return fiq, vq", "def checkFog(ontology_fog):\n if len(ontology_fog.has_bounding_box) != 0:\n bounding_box = checkBoundingBox(ontology_fog.has_bounding_box[0]) #checks the ontology BoundingBox individual and assigns a correct PYOSCX BoundingBox object.\n if len(ontology_fog.has_visual_range) !=0:\n visual_range = ontology_fog.has_visual_range[0]\n return xosc.Fog(visual_range,bounding_box)", "def get_root():\n root = VGOCache('https://www.vegguide.org/region/0')\n return check_has_regions(root.results['regions']['primary'])", "def get_node():\n return TrieNode()", "def test_traversal_with_no_input_string_returns_trie(full_trie):\n assert list(full_trie.traversal()) == ['h', 'e', 'y']\n assert list(full_trie.traversal('')) == ['h', 'e', 'y']", "def get_allowed_geometry(self):\n\n objects = self.get_geometry()\n\n allowed_objects = []\n\n for obj in objects:\n\n if rs.ObjectType(obj) == gs.allowed_object_types[self.path[1]]:\n\n allowed_objects.append(obj)\n\n return allowed_objects", "def multi_trie():\n trie = Trie()\n trie.insert(\"hey\")\n trie.insert(\"hell\")\n trie.insert(\"hello\")\n trie.insert(\"howdy\")\n trie.insert(\"head\")\n trie.insert(\"hi you\")\n return trie", "def alphacheck(self):\n if self.root_system.is_finite() and self.root_system.is_irreducible():\n return Family(self.index_set(), self.simple_coroot, \\\n hidden_keys = [0], hidden_function = lambda i: - self.cohighest_root())\n else:\n return self.simple_coroots()", "def test_traverse_on_empty_trie(empty_trie):\n assert list(empty_trie.traversal()) == []", "def test_create_empty_trie(empty_trie):\n assert empty_trie.root.children == {}\n assert empty_trie._size == 0", 
"def test_traversal_with_string_not_in_trie(full_trie):\n assert list(full_trie.traversal(\"goodbye\")) == []", "def eligible_nodes(self):\n return [v for v in self.G if self.eligible_node(v)]", "def __init__(self):\r\n self.trie = Trie()", "def usable(self):\n return self.exclude(Q(location=None) | Q(ipv4=None) | Q(ipv6=None))", "def get_fog_ids(self):\n fog_ids = []\n \n for node_id in self.nodes:\n if (isinstance(self.nodes[node_id], FogNode)):\n fog_ids.append(node_id)\n\n return fog_ids", "def __init__(self):\n self.trie = Trie()", "async def _missing_trie_hashes(self) -> AsyncIterator[TrackedRequest]:\n # For each account, when we have asked for all known storage and bytecode\n # hashes, but some are still not present, we \"pause\" the account so we can look\n # for neighboring nodes.\n # This is a list of paused accounts, using the path to the leaf node,\n # because that's how the account tracker is indexed.\n exhausted_account_leaves: Tuple[Nibbles, ...] = ()\n\n starting_root_hash = self._next_trie_root_hash\n\n try:\n while self.manager.is_running:\n # Get the next account\n\n # We have to rebuild the account iterator every time because...\n # something about an exception during a manual __anext__()?\n account_iterator = self._request_tracking_trie_items(\n self._account_tracker,\n starting_root_hash,\n )\n try:\n next_account_info = await account_iterator.__anext__()\n except trie_exceptions.MissingTraversalNode as exc:\n # Found a missing trie node while looking for the next account\n yield self._account_tracker.generate_request(\n exc.missing_node_hash,\n exc.nibbles_traversed,\n )\n continue\n except StopAsyncIteration:\n # Finished iterating over all available accounts\n break\n\n # Decode account\n path_to_leaf, address_hash_nibbles, encoded_account = next_account_info\n account = rlp.decode(encoded_account, sedes=Account)\n\n # Iterate over all missing hashes of subcomponents (storage & bytecode)\n subcomponent_hashes_iterator = self._missing_subcomponent_hashes(\n address_hash_nibbles,\n account,\n starting_root_hash,\n )\n async for node_request in subcomponent_hashes_iterator:\n yield node_request\n\n # Check if account is fully downloaded\n account_components_complete = self._are_account_components_complete(\n address_hash_nibbles,\n account,\n )\n if account_components_complete:\n # Mark fully downloaded accounts as complete, and do some cleanup\n self._mark_account_complete(path_to_leaf, address_hash_nibbles)\n else:\n # Pause accounts that are not fully downloaded, and track the account\n # to resume when the generator exits.\n self._account_tracker.pause_review(path_to_leaf)\n exhausted_account_leaves += (path_to_leaf, )\n\n except GeneratorExit:\n # As the generator is exiting, we want to resume any paused accounts. This\n # allows us to find missing storage/bytecode on the next iteration.\n for path_to_leaf in exhausted_account_leaves:\n self._account_tracker.mark_for_review(path_to_leaf)\n raise\n else:\n # If we pause a few accounts and then run out of nodes to ask for, then we\n # still need to resume the paused accounts to prepare for the next iteration.\n for path_to_leaf in exhausted_account_leaves:\n self._account_tracker.mark_for_review(path_to_leaf)\n\n # Possible scenarios:\n # 1. We have completed backfill\n # 2. 
We have iterated the available nodes, and all known hashes are being requested.\n # For example: if 0 nodes are available, and we walk to the root and request\n # the root from a peer, we do not have any available information to ask for\n # more nodes, and exit cleanly.\n #\n # In response to these situations, we might like to:\n # 1. Log and celebrate that the full state has been downloaded\n # 2. Exit this search and sleep a bit, waiting for new trie nodes to arrive\n #\n # 1 and 2 are a little more cleanly handled outside this iterator, so we just\n # exit and let the caller deal with it, using a _check_complete() check.\n return", "def get_all(root: TrieNode, prefix: str):\n \"\"\" Retorna uma lista IDs de cursos com o prefixo \"\"\"\n node = root\n found = []\n prefix = prefix.upper()\n\n # Se a raíz não tem filhos, a árvore é vazia\n if not root.children:\n return found\n\n # se não, busca cada caractere do prefixo \n for char in prefix:\n char_not_found = True\n\n # se o usuário colocar um asterisco, sinaliza qualquer palavra com o prefixo\n if char == '*': \n break\n else:\n # busca nas childs do nodo atual\n for child in node.children:\n if child.char == char:\n # se encontrar, atualiza a flag\n char_not_found = False\n # e recomeça do nodo que encontrou\n node = child\n break\n\n # se não encontrou algum caractere\n if char_not_found:\n return found\n\n # se encontrou todas as letras ou um *, pega todas as palavras\n return find_words(node)", "def test_on_tricky_trie(tricky_trie):\n assert tricky_trie.size == 4", "def __init__(self):\n self.root = TrieNode('*')", "def __init__(self):\n self.root = TrieNode('*')\n self.size = 0", "def infras (self):\n return (node for id, node in self.network.nodes_iter(data=True) if\n node.type == Node.INFRA)", "def __init__(self):\n self.trie = TrieNode()", "def __init__(self):\n self.trie = TrieNode()" ]
[ "0.5776142", "0.57360816", "0.5326462", "0.5315044", "0.5267867", "0.5225813", "0.51331353", "0.5095872", "0.50827706", "0.49944326", "0.49880475", "0.496863", "0.48768044", "0.4836348", "0.48069534", "0.4800665", "0.47692093", "0.47494227", "0.47216982", "0.47108996", "0.46952766", "0.46802568", "0.46288362", "0.4597854", "0.4574584", "0.45703384", "0.45663476", "0.4565464", "0.4564405", "0.4564405" ]
0.7354035
0
Return title + episode (if series)
def inclusive_title(self): return self.title + (" %s" % (self.episode_to_string(self.latest_season, self.latest_episode),) if self.is_series() else "")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def episode_title_for_tvdb(self):\n \n # strip out the year from the episode title:\n return \"Episode %d\"%self.episode_number[1]", "def episode_title_for_tvdb(self):\n return self.episode_title", "def episode_title_for_tvdb(self):\n \n # strip out the year from the episode title:\n return re.sub('(Part )(?P<part>\\d+)','(\\g<part>)',self.episode_title)", "def _get_full_title(self):\n return \"%s - %s %d\" % (self.title, _('Season'), self.season)", "def parse_episode_title(filename):\n print_info('Attempting to parse episode title from {0}'.format(filename))\n for regex in EPISODE_TITLE_REGEX:\n m = re.search(regex, filename)\n\n if m is None:\n continue\n\n extracted_title = m.group('EpisodeTitle')\n return clean_episode_title(extracted_title)\n return ''", "def media_series_title(self):\n media_status = self._media_status()[0]\n return media_status.series_title if media_status else None", "def seasonEpisode(self):\n return f's{str(self.seasonNumber).zfill(2)}e{str(self.episodeNumber).zfill(2)}'", "def book_series_title(self):\n return get_value(self.record, \"book_series[0].title\")", "def season_episode_str_from_show(show):\n return 'S{:02d}E{:02d}'.format(show._next.season, show._next.episode)", "def convert_title_season_episode_to_long_form(self, se_input, title_input):\n se_input = se_input[1:]\n se_input.replace(' ', '')\n\n e_ndx = se_input.index('E')\n\n #sometimes it looks like \"S14 E10\" and sometimes it's \"S14 Ep10\"\n if \"Ep\" in se_input:\n ep_offset = 2\n else:\n ep_offset = 1\n\n s = se_input[:e_ndx]\n e = se_input[e_ndx+ep_offset:]\n\n return \"%s Season %s Episode %s\" % (title_input, s, e)", "def extract_season_episode(string):\n series, episode = (None, None)\n\n # The series page url contains \"<name>_sX_eY\"\n m = re.match('.*[sS](\\d+)_?[eE](\\d+).*', string)\n if m:\n series, episode = m.groups()\n series = int(series)\n episode = int(episode)\n\n else:\n # Matches \"XxY\" OR unicode x (\\xd7 / ×)\n m = re.search(\"(\\d+)[x|\\xd7](\\d+)\", string)\n if m:\n series, episode = m.groups()\n series = int(series)\n episode = int(episode)\n else:\n m = re.search(\"S(\\d+)E(\\d+)\", string)\n if m:\n series, episode = m.groups()\n series = int(series)\n episode = int(episode)\n\n else:\n # Broke Girls – Season 4 Episode 22 – And the In and Out\n f = re.findall('(.+?)season\\s(\\d+)\\sepisode\\s(\\d+)', string + \" \", re.I)\n if f:\n _, series, episode = f[0]\n series = int(series)\n episode = int(episode)\n\n else:\n # Broke Girls – saison 5 épisode 16\n f = re.findall(\n '(.+?)\\ssaison\\s(\\d+)\\s\\xe9pisode\\s(\\d+)\\s',\n string + \" \", re.I)\n if f:\n _, series, episode = f[0]\n series = int(series)\n episode = int(episode)\n else:\n # 'Dragon Ball Super: Temporada 1 - Episodio 11 (2015)' TODO can be optimized\n f = re.findall('(.+?)\\stemporada\\s(\\d+)(.*)\\sepisodio\\s(\\d+)\\s', string + \" \", re.I)\n if f:\n _, series, __, episode = f[0]\n series = int(series)\n episode = int(episode)\n else:\n # Broke Girls – saison 5 episode 16\n f = re.findall(\n '(.+?)\\ssaison\\s(\\d+)\\s\\episode\\s(\\d+)\\s',\n string + \" \", re.I)\n if f:\n _, series, episode = f[0]\n series = int(series)\n episode = int(episode)\n\n return series, episode", "def get_title():", "def media_series_title(self):\n if lgtv[\"pairingKey\"] == 0:\n return \"Pin not set\"\n if self._currentSourceNumber == \"0\":\n return (\"{0} - CH{1:d} - {2}\").format(self._currentSourceName, self._currentChannelNumber, self._currentChannelName)\n else:\n return \"\"", "def 
parse_anime_episode_title(filename):\n print_info('Attempting to parse episode title from {0}'.format(filename))\n for regex in ANIME_EPISODE_TITLE_REGEXS:\n m = re.search(regex, filename)\n\n if m is None:\n continue\n\n extracted_title = m.group('EpisodeTitle')\n return clean_episode_title(extracted_title)\n return ''", "def get_episode_details(token, url, season):\n u = url + str(season)\n headers = {'Accept': 'application/json', 'Authorization': token}\n r = requests.get(u, headers=headers)\n json_data = json.loads(r.text).get('data')\n season_details = {}\n season_details['current_season'] = season\n if len(json_data) > 1:\n for episode in json_data:\n d = episode.get('firstAired')\n date = datetime.datetime.strptime(d, \"%Y-%m-%d\")\n today = datetime.datetime.today()\n if date.date() >= today.date():\n season_details['next_ep_no'] = episode.get('airedEpisodeNumber')\n season_details['next_air_date'] = episode.get('firstAired')\n season_details['ep_title'] = episode.get('episodeName')\n season_details['ep_overview'] = episode.get('overview')\n break\n else:\n season_details['next_ep_no'] = (json_data[len(json_data) - 1].get('airedEpisodeNumber'))\n season_details['next_air_date'] = (json_data[len(json_data) - 1].get('firstAired'))\n season_details['ep_title'] = (json_data[len(json_data) - 1].get('episodeName'))\n season_details['ep_overview'] = (json_data[len(json_data) - 1].get('overview'))\n else:\n season_details['next_ep_no'] = 1\n season_details['next_air_date'] = (json_data[0].get('firstAired'))\n season_details['ep_title'] = (json_data[0].get('episodeName'))\n season_details['ep_overview'] = (json_data[0].get('overview'))\n if season_details['next_air_date'] == \"\":\n season_details['next_air_date'] = 'TBD'\n if season_details['ep_title'] == \"\" or season_details['ep_title'] is None:\n season_details['ep_title'] = 'TBD'\n if season_details['ep_overview'] == \"\" or season_details['ep_overview'] is None:\n season_details['ep_overview'] = 'TBD'\n return season_details", "def parse_episode(filename):\n print_info('Extracting episode from {0}'.format(filename))\n for regex in EPISODE_NUM_REGEXS:\n m = re.search(regex, filename)\n\n if m is None:\n continue\n\n extracted_ep = m.group('Episode').lower()\n print_info('Extracted episode: {0}'.format(extracted_ep))\n\n if '-' in extracted_ep:\n print_info('Multiple Episodes found')\n tokens = extracted_ep.split('-e')\n first_token = tokens[0]\n last_token = tokens[len(tokens)-1]\n return parse_episode(first_token) + '-' + parse_episode(last_token)\n else:\n ep_num = int(extracted_ep)\n if ep_num is not None and ep_num > 0:\n print_info('Episode might be: {0}'.format(ep_num))\n return 'E' + format_num(ep_num)\n\n return None", "def getEpisodeArt(episode):\n\tseriesId = None\n\tfor sk in Dict['series'].keys():\n\t\tif Dict['series'][str(sk)]['title']==episode['seriesTitle']:\n\t\t\tseriesId = int(sk)\n\tif seriesId is not None:\n\t\tartUrl = \"\"\n\t\tif Dict['series'][str(seriesId)]['tvdbId'] is not None:\n\t\t\tartUrl = fanartScrapper.getSeasonThumb(Dict['series'][str(seriesId)]['tvdbId'], episode['season'], rand=False)\n\t\t\t#Log.Debug(\"arturl: %s\"%artUrl)\n\t\t\tif artUrl is not None:\n\t\t\t\tart = Function(getArt,url=artUrl)\n\t\tif artUrl == \"\" or artUrl is None:\n\t\t\tartUrl = Dict['series'][str(seriesId)]['art']\n\t\tif artUrl == \"\" or artUrl is None:\n\t\t\tartUrl = R(CRUNCHYROLL_ART)\n\telse:\n\t\tartUrl = R(CRUNCHYROLL_ART)\n\tLog.Debug(\"artUrl: %s\"%artUrl)\n\treturn artUrl", "def 
testGetEpisodeName(self):\n\t\tfor case in self.testCases:\n\t\t\tassert case['title'] == getEpisodeName( case['show'], case['season'], case['episode'])", "def episode_string(self, episode):\n cp, _ = zip(*episode.states)\n\n car_positions = dict()\n for i, p in enumerate(cp):\n car_positions[p] = i\n\n x, y = zip(*self.track_positions)\n output = \"\"\n y_rng = range(max(y) + 1)\n for i in range(max(x) + 1):\n row = \"\"\n for j in y_rng:\n pos = i, j\n if pos in car_positions:\n row += str(car_positions[pos])\n elif pos in self.start_positions:\n row += self.format_dict['start']\n elif pos in self.finish_positions:\n row += self.format_dict['finish']\n elif pos in self.track_positions:\n row += self.format_dict['track']\n else:\n row += self.format_dict['border']\n row += self.format_dict['sep']\n output = row + \"\\n\" + output\n return output", "def test_get_episode_overview(self):\n self.assertEquals(\n self.t['Battlestar Galactica (2003)'][1][6]['overview'].startswith(\n 'When a new copy of Doral, a Cylon who had been previously'),\n True\n )", "def get_title_artist(title_element): \n \n \n title_token = title_element.text.split(\" \")\n\n word = title_token.pop(0)\n artist = ''\n title = ''\n first = True\n while(title_token != [] and word != '-' and word[-1] != '-'):\n if first:\n first = False\n artist += (word)\n else:\n artist += ' '\n artist += word\n\n word = title_token.pop(0)\n \n if word[-1] == '-':\n word = word[:-1]\n artist += word\n \n if title_token == []:\n print(\"ERROR HERE: \", title_element.text)\n return None, None\n \n word = title_token.pop(0)\n first = True\n\n while(True):\n if first:\n first = False\n title += word\n else:\n title += ' '\n title += word\n if title_token != []:\n word = title_token.pop(0)\n if word == \"ALBUM\" or (word == \"EP\" and title_token[0] == \"REVIEW\"):\n break\n else:\n break\n return title, artist", "def retrieve_episode(url):\n domain = parse_domain(url)\n html = retrieve_episode_html(url)\n return domain, html", "def list_episodes(title, uri):\r\n\r\n # Set plugin category. It is displayed in some skins as the name\r\n # of the current section.\r\n xbmcplugin.setPluginCategory(_handle, title)\r\n\r\n # Get the list of videos in the category.\r\n result = _get_data(uri)\r\n # Iterate through videos.\r\n #logger.info(\"######: {}, log: {}########\".format('rk1', result['items']))\r\n for video in result['items']:\r\n # {\r\n # \"title\": \"Sakthi returns to India\",\r\n # \"contentId\": 1000036012,\r\n # \"uri\": \"https://api.hotstar.com/o/v1/episode/detail?id=80096&contentId=\r\n # 1000036012&offset=0&size=20&tao=0&tas=5\",\r\n # \"description\": \"Saravanana and Meenakshi's oldest son, Sakthi, returns to\r\n # India 25 years after his parents had left it. 
He wants to search for a bride,\",\r\n # \"duration\": 1332,\r\n # \"contentType\": \"EPISODE\",\r\n # \"contentProvider\": \"Global Villagers\",\r\n # \"cpDisplayName\": \"Global Villagers\",\r\n # \"assetType\": \"EPISODE\",\r\n # \"genre\": [\r\n # \"Family\"\r\n # ],\r\n # \"lang\": [\r\n # \"Tamil\"\r\n # ],\r\n # \"channelName\": \"Star Vijay\",\r\n # \"seasonNo\": 1,\r\n # \"episodeNo\": 520,\r\n # \"premium\": false,\r\n # \"live\": false,\r\n # \"hboContent\": false,\r\n # \"encrypted\": false,\r\n # \"startDate\": 1416649260,\r\n # \"endDate\": 4127812200,\r\n # \"broadCastDate\": 1382367600,\r\n # \"showName\": \"Saravanan Meenatchi\",\r\n # \"showId\": 99,\r\n # \"showShortTitle\": \"Saravanan Meenatchi\",\r\n # \"seasonName\": \"Chapter 1\",\r\n # \"playbackUri\": \"https://api.hotstar.com/h/v1/play?contentId=1000036012\",\r\n # \"contentDownloadable\": false\r\n # },\r\n _add_video_item(video)\r\n #logger.info(\"######: {}, log: {}########\".format('rk2', video))\r\n\r\n _add_next_page_and_search_item(result['nextPage'], 'episodes', title)\r\n\r\n # Add a sort method for the virtual folder items (alphabetically, ignore articles)\r\n xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_NONE)\r\n\r\n # Finish creating a virtual folder.\r\n xbmcplugin.endOfDirectory(_handle)", "def episode(self, title=None, episode=None):\n key = f'{self.key}/children'\n if title is not None and not isinstance(title, int):\n return self.fetchItem(key, Episode, title__iexact=title)\n elif episode is not None or isinstance(title, int):\n if isinstance(title, int):\n index = title\n else:\n index = episode\n return self.fetchItem(key, Episode, parentIndex=self.index, index=index)\n raise BadRequest('Missing argument: title or episode is required')", "def episodes(self):\n for episode in self._root.iter('Episode'):\n entry = {}\n entry['season'] = int(episode.find('SeasonNumber').text)\n entry['episode'] = int(episode.find('EpisodeNumber').text)\n entry['title'] = unicode(episode.find('EpisodeName').text)\n if entry['title'] == '':\n continue\n entry['description'] = unicode(episode.find('Overview').text)\n entry['firstAired'] = episode.find('FirstAired').text\n yield entry", "def add_episode(self, ep):\n #make da season\n ses = self._add_season(ep)\n dvdses = self._add_season(ep, dvd=True) \n self._add_episode(ep, ses)\n self._add_episode(ep, dvdses, dvd=True)", "def title(self):\n if self.file_name is None:\n return None\n else:\n fname = os.path.split(self.file_name)[-1]\n fname, *ext = fname.rsplit('.', 1)\n procgen = ext and ext[0] in ('json', 'yaml')\n if procgen and self._seed and self._seed.spawn_key:\n # Append the spawn key as the episode number\n fname += '-e' + str(self._seed.spawn_key[-1])\n return fname", "def create_episode(e, debug=False):\n #{\"title\": , \"summary\": , \"image\": , \"link\": , \"season\": , \"number\": , \"rating\"}\n\n if debug:\n print(\"beginning create_episode()\")\n\n episode = {}\n\n # get BeautifulSoup data for extracting details\n episode_url = \"https://www.imdb.com/\" + e[\"link\"]\n episode_soup = bs4.BeautifulSoup(requests.get(episode_url).text, features=\"html.parser\")\n\n #get title\n title_wrapper = episode_soup.select(\".title_wrapper\")[0]\n episode[\"title\"] = title_wrapper.select(\"h1\")[0].contents[0].replace(u'\\xa0', ' ')\n\n #get summary\n episode[\"summary\"] = episode_soup.select(\".summary_text\")[0].contents[0].replace(u'\\n', ' ')\n\n #get image\n episode[\"image\"] = get_image(e[\"link\"], debug)\n\n #link\n episode[\"link\"] = 
e[\"link\"]\n\n #season\n episode[\"season\"] = e[\"season\"]\n\n #number\n episode[\"number\"] = e[\"episode_number\"]\n\n #rating\n episode[\"rating\"] = e[\"rating\"]\n\n return episode", "def SongTitle( path ):\n p = subprocess.Popen( ['ffprobe',path], stderr=subprocess.PIPE )\n\n output = p.communicate()[1].decode()\n if 'Invalid data found' in output:\n return None\n\n # find the first occurance of \"title : stuff\" with any number of spaces.\n res = re.search( r'title\\s+:\\s+([a-zA-Z0-9,\\(\\) ]+)', output )\n\n if res is None:\n return \"\"\n\n ret = res.group(1)\n\n return ret", "def get_video_title(self, response):\n return response.css(\".watch-title::text\").extract_first(default='')" ]
[ "0.74555385", "0.7378745", "0.6963783", "0.68400466", "0.66249967", "0.6508349", "0.64518964", "0.6446037", "0.6445582", "0.6437754", "0.6297738", "0.62690187", "0.6210943", "0.6200764", "0.61216825", "0.6120273", "0.611263", "0.59974575", "0.5988067", "0.5951355", "0.59445405", "0.59397787", "0.59121096", "0.58959424", "0.5892946", "0.58775663", "0.58768576", "0.5873981", "0.5853357", "0.5831947" ]
0.7791267
0
Returns a dataframe with the mean profit per cluster, based on a df given as an argument
def get_profit_per_cluster(df: pd.core.frame.DataFrame) -> pd.core.frame.DataFrame: return pd.DataFrame(df.groupby(by='cluster')['profit'].mean(), columns=['profit']).reset_index()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_mean_profit_per_class_from_train_df(df_profit_per_cluster_train: pd.core.frame.DataFrame) -> tuple:\n # if condition returns False, AssertionError is raised:\n assert len(df_profit_per_cluster_train) >= 3, \"Algorithm, returned less than 3 clusters.\"\n\n df_profit_per_cluster = df_profit_per_cluster_train.sort_values(by='profit', ascending=False)\n group_size = int(len(df_profit_per_cluster) / 3)\n\n buy_clusters_mean_profit = df_profit_per_cluster.iloc[:group_size]['profit'].mean()\n sell_clusters_mean_profit = df_profit_per_cluster.iloc[-group_size:]['profit'].mean()\n\n buy_clusters_list = list(df_profit_per_cluster.iloc[:group_size]['cluster'])\n sell_clusters_list = list(df_profit_per_cluster.iloc[-group_size:]['cluster'])\n\n return buy_clusters_mean_profit, buy_clusters_list, sell_clusters_mean_profit, sell_clusters_list", "def get_mean_profit_per_class_from_test_df(df_profit_per_cluster_test: pd.core.frame.DataFrame,\n buy_clusters_list: List[int], sell_clusters_list: List[int]) -> tuple:\n # if condition returns False, AssertionError is raised:\n assert len(buy_clusters_list) != 0 and len(sell_clusters_list) != 0, \"Clusters list can't be empty.\"\n\n buy_clusters_mean_profit = \\\n df_profit_per_cluster_test.loc[df_profit_per_cluster_test['cluster'].isin(buy_clusters_list)]['profit'].mean()\n sell_clusters_mean_profit = \\\n df_profit_per_cluster_test.loc[df_profit_per_cluster_test['cluster'].isin(sell_clusters_list)]['profit'].mean()\n\n return buy_clusters_mean_profit, sell_clusters_mean_profit", "def wca_mean(X, k, df):\n\t\n\n\t# Intializing the clusters\t\n\tC = dict()\n\tfor cluster in range(k):\n\t C[cluster] = pd.DataFrame()\n\n\t# Calculating the mean vector\n\tmean_vector = X.mean()\n\n\t# Choosing the seed points based on the minimum distance from the mean vector\n\tX['dist_mean'] = X.apply(lambda x: np.linalg.norm(np.asarray(x)- np.asarray(mean_vector)), axis=1)\n\tdist_means = X.sort_values(by='dist_mean')\n\t\n\t# Dropping the the datapoints which have already been assigned as seed\n\tidx_to_drop = dist_means.index[:k]\n\tdist_means.reset_index(drop=True,inplace=True)\n\tX.drop('dist_mean',axis=1,inplace=True)\n\tX.drop(idx_to_drop, inplace=True)\n\n\t# Assigning seed points to the clusters\n\tmu = list()\n\tfor cluster in range(k):\n\t C[cluster] = C[cluster].append(dist_means.iloc[cluster].drop('dist_mean'))\n\t mu.append(C[cluster].mean())\n\t\n\t# Running the algorithm\t\n\t\n\t# Initializing the p-value list which would be used for plotting\n\tpval = dict()\n\n\tfor cluster in range(k):\n\t pval[cluster] = dict()\n\t for i in C[0].columns:\n\t pval[cluster][i] = list()\n\n\t# Algorithm\n\tfor i in tqdm(range(int(len(X)/k)), desc='Iterations: '):\n\t for cluster in range(k):\n\n\t # Calculating the distances from the mean vector of eaimportch cluster (in Descending order)\n\t X['dist_mean'] = X.apply(lambda x: np.linalg.norm(np.asarray(x)- np.asarray(mu[cluster])), axis=1)\n\t dist_means = X.sort_values(by='dist_mean', ascending=False)\n\t idx_to_drop = dist_means.index[0]\n\t dist_means.reset_index(drop=True,inplace=True)\n\t X.drop('dist_mean',axis=1,inplace=True)\n\n\t # Assigning the top value to the cluster\n\t C[cluster] = C[cluster].append(dist_means.iloc[0].drop('dist_mean'))\n\t C[cluster] = C[cluster].reset_index(drop=True)\n\t \n\t # Updating means of each cluster\n\t mu[cluster] = C[cluster].mean()\n\n\t # Remove datapoint from X?\n\t X.drop(idx_to_drop,inplace=True)\n\t \n\t for i in C[0].columns:\n\t 
pval[cluster][i].append(sc.ks_2samp(C[cluster][i],df.drop('target',axis=1)[i])[1])\n\n\treturn(C,pval)", "def clusterting_feature_importance (df, cluster_col):\r\n scores = pd.DataFrame()\r\n df0 = df.copy()\r\n df0 = df.select_dtypes(include=np.number)\r\n\r\n for i in df0[cluster_col].unique():\r\n df2 = df0[df0[cluster_col] == i]\r\n df2.drop(cluster_col,axis=1, inplace=True)\r\n #df2 = df.select_dtypes(include=np.number)\r\n scores[i] = df2.std() / (df2.max() - df2.min())\r\n scores['mean'] = scores.mean(axis = 1)\r\n\r\n scores = 1 - scores\r\n\r\n return scores", "def cluster_means(self):\n if self.evaluate_by is not None:\n return(self.merged_data.groupby(\n 'labels').mean().sort_values(self.evaluate_by).transpose())\n else:\n return(self.merged_data.groupby('labels').mean().transpose())", "def calculate_kmeans(df, clusters=10):\r\n kmeans = KMeans(n_clusters=clusters)\r\n labels = kmeans.fit_predict(df)\r\n\r\n return kmeans, labels", "def mean(df):\r\n\r\n\tdf_mean_dict = dict()\r\n\r\n\tfor i, col in enumerate(df.columns):\r\n\t\tdf_mean_dict[col] = df[col].mean()\r\n\r\n\tdf_mean = pd.DataFrame(df_mean_dict, index=['Mean'])\r\n\tpd.set_option('precision', 2) # set output display precision in 2 decimal places\r\n\r\n\treturn df_mean", "def disaggregate_by_cluster(self):\n # wt = np.zeros((1, self.ds.shape[1]))\n # total = np.zeros((self.n_ahead, self.ds.shape[1]))\n \n agg_cluster_ds = np.zeros((self.n_ahead+1, self.n_clusters))\n agg_cluster_ds[0] = self.ds_agg_by_c[-1]\n agg_cluster_ds[1:] = self.ds_c_for\n cluster_perc_change = np.diff(agg_cluster_ds, axis = 0) / agg_cluster_ds[:-1]\n\n cluster_scaling_vector = np.zeros((2, self.ds.shape[1]))\n\n # break down proportionally -> don't work well\n # for c in range(self.n_clusters):\n # c_m = self.ds.iloc[-self.cluster_n_period:, np.where(self.ds_c == c)[0]]\n # c_sum = sum(c_m)\n # indiv_sum = np.sum(c_m, axis = 0)\n # wt[:,np.where(self.ds_c == c)[0]] = (indiv_sum/c_sum)\n # total[:,np.where(self.ds_c == c)[0]] = np.reshape(\n # np.repeat(self.ds_c_for[:,c], c_m.shape[1]), (self.n_ahead, c_m.shape[1]))\n \n # multiply by the perc change\n \n for i in range(self.ds_c.shape[0]):\n cluster_scaling_vector[:,i] = cluster_perc_change[:,self.ds_c[i]]\n cluster_scaling_vector = cluster_scaling_vector+1\n cluster_scaling_vector = np.array(cluster_scaling_vector)\n \n self.ds_for = self.ds.copy()\n\n for yr in range(self.n_ahead)[::-1]:\n # forecast on foretasted number\n yr_ind = self.ds_for.index[-(yr+1)]\n self.ds_for.ix[yr_ind] = self.ds_for.iloc[-(yr+2),:].values * cluster_scaling_vector[-(yr+1)]\n\n # self.ds_for.iloc[-(self.n_ahead):,:] = self.ds_for.iloc[-(self.n_ahead+1):-1,:].values * np.array(cluster_scaling_vector)\n\n # if negative -> 0\n self.ds_for[self.ds_for < 0] = 0", "def _compute_cluster_averages(self, key=\"_scvi_labels\"):\n # find cell label column\n label_col = self.adata.uns[\"_scvi\"][\"categorical_mappings\"][key][\"original_key\"]\n\n # find data slot\n x_dict = self.adata.uns[\"_scvi\"][\"data_registry\"][\"X\"]\n if x_dict[\"attr_name\"] == \"X\":\n use_raw = False\n else:\n use_raw = True\n if x_dict[\"attr_name\"] == \"layers\":\n layer = x_dict[\"attr_key\"]\n else:\n layer = None\n\n # compute mean expression of each gene in each cluster/batch\n aver = compute_cluster_averages(self.adata, labels=label_col, use_raw=use_raw, layer=layer)\n\n return aver", "def k_means_model(df, numOfClusters):\n # Perform scaling on the dataframe containing the selected features\n data = scale(df)\n\n # Train a model\n 
model = KMeans(init=\"k-means++\", n_clusters=numOfClusters, n_init=20).fit(data)\n return model", "def grouping(data_clust):\n data_grouped = data_clust.groupby('Clusters').mean()\n return data_grouped", "def cluster(players_df, columns):\n\toptimal_n=None\n\toptimal_clusters=None\n\toptimal_clusterer=None\n\toptimal_silhouette=-99\n\tfor n in range(2,9):\n\t\tclusterer=KMeans(n_clusters=n)\n\t\tcluster_labels=clusterer.fit_predict(players_df[columns])\n\t\tavg_silhouette=silhouette_score(players_df[columns], cluster_labels)\n\t\tprint('The avg silhouette score for {} clusters is {}'.format(n, avg_silhouette))\n\t\tif avg_silhouette > optimal_silhouette:\n\t\t\toptimal_silhouette=avg_silhouette\n\t\t\toptimal_clusterer=clusterer\n\t\t\toptimal_clusters=cluster_labels\n\t\t\toptimal_n=n\n\tprint('Returning optimal clusters found with n={}'.format(optimal_n))\n\tclusters = {n: [] for n in range(optimal_n)}\n\tfor i, label in enumerate(optimal_clusters):\n\t\tclusters[label].append(\n\t\t\tdict(\n\t\t\t\tplayer_id=players_df.iloc[i]['PERSON_ID'],\n\t\t\t\tfirst_name=players_df.iloc[i]['DISPLAY_LAST_COMMA_FIRST'].split()[-1],\n\t\t\t\tlast_name=players_df.iloc[i]['DISPLAY_LAST_COMMA_FIRST'].split()[0],\n\t\t\t\t)\n\t\t\t)\n\treturn clusters", "def get_clusters_with_all_features(df, n_clusters):\n pipe = _build_model(df, use_pca=False, n_components=0, use_kmeans=True, n_clusters=n_clusters)\n labels = pipe.named_steps['kmeans'].labels_\n df.loc[:, 'labels'] = labels\n print(df.groupby('labels').agg(\n {'Fresh': 'mean', 'Milk': 'mean', 'Grocery': 'mean', 'Frozen': 'mean', 'Detergents_Paper': 'mean',\n 'Delicassen': 'mean'}))\n print(pipe.named_steps['scaler'].inverse_transform(pipe.named_steps['kmeans'].cluster_centers_))\n # cluster 1: low spending behaviour in general\n # cluster 2: high spending in detergents_paper, milk, grocery\n # cluster 3: high spending in fresh, rest low\n # cluster 4: high spending in everything except detergents_paper, extremely high in delicassen\n # cluster 5: medium spending in general, low in frozen, high in detergents and paper", "def get_product_means(df):\n try:\n mean_dataframe = df.groupby(['asin'])['overall'].mean()\n print mean_dataframe[:10]\n write_df_tocsv(mean_dataframe, 'product_means.csv')\n return mean_dataframe\n except Exception as e:\n print \"Error getting product means\"\n print str(e)\n pass", "def mean_cluster(self, labelled_cluster):\n sum_of_points = self.sum_cluster(labelled_cluster)\n size_cluster = len(labelled_cluster)\n if self.sigma_cl1:\n size_cluster += np.sqrt(2)*self.sigma_cl1*np.random.randn()\n mean_of_points = sum_of_points * (1.0 / size_cluster)\n return mean_of_points", "def byMeans(dataset, number_of_clusters, class_header=\"Class\", verbosity=0, return_clusters=False):\n if verbosity >= 2: # optionally print dataset shape and info\n print(dataset.shape)\n print(dataset)\n\n old_dataset = dataset.copy()\n dataset = dataset.drop(columns=class_header) # remove non-float class column\n\n # Assign centroids to random values which fit into dataset space.\n centroids = pandas.DataFrame(columns=dataset.columns,\n data=numpy.random.uniform(dataset.min(), dataset.max(),\n (number_of_clusters, dataset.shape[1])))\n if verbosity >= 1: # optionally print centroids and random dataset\n print(\"INITIAL CENTROIDS\")\n print(centroids)\n if verbosity >= 2:\n print(\"DATAFRAME DATASET\")\n print(dataset)\n\n for iterations in range(MAX_ITERATIONS): # Loop until MAX_ITERATIONS or settled\n if verbosity >= 1: # optionally print iteration 
count\n print(\"ITERATIONS\")\n print(iterations)\n\n # calculate clustering of data\n clusters = Cluster.calcClusters(dataset, centroids, number_of_clusters, verbosity=verbosity)\n\n old_centroids = centroids.copy() # copy centroid dataframe\n\n if verbosity >= 2: # optionally print cluster list\n print(\"DATAFRAME ARRAY CLUSTERS\")\n print(clusters)\n\n for cluster_index, cluster in enumerate(clusters): # Calculate new centroids\n cluster_mean = cluster.mean()\n if not cluster_mean.isnull().any(): # make sure we dont write null means to centroid list\n centroids.loc[cluster_index] = cluster_mean\n\n if verbosity >= 1:\n print(\"OLD CENTROIDS\")\n print(old_centroids)\n print(\"NEW CENTROIDS\")\n print(centroids)\n\n if old_centroids is not None: # Calculate sum of centroid movements.\n centroid_change = 0\n for centroid_index, centroid in centroids.iterrows():\n centroid_change += abs(Cluster.calcDistance(centroid, old_centroids.loc[centroid_index]))\n\n if verbosity >= 1:\n print(\"CENTROID DIFF\")\n print(centroid_change)\n\n if centroid_change < SETTLE_THRESHOLD: # break if centroid movement is below threshold.\n break\n\n # Final Cluster re-calculation\n clusters = Cluster.calcClusters(old_dataset, centroids, number_of_clusters,\n verbosity=verbosity, class_header=class_header)\n # Create new dataframe with class column of and row for each centroid\n centroids_class = pandas.DataFrame(data=[\"NOCLASS\"] * centroids.shape[0], columns=[class_header])\n if verbosity >= 2:\n print(centroids_class)\n print(centroids)\n for cluster_index, cluster in enumerate(clusters): # For each cluster\n if verbosity >= 2:\n print(cluster_index)\n print(cluster)\n if cluster.size > 0: # If cluster is not empty set centroid class to most common class in cluster\n centroids_class.iat[cluster_index, 0] = cluster.mode().loc[0][0]\n if old_dataset.columns[0] == class_header: # check if class column should be first or last.\n print(\"CLASS IS FIRST COL\")\n centroids = pandas.concat([centroids_class, centroids], axis=1) # merge class to centroids as first column\n else:\n print(\"CLASS IS NOT FIRST COL\")\n centroids = pandas.concat([centroids, centroids_class], axis=1) # merge class to centroids as last column\n for centroid in centroids.iterrows(): # For each centroid\n if centroid[1][class_header] is \"NOCLASS\": # Trim NOCLASS centroids (empty cluster)\n centroids = centroids.drop(centroid[0])\n centroids = centroids.reset_index(drop=True) # Reindex centroids\n\n if return_clusters is True: # optionally return cluster list\n return centroids, clusters\n pass\n else:\n return centroids # return centroids dataframe", "def cluster_means_scaled(self):\n if self.evaluate_by is not None:\n return(self.merged_scaled_data.groupby(\n 'labels').mean().sort_values(self.evaluate_by).transpose())\n else:\n return(self.merged_scaled_data.groupby(\n 'labels').mean().transpose())", "def kMeans(df, k, threshold = 0.05, index_list = []):\n\tr.assign('df', df)\n\tr('df_transpose = t(df)')\n\tif len(index_list) == 0:\n\t \t\tindex_list = [2, 3, 4, 5, 6]\n\tr.assign('index_list', index_list)\n\tr('testframe = df_transpose[index_list,]')\n\tr.assign('k', k)\n\tr.assign('threshold', threshold)\n\tresults = r('kMeans(testframe, k, threshold)')\n\tr.assign('results', results)\n\treturn r('results')", "def kmeans_clustering(proj_df, k):\r\n k_means= k_means = KMeans(random_state=25, n_clusters=k)\r\n k_means.fit(proj_df)\r\n labels= k_means.predict(proj_df)\r\n \r\n return labels", "def __create_cluster_profiles(self,\n 
clustered_dataframes,\n shrunken_df,\n numerical_features,\n le_map,\n output_path,\n find_nearest_on_cols=False,\n show=True):\n\n def find_nearest(numbers, target):\n \"\"\"\n Find the closest fitting number to the target number\n \"\"\"\n numbers = np.asarray(numbers)\n idx = (np.abs(numbers - target)).argmin()\n return numbers[idx]\n\n cluster_profiles_df = pd.DataFrame(columns=shrunken_df.columns).drop(\n 'Cluster_Name', axis=1)\n rows_count = 0\n for cluster_identfier, cluster_dataframe in \\\n clustered_dataframes.items():\n df = pd.DataFrame(columns=cluster_dataframe.columns)\n df = df.append(cluster_dataframe.mean(), ignore_index=True)\n df.index = [cluster_identfier]\n\n if cluster_dataframe.shape[0] <= 1:\n continue\n\n # Attempt to convert numbers found within the full set of data\n for col in cluster_dataframe.columns:\n if col not in numerical_features or find_nearest_on_cols:\n df[col] = find_nearest(numbers=shrunken_df[\n col].value_counts().index.tolist(),\n target=df[col].values[0])\n\n # Evaluate cluster dataframe by dataframe\n eval_df = pd.DataFrame(columns=cluster_dataframe.columns)\n eval_df = eval_df.append(\n cluster_dataframe.mean(), ignore_index=True)\n eval_df = eval_df.append(\n cluster_dataframe.min(), ignore_index=True)\n eval_df = eval_df.append(\n cluster_dataframe.median(),\n ignore_index=True)\n eval_df = eval_df.append(\n cluster_dataframe.max(), ignore_index=True)\n eval_df = eval_df.append(\n cluster_dataframe.std(), ignore_index=True)\n eval_df = eval_df.append(\n cluster_dataframe.var(), ignore_index=True)\n eval_df.index = [\"Mean\", \"Min\", \"Median\",\n \"Max\", \"Standard Deviation\", \"Variance\"]\n\n if show:\n print(\"Total found in {0} is {1}\".format(\n cluster_identfier, cluster_dataframe.shape[0]))\n self.__render_mpl_table(\n df,\n sub_dir=output_path,\n filename=cluster_identfier +\n \"_Means_Rounded_To_Nearest_Real_Numbers\",\n header_columns=0,\n col_width=4.0)\n\n self.__render_mpl_table(\n eval_df,\n sub_dir=output_path,\n filename=cluster_identfier +\n \"_Eval_Df\",\n header_columns=0,\n col_width=4.0)\n display(df)\n display(eval_df)\n self.__vertical_spacing(7)\n\n cluster_profiles_df = cluster_profiles_df.append(\n self.__decode_df(df, le_map))\n\n rows_count += cluster_dataframe.shape[0]\n\n return rows_count, cluster_profiles_df", "def average_consensus(self, cluster):\n\t\tcenterk = 0\n\t\tindex = 0\n\t\tfor value in cluster:\n\t\t\tcenterk += value\n\t\t\tindex += 1\n\t\tcenterk = centerk / index\n\t\treturn centerk", "def k_means(n_clust, data_frame, true_labels):\n k_means = KMeans(n_clusters=n_clust, random_state=123, n_init=30)\n k_means.fit(data_frame)\n c_labels = k_means.labels_\n df = pd.DataFrame({'clust_label': c_labels, 'orig_label': true_labels.tolist()})\n ct = pd.crosstab(df['clust_label'], df['orig_label'])\n y_clust = k_means.predict(data_frame)\n display(ct)\n print('% 9s' % 'inertia homo compl v-meas ARI AMI silhouette')\n print('%i %.3f %.3f %.3f %.3f %.3f %.3f'\n % (k_means.inertia_,\n homogeneity_score(true_labels, y_clust),\n completeness_score(true_labels, y_clust),\n v_measure_score(true_labels, y_clust),\n adjusted_rand_score(true_labels, y_clust),\n adjusted_mutual_info_score(true_labels, y_clust),\n silhouette_score(data_frame, y_clust, metric='euclidean')))", "def clustering(df, mode):\n # split into list of dfs containing only one reference node\n df_list = [df.loc[i : i + 8 - 1, :] for i in range(0, len(df), 8)]\n\n df_coefficient = pd.DataFrame()\n\n # loop over every single node\n for 
df_single in df_list:\n df_single = df_single.reset_index()\n total_value = 0\n\n # loop over the weights of all connected nodes\n for j in range(len(df_single) - 1):\n if mode == \"geometric\":\n # geometric\n total_value = total_value + math.sqrt(df_single.chi_sq[j] * df_single.chi_sq[j + 1])\n if mode == \"arithmetic\": \n # arithmetic\n total_value = total_value + ((df_single.chi_sq[j] * df_single.chi_sq[j + 1]) / 2)\n if mode == \"argmax\": \n # max\n total_value = total_value + max(df_single.chi_sq[j], df_single.chi_sq[j + 1])\n if mode == \"argmin\":\n # min\n total_value = total_value + min(df_single.chi_sq[j], df_single.chi_sq[j + 1])\n\n for i in range(len(df_single) - 1):\n if mode == \"geometric\":\n # geometric\n triplet_value = math.sqrt(df_single.chi_sq[i] * df_single.chi_sq[i + 1])\n if mode == \"arithmetic\":\n # arithmetic\n triplet_value = (df_single.chi_sq[i] * df_single.chi_sq[i + 1]) / 2\n if mode == \"argmax\":\n # max\n triplet_value = max(df_single.chi_sq[i], df_single.chi_sq[i + 1])\n if mode == \"argmin\": \n # min\n triplet_value = min(df_single.chi_sq[i], df_single.chi_sq[i + 1])\n\n cluster_coefficient = triplet_value / total_value\n buffer = [\n [\n df_single.reference[i],\n df_single.comparison[i],\n df_single.comparison[i + 1],\n triplet_value,\n cluster_coefficient,\n ]\n ]\n df_coefficient = df_coefficient.append(buffer)\n\n df_coefficient = df_coefficient.reset_index()\n\n print(\"\\n\\n threshold 0.5*c_omega\")\n check_list = []\n # print out triangles that have a cluster coefficient bigger, than X\n for i in range(len(df_coefficient)):\n if df_coefficient[4][i] >= ((0.5) * df_coefficient[4].max()):\n print(list(df_coefficient.loc[i][1:4]))\n check_list.append(list(df_coefficient.loc[i][1:4]))\n else:\n continue\n\n print(\"\\n\\n threshold 0.75*c_omega\")\n check_list = []\n for i in range(len(df_coefficient)):\n if df_coefficient[4][i] >= ((0.75) * df_coefficient[4].max()):\n print(list(df_coefficient.loc[i][1:4]))\n check_list.append(list(df_coefficient.loc[i][1:4]))\n else:\n continue\n\n print(\"\\n\\n threshold 0.8*c_omega\")\n check_list = []\n for i in range(len(df_coefficient)):\n if df_coefficient[4][i] >= ((0.9) * df_coefficient[4].max()):\n print(list(df_coefficient.loc[i][1:4]))\n check_list.append(list(df_coefficient.loc[i][1:4]))\n else:\n continue\n\n\n print(\"\\n\\n threshold 0.9*c_omega\")\n check_list = []\n for i in range(len(df_coefficient)):\n if df_coefficient[4][i] >= ((0.9) * df_coefficient[4].max()):\n print(list(df_coefficient.loc[i][1:4]))\n check_list.append(list(df_coefficient.loc[i][1:4]))\n else:\n continue\n\n return", "def cluster_by_split(filtered_df):\n global features_in_range\n global table\n # make a copy of the entire data set\n unfiltered_df = table\n # get total number of robot faces in data set\n total_rows = len(unfiltered_df)\n\n # drop any column that is not included in our list of 11 features\n # 11 features = 16 features with no dependencies filtered via 20-80% range\n for col in unfiltered_df:\n if not unfiltered_df[col].name in features_in_range:\n unfiltered_df = unfiltered_df.drop(unfiltered_df[col].name, 1)\n\n # iterate over the dataframe of columns generated by the range\n for col in filtered_df:\n try:\n # for each column, call groupby() and calculate percentage\n check_for_20 = unfiltered_df.groupby(col).size().reset_index(name='count')\n check_for_20['as_percent'] = 100 * check_for_20['count'] / float(total_rows)\n # ignore feature values that represent less than 20% of all faces\n 
cluster_by_feature = check_for_20[check_for_20['as_percent'] >= 20]\n # if feature has values over 20%, iterate over\n # each feature_value and generate clusters\n if not cluster_by_feature.empty:\n # iterate over every value of the feature\n for index, row in cluster_by_feature.iterrows():\n # use feature value to call groupby() on the entire data set\n results = unfiltered_df[unfiltered_df[col] == row[0]]\n results = results \\\n .groupby(list(unfiltered_df)) \\\n .size() \\\n .reset_index(name='count')\n # calculate count as a percentage\n results['as_percent'] = 100 * results['count'] / float(total_rows)\n results = results.sort_values(by='as_percent', ascending=False)\n # store results in a .tsv file\n filename = str(col) + \"_\" + str(row[0]) + '_feature_cluster.tsv'\n results.to_csv(filename.replace(\"/\", \"-\"), header=True, sep='\\t')\n print(\"results written to file\")\n except:\n # 'count' and 'percentage' columns will generate errors\n # since they don't exist in the original data set\n pass", "def create_cluster(df,validate, test, X, k, name):\n \n scaler = StandardScaler(copy=True).fit(df[X])\n X_scaled = pd.DataFrame(scaler.transform(df[X]), columns=df[X].columns.values).set_index([df[X].index.values])\n kmeans = KMeans(n_clusters = k, random_state = 42)\n kmeans.fit(X_scaled)\n kmeans.predict(X_scaled)\n df[name] = kmeans.predict(X_scaled)\n df[name] = 'cluster_' + df[name].astype(str)\n \n v_scaled = pd.DataFrame(scaler.transform(validate[X]), columns=validate[X].columns.values).set_index([validate[X].index.values])\n validate[name] = kmeans.predict(v_scaled)\n validate[name] = 'cluster_' + validate[name].astype(str)\n \n t_scaled = pd.DataFrame(scaler.transform(test[X]), columns=test[X].columns.values).set_index([test[X].index.values])\n test[name] = kmeans.predict(t_scaled)\n test[name] = 'cluster_' + test[name].astype(str)\n \n centroids = pd.DataFrame(scaler.inverse_transform(kmeans.cluster_centers_), columns=X_scaled.columns)\n return df, X_scaled, scaler, kmeans, centroids", "def cluster_kmeans(df, k):\r\n # Sample fron the original df\r\n sample_df=df.sample(n = k)\r\n obs, attr= df.shape\r\n # Make copies \r\n copy_df=df.copy()\r\n flag=0\r\n sse_old=0\r\n while (flag==0): \r\n sse=0\r\n Labels=[]\r\n for i in range(0, obs):\r\n dist= []\r\n for j in range(0,k):\r\n #Calculate Eucledian distance\r\n diff=list((df.iloc[i,:]-sample_df.iloc[j,:])**2)\r\n eu_dist=(sum(diff))**(1/attr)\r\n dist.append(eu_dist) \r\n #Add Labels to the observations based on the variable they are close to\r\n label=(dist.index(min(dist)))\r\n Labels.append(label)\r\n # Calculate SSE\r\n sse=sse+((min(dist) )**2)\r\n sse=sse**(1/2)\r\n copy_df['labels']=Labels\r\n # Stopping criteria is change in SSE should be 2 %\r\n if (sse_old !=0):\r\n if(abs(sse_old-sse)/sse_old<=0.05):\r\n flag=1 \r\n return_df=copy_df['labels'].to_frame()\r\n return (return_df, sse)\r\n else:\r\n sse_old=sse\r\n #Empty the sample df\r\n sample_df.drop(sample_df.index, inplace=True)\r\n # Now pick random values from each label and add it to the sample df\r\n for val in range(0,k):\r\n #Create new sample df\r\n sample_df = pd.concat([sample_df, copy_df[copy_df['labels']==val].iloc[:,0:attr].sample(n=1)])\r\n else:\r\n sse_old=sse\r\n #Empty the sample df\r\n sample_df.drop(sample_df.index, inplace=True)\r\n for val in range(0,k):\r\n #Create new sample df \r\n sample_df = pd.concat([sample_df, copy_df[copy_df['labels']==val].iloc[:,0:attr].sample(n=1)])", "def cluster(dfi, samples, num_clusters=8, random_state=1):\n 
df = dfi.fillna(0)\n X = df[samples].values\n kmeans = KMeans(n_clusters=num_clusters,\n random_state=random_state).fit(X)\n cluster_number = kmeans.labels_\n df['kmeans_cluster_number'] = cluster_number\n return df", "def compute_cluster_ensemble(var, indicesOnCluster, maxIndices, indicesToParticle): #{{{\n\n num_clusters = maxIndices.shape[0]\n if len(var.shape) == 1:\n meanvar = np.zeros((num_clusters,))\n elif len(var.shape) == 2:\n meanvar = np.zeros((var.shape[0],num_clusters))\n else:\n warnings.warn('did not have correct shape for ' + str(var) + ' with len(var.shape)='+ str(len(var.shape)))\n meanvar = None\n\n for aCluster, maxInd in enumerate(maxIndices):\n # get particles in cluster\n particles = indicesToParticle[indicesOnCluster[aCluster,0:maxInd]]\n\n # compute mean depending upon size of array\n if len(var.shape) == 1:\n meanvar[aCluster] = np.mean(var[particles])\n if len(var.shape) == 2:\n meanvar[:,aCluster] = np.mean(var[:,particles], axis=1)\n\n return meanvar #}}}", "def get_clusters_with_pca(df_full, df, n_clusters, n_components):\n pipe = _build_model(df, use_pca=True, n_components=2, use_kmeans=True, n_clusters=n_clusters)\n df.loc[:, ['PC-1', 'PC-2']] = pipe.named_steps['pca'].transform(df)\n labels = pipe.named_steps['kmeans'].labels_\n df.loc[:, 'labels'] = labels\n df_centers = df.groupby('labels').agg({'PC-1': 'mean', 'PC-2': 'mean'})\n print(df_centers)\n\n df.loc[:, 'Channel'] = df_full['Channel']\n df.loc[:, 'Channel_str'] = df['Channel'].replace([1, 2], ['Horeca', 'Retail'])\n plt.figure(8, figsize=(12, 6))\n for channel in ['Horeca', 'Retail']:\n plt.scatter(x='PC-1', y='PC-2', data=df[df['Channel_str'] == channel],\n s=200, alpha=0.5, label=channel)\n plt.xlabel('PC-1'), plt.ylabel('PC-2')\n plt.title('PC-1 vs PC-2 concerning Channel')\n plt.legend()\n plt.show()\n\n plt.figure(9, figsize=(12, 6))\n plt.scatter(x='PC-1', y='PC-2', data=df[df['labels'] == 0], s=100, c='red', label='Cluster 1')\n plt.scatter(x='PC-1', y='PC-2', data=df[df['labels'] == 1], s=100, c='blue', label='Cluster 2')\n plt.scatter(x='PC-1', y='PC-2', data=df[df['labels'] == 2], s=100, c='green', label='Cluster 3')\n plt.scatter(x='PC-1', y='PC-2', data=df[df['labels'] == 3], s=100, c='cyan', label='Cluster 4')\n plt.scatter(x='PC-1', y='PC-2', data=df[df['labels'] == 4], s=100, c='magenta', label='Cluster 5')\n plt.scatter(df_centers.iloc[:, 0], df_centers.iloc[:, 1],\n s=100, c='yellow', label='Centroids')\n plt.title('Clusters of customers')\n plt.xlabel('Spending in PC-1')\n plt.ylabel('Spending in PC-2')\n plt.legend()\n plt.show()", "def cluster_stats(df):\r\n pattern = list(df.iloc[0])[-2]\r\n n_days = len(pattern)\r\n \r\n cls = [(day + 1, hour) for day in range(n_days) for hour in range(24)]\r\n tp = pd.DataFrame(columns = cls)\r\n tp.columns = pd.MultiIndex.from_tuples(tp.columns, names = ['day', 'hour'])\r\n tp.index.name = 'cluster'\r\n \r\n for (key, value) in df.groupby('cluster'):\r\n d, total = np.zeros((n_days, 24)), len(value)\r\n for arr in value.iloc[:, :-2].values:\r\n for i, ax in enumerate(np.split(arr, n_days)):\r\n ax = np.array([[0, 1][x > 0] for x in ax])\r\n d[i] += ax\r\n d /= total\r\n s = pd.Series({(x + 1, y) : d[x][y] for x in range(n_days) for y in range(24)})\r\n s.name = key\r\n tp = tp.append(s)\r\n \r\n tp['pattern'] = [pattern] * len(tp)\r\n return tp" ]
[ "0.77887636", "0.71788836", "0.68467784", "0.64547867", "0.62752557", "0.62094164", "0.6097976", "0.6032077", "0.59859556", "0.5968796", "0.5952526", "0.5906807", "0.59013987", "0.58965117", "0.58917665", "0.5887675", "0.5864871", "0.5808137", "0.5738972", "0.5717941", "0.56477034", "0.5637867", "0.56281066", "0.5615782", "0.56156254", "0.5612784", "0.5595809", "0.55820537", "0.5569827", "0.5517969" ]
0.87321883
0
Based on a dataframe given as an argument, returns the mean profit per class (buy, sell) in the training dataset: sorts the dataframe in descending order by profit, marks the 1/3 of clusters with the highest profit as buy, marks the 1/3 of clusters with the lowest profit as sell; if the data contains fewer than 3 different clusters, an AssertionError is raised
def get_mean_profit_per_class_from_train_df(df_profit_per_cluster_train: pd.core.frame.DataFrame) -> tuple: # if condition returns False, AssertionError is raised: assert len(df_profit_per_cluster_train) >= 3, "Algorithm, returned less than 3 clusters." df_profit_per_cluster = df_profit_per_cluster_train.sort_values(by='profit', ascending=False) group_size = int(len(df_profit_per_cluster) / 3) buy_clusters_mean_profit = df_profit_per_cluster.iloc[:group_size]['profit'].mean() sell_clusters_mean_profit = df_profit_per_cluster.iloc[-group_size:]['profit'].mean() buy_clusters_list = list(df_profit_per_cluster.iloc[:group_size]['cluster']) sell_clusters_list = list(df_profit_per_cluster.iloc[-group_size:]['cluster']) return buy_clusters_mean_profit, buy_clusters_list, sell_clusters_mean_profit, sell_clusters_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_mean_profit_per_class_from_test_df(df_profit_per_cluster_test: pd.core.frame.DataFrame,\n buy_clusters_list: List[int], sell_clusters_list: List[int]) -> tuple:\n # if condition returns False, AssertionError is raised:\n assert len(buy_clusters_list) != 0 and len(sell_clusters_list) != 0, \"Clusters list can't be empty.\"\n\n buy_clusters_mean_profit = \\\n df_profit_per_cluster_test.loc[df_profit_per_cluster_test['cluster'].isin(buy_clusters_list)]['profit'].mean()\n sell_clusters_mean_profit = \\\n df_profit_per_cluster_test.loc[df_profit_per_cluster_test['cluster'].isin(sell_clusters_list)]['profit'].mean()\n\n return buy_clusters_mean_profit, sell_clusters_mean_profit", "def get_profit_per_cluster(df: pd.core.frame.DataFrame) -> pd.core.frame.DataFrame:\n return pd.DataFrame(df.groupby(by='cluster')['profit'].mean(), columns=['profit']).reset_index()", "def clusterting_feature_importance (df, cluster_col):\r\n scores = pd.DataFrame()\r\n df0 = df.copy()\r\n df0 = df.select_dtypes(include=np.number)\r\n\r\n for i in df0[cluster_col].unique():\r\n df2 = df0[df0[cluster_col] == i]\r\n df2.drop(cluster_col,axis=1, inplace=True)\r\n #df2 = df.select_dtypes(include=np.number)\r\n scores[i] = df2.std() / (df2.max() - df2.min())\r\n scores['mean'] = scores.mean(axis = 1)\r\n\r\n scores = 1 - scores\r\n\r\n return scores", "def purity_score(label, pred):\n \n df = pd.concat([label, pd.DataFrame(pred)], axis=1)\n df.set_axis(['label', 'pred'], axis=1, inplace=True)\n \n s = 0\n\n for x, cluster in df.groupby('pred'):\n s += cluster['label'].value_counts().iloc[0] # adding the most occuring class in a cluster\n\n return s / label.shape[0]", "def score(self, df_X, ser_y):\n df_predict = self.predict(df_X)\n accuracies = []\n for instance in ser_y.index:\n # Accuracy is the probability of selecting the correct class\n try:\n accuracy = df_predict.loc[instance, ser_y.loc[instance]]\n except:\n import pdb; pdb.set_trace()\n accuracies.append(accuracy)\n return np.mean(accuracies)", "def ranking_metric(df, method, phenoPos, phenoNeg, classes, ascending): \n \n A = phenoPos\n B = phenoNeg\n df2 = df.T \n df2['class'] = classes\n df_mean= df2.groupby('class').mean().T\n df_std = df2.groupby('class').std().T \n #exclude any zero stds.\n df_mean = df_mean[df_std.sum(axis=1) !=0]\n df_std = df_std[df_std.sum(axis=1) !=0]\n \n if method == 'signal_to_noise':\n sr = (df_mean[A] - df_mean[B])/(df_std[A] + df_std[B])\n elif method == 't_test':\n sr = (df_mean[A] - df_mean[B])/ np.sqrt(df_std[A]**2/len(df_std)+df_std[B]**2/len(df_std) )\n elif method == 'ratio_of_classes':\n sr = df_mean[A] / df_mean[B]\n elif method == 'diff_of_classes':\n sr = df_mean[A] - df_mean[B]\n elif method == 'log2_ratio_of_classes':\n sr = np.log2(df_mean[A] / df_mean[B])\n else:\n logging.error(\"Please provide correct method name!!!\") \n sys.exit()\n sr.sort_values(ascending=ascending, inplace=True)\n df3 = sr.to_frame().reset_index()\n df3.columns = ['gene_name','rank']\n df3['rank2'] = df3['rank']\n\n return df3", "def cluster_means(self):\n if self.evaluate_by is not None:\n return(self.merged_data.groupby(\n 'labels').mean().sort_values(self.evaluate_by).transpose())\n else:\n return(self.merged_data.groupby('labels').mean().transpose())", "def score(self, df_X, ser_y):\n df_predict = self.predict(df_X)\n missing_columns = set(ser_y).difference(\n df_predict.columns)\n for column in missing_columns:\n df_predict[column] = np.repeat(0,\n len(df_predict))\n accuracies = []\n for instance in ser_y.index:\n # Accuracy 
is the probability of selecting the correct class\n try:\n accuracy = df_predict.loc[instance, ser_y.loc[instance]]\n except:\n import pdb; pdb.set_trace()\n accuracies.append(accuracy)\n return np.mean(accuracies)", "def classify():\n yes_dataset = df[df[\"_class\"] == 1] # 470588\n no_dataset = df[df[\"_class\"] == 0] # 1971\n\n parameter_analysis = list()\n for criterion in np.arange(0.05, 0.91, 0.05):\n print(\"doing experiment at criterion = %s ...\" % criterion)\n rate_list = list()\n for i in range(10):\n # shuffle yes_dataset and no_dataset, so we can randomly choose 90% yes_dataset\n # + 90% no_dataset as train dataset, 10% yes_dataset + 10% no_dataset as test dataset\n yes_index = yes_dataset.index.tolist()\n random.shuffle(yes_index)\n no_index = no_dataset.index.tolist()\n random.shuffle(no_index)\n \n # concatenate 90%yes + 90%no, 10%yes + 10%no\n train = pd.concat([\n yes_dataset.loc[yes_index[:1774], :],\n no_dataset.loc[no_index[:423530], :]\n ])\n test = pd.concat([\n yes_dataset.loc[yes_index[1774:], :],\n no_dataset.loc[no_index[423530:], :]\n ]) \n \n # split data and label\n train_data, train_label = (train[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n train[\"_class\"])\n test_data, test_label = (test[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n test[\"_class\"])\n \n # apply classifier\n clf = GaussianNB()\n clf.fit(train_data, train_label)\n probability = clf.predict_proba(test_data).T[1]\n \n result = pd.DataFrame()\n result[\"_class\"] = test_label\n result[\"_predict\"] = probability >= criterion\n \n result_yes = result[result[\"_class\"] == 1]\n yes_yes_rate = sum(result_yes[\"_class\"] == result_yes[\"_predict\"])/len(result_yes[\"_predict\"])\n \n result_no = result[result[\"_class\"] == 0]\n no_no_rate = sum(result_no[\"_class\"] == result_no[\"_predict\"])/len(result_no[\"_predict\"])\n \n rate_list.append((yes_yes_rate, no_no_rate))\n\n rate_list = pd.DataFrame(rate_list)\n yes_yes_rate, no_no_rate = rate_list.mean()[0], rate_list.mean()[1]\n parameter_analysis.append((criterion, yes_yes_rate, no_no_rate))\n \n # save data to excel spreadsheet\n parameter_analysis = pd.DataFrame(parameter_analysis, columns=[\"criterion\", \"yes_yes_rate\", \"no_no_rate\"])\n writer = pd.ExcelWriter(\"parameter_analysis.xlsx\")\n parameter_analysis.to_excel(writer, \"parameter_analysis\", index=False)\n writer.save()", "def wca_mean(X, k, df):\n\t\n\n\t# Intializing the clusters\t\n\tC = dict()\n\tfor cluster in range(k):\n\t C[cluster] = pd.DataFrame()\n\n\t# Calculating the mean vector\n\tmean_vector = X.mean()\n\n\t# Choosing the seed points based on the minimum distance from the mean vector\n\tX['dist_mean'] = X.apply(lambda x: np.linalg.norm(np.asarray(x)- np.asarray(mean_vector)), axis=1)\n\tdist_means = X.sort_values(by='dist_mean')\n\t\n\t# Dropping the the datapoints which have already been assigned as seed\n\tidx_to_drop = dist_means.index[:k]\n\tdist_means.reset_index(drop=True,inplace=True)\n\tX.drop('dist_mean',axis=1,inplace=True)\n\tX.drop(idx_to_drop, inplace=True)\n\n\t# Assigning seed points to the clusters\n\tmu = list()\n\tfor cluster in range(k):\n\t C[cluster] = C[cluster].append(dist_means.iloc[cluster].drop('dist_mean'))\n\t mu.append(C[cluster].mean())\n\t\n\t# Running the algorithm\t\n\t\n\t# Initializing the p-value list which would be used for plotting\n\tpval = dict()\n\n\tfor cluster in range(k):\n\t 
pval[cluster] = dict()\n\t for i in C[0].columns:\n\t pval[cluster][i] = list()\n\n\t# Algorithm\n\tfor i in tqdm(range(int(len(X)/k)), desc='Iterations: '):\n\t for cluster in range(k):\n\n\t # Calculating the distances from the mean vector of eaimportch cluster (in Descending order)\n\t X['dist_mean'] = X.apply(lambda x: np.linalg.norm(np.asarray(x)- np.asarray(mu[cluster])), axis=1)\n\t dist_means = X.sort_values(by='dist_mean', ascending=False)\n\t idx_to_drop = dist_means.index[0]\n\t dist_means.reset_index(drop=True,inplace=True)\n\t X.drop('dist_mean',axis=1,inplace=True)\n\n\t # Assigning the top value to the cluster\n\t C[cluster] = C[cluster].append(dist_means.iloc[0].drop('dist_mean'))\n\t C[cluster] = C[cluster].reset_index(drop=True)\n\t \n\t # Updating means of each cluster\n\t mu[cluster] = C[cluster].mean()\n\n\t # Remove datapoint from X?\n\t X.drop(idx_to_drop,inplace=True)\n\t \n\t for i in C[0].columns:\n\t pval[cluster][i].append(sc.ks_2samp(C[cluster][i],df.drop('target',axis=1)[i])[1])\n\n\treturn(C,pval)", "def get_split(data):\n \"\"\" gets the best feature, and best value \"\"\"\n\n best_feature = None\n best_value = 0.0\n columns = data.columns\n gini_base = gini_impurity(data)\n n_rows = len(data.index) # total number of rows of data before split\n\n # Fininding which split yields the best gini gain\n max_gain = 0\n\n for i in range(len(columns)-1): # -1 b.c. class is final column\n xs = data[columns[i]].unique() # get values to test\n for x in xs: # test values\n # split dataset\n df_left = data[data[columns[i]] < x]\n df_right = data[data[columns[i]] >= x]\n\n # get gini impurities\n gini_left = gini_impurity(df_left)\n gini_right = gini_impurity(df_right)\n \n\n # Calculated weighted gini impurity\n w_left = len(df_left.index) / n_rows\n w_right = len(df_right.index) / n_rows\n\n w_gini = gini_left * w_left + gini_right * w_right\n \n\n # Calculate gini gain (we want to minimize w_gini for the smallest impurity. 
Ideal split is perfect Left=c1, Right=c2)\n # why not just find min w_gin instead of uding gini_gain and gini_base vaiables?\n gini_gain = gini_base - w_gini\n\n # check if this is the best split so far, store values, update max_gini\n if gini_gain > max_gain:\n best_feature = columns[i]\n best_value = x\n max_gain = gini_gain\n\n df_left = data.loc[data[best_feature] < best_value]\n df_right = data.loc[data[best_feature] >= best_value]\n \n\n return best_feature, best_value, df_left, df_right", "def balance_classes(df):\n df_class_0 = df[df[65]==0]\n df_class_1 = df[df[65]==1]\n df_count = df[65].value_counts()\n count_0 = df_count[0]\n count_1 = df_count[1]\n\n if count_0 > count_1:\n df_class_1_over = df_class_1.sample(count_0, replace=True)\n df_over = pd.concat([df_class_0, df_class_1_over], axis=0)\n elif count_0 < count_1:\n df_class_0_over = df_class_0.sample(count_1, replace=True)\n df_over = pd.concat([df_class_1, df_class_0_over], axis=0)\n else:\n df_over = df\n \n return df_over", "def mid_market_price(orders: pandas.DataFrame):\n return numpy.mean((best_bid_price(orders), best_ask_price(orders)))", "def create_strategy(filename: str, columns_list: List[str], som_width: int, som_height: int, n_iter: int, sigma=0.3,\n learning_rate=0.01) -> tuple:\n # get prepared data\n df, df_prepared, df_train, df_test, df_train_columns = get_data(filename, columns_list)\n\n # train som\n final_df_train, final_df_test = train_som(som_width, som_height, df, df_train, df_test, df_train_columns, n_iter,\n sigma=sigma, learning_rate=learning_rate)\n\n # get profit per cluster in train and test datasets\n df_profit_per_cluster_train = get_profit_per_cluster(final_df_train)\n df_profit_per_cluster_test = get_profit_per_cluster(final_df_test)\n\n # get mean profit for sell and buy class in training and testing datasets\n try:\n buy_clusters_mean_profit_train, buy_clusters_list, sell_clusters_mean_profit_train, sell_clusters_list = \\\n get_mean_profit_per_class_from_train_df(df_profit_per_cluster_train)\n\n buy_clusters_mean_profit_test, sell_clusters_mean_profit_test = \\\n get_mean_profit_per_class_from_test_df(df_profit_per_cluster_test, buy_clusters_list, sell_clusters_list)\n # if the data was assigned to less than to 3 clusters\n except:\n buy_clusters_mean_profit_train, sell_clusters_mean_profit_train, \\\n buy_clusters_mean_profit_test, sell_clusters_mean_profit_test = None, None, None, None\n\n return len(df_profit_per_cluster_train), len(df_profit_per_cluster_test), \\\n buy_clusters_mean_profit_train, sell_clusters_mean_profit_train, \\\n buy_clusters_mean_profit_test, sell_clusters_mean_profit_test", "def stdConfidenceTrades(predictions, buy_confidence=1.5, sell_confidence=1.1):\n smooth_preds = pd.Series(predictions).rolling(5).mean()\n buy_thresh = np.mean(smooth_preds) + buy_confidence * np.std(smooth_preds)\n sell_thresh = np.mean(smooth_preds) - sell_confidence * np.std(smooth_preds)\n buy_positions = np.where(predictions > buy_thresh)[0]\n sell_positions = np.where(predictions < sell_thresh)[0]\n \n buys = buy_positions\n sells = []\n curSell = 0\n for curBuy in buys:\n arr = np.where(sell_positions > curBuy)[0]\n if len(arr):\n sells.append(sell_positions[arr[0]])\n tradePairs = list(zip(buys, sells))\n return tradePairs", "def balence_classes(df, btol):\r\n #Find the least supported class and muliply by the tolerance coefficient to get max_count:\r\n ccounts = df['classification'].value_counts()\r\n max_count = np.min(ccounts.values) * btol\r\n #Create a new dataframe with 
balenced support:\r\n newdf = pd.DataFrame(columns=df.columns.values)\r\n for x in df.groupby('classification'):\r\n if x[1].shape[0] > max_count:\r\n newdf = newdf.append(x[1].sample(max_count).reset_index(drop=True))\r\n else:\r\n newdf = newdf.append(x[1].reset_index(drop=True))\r\n return newdf.reset_index(drop=True)", "def knn(trainingSetData, testSetData, k):\n trainingSet = trainingSetData.drop([14], axis=1) # drop income\n testSet = testSetData.drop([14], axis=1) # drop income\n\n distances = {}\n # this will store the distances re-sorted in ascending/descending order\n sort = {}\n # income band results (>=50k or <50K)\n incomePredictions = []\n\n # Calculating euclidean distance between each row of training data and test data instance\n for testInstance in range(len(testSet)): # len(testSet)\n \n # Store current test Point:\n testInstance = testSet.iloc[testInstance] \n \n distances = euclideanDistanceRow(testInstance, trainingSet)\n\n # sort the distances in order of smallest first:\n sorted_d = sorted(distances.items(), key=lambda x: x[1], reverse=False)\n\n neighbors = []\n\n # Extracting top k neighbors\n for x in range(k):\n neighbors.append(sorted_d[x])\n\n\n classVotes = {}\n\n # Calculating the most freq class in the neighbors\n results = {\"lessThan50\": 0, \"moreThan50\": 0}\n\n # creating a dataframe to which we will add the income values:\n\n for x in range(len(neighbors)):\n if (trainingSetData.iloc[neighbors[x][0]][14] == 0.0):\n results[\"lessThan50\"] += 1\n elif (trainingSetData.iloc[neighbors[x][0]][14] == 1.0):\n results[\"moreThan50\"] += 1\n\n print('results',results)\n\n if (results[\"lessThan50\"] > results[\"moreThan50\"]):\n incomePredictions.append(0.0)\n elif (results[\"lessThan50\"] < results[\"moreThan50\"]):\n incomePredictions.append(1.0)\n\n return incomePredictions", "def prepareSplitClassifier(df, models, choice):\n\n\n def classificationOutput(clf, X, Y):\n \"\"\"\n Fit the model and print the classification results\n - confusion_matrix\n - avg scores etc\n \"\"\"\n n_samples = 36\n\n print \"\\n\\nClassifier: \\n %s\" % (clf)\n print \"#\" * 79\n # classifier_gnb = naive_bayes.GaussianNB() # initiating the classifier\n\n clf.fit(X[:n_samples], Y[:n_samples]) # train on first n_samples and test on last 10\n\n expected = Y[n_samples:]\n predicted = clf.predict(X[n_samples:])\n print(\"Classification report:\\n%s\\n\" % (metrics.classification_report(expected, predicted)))\n print(\"\\nConfusion matrix:\\n%s\" % metrics.confusion_matrix(expected, predicted))\n\n\n\n\n def splitclassify(cDf):\n \"\"\"\n Given the dataframe combined with equal fair and unfair apps,\n classify them\n \"\"\"\n cDf = cDf.reindex(np.random.permutation(cDf.index)) # shuffle the dataframe\n featCols = set(cDf.columns)\n featCols.remove('appLabel')\n\n features = cDf[list(featCols)].astype('float')\n\n ## Scale the features to a common range\n min_max_scaler = preprocessing.MinMaxScaler()\n X = min_max_scaler.fit_transform(features.values)\n\n Y = cDf['appLabel'].values\n\n\n if choice == 'all':\n for key in models:\n classifier = models[key]\n classificationOutput(classifier, X, Y)\n else:\n if choice in models:\n classifier = models[choice]\n classificationOutput(classifier, X, Y)\n else:\n print \"Incorrect Choice\"\n\n\n\n fairDf = df[df['appLabel'] == False]\n unfairDf = df[df['appLabel'] == True]\n\n\n # calculate total possible splits of fair data frame relatie to\n # size of unfair dataframe\n splits = len(fairDf) // len(unfairDf)\n\n for i in range(splits):\n clDf 
= fairDf[i : i+len(unfairDf)].append(unfairDf)\n\n # print fairDf.values, unfairDf.values\n print \"Classifying %d th split of fair apps with unfair app\" % (i)\n print \"-\" * 79\n splitclassify(clDf)\n print \"\\n\\n\"", "def get_perf(self) :\n self.train()\n\n prediction = self.clf.predict(self.df_test.drop(columns = 'up')[:-1])\n self.accuracy = accuracy_score(df_test['up'][length:].values, prediction)\n tn, fp, fn, tp = confusion_matrix(df_test['up'][length:].values, prediction).ravel()\n self.recall = tp/(tp+fn)\n self.specificity = tn / (tn+fp)\n\n\n self.df_true = self.df_true[self.length:]\n\n profit = 1\n mini = 1\n maxi = 1\n self.df_true['close'] = self.df_true['close'].map(lambda x : np.exp(x))\n for s in range(1,len(self.df_true)):\n if prediction[x-1] == 1 :\n result = ((self.df_true['close'].iloc[s] -self.df_true['close'].iloc[s-1]) / self.df_true['close'].iloc[s-1]) + 1\n profit = profit * result\n if result < mini :\n mini = result\n if maxi < result :\n maxi = result\n self.mini = mini\n self.maxi = maxi\n self.profit = profit", "def generate_clusters(df):\n\n df_size = df.shape[0]\n print(df_size)\n n_clusters = 0\n percent_min_pts = 0.105\n min_clusters = 3\n while (n_clusters != min_clusters):\n print(\"percent_min_pts\", percent_min_pts)\n min_cluster_pts = math.floor(df_size * percent_min_pts)\n print(\"min_cluster_pts\", min_cluster_pts)\n\n clusterer = hdbscan.HDBSCAN(min_cluster_size=min_cluster_pts)\n print(df.head())\n clusterer.fit(df)\n cluster_groups = {}\n labels = clusterer.labels_\n for i in labels:\n if cluster_groups.get(i):\n cluster_groups[i] = cluster_groups[i] + 1\n else:\n cluster_groups[i] = 1\n print(\"cluster_groups\", cluster_groups)\n n_clusters = len(set(labels))\n print(\"n_clusters\", n_clusters)\n multiplier = abs(n_clusters - min_clusters) * 0.001\n print(\"multiplier\", multiplier)\n if n_clusters > min_clusters:\n percent_min_pts += multiplier\n else:\n percent_min_pts -= multiplier\n print(\"percent_min_pts\", percent_min_pts)\n return labels", "def cluster_by_split(filtered_df):\n global features_in_range\n global table\n # make a copy of the entire data set\n unfiltered_df = table\n # get total number of robot faces in data set\n total_rows = len(unfiltered_df)\n\n # drop any column that is not included in our list of 11 features\n # 11 features = 16 features with no dependencies filtered via 20-80% range\n for col in unfiltered_df:\n if not unfiltered_df[col].name in features_in_range:\n unfiltered_df = unfiltered_df.drop(unfiltered_df[col].name, 1)\n\n # iterate over the dataframe of columns generated by the range\n for col in filtered_df:\n try:\n # for each column, call groupby() and calculate percentage\n check_for_20 = unfiltered_df.groupby(col).size().reset_index(name='count')\n check_for_20['as_percent'] = 100 * check_for_20['count'] / float(total_rows)\n # ignore feature values that represent less than 20% of all faces\n cluster_by_feature = check_for_20[check_for_20['as_percent'] >= 20]\n # if feature has values over 20%, iterate over\n # each feature_value and generate clusters\n if not cluster_by_feature.empty:\n # iterate over every value of the feature\n for index, row in cluster_by_feature.iterrows():\n # use feature value to call groupby() on the entire data set\n results = unfiltered_df[unfiltered_df[col] == row[0]]\n results = results \\\n .groupby(list(unfiltered_df)) \\\n .size() \\\n .reset_index(name='count')\n # calculate count as a percentage\n results['as_percent'] = 100 * results['count'] / 
float(total_rows)\n results = results.sort_values(by='as_percent', ascending=False)\n # store results in a .tsv file\n filename = str(col) + \"_\" + str(row[0]) + '_feature_cluster.tsv'\n results.to_csv(filename.replace(\"/\", \"-\"), header=True, sep='\\t')\n print(\"results written to file\")\n except:\n # 'count' and 'percentage' columns will generate errors\n # since they don't exist in the original data set\n pass", "def byMeans(dataset, number_of_clusters, class_header=\"Class\", verbosity=0, return_clusters=False):\n if verbosity >= 2: # optionally print dataset shape and info\n print(dataset.shape)\n print(dataset)\n\n old_dataset = dataset.copy()\n dataset = dataset.drop(columns=class_header) # remove non-float class column\n\n # Assign centroids to random values which fit into dataset space.\n centroids = pandas.DataFrame(columns=dataset.columns,\n data=numpy.random.uniform(dataset.min(), dataset.max(),\n (number_of_clusters, dataset.shape[1])))\n if verbosity >= 1: # optionally print centroids and random dataset\n print(\"INITIAL CENTROIDS\")\n print(centroids)\n if verbosity >= 2:\n print(\"DATAFRAME DATASET\")\n print(dataset)\n\n for iterations in range(MAX_ITERATIONS): # Loop until MAX_ITERATIONS or settled\n if verbosity >= 1: # optionally print iteration count\n print(\"ITERATIONS\")\n print(iterations)\n\n # calculate clustering of data\n clusters = Cluster.calcClusters(dataset, centroids, number_of_clusters, verbosity=verbosity)\n\n old_centroids = centroids.copy() # copy centroid dataframe\n\n if verbosity >= 2: # optionally print cluster list\n print(\"DATAFRAME ARRAY CLUSTERS\")\n print(clusters)\n\n for cluster_index, cluster in enumerate(clusters): # Calculate new centroids\n cluster_mean = cluster.mean()\n if not cluster_mean.isnull().any(): # make sure we dont write null means to centroid list\n centroids.loc[cluster_index] = cluster_mean\n\n if verbosity >= 1:\n print(\"OLD CENTROIDS\")\n print(old_centroids)\n print(\"NEW CENTROIDS\")\n print(centroids)\n\n if old_centroids is not None: # Calculate sum of centroid movements.\n centroid_change = 0\n for centroid_index, centroid in centroids.iterrows():\n centroid_change += abs(Cluster.calcDistance(centroid, old_centroids.loc[centroid_index]))\n\n if verbosity >= 1:\n print(\"CENTROID DIFF\")\n print(centroid_change)\n\n if centroid_change < SETTLE_THRESHOLD: # break if centroid movement is below threshold.\n break\n\n # Final Cluster re-calculation\n clusters = Cluster.calcClusters(old_dataset, centroids, number_of_clusters,\n verbosity=verbosity, class_header=class_header)\n # Create new dataframe with class column of and row for each centroid\n centroids_class = pandas.DataFrame(data=[\"NOCLASS\"] * centroids.shape[0], columns=[class_header])\n if verbosity >= 2:\n print(centroids_class)\n print(centroids)\n for cluster_index, cluster in enumerate(clusters): # For each cluster\n if verbosity >= 2:\n print(cluster_index)\n print(cluster)\n if cluster.size > 0: # If cluster is not empty set centroid class to most common class in cluster\n centroids_class.iat[cluster_index, 0] = cluster.mode().loc[0][0]\n if old_dataset.columns[0] == class_header: # check if class column should be first or last.\n print(\"CLASS IS FIRST COL\")\n centroids = pandas.concat([centroids_class, centroids], axis=1) # merge class to centroids as first column\n else:\n print(\"CLASS IS NOT FIRST COL\")\n centroids = pandas.concat([centroids, centroids_class], axis=1) # merge class to centroids as last column\n for centroid in 
centroids.iterrows(): # For each centroid\n if centroid[1][class_header] is \"NOCLASS\": # Trim NOCLASS centroids (empty cluster)\n centroids = centroids.drop(centroid[0])\n centroids = centroids.reset_index(drop=True) # Reindex centroids\n\n if return_clusters is True: # optionally return cluster list\n return centroids, clusters\n pass\n else:\n return centroids # return centroids dataframe", "def train(self, data):\n\t\tepsilon = self.epsilon\n\t\ttempDist = 1.0\n\t\tk = self.k\n\t\tcenters = data.rdd.takeSample(False, k, 1)\n\t\ti = 0 \n\t\twhile tempDist > epsilon or self.maxNoOfIteration > i:\n\t\t\ti+=1\t\t\t\n\t\t\tclosest = data.map(lambda p: (closestCluster(p, centers), (np.array(p), 1)))\n \t\t\tpointStats = closest.reduceByKey(lambda x, y: (x[0] + y[0], x[1] + y[1]))\n \t\tnewPoints = pointStats.map(lambda x: (x[0], x[1][0] / float(x[1][1]))).collect()\n \t\ttempDist = sum(np.sum((centers[index] - p) ** 2) for (index, p) in newPoints)\n \t\tfor (ind, p) in newPoints:\n\t\t\t\tcenters[ind] = p\n\t\tself.centers = centers\n\t\treturn self.centers", "def calculate_kmeans(df, clusters=10):\r\n kmeans = KMeans(n_clusters=clusters)\r\n labels = kmeans.fit_predict(df)\r\n\r\n return kmeans, labels", "def cluster_importance(self, clf=DecisionTreeClassifier(), n_most_important=3):\n\n for k in xrange(self.n_clusters):\n labels = (self.labels == k)\n clf.fit(self.data.values, labels)\n\n print \"\\n ======== cluster {} / {} ========\".format(k + 1, self.n_clusters)\n\n sorted_importance = sorted(zip(clf.feature_importances_, self.data.columns), key=lambda (imp, col): imp, reverse=True)\n sorted_importance = sorted_importance[:n_most_important]\n\n for imp, col in sorted_importance:\n print \"[{:.5f} relative importance] {}\".format(imp, col)\n print self.data.loc[labels, col].describe()", "def cluster_membership_occupancy(data):\n \n \n \n n_clusters = len(set(data['clusters'])-{-1}) # since -1 element denotes noice\n\n if n_clusters == 0:\n membership=[Cluster_Membership_Features()]\n membership = pd.DataFrame([o.__dict__ for o in membership])\n areas=[Cluster_Area_Features()]\n areas = pd.DataFrame([o.__dict__ for o in areas])\n density=[Cluster_Density_Features()]\n density = pd.DataFrame([o.__dict__ for o in density])\n all_features = pd.concat([membership.reset_index(drop=True), areas.reset_index(drop=True),\n density], axis=1)\n \n elif n_clusters ==1:\n #obtain_total_cluster_areas_set_everything_else_to_default\n membership=[Cluster_Membership_Features()]\n membership = pd.DataFrame([o.__dict__ for o in membership])\n d = dict(tuple(data.groupby('clusters')))\n d.pop(-1, None)\n \n try:\n cluster_chull_areas=[ss.ConvexHull(np.column_stack([d[i]['X'].array,d[i]['Y'].array])).volume for i in d.keys()]\n except:\n cluster_chull_areas=[0,0,0]\n \n Total_cluster_area=np.sum(cluster_chull_areas)\n areas=[Cluster_Area_Features([Total_cluster_area,0,0,0,0,0,0,0,0])]\n areas = pd.DataFrame([o.__dict__ for o in areas])\n density=[Cluster_Density_Features()]\n density = pd.DataFrame([o.__dict__ for o in density])\n all_features = pd.concat([membership.reset_index(drop=True), areas.reset_index(drop=True),\n density], axis=1)\n \n elif n_clusters >1:\n #Summarizing the cluster membership distribution characteristics\n cluster_size_nums=np.delete(np.array(data.groupby(['clusters']).size()),0)\n (cluster_size_nums_avg,cluster_size_nums_min,cluster_size_nums_max,\n cluster_size_nums_std,cluster_size_nums_cv,cluster_size_nums_cd,\n cluster_size_nums_IQR,cluster_size_nums_Quartile_CD)= 
distribution_statistics(cluster_size_nums)\n\n #For each cluster calculate the area by calculating the area of the convex hull of cluster members\n # Note: concavehull implementation here might be a good addition as it will provide more imformative values. \n\n d = dict(tuple(data.groupby('clusters')))\n d.pop(-1, None)\n try:\n cluster_chull_areas=[ss.ConvexHull(np.column_stack([d[i]['X'].array,d[i]['Y'].array])).volume for i in d.keys()]\n except:\n cluster_chull_areas=[0,0,0,0,0]\n \n\n (avg_cluster_area,min_cluster_area,max_cluster_area,\n std_cluster_area,CV_cluster_area,CD_cluster_area,\n IQR_cluster_area,Quartile_CD_cluster_area)= distribution_statistics(cluster_chull_areas)\n Total_cluster_area=np.sum(cluster_chull_areas)\n\n #Calculate cluster density: number of nuclei/ convex area of cluster\n cluster_density=np.divide(cluster_size_nums,cluster_chull_areas)\n (avg_cluster_density,min_cluster_density,max_cluster_density,\n std_cluster_density,CV_cluster_density,CD_cluster_density,\n IQR_cluster_density,Quartile_CD_cluster_density)= distribution_statistics(cluster_density)\n\n #return dataframe of features\n membership=[Cluster_Membership_Features([cluster_size_nums_avg,cluster_size_nums_min,cluster_size_nums_max,\n cluster_size_nums_std,cluster_size_nums_cv,cluster_size_nums_cd,\n cluster_size_nums_IQR,cluster_size_nums_Quartile_CD])]\n membership = pd.DataFrame([o.__dict__ for o in membership])\n areas=[Cluster_Area_Features([Total_cluster_area,\n avg_cluster_area,min_cluster_area,max_cluster_area,\n std_cluster_area,CV_cluster_area,CD_cluster_area,\n IQR_cluster_area,Quartile_CD_cluster_area])]\n areas = pd.DataFrame([o.__dict__ for o in areas])\n density=[Cluster_Density_Features([avg_cluster_density,min_cluster_density,max_cluster_density,\n std_cluster_density,CV_cluster_density,CD_cluster_density,\n IQR_cluster_density,Quartile_CD_cluster_density])]\n density = pd.DataFrame([o.__dict__ for o in density])\n\n all_features = pd.concat([membership.reset_index(drop=True), areas.reset_index(drop=True),\n density], axis=1)\n return all_features", "def features_sparseness(dat,sort=0): \n \n lblst=dat.columns.tolist()\n ll=len(dat)\n res=pd.Series(index=lblst,name='sparseness')\n \n for lb in lblst:\n ct=dat[lb].value_counts()\n res[lb]= ct.iloc[0]/ll\n \n if sort==1:\n res.sort_values(ascending=True,inplace=True)\n elif sort==-1:\n res.sort_values(ascending=False,inplace=True)\n else:\n pass\n \n return res", "def makeRankDF(self, class_selection=None):\n df_values = pd.DataFrame()\n for idx, clf in enumerate(self.clfs):\n df_values[idx] = pd.Series(self._orderFeatures(clf,\n class_selection=class_selection),\n index=self.features)\n df_result = self._makeFeatureDF(df_values)\n df_result = df_result.fillna(0)\n return df_result.sort_values(cn.MEAN)", "def makeRankDF(self, class_selection=None):\n df_values = pd.DataFrame()\n for idx, clf in enumerate(self.clfs):\n df_values[idx] = pd.Series(self._orderFeatures(clf,\n class_selection=class_selection),\n index=self.features)\n df_result = self._makeFeatureDF(df_values)\n df_result = df_result.fillna(0)\n return df_result.sort_values(cn.MEAN)", "def grouping(data_clust):\n data_grouped = data_clust.groupby('Clusters').mean()\n return data_grouped" ]
[ "0.72233945", "0.6792338", "0.6017771", "0.58476025", "0.57573664", "0.5649134", "0.56137496", "0.5605749", "0.56018263", "0.55858415", "0.55482686", "0.55418605", "0.5532151", "0.551682", "0.5425047", "0.54250115", "0.5417848", "0.5410822", "0.5366402", "0.53150505", "0.52947503", "0.5293667", "0.52910036", "0.5276034", "0.52610314", "0.5259408", "0.5243734", "0.5238531", "0.5238531", "0.5220504" ]
0.78250796
0
Based on a dataframe given as an argument and lists of buy and sell clusters, returns the mean profit per class (buy, sell) in the testing dataset.
def get_mean_profit_per_class_from_test_df(df_profit_per_cluster_test: pd.core.frame.DataFrame, buy_clusters_list: List[int], sell_clusters_list: List[int]) -> tuple: # if condition returns False, AssertionError is raised: assert len(buy_clusters_list) != 0 and len(sell_clusters_list) != 0, "Clusters list can't be empty." buy_clusters_mean_profit = \ df_profit_per_cluster_test.loc[df_profit_per_cluster_test['cluster'].isin(buy_clusters_list)]['profit'].mean() sell_clusters_mean_profit = \ df_profit_per_cluster_test.loc[df_profit_per_cluster_test['cluster'].isin(sell_clusters_list)]['profit'].mean() return buy_clusters_mean_profit, sell_clusters_mean_profit
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_mean_profit_per_class_from_train_df(df_profit_per_cluster_train: pd.core.frame.DataFrame) -> tuple:\n # if condition returns False, AssertionError is raised:\n assert len(df_profit_per_cluster_train) >= 3, \"Algorithm, returned less than 3 clusters.\"\n\n df_profit_per_cluster = df_profit_per_cluster_train.sort_values(by='profit', ascending=False)\n group_size = int(len(df_profit_per_cluster) / 3)\n\n buy_clusters_mean_profit = df_profit_per_cluster.iloc[:group_size]['profit'].mean()\n sell_clusters_mean_profit = df_profit_per_cluster.iloc[-group_size:]['profit'].mean()\n\n buy_clusters_list = list(df_profit_per_cluster.iloc[:group_size]['cluster'])\n sell_clusters_list = list(df_profit_per_cluster.iloc[-group_size:]['cluster'])\n\n return buy_clusters_mean_profit, buy_clusters_list, sell_clusters_mean_profit, sell_clusters_list", "def get_profit_per_cluster(df: pd.core.frame.DataFrame) -> pd.core.frame.DataFrame:\n return pd.DataFrame(df.groupby(by='cluster')['profit'].mean(), columns=['profit']).reset_index()", "def cluster_means(self):\n if self.evaluate_by is not None:\n return(self.merged_data.groupby(\n 'labels').mean().sort_values(self.evaluate_by).transpose())\n else:\n return(self.merged_data.groupby('labels').mean().transpose())", "def score(self, df_X, ser_y):\n df_predict = self.predict(df_X)\n accuracies = []\n for instance in ser_y.index:\n # Accuracy is the probability of selecting the correct class\n try:\n accuracy = df_predict.loc[instance, ser_y.loc[instance]]\n except:\n import pdb; pdb.set_trace()\n accuracies.append(accuracy)\n return np.mean(accuracies)", "def calculate_kmeans(df, clusters=10):\r\n kmeans = KMeans(n_clusters=clusters)\r\n labels = kmeans.fit_predict(df)\r\n\r\n return kmeans, labels", "def calc_skill_cluster_sets(blocked_days, GTD, GTD_seas, persis_thresh, SOM_nodes, blocks_one_clusnum, skill_str, seas):\r\n prec_arr, recall_arr, F1_arr, clus_num_arr = [], [], [], []\r\n\r\n prec_vals = sorted(np.unique(blocks_one_clusnum[skill_str].values), reverse = True)\r\n #loop through first element separately so that subsequent values can be appended\r\n node_cluster_set_test_str, ds_arr = [], []\r\n for prec in prec_vals:\r\n node_cluster_set_test_str_app = blocks_one_clusnum['set'][np.where(blocks_one_clusnum[skill_str]==prec)[0]].values\r\n for clus in node_cluster_set_test_str_app:\r\n #add cluster to cluster set\r\n node_cluster_set_test_str = np.append(node_cluster_set_test_str, clus)\r\n node_cluster_set_test_str = np.unique(node_cluster_set_test_str)\r\n node_num = len(node_cluster_set_test_str) # number of nodes in cluster set\r\n clus_num_arr.append(node_num)\r\n #calculate skill score of cluster set by calculating the number of days blocked from the GTD and selecting the season\r\n blocked_days_clus = calc_blocked_days_clus(blocked_days, persis_thresh, SOM_nodes, node_cluster_set_test_str)\r\n blocked_days_clus_xr = xr.DataArray(blocked_days_clus, name = \"blocking\", dims={\"time\": GTD['time']})\r\n blocked_days_clus_xr['time'] = GTD['time']\r\n blocked_days_clus_sel = blocked_days_clus_xr.sel(time=np.isin(blocked_days_clus_xr['time.season'], seas))\r\n prec, recall, F1 = calc_pr_rc_F1(GTD_seas, blocked_days_clus_sel)\r\n prec_arr.append(prec)\r\n recall_arr.append(recall)\r\n F1_arr.append(F1)\r\n\r\n return clus_num_arr, prec_arr, recall_arr, F1_arr", "def evaluation(X_selected, X_test, n_clusters, y):\n k_means = KMeans(n_clusters=n_clusters, init='k-means++', n_init=10, max_iter=300,\n tol=0.0001, precompute_distances=True, 
verbose=0,\n random_state=None, copy_x=True, n_jobs=1)\n\n k_means.fit(X_selected)\n y_predict = k_means.predict(X_test)\n\n # calculate NMI\n nmi = normalized_mutual_info_score(y, y_predict, average_method='arithmetic')\n\n sil = silhouette_score(X_test, y_predict, metric=\"euclidean\")\n db_score = davies_bouldin_score(X_test, y_predict)\n ch_score = calinski_harabasz_score(X_test, y_predict)\n purity = calcolaPurity(y, y_predict)\n\n return nmi, sil, db_score, ch_score, purity", "def classify():\n yes_dataset = df[df[\"_class\"] == 1] # 470588\n no_dataset = df[df[\"_class\"] == 0] # 1971\n\n parameter_analysis = list()\n for criterion in np.arange(0.05, 0.91, 0.05):\n print(\"doing experiment at criterion = %s ...\" % criterion)\n rate_list = list()\n for i in range(10):\n # shuffle yes_dataset and no_dataset, so we can randomly choose 90% yes_dataset\n # + 90% no_dataset as train dataset, 10% yes_dataset + 10% no_dataset as test dataset\n yes_index = yes_dataset.index.tolist()\n random.shuffle(yes_index)\n no_index = no_dataset.index.tolist()\n random.shuffle(no_index)\n \n # concatenate 90%yes + 90%no, 10%yes + 10%no\n train = pd.concat([\n yes_dataset.loc[yes_index[:1774], :],\n no_dataset.loc[no_index[:423530], :]\n ])\n test = pd.concat([\n yes_dataset.loc[yes_index[1774:], :],\n no_dataset.loc[no_index[423530:], :]\n ]) \n \n # split data and label\n train_data, train_label = (train[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n train[\"_class\"])\n test_data, test_label = (test[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n test[\"_class\"])\n \n # apply classifier\n clf = GaussianNB()\n clf.fit(train_data, train_label)\n probability = clf.predict_proba(test_data).T[1]\n \n result = pd.DataFrame()\n result[\"_class\"] = test_label\n result[\"_predict\"] = probability >= criterion\n \n result_yes = result[result[\"_class\"] == 1]\n yes_yes_rate = sum(result_yes[\"_class\"] == result_yes[\"_predict\"])/len(result_yes[\"_predict\"])\n \n result_no = result[result[\"_class\"] == 0]\n no_no_rate = sum(result_no[\"_class\"] == result_no[\"_predict\"])/len(result_no[\"_predict\"])\n \n rate_list.append((yes_yes_rate, no_no_rate))\n\n rate_list = pd.DataFrame(rate_list)\n yes_yes_rate, no_no_rate = rate_list.mean()[0], rate_list.mean()[1]\n parameter_analysis.append((criterion, yes_yes_rate, no_no_rate))\n \n # save data to excel spreadsheet\n parameter_analysis = pd.DataFrame(parameter_analysis, columns=[\"criterion\", \"yes_yes_rate\", \"no_no_rate\"])\n writer = pd.ExcelWriter(\"parameter_analysis.xlsx\")\n parameter_analysis.to_excel(writer, \"parameter_analysis\", index=False)\n writer.save()", "def calc_skill_clusters(blocked_days, GTD, GTD_seas, persis_thresh, SOM_nodes, SOM_clusters_block, seas):\r\n ds_arr_ones = []\r\n for clus in SOM_clusters_block:\r\n node_cluster_set_test = [clus]\r\n node_cluster_set_test_str = [str(clus).replace(',', '') for clus in node_cluster_set_test]\r\n #calculate the blocked days which the new cluster determines\r\n blocked_days_clus = calc_blocked_days_clus(blocked_days, persis_thresh, SOM_nodes, node_cluster_set_test_str)\r\n #define as DataArray and select JJA to remove the extended days included for classifying blocks\r\n blocked_days_clus_xr = xr.DataArray(blocked_days_clus, name = \"blocking\", dims={\"time\": GTD['time']})\r\n blocked_days_clus_xr['time'] = GTD['time']\r\n blocked_days_clus_seas = 
blocked_days_clus_xr.sel(time=np.isin(blocked_days_clus_xr['time.season'], seas))\r\n prec, recall, F1 = calc_pr_rc_F1(GTD_seas, blocked_days_clus_seas)\r\n #calculate precision, recall and F1\r\n if len(str(node_cluster_set_test)) == 1:\r\n comb_str = f\"{node_cluster_set_test[0]}\".replace(\"'\", \"\")\r\n else:\r\n comb_str = f\"{str(node_cluster_set_test)[1:-1]}\".replace(\"'\", \"\") \r\n ds=xr.Dataset({'precision': prec, 'recall': recall, 'F1': F1, 'clus_num': int(len(node_cluster_set_test)), 'set': str(comb_str)})\r\n ds_arr_ones.append(ds)\r\n blocks_one_clusnum = xr.concat(ds_arr_ones, dim = \"set\")\r\n return blocks_one_clusnum", "def wca_mean(X, k, df):\n\t\n\n\t# Intializing the clusters\t\n\tC = dict()\n\tfor cluster in range(k):\n\t C[cluster] = pd.DataFrame()\n\n\t# Calculating the mean vector\n\tmean_vector = X.mean()\n\n\t# Choosing the seed points based on the minimum distance from the mean vector\n\tX['dist_mean'] = X.apply(lambda x: np.linalg.norm(np.asarray(x)- np.asarray(mean_vector)), axis=1)\n\tdist_means = X.sort_values(by='dist_mean')\n\t\n\t# Dropping the the datapoints which have already been assigned as seed\n\tidx_to_drop = dist_means.index[:k]\n\tdist_means.reset_index(drop=True,inplace=True)\n\tX.drop('dist_mean',axis=1,inplace=True)\n\tX.drop(idx_to_drop, inplace=True)\n\n\t# Assigning seed points to the clusters\n\tmu = list()\n\tfor cluster in range(k):\n\t C[cluster] = C[cluster].append(dist_means.iloc[cluster].drop('dist_mean'))\n\t mu.append(C[cluster].mean())\n\t\n\t# Running the algorithm\t\n\t\n\t# Initializing the p-value list which would be used for plotting\n\tpval = dict()\n\n\tfor cluster in range(k):\n\t pval[cluster] = dict()\n\t for i in C[0].columns:\n\t pval[cluster][i] = list()\n\n\t# Algorithm\n\tfor i in tqdm(range(int(len(X)/k)), desc='Iterations: '):\n\t for cluster in range(k):\n\n\t # Calculating the distances from the mean vector of eaimportch cluster (in Descending order)\n\t X['dist_mean'] = X.apply(lambda x: np.linalg.norm(np.asarray(x)- np.asarray(mu[cluster])), axis=1)\n\t dist_means = X.sort_values(by='dist_mean', ascending=False)\n\t idx_to_drop = dist_means.index[0]\n\t dist_means.reset_index(drop=True,inplace=True)\n\t X.drop('dist_mean',axis=1,inplace=True)\n\n\t # Assigning the top value to the cluster\n\t C[cluster] = C[cluster].append(dist_means.iloc[0].drop('dist_mean'))\n\t C[cluster] = C[cluster].reset_index(drop=True)\n\t \n\t # Updating means of each cluster\n\t mu[cluster] = C[cluster].mean()\n\n\t # Remove datapoint from X?\n\t X.drop(idx_to_drop,inplace=True)\n\t \n\t for i in C[0].columns:\n\t pval[cluster][i].append(sc.ks_2samp(C[cluster][i],df.drop('target',axis=1)[i])[1])\n\n\treturn(C,pval)", "def score(self, df_X, ser_y):\n df_predict = self.predict(df_X)\n missing_columns = set(ser_y).difference(\n df_predict.columns)\n for column in missing_columns:\n df_predict[column] = np.repeat(0,\n len(df_predict))\n accuracies = []\n for instance in ser_y.index:\n # Accuracy is the probability of selecting the correct class\n try:\n accuracy = df_predict.loc[instance, ser_y.loc[instance]]\n except:\n import pdb; pdb.set_trace()\n accuracies.append(accuracy)\n return np.mean(accuracies)", "def target_mean(train,test,train_index=None,holdout_index=None,col=[],\n target='click',num_folds=5,seed=23):\n feature_name='new_features'\n if holdout_index is None:\n train_cv = train.copy()\n holdout = None\n else:\n if train_index is None:\n warnings.warn('train index is None. Now need to calculate. 
If you parse the value, it will be more efficient ')\n train_index = list(set(train.index) - set(holdout_index))\n train_cv = train.loc[train_index].copy()\n holdout = train.loc[holdout_index].copy()\n holdout_list = []\n sf = StratifiedKFold(n_splits= num_folds, shuffle=True, random_state=seed)\n\n train_return = train[col].copy()\n test_return = test[col].copy()\n train_return[feature_name] = np.nan\n test_return[feature_name] = np.nan\n test_list = []\n \n val_index_monitor = []\n for t_index,v_index in sf.split(train_cv,train_cv[target]):\n history = train_cv.iloc[t_index].copy()\n mapping = history.groupby(col)[target].mean().reset_index().rename({target:feature_name},axis=1)\n val = train_cv.iloc[v_index].copy()\n val_index_monitor.extend(list(val.index))\n train_return.loc[val.index,feature_name] = val[col].merge(mapping,how='left',left_on=col,right_on=col).drop(col,axis=1)[feature_name].values\n if holdout is not None:\n holdout_list.append(holdout[col].merge(mapping,how='left',left_on=col,right_on=col).drop(col,axis=1)[feature_name].values)\n test_list.append(test[col].merge(mapping,how='left',left_on=col,right_on=col).drop(col,axis=1)[feature_name].values)\n if holdout is not None:\n train_return.loc[holdout.index,feature_name] = np.mean(np.array(holdout_list),axis=0)\n test_return[feature_name] = np.mean(np.array(test_list),axis=0)\n val_index_monitor.extend(list(holdout.index))\n return train_return[feature_name].values,test_return[feature_name].values", "def create_strategy(filename: str, columns_list: List[str], som_width: int, som_height: int, n_iter: int, sigma=0.3,\n learning_rate=0.01) -> tuple:\n # get prepared data\n df, df_prepared, df_train, df_test, df_train_columns = get_data(filename, columns_list)\n\n # train som\n final_df_train, final_df_test = train_som(som_width, som_height, df, df_train, df_test, df_train_columns, n_iter,\n sigma=sigma, learning_rate=learning_rate)\n\n # get profit per cluster in train and test datasets\n df_profit_per_cluster_train = get_profit_per_cluster(final_df_train)\n df_profit_per_cluster_test = get_profit_per_cluster(final_df_test)\n\n # get mean profit for sell and buy class in training and testing datasets\n try:\n buy_clusters_mean_profit_train, buy_clusters_list, sell_clusters_mean_profit_train, sell_clusters_list = \\\n get_mean_profit_per_class_from_train_df(df_profit_per_cluster_train)\n\n buy_clusters_mean_profit_test, sell_clusters_mean_profit_test = \\\n get_mean_profit_per_class_from_test_df(df_profit_per_cluster_test, buy_clusters_list, sell_clusters_list)\n # if the data was assigned to less than to 3 clusters\n except:\n buy_clusters_mean_profit_train, sell_clusters_mean_profit_train, \\\n buy_clusters_mean_profit_test, sell_clusters_mean_profit_test = None, None, None, None\n\n return len(df_profit_per_cluster_train), len(df_profit_per_cluster_test), \\\n buy_clusters_mean_profit_train, sell_clusters_mean_profit_train, \\\n buy_clusters_mean_profit_test, sell_clusters_mean_profit_test", "def purity_score(label, pred):\n \n df = pd.concat([label, pd.DataFrame(pred)], axis=1)\n df.set_axis(['label', 'pred'], axis=1, inplace=True)\n \n s = 0\n\n for x, cluster in df.groupby('pred'):\n s += cluster['label'].value_counts().iloc[0] # adding the most occuring class in a cluster\n\n return s / label.shape[0]", "def grouping(data_clust):\n data_grouped = data_clust.groupby('Clusters').mean()\n return data_grouped", "def test_splits(ratings_df, predict_method, k, test_size=250, splits=3):\n mse_ls = []\n for _ in 
range(splits):\n test_ratings_df = ratings_df.sample(n=test_size)\n train_ratings_df = ratings_df.drop(test_ratings_df.index)\n model_params = matrix_cf.get_model_params(train_ratings_df)\n _, mse = predict_method(test_ratings_df, k, model_params)\n mse_ls.append(mse)\n return np.array(mse_ls).mean()", "def byMeans(dataset, number_of_clusters, class_header=\"Class\", verbosity=0, return_clusters=False):\n if verbosity >= 2: # optionally print dataset shape and info\n print(dataset.shape)\n print(dataset)\n\n old_dataset = dataset.copy()\n dataset = dataset.drop(columns=class_header) # remove non-float class column\n\n # Assign centroids to random values which fit into dataset space.\n centroids = pandas.DataFrame(columns=dataset.columns,\n data=numpy.random.uniform(dataset.min(), dataset.max(),\n (number_of_clusters, dataset.shape[1])))\n if verbosity >= 1: # optionally print centroids and random dataset\n print(\"INITIAL CENTROIDS\")\n print(centroids)\n if verbosity >= 2:\n print(\"DATAFRAME DATASET\")\n print(dataset)\n\n for iterations in range(MAX_ITERATIONS): # Loop until MAX_ITERATIONS or settled\n if verbosity >= 1: # optionally print iteration count\n print(\"ITERATIONS\")\n print(iterations)\n\n # calculate clustering of data\n clusters = Cluster.calcClusters(dataset, centroids, number_of_clusters, verbosity=verbosity)\n\n old_centroids = centroids.copy() # copy centroid dataframe\n\n if verbosity >= 2: # optionally print cluster list\n print(\"DATAFRAME ARRAY CLUSTERS\")\n print(clusters)\n\n for cluster_index, cluster in enumerate(clusters): # Calculate new centroids\n cluster_mean = cluster.mean()\n if not cluster_mean.isnull().any(): # make sure we dont write null means to centroid list\n centroids.loc[cluster_index] = cluster_mean\n\n if verbosity >= 1:\n print(\"OLD CENTROIDS\")\n print(old_centroids)\n print(\"NEW CENTROIDS\")\n print(centroids)\n\n if old_centroids is not None: # Calculate sum of centroid movements.\n centroid_change = 0\n for centroid_index, centroid in centroids.iterrows():\n centroid_change += abs(Cluster.calcDistance(centroid, old_centroids.loc[centroid_index]))\n\n if verbosity >= 1:\n print(\"CENTROID DIFF\")\n print(centroid_change)\n\n if centroid_change < SETTLE_THRESHOLD: # break if centroid movement is below threshold.\n break\n\n # Final Cluster re-calculation\n clusters = Cluster.calcClusters(old_dataset, centroids, number_of_clusters,\n verbosity=verbosity, class_header=class_header)\n # Create new dataframe with class column of and row for each centroid\n centroids_class = pandas.DataFrame(data=[\"NOCLASS\"] * centroids.shape[0], columns=[class_header])\n if verbosity >= 2:\n print(centroids_class)\n print(centroids)\n for cluster_index, cluster in enumerate(clusters): # For each cluster\n if verbosity >= 2:\n print(cluster_index)\n print(cluster)\n if cluster.size > 0: # If cluster is not empty set centroid class to most common class in cluster\n centroids_class.iat[cluster_index, 0] = cluster.mode().loc[0][0]\n if old_dataset.columns[0] == class_header: # check if class column should be first or last.\n print(\"CLASS IS FIRST COL\")\n centroids = pandas.concat([centroids_class, centroids], axis=1) # merge class to centroids as first column\n else:\n print(\"CLASS IS NOT FIRST COL\")\n centroids = pandas.concat([centroids, centroids_class], axis=1) # merge class to centroids as last column\n for centroid in centroids.iterrows(): # For each centroid\n if centroid[1][class_header] is \"NOCLASS\": # Trim NOCLASS centroids (empty cluster)\n 
centroids = centroids.drop(centroid[0])\n centroids = centroids.reset_index(drop=True) # Reindex centroids\n\n if return_clusters is True: # optionally return cluster list\n return centroids, clusters\n pass\n else:\n return centroids # return centroids dataframe", "def _eval_clustering(self, gen_reviews, clusters, embedding_model, clustering):\n result = []\n preds = self.predict_gen(gen_reviews, embedding_model, clustering)\n\n acc = accuracy_score(np.array(clusters), np.array(preds))\n conf = confusion_matrix(np.array(clusters), np.array(preds))\n\n return acc, conf", "def evaluateClusters( features, labels):\r\n\r\n\treturn silhouette_score( features, labels)", "def get_perf(self) :\n self.train()\n\n prediction = self.clf.predict(self.df_test.drop(columns = 'up')[:-1])\n self.accuracy = accuracy_score(df_test['up'][length:].values, prediction)\n tn, fp, fn, tp = confusion_matrix(df_test['up'][length:].values, prediction).ravel()\n self.recall = tp/(tp+fn)\n self.specificity = tn / (tn+fp)\n\n\n self.df_true = self.df_true[self.length:]\n\n profit = 1\n mini = 1\n maxi = 1\n self.df_true['close'] = self.df_true['close'].map(lambda x : np.exp(x))\n for s in range(1,len(self.df_true)):\n if prediction[x-1] == 1 :\n result = ((self.df_true['close'].iloc[s] -self.df_true['close'].iloc[s-1]) / self.df_true['close'].iloc[s-1]) + 1\n profit = profit * result\n if result < mini :\n mini = result\n if maxi < result :\n maxi = result\n self.mini = mini\n self.maxi = maxi\n self.profit = profit", "def evalute_subset(X_train, X_test, y_train, y_test):\r\n clf = KNeighborsClassifier(n_neighbors=3)\r\n clf.fit(X_train, y_train) \r\n y_pred = clf.predict(X_test)\r\n return accuracy_score(y_test, y_pred)", "def clusterting_feature_importance (df, cluster_col):\r\n scores = pd.DataFrame()\r\n df0 = df.copy()\r\n df0 = df.select_dtypes(include=np.number)\r\n\r\n for i in df0[cluster_col].unique():\r\n df2 = df0[df0[cluster_col] == i]\r\n df2.drop(cluster_col,axis=1, inplace=True)\r\n #df2 = df.select_dtypes(include=np.number)\r\n scores[i] = df2.std() / (df2.max() - df2.min())\r\n scores['mean'] = scores.mean(axis = 1)\r\n\r\n scores = 1 - scores\r\n\r\n return scores", "def _compute_cluster_averages(self, key=\"_scvi_labels\"):\n # find cell label column\n label_col = self.adata.uns[\"_scvi\"][\"categorical_mappings\"][key][\"original_key\"]\n\n # find data slot\n x_dict = self.adata.uns[\"_scvi\"][\"data_registry\"][\"X\"]\n if x_dict[\"attr_name\"] == \"X\":\n use_raw = False\n else:\n use_raw = True\n if x_dict[\"attr_name\"] == \"layers\":\n layer = x_dict[\"attr_key\"]\n else:\n layer = None\n\n # compute mean expression of each gene in each cluster/batch\n aver = compute_cluster_averages(self.adata, labels=label_col, use_raw=use_raw, layer=layer)\n\n return aver", "def train(self, data):\n\t\tepsilon = self.epsilon\n\t\ttempDist = 1.0\n\t\tk = self.k\n\t\tcenters = data.rdd.takeSample(False, k, 1)\n\t\ti = 0 \n\t\twhile tempDist > epsilon or self.maxNoOfIteration > i:\n\t\t\ti+=1\t\t\t\n\t\t\tclosest = data.map(lambda p: (closestCluster(p, centers), (np.array(p), 1)))\n \t\t\tpointStats = closest.reduceByKey(lambda x, y: (x[0] + y[0], x[1] + y[1]))\n \t\tnewPoints = pointStats.map(lambda x: (x[0], x[1][0] / float(x[1][1]))).collect()\n \t\ttempDist = sum(np.sum((centers[index] - p) ** 2) for (index, p) in newPoints)\n \t\tfor (ind, p) in newPoints:\n\t\t\t\tcenters[ind] = p\n\t\tself.centers = centers\n\t\treturn self.centers", "def cluster_by_partitioning(active_sites):\n cls, sc = 
k_means(active_sites)\n\n return cls", "def predict(self, train_df: pd.DataFrame, test_df: pd.DataFrame) -> pd.DataFrame:\n\n # get feature list\n target_columns, features = PropensityModel.get_feature_and_target_columns(train_df)\n\n # predict propensities\n model_prediction_df_list = []\n for pred_df in [train_df, test_df]:\n x_test = pred_df[features]\n y_test = pred_df[target_columns]\n y_pred = self.model.predict_proba(x_test)\n \n # select second column (positive class) if there is only one target variable\n if len(self.target_variables) == 1:\n y_pred = y_pred[:,1]\n \n fold_predictions = pd.DataFrame(y_pred, columns=['prediction_'+x for x in self.target_variables])\n fold_predictions['sf_account_id'] = pred_df.index\n for column in target_columns:\n fold_predictions[column.replace('prediction_','target_')] = y_test[column].tolist()\n fold_predictions['train_or_test'] = pred_df.train_or_test.iloc[0]\n model_prediction_df_list += [fold_predictions]\n model_predictions = pd.concat(model_prediction_df_list, sort=False)\n\n return model_predictions", "def predict(self, test_data):\n if self.centroids_.shape[0]==0:\n raise ValueError(\"No centroids present. Run KMeans.fit first.\")\n\n print test_data.shape\n part_of_cluster=np.zeros(test_data.shape[0])\n for i in range(test_data.shape[0]):\n dists=[]\n for c in range(self.centroids_.shape[0]):\n # compute distance between current train_data instance and each cluster\n dists.append( self.metric( instance=test_data[i,:], centroid=self.centroids_[c,:]) )\n \n # assign point to cluster with minimal distance\n part_of_cluster[i]=dists.index(min(dists))\n\n return part_of_cluster", "def cluster_means_scaled(self):\n if self.evaluate_by is not None:\n return(self.merged_scaled_data.groupby(\n 'labels').mean().sort_values(self.evaluate_by).transpose())\n else:\n return(self.merged_scaled_data.groupby(\n 'labels').mean().transpose())", "def train_som(som_width: int, som_height: int, df: pd.core.frame.DataFrame, df_train: pd.core.frame.DataFrame,\n df_test: pd.core.frame.DataFrame, df_train_columns: pd.core.frame.DataFrame, n_iter: int, sigma=0.3,\n learning_rate=0.01):\n\n som = MiniSom(som_width, som_height, df_train.shape[1], sigma=sigma, learning_rate=learning_rate,\n random_seed=0)\n som.train(df_train, n_iter)\n\n # converting numpy arrays to dataframes\n df_train = pd.DataFrame(df_train, columns=df_train_columns)\n df_test = pd.DataFrame(df_test, columns=df_train_columns)\n\n # creating column with cluster basing on model prediction\n df_train['cluster'] = df_train.apply(lambda x: som_predict(x, som), axis=1)\n df_test['cluster'] = df_test.apply(lambda x: som_predict(x, som), axis=1)\n\n # joining train and test dataframes with previously dropped columns, which will be useful in the further part of\n # the script\n final_df_train = df_train.join(df[['Date', 'Price', 'close_plus_20_days', 'profit']].iloc[:, :len(df_train)],\n lsuffix='_org')\n final_df_test = df_test.join(df[['Date', 'Price', 'close_plus_20_days', 'profit']].iloc[len(df_train):],\n lsuffix='_org')\n\n return final_df_train, final_df_test", "def profitCalculation(confusion_matrix):\n numberofClasses = 4\n profits = [[20, -7, -7, -7], [-7, 15, -7, -7], [-7, -7, 5, -7], [-3, -3, -3, -3]]\n totalProfit = 0\n for count in range(numberofClasses):\n for counter in range(numberofClasses):\n totalProfit += confusion_matrix[count][counter] * profits[count][counter]\n\n return totalProfit" ]
[ "0.8185011", "0.7136002", "0.5966785", "0.59034175", "0.58183724", "0.5765148", "0.5717098", "0.56850725", "0.56474286", "0.56181526", "0.5601236", "0.557191", "0.55662805", "0.5527561", "0.55179936", "0.55108774", "0.54241854", "0.5415164", "0.5412711", "0.5410539", "0.5401505", "0.53984374", "0.53896075", "0.5375711", "0.53662443", "0.5354585", "0.5349638", "0.53489065", "0.5336449", "0.5330914" ]
0.8361798
0
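
A minimal usage sketch for the retrieved function in the document field above. The cluster IDs and per-cluster profit values are made up for illustration, and the function body is repeated only so the snippet runs standalone; it is not part of the dataset record.

```python
import pandas as pd
from typing import List


def get_mean_profit_per_class_from_test_df(df_profit_per_cluster_test: pd.DataFrame,
                                           buy_clusters_list: List[int],
                                           sell_clusters_list: List[int]) -> tuple:
    # Mirrors the document field above so the sketch is self-contained.
    assert len(buy_clusters_list) != 0 and len(sell_clusters_list) != 0, "Clusters list can't be empty."
    buy_mean = df_profit_per_cluster_test.loc[
        df_profit_per_cluster_test['cluster'].isin(buy_clusters_list)]['profit'].mean()
    sell_mean = df_profit_per_cluster_test.loc[
        df_profit_per_cluster_test['cluster'].isin(sell_clusters_list)]['profit'].mean()
    return buy_mean, sell_mean


# Made-up per-cluster mean profits, e.g. as produced by groupby('cluster')['profit'].mean()
toy_profit_per_cluster = pd.DataFrame({'cluster': [0, 1, 2, 3],
                                       'profit': [0.8, -0.2, 0.1, -0.5]})
buy_mean, sell_mean = get_mean_profit_per_class_from_test_df(toy_profit_per_cluster,
                                                             buy_clusters_list=[0, 2],
                                                             sell_clusters_list=[1, 3])
print(buy_mean, sell_mean)  # approximately 0.45 and -0.35
```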
Creates a strategy which can be used in the testing part of the script: reads the preprocessed data split into training and testing sets, trains the SOM model, calculates the mean profit per cluster in the training and testing datasets, and gets the mean profit for the buy and sell classes.
def create_strategy(filename: str, columns_list: List[str], som_width: int, som_height: int, n_iter: int, sigma=0.3, learning_rate=0.01) -> tuple: # get prepared data df, df_prepared, df_train, df_test, df_train_columns = get_data(filename, columns_list) # train som final_df_train, final_df_test = train_som(som_width, som_height, df, df_train, df_test, df_train_columns, n_iter, sigma=sigma, learning_rate=learning_rate) # get profit per cluster in train and test datasets df_profit_per_cluster_train = get_profit_per_cluster(final_df_train) df_profit_per_cluster_test = get_profit_per_cluster(final_df_test) # get mean profit for sell and buy class in training and testing datasets try: buy_clusters_mean_profit_train, buy_clusters_list, sell_clusters_mean_profit_train, sell_clusters_list = \ get_mean_profit_per_class_from_train_df(df_profit_per_cluster_train) buy_clusters_mean_profit_test, sell_clusters_mean_profit_test = \ get_mean_profit_per_class_from_test_df(df_profit_per_cluster_test, buy_clusters_list, sell_clusters_list) # if the data was assigned to less than to 3 clusters except: buy_clusters_mean_profit_train, sell_clusters_mean_profit_train, \ buy_clusters_mean_profit_test, sell_clusters_mean_profit_test = None, None, None, None return len(df_profit_per_cluster_train), len(df_profit_per_cluster_test), \ buy_clusters_mean_profit_train, sell_clusters_mean_profit_train, \ buy_clusters_mean_profit_test, sell_clusters_mean_profit_test
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_train_test(option, transform, params, split=0.2):\r\n clip_im_dir = option.clip_im_dir\r\n matting_dir = option.matting_dir\r\n csv_path = option.csv_path\r\n \r\n print(\"create datasets\")\r\n \r\n \r\n data_df = pd.read_csv(csv_path)\r\n # data_df = MergeDataframe(clip_im_dir, matting_dir)\r\n \r\n #separate data in training and test data (20/80)\r\n train_df, test_df = train_test_split(data_df, test_size=split)\r\n \r\n #search right Dataset class\r\n package_dir = Path(src.dataset.__file__).resolve().parent\r\n\r\n for (_, module_name, _) in iter_modules([package_dir]):\r\n # print(module_name, self.ComType)\r\n if option.dataset.lower() == module_name.lower() :\r\n modelModule = importlib.import_module(\".\"+module_name)\r\n break\r\n \r\n # train data\r\n training_set = modelModule(train_df, clip_im_dir, matting_dir, transform, transform)\r\n train_loader = DataLoader(training_set, **params)\r\n \r\n \r\n #test data\r\n testing_set = modelModule(test_df, clip_im_dir, matting_dir, transform, transform)\r\n test_loader = DataLoader(testing_set, **params)\r\n \r\n return train_loader, test_loader", "def main_predefined_split():\n\n average_performance = []\n fold_num = 'predefined'\n output_file_folder = \"output/{}\".format(args.experiment_name)\n output_file_name = \"{}/lnnel_{}.csv\".format(output_file_folder, fold_num)\n Path(output_file_folder).mkdir(parents=True, exist_ok=True)\n args.output_file_name = output_file_name\n\n if args.use_blink:\n df_train = pd.read_csv(\"./data/lcquad/blink/lcquad_train_sorted.csv\")\n df_test = pd.read_csv(\"./data/lcquad/blink/lcquad_test_sorted.csv\")\n else:\n df_train = pd.read_csv(\"./data/lcquad/dbpedia/lcquad_train_sorted.csv\")\n df_test = pd.read_csv(\"./data/lcquad/dbpedia/lcquad_test_sorted.csv\")\n\n # filter out the questions with single positive or many negatives in trianing set\n filtered_question_mentions = []\n for qm in df_train.QuestionMention.unique():\n df_ = df_train[df_train.QuestionMention == qm]\n if df_.Label.sum() == 0:\n filtered_question_mentions.append(qm)\n if df_.Label.sum() == 1 and df_.shape[0] == 1:\n filtered_question_mentions.append(qm)\n # print(df_.Label.values)\n df_train_split_filtered = df_train[~df_train.QuestionMention.isin(filtered_question_mentions)]\n df_train_split_filtered = df_train_split_filtered.sort_values(by=['QuestionMention', 'Label'])\n df_train = df_train_split_filtered\n\n # train\n features_train = np.array(\n [np.fromstring(s[1:-1], dtype=np.float, sep=', ') for s in df_train.Features.values])\n x_train = torch.from_numpy(features_train).float()\n y_train = torch.from_numpy(df_train.Label.values).float().reshape(-1, 1)\n m_labels_train = df_train.Mention_label.values\n ques_train = df_train.Question.values\n\n # test\n features_test = np.array(\n [np.fromstring(s[1:-1], dtype=np.float, sep=', ') for s in df_test.Features.values])\n x_test = torch.from_numpy(features_test).float()\n y_test = torch.from_numpy(df_test.Label.values).float().reshape(-1, 1)\n m_labels_test = df_test.Mention_label.values\n ques_test = df_test.Question.values\n\n # train model and evaluate\n model = pick_model(args.model_name, args.alpha)\n model = model.to(device)\n\n # move to gpu\n x_train, y_train = x_train.to(device), y_train.to(device)\n x_test, y_test = x_test.to(device), y_test.to(device)\n\n print(model)\n\n print(\"model: \", args.model_name, args.alpha)\n print(model(x_train, m_labels_train))\n\n print(\"y_train sum\", sum(y_train), sum(y_train) / len(y_train))\n print(\"y_test sum\", 
sum(y_test), sum(y_test) / len(y_test))\n\n # aggregate the data into train, val, and test\n train_data = (x_train, y_train, m_labels_train, ques_train)\n print(\"train:\", x_train.shape, y_train.shape, m_labels_train.shape, ques_train.shape)\n test_data = (x_test, y_test, m_labels_test, ques_test)\n print(\"test:\", x_test.shape, y_test.shape, m_labels_test.shape, ques_test.shape)\n\n # check class distribution\n print(\"y_train sum\", sum(y_train), sum(y_train) / len(y_train))\n print(\"y_test sum\", sum(y_test), sum(y_test) / len(y_test))\n\n train(model, train_data, test_data, test_data, args.checkpoint_name, args.num_epoch, args.margin,\n args.learning_rate)\n test_pred, best_scores = test(x_test, m_labels_test, ques_test, args.alpha, args.checkpoint_name,\n args.model_name,\n args.output_file_name)\n with open(args.log_file_name, 'a') as f:\n f.write(\n \"model={}; use_fixed_threshold={}; alpha={}; p={}; r={}; f1={}; lr={}; margin={}\\n\".format(\n args.model_name,\n args.use_fixed_threshold,\n args.alpha,\n best_scores[\n 'precision'],\n best_scores[\n 'recall'],\n best_scores['f1'],\n args.learning_rate,\n args.margin))\n print(\"model={}; use_fixed_threshold={}; alpha={}; p={}; r={}; f1={}\\n\".format(args.model_name,\n args.use_fixed_threshold,\n args.alpha,\n best_scores['precision'],\n best_scores['recall'],\n best_scores['f1']))\n average_performance.append([best_scores['precision'], best_scores['recall'], best_scores['f1']])\n\n average_performance = np.array(average_performance)\n print(\"Avg performance is prec - rec - f1: \", average_performance.mean(0))", "def create_train_test_sample(input_parcel_filepath: Path,\n output_parcel_train_filepath: Path,\n output_parcel_test_filepath: Path,\n balancing_strategy: str,\n force: bool = False):\n\n # If force == False Check and the output files exist already, stop.\n if(force is False\n and output_parcel_train_filepath.exists() is True\n and output_parcel_test_filepath.exists() is True):\n logger.warning(f\"create_train_test_sample: output files already exist and force == False, so stop: {output_parcel_train_filepath}, {output_parcel_test_filepath}\")\n return\n\n # Load input data...\n logger.info(f\"Start create_train_test_sample with balancing_strategy {balancing_strategy}\")\n logger.info(f\"Read input file {input_parcel_filepath}\")\n df_in = pdh.read_file(input_parcel_filepath)\n logger.debug(f\"Read input file ready, shape: {df_in.shape}\")\n\n # Init some many-used variables from config\n class_balancing_column = conf.columns['class_balancing']\n class_column = conf.columns['class']\n\n with pd.option_context('display.max_rows', None, 'display.max_columns', None):\n count_per_class = df_in.groupby(class_balancing_column, as_index=False).size()\n logger.info(f\"Number of elements per classname in input dataset:\\n{count_per_class}\")\n\n # The test dataset should be as representative as possible for the entire dataset, so create\n # this first as a 20% sample of each class without any additional checks...\n # Remark: group_keys=False evades that apply creates an extra index-level of the groups above\n # the data and evades having to do .reset_index(level=class_balancing_column_NAME, drop=True)\n # to get rid of the group level\n df_test = df_in.groupby(class_balancing_column, group_keys=False).apply(pd.DataFrame.sample, frac=0.20)\n logger.debug(f\"df_test after sampling 20% of data per class, shape: {df_test.shape}\")\n\n # The candidate parcel for training are all non-test parcel\n df_train_base = 
df_in[~df_in.index.isin(df_test.index)]\n logger.debug(f\"df_train_base after isin\\n{df_train_base}\")\n\n # Remove parcel with too few pixels from the train sample\n min_pixcount = conf.marker.getfloat('min_nb_pixels_train')\n df_train_base = df_train_base[df_train_base[conf.columns['pixcount_s1s2']] >= min_pixcount]\n logger.debug(f\"Number of parcels in df_train_base after filter on pixcount >= {min_pixcount}: {len(df_train_base)}\")\n\n # Some classes shouldn't be used for training... so remove them!\n logger.info(f\"Remove 'classes_to_ignore_for_train' from train sample (= where {class_column} is in: {conf.marker.getlist('classes_to_ignore_for_train')}\")\n df_train_base = df_train_base[~df_train_base[class_column].isin(conf.marker.getlist('classes_to_ignore_for_train'))]\n\n # All classes_to_ignore aren't meant for training either...\n logger.info(f\"Remove 'classes_to_ignore' from train sample (= where {class_column} is in: {conf.marker.getlist('classes_to_ignore')}\")\n df_train_base = df_train_base[~df_train_base[class_column].isin(conf.marker.getlist('classes_to_ignore'))]\n\n # Print the train base result before applying any balancing\n with pd.option_context('display.max_rows', None, 'display.max_columns', None):\n count_per_class = df_train_base.groupby(class_balancing_column, as_index=False).size()\n logger.info(f\"Number of elements per classname for train dataset, before balancing:\\n{count_per_class}\")\n\n # Depending on the balancing_strategy, use different way to get a training sample\n if balancing_strategy == 'BALANCING_STRATEGY_NONE':\n # Just use 25% of all non-test data as train data -> 25% of 80% of data -> 20% of all data\n # will be training date\n # Remark: - this is very unbalanced, eg. classes with 10.000 times the input size than other\n # classes\n # - this results in a relatively high accuracy in overall numbers, but the small\n # classes are not detected at all\n df_train = (df_train_base\n .groupby(class_balancing_column, group_keys=False)\n .apply(pd.DataFrame.sample, frac=0.25))\n\n elif balancing_strategy == 'BALANCING_STRATEGY_MEDIUM':\n # Balance the train data, but still use some larger samples for the classes that have a lot\n # of members in the input dataset\n # Remark: with the upper limit of 10.000 this gives still OK results overall, and also the\n # smaller classes give some results with upper limit of 4000 results significantly\n # less good.\n\n # For the larger classes, favor them by leaving the samples larger but cap at upper_limit\n upper_limit = 10000\n lower_limit = 1000\n logger.info(f\"Cap over {upper_limit}, keep the full number of training sample till {lower_limit}, samples smaller than that are oversampled\")\n df_train = (df_train_base.groupby(class_balancing_column).filter(lambda x: len(x) >= upper_limit)\n .groupby(class_balancing_column, group_keys=False)\n .apply(pd.DataFrame.sample, upper_limit))\n # Middle classes use the number as they are\n df_train = df_train.append(df_train_base\n .groupby(class_balancing_column).filter(lambda x: len(x) < upper_limit)\n .groupby(class_balancing_column).filter(lambda x: len(x) >= lower_limit))\n # For smaller classes, oversample...\n df_train = df_train.append(df_train_base\n .groupby(class_balancing_column).filter(lambda x: len(x) < lower_limit)\n .groupby(class_balancing_column, group_keys=False)\n .apply(pd.DataFrame.sample, lower_limit, replace=True))\n\n elif balancing_strategy == 'BALANCING_STRATEGY_MEDIUM2':\n # Balance the train data, but still use some larger samples for the 
classes that have a lot\n # of members in the input dataset\n # Remark: with the upper limit of 10.000 this gives still OK results overall, and also the\n # smaller classes give some results with upper limit of 4000 results significantly\n # less good.\n\n # For the larger classes, leave the samples larger but cap\n cap_count_limit1 = 100000\n cap_train_limit1 = 30000\n logger.info(f\"Cap balancing classes over {cap_count_limit1} to {cap_train_limit1}\")\n df_train = (df_train_base\n .groupby(class_balancing_column).filter(lambda x: len(x) >= cap_count_limit1)\n .groupby(class_balancing_column, group_keys=False)\n .apply(pd.DataFrame.sample, cap_train_limit1))\n cap_count_limit2 = 50000\n cap_train_limit2 = 20000\n logger.info(f\"Cap balancing classes between {cap_count_limit2} and {cap_count_limit1} to {cap_train_limit2}\")\n df_train = df_train.append(df_train_base\n .groupby(class_balancing_column).filter(lambda x: len(x) < cap_count_limit1)\n .groupby(class_balancing_column).filter(lambda x: len(x) >= cap_count_limit2)\n .groupby(class_balancing_column, group_keys=False)\n .apply(pd.DataFrame.sample, cap_train_limit2))\n cap_count_limit3 = 20000\n cap_train_limit3 = 10000\n logger.info(f\"Cap balancing classes between {cap_count_limit3} and {cap_count_limit2} to {cap_train_limit3}\")\n df_train = df_train.append(df_train_base\n .groupby(class_balancing_column).filter(lambda x: len(x) < cap_count_limit2)\n .groupby(class_balancing_column).filter(lambda x: len(x) >= cap_count_limit3)\n .groupby(class_balancing_column, group_keys=False)\n .apply(pd.DataFrame.sample, cap_train_limit3))\n cap_count_limit4 = 10000\n cap_train_limit4 = 10000\n logger.info(f\"Cap balancing classes between {cap_count_limit4} and {cap_count_limit3} to {cap_train_limit4}\")\n df_train = df_train.append(df_train_base\n .groupby(class_balancing_column).filter(lambda x: len(x) < cap_count_limit3)\n .groupby(class_balancing_column).filter(lambda x: len(x) >= cap_count_limit4)\n .groupby(class_balancing_column, group_keys=False)\n .apply(pd.DataFrame.sample, cap_train_limit4))\n oversample_count = 1000\n # Middle classes use the number as they are\n logger.info(f\"For classes between {cap_count_limit4} and {oversample_count}, just use all samples\")\n df_train = df_train.append(df_train_base\n .groupby(class_balancing_column).filter(lambda x: len(x) < cap_count_limit4)\n .groupby(class_balancing_column).filter(lambda x: len(x) >= oversample_count))\n # For smaller classes, oversample...\n logger.info(f\"For classes smaller than {oversample_count}, oversample to {oversample_count}\")\n df_train = df_train.append(df_train_base\n .groupby(class_balancing_column).filter(lambda x: len(x) < oversample_count)\n .groupby(class_balancing_column, group_keys=False)\n .apply(pd.DataFrame.sample, oversample_count, replace=True))\n\n elif balancing_strategy == 'BALANCING_STRATEGY_PROPORTIONAL_GROUPS':\n # Balance the train data, but still use some larger samples for the classes that have a lot\n # of members in the input dataset\n # Remark: with the upper limit of 10.000 this gives still OK results overall, and also the\n # smaller classes give some results with upper limit of 4000 results significantly\n # less good.\n\n # For the larger classes, leave the samples larger but cap\n upper_count_limit1 = 100000\n upper_train_limit1 = 30000\n logger.info(f\"Cap balancing classes over {upper_count_limit1} to {upper_train_limit1}\")\n df_train = (df_train_base\n .groupby(class_balancing_column).filter(lambda x: len(x) >= 
upper_count_limit1)\n .groupby(class_balancing_column, group_keys=False)\n .apply(pd.DataFrame.sample, upper_train_limit1))\n upper_count_limit2 = 50000\n upper_train_limit2 = 20000\n logger.info(f\"Cap balancing classes between {upper_count_limit2} and {upper_count_limit1} to {upper_train_limit2}\")\n df_train = df_train.append(df_train_base\n .groupby(class_balancing_column).filter(lambda x: len(x) < upper_count_limit1)\n .groupby(class_balancing_column).filter(lambda x: len(x) >= upper_count_limit2)\n .groupby(class_balancing_column, group_keys=False)\n .apply(pd.DataFrame.sample, upper_train_limit2))\n upper_count_limit3 = 20000\n upper_train_limit3 = 10000\n logger.info(f\"Cap balancing classes between {upper_count_limit3} and {upper_count_limit2} to {upper_train_limit3}\")\n df_train = df_train.append(df_train_base\n .groupby(class_balancing_column).filter(lambda x: len(x) < upper_count_limit2)\n .groupby(class_balancing_column).filter(lambda x: len(x) >= upper_count_limit3)\n .groupby(class_balancing_column, group_keys=False)\n .apply(pd.DataFrame.sample, upper_train_limit3))\n upper_count_limit4 = 10000\n upper_train_limit4 = 5000\n logger.info(f\"Cap balancing classes between {upper_count_limit4} and {upper_count_limit3} to {upper_train_limit4}\")\n df_train = df_train.append(df_train_base\n .groupby(class_balancing_column).filter(lambda x: len(x) < upper_count_limit3)\n .groupby(class_balancing_column).filter(lambda x: len(x) >= upper_count_limit4)\n .groupby(class_balancing_column, group_keys=False)\n .apply(pd.DataFrame.sample, upper_train_limit4))\n\n # For smaller balancing classes, just use all samples\n df_train = df_train.append(\n df_train_base.groupby(class_balancing_column).filter(lambda x: len(x) < upper_count_limit4))\n\n elif balancing_strategy == 'BALANCING_STRATEGY_UPPER_LIMIT':\n # Balance the train data, but still use some larger samples for the classes that have a lot\n # of members in the input dataset\n # Remark: with the upper limit of 10.000 this gives still OK results overall, and also the\n # smaller classes give some results with upper limit of 4000 results significantly\n # less good.\n\n # For the larger classes, favor them by leaving the samples larger but cap at upper_limit\n upper_limit = 10000\n logger.info(f\"Cap over {upper_limit}...\")\n df_train = (df_train_base.groupby(class_balancing_column).filter(lambda x: len(x) >= upper_limit)\n .groupby(class_balancing_column, group_keys=False)\n .apply(pd.DataFrame.sample, upper_limit))\n # For smaller classes, just use all samples\n df_train = df_train.append(df_train_base\n .groupby(class_balancing_column).filter(lambda x: len(x) < upper_limit))\n\n elif balancing_strategy == 'BALANCING_STRATEGY_EQUAL':\n # In theory the most logical way to balance: make sure all classes have the same amount of\n # training data by undersampling the largest classes and oversampling the small classes.\n df_train = (df_train_base.groupby(class_balancing_column, group_keys=False)\n .apply(pd.DataFrame.sample, 2000, replace=True))\n\n else:\n raise Exception(f\"Unknown balancing strategy, STOP!: {balancing_strategy}\")\n\n # Log the resulting numbers per class in the train sample\n with pd.option_context('display.max_rows', None, 'display.max_columns', None):\n count_per_class = df_train.groupby(class_balancing_column, as_index=False).size()\n logger.info(f'Number of elements per class_balancing_column in train dataset:\\n{count_per_class}')\n if class_balancing_column != class_column:\n count_per_class = 
df_train.groupby(class_column, as_index=False).size()\n logger.info(f'Number of elements per class_column in train dataset:\\n{count_per_class}')\n\n # Log the resulting numbers per class in the test sample\n with pd.option_context('display.max_rows', None, 'display.max_columns', None):\n count_per_class = df_test.groupby(class_balancing_column, as_index=False).size()\n logger.info(f'Number of elements per class_balancing_column in test dataset:\\n{count_per_class}')\n if class_balancing_column != class_column:\n count_per_class = df_test.groupby(class_column, as_index=False).size()\n logger.info(f'Number of elements per class_column in test dataset:\\n{count_per_class}')\n\n # Write to output files\n logger.info('Write the output files')\n df_train.set_index(conf.columns['id'], inplace=True)\n df_test.set_index(conf.columns['id'], inplace=True)\n pdh.to_file(df_train, output_parcel_train_filepath) # The ID column is the index...\n pdh.to_file(df_test, output_parcel_test_filepath) # The ID column is the index...", "def create_train_test_sample(\n input_parcel_path: Path,\n output_parcel_train_path: Path,\n output_parcel_test_path: Path,\n balancing_strategy: str,\n force: bool = False,\n):\n\n # If force == False Check and the output files exist already, stop.\n if (\n force is False\n and output_parcel_train_path.exists() is True\n and output_parcel_test_path.exists() is True\n ):\n logger.warning(\n \"create_train_test_sample: output files already exist and force is False: \"\n f\"{output_parcel_train_path}, {output_parcel_test_path}\"\n )\n return\n\n # Load input data...\n logger.info(\n f\"Start create_train_test_sample with balancing_strategy {balancing_strategy}\"\n )\n logger.info(f\"Read input file {input_parcel_path}\")\n df_in = pdh.read_file(input_parcel_path)\n logger.debug(f\"Read input file ready, shape: {df_in.shape}\")\n\n # Init some many-used variables from config\n class_balancing_column = conf.columns[\"class_balancing\"]\n class_column = conf.columns[\"class\"]\n\n with pd.option_context(\n \"display.max_rows\", None, \"display.max_columns\", None\n ): # type: ignore\n count_per_class = df_in.groupby(class_balancing_column, as_index=False).size()\n logger.info(\n f\"Number of elements per classname in input dataset:\\n{count_per_class}\"\n )\n\n # The test dataset should be as representative as possible for the entire dataset,\n # so create this first as a 20% sample of each class without any additional checks.\n # Remark: group_keys=False evades that apply creates an extra index-level of the\n # groups above the data and evades having to do\n # .reset_index(level=class_balancing_column_NAME, drop=True)\n # to get rid of the group level\n test_df = df_in.groupby(class_balancing_column, group_keys=False).apply(\n pd.DataFrame.sample, frac=0.20\n )\n logger.debug(\n f\"df_test after sampling 20% of data per class, shape: {test_df.shape}\"\n )\n\n # The candidate parcel for training are all non-test parcel\n train_base_df = df_in[~df_in.index.isin(test_df.index)]\n logger.debug(f\"df_train_base after isin\\n{train_base_df}\")\n\n # Remove parcel with too few pixels from the train sample\n min_pixcount = conf.marker.getfloat(\"min_nb_pixels_train\")\n train_base_df = train_base_df[\n train_base_df[conf.columns[\"pixcount_s1s2\"]] >= min_pixcount\n ]\n logger.debug(\n \"Number of parcels in df_train_base after filter on pixcount >= \"\n f\"{min_pixcount}: {len(train_base_df)}\"\n )\n\n # Some classes shouldn't be used for training... 
so remove them!\n logger.info(\n \"Remove 'classes_to_ignore_for_train' from train sample (= where \"\n f\"{class_column} is in: {conf.marker.getlist('classes_to_ignore_for_train')}\"\n )\n train_base_df = train_base_df[\n ~train_base_df[class_column].isin(\n conf.marker.getlist(\"classes_to_ignore_for_train\")\n )\n ]\n\n # All classes_to_ignore aren't meant for training either...\n logger.info(\n f\"Remove 'classes_to_ignore' from train sample (= where {class_column} is in: \"\n f\"{conf.marker.getlist('classes_to_ignore')}\"\n )\n train_base_df = train_base_df[\n ~train_base_df[class_column].isin(conf.marker.getlist(\"classes_to_ignore\"))\n ]\n\n # Print the train base result before applying any balancing\n with pd.option_context(\n \"display.max_rows\", None, \"display.max_columns\", None\n ): # type: ignore\n count_per_class = train_base_df.groupby(\n class_balancing_column, as_index=False\n ).size()\n logger.info(\n \"Number of elements per classname for train dataset, before balancing:\\n\"\n f\"{count_per_class}\"\n )\n\n # Depending on the balancing_strategy, use different way to get a training sample\n train_df = pd.DataFrame().reindex_like(train_base_df)\n if balancing_strategy == \"BALANCING_STRATEGY_NONE\":\n # Just use 25% of all non-test data as train data -> 25% of 80% of data -> 20%\n # of all data will be training date\n # Remark: - this is very unbalanced, eg. classes with 10.000 times the input\n # size than other classes\n # - this results in a relatively high accuracy in overall numbers, but\n # the small classes are not detected at all\n train_df = train_base_df.groupby(\n class_balancing_column, group_keys=False\n ).apply(pd.DataFrame.sample, frac=0.25)\n\n elif balancing_strategy == \"BALANCING_STRATEGY_MEDIUM\":\n # Balance the train data, but still use some larger samples for the classes\n # that have a lot of members in the input dataset\n # Remark: with the upper limit of 10.000 this gives still OK results overall,\n # and also the smaller classes give some results with upper limit of 4000\n # results significantly less good.\n\n # For the larger classes, favor them by leaving the samples larger but cap at\n # upper_limit\n upper_limit = 10000\n lower_limit = 1000\n logger.info(\n f\"Cap over {upper_limit}, keep the full number of training sample till \"\n f\"{lower_limit}, samples smaller than that are oversampled\"\n )\n train_df = (\n train_base_df.groupby(class_balancing_column)\n .filter(lambda x: len(x) >= upper_limit)\n .groupby(class_balancing_column, group_keys=False)\n .apply(pd.DataFrame.sample, upper_limit)\n )\n # Middle classes use the number as they are\n train_df = pd.concat(\n [\n train_df,\n train_base_df.groupby(class_balancing_column)\n .filter(lambda x: len(x) < upper_limit)\n .groupby(class_balancing_column)\n .filter(lambda x: len(x) >= lower_limit),\n ]\n )\n # For smaller classes, oversample...\n train_df = pd.concat(\n [\n train_df,\n train_base_df.groupby(class_balancing_column)\n .filter(lambda x: len(x) < lower_limit)\n .groupby(class_balancing_column, group_keys=False)\n .apply(pd.DataFrame.sample, lower_limit, replace=True),\n ]\n )\n\n elif balancing_strategy == \"BALANCING_STRATEGY_MEDIUM2\":\n # Balance the train data, but still use some larger samples for the classes\n # that have a lot of members in the input dataset\n # Remark: with the upper limit of 10.000 this gives still OK results overall,\n # and also the smaller classes give some results with upper limit of 4000\n # results significantly less good.\n\n # For the 
larger classes, leave the samples larger but cap\n # Cap 1\n cap_count_limit1 = 100000\n cap_train_limit1 = 30000\n logger.info(\n f\"Cap balancing classes over {cap_count_limit1} to {cap_train_limit1}\"\n )\n train_capped_df = (\n train_base_df.groupby(class_balancing_column)\n .filter(lambda x: len(x) >= cap_count_limit1)\n .groupby(class_balancing_column, group_keys=False)\n )\n if len(train_capped_df) > 0:\n train_df = pd.concat(\n [train_df, train_capped_df.apply(pd.DataFrame.sample, cap_train_limit1)]\n )\n\n # Cap 2\n cap_count_limit2 = 50000\n cap_train_limit2 = 20000\n logger.info(\n f\"Cap balancing classes between {cap_count_limit2} and {cap_count_limit1} \"\n f\"to {cap_train_limit2}\"\n )\n train_capped_df = (\n train_base_df.groupby(class_balancing_column)\n .filter(lambda x: len(x) < cap_count_limit1)\n .groupby(class_balancing_column)\n .filter(lambda x: len(x) >= cap_count_limit2)\n .groupby(class_balancing_column, group_keys=False)\n )\n if len(train_capped_df) > 0:\n train_df = pd.concat(\n [train_df, train_capped_df.apply(pd.DataFrame.sample, cap_train_limit2)]\n )\n\n # Cap 3\n cap_count_limit3 = 20000\n cap_train_limit3 = 10000\n logger.info(\n f\"Cap balancing classes between {cap_count_limit3} and {cap_count_limit2} \"\n f\"to {cap_train_limit3}\"\n )\n train_capped_df = (\n train_base_df.groupby(class_balancing_column)\n .filter(lambda x: len(x) < cap_count_limit2)\n .groupby(class_balancing_column)\n .filter(lambda x: len(x) >= cap_count_limit3)\n .groupby(class_balancing_column, group_keys=False)\n )\n if len(train_capped_df) > 0:\n train_df = pd.concat(\n [train_df, train_capped_df.apply(pd.DataFrame.sample, cap_train_limit3)]\n )\n\n # Cap 4\n cap_count_limit4 = 10000\n cap_train_limit4 = 10000\n logger.info(\n f\"Cap balancing classes between {cap_count_limit4} and {cap_count_limit3} \"\n f\"to {cap_train_limit4}\"\n )\n train_capped_df = (\n train_base_df.groupby(class_balancing_column)\n .filter(lambda x: len(x) < cap_count_limit3)\n .groupby(class_balancing_column)\n .filter(lambda x: len(x) >= cap_count_limit4)\n .groupby(class_balancing_column, group_keys=False)\n )\n if len(train_capped_df) > 0:\n train_df = pd.concat(\n [train_df, train_capped_df.apply(pd.DataFrame.sample, cap_train_limit4)]\n )\n\n # Middle classes use the number as they are, smaller classes are oversampled\n oversample_count = 1000\n logger.info(\n f\"For classes between {cap_count_limit4} and {oversample_count}, just use \"\n \"all samples\"\n )\n train_df = pd.concat(\n [\n train_df,\n train_base_df.groupby(class_balancing_column)\n .filter(lambda x: len(x) < cap_count_limit4)\n .groupby(class_balancing_column)\n .filter(lambda x: len(x) >= oversample_count),\n ]\n )\n # For smaller classes, oversample...\n logger.info(\n f\"For classes smaller than {oversample_count}, oversample to \"\n f\"{oversample_count}\"\n )\n train_capped_df = (\n train_base_df.groupby(class_balancing_column)\n .filter(lambda x: len(x) < oversample_count)\n .groupby(class_balancing_column, group_keys=False)\n )\n if len(train_capped_df) > 0:\n train_df = pd.concat(\n [\n train_df,\n train_capped_df.apply(\n pd.DataFrame.sample, oversample_count, replace=True\n ),\n ]\n )\n\n elif balancing_strategy == \"BALANCING_STRATEGY_PROPORTIONAL_GROUPS\":\n # Balance the train data, but still use some larger samples for the classes\n # that have a lot of members in the input dataset\n # Remark: with the upper limit of 10.000 this gives still OK results overall,\n # and also the smaller classes give some results with 
upper limit of 4000\n # results significantly less good.\n\n # For the larger classes, leave the samples larger but cap\n upper_count_limit1 = 100000\n upper_train_limit1 = 30000\n logger.info(\n f\"Cap balancing classes over {upper_count_limit1} to {upper_train_limit1}\"\n )\n train_df = (\n train_base_df.groupby(class_balancing_column)\n .filter(lambda x: len(x) >= upper_count_limit1)\n .groupby(class_balancing_column, group_keys=False)\n .apply(pd.DataFrame.sample, upper_train_limit1)\n )\n upper_count_limit2 = 50000\n upper_train_limit2 = 20000\n logger.info(\n f\"Cap balancing classes between {upper_count_limit2} and \"\n f\"{upper_count_limit1} to {upper_train_limit2}\"\n )\n train_limit2_df = (\n train_base_df.groupby(class_balancing_column)\n .filter(lambda x: len(x) < upper_count_limit1)\n .groupby(class_balancing_column)\n .filter(lambda x: len(x) >= upper_count_limit2)\n .groupby(class_balancing_column, group_keys=False)\n )\n if len(train_limit2_df) > 0:\n train_df = pd.concat(\n [\n train_df,\n train_limit2_df.apply(pd.DataFrame.sample, upper_train_limit2),\n ]\n )\n upper_count_limit3 = 20000\n upper_train_limit3 = 10000\n logger.info(\n f\"Cap balancing classes between {upper_count_limit3} and \"\n f\"{upper_count_limit2} to {upper_train_limit3}\"\n )\n train_limit3_df = (\n train_base_df.groupby(class_balancing_column)\n .filter(lambda x: len(x) < upper_count_limit2)\n .groupby(class_balancing_column)\n .filter(lambda x: len(x) >= upper_count_limit3)\n .groupby(class_balancing_column, group_keys=False)\n )\n if len(train_limit3_df) > 0:\n train_df = pd.concat(\n [\n train_df,\n train_limit3_df.apply(pd.DataFrame.sample, upper_train_limit3),\n ]\n )\n upper_count_limit4 = 10000\n upper_train_limit4 = 5000\n logger.info(\n f\"Cap balancing classes between {upper_count_limit4} and \"\n f\"{upper_count_limit3} to {upper_train_limit4}\"\n )\n train_limit4_df = (\n train_base_df.groupby(class_balancing_column)\n .filter(lambda x: len(x) < upper_count_limit3)\n .groupby(class_balancing_column)\n .filter(lambda x: len(x) >= upper_count_limit4)\n .groupby(class_balancing_column, group_keys=False)\n )\n if len(train_limit4_df) > 0:\n train_df = pd.concat(\n [\n train_df,\n train_limit4_df.apply(pd.DataFrame.sample, upper_train_limit4),\n ]\n )\n # For smaller balancing classes, just use all samples\n train_df = pd.concat(\n [\n train_df,\n train_base_df.groupby(class_balancing_column).filter(\n lambda x: len(x) < upper_count_limit4\n ),\n ]\n )\n\n elif balancing_strategy == \"BALANCING_STRATEGY_UPPER_LIMIT\":\n # Balance the train data, but still use some larger samples for the classes\n # that have a lot of members in the input dataset\n # Remark: with the upper limit of 10.000 this gives still OK results overall,\n # and also the smaller classes give some results with upper limit of 4000\n # results significantly less good.\n\n # For the larger classes, favor them by leaving the samples larger but cap at\n # upper_limit\n upper_limit = 10000\n logger.info(f\"Cap over {upper_limit}...\")\n train_df = (\n train_base_df.groupby(class_balancing_column)\n .filter(lambda x: len(x) >= upper_limit)\n .groupby(class_balancing_column, group_keys=False)\n .apply(pd.DataFrame.sample, upper_limit)\n )\n # For smaller classes, just use all samples\n train_df = pd.concat(\n [\n train_df,\n train_base_df.groupby(class_balancing_column).filter(\n lambda x: len(x) < upper_limit\n ),\n ]\n )\n\n elif balancing_strategy == \"BALANCING_STRATEGY_EQUAL\":\n # In theory the most logical way to balance: 
make sure all classes have the\n # same amount of training data by undersampling the largest classes and\n # oversampling the small classes.\n train_df = train_base_df.groupby(\n class_balancing_column, group_keys=False\n ).apply(pd.DataFrame.sample, 2000, replace=True)\n\n else:\n raise Exception(f\"Unknown balancing strategy, STOP!: {balancing_strategy}\")\n\n # Log the resulting numbers per class in the train sample\n with pd.option_context(\n \"display.max_rows\", None, \"display.max_columns\", None\n ): # type: ignore\n count_per_class = train_df.groupby(\n class_balancing_column, as_index=False\n ).size()\n logger.info(\n \"Number of elements per class_balancing_column in train dataset:\\n\"\n f\"{count_per_class}\"\n )\n if class_balancing_column != class_column:\n count_per_class = train_df.groupby(class_column, as_index=False).size()\n logger.info(\n \"Number of elements per class_column in train dataset:\\n\"\n f\"{count_per_class}\"\n )\n\n # Log the resulting numbers per class in the test sample\n with pd.option_context(\n \"display.max_rows\", None, \"display.max_columns\", None\n ): # type: ignore\n count_per_class = test_df.groupby(class_balancing_column, as_index=False).size()\n logger.info(\n \"Number of elements per class_balancing_column in test dataset:\\n\"\n f\"{count_per_class}\"\n )\n if class_balancing_column != class_column:\n count_per_class = test_df.groupby(class_column, as_index=False).size()\n logger.info(\n \"Number of elements per class_column in test dataset:\\n\"\n f\"{count_per_class}\"\n )\n\n # Write to output files\n logger.info(\"Write the output files\")\n train_df.set_index(conf.columns[\"id\"], inplace=True)\n test_df.set_index(conf.columns[\"id\"], inplace=True)\n pdh.to_file(train_df, output_parcel_train_path) # The ID column is the index...\n pdh.to_file(test_df, output_parcel_test_path) # The ID column is the index...", "def run_train_test_split():\n # Load all documents\n conn = sq.connect(config.DB_FILE)\n documents = pd.read_sql_query('select pubmed_id, review_id, included, title, abstract from article ', conn)\n\n # Identify unique review IDs\n review_ids = documents['review_id'].unique()\n\n # Set seed for random sampling\n np.random.seed(2)\n\n # List of Reviews in the partial data set and full data set\n partial_set = list(np.random.choice(review_ids, 10, replace=False))\n full_set = list(review_ids.copy())\n\n # Load array (X) and labels (Y) of all documents\n with (open(config.DOC_TERM_MATRIX, \"rb\")) as openfile:\n X = pickle.load(openfile)\n\n y = documents['included']\n\n # Train-test split of the partial dataset\n train_test_split(X, y, partial_set, 'min_max', 'partial', review_ids)\n train_test_split(X, y, partial_set, 'tf_idf', 'partial', review_ids)\n\n # Train-test split of the full dataset\n train_test_split(X, y, full_set, 'min_max', 'full', review_ids)\n train_test_split(X, y, full_set, 'tf_idf', 'full', review_ids)", "def main_modeling_pipeline():\n\n\n data_df = pd.read_csv('gs://aiplatformfilipegracio2020/head_train_data.csv')\n data_df = data_df[[LABEL, 'price', 'days_on_site']]\n\n class_weights = calculate_class_weights(data_df[LABEL])\n print('class weights', class_weights)\n logging.info('Data loaded and processed')\n train_ds, val_ds, test_ds = make_tf_datasets(data_df, LABEL)\n logging.info('Tensorflow datasets created')\n\n with strategy.scope():\n logging.info('Inside strategy')\n simple_feature_layer = make_simple_feature_layer(data_df)\n logging.info('Going to make model')\n simple_model = 
make_simple_model(simple_feature_layer)\n\n logging.info('Going fit model')\n simple_model_results, simple_model = model_fit_and_evaluate(model=simple_model,\n train_ds=train_ds,\n val_ds=val_ds,\n test_ds=test_ds,\n class_weights=class_weights,\n epochs=TRAINING_EPOCHS,\n job_name='simple_model')\n\n simple_model.save('gs://aiplatformfilipegracio2020/')", "def main(source_dir, ksplit, out_dir, data_pattern, label_pattern, test_mode,\r\n numTopVars = [10, 50, 100, 500, 1000], compute_results=True):\r\n # Load input and labels.\r\n data, labels, data_file = load_data(source_dir, data_pattern)\r\n filename_base = path.splitext(path.basename(mname))[0]\r\n # Get classifiers and params.\r\n global NAMES\r\n if test_mode:\r\n NAMES = [\"Chance\", \"Nearest Neighbors\", \"Linear SVM\", \"Decision Tree\",\r\n \"Logistic Regression\", \"Naive Bayes\", \"LDA\"]\r\n ksplit = 3\r\n\r\n classifiers, params = make_classifiers(NAMES) # data.shape, ksplit)\r\n\r\n\r\n # Make the folds.\r\n logging.info(\"Making %d folds\" % ksplit)\r\n #kf = StratifiedKFold(labels, n_folds=ksplit)\r\n kf = KFold(labels.shape[0], n_folds=ksplit)\r\n\r\n # Extract the training and testing indices from the k-fold object,\r\n # which stores fold pairs of indices.\r\n fold_pairs = [(tr, ts) for (tr, ts) in kf]\r\n assert len(fold_pairs) == ksplit\r\n rank_per_fold = get_rank_per_fold(data, labels, fold_pairs,\r\n save_path=out_dir, parallel=True)\r\n #dhjelm: were we planning on using this dict?\r\n #score={}\r\n dscore=[]\r\n totalErrs = []\r\n if compute_results:\r\n for name in NAMES:\r\n mdl = classifiers[name]\r\n param = params[name]\r\n # get_score runs the classifier on each fold,\r\n # each subset of selected top variables and does a grid search for\r\n # classifier-specific parameters (selects the best)\r\n clf, allConfMats, allTotalErrs, allFittedClassifiers = \\\r\n get_score(data, labels, fold_pairs, name, mdl, param,\r\n numTopVars=numTopVars,\r\n rank_per_fold=rank_per_fold, parallel=True,\r\n rand_iter=-1)\r\n # save classifier object and results to file\r\n save_classifier_results(name, out_dir, allConfMats,\r\n allTotalErrs)\r\n save_classifier_object(clf, allFittedClassifiers, name, out_dir)\r\n # Append classifier results to list of all results\r\n dscore.append(allConfMats)\r\n totalErrs.append(allTotalErrs)\r\n '''\r\n First do some saving of total results\r\n '''\r\n save_combined_results(NAMES, dscore, totalErrs,\r\n numTopVars, out_dir, filename_base)\r\n\r\n plot_errors(NAMES, numTopVars, dscore, totalErrs,\r\n filename_base, out_dir,compute_results)\r\n\r\n logging.shutdown()", "def create_simple_data_set(\n n_training_points,\n n_testing_points,\n low=0,\n high=3,\n mode=training_testing_split.SEPERATE,\n kernel=kernel_matern,\n shuffle=True,\n):\n gp = gaussian_process(kernel=kernel, verbose=True)\n\n mid = (low + high) / 2\n\n if mode == training_testing_split.SEPERATE_LONG:\n x_training, x_testing = __seperate_long(\n n_training_points, n_testing_points, low, high\n )\n elif mode == training_testing_split.SEPERATE:\n x_training, x_testing = __seperate(\n n_training_points, n_testing_points, low, high\n )\n elif mode == training_testing_split.INTERSPREAD:\n x_training, x_testing = __interspread(\n n_training_points, n_testing_points, low, high\n )\n elif mode == training_testing_split.RANDOM:\n x_training, x_testing = __random(n_training_points, n_testing_points, low, high)\n elif mode == training_testing_split.MIXED:\n\n def r(z):\n dist = np.random.randint(low=1, high=100, size=4)\n λ = lambda 
x: x / dist.sum()\n vfunc = np.vectorize(λ)\n dist = vfunc(dist)\n return (z * dist).round().astype(int)\n\n training_dist = r(n_training_points)\n testing_dist = r(n_testing_points)\n x1, x2 = __random(training_dist[0], testing_dist[0], low, high)\n x11, x22 = __interspread(training_dist[1], testing_dist[1], low, high)\n x111, x222 = __interspread(training_dist[2], testing_dist[2], low, high)\n x1111, x2222 = __seperate(training_dist[3], testing_dist[3], low, high)\n x_training = np.vstack([x1, x11, x111, x1111])\n x_testing = np.vstack([x2, x22, x222, x222])\n\n y_samples = gp.sample(np.vstack([x_training, x_testing]), 1).squeeze()\n y_training = y_samples[: len(x_training)].reshape(-1, 1)\n y_testing = y_samples[len(x_training) :].reshape(-1, 1)\n training_data_set = data_loader.DataSet(X=x_training, Y=y_training)\n testing_data_set = data_loader.DataSet(X=x_testing, Y=y_testing)\n\n if shuffle:\n training_data_set.shuffle()\n testing_data_set.shuffle()\n\n return training_data_set, testing_data_set", "def prepare_nfold_datasets(self): # i.e. split into different train/ground-truth(test) dataset\n for alpha in range(1, self.ALPHAs+1):\n if alpha != self.ALPHAs:\n gt_years = np.array2string(self.tl_model.years[(alpha-1)*self.PSI : alpha*self.PSI], separator='-')\n else:\n gt_years = np.array2string(self.tl_model.years[(alpha-1)*self.PSI : alpha*self.PSI+self.runoff_years], separator='-')\n new_cluster_dir = str(Path(self.tl_model.cluster_dir) / f'alpha_{alpha}_GT-{gt_years}')\n os.makedirs(new_cluster_dir, exist_ok=True)\n\n new_prepared_data_dir = str(Path(self.tl_model.prepared_data_dir) / f'alpha_{alpha}')\n os.makedirs(new_prepared_data_dir, exist_ok=True)\n \n if utils.find(f'*alpha_{alpha}_preprocessed.pkl', new_prepared_data_dir) and utils.find(f'*alpha_{alpha}_standardized_stacked_arr.pkl', new_prepared_data_dir):\n pass\n else:\n if not utils.find(f'*target*alpha_{alpha}_preprocessed.pkl', new_prepared_data_dir):\n print(f\"=> No input datasets pre-processed for alpha of {alpha}\")\n prepare.cut_target_dataset(self, alpha, new_prepared_data_dir)\n\n if not utils.find(f'*rf*alpha_{alpha}_preprocessed.pkl', new_prepared_data_dir):\n print(f\"=> No rainfall datasets pre-processed for alpha of {alpha}\")\n prepare.cut_rf_dataset(self, alpha, new_prepared_data_dir)\n \n print(f'Preprocessed pickles for alpha split {alpha} can be found @:\\n{new_prepared_data_dir}')", "def test_training(self):\n\t\tpass", "def data_split(self, split_index=0, thinning=None, apply_mask=False, mask=[], **kwargs):\n \n self.learn_sets = ['train','vali','test']\n self.ind_train = self.train_splitdict[split_index]\n self.ind_train.sort()\n self.ind_test = self.test_splitdict[split_index]\n self.ind_test.sort()\n if len(self.vali_splitdict) !=0:\n self.learn_sets = ['train','vali','test']\n self.ind_vali = self.vali_splitdict[split_index]\n self.ind_vali.sort()\n self.indices_learn_dict = dict(zip(self.learn_sets, [self.ind_train, self.ind_vali, self.ind_test]))\n else:\n self.learn_sets = ['train','test']\n self.indices_learn_dict = dict(zip(self.learn_sets, [self.ind_train, self.ind_test]))\n\n\n self.train_samples = self.fullspace[self.ind_train]\n self.train_size = len(self.train_samples)\n\n if len(self.vali_splitdict) !=0:\n self.vali_samples = self.fullspace[self.ind_vali]\n self.vali_samples.sort()\n self.vali_size = len(self.vali_samples)\n else:\n self.vali_size = 0\n self.test_samples = self.fullspace[self.ind_test]\n #self.test_samples.sort()\n self.test_size = len(self.test_samples)\n verbosity 
= kwargs.get('verbosity', 1)\n\n too.condprint(\"number of obtained training vectors\", self.train_size, level=1, verbosity=verbosity)\n too.condprint(\"number of obtained validation vectors\", self.vali_size, level=1, verbosity=verbosity)\n too.condprint(\"number of obtained test vectors\", self.test_size, level=2, verbosity=verbosity)\n\n\n self.matrix_datalearn_dict = dict()\n\n for dli in self.learn_sets:\n matrixdata = np.copy(self.matrix_z)\n self.matrixdata=matrixdata\n\n ## copy of mask to avoid modifying orginal mask after iterations\n if apply_mask==False:\n maskcopy=np.arange(0,len(matrixdata[0])) ##range over all axis length, does not mask anything\n else:\n maskcopy=np.copy(mask)\n \n ## apply thinning (if set to None, there is no thinning)\n self.mask_true=maskcopy[::thinning] \n\n ## apply mask also to feature grid and save as masked_+...\n setattr(self, 'masked_'+self.features_str, self.fgrid[self.mask_true]) \n\n matrixdata = matrixdata[:,self.mask_true] ## apply mask and thinning to feature space (k-grid)\n indices_l = self.indices_learn_dict[dli]\n matrixdata = matrixdata[indices_l,:] ##choose learning set\n self.matrix_datalearn_dict[dli] = matrixdata\n self.matrix_datalearn_dict = objdict(self.matrix_datalearn_dict)\n return self.matrix_datalearn_dict", "def setup(self, stage: Optional[str] = None):\n if stage in (None, 'fit'):\n # Get a 20% of the train data for validation in a stratified way.\n _x = [i[1] for i in self.splits['train']]\n _y = [i[0] for i in self.splits['train']]\n\n _train_x, _val_x, _train_y, _val_y = train_test_split(_x, _y, test_size=0.2,\n stratify=_y)\n #print(np.unique(_train_y, return_counts=True))\n #print(np.unique(_val_y, return_counts=True))\n\n self.splits['train'] = [[i, j] for i,j in zip(_train_y, _train_x)]\n self.splits['valid'] = [[i, j] for i,j in zip(_val_y, _val_x)]\n\n self.datasets['train'] = FewShotDataset(self.splits['train'], self.ops)\n self.datasets['valid'] = FewShotDataset(self.splits['valid'], self.ops)\n\n if stage in (None, 'test'):\n self.datasets['test'] = FewShotDataset(self.splits['test'], self.ops)", "def prediction_data(median_split,mean_split,std_split,degrees_split,weight_split,export_file):\n DATA_TEST_PATH = '../data/test.csv' # Download train data and supply path here \n print('\\nIMPORTING TESTING DATA :',end=\" \")\n y_test, tX_test, ids_test = load_csv_data(DATA_TEST_PATH)\n print('DONE')\n \n #5.a. Splitting the testing data\n print('SPLITTING TESTING DATA :',end=\" \")\n y_test_split,tx_test_split,id_test_split = split_dataset(y_test,tX_test,ids_test) \n print('DONE') \n #5.b. prediction on each model\n y_pred = list()\n \n for split,(y_test_s,tx_test_s,id_test_s) in enumerate(zip(y_test_split,tx_test_split,id_test_split)): \n print('PREDICTION FOR TESTING DATA SPLIT NUMBER',split)\n \n #Formatting to the correct datatype\n y_test_s = np.squeeze(y_test_s)\n tx_test_s = np.squeeze(tx_test_s)\n id_test_s = np.squeeze(id_test_s)\n print('Size of the vectors',y_test_s.shape,tx_test_s.shape) \n #Formatting the data themselves\n print('Counting NaN',end='. ')\n tx_test_s = count_NaN(tx_test_s)\n print('Sanitizing',end = ' . 
')\n tx_test_s,median_vec = sanitize_NaN(tx_test_s,median_split[split])\n print('Standardizing',end = ' .')\n tx_test_s,mean_te,std_te = standardize(tx_test_s,mean_split[split],std_split[split])\n print('Building polynomial basis') \n tx_test_s = build_poly(tx_test_s, degrees_split[split])\n \n #Prediction\n y_pred.append(predict_labels(np.array(weight_split[split]), np.array(tx_test_s))) \n \n print('MERGING TESTING DATA',end=\"\")\n y_pred_merged, ids_merged = merge_dataset(y_pred,id_test_split)\n print('DONE')\n \n OUTPUT_PATH = 'results/output_sanitized_normalization_'+export_file+'.csv' \n print('EXPORTING TESTING DATA WITH PREDICTIONS :',end=\" \")\n \n create_csv_submission(ids_merged, y_pred_merged, OUTPUT_PATH)\n print('DONE')", "def create_final_strategy(filename: str, columns_list: List[str], som_width=Config.som_width,\n som_height=Config.som_height, n_iter=Config.n_iter, sigma=Config.sigma,\n learning_rate=Config.learning_rate) -> tuple:\n\n print(\n f'Creating final strategy for parameters: \\nmap_size: {som_height}\\nn_iter: {n_iter}\\nsigma:{sigma}\\nlr: {learning_rate}')\n # get prepared data\n df, df_prepared, df_train, df_test, df_train_columns = get_data(filename, columns_list)\n\n # train som\n final_df_train, final_df_test = train_som(som_width, som_height, df, df_train, df_test, df_train_columns, n_iter,\n sigma=sigma, learning_rate=learning_rate)\n\n # get profit per cluster in train datasets\n df_profit_per_cluster_train = get_profit_per_cluster(final_df_train)\n\n assert len(df_profit_per_cluster_train) >= 3, \"Algorithm, returned less than 3 clusters.\"\n\n df_profit_per_cluster = df_profit_per_cluster_train.sort_values(by='profit', ascending=False)\n group_size = int(len(df_profit_per_cluster) / 3)\n\n buy_clusters_list = list(df_profit_per_cluster.iloc[:group_size]['cluster'])\n sell_clusters_list = list(df_profit_per_cluster.iloc[-group_size:]['cluster'])\n\n return final_df_train, final_df_test, buy_clusters_list, sell_clusters_list", "def load_data(self,split='train'):\n raise NotImplementedError", "def read_data_split_and_search():\n\n\n\n dataReader = Movielens10MReader()\n dataset = dataReader.load_data()\n\n URM_train, URM_test = split_train_in_two_percentage_global_sample(dataset.get_URM_all(), train_percentage = 0.80)\n URM_train, URM_validation = split_train_in_two_percentage_global_sample(URM_train, train_percentage = 0.80)\n\n output_folder_path = \"result_experiments/\"\n\n\n # If directory does not exist, create\n if not os.path.exists(output_folder_path):\n os.makedirs(output_folder_path)\n\n\n\n\n\n\n\n collaborative_algorithm_list = [\n Random,\n TopPop,\n P3alphaRecommender,\n RP3betaRecommender,\n ItemKNNCFRecommender,\n UserKNNCFRecommender,\n MatrixFactorization_BPR_Cython,\n MatrixFactorization_FunkSVD_Cython,\n PureSVDRecommender,\n SLIM_BPR_Cython,\n SLIMElasticNetRecommender\n ]\n\n\n\n\n from Base.Evaluation.Evaluator import EvaluatorHoldout\n\n evaluator_validation = EvaluatorHoldout(URM_validation, cutoff_list=[5])\n evaluator_test = EvaluatorHoldout(URM_test, cutoff_list=[5, 10])\n\n\n runParameterSearch_Collaborative_partial = partial(runParameterSearch_Collaborative,\n URM_train = URM_train,\n metric_to_optimize = \"MAP\",\n n_cases = 10,\n evaluator_validation_earlystopping = evaluator_validation,\n evaluator_validation = evaluator_validation,\n evaluator_test = evaluator_test,\n output_folder_path = output_folder_path,\n similarity_type_list = [\"cosine\"],\n parallelizeKNN = False)\n\n\n\n\n\n pool = 
multiprocessing.Pool(processes=int(multiprocessing.cpu_count()), maxtasksperchild=1)\n pool.map(runParameterSearch_Collaborative_partial, collaborative_algorithm_list)\n\n #\n #\n # for recommender_class in collaborative_algorithm_list:\n #\n # try:\n #\n # runParameterSearch_Collaborative_partial(recommender_class)\n #\n # except Exception as e:\n #\n # print(\"On recommender {} Exception {}\".format(recommender_class, str(e)))\n # traceback.print_exc()\n #", "def split_data(basedir, data_split=0.80):\n manip = data_manipulator(basedir)\n manip.train_test_split(data_split=data_split)", "def generate_data(project_data, config, split_method = RAW) :\n training_data = []\n testing_data = []\n \n # Flatten the data (collapse the project and session hierarchy into a list of session_data)\n for v in config.train_project_names:\n # Data in all sessions of one project\n project_session_data = random.sample(project_data[v], len(project_data[v]))\n\n training_data += project_session_data[int(config.session_training_percentage[0] * len(project_session_data)):\n int(config.session_training_percentage[1] * len(project_session_data))]\n\n if config.double_training:\n for i in xrange(int(config.session_training_percentage[0] * len(project_session_data)),\n int(config.session_training_percentage[1] * len(project_session_data))):\n session_data = project_session_data[i]\n\n reversed_session_data = {}\n reversed_session_data[SESSION_NAME] = session_data[SESSION_NAME] + \"_reversed\"\n reversed_session_data[SESSION_EVENTS] = []\n\n def reverse_point_data_qsr(point_data):\n reversed_point_data = point_data[:4]\n # Hands to objects feature swap\n reversed_point_data += point_data[8:12] \n reversed_point_data += point_data[4:8]\n\n # Centroid direction and distance difference is symmetric\n reversed_point_data += point_data[12:14]\n\n # Object corners swap\n reversed_point_data += point_data[16:18] \n reversed_point_data += point_data[14:16]\n\n reversed_point_data += point_data[18:19]\n reversed_point_data += point_data[20:21] \n reversed_point_data += point_data[19:20]\n\n # For QTCCS\n reversed_point_data += point_data[22:23]\n reversed_point_data += point_data[21:22]\n reversed_point_data += point_data[24:25]\n reversed_point_data += point_data[23:24]\n\n # # For difference of features\n # fl = 21\n # reversed_point_data += point_data[fl:fl + 4]\n # # Hands to objects feature swap\n # reversed_point_data += point_data[fl + 8:fl + 12] \n # reversed_point_data += point_data[fl + 4:fl + 8]\n\n # # Centroid direction and distance difference is symmetric\n # reversed_point_data += point_data[fl + 12:fl + 14]\n\n # # Object corners swap\n # reversed_point_data += point_data[fl + 16:fl + 18] \n # reversed_point_data += point_data[fl + 14:fl + 16]\n\n # reversed_point_data += point_data[fl + 18:fl + 19]\n # reversed_point_data += point_data[fl + 20:fl + 21] \n # reversed_point_data += point_data[fl + 19:fl + 20]\n\n return reversed_point_data\n\n def reverse_point_data_sparse_qsr(point_data):\n reversed_point_data = point_data[:2 * 56]\n # Hands to objects feature swap\n reversed_point_data += point_data[4 * 56:6 * 56] \n reversed_point_data += point_data[2 * 56:4 * 56]\n\n # Centroid direction and distance difference is symmetric\n reversed_point_data += point_data[6 * 56:7 * 56]\n\n # Object corners swap\n reversed_point_data += point_data[8 * 56:9 * 56] \n reversed_point_data += point_data[7 * 56:8 * 56]\n\n anchor = 9 * 56\n reversed_point_data += point_data[anchor:anchor + 2]\n reversed_point_data += 
point_data[anchor + 2 * 2:anchor + 3 * 2] \n reversed_point_data += point_data[anchor + 2:anchor + 2 * 2]\n\n anchor = 9 * 56 + 3 * 2\n # For QTCCS\n reversed_point_data += point_data[anchor + 3:anchor + 2 * 3]\n reversed_point_data += point_data[anchor:anchor + 3]\n reversed_point_data += point_data[anchor + 3 * 3:anchor + 4 * 3]\n reversed_point_data += point_data[anchor + 2 * 3:anchor + 3 * 3]\n\n return reversed_point_data\n\n reversed_session_data[SESSION_DATA] = []\n for point_data in session_data[SESSION_DATA]:\n if split_method == RAW:\n reversed_point_data = point_data[:39]\n reversed_point_data += point_data[51:63]\n reversed_point_data += point_data[39:51]\n elif split_method == PCAS:\n reversed_point_data = point_data[:6]\n # Object centroid swap\n reversed_point_data += point_data[8:10] \n reversed_point_data += point_data[6:8]\n # Object corners swap\n reversed_point_data += point_data[14:18] \n reversed_point_data += point_data[10:14]\n elif split_method == QSR or split_method == EVENT:\n reversed_point_data = reverse_point_data_qsr(point_data)\n elif split_method == SPARSE_QSR:\n reversed_point_data = reverse_point_data_sparse_qsr(point_data)\n\n reversed_session_data[SESSION_DATA].append(reversed_point_data)\n\n for event_str in session_data[SESSION_EVENTS]:\n reversed_event_str = {}\n for key in event_str:\n reversed_event_str[key] = event_str[key]\n\n subj, obj, theme, event, prep = event_str['label']\n def swap_objects(value):\n if value == 2:\n return 3\n if value == 3:\n return 2\n return value\n\n reversed_event_str['label'] = (swap_objects(subj), swap_objects(obj), swap_objects(theme), event, prep)\n\n reversed_session_data[SESSION_EVENTS].append(reversed_event_str)\n\n training_data.append(reversed_session_data)\n\n\n testing_data += project_session_data[int(config.session_testing_percentage[0] * len(project_session_data)):\n int(config.session_testing_percentage[1] * len(project_session_data))]\n \n return (training_data, testing_data)", "def test_split_data(self, whp_pandas):\n test_class = Slug_Forecasting(whp_pandas.copy())\n test_class.stationarity_check()\n test_class.split_data()\n\n assert hasattr(test_class, \"y_train\"), \"y_train attribute must have been create\"\n assert hasattr(test_class, \"y_pred\"), \"y_test attribute must have been create\"\n\n assert len(test_class.y_train) == 180, \"In this example, y_train should be 180 long\"\n assert len(test_class.y_pred) == 60, \"In this example, y_pred should be 60 long\"\n\n test_class = Slug_Forecasting(whp_pandas.copy())\n\n # test train size data\n try:\n test_class.split_data(train_size=400)\n print(\"Not enough data to fulfill train_size requirement\")\n raise ValueError\n except AssertionError:\n pass", "def __init__(self, h5_path='../dataset/lob.h5', split=1,\n train=True, n_window=1, normalization='std', epsilon=1e-15, horizon=0, shift=None):\n\n self.window = n_window\n\n assert 0 <= split <= 8\n assert 0 <= horizon <= 2\n\n # Translate the prediction to horizon to the data\n if horizon == 1:\n horizon = 3\n elif horizon == 2:\n horizon = 4\n\n # Load the data\n file = h5py.File(h5_path, 'r', )\n features = np.float32(file['features'])\n targets = np.int32(file['targets'])\n day_train_split_idx = file['day_train_split_idx'][:].astype('bool')\n day_test_split_idx = file['day_test_split_idx'][:].astype('bool')\n stock_idx = file['stock_idx'][:].astype('bool')\n file.close()\n\n # Get the data for the specific split and setup (train/test)\n if train:\n idx = day_train_split_idx[split]\n\n # Get the 
statistics needed for normalization\n if normalization == 'std':\n self.mean = np.mean(features[idx], axis=0)\n self.std = np.std(features[idx], axis=0)\n features = (features - self.mean) / (self.std + epsilon)\n else:\n idx = day_test_split_idx[split]\n\n if shift is not None:\n print(\"testing shift = \", shift)\n # Shift the testing features only\n features[idx] += shift[0]*np.mean(features, axis=0)\n features[idx] *= shift[1]\n\n\n # Also get the train data to normalize the test data accordingly (if needed)\n if normalization == 'std':\n train_idx = day_train_split_idx[split]\n self.mean = np.mean(features[train_idx], axis=0)\n self.std = np.std(features[train_idx], axis=0)\n features = (features - self.mean) / (self.std + epsilon)\n del train_idx\n\n # Get the data per stock\n self.features_per_stock = []\n self.labels = []\n for i in range(len(stock_idx)):\n cur_idx = np.logical_and(idx, stock_idx[i])\n self.features_per_stock.append(features[cur_idx])\n self.labels.append(targets[cur_idx, horizon])\n\n # Create a lookup table to find the correct stock\n self.look_up_margins = []\n current_sum = 0\n for i in range(len(self.features_per_stock)):\n # Remove n_window since they are used to ensure that we are always operate on a full window\n cur_limit = self.features_per_stock[i].shape[0] - n_window - 1\n current_sum += cur_limit\n self.look_up_margins.append(current_sum)\n\n\n # Get the total number of samples\n self.n = self.look_up_margins[-1]\n self.n_stocks = len(self.look_up_margins)", "def train(self,path,mode):\n if mode == \"porto\":\n self.prepare_data(path)\n else:\n self.prepare_sumo_data(path)\n self.poly_regression()", "def __init__(self, scoring_function=None, partition_index=0):\n self.trained_data = dict()\n self.scoring_function = scoring_function or diff_score\n self.last_pred = []\n self.partitions = dict()\n self.partition_index = partition_index", "def prepare_data(dataset, train_ratio=0.8, input_dim=None, seed=10):\n # Retrieve main path of project\n dirname = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n\n # Download and store dataset at chosen location\n if dataset == 'Cora' or dataset == 'PubMed' or dataset == 'Citeseer':\n path = os.path.join(dirname, 'data')\n data = Planetoid(path, name=dataset, split='full')[0]\n data.name = dataset\n data.num_classes = (max(data.y)+1).item()\n # data.train_mask, data.val_mask, data.test_mask = split_function(data.y.numpy())\n # data = Planetoid(path, name=dataset, split='public', transform=T.NormalizeFeatures(), num_train_per_class=20, num_val=500, num_test=1000)\n\n elif dataset == 'Amazon':\n path = os.path.join(dirname, 'data', 'Amazon')\n data = Amazon(path, 'photo')[0]\n data.name = dataset\n data.num_classes = (max(data.y)+1).item()\n data.train_mask, data.val_mask, data.test_mask = split_function(\n data.y.numpy(), seed=seed)\n # Amazon: 4896 train, 1224 val, 1530 test\n \n elif dataset in ['syn1', 'syn2', 'syn4', 'syn5']: \n data = synthetic_data(\n dataset, dirname, train_ratio, input_dim)\n \n elif dataset == 'syn6':\n data = gc_data(dataset, dirname, train_ratio)\n\n elif dataset == 'Mutagenicity':\n data = gc_data(dataset, dirname, train_ratio)\n\n return data", "def split_data_metrics_learning(cfg):\n actual_pose = cfg['actual_pose']\n target = cfg['target']\n person_ids = cfg['person_ids']\n \n # Split train and val data based on the person ids.\n all_ids = np.arange(1, 21)\n val_ids = cfg['val_ids']\n train_ids = set(all_ids).symmetric_difference(val_ids)\n \n anchor_gallery_split_size = 
cfg['anchor_gallery_split_size']\n window_width = cfg['window_width']\n overlap = cfg['overlap']\n random_state = cfg['random_state']\n \n # Get only the training set data and the label.\n X_train, y_train = get_req_ids(actual_pose, target, train_ids, person_ids)\n \n # Select the evaluation data that measures the performance of the model on the training set.\n train_accuracy_ids = random.sample(train_ids, len(val_ids))\n X_train_acc, y_train_acc = get_req_ids(actual_pose, target, train_accuracy_ids, person_ids)\n \n # Anchor/Gallery set split for the training set.\n X_train_gal, X_train_anchor, y_train_gal, y_train_anchor = train_test(X_train = X_train_acc, y_train = y_train_acc, \n test_size=anchor_gallery_split_size, \n random_state=random_state, stratify=y_train_acc)\n \n # Subsample the gait sequences of the anchor/gallery set of the training set based on the window width and the overlap.\n X_train_gal, y_train_gal = subsample(cfg, X_train_gal, y_train_gal, window_width=window_width, overlap=overlap)\n X_train_anchor, y_train_anchor = subsample(cfg, X_train_anchor, y_train_anchor, window_width=window_width, overlap=overlap)\n \n # Get only the validation set data and the label.\n X_val, y_val = get_req_ids(actual_pose, target, val_ids, person_ids)\n \n # Anchor/Gallery set split for the validation set.\n X_val_gal, X_val_anchor, y_val_gal, y_val_anchor = train_test(X_train = X_val, \n y_train = y_val, \n test_size=anchor_gallery_split_size, \n random_state=random_state, \n stratify=y_val)\n \n \n # If data augmentation parameter is set to True in the configuration dictionary, data augmentation is done for the training set.\n if cfg['augment_data']:\n X_train, y_train = augment_data(X_train, y_train)\n \n # Subsample the gait sequences of the whole training set based on the window width and the overlap.\n X_train, y_train = subsample(cfg, X_train, y_train, window_width=window_width, overlap=overlap)\n \n # Subsample the gait sequences of the anchor/gallery set of the validation set based on the window width and the overlap.\n X_val_gal, y_val_gal = subsample(cfg, X_val_gal, y_val_gal, window_width=window_width, overlap=overlap)\n X_val_anchor, y_val_anchor = subsample(cfg, X_val_anchor, y_val_anchor, window_width=window_width, overlap=overlap)\n \n # Concatenate the gallery and anchor set of the validation data and label as a whole. 
This is just to maintain the train-val uniformity and \n # is not used anywhere in the project.\n X_val, y_val = np.concatenate((X_val_gal, X_val_anchor)), np.concatenate((y_val_gal, y_val_anchor))\n \n return X_train, X_val, X_train_gal, X_train_anchor, X_val_gal, X_val_anchor, y_train, y_val, y_train_gal, y_train_anchor, y_val_gal, y_val_anchor", "def main(input_filepath, output_filepath):\n\n logging.info(\"reading %s\", input_filepath)\n train_test = pd.read_hdf(input_filepath, 'train_test')\n meta = pd.read_hdf(input_filepath, 'meta')\n meta_org = pd.read_hdf(input_filepath, 'meta_org')\n\n sel_series = train_test[train_test.entry_type.isin(['train', 'cold_start'])]\\\n ['series_id'].unique()\n train_series, validate_series = train_test_split(sel_series, random_state=1)\n\n logging.info(\"calc train_test\")\n train_test = calc_final_features(train_test, meta, meta_org=meta_org, verbose=True)\n\n sel = train_test[train_test.entry_type.isin(['train', 'cold_start'])]\n train = sel[sel.series_id.isin(train_series)]\n validate = sel[sel.series_id.isin(validate_series)]\n test = train_test[train_test.entry_type.isin(['test'])]\n\n logging.info(\"writing %s\", output_filepath)\n train.to_hdf(output_filepath, \"train\", mode=\"w\")\n validate.to_hdf(output_filepath, \"validate\", mode=\"a\")\n test.to_hdf(output_filepath, \"test\", mode=\"a\")\n for k in ['meta', 'submission']:\n df = pd.read_hdf(input_filepath, k)\n df.to_hdf(output_filepath, k, mode=\"a\")", "def main():\n datasets = {}\n for dataset_name in tqdm(SOURCE_DATASET_NAMES, desc=\"Processing datasets and fitting base models\"):\n logger.info(f\"processing dataset {dataset_name}\")\n clusters_path: Optional[str] = None\n if dataset_name not in PAIRWISE_ONLY_DATASETS:\n clusters_path = os.path.join(DATA_DIR, dataset_name, dataset_name + \"_clusters.json\")\n train_pairs_path = None\n val_pairs_path = None\n test_pairs_path = None\n else:\n train_pairs_path = os.path.join(DATA_DIR, dataset_name, \"train_pairs.csv\")\n val_pairs_path = os.path.join(DATA_DIR, dataset_name, \"val_pairs.csv\")\n if not os.path.exists(val_pairs_path):\n val_pairs_path = None\n test_pairs_path = os.path.join(DATA_DIR, dataset_name, \"test_pairs.csv\")\n\n logger.info(f\"loading dataset {dataset_name}\")\n anddata = ANDData(\n signatures=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_signatures.json\"),\n papers=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_papers.json\"),\n name=dataset_name,\n mode=\"train\",\n specter_embeddings=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_specter.pickle\"),\n clusters=clusters_path,\n block_type=BLOCK_TYPE,\n train_pairs=train_pairs_path,\n val_pairs=val_pairs_path,\n test_pairs=test_pairs_path,\n train_pairs_size=N_TRAIN_PAIRS_SIZE,\n val_pairs_size=N_VAL_TEST_SIZE,\n test_pairs_size=N_VAL_TEST_SIZE,\n preprocess=True,\n )\n\n logger.info(f\"featurizing {dataset_name}\")\n train, val, test = featurize(\n anddata,\n FEATURIZER_INFO,\n n_jobs=N_JOBS,\n use_cache=True,\n chunk_size=100,\n nameless_featurizer_info=NAMELESS_FEATURIZER_INFO,\n nan_value=NAN_VALUE,\n )\n X_train, y_train, nameless_X_train = train\n X_val, y_val, nameless_X_val = val\n X_test, y_test, nameless_X_test = test\n\n dataset = {}\n dataset[\"anddata\"] = anddata\n dataset[\"X_train\"] = X_train\n dataset[\"y_train\"] = y_train\n dataset[\"X_val\"] = X_val\n dataset[\"y_val\"] = y_val\n dataset[\"X_test\"] = X_test\n dataset[\"y_test\"] = y_test\n dataset[\"nameless_X_train\"] = nameless_X_train\n 
dataset[\"nameless_X_val\"] = nameless_X_val\n dataset[\"nameless_X_test\"] = nameless_X_test\n dataset[\"name\"] = anddata.name\n datasets[dataset_name] = dataset\n\n anddatas = [\n datasets[dataset_name][\"anddata\"]\n for dataset_name in SOURCE_DATASET_NAMES\n if dataset_name not in PAIRWISE_ONLY_DATASETS\n ]\n\n X_train = np.vstack([datasets[dataset_name][\"X_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n y_train = np.hstack([datasets[dataset_name][\"y_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n X_val = np.vstack(\n [datasets[dataset_name][\"X_val\"] for dataset_name in SOURCE_DATASET_NAMES if dataset_name not in {\"augmented\"}]\n )\n y_val = np.hstack(\n [datasets[dataset_name][\"y_val\"] for dataset_name in SOURCE_DATASET_NAMES if dataset_name not in {\"augmented\"}]\n )\n\n nameless_X_train = np.vstack([datasets[dataset_name][\"nameless_X_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n nameless_X_val = np.vstack(\n [\n datasets[dataset_name][\"nameless_X_val\"]\n for dataset_name in SOURCE_DATASET_NAMES\n if dataset_name not in {\"augmented\"}\n ]\n )\n\n logger.info(\"fitting pairwise\")\n union_classifier = PairwiseModeler(n_iter=N_ITER, monotone_constraints=MONOTONE_CONSTRAINTS)\n union_classifier.fit(X_train, y_train, X_val, y_val)\n\n nameless_union_classifier = None\n if USE_NAMELESS_MODEL:\n logger.info(\"nameless fitting pairwise for \" + str(SOURCE_DATASET_NAMES))\n nameless_union_classifier = PairwiseModeler(\n n_iter=N_ITER,\n monotone_constraints=NAMELESS_MONOTONE_CONSTRAINTS,\n )\n nameless_union_classifier.fit(nameless_X_train, y_train, nameless_X_val, y_val)\n logger.info(\"nameless pairwise fit for \" + str(SOURCE_DATASET_NAMES))\n\n logger.info(\"fitting clusterer for\")\n union_clusterer = Clusterer(\n FEATURIZER_INFO,\n union_classifier.classifier,\n cluster_model=FastCluster(),\n search_space=search_space,\n n_jobs=N_JOBS,\n nameless_classifier=nameless_union_classifier.classifier if nameless_union_classifier is not None else None,\n nameless_featurizer_info=NAMELESS_FEATURIZER_INFO if nameless_union_classifier is not None else None,\n )\n union_clusterer.fit(anddatas)\n print(\n \"best clustering parameters:\",\n union_clusterer.best_params,\n )\n\n models = {}\n models[\"clusterer\"] = union_clusterer\n\n with open(\n f\"full_union_model_script_dump_average_{FEATURIZER_VERSION}.pickle\",\n \"wb\",\n ) as _pickle_file:\n pickle.dump(models, _pickle_file)\n logger.info(\"Done.\")", "def train(train_set, test_set, train_label, test_label, data_name, test_filenames, dimension_reduce=False,\n distribute_training=False):\n train_set = np.array(train_set)\n test_set = np.array(test_set)\n\n print(\"The shape of training set before dimension reduction is {0}\".format(train_set.shape))\n print(\"The shape of test set before dimension reduction is {0}\".format(test_set.shape))\n print('Use distribute training ? 
>> {0}'.format(distribute_training))\n reg = linear_model.BayesianRidge()\n\n if dimension_reduce:\n pca = PCA(n_components=128)\n train_set = pca.fit_transform(train_set)\n test_set = pca.fit_transform(test_set)\n\n print(\"The shape of training set after dimension reduction is {0}\".format(train_set.shape))\n print(\"The shape of test set after dimension reduction is {0}\".format(test_set.shape))\n\n if not distribute_training:\n reg.fit(train_set, train_label)\n else:\n train_set, test_set, train_label, test_label = da.array(train_set), da.array(test_set), da.array(\n train_label), da.array(test_label)\n reg.fit(train_set, train_label)\n\n predicted_label = reg.predict(test_set)\n mae_lr = round(mean_absolute_error(test_label, predicted_label), 4)\n rmse_lr = round(math.sqrt(mean_squared_error(test_label, predicted_label)), 4)\n pc = round(np.corrcoef(test_label, predicted_label)[0, 1], 4)\n print('===============The Mean Absolute Error of Model is {0}===================='.format(mae_lr))\n print('===============The Root Mean Square Error of Model is {0}===================='.format(rmse_lr))\n print('===============The Pearson Correlation of Model is {0}===================='.format(pc))\n\n mkdirs_if_not_exist('./model')\n joblib.dump(reg, './model/BayesRidge_%s.pkl' % data_name)\n print('The regression model has been persisted...')\n\n mkdirs_if_not_exist('./result')\n\n out_result(test_filenames, predicted_label, test_label, None, path='./result/Pred_GT_{0}.csv'.format(data_name))\n\n df = pd.DataFrame([mae_lr, rmse_lr, pc])\n df.to_csv('./result/%s.csv' % data_name, index=False)\n print('The result csv file has been generated...')", "def split_dataset(self, split):\n trunk_pos_size = math.ceil((1 - split) * len(self.Pos))\n trunk_neg_size = math.ceil((1 - split) * len(self.Neg))\n trunk_num = int(1 / (1 - split))\n pos_temp = list()\n neg_temp = list()\n for index in range(trunk_num):\n pos_temp.append(self.Pos[index * trunk_pos_size:(index + 1) *\n trunk_pos_size])\n neg_temp.append(self.Neg[index * trunk_neg_size:(index + 1) *\n trunk_neg_size])\n self.test = pos_temp.pop(2) + neg_temp.pop(2)\n # self.train = [i for item in pos_temp + neg_temp for i in item]\n self.train = []\n for item in pos_temp + neg_temp:\n for i in item:\n self.train.append(i)\n\n random.shuffle(self.train)\n random.shuffle(self.test)", "def train(self, algorithm):\n\n kfold = StratifiedKFold(10, True, 1)\n f1_score = []\n precision_score = []\n recall_score = []\n for train, test in kfold.split(self.data_training, self.data_target):\n model = algorithm.fit(self.data_training.iloc[train], self.data_target.iloc[train])\n scores = self.score_model(model, self.data_training.iloc[test], self.data_target.iloc[test])\n f1_score.append(scores[0])\n precision_score.append(scores[1])\n recall_score.append(scores[2])\n\n self.print_results(f1_score, precision_score, recall_score)", "def main():\n df = prepro_last()\n X, y = train_build(df)\n fit_store(X, y)" ]
[ "0.67197025", "0.6372317", "0.6326024", "0.62784594", "0.62007445", "0.6187098", "0.61727315", "0.61609745", "0.6150832", "0.61274564", "0.6109672", "0.60908484", "0.60526675", "0.60400504", "0.5990867", "0.59906036", "0.59859765", "0.5970628", "0.5950032", "0.5942936", "0.5933349", "0.5925814", "0.5925297", "0.5916644", "0.5913775", "0.5902966", "0.5898248", "0.58884454", "0.588773", "0.58804816" ]
0.6798001
0
Initialize the bzip2 package.
def __init__(self, system): super(Bzip2106, self).__init__("bzip2-1.0.6", system, "bzip2-1.0.6.tar.gz")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, *args, **kwargs):\n\t\tself.verbose = kwargs.pop('verbose', self.verbose)\n\t\t#super(ZipArchive, self).__init__(*args, **kwargs)\n\t\tzipfile.ZipFile.__init__(self, *args, **kwargs)", "def __init__(self):\n _snap.TStrHashF_DJB_swiginit(self, _snap.new_TStrHashF_DJB())", "def hasBzip2():\n return _libsbml.SBMLReader_hasBzip2()", "def hasBzip2():\n return _libsbml.SBMLWriter_hasBzip2()", "def __init__(self, repository):\n self.repository = repository\n self.compression = [(gzip.GzipFile, 'gz'), (lzma.LZMAFile, 'xz')]\n self.pending = set()", "def SBMLReader_hasBzip2():\n return _libsbml.SBMLReader_hasBzip2()", "def SBMLWriter_hasBzip2():\n return _libsbml.SBMLWriter_hasBzip2()", "def __init__(self):\n _snap.TPairHashImpl2_swiginit(self, _snap.new_TPairHashImpl2())", "def init():\n return _libsbml.FbcExtension_init()", "def _open_zip(self):\n self.buffer = io.BytesIO()\n self.zf = zipfile.ZipFile(self.buffer, \"w\", zipfile.ZIP_DEFLATED)", "def init():\n return _libsbml.FbcV1ToV2Converter_init()", "def __init__(self, path: str):\n self._path = path\n self._fp = gzip.open(self._path, mode=\"r\")", "def init():\n return _libsbml.FbcV2ToV1Converter_init()", "def init():", "def bgzip_tabix(bedbz2):\n bed = bedbz2.replace(\".bz2\", \"\")\n bedgz = bed + \".gz\"\n tbi = bedgz + \".tbi\"\n if os.path.exists(bedgz) and os.path.exists(tbi):\n print bedgz, tbi, \"has beed generated.\"\n return\n c1 = \"bzip2 -d %s\" % bedbz2\n c2 = \"bgzip %s\" % bed\n c3 = \"tabix -s 1 -b 2 -e 3 %s\" % bedgz\n call_sys([c1, c2, c3])", "def _initialize_buffers(self) -> None:", "def __init__(self, fileref):\n self.__ref = fileref\n self.__lib = _zlib.decompressobj(memLevel=9)\n self.__buf = b''", "def __init__(self, path: str):\n # turn file into bit sequence\n self.init_bit_string(path)\n\n # calculate map size and the index that the flattened tree starts at\n self.map_size = int(self.bit_string[:8], 2)\n self.tree_index = 8 * (self.map_size + 2)\n\n # extract characters from the first bytes of the compressed file and update current index\n char_segment = self.bit_string[8: 8 * self.tree_index]\n self.extract_chars(char_segment)\n self.bit_string_index = self.tree_index", "def uncompress_bzip2(location, target_dir):\n return uncompress(location, target_dir, decompressor=bz2.BZ2File)", "def initialize(self):\n global VERSION_DATE\n\n data = self._request.getData()\n pyhttp = self._request.getHttp()\n config = self._request.getConfiguration()\n\n data[\"pyblosxom_version\"] = VERSION_DATE\n data['pi_bl'] = ''\n\n # Get our URL and configure the base_url param\n if pyhttp.has_key('SCRIPT_NAME'):\n if not config.has_key('base_url'):\n config['base_url'] = 'http://%s%s' % (pyhttp['HTTP_HOST'], pyhttp['SCRIPT_NAME'])\n else:\n config['base_url'] = config.get('base_url', '')\n\n if config[\"datadir\"].endswith(\"\\\\\") or config[\"datadir\"].endswith(\"/\"):\n config['datadir'] = config['datadir'][:-1]\n\n # import and initialize plugins\n import plugin_utils\n plugin_utils.initialize_plugins(config.get(\"plugin_dirs\", []), config.get(\"load_plugins\", None))\n\n # entryparser callback is run here first to allow other plugins\n # register what file extensions can be used\n data['extensions'] = tools.run_callback(\"entryparser\",\n {'txt': blosxom_entry_parser},\n mappingfunc=lambda x,y:y,\n defaultfunc=lambda x:x)", "def FbcExtension_init():\n return _libsbml.FbcExtension_init()", "def init():\n pass", "def __init__(self, *args, offset=0, **kwargs):\n super(BF, self).__init__(*args,\n 
arch=arch_from_id(\"bf\"),\n offset=offset,\n entry_point=0,\n **kwargs)\n self.os = \"bf\"", "def __init__(self, path):\n # TODO: later matching stuff will need superclass attributes\n self.path = path\n #self.topbp = ZFSBlockPointer(topbp) TODO: later", "def initialize(self):\r\n self.bucket_array.initialize()", "def __init__(self, host=None, port=None):\n\n self._mh = MasterHead.get_head()\n\n cfg = self._mh.cfg['Extensions']['Security']['zap']\n self._path = cfg['path']\n self._host = cfg['host'] if (host == None) else host\n self._port = cfg['port'] if (port == None) else port\n\n proxy = 'http://{0}:{1}'.format(self._host, self._port)\n self._client = ZAPv2(proxies={'http': proxy, 'https:': proxy})", "def __init__(self, zipcode, countrycode, apikey):\r\n self.zip = zipcode\r\n self.ccode = countrycode\r\n self.set_apikey(apikey)", "def decompress_bz2(filename):\n basename = os.path.basename(filename)\n newfilepath = os.path.dirname(filename) + '/' + '.'.join(basename.split('.')[0:-1]) # all but bz2\n\n with open(newfilepath, 'wb') as new_file, bz2.BZ2File(filename, 'rb') as bz2_file:\n for data in iter(lambda : bz2_file.read(100 * 1024), b''):\n new_file.write(data)\n\n return newfilepath", "def decompress_bz2(filename):\n basename = os.path.basename(filename)\n newfilepath = os.path.dirname(filename) + '/' + '.'.join(basename.split('.')[0:-1]) # all but bz2\n\n with open(newfilepath, 'wb') as new_file, bz2.BZ2File(filename, 'rb') as bz2_file:\n for data in iter(lambda : bz2_file.read(100 * 1024), b''):\n new_file.write(data)\n\n return newfilepath", "def __init__(self, input_stream, level=9):\n super(Gzip, self).__init__(input_stream)\n\n self._level = level" ]
[ "0.61423457", "0.5892953", "0.5870998", "0.58182406", "0.5793603", "0.5732819", "0.5602661", "0.5576144", "0.55652636", "0.55539757", "0.5521145", "0.5483834", "0.5482151", "0.54801524", "0.5467347", "0.54053587", "0.54005855", "0.53714246", "0.53280556", "0.5319646", "0.53182524", "0.52691317", "0.52681017", "0.52661586", "0.5265196", "0.52157265", "0.51991504", "0.5193243", "0.5193243", "0.51914823" ]
0.7995384
0
Helper function to check for blacklisted tokens
def check_blacklisted_token(token):\n    token = models.TokenBlackList.query.filter_by(token=token).first()\n    if token:\n        return True\n    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_if_token_in_blacklist(decrypted_token):\n return (\n decrypted_token[\"jti\"] in BLACKLIST\n ) # if True, go to revoked_token_callback", "def check_if_token_in_blacklist(decrypted_token):\n jti = decrypted_token['jti']\n return model.revoked_token.RevokedToken.is_blacklisted(jti)", "def check_if_token_in_blacklist(decrypted_token):\n jti = decrypted_token['jti']\n return RevokedTokenModel.is_jti_blacklisted(jti)", "def is_blacklisted(token):\n if Revoked.query.filter_by(token=token).first():\n return True\n return False", "def test_token_was_blacklisted(self):\n\n revoked_token = RevokedToken('secret_token_blacklisted')\n revoked_token.save()\n\n self.assertTrue(\n RevokedToken.is_jti_blacklisted('secret_token_blacklisted'))", "def check_token_in_blacklist(decrypted_token):\n from .models import BlacklistToken\n jti = decrypted_token['jti']\n\n if BlacklistToken.check_blacklist(jti):\n raise InvalidToken(\"Token is blacklisted. Please log in again.\")\n\n return False", "def check_blacklist(auth_token):\n token = BlacklistToken.query.filter_by(token=str(auth_token)).first()\n if token:\n return True\n\n return False", "def blacklist_token(token):\n\n refresh_token = RefreshToken(token)\n refresh_token.blacklist()", "def verify_token(auth_token):\n blacklisted_token = TokenBlacklisting.query.filter_by(\n token=str(auth_token)).first()\n if blacklisted_token:\n return True\n return False", "def is_blacklisted(self):\r\n \r\n in_blacklist = False \r\n if self.chrompos in parser.blacklist:\r\n in_blacklist = True\r\n \r\n return in_blacklist", "async def check_for_blacklist(ctx):\n if ctx.guild is None:\n # raise commands.NoPrivateMessage\n return True\n return db.is_blacklisted(ctx)", "def test_unused_token_is_valid(self):\n assert self.token.is_valid()", "def verify_token(self, token):\n return False", "async def validate_token(self, token):", "def check_for_token(token):\n try:\n decode_token(token)\n return True\n except:\n return False", "def allow_unresolved_secret_tokens(self):\n return self._allow_unresolved_secret_tokens", "def blacklist_token(token):\n curr_token = BlacklistToken(token=token)\n try:\n db.session.add(curr_token)\n db.session.commit()\n except:\n return False\n return True", "def decode_auth_token(auth_token): \n try: \n payload = jwt.decode(auth_token, getattr(settings, \"SECRET_KEY\", \"\"),algorithms=['HS256']) \n is_blacklisted_token = User.check_blacklist(auth_token)\n if is_blacklisted_token:\n return False,'Token blacklisted. Please log in again.'\n else:\n return True, payload['sub']\n except jwt.ExpiredSignatureError:\n return False,'Signature expired. Please log in again.'\n except jwt.InvalidTokenError:\n return False,'Invalid token. 
Please log in again.'", "def test_bad_token(self):\n user = self.create_user()\n\n token_generator = EmailActivationTokenGenerator()\n bad_activation_keys = (\n 'emailactivationtokengenerator',\n 'emailactivation-tokengenerator',\n '3rd-bademailactivationkey'\n )\n for key in bad_activation_keys:\n self.assertFalse(token_generator.check_token(user, key))", "def is_blacklisted(self, fkey):\n return fkey in self.blacklist", "def is_blacklisted(self, string='') -> int:\n try:\n for word in string.split(' '):\n if word in self.blacklist:\n return(1)\n else:\n return(0)\n except Exception as error:\n print(f\"Error: self.is_blacklisted({string}) -> {error}\")", "def test_rejects_invalid_tokens(self):\n config.set(xsrf_token_key='abcdef')\n tool = utils.XsrfTool()\n self.assertFalse(tool.verify_token(\n 'ThisTokenDoesNotEvenHaveASlash', 12345, 'test_action'))\n timestamp = utils.get_timestamp(XsrfToolTests.TEST_NOW)\n self.assertFalse(\n tool.verify_token('NotTheRightDigest/%f' % timestamp, 12345,\n 'test_action'))", "def is_token_revoked(decoded_token):\n jti = decoded_token['jti']\n token = BlacklistedToken.query.filter_by(jti=jti).first()\n return token is not None", "def is_blacklisted_username(username):\n settings = api.config.get_settings()\n return username in settings.get(\n \"username_blacklist\", api.config.default_settings[\"username_blacklist\"]\n )", "def test_cannot_view_all_users_with_blacklisted_token(self):\n resp = self.admin_create_user()\n reply = self.admin_create_user2()\n resp = self.admin_login()\n token = resp['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/users',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def verify_token(*token): # pragma: no cover\n\n if current_app.config.get('IGNORE_AUTH') is True:\n return True\n\n g.user = APITokenModel.verify_token(token[0])\n\n if g.user is None:\n return False\n\n return g.user", "def is_blacklisted(cls, msg):\n return is_blacklisted(msg.fields.get('from_addr'))", "def is_whitelisted(self, fkey):\n return fkey in self.whitelist", "def not_blacklisted_channel(blacklist):\n async def predicate(ctx):\n channel = ctx.message.channel\n server = bot.get_guild(SERVER_ID)\n for c in blacklist:\n if channel == discord.utils.get(server.text_channels, name=c):\n raise CommandNotAllowedInChannel(channel, \"Command was invoked in a blacklisted channel.\")\n return True\n \n return commands.check(predicate)", "def blacklist_token(token, user):\r\n user = User.query.filter_by(username=user).first()\r\n user.login_status = False\r\n token = Token.query.filter_by(token=token).first()\r\n token.blacklist = True\r\n db.session.commit()\r\n return {'Message': 'You have successfully logged out', \"Status\": \"Success\"}, 201" ]
[ "0.76352197", "0.76228154", "0.7478889", "0.7431049", "0.7411897", "0.7238838", "0.7137883", "0.69916326", "0.69205165", "0.68788165", "0.66148496", "0.65662944", "0.64836353", "0.6441574", "0.635617", "0.6342027", "0.63384205", "0.62987286", "0.6216427", "0.6215166", "0.621221", "0.6178898", "0.61447513", "0.6093886", "0.6053502", "0.6016824", "0.59953547", "0.5987605", "0.59503347", "0.59454095" ]
0.7947959
0
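For illustration only, a minimal sketch of how the blacklist check in the record above might guard an incoming request; the response shape and the surrounding Flask/SQLAlchemy wiring (the models module) are assumptions, not part of the record.

def reject_if_blacklisted(token):
    # Hypothetical guard built on check_blacklisted_token from the record above.
    if check_blacklisted_token(token):
        # The 401 payload shape below is an assumption for illustration.
        return {"message": "Token revoked. Please log in again."}, 401
    return None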
Determine the anticipated host switch name for the logical switch represented by and store it in caller's . If an existing name is present, use it.
def _preprocess_resolve_switch_name(obj, kwargs):
    # Determine the expected host_switch_name from the associated
    # TransportZone. This must be done via API regardless of requested
    # execution_type.
    if kwargs.get('host_switch_name') is None:
        # XXX(jschmidt): read() should be able to default to proper
        # obj.id_ instead of requiring explicit caller input.
        tz_id = obj.read(id_=obj.id_)["transport_zone_id"]
        pylogger.debug("Retrieved logical switch transport_zone_id: %s" % tz_id)
        tz = transport_zone_facade.TransportZoneFacade(parent=obj.parent, id_=tz_id)
        tz_switch_name = tz.read(id_=tz.id_)["switch_name"]
        pylogger.debug("Retrieved transport zone switch_name: %s" % tz_switch_name)
        kwargs.update({'host_switch_name': tz_switch_name})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_switch(self,host):\n switch_list = self.__graph_dict[host]\n switch_num = switch_list[0]\n return switch_num", "def get_initiator_host_name(self, connector):\n name = connector.get('initiator',\n connector.get('wwnns', [''])[0])[::-1]\n if self.configuration.unique_fqdn_network:\n name = connector.get('host', name)\n return re.sub('[^0-9a-zA-Z-_]', '_', name[:32])", "def name(self):\n return self.robot.name + ' ' + SWITCH_TYPES[self.type][0]", "def name(self):\n if self.resource.is_client:\n return f\"{self.network.name} {self.resource.name_connection_type} {SWITCH_TYPES[self.variable][0]}\"\n elif self.resource.is_eero or self.resource.is_profile:\n return f\"{self.network.name} {self.resource.name} {SWITCH_TYPES[self.variable][0]}\"\n return f\"{self.resource.name} {SWITCH_TYPES[self.variable][0]}\"", "def get_host(name):\n raise NotImplementedError('derived class should overload me')", "def hostname(name: str = \"\") -> str:\n ...", "def name(self):\n return \"myhomeserver1_\" + self._light_id", "def get_host_name(self, wwpn):\n cmd = \"svcinfo lsfabric -wwpn=%s -delim :\" % (wwpn)\n output = self._svc_command(cmd)[0]\n\n if len(output) < 2:\n return None\n\n header = output[0].split(':')\n values = output[1].split(':')\n index = header.index(SVC_KEY_HOST_NAME)\n name = values[index]\n return name", "def get_host_name():\r\n\tglobal HOST_NAME\r\n\r\n\twhile True:\r\n\t\tname_of_host = input(\"What is your name (As it appears in zoom without the '(Host, me)' part)? \")\r\n\t\tHOST_NAME = name_of_host + \" (Host, me)\"\r\n\t\tcorrect = input(f\"Host name set as {HOST_NAME}, is this correct? [Y/N]: \")\r\n\t\tif correct.upper() == \"Y\":\r\n\t\t\treturn", "def get_host_name(self):\n if self.have_metadata is False:\n self._get_metadata()\n self.have_metadata = True\n\n try:\n return self.keyinfo['tracking_id'].attrs['hostname']\n except:\n return None\n\n if self.have_metadata is False:\n self._get_metadata()\n self.have_metadata = True", "def via_host_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"via_host_name\")", "def via_host_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"via_host_name\")", "def via_host_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"via_host_name\")", "def get_host_name():\n return socket.gethostname()", "def _get_switch_info(switch_info, host_id):\n for switch_ip in switch_info:\n if host_id in switch_info[switch_ip]:\n info = switch_info[switch_ip][host_id].split(\",\")\n return (switch_ip, info[0], info[1:])\n return (None, None, None)", "def get_name(self, address):\n our_beacon = self.format_beacon('connected', False)\n machine_name = re.compile('machine=(.*)\\n').search\n\n try:\n tsock = socket.socket()\n tsock.connect((address, 2190))\n self.send_packet(tsock, our_beacon)\n tivo_beacon = self.recv_packet(tsock)\n tsock.close()\n name = machine_name(tivo_beacon).groups()[0]\n except:\n name = address\n\n return name", "def get_switch(self, conf, dpid):\n\t\tpass", "def get_host_name(self):\n\t\treturn call_sdk_function('PrlSrvInfo_GetHostName', self.handle)", "def name(self) -> str:\n return self.config_name or self.host_name or self.dev_id or DEVICE_DEFAULT_NAME", "def unique_id(self):\n return self.heater.id + \"_switch\"", "def get_switch(self, name):\n try:\n assert name in self.list_switches()\n return self.devices[name]\n except KeyError:\n raise UnknownDevice(name)", "def ethernet_switch_address(self):\n return self._props[\"optional\"].get(self._ethernet_switch_prop)", "def 
get_weak_username(self, host):\n try:\n return self.weak_hosts.get(host)[1]\n except IndexError:\n return \" \"", "def switch_by_label(self, label):\n if isinstance(label, ArduinoSwitchControlSwitch):\n return label\n elif label in self.switches:\n return self.switches[label]\n else:\n raise SwitchError(f\"No switch with label '{label}' found.\")", "def _get_machine_name(self):\n self.machine = platform.uname().node\n return self.machine", "def _detect_name(self):\n\n if 'Model name' in self.cpu_info:\n return self.cpu_info['Model name']\n\n # CPUs C/S Nodes Sockets\n # D03 16 4 1 4 (likely to change in the future)\n # D05 64 32 4 2\n # Amber 46-92 46 1 1-2\n # Tx2 28~224 28 2 1-2\n elif int(self.cpu_info['CPU(s)']) == 16 and \\\n int(self.cpu_info['Socket(s)']) == 4:\n return \"D03\"\n\n elif int(self.cpu_info['CPU(s)']) == 64 and \\\n int(self.cpu_info['Socket(s)']) == 2 and \\\n int(self.cpu_info['NUMA node(s)']) == 4:\n return \"D05\"\n\n elif int(self.cpu_info['Core(s) per socket']) == 46 and \\\n int(self.cpu_info['NUMA node(s)']) == 1:\n return \"Amberwing\"\n\n elif int(self.cpu_info['Core(s) per socket']) == 28 and \\\n int(self.cpu_info['NUMA node(s)']) == 2:\n return \"ThunderX2\"", "async def test_get_rpc_channel_name(mock_rpc_device) -> None:\n assert get_rpc_channel_name(mock_rpc_device, \"input:0\") == \"test switch_0\"\n assert get_rpc_channel_name(mock_rpc_device, \"input:3\") == \"Test name switch_3\"", "async def test_custom_name_1(\n hass: HomeAssistant,\n target_domain: Platform,\n) -> None:\n registry = er.async_get(hass)\n device_registry = dr.async_get(hass)\n\n switch_config_entry = MockConfigEntry()\n switch_config_entry.add_to_hass(hass)\n\n device_entry = device_registry.async_get_or_create(\n config_entry_id=switch_config_entry.entry_id,\n connections={(dr.CONNECTION_NETWORK_MAC, \"12:34:56:AB:CD:EF\")},\n name=\"Device name\",\n )\n\n switch_entity_entry = registry.async_get_or_create(\n \"switch\",\n \"test\",\n \"unique\",\n device_id=device_entry.id,\n has_entity_name=True,\n original_name=\"Original entity name\",\n )\n switch_entity_entry = registry.async_update_entity(\n switch_entity_entry.entity_id,\n config_entry_id=switch_config_entry.entry_id,\n name=\"Custom entity name\",\n )\n\n # Add the config entry\n switch_as_x_config_entry = MockConfigEntry(\n data={},\n domain=DOMAIN,\n options={\n CONF_ENTITY_ID: switch_entity_entry.id,\n CONF_TARGET_DOMAIN: target_domain,\n },\n title=\"ABC\",\n )\n switch_as_x_config_entry.add_to_hass(hass)\n\n assert await hass.config_entries.async_setup(switch_as_x_config_entry.entry_id)\n await hass.async_block_till_done()\n\n entity_entry = registry.async_get(\n f\"{target_domain}.device_name_original_entity_name\"\n )\n assert entity_entry\n assert entity_entry.device_id == switch_entity_entry.device_id\n assert entity_entry.has_entity_name is True\n assert entity_entry.name == \"Custom entity name\"\n assert entity_entry.original_name == \"Original entity name\"\n assert entity_entry.options == {\n DOMAIN: {\"entity_id\": switch_entity_entry.entity_id}\n }", "def get_hostname(self):\n return self.name", "async def test_custom_name_2(\n hass: HomeAssistant,\n target_domain: Platform,\n) -> None:\n registry = er.async_get(hass)\n device_registry = dr.async_get(hass)\n\n switch_config_entry = MockConfigEntry()\n switch_config_entry.add_to_hass(hass)\n\n device_entry = device_registry.async_get_or_create(\n config_entry_id=switch_config_entry.entry_id,\n connections={(dr.CONNECTION_NETWORK_MAC, 
\"12:34:56:AB:CD:EF\")},\n name=\"Device name\",\n )\n\n switch_entity_entry = registry.async_get_or_create(\n \"switch\",\n \"test\",\n \"unique\",\n device_id=device_entry.id,\n has_entity_name=True,\n original_name=\"Original entity name\",\n )\n switch_entity_entry = registry.async_update_entity(\n switch_entity_entry.entity_id,\n config_entry_id=switch_config_entry.entry_id,\n name=\"New custom entity name\",\n )\n\n # Add the config entry\n switch_as_x_config_entry = MockConfigEntry(\n data={},\n domain=DOMAIN,\n options={\n CONF_ENTITY_ID: switch_entity_entry.id,\n CONF_TARGET_DOMAIN: target_domain,\n },\n title=\"ABC\",\n )\n switch_as_x_config_entry.add_to_hass(hass)\n\n # Register the switch as x entity in the entity registry, this means\n # the entity has been setup before\n switch_as_x_entity_entry = registry.async_get_or_create(\n target_domain,\n \"switch_as_x\",\n switch_as_x_config_entry.entry_id,\n suggested_object_id=\"device_name_original_entity_name\",\n )\n switch_as_x_entity_entry = registry.async_update_entity(\n switch_as_x_entity_entry.entity_id,\n config_entry_id=switch_config_entry.entry_id,\n name=\"Old custom entity name\",\n )\n\n assert await hass.config_entries.async_setup(switch_as_x_config_entry.entry_id)\n await hass.async_block_till_done()\n\n entity_entry = registry.async_get(\n f\"{target_domain}.device_name_original_entity_name\"\n )\n assert entity_entry\n assert entity_entry.entity_id == switch_as_x_entity_entry.entity_id\n assert entity_entry.device_id == switch_entity_entry.device_id\n assert entity_entry.has_entity_name is True\n assert entity_entry.name == \"Old custom entity name\"\n assert entity_entry.original_name == \"Original entity name\"\n assert entity_entry.options == {\n DOMAIN: {\"entity_id\": switch_entity_entry.entity_id}\n }" ]
[ "0.59917486", "0.5900308", "0.58105445", "0.5809452", "0.57702166", "0.57147825", "0.562613", "0.558732", "0.55398726", "0.55224764", "0.5492891", "0.5492891", "0.5477548", "0.5439147", "0.53883976", "0.5365079", "0.5351319", "0.534847", "0.53416914", "0.5320049", "0.53086567", "0.53013694", "0.52895534", "0.5277987", "0.5264771", "0.5249689", "0.5233974", "0.5233607", "0.523239", "0.5230245" ]
0.7307711
0
Fit LDA from a scipy CSR matrix (X).
def fit_lda(X, vocab):
    print('fitting lda...')
    return LdaModel(matutils.Sparse2Corpus(X, documents_columns=False),
                    num_topics=100, passes=1, iterations=500, chunksize=1000,
                    update_every=1,
                    id2word=dict([(i, s) for i, s in enumerate(vocab)]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fit(self, X):\n\n X_sparse = X.copy().astype(np.float64)\n self.X_sparse = X_sparse\n self._fit()\n return self", "def fit(self, X, y=None):\n #X = check_array(X, accept_sparse='csr')\n return self", "def fit(self, X: sp.csr_matrix, n_samples: int):\n X = check_array(X, accept_sparse=(\"csr\", \"csc\"))\n if not sp.issparse(X):\n X = sp.csr_matrix(X)\n dtype = np.float64\n\n if self.use_idf:\n _, n_features = X.shape\n self.df = np.squeeze(np.asarray(X.sum(axis=0)))\n idf = np.log(n_samples / self.df)\n self._idf_diag = sp.diags(\n idf,\n offsets=0,\n shape=(n_features, n_features),\n format=\"csr\",\n dtype=dtype,\n )\n\n return self", "def fit(self, X, n_samples):\n X = check_array(X, accept_sparse=('csr', 'csc'))\n if not sp.issparse(X):\n X = sp.csr_matrix(X)\n dtype = np.float64\n\n if self.use_idf:\n _, n_features = X.shape\n self.df = np.squeeze(np.asarray(X.sum(axis=0)))\n idf = np.log(n_samples / self.df)\n self._idf_diag = sp.diags(idf, offsets=0,\n shape=(n_features, n_features),\n format='csr',\n dtype=dtype)\n\n return self", "def fit(self, X):\n X = self.tf_vectorizer.fit_transform(X).toarray()\n if not sp.issparse(X):\n X = sp.csc_matrix(X)\n n_samples, n_features = X.shape\n \n if sp.isspmatrix_csr(X):\n df = bincount(X.indices, minlength=X.shape[1])\n else:\n df = np.diff(sp.csc_matrix(X, copy=False).indptr)\n \n #compute idf weight\n #idf = np.log((float(n_samples)-df+0.5)/(df+0.5))\n idf = np.log(float(n_samples) / df) + 1.0\n self._idf_diag = sp.spdiags(idf, diags=0, m=n_features, \n n=n_features, format='csr')\n #compute the length for each document and average length of the corpus\n doc_len = np.sum(X,axis=1)\n self._doc_len = np.reshape(doc_len, (n_samples,1))\n self._avgdl = np.sum(X)/n_samples", "def fit(self, X):\r\n\t\tself.data = check_array(X)\r\n\t\tn_var = self.data.shape[1]\r\n\r\n\t\tU = np.arange(n_var)\r\n\t\tK = []\r\n\t\tX_ = np.copy(X)\r\n\t\tfor _ in range(0, n_var):\r\n\t\t\tcu_i = self._search_exogenous_x(X_, U)\r\n\t\t\tfor i in U:\r\n\t\t\t\tif i != cu_i:\r\n\t\t\t\t\tX_[:, i] = self._residual( X_[:, i], X_[:, cu_i] )\r\n\t\t\tK.append(cu_i)\r\n\t\t\tU = U[U != cu_i]\r\n\r\n\t\tself._causal_order = K\r\n\t\tself._estimate_adjacency_matrix(X)", "def fit_transform(self, X):\n self.fit(X)\n return self.doc_topic_distr, self.xai", "def fit(self, X):", "def fit(self, x: DNDarray):\n # 1. input sanitation\n if not isinstance(x, DNDarray):\n raise ValueError(f\"input needs to be a ht.DNDarray, but was {type(x)}\")\n if x.split is not None and x.split != 0:\n raise NotImplementedError(\"Not implemented for other splitting-axes\")\n # 2. Embed Dataset into lower-dimensional Eigenvector space\n eigenvalues, eigenvectors = self._spectral_embedding(x)\n\n # 3. 
Find the spectral gap, if number of clusters is not defined from the outside\n if self.n_clusters is None:\n diff = eigenvalues[1:] - eigenvalues[:-1]\n tmp = ht.argmax(diff).item()\n self.n_clusters = tmp + 1\n\n components = eigenvectors[:, : self.n_clusters].copy()\n\n params = self._cluster.get_params()\n params[\"n_clusters\"] = self.n_clusters\n self._cluster.set_params(**params)\n self._cluster.fit(components)\n self._labels = self._cluster.labels_\n self._cluster_centers = self._cluster.cluster_centers_\n\n return self", "def fit_lda_model(self):\n self.id2word = corpora.Dictionary(self.documents)\n self.id2word.filter_extremes(no_below=20, no_above=0.5)\n corpus = [self.id2word.doc2bow(text) for text in self.documents]\n coherence_c_v = []\n coherence_u_mass = []\n print(\"Fitting models\")\n for num_topics in range(self.min_topics, self.max_topics, self.step):\n lda_model = gensim.models.LdaMulticore(corpus=corpus, id2word=self.id2word, num_topics=num_topics,\n random_state=100, chunksize=100, passes=20,\n per_word_topics=True, minimum_probability=0)\n if not os.path.exists(f\"data/intermediate/optimal_testing\"):\n os.mkdir(f\"data/intermediate/optimal_testing\")\n with open(f\"data/intermediate/optimal_testing/lda_model_{num_topics}_topics.pkl\", \"wb\") as file_out:\n pickle.dump(lda_model, file_out)\n coherence_model_lda = CoherenceModel(model=lda_model, texts=self.documents, dictionary=self.id2word,\n coherence='c_v')\n coherence = coherence_model_lda.get_coherence()\n print(f\"Topic {num_topics} coherence: {coherence}\")\n coherence_c_v.append(coherence)\n coherence_model_lda = CoherenceModel(model=lda_model, texts=self.documents, dictionary=self.id2word,\n coherence='u_mass')\n coherence_u_mass.append(coherence_model_lda.get_coherence())\n return coherence_c_v, coherence_u_mass", "def fit_transform(self, X, y=None, sample_weight=None):\n\n X = check_array(X, accept_sparse=\"csr\")\n\n if not issparse(X):\n X = csr_matrix(X)\n\n if sample_weight is not None:\n NotImplementedError(\"Sample weights not supported in distributed\")\n # sample_weight = _check_sample_weight(sample_weight, X, dtype=np.float32)\n\n if np.any(X.data < 0):\n raise ValueError(\n \"PLSA is only valid for matrices with non-negative \" \"entries\"\n )\n\n row_sums = np.array(X.sum(axis=1).T)[0]\n good_rows = row_sums != 0\n\n if not np.all(good_rows):\n zero_rows_found = True\n data_for_fitting = X[good_rows]\n else:\n zero_rows_found = False\n data_for_fitting = X\n\n U, V = plsa_fit(\n data_for_fitting,\n self.n_components,\n n_row_blocks=self.n_row_blocks,\n n_col_blocks=self.n_col_blocks,\n init=self.init,\n n_iter=self.n_iter,\n n_iter_per_test=self.n_iter_per_test,\n tolerance=self.tolerance,\n e_step_thresh=self.e_step_thresh,\n random_state=self.random_state,\n )\n\n if zero_rows_found:\n self.embedding_ = np.zeros((X.shape[0], self.n_components))\n self.embedding_[good_rows] = U\n else:\n self.embedding_ = U\n\n self.components_ = V\n self.training_data_ = X\n\n return self.embedding_", "def fit_lda_model(self):\n self.id2word = corpora.Dictionary(self.documents)\n self.id2word.filter_extremes(no_below=20, no_above=0.5)\n corpus = [self.id2word.doc2bow(text) for text in self.documents]\n alpha = list(np.arange(0.1, 1, 0.3))\n alpha.append(\"symmetric\")\n beta = copy.deepcopy(alpha)\n alpha.append(\"asymmetric\")\n corpus_sets = [gensim.utils.ClippedCorpus(corpus, int(len(corpus) * 0.75)), corpus]\n corpus_titles = [\"75% corpus\", \"100% corpus\"]\n model_results = {\"Validation_set\": [], 
\"Topics\": [], \"Alpha\": [], \"Beta\": [], \"Coherence\": []}\n print(\"Fitting models\")\n for i, corpus_set in enumerate(corpus_sets):\n for num_topics in self.topics_to_test:\n for a in alpha:\n for b in beta:\n lda_model = gensim.models.LdaMulticore(corpus=corpus_set, id2word=self.id2word, alpha=a,\n random_state=100, chunksize=100, passes=20,\n num_topics=num_topics,\n per_word_topics=True, minimum_probability=0, eta=b)\n if i == 1: # we only want to save the model if it's a model on the whole corpus\n if not os.path.exists(f\"data/intermediate/hyperparameter_testing\"):\n os.mkdir(f\"data/intermediate/hyperparameter_testing\")\n with open(f\"data/intermediate/hyperparameter_testing/lda_{num_topics}_\"\n f\"topics{a}_alpha_{b}_eta.pkl\", \"wb\") as file_out:\n pickle.dump(lda_model, file_out)\n coherence_model_lda = CoherenceModel(model=lda_model, texts=self.documents,\n dictionary=self.id2word, coherence='c_v')\n coherence = coherence_model_lda.get_coherence()\n print(f\"Topic {num_topics}, alpha {a} eta {b} corpus {corpus_titles[i]} coherence: {coherence}\")\n model_results['Validation_set'].append(corpus_titles[i])\n model_results['Topics'].append(num_topics)\n model_results['Alpha'].append(a)\n model_results['Beta'].append(b)\n model_results['Coherence'].append(coherence)\n pd.DataFrame(model_results).to_csv(\"hyperparamter_tuning_results.csv\", index=False)", "def fit(self, X, y=None):\n for input_data in X:\n self._node_vocab.add_node(input_data[0])\n self._word_vocab.add_document(input_data[1])\n if self._use_char:\n self._char_vocab.add_documents(input_data[1])\n for data in input_data[2]:\n self._word_vocab.add_document(data)\n if self._use_char:\n self._char_vocab.add_documents(data)\n # self._label_vocab.add_node(' '.join(data)) # this results in a very big lable space (90K) \n self._label_vocab.add_document(data) # Use word indexing instead, drawbacks: BOW\n\n self._node_vocab.build()\n self._word_vocab.build()\n self._char_vocab.build()\n self._label_vocab.build()\n\n return self", "def fit_transform(self, X):\n X_sparse = X.copy().astype(np.float64)\n self.X_sparse = X_sparse\n self._fit()\n return self.sample_weights, self.s, self.feature_weights", "def fit(self, X):\n self.rel_to_idx, self.ent_to_idx = create_mappings(X)\n self.is_fitted = True", "def fit_transform(self, X):\n self._fit(X)\n return self.embedding", "def fit_transform(self, X):\n self.fit(X)\n return self.embedding_", "def fit(self, texts):\n print('Processing text and fitting LDA...')\n\n texts = preprocess_text(texts)\n stemmed_texts = [\n list(set(self.tokenizer.stem(text))) for text in texts]\n self.lda_dictionary = Dictionary(stemmed_texts)\n lda_corpus = [\n self.lda_dictionary.doc2bow(text) for text in stemmed_texts]\n self.lda = LdaModel(lda_corpus, num_topics=self.n_topics)\n print('Done.')\n\n return self", "def _lda(self):\n self.ldamodel = gensim.models.ldamodel.LdaModel(self.gensim_corpus, \n num_topics=self.n_topics, \n id2word=self.id_map, \n passes=self.n_passes,\n random_state=42)\n \n self.topic_matrix = self.ldamodel.print_topics(num_topics=self.n_topics, \n num_words=self.n_words)", "def train_lda(obs):\n print('Training LDA model...')\n lda = LatentDirichletAllocation(n_topics=42, max_iter=100, \n doc_topic_prior=0.0001,\n learning_method='online',\n learning_offset=50., \n topic_word_prior=0.001,\n random_state=0)\n lda.fit_transform(obs)\n pickle.dump(lda, open(\"ilda.data\", \"wb\" ))\n return lda", "def fit(self, X, lexicon_dict, word_embedding_matrix, rerun=False, 
max_iter=None):\n if rerun == False:\n self._initialize_(X, lexicon_dict, word_embedding_matrix)\n \n self.wordOccurenceMatrix = self._check_non_neg_array(self.wordOccurenceMatrix, \"TSWE.fit\")\n if max_iter is None:\n max_iter = self.max_iter\n \n self.all_loglikelihood = []\n self.all_perplexity = []\n n_docs, vocabSize = self.wordOccurenceMatrix.shape\n for iteration in range(max_iter):\n for d in range(n_docs):\n for i, v in enumerate(word_indices(self.wordOccurenceMatrix[d, :])):\n t = self.topics[(d, i)]\n s = self.sentiments[(d, i)]\n prior_sentiment = lexicon_dict.get(v,1)\n self.n_ds[d,s]-=1\n self.n_d[d] -= 1\n self.n_dst[d,s,t] -= 1\n self.n_vts[v, t, s*prior_sentiment] -= 1\n self.n_ts[t, s] -= 1\n self.n_vt[v,t] -= 1\n\n probabilities_ts = self.conditionalDistribution(d, v)\n ind = sampleFromCategorical(probabilities_ts.flatten())\n t, s = np.unravel_index(ind, probabilities_ts.shape)\n \n self.topics[(d, i)] = t\n self.sentiments[(d, i)] = s\n self.n_d[d] += 1\n self.n_dst[d,s,t] += 1\n self.n_vts[v, t, s*prior_sentiment] += 1\n self.n_ts[t, s] += 1\n self.n_ds[d,s]+=1\n self.n_vt[v,t] += 1\n\n '''\n if self.prior_update_step > 0 and (iteration+1)%self.prior_update_step == 0:\n numerator = 0\n denominator = 0\n for d in range(n_docs):\n numerator += psi(self.n_d[d] + self.alphaVec) - psi(self.alphaVec)\n denominator += psi(np.sum(self.n_ds[d] + self.alphaVec)) - psi(np.sum(self.alphaVec))\n \n self.alphaVec *= numerator / denominator \n '''\n if self.prior_update_step > 0 and (iteration+1)%self.prior_update_step == 0:\n print (\"Updating topic embeddings\")\n for k in range(self.n_topic_components):\n res = minimize(L,self.topic_embeddings[k,:],method='L-BFGS-B',args=(self.word_embeddings, self.n_vt[:,k]))\n self.topic_embeddings[k] = res.x\n\n #loglikelihood_ = self.loglikelihood()\n #perplexity_ = self.perplexity()\n \n #self.all_loglikelihood.append(loglikelihood_)\n #self.all_perplexity.append(perplexity_)\n \n #if self.evaluate_every > 0 and (iteration+1)%self.evaluate_every == 0:\n # if self.verbose > 0:\n # print (\"Perplexity after iteration {} (out of {} iterations) is {:.2f}\".format(iteration + 1, max_iter, perplexity_))\n \n self.doc_sentiment_prior_ = self.alphaVec\n normalized_n_vts = self.n_vts.copy() + self.beta\n normalized_n_vts /= normalized_n_vts.sum(0)[np.newaxis,:,:]\n self.components_ = normalized_n_vts\n \n return self", "def fit_transform(self, X):\n\n\t\tD = distance_matrix(X.T, X.T)\t\n\n\t\tG = self.graph(D)\n\n\t\tQ = shortest_path(csgraph = G, method=\"FW\")\t# Compute shortest distance using Floyd-Warshall \n\n\t\t#Modifying the shortest-path matrix.\n\t\tQ = Q ** 2\n\t\tQ *= - 0.5\n\t\tQ += - np.mean(Q, axis=0)\n\n\t\temb = mds(Q, 2)\t\n\n\t\treturn emb", "def fit(self, X_list):\n # Check parameters\n if not isinstance(X_list, list):\n raise ValueError('X_list must be a list.')\n\n if len(X_list) < 2:\n raise ValueError('X_list must be a list containing at least two items')\n\n n_features = check_array(X_list[0]).shape[1]\n X_list_ = []\n for X in X_list:\n X_ = check_array(X)\n if X_.shape[1] != n_features:\n raise ValueError('X_list must be a list with the same number of features')\n X_list_.append(X_)\n X_list = np.array(X_list_)\n\n if self._prior_knowledge is not None:\n self._Aknw = check_array(self._prior_knowledge)\n self._Aknw = np.where(self._Aknw < 0, np.nan, self._Aknw)\n if (n_features, n_features) != self._Aknw.shape:\n raise ValueError('The shape of prior knowledge must be (n_features, n_features)')\n else:\n self._Aknw = 
None\n\n # Causal discovery\n U = np.arange(n_features)\n K = []\n X_list_ = [np.copy(X) for X in X_list]\n for _ in range(n_features):\n m = self._search_causal_order(X_list_, U)\n for i in U:\n if i != m:\n for d in range(len(X_list_)):\n X_list_[d][:, i] = self._residual(X_list_[d][:, i], X_list_[d][:, m])\n K.append(m)\n U = U[U != m]\n\n self._causal_order = K\n\n self._adjacency_matrices = []\n for X in X_list:\n self._estimate_adjacency_matrix(X)\n self._adjacency_matrices.append(self._adjacency_matrix)\n return self", "def fit(self, train_matrix, train_label, sample_weight):\r\n raise NotImplementedError", "def fit(self):\n import networkx as nx\n import torch\n # Step 1. Calculate the Laplacian matrix\n L = nx.laplacian_matrix(self.Graph)\n nodelist = self.Graph.nodes()\n K = L.shape[0]\n\n # Step 2. Get the data in the right format \n cache = self.loss_function(self.data_train)\n \n # Step 3. Compute the proximal loss\n def proximal_loss(t, nu, warm_start, pool, cache=cache):\n XtX = cache['XtX']\n XtY = cache['XtY']\n n = cache['n']\n # LU = X'X + 0.5 * t * I\n Alu = torch.lu(XtX + 1./(2 * t) * torch.eye(n).unsqueeze(0).double())\n b = XtY + 1./(2 * t) * torch.from_numpy(nu)\n x = torch.lu_solve(b, *Alu).numpy()\n return x\n\n def proximal_residual(t, nu, warm_start, pool, lambda_val=1e-4):\n return nu / (1. + t * lambda_val)\n\n G_to_data = self._graph_to_data(cache['alpha_shape'])\n result, info = self._stratified_model_admm(shape=cache['shape'], \\\n Lap=L, \\\n loss_proximal_func=proximal_loss, \\\n regulariser_proximal_func=proximal_residual, \\\n graph_data=G_to_data)\n print(info)\n return self._output_to_graph(result)", "def fit(self, X):\n self._causal_order = None\n self._adjacency_matrices = None\n\n X = check_array(X)\n\n lingam_model = self._lingam_model\n if lingam_model is None:\n lingam_model = DirectLiNGAM()\n elif not isinstance(lingam_model, _BaseLiNGAM):\n raise ValueError(\"lingam_model must be a subclass of _BaseLiNGAM\")\n\n phis = self._ar_coefs\n thetas = self._ma_coefs\n order = self._order\n\n if phis is None or thetas is None:\n phis, thetas, order, residuals = self._estimate_varma_coefs(X)\n else:\n p = phis.shape[0]\n q = thetas.shape[0]\n residuals = self._calc_residuals(X, phis, thetas, p, q)\n\n model = lingam_model\n model.fit(residuals)\n\n psis, omegas = self._calc_psi_and_omega(\n model.adjacency_matrix_, phis, thetas, order\n )\n\n if self._prune:\n ee = np.dot(\n np.eye(model.adjacency_matrix_.shape[0]) - model.adjacency_matrix_,\n residuals.T,\n ).T\n psis, omegas = self._pruning(X, ee, order, model.causal_order_)\n\n self._ar_coefs = phis\n self._ma_coefs = thetas\n self._order = order\n self._residuals = residuals\n\n self._causal_order = model.causal_order_\n self._adjacency_matrices = (psis, omegas)\n\n return self", "def fit(self, X):\n if isinstance(X, np.ndarray) and X.ndim == 2:\n X = [X]\n self.mean_ = np.concatenate(X).mean(axis=0, keepdims=True)\n X_stan = [Xi - self.mean_ for Xi in X]\n uX, sX, vhX = np.linalg.svd(np.concatenate(X_stan), full_matrices=False)\n whiten = vhX.T @ np.diag(1. 
/ sX)\n Xw = [X_stani @ whiten for X_stani in X_stan]\n Xp = [np.diff(Xwi, axis=0) for Xwi in Xw]\n up, sp, vhp = np.linalg.svd(np.concatenate(Xp), full_matrices=False)\n proj = vhp.T\n self.all_coef_ = whiten @ proj[:, ::-1]\n self.all_coef_ /= np.linalg.norm(self.all_coef_, axis=0, keepdims=True)\n self.coef_ = self.all_coef_[:, :self.n_components]\n return self", "def fit(self, x):\n x = np.asarray(x)\n _ = self.fit_transform(x)", "def fit(self, X, y=None):\n t0 = time.perf_counter()\n X = self._validate_data(X, accept_sparse=[\"csr\", \"csc\"])\n\n n_samples, n_features = X.shape\n\n t1 = time.perf_counter()\n if self.n_components == \"auto\":\n self.n_components_ = johnson_lindenstrauss_min_dim(\n n_samples=n_samples, eps=self.eps\n )\n\n if self.n_components_ <= 0:\n raise ValueError(\n \"eps=%f and n_samples=%d lead to a target dimension of \"\n \"%d which is invalid\" % (self.eps, n_samples, self.n_components_)\n )\n\n elif self.n_components_ > n_features:\n raise ValueError(\n \"eps=%f and n_samples=%d lead to a target dimension of \"\n \"%d which is larger than the original space with \"\n \"n_features=%d\"\n % (self.eps, n_samples, self.n_components_, n_features)\n )\n t2 = time.perf_counter()\n else:\n if self.n_components <= 0:\n raise ValueError(\n \"n_components must be greater than 0, got %s\" % self.n_components\n )\n\n elif self.n_components > n_features:\n warnings.warn(\n \"The number of components is higher than the number of\"\n \" features: n_features < n_components (%s < %s).\"\n \"The dimensionality of the problem will not be reduced.\"\n % (n_features, self.n_components),\n DataDimensionalityWarning,\n )\n\n self.n_components_ = self.n_components\n t2 = time.perf_counter()\n\n # Generate a projection matrix of size [n_components, n_features]\n self.components_af_ = self._make_random_matrix(self.n_components_, n_features)\n self.components_ = self.components_af_.to_ndarray()\n t3 = time.perf_counter()\n\n # Check contract\n assert self.components_.shape == (self.n_components_, n_features), (\n \"An error has occurred the self.components_ matrix has \"\n \" not the proper shape.\"\n )\n return self", "def fit(self, x, y=None):\n if self.method == 'svd' and x._sparse:\n raise NotImplementedError(\n \"SVD method not supported for sparse arrays.\")\n\n self.mean_ = x.mean(axis=0)\n norm_x = x - self.mean_\n\n if self.method == \"svd\":\n return self._fit_svd(norm_x)\n else:\n return self._fit_eig(norm_x)" ]
[ "0.64050597", "0.6399704", "0.6362117", "0.62613547", "0.6251182", "0.6121596", "0.610392", "0.6086461", "0.59369344", "0.5904508", "0.5868396", "0.5860403", "0.585907", "0.58524126", "0.5779344", "0.5778336", "0.575929", "0.57262206", "0.5716203", "0.5701557", "0.569485", "0.56885874", "0.56741214", "0.5665374", "0.5661171", "0.56581527", "0.5650661", "0.56443095", "0.56288487", "0.55797076" ]
0.75815755
0
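A hypothetical end-to-end sketch of feeding fit_lda from the record above: the corpus texts are placeholders, and it assumes gensim's LdaModel and matutils are already imported in the record's module; only the CSR-matrix hand-off mirrors the record.

from sklearn.feature_extraction.text import CountVectorizer

texts = ["topic modelling example text", "another short example document"]  # placeholder corpus
vectorizer = CountVectorizer()
X = vectorizer.fit_transform(texts)           # scipy CSR matrix, one row per document
vocab = vectorizer.get_feature_names_out()    # column index -> token mapping
lda = fit_lda(X, vocab)                       # as defined in the record above
print(lda.show_topics(num_topics=5))          # inspect a few learned topics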
Used at initialization to update all scan groups with their database values
def load_all_groups(self):
    for _, group in self.scopes.items():
        group.update()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update(self):\n # clear group before rebuild\n self.clear()\n\n # build configuration groups\n self._config_names = []\n for i in range(self._n_configs):\n config_name = f\"config{i+1:02}\"\n self._config_names.append(config_name)\n self._build_config_group(config_name)\n\n # reset active configuration if necessary\n if not all(cname in self._config_names for cname in self._active_config):\n self._active_config = (self._config_names[0],)\n\n # build datasets\n self._build_datasets()", "def _initilise_graph_db(self):\n for collector in self.collectors:\n collector.init_graph_db()", "def reinit_data(self):\n self.if_name_map, \\\n self.if_alias_map, \\\n self.if_id_map, \\\n self.oid_name_map = Namespace.get_sync_d_from_all_namespace(mibs.init_sync_d_interface_tables, self.db_conn)\n\n self.update_data()", "def update_data(self):\n for sai_id_key in self.if_id_map:\n namespace, sai_id = mibs.split_sai_id_key(sai_id_key)\n if_idx = mibs.get_index_from_str(self.if_id_map[sai_id_key])\n counter_table = self.namespace_db_map[namespace].get_all(mibs.COUNTERS_DB, \\\n mibs.counter_table(sai_id))\n if counter_table is None:\n counter_table = {}\n self.if_counters[if_idx] = counter_table\n\n\n self.lag_name_if_name_map, \\\n self.if_name_lag_name_map, \\\n self.oid_lag_name_map, _, _ = Namespace.get_sync_d_from_all_namespace(mibs.init_sync_d_lag_tables, self.db_conn)\n\n self.if_range = sorted(list(self.oid_name_map.keys()) + list(self.oid_lag_name_map.keys()))\n self.if_range = [(i,) for i in self.if_range]", "def fill_db(self, data):\n check_input_params(data, self.DB)\n self.db = data[self.DB]", "def _initGroups(self):\n defaults = self._getGroupDefaults()\n ddict = self._getDefaultGroupDict(defaults)\n\n for group in self._config.sections():\n ddict[\"_name\"] = group\n container = self.getGroupContainer(**ddict)\n self._passConfig(container, group)\n self.groups.append(container)\n\n if not self.groups:\n self.groups.append(self.getGroupContainer(**defaults._dict_))", "def _UpdateDataSetValues( self ):\n pass", "def updateImageGroups(self):\n self.img_grps = self.splitImages()\n grps = self.img_grps\n self.detail.clear()\n detail = \"Available Groups : \\n\"\n if len(grps) >= 1:\n for i in range(len(grps)):\n detail += \"Group \"+ str(i+1)+ \" : \" + str(grps[i][0]) + \" ... 
\" + str(grps[i][-1]) + '\\n'\n\n self.detail.insertPlainText(detail)\n self.detail.moveCursor(QTextCursor.End)", "def _init_prepare_database(self, feat_db):\n by_groups = self.db.groupby(self.by)\n\n if self.verbose:\n display = progress_display.ProgressDisplay()\n display.add('block', 'Preprocessing by block', len(by_groups))\n\n for by_key, by_frame in by_groups:\n if self.verbose:\n display.update('block', 1)\n display.display()\n\n # allow to get by values as well as values of other variables\n # that are determined by these\n by_values = dict(by_frame.iloc[0])\n\n # apply 'by' filters\n if self.filters.by_filter(by_values):\n # get analogous feat_db\n by_feat_db = feat_db.iloc[by_frame.index]\n\n # drop indexes\n by_frame = by_frame.reset_index(drop=True)\n\n # reset_index to get an index relative to the 'by' db,\n # the original index could be conserved in an additional\n # 'index' column if necessary by removing the drop=True, but\n # this would add another constraint on the possible column name\n by_feat_db = by_feat_db.reset_index(drop=True)\n\n # apply generic filters\n by_frame = self.filters.generic_filter(by_values, by_frame)\n\n self.by_dbs[by_key] = by_frame\n self.feat_dbs[by_key] = by_feat_db\n\n def _by_dbs(l): return self.by_dbs[by_key].groupby(l)\n self.on_blocks[by_key] = _by_dbs(self.on)\n self.across_blocks[by_key] = _by_dbs(self.across)\n self.on_across_blocks[by_key] = _by_dbs(self.on + self.across)\n\n if len(self.across) > 1:\n self.antiacross_blocks[by_key] = dict()\n for across_key in self.across_blocks[by_key].groups:\n b = True\n for i, col in enumerate(self.across):\n b = b * (by_frame[col] != across_key[i])\n self.antiacross_blocks[by_key][across_key] = (\n by_frame[b].index)", "def setup(self):\n base = automap_base()\n engine = create_engine(\"mysql+pymysql://\" + csconfig.config.db_user + \":\" +\n csconfig.config.db_password + \"@\" +\n csconfig.config.db_host + \":\" +\n str(csconfig.config.db_port) +\n \"/\" + csconfig.config.db_name)\n base.prepare(engine, reflect=True)\n session = Session(engine)\n cloud_yaml = base.classes.csv2_group_resource_yaml\n\n for cloud in self.group_resources:\n cloud_yamls = session.query(cloud_yaml).\\\n filter(cloud_yaml.group_name == self.name,\n cloud_yaml.cloud_name == cloud.cloud_name)\n cloud_yaml_list = []\n for yam in cloud_yamls:\n cloud_yaml_list.append([yam.yaml_name, yam.yaml, yam.mime_type])\n if cloud.cloud_type == 'localhost':\n newcloud = cloudscheduler.localhostcloud.LocalHostCloud(extrayaml=cloud_yaml_list, resource=cloud)\n else:\n newcloud = cloudscheduler.openstackcloud.\\\n OpenStackCloud(extrayaml=cloud_yaml_list, resource=cloud)\n self.clouds[newcloud.name] = newcloud\n self.log.debug(\"Added all clouds for group: %s\", self.name)", "def refreshGroups(self):\n self.groups = []\n\n self.addGroupsWithIds(self._getGroupIdsJoined())\n self.addGroupsWithIds(self._getGroupIdsInvited(), False)", "def __init__(self):\n # Wipe the db\n self.wipe_db()\n\n # Set some global things\n try:\n dashboard_configuration = DashboardConfiguration(type=\"default\")\n dashboard_configuration.save()\n except IntegrityError:\n dashboard_configuration = DashboardConfiguration.objects.filter(type=\"default\").first()\n\n # Add all players from dataset\n group = self.add_players(dashboard_configuration)\n\n # Add all games from the dataset\n self.add_games()\n\n # Create the games played for this group\n self.add_game_played(group)", "def test_ipam_vlan_groups_update(self):\n pass", "def _populate(self):\n 
self.addDemographics()\n self.addLabs()\n self.addProblems()\n self.addMeds()\n self.addAllergies()\n self.addImmunizations()\n self.addVitals()\n self.populated_p = True", "def groups(self, groups):\n self._groups = groups", "def _set_group_resource(self, _g):\n\n if isinstance(_g, Server):\n return\n\n for _, sg in _g.subgroups.items():\n self._set_group_resource(sg)\n _g.vCPUs += sg.vCPUs\n _g.mem += sg.mem\n _g.local_volume_size += sg.local_volume_size", "def fill(self):\n\n self.db.batch_insert_camera_from_api()", "def update_all(self):\n self.update_head_node_ip()\n self.get_database_info()\n self.update_users()", "def groups(self, groups):\n\n self._groups = groups", "def groups(self, groups):\n\n self._groups = groups", "def groups(self, groups):\n\n self._groups = groups", "def groups(self, groups):\n\n self._groups = groups", "def _refresh(self):\n # if we have all the values we need to hookup to the URL\n for key in self.DBMSettings.keys():\n if not key.startswith(LOCALCHAR):\n self.DBMSettings[key] = self._urldict()[key]", "def sync_with_database(self):\n # learn from db\n lports = self.nb_api.get_all(l2.LogicalPort)\n for lport in lports:\n port_id = \"{}:{}\".format(lport.lswitch.id, lport.id)\n self.cache_logical_port_by_port_id[port_id] = lport\n lrouters = self.nb_api.get_all(l3.LogicalRouter)\n for lrouter in lrouters:\n self.cache_logical_router_by_dpid[lrouter.id] = lrouter", "def data(self, *args, **kwargs):\n\n data = self.cached(NR_CACHE_NAME)\n if not data:\n raise core.InvalidState(\"No grouping loaded\")\n\n mapping = self.mapping(data['release'], data['groups'])\n data['groups'] = self.transform(data['groups'], mapping)\n self.cache(NR_CACHE_NAME, data)\n return None", "def update_stats():\n list_db = get_list_database()\n\n list_db.group_stats_force_update()\n transaction_commit(None, 'GroupStatsUpdate')\n\n list_db.user_stats_force_update()\n transaction_commit(None, 'UserStatsUpdate')", "def __init__(self, groups=dict()):\n self.groups = groups", "def main():\n lc = db.getCursorForDB(localDb)\n rc = db.getCursorForDB(\"enwiki_p\")\n\n # Drop the old groups\n query = \"DELETE FROM ts_users_groups\"\n out(\"Deleting old user groups\")\n lc = db.execute(lc, query)\n\n # Fetch the updated groups\n query = \"SELECT * FROM user_groups\"\n out(\"Selecting user groups\")\n rc = db.execute(rc, query)\n rows = rc.fetchall()\n space = []\n values = []\n for r in rows:\n space.append(\"(%s,%s)\")\n values += [str(r[\"ug_user\"]), str(r[\"ug_group\"])]\n\n # Add them to the local table\n query = \"INSERT INTO ts_users_groups (tug_uid, tug_group) VALUES %s\" % (','.join(space))\n out(\"Inserting user groups\")\n lc = db.execute(lc, query, values)", "def refresh(self):\n self.active_member_count\n self.description\n self.lbmethod\n self.members\n self.minimum_active_member\n self.minimum_up_member\n self.slow_ramp_time\n self.statistics", "def test_partially_update_device_group_by_id1(self):\n pass" ]
[ "0.62571394", "0.6019692", "0.5947787", "0.59254926", "0.5868216", "0.5769238", "0.55904764", "0.5585611", "0.5578358", "0.5528191", "0.54845923", "0.54744333", "0.54352814", "0.5428182", "0.54216605", "0.54042125", "0.5394926", "0.538905", "0.5349979", "0.5349979", "0.5349979", "0.5349979", "0.5335404", "0.53139955", "0.531254", "0.5264091", "0.52481353", "0.52352244", "0.5232071", "0.522367" ]
0.61861694
1
Generate header for oauth2
def oauth_headers(oauth):
    import base64
    encoded_credentials = base64.b64encode(
        ('{0}:{1}'.format(oauth.client_id, oauth.client_secret)).encode('utf-8'))
    headers = {
        'Authorization': 'Basic {0}'.format(encoded_credentials.decode('utf-8')),
        'Content-Type': 'application/x-www-form-urlencoded'
    }
    return headers
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_oauth_headers(access_token: str) -> dict:\n return {'Authorization': 'Bearer ' + access_token}", "def __header_base64(self):\n header_base64 = base64.b64encode(f'{self.client_id}:{self.client_secret}'.encode('ascii'))\n header_base64 = str(header_base64).split(\"'\")[1]\n return {'Authorization': f'Basic {header_base64}'}", "def build_header(self):\n authstring = \"Bearer \" + self.auth_token\n header = {\n \"Authorization\": authstring,\n \"Content-Type\": \"application/json\",\n \"User-Agent\": self.user_agent,\n \"Accept-Encoding\": \"gzip\"\n }\n return header", "def create_auth_header(api_token):\n return {'Authorization': f'token {api_token}'}", "def _get_authorization_header(self):\n return f\"token {self._context.get_github_token()}\"", "def generate_headers_with_auth(self, token_type: str = 'access'):\n if re.search('access', token_type, re.I):\n bearer_token = self._access_token\n elif re.search('refresh', token_type, re.I):\n bearer_token = self._refresh_token\n else:\n raise (Exception('Please check docstrings and change token_type value'))\n\n return {\n 'accept': 'application/json',\n 'Content-Type': 'application/json',\n 'Authorization': 'Bearer ' + bearer_token\n }", "def api_client_authz_header():\n return assemble_authorization_header(API_TOKEN)", "def api_client_authz_header():\n return assemble_authorization_header(API_TOKEN)", "def header_token(token):\n return {'Authorization': '{0} {1}'.format('JWT', token)}", "def build_header(token: str = None):\n return {\n \"Content-Type\": \"application/json\",\n \"X-Auth-Token\": token or get_project_token(),\n }", "def create_authorization_header(self, **kwargs):\n return {\"Authorization\": \"Bearer {}\".format(self.create_jwt(**kwargs))}", "def buildHeader(self):\n if self.key:\n userString = self.user+b\":\"+self.key\n else:\n userString = self.user+b\":\"\n \n encodedUserString = b64encode(userString)\n decodedUserString = encodedUserString.decode(\"ascii\")\n self.basicAuthHeader = {\"Authorization\": \"Basic \" + decodedUserString}", "def _get_headers() -> dict:\n api_key = API_KEY_CRED_LOADER.load_credentials()\n api_secret = API_SECRET_CRED_LOADER.load_credentials()\n return {\"Authorization\": \"sso-key {}:{}\".format(api_key, api_secret)}", "def get_authorization_header(self):\n return {\"Authorization\": \"Bearer {}\".format(self.get_jwt())}", "def create_oauth(self, user):\r\n from oauth_provider.models import Consumer, Token, Resource\r\n\r\n # Necessary setup for ``oauth_provider``.\r\n resource, _ = Resource.objects.get_or_create(url='test', defaults={\r\n 'name': 'Test Resource'\r\n })\r\n consumer, _ = Consumer.objects.get_or_create(key='123', defaults={\r\n 'name': 'Test',\r\n 'description': 'Testing...'\r\n })\r\n token, _ = Token.objects.get_or_create(key='foo', token_type=Token.ACCESS, defaults={\r\n 'consumer': consumer,\r\n 'resource': resource,\r\n 'secret': '',\r\n 'user': user,\r\n })\r\n\r\n # Then generate the header.\r\n oauth_data = {\r\n 'oauth_consumer_key': '123',\r\n 'oauth_nonce': 'abc',\r\n 'oauth_signature': '&',\r\n 'oauth_signature_method': 'PLAINTEXT',\r\n 'oauth_timestamp': str(int(time.time())),\r\n 'oauth_token': 'foo',\r\n }\r\n return 'OAuth %s' % ','.join([key + '=' + value for key, value in oauth_data.items()])", "def _make_header(self, token):\n header = HEADER.copy()\n header['Authorization'] = \"Bearer {}\".format(token)\n\n return header", "async def gen_headers(auth_string):\n return {\n \"Authorization\": f\"Basic {str(b64encode(bytearray(auth_string, 
'utf8')), 'utf-8')}\"\n }", "def get_api_header(token):\n return {\n 'Authorization': 'Token ' + str(token)}", "def _make_header(self, token: str) -> dict:\n\n header = HEADER.copy()\n # modify to represent how to build the header\n header['Authorization'] = f\"Bearer {token}\"\n\n return header", "def _get_oauth_headers(self, user):\n access_token = AccessTokenFactory.create(user=user, application=ApplicationFactory()).token\n headers = {\n 'HTTP_AUTHORIZATION': 'Bearer ' + access_token\n }\n return headers", "def _get_oauth_headers(self, user):\n access_token = AccessTokenFactory.create(user=user, application=ApplicationFactory()).token\n headers = {\n 'HTTP_AUTHORIZATION': 'Bearer ' + access_token\n }\n return headers", "def _build_http_header(self) -> Dict[str, str]:\n return {}", "def request_http_header( self ) -> dict:\n return {'content-type': 'application/json','Authorization':f'NLAuth nlauth_account={self._acct_number},nlauth_email={self._auth_email},nlauth_signature={self._acct_signature},nlauth_role=1090'}", "def _request_token(self):\n params = {\n 'grant_type': 'client_credentials',\n 'client_id': self.client_id,\n 'client_secret': self.client_secret\n }\n\n response = self._http_request(\n method='POST',\n headers={'Content-Type': 'application/x-www-form-urlencoded'},\n full_url=self.auth_url,\n data=params\n )\n access_token = response.get('access_token')\n auth_header = {'Authorization': f'Bearer {access_token}'}\n return auth_header", "def _create_auth_headers(self):\n auth_headers = {**self.get_headers()}\n auth_headers['Authorization'] = 'Bearer ' + self.get_access_token()\n return auth_headers", "def getHeaders():\n userid = rhev_settings.USERNAME\n passwd = rhev_settings.PASSWORD\n # base64.encodestring adds trailing \\n. \n auth = base64.encodestring(\"%s:%s\" % (userid, passwd)).rstrip(\"\\n\")\n headers = {\"Content-Type\": \"application/xml\",\n \"Accept\": \"application/xml\",\n \"Accept-Charset\": \"utf-8\",\n \"Authorization\" : (\"Basic %s\" % auth)}\n return headers", "def GenerateOAuth2String(username, access_token, base64_encode=True):\n auth_string = 'user=%s\\1auth=Bearer %s\\1\\1' % (username, access_token)\n if base64_encode:\n auth_string = base64.b64encode(auth_string)\n return auth_string", "def _oauth_payload_generate(self):\n\t\tresult = {\n\t\t\t\"oauth_consumer_key\" : self.key,\n\t\t\t\"oauth_nonce\" : self._oauth_nonce_generate(),\n\t\t\t\"oauth_signature_method\" : \"HMAC-SHA1\",\n\t\t\t\"oauth_timestamp\" : str( int( time.time()) ),\n\t\t\t\"oauth_version\" : \"1.0\"\n\t\t}\n\n\t\t# * if token is unavaliable, this func must be called from request_token\n\t\t# provide callback addr instead.\n\t\t# * access token should have a higher priority ...\n\t\tif self.has_user():\n\t\t\tresult[\"oauth_token\"] = self.a_token\n\t\telse:\n\t\t\tif len( self.token ) > 0:\n\t\t\t\tresult[\"oauth_token\"] = self.token\n\t\t\telse:\n\t\t\t\tresult[\"oauth_callback\"] = self.callback\n\n\t\treturn result", "def get_auth_header(self):\n if not self.verify():\n return None\n\n auth_val = self.encode_auth_header_val()\n if not auth_val:\n return None\n\n return {'Authorization': auth_val.replace('\\n', '')}", "def _headers(self):\n auth = AuthenticationProvider.currentAuth()\n\n return {\n 'Authorization': '%s %s' % (auth.tokenType, auth.accessToken),\n 'Content-Type': 'application/json'}" ]
[ "0.7510096", "0.71629244", "0.71587896", "0.7056566", "0.7027807", "0.69251823", "0.6914705", "0.6914705", "0.68121415", "0.67767835", "0.6775404", "0.67447174", "0.67390746", "0.67297125", "0.6694022", "0.66927457", "0.66704285", "0.6578851", "0.65618974", "0.64950544", "0.64950544", "0.6491071", "0.64580643", "0.6430708", "0.64227843", "0.6331883", "0.63285726", "0.6326796", "0.6320564", "0.63170034" ]
0.73699313
1
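A hypothetical example of using the Basic-auth headers from the record above in a token request; the endpoint URL, the grant type, and the oauth object (anything exposing client_id and client_secret) are assumptions for illustration only.

import requests

headers = oauth_headers(oauth)                   # oauth carries client_id / client_secret
payload = {"grant_type": "client_credentials"}   # grant type is an assumption
resp = requests.post("https://example.com/oauth2/token", data=payload, headers=headers)
print(resp.status_code, resp.json().get("access_token"))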
Creates an access token from the supplied oauth2.0 object
def create_access_token(oauth):
    # create parameters for API authorization
    redirect_uri = 'oob'
    params = {'client_secret': oauth.client_secret,
              'redirect_uri': redirect_uri,
              'response_type': 'code'}
    # store the access code
    url = oauth.get_authorize_url(**params)
    # open a web browser to get access token and then store it via manual input
    webbrowser.open(url)
    code = input('Enter code: ')
    # create credentials item
    start_time = time.time()
    # create dictionary to hold credentials and store beginning time
    credentials = {'token_time': start_time}
    # NEED TO ADD IN 'REFRESH TOKEN' FUNCTION HERE SOMEWHERE
    #
    # create parameters
    data = {'code': code,
            'redirect_uri': redirect_uri,
            'grant_type': 'authorization_code'}
    # build the headers
    headers = oauth_headers(oauth)
    # create the raw access token
    raw_access = oauth.get_raw_access_token(data=data, headers=headers)
    # parse the raw access token and add to credentials variable
    credentials.update(access_parse(raw_access))
    # parse access token from credentials
    access_token = credentials['access_token']
    # return access token
    return access_token
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_oauth2_access_token(self):\n if not isinstance(self.session, DropboxSession):\n raise ValueError(\"This call requires a DropboxClient that is configured with an \"\n \"OAuth 1 access token.\")\n url, params, headers = self.request(\"/oauth2/token_from_oauth1\", method='POST')\n\n r = self.rest_client.POST(url, params, headers)\n return r['access_token']", "def build_token_from_oauth_response(oauth_resp):\n return Token(\n access_token=oauth_resp[\"access_token\"],\n refresh_token=oauth_resp[\"refresh_token\"],\n expires_in=oauth_resp[\"expires_in\"],\n )", "def createAccessTokenReplacement(self):\r\n\r\n url = self._config['OAUTH2ENDPOINT']['huddleAuthServer'] + \"request?response_type=code\" + \\\r\n \"&client_id=\" + self._config['OAUTH2']['clientID'] + \\\r\n \"&redirect_uri=\" + self._config['OAUTH2']['redirectUri']\r\n webbrowser.open_new(url)\r\n code = input('Please enter the code from your web browser:')\r\n\r\n response = self._oauth.obtainAccessTokenBy3LeggedOAuth(code)\r\n responseBody = json.loads(response['Body'])\r\n\r\n try:\r\n oauthToken = Token(responseBody)\r\n except TypeError as e:\r\n print (\"Bad response when requesting a token \" + str(response))\r\n sys.exit()\r\n\r\n return oauthToken", "def access_token(config, token):\n response = call_api('post', 'oauth/access_token', config,\n params={'oauth_token': token['oauth_token']},\n data={'oauth_verifier': token['oauth_verifier']})\n return dict([(k, v[0]) for k,v in urlparse.parse_qs(response.text).items()])", "def create_access_token(self):\n\t\t# Wraper for also caching invalid results\n #def getMetadataRofs(path):\n #\ttry:\n # \treturn self.client.metadata(path)\n # except Exception, e:\n # log.write('Exception at getMetadataRofs for path '+ path + '\\n')\n # pprint(e, log)\n # return False\n\n\t\ttry:\n\t\t\trequest_token = self.session.obtain_request_token()\n\t\t\turl = self.session.build_authorize_url(request_token)\n\t\t\tprint url\n\t\t\traw_input()\n\t\t\taccess_token = self.session.obtain_access_token(request_token)\n\t\t\tself.client = client.DropboxClient(self.session)\n\t\t\t\n\t\t\t# Build cache for metadata querying\n\n\t\t\t# Wraper for also caching invalid results\n\t\t\tdef getMetadataRofs(path):\n\t\t\t\ttry:\n\t\t\t\t\treturn self.client.metadata(path)\n\t\t\t\texcept Exception, e:\n\t\t\t\t\tlogger.error('Exception at getMetadataRofs for path '+ path + '\\n')\n\t\t logger.debug(sys.exc_info()[0])\n\t\t\t\t\treturn False\n\n\t\t\tself.cache_metadata = Cache(getMetadataRofs)\n\t\t\tself.cache_files = {}\n\n\t\texcept Exception, e:\n\t\t\tlogger.error('Exception %s at create_access_token' % (sys.exc_info()[0]))\n\t\t\tlogger.debug(pformat(sys.exc_info()))", "def create_namespaced_o_auth_access_token(self, body, **kwargs):\n\n all_params = ['body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_namespaced_o_auth_access_token\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_o_auth_access_token`\")\n\n resource_path = '/oapi/v1/oauthaccesstokens'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n 
form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1OAuthAccessToken',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def get_access_token(self, renew=False):\n if self.access_token is None or renew:\n headers = {} # don't use json here, juse urlencode.\n url = self._url_for_op('token')\n data = urllib.urlencode({'grant_type': 'client_credentials',\n 'client_id':self.CLIENT_ID,\n 'client_secret':self.CLIENT_SECRET})\n req = urllib2.Request(url, data, headers)\n try:\n response = urllib2.urlopen(req).read()\n response = json.loads(response)\n except urllib2.HTTPError as e:\n raise ApiError(e.reason)\n except Exception, e:\n raise ApiError(e)\n self.access_token = response['access_token']\n return self.access_token", "def test_create_o_auth_access_token(self):\n pass", "def from_auth_provider(\n cls, access_token=None, expires_in=None, token_type=None, audience=None\n ):\n expires_at = int(time.time()) + expires_in\n\n return cls(\n access_token=access_token,\n expires_in=expires_in,\n expires_at=expires_at,\n token_type=token_type,\n audience=audience,\n )", "def _get_access_token(self):\n\n self._access_token = None\n if not self._refresh_token:\n raise ValueError(\"Refresh Token not set\")\n\n doc = minidom.Document()\n root = doc.createElement('tokenAuthRequest')\n doc.appendChild(root)\n aki = doc.createElement('accessKeyId')\n aki.appendChild(doc.createTextNode(self.publicAccessKey))\n root.appendChild(aki)\n pak = doc.createElement('privateAccessKey')\n pak.appendChild(doc.createTextNode(self.privateAccessKey))\n root.appendChild(pak)\n rt = doc.createElement('refreshToken')\n rt.appendChild(doc.createTextNode(self._refresh_token))\n root.appendChild(rt)\n data = doc.toprettyxml()\n\n resp = requests.post(BASE_URL + \"authorization\", data=data, headers=self._default_headers, verify=False)\n if resp.status_code >= 300:\n raise Exception(\"Failed to claim access token: {}\".format(resp))\n\n vals = etree_to_dict(ET.XML(resp.content.decode('utf-8')))\n\n self._access_token = resp.headers.get('Location', None)\n if not self._access_token:\n raise ValueError(\"Unable to get access token\")\n\n self._user_id = os.path.basename(vals.get('authorization').get('user'))\n\n # Always set the expiry 30 minutes from now so we dont have to deal with parsing timezones\n # self._access_token_expiry = dateutil_parser.parse(vals.get('authorization').get('expiration'))\n self._access_token_expiry = datetime.datetime.utcnow() + datetime.timedelta(minutes=30)", "def getAccessToken(self):\r\n\r\n #lets see if we have an oauth code\r\n if self.oauthToken is None:\r\n self.oauthToken = self.createAccessToken\r\n\r\n if self.oauthToken.isExpired(): #check to see if its expired if so refresh it\r\n self.oauthToken = self.refreshAccessToken()\r\n\r\n return self.oauthToken #return out access token\r", "def 
fetch_oauth_access_token(consumer_token, request_token):\n url = get_oauth_access_token_url(consumer_token, request_token)\n request = urllib2.urlopen(url)\n token = _oauth_parse_response(request.read())\n request.close()\n return token", "def get_access_token(request):\n user = request.user\n flow = _create_flow(request)\n\n flow.params['state'] = _build_state_value(request, user)\n credentials = StorageByKeyName(\n CredentialsNDBModel, user.user_id(), 'credentials').get()\n\n authorize_url = flow.step1_get_authorize_url()\n redirect_response_object = HttpResponseRedirect(authorize_url)\n if credentials is None or credentials.invalid:\n return redirect_response_object\n\n # Find out if credentials is expired\n refresh_failed = False\n if credentials.access_token is None or credentials.access_token_expired:\n try:\n credentials.refresh(httplib2.Http())\n except AccessTokenRefreshError:\n return redirect_response_object\n except Exception:\n refresh_failed = True\n\n port_value = _validate_port(request.GET.get('port'))\n if port_value is None:\n return HttpTextResponse('Access Token: %s' % (credentials.access_token,))\n\n # Send access token along to localhost client\n redirect_template_args = {'port': port_value}\n if refresh_failed:\n quoted_error = urllib.quote(OAUTH_DEFAULT_ERROR_MESSAGE)\n redirect_template_args['error'] = quoted_error\n client_uri = ACCESS_TOKEN_FAIL_REDIRECT_TEMPLATE % redirect_template_args\n else:\n quoted_access_token = urllib.quote(credentials.access_token)\n redirect_template_args['token'] = quoted_access_token\n client_uri = ACCESS_TOKEN_REDIRECT_TEMPLATE % redirect_template_args\n\n return HttpResponseRedirect(client_uri)", "def get_access_token(request):\n user = request.user\n flow = _create_flow(request)\n\n flow.params['state'] = _build_state_value(request, user)\n credentials = StorageByKeyName(\n CredentialsNDBModel, user.user_id(), 'credentials').get()\n\n authorize_url = flow.step1_get_authorize_url()\n redirect_response_object = HttpResponseRedirect(authorize_url)\n if credentials is None or credentials.invalid:\n return redirect_response_object\n\n # Find out if credentials is expired\n refresh_failed = False\n if credentials.access_token is None or credentials.access_token_expired:\n try:\n credentials.refresh(httplib2.Http())\n except AccessTokenRefreshError:\n return redirect_response_object\n except:\n refresh_failed = True\n\n port_value = _validate_port(request.GET.get('port'))\n if port_value is None:\n return HttpTextResponse('Access Token: %s' % (credentials.access_token,))\n\n # Send access token along to localhost client\n redirect_template_args = {'port': port_value}\n if refresh_failed:\n quoted_error = urllib.quote(OAUTH_DEFAULT_ERROR_MESSAGE)\n redirect_template_args['error'] = quoted_error\n client_uri = ACCESS_TOKEN_FAIL_REDIRECT_TEMPLATE % redirect_template_args\n else:\n quoted_access_token = urllib.quote(credentials.access_token)\n redirect_template_args['token'] = quoted_access_token\n client_uri = ACCESS_TOKEN_REDIRECT_TEMPLATE % redirect_template_args\n\n return HttpResponseRedirect(client_uri)", "async def oauth2_token(\n request: Request, oauth2_request=Depends(_oauth2_request)\n):", "def create_bearer_token(self):\n headers = {\"Content-Type\": \"application/x-www-form-urlencoded\"}\n\n data = {\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": self.refresh_token,\n \"client_id\": self.client_id,\n \"client_secret\": self.client_secret,\n }\n\n r = requests.post(self.token_endpoint, headers=headers, data=data)\n\n if 
r.status_code == 200:\n logging.info(\"Successfully obtained bearer token\")\n self.bearer_token = r.json()[\"access_token\"]\n else:\n logging.warning(\"HTTP Error {}\".format(r.status_code))", "def authorize(self, OAUTH_SETTINGS, consumer_key, consumer_secret, oauth_token, oauth_token_secret, oauth_verifier):\n\t\tconsumer = oauth2.Consumer(consumer_key, consumer_secret)\n\t\ttoken = oauth2.Token(oauth_token, oauth_token_secret)\n\t\tclient = oauth2.Client(consumer, token)\n\n\t\treq = oauth2.Request(method=\"GET\", url=OAUTH_SETTINGS['access_token_url'], parameters={\"oauth_verifier\": oauth_verifier})\n\t\tresp, content = client.request(req.to_url(), \"GET\")\n\t\tif resp['status'] != \"200\":\n\t\t\traise Exception(content)\n\n\t\tquery = urlparse.parse_qs(content)\n\t\treturn query['oauth_token'][0], query['oauth_token_secret'][0]", "def getAuthObj(self):\n if self.accessToken is None:\n self.authenticate()\n\n return OAuth1(self.apiKey, client_secret = self.apiKeySecret,\n resource_owner_key = self.accessToken,\n resource_owner_secret = self.accessTokenSecret,\n signature_type = 'auth_header')", "def _get_access_token(self, url):\n if self.access_token:\n return self.access_token\n data = \"client_id=%s&client_secret=%s&grant_type=password&username=%s&password=%s&scope=write\" %\\\n (self.client_id, self.client_secret, self.username, self.password)\n\n parsed = urlparse(url)\n path = urlunparse(ParseResult(parsed.scheme, parsed.netloc, \"/oauth2/access_token\", None, None, None))\n\n auth_resp = urlopen(Request(path, data), timeout=10)\n if auth_resp.getcode() != 200:\n self.logger.error(\"Error with client credentials\")\n return self.access_token\n auth_resp_data = json.loads(auth_resp.read())\n\n if \"access_token\" in auth_resp_data:\n self.access_token = auth_resp_data[\"access_token\"]\n else:\n self.logger.error(\"Error with client credentials\")\n return self.access_token", "def auth(access_token, access_token_secret, consumer_key, consumer_secret):\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token,access_token_secret)\n return auth", "def get_access_token(request_token,request_secret,verifier):\n oauth = OAuth1(CLIENT_KEY, client_secret=CLIENT_SECRET, resource_owner_key=request_token, resource_owner_secret=request_secret, verifier=verifier)\n response = requests.post(ACCESS_TOKEN_URL, auth=oauth)\n credentials = urlparse.parse_qs(response.content)\n access_token = credentials.get(\"oauth_token\")[0]\n access_secret = credentials.get(\"oauth_token_secret\")[0]\n return access_token, access_secret", "def get_access_token(self,verifier,access_token_url):\n\t\toauth = OAuth1(client_key=self.CONSUMER_KEY,\n\t\t\tclient_secret=self.CONSUMER_SECRET,\n\t\t\tresource_owner_key=self.resource_owner_key,\n\t\t\tresource_owner_secret=self.resource_owner_secret,\n\t\t\tverifier=verifier)\n\t\tr = requests.post(url=access_token_url, auth=oauth)\n\t\tcredentials = parse_qs(r.content)\n\t\tif \"oauth_token\" not in credentials.keys():\n\t\t\treturn None,None\n\t\ttoken = credentials.get('oauth_token')[0]\n\t\tsecret = credentials.get('oauth_token_secret')[0]\n\t\tself.token=token\n\t\tself.secret=secret\n\t\treturn token,secret", "async def login_for_access_token(form_data: OAuth2PasswordRequestForm = Depends()):\n user = example_user_validator(form_data.username, form_data.password)\n if not user:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Incorrect username or password\",\n headers={\"WWW-Authenticate\": 
\"Bearer\"},\n )\n access_token_data = jwt_claims.copy()\n access_token_data[\"sub\"] = user[\"username\"]\n access_token_data[\"exp\"] = datetime.utcnow() + timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)\n access_token_data[\"jti\"] = str(uuid.uuid4())\n\n refresh_token_data = jwt_claims.copy()\n refresh_token_data[\"sub\"] = user[\"username\"]\n refresh_token_data[\"exp\"] = datetime.utcnow() + timedelta(days=REFRESH_TOKEN_EXPIRE_DAYS)\n refresh_token_data[\"type\"] = \"refresh\"\n refresh_token_data[\"jti\"] = str(uuid.uuid4())\n\n return AccessRefreshToken(\n access_token=jwt.encode(access_token_data, SECRET_KEY, algorithm=ALGORITHM),\n refresh_token=jwt.encode(refresh_token_data, SECRET_KEY, algorithm=ALGORITHM)\n )", "def get_access_token(self):\n\n token_work = time.time() < self.expires\n\n if token_work:\n # No need update token\n return self.access_token\n\n data = {\n 'client_id': self.client_id,\n 'grant_type': 'implicit'\n }\n\n response = requests.post('https://api.moltin.com/oauth/access_token', data=data)\n raise_response_errors(response)\n\n response_json = response.json()\n\n self.access_token = response_json['access_token']\n self.expires = response_json['expires']\n\n logger.debug('elasticpathh access token was updated')\n\n return self.access_token", "def get_access_token(credentials={}):\n client_id = credentials['client_id']\n client_secret = credentials['client_secret']\n\n if client_id == None or client_secret == None:\n return None\n\n # POST request for token\n response = requests.post('https://auth.domain.com.au/v1/connect/token', \n data = {'client_id':client_id,\n \"client_secret\":client_secret,\n \"grant_type\":\"client_credentials\",\n \"scope\":\"api_listings_read api_listings_write\",\n \"Content-Type\":\"text/json\"})\n token=response.json()\n expire = datetime.now() + timedelta(seconds=token['expires_in'])\n print (f'token expires at {expire}')\n\n access_token = {}\n access_token['access_token'] = token['access_token']\n access_token['expire_at'] = expire\n\n return access_token", "def read_namespaced_o_auth_access_token(self, name, **kwargs):\n\n all_params = ['name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method read_namespaced_o_auth_access_token\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `read_namespaced_o_auth_access_token`\")\n\n resource_path = '/oapi/v1/oauthaccesstokens/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n 
post_params=form_params,\n files=local_var_files,\n response_type='V1OAuthAccessToken',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def create_auth_token(self,\n client_id,\n client_secret,\n grant_type):\n\n # Prepare query URL\n _query_builder = Configuration.base_uri\n _query_builder += '/oauth///token'\n _query_url = APIHelper.clean_url(_query_builder)\n\n # Prepare headers\n _headers = {\n 'accept': 'application/json'\n }\n\n # Prepare form parameters\n _form_parameters = {\n 'client_id': client_id,\n 'client_secret': client_secret,\n 'grant_type': grant_type\n }\n\n # Prepare and execute request\n _request = self.http_client.post(_query_url, headers=_headers, parameters=_form_parameters)\n _context = self.execute_request(_request)\n\n # Endpoint and global error handling using HTTP status codes.\n if _context.response.status_code == 400:\n raise APIException('unsupported_grant_type', _context)\n elif _context.response.status_code == 401:\n raise APIException('invalid_client', _context)\n elif _context.response.status_code == 404:\n raise APIException('The requested URI does not exist', _context)\n elif _context.response.status_code == 503:\n raise APIException('The service requested is currently unavailable', _context)\n elif (_context.response.status_code < 200) or (_context.response.status_code > 208): \n raise APIException('An internal error occurred when processing the request', _context)\n self.validate_response(_context)\n\n # Return appropriate type\n return APIHelper.json_deserialize(_context.response.raw_body, OAuthResponse.from_dictionary)", "def request_access_token():\n\n # For Private application authentication, you must specifiy\n # grant_type=client_credentials and the service scope. For the \n # Content API, scope=contentapi\n post_data = {\"grant_type\": APP_CONFIG['GRANT_TYPE'],\n \"scope\": APP_CONFIG['SCOPE']}\n post_data_string = json.dumps(post_data)\n\n # Construct authentication string:\n # 1. Concatenate the client id, a colon character \":\", and the client secret into a single string\n # 2. URL encode the string from step 1\n # 3. 
Base64 encode the string from step 2\n authstr = to_native_string(\n b64encode(('%s:%s' % (APP_CONFIG['CLIENT_ID'], APP_CONFIG['CLIENT_SECRET'])).encode('utf-8'))).strip()\n\n # Construct an Authorization header with the value of 'Basic <base64 encoded auth string>'\n headers = {\n \"Content-Type\": \"application/json;charset=UTF-8\",\n \"Accept\": \"application/json\",\n \"Authorization\": \"Basic \" + authstr\n }\n\n r = s.post(APP_CONFIG['OAUTH_TOKEN_URL'], data=post_data_string, headers=headers, verify=(app.config['SSLVERIFY'] == 'True'))\n\n if r.status_code in (400,500):\n\n # Handle known error\n result = r.json() \n return jsonify(result)\n\n elif r.status_code == 200:\n\n result = r.json() \n access_token = result['access_token']\n token_type = result['token_type']\n timestamp = result.get('timestamp', None)\n expires_in = result.get('expires_in', None)\n token_expiry = None\n if expires_in is not None:\n token_expiry = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S')\n token_expiry = token_expiry + datetime.timedelta(seconds=expires_in)\n token_expiry = token_expiry.isoformat()\n\n html = '<pre>';\n html += '<h3>Successfully retrieved access token!</h3>' \n html += '<pre>';\n html += 'access_token : ' + access_token\n html += '<pre>';\n html += 'token_type : ' + token_type\n html += '<pre>';\n html += 'expires_in (sec) : ' + str(expires_in)\n html += '<pre>';\n html += 'token_expiry : ' + token_expiry\n html += '<pre>';\n html += 'timestamp : ' + timestamp\n\n html += '<pre>';\n html += '<h3>Query Content API with Access Token</h3>'\n html += '<pre>';\n html += '<a href=\"/query-collection-myhuman?access_token='+access_token+'\">Query Collection: myhuman</a>'\n\n return html\n\n else:\n # Handle unknown error\n return (r.text, r.status_code, r.headers.items())", "def get_access_token():\n\n account = get_account()\n\n account.EnsureCredentials(dbus_interface=GOA_ACCOUNT)\n access_token, _ = account.GetAccessToken(dbus_interface=GOA_ACCOUNT_OAUTH2)\n return str(access_token)", "def request_access_token(self, as_url: str, audience: str, scopes: List['str']):\n session = AceSession.create(key_id=bytes(f\"{self.client_id}{AceSession.session_id}\", 'ascii'))\n\n pop_key = session.public_pop_key\n\n payload = {\n CK.GRANT_TYPE: GrantTypes.CLIENT_CREDENTIALS,\n CK.CLIENT_ID: self.client_id,\n CK.CLIENT_SECRET: self.client_secret,\n CK.SCOPE: \",\".join(scopes),\n CK.AUD: audience,\n CK.CNF: { Cose.COSE_KEY: CoseKey(pop_key, session.pop_key_id, CoseKey.Type.ECDSA).encode() }\n }\n\n response = requests.post(url=f\"{as_url}/token\", data=dumps(payload))\n\n if response.status_code != 200:\n print(f\"\\t ERROR: {loads(response.content)}\")\n exit(1)\n\n response_content = loads(response.content)\n\n token = response_content[CK.ACCESS_TOKEN]\n rs_pub_key = CoseKey.from_cose(response_content[CK.RS_CNF])\n\n session.token = token\n session.rs_public_key = rs_pub_key.key\n\n return session" ]
[ "0.77427965", "0.7283522", "0.6906905", "0.6883373", "0.68754137", "0.6776269", "0.6742007", "0.6687795", "0.66251546", "0.6614909", "0.6581949", "0.65400434", "0.6519731", "0.6517932", "0.65080005", "0.64990944", "0.6489406", "0.6487857", "0.648599", "0.64817834", "0.6473712", "0.6445017", "0.64410484", "0.6439445", "0.6423755", "0.64105487", "0.6397823", "0.63948506", "0.63934463", "0.6390371" ]
0.7380083
1
API query to return all available players, sorted by number of fantasy points\n
def available_players_query(): #start the calculation timer calc_start = time.time() #initialize everything last_first_names = [] full_names = [] player_key = [] player_pos = [] start = 1 done = False #this is where the data is actually created #loop thru to get all of the players available while(not done): query_url = base_query_url + 'league/' + leagueID + '/players;status=A;sort=PTS;start=%s;count=25' %start r = s.get(query_url, params={'format': 'json'}) output = r.json() output = output['fantasy_content'] output = output['league'] output = output[1] output = output['players'] count = output['count'] player_num = list(output.keys()) player_num = player_num[0:len(player_num)-1] #grab the names for each of the players in this batch of players for i in player_num: #get to player details output1 = output[i] output1 = output1['player'] output1 = output1[0] #get player name output_name = output1[2] output_name = output_name['name'] first = output_name['first'] last = output_name['last'] full = output_name['full'] last_first = last + ', ' + first #get player key output_key = list(output1[0].values())[0] #get player position output_pos = list(output1[9].values())[0] #add items to lists last_first_names.append(last_first) full_names.append(full) player_key.append(output_key) player_pos.append(output_pos) #stopping rule: if the number of players on the page is less than 25, then stop start += 25 if count < 25: done = True #stop the timer calc_end = time.time() #print the calculation time print('Process complete') print('Calculation time for all available players: {0:0.2f} seconds'.format((calc_end-calc_start))) #return the players name and player key lists return full_names, player_key, player_pos
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def playerStandings():\n\n getPlayers = \"SELECT id, name, wins, matches FROM playerstats ORDER BY wins DESC\"\n players = executeQuery({'dbname': 'tournament', 'query' : getPlayers, 'type' : 'find'})\n return players", "def player_stats_query(week, player_list, session=s): \n #initialize lists\n pos_list = []\n team_list = []\n \n #cycle thru each player that is currently available\n for player in avail_player_key:\n #build the API url for the unique player key\n url_player = base_query_url+'league/'+leagueID+'/players;player_keys='+player+'/stats;type=week;week='+str(week)\n #convert API call to json\n raw = s.get(url_player, params={'format': 'json'}).json()\n #parse out the players details info (e.g. position, owned, etc.)\n player_details = raw['fantasy_content']['league'][1]['players']['0']['player'][0]\n #parse out position from player details\n pos = player_details[9]['display_position'].upper()\n \n ## FILTER OUT NON-OFFENSE POSITIONS\n if pos not in ['QB', 'WR', 'RB', 'TE']:\n continue\n else:\n \n #parse out team from player_details\n team = player_details[6]['editorial_team_abbr'].upper()\n #append data to lists\n pos_list.append(pos)\n team_list.append(team)\n \n #initialize a stats list\n stats_list = []\n #parse out the player stats\n player_stats = raw['fantasy_content']['league'][1]['players']['0']['player'][1]['player_stats']['stats']\n #loop thru all of the various stats\n for stat in player_stats:\n stat_dict = stat['stat']\n stats_list.append(stat_dict)\n \n return stats_list", "def player_list():\n page = request.args.get(\"page\", \"1\")\n count = request.args.get(\"count\", \"12\")\n team_id = request.args.get(\"team_id\")\n\n if not team_id:\n raise BadRequest(\"Nama team tidak boleh kosong\")\n\n # type conversion\n page = int(page)\n count = int(count)\n team_id = int(team_id)\n\n player = player_ctrl.get_list(page=page, count=count, team_id=team_id)\n\n response = {\n \"status\": 200 if player.items != [] else 204,\n \"has_next\": player.has_next,\n \"has_prev\": player.has_prev,\n \"total\": player.total,\n \"result\": _entity_player_list(player.items)\n }\n\n return jsonify(response)", "def show_players(self) -> None:\n players_list = []\n for player in PLAYERS:\n data_player = ((\n str(player.get(\"first_name\")) + \" \" +\n str(player.get(\"last_name\")) + \" | \" +\n str(player.get(\"birthday\")) + \" | \" +\n str(player.get(\"genre\")) + \" | \" +\n str(player.get(\"ranking\"))\n ))\n players_list.append(data_player)\n utils.clear_terminal()\n print(\n \"Do you want the list of players by alphabetical order or by ranking ? 
\\n\"\n \"1 - Ranking players list \\n\"\n \"2 - Alphabetical players list\"\n )\n choice = check.request_selection_with_number(\"ranking\", \"alphabetical\", \"None\")\n if choice == \"ranking\":\n player_id = 0\n players_list = sorted(players_list, key=lambda player: players_list[4])\n utils.clear_terminal()\n print(\"==========================================\")\n print(\"List of all Players in ranking order : \")\n print(\"==========================================\")\n for player in players_list:\n player_id += 1\n print(str(player_id) + \" : \" + player)\n elif choice == \"alphabetical\":\n player_id = 0\n players_list.sort()\n utils.clear_terminal()\n print(\"============================================\")\n print(\"List of all Players in alphabetical order : \")\n print(\"============================================\")\n for player in players_list:\n player_id += 1\n print(str(player_id) + \" : \" + player)", "def playerStandings():\n #gets id, player, wins and matches ordered by most wins\n conn = DB().execute(\"select id, player, wins, matches FROM players order by wins desc\")\n #conn = DB().execute(\"SELECT id FROM players UNION SELECT player FROM players UNION SELECT COUNT(winner) as winners FROM matches GROUP BY winner UNION SELECT SUM(COUNT(loser),winners) as losers FROM matches GROUP BY loser\")\n #conn = DB().execute(\"SELECT players.id, players.player, count(matches.winner) AS winners, count(matches.loser) + winners AS total_matches FROM players JOIN matches ON players.player=matches.winner=matches.loser\")\n #collects the select rows into a list\n playersList = list(conn[\"cursor\"].fetchall())\n conn[\"cursor\"].close()\n return playersList", "async def get_all_top_10(self) -> 'Response':\n headers = {\n 'Content-Type': 'application/json',\n 'Accept': 'application/vnd.lichess.v3+json'\n }\n response = await self._client.request(method=RequestMethods.GET, url=USERS_PLAYER_URL, headers=headers)\n return response", "def playerStandings():\n c.execute(\"SELECT id,name,wins,matches FROM players ORDER BY wins DESC\");\n print c.fetchall()\n return c.fetchall()", "def playerStandings():\n # gets connection to tournament database in conn object\n conn = connect()\n # gets the cursor to execute queries\n c = conn.cursor()\n # executes select statement on STANDING view for getting results in\n # descending order of number of wins for each player\n c.execute(\"SELECT * FROM STANDING ORDER BY WINS DESC;\")\n # results are stored in ps variable\n ps = c.fetchall()\n # closing the connection to tournament database\n conn.close()\n # returns the results receieved from tournament database\n return ps", "def playerStandingsByPoints():\n with _connect_db() as (conn, cur):\n cur.execute(\"\"\"SELECT * FROM standing_by_points_and_omw;\"\"\")\n standings = cur.fetchall()\n return standings", "def playerStandings():\n db, cursor = connect()\n cursor.execute(\"SELECT id, name , wins, matches FROM players ORDER BY wins DESC\")\n return cursor.fetchall() \n\n #player = print row for row in cursor.fetchall() ", "def team_players_query():\n #start the calculation timer\n calc_start = time.time()\n\n #initialize everything\n last_first_names = []\n full_names = []\n player_key = []\n player_pos = []\n \n #build the query URL\n query_url = base_query_url + 'team/' + leagueID + teamID + '/roster'\n\n #get the json data\n r = s.get(query_url, params={'format': 'json'})\n output = r.json()\n output = output['fantasy_content']['team'][1]['roster']['0']['players']\n player_num = list(output.keys())\n 
player_num = player_num[0:len(player_num)-1]\n #loop thru all of the players and extract the necessary info\n for i in player_num:\n result = output[i]\n result = result['player'][0]\n #store the player key\n player_k = result[0]['player_key']\n #store the player position\n pos = result[9]['display_position']\n #store player names\n output_name = result[2]['name']\n f_name = output_name['first']\n l_name = output_name['last']\n full = output_name['full']\n #build formatted name\n last_first = l_name + ', ' + f_name\n #add to lists\n full_names.append(full)\n last_first_names.append(last_first)\n player_key.append(player_k)\n player_pos.append(pos)\n \n #stop the timer\n calc_end = time.time()\n #print the calculation time\n print('Process complete')\n print('Calculation time for rostered players: {0:0.2f} seconds'.format((calc_end-calc_start)))\n #return full names and player keys\n return full_names, player_key, player_pos", "def playerStandings():\n db_conn = connect()\n db_cursor = db_conn.cursor()\n db_cursor.execute(\"select player_id, player_name, wins, (wins + losses) as total_played from normalized_wins_and_losses order by wins desc, total_played desc;\")\n player_standings = db_cursor.fetchall()\n db_conn.commit()\n db_conn.close()\n return player_standings", "def fetch_players_stats():\n players_scraper = PlayerStatsScraper(API_URL, API_HEADERS)\n result = players_scraper.save_objects()\n return result", "def show_players_specific_tournament(self) -> None:\n id_choice = check.request_id(TOURNAMENTS)\n tournament_data = TOURNAMENTS.get(doc_id=id_choice)\n if tournament_data.get(\"players\") == {}:\n print(\"\\n This tournaments has no players yet\")\n else:\n players_list = tournament_data.get(\"players\")\n deserialized_player_list = []\n for player_data in players_list:\n deserialized_player = Player(**json.loads(player_data))\n deserialized_player_list.append(deserialized_player)\n utils.clear_terminal()\n print(\n \"Do you want the list of players by alphabetical order or by ranking ? 
\\n\"\n \"1 - Ranking players list \\n\"\n \"2 - Alphabetical players list\"\n )\n choice = check.request_selection_with_number(\"alphabetical\", \"ranking\", \"None\")\n if choice == \"alphabetical\":\n utils.clear_terminal()\n deserialized_player_list = sorted(deserialized_player_list, key=lambda player: player.first_name)\n for deserialized_player in deserialized_player_list:\n print(deserialized_player)\n elif choice == \"ranking\":\n utils.clear_terminal()\n deserialized_player_list = sorted(deserialized_player_list, key=lambda player: player.ranking)\n for deserialized_player in deserialized_player_list:\n print(deserialized_player)", "async def get_players(self):\r\n if os.environ.get(\"WoW_Token\") is None:\r\n return\r\n else:\r\n async with aiohttp.ClientSession().get('https://us.api.battle.net/wow/leaderboard/3v3?locale=en_US&apikey=' + os.environ.get(\"WoW_Token\")) as res:\r\n if res.status == 200:\r\n data = await res.json()\r\n output = {}\r\n for player in range(0, 965):\r\n output[int(player)] = data['rows'][player]\r\n with open('Pvp_Players.json', 'w') as pvp_players:\r\n json.dump(output, pvp_players)\r\n return output", "def playerStandings():\n cur4 = conn.cursor()\n \n # This playerStandings() should return in format (id,name,wins,matches) \n query =\"\"\"SELECT id, name, sum(wincount) as wins, sum(lose_count)+sum(wincount) as total\n from\n (((\n select p.id, p.name, count(winner) as wincount, '0' as lose_count\n from players p left join matches on p.id=winner group by p.id, p.name order by count(winner) desc)\n UNION\n (select p.id, p.name, '0' as wincount, count(loser) as lose_count\n from players p left join matches on p.id=loser group by p.id, p.name order by count(loser) desc\n )))\n as standings group by id, name order by wins desc, total asc;\n \"\"\"\n cur4.execute(query)\n rows = cur4.fetchall()\n\n return rows", "def get_player_data(responses) -> pd.DataFrame:\n result = []\n for p in responses:\n data = p.decode(\"utf8\").replace(\"'\", '\"')\n data = json.loads(data)\n latest_stats = data.get(\"league\").get(\"standard\").get(\"stats\").get(\n \"latest\")\n stats_dict = {\n \"FGP\":\n latest_stats.get(\"fgp\"),\n \"FTP\":\n latest_stats.get(\"ftp\"),\n \"3PM\":\n round(\n int(latest_stats.get(\"tpm\")) / int(\n latest_stats.get(\"gamesPlayed\")), 1),\n \"PPG\":\n latest_stats.get(\"ppg\"),\n \"APG\":\n latest_stats.get(\"apg\"),\n \"RPG\":\n latest_stats.get(\"rpg\"),\n \"SPG\":\n latest_stats.get(\"spg\"),\n \"BPG\":\n latest_stats.get(\"bpg\"),\n \"TPG\":\n latest_stats.get(\"topg\"),\n \"MPG\":\n round(\n int(latest_stats.get(\"min\")) / int(\n latest_stats.get(\"gamesPlayed\")), 1)\n }\n result.append(stats_dict)\n return pd.DataFrame(result)", "def players(self, game: str) -> Response:\n\n endpoint = '/api/players'\n query = f'?game={game}'\n return self.fetch(endpoint, query)", "def playerStandings():\n # place all players in a dictionary\n player_dict = {}\n conn, c = connect()\n c.execute(\"\"\"SELECT * FROM players;\"\"\")\n for row in c.fetchall():\n player_dict[row[0]] = [row[1], 0, 0]\n\n # count the number of win and matches in for all matches\n c.execute(\"\"\"SELECT winner, loser FROM matches;\"\"\")\n for row in c.fetchall():\n if row[0] in player_dict:\n player_dict[row[0]][1] += 1\n player_dict[row[0]][2] += 1\n if row[1] in player_dict:\n player_dict[row[1]][2] += 1\n\n # compile win counts as the key to dictionary\n win_count = {}\n for i in player_dict:\n wins = player_dict[i][1]\n if wins in win_count:\n win_count[wins].append((i, 
player_dict[i][0],\n wins, player_dict[i][2]))\n else:\n win_count[wins] = [(i, player_dict[i][0],\n wins, player_dict[i][2])]\n\n # compile output list\n output_list = []\n for i in sorted(win_count.keys(), reverse=True):\n for j in win_count[i]:\n output_list.append(j)\n\n return output_list", "def get_user_players(self, userid):\r\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\", \"Accept\": \"text/plain\",\r\n 'Referer': 'http://' + self.domain + '/standings.phtml', \"User-Agent\": user_agent}\r\n req = self.session.get('http://' + self.domain + '/playerInfo.phtml?pid=' + str(userid),\r\n headers=headers).content\r\n soup = BeautifulSoup(req, \"html.parser\")\r\n # title = soup.title.string\r\n # community = soup.find_all('table', border=0)[1].a.text\r\n # username = re.search('\\((.*?)\\)', soup.find('div', id='title').text).group(1)\r\n players_info = list()\r\n for i in soup.find('table', cellpadding=2).find_all('tr')[1:]:\r\n cad = i.find_all('td')\r\n player_id = int(re.findall('\\d+', i.find_all('img')[0]['src'])[0])\r\n name = cad[2].text.strip()\r\n club = cad[3].find('img')['alt']\r\n club_id = int(re.findall('\\d+', i.find_all('img')[1]['src'])[0])\r\n value = float(cad[4].text.replace(\".\", \"\"))\r\n totalpoints = float(cad[5].text)\r\n position = self.translate_position(cad[6].text)\r\n players_info.append([player_id, name, club_id, club, value, totalpoints, position])\r\n return players_info", "def playerStandings():\n conn, cur = connect()\n query = \"SELECT * FROM player_standings;\"\n try:\n cur.execute(query)\n except:\n print(\"Error encountered when selecting player standings from the database\")\n player_standings = cur.fetchall()\n conn.close()\n\n standings = []\n for player in player_standings:\n standings.append((player['player_id'], player['player_name'],\n (int)(player['wins']), (int)(player['num_matches'])))\n return standings", "def nflffpointleaders(self, irc, msg, args):\n \n url = self._b64decode('aHR0cDovL2dhbWVzLmVzcG4uZ28uY29tL2ZmbC9sZWFkZXJz')\n\n try:\n req = urllib2.Request(url)\n html = (urllib2.urlopen(req)).read()\n except:\n irc.reply(\"Failed to open: %s\" % url)\n return\n \n html = html.replace('&nbsp;',' ')\n \n soup = BeautifulSoup(html)\n table = soup.find('table', attrs={'id':'playertable_0'})\n rows = table.findAll('tr')[2:12]\n\n append_list = []\n count = 1\n\n for row in rows:\n rank = count\n player = row.find('td').find('a')\n points = row.find('td', attrs={'class':'playertableStat appliedPoints sortedCell'})\n append_list.append(str(rank) + \". 
\" + ircutils.bold(player.getText()) + \" (\" + points.getText() + \")\")\n count += 1 # ++\n \n title = \"Top 10 FF points:\"\n descstring = string.join([item for item in append_list], \" | \") # put the list together.\n output = \"{0} :: {1}\".format(ircutils.mircColor(title, 'red'), descstring)\n irc.reply(output)", "def playerStandings():\n db = connect()\n c = db.cursor()\n query = (\"SELECT * FROM standings;\")\n c.execute(query)\n matches = c.fetchall()\n print(matches)\n db.close()\n return matches", "def playerSearch(self, start, count, level, formation, position, nationality, league, team, minBid, maxBid, minBIN, maxBIN):\n searchstring = \"\"\n cardList = list()\n\n if level != \"\" and level != \"any\":\n searchstring += \"&lev=\" + level\n if formation != \"\" and formation != \"any\":\n searchstring += \"&form=\" + formation\n if position != \"\" and position != \"any\":\n if position == \"defense\" or position == \"midfield\" or position == \"attacker\":\n searchstring += \"&zone=\" + position\n else:\n searchstring += \"&pos=\" + position\n if nationality > 0:\n searchstring += \"&nat=\" + str(nationality)\n if league > 0:\n searchstring += \"&leag=\" + str(league)\n if team > 0:\n searchstring += \"&team=\" + str(team)\n if minBIN > 0:\n searchstring += \"&minb=\" + str(minBIN)\n if maxBIN > 0:\n searchstring += \"&maxb=\" + str(maxBIN)\n if minBid > 0:\n searchstring += \"&micr=\" + str(minBid)\n if maxBid > 0:\n searchstring += \"&macr=\" + str(maxBid)\n\n requestor = UrlRequestor(\"https://utas.fut.ea.com/ut/game/fifa13/auctionhouse?type=player&start=\" + str(start) + \"&num=\" + str(count) + searchstring, {'Content-Type': 'application/json', 'Cookie': self.EASW_KEY + \"; \" + self.EASF_SESS + \"; \" + self.FUTPHISHING + \"; \", 'X-UT-SID': self.XUT_SID, 'x-http-method-override': 'GET'}, \"\")\n requestor.open()\n lol = requestor.getReturnData().get('auctionInfo')\n\n for card in lol:\n cardList.append(Card(card, self))\n return cardList", "def get_fb_ind_rankings(self):\n\n ranks = []\n self._logger.debug(\"Getting foosball individual rankings\")\n\n try:\n self.check_if_db_connected()\n cursor = self._db_conn.cursor()\n cursor.execute(\"SELECT player_id, first_name, last_name, \\\nnickname FROM player\")\n players = cursor.fetchall()\n\n for player_id, first_name, last_name, nickname in players:\n cursor.execute(\"SELECT fb_offense_rating, fb_defense_rating FROM \\\nplayer WHERE player_id = {0}\".format(player_id))\n offense_rating, defense_rating = cursor.fetchall()[0]\n\n cursor.execute(\"SELECT mu, sigma FROM rating WHERE rating_id \\\n= {0}\".format(offense_rating))\n mu, sigma = cursor.fetchall()[0]\n\n offense_rank = float(mu) - (3 * float(sigma))\n cursor.execute(\"SELECT mu, sigma FROM rating WHERE rating_id \\\n= {0}\".format(defense_rating))\n mu, sigma = cursor.fetchall()[0]\n\n defense_rank = float(mu) - (3 * float(sigma))\n\n cursor.execute(\"SELECT COUNT(result_id) FROM fb_result WHERE \\\noffense_winner = {0}\".format(player_id))\n offense_win_count = cursor.fetchone()[0]\n\n cursor.execute(\"SELECT COUNT(result_id) FROM fb_result WHERE \\\ndefense_winner = {0}\".format(player_id))\n defense_win_count = cursor.fetchone()[0]\n\n cursor.execute(\"SELECT COUNT(result_id) FROM fb_result WHERE \\\noffense_loser = {0}\".format(player_id))\n offense_lose_count = cursor.fetchone()[0]\n\n cursor.execute(\"SELECT COUNT(result_id) FROM fb_result WHERE \\\ndefense_loser = {0}\".format(player_id))\n defense_lose_count = cursor.fetchone()[0]\n\n intermediate_rank = 
(first_name, last_name, nickname,\n 'Offense', round(offense_rank, 4), offense_win_count,\n offense_lose_count)\n ranks.append(intermediate_rank)\n del intermediate_rank\n intermediate_rank = (first_name, last_name, nickname,\n 'Defense', round(defense_rank, 4), defense_win_count,\n defense_lose_count)\n ranks.append(intermediate_rank)\n del intermediate_rank\n\n except MySQLdb.OperationalError:\n self._logger.error(\"MySQL operational error occured\")\n traceback.print_exc()\n raise exceptions.DBConnectionError(\"Cannot connect to MySQL server\")\n\n except MySQLdb.ProgrammingError:\n self._logger.error(\"MySQL programming error\")\n traceback.print_exc()\n raise exceptions.DBSyntaxError(\"MySQL syntax error\")\n\n else:\n return ranks", "def find_all_by_player(self, player):\n cursor = self._connection.cursor()\n command = 'SELECT * FROM scores WHERE player=? ORDER BY level'\n cursor.execute(command, [player])\n return cursor.fetchall()", "def playerStandings():\n\n db = connect()\n db_cursor = db.cursor()\n query = \"SELECT * FROM standings\"\n db_cursor.execute(query)\n standings = db_cursor.fetchall()\n db.close()\n return standings", "def playerStandings():\n dbConn = connect()\n c = dbConn.cursor()\n c.execute(\"SELECT * FROM standings\")\n standings = c.fetchall()\n dbConn.close()\n return standings", "def playerStandings():\n with _connect_db() as (conn, cur):\n cur.execute(\"\"\"SELECT * FROM standings;\"\"\")\n standings = cur.fetchall()\n return standings", "def playerStandings():\n conn = connect()\n c = conn.cursor()\n # Gets all the information from the view \"standings\".\n c.execute(\"SELECT * from standings;\")\n result = c.fetchall()\n conn.close()\n return result" ]
[ "0.688901", "0.668698", "0.6631293", "0.6608959", "0.6578679", "0.656035", "0.65281093", "0.6521085", "0.6487832", "0.6485906", "0.64535034", "0.64514905", "0.6449632", "0.63346374", "0.63248485", "0.63226306", "0.6192959", "0.61134183", "0.60795534", "0.6069883", "0.60646695", "0.6040604", "0.598503", "0.59784126", "0.59774673", "0.5976796", "0.5976306", "0.5967583", "0.5967232", "0.596571" ]
0.74588764
0
Returns the player stats for the given week\n Takes the player list as an argument so the function can be used for available players and rostered players\n Only works for offensive players (QB, WR, RB, TE) right now
def player_stats_query(week, player_list, session=s): #initialize lists pos_list = [] team_list = [] #cycle thru each player that is currently available for player in avail_player_key: #build the API url for the unique player key url_player = base_query_url+'league/'+leagueID+'/players;player_keys='+player+'/stats;type=week;week='+str(week) #convert API call to json raw = s.get(url_player, params={'format': 'json'}).json() #parse out the players details info (e.g. position, owned, etc.) player_details = raw['fantasy_content']['league'][1]['players']['0']['player'][0] #parse out position from player details pos = player_details[9]['display_position'].upper() ## FILTER OUT NON-OFFENSE POSITIONS if pos not in ['QB', 'WR', 'RB', 'TE']: continue else: #parse out team from player_details team = player_details[6]['editorial_team_abbr'].upper() #append data to lists pos_list.append(pos) team_list.append(team) #initialize a stats list stats_list = [] #parse out the player stats player_stats = raw['fantasy_content']['league'][1]['players']['0']['player'][1]['player_stats']['stats'] #loop thru all of the various stats for stat in player_stats: stat_dict = stat['stat'] stats_list.append(stat_dict) return stats_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_player_stats_from_game(team, year, week):", "def get_players_game_stats_for_season_for_week(self, season, week, season_type=\"REG\"):\n try:\n season = int(season)\n week = int(week)\n if season_type not in [\"REG\", \"PRE\", \"POST\"]:\n raise ValueError\n except (TypeError, ValueError):\n raise FantasyDataError('Error: Invalid method parameters')\n\n season_param = \"{0}{1}\".format(season, season_type)\n result = self._method_call(\"PlayerGameStatsByWeek/{season}/{week}\", season=season_param, week=week)\n return result", "def nflweek(self, irc, msg, args, optlist, optweek):\n \n url = self._b64decode('aHR0cDovL3MzLmFtYXpvbmF3cy5jb20vbmZsZ2MvYWxsU2NoZWR1bGUuanM=')\n \n usePre, useNext, outputWeek = False, False, False\n for (option, arg) in optlist:\n if option == 'pre':\n usePre = True\n \n if optweek:\n if optweek == \"next\":\n useNext = True\n elif optweek.isdigit():\n if usePre: \n if 1 <= int(optweek) <= 4:\n outputWeek = \"Preseason Week %s\" % optweek\n else:\n irc.reply(\"ERROR: Preseason week number must be between 1 and 4.\")\n return\n else:\n if 1 <= int(optweek) <= 17:\n outputWeek = \"Week %s\" % optweek\n else:\n irc.reply(\"ERROR: Week must be between 1-17\")\n return \n\n try:\n req = urllib2.Request(url)\n html = (urllib2.urlopen(req)).read()\n except:\n irc.reply(\"Failed to open: %s\" % url)\n return\n \n jsondata = json.loads(html)\n\n week = jsondata.get('week', None) # work with the week data so we know where we are.\n\n if week is None:\n irc.reply(\"Failed to load schedule.\")\n return\n\n currentWeekName = week.get('current', {'current': None}).get('weekName', None) \n nextWeekName = week.get('next', {'next': None}).get('weekName', None) \n\n if currentWeekName is None:\n irc.reply(\"Cannot figure out the current week.\")\n return\n\n games = jsondata.get('content', None) # data in games.\n \n if games is None:\n irc.reply(\"Failed to load the games data.\")\n return\n \n if outputWeek:\n games = [item['games'] for item in games if item['weekName'] == outputWeek]\n weekOutput = outputWeek\n elif useNext:\n games = [item['games'] for item in games if item['weekName'] == nextWeekName]\n weekOutput = nextWeekName\n else:\n games = [item['games'] for item in games if item['weekName'] == currentWeekName]\n weekOutput = currentWeekName\n \n append_list = []\n\n for games in games:\n for t in games:\n awayTeam = self._translateTeam('team', 'nid', t['awayTeamId'])\n homeTeam = self._translateTeam('team', 'nid', t['homeTeamId'])\n append_list.append(\"[\" + t['date']['num'] + \"] \" + awayTeam + \"@\" + homeTeam + \" \" + t['date']['time'])\n \n descstring = string.join([item for item in append_list], \" | \")\n output = \"{0} :: {1}\".format(ircutils.bold(weekOutput), descstring)\n \n irc.reply(output)", "def pull_game_stats_weekly(season,week):\n\n# pull url tags for active players into list\n url_list = []\n pos_list = []\n csv_path = \"../CSV_data/ActivePlayerList.csv\"\n with open(csv_path) as csv_file:\n reader = csv.reader(csv_file,skipinitialspace=True)\n for row in reader:\n pos_list.append(row[0])\n url_list.append(row[2])\n\n# scrape through all player url tags\n for pos,url in zip(pos_list,url_list):\n file_path = \"../CSV_data/\"+pos+\"Stats.csv\"\n try:\n url += str(season)\n soup = BeautifulSoup(ul.urlopen(url).read(), \"html.parser\")\n # assign field names\n player_name = soup.find(\"span\", {\"class\" : \"player-name\"}).string\n print player_name, url\n fieldNames = soup.find(\"tr\", {\"class\" : \"player-table-key\"}).findAll(\"td\")\n 
numColumns = len(fieldNames)\n # pull the statistics\n table = soup.findAll(\"table\", {\"class\":\"data-table1\"})\n regularSeason = table[1]\n\n for i in range(len(regularSeason)):\n body = regularSeason.findAll(\"tbody\")\n body1 = body[0]\n rows = body1.findAll(\"tr\")\n rowsList = []\n for i in range(len(rows)):\n if len(rows[i]) > 2:\n rowsList.append(rows[i])\n# remove row[0] which contains field names\n del rowsList[len(rowsList)-1]\n\n # write statistics to csv\n for j in range(len(rowsList)):\n tempRow = rowsList[j]\n cells = tempRow.findAll(\"td\")\n output = \"\"\n if (cells[0].string == str(week)):\n for i in range(numColumns): # for each field, append to output string\n tempCell = str(cells[i]).lstrip(\"<td>\").rstrip(\"</td>\").replace('\\t', \"\").replace('\\r', \"\").replace('\\n', \"\").replace(\" \", \"\")\n cell = re.sub('<[^>]+>', '', tempCell)\n cell = re.sub(\"[^{}]+\".format(printable), \"\", cell)\n output = output + cell + \",\"\n if (tempCell == 'Bye'):\n for i in range(numColumns-2):\n output = output + \",\"\n print \"Bye Week Found\"\n break\n if output != \"\":\n output = \"\\n\" + output + player_name.strip() + \",\" + str(season)\n print output\n with open(file_path, \"a\") as text_file:\n print \"Writing to...\" + file_path\n text_file.write(output)\n text_file.close()\n time.sleep(.05)\n\n\n print '-------------------------------------'\n except IOError, e:\n print 'Failed to open url'\n print '-------------------------------------'\n if hasattr(e, 'code'):\n print 'We failed with error code - %s.' % e.code\n elif hasattr(e, 'reason'):\n print \"The error object has the following 'reason' attribute :\"\n print e.reason\n return False\n\n except IndexError:\n print 'No regular season data: Index error'\n print '-------------------------------------'\n #return False\n\n except AttributeError:\n print 'No regular season data: Attribute error'\n print '-------------------------------------'\n #return False", "def getWatchlistSummary(self):\n players = self.getAllPlayerInfoWatchlistFull()\n\n # [ playernumber, bidstatus, rating, name, startprice, curbid_or_finalsoldprice, buynow, time, id ]\n num_players_won = 0\n num_players_expired = 0\n\n wonplayers_sellprice_total = 0\n wonplayers_boughtprice_total = 0\n for p in players:\n p_bidstatus = p[1]\n p_id = p[8]\n p_boughtprice = p[5]\n p_sellprice = self.getPlayerSellPrice(p_id)\n\n if \"won\" in p_bidstatus:\n num_players_won += 1\n wonplayers_sellprice_total += p_sellprice\n wonplayers_boughtprice_total += p_boughtprice\n if \"expired\" in p_bidstatus:\n num_players_expired += 1\n\n # TODO if num players lost deviates from players won, notify other autobidder is likely on player\n projectedprofit = wonplayers_sellprice_total - wonplayers_boughtprice_total\n self.user_players_won += num_players_won\n # self.user_projected_profit += projectedprofit\n\n log_event(self.queue, \"Players won: \" + str(num_players_won))\n log_event(self.queue, \"Players lost: \" + str(num_players_expired))\n log_event(self.queue, \"Total investment: \" +\n str(wonplayers_boughtprice_total))\n log_event(self.queue, \"Total proj. 
return: \" +\n str(wonplayers_sellprice_total))\n log_event(self.queue, \"Projected Profit: \" + str(projectedprofit))\n\n return num_players_won", "def _getWeeklyPlayHours(self):\n serverRegionalSettings = BigWorld.player().serverSettings['regional_settings']\n weekDaysCount = account_shared.currentWeekPlayDaysCount(time_utils._g_instance.serverUTCTime, serverRegionalSettings['starting_time_of_a_new_day'], serverRegionalSettings['starting_day_of_a_new_week'])\n return self._getDailyPlayHours() + sum(self.__stats.dailyPlayHours[1:weekDaysCount])", "def get_trends_by_week(self):\n try:\n return self.profile_data[\"trendsByWeek\"]\n except Exception as e:\n error_msg = (\"Failed to retrieve weekly trends: {}\"\n \"\".format(str(e)))\n raise PlayerDataException(error_msg)", "def test_get_player_stats_from_game():\n team = \"Titans\"\n year = \"2018\"\n week = \"1\"\n expected_team_stats = {}\n\n assert ff_team.get_player_stats_from_game(\n team, year, week) == expected_team_stats", "def get_player_data(player, battleTag, responce):\r\n # Convert responce to a \"soup\" object by passing it to the soup constructor, and specify lxml as encoder \r\n soup = BeautifulSoup(responce.text, 'lxml')\r\n # List to store Hero Names and Quick Scores \r\n heroes = []\r\n # Loop Through each HTML tag under '<div>' : class: 'name' and look for name contents\r\n # In children, decode and output contents \r\n for parent in soup.find_all('div', {'class': 'name' }): # Specify the parent classes name, type(bs4.element.Tag)\r\n for child in parent.findChildren('a', recursive = False): # Access all of its children, store inside child var type(bs4.element.Tag) \r\n heroes.append(child.decode_contents()) # Get the contents of the child, add to the heroes list type(str)\r\n \r\n quick_scores = [] # To Store the quickscores \r\n # Loop Through each HTML tag under 'div' : class: group special and look for name \r\n #contents In children, decode and output contents, \r\n for parent in soup.find_all('div', {'class': 'group special' }):\r\n children = parent.findChildren('div', recursive = False)\r\n if not 'padded' in children[1].get('class'):\r\n quick_scores.append(children[1].findChildren('div', {'class': 'value' }, recursive = False)[0].decode_contents())\r\n \r\n player_image_link =\"\" \r\n\r\n # Get the profile Icon of the player\r\n for link in soup.find_all('div', {'class': 'image-with-corner' }):\r\n images = link.find_all('img')\r\n for img in images:\r\n if \"image-player\" in img['class']: \r\n player_image_link = img['src']\r\n\r\n # Get the number of wins from each hero and overall number of wins by the player\r\n # This time using regex, because why not :>\r\n temp = re.findall(\"<span class=\\\"color-stat-win\\\">[0-9]+</span>\", responce.text)\r\n i = 0\r\n hero_wins = []\r\n for elt in temp: \r\n if i < len(quick_scores)+1:\r\n val = re.sub(\"[^0-9]\", \"\", elt)\r\n hero_wins.append(val)\r\n i = i+1\r\n \r\n player.total_wins = hero_wins[0] # First item is Overall wins by player so far\r\n hero_wins.pop(0) \r\n player.hero_wins = hero_wins # other elements are wins from heroes\r\n \r\n # Convert scores to numeric format i.e 11,534 to 11534\r\n numeric_scores = []\r\n for x in quick_scores:\r\n numeric_scores.append(int(x.replace(',', '')))\r\n \r\n player.battle_tag = battleTag\r\n player.heroes = heroes\r\n player.quick_scores = numeric_scores\r\n player.player_logo = player_image_link", "def get_player_win_loss_stats(player_name: str) -> PlayerWinLossRecords:\n parsed_name = 
parse_player_name(player_name)\n player_bio = get_player_bio(parsed_name)\n # try:\n # if player_link_cache.__contains__(parsed_name):\n # print(list(map(lambda x: x[0], player_link_cache.__iter__())))\n # player_bio = player_link_cache[parsed_name]\n # else:\n # player_bio = get_player_bio(parsed_name)\n # except ValueError as e:\n # logError(e)\n # # return empty records object\n # return PlayerWinLossRecords()\n player_win_loss_records = {}\n win_loss_types = [\"tour\", \"challenger\", \"itf\"]\n for win_loss_type in win_loss_types:\n player_win_loss_records[\n win_loss_type] = get_player_win_loss_stats_for_tour(\n parsed_name, tour_type=win_loss_type)\n return PlayerWinLossRecords(**player_win_loss_records)", "def makeMatchupData(fantasyTeams, week):\n\t# this parsing will get messed up if a team doesn't have a full bench.\n\tmatchups = []\n\tmatchupsNicknames = []\n\tfor team in fantasyTeams:\n\t\tif team not in matchups:\n\t\t\tmatchups.append(team)\n\t\t\tmatchupsNicknames.append(fantasyTeams[team].nickname)\n\t\t\tmatchups.append(fantasyTeams[team].matchup[week])\n\t\t\tmatchupsNicknames.append(fantasyTeams[fantasyTeams[team].matchup[week]].nickname)\n\tteamsPoints = []\n\tfor team in matchups:\n\t\tteamPoints = []\n\t\tteamRoster = fantasyTeams[team].roster[week]\n\t\t#print(team, len(teamRoster))\n\t\tteamPoints.append(teamRoster[0].points[week]) # qb\n\t\tteamPoints.append(teamRoster[1].points[week] + teamRoster[2].points[week] + teamRoster[3].points[week]) # wrs\n\t\tteamPoints.append(teamRoster[4].points[week] + teamRoster[5].points[week]) # rbs\n\t\tteamPoints.append(teamRoster[6].points[week]) # te\n\t\tteamPoints.append(teamRoster[7].points[week]) # flx\n\t\tif teamRoster[len(teamRoster)-1].position == 'DEF':\n\t\t\tteamPoints.append(teamRoster[len(teamRoster)-2].points[week]) # k\n\t\t\tteamPoints.append(teamRoster[len(teamRoster)-1].points[week]) # def\n\t\telse: # if there's an IR spot\n\t\t\tteamPoints.append(teamRoster[len(teamRoster)-3].points[week]) # k\n\t\t\tteamPoints.append(teamRoster[len(teamRoster)-2].points[week]) # def\n\t\tteamsPoints.append(teamPoints)\n\tfor i in range(len(teamsPoints)):\n\t\tfor j in range(len(teamsPoints[0])):\n\t\t\tif teamsPoints[i][j]<-20:\n\t\t\t\tteamsPoints[i][j]=-20\n\treturn matchups, matchupsNicknames, teamsPoints", "def getAllPlayerInfoWatchlistFull(self):\n status = self.checkState(\"watchlist\")\n if status:\n try:\n players_on_page = self.driver.find_elements_by_tag_name(\n \"li.listFUTItem\")\n # page = self.driver.find_elements_by_tag_name(\"h1.title\")\n page = self.driver.find_element(\n By.XPATH, \"/html/body/main/section/section/div[1]/h1\").text\n\n playerdata = []\n playernumber = 1\n sum_of_all_current_bids_on_watchlist = 0\n for card in players_on_page:\n # Only look at top 5 players\n bidstatus = card.get_attribute(\"class\")\n cardinfo = card.text.splitlines()\n\n # If user is on transfer list (from old implementation)\n if (len(cardinfo) == 15):\n rating = cardinfo[0]\n name = cardinfo[2]\n startprice = 0\n curbid_or_finalsoldprice = 0\n buynow = 0\n time = 0\n\n rating = int(rating)\n # print(\"Location: TRANSFERLIST || Player Unlisted\")\n else:\n rating = cardinfo[0]\n name = cardinfo[2]\n startprice = cardinfo[16]\n curbid_or_finalsoldprice = cardinfo[18]\n buynow = cardinfo[20]\n time = cardinfo[22]\n\n # clean rating\n rating = int(rating)\n\n # clean timeremaining\n seconds = 0\n if \"<5\" in time:\n return \"processing\"\n elif \"<10\" in time:\n seconds = 10\n elif \"<15\" in time:\n seconds = 15\n elif 
\"<30\" in time:\n seconds = 30\n elif \"1 Minute\" in time:\n seconds = 60\n elif \"Minutes\" in time:\n time = time[:-8]\n time = int(time)\n time = 60*time\n seconds = time\n elif \"Expired\" in time:\n seconds = -5\n\n # If any player is processing, just return\n elif \"Processing\" in time:\n seconds = -5\n return \"processing\"\n else:\n print(\"weird, assume it is >1 hour\")\n seconds = 60*65\n\n time = int(seconds)\n\n # clean startprice\n if \",\" in startprice:\n startprice = startprice.replace(\",\", \"\")\n\n startprice = int(startprice)\n\n # clean current bid or finalsoldprice\n if \"---\" in curbid_or_finalsoldprice:\n curbid_or_finalsoldprice = startprice-50\n elif \",\" in curbid_or_finalsoldprice:\n curbid_or_finalsoldprice = curbid_or_finalsoldprice.replace(\n \",\", \"\")\n\n curbid_or_finalsoldprice = int(\n curbid_or_finalsoldprice)\n sum_of_all_current_bids_on_watchlist += curbid_or_finalsoldprice\n\n # clean buy now\n if \",\" in buynow:\n buynow = buynow.replace(\",\", \"\")\n buynow = int(buynow)\n\n id = self.getPlayerID(name, rating)\n if (id == 0):\n log_event(self.queue, \"Error - ID not found in Targets, general id search found for name \" + str(\n name) + \" rating\" + str(rating))\n info = [playernumber, bidstatus, rating, name,\n startprice, curbid_or_finalsoldprice, buynow, time, id]\n playerdata.append(info)\n playernumber += 1\n self.user_sum_of_all_current_bids_on_watchlist = sum_of_all_current_bids_on_watchlist\n\n return playerdata\n except:\n # If method reaches here, the first card on watchlist likely dissappeared in the middle of parsing\n return \"processing\"", "def nflweeklyleaders(self, irc, msg, args):\n \n url = self._b64decode('aHR0cDovL2VzcG4uZ28uY29tL25mbC93ZWVrbHkvbGVhZGVycw==')\n\n try:\n req = urllib2.Request(url)\n html = (urllib2.urlopen(req)).read()\n except:\n irc.reply(\"Failed to open: %s\" % url)\n return\n\n html = html.replace('class=\"oddrow','class=\"evenrow')\n\n soup = BeautifulSoup(html)\n weeklytitle = soup.find('h1', attrs={'class':'h2'}).renderContents().strip()\n tables = soup.findAll('table', attrs={'class':'tablehead'})\n\n object_list = []\n\n for table in tables:\n statcategory = table.find('tr', attrs={'class':'stathead'}).find('td')\n rows = table.findAll('tr', attrs={'class': re.compile('evenrow.*')})\n for row in rows:\n player = row.find('td', attrs={'align':'left'})\n team = player.findNext('td') \n d = collections.OrderedDict()\n d['category'] = statcategory.renderContents().strip()\n d['player'] = str(player.text.replace('.','. 
'))\n d['team'] = team.renderContents().strip()\n object_list.append(d)\n \n passinglist = []\n rushinglist = []\n receivinglist = []\n defensivelist = []\n\n for each in object_list:\n if each['category'] == \"Passing Leaders\":\n passinglist.append(each['player'] + \"(\" + each['team'] + \")\")\n if each['category'] == \"Rushing Leaders\":\n rushinglist.append(each['player'] + \"(\" + each['team'] + \")\")\n if each['category'] == \"Receiving Leaders\":\n receivinglist.append(each['player'] + \"(\" + each['team'] + \")\") \n if each['category'] == \"Defensive Leaders\":\n defensivelist.append(each['player'] + \"(\" + each['team'] + \")\")\n \n irc.reply(ircutils.mircColor(weeklytitle, 'red'))\n irc.reply(ircutils.bold(\"Passing Leaders: \") + string.join([item for item in passinglist], \" | \"))\n irc.reply(ircutils.bold(\"Rushing Leaders: \") + string.join([item for item in rushinglist], \" | \"))\n irc.reply(ircutils.bold(\"Receiving Leaders: \") + string.join([item for item in receivinglist], \" | \"))\n irc.reply(ircutils.bold(\"Defensive Leaders: \") + string.join([item for item in defensivelist], \" | \"))", "def getAllPlayerInfoWatchlist(self):\n status = self.checkState(\"watchlist\")\n if status:\n try:\n players_on_page = self.driver.find_elements_by_tag_name(\n \"li.listFUTItem\")\n # page = self.driver.find_elements_by_tag_name(\"h1.title\")\n page = self.driver.find_element(\n By.XPATH, \"/html/body/main/section/section/div[1]/h1\").text\n\n playerdata = []\n playernumber = 1\n sum_of_all_current_bids_on_watchlist = 0\n for card in players_on_page:\n # Only look at top 5 players\n if playernumber < 6:\n bidstatus = card.get_attribute(\"class\")\n cardinfo = card.text.splitlines()\n\n # If user is on transfer list (from old implementation)\n if (len(cardinfo) == 15):\n rating = cardinfo[0]\n name = cardinfo[2]\n startprice = 0\n curbid_or_finalsoldprice = 0\n buynow = 0\n time = 0\n\n rating = int(rating)\n # print(\"Location: TRANSFERLIST || Player Unlisted\")\n else:\n rating = cardinfo[0]\n name = cardinfo[2]\n startprice = cardinfo[16]\n curbid_or_finalsoldprice = cardinfo[18]\n buynow = cardinfo[20]\n time = cardinfo[22]\n\n # clean rating\n rating = int(rating)\n\n # clean timeremaining\n seconds = 0\n if \"<5\" in time:\n return \"processing\"\n elif \"<10\" in time:\n seconds = 10\n elif \"<15\" in time:\n seconds = 15\n elif \"<30\" in time:\n seconds = 30\n elif \"1 Minute\" in time:\n seconds = 60\n elif \"Minutes\" in time:\n time = time[:-8]\n time = int(time)\n time = 60*time\n seconds = time\n elif \"Expired\" in time:\n seconds = -5\n\n # If any player is processing, just return\n elif \"Processing\" in time:\n seconds = -5\n return \"processing\"\n else:\n print(\"weird, assume it is >1 hour\")\n seconds = 60*65\n\n time = int(seconds)\n\n # clean startprice\n if \",\" in startprice:\n startprice = startprice.replace(\",\", \"\")\n\n startprice = int(startprice)\n\n # clean current bid or finalsoldprice\n if \"---\" in curbid_or_finalsoldprice:\n curbid_or_finalsoldprice = startprice-50\n elif \",\" in curbid_or_finalsoldprice:\n curbid_or_finalsoldprice = curbid_or_finalsoldprice.replace(\n \",\", \"\")\n\n curbid_or_finalsoldprice = int(\n curbid_or_finalsoldprice)\n sum_of_all_current_bids_on_watchlist += curbid_or_finalsoldprice\n\n # clean buy now\n if \",\" in buynow:\n buynow = buynow.replace(\",\", \"\")\n buynow = int(buynow)\n\n id = self.getPlayerID(name, rating)\n if (id == 0):\n log_event(self.queue, \"Error - ID not found in Targets, general id 
search found for name \" + str(\n name) + \" rating\" + str(rating))\n info = [playernumber, bidstatus, rating, name,\n startprice, curbid_or_finalsoldprice, buynow, time, id]\n playerdata.append(info)\n playernumber += 1\n self.user_sum_of_all_current_bids_on_watchlist = sum_of_all_current_bids_on_watchlist\n\n return playerdata\n except:\n # If method reaches here, the first card on watchlist likely dissappeared in the middle of parsing\n return \"processing\"", "def get_team_stats(players: list[Player]) -> dict[int]:\n\n team_stats = {}\n\n total_reaction = 0\n total_mechanical_skill = 0\n total_tactical_skill = 0\n total_game_knowledge = 0\n total_xp = 0\n\n for player in players:\n total_reaction += player.reaction\n total_mechanical_skill += player.mechanical_skill\n total_tactical_skill += player.tactical_skill\n total_game_knowledge += player.game_knowledge\n total_xp += player.xp\n\n team_stats.update(\n {\"reaction\": total_reaction,\n \"mechanical_skill\": total_mechanical_skill,\n \"tactical_skill\": total_tactical_skill,\n \"game_knowledge\": total_game_knowledge,\n \"xp\": total_xp})\n\n return team_stats", "def basic_player_stats(\n self, player_name: str,\n platform: ALPlatform,\n skip_tracker_rank=False) -> list:\n params: dict = {'platform': platform.value, 'player': player_name}\n if skip_tracker_rank:\n params.update({'skipRank': True})\n return self._make_request(additional_params=params)", "async def get_chapel_week(ctx, week_num):\n # week number must be none or a digit.\n if week_num is not None and not week_num.isdigit():\n return\n\n # get chapel schedule.\n contents = json_to_dict('json_files/chapel/schedule.json')\n\n schedule = []\n for week in contents:\n if week_num is not None and week != f'Week {week_num}':\n continue\n\n schedule.append('')\n schedule.append(f'__**{week}**__')\n\n # get chapel information for each week.\n for date in contents[week]:\n day_of_week = contents[week][date]['day_of_week']\n speaker = contents[week][date]['speaker']\n\n schedule.append(f'**{date}** [{day_of_week}] - *{speaker}*')\n\n # print chapel schedule.\n separator = '\\n'\n description = separator.join(schedule)\n\n # print error message.\n if len(description) == 0:\n await send_embed(ctx, title=get_chapel_title(), text=f'*no scheduled chapel for week {week_num}.*')\n\n # display chapel information.\n await send_embed(ctx, title=get_chapel_title(), text=description)", "def get_player_win_loss_stats_for_tour(player_name: str,\n tour_type: str = 'tour'\n ) -> PlayerWinLossRecord:\n parsed_name = parse_player_name(player_name)\n stats_for_tour = None\n player_bio = get_player_bio(parsed_name)\n print(player_bio)\n player_id = player_bio.split('/')[-2]\n # try:\n # if player_link_cache.__contains__(parsed_name):\n # print(list(map(lambda x: x[0], player_link_cache.__iter__())))\n # player_bio = player_link_cache[parsed_name]\n # else:\n # player_bio = get_player_bio(parsed_name)\n # player_id = player_bio.split('/')[-2]\n # except ValueError as e:\n # logError(e)\n # # return empty records object\n # return PlayerWinLossRecord()\n url = PLAYER_WIN_LOSS_URLS.format(tour_type, player_id)\n soup = get_parsed_site_content(url)\n classes = [MatchRecord, PressurePoints, Environment, Other]\n # start parsing\n megaTables = soup.select('.mega-table')\n player_win_loss_record = {}\n for megaTable in megaTables:\n thead_rows = megaTable.select('thead>th')\n tbody_rows = megaTable.select('tbody>tr')\n # if we are dealing with the Match Record sub table\n first_thead = 
megaTable.select_one('thead>th')\n if first_thead.parent.parent['class'][0] == 'mega-table':\n # if any(th.text.strip() == 'Match Record' for th in thead_rows):\n wl_stat_collection = {}\n for row in tbody_rows:\n tds = row.select('td')\n tdone = row.select_one('td')\n if not tdone.parent.parent.parent['class'][\n 0] == 'inner-win-loss-cells':\n parsed_cat = parse_category_label(tdone.text)\n wl_stat = parse_win_loss_stat_row(row)\n wl_stat_collection[parsed_cat] = wl_stat\n\n # decide which class we are dealing with\n for class_type in classes:\n if space_regex.sub(\n '', first_thead.text.strip()) == class_type.__name__:\n name = class_type.__name__\n player_win_loss_record[name] = class_type(\n **wl_stat_collection)\n break\n win_loss_object = PlayerWinLossRecord(**player_win_loss_record)\n return win_loss_object", "def fetch_players_stats():\n players_scraper = PlayerStatsScraper(API_URL, API_HEADERS)\n result = players_scraper.save_objects()\n return result", "def get_player_games(self, year, use_local=True):", "def playerStandings():\n\n getPlayers = \"SELECT id, name, wins, matches FROM playerstats ORDER BY wins DESC\"\n players = executeQuery({'dbname': 'tournament', 'query' : getPlayers, 'type' : 'find'})\n return players", "def get_rollover_weeks(shop):\n d = {}\n ods, r = get_rollovers(shop)\n\n for od in ods:\n week = int(od.eta.strftime('%W'))+1\n if d.has_key(week):\n d[week] += int(od.plan)\n else:\n d[week] = int(od.plan)\n\n # remove the pulled from this week\n this_week = int(datetime.datetime.today().strftime('%W'))+1 \n if d.has_key(this_week):\n d[this_week] = d[this_week] - get_pulled(shop)[1] \n\n # build the return list of (week, '00:00') tuples\n l = []\n d = sorted(d.items()) # sort dictionary by week\n for key, minutes in d:\n formatted_time = _get_display_hours(minutes)\n l.append((key,formatted_time))\n\n return l", "def getPlayerAdvStat(self, stat, year = 2014):\r\n \r\n year_next = (year % 100) + 1\r\n season = str(year) + '-' + str(year_next)\r\n \r\n stat_call = stat.lower()\r\n stat_dict = {'touch':'Possessions', 'possession':'Possessions',\r\n 'speed':'SpeedDistance', 'distance':'SpeedDistance'}\r\n \r\n stat_url = 'http://stats.nba.com/stats/leaguedashptstats?College=&'\\\r\n 'Conference=&Country=&DateFrom=&DateTo=&Division=&'\\\r\n 'DraftPick=&DraftYear=&GameScope=&Height=&LastNGames=0&'\\\r\n 'LeagueID=00&Location=&Month=0&OpponentTeamID=0&Outcome=&'\\\r\n 'PORound=0&PerMode=PerGame&PlayerExperience=&PlayerOr'\\\r\n 'Team=Player&PlayerPosition=&PtMeasureType=' + \\\r\n stat_dict[stat_call] + '&Season=' + season + \\\r\n '&SeasonSegment=&SeasonType=Regular+Season&StarterBench=&'\\\r\n 'TeamID=0&VsConference=&VsDivision=&Weight='\r\n \r\n response = requests.get(stat_url)\r\n data = json.loads(response.text)\r\n \r\n headers = data['resultSets'][0]['headers']\r\n stat_data = data['resultSets'][0]['rowSet']\r\n advStat_df = pd.DataFrame(stat_data,columns=headers) \r\n \r\n return advStat_df", "def getPlayerBaseStat(self, year = 2014):\r\n \r\n year_next = (year % 100) + 1\r\n season = str(year) + '-' + str(year_next)\r\n \r\n stat_url = 'http://stats.nba.com/stats/leaguedashplayerstats?College=&'\\\r\n 'Conference=&Country=&DateFrom=&DateTo=&Division=&'\\\r\n 'DraftPick=&DraftYear=&GameScope=&GameSegment=&Height=&'\\\r\n 'LastNGames=0&LeagueID=00&Location=&MeasureType=Base&Month=0&'\\\r\n 'OpponentTeamID=0&Outcome=&PORound=0&PaceAdjust=N&'\\\r\n 'PerMode=PerGame&Period=0&PlayerExperience=&PlayerPosition=&'\\\r\n 'PlusMinus=N&Rank=N&Season='+ season + 
'&SeasonSegment=&'\\\r\n 'SeasonType=Regular+Season&ShotClockRange=&StarterBench=&'\\\r\n 'TeamID=0&VsConference=&VsDivision=&Weight='\r\n \r\n response = requests.get(stat_url)\r\n data = json.loads(response.text)\r\n \r\n headers = data['resultSets'][0]['headers']\r\n stat_data = data['resultSets'][0]['rowSet']\r\n baseStat_df = pd.DataFrame(stat_data,columns=headers) \r\n \r\n return baseStat_df", "def weekly():", "def playerStandings():\n # place all players in a dictionary\n player_dict = {}\n conn, c = connect()\n c.execute(\"\"\"SELECT * FROM players;\"\"\")\n for row in c.fetchall():\n player_dict[row[0]] = [row[1], 0, 0]\n\n # count the number of win and matches in for all matches\n c.execute(\"\"\"SELECT winner, loser FROM matches;\"\"\")\n for row in c.fetchall():\n if row[0] in player_dict:\n player_dict[row[0]][1] += 1\n player_dict[row[0]][2] += 1\n if row[1] in player_dict:\n player_dict[row[1]][2] += 1\n\n # compile win counts as the key to dictionary\n win_count = {}\n for i in player_dict:\n wins = player_dict[i][1]\n if wins in win_count:\n win_count[wins].append((i, player_dict[i][0],\n wins, player_dict[i][2]))\n else:\n win_count[wins] = [(i, player_dict[i][0],\n wins, player_dict[i][2])]\n\n # compile output list\n output_list = []\n for i in sorted(win_count.keys(), reverse=True):\n for j in win_count[i]:\n output_list.append(j)\n\n return output_list", "def playerStandings(t_name):\n t_id = getTournamentID(t_name, False)\n if t_id == -1:\n return []\n conn, cur = connect()\n cur.execute(\"SELECT create_summary();\")\n conn.commit()\n query = \"SELECT P_ID, P_NAME, WIN, MATCH FROM SUMMARY WHERE T_ID = %s\"\n param = (t_id, )\n cur.execute(query, param)\n ps = [(int(row[0]), str(row[1]), int(row[2]), int(row[3]))\n for row in cur.fetchall()]\n return ps", "def playerStandings():\n db_conn = connect()\n db_cursor = db_conn.cursor()\n db_cursor.execute(\"select player_id, player_name, wins, (wins + losses) as total_played from normalized_wins_and_losses order by wins desc, total_played desc;\")\n player_standings = db_cursor.fetchall()\n db_conn.commit()\n db_conn.close()\n return player_standings", "def CreateWeeklyResults(WeeklyScoresLadderfile: str, WeeklyScoresBracketfile: str,\r\n WeeklyResultsfile: str,week: int) -> None:\r\n WSL = pd.read_csv(WeeklyScoresLadderfile, encoding = \"ISO-8859-1\")\r\n WSB = pd.read_csv(WeeklyScoresBracketfile, encoding = \"ISO-8859-1\")\r\n WR = WSL # WeeklyResults\r\n\r\n WR['Placement'] = -1\r\n WR['Floated'] = 0\r\n RF.LimitLadderWins(WR)\r\n\r\n count = 0\r\n for index, row in WSB.iterrows(): # Add bracket player results to ladder\r\n inLadder = WR[WR['SmasherID'].isin([row['SmasherID']])]\r\n if len(inLadder) > 0: # Did the player enter ladder\r\n index = inLadder.index[0]\r\n WR.at[index, 'Wins'] = WR['Wins'][index] + row['Wins']\r\n WR.at[index, 'Losses'] = WR['Losses'][index] + row['Losses']\r\n WR.at[index, 'Placement'] = row['Placement']\r\n else:\r\n new_row = {'SmasherID': row['SmasherID'], # Player did not enter ladder\r\n 'SmashTag': row['SmashTag'],\r\n 'Coast': 'NOTAV',\r\n 'Wins': row['Wins'],\r\n 'Losses': row['Losses'],\r\n 'LimitLadderWins': 0,\r\n 'Prospect': 0,\r\n 'Rookie': 0,\r\n 'Pro': 0,\r\n 'AllStar': 0,\r\n 'HallOfFame': 0,\r\n 'Floated': 1,\r\n 'Placement': row['Placement']}\r\n WR = WR.append(new_row, ignore_index=True)\r\n\r\n WR = WR[['SmasherID', 'SmashTag', 'Coast', 'Wins', 'Losses',\r\n 'LimitLadderWins', 'Prospect', 'Rookie', 'Pro',\r\n 'AllStar', 'HallOfFame', 'Floated', 'Placement']]\r\n WR = 
WR.sort_values(by = 'SmasherID')\r\n WR.to_csv(WeeklyResultsfile, index=False, encoding = \"ISO-8859-1\")", "def week(update: Update, _: CallbackContext) -> None:\n running_total, average_dose_per_day = return_weekly_figure()\n text = \\\n (\n \"\\n📅 *Rolling 7 Day Stats*\\n\" \n + \"\\n\\t\\t\\t📈 Rolling 7 Day Doses - \" + str('{:,}'.format(running_total))\n + \"\\n\\t\\t\\t💉 Average Daily Doses - \" + str('{:,}'.format(average_dose_per_day)) \n )\n update.message.reply_markdown(text)\n logger.info(\"Getting week update for \" + str(update.message.chat_id))" ]
[ "0.85106057", "0.6774097", "0.67719406", "0.6636765", "0.6384417", "0.6382811", "0.6193241", "0.616072", "0.6121495", "0.60404086", "0.60138744", "0.59989476", "0.59980756", "0.5972492", "0.5948775", "0.58840317", "0.58802027", "0.5877149", "0.5872693", "0.58488315", "0.5838913", "0.5822778", "0.581277", "0.5802869", "0.57781464", "0.5726798", "0.5725466", "0.5710865", "0.56694615", "0.5624315" ]
0.8211842
1
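Several negatives in the record above (getPlayerAdvStat, getPlayerBaseStat) follow one pattern: request a JSON stats endpoint, read the column headers and rows out of resultSets[0], and build a pandas DataFrame. The condensed sketch below is distilled from those snippets rather than taken from the dataset row; it assumes the endpoint returns the same resultSets shape and ignores any authentication or request-header requirements.

import requests
import pandas as pd

def fetch_stats_table(url: str) -> pd.DataFrame:
    # Download the endpoint and decode the JSON payload.
    data = requests.get(url).json()
    # The stats endpoints used above keep column names and row values in resultSets[0].
    headers = data['resultSets'][0]['headers']
    rows = data['resultSets'][0]['rowSet']
    return pd.DataFrame(rows, columns=headers)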
Build and display svg view for current tab.
def refresh_svg_canvas(self): if self.ui.tabWidget.currentIndex() == 0: self.ui.svg_canvas.build_schematic() self.ui.svg_canvas.viewport().update() elif self.ui.tabWidget.currentIndex() in (1,2): self.ui.svg_canvas.build_pcb() self.ui.svg_canvas.viewport().update() else: raise Exception("Unknown view to draw")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw(self, stats=[]):\n clear_output(wait=True)\n svg_html = self.to_html(stats)\n display(svg_html)", "def _repr_svg_(self):\n pass", "def _repr_svg_(self):\n if not IPythonConsole.ipython_useSVG:\n return None\n mol = self.owner.mol\n keku = IPythonConsole.kekulizeStructures\n size = IPythonConsole.molSize\n opts = IPythonConsole.drawOptions\n return Draw._moltoSVG(\n mol, size, self.aix, \"\", keku, drawOptions=opts, highlightBonds=self.bix\n )", "def _repr_svg_(self):\n if not IPythonConsole.ipython_useSVG:\n return None\n mol = self.owner.mol\n keku = IPythonConsole.kekulizeStructures\n size = IPythonConsole.molSize\n opts = IPythonConsole.drawOptions\n return Draw._moltoSVG(\n mol, size, self.aix, \"\", keku, drawOptions=opts, highlightBonds=self.bix\n )", "def show_svg(tmp_path = DEFAULT_PATH): \n global show_counter\n file_name = tmp_path + \"show_tmp_file_{}.svg\".format(show_counter)\n plt.savefig(file_name)\n os.system(\"open {}\".format(file_name))\n show_counter += 1\n plt.close()", "def __make_svg(self):\n if not self._items:\n return None\n\n # define call back functions for node format, href, subgraph\n def fnc_node_format(n):\n if (n.type, n.output_name, n.task_name, n.shard_idx) in self._items:\n return self._items[(n.type, n.output_name, n.task_name, n.shard_idx)][0]\n else:\n return None\n\n def fnc_href(n):\n if (n.type, n.output_name, n.task_name, n.shard_idx) in self._items:\n return self._items[(n.type, n.output_name, n.task_name, n.shard_idx)][1]\n else:\n return None\n\n def fnc_subgraph(n):\n if (n.type, n.output_name, n.task_name, n.shard_idx) in self._items:\n return self._items[(n.type, n.output_name, n.task_name, n.shard_idx)][2]\n else:\n return None\n\n # convert to dot string\n dot_str = self._dag.to_dot(\n fnc_node_format=fnc_node_format,\n fnc_href=fnc_href,\n fnc_subgraph=fnc_subgraph,\n template=self._template_d,\n )\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n # temporary dot, svg from graphviz.Source.render\n tmp_dot = os.path.join(tmp_dir, '_tmp_.dot')\n\n try:\n svg = Source(dot_str, format='svg').render(filename=tmp_dot)\n except (ExecutableNotFound, FileNotFoundError):\n logger.error(\n 'Importing graphviz failed. Task graph will not be available. '\n 'Check if you have installed graphviz correctly so that '\n '\"dot\" executable exists on your PATH. '\n '\"pip install graphviz\" does not install such \"dot\". '\n 'Use apt or system-level installer instead. '\n 'e.g. 
sudo apt-get install graphviz.'\n )\n return None\n\n # save to DOT\n uri_dot = os.path.join(\n self._out_dir,\n CrooHtmlReportTaskGraph.TASK_GRAPH_DOT.format(\n workflow_id=self._workflow_id\n ),\n )\n AutoURI(uri_dot).write(dot_str, no_lock=True)\n\n # save to SVG\n uri_svg = os.path.join(\n self._out_dir,\n CrooHtmlReportTaskGraph.TASK_GRAPH_SVG.format(\n workflow_id=self._workflow_id\n ),\n )\n svg_contents = AutoURI(svg).read()\n AutoURI(uri_svg).write(svg_contents, no_lock=True)\n\n return svg_contents", "def init_svg(self):\n self.svg = self.doc.createElement('svg')\n halfwidth = self.radius+self.daytick_space+self.daytick_monthsize+\\\n self.padding\n dimension = 2*halfwidth\n attr = {'xmlns':'http://www.w3.org/2000/svg', 'version':'1.1',\n 'xmlns:xlink':'http://www.w3.org/1999/xlink',\n 'viewBox':'0 0 %d %d'%(dimension,dimension),\n 'height':'%din'%self.inches, 'width':'%din'%self.inches, \n 'preserveAspectRatio':'xMinYMid meet',\n 'stroke':'black', 'fill':'none',\n 'font-family':'Arial', 'font-size':10}\n for k,v in attr.items(): self.svg.setAttribute(k,conv(v))\n # Create the clipping path for the interior region of the chart.\n self.defs = self.make_element(self.svg, 'defs')\n clip = self.make_element(\n self.defs, 'clipPath', ('id', 'innerClipPath'))\n self.make_element(\n clip, 'circle', ('cx',0), ('cy',0), ('r',self.radius))\n # Make 0,0 the center of the circle.\n self.centered = self.doc.createElement('g')\n self.centered.setAttribute('transform','translate(%d,%d)'%(\n 2*(halfwidth,)))\n self.svg.appendChild(self.centered)", "def wrap_in_html(self,svgofmodel):\n html= '''<html>\\n%s\\n%s\\n%s\\n</g></g></g></svg></body></html>\\n'''\n svgbody= '''<body onload=\"javascript:setTimeout(&quot;location.reload(true);&quot;,%d);\">\\n''' % self.vrefreshms\n svgbody += \"<h4>GeoGad</h4>\"\n svghead= '<svg xmlns=\"http://www.w3.org/2000/svg\" version=\"1.2\" baseProfile=\"tiny\" width=\"%dpx\" height=\"%dpx\">\\n'\n svghead= svghead % (self.vboxX,self.vboxY)\n svghead+= '<rect x=\"1\" y=\"1\" width=\"%d\" height=\"%d\" fill=\"none\" stroke=\"blue\" stroke-width=\"4\"/>\\n'% (self.vboxX,self.vboxY)\n svghead+= '<g fill=\"none\" stroke=\"black\" stroke-width=\"%0.2f\">\\n' % self.vlinewidth\n svghead+= '<g transform=\"scale(%0.2f,%0.2f)\">\\n' % (self.vscaleX,self.vscaleY)\n svghead+= '<g transform=\"translate(%0.2f,%0.2f)\">\\n' % (self.vtranX,self.vtranY)\n return html % (svgbody,svghead,svgofmodel)", "def ctab2svg():\n\n data = request.files.values()[0].file.read() if len(request.files) else request.body.read()\n return ctab2svgView(data, request.params)", "def render_tab_content(active_tab):\n if active_tab is not None:\n if active_tab == \"graph1\":\n return dcc.Graph(figure=example_graph1, id='graph')\n elif active_tab == \"graph2\":\n return dcc.Graph(figure=example_graph2, id='graph')\n elif active_tab == \"graph3\":\n return dcc.Graph(figure=example_graph3, id='graph')\n return \"No tab selected\"", "def get_xml(self):\n profile = self.profile\n version = self.version\n #self.attribs['xmlns'] = \"http://www.w3.org/2000/svg\"\n self.attribs['xmlns:xlink'] = \"http://www.w3.org/1999/xlink\"\n self.attribs['xmlns:ev'] = \"http://www.w3.org/2001/xml-events\"\n\n self.attribs['baseProfile'] = profile\n self.attribs['version'] = version\n return super(Drawing, self).get_xml()", "def ctab2svg(ctab):\n\n data = base64.urlsafe_b64decode(ctab)\n return ctab2svgView(data, request.params)", "def to_svg(self, outfile, scaling, precision, attributes):\n outfile.write('<g id=\"')\n 
outfile.write(self.name.replace(\"#\", \"_\"))\n outfile.write('\" ')\n outfile.write(attributes)\n outfile.write(\">\\n\")\n for polygon in self.polygons:\n polygon.to_svg(outfile, scaling, precision)\n for path in self.paths:\n path.to_svg(outfile, scaling, precision)\n for label in self.labels:\n label.to_svg(outfile, scaling, precision)\n for reference in self.references:\n reference.to_svg(outfile, scaling, precision)\n outfile.write(\"</g>\\n\")", "def __merger_svg(self):\n pass", "def use_svg_display(): #@save\n display.set_matplotlib_formats('svg')", "def generate_svg(self, item, type_, filename, locale):\n\n old_locale = item.session_manager.current_locale\n item.session_manager.current_locale = locale\n\n chart = None\n if type_ == 'candidates':\n chart = self.renderer.get_candidates_chart(item, 'svg')\n if type_ == 'connections':\n chart = self.renderer.get_connections_chart(item, 'svg')\n if type_ == 'list-groups':\n chart = self.renderer.get_list_groups_chart(item, 'svg')\n if type_ == 'lists':\n chart = self.renderer.get_lists_chart(item, 'svg')\n if type_ == 'lists-panachage':\n chart = self.renderer.get_lists_panachage_chart(item, 'svg')\n if type_ == 'seat-allocation':\n chart = self.renderer.get_seat_allocation_chart(item, 'svg')\n if type_ == 'party-strengths':\n chart = self.renderer.get_party_strengths_chart(item, 'svg')\n if type_ == 'parties-panachage':\n chart = self.renderer.get_parties_panachage_chart(item, 'svg')\n if type_ == 'entities-map':\n chart = self.renderer.get_entities_map(item, 'svg', locale)\n if type_ == 'districts-map':\n chart = self.renderer.get_districts_map(item, 'svg', locale)\n\n item.session_manager.current_locale = old_locale\n\n if chart:\n path = '{}/{}'.format(self.svg_dir, filename)\n with self.app.filestorage.open(path, 'w') as f:\n copyfileobj(chart, f)\n log.info(\"{} created\".format(filename))\n return 1\n\n return 0", "def render_tab_content(active_tab):\r\n if active_tab is not None:\r\n if active_tab == \"Info\":\r\n return html.Div([html.P('We will remember coronavirus for a long time as our society got affected worldwide adapting to a new normal. It was a global pandemic causing transformations to the daily life. The World Health Organization declared a Public Health Emergency of International Concern regarding COVID-19 on 30 January 2020, and later declared a pandemic on March 2020. We have been in lockdown for more than a year and as off now, May 2021 most of the countries are offering doses of vaccines to their citizens. 
For the final project of MA705 class I wanted to show a dashboard with visualizations using python concepts to represent a summary of data and graphs for Covid-19 vaccination by manufacturer.'),dcc.Graph(figure=example_graph1, id='graph')])\r\n elif active_tab == \"USA\":\r\n return dcc.Graph(figure=example_graph2, id='graph') \r\n elif active_tab == \"Daily vaccinations\":\r\n return dcc.Graph(figure=example_graph3, id='graph')\r\n elif active_tab == \"Manufacturer\":\r\n return dcc.Graph(figure=example_graph4, id='graph')\r\n elif active_tab == \"Top 5\":\r\n return dcc.Graph(figure=example_graph5, id='graph') \r\n return \"No tab selected\"", "def svg(self) -> str:\n data = {\n 'x': self.x,\n 'y': self.y,\n 'width': self.width,\n 'height': self.height,\n 'text_x': self.x + 30,\n 'text_y': self.y + 20,\n 'name': self.person.name\n }\n return PERSON_BOX_TEMPLATE.format(**data)", "def view():\r\n # collect figures in list\r\n figures = list(map(plt.figure, plt.get_fignums()))\r\n # start app\r\n app = QtWidgets.QApplication(sys.argv)\r\n main = Main()\r\n\r\n if figures:\r\n for count, figure in enumerate(figures):\r\n # main names for figures\r\n name = f\"{figure.number}\"\r\n # aliases for figures\r\n titles = [figure.axes[0].get_title(loc=i) for i in [\r\n \"left\", \"center\", \"right\"]]\r\n titles = [i for i in titles if i]\r\n title = f\"{count+1}- {titles[0]}\" if titles else \"\"\r\n axes_labels = f\"{count+1}- {figure.axes[0].get_ylabel()} vs {figure.axes[0].get_xlabel()} \"\r\n fignum = f\"Figure {figure.number}\"\r\n # Append figure to App\r\n main.append_fig(title, axes_labels, fignum, name, figure)\r\n\r\n main.show()\r\n sys.exit(app.exec_())", "def draw(self, context):\n layout = self.layout\n\n pie = layout.menu_pie()\n pie.operator(\"object.view_menu\", text=\"Node Editor\", icon='NODETREE').vp = \"NODE_EDITOR\"\n pie.operator(\"object.view_menu\", text=\"UV Image Editor\", icon='IMAGE_COL').vp = \"IMAGE_EDITOR\"\n pie.operator(\"object.view_menu\", text=\"Video Sequece Editor\", icon='SEQUENCE').vp = \"SEQUENCE_EDITOR\"\n pie.operator(\"object.view_menu\", text=\"Movie Clip Editor\", icon='CLIP').vp = \"CLIP_EDITOR\"", "def simplestExample():\n\n my_svg = drawSVG.SVG()\n return my_svg", "def use_svg_display():\n display.set_matplotlib_formats('svg')", "def use_svg_display():\n display.set_matplotlib_formats('svg')", "def use_svg_display():\n display.set_matplotlib_formats('svg')", "def use_svg_display():\n display.set_matplotlib_formats('svg')", "def use_svg_display():\n display.set_matplotlib_formats('svg')", "def get_svgout(self):\n return tempfile.mktemp(dir=self.tmpdir, suffix='.svg')", "def _repr_svg_(self):\n try:\n return self.mol._repr_svg_()\n except AttributeError:\n return None", "def output_svg(lines, regressions, requested_width, requested_height):\n \n (global_min_x, _), (global_max_x, global_max_y) = bounds(lines)\n max_up_slope, min_down_slope = bounds_slope(regressions)\n \n #output\n global_min_y = 0\n x = global_min_x\n y = global_min_y\n w = global_max_x - global_min_x\n h = global_max_y - global_min_y\n font_size = 16\n line_width = 2\n \n pic_width, pic_height = compute_size(requested_width, requested_height\n , w, h)\n \n def cw(w1):\n \"\"\"Converts a revision difference to display width.\"\"\"\n return (pic_width / float(w)) * w1\n def cx(x):\n \"\"\"Converts a revision to a horizontal display position.\"\"\"\n return cw(x - global_min_x)\n\n def ch(h1):\n \"\"\"Converts a time difference to a display height.\"\"\"\n return -(pic_height / 
float(h)) * h1\n def cy(y):\n \"\"\"Converts a time to a vertical display position.\"\"\"\n return pic_height + ch(y - global_min_y)\n \n print '<!--Picture height %.2f corresponds to bench value %.2f.-->' % (\n pic_height, h)\n print '<svg',\n print 'width=%s' % qa(str(pic_width)+'px')\n print 'height=%s' % qa(str(pic_height)+'px')\n print 'viewBox=\"0 0 %s %s\"' % (str(pic_width), str(pic_height))\n print 'onclick=%s' % qa(\n \"var event = arguments[0] || window.event;\"\n \" if (event.shiftKey) { highlightRevision(null); }\"\n \" if (event.ctrlKey) { highlight(null); }\"\n \" return false;\")\n print 'xmlns=\"http://www.w3.org/2000/svg\"'\n print 'xmlns:xlink=\"http://www.w3.org/1999/xlink\">'\n \n print \"\"\"\n<defs>\n <marker id=\"circleMark\"\n viewBox=\"0 0 2 2\" refX=\"1\" refY=\"1\"\n markerUnits=\"strokeWidth\"\n markerWidth=\"2\" markerHeight=\"2\"\n orient=\"0\">\n <circle cx=\"1\" cy=\"1\" r=\"1\"/>\n </marker>\n</defs>\"\"\"\n \n #output the revisions\n print \"\"\"\n<script type=\"text/javascript\">//<![CDATA[\n var previousRevision;\n var previousRevisionFill;\n var previousRevisionStroke\n function highlightRevision(id) {\n if (previousRevision == id) return;\n\n document.getElementById('revision').firstChild.nodeValue = 'r' + id;\n document.getElementById('rev_link').setAttribute('xlink:href',\n 'http://code.google.com/p/skia/source/detail?r=' + id);\n \n var preRevision = document.getElementById(previousRevision);\n if (preRevision) {\n preRevision.setAttributeNS(null,'fill', previousRevisionFill);\n preRevision.setAttributeNS(null,'stroke', previousRevisionStroke);\n }\n \n var revision = document.getElementById(id);\n previousRevision = id;\n if (revision) {\n previousRevisionFill = revision.getAttributeNS(null,'fill');\n revision.setAttributeNS(null,'fill','rgb(100%, 95%, 95%)');\n \n previousRevisionStroke = revision.getAttributeNS(null,'stroke');\n revision.setAttributeNS(null,'stroke','rgb(100%, 90%, 90%)');\n }\n }\n//]]></script>\"\"\"\n \n def print_rect(x, y, w, h, revision):\n \"\"\"Outputs a revision rectangle in display space,\n taking arguments in revision space.\"\"\"\n disp_y = cy(y)\n disp_h = ch(h)\n if disp_h < 0:\n disp_y += disp_h\n disp_h = -disp_h\n \n print '<rect id=%s x=%s y=%s' % (qa(revision), qa(cx(x)), qa(disp_y),),\n print 'width=%s height=%s' % (qa(cw(w)), qa(disp_h),),\n print 'fill=\"white\"',\n print 'stroke=\"rgb(98%%,98%%,88%%)\" stroke-width=%s' % qa(line_width),\n print 'onmouseover=%s' % qa(\n \"var event = arguments[0] || window.event;\"\n \" if (event.shiftKey) {\"\n \" highlightRevision('\"+str(revision)+\"');\"\n \" return false;\"\n \" }\"),\n print ' />'\n \n xes = set()\n for line in lines.itervalues():\n for point in line:\n xes.add(point[0])\n revisions = list(xes)\n revisions.sort()\n \n left = x\n current_revision = revisions[0]\n for next_revision in revisions[1:]:\n width = (((next_revision - current_revision) / 2.0)\n + (current_revision - left))\n print_rect(left, y, width, h, current_revision)\n left += width\n current_revision = next_revision\n print_rect(left, y, x+w - left, h, current_revision)\n\n #output the lines\n print \"\"\"\n<script type=\"text/javascript\">//<![CDATA[\n var previous;\n var previousColor;\n var previousOpacity;\n function highlight(id) {\n if (previous == id) return;\n\n document.getElementById('label').firstChild.nodeValue = id;\n\n var preGroup = document.getElementById(previous);\n if (preGroup) {\n var preLine = document.getElementById(previous+'_line');\n 
preLine.setAttributeNS(null,'stroke', previousColor);\n preLine.setAttributeNS(null,'opacity', previousOpacity);\n\n var preSlope = document.getElementById(previous+'_linear');\n if (preSlope) {\n preSlope.setAttributeNS(null,'visibility', 'hidden');\n }\n }\n\n var group = document.getElementById(id);\n previous = id;\n if (group) {\n group.parentNode.appendChild(group);\n \n var line = document.getElementById(id+'_line');\n previousColor = line.getAttributeNS(null,'stroke');\n previousOpacity = line.getAttributeNS(null,'opacity');\n line.setAttributeNS(null,'stroke', 'blue');\n line.setAttributeNS(null,'opacity', '1');\n \n var slope = document.getElementById(id+'_linear');\n if (slope) {\n slope.setAttributeNS(null,'visibility', 'visible');\n }\n }\n }\n//]]></script>\"\"\"\n for label, line in lines.items():\n print '<g id=%s>' % qa(label)\n r = 128\n g = 128\n b = 128\n a = .10\n if label in regressions:\n regression = regressions[label]\n min_slope = regression.find_min_slope()\n if min_slope < 0:\n d = max(0, (min_slope / min_down_slope))\n g += int(d*128)\n a += d*0.9\n elif min_slope > 0:\n d = max(0, (min_slope / max_up_slope))\n r += int(d*128)\n a += d*0.9\n \n slope = regression.slope\n intercept = regression.intercept\n min_x = regression.min_x\n max_x = regression.max_x\n print '<polyline id=%s' % qa(str(label)+'_linear'),\n print 'fill=\"none\" stroke=\"yellow\"',\n print 'stroke-width=%s' % qa(abs(ch(regression.serror*2))),\n print 'opacity=\"0.5\" pointer-events=\"none\" visibility=\"hidden\"',\n print 'points=\"',\n print '%s,%s' % (str(cx(min_x)), str(cy(slope*min_x + intercept))),\n print '%s,%s' % (str(cx(max_x)), str(cy(slope*max_x + intercept))),\n print '\"/>'\n \n print '<polyline id=%s' % qa(str(label)+'_line'),\n print 'onmouseover=%s' % qa(\n \"var event = arguments[0] || window.event;\"\n \" if (event.ctrlKey) {\"\n \" highlight('\"+str(label).replace(\"'\", \"\\\\'\")+\"');\"\n \" return false;\"\n \" }\"),\n print 'fill=\"none\" stroke=\"rgb(%s,%s,%s)\"' % (str(r), str(g), str(b)),\n print 'stroke-width=%s' % qa(line_width),\n print 'opacity=%s' % qa(a),\n print 'points=\"',\n for point in line:\n print '%s,%s' % (str(cx(point[0])), str(cy(point[1]))),\n print '\"/>'\n\n print '</g>'\n\n #output the labels\n print '<text id=\"label\" x=\"0\" y=%s' % qa(font_size),\n print 'font-size=%s> </text>' % qa(font_size)\n\n print '<a id=\"rev_link\" xlink:href=\"\" target=\"_top\">'\n print '<text id=\"revision\" x=\"0\" y=%s style=\"' % qa(font_size*2)\n print 'font-size: %s; ' % qe(font_size)\n print 'stroke: #0000dd; text-decoration: underline; '\n print '\"> </text></a>'\n\n print '</svg>'", "def get_session_svg(viz_data):\n \n graph = Dot('graphname', graph_type='digraph')\n \n #loop create all nodes and store by id\n node_dict = {}\n for i, node_data in enumerate(viz_data['nodes']):\n id = node_data['id']\n node_dict[id] = str(i)\n graph.add_node(Node(str(i)))\n \n #add edges by links\n for link_data in viz_data['links']:\n snode = node_dict[viz_data['nodes'][link_data['source']]['id']]\n tnode = node_dict[viz_data['nodes'][link_data['target']]['id']]\n graph.add_edge(Edge(snode, tnode))\n \n #get svg of graph\n file = NamedTemporaryFile()\n graph.write_svg(file.name)\n svg = file.read()\n file.close()\n \n #f = open('/tmp/session/session.svg', 'w')\n #f.write(\"%s\\n\" % svg)\n #f.close()\n\n return svg" ]
[ "0.62321585", "0.6213278", "0.6118039", "0.6118039", "0.6038661", "0.58348596", "0.58015627", "0.5744214", "0.5506525", "0.54956293", "0.5444143", "0.54360074", "0.54353726", "0.54283196", "0.5415451", "0.5410552", "0.53604287", "0.5355877", "0.5347241", "0.52882683", "0.52735734", "0.52583575", "0.52583575", "0.52583575", "0.52583575", "0.52583575", "0.52574706", "0.5256186", "0.5237498", "0.52324176" ]
0.68582183
0
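The refresh_svg_canvas document in the record above dispatches on the current tab index: tab 0 rebuilds the schematic, tabs 1 and 2 rebuild the PCB view, and anything else raises. The framework-free sketch below illustrates that dispatch pattern only; the canvas methods are assumed stand-ins for the Qt calls in the original, not a reimplementation of it.

def refresh_view(tab_index, canvas):
    # Tab 0 shows the schematic; tabs 1 and 2 are PCB views.
    if tab_index == 0:
        canvas.build_schematic()
    elif tab_index in (1, 2):
        canvas.build_pcb()
    else:
        raise ValueError("Unknown view to draw: %r" % tab_index)
    # Ask the widget to repaint once the drawing has been rebuilt.
    canvas.update()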
Adds extra height to schematic body
def on_body_height_add(self, val): val = max(0, int(val)) self.mdl.cmp.s_add_height = val self.refresh_svg_canvas()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def footprint_height():", "def body_resize(self):", "def setHeight(*args):", "def setHeight(*args):", "def setHeight(*args):", "def setHeight(*args):", "def setHeight(*args):", "def setHeight(*args):", "def setHeight(*args):", "def setHeight(*args):", "def setHeight(*args):", "def setHeight(*args):", "def setHeight(*args):", "def height(self, height):\n self.row += height", "def height(self):\n\t\tpass", "def set_height(self):\n\t\toffset = 100\n\t\tself.height = random.randrange(offset, WIN_HEIGHT-offset)", "def __set_height(self, height):\n self._height = height", "def height(self) -> int:", "def height(self) -> int:", "def height(self) -> int:", "def add_body(self):\r\n new_turtle = generate_turtle()\r\n new_turtle.goto(self.all_turtles[-1].position())\r\n self.all_turtles.append(new_turtle)", "def setTextHeight(h=36):\n dislin.height(h)", "def set_height(height):\n resize.transforms[1].size = height", "def add_body_size(self):\n if self.body_size.get() >= Species.MAX_SIZE:\n raise BodySizeOverflowException(\"Cannot add more than 6 body size\")\n self.body_size.set(self.body_size.get()+1)\n self.player.request_payment(\"discard\")", "def setHeight(self, height=None):\r\n if(height):\r\n self.heightATOM3Integer.setValue(height)\r\n else:\r\n height = self.heightATOM3Integer.getValue()\r\n if(self.constCodeWidget != None):\r\n self.constCodeWidget.config(height=height)", "def innerHeight(self):\n raise NotImplementedError", "def updateHeight(self):\n leftHeight = self.left.height if self.left != None else 0\n rightHeight = self.right.height if self.right != None else 0\n self.height = max(rightHeight, leftHeight) + 1", "def calc_size(self):\r\n self.height = HEIGHT_STATUS", "def get_new_height(self):\n return self.new_height", "def calc_size(self):\r\n self.height = HEIGHT_CON\r\n self.posy = self.termheight - self.height" ]
[ "0.6295264", "0.61815864", "0.60334027", "0.60334027", "0.60334027", "0.60334027", "0.60334027", "0.60334027", "0.60334027", "0.60334027", "0.60334027", "0.60334027", "0.60334027", "0.5969041", "0.58549297", "0.57929575", "0.578066", "0.5753543", "0.5753543", "0.5753543", "0.57502264", "0.5725118", "0.5693514", "0.56815743", "0.5651424", "0.56460786", "0.56408095", "0.5639884", "0.5623053", "0.5579799" ]
0.7480933
0
Adds extra width to schematic body
def on_body_width_add(self, val): val = max(0, int(val)) self.mdl.cmp.s_add_width = val self.refresh_svg_canvas()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _extra_width(self) -> int:\n width = 0\n if self.box and self.show_edge:\n width += 2\n if self.box:\n width += len(self.columns) - 1\n return width", "def body_resize(self):", "def width(self):\n\t\tpass", "def width(self) -> int:", "def width(self) -> int:", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def set_width(self, *args):\n return _ida_hexrays.lvar_t_set_width(self, *args)", "def _refresh_width(self):\n self._width = curses.tigetnum('cols')\n self._writer = formatter.DumbWriter(self._output, maxcol=self._width)", "def min_width(self):\n ...", "def width(self, width):\n self.col += width", "def weight4width(box_width,platformWidth,stairsLength,stepCount,stepWidth):\n if (platformWidth-stairsLength)<0:\n platformWidth = stairsLength + 50 #platform width must larger than stairs length ,the value is 50\n return platformWidth\n else:return platformWidth", "def innerWidth(self):\n raise NotImplementedError", "def right_padding_width(self):\n ...", "def _update_width(self, is_commit_in_existing_columns):\n max_cols = self.num_columns + self.num_parents\n\n # Even if the current commit has no parents to be printed, it still\n # takes up a column for itself.\n if self.num_parents < 1:\n max_cols += 1\n\n # We added a column for the current commit as part of self.num_parents.\n # If the current commit was already in self.columns, then we have double\n # counted it.\n if is_commit_in_existing_columns:\n max_cols -= 1\n\n # Each column takes up 2 spaces\n self.width = max_cols * 2", "def weight_width(self):\n raise NotImplementedError(\"subclasses need to override this method\")", "def set_width(self, width):\n self.width = width", "def padding_width(self):\n ...", "def add_space(self) -> None:\n\n tag = r'\\vspace{5mm}'\n self.doc = self.doc + tag", "def get_width(self):\n return \"%s\" % self.width", "def _update_dimensions(self):\n _, self.width = self.window.getmaxyx()\n self.spacing = self.width // self.total_columns" ]
[ "0.6539015", "0.64924866", "0.60851705", "0.6076208", "0.6076208", "0.6055567", "0.6055567", "0.6055567", "0.6055567", "0.6055567", "0.6055567", "0.6055567", "0.6055567", "0.6055567", "0.6055567", "0.6055567", "0.5926316", "0.58366567", "0.5834761", "0.58082455", "0.5742201", "0.5740924", "0.570603", "0.5647858", "0.56011087", "0.5523787", "0.55004144", "0.548297", "0.5473463", "0.5464237" ]
0.7276638
0
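The two setter records above (on_body_height_add, on_body_width_add) share the same guard before storing the value and redrawing: coerce the input to int and clamp it at zero with max(0, int(val)). A tiny standalone illustration of that guard, with assumed example values:

def clamp_non_negative(val) -> int:
    # Negative sizes collapse to 0; fractional input is truncated toward zero by int().
    return max(0, int(val))

assert clamp_non_negative(-3) == 0    # extra size can never go below zero
assert clamp_non_negative(7.9) == 7   # floats are truncated, not rounded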
Run figure's event loop while listening to interactive events. The events listed in event_names are passed to handler. This function is used to implement `.Figure.waitforbuttonpress`, `.Figure.ginput`, and `.Axes.clabel`.
def blocking_input_loop(figure, event_names, timeout, handler): if figure.canvas.manager: figure.show() # Ensure that the figure is shown if we are managing it. # Connect the events to the on_event function call. cids = [figure.canvas.mpl_connect(name, handler) for name in event_names] try: figure.canvas.start_event_loop(timeout) # Start event loop. finally: # Run even on exception like ctrl-c. # Disconnect the callbacks. for cid in cids: figure.canvas.mpl_disconnect(cid)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fig1_press(event):\n if event.key == 'n':\n if results.type is not None:\n print(\"Moving to next neuron\")\n callback.next_move = 'next'\n plt.close(fig1)\n else:\n print('Ensure type is set')\n\n if event.key == 'b':\n if results.type is not None:\n print(\"Moving back to previous neuron\")\n callback.next_move = 'back'\n plt.close(fig1)\n else:\n print('Ensure type is set')\n \n elif event.key == 'c':\n print('Clearing clicks')\n results.clicks = defaultdict(list)\n \n mask_im = np.zeros_like(results.neurons[0]) # Will always be at least 1 neuron\n\n mask_ax.clear() # Clears axes limits\n mask_ax.imshow(mask_im)\n\n overlay_clicks(results, mask_ax)\n\n fig1.canvas.draw_idle()\n \n elif event.key == 'q':\n print('Exiting...')\n callback.next_move = 'quit'\n plt.close(fig1)\n\n elif event.key in ['0','1','2','3','4','5','6','7','8','9']:\n results.type = event.key\n\n fig1.suptitle(f\"{results.key} - Label: {results.type}\")\n fig1.canvas.draw_idle()", "def run(self, iterations=1):\n mouse_controller = mouse.Controller()\n keyboard_controller = keyboard.Controller()\n\n try:\n last_timestamp = self.events[0].timestamp\n except IndexError:\n logging.error(\"No events loaded\")\n exit(1)\n\n try:\n for _ in range(iterations):\n for event in self.events:\n duration = event.timestamp - last_timestamp\n time.sleep(duration)\n if event.key is None:\n mouse_controller.position = (event.x, event.y)\n if event.pressed:\n mouse_controller.click(event.button)\n logging.info(\n f'{event.button} pressed at {event.x},{event.y}.')\n else:\n mouse_controller.release(event.button)\n logging.info(\n f'{event.button} released at {event.x},{event.y}.')\n else:\n if event.pressed:\n keyboard_controller.press(event.key)\n logging.info(f'{event.key} key pressed')\n else:\n keyboard_controller.release(event.key)\n logging.info(f'{event.key} key released')\n last_timestamp = event.timestamp\n last_timestamp = self.events[0].timestamp\n except KeyboardInterrupt:\n logging.info(\"Interrupt detected\")\n finally:\n self.terminate_callback()", "def handleEvents(self, events):\n pass", "def eventloop(cls, custom_actions=[]):\n iiter = cls([None], custom_actions=custom_actions, verbose=False)\n print('[IITER] Begining interactive main loop')\n for _ in iiter:\n pass\n return iiter", "def event_loop(self):\n for event in pg.event.get():\n self.keys = pg.key.get_pressed()\n if event.type == pg.QUIT or self.keys[pg.K_ESCAPE]:\n self.done = True\n self.cannon.get_event(event, self.objects)", "def event_loop(self):\n for event in pg.event.get():\n if event.type == pg.QUIT:\n self.done = True\n elif event.type == pg.KEYDOWN:\n self.keys = pg.key.get_pressed()\n self.toggle_show_fps(event.key)\n elif event.type == pg.KEYUP:\n self.keys = pg.key.get_pressed()\n self.toggle_fullscreen(event.key)\n self._scene.get_event(event)", "def __handle_events(self):\r\n for event in pygame.event.get():\r\n self.controller.handle_event(event)", "def events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.running = False\n if event.type == pygame.MOUSEBUTTONDOWN:\n self.set_selected(self.mouse_on_grid())\n if self.get_selected() is not None and event.type == pygame.KEYDOWN:\n self.event_seletect_moved(event)\n self.event_cell_update(event)", "def event_loop(self):\n for event in pygame.event.get():\n self.scene.get_event(event)", "def event_handler(self):\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n elif event.type == MOUSEBUTTONDOWN and 
event.button == LEFT_CLICK:\r\n self.left_mouse_down_handler(event)\r\n elif event.type == MOUSEBUTTONUP and event.button == LEFT_CLICK:\r\n self.left_mouse_up_handler(event)\r\n elif event.type == MOUSEBUTTONDOWN and event.button == RIGHT_CLICK:\r\n self.right_mouse_down_handler(event)\r\n elif event.type == MOUSEBUTTONUP and event.button == RIGHT_CLICK:\r\n self.right_mouse_up_handler(event)\r\n elif event.type == MOUSEMOTION:\r\n self.mouse_motion_handler(event)\r\n elif event.type == MOUSEBUTTONUP and event.button in [2, 4, 5]:\r\n self.shortcut_click(event)", "def eventBindings(callbackFct, isThread=False,grabInput=False):\n\tprint(\"[PSSM_OpenCV - Click handler] : Let's do this\")\n\tglobal eventCallbackFct\n\tif grabInput:\n\t\tprint('Using an emulator - nothing to be grabbed')\n\teventCallbackFct = callbackFct\n\tcv2.setMouseCallback(\"PSSM_Emulator\", cv2Link)", "def handle_events(self) -> None:\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == MOUSEMOTION:\n self.mouse_pos = event.pos\n elif event.type == MOUSEBUTTONDOWN:\n self.mouse_pos = event.pos\n self.mouse_clicked = True\n elif self._focused_button is not None and event.type == KEYDOWN:\n self._handle_key_press(event)", "def run(self):\n while True:\n event, values = self.window.read()\n if event == sg.WIN_CLOSED:\n break\n\n ev.fire(self.window, event, values)", "def update(self, *args):\n self.kwargs = {}\n if self.manual:\n self.manual_button.disabled = True\n try:\n show_inline_matplotlib_plots()\n with self.out:\n if self.clear_output:\n clear_output(wait=True)\n for widget in self.kwargs_widgets:\n value = widget.get_interact_value()\n self.kwargs[widget._kwarg] = value\n self.result = self.f(**self.kwargs)\n show_inline_matplotlib_plots()\n if self.auto_display and self.result is not None:\n display(self.result)\n except Exception as e:\n ip = get_ipython()\n if ip is None:\n self.log.warn(\"Exception in interact callback: %s\", e, exc_info=True)\n else:\n ip.showtraceback()\n finally:\n if self.manual:\n self.manual_button.disabled = False", "def running(self, event_name: str, **kwargs: Any) -> None:\n stats.inc('events_fired', 'EVENT')\n if not self.listeners:\n return\n timestamp = time.time()\n for listener in self.listeners:\n if not listener.wants_event(event_name, kwargs):\n continue\n try:\n t = listener.callback(listener, event_name, timestamp, kwargs)\n asyncio.ensure_future(t)\n except Exception as e:\n log.msg('Failed to run event listener callback: %s' % str(e))", "def execute(self, fig):\n # subclasses must implement this.\n raise NotImplementedError", "def loop(self):\n keys.mode = 'main'\n for line in client.readlines('/event'):\n if not self.alive:\n break\n self.dispatch(*line.split(' ', 1))\n self.alive = False", "def run_frame(self, elapsed, events):\n\n for event in events:\n # arrow keypresses\n if event.type == pygame.KEYDOWN and event.key in self.movekeys:\n movedir = self.movekeys.index(event.key)\n self._move_marker(((0, 1), (1, 0), (0, -1), (-1, 0))[movedir])\n\n # joystick hat motion\n elif event.type == pygame.JOYHATMOTION and event.joy == 0 and event.value != (0, 0):\n self._move_marker(event.value)\n\n # enter key or joystick button (currently any button from 0-3)\n elif ((event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN) or\n (event.type == pygame.JOYBUTTONDOWN and event.button <= 3)):\n func = self.options[self.selected][1]\n args = self.options[self.selected][2:]\n\n # run the selected option, exiting afterward 
if it returns false\n result = getattr(self, func)(*args)\n if result is False:\n return False\n\n # reset menu\n self.resize_view()\n self.selected = 0\n\n # escape key\n elif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:\n return False", "def event_loop(self):\n for event in pg.event.get():\n if event.type == pg.QUIT:\n self.done = True\n elif event.type in (pg.KEYDOWN, pg.KEYUP):\n self.keys = pg.key.get_pressed()", "def fig1_button(event):\n if fig1.canvas.manager.toolbar.mode != '':\n print(f'Toolbar mode is {fig1.canvas.manager.toolbar.mode}')\n return\n\n if event.xdata != None and event.ydata != None and \\\n event.inaxes != mask_ax:\n \n index = np.argwhere(axes.flat == event.inaxes)[0,0]\n results.add_click(index, (event.xdata, event.ydata))\n overlay_clicks(results, mask_ax)\n\n for ax in axes.flat:\n if ax != mask_ax:\n ax.plot(event.xdata, event.ydata, 'go', ms=12)\n\n fig1.canvas.draw_idle()", "def handle_key(self, key):\n def _ui_loop_handle_key(_key):\n if _key in self.handlers.keys():\n self.handlers[_key].handle(_key, self.x, self.y)\n\n elif self.task:\n if self.task.handle_key(_key):\n if len(self.tasks) > 1:\n # only if we have multiple tasks, otherwise no point in offering 1 task option\n self.task = None\n self.set_help_text()\n\n elif _key in self.tasks:\n self.task = self.tasks[_key]\n self.set_help_text(self.task.helptext())\n self.task.start(self.x, self.y)\n\n if self.fig.document:\n # we now have an associated document, need to do this inside that context\n self.fig.document.add_next_tick_callback(\n lambda: _ui_loop_handle_key(_key=key))", "def tk_event_listener(F):\n def listener(self, *pargs, **kwargs):\n self._event_queue.put((F, self, pargs, kwargs))\n \n return listener", "def set_events(self):\r\n\r\n self.canvas.bind(\"<Button-1>\", self.event_click_left)\r\n self.bind(\"<Return>\", self.event_return)", "def make_fig1_callbacks(callback: Callback, results: Annotation, fig1: plt.Figure, axes: plt.Axes):\n\n mask_ax = axes.flat[-1]\n \n def fig1_press(event):\n \"\"\"Capture the keyboard pressing a button\n \n Arguments:\n event {matplotlib.backend_bases.KeyEvent} -- Keyboard item pressed\n \"\"\"\n if event.key == 'n':\n if results.type is not None:\n print(\"Moving to next neuron\")\n callback.next_move = 'next'\n plt.close(fig1)\n else:\n print('Ensure type is set')\n\n if event.key == 'b':\n if results.type is not None:\n print(\"Moving back to previous neuron\")\n callback.next_move = 'back'\n plt.close(fig1)\n else:\n print('Ensure type is set')\n \n elif event.key == 'c':\n print('Clearing clicks')\n results.clicks = defaultdict(list)\n \n mask_im = np.zeros_like(results.neurons[0]) # Will always be at least 1 neuron\n\n mask_ax.clear() # Clears axes limits\n mask_ax.imshow(mask_im)\n\n overlay_clicks(results, mask_ax)\n\n fig1.canvas.draw_idle()\n \n elif event.key == 'q':\n print('Exiting...')\n callback.next_move = 'quit'\n plt.close(fig1)\n\n elif event.key in ['0','1','2','3','4','5','6','7','8','9']:\n results.type = event.key\n\n fig1.suptitle(f\"{results.key} - Label: {results.type}\")\n fig1.canvas.draw_idle()\n\n\n def fig1_button(event):\n \"\"\"Capture the mouse button press\n \n Arguments:\n event {matplotlib.backend_bases.Evenet} -- Item for mouse button press\n \"\"\"\n if fig1.canvas.manager.toolbar.mode != '':\n print(f'Toolbar mode is {fig1.canvas.manager.toolbar.mode}')\n return\n\n if event.xdata != None and event.ydata != None and \\\n event.inaxes != mask_ax:\n \n index = np.argwhere(axes.flat == 
event.inaxes)[0,0]\n results.add_click(index, (event.xdata, event.ydata))\n overlay_clicks(results, mask_ax)\n\n for ax in axes.flat:\n if ax != mask_ax:\n ax.plot(event.xdata, event.ydata, 'go', ms=12)\n\n fig1.canvas.draw_idle()\n\n\n return fig1_press, fig1_button", "def handle_events(self):\n keys = pygame.key.get_pressed()\n if self.game_manager.game_state == GameState.Running:\n if self.arcade:\n self.game_manager.control_players_arcade(self.joysticks) \n else:\n self.game_manager.control_players(keys)\n elif self.arcade:\n self.ui.arcade_control(self.joysticks[1])\n\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.game_manager.game_state = GameState.Quit \n if self.game_manager.game_state == GameState.Finished or\\\n self.game_manager.game_state == GameState.Menu :\n if event.type == pygame.KEYDOWN and not self.arcade:\n self.ui.control(event.key)\n #self.start_new_game(GameMode.EatToSurvive)", "def handle_input_event(self):\n\n self.markerPos = self.get_mouse_coordinate()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n raise QuitRequestedError\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n raise QuitRequestedError\n if event.type == pygame.MOUSEBUTTONDOWN:\n if Event.is_valid_placement_stage(self.event):\n self.choice = self.get_mouse_coordinate()\n self.event = Event.next(self.event)\n self.timestep_watch.reset()\n\n liberties = self.env.liberty_after_next_steps(self.env.turn, self.env.getOpponent())\n self.env.printField(liberties)\n print()\n # self.env.printFlipNum(self.env.turn)\n # print(self.env.update_num_disks_can_filp(self.choice[0], self.choice[1], self.env.turn))\n\n # print(\"Click \", pos, \"coordinates: \", row, col)", "def main_loop(self):\n LOGGER.info('Entering main event loop...')\n try:\n while self._handle_faucet_events():\n while not self._faucet_events.event_socket_connected:\n LOGGER.info('Attempting faucet event sock connection...')\n time.sleep(1)\n try:\n self._faucet_events.connect()\n self._restore_states()\n self._faucet_collector.set_state_restored(True)\n except Exception as e:\n LOGGER.error(\"Cannot restore states or connect to faucet: %s\", e)\n self._faucet_collector.set_state_restored(False, e)\n except KeyboardInterrupt:\n LOGGER.info('Keyboard interrupt. Exiting.')\n self._faucet_events.disconnect()\n except Exception as e:\n LOGGER.error(\"Exception: %s\", e)\n raise", "def show_fig_and_wait(self):\n\n # window management\n self.fig.canvas.manager.show()\n self.fig.canvas.draw_idle()\n # starting a 'blocking' loop to let the user interact\n self.fig.canvas.start_event_loop(timeout=-1)", "def setupEventHooks(self):\n # handle mouse clicks\n self.img.scene().sigMouseClicked.connect(self.handleClick)\n # handle mouse movement\n # Use signalproxy for ratelimiting\n sig = self.img.scene().sigMouseMoved\n self.mvProxy = pqg.SignalProxy(signal=sig, rateLimit=60, slot=self.handleMove)", "def window_handler(self):\n self.open_window()\n cv2.setMouseCallback(self.window_name, self.click_handler)\n finish = False\n while not finish:\n\n key = cv2.waitKey(0)\n\n finish = self.manage_key(key)" ]
[ "0.58663946", "0.5447554", "0.54228073", "0.53521365", "0.53415567", "0.5315508", "0.5248115", "0.52316606", "0.5220068", "0.5174886", "0.5119426", "0.51046586", "0.5099623", "0.5095802", "0.5055943", "0.503854", "0.5026388", "0.4981488", "0.49244776", "0.4902189", "0.48954543", "0.48896894", "0.48759627", "0.48729527", "0.48540297", "0.48503247", "0.48370785", "0.48313883", "0.4811412", "0.4777044" ]
0.7370716
0
Calculates the perimeter given the bottom length, top length, 1st side length, and 2nd side length.
def perimeter(self): return self.sidelength1 + self.sidelength2 + self.baselength1 + self.baselength2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def perimeter(self):\n return (\n self.side_1_length +\n self.side_2_length +\n self.side_3_length +\n self.side_4_length\n )", "def calculateperimeter(self):\r\n return (self.width * 2) + (self.height * 2)", "def perimeter(self):\n\t\treturn 2 * (self.width + self.height)", "def perimeter(self):\n return 2 * (self.height + self.width)", "def perimeter(self):\n\t\treturn self.height * 4", "def perimeter(self):\n return sum([s.length for s in self.segments])", "def perimeter(self):\r\n return (2*self.width) + (2*self.height)", "def perimeter(self):\n perimeter = (2 * self.__length) + (2 * self.__width)\n\n return perimeter", "def perimeter(self):", "def perimeter(self):\n return sum(seg.length for seg in self.segments) + \\\n sum([p.perimeter for p in self.subs])", "def perimeter(self):\n return sum(self._lengths)", "def perimeter(self):\n return sum(self._lengths)", "def get_rect_perimeter(length, width):\n length = (str)(length)\n width = (str)(width)\n if((length.isnumeric()) and (length.isnumeric())):\n length = (float)(length)\n width = (float)(width)\n perimeter = 2 * (length + width)\n else:\n perimeter = \"Invalid input, length and width must be numeric value\"\n return perimeter", "def perimeter(a:float, b:float, c:float):\n return a + b + c", "def island_perimeter(grid):\n w = len(grid[0])\n h = len(grid)\n perimeter = 0\n\n for i, col in enumerate(grid):\n for j, row in enumerate(col):\n if row == 1:\n perimeter += 4\n if grid[i][j-1] == 1:\n perimeter -= 1\n if grid[i][(j+1) % w] == 1:\n perimeter -= 1\n if grid[(i+1) % h][j] == 1:\n perimeter -= 1\n if grid[i-1][j] == 1:\n perimeter -= 1\n return perimeter", "def perimeter(self):\r\n\r\n return 2*math.pi*self.__radius", "def perimeter_distance(self, p1, p2):\n\n p1_projection = self.outline.project(shgeo.Point(p1))\n p2_projection = self.outline.project(shgeo.Point(p2))\n\n distance = p2_projection - p1_projection\n\n if abs(distance) > self.outline_length / 2.0:\n # if we'd have to go more than halfway around, it's faster to go\n # the other way\n if distance < 0:\n return distance + self.outline_length\n elif distance > 0:\n return distance - self.outline_length\n else:\n # this ought not happen, but just for completeness, return 0 if\n # p1 and p0 are the same point\n return 0\n else:\n return distance", "def perimeter(cnt):\n\treturn cv2.arcLength(cnt, True)", "def perimeter(points):\n return sum(get_distances(points))", "def island_perimeter(grid):\n \"\"\"island_perimeter - perimeter of the island\n Parameter\n ---------\n grid:\n list\n Return\n ------\n int\n \"\"\"\n total = 0\n\n rows = len(grid)\n columns = len(grid[0])\n\n for row in range(rows):\n for col in range(columns):\n array = grid[row][col]\n if array == 1:\n total += 4\n if row != 0 and grid[row-1][col] == 1:\n total -= 1\n if col != 0 and grid[row][col-1] == 1:\n total -= 1\n if row + 1 != rows and grid[row + 1][col] == 1:\n total -= 1\n if col + 1 != columns and grid[row][col + 1] == 1:\n total -= 1\n\n return total", "def island_perimeter(grid):\n perimeter = 0\n for j in range(len(grid)):\n for i in range(len(grid[j])):\n if grid[j][i] == 1:\n perimeter += 4\n if i is not 0 and grid[j][i - 1] is 1:\n perimeter -= 1\n if j is not 0 and grid[j - 1][i] is 1:\n perimeter -= 1\n if j + 1 < len(grid) and grid[j + 1][i] is 1:\n perimeter -= 1\n if i + 1 < len(grid[j]) and grid[j][i + 1] is 1:\n perimeter -= 1\n return perimeter", "def square_area(side):\n return side**2", "def getPerimeter(self):\n return 2 * math.pi * self.__radius", "def 
squareArea(sidelength):\n sidelength = float(sidelength)\n return sidelength**2", "def island_perimeter(grid):\n perimeter = 0\n for row in range(len(grid)):\n for idx in range(len(grid[0])):\n if grid[row][idx] == 1:\n \"\"\"if 1 encountered check all sides for 0\"\"\"\n top = row - 1\n bottom = row + 1\n left = idx - 1\n right = idx + 1\n\n \"\"\"check top index value\"\"\"\n if top < 0:\n perimeter += 1\n elif grid[row - 1][idx] != 1:\n perimeter += 1\n\n \"\"\"check bottom index value\"\"\"\n if bottom >= len(grid):\n perimeter += 1\n elif grid[row + 1][idx] != 1:\n perimeter += 1\n\n \"\"\"check left index value\"\"\"\n if left < 0:\n perimeter += 1\n elif grid[row][idx - 1] != 1:\n perimeter += 1\n\n \"\"\"check right index value\"\"\"\n if right >= len(grid[0]):\n perimeter += 1\n elif grid[row][idx + 1] != 1:\n perimeter += 1\n return perimeter", "def get_perimeter_formula(cls):\n pass", "def island_perimeter(grid):\n c = 0\n length = len(grid) - 1\n width = len(grid[0]) - 1\n\n for i, r in enumerate(grid):\n for j, n in enumerate(r):\n if n == 1:\n if i == 0 or grid[i - 1][j] != 1:\n c += 1\n if j == 0 or grid[i][j - 1] != 1:\n c += 1\n if j == width or grid[i][j + 1] != 1:\n c += 1\n if i == length or grid[i + 1][j] != 1:\n c += 1\n return c", "def rectangle_area(side1, side2):\n return float(side1) * float(side2)", "def island_perimeter(grid):\n perimeter, connections = 0, 0\n\n for i in range(len(grid)):\n for j in range(len(grid[i])):\n if grid[i][j] == 1:\n perimeter += 4\n\n if i != 0 and grid[i-1][j] == 1:\n connections += 1\n if j != 0 and grid[i][j-1] == 1:\n connections += 1\n\n return(perimeter - (2 * connections))", "def calc_half_perimeter(self, source, sinks):\n deltax = 0\n deltay = 0\n assert self.cells[source].x in range(self.nx) and self.cells[source].y in range(self.ny)\n for sink in sinks:\n assert self.cells[sink].x in range(self.nx) and self.cells[sink].y in range(self.ny)\n dx = abs(self.cells[source].x - self.cells[sink].x)\n if dx > deltax:\n deltax = dx\n dy = abs(self.cells[source].y - self.cells[sink].y)\n if dy > deltay:\n deltay = dy\n return deltax + deltay" ]
[ "0.73933774", "0.7245891", "0.720648", "0.7175091", "0.7159154", "0.7143771", "0.7052815", "0.6963846", "0.6854498", "0.6804715", "0.6780466", "0.6780466", "0.6722681", "0.6501043", "0.6399388", "0.63739026", "0.63721514", "0.6371612", "0.6364265", "0.63316923", "0.6298338", "0.625855", "0.62480456", "0.62427074", "0.6232807", "0.6219057", "0.62017745", "0.6177004", "0.6150818", "0.6149094" ]
0.76503986
0
labels with round numbers
def init_round_numbers(self): for round_num in range(1, 13): lbl_round_num = tk.Label(self.master, text=str(round_num), font='courier 10 bold', fg='green', pady=2) lbl_round_num.grid(row=round_num+1, column=0) row = 14 for trump in ["C", "D", "H", "S", "NT"]: lbl_round_num = tk.Label(self.master, text="13"+trump, font='courier 10 bold', fg='green') lbl_round_num.grid(row=row, column=0) row += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def autolabel(X_pos,values,height_lift):\r\n\theight= np.round(np.nan_to_num(values),2);y_pos = height_lift*height\r\n\tfor i in range(len(height)):\r\n\t\tax.text(X_pos[i],y_pos[i],'%4.2f' % height[i], ha='center', va='bottom',size=4)", "def getLabels(self):\n return self.numToLabel", "def label(self, margin):\n if self.alphaL == None or self.alphaR == None:\n self.label = \"N\"\n elif abs(self.alphaL - self.alphaR) <= margin:\n self.label = \"S\"\n elif (self.alphaL - self.alphaR) > margin:\n self.label = \"L\"\n elif -(self.alphaL - self.alphaR) > margin:\n self.label = \"R\"\n else:\n self.label = \"N\"", "def autolabel(rects): #source: [.........]\n for rect in rects:\n height = rect.get_height()\n ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,\n '%d' % height.round(1),\n ha='center', va='bottom')", "def set_all_labels(ax, xax, ra_label, yax, dec_label, roundnum=1):\n ax.set_xticks(xax)\n ax.set_xticklabels(np.round(ra_label, roundnum))\n ax.set_yticks(yax)\n ax.set_yticklabels(np.round(dec_label, roundnum))\n ax.set_ylim(yax[0], yax[-1])\n \n return ax", "def autolabel(rects):\n\t for rect in rects:\n\t\theight = rect.get_height()\n\t\tax.text(rect.get_x() + rect.get_width()/2., 1.01*height,\n\t\t '%d' % int(height),\n\t\t ha='center', va='bottom')", "def setLabelDigits(digits=1, axes='XYZ'):\n dislin.labdig(digits, axes)", "def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n ax.text(\n rect.get_x() + rect.get_width() / 2.,\n 1.005 * height,\n '%.1f' % height,\n ha='center',\n va='bottom')", "def __autolabel(ax, rects):\n for rect in rects:\n height = rect.get_height()\n if math.isnan(height):\n continue\n w = rect.get_x() + rect.get_width()/2.\n ax.text(w, 1.05*height,\n '%d' % int(height),\n ha='center', va='bottom', fontsize='x-large')", "def reformat_labels(label, bin_limits=[2]):\n# num_labels = y_batch.max() + 1\n label = np.array([label], dtype=np.float32)\n num_labels = 2\n label = np.digitize(label, bins=[2])\n label = (np.arange(num_labels) == label[:, None]).astype(np.float32)[0]\n return label", "def autolabel(rects):\r\n for rect in rects:\r\n height = rect.get_height()\r\n ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,\r\n '%d' % int(height),\r\n ha='center', va='bottom')", "def make_label(self, label, units):\n nice_label = self.tex_axis_label(label)\n if not (units == 'dimensionless') and \\\n (units is not None) and (not units == []):\n nice_label += ' (%s)'%self.tex_axis_label(units)\n return nice_label", "def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n # ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * height,\n # '%d' % int(height),\n # ha='center', va='bottom')", "def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,\n '%.2f' % height,\n ha='center', va='bottom')", "def autolabel(ax, rects, vals, fsize):\n for i in range(len(rects)):\n rect = rects[i]\n val = vals[i]\n# for rect in rects:\n height = rect.get_height()\n ax.text(rect.get_x() + rect.get_width()/2., 1.0*height,\n '%d' % int(val), fontsize=fsize,\n ha='center', va='bottom')", "def get_labels(self):\n return [\"00:00\", \"04:00\", \"08:00\", \"12:00\", \"16:00\", \"20:00\", \"00:00\"]", "def get_labels(self):\n return [\"00:00\", \"04:00\", \"08:00\", \"12:00\", \"16:00\", \"20:00\", \"00:00\"]", "def get_labels(self):\n return [\"00:00\", \"04:00\", \"08:00\", \"12:00\", \"16:00\", \"20:00\", \"00:00\"]", "def autolabel(rects):\n for rect in rects:\n 
height = rect.get_height()\n ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,\n '%d' % int(height),\n ha='center', va='bottom')", "def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,\n '%d' % int(height),\n ha='center', va='bottom')", "def autolabel(rects):\r\n for rect in rects:\r\n height = rect.get_height()\r\n ax.text(rect.get_x() + rect.get_width()/2., 1.0*height, '%d' % int(height), ha='center', va='bottom')", "def autolabel(ax, rects):\n for rect in rects:\n height = rect.get_height()\n if height > 90:\n factor_text = 0.8\n else:\n factor_text = 1.05\n ax.text(\n rect.get_x() + rect.get_width() / 2.0,\n (factor_text * height),\n f\"{height}\",\n ha=\"center\",\n va=\"bottom\",\n fontsize=32,\n )", "def autolabel(rects, ax):\r\n for rect in rects:\r\n height = rect.get_height()\r\n ax.text(rect.get_x() + rect.get_width()/2, (height+0.2),\r\n '%.1f' % height,\r\n ha='center', va='bottom', fontsize=12)", "def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n # ax.text(rect.get_x() + rect.get_width() / 2., 1.22 * height,\n # '%d' % int(height),\n # ha='center', va='bottom')", "def autolabel(rects, n, add_value=[]):\n if rects.__len__() == add_value.__len__() and abs_val_legend:\n for rect, val in zip(rects, add_value):\n height = rect.get_height()\n if not (np.isnan(height) or height == 0):\n ax.text(rect.get_x() + rect.get_width()/2., 1.03 * height,\n ('%1.' + str(n) + 'f') % height + '\\n' + val + '',\n ha='center', va='bottom')\n else:\n for rect in rects:\n height = rect.get_height()\n if not (np.isnan(height) or height == 0):\n ax.text(rect.get_x() + rect.get_width()/2., 1.07* height,\n ('%1.' + str(n) + 'f') % height,\n ha='center', va='bottom')", "def autolabel(rects):", "def autolabel(rects):\n for rect in rects:\n height = rect.get_height()\n plt.text(rect.get_x() + rect.get_width()/2., 1*height,\n '%d' % int(height),\n ha='center', va='bottom')", "def AutoLabel(rects, ax):\n for rect in rects:\n height = rect.get_height()\n ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,\n \"%d \" % int(height),\n ha=\"center\", va=\"bottom\")", "def label_for(self, *pp, unit=True, description=True):\n if len(pp) > 1 and np.all([re.match(r\"k\\d+l\", p) for p in pp]):\n label = \"$k_nl$\"\n if unit:\n label += \" / $m^{-n}$\"\n return label\n return super().label_for(*pp, unit=unit, description=description)", "def test_rlabel(self):\n fig = plt.figure()\n ax = fig.add_subplot(projection='ternary')\n label = \"R\"\n ax.set_rlabel(label)\n assert ax.get_rlabel() == label" ]
[ "0.6358072", "0.635775", "0.6293751", "0.621447", "0.6207149", "0.62016577", "0.6130753", "0.61188644", "0.61017317", "0.60645777", "0.6025209", "0.60067993", "0.59946424", "0.5989236", "0.59892356", "0.597898", "0.597898", "0.597898", "0.59719455", "0.5968639", "0.59655595", "0.5956008", "0.5946158", "0.5945993", "0.59400874", "0.5934503", "0.59262484", "0.58971405", "0.58966935", "0.58828676" ]
0.65299755
0
command button that calculates scores
def init_button_calc(self): btn_calc = tk.Button(self.master, text='calculate', font='courier 10 bold', fg='purple', command=self.update_scores) btn_calc.grid(row=20, column=1, columnspan=3, sticky=tk.W+tk.E, pady=5)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def disp_score():", "def update_score():\n pass", "def enter_game_scores():\n pass", "def score(self):", "def update_score(self, engine, *args):\n #pdb.set_trace()\n self.score_label.text = \"Gold: {}/{}\".format(str(engine.score),\n str(engine.win_score))", "def update_score(self):\n score_text = ' ' + str(self.x_score) + ' - ' + str(self.o_score) + ' '\n self.Score_Label.configure(text=score_text, foreground='#FFFFFF')", "def update_scoreboard(self):\n self.clear()\n self.goto(-(WIDTH//6), (HEIGHT//2-30))\n self.write(self.l_score, align = 'center', font = ('Courier', 20, 'normal'))\n self.goto((WIDTH//6), (HEIGHT//2-30))\n self.write(self.r_score, align = 'center', font = ('Courier', 20, 'normal'))", "def analyze(self):\r\n self.current = 'score'\r\n popup = AnalyzeInterface(self.current_screen).open()", "def score_update(scoreboard, compare):\r\n if compare == 'Victory':\r\n scoreboard['W'] += 1\r\n elif compare == 'Defeat':\r\n scoreboard['L'] += 1\r\n elif compare == 'Tie':\r\n scoreboard['T'] += 1", "def to_score(self):\n self._bottom_tab(2)\n self._goto(\"score\")", "def r_point(self):\n self.r_score += 1\n self.update_scoreboard()", "def scoring(self):\n pass", "def update(self):\n self.clear()\n self.score += 1\n self.write(f\"Score : {self.score}\",\n align=\"center\", font=(\"Arial Black\", 20))", "def update_scores(self):\r\n totals = [0, 0, 0, 0]\r\n for player in range(0, 4):\r\n for round_num in range(0, 17):\r\n try:\r\n bid = int(self.spin_bids[player][round_num].get())\r\n tricks = int(self.spin_tricks[player][round_num].get())\r\n except ValueError:\r\n bid = -1\r\n tricks = -1\r\n score = calc_score(min(round_num+1, 13), bid, tricks)\r\n self.lbl_scores[player][round_num].configure(text=str(score))\r\n totals[player] += score\r\n for player in range(0, 4):\r\n self.lbl_totals[player].configure(text=str(totals[player]))\r\n return totals[0] + totals[1] + totals[2] + totals[3]", "def update_score_board(self):\n score = ''\n for key, value in self.model.game_score.items():\n score += key + \"-\" + str(value) + ':'\n if self.view.score_board_entry.get():\n self.view.score_board_entry.delete(0, tkinter.END)\n self.view.score_board_entry.insert('1', score)", "def augmenter_score():\n\n global label_score\n global score\n\n score += 1\n label_score.config(text= \"score : \" + str(score))", "def score():\r\n\r\n point_1 = 0\r\n point_2 = 0\r\n print(term.move_xy(82,15) + term.white + 'Score joueur 1 : ', end='')\r\n print(point_1)\r\n print(term.move_xy(82,16) + term.white + 'Score joueur 2 : ', end='' )\r\n print(point_2)", "def l_point(self):\n self.l_score += 1\n self.update_scoreboard()", "def add_score(score):\n global SCORE\n SCORE = SCORE + score\n # update the display\n mvaddstr(1, 2, \"Score:\", color_pair(HEADING_COLOUR) | A_BOLD)\n mvaddstr(1, 9, \"%d\" % SCORE, color_pair(TEXT_COLOUR) | A_BOLD)", "def _calculate_score(self):\n mul = self._check_board()\n if mul > 0:\n inc = 100 * mul + ((mul - 1) * 25)\n self.score += inc", "def increase_score(self):\n self.score += 1", "def _update_scoreboard(self, result):\n if result == Moveresult.KEY:\n self.current_turn.keys_collected += 1\n elif result == Moveresult.EXIT:\n self.current_turn.successful_exits += 1", "def view_scores(jenni, input):\n scores.view_scores(jenni, input)", "def on_mouse_press(self, x, y, button):\n if button == arcade.MOUSE_BUTTON_LEFT:\n if self.texture == self.ada:\n self.score += 1\n if self.texture == self.potato:\n self.score -= 1", "def show_scores(self):\n for text in self.score_text:\n 
text.draw()", "def cmd_calculation():", "def win(self):\n self.score += 1\n self.ids['score'].text = 'SCORE: ' + str(self.score)", "def afficher_scores(sj, so):\n lbl_sj1.configure(text=\"{}\".format(sj))\n lbl_sj2.configure(text=\"{}\".format(so))", "def updateScore(score):\n return score + 1", "def f1_score(self):" ]
[ "0.695034", "0.676045", "0.66949934", "0.668477", "0.6502419", "0.649244", "0.64213395", "0.63838565", "0.62789667", "0.62286115", "0.6212426", "0.62090117", "0.61582416", "0.61512876", "0.6119021", "0.61164653", "0.61131734", "0.6094686", "0.60794663", "0.6069196", "0.6050373", "0.60324985", "0.6016102", "0.59923166", "0.59838927", "0.59388417", "0.5937854", "0.59252566", "0.5924032", "0.5908876" ]
0.6769738
1
calculate and display scores for each valid bid x trick pair
def update_scores(self): totals = [0, 0, 0, 0] for player in range(0, 4): for round_num in range(0, 17): try: bid = int(self.spin_bids[player][round_num].get()) tricks = int(self.spin_tricks[player][round_num].get()) except ValueError: bid = -1 tricks = -1 score = calc_score(min(round_num+1, 13), bid, tricks) self.lbl_scores[player][round_num].configure(text=str(score)) totals[player] += score for player in range(0, 4): self.lbl_totals[player].configure(text=str(totals[player])) return totals[0] + totals[1] + totals[2] + totals[3]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def disp_score():", "def resultat_match(self, binomes):\n for binome in binomes:\n while True:\n score_un = self.vue.entree_resultats(binome[0])\n score_deux = self.vue.entree_resultats(binome[1])\n if score_un + score_deux != 1:\n self.vue.erreur_score()\n continue\n else:\n binome[0].ajout_score(score_un)\n binome[1].ajout_score(score_deux)\n table_players.update({\"score\": binome[0].points},\n doc_ids=[binome[0].id])\n table_players.update({\"score\": binome[1].points},\n doc_ids=[binome[1].id])\n break\n self.vue.afficher_resultats(binomes)", "def __get_score(self):\n for pair in zip(self.nu[self.nu_idx:], self.sw[self.sw_idx:]):\n if pair[0] == pair[1]:\n self.score += 1\n else:\n break", "def score(self):", "def score_phase(bids, tricks, deck_top, player_data=None,\n\tsuppress_player_data=True):\n\tscore = [0, 0, 0, 0]\n\n\t# prepare itertools.cycle() object\n\tseating_order = cycle([0, 1, 2, 3])\n\tnext(seating_order) \n\n\t# Determine the winner of each trick and increment score\n\tfor trick in tricks:\n\t\t_play, winning_index = det_winner(trick, deck_top[1], trick[0][1],\n\t\t\tindex_return=True)\n\n\t\t# using seating order determine which player played won the trick\n\t\tfor _ in range(winning_index):\n\t\t\twinning_player = next(seating_order)\n\n\t\tscore[winning_player] += 1\n\n\t# Check the results of players bids and increment score acordingly\n\tfor player in range(4):\n\t\tif score[player] == bids[player]:\n\t\t\tscore[player] += 10\n\n\tscore = tuple(score) # Dont want to mess with that PEP8s\n\n\treturn score if suppress_player_data else (score, player_data)", "def scoreGame(self):\n # create valueLs[card1,card2,...], pass it to sumHandReturnPoints(valueLs) or twoCardReturnPoints(valueLs)\n scoreLs = []\n ### Score of row\n for rowKey in self.table:\n valueLs = self.table[rowKey]\n points = self.sumHandReturnPoints(valueLs)\n scoreLs.append(points)\n\n ### Score of 4-card column\n for offset in range(0,3): # 0,1,2\n tmpLs = []\n for rowKey in self.table:\n valueLs = self.table[rowKey]\n if len(valueLs) == 5:\n iterStart = 1\n else:\n iterStart = 0\n card = valueLs[iterStart+offset]\n tmpLs.append(card)\n points = self.sumHandReturnPoints(tmpLs)\n scoreLs.append(points) \n\n ### Score of 2-card column\n #(1) 1st column\n valueLs1 = self.table['row1']\n valueLs2 = self.table['row2']\n tmpLs = []\n tmpLs.append(valueLs1[0].get_rank())\n tmpLs.append(valueLs2[0].get_rank())\n points = self.twoCardReturnPoints(tmpLs)\n scoreLs.append(points)\n #(2) 5th column\n valueLs3 = self.table['row1']\n valueLs4 = self.table['row2']\n tmpLs = []\n tmpLs.append(valueLs3[-1].get_rank())\n tmpLs.append(valueLs4[-1].get_rank())\n points = self.twoCardReturnPoints(tmpLs)\n scoreLs.append(points) \n\n ### Add up scoreLs\n sumPoints = 0\n for points in scoreLs:\n sumPoints += points\n return sumPoints", "def evaluate_score(self,word_id):\r\n total_best = 0\r\n assigned_to_return = {}\r\n for possible_word in self.satisfiers[word_id].keys():\r\n words_to_iterate = []\r\n iterated_word_ids = []\r\n # print()\r\n for connected_word_id in self.satisfiers[word_id][possible_word].keys():\r\n words_to_iterate.append(self.satisfiers[word_id][possible_word][connected_word_id])\r\n # print(\"word_id: {}, possible_word: {}, connected_id: {}, words: {}\".format(word_id,possible_word, connected_word_id,self.satisfiers[word_id][possible_word][connected_word_id]))\r\n iterated_word_ids.append(connected_word_id)\r\n \r\n # print(possible_word)\r\n # print(\"\\nPossible word:\",possible_word)\r\n for 
comb in itertools.product(*words_to_iterate):\r\n assigned_words = {}\r\n assigned_words[word_id] = possible_word\r\n for i in range(len(iterated_word_ids)):\r\n assigned_words[iterated_word_ids[i]] = comb[i]\r\n # print(\"word_id: {} comb: {}\".format(word_id,comb))\r\n # print(\"\\nword_id: {}, assigned words: {}\".format(word_id,assigned_words))\r\n new_assigned, current_max = self.get_max_score(word_id,assigned_words)\r\n # print(\"new_assigned: {}, current_max: {}\".format(new_assigned, current_max))\r\n if current_max > total_best:\r\n total_best = current_max\r\n assigned_to_return = {}\r\n assigned_to_return = new_assigned\r\n return assigned_to_return, total_best", "def score(self,\n tricks: Union[List, np.array],\n bid: int = None,\n vulnerability: Union[List, np.array] = None) -> np.array:\n\n # using class's internal values if bid or vulnerability were not submitted\n if bid is None:\n bid = self.max_bid\n if vulnerability is None:\n vulnerability = self.vulnerability\n\n out = np.zeros(NUM_PLAYERS)\n\n return out", "def extract_score(results):\n total_score = 0;\n total_possible_score = 0;\n for k in results.keys():\n total_score = total_score + results[k][0]\n total_possible_score = total_possible_score + results[k][1]\n return (total_score, total_possible_score)", "def get_score(self, a, b):\n ### FILL IN ###", "def compute_each_score(word_embeddings, each_id_pair): # without weighting scheme\n emb1 = word_embeddings[each_id_pair[0], :]\n emb2 = word_embeddings[each_id_pair[1], :]\n inn = np.inner(emb1, emb2)\n # print('inner product is {}'.format(inn))\n emb1norm = np.sqrt(np.inner(emb1, emb1))\n # print('emb1norm is {}'.format(emb1norm))\n emb2norm = np.sqrt(np.inner(emb2, emb2))\n # print('emb2norm is {}'.format(emb2norm))\n each_pair_score = inn / emb1norm / emb2norm\n # print('each score is {}\\n'.format(each_pair_score))\n return each_pair_score", "def davies_bouldin_score(self):\r\n print(colored(\"The davies bouldin score of the clustering is %0.002f\\n\" %(davies_bouldin_score(self.X, self.labels)),color = 'red', attrs=['bold']))\r\n print()\r\n print(colored(\"The points in each cluster are : \",color = 'yellow', attrs=['bold']))\r\n print(collections.Counter(self.labels))", "def _calculate_score(self):\n mul = self._check_board()\n if mul > 0:\n inc = 100 * mul + ((mul - 1) * 25)\n self.score += inc", "def score_bag(self, bag):\n # pop_scores = bag.retrieve_external_scores(self.file_name, self.col_name)\n # true_scores = pop_scores.ix[bag.true_pop]\n # scores_col = true_scores.columns[0]\n # res = np.array([true_scores.index.values, true_scores[scores_col].values]).transpose()\n # return res\n\n pop_scores = bag.retrieve_external_scores(self.file_name, self.col_name)\n candidate_ids_numeric = frozenset(bag.universe) - frozenset(bag.whites)\n # candidate_id_strings = (str(int(idnum)) for idnum in candidate_ids_numeric)\n candidate_scores = pop_scores.ix[candidate_ids_numeric].dropna()\n scores_col = candidate_scores.columns[0]\n res = np.array([\n candidate_scores.index.values.astype(int),\n candidate_scores[scores_col].values\n ]).transpose()\n return res", "def update_scores(self):\n self.score[0] = (-1)*sum(self.board[self.board == -1])\n self.score[1] = sum(self.board[self.board == 1])\n #self.score[i] = sum(1 for j in range(len(stones_on_board)) if stones_on_board[j] == i)", "def evaluate(self):\n self.matrix = pair_matrix(self)\n score = 0\n for x in range(len(self.seq)):\n for y in range(x, len(self.seq)):\n if self.matrix[x, y] == 1:\n if abs(x - y) < 5:\n 
score -= 7\n if self.seq[x] == complementary(self.seq[y]):\n score += 2\n elif self.seq[x] == 'U' and self.seq[y] == 'G' or self.seq[x] == 'G' and self.seq[y] == 'U':\n score += 1\n else:\n score -= 5\n return score", "def scoring(self):\n pass", "def get_score(self, student_answers):\r\n pass", "def calculate_all_scrabble_scores():\n for node in list_of_nodes:\n word = replace_umlauts(node.description)\n if word == \"'?'\":\n continue\n print word, scrabble_score(word)", "def calculate_scores(players):\n scores = {}\n for player in players.tuple_:\n scores[player.id_] = player.score()\n return scores", "def score(self):\r\n totN = 0\r\n totB = 0\r\n for l in range(SIZE):\r\n for c in range(len(COLONNES)):\r\n if self.jeu[l][c] == NOIR:\r\n totN += 1\r\n elif self.jeu[l][c] == BLANC:\r\n totB += 1\r\n return (totN, totB)", "def _score_to_decision(self, score):", "def get_scores(self) -> tuple:\n return (self.get_score(), self.p2_score)", "def hand_points(hand):\n points = [[]]\n branch = 1\n for card in hand:\n if not card[\"is_hidden\"]:\n if card[\"value\"].isnumeric():\n for possibility in range(branch):\n points[possibility].append(int(card[\"value\"]))\n elif card[\"value\"] == \"A\":\n for possibility in range(branch):\n # Ace is 1 or 11. Creating the two possibility\n points.append(points[possibility] + [11]) \n points[possibility].append(1)\n branch += 1\n else:\n # Left are the face value of 10\n for possibility in range(branch):\n points[possibility].append(10)\n\n score = list(zip([sum(branch) for branch in points], points))\n score.sort(key=lambda x: x[0], reverse=True)\n\n for total, points in score:\n if total == 21 and len(hand) == 2:\n return total, \"BlackJack!\"\n if total <= 21:\n if 1 in points and 11 in points:\n return total, None\n if 1 in points: \n return total, \"Soft\"\n if 11 in points:\n return total, \"Hard\"\n else:\n return total, None\n\n # If you get there, you have lost or you had empty hand \n # or all card in hand was hiddien\n if score:\n return score[-1][0], None\n else:\n return 0, None", "def completing_evalution(self, *args):\n self.calculate_risk_tol(*args)\n graham_picks = key_ratios(\"GrahamScore\", total_score)\n lynch_picks = key_ratios(\"LynchScore\", total_score)\n return (graham_picks, lynch_picks)", "def find_scores(self):\n p1_score = self.p1_store()\n p2_score = self.p2_store()\n return p1_score, p2_score", "def score_method(pairs_true, pairs_test):\n \n set_true = {tuple(e) for e in pairs_true}\n set_test = {tuple(e) for e in pairs_test}\n true_pos, false_pos, false_neg = confusion_stats(set_true, set_test)\n \n total = true_pos + false_pos + false_neg\n true_pos_rate = true_pos / total\n false_pos_rate = false_pos / total\n false_neg_rate = false_neg / total\n \n return true_pos_rate, false_pos_rate, false_neg_rate", "def compute_and_report(data, scorer):\n sim = sorted([(scorer(data, a, b), a, b)\n for a, b in combinations(data.keys(), 2)],\n reverse=True)\n\n for s, a, b in sim:\n print(\"%.2f\" % s, a, b)", "def pairwise_bit_scores(blastf, ss, outf, verbose=False):\n\n if verbose:\n sys.stderr.write(f\"{bcolors.GREEN}Creating scores{bcolors.ENDC}\\n\")\n\n pb = {}\n out = open(outf + \".tsv\", 'w')\n out.write(\"Query\\tSubject\\tQLen\\tSLen\\tBits\\tnBits\\n\")\n for b in stream_blast_results(blastf, verbose):\n if b.query not in pb:\n pb[b.query] = {}\n if b.db not in pb:\n pb[b.db] = {}\n\n # we normalize by the bitscore of the two proteins if we can!\n if b.query in ss and b.db in ss:\n nb = b.bitscore / ((ss[b.query] + 
ss[b.db])/2)\n else:\n # if we can't do that, we cheat and normalize \n # the bit score by twice\n # the average length of the proteins\n # i.e. the sum of the lengths\n nb = b.bitscore / (b.query_length + b.subject_length + 3.3)\n\n if b.query in pb[b.db] and pb[b.db][b.query] > nb:\n continue\n pb[b.db][b.query] = pb[b.db][b.query] = nb\n out.write(f\"{b.query}\\t{b.db}\\t{b.query_length}\\t{b.subject_length}\\t{b.bitscore}\\t{nb}\\n\")\n return pb", "def check_score(self) -> None:\n self.player_1, self.player_2 = 0, 0\n for cell in self.cells:\n if cell.player == 1:\n self.player_1 += 1\n elif cell.player == 2:\n self.player_2 += 1" ]
[ "0.6550589", "0.64147127", "0.6227076", "0.60567015", "0.6029427", "0.6025697", "0.5990587", "0.5938982", "0.5932216", "0.59277326", "0.592605", "0.59216154", "0.5920696", "0.5903962", "0.58853215", "0.5872225", "0.5831863", "0.58293426", "0.5814984", "0.58139294", "0.581041", "0.58100355", "0.5799656", "0.57565325", "0.57392895", "0.57129335", "0.5707027", "0.5703089", "0.56923646", "0.5688978" ]
0.6524363
1
Connect to address and return the socket object. Convenience function. Connect to address (a 2tuple ``(host, port)``) and return the socket object. Passing the optional timeout parameter will set the timeout on the socket instance before attempting to connect. If no timeout is supplied, the
def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT): msg = "getaddrinfo returns an empty list" host, port = address for res in getaddrinfo(host, port, 0, SOCK_STREAM): af, socktype, proto, canonname, sa = res sock = None try: sock = socket(af, socktype, proto) if timeout is not _GLOBAL_DEFAULT_TIMEOUT: sock.settimeout(timeout) sock.connect(sa) return sock except error as msg: if sock is not None: sock.close() raise error(msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def connect(spec, timeout=None, nagle_off=True, cache=0,\n _cache=_connect_cache, _lock=_connect_cache_lock):\n # pylint: disable = W0102, R0912, R0915\n\n sock = None\n try:\n adi = None\n if cache > 0:\n _lock.acquire()\n try:\n if spec in _cache:\n adi, stamp = _cache[spec]\n if stamp < _datetime.datetime.utcnow():\n del _cache[spec]\n adi = None\n finally:\n _lock.release()\n if adi is None:\n if isinstance(spec, str):\n try:\n AF_UNIX = _socket.AF_UNIX\n except AttributeError:\n raise NotImplementedError(\n \"UNIX domain sockets are not supported\"\n )\n adi = [(AF_UNIX, _socket.SOCK_STREAM, 0, None, spec)]\n else:\n adi = _socket.getaddrinfo(spec[0], spec[1],\n _socket.AF_UNSPEC, _socket.SOCK_STREAM, 0, 0)\n if cache > 0:\n _lock.acquire()\n try:\n if spec not in _cache:\n _cache[spec] = (\n adi,\n _datetime.datetime.utcnow()\n + _datetime.timedelta(seconds=cache),\n )\n finally:\n _lock.release()\n\n AF_INET6 = getattr(_socket, 'AF_INET6', None)\n for family, stype, proto, _, addr in adi:\n if not _socket.has_ipv6 and family == AF_INET6:\n continue # skip silenty if python was built without it.\n\n sock = _socket.socket(family, stype, proto)\n sock.settimeout(timeout)\n retry = True\n while retry:\n try:\n sock.connect(addr)\n except _socket.timeout:\n break\n except _socket.error, e:\n if e[0] == _errno.EINTR:\n continue\n elif e[0] in (_errno.ENETUNREACH, _errno.ECONNREFUSED):\n break\n raise\n retry = False\n else:\n if nagle_off:\n disable_nagle(sock)\n return sock\n sock.close()\n except (_socket.error, IOError):\n try:\n raise_socket_error(timeout=timeout)\n except SocketError:\n e = _sys.exc_info()\n try:\n if sock is not None:\n sock.close()\n finally:\n try:\n raise e[0], e[1], e[2]\n finally:\n del e\n return None", "def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, bind_address=None):\n\n msg = \"getaddrinfo returns an empty list\"\n host, port = address\n for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):\n af, socktype, proto, canonname, sa = res\n sock = None\n try:\n sock = socket.socket(af, socktype, proto)\n if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:\n sock.settimeout(timeout)\n if bind_address is not None:\n sock.bind(bind_address)\n sock.connect(sa)\n return sock\n\n except socket.error, msg:\n if sock is not None:\n sock.close()\n\n raise socket.error, msg", "def connect(address):\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect(address)\n sock.setblocking(0)\n return sock", "def connect(self, timeout=1.0):\n if self.socket:\n self.socket.close()\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.settimeout(timeout)\n self.socket.connect((self.ip, self.port))", "def connect(self, host=HOST, port=PORT, timeout=10):\r\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self._socket.connect((host, port))\r\n if timeout is not None:\r\n self._socket.settimeout(timeout)\r\n logger.info('Connected to: %s...', repr((host, port)))", "def OpenSocket(ip_address, port, timeout):\r\n # Create the socket.\r\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n client_socket.settimeout(timeout)\r\n\r\n # Connect to the Smart Car ESP.\r\n try:\r\n client_socket.connect((ip_address, port))\r\n except socket.timeout:\r\n print('Connection timed out connecting to {0}:{1}'.format(ip_address, port))\r\n quit()\r\n except:\r\n print('Error connecting to {0}:{1}: {2}'.format(ip_address, port, sys.exc_info()[0]))\r\n quit()\r\n\r\n return 
client_socket", "def connect(address, ssl_context=None, error_handler=None, **config):\n\n # Establish a connection to the host and port specified\n # Catches refused connections see:\n # https://docs.python.org/2/library/errno.html\n log_debug(\"~~ [RESOLVE] %s\", address)\n last_error = None\n for resolved_address in resolve(address):\n log_debug(\"~~ [RESOLVED] %s -> %s\", address, resolved_address)\n try:\n s = _connect(resolved_address, **config)\n s, der_encoded_server_certificate = _secure(s, address[0], ssl_context, **config)\n connection = _handshake(s, resolved_address, der_encoded_server_certificate, error_handler, **config)\n except Exception as error:\n last_error = error\n else:\n return connection\n if last_error is None:\n raise ServiceUnavailable(\"Failed to resolve addresses for %s\" % address)\n else:\n raise last_error", "def createConnection(addr):\r\n\r\n # cast port number to integer\r\n addr = (addr[0],int(addr[1]))\r\n\r\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n s.settimeout(5)\r\n try:\r\n s.connect(addr)\r\n except (socket.timeout, ConnectionRefusedError):\r\n return None\r\n return s", "def socket(host, port, timeout = 0):\n try:\n return _socket_real(_snc.socket(host, port, _use_ipv6, timeout), host is None)\n except _snc.error_timeout:\n raise ErrorTimeout\n except Exception as e:\n log.error(\"socket error: {0!r}\" . format(e))\n raise Error", "def connect(self):\n try:\n self.sock = socket.create_connection((self.host, self.port), self.connect_timeout)\n except SocketTimeout:\n raise InnerConnectionTimeoutError()\n\n if self.timeout is socket._GLOBAL_DEFAULT_TIMEOUT:\n self.sock.settimeout(socket.getdefaulttimeout())\n else:\n self.sock.settimeout(self.timeout)", "async def connect(addr: Address,\n **kwargs\n ) -> 'Connection':\n reader, writer = await asyncio.open_connection(addr.host, addr.port,\n **kwargs)\n return Connection(reader, writer)", "def connect(self, host: str, port: int, timeout: float) -> None:\n self.socket.settimeout(timeout)\n self.socket.connect((host, port))\n self.socket.settimeout(0)", "def connect(host, port, service=VoidService, config={}, ipv6=False, keepalive=False):\n s = SocketStream.connect(host, port, ipv6=ipv6, keepalive=keepalive)\n return connect_stream(s, service, config)", "def connect(self, params, connect_timeout=_CONNECT_TIMEOUT):\n if connect_timeout is not None:\n connect_timeout = connect_timeout / 1000 # Convert to seconds\n try:\n self._socket = socket.create_connection(params, connect_timeout)\n self._host = params[0]\n except ValueError:\n try:\n self._socket = socket.socket(socket.AF_UNIX)\n self._socket.settimeout(connect_timeout)\n self._socket.connect(params)\n self._is_socket = True\n except AttributeError:\n raise InterfaceError(\"Unix socket unsupported\") from None\n self._socket.settimeout(None)", "def connect(self, address):\n if self.socket is None:\n self.socket = self._create_socket()\n\n try:\n if self.tls_args:\n context, server_hostname = self.tls_args\n self.socket = context.wrap_socket(\n self.socket,\n server_side=False,\n server_hostname=server_hostname,\n )\n # Try and connect to remote at (address, port)\n # raises socket.error if connection refused\n self.socket.connect(address)\n self._is_connected = True\n # Evt2: Transport connection confirmation\n self.event_queue.put('Evt2')\n except (socket.error, socket.timeout) as exc:\n # Log connection failure\n LOGGER.error(\n \"Association request failed: unable to connect to remote\"\n )\n LOGGER.error(\"TCP Initialisation 
Error: Connection refused\")\n # Log exception if TLS issue to help with troubleshooting\n if isinstance(exc, ssl.SSLError):\n LOGGER.exception(exc)\n # Don't be tempted to replace this with a self.close() call -\n # it doesn't work because `_is_connected` is False\n if self.socket:\n try:\n self.socket.shutdown(socket.SHUT_RDWR)\n except:\n pass\n self.socket.close()\n self.socket = None\n self.event_queue.put('Evt17')", "def get_socket():\n return socket.create_connection((HOST, PORT))", "def connect(self, address, **kws):\r\n return Connect(self, address, timeout=self._timeout, **kws)", "def create_connection_nodelay(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):\n msg = \"getaddrinfo returns an empty list\"\n host, port = address\n for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):\n af, socktype, proto, canonname, sa = res\n sock = None\n try:\n sock = socket.socket(af, socktype, proto)\n sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:\n sock.settimeout(timeout)\n sock.connect(sa)\n return sock\n except socket.error as msg:\n if sock is not None:\n sock.close()\n raise socket.error(msg)", "def __connect():\n # Create socket\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # Connect socket to server\n sock.connect((SERVER_IP, SERVER_PORT))\n\n # Return connected socket\n return sock", "def connect(self):\n if not self._socket:\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.connect((self.host, self.port))\n self._socket.settimeout(0.0)", "def create_socket(host_name, timeout=None):\n if timeout is None:\n timeout = 1.0\n\n # Make the socket.\n s_family = socket.AF_INET\n s_type = socket.SOCK_RAW\n s_proto = dpkt.ip.IP_PROTO_ICMP\n\n sock = socket.socket(s_family, s_type, s_proto)\n sock.settimeout(timeout)\n\n # Connect to remote host.\n host_addr = socket.gethostbyname(host_name)\n port = 1 # dummy value\n\n sock.connect( (host_addr, port) )\n\n # Done.\n return sock", "def connect(address):\n try:\n s = socket.socket()\n s.connect(address)\n print(\"Connection Established.\")\n print(f\"Address: {address}\")\n except socket.error as error:\n print(\"Something went wrong... 
more info below.\")\n print(error)\n sys.exit()\n receiver(s)", "def _create_socket(self, address=('', 0)):\n # AF_INET: IPv4, SOCK_STREAM: TCP socket\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # SO_REUSEADDR: reuse the socket in TIME_WAIT state without\n # waiting for its natural timeout to expire\n # Allows local address reuse\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n # If no timeout is set then recv() will block forever if\n # the connection is kept alive with no data sent\n # SO_RCVTIMEO: the timeout on receive calls in seconds\n # set using a packed binary string containing two uint32s as\n # (seconds, microseconds)\n if self.assoc.network_timeout is not None:\n timeout_seconds = int(self.assoc.network_timeout)\n timeout_microsec = int(self.assoc.network_timeout % 1 * 1000)\n sock.setsockopt(\n socket.SOL_SOCKET,\n socket.SO_RCVTIMEO,\n pack('ll', timeout_seconds, timeout_microsec)\n )\n\n sock.bind(address)\n\n self._is_connected = False\n\n return sock", "def create_connection(address):\n\n sock = socks.socksocket()\n sock.connect(address)\n return sock", "def connect(self, timeout: float = 5) -> bool:\n if not self.connected:\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.settimeout(timeout)\n try:\n self.socket.connect((self.host, self.port))\n self.connected = True\n self.socket.setblocking(False)\n self.endpoint = self.socket\n except ConnectionRefusedError:\n pass\n return self.connected", "def connect_to(address):\n \n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect(address)\n sock.setblocking(0)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n print 'client connected to {} '.format(address)\n return sock", "def connect(self):\n try:\n sock = socket.create_connection((self.host, self.port), self.connect_timeout)\n except SocketTimeout:\n raise InnerConnectionTimeoutError()\n\n if self.timeout is socket._GLOBAL_DEFAULT_TIMEOUT:\n sock.settimeout(socket.getdefaulttimeout())\n else:\n sock.settimeout(self.timeout)\n\n if self._tunnel_host:\n self.sock = sock\n self._tunnel()\n self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file)", "def init_tcp_conn(target: str, port: int) -> socket.socket:\n conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n conn.settimeout(5)\n try:\n conn.connect((target, port))\n return conn\n except socket.timeout as e:\n print(e)\n return None", "def connect(self, host=None, port=None):\n host = self.host if host is None else host\n port = self.port if port is None else port\n self.socket.connect(host, port)", "def get_socket_connection(hostname, port):\n for res in socket.getaddrinfo(hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM):\n family, sockettype, protocol, canonname, socketaddress = res\n try:\n sock = socket.socket(family, sockettype, protocol)\n sock.settimeout(10)\n # avoid TCP listen overflows when making back-to-back requests \n sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', 1, 1))\n\n except socket.error, msg:\n sock = None\n continue\n \n try:\n sock.connect(socketaddress)\n except socket.error, msg:\n sock.close()\n sock = None\n continue\n \n break\n\n if sock is None:\n raise ValueError('The script was unable to open a socket to the server')\n else:\n return sock" ]
[ "0.744305", "0.7351968", "0.71428746", "0.7093566", "0.69392675", "0.68707865", "0.6829429", "0.6805412", "0.67925763", "0.6737057", "0.6718961", "0.66749173", "0.6562161", "0.65577984", "0.65336025", "0.64614576", "0.6453029", "0.64274627", "0.6397214", "0.6356111", "0.632934", "0.63293153", "0.63193274", "0.6313374", "0.62930864", "0.6288865", "0.6257285", "0.6246247", "0.62121093", "0.6145063" ]
0.73768765
1
Writes refl and exper files for each experiment modeled during the ensemble refiner
def write_output_files(Xopt, LMP, Modelers, SIM, params): opt_det = geometry_refiner.get_optimized_detector(Xopt, LMP, SIM) # Store the hessian of negative log likelihood for error estimation # must determine total number of refined Fhkls and then create a vector of 0's of that length num_fhkl_param = 0 for name in LMP: if "fcell" in name: num_fhkl_param += 1 diag_hess = np.zeros(num_fhkl_param) if params.geometry.pandas_dir is not None and COMM.rank == 0: if not os.path.exists(params.geometry.pandas_dir): os.makedirs(params.geometry.pandas_dir) refdir = os.path.join(params.geometry.pandas_dir, "refls") expdir = os.path.join(params.geometry.pandas_dir, "expts") for dname in [refdir, expdir]: if not os.path.exists(dname): os.makedirs(dname) all_shot_pred_offsets = [] for i_shot in Modelers: Modeler = Modelers[i_shot] # these are in simtbx.diffBragg.refiners.parameters.RangedParameter objects rotX = LMP["rank%d_shot%d_RotXYZ%d" % (COMM.rank, i_shot, 0)] rotY = LMP["rank%d_shot%d_RotXYZ%d" % (COMM.rank, i_shot, 1)] rotZ = LMP["rank%d_shot%d_RotXYZ%d" % (COMM.rank, i_shot, 2)] num_uc_p = len(Modeler.ucell_man.variables) ucell_pars = [LMP["rank%d_shot%d_Ucell%d" % (COMM.rank, i_shot, i_uc)] for i_uc in range(num_uc_p)] # convert rotation angles back to radians (thats what the parameters.RangedParamter.get_val method does) rotXYZ = rotX.get_val(Xopt[rotX.xpos]), \ rotY.get_val(Xopt[rotY.xpos]), \ rotZ.get_val(Xopt[rotZ.xpos]) # ucell_man is an instance of # simtbx.diffBragg.refiners.crystal_systems.manager.Manager() # (for the correct xtal system) Modeler.ucell_man.variables = [p.get_val(Xopt[p.xpos]) for p in ucell_pars] ucpar = Modeler.ucell_man.unit_cell_parameters new_crystal = hopper_utils.new_cryst_from_rotXYZ_and_ucell(rotXYZ, ucpar, Modeler.E.crystal) new_exp = deepcopy(Modeler.E) new_exp.crystal = new_crystal wave, wt = map(np.array, zip(*Modeler.spectra)) ave_wave = (wave*wt).sum()/wt.sum() new_exp.beam.set_wavelength(ave_wave) new_exp.detector = opt_det Modeler.best_model = model(Xopt, LMP, i_shot, Modeler, SIM, return_model=True) Modeler.best_model_includes_background = True # Get the bragg-only component of model in order to compute hessian terms bragg = Modeler.best_model - Modeler.all_background # store the updated per-roi scale factors in the new refl table roi_scale_factor = flex.double(len(Modeler.refls), 1) for ii, fcell_idx in enumerate(Modeler.fcell_idx_unique): p = LMP["scale_fcell%d" % fcell_idx] scale_fac = p.get_val(Xopt[p.xpos]) slices = Modeler.fcell_idx_slices[fcell_idx] for slc in slices: # update the refl table column roi_refl_ids = Modeler.all_refls_idx[slc] unique_refl_ids = np.unique(roi_refl_ids) for refl_idx in unique_refl_ids: roi_scale_factor[refl_idx] = scale_fac # update the hessian of the log likelihood # first derivative is the Bragg component of the model divided by the scale factor # TODO what if scale_fac is close to 0 ? 
first_deriv = bragg[slc] / scale_fac u = Modeler.all_data[slc] - Modeler.best_model[slc] v = Modeler.best_model[slc] + Modeler.nominal_sigma_rdout**2 one_by_v = 1 / v G = 1 - 2 * u - u * u * one_by_v hessian_coef = one_by_v * (one_by_v * G - 2 - 2 * u * one_by_v - u * u * one_by_v * one_by_v) trusted_slc = Modeler.all_trusted[slc] diag_hess[fcell_idx] += -0.5*(hessian_coef * (first_deriv**2))[trusted_slc].sum() Modeler.refls["global_scale_factor"] = roi_scale_factor # get the new refls new_refl = hopper_utils.get_new_xycalcs(Modeler, new_exp, old_refl_tag="before_geom_ref") new_refl_fname, refl_ext = os.path.splitext(Modeler.refl_name) new_refl_fname = "rank%d_%s_%s%s" % (COMM.rank, os.path.basename(new_refl_fname), params.geometry.optimized_results_tag, refl_ext) if not new_refl_fname.endswith(".refl"): new_refl_fname += ".refl" new_refl_fname = os.path.join(params.geometry.pandas_dir,"refls", new_refl_fname) new_refl.as_file(new_refl_fname) shot_pred_offsets = geometry_refiner.get_dist_from_R(new_refl) all_shot_pred_offsets += list(shot_pred_offsets) new_expt_fname, expt_ext = os.path.splitext(Modeler.exper_name) new_expt_fname = "rank%d_%s_%s%s" % (COMM.rank, os.path.basename(new_expt_fname), params.geometry.optimized_results_tag, expt_ext) if not new_expt_fname.endswith(".expt"): new_expt_fname += ".expt" new_expt_fname = os.path.join(params.geometry.pandas_dir,"expts", new_expt_fname) new_exp_lst = ExperimentList() new_exp_lst.append(new_exp) new_exp_lst.as_file(new_expt_fname) if params.geometry.pandas_dir is not None: a,b,c,al,be,ga = ucpar ncells_p = [LMP["rank%d_shot%d_Nabc%d" % (COMM.rank, i_shot, i)] for i in range(3)] Na,Nb,Nc = [p.get_val(Xopt[p.xpos]) for p in ncells_p] scale_p = LMP["rank%d_shot%d_Scale" %(COMM.rank, i_shot)] scale = scale_p.get_val(Xopt[scale_p.xpos]) _,fluxes = zip(*SIM.beam.spectrum) eta_a = eta_b = eta_c = np.nan df= single_expt_pandas(xtal_scale=scale, Amat=new_crystal.get_A(), ncells_abc=(Na, Nb, Nc), ncells_def=(0,0,0), eta_abc=(eta_a, eta_b, eta_c), diff_gamma=(np.nan, np.nan, np.nan), diff_sigma=(np.nan, np.nan, np.nan), detz_shift=0, use_diffuse=params.use_diffuse_models, gamma_miller_units=params.gamma_miller_units, eta=np.nan, rotXYZ=tuple(rotXYZ), ucell_p = (a,b,c,al,be,ga), ucell_p_init=(np.nan, np.nan, np.nan, np.nan, np.nan, np.nan), lam0_lam1 = (np.nan, np.nan), spec_file=Modeler.spec_name, spec_stride=params.simulator.spectrum.stride, flux=sum(fluxes), beamsize_mm=SIM.beam.size_mm, orig_exp_name=Modeler.exper_name, opt_exp_name=os.path.abspath(new_expt_fname), spec_from_imageset=params.spectrum_from_imageset, oversample=SIM.D.oversample, opt_det=params.opt_det, stg1_refls=Modeler.refl_name, stg1_img_path=None) pandas_name = os.path.splitext(os.path.basename(new_expt_fname))[0] + ".pkl" pandas_name = os.path.join(params.geometry.pandas_dir, pandas_name) df.to_pickle(pandas_name) modeler_name = pandas_name.replace(".pkl", ".npy") np.save(modeler_name, Modeler) all_shot_pred_offsets = COMM.reduce(all_shot_pred_offsets) if COMM.rank==0: median_pred_offset = np.median(all_shot_pred_offsets) else: median_pred_offset = None median_pred_offset = COMM.bcast(median_pred_offset) # reduce the hessian over all shots then compute the errors of the structure factors diag_hess = COMM.reduce(diag_hess) uc_p = np.zeros(6) nshot = 0 for i_shot in Modelers: Mod = Modelers[i_shot] num_uc_p = len(Mod.ucell_man.variables) ucell_pars = [LMP["rank%d_shot%d_Ucell%d" % (COMM.rank, i_shot, i_uc)] for i_uc in range(num_uc_p)] Mod.ucell_man.variables = 
[p.get_val(Xopt[p.xpos]) for p in ucell_pars] uc_p += np.array(Mod.ucell_man.unit_cell_parameters) nshot += 1 nshot = COMM.reduce(nshot) uc_p = COMM.reduce(uc_p) if COMM.rank==0: ave_uc_p = uc_p / nshot fhkl_file = os.path.join(params.geometry.pandas_dir, "final_merge.mtz") F = SIM.crystal.miller_array Fmap = {h: amp for h, amp in zip(F.indices(), F.data())} with np.errstate(divide='ignore', invalid='ignore'): scale_variance = 1 / diag_hess indices = flex.miller_index() data = flex.double() sigmas = flex.double() for fcell_idx in range(num_fhkl_param): pname = "scale_fcell%d" % fcell_idx p = LMP[pname] scale = p.get_val(Xopt[p.xpos]) hkl = SIM.asu_from_idx[fcell_idx] F_no_scale = Fmap[hkl] Ihkl = scale* F_no_scale**2 Fhkl = np.sqrt(Ihkl) var_scale = scale_variance[fcell_idx] if var_scale <= 0: continue sig_F = 0.5*F_no_scale / np.sqrt(scale) * np.sqrt(var_scale) if np.isinf(sig_F): continue indices.append(hkl) data.append(Fhkl) sigmas.append(sig_F) # store an optimized mtz, and a numpy array with the same information sym = crystal.symmetry(tuple(ave_uc_p), SIM.crystal.symbol) mset = miller.set(sym, indices, True) ma = miller.array(mset, data, sigmas) ma = ma.set_observation_type_xray_amplitude().as_anomalous_array() ma.as_mtz_dataset(column_root_label="F").mtz_object().write(fhkl_file) return median_pred_offset
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _OpenOutputFiles(self):\n self.gfile = open(self.geomout, \"w\")\n self.efile = open(self.energyout, \"w\")\n self.PrintEnergyHeader()", "def write_data_model(doc_filename='data/documents.txt'):\n\n numiters = num_iters(doc_filename) + 1\n print 'number of iterations:', numiters - 1\n\n pickles = ['data/data_model_%s.pkl' % i for i in range(1, numiters)]\n doc_filename = doc_filename.strip('.txt')\n files = ['%s-%s.txt' % (doc_filename, i) for i in range(1, numiters)]\n\n with open('data/data_model.pkl', 'wb') as pklfile:\n for i in range(numiters - 1):\n write_partial_model(pklfile, files[i])", "def _process_datasets_all_frames(self):\n datasets = os.listdir(self.separated_root)\n for dataset in datasets:\n dataset_path = join(self.separated_root, dataset)\n\n for model in self.models:\n\n attacks_list = os.listdir(dataset_path)\n\n for attack in attacks_list:\n attack_path = join(dataset_path, attack)\n\n for prop in self.properties:\n property_alias = prop.get_property_alias()\n\n if os.path.exists(\n join(self.output_features, dataset, attack, property_alias, model.alias)):\n print('%s already extracted features' % dataset)\n continue\n\n path_train = join(attack_path, self.train_alias)\n path_test = join(attack_path, self.test_alias)\n\n X_train, y_train, indexes_train, samples_train = self._get_dataset_contents(path_train,\n property_alias)\n X_test, y_test, indexes_test, samples_test = self._get_dataset_contents(path_test,\n property_alias)\n\n output_features = join(self.output_features, dataset, attack, property_alias, model.alias)\n\n features_train = self._fetch_features(X_train, model, output_features, self.train_alias)\n features_test = self._fetch_features(X_test, model, output_features, self.test_alias)\n\n # saving features\n np.save(join(output_features, (NAME_FEATURES % self.train_alias)), features_train)\n np.save(join(output_features, (NAME_FEATURES % self.test_alias)), features_test)\n\n # saving targets\n np.save(join(output_features, (NAME_TARGETS % self.train_alias)), y_train)\n np.save(join(output_features, (NAME_TARGETS % self.test_alias)), y_test)\n np.save(join(output_features, (NAME_TARGETS % self.test_alias)), y_test)\n\n # saving samples names\n self.__save_txt(join(output_features, (NAME_SAMPLES % self.train_alias)), samples_train)\n self.__save_txt(join(output_features, (NAME_SAMPLES % self.test_alias)), samples_test)", "def pickle_dump_files():\n with open('data/' + dataset_name + '_' + model_name + '_' + 'predictions', 'wb') as f:\n pickle.dump(predictions, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'state_sentences', 'wb') as f:\n pickle.dump(final_state_sentences, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'decoded_sentences', 'wb') as f:\n pickle.dump(final_decoded_sentences, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'ids', 'wb') as f:\n pickle.dump(idx, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'exemplars', 'wb') as f:\n pickle.dump(exemplars, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'counter_exemplars', 'wb') as f:\n pickle.dump(counter_exemplars, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'top_exemplar_words', 'wb') as f:\n pickle.dump(top_exemplar_words, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'top_counter_exemplar_words', 'wb') as f:\n pickle.dump(top_counter_exemplar_words, f)", "def md_writer(clf, features, outcome, eval_folder,\n config_file, summary_df, i=''):\n if 
config_file.endswith('.xlsx'):\n config = pd.read_excel(config_file, sheetname='Sheet1')\n elif config_file.endswith('.csv'):\n config = pd.read_csv(config_file)\n\n clf_params = clf.get_params()\n clf_name = str(clf)[:str(clf).index('(')]\n clf_img = clf_name+str(i)\n\n file_name = clf_name+str(i)+'_Evaluation.md'\n\n save_file = open(eval_folder+file_name, 'w')\n\n def new_line():\n save_file.write('\\n')\n\n save_file.write('<link rel=\"stylesheet\" href=\"style.css\" type=\"text/css\" />\\n')\n save_file.write('# Model Evaluation Report\\n')\n new_line()\n\n save_file.write('## Data Configuration:\\n')\n new_line()\n save_file.write(config.to_html(na_rep='', index=False).replace('NaT', ''))\n new_line()\n\n save_file.write('## Classifier Parameters: '+clf_name+'\\n')\n new_line()\n for elem in clf_params:\n save_file.write('* {}: {}\\n'.format(elem, clf_params[elem]))\n new_line()\n\n summary_df = summary_df.T\n summary_df.columns = ['value']\n\n save_file.write('## Evaluation Metrics; Summary\\n')\n new_line()\n save_file.write(summary_df.to_html())\n new_line()\n\n save_file.write('## ROC Curve\\n')\n new_line()\n save_file.write('![mis](images/ROC_Curve_'+clf_img+'.png)\\n')\n new_line()\n\n save_file.write('## Precision-Recall Curve\\n')\n new_line()\n save_file.write('![mis](images/PR_Curve_'+clf_img+'.png)\\n')\n new_line()\n\n save_file.write('## Precision, Recall vs % Population\\n')\n new_line()\n save_file.write('![mis](images/PRATN_Curve_'+clf_img+'.png)\\n')\n\n if clf_name in ['LogisticRegression']:\n save_file.write('## Coefficients\\n')\n new_line()\n for i,coef in enumerate(clf.coef_[0]):\n save_file.write('*<b>{}: {}</b>\\n'.format(features[i], round(coef,4)))\n new_line()\n\n if clf_name in ['WeightedQuestions']:\n save_file.write('## Weights\\n')\n new_line()\n for i,wt in enumerate(clf.weights):\n save_file.write('*<b>{}: {}</b>\\n'.format(features[i], wt))\n new_line()\n\n save_file.close()\n\n def markdown_to_html(md_file, out_file_name=None):\n import markdown\n\n with open(md_file, 'r') as f:\n html = markdown.markdown(f.read())\n\n if out_file_name is None:\n out_file_name = md_file.split('.')[0]+'.html'\n with open(out_file_name, 'w') as f:\n f.write(html)\n\n markdown_to_html(eval_folder+file_name)", "def write_all_agents():\n os.makedirs('agent_refs', exist_ok=True)\n agents = [\n name\n for _, name, _ in pkgutil.iter_modules(\n [os.path.dirname(parlai.agents.__file__)]\n )\n ]\n for agent in agents:\n with open(f'agent_refs/{agent}.md', 'w') as fout:\n fout.write(prepare_agent_readme(agent))", "def _export_reference_representations(self):\n\n self.logger.msg1(\"Saving reference representations\")\n general_refset, _ = get_refsets(self.dbpath)\n general_refset.save(self.rootpath+\"-references\", \"phenotype\")", "def store_models(self) -> None:\n\n # Iterate over the learner types (for which there will be\n # separate instances for each sub-experiment of the\n # cross-validation experiment)\n for learner_name in self.cv_learners_:\n loginfo('Saving {0} model files to disk...'.format(learner_name))\n for i, estimator in enumerate(self.cv_learners_[learner_name]):\n loginfo('Saving {0} model file #{1}'.format(learner_name, i + 1))\n joblib.dump(estimator,\n self.model_path_template_.format(learner_name, i + 1))", "def export_embeddings(self):\n save_path = self.config.path_embeddings / self.model.model_name\n save_path.mkdir(parents=True, exist_ok=True)\n \n idx2ent = self.model.config.knowledge_graph.read_cache_data('idx2entity')\n idx2rel = 
self.model.config.knowledge_graph.read_cache_data('idx2relation')\n\n\n series_ent = pd.Series(idx2ent)\n series_rel = pd.Series(idx2rel)\n series_ent.to_pickle(save_path / \"ent_labels.pickle\")\n series_rel.to_pickle(save_path / \"rel_labels.pickle\")\n\n with open(str(save_path / \"ent_labels.tsv\"), 'w') as l_export_file:\n for label in idx2ent.values():\n l_export_file.write(label + \"\\n\")\n\n with open(str(save_path / \"rel_labels.tsv\"), 'w') as l_export_file:\n for label in idx2rel.values():\n l_export_file.write(label + \"\\n\")\n\n for parameter in self.model.parameter_list:\n all_ids = list(range(0, int(parameter.shape[0])))\n stored_name = parameter.name.split(':')[0]\n # import pdb; pdb.set_trace()\n\n if len(parameter.shape) == 2:\n all_embs = parameter.numpy()\n with open(str(save_path / (\"%s.tsv\" % stored_name)), 'w') as v_export_file:\n for idx in all_ids:\n v_export_file.write(\"\\t\".join([str(x) for x in all_embs[idx]]) + \"\\n\")\n\n df = pd.DataFrame(all_embs)\n df.to_pickle(save_path / (\"%s.pickle\" % stored_name))", "def save_reconstructions(reconstructions, out_dir):\n print(\"save reconstruction\")\n out_dir.mkdir(exist_ok=True)\n for fname, recons in reconstructions.items():\n print(\"fname\",fname)\n with h5py.File(out_dir / fname, 'w') as f:\n f.create_dataset('reconstruction', data=recons)", "def refitandAnalyse(refit=True, usepickle=False, savedb=False):\n\n models = ['Modified Hill']\n '''models = ['1 pKa 2 Chemical shifts', 'Modified Hill',\n '2 pKas, 3 Chemical shifts',\n '3 pKas, 4 Chemical shifts']'''\n\n for e in ekindicts:\n if usepickle == True:\n filepi = open('ekindict_'+e, 'r')\n ekindicts[e] = pickle.load(filepi)\n filepi.close()\n elif refit == True:\n t.fitAll(ekindicts[e], models, strictchecking=False)\n filepi = open('ekindict_'+e, 'w')\n pickle.dump(ekindicts[e], filepi)\n filepi.close()\n\n #p = t.extractpKas(ekindicts[e])\n saveout = sys.stdout\n fsock = open('pkastab_'+e+'.html', 'w')\n sys.stdout = fsock\n\n #p=t.extractpKas(ekindicts[e], silent=True)\n p, img1, img2 = t.analysepKas(ekindicts[e], silent=True, prefix=e)#, satoms=['H','HB*'])\n t.makepKasTable(p, primary=True)\n #t.getExpErrors(e, xuncert=0.1, yuncert=yuncerts[i])\n #t.returnData()\n sys.stdout = saveout\n #analyseHill(ekindicts)\n\n #saveout = sys.stdout\n #fsock = open('fit_stats.html', 'w')\n #sys.stdout = fsock\n #t.dotitDBStats(ekindicts)\n #t.compareNuclei(ekindicts['15N NMR'], ekindicts['1H NMR'])\n #sys.stdout = saveout\n\n return", "def output_files(self):\n # Output file for Moller generation\n if 'moller' in self.name:\n return ['moller.stdhep']\n # Output file for beam generation\n return ['beam.stdhep']", "def write_data_files(self):\n \n logging.info('\\n Start writing data files \\n')\n \n for i, (data_file, label_file) in enumerate(self.files):\n data_file, label_file = Path(data_file), Path(label_file)\n logging.info('Writing .hdf5 file for : [{}]'.format(str(data_file)))\n \n file_name = self.save_data_folder / '{}.hdf5'.format(label_file.name[:-4])\n if file_name.exists():\n continue\n \n with h5py.File(str(file_name), 'w') as writer:\n self.serialize_samples(\n writer, data_file, label_file)", "def saving_cali_models(target_calib_model, config):\n\n print('Saving Models')\n for size, calibration_model in target_calib_model.items():\n for model_name, model_objs in calibration_model.items():\n out_cali_filename = config.model_path + \\\n '{0}_{1}_{2}mm_calibration_random_cv.pkl'.format(\n model_name.replace(\" \", \"-\"),\n 
config.target_data_names,size)\n print('Writing out: {0}'.format(out_cali_filename)) \n pickle.dump(model_objs,open(out_cali_filename,'wb'))\n return", "def save_reconstructions(reconstructions, out_dir):\n if (not (os.path.exists(out_dir))):\n os.mkdir(out_dir)\n out_dir.mkdir(exist_ok=True)\n print('Saved directory is',out_dir)\n for fname, recons in reconstructions.items():\n with h5py.File(out_dir / fname, 'w') as f:\n f.create_dataset('reconstruction', data=recons)", "def save_features_to_file(self):\n if not os.path.exists(self.features_save_path):\n os.makedirs(self.features_save_path)\n for s in self.sets:\n self.save_features_to_file_by_set(s)", "def write_maps(self):\n if np.allclose(self.xmap.origin, 0):\n ext = \"ccp4\"\n else:\n ext = \"mrc\"\n\n for q, coor, b in zip(self._occupancies, self._coor_set, self._bs):\n self.conformer.q = q\n self.conformer.coor = coor\n self.conformer.b = b\n self._transformer.density()\n fname = os.path.join(self.directory_name, f\"model.{ext}\")\n self._transformer.xmap.tofile(fname)\n self._transformer.xmap.array -= self.xmap.array\n fname = os.path.join(self.directory_name, f\"diff.{ext}\")\n self._transformer.xmap.tofile(fname)\n self._transformer.reset(full=True)", "def reffile_setup(self):\n # Prepare to find files listed as 'config'\n # and set up PSF path\n\n # set up as dictionary of dictionaries\n self.configfiles = {}\n self.psfpath = {}\n self.psfbasename = {}\n self.psfpixfrac = {}\n self.reference_file_dir = {}\n\n for instrument in 'nircam niriss fgs'.split():\n self.configfiles[instrument] = {}\n self.psfpath[instrument] = os.path.join(self.datadir, instrument, 'gridded_psf_library')\n self.psfbasename[instrument] = instrument\n self.reference_file_dir[instrument] = os.path.join(self.datadir, instrument, 'reference_files')\n\n # Set instrument-specific file paths\n if instrument == 'nircam':\n self.psfpixfrac[instrument] = 0.25\n elif instrument == 'niriss':\n self.psfpixfrac[instrument] = 0.1\n elif instrument == 'fgs':\n self.psfpixfrac[instrument] = 0.1\n\n # Set global file paths\n self.configfiles[instrument]['filter_throughput'] = os.path.join(self.modpath, 'config', 'placeholder.txt')\n\n for instrument in 'miri nirspec'.split():\n self.configfiles[instrument] = {}\n self.psfpixfrac[instrument] = 0\n self.psfbasename[instrument] = 'N/A'\n\n # create empty dictionaries\n list_names = 'superbias linearity gain saturation ipc astrometric photom pam dark lindark'.split()\n for list_name in list_names:\n setattr(self, '{}_list'.format(list_name), {})\n\n self.det_list = {}\n self.det_list['nircam'] = ['A1', 'A2', 'A3', 'A4', 'A5', 'B1', 'B2', 'B3', 'B4', 'B5']\n self.det_list['niriss'] = ['NIS']\n self.det_list['fgs'] = ['G1', 'G2']\n self.det_list['nirspec'] = ['NRS']\n self.det_list['miri'] = ['MIR']\n\n for instrument in 'nircam niriss fgs miri nirspec'.split():\n for list_name in list_names:\n getattr(self, '{}_list'.format(list_name))[instrument] = {}\n\n if self.offline:\n # no access to central store. 
Set all files to none.\n for list_name in list_names:\n if list_name in 'dark lindark'.split():\n default_value = ['None']\n else:\n default_value = 'None'\n for det in self.det_list[instrument]:\n getattr(self, '{}_list'.format(list_name))[instrument][det] = default_value\n\n elif instrument == 'nircam':\n rawdark_dir = os.path.join(self.datadir, 'nircam/darks/raw')\n lindark_dir = os.path.join(self.datadir, 'nircam/darks/linearized')\n for det in self.det_list[instrument]:\n self.dark_list[instrument][det] = glob(os.path.join(rawdark_dir, det, '*.fits'))\n self.lindark_list[instrument][det] = glob(os.path.join(lindark_dir, det, '*.fits'))\n\n elif instrument in ['nirspec', 'miri']:\n for key in 'subarray_def_file fluxcal filtpupil_pairs readpatt_def_file crosstalk ' \\\n 'dq_init_config saturation_config superbias_config refpix_config ' \\\n 'linearity_config filter_throughput'.split():\n self.configfiles[instrument][key] = 'N/A'\n default_value = 'none'\n for list_name in list_names:\n for det in self.det_list[instrument]:\n getattr(self, '{}_list'.format(list_name))[instrument][det] = default_value\n\n else: # niriss and fgs\n for det in self.det_list[instrument]:\n if det == 'G1':\n self.dark_list[instrument][det] = glob(os.path.join(self.datadir, 'fgs/darks/raw', FGS1_DARK_SEARCH_STRING))\n self.lindark_list[instrument][det] = glob(os.path.join(self.datadir, 'fgs/darks/linearized', FGS1_DARK_SEARCH_STRING))\n\n elif det == 'G2':\n self.dark_list[instrument][det] = glob(os.path.join(self.datadir, 'fgs/darks/raw', FGS2_DARK_SEARCH_STRING))\n self.lindark_list[instrument][det] = glob(os.path.join(self.datadir, 'fgs/darks/linearized', FGS2_DARK_SEARCH_STRING))\n\n elif det == 'NIS':\n self.dark_list[instrument][det] = glob(os.path.join(self.datadir, 'niriss/darks/raw',\n '*uncal.fits'))\n self.lindark_list[instrument][det] = glob(os.path.join(self.datadir, 'niriss/darks/linearized',\n '*linear_dark_prep_object.fits'))", "def writeAfter(self, model=None, histories=None, results={}, saveModel=False):\n# Write out everything new we know after running the experiment\n# Will append to the existing file\n with open(self.filename,'a') as f:\n finish = datetime.datetime.now()\n f.write( \"Finish: {}\\n\".format( finish ) )\n f.write( \"Elapsed: {}\\n\".format( finish-self.start ) )\n if model is not None:\n summ_list = []\n model.summary(print_fn=lambda x: summ_list.append(x))\n f.write( \"Model:\\n\" )\n for summ in summ_list:\n f.write( ' {}\\n'.format(summ) )\n f.write( \"Results:\\n\" )\n for key,value in results.items():\n f.write( \" {}: {}\\n\".format( key, value ) )\n if model is not None and saveModel:\n fname = os.path.join( self.dir_name, self.name+\"_model.json\" )\n with open(fname,'w') as f:\n f.write(model.to_json())\n fname = os.path.join( self.dir_name, self.name+\"_weights.h5\" )\n model.save_weights(fname)\n if histories is not None:\n try:\n his_fname = os.path.join(self.dir_name, \"histories.pickle\")\n with open(his_fname, 'wb') as f:\n pickle.dump( histories, f, pickle.HIGHEST_PROTOCOL)\n except Exception as ex:\n print( \"Failed to write history ({}) to {}\\n {}\".format( type(histories), his_fname, ex ) )", "def write_embeddings_to_file(self):\n modes = [self.generator, self.discriminator]\n for i in range(2):\n embedding_matrix = modes[i].embedding_matrix\n embedding_matrix = embedding_matrix.detach().to('cpu').numpy()\n index = np.array(range(self.n_node)).reshape(-1, 1)\n embedding_matrix = np.hstack([index, embedding_matrix])\n embedding_list = 
embedding_matrix.tolist()\n embedding_str = [str(int(emb[0])) + \"\\t\" + \"\\t\".join([str(x) for x in emb[1:]]) + \"\\n\" \n for emb in embedding_list]\n with open(config.emb_filenames[i], \"w+\") as f:\n lines = [str(self.n_node) + \"\\t\" + str(config.n_emb) + \"\\n\"] + embedding_str\n f.writelines(lines)", "def _export_model_representations(self, config):\n\n self.logger.msg1(\"Preparing model representations\")\n modelsets = get_modelsets(self.dbpath, self.obo, config.partition_size)\n prefix = self.rootpath + \"-models-\"\n for i, refset in enumerate(modelsets):\n progress = str(i+1) + \"/\" + str(len(modelsets))\n self.logger.msg1(\"Saving model representations: \"+progress)\n refset.save(prefix + str(i+1), \"phenotype\", what=(\"data\",))", "def save_reconstructions(reconstructions, out_dir):\n out_dir.mkdir(exist_ok=True)\n for fname, recons in reconstructions.items():\n with h5py.File(out_dir / fname, 'w') as f:\n f.create_dataset('reconstruction', data=recons)", "def pickle_data(self):\n if 'data_sets.pckl' in self.expected_pickles:\n to_file(\n self.data_sets,\n os.path.join(self.logdir, 'data_sets.pckl')\n )\n if 'all_params.pckl' in self.expected_pickles:\n to_file(\n self.all_params,\n os.path.join(self.logdir, 'all_params.pckl')\n )\n if 'labels.pckl' in self.expected_pickles:\n to_file(\n self.labels,\n os.path.join(self.logdir, 'labels.pckl')\n )\n if 'minimiser_info.pckl' in self.expected_pickles:\n to_file(\n self.minimiser_info,\n os.path.join(self.logdir, 'minimiser_info.pckl')\n )", "def save_model(para_grid_length, clfs, result, result_path):\n for para_index in range(para_grid_length):\n bm = result['para_index_'+str(para_index)]['bm']\n for clf_string in clfs:\n bm_name = 'bm_' + clf_string\n model = bm[bm_name]['learner']\n filename = result_path + '/' + str(para_index) + 'prepro_' + clf_string + '.sav'\n pickle.dump(model, open(filename, 'wb'))", "def create_output_files(self):\n namenode = self.runner.namenode\n for i in range(self.cnt_reducers):\n fname = '%s.%s' % (self.output_dir, reduce_output(self.id, i))\n namenode.create_file(fname)\n self.result_files.append(fname)\n self.open_files.append(fname)\n\n for j in range(self.cnt_mappers):\n fname = map_output(self.id, j, i)\n namenode.create_file(fname)\n self.open_files.append(fname)", "def save_figures(expt):\n if isinstance(expt, str):\n expt = get_experiment(expt)\n \n tr_expt = get_training_expt(expt)\n\n storage.ensure_directory(expt.figures_dir())\n\n for it in tr_expt.save_after:\n for avg in AVG_VALS:\n print 'Iteration', it\n try:\n rbm = load_rbm(expt, it, avg)\n except:\n continue\n final_states = storage.load(expt.final_states_file(it, avg))\n gibbs_states = storage.load(expt.gibbs_states_file(it, avg))\n\n fig = rbm_vis.show_particles(rbm, final_states, expt.dataset)\n misc.save_image(fig, expt.final_states_figure_file(it, avg))\n\n fig = rbm_vis.show_particles(rbm, gibbs_states, expt.dataset)\n misc.save_image(fig, expt.gibbs_states_figure_file(it, avg))\n\n print_log_probs(expt, open(expt.log_probs_text_file(), 'w'))", "def add_reffile_overrides(self):\n all_obs_info, unique_obs_info = self.info_for_all_observations()\n\n # Add empty placeholders for reference file entries\n empty_col = np.array([' ' * 500] * len(self.info['Instrument']))\n superbias_arr = deepcopy(empty_col)\n linearity_arr = deepcopy(empty_col)\n saturation_arr = deepcopy(empty_col)\n gain_arr = deepcopy(empty_col)\n distortion_arr = deepcopy(empty_col)\n photom_arr = deepcopy(empty_col)\n ipc_arr = deepcopy(empty_col)\n 
transmission_arr = deepcopy(empty_col)\n badpixmask_arr = deepcopy(empty_col)\n pixelflat_arr = deepcopy(empty_col)\n\n # Loop over combinations, create metadata dict, and get reffiles\n for status in unique_obs_info:\n updated_status = deepcopy(status)\n (instrument, detector, filtername, pupilname, readpattern, exptype) = status\n\n if instrument == 'FGS':\n if detector in ['G1', 'G2']:\n detector = detector.replace('G', 'GUIDER')\n updated_status = (instrument, detector, filtername, pupilname, readpattern, exptype)\n\n # If the user entered reference files in self.reffile_defaults\n # use those over what comes from the CRDS query\n #sbias, lin, sat, gainfile, dist, ipcfile, pam = self.reffiles_from_dict(status)\n manual_reffiles = self.reffiles_from_dict(updated_status)\n for key in manual_reffiles:\n if manual_reffiles[key] == 'none':\n manual_reffiles[key] = 'crds'\n\n # Identify entries in the original list that use this combination\n match = [i for i, item in enumerate(all_obs_info) if item==status]\n\n # Populate the reference file names for the matching entries\n superbias_arr[match] = manual_reffiles['superbias']\n linearity_arr[match] = manual_reffiles['linearity']\n saturation_arr[match] = manual_reffiles['saturation']\n gain_arr[match] = manual_reffiles['gain']\n distortion_arr[match] = manual_reffiles['distortion']\n photom_arr[match] = manual_reffiles['photom']\n ipc_arr[match] = manual_reffiles['ipc']\n transmission_arr[match] = manual_reffiles['transmission']\n badpixmask_arr[match] = manual_reffiles['badpixmask']\n pixelflat_arr[match] = manual_reffiles['pixelflat']\n\n self.info['superbias'] = list(superbias_arr)\n self.info['linearity'] = list(linearity_arr)\n self.info['saturation'] = list(saturation_arr)\n self.info['gain'] = list(gain_arr)\n self.info['astrometric'] = list(distortion_arr)\n self.info['photom'] = list(photom_arr)\n self.info['ipc'] = list(ipc_arr)\n self.info['transmission'] = list(transmission_arr)\n self.info['badpixmask'] = list(badpixmask_arr)\n self.info['pixelflat'] = list(pixelflat_arr)", "def __init__(self, features, nonfeature_columns, out_filename):\n super(WriteRefAltHandler).__init__()\n\n self.needs_base_pred = True\n self.ref_writer = WritePredictionsHandler(\n features, nonfeature_columns, \"{0}.ref\".format(out_filename))\n self.alt_writer = WritePredictionsHandler(\n features, nonfeature_columns, \"{0}.alt\".format(out_filename))", "def setup_files(args):\n postfix = 'reinforce'\n has_value_model = False\n if args.baseline:\n postfix = \"reinforce-baseline\"\n has_value_model = True\n elif args.actor_critic:\n postfix = \"actor-critic\"\n has_value_model = True\n elif args.a2c:\n postfix = \"a2c\"\n has_value_model = True\n elif args.random:\n postfix = \"random\"\n\n # create the folder for log files\n try:\n os.mkdir(postfix)\n except FileExistsError:\n print(postfix, \" folder exists\")\n\n fileid = \"%s-%d\" % (postfix, int(time.time()))\n actor_weights = \"actor_weights-%s.h5\" % fileid\n actor_weights = os.path.join(postfix, actor_weights)\n encoder_weights = \"encoder_weights-%s.h5\" % fileid\n encoder_weights = os.path.join(postfix, encoder_weights)\n value_weights = None\n if has_value_model:\n value_weights = \"value_weights-%s.h5\" % fileid\n value_weights = os.path.join(postfix, value_weights)\n\n outdir = \"/tmp/%s\" % postfix\n\n misc = (postfix, fileid, outdir, has_value_model)\n weights = (actor_weights, encoder_weights, value_weights)\n\n return weights, misc", "def ReconEpis(self):\n run = zeros(100)\n if 
self.verbose:\n print 'Reconstruct EPIs'\n for pfile in self.pfiles_recon:\n if self.info[pfile]['refdat'] is None:\n# Find the ref.dat file later.\n continue\n if self.info[pfile]['compression'] is not None:\n# Data are compressed, copy to tmp.\n compression = self.info[pfile]['compression']\n\n pfile_decomp = '%s/%s' % (self.tmpdir, \\\n os.path.basename(self.info[pfile]['pfile_decomp']))\n if os.path.exists(pfile_decomp):\n errstr = 'Attempting to overwrite existing p-file (%s)' % pfile_decomp + \\\n ' in ReconEpis'\n\n cmd = '%s %s > %s' % \\\n (decompress_cmds[compression], pfile, pfile_decomp)\n self.ExecCmd(cmd)\n else:\n# Create a link on /tmp to the pfile so the link to ref.dat will also\n# be on /tmp, (which is always writeable.)\n pfile_decomp = '%s/%s' % (self.tmpdir, os.path.basename(pfile))\n if not os.path.exists(pfile_decomp):\n os.symlink(pfile, pfile_decomp)\n refname, refcmpress = self.CheckCompression( \\\n self.info[pfile]['refdat'])\n if refcmpress is not None:\n refdat_decomp = '%s/%s' % (self.tmpdir, os.path.basename(refname))\n cmd = '%s %s > %s' % \\\n (decompress_cmds[refcmpress], \\\n self.info[pfile]['refdat'], refdat_decomp)\n self.ExecCmd(cmd)\n else:\n refdat_decomp = self.info[pfile]['refdat']\n if refdat_decomp is not None:\n if refdat_decomp != 'ref.dat':\n# Create link bearing the file name epirecon_ex expects.\n refdat_link = '%s/ref.dat' % self.tmpdir\n if not os.path.exists(refdat_link):\n if self.verbose:\n print 'ln -s %s %s' % (refdat_decomp, refdat_link)\n if os.path.islink(refdat_link):\n# ref.dat is a broken symbolic link.\n if self.verbose:\n print 'rm %s' % ref_file\n os.remove(refdat_link)\n try:\n os.symlink(refdat_decomp, refdat_link)\n except OSError:\n self.errors = True\n pfile_link = '%s/%s' % (self.tmpdir, os.path.basename(pfile_decomp))\n os.symlink(pfile_decomp, pfile_link)\n os.symlink(refdat_decomp, '%s/ref.dat' % self.tmpdir)\n\n series = int(self.info[pfile]['series'])\n run[series] = run[series] + 1\n epiname = self.info[pfile]['imgfile']\n cmd = 'epirecon_ex -F -f %s -NAME %s -fmt brik -skip %d' % \\\n (pfile_decomp, epiname, self.skip)\n fname = '%s+orig.BRIK' % epiname\n self.CheckExec(cmd, [fname])\n# self.epi_prefixes[pfile] = self.info[pfile]['imgfile']\n else:\n errstr = '*******************************************\\n' + \\\n 'No ref.dat file exists for %s\\n' % pfile + \\\n '*******************************************\\n'\n self.error_log = self.error_log + errstr\n self.f_crash.write(errstr)" ]
[ "0.5975395", "0.59592813", "0.58979046", "0.58952695", "0.5826259", "0.58093685", "0.58026266", "0.5795915", "0.57447165", "0.57213265", "0.57084817", "0.56667113", "0.56539685", "0.562931", "0.56153154", "0.5604128", "0.5590978", "0.5588866", "0.55703115", "0.55635935", "0.5559943", "0.5549233", "0.55472946", "0.5531428", "0.5519189", "0.5511236", "0.5506128", "0.548992", "0.5486524", "0.548203" ]
0.62107396
0
Given a URL, try to return its associated region, bucket, and key names based on this object's endpoint info as well as all S3 endpoints given in the configuration.
def resolve_url_to_location(self, url): parsed_url = six.moves.urllib.parse.urlparse(url) if not parsed_url.scheme: parsed_url = six.moves.urllib.parse.urlparse('http://' + url) parsed_own_url = six.moves.urllib.parse.urlparse(self.endpoint) bucket, key = self.__match_path(parsed_url, parsed_own_url) if bucket: return self.region_name, bucket, key else: # Try to look it up in the config s3_urls = self.config.get_all_region_options('s3-url') for section, conf_url in s3_urls.items(): parsed_conf_url = six.moves.urllib.parse.urlparse(conf_url) bucket, key = self.__match_path(parsed_url, parsed_conf_url) if bucket: region = self.config.get_region_option('name', region=section) return region or section, bucket, key raise ValueError("URL '{0}' matches no known object storage " "endpoints. Supply one via the command line or " "configuration.".format(url))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_config_for_bucket(self, base_url, extra_configurations=None):\n\n warnings.warn(\n \"Use backend_config.bucket_config.BucketList.get_config_for_uri\",\n DeprecationWarning,\n )\n configs = S3BucketConfig.from_list(self.get(\"sdk.aws.s3.credentials\", []))\n if extra_configurations:\n configs.extend(extra_configurations)\n\n def find_match(host=None, bucket=None):\n if not host and not bucket:\n raise ValueError(\"host or bucket required\")\n try:\n if host:\n res = {\n config\n for config in configs\n if (config.host and fnmatch(host, config.host))\n and (\n not bucket\n or not config.bucket\n or fnmatch(bucket.lower(), config.bucket.lower())\n )\n }\n else:\n res = {\n config\n for config in configs\n if config.bucket\n and fnmatch(bucket.lower(), config.bucket.lower())\n }\n return next(iter(res))\n except StopIteration:\n pass\n\n parsed = urlparse(base_url)\n parts = Path(parsed.path.strip(\"/\")).parts\n if parsed.netloc:\n # We have a netloc (either an actual hostname or an AWS bucket name).\n # First, we'll try with the netloc as host, but if we don't find anything, we'll try without a host and\n # with the netloc as the bucket name\n match = None\n if parts:\n # try host/bucket only if path parts contain any element\n match = find_match(host=parsed.netloc, bucket=parts[0])\n if not match:\n # no path parts or no config found for host/bucket, try netloc as bucket\n match = find_match(bucket=parsed.netloc)\n else:\n # No netloc, so we'll simply search by bucket\n match = find_match(bucket=parts[0])\n\n if match:\n return match\n\n non_aws_s3_host_suffix = \":9000\"\n if parsed.netloc.endswith(non_aws_s3_host_suffix):\n host = parsed.netloc\n bucket = parts[0] if parts else None\n else:\n host = None\n bucket = parsed.netloc\n\n return S3BucketConfig(\n key=self.get(\"sdk.aws.s3.key\", None),\n secret=self.get(\"sdk.aws.s3.secret\", None),\n region=self.get(\"sdk.aws.s3.region\", None),\n use_credentials_chain=self.get(\"sdk.aws.s3.use_credentials_chain\", None),\n multipart=True,\n bucket=bucket,\n host=host,\n extra_args=self.get(\"sdk.aws.s3.extra_args\", None),\n )", "def inspect_bucket_region(bucket, s3_endpoint, allow_public=False):\n region = None\n s3_endpoint_parts = urlparse.urlparse(s3_endpoint)\n # Use a \"path-style\" S3 URL here to avoid failing TLS certificate validation\n # on buckets with a dot in the name.\n #\n # According to the following blog post, before deprecating path-style\n # URLs AWS will provide a way for virtual-hosted-style URLs to handle\n # buckets with dots in their names. Using path-style URLs here in\n # the meantime seems reasonable, compared to alternatives like forcing\n # HTTP or ignoring certificate validation.\n #\n # https://aws.amazon.com/blogs/aws/amazon-s3-path-deprecation-plan-the-rest-of-the-story/\n bucket_endpoint = f'https://{s3_endpoint_parts.netloc}/{bucket}'\n request = Request(bucket_endpoint, method='HEAD')\n try:\n # For private buckets the head request will always raise an\n # http error, the status code and response headers provide\n # context for where the bucket is. For public buckets we\n # default to raising an exception as unsuitable location at\n # least for the output use case.\n #\n # Dynamic use of urllib trips up static analyzers because of\n # the potential to accidentally allow unexpected schemes like\n # file:/. 
Here we're hardcoding the https scheme, so we can\n # ignore those specific checks.\n #\n # nosemgrep: python.lang.security.audit.dynamic-urllib-use-detected.dynamic-urllib-use-detected # noqa\n response = url_socket_retry(urlopen, request) # nosec B310\n # Successful response indicates a public accessible bucket in the same region\n region = response.headers.get('x-amz-bucket-region')\n\n if not allow_public:\n raise ValueError(\"bucket: '{bucket}' is publicly accessible\")\n except HTTPError as err:\n # Returns 404 'Not Found' for buckets that don't exist\n if err.status == 404:\n raise ValueError(f\"bucket '{bucket}' does not exist\")\n # Permission errors (403) or redirects (301) for valid buckets\n # should still contain a header we can use to determine the\n # bucket region. Permission errors are indicative of correct\n # region, while redirects are for cross region.\n region = err.headers.get('x-amz-bucket-region')\n\n return region", "def urls(self) -> Dict[str, str]:\n url_bases = self.url_bases\n unformatted_paths = self._url_module.url_paths\n\n urls = {}\n for url_base in url_bases:\n # The default URL_base will look like: http://service.[..].amazonaws.com/...\n # This extension ensures support for the China & ISO regions\n alt_dns_suffixes = {\"cn\": \"amazonaws.com.cn\"}\n if enable_iso_regions():\n alt_dns_suffixes.update(\n {\n \"iso\": \"c2s.ic.gov\",\n \"isob\": \"sc2s.sgov.gov\",\n \"isoe\": \"cloud.adc-e.uk\",\n \"isof\": \"csp.hci.ic.gov\",\n }\n )\n\n for url_path, handler in unformatted_paths.items():\n url = url_path.format(url_base)\n urls[url] = handler\n for dns_suffix in alt_dns_suffixes.values():\n alt_url_base = re.sub(r\"amazonaws\\\\?.com$\", dns_suffix, url_base)\n alt_url = url_path.format(alt_url_base)\n urls[alt_url] = handler\n\n return urls", "def parse_s3_url(url):\n parsed_url = urlparse(url)\n if parsed_url.scheme != \"s3\":\n raise ValueError(\"S3 URLs must start with 's3://'\")\n\n bucket = parsed_url.netloc.split(\".\")[0]\n key = parsed_url.path.lstrip(\"/\")\n\n return {\"bucket\": bucket, \"key\": key}", "def split_s3_path(url):\n parsed = urlparse(url)\n if not parsed.netloc or not parsed.path:\n raise ValueError(\"bad s3 path {}\".format(url))\n bucket_name = parsed.netloc\n s3_path = parsed.path\n # Remove '/' at beginning of path.\n if s3_path.startswith(\"/\"):\n s3_path = s3_path[1:]\n return bucket_name, s3_path", "def split_s3_path(url):\n parsed = urlparse(url)\n if not parsed.netloc or not parsed.path:\n raise ValueError(\"bad s3 path {}\".format(url))\n bucket_name = parsed.netloc\n s3_path = parsed.path\n # Remove '/' at beginning of path.\n if s3_path.startswith(\"/\"):\n s3_path = s3_path[1:]\n return bucket_name, s3_path", "def split_s3_path(url):\n\tparsed = urlparse (url)\n\tif not parsed.netloc or not parsed.path:\n\t\traise ValueError (\"bad s3 path {}\".format (url))\n\tbucket_name = parsed.netloc\n\ts3_path = parsed.path\n\t# Remove '/' at beginning of path.\n\tif s3_path.startswith (\"/\"):\n\t\ts3_path = s3_path[1:]\n\treturn bucket_name, s3_path", "def _get_available_regions():\n session = boto3.session.Session()\n\n return session.get_available_regions(service_name='s3')", "def parse_s3_uri(URIs):\n buckets, keys = [], []\n for URI in URIs:\n uri_path = path.normpath(URI).split(\"/\")\n buckets.append(uri_path[1])\n keys.append(uri_path[2:])\n\n return buckets, keys", "def get_metadata(urls, rse):\n result = {}\n for url in urls:\n try:\n endpoint, bucket_name, key_name = _get_endpoint_bucket_key(url)\n bucket = 
_get_bucket(rse, endpoint, bucket_name)\n metadata = None\n key = bucket.get_key(key_name)\n if key is None:\n metadata = exception.SourceNotFound('Key %s not found on %s' % (key_name, endpoint))\n else:\n metadata = {'filesize': key.size}\n result[url] = metadata\n except boto.exception.S3ResponseError as e:\n if e.status in [404, 403]:\n raise exception.DestinationNotAccessible(e)\n else:\n raise exception.ServiceUnavailable(e)\n except exception.RucioException as e:\n result[url] = e\n except:\n result[url] = exception.RucioException(\"Failed to get metadata for %s, error: %s\" % (endpoint, traceback.format_exc()))\n return result", "def _get_buckets():\n\n return __opts__[\"s3.buckets\"] if \"s3.buckets\" in __opts__ else {}", "def parse_s3_url(url):\n result = urlparse.urlparse(url)\n return result.netloc, result.path[1:] # strip leading slash", "def _recurse(self) -> Iterator[str]:\n\n client: s3.Client = boto3.client('s3')\n\n decoded_url = urlparse(self.url)\n bucket_name = decoded_url.netloc\n\n paginator = client.get_paginator('list_objects_v2')\n\n page_iterator: PageIterator = paginator.paginate(\n Bucket=bucket_name,\n Prefix=decoded_url.path.lstrip('/'),\n )\n\n for page in page_iterator:\n records = page.get('Contents', [])\n\n for record in records:\n key = record['Key']\n yield f's3://{bucket_name}/{key}'", "def get_signed_urls(urls, rse, operation='read'):\n result = {}\n for url in urls:\n try:\n endpoint, bucket_name, key_name = _get_endpoint_bucket_key(url)\n\n signed_url = None\n if operation == 'read':\n # signed_url = conn.generate_url(3600, 'GET', bucket_name, key_name, query_auth=True, force_http=False)\n bucket = _get_bucket(rse, endpoint, bucket_name)\n key = bucket.get_key(key_name)\n if key is None:\n signed_url = exception.SourceNotFound('Key %s not found on %s' % (key_name, endpoint))\n else:\n try:\n signed_url = key.generate_url(3600, 'GET', query_auth=True, merge_meta=False, force_http=False)\n except TypeError:\n # merge_meta option is not supported\n signed_url = key.generate_url(3600, 'GET', query_auth=True, force_http=False)\n else:\n conn = _get_connection(rse, endpoint)\n _get_bucket(rse, endpoint, bucket_name, operation='write')\n signed_url = conn.generate_url(3600, 'PUT', bucket_name, key_name, query_auth=True, force_http=False)\n result[url] = signed_url\n except boto.exception.S3ResponseError as e:\n if e.status in [404, 403]:\n result[url] = exception.DestinationNotAccessible(e)\n else:\n result[url] = exception.ServiceUnavailable(e)\n except exception.RucioException as e:\n result[url] = e\n except:\n result[url] = exception.RucioException(\"Failed to get signed url for %s, error: %s\" % (url, traceback.format_exc()))\n return result", "def get_s3_url(self, bucket=None, region=None):\n \n if bucket is None:\n bucket = self.AWS_S3_BUCKET\n \n if region is None:\n region = self.AWS_S3_REGION\n \n return \"https://{}.s3.{}.amazonaws.com/\".format(bucket, region)", "def get_buckets(number=0):\n result = {}\n click.echo(\"Getting S3 bucket details.\")\n s3 = boto3.client(\"s3\")\n bucket_metadata = s3.list_buckets()\n if number:\n buckets = bucket_metadata[\"Buckets\"][:number]\n else:\n buckets = bucket_metadata[\"Buckets\"]\n with click.progressbar(buckets, len(buckets)) as bucket_list:\n for bucket in bucket_list:\n name = bucket[\"Name\"]\n bucket_metadata = get_bucket(name)\n if bucket_metadata:\n result[name] = bucket_metadata\n return result", "def __init__(self, s3_connection, bucket_name, bucket_url):\n self.s3 = s3_connection\n 
self.bucket_name = bucket_name\n self.bucket_url = bucket_url", "def connect():\n # Reduce the number of retries to 1 if it's not set already so requests\n # fail quickly rather than delaying the downloading of photos\n if not boto.config.has_option('Boto', 'num_retries'):\n if not boto.config.has_section('Boto'):\n boto.config.add_section('Boto')\n boto.config.set('Boto', 'num_retries', '1')\n cfg = settings.config()\n try:\n aws_access_key = cfg.get('s3', 'access_key')\n aws_secret_key = cfg.get('s3', 'secret_key')\n aws_s3_bucket = cfg.get('s3', 'bucket')\n except NoOptionError as e:\n l.error(\"Error reading a setting from the config.cfg file: %s\", e)\n raise\n conn = S3Connection(aws_access_key, aws_secret_key)\n bucket = conn.get_bucket(aws_s3_bucket, validate=False)\n return bucket", "def test_bucket(gs_url):\n u = parse.urlparse(gs_url)\n assert u.netloc is not None, \"Missing bucket in url.\"\n return u.netloc", "def _get_state_file_from_s3(\n self,\n state_file_url: str,\n profile: str = None,\n region: str = None\n ) -> Dict[str, Any]:\n if profile:\n session = boto3.session.Session(profile_name=profile, region_name=region)\n else:\n session = get_boto3_session()\n s3 = session.resource('s3')\n parts = state_file_url[5:].split('/')\n bucket = parts[0]\n filename = \"/\".join(parts[1:])\n key = s3.Object(bucket, filename)\n try:\n state_file = key.get()[\"Body\"].read().decode('utf-8')\n except botocore.exceptions.ClientError as ex:\n if ex.response['Error']['Code'] == 'NoSuchKey':\n raise NoSuchTerraformStateFile(\"Could not find Terraform state file {}\".format(state_file_url))\n raise ex\n return json.loads(state_file)", "def get_matching_s3_keys(bucket, prefix=\"\", suffix=\"\"):\n for obj in get_matching_s3_objects(bucket, prefix, suffix):\n yield obj[\"Key\"]\n\n def download_froms3(myfile, env='prod'):\n # session = boto3.Session(profile_name=PROFILE)\n boto_s3_session = boto3.Session(profile_name=env)\n s3 = boto_s3_session.resource('s3')\n s3client = boto_s3_session.client('s3', region_name='eu-west-2')\n try:\n file_name = unquote(myfile.split('/')[-1])\n oparse = urlparse(myfile, allow_fragments=False)\n print(oparse)\n S3_SRC_BUCKET_NAME = oparse.netloc\n key = oparse.path[1:]\n download_path = '{0}{1}'.format(BASE_PATH, file_name)\n print(f'Downloading from {S3_SRC_BUCKET_NAME} , {key} to {download_path} ')\n # s3.Bucket(S3_SRC_BUCKET_NAME).download_file(key, download_path)\n # s3.Bucket(S3_SRC_BUCKET_NAME).download_file(file_name, download_path)\n s3client.download_file(S3_SRC_BUCKET_NAME, key, download_path)\n print('File Downloaded')\n except botocore.exceptions.ClientError as err:\n if err.response['Error']['Code'] == \"404\":\n print(\"The object does not exist.\", err)\n else:\n # raise\n error = str(err)\n print(error)\n\n return myfile", "def getBucketLocation(self, bucketName):\n\t\t_bucket \t\t= f\"http://{bucketName}.s3.eu-west-1.amazonaws.com\"\n\t\trequest \t\t= get(_bucket)\n\t\tsourceCode \t\t= request.content.decode('UTF-8')\n\t\tregex \t\t\t= r'\\<Endpoint\\>(.*?)\\<\\/Endpoint\\>'\n\t\tlocation \t\t= parseRegex(regex, sourceCode)\n\t\tresult \t\t\t= \"\"\n\t\t\n\t\tif \"s3.amazonaws.com\" in str(location): \n\t\t\tresult \t\t= f\"http://{bucketName}.{location[0]}\"\n\t\t\n\t\telif len(location) == 0: \n\t\t\tresult \t\t= _bucket\n\t\t\n\t\telse: \n\t\t\tresult \t\t= f\"http://{location[0]}\"\n\n\t\twrite(var=\"$\", color=w, data=result)\n\t\treturn(result)", "def _read_s3_url(cls, s3_url):\n\n parsed_url = urllib.parse.urlparse(s3_url)\n return 
cls.s3.get_object(Bucket=parsed_url.netloc,\n Key=parsed_url.path.lstrip(\"/\"))[\"Body\"].read()", "def _get_connection(rse, endpoint):\n\n key = \"connection:%s_%s\" % (rse, endpoint)\n result = REGION.get(key)\n if type(result) is NoValue:\n try:\n logging.debug(\"Creating connection object\")\n result = None\n credentials = _get_credentials(rse, endpoint)\n if 'access_key' in credentials and credentials['access_key'] and \\\n 'secret_key' in credentials and credentials['secret_key'] and \\\n 'is_secure' in credentials and credentials['is_secure'] is not None:\n\n parsed = urlparse.urlparse(endpoint)\n hostname = parsed.netloc.partition(':')[0]\n port = parsed.netloc.partition(':')[2]\n\n result = boto.connect_s3(aws_access_key_id=credentials['access_key'],\n aws_secret_access_key=credentials['secret_key'],\n host=hostname,\n port=int(port),\n is_secure=credentials['is_secure'],\n calling_format=boto.s3.connection.OrdinaryCallingFormat())\n\n REGION.set(key, result)\n logging.debug(\"Created connection object\")\n else:\n raise exception.CannotAuthenticate(\"Either access_key, secret_key or is_secure is not defined for RSE %s endpoint %s\" % (rse, endpoint))\n except exception.RucioException as e:\n raise e\n except:\n raise exception.RucioException(\"Failed to get connection for RSE(%s) endpoint(%s), error: %s\" % (rse, endpoint, traceback.format_exc()))\n return result", "def output(self):\n for table_key, version_key in self.make_s3_keys():\n return S3Target(f\"s3://{BUCKET}/{table_key}\")", "def test_parse_s3_bucket_key_url(url, expected_bucket, expected_key):\n bucket, key = ff_utils.parse_s3_bucket_and_key_url(url)\n assert expected_bucket == bucket and key == expected_key", "def test_get_buckets(self):\n conn = boto3.resource('s3', region_name='us-east-1')\n # We need to create the bucket since this is all in Moto's 'virtual' AWS account\n conn.create_bucket(Bucket='foobucket')\n\n s3_connector = S3Connector()\n s3_connector.connect(\"default\")\n self.assertEqual(s3_connector.get_buckets(), [\"foobucket\"])", "def _connect_to_s3(self, credentials):\n connection = s3.S3Connection(credentials['token'], credentials['secret'])\n bucket = connection.get_bucket(credentials['bucket'])\n return connection, bucket", "def s3_url(row):\n return f's3://{row[\"Bucket\"]}/{row[\"Key\"]}'", "def get_s3_url(iid):\n return \"http://%s.s3-website.%s.amazonaws.com/%s\" % (\n BUCKET_NAME,\n AWS_CLIENT_CONFIG['region_name'],\n iid\n )" ]
[ "0.6745669", "0.6435143", "0.62151855", "0.5942863", "0.59099114", "0.59099114", "0.5813266", "0.5788725", "0.5757353", "0.5635177", "0.56279933", "0.5561384", "0.55386037", "0.55036676", "0.5499892", "0.54674625", "0.54305124", "0.54026806", "0.5393976", "0.53860795", "0.53642565", "0.5358735", "0.5341048", "0.53051746", "0.52676946", "0.5267013", "0.525849", "0.5247245", "0.52391326", "0.5238293" ]
0.78750426
0
Construct the P_kd (kappaDelta) matrix such that kappa = P_kd Delta, equivalent to equations 31 & 32 in Simon 2009, using Delta = delta/a as in Hu and Keeton 2003
def construct_P_kd(N1,N2,z_kappa,z_Delta, cosmo=None,**kwargs): if cosmo==None: cosmo = Cosmology(**kwargs) Nj = len(z_kappa) Nk = len(z_Delta) if max(z_Delta) > max(z_kappa): print "-------" print "WARNING: construct_P_kd: singular matrix [ min(z_kappa) < min(z_Delta) ]" print "-------" P = numpy.zeros([Nj,Nk]) #array to hold the comoving distance to each z in z_Delta Dk = numpy.zeros(Nk+1) #for ease of calculation below, # make z_Delta[-1] = 0 z_Delta = numpy.concatenate([z_Delta,[0]]) for k in range(Nk): Dk[k] = cosmo.Dc(z_Delta[k]) for j in range(Nj): Dj = cosmo.Dc(z_kappa[j]) for k in range(Nk): if Dj < Dk[k]: P[j,k] = 0 else: #P[j,k] = (Dj-Dk[k])*Dk[k]/Dj \ # * (z_Delta[k]-z_Delta[k-1]) / cosmo.H(z_kappa[j]) P[j,k] = (Dk[k]-Dk[k-1]) * (Dj-Dk[k])*Dk[k]/Dj*(1.+z_Delta[k]) #P *= ( 1.5 * cosmo.c*cosmo.Om*(cosmo.H0)**2 ) P *= ( 1.5 * cosmo.Om*(cosmo.H0 / cosmo.c)**2 ) print P.shape for i in range(P.shape[0]): pylab.plot(z_delta,P[i]) pylab.show() exit() return Lens3D_los_mat(Nk,N1,N2,Nj,data=P)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log_kappa(D):\n\n return -0.5*D*np.log(2*np.pi) + 0.5*np.log(D*np.pi) - 1", "def em_epsilon_cdp(epsilon, delta, k):\n if delta <= 0:\n return epsilon / k\n else:\n log_delta = np.log(1 / delta)\n return max(\n epsilon / k,\n np.sqrt((8 * log_delta + 8 * epsilon) / k) -\n np.sqrt(8 * log_delta / k))", "def diff_precursor(state, th0, alpha, beta, beta_p, p_adj, rate_death, d):\n dt_state = np.zeros_like(state)\n\n for j in range(len(state)):\n if j == 0:\n dt_state[j] = p_adj*beta*th0 - beta*state[j] \n elif j < alpha:\n dt_state[j] = beta*state[j-1]- beta*state[j] \n elif j == alpha:\n # the problem with the 4 and 2 is that since differentiation takes 1 day it should divide twice giving 4 cells\n # however, if it has arrived in the final states if should double every half day\n dt_state[j] = beta*state[j-1]+2*beta_p*state[-1] - (rate_death+beta_p)*state[j] \n \n else:\n assert j > alpha \n dt_state[j] = beta_p*state[j-1]-(beta_p+rate_death)*state[j] \n \n return dt_state", "def get_kappa (self, t):\n\n Omega, dOmega_dt = self.get_Omega(t)\n Delta, dDelta_dt = self.get_Delta(t)\n\n kappa = Delta/2 + self.sign * \\\n (sqrt(Delta**2 + 2*Omega**2) / 2 - sqrt(Delta**2 + Omega**2))\n\n return kappa", "def calculate_A(self, D_k, theta_hat_k, delta_theta_k):\n\n A = np.identity(3)\n A.setflags(write=1)\n A[0, 2] = -D_k*np.sin(theta_hat_k + delta_theta_k)\n A[1, 2] = D_k*np.cos(theta_hat_k + delta_theta_k)\n return(A)", "def Lattice_Theory_Algebraic(N,Kappa,d) :\n\t\n\tMu2 = (1.0/6.0)*( (math.pi/(N**(1.0/d)) )**2 )*( ( Kappa + 1.0)**( (d + 2.0)/float(d) ) )\n\treturn Mu2", "def pia_from_kdp(kdp, dr, gamma=0.08):\n alpha = gamma * kdp\n return 2 * np.cumsum(alpha, axis=-1) * dr", "def _kappa(R,beta):\n return math.sqrt(2.*(1.+beta))*R**(beta-1)", "def delta_ad_ref(self, T: float):\n return self.kappa_ref * (np.exp(self.eps_ref / T) - 1.0)", "def Theory_Algebraic(N,Kappa,d) :\n\n\t# Calculate the radius from the epxcted mean degree:\n\tr = (1.0 / ((np.pi) ** 0.5)) * ((((Kappa) / N) * scipy.special.gamma((d + 2.0) / 2.0)) ** (1.0 / d))\n\n\t#Compute the algebraic connectivity:\n\tMu2 = Kappa- N*(r**(d/2.0))*scipy.special.jv( (d/2.0) , 2*math.pi*r )\n\n\treturn Mu2", "def analytic_dLdp(q,ps,C1s,C0s,ks,bs,sigma=1):\n n_p=len(ps)\n r=np.linalg.norm(ps-q,axis=1).reshape(-1,1)\n r_hat=(ps-q)/r\n t_hat=np.zeros(r_hat.shape)\n t_hat[:,0]=-r_hat[:,1]\n t_hat[:,1]=r_hat[:,0]\n\n dLdeta=np.zeros(n_p).reshape(-1,1)\n dLdr=np.zeros(n_p).reshape(-1,1)\n\n\n for i in range(n_p):\n Keta=2*(ks[i]*bs[i])**2/(sigma**2) * (r[i]-C1s[i])**(2*bs[i]-2)\n Kr=2*(ks[i]*bs[i])**2/(sigma**2) * (bs[i]-1) * (r[i]-C1s[i])**(2*bs[i]-3)\n sum_eta=sum_kr=0\n for j in range(n_p):\n \n rkrj=np.max([np.min([r_hat[i,:].dot(r_hat[j,:]),1]),-1])\n \n direction=np.sign(np.linalg.det(r_hat[[j,i],:]))\n\n sum_eta += (ks[j]*bs[j])**2 * (r[j]-C1s[j])**(2*bs[j]-2) * rkrj * np.sqrt(1-rkrj**2) * direction\n sum_kr += (ks[j]*bs[j])**2 * (r[j]-C1s[j])**(2*bs[j]-2) * (1-rkrj**2)\n \n dLdeta[i]=Keta*sum_eta\n dLdr[i]=Kr*sum_kr\n \n dLdp = dLdr * r_hat + (dLdeta/r) * t_hat\n \n \n return dLdp", "def _prob_kuiper(d, n_eff, dtype=\"f8\"):\n n_time_slices = np.size(d) # single value or vector\n n_points = 100\n\n en = math.sqrt(n_eff)\n k_lambda = (en + 0.155 + 0.24 / en) * d # see [1]\n l2 = k_lambda**2.0\n j2 = (np.arange(n_points) + 1) ** 2\n j2 = j2.repeat(n_time_slices).reshape(n_points, n_time_slices)\n fact = 4.0 * j2 * l2 - 1.0\n\n # compute normalized pK value in range [0,1]\n a = -2.0 * j2 * l2\n b = 2.0 * fact\n pk_norm = -logsumexp(a, 
b=b, axis=0) / (2.0 * n_eff)\n\n # check for no difference to uniform cdf\n pk_norm = np.where(k_lambda < 0.4, 0.0, pk_norm)\n\n # check for round off errors\n pk_norm = np.where(pk_norm > 1.0, 1.0, pk_norm)\n\n return pk_norm", "def DRate_j(eta,Pap,Pec,exp_loss_jt):\n return (1 + Pap)*(1 - (1 - 2*Pec)*exp_loss_jt)", "def DKL(p, q,eps=1e-12):\n return -Hshannon(p,eps=eps) + NLL(p, q,eps=eps)", "def make_k_matrix(self):\r\n K = self.uv_vol + self.Epsilon * self.guv_vol + \\\r\n (self.Epsilon / self.Beta) * self.uv_bound\r\n return K", "def _find_k_offsets(self, k, d):\n olderr = sp.seterr(invalid= 'ignore') # turn off 'invalid multiplication' error;\n # it's just the 'inf' boundaries\n delta = k * d\n sp.seterr(**olderr) # turn the error back on\n return delta", "def __kappa_mle(self, k, R):\n return (iv(1, k) / iv(0, k)) - R", "def create_deltas_tensor(self, deltas):\n T = self.T\n N = self.N\n neighs = self.neighs\n self.deltas = {}\n for n in range(N):\n self.deltas[n] = self.get_empty_matrix((len(neighs[n]), T))\n for cc in deltas:\n t = int(cc[0])\n if t >= T:\n raise ValueError(\"Contact time above T!\")\n i = int(cc[1])\n j = int(cc[2])\n delta = cc[3]\n #lam = np.clip(lam, 0, 1 - self.err_max_lambda)\n #print(t,i,j,lam)\n index_i = neighs[j].index(i)\n self.deltas[j][index_i][t] = delta\n\n '''def create_delta_tensor(self, gamma):\n \"\"\"\n Deltas values for the computation of parameters of rate of contagion\n \"\"\"\n N = self.N\n self.deltas = {}\n for n in range(N):\n self.deltas[n] = self.logp_lam[n]/gamma\n '''", "def delta(flag, S, K, t, r, sigma, q): \n\n b = r-q\n\n return numerical_delta(flag, S, K, t, r, sigma, b, f)", "def K(self, X, Xstar):\n r = l2norm_(X, Xstar)\n one = (1 + np.sqrt(3 * (r / self.l) ** 2))\n two = np.exp(- np.sqrt(3 * (r / self.l) ** 2))\n return self.sigmaf * one * two + self.sigman * kronDelta(X, Xstar)", "def kappa(self):\n a, c, d, b = self.to_ccw()\n p1, q1 = a + b, c + d\n p2, q2 = a + c, b + d\n n = p1 + q1\n\n if n == 0:\n return np.nan\n elif a == n or d == n:\n # only one (diagonal) cell is non-zero\n return 0.5\n\n return _div(2 * self.covar(), p1 * q2 + p2 * q1)", "def tab_Pdk(dmax):\r\n kmax = dmax*6 #la somme des des ne peut etre superieur a 6 fois leur nombre\r\n res = np.ones((dmax, kmax))\r\n\r\n\t#on met a zero toutes les cases qui sont impossible a completer\r\n for d in range(dmax):\r\n for k in range(kmax):\r\n if (k+1)<2*(1+d) or (k+1)>6*(d+1):\r\n res[d,k] = 0\r\n\t\t \r\n\t#on initialise pour le cas d=1\r\n for i in range(1,6):\r\n res[0][i] = 1/5\r\n\r\n\t#on met les valeurs des Q(d,k) dans toutes les cases non nulles\r\n for d in range(1,dmax):\r\n for k in range(kmax):\r\n if (res[d,k]==1) :\r\n res[d,k] = 0\r\n #on fait un for dans les valeurs qui sont realisables. 
\r\n #le +1 apres le min est la car nous sommes dans un range\r\n for i in range(max(k-6,2*(d+1-1)-1) , min(k-2,6*(d+1-1))+1):\r\n res[d,k] += res[d-1,i]/5\r\n\r\n\t#On multiplie toutes les cases selon la formule pour obtenir les P(d,k)\r\n for d in range(dmax):\r\n for k in range(kmax):\r\n res[d,k] = res[d,k]*(5/6)**(d+1)\r\n\t\t \r\n for d in range(dmax):\r\n res[d, 0] = 1-(5/6)**(d+1)\r\n\t\t\r\n return res", "def dhMatrix(self):\n row1 = np.array([np.cos(self.theta), -np.sin(self.theta)*np.cos(self.alpha), np.sin(self.theta)*np.sin(self.alpha), self.a*np.cos(self.theta)])\n row2 = np.array([np.sin(self.theta), np.cos(self.theta)*np.cos(self.alpha), -np.cos(self.theta)*np.sin(self.alpha), self.a*np.sin(self.theta)])\n row3 = np.array([0.0, np.sin(self.alpha), np.cos(self.alpha), self.d])\n row4 = np.array([0.0, 0.0, 0.0, 1.0])\n T = np.array([row1, row2, row3, row4])\n return T", "def get_kappa(self):\n return kappa_class(self.space, self.degree) - sum((psi_class(self.space, p)**self.degree for p in self.space.marks) )", "def grad_KL_mu(self):\n return kron_mvp(self.K_invs, self.q_mu - self.mu)", "def pressure_pd_solution(delta_t, current_pressure, data):\r\n\r\n # TODO: remove naive solution\r\n #adjust_pressure = current_pressure\r\n\r\n # TODO: implement PD solution here\r\n prev_P_error=data['ErrorP']\r\n P_error=current_pressure-100\r\n D_error=(P_error-prev_P_error)/delta_t\r\n adjust_pressure=-pressure_tau_p * P_error - pressure_tau_d * D_error\r\n data['ErrorP'] = P_error\r\n data['ErrorD'] = D_error\r\n\r\n return adjust_pressure, data", "def K(self, X, Xstar):\n r = l2norm_(X, Xstar)/self.l\n one = (1 + np.sqrt(5 * r ** 2) + 5 * r ** 2 / 3)\n two = np.exp(-np.sqrt(5 * r ** 2))\n return self.sigmaf * one * two + self.sigman * kronDelta(X, Xstar)", "def _Kgradients(self):\r\n dL_dfhat, I_KW_i = self._shared_gradients_components()\r\n dlp = self.noise_model.dlogpdf_df(self.f_hat, self.data, extra_data=self.extra_data)\r\n\r\n #Explicit\r\n #expl_a = np.dot(self.Ki_f, self.Ki_f.T)\r\n #expl_b = self.Wi_K_i\r\n #expl = 0.5*expl_a - 0.5*expl_b\r\n #dL_dthetaK_exp = dK_dthetaK(expl, X)\r\n\r\n #Implicit\r\n impl = mdot(dlp, dL_dfhat, I_KW_i)\r\n\r\n #No longer required as we are computing these in the gp already\r\n #otherwise we would take them away and add them back\r\n #dL_dthetaK_imp = dK_dthetaK(impl, X)\r\n #dL_dthetaK = dL_dthetaK_exp + dL_dthetaK_imp\r\n #dL_dK = expl + impl\r\n\r\n #No need to compute explicit as we are computing dZ_dK to account\r\n #for the difference between the K gradients of a normal GP,\r\n #and the K gradients including the implicit part\r\n dL_dK = impl\r\n return dL_dK", "def dNdPdTheta(p, theta, mDarkPhoton, epsilon):\n diffRate = dNdZdPtSquare(p,mDarkPhoton,theta,epsilon) * dPt2dTheta(p,theta) * dZdP(p,theta)\n return math.fabs(diffRate) # integrating in (-pi, pi)...", "def compute_kappa(self):\n eta_tilde = self.eta * (1 - self.gamma * self.tau / (2 * self.eta))\n kappa_2_tilde = self.lam * self.sigma ** 2 / eta_tilde\n kappa = np.arccosh(kappa_2_tilde * self.tau ** 2 / 2 + 1) / self.tau\n return kappa" ]
[ "0.6006358", "0.5726812", "0.5607486", "0.55964506", "0.5561811", "0.55442095", "0.5524946", "0.5505259", "0.5502432", "0.54426163", "0.537911", "0.5328839", "0.5327884", "0.53229845", "0.53121215", "0.53063035", "0.530413", "0.52961487", "0.52916247", "0.52867043", "0.5284612", "0.52759486", "0.5233822", "0.5231127", "0.5218548", "0.52098036", "0.520717", "0.51862997", "0.5185753", "0.51801616" ]
0.67988276
0
Show that basic numpy operations with Column behave sensibly
def test_numpy_ops(self): arr = np.array([1, 2, 3]) c = Column('a', arr) eq = c == arr assert np.all(eq) assert len(eq) == 3 assert type(eq) == Column assert eq.dtype.str == '|b1' eq = arr == c assert np.all(eq) lt = c - 1 < arr assert np.all(lt)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __getattr__(self, col):\n return self._obj[col].to_numpy()", "def _modify_columns(self, cols, X, y=None):", "def _create_metric_column(\n data: pd.DataFrame,\n column_a: str,\n column_b: str,\n numpy_method: str,\n conjunction: str,\n) -> pd.DataFrame:\n column_operation = getattr(np, numpy_method)\n new_column = column_operation(data[column_a], data[column_b])\n id_columns = _get_id_columns(data=data)\n working_df = data[id_columns]\n working_df.assign(**{f\"{column_a}_{conjunction}_{column_b}\": new_column})\n return working_df", "def test_array_return_type_reduction(t, df):\n expr = collect(t.b)\n result = expr.execute()\n expected = df.b.compute().tolist()\n assert list(result) == expected", "def numpy_basics():\n # a) tworzenie macierzy\n X = np.zeros([3,3]) # macierz 0\n print np.array([[1.1]]) #tworzenie macierzy z listy python\n Y = np.eye(3,3) # macierz jednostkowa\n X[0,0] = 10.0 # ustawienie elementu\n print \"Array dimensions \",X.shape #wymiar macierzy\n\n # b) dodawanie macierzowe\n print (X+Y)\n\n # c) mnozenie macierzowe\n print np.dot(X,Y)\n\n # d) pobieranie elementow\n print X[1,1] #element X_22\n print X[1,:] #caly drugi wiersz, zwraca tez np.array\n\n # e) w kazdym wierszu najwiekszy element macierzy X+Y\n #TODO: fill in\n print (X+Y).max(axis=1) # \"zjadamy\" 2 wymiar, czyli kolumny", "def column_convertor(x):\n x.shape = (1, x.shape[0])\n return x", "def transform(array):\n assert array.shape == (10, 2)\n new = Array(columns=\"abcd\")\n for x, y in array:\n new.append([x, y, x + y, x * y])\n return new", "def numpy_vector(self):\n pass", "def _colvec(x):\n x = np.atleast_1d(x)\n return x[:, None]", "def fast_get_col(self,j):\n col = self.col_view[:,j].copy()\n col.data = self.X.data[col.data]\n return col", "def values(self) -> ndarray:\n if len(self._data) == 1:\n kind: str = next(iter(self._data))\n order: List[int] = [self._column_info[col].loc for col in self._columns]\n arr = self._data[kind][:, order]\n if kind == 'b':\n return arr == 1\n else:\n return arr\n\n if {'b', 'S', 'm', 'M'} & self._data.keys():\n arr_dtype: str = 'O'\n else:\n arr_dtype = 'float64'\n\n v: ndarray = np.empty(self.shape, dtype=arr_dtype, order='F')\n\n for col, dtype, loc, order, col_arr in self._col_info_iter(with_order=True, with_arr=True):\n if dtype == 'S':\n cur_list_map = self._str_reverse_map[loc]\n _va.make_object_str_array(cur_list_map, v, col_arr, order)\n elif dtype == 'M':\n unit = col_arr.dtype.name.replace(']', '').split('[')[1]\n # changes array in place\n _va.make_object_datetime_array(v, col_arr.view('uint64'), order, unit)\n elif dtype == 'm':\n unit = col_arr.dtype.name.replace(']', '').split('[')[1]\n _va.make_object_timedelta_array(v, col_arr.view('uint64'), order, unit)\n else:\n v[:, order] = col_arr\n return v", "def test12(self):\n a, b = np.arange(self.N), np.arange(1, self.N+1)\n c, d = bcolz.carray(a), bcolz.carray(b, rootdir=self.rootdir)\n cr = bcolz.eval(\"c + 2 * d - 3\", out_flavor='numpy')\n nr = a + 2 * b - 3\n # print \"bcolz.eval ->\", cr, type(cr)\n # print \"numpy ->\", nr\n self.assertTrue(type(cr) == np.ndarray)\n assert_array_equal(cr, nr, \"eval does not work correctly\")", "def cols(self, col):\n self.col += col", "def test_column_values():\n column_array = np.array(world[\"pop_est\"])\n m1 = view(world, column=\"pop_est\") # column name\n m2 = view(world, column=column_array) # np.array\n m3 = view(world, column=world[\"pop_est\"]) # pd.Series\n assert m1.location == m2.location == m3.location\n\n m1_fields = view(world, 
column=column_array, tooltip=True, popup=True)\n out1_fields_str = _fetch_map_string(m1_fields)\n assert (\n 'fields=[\"pop_est\",\"continent\",\"name\",\"iso_a3\",\"gdp_md_est\",\"range\"]'\n in out1_fields_str\n )\n assert (\n 'aliases=[\"pop_est\",\"continent\",\"name\",\"iso_a3\",\"gdp_md_est\",\"range\"]'\n in out1_fields_str\n )\n\n m2_fields = view(world, column=world[\"pop_est\"], tooltip=True, popup=True)\n out2_fields_str = _fetch_map_string(m2_fields)\n assert (\n 'fields=[\"pop_est\",\"continent\",\"name\",\"iso_a3\",\"gdp_md_est\",\"range\"]'\n in out2_fields_str\n )\n assert (\n 'aliases=[\"pop_est\",\"continent\",\"name\",\"iso_a3\",\"gdp_md_est\",\"range\"]'\n in out2_fields_str\n )\n\n # GeoDataframe and the given list have different number of rows\n with pytest.raises(ValueError, match=\"different number of rows\"):\n view(world, column=np.array([1, 2, 3]))", "def test03(self):\n a = np.arange(1, 101)\n b = bcolz.carray(a)\n c = b[[1.1, 3.3]]\n r = a[[1, 3]]\n assert_array_equal(c, r, \"fancy indexing does not work correctly\")", "def columns(self):\n \n pass", "def column(self):\n return self.reshape((self.size, 1))", "def column_expression(self, col):\n return getattr(func, self.impl.as_binary)(\n func.ST_Transform(col, self.app_srid),\n type_=self.__class__.impl(srid=self.app_srid)\n # srid could also be -1 so that the SRID is deduced from the\n # WKB data\n )", "def test02(self):\n a = np.arange(101)\n b = bcolz.carray(a)\n c = b[[]]\n r = a[[]]\n assert_array_equal(c, r, \"fancy indexing does not work correctly\")", "def test_multiple(self):\n df = self.df.copy()\n out = get_full_column(df.values)\n self.assertTrue(out == 0)", "def test02(self):\n a, b = np.arange(self.N), np.arange(1, self.N+1)\n cr = bcolz.eval(\"a * b\", rootdir=self.rootdir)\n nr = a * b\n # print \"bcolz.eval ->\", cr\n # print \"numpy ->\", nr\n assert_array_equal(cr[:], nr, \"eval does not work correctly\")", "def test_arithmetic_operations() -> None:\n\n # one two\n # 0 1\n # 2 3\n # 4 5\n df = pd.DataFrame(np.arange(6).reshape((3, 2)), columns=[\"one\", \"two\"])\n\n series = df.iloc[0] # first row == (0, 1)\n\n assert series.index.values.tolist() == [\"one\", \"two\"]\n assert series.values.tolist() == [0, 1]\n\n # Arithmetic operations between frames and series match the index of the\n # series (column names) on the columns of the frame, broadcasting over the\n # rows by default.\n\n df2 = df.sub(series) # axis=1\n\n # one two\n # 0 0\n # 2 2\n # 4 4\n assert df2.values.flatten().tolist() == [0, 0, 2, 2, 4, 4]\n\n # If you want to match on rows, use axis=0. 
This will match the index of the\n # series (row indices) on the rows of the frame, broadcasting over the\n # columns by default.\n series = df.loc[:, \"one\"]\n\n df2 = df.sub(series, axis=0)\n # one two\n # 0 1\n # 0 1\n # 0 1\n assert df2.values.flatten().tolist() == [0, 1, 0, 1, 0, 1]", "def augment_column(self, col: pd.Series,) -> pd.Series:", "def test05(self):\n a, b = np.arange(self.N), np.arange(1, self.N+1)\n c, d = bcolz.carray(a, rootdir=self.rootdir), b\n cr = bcolz.eval(\"a + 2 * d - 3\")\n nr = a + 2 * b - 3\n # print \"bcolz.eval ->\", cr\n # print \"numpy ->\", nr\n assert_array_equal(cr[:], nr, \"eval does not work correctly\")", "def test09(self):\n a, b = np.arange(self.N), np.arange(1, self.N+1)\n c, d = bcolz.carray(a, rootdir=self.rootdir), b\n c[\"a + 2 * d - 3 > 0\"] = 3\n a[(a + 2 * b - 3) > 0] = 3\n # print \"carray ->\", c\n # print \"numpy ->\", a\n assert_array_equal(c[:], a, \"carray[expr] = v does not work correctly\")", "def fast_update_col(self,j,vals):\n dataptr = self.col_view[:,j].data\n self.X.data[dataptr] = vals", "def __truediv__(self, other: Any) -> ColumnOperators:\n return self.operate(truediv, other)", "def test07(self):\n a, b = np.arange(self.N), np.arange(1, self.N+1)\n c, d = bcolz.carray(a, rootdir=self.rootdir), b\n cr = c[\"a + 2 * d - 3 > 0\"]\n nr = a[(a + 2 * b - 3) > 0]\n # print \"ca[expr] ->\", cr\n # print \"numpy ->\", nr\n assert_array_equal(cr[:], nr, \"carray[expr] does not work correctly\")", "def x(self) -> np.ndarray:\n return self.array[:, 1] if self.scalar_vector else self.array[:, 0]", "def _astype_internal(self, column: str, numpy_dtype: str) -> None:\n new_kind: str = utils.convert_numpy_to_kind(numpy_dtype)\n dtype, loc, order = self._get_col_dtype_loc_order(column) # type: str, int, int\n\n srm = []\n\n if dtype == new_kind:\n return None\n col_data: ndarray = self._data[dtype][:, loc]\n nulls = utils.isna_array(col_data, dtype)\n\n if numpy_dtype == 'S':\n col_data = col_data.astype('U')\n col_data, _, srm = _va.convert_str_to_cat(col_data)\n col_data[nulls] = 0\n elif numpy_dtype == 'b':\n col_data = col_data.astype('bool').astype('int8')\n col_data[nulls] = -1\n elif numpy_dtype == 'i':\n col_data = col_data.astype('int64')\n col_data[nulls] = MIN_INT\n elif numpy_dtype == 'f':\n col_data = col_data.astype('int64')\n col_data[nulls] = np.nan\n elif col_data.dtype.kind == 'M':\n col_data = col_data.astype('datetime64[ns]')\n col_data[nulls] = NaT\n elif col_data.dtype.kind == 'm':\n col_data = col_data.astype('timedelta64[ns]')\n col_data[nulls] = NaT\n\n self._remove_column(column)\n self._write_new_column_data(column, new_kind, col_data, srm, order)" ]
[ "0.6132716", "0.5897211", "0.57506585", "0.57329494", "0.5698936", "0.5586861", "0.55237365", "0.548468", "0.54814553", "0.5479346", "0.5474814", "0.54733694", "0.5452359", "0.54214543", "0.54059917", "0.54054594", "0.53947806", "0.53844583", "0.53784984", "0.5374963", "0.53702307", "0.5361976", "0.5361018", "0.5360034", "0.5350142", "0.52860385", "0.52822274", "0.52585095", "0.5256866", "0.52561706" ]
0.70421773
0
Method to create embeddings for documents by encoding their image.
def encode( self, docs: Optional[DocumentArray] = None, parameters: dict = {}, *args, **kwargs, ) -> None: if not docs: return batch_generator = docs.batch( traversal_paths=parameters.get('traversal_paths', self.traversal_paths), batch_size=parameters.get('batch_size', self.batch_size), require_attr='blob', ) with torch.inference_mode(): for batch in batch_generator: images = [] for doc in batch: if self.use_preprocessing: if doc.blob.shape[2] != 3: raise ValueError( "If `use_preprocessing=True`, your image must" " be of the format [H, W, C], in the RGB format (C=3)," f" but got C={doc.blob.shape[2]} instead." ) images.append(self._default_transforms(doc.blob)) else: if doc.blob.shape[0] != 3: raise ValueError( "If `use_preprocessing=False`, your image must" " be of the format [C, H, W], in the RGB format (C=3)," f" but got C={doc.blob.shape[0]} instead." ) images.append(torch.tensor(doc.blob, dtype=torch.float32)) images = torch.stack(images) embeddings = self.model.encode_image(image=images.to(self.device)) embeddings = embeddings.cpu().numpy() for idx, doc in enumerate(batch): doc.embedding = embeddings[idx]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_image_embeddings(self):\n inception_output = image_embedding.inception_v3(\n self.images,\n trainable=self.train_inception,\n is_training=self.is_training())\n\n # Map inception output onto embedding space.\n with tf.variable_scope(\"image_embedding\") as scope:\n image_embeddings = tf.contrib.layers.fully_connected(\n inputs=inception_output,\n num_outputs=self.config.sentence_embedding_size,\n activation_fn=None,\n weights_initializer=self.initializer,\n biases_initializer=None,\n scope=scope)\n \n if self.mode == \"train\":\n # to avoid overfitting we use dropout for all fully connected layers\n image_embeddings = tf.nn.dropout(image_embeddings, self.config.dropout_keep_prob_encoder)\n\n # Save the embedding size in the graph.\n tf.constant(self.config.sentence_embedding_size, name=\"image_embedding_size\")\n\n self.image_embeddings = image_embeddings", "def embed(documents, ctx_encoder, ctx_tokenizer, device):\n input_ids = ctx_tokenizer(\n documents[\"title\"],\n documents[\"text\"],\n truncation=True,\n padding=\"longest\",\n return_tensors=\"pt\",\n )[\"input_ids\"]\n embeddings = ctx_encoder(\n input_ids.to(device=device), return_dict=True\n ).pooler_output\n return {\"embeddings\": embeddings.detach().cpu().numpy()}", "def embed(self, data, mime_type=\"text/plain\", encode_data_to_base64=True):\n if encode_data_to_base64:\n data = base64.standard_b64encode(data.encode()).decode()\n self.embeddings.append({\"data\": data, \"mime_type\": mime_type})", "def sentence_and_image_representations(dataset, text_encoder, image_encoder):\n\n vectorizer = get_text_vectorizer(text_encoder)\n stream_encoder, imread = get_image_encoder(image_encoder)\n\n captions = load_captions(dataset)\n images = load_images(dataset)\n\n encoding_path = BASE_PATH / dataset / ('{}_{}'.format(text_encoder, image_encoder))\n encoding_path.mkdir(exist_ok=True)\n\n count = 0\n for split in images:\n split_images = [i for i in images[split]]\n split_images_stream = get_image_stream(images, imread, stream_encoder, split)\n caption_stream = [list(captions[imid].values()) for imid in split_images]\n\n encoded_filename = encoding_path / '{}-encoded-captions-and-images.json'.format(split)\n\n with encoded_filename.open('w') as fout:\n for image_id, capts, image_vec in zip(split_images, caption_stream, split_images_stream):\n print(count)\n count += 1\n for c in capts:\n vec = vectorizer.transform([c]).tolist()[0]\n \n if not np.any(vec):\n continue\n\n print(json.dumps(\n {'id': str(image_id),\n 'text': c,\n 'x_text': vec,\n 'x_image': image_vec.tolist()}), file=fout)", "def embed():", "def encode_images(self, images):\n # todo\n pass", "def embed_images(self):\n for img in self.book.xpath(\"//img[ not(starts-with(@src, 'data:')) and @src!= '']\"):\n img_src = img.attrib[\"src\"]\n img_raw = self.get_remote_content(img_src)\n if img_raw != None:\n img_64 = base64.b64encode(img_raw)\n file_info = os.path.splitext(img_src)\n ext = file_info[1].replace(\".\", \"\")\n ext = re.sub(\"\\?.*$\", \"\" , ext)\n \n if ext == \"svg\":\n svg = html.fromstring(img_raw.decode(\"utf-8\"))\n img.clear()\n img.tag = \"svg\"\n img[:] = [svg]\n else:\n img.set(\"src\", \"data:image/{};base64,{}\".format(ext, img_64.decode(\"utf-8\")))", "def add_image_face():\n\n try:\n img = decode_image(request.files[\"image\"].read())\n except Exception as e:\n log.error(e)\n data = {\"error\": \"Error while loading image\"}\n return jsonify(data), 500\n save_picture = False\n if request.args.get(\"save\") == \"true\":\n save_picture = 
True\n \n face_img, _ = processor.extract_faces()\n #TODO\n #1. get several images if possible\n #2. save face_img array as picture if save_picture == True\n #3. pipe face_img array to embedder --> embedder needs to be modified to not from a folder, but from array of face_img\n #4. get the embedder result, insert to a pickle object --> can be section ID, or whatever", "def set_embeddings(self):", "def build_book(self, embed_images = True, embed_styles = True, remove_scripts = True, add_navbar_js = True):\n \n chapter_list = self.get_chapter_list()\n \n for li in chapter_list:\n page = self.get_page(li)\n self.add_page_to(page.page_content, self.book_content)\n \n self.update_links()\n \n if embed_styles:\n self.embed_styles()\n \n if remove_scripts:\n self.remove_scripts()\n \n if embed_images:\n self.embed_images()\n \n if add_navbar_js:\n self.add_navbar_js()\n \n self.remove_html_widgets()\n self.remove_next_page_button()", "def encode(\n self,\n docs: Optional[DocumentArray] = None,\n parameters: dict = {},\n *args,\n **kwargs\n ) -> None:\n if not docs:\n return\n\n batch_generator = docs.batch(\n traversal_paths=parameters.get('traversal_paths', self.traversal_paths),\n batch_size=parameters.get('batch_size', self.batch_size),\n require_attr='text',\n )\n\n with torch.inference_mode():\n for batch in batch_generator:\n embeddings = self.model.encode_text(text=[[doc.text] for doc in batch])\n embeddings = embeddings.cpu().numpy()\n\n for idx, doc in enumerate(batch):\n doc.embedding = embeddings[idx]", "def build_image_embeddings(self, images):\n images = self.distort_images(images, tf.train.get_or_create_global_step())\n inception_output = image_embedding.inception_v3(\n images,\n trainable=self.train_inception,\n is_training=self.is_training(),\n add_summaries=False)\n\n self.inception_variables = tf.get_collection(\n tf.GraphKeys.GLOBAL_VARIABLES, scope=\"InceptionV3\")\n\n # Map inception output into embedding space.\n with tf.variable_scope(\"image_embedding\") as scope:\n image_embeddings = contrib_layers.fully_connected(\n inputs=inception_output,\n num_outputs=self.config.embedding_size,\n activation_fn=None,\n weights_initializer=self.initializer,\n biases_initializer=None,\n scope=scope)\n\n # Save the embedding size in the graph.\n tf.constant(self.config.embedding_size, name=\"embedding_size\")\n\n return image_embeddings", "def encode(self, docs: Optional[DocumentArray], parameters: dict, **kwargs):\n if docs:\n document_batches_generator = get_docs_batch_generator(\n docs,\n traversal_path=parameters.get('traversal_paths', self.default_traversal_paths),\n batch_size=parameters.get('batch_size', self.default_batch_size),\n needs_attr='text',\n )\n self._create_embeddings(document_batches_generator)", "def embed(self,\n documents: Union[List[str], str, pandas.core.series.Series],\n progress_bar: bool=False, device: str=\"cuda\",\n batch_size: int=1e3, to_numpy: bool=True):\n\n embeddings = self.embedding_model.encode(documents,\n device=device, show_progress_bar=progress_bar,\n convert_to_numpy=to_numpy, batch_size=batch_size)\n\n return embeddings", "def encode(self, docs: Optional[DocumentArray], parameters: Dict, **kwargs):\n if docs:\n docs_batch_generator = get_docs_batch_generator(\n docs,\n traversal_path=parameters.get(\n 'traversal_paths', self.default_traversal_path\n ),\n batch_size=parameters.get('batch_size', self.default_batch_size),\n needs_attr='blob',\n )\n self._compute_embeddings(docs_batch_generator)", "def encode(self, images):\n\n i = 0\n N = 
len(images)\n embs = None\n\n while True:\n end = min(N, i + self.batch_size)\n batch = images[i: end]\n\n size = end - i\n if size < self.batch_size:\n batch += self._input_padding[:self.batch_size - size]\n\n if embs is None:\n embs = self.sess.run(self.embed_layer, feed_dict={self.x: batch})\n else:\n _embs = self.sess.run(self.embed_layer, feed_dict={self.x: batch})\n embs = np.vstack((embs, _embs))\n\n i += self.batch_size\n\n if i >= N - 1:\n break\n\n return embs", "def embed_images(self, image_files, model=None):\n return self._multi_image_op(image_files, ['embed'], model=model)", "def get_embedding():\n\n HOST = app.config.get('EMBEDDING_HOST')\n PORT = app.config.get('EMBEDDING_PORT')\n query_params = {\n 'text' : request.args.get('text', \"\"),\n \"language\" : request.args.get('language', None)\n }\n r = requests.get(f\"http://{HOST}:{PORT}/api/v1/embeddings/create\", params=query_params)\n return jsonify(r.json())", "def embed_images(self, html):\n if not self.SUPPORT_EMBED_IMAGES:\n raise RuntimeError('%r does not support embed_images' % type(self))\n\n return self.RE_IMG.sub(self._embed_image, html)", "def augment_data(self):\n for char in self.hebrew.letter_li:\n char_path = self.training_folder / char\n img = cv.imread(\n str((self.training_folder / char / f\"{char}_original.jpeg\").resolve())\n ) # read font character\n h, w, _ = img.shape # image height and width\n\n for rep in range(self.repetitions):\n res = elastic_morphing(img, self.amp, self.sigma, h, w) # morph image\n cv.imwrite(\n str(char_path / f\"{char}{rep}.jpeg\"), res\n ) # write result to disk", "def main():\n \n download_blob('data/artist_albums_lyrics_0607.json', '/tmp/artist_albums_lyrics_0607.json')\n\n with open('/tmp/artist_albums_lyrics_0607.json', 'r') as f:\n data = json.load(f)\n\n data_emb = add_embeddings(data)\n\n with open('artist_albums_lyrics_embs_0608.json', 'w') as f:\n json.dump(data_emb, f, indent=4)\n\n upload_blob('artist_albums_lyrics_embs_0608.json', folder_name='data')", "def _embed(self):\n with tf.variable_scope('word_embedding'):\n self.word_embeddings = tf.get_variable(\n 'word_embeddings',\n shape=(self.term_vocab.size(), self.term_vocab.embed_dim),\n initializer=tf.constant_initializer(self.term_vocab.embeddings),\n trainable=True\n )\n self.p_word_emb = tf.nn.embedding_lookup(self.word_embeddings, self.p)\n self.q_word_emb = tf.nn.embedding_lookup(self.word_embeddings, self.q)\n\n with tf.variable_scope('char_embedding'):\n self.char_embeddings = tf.get_variable(\n 'char_embeddings',\n shape=(self.char_vocab.size(), self.char_vocab.embed_dim),\n initializer=tf.constant_initializer(self.char_vocab.embeddings),\n trainable=True\n )\n self.p_char_emb = tf.nn.embedding_lookup(self.char_embeddings, self.p_char) # [batch, seqlen, max_char_num, embedding_size]\n self.q_char_emb = tf.nn.embedding_lookup(self.char_embeddings, self.q_char)\n\n self.p_char_emb = self.cnn_emb(self.p_char_emb, \"p_emb\")\n self.q_char_emb = self.cnn_emb(self.q_char_emb, \"q_emb\")\n '''\n self.p_char_emb = tf.reshape(self.p_char_emb, [-1, self.max_char_num, self.emb_size])\n self.q_char_emb = tf.reshape(self.q_char_emb, [-1, self.max_char_num, self.emb_size])\n\n self.p_char_emb = cnn_layer.conv(self.p_char_emb, self.emb_size,\n bias=True, activation=tf.nn.relu, kernel_size=5, name=\"char_conv\", reuse=None)\n self.q_char_emb = cnn_layer.conv(self.q_char_emb, self.emb_size,\n bias=True, activation=tf.nn.relu, kernel_size=5, name=\"char_conv\", reuse=True)\n\n self.p_char_emb = 
tf.reduce_max(self.p_char_emb, axis=1) # [batch*seqlen, 1, emb_size]\n self.q_char_emb = tf.reduce_max(self.q_char_emb, axis=1)\n\n batch_size = tf.shape(self.p_word_emb)[0]\n self.p_char_emb = tf.reshape(self.p_char_emb, [batch_size, -1, self.emb_size])\n self.q_char_emb = tf.reshape(self.q_char_emb, [batch_size, -1, self.emb_size])\n\n self.p_char_emb = tf.nn.dropout(self.p_char_emb, 0.95)\n self.q_char_emb = tf.nn.dropout(self.q_char_emb, 0.95)\n '''\n self.p_emb = tf.concat([self.p_word_emb, self.p_char_emb], -1)\n self.q_emb = tf.concat([self.q_word_emb, self.q_char_emb], -1)", "def create_image_caption_pairs(self):", "def create_embedding(self):\n self.embedding = []\n\n for index in range(1,self.args.window_size+1):\n print(\"\\nOptimization round: \" +str(index)+\"/\"+str(self.args.window_size)+\".\")\n print(\"Creating documents.\")\n clean_documents = self.walk_extracts(index)\n print(\"Fitting model.\")\n model = Word2Vec(clean_documents,\n size = self.args.dimensions,\n window = 1,\n min_count = self.args.min_count,\n sg = 1,\n workers = self.args.workers)\n\n new_embedding = self.get_embedding(model)\n self.embedding = self.embedding +[new_embedding]\n self.embedding = np.concatenate(self.embedding, axis = 1)", "def encode(image):\n from encoder import launch\n launch(image)", "def encode_(model, images, letterbox_size, verbose, onlyhor=False, fill=False):\n if verbose:\n pbar = tqdm(total=len(images)) \n results = []\n for image in images:\n orig_img = Image.open(image)\n lbimgs = []\n for hflip in [False, True]:\n for vflip in list(set([False, True and not(onlyhor)])):\n for rot in list(set([False, True and not(onlyhor)])):\n cur_img = orig_img.copy() \n if hflip:\n cur_img = cur_img.transpose(Image.FLIP_LEFT_RIGHT)\n if vflip:\n cur_img = cur_img.transpose(Image.FLIP_TOP_BOTTOM)\n if rot:\n cur_img = cur_img.transpose(Image.ROTATE_90)\n lbimg = Letterbox(cur_img)\n lbimg.letterbox(\n sizeh=letterbox_size,\n sizew=letterbox_size,\n randomize_pos=False,\n fill_letterbox=fill\n )\n lbimgs.append(np.array(lbimg) / 255.0)\n lbimgs = np.stack(lbimgs)\n img_results = model.predict(lbimgs)\n if verbose:\n pbar.update(1)\n results.append({'image':image, 'embeddings':img_results})\n if verbose:\n pbar.close()\n return results", "def image_tagger_representations(dataset, text_encoder, image_encoder):\n\n vectorizer = get_text_vectorizer(text_encoder)\n\n if image_encoder == 'inceptionresnetv2':\n stream_encoder, imread = get_inceptionresnetv2_tagger()\n else:\n raise NotImplementedError('{} not recognized image_encoder'.format(image_encoder))\n\n captions = load_captions(dataset)\n images = load_images(dataset)\n\n encoding_path = BASE_PATH / dataset / ('{}_{}'.format(text_encoder, image_encoder))\n encoding_path.mkdir(exist_ok=True)\n\n count = 0\n for split in images:\n split_images = [i for i in images[split]]\n split_images_stream = get_image_stream(images, imread, stream_encoder, split)\n caption_stream = [list(captions[imid].values()) for imid in split_images]\n\n encoded_filename = encoding_path / '{}-tagencoded-captions-and-images.json'.format(split)\n\n with encoded_filename.open('w') as fout:\n for image_id, capts, image_tags in zip(split_images, caption_stream, split_images_stream):\n print(count)\n count += 1\n for c in capts:\n vec = vectorizer.transform([c]).tolist()[0]\n image_vec = vectorizer.transform(image_tags).mean(axis=0).tolist()\n\n print(json.dumps(\n {'id': str(image_id),\n 'text': c,\n 'x_text': vec,\n 'x_image': image_vec}), file=fout)", "def embed(ctx=None, 
title=None, description=None, fields=None, customFooter=False, customThumbnail=None, customColor=None, image=None):\n\n e = discord.Embed(title=title, description=description)\n if customColor is None:\n e.color = color()\n else:\n e.color = color(customColor)\n \n if fields != None:\n index = 0\n # Please fix the code below, There's nothing wrong with it, it's just messy and I'm sure that's not the right way to do it.\n for field in fields:\n session = []\n for key, value in field.items():\n session.append(key)\n\n if key == \"n\":\n name = value \n \n if key == \"v\":\n xValue = value \n \n if key == \"inline\":\n inline = value \n \n if not \"inline\" in session:\n inline = False\n \n e.add_field(name=f\"{name}\", value=xValue, inline=inline)\n \n if not customFooter:\n footer(e, ctx)\n \n if image is None:\n try:\n if customThumbnail is None:\n e.set_thumbnail(url=ctx.author.avatar_url)\n else:\n e.set_thumbnail(url=customThumbnail)\n except:\n pass \n else:\n e.set_image(url=image)\n return e", "def generate_embeddings(vae, dataset):\n data = ((torch.unsqueeze(img,0), label) for img, label in dataset)\n data = ((vae.encoder(tens), label) for tens, label in data)\n data = ((vae.codebook(emb),label) for emb, label in data)\n data = ((torch.flatten(img),label) for img, label in data)\n data = (torch.cat([inds,Tensor([label]).int()]) for inds, label in data)\n return data", "def create_embeddings(docs):\n if len(docs) > 1:\n blobs, _ = make_blobs(n_samples=len(docs), centers=5, n_features=768, random_state=42)\n else:\n blobs, _ = make_blobs(n_samples=len(docs), centers=1, n_features=768, random_state=42)\n return blobs" ]
[ "0.5972184", "0.5909413", "0.5838329", "0.5812766", "0.5676898", "0.566756", "0.56353486", "0.5634795", "0.5614541", "0.560931", "0.55710936", "0.5562259", "0.5523897", "0.5518542", "0.5506819", "0.5501866", "0.54833275", "0.54608405", "0.5443685", "0.5399601", "0.5336045", "0.5324616", "0.5313196", "0.5306689", "0.5288832", "0.5264767", "0.52611846", "0.52408457", "0.52299243", "0.5223904" ]
0.6467108
0
Test that the digits are classified correctly by a classifier.
def __test_digits(self, X, y, clf): self.assertEqual(len(X), len(y)) correct = 0 for i in xrange(len(y)): expected = y[i] prediction = clf.classify([X[i]])[0] if expected == prediction: correct += 1 self.assertGreaterEqual(correct, self.TRHESH * len(y)) return correct
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_classify(self):\n classifiers, estimates =\\\n ada_boost.train_dataset(self.larger_matrix,\n self.larger_class_labels,\n 9)\n data_to_classify = [1, 0.5]\n classifications = ada_boost.classify(data_to_classify, classifiers)\n expected = np.mat([-1.])\n self.assertEqual(classifications, expected)", "def test_classifiers(train_docs, train_target, test_docs, test_target, min_docs, K, K2, removeStopWords):\n # test_classifiers(train_docs, train_target, test_docs, test_targets, i, 3)\n X_train_counts, X_train_tfidf, X_test_counts, X_test_tfidf = extract_text_features(train_docs, test_docs, min_docs, removeStopWords)\n \n \n num_docs, vocab_size = X_train_counts.shape\n print('Number of (training) documents =',num_docs)\n print('Vocabulary size =',vocab_size)\n \n\n # Now evaluate the classifiers on the test data\n # Print out the accuracy as a percentage for each classifier.\n # np.mean() can be used to calculate the accuracy. Round the accuracy to 2 decimal places.\n\n #predict according to different classifier--evaluate results \n predicted_multNB = fit_and_predict_multinomialNB(X_train_tfidf, train_target, X_test_tfidf)\n predicted_bernNB = fit_and_predict_BernoulliNB(X_train_tfidf, train_target, X_test_tfidf)\n predicted_LR = fit_and_predict_LR(X_train_tfidf, train_target, X_test_tfidf)\n predicted_LR = fit_and_predict_LR(X_train_counts, train_target, X_test_counts)\n predicted_KNN = fit_and_predict_KNN(X_train_tfidf, train_target, X_test_tfidf, K)\n predicted_KNN2 = fit_and_predict_KNN(X_train_tfidf, train_target, X_test_tfidf, K2)\n \n predicted_base = np.array([FreqDist(test_target).most_common(1)[0][0]]*len(test_target))\n\n # count num of correct predictions / total\n np_test_target = np.array(test_target)\n base = np.sum(predicted_base == np_test_target)/len(np_test_target)*100\n multNB = np.sum(predicted_multNB == np_test_target)/len(np_test_target)*100\n bernNB = np.sum(predicted_bernNB == np_test_target)/len(np_test_target)*100\n LR = np.sum(predicted_LR == np_test_target)/len(np_test_target)*100\n KN = np.sum(predicted_KNN == np_test_target)/len(np_test_target)*100\n KN2 = np.sum(predicted_KNN2 == np_test_target)/len(np_test_target)*100\n\n \n print('\\tBase Accuracy: {:.3f}'.format(base))\n print('\\tAccuracy with multinomial naive Bayes: {:.2f}'.format(multNB))\n print('\\tAccuracy with Bernoulli naive Bayes: {:.2f}'.format(bernNB))\n print('\\tAccuracy with logistic regression: {:.2f}'.format(LR))\n print('\\tAccuracy with kNN, k={} classifier: {:2f}'.format(K, KN))\n print('\\tAccuracy with kNN, k={} classifier: {:.2f}'.format(K2, KN2))", "def evaluate_classifications(self):\n test_labels = open('./digitdata/testlabels', 'r')\n self.init_confusion_matrix()\n i = 0\n class_stats = {0:[0,0], 1:[0,0], 2:[0,0], 3:[0,0], 4:[0,0], 5:[0,0], 6:[0,0], 7:[0,0], 8:[0,0], 9:[0,0]}\n total_correct = 0\n num_labels = 1000\n for label in test_labels:\n int_label = int(label)\n if int_label == self.solutions[i]:\n class_stats[int_label][0] += 1\n self.confusion_matrix[int_label][self.solutions[i]] += 1\n else:\n self.confusion_matrix[int_label][self.solutions[i]] += 1\n class_stats[int_label][1] += 1\n i += 1\n for k in class_stats:\n print \"Class \" + str(k) + \": \" + str(float(class_stats[k][0])/class_stats[k][1])\n total_correct += float(class_stats[k][0])\n print \"Overall Accuracy: \" + str(total_correct/num_labels) \n for l in range(0,10):\n for w in range(0,10):\n self.confusion_matrix[l][w] = float(self.confusion_matrix[l][w]) / class_stats[l][1]\n \n s = [[str(e) for e 
in row] for row in self.confusion_matrix]\n lens = [len(max(col, key=len)) for col in zip(*s)]\n fmt = '\\t'.join('{{:{}}}'.format(x) for x in lens)\n table = [fmt.format(*row) for row in s]\n print '\\n'.join(table)\n #self.print_confusion_matrix() ", "def check_classifier():\n content = []\n labels = []\n file = 'COMP3074-CW1-Dataset.csv'\n content, labels = get_tag(file, \"question_book\", content, labels)\n file = 'name.csv'\n content, labels = get_tag(file, \"question_book\", content, labels)\n file = 'Small_talk.csv'\n content, labels = get_tag(file, \"small_talk\", content, labels, )\n x_train, x_test, y_train, y_test = train_test_split(content, # Sample feature set to be divided\n labels, # The sample result to be divided (label)\n stratify=labels, # Keep the category proportions\n # the same in training and testing\n test_size=0.25, # Refers to the proportion of\n # samples reserved for testing\n random_state=22) # Random seed\n count_vect = CountVectorizer(stop_words=stopwords.words('english'))\n x_train_counts = count_vect.fit_transform(x_train)\n tfidf_transformer = TfidfTransformer(use_idf=True, # Tf_idf\n sublinear_tf=True).fit(x_train_counts)\n x_train_tf = tfidf_transformer.transform(x_train_counts) # Standardize the inherent attributes of the training set,\n # reduce dimensionality and normalize\n classify = LogisticRegression(random_state=0).fit(x_train_tf, y_train) # Logistic regression\n return classify, tfidf_transformer, count_vect", "def test_text_classifier_test(self):\n pass", "def digitis(classes):\n #Loading digit\n digits = []\n for i in classes:\n digits.append(load_digits(n_class=i,return_X_y=True))\n\n return digits", "def check_correctness_raw(classifier_out, test_data):\n labels = test_data.labels\n num_correct = 0\n total = len(classifier_out)\n for index, label in classifier_out:\n if labels[index] == label:\n num_correct += 1\n print(f'Got {num_correct} out of {total} correct: {(num_correct / total) * 100}%')", "def NBAccuracy(features_train, labels_train, features_test, labels_test):\n ### import the sklearn module for GaussianNB\n from sklearn.naive_bayes import GaussianNB\n\n ### create classifier\n clf = GaussianNB()#TODO\n clf.fit(features_train,labels_train)\n ### fit the classifier on the training features and labels\n #TODO\n\n ### use the trained classifier to predict labels for the test features\n pred = clf.predict(features_test)#TODO\n\n\n ### calculate and return the accuracy on the test data\n ### this is slightly different than the example, \n ### where we just print the accuracy\n ### you might need to import an sklearn module\n from sklearn.metrics import accuracy_score\n accuracy = accuracy_score(pred,labels_test)#TODO\n return accuracy", "def test_non_numberic_validation(self):", "def test_non_numberic_validation(self):", "def classify(self, nn=1):\n\t\t#err=0\n\t\tpossibilities=[]\n\t\tfor i in range(len(self.X_test)):\n\t\t\tfor lines in range(len((self.X_train))):\n\t\t\t\tdist=np.linalg.norm(self.X_test[i]-self.X_train[lines])\n\t\t\t\tpossibilities.append([dist,self.Y_train[lines]])\n\t\t\tpossibilities.sort()\n\t\t\tfinal=[]\n\t\t\tfor c in range(0,15):\n\t\t\t\tfinal.append(possibilities[c][1])\n\t\t\t\tprint possibilities[c][1]\n\t\t\tcount=np.zeros(10)\n\t\t\tfor m in final:\n\t\t\t\tcount[m]+=1\n\t\t\t\n\t\t\tans=np.any(count==count.max())\n\t\t\t\n\t\t\tprint \"actual=\",self.Y_test[i]\n\t\t\tif(ans!=self.Y_test[i]):\n\t\t\t\tglobal err\n\t\t\t\terr=err+1", "def train_digits(self):\n try:\n # TODO: Make decision taking 
validation into account validation\n metrics_result = self.model.train()\n logging.info(\"model performance is {}\".format(metrics_result))\n return metrics_result is not None\n # TODO: Apply specific exceptions and log,\n except:\n logging.error(\"Prediction Error:\", sys.exc_info()[0])\n raise ValueError()", "def classify(dataset,classifier,feat_mask=None):\r\n \r\n train = dataset.get_data('train',True)\r\n X_train = train['x']\r\n if feat_mask is not None:\r\n X_train = X_train[:,feat_mask]\r\n y_train = train['y']\r\n \r\n classifier.fit(X_train,y_train)\r\n \r\n test = dataset.get_data('test',True)\r\n X_test = test['x']\r\n if feat_mask is not None:\r\n X_test = X_test[:,feat_mask]\r\n y_test = test['y']\r\n \r\n pred = classifier.predict(X_test)\r\n \r\n acc = np.count_nonzero(pred==y_test) / len(y_test)\r\n return acc,y_test,pred", "def test_text_classifier_curate(self):\n pass", "def test(self):\r\n error_count = 0\r\n N_TESTING = len(self.TESTING_DATA)\r\n for i in range(N_TESTING):\r\n x_vec = self.TESTING_DATA[i][:-1]\r\n y = self.TESTING_DATA[i][-1]\r\n\r\n result = self.bp.classify(x_vec)\r\n if result != y: error_count += 1\r\n print(error_count, \" errors on the test data, out of \", N_TESTING, \"items.\")", "def test_model_evaluation(model, mnist, idx, label):\n expected_probabilities = np.zeros((10,))\n expected_probabilities[label] = 1.0\n assert_array_almost_equal(\n model.classify(mnist.get_test_image(idx)),\n expected_probabilities\n )", "def testClassifier(x_train, y_train, x_test, y_test, clf):\n #metrics = []\n start = dt.now()\n clf.fit(x_train, y_train)\n end = dt.now()\n print 'training time: ', (end - start)\n \n # add training time to metrics\n #metrics.append(end-start)\n \n start = dt.now()\n yhat = clf.predict(x_test)\n end = dt.now()\n print 'testing time: ', (end - start)\n \n # add testing time to metrics\n #metrics.append(end-start)\n \n print 'classification report: '\n# print classification_report(y_test, yhat)\n pp(classification_report(y_test, yhat))\n \n print 'f1 score'\n print f1_score(y_test, yhat, average='macro')\n \n print 'accuracy score'\n accuracy = accuracy_score(y_test, yhat)\n print accuracy\n #metrics.append(accuracy)\n #precision = precision_score(y_test, yhat, average=None)\n #recall = recall_score(y_test, yhat, average=None)\n \n # add precision and recall values to metrics\n #for p, r in zip(precision, recall):\n # metrics.append(p)\n # metrics.append(r)\n \n \n #add macro-averaged F1 score to metrics\n #metrics.append(f1_score(y_test, yhat, average='macro'))\n \n print 'confusion matrix:'\n print confusion_matrix(y_test, yhat)\n \n # plot the confusion matrix\n plt.imshow(confusion_matrix(y_test, yhat), interpolation='nearest')\n plt.show()\n \n return accuracy", "def nb_accuracy(features_train, labels_train, features_test, labels_test):\n ### create classifier\n clf = GaussianNB()\n\n ### fit the classifier on the training features and labels\n clf.fit(features_train, labels_train)\n\n ### use the trained classifier to predict labels for the test features\n predictions = clf.predict(features_test)\n\n ### calculate and return the accuracy on the test data\n ### this is slightly different than the example,\n ### where we just print the accuracy\n ### you might need to import an sklearn module\n\n # accuracy = no of test points that are classified correctly /\n # total no of points (in a test set)\n\n # method#1: write code that compares predictions to y_axis_test, element-by-element\n # method#2: google \"sklearn accuracy\" and 
go from there\n # method#3: There's another way you can do this, too\n # print clf.score(features_test, labels_test)\n #accuracy = clf.score(features_test, labels_test)\n accuracy = accuracy_score(predictions, labels_test)\n return accuracy", "def test_classify():\n if platform.machine() == 'x86_64':\n classifier = classifier_module.Classifier(None)\n for i in range(0, 5):\n signal_a = Signal_test(1.0 + i * 0.028, 1.00 - i * i * 0.20 * 0.30)\n signal_b = Signal_test(2.0 - i * 0.011, 2.00 - i * 0.020)\n signal_list_test = [signal_a, signal_b]\n\n symbol = classifier.classify(signal_list_test)\n assert symbol == 'test'", "def test_nb(x, y, tune):\n # Perform classification without tuning\n nb = GaussianNB()\n pipeline = create_pipeline(nb)\n return accuracy(pipeline, x, y)", "def test_default(ndigit):\n Data, Label = getData()\n trainX, trainY, testX, testY = splitData(Data, Label, ndigit)\n trainX_mean = np.mean(trainX, axis=0)\n trainX_new = trainX - trainX_mean\n trainX_eigen = trainX_new\n testX_new = testX - trainX_mean\n testX_eigen = testX_new\n testO = []\n for i in xrange(testX_eigen.shape[0]):\n t = testX_eigen[i]\n j = getNearestSampleIndex(t, trainX_eigen)\n min_class = trainY[j]\n testO.append(min_class)\n testO = np.array(testO)\n train0 = []\n for i in xrange(trainX_eigen.shape[0]):\n t = testX_eigen[i]\n j = getNearestSampleIndex(t, trainX_eigen)\n min_class = trainY[j]\n train0.append(min_class)\n train0 = np.array(train0)\n print \"for digits = %d default train = %.6f test = %.6f \" % (\n ndigit, (train0 == trainY).mean(), (testO == testY).mean())", "def classifier(x):\n return x[0] - x[1] + 4 < 0", "def test_text_classifier_train(self):\n pass", "def test_text_classifier_vaporise(self):\n pass", "def test(name, data, classifier):\n classification = classifier.classify(data)\n print('Item ' + name + ' is a ' + classification)", "def _validateClassification(self, trainingSet):\n wrongCount = 0.\n\n pv = []\n tv = []\n\n if self.K == 1:\n for example in trainingSet:\n Y = self.test(example)\n \n givenClass = example.label[0]\n if Y[0] < 0.5:\n chosenClass = 0\n else:\n chosenClass = 1\n \n pv.append(chosenClass)\n tv.append(givenClass)\n \n if chosenClass != givenClass:\n wrongCount += 1.\n else:\n for example in trainingSet:\n Y = self.test(example)\n \n posterior, chosenClass = max((x, i) for i, x in enumerate(Y))\n max_val, givenClass = max((x, i) for i, x in enumerate(example.label))\n \n pv.append(chosenClass)\n tv.append(givenClass)\n \t\t\t\n if chosenClass != givenClass:\n wrongCount += 1.\n \n return wrongCount/len(trainingSet), pv, tv", "def test_Bernoulli_NB_estimators():", "def test_classifier(self):\n \n files = 0\n tp = 0\n fp = 0\n tn = 0\n fn = 0\n \n for testFile in os.listdir(self.testing_dir):\n if os.path.splitext(testFile)[1] in self.valid:\n\n files += 1\n fileName = self.testing_dir + \"/\" + testFile\n\n img = cv2.imread(fileName).astype(np.float32)\n self.Helpers.logger.info(\"Loaded test image \" + fileName)\n \n img = cv2.resize(img, (self.Helpers.confs[\"cnn\"][\"data\"][\"dim\"], \n self.Helpers.confs[\"cnn\"][\"data\"][\"dim\"]))\n img = self.reshape(img)\n \n prediction = self.get_predictions(img)\n \n msg = \"\"\n if prediction == 1 and \"_1.\" in testFile:\n tp += 1\n msg = \"Acute Lymphoblastic Leukemia correctly detected (True Positive)\"\n elif prediction == 1 and \"_0.\" in testFile:\n fp += 1\n msg = \"Acute Lymphoblastic Leukemia incorrectly detected (False Positive)\"\n elif prediction == 0 and \"_0.\" in testFile:\n tn += 1\n msg = 
\"Acute Lymphoblastic Leukemia correctly not detected (True Negative)\"\n elif prediction == 0 and \"_1.\" in testFile:\n fn += 1\n msg = \"Acute Lymphoblastic Leukemia incorrectly not detected (False Negative)\"\n self.Helpers.logger.info(msg)\n \n self.Helpers.logger.info(\"Images Classifier: \" + str(files))\n self.Helpers.logger.info(\"True Positives: \" + str(tp))\n self.Helpers.logger.info(\"False Positives: \" + str(fp))\n self.Helpers.logger.info(\"True Negatives: \" + str(tn))\n self.Helpers.logger.info(\"False Negatives: \" + str(fn))", "def classify_test(classifier, test_data):\n for d in test_data:\n test(d[\"name\"], d[\"attribute\"], classifier)", "def NBAccuracy(features_train, labels_train, features_test, labels_test):\n ### import the sklearn module for GaussianNB\n from sklearn.naive_bayes import GaussianNB\n from sklearn.metrics import accuracy_score\n\n ### create classifier\n clf = GaussianNB()\n\n ### fit the classifier on the training features and labels\n clf.fit(features_train, labels_train)\n\n ### use the trained classifier to predict labels for the test features\n # method 1\n accuracy = clf.score(features_test, labels_test)\n \n # method 2\n pred = clf.predict(features_test)\n accuracy = accuracy_score(pred, labels_test)\n \n return accuracy" ]
[ "0.6504808", "0.6338742", "0.63143575", "0.62959284", "0.62814325", "0.62447387", "0.61741614", "0.61499375", "0.61478704", "0.61478704", "0.6142268", "0.61027217", "0.6074408", "0.6066596", "0.60418713", "0.60381013", "0.603409", "0.6030845", "0.6029761", "0.6024929", "0.6019022", "0.6013295", "0.6002087", "0.59998125", "0.5998756", "0.598039", "0.5971172", "0.59681976", "0.59605473", "0.5953937" ]
0.7913877
0
Load training data from digits.png
def load_digits(cls): gray = cls.imgfile_to_grayscale(cls.DIGITS_FILE) # Now we split the image to 5000 cells, each 20x20 size cells = [np.hsplit(row, 100) for row in np.vsplit(gray, 50)] # Make it into a Numpy array. It size will be (50,100,20,20) x = np.array(cells) # Training data X = [np.reshape(x[y][x_], (400, )).astype(np.float32) / 256 for x_ in xrange(100) for y in xrange(50)] # Expected y = [y for y in xrange(10) for x_ in xrange(len(X) / 10)] assert len(X) == len(y) return X, y
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_digits():\n \n images, target = [], []\n for image_file in digit_image_filenames:\n image = cv2.imread(image_file)\n if image is None:\n raise RuntimeError(\"Failed to read the image file '{}'\".format(\n image_file))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n for slice in image_slices(image, 0):\n for i, character in enumerate(image_slices(slice, 1)):\n target.append(i)\n images.append(character)\n \n return images, target", "def load(digit, type_str='train'):\n assert type_str in ('test', 'train'), \"use the load_all function\"\n\n if type_str == 'test':\n base = 'testing'\n else:\n base = 'training'\n\n img_file = '{}_images'.format(base)\n lbl_file = '{}_labels'.format(base)\n images = load_images(img_file)\n labels = load_labels(lbl_file)\n\n # make a list of the rows that correspond to `digit`\n relevant = [images[i] for i , label in enumerate(labels) if label == digit]\n\n return numpy.array(relevant)", "def load_mnist(dataset=\"training\", digits=np.arange(10), path=\".\"):\n\n if dataset == \"training\":\n fname_img = os.path.join(path, 'train-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 'train-labels-idx1-ubyte')\n elif dataset == \"testing\":\n fname_img = os.path.join(path, 't10k-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 't10k-labels-idx1-ubyte')\n else:\n raise ValueError(\"dataset must be 'testing' or 'training'\")\n\n flbl = open(fname_lbl, 'rb')\n magic_nr, size = struct.unpack(\">II\", flbl.read(8))\n lbl = pyarray(\"b\", flbl.read())\n flbl.close()\n\n fimg = open(fname_img, 'rb')\n magic_nr, size, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n img = pyarray(\"B\", fimg.read())\n fimg.close()\n\n ind = [ k for k in range(size) if lbl[k] in digits ]\n N = len(ind)\n\n images = zeros((N, rows, cols), dtype=uint8)\n labels = zeros((N, 1), dtype=int8)\n for i in range(len(ind)):\n images[i] = array(img[ ind[i]*rows*cols : (ind[i]+1)*rows*cols ]).reshape((rows, cols))\n labels[i] = lbl[ind[i]]\n\n return images, labels", "def load_data():\n\n training_files_dir = \"digits/trainingDigits\"\n training_files = os.listdir(training_files_dir)\n file_num = len(training_files)\n hw_labels = []\n\n training_mat = zeros((file_num, 32 * 32))\n for i in xrange(file_num):\n filename = training_files[i]\n file_label = int((filename.split(\".\")[0]).split(\"_\")[0])\n hw_labels.append(file_label)\n training_mat[i, :] = img2vector(training_files_dir + '/' + filename)\n\n return training_mat, hw_labels", "def load_data(self):\n sets = ['train', 'val']\n images = []\n labels = []\n self.labels_dic = {}\n file = open(self.path + 'wnids.txt')\n train_labels = file.read().split()\n if self.train:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n for i in os.listdir(self.path + 'train/' + f + '/images/'):\n images.append(Image.open(self.path + 'train/' + f + '/images/' + i))\n labels.append(f)\n #image label n link to folder names of TinyImageNet\n self.labels_dic[f] = fn\n\n else:\n for fn in range(self.num_classes):\n f = train_labels[fn]\n self.labels_dic[f] = fn\n file_val = open(self.path + 'val/val_annotations.txt')\n val_labels = file_val.read().split('\\n')\n for im in val_labels:\n im_data = im.split(\"\t\")[:2]\n if len(im_data) < 2:\n continue\n if im_data[1] in self.labels_dic:\n images.append(Image.open(self.path + 'val/images/' + im_data[0]))\n labels.append(im_data[1])\n\n self.images = images\n self.labels = labels", "def load_png_data():\n m=1 #训练文件个数\n n=1 #测试文件个数\n train_set_x=[]#训练数据集\n train_set_y=[]#训练标签集\n\n 
test_set_x=[]#测试数据集\n test_set_y=[]#测试标签集\n\n train_data={}\n\n train_path=r\".\\dataset\\train_label\\\\\"\n dirs=os.listdir(train_path)\n\n for file in dirs:\n srcImg=cv2.imread(train_path+file)\n #将label数据集保存为numpy格式并保存\n npImg=np.array(srcImg)\n np.save(train_path+str(m)+'.npy',npImg)\n train_set_x.append(npImg)\n\n\n NoiseImg = GaussianNoise(srcImg, 25, 4, 0.8)\n npNoiseImg = np.array(NoiseImg)\n cv2.imwrite(r\".\\dataset\\trainset\\\\\"+str(m)+'.png', NoiseImg, [int(cv2.IMWRITE_PNG_STRATEGY_DEFAULT)])\n np.save(r\".\\dataset\\trainset\\\\\" + str(m) + '.npy', npNoiseImg)\n train_set_y.append(npNoiseImg)\n m=m+1\n train_data['train_set_x']=train_set_x\n train_data['train_set_y']=train_set_y\n\n test_path = r\".\\dataset\\test_label\\\\\"\n dirs_test = os.listdir(test_path)\n for file in dirs_test:\n srcImg=cv2.imread(test_path+file)\n #将label数据集保存为numpy格式并保存\n npImg=np.array(srcImg)\n np.save(test_path+str(n)+'.npy',npImg)\n test_set_x.append(npImg)\n\n\n NoiseImg = GaussianNoise(srcImg, 25, 4, 0.8)\n npNoiseImg = np.array(NoiseImg)\n cv2.imwrite(r\".\\dataset\\testset\\\\\"+str(n)+'.png', NoiseImg, [int(cv2.IMWRITE_PNG_STRATEGY_DEFAULT)])\n np.save(r\".\\dataset\\testset\\\\\" + str(n) + '.npy', npNoiseImg)\n test_set_y.append(npNoiseImg)\n n=n+1\n train_data['test_set_x']=test_set_x\n train_data['test_set_y']=test_set_y\n\n np.savez(r\"E:\\DeepLearning\\CNNDenoiser\\dataset\\train_data.npz\",**train_data)", "def load_data(datafile, num_class, save=False, save_path='dataset.pkl'):\n train_list = open(datafile, 'r')\n labels = []\n images = []\n for line in train_list:\n tmp = line.strip().split(' ')\n filepath = tmp[0]\n print(filepath)\n img = Image.open(filepath)\n img = prep.resize_image(img, 224, 224)\n np_img = prep.pil_to_nparray(img)\n images.append(np_img)\n\n # one-hot encoder\n index = int(tmp[1])\n label = np.zeros(num_class)\n label[index] = 1\n labels.append(label)\n if save:\n pickle.dump((images, labels), open(save_path, 'wb'))\n return images, labels", "def load_mnist_digits_bg(batch_size = 128):\r\n\r\n def relabel_letter_class(class_idx):\r\n excluded_letters_idx = [6,8,11,14,16]\r\n if class_idx in excluded_letters_idx:\r\n return None\r\n if class_idx >= 10:\r\n return 10\r\n\r\n\r\n\r\n background_train = torchvision.datasets.EMNIST(root='./data',\r\n train=True,\r\n download=True,\r\n split = 'letters',\r\n transform = emnist_img_transform,\r\n target_transform = relabel_letter_class)\r\n\r\n\r\n\r\n background_test = torchvision.datasets.EMNIST(root='./data',\r\n train=False,\r\n download=True,\r\n split = 'letters',\r\n transform = emnist_img_transform,\r\n target_transform = relabel_letter_class)\r\n\r\n\r\n mnist_train = torchvision.datasets.EMNIST(root='./data',\r\n train=True,\r\n download=True,\r\n split = 'mnist',\r\n transform = emnist_img_transform)\r\n\r\n\r\n\r\n mnist_test = torchvision.datasets.EMNIST(root='./data',\r\n train=False,\r\n download=True,\r\n split = 'mnist',\r\n transform = emnist_img_transform)\r\n\r\n # Discard unwanted letters from the background data\r\n background_train = discard_none_targets(background_train)\r\n background_test = discard_none_targets(background_test)\r\n\r\n # merge background data and digits data into a new data set\r\n train_ds = ConcatDataset([mnist_train,background_train])\r\n test_ds = ConcatDataset([mnist_test,background_test])\r\n\r\n\r\n # create data loaders and shuffle everything...\r\n train_dl = torch.utils.data.DataLoader(train_ds,\r\n batch_size=batch_size,\r\n shuffle=True)\r\n\r\n test_dl = 
torch.utils.data.DataLoader(test_ds,\r\n batch_size=batch_size,\r\n shuffle=True)\r\n\r\n return train_dl,test_dl", "def _load_mnist(path, dataset=\"training\", digits=None, asbytes=False,\n selection=None, return_labels=True, return_indices=False):\n\n # The files are assumed to have these names and should be found in 'path'\n files = {\n 'training': ('train-images-idx3-ubyte', 'train-labels-idx1-ubyte'),\n 'testing': ('t10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte'),\n }\n\n try:\n images_fname = os.path.join(path, files[dataset][0])\n labels_fname = os.path.join(path, files[dataset][1])\n except KeyError:\n raise ValueError(\"Data set must be 'testing' or 'training'\")\n\n # We can skip the labels file only if digits aren't specified and labels\n # aren't asked for\n if return_labels or digits is not None:\n flbl = open(labels_fname, 'rb')\n magic_nr, size = struct.unpack(\">II\", flbl.read(8))\n labels_raw = pyarray(\"b\", flbl.read())\n flbl.close()\n\n fimg = open(images_fname, 'rb')\n magic_nr, size, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n images_raw = pyarray(\"B\", fimg.read())\n fimg.close()\n\n if digits:\n indices = [k for k in range(size) if labels_raw[k] in digits]\n else:\n indices = range(size)\n\n if selection:\n indices = indices[selection]\n\n images = np.zeros((len(indices), rows, cols), dtype=np.uint8)\n\n if return_labels:\n labels = np.zeros((len(indices)), dtype=np.int8)\n for i in range(len(indices)):\n images[i] = np.array(images_raw[indices[i] * rows * cols:(indices[i] + 1) * rows * cols]).reshape((rows, cols))\n if return_labels:\n labels[i] = labels_raw[indices[i]]\n\n if not asbytes:\n images = images.astype(float)/255.0\n\n ret = (images,)\n if return_labels:\n ret += (labels,)\n if return_indices:\n ret += (indices,)\n\n if len(ret) == 1:\n return ret[0] # Don't return a tuple of one\n\n return ret", "def show_digit(self):\n x_train, _, _, _ = self._load_data()\n plt.imshow(x_train[0], cmap=plt.cm.binary)\n plt.show()", "def load_letter(folder, min_num_images):\n image_files = os.listdir(folder)\n dataset = np.ndarray(shape=(len(image_files), image_size, image_size),\n dtype=np.float32)\n image_index = 0\n print(folder)\n for image in os.listdir(folder):\n image_file = os.path.join(folder, image)\n try:\n image_data = (ndimage.imread(image_file).astype(float) - \n pixel_depth / 2) / pixel_depth\n if image_data.shape != (image_size, image_size):\n raise Exception('Unexpected image shape: %s' % str(image_data.shape))\n dataset[image_index, :, :] = image_data\n image_index += 1\n except IOError as e:\n print('Could not read:', image_file, ':', e, '- it\\'s ok, skipping.')\n \n num_images = image_index\n dataset = dataset[0:num_images, :, :]\n if num_images < min_num_images:\n raise Exception('Many fewer images than expected: %d < %d' %\n (num_images, min_num_images))\n \n print('Full dataset tensor:', dataset.shape)\n print('Mean:', np.mean(dataset))\n print('Standard deviation:', np.std(dataset))\n return dataset", "def load_data():\n # Load image data from MNIST.\n (train_x, train_y),(eval_x, eval_y) = keras.datasets.mnist.load_data()\n\n # We convert the input data to (60000, 28, 28, 1), float32 and normalize our data values to the range [0, 1].\n train_x = train_x.reshape(train_x.shape[0], train_x.shape[1], train_x.shape[2], 1)\n eval_x = eval_x.reshape(eval_x.shape[0], eval_x.shape[1], eval_x.shape[2], 1)\n\n train_x = train_x.astype('float32')\n eval_x = eval_x.astype('float32')\n train_x /= 255\n eval_x /= 255\n\n # Preprocess class 
labels \n train_y = train_y.astype(np.int32)\n eval_y = eval_y.astype(np.int32)\n\n train_y = np_utils.to_categorical(train_y, 10)\n eval_y = np_utils.to_categorical(eval_y, 10)\n\n return train_x, train_y, eval_x, eval_y", "def load_letter(folder,label,image_size=28,sample_num=-1):\n\n image_files = os.listdir(folder)\n dataset = np.ndarray(shape=(len(image_files), image_size, image_size),\n dtype=image_data_type)\n num_images = 0\n if sample_num == -1:\n sample_num = len(image_files)\n for image in image_files:\n image_file = os.path.join(folder, image)\n try:\n image_data = ndimage.imread(image_file).astype(image_data_type)\n if image_data.shape != (image_size, image_size):\n raise Exception('Unexpected image shape: %s' % str(image_data.shape))\n dataset[num_images, :, :] = image_data\n num_images = num_images + 1\n if num_images >= sample_num:\n break\n except IOError as e:\n print('Could not read:', image_file, ':', e, '- it\\'s ok, skipping.')\n\n dataset = dataset[0:num_images, :, :]\n data_label = np.ndarray(shape=(num_images), dtype=np.int8)\n data_label.fill(label)\n return dataset,data_label", "def load_digit(image_name):\n\n gray = cv2.imread(image_name, cv2.IMREAD_GRAYSCALE)\n gray = cv2.resize(gray, (28, 28))\n gray = gray.reshape((1, 784))\n\n return gray", "def load_dataset(self):\n\n train_path = os.path.join(self.dataset_path, 'images_background')\n validation_path = os.path.join(self.dataset_path, 'images_evaluation')\n\n # First let's take care of the train alphabets\n for alphabet in os.listdir(train_path):\n if alphabet[0] == '.':\n continue\n alphabet_path = os.path.join(train_path, alphabet)\n\n current_alphabet_dictionary = {}\n\n for character in os.listdir(alphabet_path):\n if character[0] == '.':\n continue\n character_path = os.path.join(alphabet_path, character)\n\n current_alphabet_dictionary[character] = os.listdir(\n character_path)\n\n self.train_dictionary[alphabet] = current_alphabet_dictionary\n\n # Now it's time for the validation alphabets\n for alphabet in os.listdir(validation_path):\n alphabet_path = os.path.join(validation_path, alphabet)\n if alphabet[0] == '.':\n continue\n\n current_alphabet_dictionary = {}\n\n for character in os.listdir(alphabet_path):\n if character[0] == '.':\n continue\n character_path = os.path.join(alphabet_path, character)\n\n current_alphabet_dictionary[character] = os.listdir(\n character_path)\n\n self.evaluation_dictionary[alphabet] = current_alphabet_dictionary", "def load_letter(folder, min_num_images, image_size):\n pixel_depth = 255.0\n\n image_files = os.listdir(folder)\n dataset = np.ndarray(shape=(len(image_files), image_size, image_size),\n dtype=np.float32)\n image_index = 0\n print(folder)\n for image in os.listdir(folder):\n image_file = P.join(folder, image)\n try:\n image_data = (ndimage.imread(image_file).astype(float) -\n pixel_depth / 2) / (pixel_depth / 2)\n if image_data.shape != (image_size, image_size):\n raise Exception('Unexpected image shape: %s' % str(image_data.shape))\n dataset[image_index, :, :] = image_data\n image_index += 1\n except IOError as e:\n print('Could not read:', image_file, ':', e, '- it\\'s ok, skipping.')\n\n num_images = image_index\n dataset = dataset[0:num_images, :, :]\n if num_images < min_num_images:\n raise Exception('Many fewer images than expected: %d < %d' %\n (num_images, min_num_images))\n\n print('Full dataset tensor:', dataset.shape)\n print('Mean:', np.mean(dataset))\n print('Standard deviation:', np.std(dataset))\n return dataset", "def load_test_data():\n X 
= []\n y = []\n for fname in os.listdir(test_dir):\n label = int(fname.split(\"_\")[0])\n img = plt.imread(os.path.join(test_dir, fname))\n X.append(img)\n y.append(label)\n X = np.stack(X)\n y = np.stack(y)\n return X, y", "def load_data(path,size, scale = True):\n images = os.listdir(path)\n images.sort()\n\n X = []\n for i, img in enumerate(images):\n photo = plt.imread(os.path.join(path,img))\n if size:\n photo = tf.image.resize(photo, (size, size))\n X.append(photo)\n \n X = np.array(X)\n if scale:\n X = X/X.max() \n return X", "def _load_data(self, imagepath):\n im = cv2.imread(imagepath)\n self.net.blobs['data'].data[...] = self.transformer.preprocess('data', im)", "def load_data(fname):\n pathname = \"data/\" + fname\n data = pickle.load(open(pathname, 'rb'), encoding='latin1')\n images = np.array([img[:-1] for img in data])\n ys = [int(img[-1]) for img in data]\n length = len(ys)\n labels = np.zeros((length, 10))\n\n for i in range(length):\n labels[i, ys[i]] = 1\n\n return images, labels", "def load_data():\n X = load_pickle(config['image_paths']['train_images_pickle'])\n y = load_train_labels()\n y = to_categorical(y)\n test_indices = np.random.choice(len(X), int(len(X) * float(config['model']['test_size'])), replace=False)\n X_train = np.asarray([e for idx, e in enumerate(X) if idx not in test_indices])\n X_test = np.asarray([e for idx, e in enumerate(X) if idx in test_indices])\n y_train = np.asarray([e for idx, e in enumerate(y) if idx not in test_indices])\n y_test = np.asarray([e for idx, e in enumerate(y) if idx in test_indices])\n return X_train, y_train, X_test, y_test", "def read():\n\n # load json and create model\n base_model = _model_builder.Network(0, model_type=\"load_model\")\n\n #load image and process\n digit = Image.open(\"./data/number.jpg\").convert(\"L\")\n digit = ImageOps.expand(digit,border=60,fill='black')\n digit = digit.resize((28, 28))\n\n #flatten the matrix (for input into MLP network todo:CNN)\n digit_flat = numpy.zeros((1, 784))\n counter = 0\n for j in range(0, 28):\n for i in range(0, 28):\n digit_flat[0][counter] = (digit.getpixel((i, j)))/255.0\n counter = counter+1\n\n #predict\n os.system('clear')\n base_model.predict(digit_flat)", "def main():\n labels, data = load_image_data()\n print(labels.shape, data.shape)", "def load_tiny_imagenet(directory):\n path_train, path_val, path_test = directory + '/train', directory + '/val', directory + '/test'\n labels = os.listdir(path_train)\n train_data = []\n train_labels = []\n for label in labels:\n imgs_path = os.path.join(path_train, label, 'images')\n imgs = os.listdir(imgs_path)\n for img_name in imgs:\n img_path = os.path.join(imgs_path, img_name)\n img = cv2.imread(img_path)\n b, g, r = cv2.split(img)\n img = cv2.merge([r,g,b]).reshape(-1, 64, 64, 3)\n train_data.append(img)\n train_labels.append(label)\n train_data = np.concatenate(train_data)\n train_labels = np.array(train_labels, dtype='str')\n \n test_data = []\n test_labels = []\n with open(path_val+'/val_annotations.txt', 'r') as f:\n val_annotations = [line.strip().split('\\t') for line in f]\n val_annotations = np.array(val_annotations)\n imgs_path = os.path.join(path_val, 'images')\n imgs = os.listdir(imgs_path)\n for img_name in imgs:\n img_path = os.path.join(imgs_path, img_name)\n img = cv2.imread(img_path)\n b, g, r = cv2.split(img)\n img = cv2.merge([r,g,b]).reshape(-1, 64, 64, 3)\n test_data.append(img)\n label = val_annotations[val_annotations[:, 0] == img_name, 1].astype('U9')\n test_labels.append(label)\n test_data = 
np.concatenate(test_data)\n test_labels = np.concatenate(test_labels)\n test_labels = np.array(test_labels, dtype='str')\n \n _, train_labels = np.unique(train_labels, return_inverse=True)\n _, test_labels = np.unique(test_labels, return_inverse=True)\n \n del r, g, b, label, labels, imgs_path, img_name, img, imgs, val_annotations\n \n return train_data, train_labels, test_data, test_labels", "def load_dataset(data_dir, img_size):\n global input_set\n global test_set\n\n imgs = []\n img_files = os.listdir(data_dir)\n for img in img_files:\n # try:\n tmp = scipy.misc.imread(data_dir + \"/\" + img)\n x, y, z = tmp.shape # shape : width * length * chanel\n coords_x = int(x / img_size) # 坐标\n coords_y = int(y / img_size) #\n coords = [(q, r) for q in range(coords_x) for r in range(coords_y)] # 列表 x * y\n for coord in coords:\n imgs.append((data_dir + \"/\" + img, coord)) # 为列表添加文件目录\n # except BaseException:\n # print(\"oops\")\n test_size = min(10, int(len(imgs) * 0.2))\n random.shuffle(imgs)\n test_set = imgs[:test_size]\n train_set_X = imgs[test_size:][:200]\n train_set = imgs[test_size:][200:400]\n return", "def read_dataset(image_dir: str = IMAGE_DIR, dump: bool = True, **kwargs):\n global TRAIN_X, TRAIN_Y\n logdir = \"logs/scalars/\" + datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n tensorboard_callback = TensorBoard(log_dir=logdir)\n\n base_model = InceptionV3(include_top=False,\n weights='imagenet',\n input_shape=(WIDHT, HEIGHT, 3))\n for layer in base_model.layers:\n layer.trainable = False\n\n model = Sequential()\n model.add(base_model)\n model.add(GlobalAveragePooling2D())\n # model.add(Dense(512, activation='relu'))\n model.add(Dense(LABEL_SIZE, activation='softmax'))\n model.compile(\n loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'],\n )\n\n def define_label(parent_name):\n return \"-\".join(parent_name.split('-')[1:])\n\n for subdir, dirs, files in os.walk(image_dir):\n for file in files:\n path = pathlib.Path(subdir).absolute() / file\n image_label = define_label(path.parent.name)\n TRAIN_Y.append(image_label)\n\n label_encoder = LabelEncoder()\n TRAIN_Y = label_encoder.fit_transform(TRAIN_Y)\n TRAIN_Y = np.array(to_categorical(TRAIN_Y, num_classes=LABEL_SIZE))\n\n count = 0\n current_length_train_x = 0\n\n for subdir, dirs, files in os.walk(image_dir):\n print(f'PATH: {subdir} is processing')\n count += 1\n for file in files:\n path = pathlib.Path(subdir).absolute() / file\n image = load_img(str(path), target_size=WH)\n TRAIN_X.append(np.array(image))\n\n if count % 40 == 0:\n slice_left = current_length_train_x\n slice_right = slice_left + len(TRAIN_X)\n current_length_train_x = slice_right\n # convert to binary matrix (120 labels at all) 2^10 = 128\n # normalize image\n # split image\n\n # TODO: make active on resume iterations\n # if count == 40:\n # # make empty\n # TRAIN_X = []\n # model = load_model(f'{model_name}_iter_40.dump')\n # continue\n\n x_train, x_test, y_train, y_test = train_test_split(\n np.array(TRAIN_X),\n TRAIN_Y[slice_left:slice_right],\n test_size=0.2,\n random_state=69,\n )\n\n # make empty\n TRAIN_X = []\n\n augs_gen.fit(x_train)\n model.fit_generator(\n augs_gen.flow(x_train, y_train, batch_size=25),\n validation_data=(x_test, y_test),\n validation_steps=1000,\n steps_per_epoch=1000,\n epochs=20,\n verbose=1,\n callbacks=[tensorboard_callback],\n )\n del x_train, x_test, y_train, y_test\n model.save(f'{model_name}_iter_{count}.dump')\n\n print(f'Executed {count} / 121')\n print('Prepare to write data on the disk')\n # if 
dump:\n # with open(DATA_DIR / 'xes.dump', 'wb') as file_x:\n # pickle.dump(TRAIN_X, file_x)\n # with open(DATA_DIR / 'ykes.dump', 'wb') as file_y:\n # pickle.dump(TRAIN_Y, file_y)\n\n # print('Dumped on the disk')\n # time.sleep(5)", "def images_for_denoising():\r\n return list_images(relpath('image_dataset/train'), True)", "def load_data(self) -> tuple:\n self.read_path = Path(os.environ[\"DATA_PATH\"]) / \"characters\"\n self.pretrain_path = Path(os.environ[\"FONT_DATA\"]) / \"training\"\n self.dataset_builder.build_data_set()\n X_pretrain, y_pretrain, X_train, y_train, X_dev, y_dev, X_test, y_test = tuple(\n [] for l in range(8)\n )\n\n for letter in self.hebrew.letter_li:\n pretrain_images = glob(f\"{Path(self.pretrain_path/letter)}/*.jpeg\")\n train_images = glob(f'{Path(self.read_path/\"train\"/letter)}/*.jpg')\n dev_images = glob(f'{Path(self.read_path/\"dev\"/letter)}/*.jpg')\n test_images = glob(f'{Path(self.read_path/\"test\"/letter)}/*.jpg')\n\n # pretrain data\n for img in pretrain_images:\n image = cv2.imread(img)\n image = cv2.resize(image, self.img_size)\n X_pretrain.append(image)\n y_pretrain.append(self.hebrew.letter_li.index(letter))\n\n # training data\n for img in train_images:\n image = cv2.imread(img)\n image = cv2.resize(image, self.img_size)\n X_train.append(image)\n y_train.append(self.hebrew.letter_li.index(letter))\n\n # dev data\n for img in dev_images:\n image = cv2.imread(img)\n image = cv2.resize(image, self.img_size)\n X_dev.append(image)\n y_dev.append(self.hebrew.letter_li.index(letter))\n\n # test data\n for img in test_images:\n image = cv2.imread(img)\n image = cv2.resize(image, self.img_size)\n X_test.append(image)\n y_test.append(self.hebrew.letter_li.index(letter))\n\n return (\n np.array(X_pretrain),\n np.array(y_pretrain),\n np.array(X_train),\n np.array(y_train),\n np.array(X_dev),\n np.array(y_dev),\n np.array(X_test),\n np.array(y_test),\n )", "def load_food_image_batch(filename, num):\n with open(filename, 'rb') as f:\n datadict = pickle.load(f)\n url_parts = datadict['Image URL'].split(\"/\")\n img_fn = url_parts[-1]\n with open(img_fn):\n X = f.read()\n Y = datadict['coarse_labels']\n X = X.reshape(num, 3, 32, 32).transpose(0,2,3,1).astype(\"float\")\n Y = np.array(Y)\n return X, Y", "def load_source_png_images(self, num_slice):\n if self.subject is None:\n print Console.WARNING + 'You need to specify a subject first' + Console.ENDC\n return\n data = [] \n for l in self.locations.LABELS:\n slice_file = self.locations.SOURCE_PNG % (l, num_slice)\n \n #print 'Loading Input Image \\t\\t%s'%slice_file \n slice_data = misc.imread(slice_file) \n data.append(slice_data)\n \n return data #images in the same order as labels" ]
[ "0.708967", "0.6994615", "0.6965878", "0.6935963", "0.6672894", "0.66682076", "0.65903187", "0.6515253", "0.64869475", "0.64020413", "0.6389433", "0.6385904", "0.63520086", "0.63297844", "0.632445", "0.632055", "0.63086367", "0.62679493", "0.6263167", "0.6256531", "0.6240129", "0.62342304", "0.6194563", "0.619067", "0.61832625", "0.6181304", "0.61382735", "0.61029166", "0.60895675", "0.6070254" ]
0.7322466
0
Using the public method mount to test _get_drive_mount_point_name
def test_get_drive_mount_point_name_unique_id_None(self): try: tmpdir = mkdtemp() root = os.path.join(tmpdir, 'mnt/gluster-object') drive = 'test' _init_mock_variables(tmpdir) gfs._allow_mount_per_server = True self.assertTrue(gfs.mount(root, drive)) finally: gfs._allow_mount_per_server = False _reset_mock_variables() shutil.rmtree(tmpdir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_googledrive_mounting_point():\n return None", "def test_get_drive_mount_point_name_unique_id_exists(self):\n try:\n tmpdir = mkdtemp()\n root = os.path.join(tmpdir, 'mnt/gluster-object')\n drive = 'test'\n\n _init_mock_variables(tmpdir)\n gfs._allow_mount_per_server = True\n gfs._unique_id = 0\n self.assertTrue(gfs.mount(root, drive))\n finally:\n gfs._allow_mount_per_server = False\n gfs._unique_id = None\n _reset_mock_variables()\n shutil.rmtree(tmpdir)", "def get_mount_point(self):\n try:\n output = openmediavault.subprocess.check_output(\n [\n 'findmnt',\n '--canonicalize',\n '--first-only',\n '--noheadings',\n '--output=TARGET',\n '--raw',\n self.canonical_device_file,\n ]\n )\n # Examples:\n # /media/8c982ec2-8aa7-4fe2-a912-7478f0429e06\n # /srv/_dev_disk_by-id_dm-name-vg01-lv01\n # /srv/dev-disk-by-label-xx\\x20yy\n return openmediavault.string.unescape_blank(output.decode().strip())\n except subprocess.CalledProcessError:\n pass\n return None", "def create_onedrive_mounting_point():\n return None", "def _get_mount(self):\n if not self._mount.endswith(os.path.sep):\n return \"%s%s\" % (self._mount, os.path.sep)\n else:\n return self._mount", "def get_drive_list():\n\n if sys.platform == \"darwin\":\n MOUNT_PARSER = OSX_MOUNT_PARSER\n else:\n MOUNT_PARSER = LINUX_MOUNT_PARSER\n\n try:\n drivelist = subprocess.Popen(\"mount\", shell=True, stdout=subprocess.PIPE)\n drivelisto, err = drivelist.communicate()\n # Some Android devices at least now use the LINUX_MOUNT_PARSER format.\n # Try it and revert to RAW_MOUNT_PARSER if we can't find any matches with it.\n if on_android() and not MOUNT_PARSER.match(drivelisto.decode()):\n MOUNT_PARSER = RAW_MOUNT_PARSER\n except OSError: # couldn't run `mount`, let's try reading the /etc/mounts listing directly\n with open(\"/proc/mounts\") as f:\n drivelisto = f.read()\n MOUNT_PARSER = RAW_MOUNT_PARSER\n\n drives = []\n\n for drivematch in MOUNT_PARSER.finditer(drivelisto.decode()):\n\n drive = drivematch.groupdict()\n path = (\n drive[\"path\"]\n .replace(\"\\\\040\", \" \")\n .replace(\"\\\\011\", \"\\t\")\n .replace(\"\\\\012\", \"\\n\")\n .replace(\"\\\\134\", \"\\\\\")\n )\n\n # skip the drive if the filesystem or path is in a blacklist\n if drive[\"filesystem\"] in FILESYSTEM_BLACKLIST or any(\n path.startswith(p) for p in PATH_PREFIX_BLACKLIST\n ):\n logger.debug(\"Skipping blacklisted drive '{}'\".format(path))\n continue\n\n # skip if we don't have read access to the drive\n if not os.access(path, os.R_OK):\n continue\n\n # attempt to get some additional metadata about the drive\n try:\n usage = _get_drive_usage(path)\n except OSError:\n # skip if we don't have access to get drive usage\n continue\n\n dbus_drive_info = _try_to_get_drive_info_from_dbus(drive[\"device\"])\n diskutil_info = _try_to_get_drive_info_from_diskutil(drive[\"device\"])\n\n # combine the various metadata sources to construct the overall drive metadata\n drives.append(\n {\n \"path\": path,\n \"name\": dbus_drive_info.get(\"name\")\n or diskutil_info.get(\"name\")\n or path,\n \"filesystem\": drive[\"filesystem\"],\n \"freespace\": usage[\"free\"],\n \"totalspace\": usage[\"total\"],\n \"drivetype\": dbus_drive_info.get(\"drivetype\")\n or diskutil_info.get(\"drivetype\")\n or \"\",\n \"guid\": dbus_drive_info.get(\"guid\")\n or diskutil_info.get(\"guid\")\n or drive[\"device\"],\n }\n )\n\n return drives", "def get_mount_points():\n\n points = []\n t = subprocess.check_output(['mount'])\n t = t.decode()\n\n for line in t.splitlines():\n t = 
line.find('smbfs')\n if t < 0: continue\n b = line.find(' on ')\n points.append(line[b+4: t-2])\n # //[email protected]/storage on /Volumes/storage (smbfs, nodev, nosuid, mounted by ruan)\n return points", "def test_get_drives_drive_firmware(self):\n pass", "def testMountCommand(self):\n with self.assertRaises(FilePathException):\n File().getGirderMountFilePath(self.file)\n self.assertIsNone(File().getGirderMountFilePath(self.file, validate=False))\n mountPath = tempfile.mkdtemp()\n subprocess.check_call(['girder', 'mount', mountPath, '-d', os.environ['GIRDER_TEST_DB']])\n endTime = time.time() + 10 # maximum time to wait\n while time.time() < endTime:\n if os.path.exists(os.path.join(mountPath, 'user')):\n break\n time.sleep(0.1)\n filePath = os.path.join(mountPath, 'user', 'admin', 'Public', 'test', 'file1a.txt')\n self.assertEqual(File().getGirderMountFilePath(self.file), filePath)\n self.assertNotEqual(File().getGirderMountFilePath(self.file),\n File().getLocalFilePath(self.file))\n self.assertTrue(os.path.exists(filePath))\n self.assertEqual(open(filePath).read().strip(), 'File 1A')\n subprocess.check_call(['girder', 'mount', mountPath, '-u'])\n endTime = time.time() + 10 # maximum time to wait\n while time.time() < endTime:\n if not os.path.exists(os.path.join(mountPath, 'user')):\n break\n time.sleep(0.1)\n self.assertFalse(os.path.exists(filePath))\n os.rmdir(mountPath)\n with self.assertRaises(FilePathException):\n File().getGirderMountFilePath(self.file)", "def mpt():\n lbl_drives = ['device','mountpoint','fstype']\n disks = [d[0:3] for d in psutil.disk_partitions()]\n drives = [dict(zip(lbl_drives,ds)) for ds in disks]\n return [d['mountpoint']for d in drives]", "def get_disk_by_mountpoint(mnt_point):\n diskparts = psutil.disk_partitions()\n for item in diskparts:\n if item.mountpoint == mnt_point:\n return realpath(item.device)\n return None", "def get_mount_info(devname, label=None):\n mount_point = get_mount_target(devname, label)\n mounts = check_output('mount | grep \" %s \" || :' % mount_point, shell=True)\n if mounts:\n return Munch(zip(('device', 'mount_point', 'type', 'options'),\n MOUNTS_RE.match(mounts.decode()).groups()))", "def get_mount_point(path):\n\n path = os.path.abspath(path)\n while path != os.path.sep:\n if os.path.ismount(path):\n return path\n path = os.path.abspath(os.path.join(path, os.pardir))\n return path", "def create_dropbox_mounting_point():\n return None", "def mount_single(partition_size, drives):\n for drive_list in drives:\n if are_equal(drive_list, partition_size):\n for drive_info, partition_info in zip(drive_list, partition_size):\n mount_pattern = \"mount -t ntfs -o uid=1000,gid=1000,umask=0002 /dev/{} {}\"\n mount_cmd = mount_pattern.format(drive_info[1], partition_info[1])\n print(mount_cmd)", "def mountpoint(self):\n return self._properties.get('mountpoint')", "def get_disk_name():\n return \"%s.dat.disk\" % getpass.getuser()", "def device_mounted(uuid):\n out, err = run_cmd(['lsblk', '-o', 'NAME,UUID,MOUNTPOINT', '--json'])\n\n blockdevices = json.loads(out)['blockdevices']\n\n for blkdevice in blockdevices:\n if key_exists('children', blkdevice):\n for child in blkdevice['children']:\n if key_exists('mountpoint', child) and child['uuid'] == uuid:\n return child['mountpoint']", "def mount_path(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"mount_path\")", "def test_get_node_drive(self):\n pass", "def _get_disk_name(disk_type, instance, short=False):\n prefix = '%s_' % (disk_type[0] if short else disk_type)\n base = ('%s_%s' % 
(instance.name[:8], instance.uuid[:4]) if short\n else instance.name)\n return pvm_util.sanitize_file_name_for_api(\n base, prefix=prefix, max_len=pvm_const.MaxLen.VDISK_NAME if short\n else pvm_const.MaxLen.FILENAME_DEFAULT)", "def isMounted(device):\n for _device, _path in getMounted():\n if device == _device:\n return _path\n return ''", "def get_disk_file_name():\n return \"%s/%s\" % (get_user_homedir(), get_disk_name())", "def mount(self):\n return self._mount", "def find_dev_mount_point(self, usb_table):\n mounts = open(\"/proc/mounts\")\n mount_lines = mounts.readlines()\n table = usb_table\n i = 0\n for device in table:\n for line in mount_lines:\n arguments = line.split(\" \")\n if arguments[0] == device[0]:\n usb_table[i].append(arguments[1])\n usb_table[i] = self.get_drive_stat(usb_table[i])\n break\n i += 1\n return usb_table", "def drive_type():", "def getmount(mypath): # noqa\n\n path_ = os.path.realpath(os.path.abspath(mypath))\n while path_ != os.path.sep:\n if os.path.ismount(path_):\n return path_\n path_ = os.path.abspath(os.path.join(path_, os.pardir))\n return path_", "def get_mountpoint(host, fqpath):\n command = \"df -P %s | awk 'END{print $NF}'\" % fqpath\n rcode, rout, rerr = g.run(host, command)\n if rcode == 0:\n return rout.strip()\n\n g.log.error(\"Get mountpoint failed: %s\" % rerr)\n return None", "def _get_mount_path(self, connection_info):\n share = self._normalize_export(connection_info['data']['export'])\n return os.path.join(self._get_mount_point_base(),\n utils.get_hash_str(share))", "def find_mount_point(path):\n path = os.path.abspath(path)\n while not os.path.ismount(path):\n path = os.path.dirname(path)\n return path" ]
[ "0.71002924", "0.6954491", "0.6827536", "0.6759451", "0.6704294", "0.66067576", "0.64347553", "0.6420853", "0.63291794", "0.63125104", "0.6307517", "0.62765527", "0.62722826", "0.6255401", "0.61517626", "0.61497533", "0.6133829", "0.6132875", "0.6093856", "0.6090868", "0.6059725", "0.60457987", "0.6033606", "0.6032021", "0.6028802", "0.60190725", "0.5989768", "0.59817255", "0.5969452", "0.5952693" ]
0.75672483
0
Using the public method mount to test _get_drive_mount_point_name when _unique_id is already defined
def test_get_drive_mount_point_name_unique_id_exists(self): try: tmpdir = mkdtemp() root = os.path.join(tmpdir, 'mnt/gluster-object') drive = 'test' _init_mock_variables(tmpdir) gfs._allow_mount_per_server = True gfs._unique_id = 0 self.assertTrue(gfs.mount(root, drive)) finally: gfs._allow_mount_per_server = False gfs._unique_id = None _reset_mock_variables() shutil.rmtree(tmpdir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_drive_mount_point_name_unique_id_None(self):\n try:\n tmpdir = mkdtemp()\n root = os.path.join(tmpdir, 'mnt/gluster-object')\n drive = 'test'\n\n _init_mock_variables(tmpdir)\n gfs._allow_mount_per_server = True\n self.assertTrue(gfs.mount(root, drive))\n finally:\n gfs._allow_mount_per_server = False\n _reset_mock_variables()\n shutil.rmtree(tmpdir)", "def create_googledrive_mounting_point():\n return None", "def create_onedrive_mounting_point():\n return None", "def testMountCommand(self):\n with self.assertRaises(FilePathException):\n File().getGirderMountFilePath(self.file)\n self.assertIsNone(File().getGirderMountFilePath(self.file, validate=False))\n mountPath = tempfile.mkdtemp()\n subprocess.check_call(['girder', 'mount', mountPath, '-d', os.environ['GIRDER_TEST_DB']])\n endTime = time.time() + 10 # maximum time to wait\n while time.time() < endTime:\n if os.path.exists(os.path.join(mountPath, 'user')):\n break\n time.sleep(0.1)\n filePath = os.path.join(mountPath, 'user', 'admin', 'Public', 'test', 'file1a.txt')\n self.assertEqual(File().getGirderMountFilePath(self.file), filePath)\n self.assertNotEqual(File().getGirderMountFilePath(self.file),\n File().getLocalFilePath(self.file))\n self.assertTrue(os.path.exists(filePath))\n self.assertEqual(open(filePath).read().strip(), 'File 1A')\n subprocess.check_call(['girder', 'mount', mountPath, '-u'])\n endTime = time.time() + 10 # maximum time to wait\n while time.time() < endTime:\n if not os.path.exists(os.path.join(mountPath, 'user')):\n break\n time.sleep(0.1)\n self.assertFalse(os.path.exists(filePath))\n os.rmdir(mountPath)\n with self.assertRaises(FilePathException):\n File().getGirderMountFilePath(self.file)", "def create_dropbox_mounting_point():\n return None", "def mount_single(partition_size, drives):\n for drive_list in drives:\n if are_equal(drive_list, partition_size):\n for drive_info, partition_info in zip(drive_list, partition_size):\n mount_pattern = \"mount -t ntfs -o uid=1000,gid=1000,umask=0002 /dev/{} {}\"\n mount_cmd = mount_pattern.format(drive_info[1], partition_info[1])\n print(mount_cmd)", "def make_mount_path(id_):\n mount_path = os.path.join(\n openmediavault.getenv(\"OMV_MOUNT_DIR\", \"/srv\"), id_.replace('/', '_')\n )\n return openmediavault.string.path_prettify(mount_path)", "def _get_mount(self):\n if not self._mount.endswith(os.path.sep):\n return \"%s%s\" % (self._mount, os.path.sep)\n else:\n return self._mount", "def test_create_drives_drive_smartfail_item(self):\n pass", "def get_mount_point(self):\n try:\n output = openmediavault.subprocess.check_output(\n [\n 'findmnt',\n '--canonicalize',\n '--first-only',\n '--noheadings',\n '--output=TARGET',\n '--raw',\n self.canonical_device_file,\n ]\n )\n # Examples:\n # /media/8c982ec2-8aa7-4fe2-a912-7478f0429e06\n # /srv/_dev_disk_by-id_dm-name-vg01-lv01\n # /srv/dev-disk-by-label-xx\\x20yy\n return openmediavault.string.unescape_blank(output.decode().strip())\n except subprocess.CalledProcessError:\n pass\n return None", "def device_mounted(uuid):\n out, err = run_cmd(['lsblk', '-o', 'NAME,UUID,MOUNTPOINT', '--json'])\n\n blockdevices = json.loads(out)['blockdevices']\n\n for blkdevice in blockdevices:\n if key_exists('children', blkdevice):\n for child in blkdevice['children']:\n if key_exists('mountpoint', child) and child['uuid'] == uuid:\n return child['mountpoint']", "def __init__(self, mount_point):\n self.mount = mount_point", "def test_mount_status_nas_share(self):\n pass", "def __init__(self, win_server, share_folder, 
mount_point, user, password):\n \n share_folder = share_folder.replace(\"/\", \"\\\\\")\n network_folder = r'//%s/%s' %(win_server, share_folder)\n\n print('Mount_point - %s' %(mount_point))\n print('In the python code next use mount() function - to mount windows share folder, and use umount() function - to unmount')\n self.mount_point = mount_point\n self.options = {'network_folder':network_folder, 'mount_point':mount_point,'user':user}\n self.success = []\n self.error = []\n\n mount_cmd = \"mount_smbfs //{user}:{password}@{network_folder} {mount_point}\"\n self.mount_cmd = mount_cmd.format(network_folder=network_folder,\n mount_point=mount_point,\n user=user,\n password=password)\n self.umount_cmd = \"umount {mount_point}\".format(mount_point=mount_point)", "def mount_device(uuid):\n mount_point = f'/mnt/{uuid}/back-up'\n # Create mountpoint if it doesn't exist\n pathlib.Path(mount_point).mkdir(parents=True, exist_ok=True)\n\n # Mount device\n out, err = run_cmd(['mount', '--uuid', uuid, mount_point])\n\n if not err:\n return mount_point\n else:\n abort(err, cause='mount')", "def actually_mount(self, client):\n try:\n getattr(client, self.mount_fun)(self.backend,\n mount_point=self.path)\n except hvac.exceptions.InvalidRequest as exception:\n match = re.match('existing mount at (?P<path>.+)', str(exception))\n if match:\n e_msg = \"%s has a mountpoint conflict with %s\" % \\\n (self.path, match.group('path'))\n raise aomi.exceptions.VaultConstraint(e_msg)\n else:\n raise", "def mount(self, mount_point):\n log.debug(\"Mounting {0} for {1}\".format(mount_point, self.fs.get_full_name()))\n for counter in range(30):\n if self.status == volume_status.ATTACHED:\n if os.path.exists(mount_point):\n # Check if the mount location is empty\n if len(os.listdir(mount_point)) != 0:\n log.warning(\"Mount point {0} already exists and is not \"\n \"empty!? 
({2}) Will attempt to mount volume {1}\"\n .format(mount_point, self.volume_id,\n os.listdir(mount_point)))\n # return False\n else:\n log.debug(\"Creating mount point directory {0} for {1}\"\n .format(mount_point, self.fs.get_full_name()))\n try:\n os.mkdir(mount_point)\n except Exception, e:\n log.warning(\"Could not create {0} mount point {1}: {2}\"\n .format(self.fs.get_full_name(), mount_point, e))\n # Potentially wait for the device to actually become available in the system\n # TODO: Do something if the device is not available in the\n # given time period\n for i in range(10):\n if os.path.exists(self.device):\n log.debug(\"Device path {0} checked and it exists.\".format(\n self.device))\n break\n else:\n log.debug(\"Device path {0} does not yet exist; waiting...\".format(\n self.device))\n time.sleep(4)\n # Until the underlying issue is fixed (see FIXME below), mask this\n # even more by custom-handling the run command and thus not\n # printing the err\n cmd = '/bin/mount %s %s' % (self.device, mount_point)\n try:\n process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n _, _ = process.communicate()\n if process.returncode != 0:\n # FIXME: Assume if a file system cannot be mounted that it's because\n # there is not a file system on the device so try creating\n # one\n if run('/sbin/mkfs.xfs %s' % self.device,\n \"Failed to create a file system on device %s\" % self.device,\n \"Created a file system on device %s\" % self.device):\n if not run(\n '/bin/mount %s %s' % (self.device, mount_point),\n \"Error mounting file system %s from %s\" % (\n mount_point, self.device),\n \"Successfully mounted file system %s from %s\" %\n (mount_point, self.device)):\n log.error(\"Failed to mount device '%s' to mount point '%s'\"\n % (self.device, mount_point))\n return False\n # Resize the volume if it was created from a snapshot\n else:\n if self.snapshot and self.volume.size > self.snapshot.volume_size:\n run('/usr/sbin/xfs_growfs %s' % mount_point)\n log.info(\n \"Successfully grew file system {0}\".format(self.fs.get_full_name()))\n except Exception, e:\n log.error(\"Exception mounting {0} at {1}\".format(\n self.fs.get_full_name(), mount_point))\n return False\n try:\n # Default owner of all mounted file systems to `galaxy`\n # user\n os.chown(mount_point, pwd.getpwnam(\n \"galaxy\")[2], grp.getgrnam(\"galaxy\")[2])\n # Add Galaxy- and CloudBioLinux-required files under the\n # 'data' dir\n if ServiceRole.GALAXY_DATA in self.fs.svc_roles:\n for sd in ['files', 'tmp', 'upload_store', 'export']:\n path = os.path.join(\n self.app.path_resolver.galaxy_data, sd)\n if not os.path.exists(path):\n os.mkdir(path)\n # Make 'export' dir that's shared over NFS be\n # owned by `ubuntu` user so it's accesible\n # for use to the rest of the cluster\n if sd == 'export':\n os.chown(path, pwd.getpwnam(\n \"ubuntu\")[2], grp.getgrnam(\"ubuntu\")[2])\n else:\n os.chown(path, pwd.getpwnam(\n \"galaxy\")[2], grp.getgrnam(\"galaxy\")[2])\n except OSError, e:\n log.debug(\n \"Tried making 'galaxyData' sub-dirs but failed: %s\" % e)\n # If based on an archive, extract archive contents to the mount point\n if self.from_archive:\n # Do not overwrite an existing dir structure w/ the archive\n # content. 
This happens when a cluster is rebooted.\n if self.fs.name == 'galaxy' and \\\n os.path.exists(self.app.path_resolver.galaxy_home):\n log.debug(\"Galaxy home dir ({0}) already exists; not \"\n \"extracting the archive ({1}) so not to \"\n \"overwrite it.\".format(self.app.path_resolver.galaxy_home,\n self.from_archive['url']))\n self.fs.nfs_share_and_set_state()\n else:\n self.fs.state = service_states.CONFIGURING\n # Extract the FS archive in a separate thread\n ExtractArchive(self.from_archive['url'], mount_point,\n self.from_archive['md5_sum'],\n callback=self.fs.nfs_share_and_set_state).run()\n else:\n self.fs.nfs_share_and_set_state()\n return True\n else:\n log.warning(\"Cannot mount volume '%s' in state '%s'. Waiting \"\n \"(%s/30).\" % (self.volume_id, self.status, counter))\n time.sleep(2)", "def get_drive_list():\n\n if sys.platform == \"darwin\":\n MOUNT_PARSER = OSX_MOUNT_PARSER\n else:\n MOUNT_PARSER = LINUX_MOUNT_PARSER\n\n try:\n drivelist = subprocess.Popen(\"mount\", shell=True, stdout=subprocess.PIPE)\n drivelisto, err = drivelist.communicate()\n # Some Android devices at least now use the LINUX_MOUNT_PARSER format.\n # Try it and revert to RAW_MOUNT_PARSER if we can't find any matches with it.\n if on_android() and not MOUNT_PARSER.match(drivelisto.decode()):\n MOUNT_PARSER = RAW_MOUNT_PARSER\n except OSError: # couldn't run `mount`, let's try reading the /etc/mounts listing directly\n with open(\"/proc/mounts\") as f:\n drivelisto = f.read()\n MOUNT_PARSER = RAW_MOUNT_PARSER\n\n drives = []\n\n for drivematch in MOUNT_PARSER.finditer(drivelisto.decode()):\n\n drive = drivematch.groupdict()\n path = (\n drive[\"path\"]\n .replace(\"\\\\040\", \" \")\n .replace(\"\\\\011\", \"\\t\")\n .replace(\"\\\\012\", \"\\n\")\n .replace(\"\\\\134\", \"\\\\\")\n )\n\n # skip the drive if the filesystem or path is in a blacklist\n if drive[\"filesystem\"] in FILESYSTEM_BLACKLIST or any(\n path.startswith(p) for p in PATH_PREFIX_BLACKLIST\n ):\n logger.debug(\"Skipping blacklisted drive '{}'\".format(path))\n continue\n\n # skip if we don't have read access to the drive\n if not os.access(path, os.R_OK):\n continue\n\n # attempt to get some additional metadata about the drive\n try:\n usage = _get_drive_usage(path)\n except OSError:\n # skip if we don't have access to get drive usage\n continue\n\n dbus_drive_info = _try_to_get_drive_info_from_dbus(drive[\"device\"])\n diskutil_info = _try_to_get_drive_info_from_diskutil(drive[\"device\"])\n\n # combine the various metadata sources to construct the overall drive metadata\n drives.append(\n {\n \"path\": path,\n \"name\": dbus_drive_info.get(\"name\")\n or diskutil_info.get(\"name\")\n or path,\n \"filesystem\": drive[\"filesystem\"],\n \"freespace\": usage[\"free\"],\n \"totalspace\": usage[\"total\"],\n \"drivetype\": dbus_drive_info.get(\"drivetype\")\n or diskutil_info.get(\"drivetype\")\n or \"\",\n \"guid\": dbus_drive_info.get(\"guid\")\n or diskutil_info.get(\"guid\")\n or drive[\"device\"],\n }\n )\n\n return drives", "def mount_factory(name, idn=0):\n # Import MOUNT_MAPPING at runtime so we avoid circular imports\n from robosuite.models.mounts import MOUNT_MAPPING\n\n return MOUNT_MAPPING.get(name, \"Unknown mount name: {}\".format(name))(idn=idn)", "def do_mount(devpath, mountpoint, fstype):\n try:\n if check_already_mounted(devpath, mountpoint):\n return\n\n mounter = Mounter()\n mounter.mount(devpath, mountpoint, fstype)\n except exceptions.MountException:\n try:\n mounter.make_filesystem(devpath, fstype)\n 
mounter.mount(devpath, mountpoint, fstype)\n except exceptions.FuxiException as e:\n with excutils.save_and_reraise_exception():\n LOG.error(str(e))", "def mount(self):\n return self._mount", "def generate_common_mount(self, working_file_name):\n\n # Reopenthe working file\n working_file = open(working_file_name, \"a\")\n\n # Check that the stack definition is in the configuration file\n if \"stack-definition\" not in self.project.firmware_definition[\"layout\"]:\n self.project.logging.critical(\"The stack definition is not in the configuration file\")\n exit(1)\n\n # Iterates the stack items\n for item in self.project.firmware_definition[\"layout\"][\"stack-definition\"]:\n # Generate the mount point creation code\n working_file.write(\"# Create the mount point for \" + item[\"stack-item\"][\"type\"] +\n \" '\" + item[\"stack-item\"][\"name\"] + \"'\\n\")\n working_file.write(\"mkdir -p /mnt/dft/\" + item[\"stack-item\"][\"name\"] + \"\\n\")\n working_file.write(\"\\n\")\n\n # Generate the mount commands\n working_file.write(\"# Mount item \" + item[\"stack-item\"][\"type\"] + \" '\" +\n item[\"stack-item\"][\"name\"] + \"'\\n\")\n\n # Generate the tmpfs specific mount command\n if item[\"stack-item\"][\"type\"] == \"tmpfs\":\n working_file.write(\"mount -t tmpfs \")\n\n # Is there some defined options ?\n if \"mount-options\" in item[\"stack-item\"]:\n # Yes, then append the options to the command\n working_file.write(\"-o \" + item[\"stack-item\"][\"mount-options\"] + \" \")\n\n # Complete the mount command\n working_file.write(\"tmpfs /mnt/dft/\" + item[\"stack-item\"][\"name\"] + \"\\n\")\n\n # Generate the tmpfs specific mount command\n if item[\"stack-item\"][\"type\"] == \"squashfs\":\n working_file.write(\"mount -t squashfs \")\n\n # Is there some defined options ?\n if \"mount-options\" in item[\"stack-item\"]:\n # Yes, then append the options to the command\n working_file.write(\"-o \" + item[\"stack-item\"][\"mount-options\"] + \" \")\n\n # Complete the mount command\n working_file.write(item[\"stack-item\"][\"squashfs-file\"] + \" /mnt/dft/\" +\n item[\"stack-item\"][\"name\"] + \" -o loop\\n\")\n\n # Generate the tmpfs specific mount command\n if item[\"stack-item\"][\"type\"] == \"partition\":\n working_file.write(\"mount \")\n\n # Is there some defined options ?\n if \"mount-options\" in item[\"stack-item\"]:\n # Yes, then append the options to the command\n working_file.write(\"-o \" + item[\"stack-item\"][\"mount-options\"] + \" \")\n\n # Complete the mount command\n working_file.write(item[\"stack-item\"][\"partition\"] + \" /mnt/dft/\" +\n item[\"stack-item\"][\"name\"] + \"\\n\")\n\n working_file.write(\"\\n\")\n\n # We are done here, now close the file\n working_file.close()", "def mount_block(block):\n # type: (str) -> str\n\n dir_path = tempfile.mkdtemp(prefix='mount-')\n _mount(block, dir_path)\n\n return dir_path", "def create_filesystem_mounting_point(username, password, domainName, path, instanceId):\n createFileSystemMountingPointParams = {\n \"Username\": username,\n \"Password\": password,\n \"DomainName\": domainName,\n \"Path\": path,\n \"InstanceId\": instanceId\n }\n return createFileSystemMountingPointParams", "def mount(mapping, loaded_data):\n for drive_size, partition_infos in mapping:\n mount_single(partition_infos, loaded_data[drive_size])", "def mount(self, pathname):\n \n # Make sure we don't try to mount something twice.\n if pathname in self.mounts:\n raise ProcessorError(\"%s is already mounted\" % pathname)\n \n # Call hdiutil.\n try:\n p = 
subprocess.Popen((\"/usr/bin/hdiutil\",\n \"attach\",\n \"-plist\",\n \"-mountrandom\", \"/private/tmp\",\n \"-nobrowse\",\n pathname),\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n (out, err) = p.communicate()\n except OSError as e:\n raise ProcessorError(\"hdiutil execution failed with error code %d: %s\" % (\n e.errno, e.strerror))\n if p.returncode != 0:\n raise ProcessorError(\"mounting %s failed: %s\" % (pathname, err))\n \n # Read output plist.\n output = plistlib.readPlistFromString(out)\n \n # Find mount point.\n for part in output[\"system-entities\"]:\n if \"mount-point\" in part:\n # Add to mount list.\n self.mounts[pathname] = part[\"mount-point\"]\n self.output(\"Mounted disk image %s\" % (pathname))\n return self.mounts[pathname]", "def get_mount_info(devname, label=None):\n mount_point = get_mount_target(devname, label)\n mounts = check_output('mount | grep \" %s \" || :' % mount_point, shell=True)\n if mounts:\n return Munch(zip(('device', 'mount_point', 'type', 'options'),\n MOUNTS_RE.match(mounts.decode()).groups()))", "def test_mount_status_nas_share_by_nas(self):\n pass", "def process_mount_dataset(dataset, mount_path):\n entry = repository.get_entry(dataset)\n if entry:\n username = entry.username\n user_pkey = entry.user_pkey\n if username.strip() == \"\" or user_pkey.strip() == \"\":\n # use local settings\n syndicate_users = config.list_syndicate_users_by_ms_host(entry.ms_host)\n for suser in syndicate_users:\n username = suser.username\n user_pkey = suser.user_pkey\n break\n\n if username.strip() == \"\" or user_pkey.strip() == \"\":\n sdm_util.print_message(\"Cannot find user accounts to access the dataset - %s\" % (dataset))\n return 1\n\n try:\n bimpl = sdm_backends.Backends.get_backend_instance(backend, config.get_backend_config(backend))\n if not bimpl.is_legal_mount_path(mount_path):\n sdm_util.print_message(\"Cannot mount dataset to the given mount path for wrong mount path - %s\" % (mount_path))\n return 1\n\n # check existance\n records = mount_table.get_records_by_mount_path(mount_path)\n for rec in records:\n if rec.dataset == dataset and rec.status == sdm_mount_table.MountRecordStatus.UNMOUNTED:\n # same dataset but unmounted\n # delete and overwrite\n mount_table.delete_record(rec.record_id)\n\n mount_record = mount_table.add_record(dataset, mount_path, backend, sdm_mount_table.MountRecordStatus.UNMOUNTED)\n mount_table.save_table(MOUNT_TABLE_PATH)\n\n bimpl.mount(\n mount_record.record_id,\n entry.ms_host,\n entry.dataset,\n username,\n user_pkey,\n entry.gateway,\n mount_path\n )\n mount_record.status = sdm_mount_table.MountRecordStatus.MOUNTED\n mount_table.save_table(MOUNT_TABLE_PATH)\n return 0\n except sdm_mount_table.MountTableException, e:\n sdm_util.print_message(\"Cannot mount dataset - %s to %s\" % (dataset, mount_path), True, sdm_util.LogLevel.ERROR)\n sdm_util.print_message(e, True, sdm_util.LogLevel.ERROR)\n return 1\n except sdm_absbackends.AbstractBackendException, e:\n sdm_util.print_message(\"Cannot mount dataset - %s to %s\" % (dataset, mount_path), True, sdm_util.LogLevel.ERROR)\n sdm_util.print_message(e, True, sdm_util.LogLevel.ERROR)\n return 1\n else:\n sdm_util.print_message(\"Dataset not found - %s\" % dataset)\n return 1", "def active_mountpoint(mount_point):\n execute('mount', mount_point, sudo=True)\n yield\n execute('umount', mount_point, sudo=True)" ]
[ "0.8202178", "0.71788365", "0.69768405", "0.67600346", "0.659675", "0.6513326", "0.6325042", "0.62535024", "0.62465084", "0.62291175", "0.61986035", "0.61748713", "0.6147795", "0.61267954", "0.6116906", "0.6063496", "0.6041668", "0.6027162", "0.6020935", "0.6009902", "0.60019654", "0.5998529", "0.59979767", "0.5938853", "0.5908909", "0.5871056", "0.5839761", "0.58282036", "0.58237433", "0.5805866" ]
0.7969075
1
read the keyboard definitions from the tool's own settings file(s) and return them for writing to the csv file
def buildcsv(settnames, page, showinfo=True): shortcuts = collections.OrderedDict() fdesc = ("File containing keymappings", "File containing command descriptions") ## pdb.set_trace() for ix, name in enumerate(settnames): try: initial = page.settings[name] except KeyError: initial = '' if showinfo: oms = ' - '.join((page.captions['C_SELFIL'], fdesc[ix])) if not initial: initial = os.path.dirname(__file__) fname = get_file_to_save(page.gui, oms=fdesc[ix], start=initial) else: fname = get_file_to_open(page.gui, oms=fdesc[ix], start=initial) if fname and fname != initial: page.settings[name] = fname page.settings["extra"][name] = fdesc[ix] else: fname = initial if ix == 0: kbfile = fname if not fname: return {}, {} elif ix == 1: descfile = fname stuffdict = read_keydefs_and_stuff(kbfile) keydefs = stuffdict.pop('keydefs') actions = stuffdict['actions'] omsdict = stuffdict['descriptions'] # omsdict is uit de accelmap afgeleid waar gewoonlijk geen omschrijvingen in staan. # Bij opnieuw opbouwen eerst kijken of deze misschien al eens zijn opgeslagen # De bestandsnaam kan als een extra setting worden opgenomen - dus: is er zo'n # setting bekend, dan dit bestand lezen # hier dan een GUI tonen waarin de omschrijvingen per command kunnen worden in/aangevuld # actions in de eerste kolom, descriptions in de tweede if descfile: msg, descdict = dml.read_data(descfile, omsdict) if msg: print(msg) elif showinfo: page.dialog_data = {'descdict': descdict, 'actions': actions} # , 'omsdict': omsdict} if show_dialog(page, AccelCompleteDialog): omsdict = page.dialog_data if omsdict != descdict: dml.write_data(descfile, omsdict) # als er sprake is van others dan ook deze meenemen (Dia) lastkey = 0 for key, mods, command in keydefs: lastkey += 1 context, action = actions[command] description = omsdict[command] shortcuts[lastkey] = (_translate_keyname(key), mods, context, action, description) return shortcuts, stuffdict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dataSave():\n # NR5G = gui_reader()\n try: #Python3\n f = open(__file__ + \".csv\",'wt', encoding='utf-8')\n except:\n f = open(__file__ + \".csv\",'wb')\n f.write('%s,'%(entryCol.entry0.get()))\n f.write('%s,'%(entryCol.entry1.get()))\n f.write('%s,'%(entryCol.entry2.get()))\n f.write('%s,'%(entryCol.entry3.get()))\n f.close()\n print(\"DataSave: File Saved\")", "def buildcsv(parent, showinfo=True):\n opprefs = '/home/albert/.config/opera-developer/Preferences'\n shortcuts = collections.OrderedDict()\n commandlist = []\n with open(opprefs, encoding='UTF-8') as _in:\n data = json.load(_in)\n keydict = data['Keybindings']['Basic']\n number = 0\n for cmdstr, value in keydict.items():\n commandlist.append(cmdstr)\n for hotkey in value:\n number += 1\n mod, key = getkey(hotkey)\n shortcuts[number] = (key, mod, 'Basic', cmdstr)\n return shortcuts, {'commands': commandlist}", "def dataLoad():\n try:\n try: #Python3\n f = open(__file__ + \".csv\",\"rt\")\n except: #Python2\n f = open(__file__ + \".csv\",\"rb\")\n data = f.read().split(',')\n entryCol.entry0.delete(0,END)\n entryCol.entry0.insert(0,data[0])\n entryCol.entry1.delete(0,END)\n entryCol.entry1.insert(0,data[1])\n entryCol.entry2.delete(0,END)\n entryCol.entry2.insert(0,data[2])\n entryCol.entry3.delete(0,END)\n entryCol.entry3.insert(0,data[3])\n botWind.writeN(\"DataLoad: File\")\n except:\n botWind.writeN(\"DataLoad: Default\")", "def settings():\r\n\r\n config = cp.ConfigParser()\r\n config.read('settings.ini')\r\n \r\n files = config['files']\r\n model = config['model']\r\n plot = config['plot']\r\n \r\n file_format = files['format']\r\n species_file = r'data/' + files['species file']\r\n reactions_file = r'data/' + files['reactions file']\r\n output_file = 'output/' + files['output file']\r\n model_type = model['model type']\r\n density = model.getfloat('density')\r\n temperature = model.getfloat('temperature')\r\n start_time = model.getfloat('start time')\r\n end_time = model.getfloat('end time')\r\n outfile = plot['outfile for plotting']\r\n\r\n return file_format, species_file, reactions_file, output_file, model_type, density, temperature, start_time, end_time, outfile", "def load_keyboard(self):\n if self.selectedKeyboard == 1:\n self.fileName = \"tables/Tamil-tamil99.txt.in\"\n elif self.selectedKeyboard == 2:\n self.fileName = \"tables/Tamil-phonetic.txt.in\"\n elif self.selectedKeyboard == 3:\n self.fileName = \"tables/Tamil-typewriter.txt.in\"\n elif self.selectedKeyboard == 4:\n self.fileName = \"tables/Tamil-bamini.txt.in\"\n elif self.selectedKeyboard == 5:\n self.fileName = \"tables/Tamil-inscript.txt.in\"\n else:\n pass", "def importSettings(self):\n \n self.ring1, self.ring2, self.ring3, self.ring4, self.ring5 = 0, 0, 0, 0, 0 #Initialise the ring setting values\n self.start1, self.start2, self.start3, self.start4, self.start5 = 1, 2, 3, 4, 5\n \n database = data() #Create a data object\n \n dailySettings = database.readData() #Import the daily settings\n row = dailySettings[0] #Assign the imported data to the row variable\n \n #FORMAT\n #[(1, 'IV V II', '20 09 23', 'TNUVHCQYOMFDRBAIKZGJSXEPLW', 'nft jlx nzj mbu')]\n \n rotors = row[1] #Fetch the data at the first index\n rotorL, rotorM, rotorR = rotors.split() #And split it into 3 seperate rotors\n rotorL, rotorM, rotorR = str(rotorL), str(rotorM), str(rotorR) #Ensure they are string variables\n \n ringSettings = row[2] #Fetch the data at the second index \n ringL, ringM, ringR = ringSettings.split() #And split it into 3 seperate ring positions\n ringL, 
ringM, ringR = int(ringL), int(ringM), int(ringR) #Ensure they are integer variables\n \n plugboardPairs = row[3] #Assign the element at the third index to the plugboard pairs\n charGroups = row[4] #Assign the element at the fourth index to the character groups\n \n startL, startM, startR = input('\\nEnter rotor starting positions: ').split() #Prompt the user to enter the rotor starting positions\n startL, startM, startR = int(startL), int(startM), int(startR) #Ensure they are integer variables\n \n reflectorType = input(\"Enter reflector type: \").upper() #Prompt user to enter reflector type\n \n ring = {'I':'ring1', 'II':'ring2', 'III':'ring3', 'IV':'ring4', 'V':'ring5'} #Match rotor types to string of their ring setting variables \n start = {'I':'start1', 'II':'start2', 'III':'start3', 'IV':'start4', 'V':'start5'} #Match rotor types to string of their start position variables\n \n setLStart = str(start.get(rotorL)) #Get the string of the rotors starting position\n setMStart = str(start.get(rotorM))\n setRStart = str(start.get(rotorR))\n \n vars(self)[setLStart] = startL #Create a dynamic variable using the string of the starting position and set its value as the input value for the left rotor starting position\n vars(self)[setMStart] = startM\n vars(self)[setRStart] = startR\n \n setLRing = str(ring.get(rotorL)) #Get the string of the rotors ring setting \n setMRing = str(ring.get(rotorM))\n setRRing = str(ring.get(rotorR))\n \n vars(self)[setLRing] = ringL #Create a dynamic variable using the string of the ring setting and set its value as the input value for the left rotor ring setting \n vars(self)[setMRing] = ringM\n vars(self)[setRRing] = ringR\n \n print(\"\\n************ Imported Settings ************\") #Output the imported settings to the user\n print(\"Left Rotor:\", rotorL + \", Ring position:\", str(ringL) + \", Start position:\", str(startL))\n print(\"Middle Rotor:\", rotorM + \", Ring position:\", str(ringM) + \", Start position:\", str(startM))\n print(\"Right Rotor:\", rotorR + \", Ring position:\", str(ringR) + \", Start position:\", str(startR))\n print(\"Kenngruppen:\", charGroups)\n \n print(\"Plugboard:\", plugboardPairs)\n print(\"Reflector type:\", reflectorType + '\\n')\n \n # ABCDEFGHIJKLMNOPQRSTUVWXYZ\n self._rotor1 = rotor('EKMFLGDQVZNTOWYHXUSPAIBRCJ','Y','Q', self.start1, self.ring1) #Create a rotor object using the user input for the starting position and ring setting values\n self._rotor2 = rotor('AJDKSIRUXBLHWTMCQGZNPYFVOE','M','E', self.start2, self.ring2)\n self._rotor3 = rotor('BDFHJLCPRTXVZNYEIWGAKMUSQO','D','V', self.start3, self.ring3)\n self._rotor4 = rotor('ESOVPZJAYQUIRHXLNFTGKDCMWB','R','J', self.start4, self.ring4)\n self._rotor5 = rotor('VZBRGITYUPSDNHLXAWMJQOFECK','H','Z', self.start5, self.ring5)\n \n self._UKWA = reflector('AE BJ CM DZ FL GY HX IV KW NR OQ PU ST') #Create the default reflector objects\n self._UKWB = reflector('AY BR CU DH EQ FS GL IP JX KN MO TZ VW')\n self._UKWC = reflector('AF BV CP DJ EI GO HY KR LZ MX NW QT SU')\n \n rotors = {'I':self._rotor1, 'II':self._rotor2, 'III':self._rotor3, 'IV':self._rotor4, 'V':self._rotor5} #Match the rotor types to their objects\n reflectors = {'A':self._UKWA, 'B':self._UKWB, 'C':self._UKWC} #Match the reflector types to their objects\n \n self._rotorL = rotors.get(rotorL) #Assign the corresponding rotor object to the rotor\n self._rotorM = rotors.get(rotorM)\n self._rotorR = rotors.get(rotorR)\n \n self._UKW = reflectors[reflectorType] #Assign the corresponding reflector object to the 
reflector\n \n self._plugboard = plugboard(plugboardPairs) #Assign the corresponding plugboard object to the plugboard", "def onLoadCSVList(self, evt):\n dlg = wx.FileDialog(self.view, \"Choose a file:\", wildcard = \"*.txt; *.csv\" ,\n style=wx.FD_DEFAULT_STYLE | wx.FD_CHANGE_DIR)\n if dlg.ShowModal() == wx.ID_OK:\n print \"You chose %s\" % dlg.GetPath()\n self.config.CSVFilePath = dlg.GetPath()", "def activateDefaultDelimiter(self):\n radioButtons = self.ids.delimiterGrid.children\n _, fileExtension = self.filename.split('.')\n if (fileExtension.upper() == 'CSV'):\n for item in radioButtons:\n try:\n if item.name == ',':\n item.active = True\n else:\n item.active = False\n except AttributeError:\n pass\n else:\n for item in radioButtons:\n try:\n if item.name == '\\t':\n item.active = True\n else:\n item.active = False\n except AttributeError:\n pass", "def readInConfigFileDlg( self ):\n pass", "def readSettingsFile():\n\tglobal logfile\n\tglobal backupCount\n\tglobal maxBytes\n\tglobal debug\n\t\n\tif SettingsFile.getOptionString(INI_Section,\"logfile\"):\n\t\tlogfile = SettingsFile.getOptionString(INI_Section,\"logfile\")\n\tif SettingsFile.getOptionInt(INI_Section,\"maxBytes\"):\n\t\tmaxBytes = SettingsFile.getOptionInt(INI_Section,\"maxBytes\")\n\tif SettingsFile.getOptionInt(INI_Section,\"backupCount\"):\n\t\tbackupCount = SettingsFile.getOptionInt(INI_Section,\"backupCount\")\n\tif SettingsFile.getOptionBoolean(INI_Section,\"debug\"):\n\t\tdebug = SettingsFile.getOptionBoolean(INI_Section,\"debug\")\n\t#endif", "def _on_SavePref(self, _lang='en-US', _fileout='', _text=\"\"):\n\n lang = self.ddnGuiLanguage.get() if len(_lang) == 0 else _lang\n\n fileout = filedialog.asksaveasfilename(\\\n filetypes=[('Preferred characters file', '.csv'), ], \\\n initialdir=self.BibTerm, \\\n initialfile='', \\\n title=LOCALIZED_TEXT[lang]['SavePref'], \\\n defaultextension='.csv') \\\n if len(_fileout) == 0 else _fileout\n if len(fileout) != 0:\n text = self.txtPrefChar.get(0.0, 9999.9999).strip() \\\n if len(_text) == 0 else _text\n text = ' '.join(text.split('\\n'))\n text = ' '.join(text.split('\\r'))\n text = ' '.join(text.split('\\f'))\n if ',' in text:\n pairs = [p.strip() for p in text.split(',')]\n else:\n pairs = [text,]\n fout = codecs.open(fileout, mode='w', encoding='utf-8')\n fout.write(', '.join(pairs))\n fout.close()", "def activateSettings (self):\r\n settings.loadSettings (os.path.expanduser(self.filename))\r\n self.nemeth_translator=settings.brailleTableToUse ()\r\n return settings.activateSettings ({\"braille\":self.nemeth_translator,\"speak\":self.speech_translator,\"preprocessor\":self.preprocessor})", "def setup(self):\n messages = [\n \"Please enter you Holberton email: \",\n \"Please enter your Holberton password (don't worry passwd will be encrypted): \",\n \"Please enter full path where you want to save future projects: \"\n ]\n settings_ini_variables = [\"username\", 'password', 'location']\n\n settings_ini = {}\n for msg, var in zip(messages, settings_ini_variables):\n user_input = str(input(msg))\n\n if var == \"location\":\n while not os.path.exists(user_input):\n print(\"[!]: SUPPLIED PATH DOES NOT EXIST.\")\n user_input = str(input(msg))\n settings_ini[var] = encrypted(user_input) if var == \"password\" else user_input\n\n self.write_to_file(**settings_ini)", "def save_csv_file():\n global output_on_display, import_lst, column_names, data\n if data_base == '':\n mistake_load_table()\n else:\n column_names = data[0]\n save_name = asksaveasfilename(title=\"Select 
file\", filetypes=((\"CSV\", \"*.csv\"), (\"all files\", \"*.*\")),\n confirmoverwrite=True, defaultextension='.csv')\n step = len(column_names)\n data_csv = import_lst\n if len(data_csv[0]) == step:\n pass\n else:\n data_csv = import_lst[step::]\n\n with open(save_name, 'w+') as csv_file:\n csv_writer = csv.writer(csv_file)\n csv_writer.writerow(column_names)\n csv_writer.writerows(data_csv)", "def init():\n args = []\n with open(\"settings.txt\", \"r\") as reader:\n for line in reader:\n args.append(line)\n return args", "def openSettings(self):\r\n pass", "def os_open_parmfile( self, ):\r\n #a_filename = self.starting_dir + os.path.sep + \"parameters.py\"\r\n AppGlobal.os_open_txt_file( \"parameters.py\" )", "def set_parameters(cls):\r\n \"\"\" EXECUTE THIS FUNCTION IN THE FARM CLASS! \"\"\"\r\n cls.TSR, cls.RPM, cls.RAD, cls.BLA, cls.CHR, cls.SEC, cls.NT = \\\r\n np.loadtxt('settings.csv', delimiter=',', skiprows=1, unpack=True)", "def keyinput(key):\n if key == 'meta q':\n raise urwid.ExitMainLoop()\n elif key == 'w':\n cf_dat.backup_files()\n cf_viewer.timed_msg(1, ': Saving file')\n cf_dat.write_config_file()\n elif key in ['right', 'tab']:\n if cf_viewer.cfg_pile.get_focus() == cf_man.cfg_lb:\n cf_viewer.cfg_pile.set_focus(cf_man.opt_lb)\n elif cf_viewer.cfg_pile.get_focus() == cf_man.opt_lb:\n cf_viewer.cfg_pile.set_focus(cf_man.imp_lb)\n else:\n cf_viewer.cfg_pile.set_focus(cf_man.cfg_lb)\n elif key in ['left', 'shift tab']:\n if cf_viewer.cfg_pile.get_focus() == cf_man.cfg_lb:\n cf_viewer.cfg_pile.set_focus(cf_man.imp_lb)\n elif cf_viewer.cfg_pile.get_focus() == cf_man.opt_lb:\n cf_viewer.cfg_pile.set_focus(cf_man.cfg_lb)\n else:\n cf_viewer.cfg_pile.set_focus(cf_man.opt_lb)", "def update_command_file(self):\n self.commandFileTextBrowser.clear()\n\n self.commandFileTextBrowser.append(\"CDEFAULT\")\n\n # CEPO List\n cepo = \"CEPO \"\n for ss in self.cepo_list:\n cepo += ss\n self.commandFileTextBrowser.append(cepo)\n\n if self.dataFormatComboBox.currentText() == \"RTB\":\n self.commandFileTextBrowser.append(\"CEOUTPUT 1\")\n else:\n if self.coordinateTransformComboBox.currentText() == \"Beam\":\n self.commandFileTextBrowser.append(\"CEOUTPUT 100,0 \")\n elif self.coordinateTransformComboBox.currentText() == \"Instrument\":\n self.commandFileTextBrowser.append(\"CEOUTPUT 100,1 \")\n elif self.coordinateTransformComboBox.currentText() == \"Earth\":\n self.commandFileTextBrowser.append(\"CEOUTPUT 100,2 \")\n elif self.coordinateTransformComboBox.currentText() == \"Ship\":\n self.commandFileTextBrowser.append(\"CEOUTPUT 100,3 \")\n\n self.commandFileTextBrowser.append(\"CEI \" + Commands.sec_to_hmss(self.ceiDoubleSpinBox.value()))\n self.commandFileTextBrowser.append(\"CWS \" + str(self.cwsSpinBox.value()))\n self.commandFileTextBrowser.append(\"CWSS \" + str(self.speedOfSoundSpinBox.value()))\n\n if self.cerecordCheckBox.isChecked():\n self.commandFileTextBrowser.append(\"CERECORD \" + \"1\")\n else:\n self.commandFileTextBrowser.append(\"CERECORD \" + \"0\")\n\n for tab in range(self.tabSubsystem.count()):\n ss_cmd_list = self.tabSubsystem.widget(tab).get_cmd_list()\n for ss_cmd in ss_cmd_list:\n self.commandFileTextBrowser.append(ss_cmd.to_str(tab))\n\n self.commandFileTextBrowser.append(\"CSAVE\")\n self.commandFileTextBrowser.append(\"START\")", "def parameter_tweaks( cls, ):\n cls.file_text_editor.add_command( cls.parameters.ex_editor )\n print( f\"parameter tweaks {cls.text_editors}\" ) #", "def read_calibr_table(self):\n filename = QtWidgets.QFileDialog.getOpenFileName(self, 
'Открыть', '.')[0]\n if filename and filename.lower().endswith('.csv'):\n self.set_calibr_table(filename)\n if self.state.ser:\n self.SpinFine.setEnabled(True)\n self.BtnSetFine.setEnabled(True)\n else:\n error_message(\"Файл не выбран или в формате .csv\")", "def defaultconfig(self):\r\n\r\n config_data = {\r\n \"path_to_database\": \"FUDB/FOLLOWUP.DB\",\r\n \"path_to_frontend\": \"FUDB/\",\r\n \"path_to_dcs_info\": \"FUDB/\",\r\n \"path_to_bin\": \"bin/\",\r\n \"path_to_excels_exported_from_database\": \"excels exported/\",\r\n \"path_to_excels_to_be_imported_in_database\": \"excels to be imported/\",\r\n \"path_to_new_opfiles\": \"DC BATCHES IN WORK/0 NEW/\",\r\n \"path_to_batches_unassigned\": \"DC BATCHES IN WORK/1 UNASSIGNED/\",\r\n \"path_to_batches_prepfiles\": \"DC BATCHES IN WORK/2 PREPARED FILES/\",\r\n \"path_to_batches_assigned\": \"DC BATCHES IN WORK/3 ASSIGNED/\",\r\n \"path_to_batches_tobechecked\": \"DC BATCHES IN WORK/4 TO BE CHECKED/\",\r\n \"path_to_batches_tbimported\": \"DC BATCHES IN WORK/5 TO BE IMPORTED/\",\r\n \"path_to_batches_finished\": \"DC BATCHES IN WORK/6 FINISHED/\",\r\n \"path_to_batches_instandby\": \"DC BATCHES IN WORK/7 IN STANDBY/\",\r\n \"path_to_batches_unrecordable\": \"DC BATCHES IN WORK/8 UNRECORDABLE/\",\r\n \"batch_status_options_responsible\": \"PREP. OP FILE, IMPORTATION & SPLIT FILE, RELIABILITY & DATA UPGRADE, CHECK OP FILE, CHECK SPLIT FILE, CHECK FRONT END, **TO BE CHECKED\",\r\n \"batch_status_options_proofreader\": \"OP FILE OK, SPLIT FILE OK, FRONT END OK, **TO BE IMPORTED, **FINISHED, **REWORK, **STANDBY, **UNRECORDABLE\",\r\n \"batch_status_options_overall\": \"ONGOING, STANDBY, FINISHED, UNRECORDABLE\",\r\n \"aircrafts\": \"A300, A300-600, A310, A320, A330, A340, A350, A380\",\r\n \"split_batch_factor\": \"2, 3, 4, 5, 6, 7, 8, 9\",\r\n \"IDlentgh\": \"6\",\r\n \"port\": \"5000\"\r\n }\r\n \r\n if not os.path.isfile(os.path.join(self.cwd, \"config.json\")):\r\n self.func.write_json(config_data, self.cwd, fname=\"config.json\")", "def save_preferences(self):\n\n print 'Saving DNAtool preferences'\n self.preferences.set('seqfont',self.seqfont.get())\n self.preferences.set('seqfontsize',self.seqfontsize.get())\n self.preferences.set('fontstyle',self.fontstyle.get())\n self.preferences.set('base_scale',self.base_scale.get())\n self.preferences.set('restr_font',self.restr_font.get())\n self.preferences.set('backgrcolor',self.backgrcolor.get())\n #print self.preferences.get('restr_font')\n return", "def create_settings_file():\n with open('./cfg/settings.cfg'.replace(\"/\", os.path.sep), 'w') as cfg:\n cfg.write('[report]\\nlogo = ./cfg/logo.png\\ncompany =\\nrecord =\\nunit =\\nexaminer =\\nnotes =\\n\\n[auth]\\ngmail = [email protected]\\npassw = yourpassword\\ndevid = 1234567887654321\\ncelnumbr = BackupPhoneNunmber\\n\\n[app]\\npkg = com.whatsapp\\nsig = 38a0f7d505fe18fec64fbf343ecaaaf310dbd799\\n\\n[client]\\npkg = com.google.android.gms\\nsig = 38918a453d07199354f8b19af05ec6562ced5788\\nver = 9877000'.replace(\"/\", os.path.sep))", "def userSetup(self):\n if self.user[\"Save\"] == \"\":\n self.ui.b_run.setEnabled(False)\n else:\n name_split = self.splitPath(self.user[\"Save\"])[-1]\n name = name_split.split(\".\")[0]\n self.ui.l_save.setText(\"Save to: \" + name)\n\n if self.user[\"GT\"] != \"\":\n self.ui.l_ground_truth.setText(self.splitPath(self.user[\"GT\"])[-1])\n\n self.ui.l_colour.setText(self.user[\"Colour\"])", "def save_csv(self):\n path, _ = QtWidgets.QFileDialog.getSaveFileName(self, '保存数据', '', 'CSV(*.csv)')\n\n if not 
path:\n return\n\n with open(path, 'w') as f:\n writer = csv.writer(f, lineterminator='\\n')\n\n writer.writerow(self.headers.keys())\n\n for row in range(self.rowCount()):\n row_data = []\n for column in range(self.columnCount()):\n item = self.item(row, column)\n if item:\n row_data.append(str(item.text()))\n else:\n row_data.append('')\n writer.writerow(row_data)", "def save_to_conf(self):\r\n for checkbox, (option, _default) in list(self.checkboxes.items()):\r\n self.set_option(option, checkbox.isChecked())\r\n for radiobutton, (option, _default) in list(self.radiobuttons.items()):\r\n self.set_option(option, radiobutton.isChecked())\r\n for lineedit, (option, _default) in list(self.lineedits.items()):\r\n self.set_option(option, to_text_string(lineedit.text()))\r\n for spinbox, (option, _default) in list(self.spinboxes.items()):\r\n self.set_option(option, spinbox.value())\r\n for combobox, (option, _default) in list(self.comboboxes.items()):\r\n data = combobox.itemData(combobox.currentIndex())\r\n self.set_option(option, from_qvariant(data, to_text_string))\r\n for (fontbox, sizebox), option in list(self.fontboxes.items()):\r\n font = fontbox.currentFont()\r\n font.setPointSize(sizebox.value())\r\n self.set_font(font, option)\r\n for clayout, (option, _default) in list(self.coloredits.items()):\r\n self.set_option(option, to_text_string(clayout.lineedit.text()))\r\n for (clayout, cb_bold, cb_italic), (option, _default) in list(self.scedits.items()):\r\n color = to_text_string(clayout.lineedit.text())\r\n bold = cb_bold.isChecked()\r\n italic = cb_italic.isChecked()\r\n self.set_option(option, (color, bold, italic))", "def save_to_conf(self):\n for checkbox, (option, _default) in list(self.checkboxes.items()):\n self.set_option(option, checkbox.isChecked())\n for radiobutton, (option, _default) in list(self.radiobuttons.items()):\n self.set_option(option, radiobutton.isChecked())\n for lineedit, (option, _default) in list(self.lineedits.items()):\n self.set_option(option, to_text_string(lineedit.text()))\n for spinbox, (option, _default) in list(self.spinboxes.items()):\n self.set_option(option, spinbox.value())\n for combobox, (option, _default) in list(self.comboboxes.items()):\n data = combobox.itemData(combobox.currentIndex())\n self.set_option(option, from_qvariant(data, to_text_string))\n for (fontbox, sizebox), option in list(self.fontboxes.items()):\n font = fontbox.currentFont()\n font.setPointSize(sizebox.value())\n self.set_font(font, option)\n for clayout, (option, _default) in list(self.coloredits.items()):\n self.set_option(option, to_text_string(clayout.lineedit.text()))\n for (clayout, cb_bold, cb_italic), (option, _default) in list(self.scedits.items()):\n color = to_text_string(clayout.lineedit.text())\n bold = cb_bold.isChecked()\n italic = cb_italic.isChecked()\n self.set_option(option, (color, bold, italic))", "def saveInConfigFileDlg( self ):\n pass" ]
[ "0.58790845", "0.57085", "0.56216234", "0.5558641", "0.5554129", "0.5521753", "0.54928106", "0.54494387", "0.54430705", "0.54299235", "0.5418328", "0.54119194", "0.5343505", "0.5333444", "0.532513", "0.53054166", "0.53034294", "0.5302163", "0.5286992", "0.52804625", "0.5280058", "0.5266465", "0.52553934", "0.5246843", "0.52359396", "0.52177656", "0.5212758", "0.52124846", "0.52109367", "0.51921713" ]
0.63631856
0
Returns the graph complement of G.
def complement(G): R = G.__class__() R.add_nodes_from(G) R.add_edges_from(((n, n2) for n, nbrs in G.adjacency() for n2 in G if n2 not in nbrs if n != n2)) return R
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def complement(G):\n\n nset = set(G.nodes())\n n_nodes = G.order()\n n_edges = n_nodes * (n_nodes - 1) - G.size() + 1\n \n cmp_edges = ((u, v) for u in G.nodes()\n\t\t for v in nset - set(G.successors(u)))\n deg = make_deg(n_nodes, cmp_edges)\n cmp_edges = ((u, v) for u in G.nodes()\n\t\t for v in nset - set(G.successors(u)))\n H = make(n_nodes, n_edges, cmp_edges, deg)\n return H", "def inverse_graph(graph):\n graph2 = copy.deepcopy(graph)\n i = 0\n while i < len(graph2):\n j = 0\n while j < len(graph2):\n if i != j:\n if graph2[i][j] == 0:\n graph2[i][j] = 1\n j += 1\n elif graph2[i][j] == 1:\n graph2[i][j] = 0\n j += 1\n else:\n j += 1\n i += 1\n return graph2", "def inverse_gc(g):\n i = g\n j = 1\n while j<N:\n i = i ^ (g >> j)\n j = j + 1\n return i", "def reverse(G, copy=True):\n if not G.is_directed():\n raise nx.NetworkXError(\"Cannot reverse an undirected graph.\")\n else:\n return G.reverse(copy=copy)", "def complement_of_cycle(n):\n return complement(nx.cycle_graph(n))", "def copy_graph(g):\n return copy.deepcopy(g)", "def exclude_nodes_GC(G):\n remove, present = [], []\n # Find giant component\n Gcc = sorted(nx.connected_component_subgraphs(G), key = len, reverse=True)\n G0 = Gcc[0]\n for node in G.nodes():\n if node not in G0.nodes():\n remove.append(node)\n G0.add_node(node,GC= 0)\n else:\n present.append(node)\n G0.add_node(node, GC= 1)\n # Remove nodes not in giant component\n remove_outliers = [node for node in G.nodes() if node not in G0.nodes()]\n G.remove_nodes_from(remove_outliers)\n return G", "def reduce_graph(G: Graph) -> Tuple[Graph, Graph]:\n G1 = Graph(G.V, set())\n G2 = Graph(G.V, set())\n # Note that the paper says |V1| != |V|, but it is likely a typo, and it meant\n # either \"until\" or \"while |V1| == |V|\"\n # After all, just looking at Figure 9 it is visible that the number of vertex\n # on G1 is not the number of vertex on the original graph\n while len(G1.V) == len(G.V):\n e = np.random.choice(list(G.E))\n S = generate_clique_candidate(G, e)\n G1 = induced_subgraph(G, S)\n G2.E = G.E.difference({e})\n return G1, G2", "def condensation(G):\n scc = strongly_connected_components(G)\n mapping = dict([(n,tuple(sorted(c))) for c in scc for n in c])\n cG = nx.DiGraph()\n for u in mapping:\n cG.add_node(mapping[u])\n for _,v,d in G.edges_iter(u, data=True):\n if v not in mapping[u]:\n cG.add_edge(mapping[u], mapping[v])\n return cG", "def to_undirected_graph(self):\n visited = set() \n G = Graph.Graph()\n \n for node in self.node_set:\n \n if node not in visited:\n visited.add(node)\n for i in self.suffix[node]:\n G.add_edge(node, i)\n \n return G", "def not_reachable(self,graph):\n reachable_in = nx.descendants(graph, 0)\n reachable_out = nx.ancestors(graph, self.nodes - 1)\n # add the last node back in\n reachable_out.add(self.nodes - 1)\n\n set_of_nodes = set(range(1, self.nodes))\n\n not_reachable_in = set_of_nodes - reachable_in\n not_reachable_out = set_of_nodes - reachable_out\n return not_reachable_in ,not_reachable_out", "def cleanGraph2(self,graph):\n return [graph[i] for i in range(len(graph)-1) if graphp[i]!=graph[i+1]]", "def bipartite_sets(G):\n color=bipartite_color(G)\n X=set(n for n in color if color[n]==1)\n Y=set(n for n in color if color[n]==0)\n return (X,Y)", "def get_downregulated_genes_network(self) -> Graph:\n logger.info(\"In get_downregulated_genes_network()\")\n\n deg_graph = self.graph.copy() # deep copy graph\n not_diff_expr = self.graph.vs(down_regulated_eq=False)\n\n # delete genes which are not differentially expressed or have 
no connections to others\n deg_graph.delete_vertices(not_diff_expr.indices)\n deg_graph.delete_vertices(deg_graph.vs.select(_degree_eq=0))\n\n return deg_graph", "def decompose(self,graph):\n x=[graph[i][0] for i in range(len(graph))]\n y=[graph[i][1] for i in range(len(graph))]\n return self.transform(x)+self.transform(y)", "def complement(self):\n assert self._.d == 2, \"the complement is only defined for two classes\"\n return self._.complement", "def get_graph(self):\n graph = copy.deepcopy(self.G)\n for source, dests in graph.items():\n for dest in dests:\n constraint = graph[source][dest]['constraint']\n new_constraint = self.preprocess_constraint(constraint)\n graph[source][dest]['constraint'] = new_constraint\n return graph", "def __neg__(self):\n # \n # TODO - your code here\n #\n result = [];\n for row in self.g:\n result.append([-1*n for n in row]);\n \n return Matrix(result);", "def complement(self):\n N = self._size + 1\n new_covers = [[N - i[0], N - i[1]] for i in self._poset.cover_relations_iterator()]\n return TamariIntervalPoset(N - 1, new_covers)", "def get_upregulated_genes_network(self) -> Graph:\n logger.info(\"In get_upregulated_genes_network()\")\n\n deg_graph = self.graph.copy() # deep copy graph\n not_diff_expr = self.graph.vs(up_regulated_eq=False)\n\n # delete genes which are not differentially expressed or have no connections to others\n deg_graph.delete_vertices(not_diff_expr.indices)\n deg_graph.delete_vertices(deg_graph.vs.select(_degree_eq=0))\n\n return deg_graph", "def dictionary_to_undirected_graph_form(G):\n # produce a set of disconnected Node objects with node names from the input graph\n nodeset = {Node(str(node_name), dict(), set()) for node_name in G.keys()}\n G_prime = UndirectedGraph(nodeset) # create the corresponding null graph\n\n # for every node name\n for first_incident_node_name in G.keys():\n # for every adjacent node name\n for second_incident_node_name, weight in G[first_incident_node_name].items():\n first_incident_node_name = str(first_incident_node_name)\n second_incident_node_name = str(second_incident_node_name)\n # if the edge has not already been added\n if {first_incident_node_name, second_incident_node_name} \\\n not in [{edge.get_first_incident_node().get_name(), edge.get_second_incident_node().get_name()}\n for edge in G_prime.get_edges()]:\n # get the first node object\n first_incident_node = \\\n GraphProcessing.search_node_names(G_prime.get_nodeset(), first_incident_node_name).pop()\n # get the second node object\n second_incident_node = \\\n GraphProcessing.search_node_names(G_prime.get_nodeset(), second_incident_node_name).pop()\n\n # add the edge\n G_prime.add_edge(weight, dict(), first_incident_node, second_incident_node)\n\n return G_prime # return the UndirectedGraph object", "def complement(self) -> JustLatticeChord:\n nodes = []\n for node in self._nodes:\n nodes.append(list(map(lambda x, y: y - x, node, self._root)))\n return JustLatticeChord(self._fundamental, self._root, nodes)", "def split_velocity_graph(G, neg_cells_trick=True):\n\n if not sp.issparse(G):\n G = sp.csr_matrix(G)\n if neg_cells_trick:\n G_ = G.copy()\n G.data[G.data < 0] = 0\n G.eliminate_zeros()\n\n if neg_cells_trick:\n G_.data[G_.data > 0] = 0\n G_.eliminate_zeros()\n\n return (G, G_)\n else:\n return G", "def difference(G, H):\n\n if G.order() != H.order():\n msg = \"Node sets of the two directed graphs are not equal!\"\n raise StaticGraphNotEqNodesException(msg)\n \n n_nodes = G.order()\n edges = ((u, v) for u in G.nodes()\n for v in 
set(G.successors(u)) - set(H.successors(u)))\n deg = make_deg(n_nodes, edges)\n edges = ((u, v) for u in G.nodes()\n for v in set(G.successors(u)) - set(H.successors(u)))\n D = make(n_nodes, G.size(), edges, deg)\n return D", "def adjacency_opposite_calculator(graph):\n adjacency_matrix = sparse.csr_matrix(nx.adjacency_matrix(graph), dtype=np.float32).todense()\n adjacency_matrix_opposite = np.ones(adjacency_matrix.shape) - adjacency_matrix\n return adjacency_matrix_opposite", "def get_graph(self):\n return copy.deepcopy(self.graph)", "def __neg__(self):\n return self[::-1].complement", "def graph(g):\n return str(g.adjacencyList())", "def odd_decomposition(G) -> Tuple[Set[int], Set[int]]:\n multi = isinstance(G, nx.MultiGraph)\n\n # create a MultiGraph copy of G\n G = nx.MultiGraph(G)\n\n # remove isolated nodes from the graph, since they are irrelevant and we are working on a copy\n G.remove_nodes_from(list(nx.isolates(G)))\n\n # if the base graph is already odd\n if is_odd(G):\n return (set(G.edges(keys=multi)), set())\n\n odd_subgraph = G.subgraph(odd_nodes(G))\n even_subgraph = G.subgraph(even_nodes(G))\n\n odd_components = nx.connected_components(odd_subgraph)\n even_components = nx.connected_components(even_subgraph)\n\n # use the same notation as in our source paper\n X = list(odd_components)\n Y, Z = partition(even_components, lambda x: len(x) % 2 == 0)\n\n lX, lY, lZ = len(X), len(Y), len(Z)\n linear_system = Matrix(lY + lZ, lX + 1, bin_field)\n\n # create a linear system over GF(2) as described in our source paper\n for i, Yi in enumerate(Y):\n for j, Xi in enumerate(X):\n if n_joining(G, Xi, Yi) % 2 == 1:\n linear_system.set(i, j, 1)\n else:\n linear_system.set(i, j, 0)\n linear_system.set(i, lX, 1)\n\n for i, Zi in enumerate(Z):\n for j, Xi in enumerate(X):\n if n_joining(G, Xi, Zi) % 2 == 1:\n linear_system.set(i + lY, j, 1)\n else:\n linear_system.set(i + lY, j, 0)\n linear_system.set(i + lY, lX, 0)\n\n # transform the system into RREF\n linear_system.reduced_row_echelon_form()\n red = set()\n\n # we only need one solution\n # every non pivot is set to 0 (meaning blue)\n # while every pivot matches the augmented value\n # only create the red set since this is the only one we need\n\n for i in range(lY + lZ):\n val = linear_system.get(i, lX)\n # don't cross the last column (augmented part)\n for j in range(lX):\n v = linear_system.get(i, j)\n if v == 1:\n if val == 1:\n red.add(j)\n break\n else:\n if val == 1:\n # if any of the lines has no pivot and 1 as the augmented value this system is not solvable\n raise NotDecomposableError(\"The graph is not decomposable due to unsolvable system\")\n else:\n # this is a zero row ane there are no pivots after this row\n break\n\n red_nodes = set.union(*[X[i] for i in red]) if red else set()\n # edges adjacent to red nodes\n red_edges = set(G.edges(nbunch=red_nodes, keys=True))\n # calculate the red degree for every even node\n degrees = dict()\n for i in even_subgraph.nodes():\n degrees[i] = 0\n\n for i, j, k in red_edges:\n if i in degrees:\n degrees[i] += 1\n if j in degrees:\n degrees[j] += 1\n\n # T set is the set of all nodes of even red degree\n T = {i for i, deg in degrees.items() if deg % 2 == 0}\n\n # compute the T-join and mark the nodes as red/blue\n red_join = T_join(even_subgraph, T)\n\n red_edges.update(red_join)\n blue_edges = set(G.edges(keys=True)).difference(red_edges)\n\n if not multi:\n red_edges = {(i, j) for i, j, k in red_edges}\n blue_edges = {(i, j) for i, j, k in blue_edges}\n\n return (red_edges, 
blue_edges)", "def weakly_connected_components(G):\n seen = set()\n for v in G:\n if v not in seen:\n c = set(_plain_bfs(G, v))\n yield c\n seen.update(c)" ]
[ "0.8520391", "0.64977443", "0.64359856", "0.6389732", "0.6337305", "0.62457705", "0.6208965", "0.60704297", "0.60221344", "0.59848946", "0.5968795", "0.5959875", "0.59059066", "0.589863", "0.5895703", "0.5880331", "0.5879972", "0.58771133", "0.58479995", "0.58314204", "0.5827879", "0.5808705", "0.5789787", "0.5783938", "0.5766841", "0.5758836", "0.5751592", "0.57478195", "0.5740429", "0.5735341" ]
0.8689336
0
Returns the reverse directed graph of G.
def reverse(G, copy=True): if not G.is_directed(): raise nx.NetworkXError("Cannot reverse an undirected graph.") else: return G.reverse(copy=copy)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_reversed_graph(directed_graph):\n\n reversed = directed_graph.__class__()\n for i in directed_graph.get_vertices().keys():\n reversed.add_vertex(i)\n\n for i in directed_graph.get_vertices().keys():\n vertex = directed_graph.get_vertex(i)\n for j in vertex.get_heads():\n reversed.add_edge(j.get_label(), i)\n\n return reversed", "def reverse_graph(self) -> GraphInterface:\n ans = DiGraph()\n\n nodes = self._graph.get_all_v() # {key: NodeData}\n for key in nodes:\n ans.add_node(key)\n ans.get_node(key).tag = self._graph.get_node(key).tag\n\n for key in nodes:\n out_edges = self._graph.all_out_edges_of_node(key)\n for edge in out_edges:\n e = out_edges.get(edge)\n ans.add_edge(e.dest, e.src, e.weight)\n\n return ans", "def reverse_graph(self):\n rgraph = DGraph()\n rgraph.graph = deepcopy(self.graph)\n\n for node in rgraph.graph:\n node.data.children, node.data.parents = node.data.parents, node.data.children\n\n return rgraph", "def reverse(self):\n H = DiGraph(multiedges=self.allows_multiple_edges(), loops=self.allows_loops())\n H.add_vertices(self)\n H.add_edges( [ (v,u,d) for (u,v,d) in self.edge_iterator() ] )\n name = self.name()\n if name is None:\n name = ''\n H.name(\"Reverse of (%s)\"%name)\n return H", "def build_reverse_graph(self):\n adj = self.adj\n self.adjR = [[] for _ in range(n+1)]\n adjR = self.adjR\n for u, edges in enumerate(adj):\n for v, w in edges:\n adjR[v].append((u, w))", "def reversed_edge(self):\n reverse = Edge(id=self.id,\n start_node=self.end_node,\n end_node=self.start_node,\n cost=self.reverse_cost,\n reverse_cost=self.cost,\n reversed=not self.reversed)\n return reverse", "def reverse_test_graph(graph):\n flatten = lambda l: [item for sublist in l for item in sublist] # flattens a list of lists\n nodes = set(graph.keys()).union(set(flatten(graph.values())))\n reversed_graph = {node: [] for node in nodes}\n\n for row in graph:\n outgoing_edges = graph[row]\n for edge in outgoing_edges:\n reversed_graph[edge].append(row)\n\n return reversed_graph", "def complement(G):\n R = G.__class__()\n R.add_nodes_from(G)\n R.add_edges_from(((n, n2)\n for n, nbrs in G.adjacency()\n for n2 in G if n2 not in nbrs\n if n != n2))\n return R", "def add_reverse_edges(\n graph):\n senders = np.concatenate(\n (graph.senders, graph.receivers))\n receivers = np.concatenate(\n (graph.receivers, graph.senders))\n\n graph.senders = senders\n graph.receivers = receivers\n return graph", "def reverse(edge):\n return Edge(orig=edge.dest, dest=edge.orig, orig_id=edge.dest_id, dest_id=edge.orig_id)", "def to_undirected_graph(self):\n visited = set() \n G = Graph.Graph()\n \n for node in self.node_set:\n \n if node not in visited:\n visited.add(node)\n for i in self.suffix[node]:\n G.add_edge(node, i)\n \n return G", "def get_undirected_edges(mapping, G):\n edge_types = utils.rels_types\n edges = {}\n for et in edge_types:\n edges[et] = {}\n for g in G.nodes:\n edges[et][mapping[g]] = []\n for s, t, meta in G.edges(data=True):\n #print(s, t)\n edges[meta['type']][mapping[s]].append(mapping[t])\n edges[meta['type']][mapping[t]].append(mapping[s])\n return edges", "def reverse(self) -> \"CFG\":\n productions = []\n for production in self._productions:\n productions.append(Production(production.head,\n production.body[::-1]))\n return CFG(self.variables,\n self.terminals,\n self.start_symbol,\n productions)", "def dictionary_to_undirected_graph_form(G):\n # produce a set of disconnected Node objects with node names from the input graph\n nodeset = {Node(str(node_name), dict(), set()) 
for node_name in G.keys()}\n G_prime = UndirectedGraph(nodeset) # create the corresponding null graph\n\n # for every node name\n for first_incident_node_name in G.keys():\n # for every adjacent node name\n for second_incident_node_name, weight in G[first_incident_node_name].items():\n first_incident_node_name = str(first_incident_node_name)\n second_incident_node_name = str(second_incident_node_name)\n # if the edge has not already been added\n if {first_incident_node_name, second_incident_node_name} \\\n not in [{edge.get_first_incident_node().get_name(), edge.get_second_incident_node().get_name()}\n for edge in G_prime.get_edges()]:\n # get the first node object\n first_incident_node = \\\n GraphProcessing.search_node_names(G_prime.get_nodeset(), first_incident_node_name).pop()\n # get the second node object\n second_incident_node = \\\n GraphProcessing.search_node_names(G_prime.get_nodeset(), second_incident_node_name).pop()\n\n # add the edge\n G_prime.add_edge(weight, dict(), first_incident_node, second_incident_node)\n\n return G_prime # return the UndirectedGraph object", "def get_downregulated_genes_network(self) -> Graph:\n logger.info(\"In get_downregulated_genes_network()\")\n\n deg_graph = self.graph.copy() # deep copy graph\n not_diff_expr = self.graph.vs(down_regulated_eq=False)\n\n # delete genes which are not differentially expressed or have no connections to others\n deg_graph.delete_vertices(not_diff_expr.indices)\n deg_graph.delete_vertices(deg_graph.vs.select(_degree_eq=0))\n\n return deg_graph", "def reverse(self):\n Q = LinkedListQueue()\n S = LinkedListStack()\n current_node = self._head\n\n while current_node:\n S.push(current_node._element)\n current_node = current_node._next\n\n while not S.is_empty():\n Q.enqueue(S.pop())\n\n return Q", "def get_reversed(self):\n\n next = None\n current = self.head\n\n while current:\n tmp = Node(current.data)\n tmp.next = next\n next = tmp\n current = current.next\n\n return SinglyLinkedList(next)", "def reversed(self):\n ops = {Eq: Eq, Gt: Lt, Ge: Le, Lt: Gt, Le: Ge, Ne: Ne}\n a, b = self.args\n return Relational.__new__(ops.get(self.func, self.func), b, a)", "def reversed_edges(path):\n \n # Reversed initialization\n reversed_edges = []\n \n # Loop\n for edge in path:\n reversed_edges.append(edge[::-1])\n \n return reversed_edges", "def _get_recurrence_graph(self):\n try:\n return self._recurrence_graph\n except AttributeError:\n pass\n\n # g = DiGraph()\n g = nx.DiGraph()\n for i in range(self.num_switches()):\n for ii in {-i-1, i+1}:\n g.add_edges_from([(j, -k)\n for j in self.outgoing_branches(ii)\n for k in self.outgoing_branches(-ii)])\n\n self._recurrence_graph = g\n return g", "def graph(g):\n return str(g.adjacencyList())", "def reverse_edges(graph):\n # Add None to the end of each list of edges to act as sentinel value\n for node in graph:\n graph[node].append(None)\n # Add each new edge after the None sentinel\n new_key_values = defaultdict(lambda: list([None]))\n for node, edge_heads in graph.items():\n for head in edge_heads:\n if head is None:\n break\n if head in graph:\n graph[head].append(node)\n else:\n # Don't add new keys to dict while iterating over it\n new_key_values[head].append(node)\n # Add any new key-values to original adjacency list\n graph.update(new_key_values)\n # Remove all edges before the None sentinel, as well as the sentinel\n for node, edge_heads in graph.items():\n graph[node] = edge_heads[edge_heads.index(None)+1:]", "def reverse_edges(self, edges, inplace=True, multiedges=None):\n tempG 
= self if inplace else copy(self)\n for e in edges:\n tempG.reverse_edge(e,inplace=True,multiedges=multiedges)\n if not inplace:\n return tempG", "def get_graph(self):\n return copy.deepcopy(self.graph)", "def copy_graph(g):\n return copy.deepcopy(g)", "def reverse_iterative(self):\n # Create the new LinkedList.\n new_list = LinkedList()\n\n # Set the initial node to reverse from.\n node = self.first_node\n\n # iterate over each node and stop when node is None\n while node:\n next = node.next\n # Prepend the node to the new list.\n new_list.prepend(node)\n\n # Update the node reference.\n node = next\n return new_list", "def inverse_graph(graph):\n graph2 = copy.deepcopy(graph)\n i = 0\n while i < len(graph2):\n j = 0\n while j < len(graph2):\n if i != j:\n if graph2[i][j] == 0:\n graph2[i][j] = 1\n j += 1\n elif graph2[i][j] == 1:\n graph2[i][j] = 0\n j += 1\n else:\n j += 1\n i += 1\n return graph2", "def reversed(self):\n return LINE(*self.elems,**{'reverse':(not self.reverse)})", "def to_undirected(self):\n\n if self.graph_properties.directed is False:\n undirected_graph = type(self)()\n elif self.__class__.__bases__[0] == object:\n undirected_graph = type(self)()\n else:\n undirected_graph = self.__class__.__bases__[0]()\n undirected_graph._Impl = type(self._Impl)(undirected_graph.graph_properties)\n self._Impl.to_undirected(undirected_graph._Impl)\n return undirected_graph", "def __reversed__(self):\n if len(self) == 0:\n return\n\n # Create a list containing pointers to each\n # prev_node in the list.\n cur_node = self.head\n prev_nodes = [None]\n while cur_node != self.tail:\n prev_nodes.append(cur_node)\n cur_node = cur_node.next_node\n\n # Using the prev_nodes list, iterate backwards\n while cur_node is not None:\n for x in reversed(cur_node.data_list):\n yield x\n cur_node = prev_nodes[-1]\n del prev_nodes[-1]" ]
[ "0.7581556", "0.75628114", "0.7346692", "0.70150614", "0.6863429", "0.65913934", "0.64809585", "0.64787996", "0.643853", "0.63429785", "0.6326808", "0.62946874", "0.62814265", "0.627383", "0.62476474", "0.61388195", "0.6093543", "0.60812247", "0.6077702", "0.60706794", "0.60591537", "0.6036961", "0.5964223", "0.5955767", "0.59311235", "0.5925339", "0.5870989", "0.58377844", "0.58177096", "0.5754249" ]
0.83771706
0
Reads an INI file containing domain type definitions and fills them into a TypeDefs object.
def readDomainTypes(self, domainTypeFilePath): result = TypeDefs() inifile = IniFile(domainTypeFilePath) for section in inifile.getSections(): if section.endswith("(n)"): td = TypeDef(section[:-3], withLength = True) else: td = TypeDef(section, withLength = False) for (iniName, typeDefName) in [("dbtype", "databaseDefinition"), ("pythontype", "pythonType"), ("regex", "regex"), ("comment", "comment")]: if inifile.checkParam(section, iniName): setattr(td, typeDefName, inifile.getValue(section, iniName)) result.addTypeDef(td) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_domain(self, domainfile):\n\n with open(domainfile) as dfile:\n dfile_array = self._get_file_as_array(dfile)\n #Deal with front/end define, problem, :domain\n if dfile_array[0:4] != ['(', 'define', '(', 'domain']:\n print('PARSING ERROR: Expected (define (domain ... at start of domain file')\n sys.exit()\n self.domain = dfile_array[4]\n\n dfile_array = dfile_array[6:-1]\n opencounter = 0\n keyword = ''\n obj_list = []\n is_obj_list = True\n for word in dfile_array:\n if word == '(':\n opencounter += 1\n elif word == ')':\n opencounter -= 1\n elif word.startswith(':'):\n if word[1:] not in DFILE_KEYWORDS:\n pass\n elif keyword != 'requirements':\n keyword = word[1:]\n if opencounter == 0:\n if keyword == 'action':\n self.actions.append(obj_list)\n obj_list = []\n if keyword == 'types':\n for element in obj_list:\n self.types.setdefault('object', []).append(element)\n self.type_list.add('object')\n self.type_list.add(element)\n obj_list = []\n keyword = ''\n\n if keyword == 'requirements': #Requirements list\n if word != ':requirements':\n if not word.startswith(':'):\n print('PARSING ERROR: Expected requirement to start with :')\n sys.exit()\n elif word[1:] not in DFILE_REQ_KEYWORDS:\n print('WARNING: Unknown Rquierement ' + word[1:])\n #print 'Requirements must only be: ' + str(DFILE_REQ_KEYWORDS)\n #sys.exit()\n else:\n self.requirements.add(word[1:])\n elif keyword == 'action':\n obj_list.append(word)\n elif not word.startswith(':'):\n if keyword == 'types': #Typed list of objects\n if is_obj_list:\n if word == '-':\n is_obj_list = False\n else:\n obj_list.append(word)\n else:\n #word is type\n for element in obj_list:\n if not word in self.type_list:\n self.types.setdefault('object', []).append(word)\n self.type_list.add(word)\n self.types.setdefault(word, []).append(element)\n self.type_list.add(element)\n self.type_list.add(word)\n is_obj_list = True\n obj_list = []\n elif keyword == 'constants': #Typed list of objects\n if is_obj_list:\n if word == '-':\n is_obj_list = False\n else:\n obj_list.append(word)\n else:\n #word is type\n for element in obj_list:\n if word in self.type_list:\n self.constants.setdefault(word, []).append(element)\n #self.object_list.add(element)\n else:\n print(self.type_list)\n print(\"ERROR unknown type \" + word)\n sys.exit()\n is_obj_list = True\n obj_list = []\n elif keyword == 'predicates' or keyword == 'private': #Internally typed predicates\n if word == ')':\n if keyword == 'private':\n #print \"...skip agent: \" + str(obj_list[:3])\n obj_list = obj_list[3:]\n keyword = 'predicates'\n if len(obj_list) == 0:\n #print \"...skip )\"\n continue\n p_name = obj_list[0]\n #print \"parse predicate: \" + p_name + \" \" + str(obj_list)\n pred_list = self._parse_name_type_pairs(obj_list[1:],self.type_list)\n self.predicates.append(Predicate(p_name, pred_list, True, False))\n obj_list = []\n elif word != '(':\n obj_list.append(word)\n elif keyword == 'functions': #functions\n if word == ')':\n p_name = obj_list[0]\n if obj_list[0] == '-':\n obj_list = obj_list[2:]\n #print \"function: \" + word + \" - \" + str(obj_list)\n self.functions.append(Function(obj_list))\n obj_list = []\n elif word != '(':\n obj_list.append(word)\n\n #Work on the actions\n new_actions = []\n for action in self.actions:\n if action[0] == '-':\n action = action[2:]\n act_name = action[1]\n act = {}\n action = action[2:]\n keyword = ''\n for word in action:\n if word.startswith(':'):\n keyword = word[1:]\n else:\n act.setdefault(keyword, []).append(word)\n 
self.agent_types.add(act.get('agent')[2])\n agent = self._parse_name_type_pairs(act.get('agent'),self.type_list)\n param_list = agent + self._parse_name_type_pairs(act.get('parameters')[1:-1],self.type_list)\n up_params = Predicate('', param_list, True, False)\n pre_list = self._parse_unground_propositions(act.get('precondition'))\n eff_list = self._parse_unground_propositions(act.get('effect'))\n new_act = Action(act_name, up_params, pre_list, eff_list)\n\n new_actions.append(new_act)\n self.actions = new_actions", "def readDefinedTypes(self):\n types = {}\n for m in re.finditer(\"TYPE (.*) = (.*);\", self.data):\n typename, typetype = m.groups() \n if typetype in self.types.keys():\n types[typename] = typetype\n \n return types", "def getTypeDefinition(file, line, offset):\n args = {\"file\": file, \"line\": line, \"offset\": offset}\n response = send_request(\"typeDefinition\", args)\n return get_response_body(response)", "def readTypes(self):\r\n types = {}\r\n for m in re.finditer(\"TYPE (.*) = (.*);\", self.data):\r\n typename, typetype = m.groups() \r\n if typetype in self.SIMPLETYPES:\r\n types[typename] = typetype\r\n else:\r\n types[typename] = \"#\" + typetype\r\n \r\n return types", "def nifti_typedata(filepath, scan_type, desc_file=None, read_nifti=False):\n # Load the description and create the device description\n if desc_file is not None:\n with open(desc_file, \"rt\") as open_file:\n data_desc = json.load(open_file)\n manufacturer = data_desc.get(\"Manufacturer\", None)\n software_version = data_desc.get(\"SoftwareVersions\", \"unkwnown\")\n serialnum = data_desc.get(\"DeviceSerialNumber\", \"unkwnown\")\n identifier = \"{0}_{1}_{2}\".format(\n manufacturer, software_version, serialnum)\n if manufacturer is not None:\n device = {\n \"identifier\": md5_sum(identifier),\n \"manufacturer\": manufacturer,\n \"model\": data_desc.get(\"ManufacturersModelName\", \"unkwnown\"),\n \"serialnum\": serialnum,\n \"software_version\": software_version}\n else:\n device = None\n else:\n data_desc = {}\n device = None\n typedata_kwargs = {\n \"te\": data_desc.get(\"EchoTime\", 0),\n \"tr\": data_desc.get(\"RepetitionTime\", 0)}\n field = data_desc.get(\"MagneticFieldStrength\", None)\n if field is not None:\n typedata_kwargs[\"field\"] = \"{0}T\".format(field)\n\n # Load the nifti image and generate the type description\n if read_nifti:\n image = nibabel.load(filepath)\n shape = image.shape\n spacing = image.get_header().get_zooms()\n typedata = {\n \"type\": scan_type,\n \"shape_x\": int(shape[0]),\n \"shape_y\": int(shape[1]),\n \"shape_z\": int(shape[2]),\n \"voxel_res_x\": float(spacing[0]),\n \"voxel_res_y\": float(spacing[1]),\n \"voxel_res_z\": float(spacing[2])}\n else:\n typedata = {\n \"type\": scan_type,\n \"shape_x\": 0,\n \"shape_y\": 0,\n \"shape_z\": 0,\n \"voxel_res_x\": 0.,\n \"voxel_res_y\": 0.,\n \"voxel_res_z\": 0.}\n typedata.update(typedata_kwargs)\n\n return typedata, device", "def readSimpleTypes(self):\n types = {}\n for m in re.finditer(\"TYPE (.*) = (.*);\", self.data):\n typename, typetype = m.groups() \n if typetype in self.SIMPLETYPES:\n types[typename] = typetype\n \n return types", "def load(input_path):\n\n dill._dill._reverse_typemap['ClassType'] = type\n with open(input_path, \"rb\") as file:\n return dill.load(file)", "def LoadTypeDeclarationFromFile(self, type_decl_path):\n with open(type_decl_path) as f:\n return self.LoadTypeDeclaration(f.read())", "def load_data_definitions(self, ddef, dimp):\n if ddef:\n errors = self.validate_fs(ddef)\n if 
errors:\n raise Exception(\"Provided format specification has\"\n \" the following error(s):\\n%s\" % errors)\n self.ddef = ddef\n default_dir = self.options['fspec_dir']\n for fv in dimp:\n # fv format is: \"<file_name>\":\"<var_name>\"\n matchObj = re.match(r'^\"([^\"]+)\":\"([^\"]+)\"$', fv) \n if not matchObj:\n raise Exception('** Error: Unable to find \"<file_name>\":\"<var>\" in ''%s''' % fv)\n fname = matchObj.group(1)\n var = matchObj.group(2)\n if not fname.endswith('.py'):\n fname += '.py'\n if not os.path.isfile(fname):\n fname = os.path.join(default_dir, fname)\n if not os.path.isfile(fname):\n raise Exception('Unable to locate format specification file: %s' %\n fname)\n dd = imp.load_source('temp_module_name', fname)\n if var not in dir(dd):\n raise Exception(\"Variable '%s' not defined in specification file '%s'\" %\n (var, fname))\n # get definitions that are in variable var\n ddefin = eval(\"dd.%s\" % var)\n del sys.modules['temp_module_name']\n # check for \"structures\" and \"locations\"\n errors = self.validate_fs(ddefin)\n if errors:\n print (\"Specification file '%s', variable '%s' has\"\n \" the following errors:\\n%s\" % (fname, var, errors))\n sys.exit(1)\n # seems, ok, merge it with other definitions\n self.ddef.update(ddefin) \n if not self.ddef:\n raise Exception(\"No file format specifications were provided. At least one\"\n \" is required.\")\n if self.default_ns not in self.ddef.keys():\n raise Exception(\"Default name space ('%s') does not appear in data definitions\"\n % self.default_ns)", "def deserialize_dictionaries(\n self,\n input_file,\n restrict_to = None):\n for type_name, symbol_table in io.read_symbol_table_dict(\n input_file, restrict_to).items():\n self._symtab[type_name] = symbol_table", "def load_defs():\n # Load word definitions\n fname = 'word-definitions.txt'\n with open(fname) as fh:\n lines = fh.readlines()\n \n # Create dictionary keyed by lowercase word\n def_tbl = dict()\n for line in lines:\n # split the dictionary line at the first space\n word, word_def = line.split(sep=None, maxsplit=1)\n # add this entry to the dictionary\n word = word.lower()\n def_tbl[word] = word_def.rstrip()\n return def_tbl", "def type_def_line(cls, line):\n type_def = None\n if not cls.type_match(line):\n sline = line.strip()\n if sline.lower()[0:4] == 'type':\n if '::' in sline:\n elements = sline.split('::')\n type_name = elements[1].strip()\n type_props = [x.strip() for x in elements[0].split(',')[1:]]\n else:\n # Plain type decl\n type_name = sline.split(' ', 1)[1].strip()\n type_props = None\n # End if\n if '(' in type_name:\n tnstr = type_name.split('(')\n type_name = tnstr[0].strip()\n type_params = '(' + tnstr[1].rstrip()\n else:\n type_params = None\n # End if\n type_def = [type_name, type_props, type_params]\n # End if\n # End if\n return type_def", "def read_inputs_field_types():\n inputs = yaml.load(\n open(os.path.join(os.path.dirname(__file__), 'inputs.yml')).read())\n\n for db in inputs.keys():\n inputs[db]['fieldnames'] = [field['name']for field in inputs[db]['fields']]\n return inputs", "def readOtherTypes(self):\n types = {}\n for m in re.finditer(\"TYPE (\\w*) = (.*);\", self.data):\n typename, type_string = m.groups() \n if typename not in self.types.keys():\n types[typename] = type_string\n \n return types", "def validate_type_definition(type_definition):\n # TODO:validator\n data_type = type_definition.get('data_type')\n validator = type_definition.get('validator')\n return type_definition", "def LoadTypeDeclaration(self, content):\n 
type_decl_unit = self._parser.Parse(content)\n functions_by_name = {f_name: list(g) for f_name, g\n in itertools.groupby(\n type_decl_unit.funcdefs,\n lambda f: f.name)}\n\n interface_by_name = {i.name: i for i in type_decl_unit.interfacedefs}\n\n class_by_name = {c.name: c for c in type_decl_unit.classdefs}\n return InterfacesClassesFuncsByName(\n interfaces=interface_by_name,\n classes=class_by_name,\n funcs=functions_by_name)", "def _read(self, in_file):\n #\n # I know this function is long, but the FRD block is long as well...\n # Splitting this into multiple functions would not help in my opinion.\n # Therefore -> shut up pylint\n # pylint: disable=too-many-branches\n # pylint: disable=too-many-statements\n #\n self.setname = in_file.read(6).decode().strip()\n self.value = float(in_file.read(12))\n self.numnod = int(in_file.read(12))\n self.text = in_file.read(20).decode().strip()\n self.ictype = int(in_file.read(2))\n self.numstep = int(in_file.read(5))\n self.analys = in_file.read(10).decode().strip()\n self.format = int(in_file.read(2))\n in_file.read(1) # eol\n\n in_file.read(1) # pad byte\n in_file.read(2) # key = -4\n in_file.read(2) # pad bytes\n self.name = in_file.read(8).decode().strip()\n self.ncomps = int(in_file.read(5))\n self.irtype = int(in_file.read(5))\n if self.irtype != 1:\n raise NotImplementedError()\n in_file.read(1) # eol\n\n for i in range(self.ncomps):\n entity = FRDEntity()\n self.entities.append(entity)\n\n in_file.read(1) # pad byte\n entity.key = int(in_file.read(2))\n in_file.read(2) # pad bytes\n entity.name = in_file.read(8).decode().strip()\n entity.menu = int(in_file.read(5))\n entity.ictype = int(in_file.read(5))\n entity.icind1 = int(in_file.read(5))\n if entity.ictype == 4:\n entity.icind2 = int(in_file.read(5))\n elif entity.ictype == 2 and i == 3:\n entity.icind2 = int(in_file.read(5))\n entity.iexist = int(in_file.read(5))\n entity.icname = in_file.read(3).decode().strip()\n self.ncomps -= 1\n else:\n entity.iexist = int(in_file.read(5))\n in_file.read(1) # eol\n\n for i in range(self.numnod):\n result = FRDNodeResult()\n self.results.append(result)\n if self.format < 2:\n num_lines = int(self.ncomps/(6 + 1)) + 1\n result.data = []\n for j in range(num_lines):\n in_file.read(3) # pad byte and key = -1 || -2\n if result.node is None:\n result.node = int(in_file.read(5*(self.format+1)))\n else:\n in_file.read(5*(self.format+1))\n k_start = j*6\n k_end = min(self.ncomps - k_start, (j+1)*6)\n for _ in range(0, k_end):\n result.data.append(float(in_file.read(12)))\n in_file.read(1) # eol\n else:\n result.node = struct.unpack('i', in_file.read(4))[0]\n result.data = struct.unpack(\n 'f'*self.ncomps, in_file.read(self.ncomps*4))\n\n if self.format < 2:\n in_file.readline() # last record for ascii only", "def LoadDefinition(cls, metadata_object):\n pass", "def _ConstructType(self, type_name, type_contents, filepath, require_guid):\n\n description = ''\n parents = None\n local_field_names = None\n opt_local_field_names = None\n is_abstract = False\n allow_undefined_fields = False\n is_canonical = False\n guid = None\n\n expected_keys = set([\n 'description', 'implements', 'uses', 'opt_uses', 'is_abstract', 'guid',\n 'is_canonical', 'allow_undefined_fields'\n ])\n\n if 'description' in type_contents:\n description = type_contents['description']\n if 'implements' in type_contents:\n parents = type_contents['implements']\n if 'uses' in type_contents:\n local_field_names = type_contents['uses']\n if 'opt_uses' in type_contents:\n opt_local_field_names = 
type_contents['opt_uses']\n if 'is_abstract' in type_contents:\n is_abstract = type_contents['is_abstract']\n if 'allow_undefined_fields' in type_contents:\n allow_undefined_fields = type_contents['allow_undefined_fields']\n if 'is_canonical' in type_contents:\n is_canonical = type_contents['is_canonical']\n if 'guid' in type_contents:\n guid = type_contents['guid']\n\n # Generate tuples to represent each field\n fq_lfn = []\n if local_field_names:\n self._ConstructField(local_field_names, False, fq_lfn)\n if opt_local_field_names:\n self._ConstructField(opt_local_field_names, True, fq_lfn)\n\n entity_type = EntityType(\n filepath=filepath,\n typename=type_name,\n description=description,\n parents=parents,\n local_field_tuples=fq_lfn,\n is_abstract=is_abstract,\n allow_undefined_fields=allow_undefined_fields,\n inherited_fields_expanded=False,\n is_canonical=is_canonical,\n guid=guid,\n require_guid=require_guid,\n namespace=self.local_namespace)\n\n # Add errors to type if there's anything extra in the block. We add to the\n # entity type because an extra key here is likely a typo in a real key name\n # that would result in information being lost from the type.\n for key in type_contents:\n if key not in expected_keys:\n entity_type.AddFinding(\n findings_lib.UnrecognizedKeyError(key, entity_type.file_context))\n\n return entity_type", "def read_gds(\n self,\n infile,\n units=\"skip\",\n rename={},\n rename_template=\"{name}\",\n layers={},\n datatypes={},\n texttypes={},\n ):\n self._references = []\n close = True\n if hasattr(infile, \"__fspath__\"):\n infile = open(infile.__fspath__(), \"rb\")\n elif isinstance(infile, (basestring, Path)):\n infile = open(infile, \"rb\")\n else:\n close = False\n emitted_warnings = []\n kwargs = {}\n create_element = None\n factor = 1\n cell = None\n properties = {}\n attr = -1\n for record in _record_reader(infile):\n # LAYER\n if record[0] == 0x0D:\n kwargs[\"layer\"] = layers.get(record[1][0], record[1][0])\n # DATATYPE or BOXTYPE\n elif record[0] == 0x0E or record[0] == 0x2E:\n kwargs[\"datatype\"] = datatypes.get(record[1][0], record[1][0])\n # TEXTTYPE\n elif record[0] == 0x16:\n kwargs[\"texttype\"] = texttypes.get(record[1][0], record[1][0])\n # XY\n elif record[0] == 0x10:\n if \"xy\" in kwargs:\n kwargs[\"xy\"] = numpy.concatenate((kwargs[\"xy\"], factor * record[1]))\n else:\n kwargs[\"xy\"] = factor * record[1]\n # WIDTH\n elif record[0] == 0x0F:\n kwargs[\"width\"] = factor * abs(record[1][0])\n if record[1][0] < 0:\n kwargs[\"width_transform\"] = False\n # ENDEL\n elif record[0] == 0x11:\n if create_element is not None:\n el = create_element(**kwargs)\n if len(properties) > 0:\n el.properties = properties\n properties = {}\n cell.add(el)\n create_element = None\n kwargs = {}\n # BOUNDARY\n elif record[0] == 0x08:\n create_element = self._create_polygon\n # PATH\n elif record[0] == 0x09:\n create_element = self._create_path\n # BOX\n elif record[0] == 0x2D:\n create_element = self._create_polygon\n if record[0] not in emitted_warnings:\n warnings.warn(\n \"[GDSPY] GDSII elements of type BOX are imported as polygons.\",\n stacklevel=2,\n )\n emitted_warnings.append(record[0])\n # TEXT\n elif record[0] == 0x0C:\n create_element = self._create_label\n # SNAME\n elif record[0] == 0x12:\n if record[1] in rename:\n name = rename[record[1]]\n else:\n name = rename_template.format(name=record[1])\n kwargs[\"ref_cell\"] = name\n # COLROW\n elif record[0] == 0x13:\n kwargs[\"columns\"] = record[1][0]\n kwargs[\"rows\"] = record[1][1]\n # STRANS\n 
elif record[0] == 0x1A:\n kwargs[\"x_reflection\"] = (int(record[1][0]) & 0x8000) > 0\n if (int(record[1][0]) & 0x0006) and record[0] not in emitted_warnings:\n warnings.warn(\n \"[GDSPY] Absolute magnification or rotation of \"\n \"references is not supported. Transformations \"\n \"will be interpreted as relative.\",\n stacklevel=2,\n )\n emitted_warnings.append(record[0])\n # MAG\n elif record[0] == 0x1B:\n kwargs[\"magnification\"] = record[1][0]\n # ANGLE\n elif record[0] == 0x1C:\n kwargs[\"rotation\"] = record[1][0]\n # SREF\n elif record[0] == 0x0A:\n create_element = self._create_reference\n # AREF\n elif record[0] == 0x0B:\n create_element = self._create_array\n # STRNAME\n elif record[0] == 0x06:\n if record[1] in rename:\n name = rename[record[1]]\n else:\n name = rename_template.format(name=record[1])\n cell = Cell(name, exclude_from_current=True)\n if name in self.cells:\n raise ValueError(\"[GDSPY] Multiple cells with name: {0} in GDSII file\".format(name))\n self.cells[name] = cell\n # STRING\n elif record[0] == 0x19:\n kwargs[\"text\"] = record[1]\n # ENDSTR\n elif record[0] == 0x07:\n cell = None\n # UNITS\n elif record[0] == 0x03:\n if units == \"skip\":\n factor = record[1][0]\n elif units == \"import\":\n self.unit = record[1][1] / record[1][0]\n self.precision = record[1][1]\n factor = record[1][0]\n elif units == \"convert\":\n factor = record[1][1] / self.unit\n else:\n raise ValueError(\n \"[GDSPY] units must be one of 'convert', 'import' or 'skip'.\"\n )\n # LIBNAME\n elif record[0] == 0x02:\n self.name = record[1]\n # PRESENTATION\n elif record[0] == 0x17:\n kwargs[\"anchor\"] = GdsLibrary._import_anchors[\n int(record[1][0]) & 0x000F\n ]\n # PATHTYPE\n elif record[0] == 0x21:\n kwargs[\"ends\"] = GdsLibrary._pathtype_dict.get(record[1][0], \"extended\")\n # BGNEXTN\n elif record[0] == 0x30:\n kwargs[\"bgnextn\"] = factor * record[1][0]\n # ENDEXTN\n elif record[0] == 0x31:\n kwargs[\"endextn\"] = factor * record[1][0]\n # ENDLIB\n elif record[0] == 0x04:\n for ref in self._references:\n if ref.ref_cell in self.cells:\n ref.ref_cell = self.cells[ref.ref_cell]\n # PROPATTR\n elif record[0] == 0x2B:\n attr = record[1][0]\n # PROPVALUE\n elif record[0] == 0x2C:\n properties[attr] = record[1]\n # Not supported\n elif (\n record[0] not in emitted_warnings\n and record[0] not in GdsLibrary._unused_records\n ):\n warnings.warn(\n \"[GDSPY] Record type {0} ({1:02X}) is not supported.\".format(\n GdsLibrary._record_name[record[0]], record[0]\n ),\n stacklevel=2,\n )\n emitted_warnings.append(record[0])\n if close:\n infile.close()\n return self", "def readAggregatedSimpleTypes(self):\n types = {}\n # SETs\n for m in re.finditer(\"TYPE (\\w*) = SET (.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'SET ' + typetype\n \n # BAGs\n for m in re.finditer(\"TYPE (\\w*) = BAG (.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'BAG ' + typetype\n \n # LISTs\n for m in re.finditer(\"TYPE (\\w*) = LIST (.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'LIST ' + typetype\n \n # ARRAYs\n for m in re.finditer(\"TYPE (\\w*) = ARRAY (.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'ARRAY ' + typetype\n \n # STRING vectors\n for m in re.finditer(\"TYPE (\\w*) = STRING\\((.*);\", self.data):\n typename, typetype = m.groups() \n types[typename] = 'STRING(' + typetype\n \n return types", "def _parse_domain(self, f_domain):\n\n parse_tree = PDDL_Tree.create(f_domain)\n\n assert \"domain\" 
in parse_tree, \"Domain must have a name\"\n self.domain_name = parse_tree [\"domain\"].named_children ()[0]\n\n # must read types before constants\n if \":types\" in parse_tree:\n if \"-\" in parse_tree[\":types\"].named_children():\n type_hierarchy = PDDL_Utils.read_type(parse_tree[\":types\"])\n self.parent_types = {subtype: parent for subtype, parent in type_hierarchy}\n self.types = set(parse_tree[\":types\"].named_children())\n self.types.discard(\"-\")\n else:\n self.types = set(parse_tree[\":types\"].named_children())\n self.parent_types = {t: None for t in self.types}\n else:\n self.types = set([Predicate.OBJECT])\n self.parent_types = {Predicate.OBJECT: None}\n\n self.agents = [a.name for a in parse_tree[\":agents\"].children]\n self.types.add('agent')\n self.parent_types['agent'] = None\n self._add_objects([(ag, 'agent') for ag in self.agents])\n\n # must read in constants before actions or predicates\n if \":constants\" in parse_tree:\n object_list = PDDL_Utils.read_type(parse_tree[\":constants\"])\n self._add_objects(object_list)\n\n #TODO this may not be correct, depending on the type hierarchy\n const_map = dict()\n for const in self.objects:\n if len(self.obj_to_type[const]) == 0:\n raise RuntimeError(\"No type for constant object %s\" % const)\n else:\n const_map[const] = list(self.obj_to_type[const])[0]\n\n self.predicates = [self.to_predicate(c, map=const_map) for c in parse_tree[\":predicates\"].children]\n\n # some predicates have this property: they are untyped.\n for predicate in self.predicates:\n if Predicate.OBJECT not in self.types and any([arg[1] == Predicate.OBJECT for arg in predicate.args]):\n for t in self.types:\n if self.parent_types[t] is None:\n self.parent_types[t] = Predicate.OBJECT\n\n self.parent_types[Predicate.OBJECT] = None\n self.types.add(Predicate.OBJECT)\n self.type_to_obj[Predicate.OBJECT] = set([])\n for obj, type_list in self.obj_to_type.items():\n type_list.add(Predicate.OBJECT)\n self.type_to_obj[Predicate.OBJECT].add(obj)\n\n # only need to do this once, obviously\n break\n\n self.actions = [self.to_action(c) for c in parse_tree.find_all(\":action\")]", "def ingest(in_info):\n if type(in_info) == str:\n with open(in_info) as infile:\n lines = (line.split(None) for line in infile)\n in_dict = {defn[0] : defn[1:] for defn in lines}\n else:\n in_dict = in_info\n return in_dict", "def load_domain_value_files(base_path, force=False):\n logger.info('Loading Object Class')\n load_object_class(base_path)\n logger.info('Loading CFDA Program')\n load_cfda_program(base_path)\n logger.info('Loading Program Activity')\n load_program_activity_data(base_path)\n logger.info('Loading Country codes')\n load_country_codes(base_path, force)", "def get_type_definition(self, type_definition):\n if isinstance(type_definition, types.Enum):\n return self.define_enum_field(type_definition)\n if isinstance(type_definition, types.NumberTypeMarker):\n return self.define_basic_field(type_definition)\n if isinstance(type_definition, types.StringTypeMarker):\n return self.define_basic_field(type_definition)\n if isinstance(type_definition, types.Bool):\n return self.define_basic_field(type_definition)\n if isinstance(type_definition, types.Struct):\n # Since all the structs were already collected, and are\n # defined in the definitions section, it's enough to refer\n # to the struct here.\n return self.reference_type(type_definition)\n if isinstance(type_definition, types.Map):\n return self.define_map_field(type_definition)\n if isinstance(type_definition, 
types.List):\n return self.define_array(type_definition)\n if isinstance(type_definition, types.JSONData):\n return copy.deepcopy(JSONDATA_TEMPLATE)\n if isinstance(type_definition, types.Tuple):\n return self.define_array(type_definition)\n raise Exception(\n \"Cannot create schema for type %s\" %\n str(type_definition))", "def setTypes(self):\n\n integers = []\n floats = [\n 'S',\n 'Pinj',\n 'coreRadFrac',\n 'qBG',\n 'lqCN',\n 'lqCF',\n 'lqPN',\n 'lqPF',\n 'fracPN',\n 'fracPF',\n 'fracCN',\n 'fracCF',\n 'fracUI',\n 'fracUO',\n 'fracLI',\n 'fracLO',\n 'fG',\n ]\n\n\n for var in integers:\n if (getattr(self, var) is not None) and (~np.isnan(float(getattr(self, var)))):\n try:\n setattr(self, var, tools.makeInt(getattr(self, var)))\n except:\n print(\"Error with input file var \"+var+\". Perhaps you have invalid input values?\")\n log.info(\"Error with input file var \"+var+\". Perhaps you have invalid input values?\")\n for var in floats:\n if var is not None:\n if (getattr(self, var) is not None) and (~np.isnan(float(getattr(self, var)))):\n try:\n setattr(self, var, tools.makeFloat(getattr(self, var)))\n except:\n print(\"Error with input file var \"+var+\". Perhaps you have invalid input values?\")\n log.info(\"Error with input file var \"+var+\". Perhaps you have invalid input values?\")\n\n return", "def _get_type_mapping():\n return {\n Box.SPACE_NAME: Box,\n Dict.SPACE_NAME: Dict,\n Discrete.SPACE_NAME: Discrete\n }", "def load_types(self, types_dir: Path, *, system_key: str, namespace_root: str = \"npc\") -> None:\n def process_types_dir(search_dir: Path) -> None:\n \"\"\"Load yaml files, expand sheet paths, handle implied sheets\n\n This internal helper method scans all the files in search_dir and tries to load them by their type:\n * yaml files are treated as type definitions and parsed. If they have a sheet_path property, it is\n expanded into a fully qualified Path for later use\n * All other files are set aside for later. After the types have been loaded, the base names of the\n remaining files are compared against the loaded type keys within our current namespace. 
Any that\n match are treated as the implicit sheet file for that type, and their Path is saved to the\n type's sheet_path property.\n\n Args:\n search_dir (Path): Directory to search for type and sheet files\n \"\"\"\n discovered_sheets: dict = {}\n for type_path in search_dir.glob(\"*.*\"):\n if type_path.suffix != \".yaml\":\n type_key: str = type_path.stem\n discovered_sheets[type_key] = type_path\n continue\n\n typedef: dict = quiet_parse(type_path)\n try:\n type_key: str = next(iter(typedef))\n except TypeError:\n raise ParseError(\"Missing top-level key for type config\", type_path)\n\n if typedef[type_key].get(\"sheet_path\"):\n sheet_path = Path(typedef[type_key].get(\"sheet_path\"))\n if sheet_path.is_absolute():\n typedef[type_key][\"sheet_path\"] = sheet_path.resolve()\n else:\n typedef[type_key][\"sheet_path\"] = search_dir.joinpath(sheet_path).resolve()\n\n self.merge_data(typedef, types_namespace)\n\n for type_key, sheet_path in discovered_sheets.items():\n if type_key not in self.get(types_namespace, {}):\n logger.info(f\"Type {type_key} not defined, skipping potential sheet {sheet_path}\")\n continue\n if \"sheet_path\" not in self.get(f\"{types_namespace}.{type_key}\"):\n self.merge_data({type_key: {\"sheet_path\": sheet_path}}, types_namespace)\n\n types_namespace: str = f\"{namespace_root}.types.{system_key}\"\n process_types_dir(types_dir)\n if self.get(f\"npc.systems.{system_key}.extends\"):\n process_types_dir(types_dir / self.get(f\"npc.systems.{system_key}.extends\"))\n process_types_dir(types_dir / system_key)", "def load_from_file(self, path):\n schema = self.schema\n \n # Set up the default values.\n if schema is not None:\n for sect, sect_obj in schema.items():\n for opt, val in sect_obj.items():\n # This call is to convert the value to\n # the type specified. We do this to\n # prevent the programmer from specifying\n # inconsistent type with the value in the \n # schema.\n self.set(*_convert(schema, sect, opt, val[1]))\n\n # Parse the INI file.\n parser = RawConfigParser()\n parser.read(path)\n \n sections = parser.sections()\n for section in sections:\n \n # If application has supplied a schema,\n # and it does not has such a section, we skip\n # it. No error raised.\n if schema is not None and \\\n not schema.has_key(section):\n continue\n\n options = parser.options(section)\n \n for option in options:\n \n # If application has supplied a schema,\n # we know the section is valid since it pass the\n # previus test, but if the option is not included\n # in the section, we skip it. 
No error raised.\n if schema is not None and \\\n (option not in schema[section]):\n continue \n \n # If there is a schema, then we convert the \n # option to its type stated in the schema,\n # otherwise we just leave it as string.\n if schema is not None:\n self.set(*_convert(schema, section, option,\n parser.get(section, option)))\n else:\n self.set(section, option,\n parser.get(section, option))", "def read_config(config):\n\n dic_types = json.load(open(config, 'r'))\n\n to_remove = []\n for attribute, value in dic_types.items():\n ls_val = value.keys()\n if 'type' in ls_val:\n val = value['type']\n value['type'] = str_to_type(val)\n none_type = False\n if not value['type']:\n none_type = True\n \n if not 'default' in ls_val and none_type:\n to_remove.append(attribute)\n value['type'] = val\n\n for to_rm in to_remove:\n print(' [WARN] Config for' , '\\'' + to_rm + '\\'', 'incorrect and ommitted: Type', '\\'' + dic_types[to_rm]['type'] + '\\'' , 'is not valid and no default value is indicated') \n del dic_types[to_rm]\n \n return dic_types" ]
[ "0.5692447", "0.56770426", "0.55325735", "0.5285232", "0.50543946", "0.4953483", "0.4840075", "0.48231658", "0.48218992", "0.4821366", "0.47643015", "0.47455364", "0.4673499", "0.4642409", "0.46207514", "0.46002218", "0.4586546", "0.4583959", "0.45795232", "0.4578879", "0.4550395", "0.45469365", "0.45280024", "0.45183623", "0.44668627", "0.4461901", "0.4454937", "0.4438707", "0.44215733", "0.44129595" ]
0.7333319
0
write spec back to directory (if dir is not specified, the default spec dir is used)
def writeSpec(self,dir=""): for codestruct in self.codestructures: codestruct.writeSpec(dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_change_dir_to_file(self):\n dir0, dir1 = self.make_temp_dirs(2)\n self.write_dir(dir0, \"foo\")\n self.sync_all()\n self.assertDirPresent(dir0, \"foo\")\n self.assertDirPresent(dir1, \"foo\")\n\n self.delete_dir(dir0, \"foo\")\n self.write_file(dir0, \"foo\", \"bar\")\n self.sync_all()\n self.assertFile(dir0, \"foo\", \"bar\")\n self.assertFile(dir1, \"foo\", \"bar\")", "def test_change_file_to_dir_without_file(self):\n dir0, dir1 = self.make_temp_dirs(2)\n self.write_file(dir0, \"foo\", \"bar\")\n self.sync_all()\n self.assertFile(dir0, \"foo\", \"bar\")\n self.assertFile(dir1, \"foo\", \"bar\")\n\n self.delete_file(dir0, \"foo\")\n self.write_dir(dir0, \"foo\")\n self.sync_all()\n self.assertDirPresent(dir0, \"foo\")\n self.assertDirPresent(dir1, \"foo\")", "def test_change_non_empty_dir_to_file(self):\n dir0, dir1 = self.make_temp_dirs(2)\n self.write_file(dir0, \"foo/bar\", \"baz\")\n self.sync_all()\n self.assertFile(dir0, \"foo/bar\", \"baz\")\n self.assertFile(dir1, \"foo/bar\", \"baz\")\n\n self.delete_file(dir0, \"foo/bar\")\n self.delete_dir(dir0, \"foo\")\n self.write_file(dir0, \"foo\", \"bar\")\n self.sync_all()\n self.assertFile(dir0, \"foo\", \"bar\")\n self.assertFile(dir1, \"foo\", \"bar\")", "def save(self, dir):\n raise NotImplementedError", "def fixture_out_dir(tmpdir_factory) -> Path:\n my_tmpdir = Path(tmpdir_factory.mktemp(\"out\"))\n yield my_tmpdir\n shutil.rmtree(str(my_tmpdir))", "def test_directory_to_manifest(self):\n\n # First, stub out a directory with files in it::\n def create_stub():\n directory = tempfile.mkdtemp()\n for i in 'foo', 'bar', 'fleem':\n file(os.path.join(directory, i), 'w').write(i)\n subdir = os.path.join(directory, 'subdir')\n os.mkdir(subdir)\n file(os.path.join(subdir, 'subfile'), 'w').write('baz')\n return directory\n stub = create_stub()\n self.assertTrue(os.path.exists(stub) and os.path.isdir(stub))\n\n # Make a manifest for it:\n self.assertEqual(convert([stub]),\n \"\"\"[bar]\n[fleem]\n[foo]\n[subdir/subfile]\"\"\")\n shutil.rmtree(stub) # cleanup\n\n # Now do the same thing but keep the manifests in place:\n stub = create_stub()\n convert([stub], write='manifest.ini')\n self.assertEqual(sorted(os.listdir(stub)),\n ['bar', 'fleem', 'foo', 'manifest.ini', 'subdir'])\n parser = ManifestParser()\n parser.read(os.path.join(stub, 'manifest.ini'))\n self.assertEqual([i['name'] for i in parser.tests],\n ['subfile', 'bar', 'fleem', 'foo'])\n parser = ManifestParser()\n parser.read(os.path.join(stub, 'subdir', 'manifest.ini'))\n self.assertEqual(len(parser.tests), 1)\n self.assertEqual(parser.tests[0]['name'], 'subfile')\n shutil.rmtree(stub)", "def test_change_file_to_dir_with_file(self):\n #TODO: File must be removed before directory is created\n dir0, dir1 = self.make_temp_dirs(2)\n self.write_file(dir0, \"foo\", \"bar\")\n self.sync_all()\n self.assertFile(dir0, \"foo\", \"bar\")\n self.assertFile(dir1, \"foo\", \"bar\")\n\n self.delete_file(dir0, \"foo\")\n self.write_file(dir0, \"foo/bar\", \"baz\")\n self.sync_all()\n self.assertFile(dir0, \"foo/bar\", \"baz\")\n self.assertFile(dir1, \"foo/bar\", \"baz\")", "def write_output(directory, name, html):\n if not os.path.isdir(directory):\n os.mkdir(directory)\n with open(os.path.join(directory, '.'.join((name, 'html'))), 'w') as f:\n f.write(beautify(html))", "def tmp_dir(monkeypatch):\n try:\n tmp_dir = tempfile.mkdtemp()\n yield tmp_dir\n finally:\n # tmp_dir を削除するためにカレントディレクトリを移動\n monkeypatch.chdir(os.path.dirname(tmp_dir))\n shutil.rmtree(tmp_dir)", "def 
test_absolute_outdir(tmp_path):\n # Create destination directory.\n tempdir = tmp_path / \"outdir\"\n tempdir.mkdir(mode=0o700)\n assert tempdir.exists()\n assert tempdir.is_absolute()\n assert len(list(tempdir.glob(\"**/*.*\"))) == 0, \"Must be empty.\"\n # Create a new configuration file with an absolute output_directory.\n # We are cheating a little by writing it to the same directory\n # where the test files will be saved.\n config_file = tempdir / Path(\"rewritten.cfg\")\n contents = Path(\"tests/generate.cfg\").read_text(encoding=\"utf-8\")\n contents = contents.replace(\".gendir-suite-cfg\", str(tempdir))\n contents = contents.replace(\"print = filename, summary\", \"print = summary\")\n _ = config_file.write_text(contents, encoding=\"utf-8\")\n phmdoctest.main.generate_using(config_file=config_file)\n assert config_file.exists(), \"In output_directory and didn't get wiped.\"\n assert (Path(tempdir) / \"test_project.py\").exists()\n assert (Path(tempdir) / \"test_doc__directive1.py\").exists()\n assert (Path(tempdir) / \"test_doc__directive2.py\").exists()\n assert (Path(tempdir) / \"test_doc__directive3.py\").exists()\n assert (Path(tempdir) / \"test_doc__example1.py\").exists()\n assert (Path(tempdir) / \"test_doc__example2.py\").exists()\n assert (Path(tempdir) / \"test_doc__inline_example.py\").exists()\n assert (Path(tempdir) / \"test_tests__managenamespace.py\").exists()\n assert (Path(tempdir) / \"test_tests__one_code_block.py\").exists()\n assert (Path(tempdir) / \"test_tests__output_has_blank_lines.py\").exists()\n assert (Path(tempdir) / \"test_tests__setup_only.py\").exists()\n assert (Path(tempdir) / \"test_tests__twentysix_session_blocks.py\").exists()\n assert len(list(tempdir.glob(\"**/*.*\"))) == 13, \"12 test files and .cfg file.\"", "def setup_outdir():\n try:\n shutil.rmtree(OUTDIR)\n except FileNotFoundError:\n pass\n os.makedirs(OUTDIR, exist_ok=True)", "def test_make_new_dir_1(self):\n test_dir = Path(\"test_dir\")\n output_path = basic.make_new_dir(self.base_dir, test_dir)\n exp_dir = \"test_dir\"\n exp_path = Path(self.base_dir, exp_dir)\n with self.subTest():\n self.assertTrue(exp_path.is_dir())\n with self.subTest():\n self.assertEqual(exp_dir, output_path.stem)", "def test_SpecConfig_class_minimal():\n res = SpecConfig(path=PATH_SPECS_2_YAML)\n assert res.path_out == PATH_SPECS_2_YAML_MODIFIED", "def tmp_dir(data_dir):\n tmp_dir = os.path.join(data_dir, 'manorm_tmp_output')\n yield tmp_dir\n shutil.rmtree(tmp_dir)", "def add_path_to_spec(spec, checkpoint_directory):\n # From pathways.tensorstore_utils\n spec = copy.deepcopy(spec)\n spec['kvstore']['path'] = os.path.join(checkpoint_directory,\n spec['kvstore']['path'])\n return spec", "def _FinaliseForTest():\n global outdir\n\n if outdir:\n _RemoveOutputDir()\n outdir = None", "def save(self, directory):\n pass # pragma: no cover", "def write_to_file(output, test_case_name, path):\n path_to_store = OutputWrite.make_test_dir(path, test_case_name)\n time_stamp = OutputWrite.get_time_stamp()\n try:\n LOG.debug('Changing the dir to {0}'.format(path_to_store))\n os.chdir(path_to_store)\n except Exception as _ex_:\n LOG.exception('Error :{0}'.format(_ex_))\n else:\n file_name = os.path.join(path_to_store, test_case_name +\n time_stamp)\n LOG.debug('The file name after joining = {0}'.format(file_name))\n try:\n LOG.debug('Writing Test case output to the file')\n with open(file_name, 'w') as file_obj:\n file_obj.write(output)\n except FileNotFoundError as _ex_:\n LOG.exception('Error : {0}'.format(_ex_))", "def 
split_spec(self):\n from django_swagger_utils.spec_client.split_spec import SplitSpec\n from django_swagger_utils.core.utils.check_path_exists import check_path_exists\n\n if check_path_exists(os.path.join(self.paths['api_spec_dir'], \"specs\")):\n from shutil import rmtree\n rmtree(os.path.join(self.paths['api_spec_dir'], \"specs\"))\n split_spec = SplitSpec(self.paths['api_spec_dir'], self.paths['base_dir'])\n split_spec.split()", "def write_tmp_patch(diff, filename=None):\n if not filename:\n prefix = 'cugit-'\n suffix = '-patch'\n filename = mkstemp(suffix, prefix)[1]\n with open(filename, 'w') as f:\n f.write(diff)\n return filename", "def SetupOutDir(out_dir):\n logging.info('entering ...')\n assert re.match(r'^[a-zA-Z_\\-0-9]+$', out_dir), 'bad out_dir: %s' % out_dir\n\n if os.path.exists(out_dir):\n subprocess.check_call(['rm', '-rf', out_dir])\n os.mkdir(out_dir)\n logging.info('... done')", "def _make_output_directory(self):\n fs = self._filesystem\n output_filename = fs.join(self._root_output_dir, self._test_name)\n fs.maybe_make_directory(fs.dirname(output_filename))", "def create(self, basedir, outdir, name, prefix=None):", "def make_experiment_directory(path='',config=None,default_dir='_runs'):\n directory = path\n if not path:\n timestamp = datetime.now().strftime('%Y-%m-%dT%H-%M-%S-%f')\n directory = os.path.join(default_dir,timestamp)\n directory = os.path.abspath(directory) \n if os.path.isdir(directory) and not config.override and not config.cloud:\n raise ValueError(\n 'directory already exists, use --override option: %s'\n % directory)\n elif os.path.isdir(directory) and not config.cloud: \n rmtree(directory)\n if not config.cloud: \n os.makedirs(directory)\n if config:\n config.wdir = directory \n return directory", "def save_specs(self, filename):\n pass", "def save_specs(self, filename):\n pass", "def merge_spec(self):\n from django_swagger_utils.spec_client.merge_spec import MergeSpec\n merge_spec = MergeSpec(self.paths['api_spec_dir'], self.paths['base_dir'])\n merge_spec.merge()", "def write(task_spec: TaskSpec, destination_dir: Path, force: bool = False):\n\n file_path: Path = destination_dir / task_spec.filename\n file_path.touch(exist_ok=force)\n\n writable_task_spec: Dict = clean(task_spec)\n\n yaml.dump(writable_task_spec, file_path)", "def test_write(self):\n temp_file = tempfile.mkstemp()[1]\n try:\n with open(temp_file, \"w+\") as fh:\n self.new_manifest.write(fh)\n tools.eq_(self.new_manifest, load_manifest(temp_file))\n finally:\n os.unlink(temp_file)", "def test_write_config(default_config, tmp_path):\n testpath = Path(tmp_path, \"write_config\")\n testpath.mkdir()\n abcconfig.write_config(default_config, configpath=testpath)\n assert Path(testpath, \"config.yml\").exists()" ]
[ "0.59088475", "0.5767855", "0.56413054", "0.54185474", "0.53972125", "0.5375559", "0.5354217", "0.5278695", "0.5238211", "0.52255595", "0.5221744", "0.5195567", "0.5194306", "0.5183714", "0.51730597", "0.5154644", "0.51394916", "0.51373714", "0.5113076", "0.5090471", "0.50892586", "0.507057", "0.5070054", "0.50559396", "0.50499976", "0.50499976", "0.50258124", "0.5022972", "0.5016939", "0.50144804" ]
0.658977
0
read spec code and populate codestructures
def processSpecs(self): specSubDirName="_spec" codestructure = CodeStructure() for dir in self._dirs: if q.system.fs.exists(q.system.fs.joinPaths(dir,specSubDirName)): files=q.system.fs.listPyScriptsInDir(q.system.fs.joinPaths(dir,specSubDirName)) for fileName in files: codestructure.addCodeFile(self.processSpecFile(q.system.fs.joinPaths(dir,specSubDirName),"%s.py" % fileName)) return codestructure
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def processSourceCode(self):\n specSubDirName=\"\"\n codestructure = CodeStructure() \n for dir in self._dirs:\n if q.system.fs.exists(q.system.fs.joinPaths(dir,specSubDirName)): \n files=q.system.fs.listPyScriptsInDir(q.system.fs.joinPaths(dir,specSubDirName))\n for fileName in files:\n codestructure.addCodeFile(self.processSourceCodeFile(q.system.fs.joinPaths(dir,specSubDirName),\"%s.py\" % fileName))\n return codestructure", "def writeSpec(self,dir=\"\"):\n for codestruct in self.codestructures:\n codestruct.writeSpec(dir)", "def parse(source_code):\n tokens = tokenize(source_code)\n return read(tokens)", "def parse_spec (spec_file):\n spec_object = None\n spec_name = spec_file.replace(\".\", \"_\")\n params = []\n default_params = {}\n int_conversion = []\n namedtuple = False\n delimiter = \"\\n\"\n\n spec_file = open(spec_file, \"r\")\n spec = spec_file.readlines()\n spec_file.close()\n\n for line in spec:\n line = line.strip()\n param_name = None\n default_param = None\n if line.startswith(\"%id\"):\n spec_name = line.split(\" \", 1)[1]\n elif line.startswith(\"%delim\"):\n delimiter = line.split(\" \", 1)[1].strip()\n elif line.startswith(\"$\"):\n line = line.split(\" \", 1)\n if len(line) >= 1:\n param_name = line[0].strip(\"$\")\n if len(line) == 2:\n default_param = line[1].strip()\n if param_name and not param_name.isdigit():\n namedtuple = True\n if default_param and param_name.isdigit():\n assert param_name != \"0\"\n params.append(param_name)\n if default_param:\n default_params[param_name]=default_param\n elif line.startswith(\"%int\"):\n var = line.split(\" \", 1)[1].strip()\n int_conversion.append(var)\n\n if namedtuple:\n class parent (object):\n def __init__ (self, *args, **kwargs):\n self.__name__ = spec_name\n if len(args) == len(params):\n # arg for arg\n for key, value in zip(params, args):\n self.__dict__[key] = value\n elif len(kwargs) == len(params):\n for key, value in kwargs.iteritems():\n self.__dict__[key] = value\n else:\n assert not \"Didn't get the right number of arguments!\"\n def __repr__ (self):\n values = \"\"\n for key in params:\n values += \"%s=%s,\" % (key, repr(self.__dict__[key]))\n return \"<%s %s>\" % (self.__name__, values.strip(\", \"))\n else:\n parent = list\n\n class spec_object (parent):\n def __init__ (self, block):\n self.__name__ = spec_name\n if isinstance(block, str):\n block = split_escaped_delim(delimiter, block.strip())\n assert len(block) + len(default_params) >= len(params)\n if len(block) < len(params):\n for key, default in default_params.iteritems():\n if key.isdigit():\n assert int(key) >= len(block)\n block.insert(int(key), default)\n else:\n block.append(\"%s=%s\" % (key, default))\n\n if not namedtuple:\n if int_conversion:\n for conv in int_conversion:\n block[conv] = int(block[conv])\n parent.__init__(self, block)\n else:\n new_data = {}\n for item in block:\n new_item = split_escaped_delim(\"=\", item, 1)\n if len(new_item) == 1:\n new_item = split_escaped_delim(\":\", item, 1)\n if len(new_item) == 1:\n raise DatabaseError, \"Corrupted line? 
%s\" % item\n item = new_item\n if int_conversion and item[0] in int_conversion:\n item[1] = int(item[1])\n assert len(item) == 2\n # Don't overwrite real data with default values!\n if item[0] not in new_data:\n new_data[item[0]] = item[1]\n\n parent.__init__(self, **new_data)\n elif isinstance(block, list):\n if not namedtuple:\n parent.__init__(self, block)\n else:\n parent.__init__(self, *block)\n elif isinstance(block, dict):\n assert namedtuple\n parent.__init__(self, **block)\n def __repr__ (self):\n if namedtuple:\n return parent.__repr__(self)\n else:\n return \"<%s %s>\" % (self.__name__, parent.__repr__(self))\n\n return spec_object", "def read_input(self, specs):\n print('DEBUGG specs:', specs)", "def setup(self, ds: PetscDocStringImpl) -> None:\n items = {}\n\n class Inspector:\n __slots__ = 'codeblocks', 'startline'\n\n codeblocks: int\n startline: int\n\n def __init__(self, startline: int) -> None:\n self.codeblocks = 0\n self.startline = startline\n return\n\n def __call__(self, ds: PetscDocStringImpl, loc: SourceRange, line: str, verdict: Verdict) -> None:\n sub = self.codeblocks\n lstrp = line.lstrip()\n if lstrp.startswith('.vb'):\n items[sub] = [loc.start.line - self.startline]\n elif lstrp.startswith('.ve'):\n assert len(items[sub]) == 1\n items[sub].append(loc.start.line - self.startline + 1)\n self.codeblocks += 1\n return\n\n super()._do_setup(ds, Inspector(self.extent.start.line if self else 0))\n self.items = items\n return", "def _parse(self):\n with open(_join(self.man_dir, self.man_fn)) as fp:\n lines = fp.readlines()\n \n desc_indxs = []\n for i, L in enumerate(lines):\n if \"#landuse\" in L or \" # landuse\" in L:\n desc_indxs.append(i-1)\n desc_indxs.append(i-2)\n desc_indxs.append(i-3)\n \n lines = [L[:L.find('#')].strip() for L in lines]\n lines = [L for i, L in enumerate(lines) if len(L) > 0 or i in desc_indxs]\n\n del desc_indxs\n \n self.datver = lines.pop(0)\n self.nofe = int(lines.pop(0))\n self.sim_years = int(lines.pop(0))\n \n # Read Plant Growth Section\n self.plants = PlantLoops(lines, self)\n\n # Read Operation Section\n self.ops = OpLoops(lines, self)\n \n # Read Initial Condition Section\n self.inis = IniLoops(lines, self)\n \n # Read Surface Effects Section\n self.surfs = SurfLoops(lines, self)\n \n # Read Contour Section\n self.contours = ContourLoops(lines, self)\n \n # Read Drainage Section\n self.drains = DrainLoops(lines, self)\n \n # Read Yearly Section\n self.years = YearLoops(lines, self)\n \n # Read Management Section \n self.man = ManagementLoop(lines, self)", "def _parse(self):\n\n self.specification = {}\n\n while True:\n try:\n line = self._lines.current\n if ':' in line:\n self.specification.update(self._parse_spec())\n elif line.startswith('NODE_COORD_SECTION'):\n next(self._lines)\n self.coords = self._parse_coords()\n elif line.startswith('EDGE_WEIGHT_SECTION'):\n next(self._lines)\n self.weights = self._parse_weights()\n elif line.startswith('DISPLAY_DATA_SECTION'):\n next(self._lines)\n self.display = self._parse_coords()\n else:\n break\n except StopIteration:\n break\n\n del self._lines", "def build_specfile_sections(spec):\n str = \"\"\n\n mandatory_sections = {\n 'DESCRIPTION' : '\\n%%description\\n%s\\n\\n', }\n\n str = str + SimpleTagCompiler(mandatory_sections).compile( spec )\n\n optional_sections = {\n 'DESCRIPTION_' : '%%description -l %s\\n%s\\n\\n',\n 'CHANGELOG' : '%%changelog\\n%s\\n\\n',\n 'X_RPM_PREINSTALL' : '%%pre\\n%s\\n\\n',\n 'X_RPM_POSTINSTALL' : '%%post\\n%s\\n\\n',\n 'X_RPM_PREUNINSTALL' : 
'%%preun\\n%s\\n\\n',\n 'X_RPM_POSTUNINSTALL' : '%%postun\\n%s\\n\\n',\n 'X_RPM_VERIFY' : '%%verify\\n%s\\n\\n',\n\n # These are for internal use but could possibly be overridden\n 'X_RPM_PREP' : '%%prep\\n%s\\n\\n',\n 'X_RPM_BUILD' : '%%build\\n%s\\n\\n',\n 'X_RPM_INSTALL' : '%%install\\n%s\\n\\n',\n 'X_RPM_CLEAN' : '%%clean\\n%s\\n\\n',\n }\n\n # Default prep, build, install and clean rules\n # TODO: optimize those build steps, to not compile the project a second time\n if 'X_RPM_PREP' not in spec:\n spec['X_RPM_PREP'] = '[ -n \"$RPM_BUILD_ROOT\" -a \"$RPM_BUILD_ROOT\" != / ] && rm -rf \"$RPM_BUILD_ROOT\"' + '\\n%setup -q'\n\n if 'X_RPM_BUILD' not in spec:\n spec['X_RPM_BUILD'] = '[ ! -e \"$RPM_BUILD_ROOT\" -a \"$RPM_BUILD_ROOT\" != / ] && mkdir \"$RPM_BUILD_ROOT\"'\n\n if 'X_RPM_INSTALL' not in spec:\n spec['X_RPM_INSTALL'] = 'scons --install-sandbox=\"$RPM_BUILD_ROOT\" \"$RPM_BUILD_ROOT\"'\n\n if 'X_RPM_CLEAN' not in spec:\n spec['X_RPM_CLEAN'] = '[ -n \"$RPM_BUILD_ROOT\" -a \"$RPM_BUILD_ROOT\" != / ] && rm -rf \"$RPM_BUILD_ROOT\"'\n\n str = str + SimpleTagCompiler(optional_sections, mandatory=0).compile( spec )\n\n return str", "def read(self, run):\n # read the file\n self['run'] = run[0:run.rfind('.xml')]\n f = open(run)\n for line in f:\n \n if line.find('SDSU Exec') >= 0:\n n1 = line.index('name=') + 6\n n2 = line.index('\"', n1)\n self['application'] = line[n1:n2]\n\n elif line.find('<detector_status') >= 0:\n n1 = line.index('name=') + 6\n n2 = line.index('\"', n1)\n if line[n1:n2] != 'Ultraspec':\n raise Exception, 'Run ' + run + ' is not an Ultraspec file.'\n \n elif line.find('SPEED') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['speed'] = line[n1:n2]\n \n elif line.find('X_BIN') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['x_bin'] = line[n1:n2]\n \n elif line.find('Y_BIN') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['y_bin'] = line[n1:n2]\n \n # first window \n \n elif line.find('X1_START') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['x1_start'] = line[n1:n2]\n \n elif line.find('X1_SIZE') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['x1_size'] = line[n1:n2]\n \n elif line.find('Y1_START') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['y1_start'] = line[n1:n2]\n \n elif line.find('Y1_SIZE') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['y1_size'] = line[n1:n2]\n \n # second window\n \n elif line.find('X2_START') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['x2_start'] = line[n1:n2]\n \n elif line.find('X2_SIZE') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['x2_size'] = line[n1:n2]\n \n elif line.find('Y2_START') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['y2_start'] = line[n1:n2]\n \n elif line.find('Y2_SIZE') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['y2_size'] = line[n1:n2]\n \n elif line.find('<target>') >= 0:\n n1 = line.index('target') + 7\n n2 = line.index('<', n1)\n self['target'] = line[n1:n2]\n\n elif line.find('<grating>') >= 0:\n n1 = line.index('grating') + 8\n n2 = line.index('<', n1)\n self['grating'] = line[n1:n2]\n\n elif line.find('<slit_width>') >= 0:\n n1 = line.index('slit_width') + 11\n n2 = line.index('<', n1)\n self['slit_width'] = line[n1:n2]\n\n elif line.find('<slit_angle>') >= 0:\n n1 = line.index('slit_angle') + 11\n n2 = line.index('<', 
n1)\n self['slit_angle'] = line[n1:n2]\n \n elif line.find('<filters>') >= 0:\n n1 = line.index('filters') + 8\n n2 = line.index('<', n1)\n self['filters'] = line[n1:n2]\n\n elif line.find('<ID>') >= 0:\n n1 = line.index('ID') + 3\n n2 = line.index('<', n1)\n self['ID'] = line[n1:n2]\n\n elif line.find('<PI>') >= 0:\n n1 = line.index('PI') + 3\n n2 = line.index('<', n1)\n self['PI'] = line[n1:n2]\n\n elif line.find('<comment>') >= 0:\n n1 = line.index('comment') + 8\n n2 = line.index('<', n1)\n self['comment'] = line[n1:n2]\n \n\n # check that we have found what we expected to find\n if 'application' not in self:\n raise Exception, 'Failed to find application name in ' + run\n\n if self.is_not_power_onoff():\n\n if 'x_bin' not in self:\n raise Exception, 'Failed to find X_BIN in ' + run\n\n if 'y_bin' not in self:\n raise Exception, 'Failed to find Y_BIN in ' + run\n\n if 'x1_start' not in self:\n raise Exception, 'Failed to find X2_START in ' + run\n \n if 'x1_size' not in self:\n raise Exception, 'Failed to find X2_SIZE in ' + run\n \n if 'y1_start' not in self:\n raise Exception, 'Failed to find Y2_START in ' + run\n \n if 'y1_size' not in self:\n raise Exception, 'Failed to find Y2_SIZE in ' + run\n \n if 'x2_start' not in self:\n raise Exception, 'Failed to find X2_START in ' + run\n \n if 'x2_size' not in self:\n raise Exception, 'Failed to find X2_SIZE in ' + run\n \n if 'y2_start' not in self:\n raise Exception, 'Failed to find Y2_START in ' + run\n \n if 'y2_size' not in self:\n raise Exception, 'Failed to find Y2_SIZE in ' + run\n \n if 'target' not in self:\n self['target'] = 'UNKNOWN'\n\n if 'filters' not in self:\n self['filters'] = '---'\n\n if 'grating' not in self:\n self['grating'] = '---'\n\n if 'slit_width' not in self:\n self['slit_width'] = '---'\n\n if 'slit_angle' not in self:\n self['slit_angle'] = '---'\n\n if 'ID' not in self:\n self['ID'] = 'UNKNOWN'\n\n if 'PI' not in self:\n self['PI'] = 'UNKNOWN'", "def parse(source):\r\n #remove shared indentation\r\n source = dedent(source)\r\n print source\r\n\r\n structure = structure_parse(source)\r\n\r\n print structure.signature\r\n print structure.annotation\r\n print structure.body\r\n\r\n\r\n signature = signature_parse(structure.signature)\r\n\r\n kernel = kernel_parse(signature.kernel)\r\n inputs = input_parse(signature.input)\r\n\r\n print signature.kernel\r\n print signature.input\r\n print signature.output\r\n\r\n\r\n quit()", "def parse_spec(inp_file):\n try:\n y_spec = yaml.load(inp_file, Loader=yaml.SafeLoader)\n spec = create_spec(y_spec)\n except jsonschema.exceptions.RefResolutionError:\n logging.error(\"Could not load specification. Check your network or try again\")\n raise err.BeaconTestError()\n except openapi_spec_validator.exceptions.OpenAPIValidationError:\n logging.error(\"Could not read specification. Check tat your file is valid\")\n raise err.BeaconTestError()\n return spec", "def __init__(self, code):\n if isinstance(code, str):\n if not self._iscode(code):\n raise ValueError(\"String is not a valid LoC code\")\n\n self.orig_code = \"\"\n self.section = \"\"\n self.topic = \"\"\n self.sub_topic = \"\"\n self.cutter = \"\"\n self.version = 0\n self._year = 0\n self.work_letter = \"\"\n self.copy = 0\n self.other = \"\"\n\n self.orig_code = code\n code = code.split()\n\n # If there's a section name, pull that out first\n if code[0].isalpha():\n self.section = code.pop(0)\n\n # Now, get the topic and sub-topic if they exist. 
Also cutter if it's dotted.\n whole_topic = code.pop(0)\n whole_topic = whole_topic.split(\".\")\n self.topic = whole_topic.pop(0) +\\\n (\".{}\".format(whole_topic.pop(0)) if len(whole_topic) and whole_topic[0].isnumeric() else \"\")\n if len(whole_topic):\n self.sub_topic = whole_topic.pop(0)\n if len(whole_topic):\n self.cutter = whole_topic[0]\n\n # Now, pull out the cutter if it exists separately.\n if len(code) and is_topic_or_cutter(code[0]):\n self.cutter = code.pop(0)\n\n # Remainder can come in any order. We'll figure out which it is each iteration.\n for item in code:\n if item.startswith(\"v.\"):\n self.version = int(item[2:])\n elif item.startswith(\"c.\"):\n self.copy = int(item[2:])\n elif is_year(item):\n self._year, self.work_letter = _split_year(item)\n elif self.section != \"\" and item.isalpha():\n self.section = item\n else:\n if self.other:\n self.other += \" \"\n self.other += item\n elif isinstance(code, LOC):\n self.orig_code = code.orig_code\n self.section = code.section\n self.topic = code.topic\n self.sub_topic = code.sub_topic\n self.cutter = code.cutter\n self.version = code.version\n self._year = code._year\n self.work_letter = code.work_letter\n self.copy = code.copy\n self.other = code.other\n else:\n raise TypeError(\"Input must be a string LoC code or LoC object\")", "def init_from_file(self):\n self.src.load('start.00') \n self.oe1.load('start.01')\n #self.det.load('start.02')\n print('NOTE: variables loaded from start.00/start.01 files')", "def pre_process(in_path):\n in_string = open(in_path, 'r').read()\n multi_line = '/\\\\*[^*]*\\\\*+(?:[^/*][^*]*\\\\*+)*/'\n\n # header\n description = re.search(multi_line, in_string).group(0)\n unit = re.search('\\\\n\\\\s*// unit .*', in_string).group(0)\n imports = re.findall('\\\\n\\\\s*// import .*', in_string)\n import_string = ''\n for i in imports:\n import_string += resolve_import(i.strip()[10:], in_path.parent)\n\n use_string = ''\n uses = re.findall('\\\\n\\\\s*// uses .*', in_string)\n for u in uses:\n use_string += 'uses ' + u.strip()[8:] + ';\\n'\n if use_string != '':\n use_string = '\\n\\n' + use_string\n\n header = '{' + description[2:-2] + '}\\n\\nunit ' + unit.strip()[8:] + ';' + use_string + '\\n\\n'\n\n # main part\n in_string_list, delphi_string_list = split(import_string + '\\n\\n' + in_string)\n\n return header, in_string_list, delphi_string_list", "def __init__(self, filename):\r\n self._results = SpecParser(filename).parse()", "def load(self):\n self.data = NSPSpecIO().read(self.path)", "def parseProgram(inputFile):\n print(\"Program\")\n parseStatements(inputFile)", "def __init__(self, spec, decl=None):\n self._spec = []\n self.initialize()\n self._processDecl(decl)\n self._processSpec(spec)", "def parse(self):\n\n coverage_data = {\n 'packages': {},\n 'summary': {'lines-total': 0, 'lines-covered': 0,\n 'branches-total': 0, 'branches-covered': 0},\n 'timestamp': str(int(time.time()))\n }\n package = None\n current_file = None\n file_lines_total = 0\n file_lines_covered = 0\n file_lines = {}\n file_methods = {}\n file_branches_total = 0\n file_branches_covered = 0\n\n for line in self.lcov_data.split('\\n'):\n if line.strip() == 'end_of_record':\n if current_file is not None:\n package_dict = coverage_data['packages'][package]\n package_dict['lines-total'] += file_lines_total\n package_dict['lines-covered'] += file_lines_covered\n package_dict['branches-total'] += file_branches_total\n package_dict['branches-covered'] += file_branches_covered\n file_dict = 
package_dict['classes'][current_file]\n file_dict['lines-total'] = file_lines_total\n file_dict['lines-covered'] = file_lines_covered\n file_dict['lines'] = dict(file_lines)\n file_dict['methods'] = dict(file_methods)\n file_dict['branches-total'] = file_branches_total\n file_dict['branches-covered'] = file_branches_covered\n coverage_data['summary']['lines-total'] += file_lines_total\n coverage_data['summary']['lines-covered'] += file_lines_covered\n coverage_data['summary']['branches-total'] += file_branches_total\n coverage_data['summary']['branches-covered'] += file_branches_covered\n\n line_parts = line.split(':')\n input_type = line_parts[0]\n\n if input_type == 'SF':\n # Get file name\n file_name = line_parts[-1].strip()\n relative_file_name = os.path.relpath(file_name, self.base_dir)\n package = '.'.join(relative_file_name.split(os.path.sep)[0:-1])\n class_name = file_name.split(os.path.sep)[-1]\n if package not in coverage_data['packages']:\n coverage_data['packages'][package] = {\n 'classes': {}, 'lines-total': 0, 'lines-covered': 0,\n 'branches-total': 0, 'branches-covered': 0\n }\n coverage_data['packages'][package]['classes'][\n relative_file_name] = {\n 'name': class_name, 'lines': {}, 'lines-total': 0,\n 'lines-covered': 0, 'branches-total': 0,\n 'branches-covered': 0\n }\n package = package\n current_file = relative_file_name\n file_lines_total = 0\n file_lines_covered = 0\n file_lines.clear()\n file_methods.clear()\n file_branches_total = 0\n file_branches_covered = 0\n elif input_type == 'DA':\n # DA:2,0\n (line_number, line_hits) = line_parts[-1].strip().split(',')\n line_number = int(line_number)\n if line_number not in file_lines:\n file_lines[line_number] = {\n 'branch': 'false', 'branches-total': 0,\n 'branches-covered': 0\n }\n file_lines[line_number]['hits'] = line_hits\n # Increment lines total/covered for class and package\n if int(line_hits) > 0:\n file_lines_covered += 1\n file_lines_total += 1\n elif input_type == 'BRDA':\n # BRDA:1,1,2,0\n (line_number, block_number, branch_number, branch_hits) = line_parts[-1].strip().split(',')\n line_number = int(line_number)\n if line_number not in file_lines:\n file_lines[line_number] = {\n 'branch': 'true', 'branches-total': 0,\n 'branches-covered': 0, 'hits': 0\n }\n file_lines[line_number]['branch'] = 'true'\n file_lines[line_number]['branches-total'] += 1\n file_branches_total += 1\n if branch_hits != '-' and int(branch_hits) > 0:\n file_lines[line_number]['branches-covered'] += 1\n file_branches_covered += 1\n elif input_type == 'BRF':\n file_branches_total = int(line_parts[1])\n elif input_type == 'BRH':\n file_branches_covered = int(line_parts[1])\n elif input_type == 'FN':\n # FN:5,(anonymous_1)\n function_name = line_parts[-1].strip().split(',')[1]\n function_name = self.demangle_function_name(function_name)\n file_methods[function_name] = '0'\n elif input_type == 'FNDA':\n # FNDA:0,(anonymous_1)\n (function_hits, function_name) = line_parts[-1].strip().split(',')\n function_name = self.demangle_function_name(function_name)\n file_methods[function_name] = function_hits\n\n # Exclude packages\n excluded = [x for x in coverage_data['packages'] for e in self.excludes\n if re.match(e, x)]\n for package in excluded:\n del coverage_data['packages'][package]\n\n # Compute line coverage rates\n for package_data in list(coverage_data['packages'].values()):\n package_data['line-rate'] = self._percent(\n package_data['lines-total'],\n package_data['lines-covered'])\n package_data['branch-rate'] = self._percent(\n 
package_data['branches-total'],\n package_data['branches-covered'])\n\n return coverage_data", "def proc_data_file(cfg, data_file, atom_id_dict, type_dict):\n # Easier to pass when contained in a dictionary\n nums_dict = {}\n num_dict_headers = [NUM_ATOMS, NUM_ATOM_TYP, NUM_BONDS, NUM_BOND_TYP, NUM_ANGLS, NUM_ANGL_TYP,\n NUM_DIHES, NUM_DIHE_TYP, NUM_IMPRS, NUM_IMPR_TYP]\n\n with open(data_file) as d:\n print(\"Reading file: {}\".format(data_file))\n section = SEC_HEAD\n found_box_size = False\n section_order = []\n count = 0\n for key in num_dict_headers:\n nums_dict[key] = None\n content = {SEC_HEAD: [], }\n highlight_content = {}\n\n for line in d.readlines():\n line = line.strip()\n if len(line) == 0:\n continue\n\n if section is None:\n section, count = find_section_state(line, section, section_order, content, highlight_content)\n\n elif section == SEC_HEAD:\n # Head is the only section of indeterminate lengths, so check every line *after the first, comment\n # line** to see if a new section is encountered\n if count == 0:\n content[SEC_HEAD].append(line)\n content[SEC_HEAD].append('')\n count += 1\n else:\n section, count = find_section_state(line, section, section_order, content, highlight_content)\n if section == SEC_HEAD:\n s_line = line.split()\n try:\n # For the box sizes:\n s_line[0:2] = list(map(float, s_line[0:2]))\n if not found_box_size:\n found_box_size = True\n content[SEC_HEAD].append(\"\")\n content[SEC_HEAD].append('{:12.5f} {:12.5f} {:} {:}'.format(*s_line))\n except ValueError:\n s_line[0] = int(s_line[0])\n content[SEC_HEAD].append('{:12d} {:}'.format(s_line[0], \" \".join(s_line[1:])))\n find_header_values(line, nums_dict)\n else:\n # Upon exiting header, see if have minimum data needed\n if nums_dict[NUM_ATOMS] is None:\n raise InvalidDataError(\"Did not find total atom number in the header of \"\n \"file {}\".format(data_file))\n\n for key, val in nums_dict.items():\n if val <= 0:\n raise InvalidDataError(\"Invalid value ({}) encountered for key '{}' in file: \"\n \"{}\".format(val, key, data_file))\n\n elif section in TYPE_SEC_DICT:\n s_line = line.split()\n\n try:\n coeff_id = int(s_line[0])\n except ValueError as e:\n raise InvalidDataError(\"Encountered error '{}' reading line: {} \\n in file: {}\\n\"\n \"Check number of lines in the section to make sure that they match the \"\n \"number specified in the header section.\".format(e, line, data_file))\n\n # Rename the following to make it easier to follow:\n type_count = TYPE_SEC_DICT[section][0]\n highlight_types = cfg[TYPE_SEC_DICT[section][1]]\n change_dict = type_dict[TYPE_SEC_DICT[section][2]]\n\n if coeff_id in change_dict:\n s_line[0] = change_dict[coeff_id]\n else:\n s_line[0] = coeff_id\n\n content[section].append(s_line)\n\n if coeff_id in highlight_types:\n highlight_content[section].append(s_line)\n if type_count in nums_dict:\n if count == nums_dict[type_count]:\n content[section].sort()\n section = None\n\n else:\n count += 1\n else:\n raise InvalidDataError(\"Found section {}, but did not find number of entries for that section \"\n \"in the header.\".format(section))\n\n elif section == SEC_VELOS:\n s_line = line.split()\n try:\n atom_id = int(s_line[0])\n except (ValueError, KeyError) as e:\n raise InvalidDataError(\"In section '{}', Error {} on line: {}\\n in file: {}\"\n \"\".format(section, e, line, data_file))\n if atom_id in atom_id_dict:\n s_line[0] = atom_id_dict[atom_id]\n else:\n s_line[0] = atom_id\n content[section].append(s_line)\n\n if atom_id in cfg[PRINT_DATA_ATOMS] or atom_id in 
cfg[PRINT_OWN_ATOMS]:\n highlight_content[section].append(s_line)\n\n for col in range(1, 4):\n s_line[col] = float(s_line[col])\n\n if count == nums_dict[NUM_ATOMS]:\n content[section].sort()\n highlight_content[section].sort()\n section = None\n else:\n count += 1\n\n elif section == SEC_ATOMS:\n s_line = line.split()\n try:\n atom_id = int(s_line[0])\n atom_type = int(s_line[2])\n except (ValueError, KeyError) as e:\n raise InvalidDataError(\"In section '{}', Error {} on line: {}\\n in file: {}\"\n \"\".format(section, e, line, data_file))\n\n if atom_id in atom_id_dict:\n s_line[0] = atom_id_dict[atom_id]\n else:\n s_line[0] = atom_id\n\n if atom_type in type_dict[SEC_ATOMS]:\n s_line[2] = type_dict[SEC_ATOMS][atom_type]\n\n for col in range(3, 7):\n s_line[col] = float(s_line[col])\n\n content[section].append(s_line)\n\n if atom_id in cfg[PRINT_DATA_ATOMS] or atom_id in cfg[PRINT_OWN_ATOMS]:\n highlight_content[section].append(s_line)\n\n if count == nums_dict[NUM_ATOMS]:\n content[section].sort()\n highlight_content[section].sort()\n section = None\n else:\n count += 1\n elif section in NUM_SEC_DICT:\n highlight_line = False\n tot_num_key = NUM_SEC_DICT[section][0]\n if tot_num_key not in nums_dict:\n raise InvalidDataError(\"Found section {}, but did not find number of bonds \"\n \"in the header.\".format(section))\n\n min_col_num = NUM_SEC_DICT[section][1]\n s_line = line.split()\n try:\n s_line[0] = int(s_line[0])\n s_line[1] = int(s_line[1])\n atoms = list(map(int, s_line[2:min_col_num]))\n except (ValueError, KeyError) as e:\n raise InvalidDataError(\"Error {} reading line: {} \\n in section {} of file: {} \"\n \"\".format(e, line, section, data_file))\n new_atoms = atoms\n for index, atom_id in enumerate(atoms):\n if atom_id in atom_id_dict:\n new_atoms[index] = atom_id_dict[atom_id]\n if atom_id in cfg[PRINT_DATA_ATOMS]:\n highlight_line = True\n\n # check for ownership\n if section == SEC_BONDS:\n if atoms[0] in cfg[PRINT_OWN_ATOMS]:\n highlight_line = True\n else:\n if atoms[1] in cfg[PRINT_OWN_ATOMS]:\n highlight_line = True\n\n if s_line[1] in type_dict[section]:\n s_line[1] = type_dict[section][s_line[1]]\n\n if len(s_line) > min_col_num:\n end = s_line[min_col_num:]\n else:\n end = []\n\n # noinspection PyTypeChecker\n line_struct = s_line[0:2] + new_atoms + end\n content[section].append(line_struct)\n\n if highlight_line:\n highlight_content[section].append(line_struct)\n\n if count == nums_dict[tot_num_key]:\n if cfg[SORT_ME]:\n if section == SEC_BONDS:\n content[section].sort(key=itemgetter(3))\n content[section].sort(key=itemgetter(2))\n elif section == SEC_ANGLS:\n content[section].sort(key=itemgetter(4))\n content[section].sort(key=itemgetter(2))\n content[section].sort(key=itemgetter(3))\n else:\n content[section].sort(key=itemgetter(5))\n content[section].sort(key=itemgetter(4))\n content[section].sort(key=itemgetter(2))\n content[section].sort(key=itemgetter(3))\n # noinspection PyAssignmentToLoopOrWithParameter\n for index, line in enumerate(content[section]):\n line[0] = index + 1\n section = None\n else:\n count += 1\n\n if cfg[DATA_COMP] is None:\n print_content(atom_id_dict, cfg, content, data_file, highlight_content, section_order, type_dict)\n return\n else:\n return content, section_order", "def _initFromData(self, data):\n # Read the standard header\n magic, bom, version, filesize, headersize, numblocks = \\\n _common.NDS_STD_FILE_HEADER.unpack_from(data, 0)\n if version != 0x100:\n raise ValueError(f'Unsupported SDAT version: {version}')\n\n if magic 
!= b'SDAT':\n raise ValueError(\"Wrong magic (should be b'SDAT', instead found \"\n f'{magic})')\n\n # Read the block offsets and sizes\n (symbolsBlockOffset, symbolsBlockSize,\n infoBlockOffset, infoBlockSize,\n fatBlockOffset, fatBlockSize,\n fileBlockOffset, fileBlockSize,\n ) = struct.unpack_from('<8I', data, 0x10)\n\n # Read the symbols block\n (symbolsMagic, symbolsSize) = \\\n struct.unpack_from('<4sI', data, symbolsBlockOffset)\n\n if symbolsBlockOffset != 0:\n symbolsOffsets = struct.unpack_from('<8I', data,\n symbolsBlockOffset + 8)\n assert symbolsMagic == b'SYMB'\n else:\n symbolsOffsets = [None] * 8\n\n\n lastEndOfString = 0 # relative to SYMB block\n def readSymbolsList(offset, hasSubgroups):\n \"\"\"\n Read a list of symbols at offset offset. If hasSubgroups,\n it'll be parsed assuming that the symbol table has entries\n for sub-symbol-lists as well. (In practice, this only occurs\n for SSARs.)\n If there are no symbols, return an empty list.\n \"\"\"\n nonlocal lastEndOfString\n\n if offset is None: return []\n\n off = symbolsBlockOffset + offset\n count, = struct.unpack_from('<I', data, off); off += 4\n\n symbols = []\n for i in range(count):\n symbolOff, = struct.unpack_from('<I', data, off)\n off += 4\n\n if symbolOff == 0:\n thisSymbol = None\n else:\n thisSymbol = _common.loadNullTerminatedStringFrom(data,\n symbolsBlockOffset + symbolOff)\n lastEndOfString = symbolOff + len(thisSymbol) + 1\n\n if not hasSubgroups:\n symbols.append(thisSymbol)\n else:\n subSymbolsOff, = struct.unpack_from('<I', data, off)\n off += 4\n\n if subSymbolsOff == 0:\n subSymbols = []\n else:\n subSymbols = readSymbolsList(subSymbolsOff, False)\n\n symbols.append((thisSymbol, subSymbols))\n \n return symbols\n\n\n # Read the FAT block\n (fatMagic, fatSize, fatCount) = \\\n struct.unpack_from('<4sII', data, fatBlockOffset)\n assert fatMagic == b'FAT ' # note trailing space\n\n # Read the files from the FILES block\n files = []\n fatArrayPos = fatBlockOffset + 0x0C\n self.fileAlignment = 0x200\n self.fatLengthsIncludePadding = True\n finalFileEnd = fileBlockOffset + 8\n for i in range(fatCount):\n (fileOffset, fileSize) = \\\n struct.unpack_from('<II', data, fatArrayPos)\n fatArrayPos += 0x10 # There's 8 pad bytes.\n\n # We'll need this later\n finalFileEnd = fileOffset + fileSize\n\n if i != fatCount - 1:\n nextOffset, = struct.unpack_from('<I', data, fatArrayPos)\n paddedSize = nextOffset - fileOffset\n if paddedSize != fileSize:\n self.fatLengthsIncludePadding = False\n\n # Most SDATs require files to be padded to 0x20, but some\n # use other amounts. 
We check for that here, so that we can\n # rebuild it correctly later.\n if fileOffset % 0x200 == 0x100:\n self.fileAlignment = min(self.fileAlignment, 0x100)\n if fileOffset % 0x100 == 0x80:\n self.fileAlignment = min(self.fileAlignment, 0x80)\n if fileOffset % 0x80 == 0x40:\n self.fileAlignment = min(self.fileAlignment, 0x40)\n if fileOffset % 0x40 == 0x20:\n self.fileAlignment = min(self.fileAlignment, 0x20)\n if fileOffset % 0x20 == 0x10:\n self.fileAlignment = min(self.fileAlignment, 0x10)\n if fileOffset % 0x10 == 8:\n self.fileAlignment = min(self.fileAlignment, 8)\n if fileOffset % 8 == 4:\n self.fileAlignment = min(self.fileAlignment, 4)\n if fileOffset % 4 == 2:\n self.fileAlignment = min(self.fileAlignment, 2)\n if fileOffset % 2 == 1: # yes, this happens sometimes\n self.fileAlignment = min(self.fileAlignment, 1)\n\n if i == 0:\n self.firstFileAlignment = self.fileAlignment\n\n file = data[fileOffset : fileOffset + fileSize]\n files.append(file)\n\n if self.firstFileAlignment == self.fileAlignment:\n self.firstFileAlignment = None\n\n # Check if the end is definitely unpadded (that is, if there\n # should be padding and it's not present)\n if finalFileEnd == len(data) and finalFileEnd % self.fileAlignment != 0:\n self.padAtEnd = False\n\n # Do another quick pass to find if the FAT file lengths include\n # padding\n\n # Read the info block\n (infoMagic, infoSize) = \\\n struct.unpack_from('<4sI', data, infoBlockOffset)\n infoOffsets = struct.unpack_from('<8I', data,\n infoBlockOffset + 8)\n assert infoMagic == b'INFO'\n\n def getInfoEntryOffsets(partNum):\n off = infoOffsets[partNum]\n count, = struct.unpack_from('<I', data, infoBlockOffset + off)\n entryOffsets = struct.unpack_from(f'<{count}I', data,\n infoBlockOffset + off + 4)\n for entryOff in entryOffsets:\n if entryOff == 0:\n yield None\n else:\n yield infoBlockOffset + entryOff\n\n\n # Info part 0: SSEQ (references SBNK)\n for entryOff, symb in itertools.zip_longest(getInfoEntryOffsets(0),\n readSymbolsList(symbolsOffsets[0], False)):\n if entryOff is None:\n sseq = None\n else:\n (fileID, unk02, bankID, volume, channelPressure,\n polyphonicPressure, playerID) = \\\n struct.unpack_from('<3H4B', data, entryOff)\n sseq = soundSequence.SSEQ(files[fileID], unk02, bankID,\n volume, channelPressure, polyphonicPressure, playerID)\n sseq.dataMergeOptimizationID = fileID\n\n self.sequences.append((symb, sseq))\n\n # Info part 1: SSAR\n for entryOff, symb in itertools.zip_longest(getInfoEntryOffsets(1),\n readSymbolsList(symbolsOffsets[1], True)):\n if entryOff is None:\n ssar = None\n else:\n fileID, unk02 = struct.unpack_from('<HH', data, entryOff)\n subSymb = symb[1] if symb is not None else None\n ssar = soundSequenceArchive.SSAR(files[fileID], unk02, subSymb)\n ssar.dataMergeOptimizationID = fileID\n\n name = symb[0] if symb is not None else None\n self.sequenceArchives.append((name, ssar))\n\n # Info part 2: SBNK\n for entryOff, symb in itertools.zip_longest(getInfoEntryOffsets(2),\n readSymbolsList(symbolsOffsets[2], False)):\n if entryOff is None:\n sbnk = None\n else:\n fileID, unk02 = struct.unpack_from('<HH', data, entryOff)\n swarIDs = struct.unpack_from('<4h', data, entryOff + 4)\n swarIDs2 = []\n for x in swarIDs:\n if x == -1:\n swarIDs2.append(None)\n else:\n swarIDs2.append(x)\n\n sbnk = soundBank.SBNK(files[fileID], unk02, swarIDs2)\n sbnk.dataMergeOptimizationID = fileID\n\n self.banks.append((symb, sbnk))\n\n # Info part 3: SWAR\n for entryOff, symb in itertools.zip_longest(getInfoEntryOffsets(3),\n 
readSymbolsList(symbolsOffsets[3], False)):\n if entryOff is None:\n swar = None\n else:\n fileID, unk02 = struct.unpack_from('<HH', data, entryOff)\n swar = soundWaveArchive.SWAR(files[fileID], unk02)\n swar.dataMergeOptimizationID = fileID\n\n self.waveArchives.append((symb, swar))\n\n # Info part 4: Sequence players\n for entryOff, symb in itertools.zip_longest(getInfoEntryOffsets(4),\n readSymbolsList(symbolsOffsets[4], False)):\n if entryOff is None:\n sp = None\n else:\n maxSequences, channelMask, heapSize = \\\n struct.unpack_from('<HHI', data, entryOff)\n\n channels = set()\n for i in range(16):\n if (channelMask >> i) & 1:\n channels.add(i)\n\n sp = soundSequencePlayer.SequencePlayer(maxSequences,\n channels,\n heapSize)\n\n self.sequencePlayers.append((symb, sp))\n\n # Info part 5: Groups\n for groupOff, symb in itertools.zip_longest(getInfoEntryOffsets(5),\n readSymbolsList(symbolsOffsets[5], False)):\n if groupOff is None:\n entries = None\n else:\n entriesCount, = struct.unpack_from('<I', data, groupOff)\n\n entries = []\n arrayOff = groupOff + 4\n for i in range(entriesCount):\n type, options, id = struct.unpack_from('<BHxI', data, arrayOff)\n arrayOff += 8\n\n entries.append(soundGroup.GroupEntry(type, options, id))\n\n self.groups.append((symb, entries))\n\n # Info part 6: Stream players\n for entryOff, symb in itertools.zip_longest(getInfoEntryOffsets(6),\n readSymbolsList(symbolsOffsets[6], False)):\n if entryOff is None:\n sp = None\n else:\n count, = struct.unpack_from('<B', data, entryOff)\n channels = list(\n struct.unpack_from(f'<{count}B', data, entryOff + 1))\n sp = soundStreamPlayer.StreamPlayer(channels)\n\n self.streamPlayers.append((symb, sp))\n\n # Info part 7: Streams\n for entryOff, symb in itertools.zip_longest(getInfoEntryOffsets(7),\n readSymbolsList(symbolsOffsets[7], False)):\n if entryOff is None:\n strm = None\n else:\n fileID, unk02, volume, priority, playerID, unk07 = \\\n struct.unpack_from('<HH4B', data, entryOff)\n strm = soundStream.STRM(files[fileID], unk02, volume, priority, playerID, unk07)\n strm.dataMergeOptimizationID = fileID\n\n self.streams.append((symb, strm))\n\n\n # If the symbols block size is definitely padded, record that\n if symbolsBlockSize % 4 == 0 and lastEndOfString % 4 != 0:\n self.padSymbSizeTo4InSDATHeader = True", "def load_specs(self, filename):\n self.filename = filename\n # Add loading functionality here", "def load_specs(self, filename):\n self.filename = filename\n # Add loading functionality here", "def test_create_seqstructs(self):\n with open(\"./support_files/cs.fasta\") as fin:\n obs = create_seqstructs(fin, 10)\n self.assertEqual(obs, self.seqstruct)", "def build_specfile_header(spec):\n str = \"\"\n\n # first the mandatory sections\n mandatory_header_fields = {\n 'NAME' : '%%define name %s\\nName: %%{name}\\n',\n 'VERSION' : '%%define version %s\\nVersion: %%{version}\\n',\n 'PACKAGEVERSION' : '%%define release %s\\nRelease: %%{release}\\n',\n 'X_RPM_GROUP' : 'Group: %s\\n',\n 'SUMMARY' : 'Summary: %s\\n',\n 'LICENSE' : 'License: %s\\n',\n }\n\n str = str + SimpleTagCompiler(mandatory_header_fields).compile( spec )\n\n # now the optional tags\n optional_header_fields = {\n 'VENDOR' : 'Vendor: %s\\n',\n 'X_RPM_URL' : 'Url: %s\\n',\n 'SOURCE_URL' : 'Source: %s\\n',\n 'SUMMARY_' : 'Summary(%s): %s\\n',\n 'ARCHITECTURE' : 'BuildArch: %s\\n',\n 'X_RPM_DISTRIBUTION' : 'Distribution: %s\\n',\n 'X_RPM_ICON' : 'Icon: %s\\n',\n 'X_RPM_PACKAGER' : 'Packager: %s\\n',\n 'X_RPM_GROUP_' : 'Group(%s): %s\\n',\n\n 
'X_RPM_REQUIRES' : 'Requires: %s\\n',\n 'X_RPM_PROVIDES' : 'Provides: %s\\n',\n 'X_RPM_CONFLICTS' : 'Conflicts: %s\\n',\n 'X_RPM_BUILDREQUIRES' : 'BuildRequires: %s\\n',\n\n 'X_RPM_SERIAL' : 'Serial: %s\\n',\n 'X_RPM_EPOCH' : 'Epoch: %s\\n',\n 'X_RPM_AUTOREQPROV' : 'AutoReqProv: %s\\n',\n 'X_RPM_EXCLUDEARCH' : 'ExcludeArch: %s\\n',\n 'X_RPM_EXCLUSIVEARCH' : 'ExclusiveArch: %s\\n',\n 'X_RPM_PREFIX' : 'Prefix: %s\\n',\n\n # internal use\n 'X_RPM_BUILDROOT' : 'BuildRoot: %s\\n',\n }\n\n # fill in default values:\n # Adding a BuildRequires renders the .rpm unbuildable under systems which\n # are not managed by rpm, since the database to resolve this dependency is\n # missing (take Gentoo as an example)\n #if 'X_RPM_BUILDREQUIRES' not in spec:\n # spec['X_RPM_BUILDREQUIRES'] = 'scons'\n\n if 'X_RPM_BUILDROOT' not in spec:\n spec['X_RPM_BUILDROOT'] = '%{_tmppath}/%{name}-%{version}-%{release}'\n\n str = str + SimpleTagCompiler(optional_header_fields, mandatory=0).compile( spec )\n\n # Add any extra specfile definitions the user may have supplied.\n # These flags get no processing, they are just added.\n # github #3164: if we don't turn off debug package generation\n # the tests which build packages all fail. If there are no\n # extra flags, default to adding this one. If the user wants\n # to turn this back on, supply the flag set to None.\n\n if 'X_RPM_EXTRADEFS' not in spec:\n spec['X_RPM_EXTRADEFS'] = ['%global debug_package %{nil}']\n for extra in spec['X_RPM_EXTRADEFS']:\n str += extra + '\\n'\n\n return str", "def parse_ucode_file(opts):\n with open(opts.container_file, \"rb\") as ucode_file:\n print(\"Microcode patches in %s:\" % (opts.container_file))\n\n # Seek to end of file to determine file size\n ucode_file.seek(0, 2)\n end_of_file = ucode_file.tell()\n\n # Check magic number\n ucode_file.seek(0, 0)\n if ucode_file.read(4) != b'DMA\\x00':\n print(\"ERROR: Missing magic number at beginning of container\")\n sys.exit()\n\n # Read the equivalence table length\n ucode_file.seek(EQ_TABLE_LEN_OFFSET, 0)\n eq_table_len = read_int32(ucode_file)\n\n ids = parse_equiv_table(ucode_file, eq_table_len)\n\n cursor = EQ_TABLE_OFFSET + eq_table_len\n while cursor < end_of_file:\n # Seek to the start of the patch information\n ucode_file.seek(cursor, 0)\n\n patch_start = cursor + 8\n\n patch_type = read_int32(ucode_file)\n if patch_type != 1:\n print(\"Invalid patch identifier: %#010x\" % (patch_type))\n break\n\n patch_length = read_int32(ucode_file)\n ucode_file.seek(4, 1)\n ucode_level = read_int32(ucode_file)\n ucode_file.seek(16, 1)\n equiv_id = read_int16(ucode_file)\n\n if not equiv_id in ids:\n print(\"Patch equivalence id not present in equivalence table (%#06x)\"\n % (equiv_id))\n\n cursor = cursor + patch_length + 8\n continue\n\n cpu_id = ids[equiv_id]\n\n # The cpu_id is the equivalent to CPUID_Fn00000001_EAX\n family = (cpu_id >> 8) & 0xf\n family += (cpu_id >> 20) & 0xff\n\n model = (cpu_id >> 4) & 0xf\n model |= (cpu_id >> 12) & 0xf0\n\n stepping = cpu_id & 0xf\n\n print(\" Family=%#04x Model=%#04x Stepping=%#04x: Patch=%#010x Length=%u bytes\"\n % (family, model, stepping, ucode_level, patch_length))\n\n if opts.extract:\n extract_patch(opts, patch_start, patch_length, ucode_file,\n ucode_level)\n\n cursor = cursor + patch_length + 8", "def parse_file(self):\n for num, line in enumerate(self._text):\n if \"CRYSTAL STRUCTURE SOLUTION\" in line:\n line = line.strip().strip('+').strip()\n if 'SHELXTL' in line:\n self.version = 'SHELXT ' + line.split()[-1]\n if 
line.strip().startswith('R1 Rweak Alpha'):\n for n in range(100):\n if not self._text[num + 1 + n]:\n break\n if self._text[num + 1]:\n self.solutions[self._text[num + 1 + n][58:76].strip()] = self._text[num + 1 + n][37:51].strip()", "def test_identify_names2(tmpdir):\n code_str = b\"\"\"\n'''\nTitle\n-----\n\nThis is an example.\n'''\n# -*- coding: utf-8 -*-\n# \\xc3\\x9f\nfrom a.b import c\nimport d as e\nimport h.i\nprint(c)\ne.HelloWorld().f.g\nh.i.j()\n\"\"\"\n expected = {\n 'c':\n [{\n 'name': 'c',\n 'module': 'a.b',\n 'module_short': 'a.b',\n 'is_class': False,\n 'is_explicit': False,\n }],\n 'e.HelloWorld':\n [{\n 'name': 'HelloWorld',\n 'module': 'd',\n 'module_short': 'd',\n 'is_class': False,\n 'is_explicit': False,\n }],\n 'h.i.j':\n [{\n 'name': 'j',\n 'module': 'h.i',\n 'module_short': 'h.i',\n 'is_class': False,\n 'is_explicit': False,\n }],\n }\n\n fname = tmpdir.join(\"identify_names.py\")\n fname.write(code_str, 'wb')\n\n _, script_blocks = split_code_and_text_blocks(fname.strpath)\n res = sg.identify_names(script_blocks)\n\n assert expected == res\n\n code_str = b\"\"\"\n'''\nTitle\n-----\n\nThis example uses :func:`k.l` and :meth:`~m.n`.\n'''\n\"\"\" + code_str.split(b\"'''\")[-1]\n expected['k.l'] = [{u'module': u'k', u'module_short': u'k', u'name': u'l',\n 'is_class': False, 'is_explicit': True}]\n expected['m.n'] = [{u'module': u'm', u'module_short': u'm', u'name': u'n',\n 'is_class': False, 'is_explicit': True}]\n\n fname = tmpdir.join(\"identify_names.py\")\n fname.write(code_str, 'wb')\n _, script_blocks = split_code_and_text_blocks(fname.strpath)\n res = sg.identify_names(script_blocks)\n\n assert expected == res", "def _parse(self, content):\n os.environ['ASTER_VERSION_DIR'] = self.dirn\n cfg = {}\n self._content = content\n for l in split_endlines(self._content):\n if not re.search('^[ ]*#', l):\n try:\n typ, nam, ver, val = l.split('|')\n #print '========>', typ, '//', nam, '//', ver, '//', val\n typ = re.sub('^[ ]*', '', re.sub('[ ]*$', '', typ)).strip()\n val = re.sub('^[ ]*', '', re.sub('[ ]*$', '', val)).strip()\n if val != '':\n val = osp.expandvars(val)\n if cfg.has_key(typ):\n cfg[typ].append(val)\n else:\n cfg[typ] = [val]\n except ValueError:\n pass\n return cfg" ]
[ "0.6069532", "0.602706", "0.5843338", "0.5768507", "0.5756641", "0.571061", "0.5692741", "0.5651732", "0.55933094", "0.5569754", "0.5564679", "0.5557777", "0.5535603", "0.54826546", "0.5445141", "0.5437613", "0.5425164", "0.535459", "0.53148675", "0.529252", "0.5287907", "0.5286041", "0.5281195", "0.5281195", "0.52748376", "0.5258062", "0.52541006", "0.52459306", "0.5235894", "0.52304167" ]
0.65098464
0
If the flow in the pipe is laminar, you can use the Poiseuille Equation to calculate the flow rate mu = 0.001 @ 25 degrees C Q = (pi (D4) delta_p) / (128 mu pipe_length)
def pois_metric(pipe_diameter, delta_p, pipe_length): mu = 0.001 # water @ 25 degrees C pois = mu * 10 flow_rate_lam = (math.pi * (pipe_diameter ** 4) * delta_p) / (128 * pois * pipe_length) return flow_rate_lam
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bern_metric(pipe_diameter, delta_p, pipe_length):\n fr_c = 0.003 # assuming Reynolds number is 10**5 and pipe material is smooth copper\n fr_reyn = 0.046 / (reynolds_num(pipe_diameter, delta_p, pipe_length) ** 0.2) # Taitel and Dukler approximation\n rho = 1000 # density of water @ 4 deg celsius (kg/m**3)\n\n v = math.sqrt((2 * delta_p) / (rho * (4 * fr_reyn * (pipe_length / pipe_diameter) - 1)))\n flow_rate_turb = v * ((math.pi / 4) * (pipe_diameter ** 2))\n\n return flow_rate_turb, v", "def bern_max_metric(pipe_diameter, delta_p):\n\n rho = 1000 # density of water kg/m^3\n flow_rate_max = ((math.pi * (pipe_diameter**2)) / 4) * math.sqrt((2 * delta_p) / rho)\n\n return flow_rate_max", "def solar_ppa():\n per_kwh = 0.196 # [$/kWh]\n\n return per_kwh", "def Piping(T_in, p_in, m_dot, d_inner, l_pipe, f, epsilon_pipe, T_shield, N):\r\n\r\n ## Estimation of the influence of the arcs\r\n # Calculation according to VDI Heatatlas 2013\r\n # Assumption isoenthalpic flow\r\n state_Arc = FlowRestriction(T_in, p_in, m_dot, d_inner, f)\r\n p_Arc = state_Arc.get(\"p\")\r\n T_Arc = state_Arc.get(\"T\")\r\n\r\n ## Estimation of the influence of thermal radiation on the compressible flow\r\n\r\n # Emission coefficent for an enclosed vessel\r\n # Assuming much bigger hot surface -> emissivity of hot surface doesnt matter anymore, just the cold one\r\n # Thus the simple equation can be used\r\n q_pipe = epsilon_pipe * sp.constants.Stefan_Boltzmann * (T_shield**4 - T_Arc**4) #W\r\n\r\n # Calling of the function SimplePipe\r\n state_out = SimplePipe(T_Arc, p_Arc, m_dot, d_inner, l_pipe, N, 0, q_pipe)\r\n #Transfer results\r\n p_out = state_out.get(\"p\")\r\n T_out = state_out.get(\"T\")\r\n h_out = state_out.get(\"h\")\r\n state_out = {\"h\": h_out, \"T\": T_out, \"p\": p_out}\r\n\r\n return state_out", "def get_flow(self, pressure_drop, coeff): \n flow = coeff * pressure_drop**0.5\n return flow", "def __mu_calc(self, mu_pi):\n return mu_pi[1] / (mu_pi[0] + mu_pi[1])", "def _calculate_pipe_transmittance_values(self):\n if self.age['YEAR'] >= 1995:\n phi_pipes = [0.2, 0.3, 0.3]\n # elif 1985 <= self.age['built'] < 1995 and self.age['HVAC'] == 0:\n elif 1985 <= self.age['YEAR'] < 1995:\n phi_pipes = [0.3, 0.4, 0.4]\n else:\n phi_pipes = [0.4, 0.4, 0.4]\n return phi_pipes", "def pump_rate(self, ml_per_min):\n ml_per_step = int(syringe_size / max_steps) # calculate volume of one step\n step_per_min = int(ml_per_min / ml_per_step) # calculate steps per min from mL/min\n half_step_per_sec = int((2 * step_per_min) / 60) # calculate Hz (half steps per second) from steps per min\n if ml_per_min == \"default\": # if we can't be bothered\n return \"\"\n elif half_step_per_sec in range(top_velocity + 1): # if we actually want to do this\n return \"V\" + str(half_step_per_sec)\n else:\n pass # todo: error handling", "def LL_Rate(Uion,E):\n\treturn (4.0/E) * ((2*Uion)**2.5) * np.exp(-(2.0/3.0)*((2*Uion)**1.5)/E)", "def get_Pn(f, L, S_lp, S_ac): \r\n # single-link optical metrology noise (Hz^{-1}), Equation (10)\r\n P_oms = S_lp**2 \r\n # single test mass acceleration noise, Equation (11)\r\n P_acc = S_ac**2*(1. + 0.1e-3/f) \r\n # total noise in Michelson-style LISA data channel, Equation (12)\r\n Pn = (P_oms + 4.*P_acc/(2.*pi*f)**4.)/L**2. 
\r\n return Pn", "def mtof(p):\n return 440.0 * 2 ** ((p - 69) / 12.0)", "def liqpressure(temp):\n tau = temp/_TTP\n pres = 1.\n for (a,b) in _C_PMELT:\n pres += a * (1 - tau**b)\n pres *= _PTPE\n return pres", "def waveparameterh(L):\r\n return 8.13 - ((250 - 0.7 * L) / 125) ** 3", "def u(x, y, l, p):\n\n # Helical beam has a radially symmetrical amplitude,\n # so the amplitude function is only dependent on the\n # distance from the origin to the x, y coordinates.\n r = rho(x,y)\n\n # Evaluate the equation from Sundbeck.\n return (-1)**p * (np.sqrt(2) * r/w)**l * \\\n sp.genlaguerre(p, l)(2 * r**2 / w**2) * \\\n np.exp(- r**2 / w**2)", "def pulsatile_flow(r, p0, pn, phi, timestep, grid, ru=1060, mu=.0035, freq=1.5):\n ofst = int(np.round(grid / 2))\n rxl = int(np.round(3 * ofst / 4))\n h = r / rxl\n nw = pn.size\n omega = 2 * np.pi * freq\n u = np.zeros((timestep, grid, grid))\n zt = np.zeros(timestep + 1, np.complex)\n alpha = r * np.sqrt(omega * ru / mu)\n kapa = alpha * 1j ** 1.5 / r\n\n snw = nw * (nw + 1) / 2\n # alpha = alpha * np.sqrt(snw)\n for k in range(timestep):\n t = (k + 1) / timestep / freq\n for l in range(nw):\n zt[k] += pn[l] * np.exp(1j * (omega * t * (l + 1) - phi[l]))\n\n CJA = special.jv(0, kapa * r)\n for m in range(-rxl, rxl):\n for n in range(-rxl, rxl):\n for k in range(timestep):\n ri = np.sqrt(m ** 2 + n ** 2)\n if ri * h < r:\n CBJ0 = special.jv(0, kapa * h * ri)\n u[k, m + ofst, n + ofst] = p0 * ((ri * h) ** 2 - r ** 2) / 4 / mu + np.real(\n 1j / ru / omega / snw * (1 - CBJ0 / CJA) * zt[k])\n\n return u / u.max()", "def lunarperigee(time):\n dtor = np.pi / 180\n t1 = 1 + time\n t2 = t1 * t1\n t3 = t2 * t1\n perigee = (\n 334.329653 * dtor\n + 4069.0340329575 * dtor * t1\n - 0.010325 * dtor * t2\n - 1.2e-5 * dtor * t3\n )\n return perigee", "def wind_ppa():\n per_kwh = 0.0384 # [$/kWh]\n\n return per_kwh", "def get_stream_function_vortex(strength, xv, yv, X, Y):\r\n psi = strength / (4 * math.pi) * numpy.log((X - xv)**2 + (Y - yv)**2)\r\n \r\n return psi", "def mass_flow_rate(rho, u, A):\n\n return rho * u * A", "def v(self):\n\n # TODO This translation formula works, but needs simplified.\n\n # PWM duration can go from 0 to 4095 with 4095 representing max rpm\n# print(\"MuleBot.v MuleBot.dcMotorPWMDurationLeft:\", MuleBot.dcMotorPWMDurationLeft)\n speed_percentage = float(MuleBot.dcMotorPWMDurationLeft) / 4095.0\n# print(\"speed_percentage: \", speed_percentage)\n\n rpm = speed_percentage * self.motorMaxRPM\n# print(\"rpm: \", rpm)\n\n secondsPerMinute = 60\n revs_per_second = rpm / secondsPerMinute\n# print(\"--revs_per_second\", revs_per_second)\n\n inches_per_rev = 2.0 * math.pi * MuleBot.WHEEL_RADIUS\n INCHES_PER_METER = 39.3701\n meters_per_rev = inches_per_rev / INCHES_PER_METER\n# print(\"--meters_per_rev\", meters_per_rev)\n\n meters_per_second = meters_per_rev * revs_per_second\n\n# print(\"--meters_per_second: \", meters_per_second)\n return meters_per_second", "def test_ul_per_mm_continuous(pipette_model: PipetteModel) -> None:\n config = pipette_config.load(pipette_model)\n aspirate = config.ul_per_mm[\"aspirate\"]\n dispense = config.ul_per_mm[\"dispense\"]\n min_vol = 0.000001 # sufficiently small starting volume\n for lno in range(len(aspirate) - 1):\n line = aspirate[lno]\n curr_max_vol = line[0]\n # find a halfway point roughly between max and min volume for a given\n # piecewise sequence of a pipette function\n half_max_vol = (curr_max_vol - min_vol) / 2 + min_vol\n\n min_ul_per_mm = line[1] * min_vol + line[2]\n mid_ul_per_mm = line[1] * 
half_max_vol + line[2]\n max_ul_per_mm = line[1] * curr_max_vol + line[2]\n\n lower_mm = min_ul_per_mm / min_vol\n higher_mm = max_ul_per_mm / curr_max_vol\n half_mm = mid_ul_per_mm / half_max_vol\n\n range_1 = (half_mm >= lower_mm) and (half_mm <= higher_mm)\n range_2 = (half_mm <= lower_mm) and (half_mm >= higher_mm)\n\n assert range_1 or range_2\n\n min_vol = curr_max_vol\n # make sure the mm of movement for max aspirate and max dispense agree\n aspirate_seq = aspirate[len(aspirate) - 1]\n dispense_seq = dispense[len(dispense) - 1]\n pip_max_vol = config.max_volume\n aspirate_mm = (aspirate_seq[1] * pip_max_vol + aspirate_seq[2]) / pip_max_vol\n dispense_mm = (dispense_seq[1] * pip_max_vol + dispense_seq[2]) / pip_max_vol\n # for many of the older pipettes, the aspirate and dispense values are\n # not the same.\n assert isclose(round(aspirate_mm), round(dispense_mm))", "def waveparameterc(L):\r\n\r\n if 65 <= L < 90:\r\n return (118 - 0.36 * L) * (L / 1000)\r\n if 90 <= L < 300:\r\n return 10.75 - ((300-L) / 100) ** 1.5\r\n if 300 <= L <= 350:\r\n return 10.75\r\n if L > 350:\r\n return 10.75 - ((L-350) / 150) ** 1.5", "def hw_func(self):\n i, o = self.inl[0].to_flow(), self.outl[0].to_flow()\n\n if abs(i[0]) < 1e-4:\n return i[1] - o[1]\n\n v_i = v_mix_ph(i, T0=self.inl[0].T.val_SI)\n v_o = v_mix_ph(o, T0=self.outl[0].T.val_SI)\n flow_dir = np.sign(i[0])\n\n return ((i[1] - o[1]) * flow_dir -\n (10.67 * abs(i[0]) ** 1.852 * self.L.val /\n (self.ks.val ** 1.852 * self.D.val ** 4.871)) *\n (9.81 * ((v_i + v_o) / 2) ** 0.852))", "def get_psi(data):\n max_sensor_psi = 100 # Also 30\n psi = (data - 0.51) * (max_sensor_psi / 4)\n psi = round(psi, 0)\n return psi", "def vol(x):\r\n return pi*(topdia(x)/2000.)**2 * length (x)", "def mi_pressure_vessel(self, u):\n assert len(u) == 4, 'MI Pressure vessel design needs to specify 4 parameters.'\n R = u[0]\n L = u[1]\n ts = u[2]\n th = u[3]\n fitness = 0.6224 * R * ts * L + 1.7781 * R ** 2 * th + 3.1611 * ts ** 2 * L + 19.8621 * R * ts ** 2\n return fitness", "def phi2_coefficient(L):\r\n\r\n if 0 < L < 120:\r\n return L / 120\r\n if L >= 120:\r\n return 1", "def phase_velocity(self):\n return 1/np.sqrt(self.mu*self.epsilon)", "def Re_feed(F_mass, z_way, d_inner, n_pipe, mu_feed): \n return 0.785 * F_mass * z_way / (d_inner * n_pipe * mu_feed)", "def idealOpAmp():" ]
[ "0.64306885", "0.6375345", "0.6155883", "0.6086178", "0.5807599", "0.57985467", "0.5757301", "0.5742598", "0.57329386", "0.57231694", "0.5673357", "0.56649464", "0.56552863", "0.55984855", "0.5589797", "0.55843973", "0.55824184", "0.5581834", "0.55515593", "0.5546592", "0.5546187", "0.55438596", "0.55245054", "0.55054474", "0.54877937", "0.54791814", "0.54430395", "0.5438936", "0.5437838", "0.54293585" ]
0.7763178
0
Create a Plotly Dash 'A' element that downloads a file from the app.
def file_download_link(filename): location = f"/{UPLOAD_DIRECTORY}/{filename}" return html.A(filename, href=location)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def file_download_link(filename):\n location = \"/download/{}\".format(urlquote(filename))\n return html.A(filename, href=location)", "def download_link(request, job_id, filename):\n template_values = remote_view_util.fill_job_values(request, job_id)\n template_values = remote_view_util.fill_template_values(request, **template_values)\n template_values = catalog_view_util.fill_template_values(request, **template_values)\n template_values['title'] = 'Download area'\n template_values['file_name'] = filename\n return render_to_response('catalog/download_link.html',\n template_values)", "def create_link(self):\n self.filename = App.get_running_app().root.ids.camera_screen.capture()\n self.url = FileSharer(self.filename).share()\n self.ids.label.text = self.url", "def download(self,**attrs):\n\t\treturn super().download(**attrs)", "def upload(cls, dashboard, filename, sharing=\"public\", auto_open=True):\n if sharing == \"public\":\n world_readable = True\n elif sharing == \"private\":\n world_readable = False\n elif sharing == \"secret\":\n world_readable = False\n\n data = {\n \"content\": json.dumps(dashboard),\n \"filename\": filename,\n \"world_readable\": world_readable,\n }\n\n file_info = _create_or_update(data, \"dashboard\")\n\n url = file_info[\"web_url\"]\n\n if sharing == \"secret\":\n url = add_share_key_to_url(url)\n\n if auto_open:\n webbrowser.open_new(file_info[\"web_url\"])\n\n return url", "def createLink(self, downloadUrl, title):\n newUrl = downloadUrl.replace(\"details\", \"download\") \n return self.url + '/' + newUrl", "def static(filename):\n return href.static(file=filename)", "def download():\n \n browser.find_element_by_xpath('//*[@id=\"ctl00_contentPlaceHolder_divAllVariablesPerYear2012\"]/div[2]/div[2]/div[1]/a').click()", "def generate(self, node, file):\n file.write(Html.generate_element('a', node.get_html_attributes(), node._argument))", "def plot(figure_or_data, show_link=True, link_text='Export to plot.ly',\n validate=True, output_type='file', include_plotlyjs=True,\n filename='temp-plot.html', auto_open=True, image=None,\n image_filename='plot_image', image_width=800, image_height=600):\n if output_type not in ['div', 'file']:\n raise ValueError(\n \"`output_type` argument must be 'div' or 'file'. \"\n \"You supplied `\" + output_type + \"``\")\n if not filename.endswith('.html') and output_type == 'file':\n warnings.warn(\n \"Your filename `\" + filename + \"` didn't end with .html. 
\"\n \"Adding .html to the end of your file.\")\n filename += '.html'\n\n config = {}\n config['showLink'] = show_link\n config['linkText'] = link_text\n\n plot_html, plotdivid, width, height = _plot_html(\n figure_or_data, config, validate,\n '100%', '100%', global_requirejs=False)\n\n resize_script = ''\n if width == '100%' or height == '100%':\n resize_script = (\n ''\n '<script type=\"text/javascript\">'\n 'window.removeEventListener(\"resize\");'\n 'window.addEventListener(\"resize\", function(){{'\n 'Plotly.Plots.resize(document.getElementById(\"{id}\"));}});'\n '</script>'\n ).format(id=plotdivid)\n\n if output_type == 'file':\n with open(filename, 'w') as f:\n if include_plotlyjs:\n plotly_js_script = ''.join([\n '<script type=\"text/javascript\">',\n get_plotlyjs(),\n '</script>',\n ])\n else:\n plotly_js_script = ''\n\n if image:\n if image not in __IMAGE_FORMATS:\n raise ValueError('The image parameter must be one of the '\n 'following: {}'.format(__IMAGE_FORMATS))\n # if the check passes then download script is injected.\n # write the download script:\n script = get_image_download_script('plot')\n script = script.format(format=image,\n width=image_width,\n height=image_height,\n filename=image_filename,\n plot_id=plotdivid)\n else:\n script = ''\n\n f.write(''.join([\n '<html>',\n '<head><meta charset=\"utf-8\" /></head>',\n '<body>',\n plotly_js_script,\n plot_html,\n resize_script,\n script,\n '</body>',\n '</html>']))\n\n url = 'file://' + os.path.abspath(filename)\n if auto_open:\n webbrowser.open(url)\n\n return url\n\n elif output_type == 'div':\n if include_plotlyjs:\n return ''.join([\n '<div>',\n '<script type=\"text/javascript\">',\n get_plotlyjs(),\n '</script>',\n plot_html,\n '</div>'\n ])\n else:\n return plot_html", "def download(self):\n\n with open(self.dataset_path) as dataset_file:\n dataset = json.load(dataset_file)\n\n path = \"\".join([POST_HIT_PATH, dataset[\"dataset\"][\"data_path\"]])\n if not os.path.exists(path):\n os.makedirs(path)\n\n protocole = dataset[\"dataset\"][\"protocole\"]\n\n download_links = []\n\n for resource in dataset[\"dataset\"][\"resources\"]:\n file_path = \"\".join([path, resource[\"filename\"]])\n\n #Check if the the download link has not been used before (One download link for all)\n if resource[\"download_link\"] not in download_links:\n \n print(\"DOWNLOADING : {}\".format(resource[\"filename\"]))\n f = urllib.request.urlopen(resource[\"download_link\"])\n data = f.read()\n with open(file_path, \"wb\") as donwload_file:\n donwload_file.write(data)\n\n download_links.append(resource[\"download_link\"])\n\n \n #Extract all files from the tar archives if necessary\n if tarfile.is_tarfile(file_path):\n tf = tarfile.open(file_path)\n tf.exractall()", "def getDownloadLink(self):\n context = aq_inner(self.context)\n type = context.file.getContentType()\n extension = ''\n \n if BLOB_SUPPORT:\n if hasattr(context.file, 'getBlob'):\n # return a view that return the aquisition-wrapped object \n if type.startswith('audio/'):\n extension = '?e=.mp3'\n return context.absolute_url() + '/download' + extension\n \n # Fallback for media-files added before blob-support in operun.media.\n # context.file.absolute_url() doesn't return file-extensions, so we do some guessing. 
\n else:\n if type.startswith('audio/'):\n extension = '?e=.mp3'\n if type.startswith('video/'):\n extension = '?e=.flv'\n return context.file.absolute_url() + extension \n\n else:\n # get the file without plone.app.blob \n return context.absolute_url() + '/' + context.getFileName()", "def handle_as_data_url(view: View, point: int, ext: str, encoded: str):\n\n # create a temporary file\n tmp_file = osp.join(TEMP_DIR, \"tmp_data_image.\" + ext)\n file_hash = int(hashlib.sha1(encoded.encode('utf-8')\n ).hexdigest(), 16) % (10 ** 8)\n name = str(file_hash) + \".\" + ext\n\n # Save downloaded data in the temporary file\n try:\n dst = open(tmp_file, \"wb\")\n dst.write(base64.b64decode(encoded))\n except Exception as e:\n print(e)\n return\n finally:\n dst.close()\n\n real_width, real_height, size = get_image_size(tmp_file)\n width, height = get_dimensions(view, tmp_file)\n size = str(size // 1024) + \"KB\" if size >= 1024 else str(size) + 'B'\n\n def on_navigate(href):\n\n if href == \"save\":\n save(tmp_file, name, \"data_url\")\n elif href == \"save_as\":\n convert(tmp_file, \"data_url\", name)\n else:\n sublime.active_window().open_file(tmp_file)\n\n view.show_popup(\n TEMPLATE % (width, height, ext, encoded, real_width, real_height, size),\n sublime.HIDE_ON_MOUSE_MOVE_AWAY,\n point,\n *view.viewport_extent(),\n on_navigate=on_navigate\n )", "def cli(ctx, dataset_collection_id, file_path):\n return ctx.gi.dataset_collections.download_dataset_collection(dataset_collection_id, file_path)", "def get_download_url(self, ha):\n return create_ipa_url(ha)", "def download_file(self, file_name):\n\n link = self.UTILS.element.getElement(('css selector', 'a[href=\"{}\"]'.format(file_name)),\n 'The file [{}] to download'.format(file_name), True, 10)\n link.tap()", "def upload(cls, presentation, filename, sharing=\"public\", auto_open=True):\n if sharing == \"public\":\n world_readable = True\n elif sharing in [\"private\", \"secret\"]:\n world_readable = False\n else:\n raise _plotly_utils.exceptions.PlotlyError(SHARING_ERROR_MSG)\n data = {\n \"content\": json.dumps(presentation),\n \"filename\": filename,\n \"world_readable\": world_readable,\n }\n\n file_info = _create_or_update(data, \"spectacle_presentation\")\n\n url = file_info[\"web_url\"]\n\n if sharing == \"secret\":\n url = add_share_key_to_url(url)\n\n if auto_open:\n webbrowser.open_new(file_info[\"web_url\"])\n\n return url", "def download(self, download_path):\n return", "def create_figure_file():\n\n # Any plotly compliant dict or list that can be converted to json. You can use the Plotly python sdk to construct figures, by adding it to requirements.txt\n fig = {\"data\": [{\"x\": [\"giraffes\", \"orangutans\", \"monkeys\"],\n \"y\": [20, 14, 23],\n \"type\": \"bar\"\n }]}\n\n # Dump the dict to a plain text json file. Note that for more advanced data (e.g. including numpy arrays etc) you\n # may wish to use the serialiser provided with the plotly library\n name = analysis.output_dir + '/zoo_barchart.json'\n with open(name, 'w') as outfile:\n json.dump(fig, outfile)\n\n # You can either do this here, or in your main run() function definition (or basically anywhere else you like)...\n # but you need to add the created file (which is part of the analysis results) to the output results manifest. In\n # this case we do it here, which has the advantage of keeping file creation and manifesting together; but has the\n # disadvantage of needing to modify your code to pass the analysis around. 
If you're unable to alter the API of your\n # code; no problem - just do all your manifest creation separately (e.g. at the end of the run function)\n fig_data = {'name': name,\n 'short_caption': 'A shortened caption',\n 'caption': 'A longer caption, perhaps including some description of why on earth we would want to see a bar chart of different zoo animals'}\n # TODO add_to_manifest('figure', name, fig_data)", "def download_link(self, handle):\n return None", "def download():\n if auth.has_membership(1):\n user = \"Admin\"\n elif auth.has_membership(2):\n user = \"Examiner\"\n elif auth.has_membership(3):\n user = \"student\"\n elif auth.has_membership(5):\n user = \"Managment\"\n\n db.activity_log.insert( Title_entry=\"Download assignment\", \n referance_id=auth.user.id,\n remarks=\"content downloaded by {}\".format(user))\n db.commit()\n return response.download(request, db)", "def download(self):\n pass", "def download(self):\n pass", "def download(args):\n with_dataset(args, Dataset._download)", "def download_link(object_to_download, download_filename, download_link_text):\n if isinstance(object_to_download,pd.DataFrame):\n object_to_download = object_to_download.to_csv(index=False)\n\n # some strings <-> bytes conversions necessary here\n b64 = base64.b64encode(object_to_download.encode()).decode()\n\n return f'<a href=\"data:file/txt;base64,{b64}\" download=\"{download_filename}\">{download_link_text}</a>'", "def download_link(object_to_download, download_filename, download_link_text):\n if isinstance(object_to_download,pd.DataFrame):\n object_to_download = object_to_download.to_csv(index=False)\n\n # some strings <-> bytes conversions necessary here\n b64 = base64.b64encode(object_to_download.encode()).decode()\n\n return f'<a href=\"data:file/txt;base64,{b64}\" download=\"{download_filename}\">{download_link_text}</a>'", "def download_link(object_to_download, download_filename, download_link_text):\n if isinstance(object_to_download,pd.DataFrame):\n object_to_download = object_to_download.to_csv(index=False)\n\n # some strings <-> bytes conversions necessary here\n b64 = base64.b64encode(object_to_download.encode()).decode()\n\n return f'<a href=\"data:file/txt;base64,{b64}\" download=\"{download_filename}\">{download_link_text}</a>'", "def download_file(self, parsed_event, input_dir_path):", "def download_link(object_to_download, download_filename, download_link_text):\n if isinstance(object_to_download, pd.DataFrame):\n object_to_download = object_to_download.to_csv(index=False)\n\n # some strings <-> bytes conversions necessary here\n b64 = base64.b64encode(object_to_download.encode()).decode()\n\n return f'<a href=\"data:file/txt;base64,{b64}\" download=\"{download_filename}\">{download_link_text}</a>'", "def download_link(object_to_download, download_filename, download_link_text):\n if isinstance(object_to_download, pd.DataFrame):\n object_to_download = object_to_download.to_csv(index=False, sep = ';')\n\n # some strings <-> bytes conversions necessary here\n b64 = base64.b64encode(object_to_download.encode(\"latin1\")).decode()\n\n return f'<a href=\"data:file/txt;base64,{b64}\" download=\"{download_filename}\">{download_link_text}</a>'" ]
[ "0.6015341", "0.56226736", "0.55951804", "0.552248", "0.5511452", "0.5382601", "0.53783137", "0.53105325", "0.5288451", "0.52870387", "0.5222768", "0.5213722", "0.5212254", "0.5193048", "0.5184498", "0.5182813", "0.51763964", "0.51701194", "0.5161087", "0.5158966", "0.515461", "0.51303816", "0.51303816", "0.5122473", "0.5117201", "0.5117201", "0.5117201", "0.5116391", "0.51104707", "0.5103775" ]
0.61712605
0
Locate the value for a grounded node and its parents in a rule set, return -1 if not found. For functors with binary ranges, when all parents match but the child's value does not, return 1 - prob for the other value.
def ruleMatch (ruleSet, node, parents):
    def getProb (node):
        for rule in ruleSet:
            #print rule
            if (rule.child.eq(node) and len(rule.parentList)==len(parents) and all([n[0].eq(n[1]) for n in zip(rule.parentList,parents)])):
                #print "winning eq", [n for n in zip(rule.parentList,parents)]
                return rule.prob
        else:
            return -1
    prob = getProb (node)
    if prob == -1 and functorRangeSize(node.functor) == 2:
        tn = copy.copy(node)
        tn.val = functorOtherValue(tn.functor, tn.val)
        prob = getProb (tn)
        if prob != -1:
            return 1 - prob
        else:
            return prob
    return prob
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getValue(self):\n r = 1 if self.left.getValue() <= self.right.getValue() else 0\n return r", "def find(self, node):\n if not node:\n return 0\n\n left = self.find(node.left)\n right = self.find(node.right)\n cur = 1 # node.val\n path = 1\n if left and node.left.val == node.val:\n path += left\n cur = left + 1\n\n if right and node.right.val == node.val:\n path += right\n if right > left:\n cur = right + 1\n\n self.ret = max(self.ret, path - 1)\n return cur", "def find(self, node):\n if not node:\n return 0\n\n left = self.find(node.left)\n right = self.find(node.right)\n left_path = left + 1 if node.left and node.left.val == node.val else 0\n right_path = right + 1 if node.right and node.right.val == node.val else 0\n self.ret = max(self.ret, left_path + right_path)\n return max(left_path, right_path)", "def getValue(self):\n r = 1 if self.left.getValue() >= self.right.getValue() else 0\n return r", "def search(self):\n open_set = set()\n closed_set = set()\n open_set.add(self.start_node)\n\n # loop through all nodes until open set is empty to build neighbor map\n while open_set:\n current_node = open_set.pop()\n closed_set.add(current_node)\n for removed_cells, score, next_status in current_node.find_next_moves():\n open_status_set = [i.status for i in open_set]\n closed_status_set = [i.status for i in closed_set]\n if next_status in open_status_set:\n index = open_status_set.index(next_status)\n node = list(open_set)[index]\n elif next_status in closed_status_set:\n index = closed_status_set.index(next_status)\n node = list(closed_set)[index]\n else:\n node = PopstarsNode(next_status)\n open_set.add(node)\n node.parents.append(current_node)\n current_node.children[node].append(\n (score, removed_cells, True))\n current_node.update_parents()\n max_score = []\n for i in self.start_node.children:\n max_score += self.start_node.children[i]\n return max(max_score)[0]", "def getValue(self):\n r = 1 if self.left.getValue() == self.right.getValue() else 0\n return r", "def search(self, value):\n node = self\n parent = None\n\n while node is not None:\n\n if value == node.value:\n return node, parent, True\n\n elif value < node.value:\n parent = node\n node = node.left\n\n elif value > node.value:\n parent = node\n node = node.right\n\n return node, parent, False", "def _find(self, value): \n # case 1: look deeper, left\n if self.value > value and self.left is not None:\n return self.left._find(value)\n\n # case 2: look deeper, right\n if self.value < value and self.right is not None:\n return self.right._find(value)\n\n # case 3: found it, or nothing to find\n else:\n return self", "def fn(node):\n if not node: return 0\n return 1 + max(fn(node.left), fn(node.right))", "def fn(node):\n if not node: return 0\n return 1 + max(fn(node.left), fn(node.right))", "def get_node_value(succs, preds):\n ret = 1\n if succs == 0:\n ret *= NODE_ENTRY\n\n if preds == 0:\n ret *= NODE_EXIT\n\n ret *= NODE_NORMAL\n return ret", "def getValue(self):\n r = 1 if self.left.getValue() > self.right.getValue() else 0\n return r", "def getValue(self):\n r = 1 if self.left.getValue() != self.right.getValue() else 0\n return r", "def count_value(tree,val):\r\n if (tree==None):\r\n return 0\r\n elif(value(tree)==val):\r\n return 1+count_value(left(tree), val)+count_value(right(tree), val)\r\n else:\r\n return count_value(left(tree), val)+count_value(right(tree), val)", "def find(self, value):\n if self.value is None:\n raise BinaryTreeValueError(\"Value {} not in tree\")\n\n if self.value == value:\n return 
self.left_length\n\n elif value < self.value:\n # Value is in left side of tree\n return self.left.find(value)\n\n else:\n # Value is in right side of tree\n return self.right.find(value) + self.left_length + 1", "def getValue(self):\n r = 1 if self.left.getValue() < self.right.getValue() else 0\n return r", "def fn(node):\n lc = rc = 0\n lv = rv = node.val \n if node.left: lc, lv = fn(node.left)\n if node.right: rc, rv = fn(node.right)\n if lv == rv == node.val: return lc + rc + 1, node.val \n return lc + rc, None", "def fn(node):\n if not node: return 0 \n h = 1 + max(fn(node.left), fn(node.right))\n seen.setdefault(h, []).append(node.val)\n return h", "def getValue(self):\n if self.left.getValue() != 0.0:\n return 1.0\n if self.right.getValue() != 0.0:\n return 1.0\n return 0.0", "def contains(self,value,parent= None):\n if value == self.node.value: \n return True\n if (value < self.node.value):\n if (self.node.left):\n return self.node.left.contains(value, self.node)\n else: \n return False\n else:\n if (self.node.right):\n return self.node.right.contains(value, self.node)\n else:\n return False", "def fn(node):\n if node: \n (ln, lx), (rn, rx) = fn(node.left), fn(node.right)\n if node in (p, q): return node, 1 + lx + rx\n if ln and rn: return node, lx + rx\n return (ln, lx) if ln else (rn, rx)\n return None, 0", "def fn(node):\n if not node: return 0 \n left, right = fn(node.left), fn(node.right)\n if node.val == x: \n cnt[0], cnt[1] = left, right\n return 1 + left + right", "def heuristic_val(self, node, g_pos, goal_depth, nb_food,nb_ghost, depth):\n ret = 0\n if node.has_food:\n ret += 10 # food at that position\n x = node.i\n y = node.j\n for pos in g_pos:\n x1, y1, _, (scared,_),_,_ = pos\n if x == x1 and y == y1:\n if scared == 0:\n ret -= 1000000 # ghost at that position causing termination\n else:\n ret += 200\n\n if goal_depth >= 0:\n ret += goal_depth*50\n else:\n if self.maxDepth - depth < len(self.moves):\n ret -= dist(self.moves[self.maxDepth - depth],node)\n else:\n ret -= dist(self.goal, node)\n ret += nb_food * 10\n ret += 200*nb_ghost\n\n return ret", "def fn(node):\n if not node: return 0\n if not node.left or not node.right: return 1 + fn(node.left) + fn(node.right)\n return 1 + min(fn(node.left), fn(node.right))", "def _successor(self):\n if self.right is None:\n # get first rightward ancestor\n m = self\n n = m.parent\n while n is not None and m is n.right:\n m = n\n n = n.parent\n else:\n # get leftmost of right child\n n = self.right\n while n.left is not None:\n n = n.left\n return n", "def _lookup(self, data):\n parent, current = None, self.root\n while current:\n if current < data: # data should be in right\n parent, current = current, current.right\n elif current > data: # data should be in left\n parent, current = current, current.left\n else: # equals\n return parent, current\n return parent, current", "def getValue(self):\n if self.left.getValue() == 0.0:\n return 0.0\n if self.right.getValue() == 0.0:\n return 0.0\n return 1.0", "def _find_positive_ancestor(self, refdata, seedindex): \n \n seedval = refdata[seedindex]\n if seedval > self.row_priors[seedindex]: \n return seedindex, -seedval/self.row_priors[seedindex]\n \n # find parents of seed\n parents = self.parents\n seedparents = parents[seedindex]\n parents_len = len(seedparents)\n if parents_len == 0:\n return None, 0\n elif parents_len == 1:\n return self._find_positive_ancestor(refdata, seedparents[0])\n elif parents_len == 2:\n # handle special case when there are only two items\n # instead of 
doing a general query and sort, pick best of two \n r0 = self._find_positive_ancestor(refdata, seedparents[0])\n r1 = self._find_positive_ancestor(refdata, seedparents[1])\n if r1[1] < r0[1]:\n return r1 \n return r0 \n \n # study multiple paths toward root, return most enriched\n result = [self._find_positive_ancestor(refdata, _) for _ in seedparents] \n return min(result, key=itemgetter(1))", "def is_cousin(parent_db, A, B):\n parent_dict = {}\n for item in parent_db:\n if item[0] in parent_dict: #If parent is already in the dictionary, add this child to value (set of children)\n parent_dict[item[0]].add(item[1])\n else:\n parent_dict[item[0]] = {item[1]}\n\n child_dict = {}\n for item in parent_db:\n if item[1] in child_dict: #If child is already in the dictionary, add this parent to value (set of parents)\n child_dict[item[1]].add(item[0])\n else:\n child_dict[item[1]] = {item[0]}\n\n if A==B:\n return None\n\n for parent in parent_dict:\n if A in parent_dict[parent] and B in parent_dict[parent]: #Checking if they share the same parent\n return None\n\n grandparents_A = set()\n for parent in child_dict[A]: #Iterating through parents of A\n for grandparent in child_dict[parent]: #Iterating through parents of parents of A (grandparents of A)\n grandparents_A.add(grandparent)\n\n for parent in child_dict[B]: #Iterating through parents of B\n for grandparent in child_dict[parent]: #Iterating through parents of parents of B (grandparents of B)\n if grandparent in grandparents_A:\n return grandparent\n\n return None", "def evaluate(self,parents):\n for i in range(10):\n try:\n sis,bro = self.crosser(parents)\n if sis.validate() and bro.validate():\n return sis,bro\n except SymbolError: break\n except NoneError:\n print \"hmmm. None for a parent value, try again\"\n print \" This often happens when 'ST' isn't included in rejects list\"\n\n raise ValueError" ]
[ "0.5753097", "0.5747222", "0.57390654", "0.57036936", "0.56338394", "0.55915225", "0.5584634", "0.5555824", "0.5537557", "0.5537557", "0.5511906", "0.54953927", "0.5484709", "0.5481439", "0.5480224", "0.5455876", "0.5380829", "0.53786016", "0.53774273", "0.537409", "0.53558004", "0.5354007", "0.5352297", "0.53145224", "0.5269906", "0.5248043", "0.52408797", "0.523047", "0.52191925", "0.5195177" ]
0.62124074
0
Return default uniform distribution for the range of a functor
def default(functor):
    return 1.0/functorRangeSize(functor)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _uniform(val_range):\r\n return np.random.uniform(val_range[0], val_range[1])", "def initializeDistribution(self):\n self.minVal = min(math.exp(self.upperBound),math.exp(self.lowerBound))\n self.maxVal = max(math.exp(self.upperBound),math.exp(self.lowerBound))", "def glorot_uniform(seed=None):\n return lambda shape, dtype, batch_ndims=0: _initialize( # pylint: disable=g-long-lambda\n shape, dtype, batch_ndims,\n scale=1., mode='fan_avg', distribution='uniform', seed=seed)", "def initializeDistribution(self):\n if (self.lowerBoundUsed == False and self.upperBoundUsed == False):\n self._distribution = distribution1D.BasicWeibullDistribution(self.k,self.lambdaVar,self.low)\n self.lowerBound = self.low\n self.upperBound = sys.float_info.max\n else:\n if self.lowerBoundUsed == False:\n self.lowerBound = self.low\n if self.upperBoundUsed == False:\n self.upperBound = sys.float_info.max\n self._distribution = distribution1D.BasicWeibullDistribution(self.k,self.lambdaVar,self.lowerBound,self.upperBound,self.low)", "def he_uniform(seed=None):\n # pylint: disable=line-too-long\n # pylint: enable=line-too-long\n return lambda shape, dtype, batch_ndims=0: _initialize( # pylint: disable=g-long-lambda\n shape, dtype, batch_ndims,\n scale=2., mode='fan_in', distribution='uniform', seed=seed)", "def initializeDistribution(self):\n if (self.lowerBoundUsed == False and self.upperBoundUsed == False):\n self._distribution = distribution1D.BasicExponentialDistribution(self.lambdaVar,self.low)\n self.lowerBound = self.low\n self.upperBound = sys.float_info.max\n else:\n if self.lowerBoundUsed == False:\n self.lowerBound = self.low\n if self.upperBoundUsed == False:\n self.upperBound = sys.float_info.max\n self._distribution = distribution1D.BasicExponentialDistribution(self.lambdaVar,self.lowerBound,self.upperBound,self.low)", "def default_quantile():\n return np.logspace(-5, 0, 100)", "def _base_dist(self, *args, **kwargs):\n return tfd.TransformedDistribution(\n distribution=tfd.Uniform(low=0.0, high=1.0),\n bijector=tfp.bijectors.Invert(tfp.bijectors.Weibull(*args, **kwargs)),\n name=\"Weibull\",\n )", "def random_from_bound(bound):\n if (isinstance(bound, tuple)):\n val = np.random.uniform(low = bound[0], high = bound[1])\n else:\n val = 0.0\n return val", "def uniform(feature, bins):\n t = (feature.max()-feature.min())/bins\n return [t*i for i in range(1, bins)]", "def dispatch_dg(distribution_u_want, a):\n return {\n 'bernoulli': lambda: a / (1. + a),\n 'geometric': lambda: a / (1. 
- a), # initialization can give divide by zero\n 'exponential': lambda: a,\n }.get(distribution_u_want, lambda: None)()", "def uniform_dist(low, high):\n return sp_uniform(low, high - low)", "def random_uniform_initializer(minval=0.0, maxval=1.0, seed=None,\n dtype=dtypes.float32):\n def _initializer(shape, dtype=_assert_float_dtype(dtype)):\n return random_ops.random_uniform(shape, minval, maxval, dtype, seed=seed)\n return _initializer", "def uniform(a: float, b: float) -> float:\n ...", "def uniform(self, key, min_value=0., max_value=1.):\n return min_value + self._random(key) * (max_value - min_value)", "def loguniform_dist(low, high, base=10):\n return ExponentiateDistribution(sp_uniform(low, high - low), base=base)", "def initializeDistribution(self):\n if self.lowerBoundUsed == False and self.upperBoundUsed == False:\n self._distribution = distribution1D.BasicLogisticDistribution(self.location,self.scale)\n else:\n if self.lowerBoundUsed == False:\n a = -sys.float_info.max\n else:\n a = self.lowerBound\n if self.upperBoundUsed == False:\n b = sys.float_info.max\n else:\n b = self.upperBound\n self._distribution = distribution1D.BasicLogisticDistribution(self.location,self.scale,a,b)", "def get_standard_normal_distribution():\n return np.random.normal(0, 1)", "def generate_number(lbound=1, ubound=100, mean=None, std=None):\n x = np.arange(lbound, ubound + 1)\n if mean and std:\n prob = stats.norm.pdf(x, loc=mean, scale=std)\n prob = prob / prob.sum() #normalize the probabilities so they sum up to 1\n else:\n prob = np.repeat(1 / len(x), len(x))\n num = np.random.choice(x, p=prob)\n return num", "def discrete_uniform_sampler(upper_value):\n return int(np.random.random() * upper_value)", "def test_uniform(self):\r\n\r\n s = np.random.uniform(-1.35, 0.5, 5000)\r\n plt.hist(s, 30, density=False)\r\n plt.xlabel('Interlayer point energy [eV]')\r\n plt.ylabel('Frequency')\r\n plt.show()", "def _hardware_uniform(\n rng_key: Array,\n shape: Shape,\n dtype: jnp.dtype = np.float32,\n minval: Array = np.float32(0),\n maxval: Array = np.float32(1)\n) -> Array:\n del rng_key # non-deterministic prng.\n minval = jax.lax.convert_element_type(minval, dtype)\n maxval = jax.lax.convert_element_type(maxval, dtype)\n return jax.lax.rng_uniform(minval, maxval, shape)", "def initializeDistribution(self):\n if self.lowerBoundUsed == False and self.upperBoundUsed == False:\n self._distribution = distribution1D.BasicPoissonDistribution(self.mu)\n self.lowerBound = 0.0\n self.upperBound = sys.float_info.max\n else:\n self.raiseAnError(IOError,'Truncated poisson not yet implemented')", "def initializeDistribution(self):\n if self.lowerBoundUsed == False and self.upperBoundUsed == False:\n self._distribution = distribution1D.BasicLogNormalDistribution(self.mean,self.sigma,self.low)\n self.lowerBound = 0.0\n self.upperBound = sys.float_info.max\n else:\n if self.lowerBoundUsed == False:\n self.lowerBound = self.low\n if self.upperBoundUsed == False:\n self.upperBound = sys.float_info.max\n self._distribution = distribution1D.BasicLogNormalDistribution(self.mean,self.sigma,self.lowerBound,self.upperBound, self.low)", "def uniform(lower, upper):\n\n return lower + random.random() * (upper - lower)", "def uniform_weight_init(\n input_size: int,\n output_size: int,\n min_bounds: float = 0.0,\n max_bounds: float = 1.0,\n positive_ratio: Optional[float] = None,\n) -> t.Tensor:\n if input_size < 1:\n raise ValueError(\"input_size must be a positive integer.\")\n if output_size < 1:\n raise ValueError(\"output_size must 
be a positive integer.\")\n if min_bounds > max_bounds:\n raise ValueError(\"min_bounds must not be greater than max_bounds.\")\n if positive_ratio is not None:\n if positive_ratio > 1 or 0 > positive_ratio:\n raise ValueError(\n \"positive_ratio must be None, or must be between zero and one.\"\n )\n\n result = t.empty((input_size, output_size))\n uniform_(result, a=min_bounds, b=max_bounds)\n\n # TODO: test this.\n if positive_ratio is not None:\n bernoulli_distribution = Bernoulli(t.tensor([positive_ratio]))\n mask = bernoulli_distribution.sample((input_size, output_size)).squeeze().bool()\n result.abs_()\n result = result.where(mask, -result)\n\n return result", "def initializeDistribution(self):\n if self.lowerBoundUsed == False and self.upperBoundUsed == False:\n self._distribution = distribution1D.BasicGeometricDistribution(self.p)\n else: self.raiseAnError(IOError,'Truncated Geometric not yet implemented')", "def normal_upper_bound(probability: float, mu: float = 0, sigma: float = 1) -> float:\n return inverse_normal_cdf(probability, mu, sigma)", "def uniform_random_value(l_boundary: float, r_boundary: float) -> float:\n return uniform(l_boundary, r_boundary)", "def randrange(start: int, stop: int, step: int) -> int:\n ..." ]
[ "0.67329484", "0.63917905", "0.634777", "0.63349366", "0.61998284", "0.61475074", "0.61413395", "0.6124729", "0.6119773", "0.61055756", "0.6086929", "0.6079064", "0.60665655", "0.6043183", "0.6039257", "0.5985008", "0.5984233", "0.59805816", "0.59690744", "0.59467566", "0.5941135", "0.5927508", "0.5901463", "0.5896202", "0.5884528", "0.5881738", "0.584001", "0.5826473", "0.5800217", "0.5795453" ]
0.7167602
0
Look up the range for a functor
def functorRange(functor):
    for (name, range) in functorRangeList:
        if functor == name:
            return range
    else:
        raise Exception ("Functor " + functor + " not present in range list")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _in_range_op(spec):", "def _range_func(self, withscores, score_cast_func, decode_value_func=lambda x: x):\n if withscores:\n return lambda score_member: (decode_value_func(score_member[1]), score_cast_func(self._encode(score_member[0]))) # noqa\n else:\n return lambda score_member: decode_value_func(score_member[1])", "def ranges(self, predicate):\n\n x = np.zeros(len(self)).astype(np.bool)\n for i, elem in enumerate(self.elements):\n x[i] = predicate(elem)\n\n return np.where(x)[0]", "def range_(self):\n return self.bset.range_", "def getRange(self, p_int): # real signature unknown; restored from __doc__\n pass", "def test_get_range(self):\n pass", "def GetScalarRange(self):\n ...", "def get_lower_bound(self):\n return self._lower_bound", "def get_lower_bound(self):\n return self._lower_bound", "def range(self) -> ty.Tuple[float, float]:\r\n ...", "def provider_range_lookup(self, record):\n pass", "def get_bounds(f, lb=0, ub=None):\r\n lb_idx = np.searchsorted(f, lb, 'left')\r\n if ub == None:\r\n ub_idx = len(f)\r\n else:\r\n ub_idx = np.searchsorted(f, ub, 'right')\r\n\r\n return lb_idx, ub_idx", "def range_(self):\n return tuple((e[0], e[-1]) for e in self.edges)", "def get_ranges(self, tchain, kw):\n (lo, hi) = (\"min\", \"max\")\n ran = None\n for t in tchain:\n rstmt = t.search_one(kw)\n if rstmt is None: continue\n ran = [ i.split(\"..\") for i in rstmt.arg.split(\"|\") ]\n if ran[0][0] != 'min': lo = ran[0][0]\n if ran[-1][-1] != 'max': hi = ran[-1][-1]\n if ran is None: return None\n if len(ran) == 1:\n return [(lo, hi)]\n else:\n return [(lo, ran[0][-1])] + ran[1:-1] + [(ran[-1][0], hi)]", "def range(self):\n return self.timerange()", "def f_has_range(self):\n raise NotImplementedError(\"Should have implemented this.\")", "def get_range(self) -> tuple[int, int]:\n return self.range_from, self.range_to", "def range(self):\n\n return time_stat(self, stat=\"range\")", "def lower_bound(self) -> float:\n ...", "def _hit_range_get(self):\n return (self.hit_start, self.hit_end)", "def range (self):\n return self._range", "def range (self):\n return self._range", "def range(self):\n return self.range_array", "def visit_bound_predicate(self, predicate) -> T:", "def _query_range_get(self):\n return (self.query_start, self.query_end)", "def range(self) -> Tuple[Union[int, float], Union[int, float]]:\n return self._range", "def _builtin_between(low, high, value, **k):\n mode = check_mode((low, high, value), ['iii', 'iiv'], functor='between', **k)\n low_v = int(low)\n high_v = int(high)\n if mode == 0: # Check\n value_v = int(value)\n if low_v <= value_v <= high_v:\n return [(low, high, value)]\n else: # Enumerate\n results = []\n for value_v in range(low_v, high_v + 1):\n results.append((low, high, Constant(value_v)))\n return results", "def getRange(self):\n return self.range", "def functorOtherValue(functor, val):\n range = functorRange(functor)\n assert len(range) == 2\n if val == range[0]:\n return range[1]\n else:\n return range[0]", "def f_get_range(self, copy=True):\n raise NotImplementedError(\"Should have implemented this.\")" ]
[ "0.68109703", "0.63385564", "0.6327256", "0.6227952", "0.62047696", "0.61920094", "0.61834943", "0.6165325", "0.6165325", "0.61133087", "0.61056334", "0.6076185", "0.60739946", "0.60496026", "0.6020257", "0.60012704", "0.6000408", "0.5993944", "0.5981103", "0.5969238", "0.5917489", "0.5917489", "0.5914561", "0.59062403", "0.58986896", "0.5891751", "0.5876367", "0.5875278", "0.5871048", "0.5856082" ]
0.839487
0
Return cardinality of range for a functor
def functorRangeSize(functor):
    return len(functorRange(functor))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cardinality(self):\n estimate = self._alpha * math.pow(self._m, 2) / sum(math.pow(2, -x) for x in self._registers)\n\n if estimate <= 2.5 * self._m:\n # get number of registers equal to zero\n empty_registers = self._registers.count(0)\n if empty_registers != 0:\n return self._linear_count(empty_registers)\n else:\n return estimate\n elif estimate <= ((1 << 32) / 30):\n return estimate\n else:\n return self._large_range_correction(estimate)", "def functorRange(functor):\n for (name, range) in functorRangeList:\n if functor == name:\n return range\n else:\n raise Exception (\"Functor \" + functor + \" not present in range list\")", "def interval_cardinality(self):\n return len(list(self.lower_contained_intervals()))", "def cardinality(self):\n raise NotImplementedError()", "def f_get_range_length(self):\n if not self.f_has_range():\n raise TypeError(\"Not applicable, parameter does not have a range\")\n elif hasattr(self, \"__len__\"):\n return len(self)\n else:\n raise NotImplementedError(\"Should have implemented this.\")", "def cardinality(self):\n return int(ifac(self.size))", "def compute_search_cardinality(params_space: List[Dict[str, Any]]) -> float:\n # check if search space is infinite\n is_infinite = any([param[\"type\"] == \"range\" for param in params_space])\n if is_infinite:\n return math.inf\n else:\n res = 1\n for param in params_space:\n if \"values\" in param:\n res *= len(param[\"values\"])\n return res", "def count(predicate: Predicate[int]) -> Predicate[Sized]:\n\n def compare(sized: Sized, /) -> bool:\n return predicate(len(sized))\n\n return compare", "def cardinality(self):\n from sage.arith.all import binomial\n n = self._size\n if n == 0:\n return Integer(1)\n return (2 * binomial(4 * n + 1, n - 1)) // (n * (n + 1))\n # return Integer(2 * factorial(4*n+1)/(factorial(n+1)*factorial(3*n+2)))", "def _in_range_op(spec):", "def count_oob(cube):\n out_range = (cube > 4).any(1) | (cube < -4).any(1)\n out_range = out_range.sum() / cube.shape[0]\n return out_range", "def __len__(self):\n if self._data is None:\n return 0\n elif len(self._explored_range) > 0:\n return len(self._explored_range)\n else:\n return 1", "def count(cls):\n return lambda x,y: ((type(x)==int) and [x+1] or ((y==None) and [1] or [2]))[0]", "def get_event_count(event_times, start, end):\n mask = (event_times > start) & (event_times <= end)\n return event_times[mask].size", "def GetScalarRange(self):\n ...", "def _freqs(seq, lower_bound, upper_bound, k, normalized=False):\n freqs = []\n region_width = (upper_bound - lower_bound) / k \n\n for i in range(k):\n low = lower_bound + i * region_width\n high = lower_bound + i * region_width + region_width\n freqs.append( np.logical_and(seq >= low, seq < high).sum() )\n\n # because last interval has '[a;b]' - bounds, not '[a,b)'\n freqs[-1] += 1\n\n if normalized:\n freqs = np.array(freqs) / len(seq)\n\n return np.array(freqs), region_width", "def test_cardinality(\n self, prior_name, min_bound, max_bound, precision, cardinality\n ):\n dim = Real(\n \"yolo\", prior_name, min_bound, max_bound, precision=precision, shape=None\n )\n assert dim.cardinality == cardinality\n dim = Real(\n \"yolo\", prior_name, min_bound, max_bound, precision=precision, shape=(2, 3)\n )\n assert dim.cardinality == cardinality ** (2 * 3)", "def f_has_range(self):\n raise NotImplementedError(\"Should have implemented this.\")", "def howmany_within_range(row, minimum, maximum):\n count = 0\n for n in row:\n if minimum <= n <= maximum:\n count = count + 1\n return count", "def 
get_desired_count(value, lower, upper):\n if lower != -1 and value < lower:\n return lower\n if upper != -1 and value > upper:\n return upper\n return value", "def test_cardinality(self):\n space = Space()\n probs = (0.1, 0.2, 0.3, 0.4)\n categories = (\"asdfa\", 2, 3, 4)\n dim = Categorical(\"yolo\", OrderedDict(zip(categories, probs)), shape=2)\n space.register(dim)\n dim = Integer(\"yolo2\", \"uniform\", -3, 6)\n space.register(dim)\n dim = Fidelity(\"epoch\", 1, 9, 3)\n space.register(dim)\n\n assert space.cardinality == (4**2) * (6 + 1) * 1\n\n dim = Integer(\"yolo3\", \"uniform\", -3, 2, shape=(3, 2))\n space.register(dim)\n assert space.cardinality == (4**2) * (6 + 1) * 1 * ((2 + 1) ** (3 * 2))\n\n dim = Real(\"yolo4\", \"norm\", 0.9)\n space.register(dim)\n assert np.inf == space.cardinality", "def f02_03_countElemBetween(l, a, b):\n return sum([a < x < b for x in l])", "def detect_range(self) -> Union[int, float]:\n return self.proto.detect_range", "def count_if(iterable, pred, first=0, last=None):\n assert hasattr(iterable, '__getitem__')\n # Only slice for sub-ranges, slight performance improvement\n iterable = iterable if first == 0 and last is None else iterable[first:last]\n return sum(1 for x in iterable if pred(x))", "def count_lorentz(fit_range, lorentz_array_2d):\n counter = 0\n for i in range(0, lorentz_array_2d.shape[0]):\n f0 = lorentz_array_2d[i][1]\n if f0 > fit_range[1] and f0 < fit_range[2]:\n counter += 1\n return counter", "def countElements(self, nums):\n import sys\n max_n = -sys.maxint\n min_n = sys.maxint\n\n for n in nums:\n max_n = max(n, max_n)\n min_n = min(n, min_n)\n\n count = 0\n for n in nums:\n if min_n < n < max_n:\n count += 1\n return count", "def default(functor):\n return 1.0/functorRangeSize(functor)", "def boundaries_size(*args):\n return _ida_hexrays.boundaries_size(*args)", "def count_property_range_hits(prop, node_dict, hits):\n\tres = []\n\t# sets tuple position to use in dict value\n\tswitcher = {\n \"length\": (0,(0,4000,8000,12000,16000,20000)),\n \"steps\": (1,(0,2,4,8,16,32)),\n \"cov\": (2,(1,10,100,1000,10000,100000)),\n \"cv\": (3, (0,0.05,0.10,0.15,0.20,0.25))\n }\n\tif prop not in switcher:\n\t\treturn res\n\ttup_pos = switcher[prop][0]\n\tnode_cnt = 0\n\tpos_cnt = 0\n\tfor ind in range(len(switcher[prop][1])-1):\n\t\tmin_val = switcher[prop][1][ind]\n\t\tmax_val = switcher[prop][1][ind+1]\n\t\tfor node in node_dict.keys():\n\t\t\tval = node_dict[node][tup_pos]\n\t\t\tif ind < len(switcher[prop][1])-2:\n\t\t\t\trange_test_val = (min_val <= val < max_val)\n\t\t\telse:\n\t\t\t\trange_test_val = (min_val <= val <= max_val)\n\t\t\t# print \"range bool is\", range_test_val\n\t\t\tif range_test_val:\n\t\t\t\tnode_cnt += 1\n\t\t\t\tif node in hits: pos_cnt += 1\n\t\tif node_cnt > 0:\n\t\t\tres.append( (pos_cnt, node_cnt, round(float(pos_cnt)/node_cnt,2)))\n\t\telse:\n\t\t\tres.append((0,0,0))\n\t\tnode_cnt = 0\n\t\tpos_cnt = 0\n\treturn res", "def complexity(self, mode='#nodes'):\n if mode == '#nodes':\n return len(self.nodes)" ]
[ "0.6364935", "0.6203069", "0.6184483", "0.6088533", "0.5999436", "0.5949017", "0.5914987", "0.5850817", "0.5838362", "0.58077663", "0.57850456", "0.56294644", "0.55813044", "0.5576867", "0.5546719", "0.55258554", "0.55099475", "0.5464677", "0.5460476", "0.54486597", "0.54301196", "0.542768", "0.5427204", "0.54044926", "0.5389398", "0.5384164", "0.5373981", "0.5367209", "0.53636926", "0.53592116" ]
0.799879
0
For functors with a binary range, return the other element
def functorOtherValue(functor, val):
    range = functorRange(functor)
    assert len(range) == 2
    if val == range[0]:
        return range[1]
    else:
        return range[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def functorRange(functor):\n for (name, range) in functorRangeList:\n if functor == name:\n return range\n else:\n raise Exception (\"Functor \" + functor + \" not present in range list\")", "def ranges(self, predicate):\n\n x = np.zeros(len(self)).astype(np.bool)\n for i, elem in enumerate(self.elements):\n x[i] = predicate(elem)\n\n return np.where(x)[0]", "def __or__(self, other):\n if not isinstance(other, Range):\n raise TypeError(\n f\"unsupported operand types for |: \"\n f\"{type(self).__name__!r} and {type(other).__name__!r}\"\n )\n\n if self == other:\n return Range(self.vmin, self.vmax)\n elif self.vmax < other.vmin - 1:\n return None\n elif self.vmin > other.vmax + 1:\n return None\n\n return Range(\n vmin=min(self.vmin, other.vmin),\n vmax=max(self.vmax, other.vmax)\n )", "def _in_range_op(spec):", "def __and__(self, other):\n if not isinstance(other, Range):\n raise TypeError(\n f\"unsupported operand types for &: \"\n f\"{type(self).__name__!r} and {type(other).__name__!r}\"\n )\n\n if self == other:\n return Range(self.vmin, self.vmax)\n elif self < other or self > other:\n return None\n\n return Range(\n vmin=max(self.vmin, other.vmin),\n vmax=min(self.vmax, other.vmax)\n )", "def from_inclusive(a, b):\n c = int(b > a)*2-1\n return range(a, b+c, c)", "def _range_func(self, withscores, score_cast_func, decode_value_func=lambda x: x):\n if withscores:\n return lambda score_member: (decode_value_func(score_member[1]), score_cast_func(self._encode(score_member[0]))) # noqa\n else:\n return lambda score_member: decode_value_func(score_member[1])", "def _bisect_right(*args, **kwargs): # real signature unknown\n pass", "def _builtin_between(low, high, value, **k):\n mode = check_mode((low, high, value), ['iii', 'iiv'], functor='between', **k)\n low_v = int(low)\n high_v = int(high)\n if mode == 0: # Check\n value_v = int(value)\n if low_v <= value_v <= high_v:\n return [(low, high, value)]\n else: # Enumerate\n results = []\n for value_v in range(low_v, high_v + 1):\n results.append((low, high, Constant(value_v)))\n return results", "def cal(num1, num2, range):\r\n setup(range)\r\n return Andcollections(num1, num2)", "def evaluate_comparison_range(node):\n return None", "def range_(self):\n return tuple((e[0], e[-1]) for e in self.edges)", "def _range_overapped(self, x, y):\n xs = set( range(x[0], x[1]))\n ys = set( range(y[0], y[1]))\n return xs.intersection(ys)", "def in_range(x, a, b):\n return (x >= a and x <= b) or (x <= a and x >= b)", "def between(min, max):\n def func(x):\n return min <= x <= max\n return func", "def visit_bound_predicate(self, predicate) -> T:", "def _range_contains(self, a, b):\n\t\treturn b[0] >= a[0] and b[-1] <= a[-1]", "def other_above(self, element, first_successor):\n successors = list(self.above(element))\n if len(successors) != 2:\n raise ValueError(\"element is not binary in lattice\")\n elif successors[0] == first_successor:\n return successors[1]\n elif successors[1] == first_successor:\n return successors[0]\n else:\n raise ValueError(\"first_successor is not a successor of element in lattice\")", "def __or__(self, other):\n return MyCustomNumber(self.value | other.value)", "def _intersect_interval(self, other):\n interval = Intersection(self.interval, other.interval)\n return interval.inf, interval.sup", "def dock_if_bound(f, v):\n return v[1:] if is_bound(f) else v", "def __ge__(self, other):\n return self.master.abs2phy.__getitem__(pos=other)", "def _less_than_or_equal_to_op(spec):", "def __le__(self, other):\n return self.element() <= 
other.element()", "def get(self, a: int, b: int) -> int:\n result = self.identity()\n q = [(1, 0, self.n2)]\n while q:\n k, left, right = q.pop()\n if a <= left and right <= b:\n result = self.binary(result, self.tree[k])\n continue\n m = (left + right) // 2\n k <<= 1\n if a < m and left < b:\n q.append((k, left, m))\n if a < right and left < m:\n q.append((k + 1, m, right))\n return result", "def _bound(x, min_value, max_value):\n return np.maximum(min_value, np.minimum(x, max_value))", "def __ge__(self, other):\n return self.element() >= other.element()", "def _bi_range(start, end):\n if start == end:\n return (start,)\n\n elif end < start:\n return reversed(range(end, start + 1))\n\n else:\n return range(start, end + 1)", "def __le__(self, other):\r\n # self <= other\r\n s = (self - other).share[0]\r\n return type(self)((s <= 0, self.exponent_type(0)))", "def ge(self, y):\n return 1 - self.lt(y)" ]
[ "0.6828715", "0.6507789", "0.62094086", "0.6026386", "0.5969396", "0.5849859", "0.5836691", "0.58243334", "0.57149607", "0.5674531", "0.5671843", "0.56514573", "0.56158537", "0.56145406", "0.55590016", "0.55359095", "0.5534223", "0.5493425", "0.54904515", "0.5454143", "0.5448288", "0.5446138", "0.5445249", "0.5415833", "0.538504", "0.53773594", "0.53736085", "0.53575164", "0.53477377", "0.5343283" ]
0.75341356
0
Return the atoms, derived from the first entry in the joint probability table
def atomList(joints):
    assert len(joints) > 0
    first = joints[0]
    functorList = first[1][:-2]  # Second element of row, last two elements of that are joint prob and log prob
    atomList = []
    for (node,_) in functorList:
        atomList.append(node.functor+"("+",".join(node.varList)+")")
    return atomList
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def jointProbabilities(constants, db, ruleList, bn):\n vars = bn.variableList()\n combs = generateCombos(vars, constants)\n joints = []\n for grounding in combs:\n joints.append((grounding, bn.jointProbs(grounding, db, ruleList)))\n return (vars, atomList(joints), joints)", "def enumerate_joint_ask(X, e, P):\n Q = ProbDist(X) ## A probability distribution for X, initially empty\n Y = [v for v in P.variables if v != X and v not in e]\n for xi in P.values(X):\n Q[xi] = enumerate_joint(Y, extend(e, X, xi), P)\n return Q.normalize()", "def prior_sample(self, bn):\n x = np.zeros(3)\n\n # first joint prob\n random_choice = np.random.choice(bn[0], 1, bn[0].all(), bn[0])\n x[0] = random_choice[0]\n\n # Second Joint Prob\n if x[0] == 0.1:\n random_choice = np.random.choice(bn[1][0], 1, bn[1][0].all(), bn[1][0])\n x[1] = random_choice\n elif x[0] == 0.9:\n random_choice = np.random.choice(bn[1][1], 1, bn[1][1].all(), bn[1][1])\n x[1] = random_choice\n\n # Third Joint Prob\n if random_choice[0] == 0.8 or random_choice == 0.1:\n random_choice = np.random.choice(bn[2][0], 1, bn[2][0].all(), bn[2][0])\n x[2] = random_choice\n else:\n random_choice = np.random.choice(bn[2][1], 1, bn[2][1].all(), bn[2][1])\n x[2] = random_choice\n return x", "def pon(self):\n return self.table[1, 1] / (self.table[0, 1] + self.table[1, 1])", "def head2head(self, atoms):\n\n c1, c2 = atoms.keys()\n c1_ndx, c2_ndx = atoms.values()\n\n chain1, chain2 = self.determine_chains([c1, c2])\n\n # to get indexing right\n c1_ndx -= self.monomer.indices[chain1]['C1']\n c2_ndx -= self.monomer.indices[chain2]['C2']\n\n types = {'chain1': {'C1': 'c3', 'C2': 'c2', 'C3': 'c2', 'C4': 'c2', 'H1': 'hc', 'H2': 'hc', 'H3': 'ha',\n 'H4': 'ha', 'H5': 'ha'},\n 'chain2': {'C1': 'c3', 'C2': 'c2', 'C3': 'c2', 'C4': 'c3', 'H1': 'hc', 'H2': 'hc', 'H3': 'ha',\n 'H4': 'ha', 'H5': 'hc', 'D4': 'hc'}}\n\n # update types\n reacted_types = {'chain1': {c1_ndx + self.monomer.indices[chain1][a]: types['chain1'][a] for a in\n types['chain1'].keys()},\n 'chain2': {c2_ndx + self.monomer.indices[chain2][a]: types['chain2'][a] for a in\n types['chain2'].keys()}}\n\n # bond between carbons\n bonds = [[c1_ndx + self.monomer.indices[chain1]['C1'], c2_ndx + self.monomer.indices[chain2]['C1'], 'carbon']]\n\n # dummy bonds - 1 new bond between dummy atoms and carbon\n bonds += [[c2_ndx + self.monomer.indices[chain2]['C4'], c2_ndx + self.monomer.indices[chain2]['D4'], 'dummy']]\n\n # define indices of left-over radicals\n radicals = [c1_ndx + self.monomer.indices[chain1]['C2']]\n\n chain1_impropers = ['C1'] # [1]\n chain2_impropers = ['C1', 'C4'] # [1, 2]\n rm_improper = []\n for c in chain1_impropers:\n rm_improper.append([c1_ndx + self.monomer.indices[chain1][x] for x in self.monomer.impropers[chain1][c]])\n for c in chain2_impropers:\n rm_improper.append([c2_ndx + self.monomer.indices[chain2][x] for x in self.monomer.impropers[chain2][c]])\n\n # define terminated atoms\n terminated = [c1_ndx + self.monomer.indices[chain1]['C1'], c2_ndx + self.monomer.indices[chain2]['C1'],\n c2_ndx + self.monomer.indices[chain2]['C2']] # C2 terminated for now even though still alkene\n\n return reacted_types, bonds, radicals, rm_improper, terminated", "def getFirstTorsionInds(self, extended):\n offset = 6 if extended else 0\n torsionInds = np.array(range(offset + 5, self.natoms * 3, 3))\n primaryTorsions = sorted(list(set(self._firstTorsionTInd)))\n return list(torsionInds[primaryTorsions])", "def get_center_of_mass_allies(self,obs):", "def posterior_first(self, word):\r\n prob = 
{}\r\n if word not in prob.keys():\r\n prob[word] = {\r\n pos: self.emission_probability[pos][word]\r\n * self.initial_probability[pos]\r\n if word in self.emission_probability[pos]\r\n else (1 / float(10 ** 10)) * self.initial_probability[pos]\r\n for pos in self.position_list\r\n }\r\n\r\n return prob[word]", "def __generate_atoms__(self, pdb):\n\n atoms = [] # Maybe this can be a set \n # TODO: Here I can use self.structure.header['missing_residues'] to get a list of residues. It will have their seq and I can use this to make a sequential index\n for model in self.structure:\n residues = model.get_residues() # Biopython \n for residue in residues:\n full_id = residue.get_full_id()\n ins_code = full_id[3][2] \n this_model = str(int(full_id[1]) + 1) # BioPython starts at 0 and fr3d-python starts at 1. Add 1 to each model so unit ids match\n this_chain = full_id[2]\n component_number = full_id[3][1]\n if 'H' in full_id[3][0][0]:\n res_group = 'HETATM'\n else:\n res_group = 'ATOM'\n\n res = residue.get_resname().replace(\" \",\"\")\n\n if ins_code == \" \":\n ins_code = None\n\n for atom in residue:\n #drop numbers \n id = atom.id \n id = re.sub(r'\\d+', '',id)\n first = id[0]\n # logic to extract the type of atom from the id\n if 'C' == first: #Carbon\n atom_type = 'C' \n elif 'O' == first: #Ox\n atom_type = 'O'\n elif 'P' == first: #Phosphorus\n atom_type = 'P'\n elif 'N' == first: # nitrogen\n atom_type = 'N'\n else: #Magnesium, other ions\n atom_type = atom.id\n\n x = atom.coord[0]\n y = atom.coord[1]\n z = atom.coord[2]\n \n alt_id = atom.get_altloc()\n if alt_id == \" \":\n alt_id = None\n atoms.append(Atom(x=x, y=y, z=z,\n pdb=self.name,\n model=this_model,\n chain=this_chain,\n component_id=res,\n component_number=component_number,\n component_index=component_number,\n insertion_code=ins_code,\n alt_id= alt_id,\n group=res_group,\n type=atom_type,\n name=atom.get_name(),\n symmetry='1_555', #I haven't figured out how to extract symmetries from pdb files yet. Resort to identity\n polymeric=True)) # Need to find a way to parse this from biopython. 
Important, may be relevent in structures.py\n return atoms", "def get_result(person_to_joint_assoc, joint_list):\n outputs = []\n for ridxPred in range(len(person_to_joint_assoc)):\n\n keypoints = np.zeros((18, 3))\n\n for part in range(18):\n index = int(person_to_joint_assoc[ridxPred, part])\n\n if -1 == index:\n keypoints[part, 0] = 0\n keypoints[part, 1] = 0\n keypoints[part, 2] = 0\n\n else:\n keypoints[part, 0] = joint_list[index, 0] + 0.5\n keypoints[part, 1] = joint_list[index, 1] + 0.5\n keypoints[part, 2] = 1.\n\n outputs.append(keypoints)\n return outputs", "def GetBonds(Bonds):\n b = sorted([(min(x), max(x)) for x in Bonds])\n Bonds13, Bonds14 = [], []\n for (a1,b1) in b:\n #check for bonds with a1 at the center of a 1-3 interaction,\n #letting b1 be the higher number of the two flanking\n clist = [b2 for (a2,b2) in b if a2 == a1 and b2 < b1] + \\\n [a2 for (a2,b2) in b if b2 == a1 and a2 < b1]\n Bonds13.extend([(min(c,b1), max(c,b1)) for c in clist])\n #check for bonds with b1 at the center of a 1-3 interaction,\n #letting a1 be the higher number of the two flanking\n clist = [b2 for (a2,b2) in b if a2 == b1 and b2 < a1] + \\\n [a2 for (a2,b2) in b if b2 == b1 and a2 < a1]\n Bonds13.extend([(min(c,a1), max(c,a1)) for c in clist])\n #find atoms connected to a1\n clist = [b2 for (a2,b2) in b if a1==a2 and not b1==b2] +\\\n [a2 for (a2,b2) in b if a1==b2 and not b1==a2]\n #find atoms connected to b1\n dlist = [a2 for (a2,b2) in b if b1==b2 and not a1==a2] +\\\n [b2 for (a2,b2) in b if b1==a2 and not a1==b2]\n Bonds14.extend([(min(c,d), max(c,d)) for c in clist for d in dlist])\n Bonds1213 = b + Bonds13\n #sort\n Bonds1213.sort()\n Bonds14.sort()\n #get unique values in case of loops\n Bonds1213 = [x for (i,x) in enumerate(Bonds1213) if i == 0 or x != Bonds1213[i-1]]\n Bonds14 = [x for (i,x) in enumerate(Bonds14) if i == 0 or x != Bonds14[i-1]]\n #convert to arrays \n Bonds1213 = array(Bonds1213, int)\n Bonds14 = array(Bonds14, int)\n return Bonds1213, Bonds14", "def get_joints(self, anno: List[Mapping[str, Any]]) -> np.ndarray:\n joints = []\n\n for i, obj in enumerate(anno):\n keypoints = np.array(obj[\"keypoints\"]).reshape([-1, 3])\n joints.append(keypoints)\n\n num_instances = len(joints)\n joints = np.array(joints, dtype=np.float32).reshape((num_instances, self.num_joints, 3))\n return joints", "def get_prob(self, head):\n if len(self._head_vars) == 0 and head.size() > 0:\n return 0.\n\n trimmed_head = head.get_trimmed(self._head_vars)\n if trimmed_head in self._table:\n return self._table.get(trimmed_head)\n\n return 0.", "def calc_pvecs_1mol(mol_crds, act_ats):\n nearest_neighbours = np.zeros((len(act_ats), 3, 3))\n at_inds = np.arange(len(mol_crds))\n at_map = {} # map at num to active at num\n\n # Loop over active atoms and calc nearest neighbours\n for count, iat in enumerate(act_ats):\n at_crd = mol_crds[iat]\n dists = np.linalg.norm(mol_crds - at_crd, axis=1)\n\n dist_mask = dists < 3.5\n nn_ats = at_inds[dist_mask][:3]\n if len(nn_ats) != 3:\n # Set the map at to the next closest one\n closest_at = K_min(list(dists), 2)\n at_map[count] = closest_at[1]\n continue\n else:\n # Make sure iat is the first atom\n nn_ats = nn_ats[nn_ats != iat][:2]\n nn_ats = [iat, *nn_ats]\n assert len(nn_ats) == 3\n\n nearest_neighbours[count] = mol_crds[nn_ats]\n\n # Set pvecs the same as the closest atom if we can't calculate them\n for at in at_map:\n nearest_neighbours[at] = nearest_neighbours[at_map[at]]\n\n pvecs = []\n for a1, a2, a3 in nearest_neighbours:\n v1 = a2 - a1\n v2 = a3 
- a1\n pvec = np.cross(v1, v2)\n pvec /= np.linalg.norm(pvec)\n pvecs.append(pvec)\n\n return np.array(pvecs)", "def evaljointbayes(fname):\n #read file and save lines to contents\n contents = []\n f = open(fname)\n for line in f:\n randomVar = line.rstrip().split()\n if randomVar[0] != 'END':\n contents.append(randomVar)\n print \"1. Read file\", fname, \"successfully.\"\n f.close()\n \n #count numbers of nodes and probabilities in each line\n length = len(contents)\n nodes, prob = [0] * length, [0] * length\n table = [] #save all probabilities for each node \n for num in range(0, length):\n tableline = []\n for i in contents[num]:\n try:\n j = float(i)\n prob[num] += 1\n tableline.append(j) \n except ValueError:\n if i != 'NONE':\n nodes[num] += 1 \n table.append(tableline) \n \n #print out the joint distribution formular\n print \"2. The joint distribution using this network is:\"\n nodelist = []\n for line in contents:\n nodelist.append(line[0])\n print \"P(\", printElement(nodelist), \") = \"\n for num in range(0, length):\n line = contents[num]\n if nodes[num] == 1:\n print \"P(\", line[0], \")\", \n else: \n print \"P(\", line[0], '|', printElement(line[1:nodes[num]]),\\\n \")\", \n if num == length - 1:\n print ' '\n else: \n print ' * ', \n \n #print out the full joint distribution table \n###This is the revised version using recursive calls and###\n###print out the cpt table to a .txt file###\n #further revision includes deleting depth by pop() table, contents and nodes\n #also, I can extract the parents in the previous step, then contents will not be used here\n fo=open(fname+'.zz.txt','w')\n result = 1.0\n depth = 0\n global additions, multiplications\n additions, multiplications = 0, 0\n fullCPT(nodelist, [], result, depth, fo, contents, table, nodes)\n fo.close()\n\n #print out result of step 3 \n print \"3. Additions and multiplications needed to calculate\",\\\n \"the joint distribution is:\", additions, \"and\", multiplications\n print \"The number of nodes in the network is: \", length\n\n #print out reselt of step 4\n spaceFull = int(math.pow(2,length)) - 1\n spaceBN = sum(prob)\n print \"4. 
Space this network saved is (Compactness): \", spaceBN, \"/\", \\\n spaceFull, \"=\", float(spaceBN) / float(spaceFull), '\\n'\n return", "def _get_joints(self, anno, idx):\n num_people = len(anno)\n\n joints = np.zeros(\n (num_people, self.ann_info['num_joints'], 3), dtype=np.float32)\n\n for i, obj in enumerate(anno):\n joints[i, :self.ann_info['num_joints'], :3] = \\\n np.array(obj['keypoints']).reshape([-1, 3])\n\n img_info = self.coco.loadImgs(self.img_ids[idx])[0]\n orgsize = np.array([img_info['height'], img_info['width'], 1])\n\n return joints, orgsize", "def setAtomPairs(self):\n atomPairs = []\n for item in self.condensedProperDihedrals:\n dih = item[0]\n atom1 = dih.atoms[0]\n atom2 = dih.atoms[3]\n pair = [atom1, atom2]\n if atomPairs.count(pair) == 0:\n atomPairs.append(pair)\n self.atomPairs = atomPairs # [[atom1, atom2], ...]\n self.printDebug(\"atomPairs done\")", "def joint_pairs(self):\n return [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16], #17 body keypoints\n [20-3, 23-3], [21-3, 24-3], [22-3, 25-3], [26-3, 42-3], [27-3, 41-3], [28-3, 40-3], [29-3, 39-3], [30-3, 38-3], \n [31-3, 37-3], [32-3, 36-3], [33-3, 35-3], [43-3, 52-3], [44-3, 51-3], [45-3, 50-3], [46-3, 49-3], [47-3, 48-3], \n [62-3, 71-3], [63-3, 70-3], [64-3, 69-3], [65-3, 68-3], [66-3, 73-3], [67-3, 72-3], [57-3, 61-3], [58-3, 60-3],\n [74-3, 80-3], [75-3, 79-3], [76-3, 78-3], [87-3, 89-3], [93-3, 91-3], [86-3, 90-3], [85-3, 81-3], [84-3, 82-3],\n [94-3, 115-3], [95-3, 116-3], [96-3, 117-3], [97-3, 118-3], [98-3, 119-3], [99-3, 120-3], [100-3, 121-3],\n [101-3, 122-3], [102-3, 123-3], [103-3, 124-3], [104-3, 125-3], [105-3, 126-3], [106-3, 127-3], [107-3, 128-3],\n [108-3, 129-3], [109-3, 130-3], [110-3, 131-3], [111-3, 132-3], [112-3, 133-3], [113-3, 134-3], [114-3, 135-3]]", "def atoms(self):\n return self.qc_mol.atoms + self.br_mol.atoms + self.pc_mol.atoms", "def find_data_association(self, obs):\n prob = 0\n ass_obs = np.zeros((2,1))\n ass_jacobian = np.zeros((2,2))\n ass_adjcov = np.zeros((2,2))\n landmark_idx = -1\n for idx, landmark in enumerate(self.landmarks):\n predicted_obs, jacobian, adj_cov = self.compute_jacobians(landmark)\n p = multi_normal(np.transpose(np.array([obs])), predicted_obs, adj_cov)\n if p > prob:\n prob = p\n ass_obs = predicted_obs\n ass_jacobian = jacobian\n ass_adjcov = adj_cov\n landmark_idx = idx\n return prob, landmark_idx, ass_obs, ass_jacobian, ass_adjcov", "def bond_atoms(atom_list):\n pass", "def create_joint_prob_corr_table(to_dict=True):\n\n p = np.arange(0., 1.05, 0.05) # Range of marginal probability\n corr = np.arange(-1., 1.05, 0.05) # Range of correlations\n\n table = np.zeros(shape=(len(corr), len(p), len(p)))\n\n for i in range(len(corr)):\n\n sig = np.matrix([[1., corr[i]], [corr[i], 1.]])\n\n for j in range(len(p)):\n for k in range(j, len(p)):\n\n if corr[i] == -1:\n jp = max(0., p[j] + p[k] - 1.)\n elif corr[i] == 0.:\n jp = p[j] * p[k]\n elif corr[i] == 1.:\n jp = min(p[j], p[k])\n elif p[j] * p[k] == 0. 
or p[j] == 1 or p[k] == 1.:\n jp = p[j] * p[k]\n else:\n jp = mvnun(np.array([0., 0.]), np.array([np.inf, np.inf]),\n [norm.ppf(p[j]), norm.ppf(p[k])],\n sig)[0]\n\n table[i, j, k] = table[i, k, j] = jp\n\n if to_dict:\n n_table = {}\n\n # convert to dictionary:\n for j in range(len(p)):\n for k in range(j, len(p)):\n pj = round(p[j], 2)\n pk = round(p[k], 2)\n n_table[(pj, pk)] = n_table[(pj, pk)] = np.array((corr, table[:, j, k]))\n\n return n_table\n\n return table", "def _propose_atoms_in_order(self, atom_group):\n atom_torsions= []\n logp = []\n assert len(atom_group) == len(set(atom_group)), \"There are duplicate atom indices in the list of atom proposal indices\"\n while len(atom_group) > 0:\n #initialise an eligible_torsions_list\n eligible_torsions_list = list()\n\n for atom_index in atom_group:\n\n # Find the shortest path up to length four from the atom in question:\n shortest_paths = nx.algorithms.single_source_shortest_path(self._residue_graph, atom_index, cutoff=4)\n\n # Loop through the destination and path of each path and append to eligible_torsions_list\n # if destination has a position and path[1:3] is a subset of atoms with positions\n for destination, path in shortest_paths.items():\n\n # Check if the path is length 4 (a torsion) and that the destination has a position. Continue if not.\n if len(path) != 4 or destination not in self._atoms_with_positions_set:\n continue\n\n # If the last atom is in atoms with positions, check to see if the others are also.\n # If they are, append the torsion to the list of possible torsions to propose\n if set(path[1:3]).issubset(self._atoms_with_positions_set):\n eligible_torsions_list.append(path)\n\n assert len(eligible_torsions_list) != 0, \"There is a connectivity issue; there are no torsions from which to choose\"\n #now we have to randomly choose a single torsion\n ntorsions = len(eligible_torsions_list)\n random_torsion_index = np.random.choice(range(ntorsions))\n random_torsion = eligible_torsions_list[random_torsion_index]\n\n #append random torsion to the atom_torsions and remove source atom from the atom_group\n chosen_atom_index = random_torsion[0]\n first_old_atom_index = random_torsion[1]\n atom_torsions.append(random_torsion)\n atom_group.remove(chosen_atom_index)\n\n #add atom to atoms with positions and corresponding set\n self._atoms_with_positions_set.add(chosen_atom_index)\n\n #add a bond from the new to the previous torsion atom in the _reference_connectivity_graph\n self._reference_connectivity_graph.add_edge(chosen_atom_index, first_old_atom_index)\n\n #add the log probability of the choice to logp\n logp.append(np.log(1./ntorsions))\n\n # Ensure that logp is not ill-defined\n assert len(logp) == len(atom_torsions), \"There is a mismatch in the size of the atom torsion proposals and the associated logps\"\n\n return atom_torsions, logp", "def get_first(self):\n return self.A[1][0] if self.n > 0 else None", "def position(self):\n return self.atoms.reshape((1,-1))", "def atoms(self):\n return self._atoms", "def initialize_record_distribution(states, actions, empty_atoms):\n dist = {}\n\n atom_shape = empty_atoms.shape\n\n # initial values, equal probability distribution\n val = np.full(shape=atom_shape, fill_value=1.0 / float(atom_shape[0]))\n\n for i in states:\n dist[i] = {}\n for j in actions:\n dist[i][j] = val.copy()\n\n return dist", "def MAP(cpts, obs, terms):\r\n\r\n # a list to store the computed probabilities\r\n all_sums = []\r\n # initialize all terms to false\r\n for value in range(len(terms)):\r\n 
terms[value] = [terms[value], '0']\r\n search_array = terms + obs\r\n # if all terms are being watched, just call MPE\r\n if len(search_array) == len(cpts):\r\n return MPE(cpts, obs)\r\n # we need to know what terms we aren't interested in so we start with \r\n # or terms and observations and note the variables that appear in CPT but\r\n # not in those\r\n dont_count = []\r\n for var in cpts:\r\n if [var[0], '0'] not in search_array and [var[0], '1'] not in search_array:\r\n dont_count.append(var[0])\r\n terms.append([var[0],'1'])\r\n # sort the terms to ensure correct ordering\r\n terms.sort()\r\n # creates a list of all possible bit strings\r\n # just an easy way to create all possible truth assignments\r\n seq = [\"\".join(seq) for seq in itertools.product(\"01\", repeat=len(terms))]\r\n # loop through all possible truth assignments\r\n for j in range(len(seq)):\r\n # we initialize at probability = 100%\r\n chance = 1\r\n # assign the truth values\r\n for k in range(len(seq[j])):\r\n terms[k][1] = seq[j][k]\r\n # this computes the probability using the chaining rule\r\n for i in range(len(terms)):\r\n new_terms = terms[:-i-1] + obs\r\n new_terms.sort()\r\n chance *= probability(cpts,terms[-i-1], new_terms)\r\n # add the probabilities to our list\r\n all_sums.append(chance)\r\n combine = []\r\n # note all variables which weren't in obs or Vs\r\n for i in dont_count:\r\n combine.append(terms.index([i,'1']))\r\n # this will store the final probabilities\r\n final_array = [0] * len(seq)\r\n # another complicated looking loop, it just serves to combine probabilities\r\n # for example, if we have a CPT with x_1, x_2, x_3, x_4 and we observe \r\n # x_1 to be true and have Vs = [x_3, x_4] then we need to combine the \r\n # probabilities that are the same except for x_2 = true vs false\r\n for loc in combine:\r\n for sequence in range(len(seq)):\r\n for alt_sequence in range(sequence+1,len(seq)):\r\n if (seq[sequence][:loc] + seq[sequence][loc+1:]) == (seq[alt_sequence][:loc] + seq[alt_sequence][loc+1:]):\r\n final_array[sequence] = all_sums[sequence] + all_sums[alt_sequence]\r\n\r\n # get the truth assignment for the highest probability\r\n location = seq[final_array.index(max(final_array))]\r\n truth_assignment = []\r\n # place the truth assignment in a more readable fashion\r\n for value in range(len(terms)):\r\n if terms[value] in search_array:\r\n if location[value] == '0':\r\n truth_assignment.append(terms[value][0]+ ' = False')\r\n else:\r\n truth_assignment.append(terms[value][0]+ ' = True')\r\n return (truth_assignment)", "def homo(a):\n return [ a[0]/a[3],\n a[1]/a[3],\n a[2]/a[3],\n 1 ]", "def getActionProb(self, canonicalBoard, temp=1):\n for i in range(self.args.numMCTSSims):\n dir_noise = (i == 0 and self.dirichlet_noise)\n self.search(canonicalBoard, dirichlet_noise=dir_noise)\n\n s = self.game.stringRepresentation(canonicalBoard)\n counts = [\n self.Nsa[(s, a)] if (s, a) in self.Nsa else 0\n for a in range(self.game.getActionSize())\n ]\n\n if temp == 0:\n bestAs = np.array(np.argwhere(counts == np.max(counts))).flatten()\n bestA = np.random.choice(bestAs)\n probs = [0] * len(counts)\n probs[bestA] = 1\n return probs\n\n counts = [x**(1. / temp) for x in counts]\n counts_sum = float(sum(counts))\n probs = [x / counts_sum for x in counts]\n return probs" ]
[ "0.594754", "0.5753266", "0.57007027", "0.5424559", "0.539321", "0.5265403", "0.52582085", "0.52560824", "0.5244419", "0.5229816", "0.5205455", "0.51626754", "0.51596713", "0.5148809", "0.51186013", "0.51104856", "0.51007", "0.50988847", "0.50907356", "0.5085001", "0.50847787", "0.506436", "0.5059798", "0.5058871", "0.5054824", "0.50164527", "0.49896", "0.49687666", "0.49669787", "0.49666062" ]
0.63074166
0
Compute the joint probabilities for all combinations of values
def jointProbabilities(constants, db, ruleList, bn):
    vars = bn.variableList()
    combs = generateCombos(vars, constants)
    joints = []
    for grounding in combs:
        joints.append((grounding, bn.jointProbs(grounding, db, ruleList)))
    return (vars, atomList(joints), joints)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def joint_proba(self, X):\n return self.weights * self._bernoulli(X)", "def joint_prob(network, assignment):\n prob = 1\n for a_key in assignment:\n conditions = []\n current = network[a_key]\n for parent in current['Parents']:\n conditions.append(True) if assignment[parent] else conditions.append(False)\n conditions = tuple(conditions)\n if not assignment[a_key]:\n prob *= (1 - current['CPT'].get(conditions))\n else:\n prob *= (current['CPT'].get(conditions)) \n return prob", "def generate_probabilities(self):\n k = 1\n v= 10\n for g in self.class_probabilities:\n curr_list = self.class_probabilities[g]\n for l in range(0,28):\n for w in range(0,28):\n total = float(curr_list[l][w][0] + curr_list[l][w][1] + curr_list[l][w][2])\n curr_list[l][w][0] = (float(curr_list[l][w][0])+k)/(total + k*v) \n curr_list[l][w][1] = (float(curr_list[l][w][1])+k)/(total + k*v)\n curr_list[l][w][2] = (float(curr_list[l][w][2])+k)/(total + k*v)\n curr_list[l][w][3] = curr_list[l][w][0] + curr_list[l][w][1] + curr_list[l][w][2]", "def joint_probabilities_from_transitions(ordered_pitch_types, transition_counts):\n first_pitch_totals = {first_pitch_type: sum(transition_counts[first_pitch_type].values())\n for first_pitch_type in ordered_pitch_types}\n\n total_transitions = sum(first_pitch_totals.values())\n\n markov_rows = []\n joint_probabilities = {}\n\n for first_pitch_type in ordered_pitch_types:\n first_pitch_transitions = transition_counts[first_pitch_type]\n joint_probabilities[first_pitch_type] = {}\n first_pitch_type_probability = float(first_pitch_totals[first_pitch_type]) / total_transitions\n\n second_pitch_total = sum(first_pitch_transitions.values())\n row = [first_pitch_type]\n\n for second_pitch_type in ordered_pitch_types:\n if second_pitch_total == 0:\n second_pitch_conditional_probability = 0\n\n else:\n second_pitch_conditional_probability = \\\n float(first_pitch_transitions[second_pitch_type]) / second_pitch_total\n\n row.append(second_pitch_conditional_probability)\n\n joint_probabilities[first_pitch_type][second_pitch_type] = \\\n first_pitch_type_probability * second_pitch_conditional_probability\n\n markov_rows.append(row)\n\n return joint_probabilities, markov_rows, total_transitions", "def probability(prods, prod_dict_As, count_dict):\n for p in prods:\n if p not in prod_dict_As:\n raise Exception(\"Think we cannot make the product {}.\".format(p))\n # Argh, Python, this is a reference!\n #possible_As = prod_dict_As[prods[0]]\n possible_As = set( prod_dict_As[prods[0]] )\n for p in prods[1:]:\n possible_As &= prod_dict_As[p]\n ret = []\n for A in possible_As:\n count = 1\n for p in prods:\n count *= count_dict[(p,A)]\n ret.append((A,count))\n return ret", "def dealer_probs():\n # Pdf of any current hand (value, hard) and final value; p(v_f | v_c) where v_f = final value, v_c = current value\n probabilities = {}\n\n # End nodes: (value, True) for value >= 17 and (value, False) for value > 17\n # Dependencies (in order of increasing requirements):\n # Hard values, value >= 11, possiblity of bust, no possibility of going soft with an ace (value, True) depends on (value', True) for 17 > value' > value\n # Soft values, 17 >= value >= 11 (value, False) depends on (value', False) for 17 >= value' > value, (value', True) for 17 > value' > 11\n # Hard values, 11 > value >= 2 , no possibility of bust, possibility of going soft with an ace (value, True) depends on (value', True) for 17 > value' > value and (value', False) for 17 >= value' > 13\n\n\n # End nodes\n for value in xrange(17, 
22):\n probabilities[(value, True)] = {value: 1.0}\n if value == 17: continue # on soft 17, dealer will still hit\n probabilities[(value, False)] = {value: 1.0}\n\n # Hard values, 17 > value >= 11, possibility of bust, no possibility of going soft with an ace\n for value in xrange(16, 10, -1):\n probabilities[(value, True)] = {}\n current_prob = probabilities[(value, True)]\n for next_card in xrange(1, min(10, 21-value)+1):\n next_prob = probabilities[(value + next_card, True)]\n for end_val in next_prob:\n current_prob[end_val] = current_prob.get(end_val, 0) + next_prob[end_val] * PROBABILITIES[next_card]\n\n # Soft values, 17 >= value >= 11\n for value in xrange(17, 10, -1):\n probabilities[(value, False)] = {}\n current_prob = probabilities[(value, False)]\n for next_card in xrange(1, 11):\n next_value = value + next_card\n hard = False\n if next_value > 21:\n next_value -= 10\n hard = True\n next_prob = probabilities[(next_value, hard)]\n for end_val in next_prob:\n current_prob[end_val] = current_prob.get(end_val, 0) + next_prob[end_val] * PROBABILITIES[next_card]\n\n # Hard values, 11 > value >= 2, no possibility of bust, possibility of going soft with an ace\n for value in xrange(10, 1, -1):\n probabilities[(value, True)] = {}\n current_prob = probabilities[(value, True)]\n for next_card in xrange(2, 12):\n next_value = value + next_card\n hard = (next_card != 11)\n next_prob = probabilities[(next_value, hard)]\n for end_val in next_prob:\n current_prob[end_val] = current_prob.get(end_val, 0) + next_prob[end_val] * PROBABILITIES[next_card]\n\n return probabilities", "def probabilities(self):\n raise NotImplementedError", "def compute_conditional_distributions(joints):\n\n marginals = np.sum(joints, axis=1)\n posidx = marginals > 0\n \n conditionals = joints.copy()\n conditionals[posidx, :] /= marginals[posidx, None]\n conditionals[~posidx, :] = marginals\n assert np.isclose(np.sum(conditionals), len(conditionals))\n assert np.allclose(np.sum(conditionals, axis=1), 1.0)\n\n return conditionals", "def multinomial_prob(counts, probs):\n return nCkarray(*counts.values) * (probs ** counts).prod()", "def CalculateProbabilities(self, beta_0, beta_1):\n denom = self.zero_zero + self.zero_one + self.one_zero + self.one_one + beta_0 + beta_1 - 2\n if denom != 0:\n self.prob_zero = min( max( (self.zero_zero + self.zero_one + beta_0 - 1) / denom, 0.0 ), 1.0 )\n self.prob_one = min( max( (self.one_zero + self.one_one + beta_1 - 1) / denom, 0.0 ), 1.0 )\n \n denom = self.zero_zero + self.one_zero + beta_0 + beta_1 - 2\n if denom != 0:\n self.prob_zero_given_zero = min( max( (self.zero_zero + beta_0 - 1) / denom, 0.0 ), 1.0 )\n self.prob_one_given_zero = min( max( (self.one_zero + beta_1 - 1) / denom, 0.0 ), 1.0 )\n \n denom = self.zero_one + self.one_one + beta_0 + beta_1 - 2\n if denom != 0:\n self.prob_zero_given_one = min( max( (self.zero_one + beta_0 - 1) / denom, 0.0 ), 1.0 )\n self.prob_one_given_one = min( max( (self.one_one + beta_1 - 1) / denom, 0.0 ), 1.0 )", "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n \"*** YOUR CODE HERE ***\"\n\t#Adds log(P(y)) to calculate P(y|f1,f2...)\n for label in self.legalLabels:\n\t\tlogJoint[label] += math.log(self.prior[label])\n\t#Adds log(P(f1|y)), log(P(f2|y))... 
to calculate P(y|f1, f2...)\n for key in datum:\n\t\t#if key == (7, 3):\n\t\t\t#print self.condprobs[key, 0]\n\t\tfor label in self.legalLabels:\n\t\t\t#print str(key) + str(datum[key])\n\t\t\tlogJoint[label] += math.log(self.condprobs[key, label][datum[key]])\n return logJoint", "def get_components_probabilities(self):\n compsProbs={}\n for d in self.diagnoses:\n p = d.get_prob()\n for comp in d.get_diag():\n compsProbs[comp] = compsProbs.get(comp,0) + p\n return sorted(compsProbs.items(), key=lambda x: x[1], reverse=True)", "def compute_joint_probability(token_list, token_probabilities, use_log_prob=False):\n\n log_prob = 0\n\n for word in token_list:\n\n # do not allow zero probabilites\n assert word in token_probabilities\n\n if use_log_prob:\n log_prob += token_probabilities[word]\n else:\n log_prob += log10(token_probabilities[word])\n\n if use_log_prob:\n return log_prob\n\n return 10**log_prob", "def chance(n, p):\n total = 0.0\n for k in range(n+1):\n total += comb(n, k, exact=False) * p**k * (1-p) ** (n-k)\n return total", "def _update_probabilities(self):\n pairs_last = copy.deepcopy(self.pairs)\n # pairs_last = [el for el in pairs]\n for src_p in range(self.frame_0.shape[0]):\n for seql_num, dst_p in enumerate(self.pairs[src_p][0]):\n Q = self._calculate_Q(src_p, dst_p, pairs_last)\n # update pair probability.\n self.pairs[src_p][1][seql_num] = pairs_last[src_p][1][seql_num] * (self.A + self.B * Q)\n\n # normalize probability\n self.pairs[src_p][1] = self.pairs[src_p][1] / self.pairs[src_p][1].sum()", "def get_probs(self):\n\t\tprobArray = []\n\t\tfor combination in self.codepool:\n\t\t\tif self.feasible(combination):\n\t\t\t\tprobArray.append(self.get_probability(combination))\n\t\t\telse:\n\t\t\t\tprobArray.append(0)\n\t\tprobArray = np.array(probArray) / np.sum(probArray)\n\t\treturn probArray", "def calculateLogJointProbabilities(self, datum):\n\tlogJoint = util.Counter()\n\t#want to calculate log(P(y)) + log(sum(P(fi|y)))\n\t#where y is a label\n\tfor label in self.legalLabels:\n\t\tlogJoint[label] = math.log(self.prior_distribution_prob[label])\n\t\tfor feature, value in datum.items():\n\t\t\tcp = self.conditional_prob[label][feature][value]\n\t\t\tif cp > 0: #condition check for values < 0 because log(0) is undefined and math domain error occurs\n\t\t\t\tlogJoint[label] += math.log(cp) #summing up\n\t\t\t\t\n\treturn logJoint", "def get_prob_for_distributions(p):\n w1 = p[0]\n mu1 = p[1]\n sigma1 = p[2]\n w2 = p[3]\n mu2 = p[4]\n sigma2 = p[5]\n w3 = p[6]\n mu3 = p[7]\n sigma3 = p[8]\n dist_range = (0, 4.330310991999920844e+01)\n x = np.linspace(dist_range[0], dist_range[1], 1000)\n A1 = np.array(w1 * mlab.normpdf(x, mu1, sigma1)).sum()\n A2 = np.array(w2 * mlab.normpdf(x, mu2, sigma2)).sum()\n A3 = np.array(w3 * mlab.normpdf(x, mu3, sigma3)).sum()\n p1 = A1 / (A1 + A2 + A3)\n p2 = A2 / (A1 + A2 + A3)\n p3 = A3 / (A1 + A2 + A3)\n return p1, p2, p3", "def caculate_prob(self):\n t_H = self.tree.depth()\n t_h = 1\n while(t_h <= t_H):\n t_hnodes = self.get_h(t_h)\n t_sum = 0\n t_hpro = []\n t_cpro = []\n for t_n in t_hnodes:\n t_sum = self.tree.get_node(t_n).data[0] + t_sum\n t_node = self.tree.get_node(t_n)\n if t_node.is_leaf():\n t_node.data.append(0)\n continue\n t_childrens = self.tree.children(t_n)\n t_shang = 0\n for child in t_childrens:\n t_shang = t_shang + (child.data[0]/t_node.data[0])*np.log(child.data[0]/t_node.data[0])\n t_node.data.append(-t_shang)\n for t_n in t_hnodes:\n t_node = self.tree.get_node(t_n)\n t_parentnode = self.tree.parent(t_n)\n if t_h > 1:\n 
t_node.data.append((t_node.data[0] / t_sum) * (t_node.data[0]/t_parentnode.data[0]))\n t_hpro.append((t_node.data[0]/t_sum) * (t_node.data[0]/t_parentnode.data[0]))\n else:\n t_node.data.append((t_node.data[0] / t_sum))\n t_hpro.append((t_node.data[0] / t_sum))\n\n t_cpro.append(t_node.data[1])\n t_ndata = np.array(t_hpro)\n mean = np.mean(t_ndata)\n std = np.std(t_ndata,ddof=1)\n t_sdata = np.array(t_cpro)\n mean_s = np.mean(t_sdata)\n std_s = np.std(t_sdata,ddof=1)\n for t_n in t_hnodes:\n t_node = self.tree.get_node(t_n)\n if(std != 0):\n t_node.data[2] = (t_node.data[2] - mean)/std\n else:\n t_node.data[2] = (t_node.data[2] - mean)\n if(mean_s == 0 and std_s ==0):\n t_node.data[1] = -100.0\n continue\n t_node.data[1] = (t_node.data[1] - mean_s)/std_s\n t_h = t_h + 1", "def get_joint_outcomes(d):\n repeated_list = [[True, False]] * d\n oc = np.array(list(itertools.product(*repeated_list)))\n\n return oc", "def MAP(cpts, obs, terms):\r\n\r\n # a list to store the computed probabilities\r\n all_sums = []\r\n # initialize all terms to false\r\n for value in range(len(terms)):\r\n terms[value] = [terms[value], '0']\r\n search_array = terms + obs\r\n # if all terms are being watched, just call MPE\r\n if len(search_array) == len(cpts):\r\n return MPE(cpts, obs)\r\n # we need to know what terms we aren't interested in so we start with \r\n # or terms and observations and note the variables that appear in CPT but\r\n # not in those\r\n dont_count = []\r\n for var in cpts:\r\n if [var[0], '0'] not in search_array and [var[0], '1'] not in search_array:\r\n dont_count.append(var[0])\r\n terms.append([var[0],'1'])\r\n # sort the terms to ensure correct ordering\r\n terms.sort()\r\n # creates a list of all possible bit strings\r\n # just an easy way to create all possible truth assignments\r\n seq = [\"\".join(seq) for seq in itertools.product(\"01\", repeat=len(terms))]\r\n # loop through all possible truth assignments\r\n for j in range(len(seq)):\r\n # we initialize at probability = 100%\r\n chance = 1\r\n # assign the truth values\r\n for k in range(len(seq[j])):\r\n terms[k][1] = seq[j][k]\r\n # this computes the probability using the chaining rule\r\n for i in range(len(terms)):\r\n new_terms = terms[:-i-1] + obs\r\n new_terms.sort()\r\n chance *= probability(cpts,terms[-i-1], new_terms)\r\n # add the probabilities to our list\r\n all_sums.append(chance)\r\n combine = []\r\n # note all variables which weren't in obs or Vs\r\n for i in dont_count:\r\n combine.append(terms.index([i,'1']))\r\n # this will store the final probabilities\r\n final_array = [0] * len(seq)\r\n # another complicated looking loop, it just serves to combine probabilities\r\n # for example, if we have a CPT with x_1, x_2, x_3, x_4 and we observe \r\n # x_1 to be true and have Vs = [x_3, x_4] then we need to combine the \r\n # probabilities that are the same except for x_2 = true vs false\r\n for loc in combine:\r\n for sequence in range(len(seq)):\r\n for alt_sequence in range(sequence+1,len(seq)):\r\n if (seq[sequence][:loc] + seq[sequence][loc+1:]) == (seq[alt_sequence][:loc] + seq[alt_sequence][loc+1:]):\r\n final_array[sequence] = all_sums[sequence] + all_sums[alt_sequence]\r\n\r\n # get the truth assignment for the highest probability\r\n location = seq[final_array.index(max(final_array))]\r\n truth_assignment = []\r\n # place the truth assignment in a more readable fashion\r\n for value in range(len(terms)):\r\n if terms[value] in search_array:\r\n if location[value] == '0':\r\n 
truth_assignment.append(terms[value][0]+ ' = False')\r\n else:\r\n truth_assignment.append(terms[value][0]+ ' = True')\r\n return (truth_assignment)", "def prodgreqs_base(A):\n choices = [ list(range(xi+1)) for xi in A ]\n M = len(choices) + 1\n for yi in itertools.product(*choices):\n prod, freq = 1, 1\n for a, y, x in zip(range(2, M+1), yi, A):\n prod *= a ** y\n freq *= math.factorial(x) // math.factorial(y) // math.factorial(x-y)\n yield ProdFreqPair(prod, freq)", "def calculate_probability(k: int, m: int, n: int) -> float:\n population = [\"AA\" for _ in range(k)] + [\"Aa\" for _ in range(m)] + [\"aa\" for _ in range(n)]\n pairings = it.combinations(population, 2)\n probabilities = [PROBABILITIES[pairing] for pairing in pairings]\n output = sum(probabilities) / len(probabilities)\n\n return output", "def probability(distances):\n v = [1.0/(d + 1) for d in distances]\n s = sum(v)\n return [i/s for i in v]", "def enumerate_joint_ask(X, e, P):\n Q = ProbDist(X) ## A probability distribution for X, initially empty\n Y = [v for v in P.variables if v != X and v not in e]\n for xi in P.values(X):\n Q[xi] = enumerate_joint(Y, extend(e, X, xi), P)\n return Q.normalize()", "def proba(self, X):\n return np.sum(self.joint_proba(X), axis=-1)", "def _get_selection_probabilities(self):\r\n probabilities = np.arange(1, self.population_size+1, dtype=float)[::-1]\r\n probabilities /= probabilities.sum()\r\n return probabilities", "def get_probs(self, a):\n with torch.no_grad():\n probabilities = (np.array(self.priorities) ** a) / sum(np.array(self.priorities) ** a)\n return probabilities", "def joint(G, xs=None):\n vars = G.vars() #: [var]\n facs = { f : G.N(f) for f in G.facs() } #: fac => vars\n\n dims = [G.node[x]['d'] for x in vars] #: [nat]\n _joint = ones(dims)\n\n for vals in itertools.product( *(xrange(d) for d in dims) ): # cartesian product\n _vars = dict(zip(vars,vals)) #: var => val\n vals = tuple(vals) # to index\n #print\n #print _vars\n for fac in facs:\n _vals = [_vars[v] for v in facs[fac]] # keep only fac's vars' vals\n #print '%s%s' % (fac, tuple(_vals))\n _joint[vals] *= G(fac, *_vals)\n\n Z = sum(_joint)\n\n return pd(_joint), Z", "def comp_relation_probs(self, x_feats):\n # l : examples batch size\n # d : dimensionality of the (binary) feature space\n relation_scores = sparse.dot(x_feats, self.W) + self.Wb # [l, d] x [d, m] + [m] => [l, m]\n relation_probs = T.nnet.softmax(relation_scores)\n return relation_probs" ]
[ "0.6887764", "0.6807377", "0.6577042", "0.6448017", "0.64271957", "0.6388704", "0.621264", "0.61288685", "0.6084318", "0.60133076", "0.59769166", "0.59629256", "0.5954798", "0.5925714", "0.5911252", "0.5900835", "0.5882425", "0.5880176", "0.5865823", "0.5850093", "0.580452", "0.57948756", "0.5785913", "0.5762536", "0.57624674", "0.5760802", "0.5755206", "0.5739757", "0.57310236", "0.572803" ]
0.6838733
1
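The positive document for the query above enumerates groundings and asks the Bayesian network for each joint value. A minimal, runnable sketch of the same brute-force idea — a hypothetical two-variable binary network with made-up CPT numbers standing in for the `bn`/`db` objects the original assumes — is:

from itertools import product

# Hypothetical CPTs: P(A) and P(B | A) for two binary variables.
p_a = {True: 0.3, False: 0.7}
p_b_given_a = {(True, True): 0.9, (False, True): 0.1,
               (True, False): 0.2, (False, False): 0.8}

def joint_probabilities():
    # Return {(a, b): P(A=a, B=b)} for every combination of values.
    joints = {}
    for a, b in product([True, False], repeat=2):
        # Chain rule: P(A=a, B=b) = P(A=a) * P(B=b | A=a)
        joints[(a, b)] = p_a[a] * p_b_given_a[(b, a)]
    return joints

table = joint_probabilities()
for assignment, prob in sorted(table.items()):
    print(assignment, round(prob, 4))
assert abs(sum(table.values()) - 1.0) < 1e-9  # a full joint sums to 1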
Generate all possible groundings (assignments of constants to variables)
def generateCombos(vars, constants):
    # SUPER NOT GENERALIZED---TOO LATE AT NIGHT FOR ME TO DO RECURSIVE ALGORITHMS
    assert len(vars) == 2 and len(constants) == 2
    combs = []
    for c1 in constants:
        for c2 in constants:
            combs.append(Grounding([(vars[0], c1), (vars[1], c2)]))
    return combs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ground_operator(self, op_name):\n op = self.domain.operators[op_name]\n self._set_operator_groundspace( op_name, op.variable_list.items() )\n for ground in self._instantiate( op_name ):\n # print('grounded', ground)\n st = dict(ground)\n gop = Operator(op_name)\n gop.variable_list = st\n gop.precondition_pos = set( [ a.ground( st ) for a in op.precondition_pos ] )\n gop.precondition_neg = set( [ a.ground( st ) for a in op.precondition_neg ] )\n gop.effect_pos = set( [ a.ground( st ) for a in op.effect_pos ] )\n gop.effect_neg = set( [ a.ground( st ) for a in op.effect_neg ] )\n yield gop", "def __compileVariables(self):\n state_variables = []\n state_diffs = []\n networks = []\n frames = []\n vars = []\n\n language = self.language\n\n instansiate_variables = {}\n\n self.obj_vars = self.object_variables()\n for key, var in self.variables.items(): # Grouping the variables\n size = self.size_of_variable(var)\n variable_type = var.type\n if not size or key in self.variables_not_included: # Skip variable\n print('Variable not included:\\tvar: {}'.format(var.label))\n continue\n if var.equation_list and variable_type in ['constant', \"network\"]:\n self.constantEquations.append(*var.equation_list)\n continue\n if variable_type in ['constant', 'frame', 'state']:\n # print(dir(var.units))\n units_pp = var.units.prettyPrint()\n doc_pp = var.doc\n if units_pp:\n units_doc_str = '{}, {}'.format(units_pp, doc_pp)\n else:\n units_doc_str = '{}'.format('Empty', doc_pp)\n instansiate_variables[key] = self.variable_dict(var)\n if var.compiled_index_list: # Index sets need compilation\n string_version = self.matrix_string_zeros(size, prefix = ' ')\n mat = np.zeros(size)\n index = str(var.index_structures) # Convert to string\n rep = self.mod_index[var.index_structures[0]].printable()\n string_w_comments = self.matrix_to_str_w_line_comments(mat, rep,\n prefix = ' ')\n width = 79 - 16 - len(index) - len(var.compiled) # 16 is others\n width2 = 79 - 8 - len(units_pp) - len(doc_pp)\n cons_str = '\\n{0} = {ar}({v: <{msg_box}} {com} {ind}\\n {st} ){v: <{w2}} {com} {udoc}'\n cons_var_str = cons_str.format(var.compiled,\n com = CODE[language][\"comment\"],\n ar = CODE[language][\"list\"],\n st = string_w_comments,\n udoc = units_doc_str,\n ind = index,\n w2 = width2,\n w = width,\n v = '')\n # cons_str = '{0} = {ar}({v: <{msg_box}} {com} {ind}\\n {st})'\n # cons_var_str = cons_str.format(var.compiled,\n # ar = CODE[language]['list'],\n # st = string_w_comments,\n # ind = index,\n # msg_box = width,\n # v = '')\n else:\n string_version = self.matrix_string_zeros(size, prefix = ' ')\n index = 'none'\n width = 79 - 16 - len(index) - len(var.compiled) # 16 is others\n width2 = 79 - 9 - len(units_pp) - len(doc_pp)\n cons_str = '\\n{0} = {array}({val: <{msg_box}} # {ind}\\n {st}\\n ) {h: <{w2}} {com} {udoc}'\n cons_var_str = cons_str.format(var.compiled,\n array = CODE[language][\"list\"],\n com = CODE[language][\"comment\"],\n udoc = units_doc_str,\n st = string_version,\n ind = index,\n w = width,\n w2 = width2,\n h = '',\n val = '')\n if variable_type in ['frame']:\n frames.append(cons_var_str)\n elif variable_type in ['state']:\n if var.label in self.state_variables:\n state_variables.append(cons_var_str)\n else:\n pass\n else:\n vars.append(cons_var_str)\n elif variable_type in [\"network\"]:\n mat = self.populateNetworkVariable(var)\n string_mat = self.matrix_to_string(mat, prefix = ' ')\n index = str(var.index_structures) # Convert to string\n width = 79 - 20 - len(index) - len(var.compiled) # 16 is 
others\n if width < 1:\n width = 1\n nt_str = '{0} = {array}({val: <{msg_box}} # {ind}\\n {st})'\n netw_var_str = nt_str.format(var.compiled,\n array = CODE[language][\"list\"],\n st = string_mat,\n ind = index,\n w = width,\n val = '')\n networks.append(netw_var_str)\n putData(instansiate_variables, self.variable_instantiate_file)\n return [state_variables, state_diffs, networks, frames, vars]", "def compile_globulars():\n \n gc_frame = coord.Galactocentric(galcen_distance=8*u.kpc, z_sun=0*u.pc)\n frame_dict0 = gc_frame.__dict__\n old_keys = frame_dict0.keys()\n \n frame_dict = {}\n for k in ['galcen_distance', 'roll', 'galcen_v_sun', 'galcen_coord', 'z_sun']:\n frame_dict[k] = frame_dict0['_{}'.format(k)]\n \n t = Table.read('../data/gdr2_satellites_c3.txt', format='ascii')\n \n x = np.array([t['X']-8, t['Y'], t['Z']])*u.kpc\n v = np.array([t['U'], t['V'], t['W']])*u.km/u.s\n \n for i in range(3):\n v[i] = v[i] + gc_frame.galcen_v_sun.d_xyz[i]\n \n xgal = coord.Galactocentric(x, **frame_dict)\n xeq = xgal.transform_to(coord.ICRS)\n veq_ = gc.vgal_to_hel(xeq, v, galactocentric_frame=gc_frame)\n veq = [None] * 3\n veq[0] = veq_[0].to(u.mas/u.yr)\n veq[1] = veq_[1].to(u.mas/u.yr)\n veq[2] = veq_[2].to(u.km/u.s)\n \n # store observables\n data = {'name': t['Name'], 'ra': xeq.ra, 'dec': xeq.dec, 'distance': xeq.distance, 'pmra': veq[0], 'pmdec': veq[1], 'vr': veq[2]}\n \n tout = Table(data=data, names=('name', 'ra', 'dec', 'distance', 'pmra', 'pmdec', 'vr'))\n tout.pprint()\n tout.write('../data/positions_globular.fits', overwrite=True)", "def gen_constant(self, g, ng, ct):\n pass", "def set_ground_modes(self, basis=None, auto=True):\n if self._atmospheric_basis is None: # Presently, the ground can not yet be set independently of an atmosphere.\n print('Atmosphere modes not set up. 
Add an atmosphere before adding the ground!')\n print('Ground setup aborted.')\n return\n\n if auto:\n if self.gotemperature_params is None or isinstance(self.gotemperature_params, OceanicTemperatureParams):\n self.gotemperature_params = GroundTemperatureParams(self.scale_params)\n if self.ground_params is None:\n self.ground_params = GroundParams(self.scale_params)\n\n self.oceanic_params = None\n self._oceanic_basis = None\n\n if basis is not None:\n self.ground_basis = basis\n else:\n self.ground_basis = self._atmospheric_basis\n\n self._oceanic_var_string = list()\n self._oceanic_latex_var_string = list()\n self._ground_latex_var_string = list()\n self._ground_var_string = list()\n if self.dynamic_T:\n self._oceanic_latex_var_string.append(r', T_{{\\rm g},0}')\n self._oceanic_var_string.append(r'T_g_0')\n for i in range(1, self.nmod[1] + 1):\n self._ground_latex_var_string.append(r'delta T_{\\rm g,' + str(i) + \"}\")\n self._ground_var_string.append(r'delta_T_g_' + str(i))", "def generate_source():\n \"\"\"their dependencies\"\"\"\n global dictionary_names, dictionary_slices\n src = \"\"\n for s in dictionary_slices:\n src += deconstruct(s)\n src += \" '\" + pointer_to_name(s)\n src += \"' define\\n\"\n return src + \"\\n\"", "def generate_cfg(numStart, numNonterminals, numTerminals, numProductions, min_length, max_length, onlyunaryterminal,\n terminalprob, minrhs):\n grammar = cfg.ContextFreeGrammar()\n if numTerminals == -1:\n terminals = list(dictionary.generateDictionary(numProductions * max_length))\n else:\n terminals = list(dictionary.generateDictionary(numTerminals))\n nonterminals = []\n terminalCounter = 0\n start = []\n if numStart == 1:\n start.append(\"S\")\n else:\n for i in xrange(numStart):\n start.append(\"S_\" + str(i))\n for s in start:\n nonterminals.append(s)\n vcup = []\n for terminal in terminals:\n vcup.append(terminal)\n for i in range(numNonterminals - numStart):\n nt = \"NT\" + str(i)\n nonterminals.append(nt)\n vcup.append(nt)\n productionSet = set()\n obligatoryrhs = []\n for x in xrange(minrhs):\n for nt in nonterminals:\n obligatoryrhs.append(nt)\n while len(productionSet) < numProductions:\n if len(productionSet) < len(obligatoryrhs):\n lhs = obligatoryrhs[len(productionSet)]\n else:\n lhs = random.choice(nonterminals)\n rhs = []\n rhslength = random.randrange(min_length, max_length + 1)\n # print rhslength\n if rhslength == 1 and onlyunaryterminal:\n if numTerminals == -1:\n rhs.append(terminals[terminalCounter])\n terminalCounter += 1\n else:\n rhs.append(random.choice(terminals))\n else:\n for i in range(rhslength):\n if random.random() < terminalprob:\n if numTerminals == -1:\n rhs.append(terminals[terminalCounter])\n terminalCounter += 1\n else:\n rhs.append(random.choice(terminals))\n else:\n rhs.append(random.choice(nonterminals))\n prod = (lhs, tuple(rhs))\n if not prod in productionSet:\n productionSet.add(prod)\n # print prod\n for nt in nonterminals:\n n = 0\n for lhs, rhs in productionSet:\n for sym in rhs:\n if sym == nt:\n break\n else:\n # not on the rhs of any nonterminal.\n while True:\n lhs = random.choice(nonterminals)\n if lhs != nt:\n rhslength = random.randrange(min_length, max_length + 1)\n if rhslength == 1 and not onlyunaryterminal:\n productionSet.add((lhs, (nt,)))\n break\n elif rhslength > 1:\n position = random.choice(range(rhslength))\n rhs = []\n for i in range(rhslength):\n if i == position:\n rhs.append(nt)\n else:\n if random.random() < terminalprob:\n if numTerminals == -1:\n rhs.append(terminals[terminalCounter])\n 
terminalCounter += 1\n else:\n rhs.append(random.choice(terminals))\n else:\n rhs.append(random.choice(nonterminals))\n productionSet.add((lhs, tuple(rhs)))\n\n # now we have the set of weighted productions.\n grammar.productions = productionSet\n grammar.nonterminals = nonterminals\n grammar.terminals = terminals\n grammar.start_set = start\n return grammar.trim()", "def create_gol_constants() -> None:\r\n\r\n self.INITIAL_RULE = 'B3/R23'\r\n self.INVALID_RULE_MESSAGE = (\r\n 'Invalid rule.\\n\\n'\r\n 'Set the rule in the format \"Bx/Ry\", where x and y are numbers of neighbors that:\\n'\r\n 'x: causes a birth of a cell\\n'\r\n 'y: allows a living cell to remain alive\\n\\n'\r\n 'Numbers 0 and 9 cannot belong to x and y.'\r\n )\r\n\r\n self.BOARD_WIDTH = 1000\r\n self.BOARD_HEIGHT = 1000\r\n self.BOARD_BG = (0, 0, 0)\r\n self.BOARD_STROKE = (50, 50, 50)\r\n self.BOARD_FILL = (255, 255, 255)\r\n\r\n self.IMAGE_MAX_WIDTH = 2000\r\n self.IMAGE_MAX_HEIGHT = 2000\r\n self.CELL_SIZES = [3, 5, 10, 20, 30, 50]\r\n self.INITIAL_ZOOM = len(self.CELL_SIZES) // 2\r\n\r\n self.TIMES_PER_GEN = [3000, 2000, 1500, 1000, 700, 400, 200, 100, 50]\r\n self.INITIAL_TIME_PER_GEN = len(self.TIMES_PER_GEN) // 2", "def to_general(y: List[Symbol], yp: Symbol = 0, t: Symbol = t, constant_prefix: str = \"C\") -> Tuple[Symbol, List[Symbol]]:\n\n const_iter = numbered_symbols(prefix=constant_prefix, start=1)\n consts = []\n\n general = yp\n for y_ in y:\n const = next(const_iter)\n consts.append(const)\n general += const * y_\n general = constantsimp(general.collect(y_), consts)\n\n return general, consts", "def _create_variables(self):\n\n \n with tf.name_scope(\"variable\"):\n if self.reg_type == 'L2':\n regularizer = tf.contrib.layers.l2_regularizer(scale=self.reg_scale)\n else:\n regularizer = tf.contrib.layers.l1_regularizer(scale=self.reg_scale)\n \n self.dim_lst = [self.dim_inputs] + self.dim_hidden_lst + [self.number_structures]\n print(self.dim_lst)\n \n self.W_lst = []\n self.b_lst = []\n for i in range(len(self.dim_lst)-1):\n self.W_lst.append(tf.get_variable(\n \"W{}\".format(i+1),\n shape=[self.dim_lst[i], self.dim_lst[i+1]],\n initializer=tf.contrib.layers.xavier_initializer(),\n regularizer=regularizer)\n )\n # not output layer, has bias term\n if i < len(self.dim_lst) - 2:\n self.b_lst.append(tf.get_variable(\"b{}\".format(i+1), shape=[self.dim_lst[i+1]]))", "def define_variables(m):\r\n\r\n # Non-negative candidate capacity\r\n m.mu_1 = Var(m.G_C, m.Y, within=NonNegativeReals, initialize=0)\r\n\r\n # Solar build limits\r\n m.mu_2 = Var(m.Z, m.Y, within=NonNegativeReals, initialize=0)\r\n\r\n # Wind build limits\r\n m.mu_3 = Var(m.Z, m.Y, within=NonNegativeReals, initialize=0)\r\n\r\n # Storage build limits\r\n m.mu_4 = Var(m.Z, m.Y, within=NonNegativeReals, initialize=0)\r\n\r\n # Min power output (all generators excluding storage units)\r\n m.sigma_1 = Var(m.G.difference(m.G_STORAGE), m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max power output - existing thermal\r\n m.sigma_2 = Var(m.G_E_THERM, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max power output - candidate thermal\r\n m.sigma_3 = Var(m.G_C_THERM, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max power output - existing wind\r\n m.sigma_4 = Var(m.G_E_WIND, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max power output - candidate wind\r\n m.sigma_5 = Var(m.G_C_WIND, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max power output - existing solar\r\n 
m.sigma_6 = Var(m.G_E_SOLAR, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max power output - candidate solar\r\n m.sigma_7 = Var(m.G_C_SOLAR, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max power output - hydro\r\n m.sigma_8 = Var(m.G_E_HYDRO, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Min charging power - storage units\r\n m.sigma_9 = Var(m.G_STORAGE, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Min discharging power - storage_units\r\n m.sigma_10 = Var(m.G_STORAGE, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max charging power - existing storage\r\n m.sigma_11 = Var(m.G_E_STORAGE, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max charging power - candidate storage\r\n m.sigma_12 = Var(m.G_C_STORAGE, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max discharging power - existing storage\r\n m.sigma_13 = Var(m.G_E_STORAGE, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max discharging power - candidate storage\r\n m.sigma_14 = Var(m.G_C_STORAGE, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Min energy - storage units\r\n m.sigma_15 = Var(m.G_STORAGE, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max energy - existing storage units\r\n m.sigma_16 = Var(m.G_E_STORAGE, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max energy - candidate storage\r\n m.sigma_17 = Var(m.G_C_STORAGE, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Min energy - interval end\r\n m.sigma_18 = Var(m.G_STORAGE, m.Y, m.S, within=NonNegativeReals, initialize=0)\r\n\r\n # Max energy - interval end\r\n m.sigma_19 = Var(m.G_STORAGE, m.Y, m.S, within=NonNegativeReals, initialize=0)\r\n\r\n # Ramp-rate up (thermal and hydro generators)\r\n m.sigma_20 = Var(m.G_THERM.union(m.G_E_HYDRO), m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Ramp-rate down (thermal and hydro generators)\r\n m.sigma_23 = Var(m.G_THERM.union(m.G_E_HYDRO), m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Non-negative lost load power\r\n m.sigma_26 = Var(m.Z, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Min powerflow\r\n m.sigma_27 = Var(m.L, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Max powerflow\r\n m.sigma_28 = Var(m.L, m.Y, m.S, m.T, within=NonNegativeReals, initialize=0)\r\n\r\n # Storage energy transition\r\n m.zeta_1 = Var(m.G_STORAGE, m.Y, m.S, m.T, initialize=0)\r\n\r\n # Power balance (locational marginal price)\r\n m.lamb = Var(m.Z, m.Y, m.S, m.T, initialize=0)\r\n\r\n return m", "def build_schematic(self, bg=None):", "def build_goal_generation(\n cls, constants: CType, mujoco_simulation: SType\n ) -> GoalGenerator:\n pass", "def setup_steps(self):\n step1 = ground_step.Ground(5745, 495, 40, 44)\n step2 = ground_step.Ground(5788, 452, 40, 44)\n step3 = ground_step.Ground(5831, 409, 40, 44)\n step4 = ground_step.Ground(5874, 366, 40, 176)\n\n step5 = ground_step.Ground(6001, 366, 40, 176)\n step6 = ground_step.Ground(6044, 408, 40, 40)\n step7 = ground_step.Ground(6087, 452, 40, 40)\n step8 = ground_step.Ground(6130, 495, 40, 40)\n\n step9 = ground_step.Ground(6345, 495, 40, 40)\n step10 = ground_step.Ground(6388, 452, 40, 40)\n step11 = ground_step.Ground(6431, 409, 40, 40)\n step12 = ground_step.Ground(6474, 366, 40, 40)\n step13 = ground_step.Ground(6517, 366, 40, 176)\n\n step14 = ground_step.Ground(6644, 366, 40, 176)\n step15 = 
ground_step.Ground(6687, 408, 40, 40)\n step16 = ground_step.Ground(6728, 452, 40, 40)\n step17 = ground_step.Ground(6771, 495, 40, 40)\n\n step18 = ground_step.Ground(7760, 495, 40, 40)\n step19 = ground_step.Ground(7803, 452, 40, 40)\n step20 = ground_step.Ground(7845, 409, 40, 40)\n step21 = ground_step.Ground(7888, 366, 40, 40)\n step22 = ground_step.Ground(7931, 323, 40, 40)\n step23 = ground_step.Ground(7974, 280, 40, 40)\n step24 = ground_step.Ground(8017, 237, 40, 40)\n step25 = ground_step.Ground(8060, 194, 40, 40)\n step26 = ground_step.Ground(8103, 194, 40, 360)\n\n step27 = ground_step.Ground(8488, 495, 40, 40)\n\n self.step_group = pygame.sprite.Group(step1, step2,\n step3, step4,\n step5, step6,\n step7, step8,\n step9, step10,\n step11, step12,\n step13, step14,\n step15, step16,\n step17, step18,\n step19, step20,\n step21, step22,\n step23, step24,\n step25, step26,\n step27)", "def initialize_sets(self):\n for block in self.blocks:\n # Insert phi nodes from SSA stage into the assignments of the block\n for phi in block.phis:\n block.gen.setdefault(phi, []).insert(0, phi)\n\n # Update the kill set with the variables that are assigned to in\n # the block\n block.kill = set(block.gen)\n block.output = set(block.gen)\n #for entry in block.bound:\n # block.i_kill |= self.assmts[entry].bit\n\n for assmts in self.assmts.itervalues():\n self.entry_point.i_gen |= assmts.bit\n self.entry_point.i_output = self.entry_point.i_gen", "def gen_task0():\n argc = 1\n goal = 'f'\n premise = 'b'\n ctx, targets = list(), list()\n # Generate according to goal <- premise\n args = r_consts(argc)\n # Add the successful ground case\n ctx.append([(premise, args)])\n targets.append(((goal, args), 1))\n # Fail on non-matching constant\n args = args.copy()\n args[R.randrange(len(args))] = r_consts(1, args)[0]\n preds = r_preds(3)\n ctx.append([(preds[0], args)])\n targets.append(((goal, args), 0))\n # Add padding length dummy rule\n vs = r_vars(argc)\n ctx.append([(preds[1], vs), (preds[2], vs)])\n preds.extend([goal, premise])\n gen_task(ctx, targets, preds)", "def gen_params(no_cultures):\n # Plate level\n kn = 0.1 # Nutrient diffusion\n ks = 0.1 # Signal diffusion\n b = 0.05 # Signal on cells effect constant\n a = 0.05 # Signal secretion constant\n # Culture level\n # Growth rate constant\n r_mean = 1.0\n r_var = 1.0\n r_params = [max(0.0, gauss(r_mean, r_var)) for i in range(no_cultures)]\n params = np.array([kn, ks, b, a] + r_params)\n return params", "def variabilize(self):\n if self.nvars>=0:\n pass #already done\n else:\n varTab = syt.SymbolTable()\n def convertArgs(args):\n return map(lambda a: -varTab.getId(a) if isVariableAtom(a) else a, args)\n def convertGoal(g):\n return Goal(g.functor, convertArgs(g.args))\n if self.lhs: self.lhs = convertGoal(self.lhs)\n self.rhs = map(convertGoal, self.rhs)\n if self.features:\n self.features = map(convertGoal, self.features)\n if self.findall:\n self.findall = map(convertGoal, self.findall) \n self.variableList = varTab.getSymbolList()\n self.nvars = len(self.variableList)", "def generate_powerset_bridge_constraints(problem):\n\n c_30 = _dynamic_constraint_30(problem)\n c_33 = _dynamic_constraint_33(problem)\n c_34 = _dynamic_constraint_34(problem)\n c_35 = _dynamic_constraint_35(problem)\n c_36 = _dynamic_constraint_36(problem)\n\n return c_30 & c_33 & c_34 & c_35 & c_36", "def generate_all_locations(grid, shape):", "def Gd():\n Pz=[8]\n Pp=[1,1]\n return Pz, Pp", "def _generate_raw_environments(self, num, seed):", "def ground(cls, nocc, nspins):\n 
from math import floor, ceil\n nspatials = int(nspins/2)\n alpha = [1]*ceil(nocc/2) + [0]*(nspatials-ceil(nocc/2))\n beta = [1]*floor(nocc/2) + [0]*(nspatials-floor(nocc/2))\n configuration = []\n for i,j in zip(alpha, beta):\n configuration.append(i)\n configuration.append(j)\n\n return cls(configuration)", "def one_variation(self):\n\n globals_ = dict(\n # Physical constants\n g=9.81, # Gravitational acceleration [m/s^2]\n c=3e8, # Speed of Light [m/s]\n h=6.6262e-34, # Planck [Js]\n k=1.38e-23, # Boltzmann [J/K]\n R=8.31441, # k*NA [J/(mol*kg)]\n NA=6.0225e23, # Avogadro [1/mol]\n gamma=6.67e11, # Gravitational Constant [Nm^2/kg^2]num\n qe=1.60219e-19, # Elementary charge [C]\n # (e is not free unfortunately)\n e0=8.854187816e-12, # Permittivity of Vacuum [As/(Vm)]\n epsilon0=8.854187816e-12, # Permittivity of Vacuum [As/(Vm)]\n mu0=4e-7*pi, # Permeability of Vacuum [Vs/(Am)]\n K=9e9, # 1/(4*pi*epsilon0) [Vm/(As)]\n me=9.1095e-31, # The mass of electron [kg]\n mu=1.66056e-27, # Atomic mass unit [kg]\n sigma=5.67e-8, # Stefan-Boltzmann Constant\n )\n exec(function_import, globals_)\n for i in ('pi', 'e', 'sin', 'sind', 'asin'):\n assert i in globals_\n\n values = {}\n\n # For example there is a variable k, it is not equal to k (Planck const)\n for variable in self.variable_list:\n exec('%s = None' % variable, globals_, values)\n\n for const in self.const_list:\n exec('%(name)s = %(value)g' % const, values)\n\n for intv in self.interval_list:\n value = interval_.random(intv['interval'])\n if intv['name']:\n name = intv['name']\n exec('%s = float(%g)' % (name, value), globals_, values)\n\n compute_list = self.compute_list[:]\n number_of_uncomputable_formulas = 0\n # The number of the failed computation after\n # a successful computation.\n\n while compute_list:\n compute = compute_list[0]\n try:\n exec(compute['formula'], globals_, values)\n\n except (NameError, TypeError):\n compute_list.append(compute_list.pop(0))\n # It writes the first item to the end\n\n number_of_uncomputable_formulas += 1\n if number_of_uncomputable_formulas == len(compute_list):\n raise UncomputableError(self.code)\n self.is_computable = False\n return\n continue\n except ValueError:\n print('Value Error. 
Formula is:')\n print(compute['formula'])\n return\n\n compute_list.pop(0)\n number_of_uncomputable_formulas = 0\n command = '%(name)s = %(right)s' % compute\n exec(command, globals_, values)\n\n possibilities = next(self.possibilities_cycle)\n erased_elements = set(self.variable_list) - possibilities\n self.list.append((values, erased_elements))", "def sat_generate_candidate_assignments(self):\n # YOUR CODE HERE\n short = min(len(c) for c in self.clauses)\n for c in self.clauses:\n if len(c) == short:\n return set(c.literals)\n # return (set(x.literals) for x in self.clauses if len(x) == min(len(c) for c in self.clauses))", "def circuit():\n np.random.seed(1967)\n for gates in gates_per_layers:\n for gate in gates:\n qml.apply(gate)\n return qml.expval(qml.PauliZ(0))", "def get_variables(test_case, name):\n\n test_case = test_case.lower()\n variables = {\n # Variables for control packet\n \"src_ip\": \"16.0.0.1\",\n \"dst_ip\": \"16.0.1.1\",\n \"src_net\": \"16.0.0.0\",\n \"dst_net\": \"16.0.1.0\",\n \"src_port\": \"1234\",\n \"dst_port\": \"1234\",\n \"src_mac\": \"01:02:03:04:05:06\",\n \"dst_mac\": \"10:20:30:40:50:60\"}\n\n test_vars = {\n \"macip\": {\n # MACs classified directly\n \"classify_src\": \"12:23:34:45:56:67\",\n \"classify_dst\": \"89:9A:AB:BC:CD:DE\",\n # MACs classified through mask\n \"classify_src2\": \"01:02:03:04:56:67\",\n \"classify_dst2\": \"89:9A:AB:BC:50:60\",\n \"src_mask\": \"00:00:00:00:FF:FF\",\n \"dst_mask\": \"FF:FF:FF:FF:00:00\"\n },\n \"l3_ip4\": {\n # IPs for DUT interface setup\n \"dut_to_tg_if1_ip\": \"16.0.0.2\",\n \"dut_to_tg_if2_ip\": \"192.168.0.2\",\n \"prefix_length\": 24,\n \"gateway\": \"192.168.0.1\",\n # classified networks\n \"classify_src_net\": \"16.0.2.0\",\n \"classify_dst_net\": \"16.0.3.0\",\n # IPs in classified networks\n \"classify_src\": \"16.0.2.1\",\n \"classify_dst\": \"16.0.3.1\",\n },\n \"l3_ip6\": {\n # Override control packet addresses with IPv6\n \"src_ip\": \"10::1\",\n \"dst_ip\": \"11::1\",\n \"dst_net\": \"11::\",\n # IPs for DUT interface setup\n \"dut_to_tg_if1_ip\": \"10::2\",\n \"dut_to_tg_if2_ip\": \"20::2\",\n \"prefix_length\": 64,\n \"gateway\": \"20::1\",\n # classified networks\n \"classify_src_net\": \"12::\",\n \"classify_dst_net\": \"13::\",\n # IPs in classified networks\n \"classify_src\": \"12::1\",\n \"classify_dst\": \"13::1\",\n },\n \"l4\": {\n # IPs for DUT interface and route setup\n \"dut_to_tg_if1_ip\": \"16.0.0.2\",\n \"dut_to_tg_if2_ip\": \"192.168.0.2\",\n \"prefix_length\": 24,\n \"gateway\": \"192.168.0.1\",\n \"classify_dst_net\": \"16.0.3.0\",\n # Ports in classified ranges\n \"classify_src\": 60000,\n \"classify_dst\": 61000,\n },\n \"mixed\": {\n # IPs for DUT interface and route setup\n \"dut_to_tg_if1_ip\": \"16.0.0.2\",\n \"dut_to_tg_if2_ip\": \"192.168.0.2\",\n \"prefix_length\": 24,\n \"gateway\": \"192.168.0.1\",\n \"classify_dst_net\": \"16.0.3.0\",\n # IPs in classified networks\n \"classify_src_ip\": \"16.0.2.1\",\n \"classify_dst_ip\": \"16.0.3.1\",\n # Ports in classified ranges\n \"classify_src_port\": 60000,\n \"classify_dst_port\": 61000,\n },\n \"icmp\": {\n # ICMP code and type for control packet\n \"icmp_type\": 0,\n \"icmp_code\": 0,\n # classified ICMP code and type\n \"classify_type\": 3,\n \"classify_code\": 3\n\n },\n \"icmpv6\": {\n # Override control packet addresses with IPv6\n \"src_ip\": \"10::1\",\n \"dst_ip\": \"11::1\",\n \"dst_net\": \"11::\",\n # IPs for DUT interface setup\n \"dut_to_tg_if1_ip\": \"10::2\",\n \"dut_to_tg_if2_ip\": \"20::2\",\n 
\"prefix_length\": 64,\n \"gateway\": \"20::1\",\n # classified networks\n \"classify_src_net\": \"12::\",\n \"classify_dst_net\": \"13::\",\n # ICMP code and type for control packet\n \"icmp_type\": 1,\n \"icmp_code\": 0,\n # classified ICMP code and type\n \"classify_type\": 4,\n \"classify_code\": 2\n\n },\n \"reflex\": {\n # IPs for DUT interface setup\n \"dut_to_tg_if1_ip\": \"16.0.0.2\",\n \"dut_to_tg_if2_ip\": \"192.168.0.2\",\n \"prefix_length\": 24,\n \"gateway\": \"192.168.0.1\",\n \"gateway2\": \"192.168.0.1\",\n # classified networks\n \"classify_src_net\": \"16.0.2.0\",\n \"classify_dst_net\": \"16.0.3.0\",\n # IPs in classified networks\n \"classify_src\": \"16.0.2.1\",\n \"classify_dst\": \"16.0.3.1\",\n },\n \"block_all\": {}\n }\n acl_data = {\n # ACL configuration for L2 tests\n\n \"macip\": {\n \"acl\": [{\n \"name\": name,\n \"type\": \"vpp-acl:vpp-macip-acl\",\n \"aces\": {\n \"ace\": [\n {\n \"name\": \"rule1\",\n \"matches\": {\n\n \"eth\": {\n \"source-mac-address\": test_vars[\"macip\"][\"classify_src\"],\n \"source-mac-address-mask\": test_vars[\"macip\"][\"src_mask\"]\n },\n \"ipv4\": {\n\n \"source-ipv4-network\": \"16.0.0.0/24\"\n }\n },\n\n \"actions\": {\n \"forwarding\": \"ietf-access-control-list:drop\"\n }\n },\n {\n \"name\": \"rule_all\",\n \"matches\": {\n\n \"eth\": {\n \"source-mac-address\": test_vars[\"macip\"][\"classify_src\"],\n \"source-mac-address-mask\": \"00:00:00:00:00:00\"\n },\n\n \"ipv4\": {\n \"source-ipv4-network\": \"0.0.0.0/0\"\n }\n },\n \"actions\": {\n \"forwarding\": \"ietf-access-control-list:accept\"\n }\n }\n ]}\n }\n ]\n },\n # ACL configuration for L3 IPv4 tests\n \"l3_ip4\": {\n \"acl\": [\n {\n \"name\": name,\n \"type\": \"vpp-acl:vpp-acl\",\n \"aces\": {\n \"ace\": [\n {\n \"name\": \"rule1\",\n \"matches\": {\n \"ipv4\": {\n \"destination-ipv4-network\": \"{0}/{1}\".format(\n test_vars[\"l3_ip4\"][\"classify_dst_net\"],\n test_vars[\"l3_ip4\"][\"prefix_length\"]),\n \"source-ipv4-network\": \"{0}/{1}\".format(\n test_vars[\"l3_ip4\"][\"classify_src_net\"],\n test_vars[\"l3_ip4\"][\"prefix_length\"])\n },\n \"udp\":{\n \"source-port\": {\n \"lower-port\": \"0\",\n \"upper-port\": \"65535\"\n },\n \"destination-port\": {\n \"lower-port\": \"0\",\n \"upper-port\": \"65535\"\n }\n }\n },\n \"actions\": {\n \"forwarding\": \"ietf-access-control-list:drop\"\n }\n },\n {\n \"name\": \"rule_all\",\n \"matches\": {\n \"ipv4\": {\n \"destination-ipv4-network\": \"0.0.0.0/0\",\n \"source-ipv4-network\": \"0.0.0.0/0\"\n }\n },\n \"actions\": {\n \"forwarding\": \"ietf-access-control-list:accept\"\n }\n }\n ]\n }\n }\n ]\n },\n # ACL settings for L3 IPv6 tests\n \"l3_ip6\": {\n \"acl\": [\n {\n \"name\": name,\n \"type\": \"vpp-acl:vpp-acl\",\n \"aces\": {\n \"ace\": [\n {\n \"name\": \"rule1\",\n \"matches\": {\n \"ipv6\": {\n \"destination-ipv6-network\": \"{0}/{1}\".format(\n test_vars[\"l3_ip6\"][\"classify_dst_net\"],\n test_vars[\"l3_ip6\"][\"prefix_length\"]),\n \"source-ipv6-network\": \"{0}/{1}\".format(\n test_vars[\"l3_ip6\"][\"classify_src_net\"],\n test_vars[\"l3_ip6\"][\"prefix_length\"])\n },\n \"udp\":{\n \"source-port\": {\n \"lower-port\": \"0\",\n \"upper-port\": \"65535\"\n },\n \"destination-port\": {\n \"lower-port\": \"0\",\n \"upper-port\": \"65535\"\n }\n }\n },\n \"actions\": {\n \"forwarding\": \"ietf-access-control-list:drop\"\n }\n },\n {\n \"name\": \"rule_all\",\n \"matches\": {\n \"ipv6\": {\n \"destination-ipv6-network\": \"0::0/0\",\n \"source-ipv6-network\": \"0::0/0\"\n }\n },\n \"actions\": {\n 
\"forwarding\": \"ietf-access-control-list:accept\"\n }\n }\n ]\n }\n }\n ]\n },\n # ACL configuration for L4 tests\n \"l4\": {\n \"acl\": [\n {\n \"name\": name,\n \"type\": \"vpp-acl:vpp-acl\",\n \"aces\": {\n \"ace\": [\n {\n \"name\": \"rule1\",\n \"matches\": {\n \"ipv4\": {\n \"source-ipv4-network\": \"0.0.0.0/0\"\n },\n \"tcp\": {\n \"source-port\": {\n \"lower-port\": test_vars[\"l4\"][\"classify_src\"],\n \"upper-port\": test_vars[\"l4\"][\"classify_src\"] + 10\n },\n \"destination-port\":{\n \"lower-port\": test_vars[\"l4\"][\"classify_dst\"],\n \"upper-port\": test_vars[\"l4\"][\"classify_dst\"] + 10\n }\n }\n },\n \"actions\":{\n \"forwarding\": \"ietf-access-control-list:drop\"\n }\n },\n {\n \"name\": \"rule_all\",\n \"matches\": {\n \"ipv4\": {\n \"source-ipv4-network\": \"0.0.0.0/0\",\n \"destination-ipv4-network\": \"0.0.0.0/0\"\n }\n },\n \"actions\": {\n \"forwarding\": \"ietf-access-control-list:accept\"\n }\n }\n ]\n }\n }\n ]\n },\n \"mixed\": {\n \"acl\": [\n {\n \"name\": name,\n \"type\": \"vpp-acl:vpp-acl\",\n \"aces\": {\n \"ace\": [\n {\n \"name\": \"ports\",\n \"matches\": {\n \"ipv4\": {\n \"source-ipv4-network\": \"0.0.0.0/0\"\n },\n \"tcp\": {\n \"source-port\": {\n \"lower-port\": test_vars[\"l4\"][\"classify_src\"],\n \"upper-port\": test_vars[\"l4\"][\"classify_src\"] + 10\n },\n \"destination-port\":{\n \"lower-port\": test_vars[\"l4\"][\"classify_dst\"],\n \"upper-port\": test_vars[\"l4\"][\"classify_dst\"] + 10\n }\n }\n },\n \"actions\":{\n \"forwarding\": \"ietf-access-control-list:drop\"\n }\n },\n {\n \"name\": \"rule_all\",\n \"matches\": {\n \"ipv4\": {\n \"destination-ipv4-network\": \"0.0.0.0/0\",\n \"source-ipv4-network\": \"0.0.0.0/0\"\n }\n },\n \"actions\": {\n \"forwarding\": \"ietf-access-control-list:accept\"\n }\n }\n ]\n }\n }\n ]\n },\n \"icmp\": {\n \"acl\": [\n {\n \"name\": name,\n \"type\": \"vpp-acl:vpp-acl\",\n \"aces\": {\n \"ace\": [\n {\n \"name\": \"rule1\",\n \"matches\": {\n \"ipv4\": {\n \"source-ipv4-network\": \"0.0.0.0/0\"\n },\n \"icmp\": {\n \"vpp-acl:vpp-icmp-ace\": {\n \"vpp-acl:icmp-type-range\": {\n \"first\": \"1\",\n \"last\": \"5\"\n },\n \"vpp-acl:icmp-code-range\": {\n \"first\": \"1\",\n \"last\": \"5\"\n }\n }\n }\n },\n \"actions\": {\n \"forwarding\": \"ietf-access-control-list:drop\"\n }\n },\n {\n \"name\": \"rule_all\",\n \"matches\": {\n \"ipv4\": {\n \"source-ipv4-network\": \"0.0.0.0/0\",\n \"destination-ipv4-network\": \"0.0.0.0/0\"\n }\n },\n \"actions\": {\n \"forwarding\": \"ietf-access-control-list:accept\"\n }\n }\n ]\n }\n }\n ]\n },\n \"icmpv6\": {\n \"acl\": [\n {\n \"name\": name,\n \"type\": \"vpp-acl:vpp-acl\",\n \"aces\": {\n \"ace\": [\n {\n \"name\": \"rule1\",\n \"matches\": {\n \"ipv6\": {\n \"source-ipv6-network\": \"::/0\",\n },\n \"icmp\": {\n \"vpp-acl:vpp-icmp-ace\": {\n \"vpp-acl:icmp-type-range\": {\n \"first\": \"1\",\n \"last\": \"5\"\n },\n \"vpp-acl:icmp-code-range\": {\n \"first\": \"1\",\n \"last\": \"5\"\n }\n }\n }\n },\n \"actions\": {\n \"forwarding\": \"ietf-access-control-list:drop\"\n }\n },\n {\n \"name\": \"rule_all\",\n \"matches\": {\n \"ipv6\": {\n \"destination-ipv6-network\": \"0::0/0\",\n \"source-ipv6-network\": \"::/0\",\n }\n },\n \"actions\": {\n \"forwarding\": \"ietf-access-control-list:accept\"\n }\n }\n ]\n }\n }\n ]\n },\n \"reflex\": {\n \"acl\": [\n {\n \"name\": name,\n \"type\": \"vpp-acl:vpp-acl\",\n \"aces\": {\n \"ace\": [\n {\n \"name\": \"rule1\",\n \"matches\": {\n \"ipv4\": {\n \"destination-ipv4-network\": \"{0}/{1}\".format(\n 
test_vars[\"reflex\"][\"classify_src_net\"],\n test_vars[\"reflex\"][\"prefix_length\"]),\n \"source-ipv4-network\": \"{0}/{1}\".format(\n test_vars[\"reflex\"][\"classify_dst_net\"],\n test_vars[\"reflex\"][\"prefix_length\"])\n }\n },\n \"actions\": {\n \"forwarding\": \"vpp-acl:accept-and-reflect\"\n }\n }\n ]\n }\n }\n ]\n },\n \"block_all\": {\n \"acl\": [\n {\n \"name\": name,\n \"type\": \"vpp-acl:vpp-acl\",\n \"aces\": {\n \"ace\": [\n {\n \"name\": \"rule_all\",\n \"matches\": {\n \"ipv4\": {\n \"destination-ipv4-network\": \"0.0.0.0/0\",\n \"source-ipv4-network\": \"0.0.0.0/0\"\n }\n },\n \"actions\": {\n \"forwarding\": \"ietf-access-control-list:drop\"\n }\n }\n ]\n }\n }\n ]\n },\n }\n\n try:\n ret_vars = {}\n ret_vars.update(variables)\n ret_vars.update(test_vars[test_case])\n ret_vars.update(\n {\"acl_settings\": acl_data[test_case]}\n )\n except KeyError:\n raise KeyError(\n \"Unrecognized test case {0}. Valid options are: {1}\".format(\n test_case, acl_data.keys()))\n return ret_vars", "def global_parameter_space():\n return [list(range(7, 22)),\n list(range(12, 27)),\n list(range(25, 40)),\n list(permutations(range(1, 5)))]", "def G():\n Pz=[40]\n Pp=[1,2,1]\n return Pz, Pp", "def standardize_names_groundings(stmts):\n print('Standardize names to groundings')\n for stmt in stmts:\n for concept in stmt.agent_list():\n db_ns, db_id = concept.get_grounding()\n if db_id is not None:\n if isinstance(db_id, list):\n db_id = db_id[0][0].split('/')[-1]\n else:\n db_id = db_id.split('/')[-1]\n db_id = db_id.replace('|', ' ')\n db_id = db_id.replace('_', ' ')\n db_id = db_id.replace('ONT::', '')\n db_id = db_id.capitalize()\n concept.name = db_id\n return stmts\n \"\"\"\n for stmt in stmts:\n for idx, agent in enumerate(stmt.agent_list()):\n if 'UN' in agent.db_refs:\n all_un_scores = []\n for ev in stmt.evidence:\n agent_annots = ev.annotations.get('agents')\n if agent_annots and 'raw_grounding' in agent_annots and \\\n 'UN' in agent_annots['raw_grounding'][idx]:\n un_score = agent_annots['raw_grounding'][idx]['UN'][0][1]\n all_un_scores.append(un_score)\n if all_un_scores:\n noisy_or_score = 1 - numpy.prod([1-x for x in\n all_un_scores])\n print('%s -> %.2f' % (str(all_un_scores), noisy_or_score))\n agent.db_refs['UN'][0] = (agent.db_refs['UN'][0][0],\n noisy_or_score)\n \"\"\"" ]
[ "0.586086", "0.5699041", "0.5360215", "0.53582025", "0.53362375", "0.53046995", "0.5301871", "0.52834463", "0.521038", "0.51668525", "0.5156945", "0.51415384", "0.5131329", "0.51303786", "0.51272744", "0.51083404", "0.51081413", "0.5107868", "0.50947726", "0.5074924", "0.5066942", "0.5065693", "0.5064234", "0.5060313", "0.5053041", "0.5035817", "0.5030404", "0.5029692", "0.50229186", "0.50115556" ]
0.62475884
0
` Given a joint probability table, format it for LaTeX. This function will have to be tailored for every paper. This function simply generates the {tabular} part of the table. The prologue and epilogue, including the caption and label, must be specified in the including file.
def formatJointTableForLaTeX(joints): (varList, atoms, probs) = joints cols = len(varList) + len (probs[0][1]) with open("table1.tex","w") as out: out.write ("\\begin{tabular}{|" + "|".join(["c"]*(cols-2))+"||c|c|}\n") out.write ("\\hline\n") # Table header out.write (" & ".join(varList) + " & " + " & ".join([a for a in atoms]) + " & Joint $p$ & ln~$p$ \\\\ \\hline\n") # Table rows logps = [] for (grounding, probs) in probs: out.write (" & ".join([val for (var, val) in grounding.varList]) + " & " + " & ".join([str(n.val)+" ({:.1f})".format(p) for (n,p) in probs[:-2]]) + " & {:.2f}".format(probs[-2]) + " & {:.2f}".format(probs[-1]) + "\\\\\n") logps.append(probs[-1]) # A line to indicate there are further entries in the DB out.write(" & ".join(["\ldots"]*cols) + "\\\\\n") # Close environment out.write ("\\hline\n\\end{tabular}\n") with open("tab1plogp.tex","w") as plogp: plogp.write("\\newcommand{\\pseudologp}{"+"{:.2f}".format(sum(logps)/len(logps))+"}\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_latex_table(true_hmm, sampled_hmm_list, conf=0.95, dt=1, time_unit='ms', obs_name='force', obs_units='pN', outfile=None):\n\n # confidence interval\n for sampled_hmm in sampled_hmm_list:\n sampled_hmm.set_confidence(conf)\n # dt\n dt = float(dt)\n # nstates\n nstates = sampled_hmm_list[0].nstates\n\n table = r\"\"\"\n\\begin{table*}\n\\caption{{\\bf Estimated mean model parameters and confidence intervals for synthetic timeseries data}}\n\\label{table:synthetic-confidence-intervals}\n\\begin{tabular*}{\\textwidth}{@{\\extracolsep{\\fill}}lccccc}\n\\hline\n& & & \\multicolumn{3}{c}{\\bf Estimated Model Parameters} \\\\ \\cline{4-6}\n\\multicolumn{2}{l}{\\bf Property} & \\bf True Value & \\bf 1 000 observations & \\bf 10 000 observations & \\bf 100 000 observations\\\\ \\hline\n\"\"\"\n # Stationary probability.\n for i in range(nstates):\n if (i == 0):\n table += '\\t\\tEquilibrium probability '\n table += '\\t\\t& $\\pi_{%d}$ & $%0.3f$' % (i+1, true_hmm.stationary_distribution[i])\n for sampled_hmm in sampled_hmm_list:\n p = sampled_hmm.stationary_distribution_mean\n p_lo, p_hi = sampled_hmm.stationary_distribution_conf\n table += ' & $%0.3f_{\\:%0.3f}^{\\:%0.3f}$ ' % (p[i], p_lo[i], p_hi[i])\n table += ' \\\\\\\\' + '\\n'\n table += '\\t\\t\\hline' + '\\n'\n\n # Transition probabilities.\n for i in range(nstates):\n for j in range(nstates):\n if (i == 0) and (j==0):\n table += '\\t\\tTransition probability ($\\Delta t = $%s) ' % (str(dt)+' '+time_unit)\n table += '\\t\\t& $T_{%d%d}$ & $%0.3f$' % (i+1, j+1, true_hmm.transition_matrix[i,j])\n for sampled_hmm in sampled_hmm_list:\n P = sampled_hmm.transition_matrix_mean\n P_lo, P_hi = sampled_hmm.transition_matrix_conf\n table += ' & $%0.3f_{\\:%0.3f}^{\\:%0.3f}$' % (P[i,j], P_lo[i,j], P_hi[i,j])\n table += ' \\\\\\\\' + '\\n'\n table += '\\t\\t\\hline' + '\\n'\n table += '\\t\\t\\hline' + '\\n'\n\n # Transition rates via pseudogenerator.\n index = 0\n for i in range(nstates):\n for j in range(nstates):\n if (i != j):\n if (index==0):\n table += '\\t\\tTransition rate (%s$^{-1}$) ' % time_unit\n Ktrue = compute_rate(true_hmm.transition_matrix, dt)\n table += '\\t\\t& $k_{%d%d}$ & $%2.3f$' % (i+1, j+1, Ktrue[i,j])\n for sampled_hmm in sampled_hmm_list:\n P = sampled_hmm.transition_matrix_mean\n P_lo, P_hi = sampled_hmm.transition_matrix_conf\n K = compute_rate(P, dt)\n K_lo = compute_rate(P_lo, dt)\n K_hi = compute_rate(P_hi, dt)\n table += ' & $%.3f_{\\:%.3f}^{\\:%.3f}$' % (K[i,j], K_lo[i,j], K_hi[i,j])\n index += 1\n table += ' \\\\\\\\' + '\\n'\n table += '\\t\\t\\hline' + '\\n'\n\n # State mean lifetimes.\n for i in range(nstates):\n if (i == 0):\n table += '\\t\\tState mean lifetime (%s) ' % time_unit\n l = true_hmm.lifetimes\n l *= dt\n table += '\\t\\t& $t_{%d}$ & $%.3f$' % (i+1, l[i])\n for sampled_hmm in sampled_hmm_list:\n l = sampled_hmm.lifetimes_mean\n l *= dt\n l_lo, l_hi = sampled_hmm.lifetimes_conf\n l_lo *= dt; l_hi *= dt\n table += ' & $%.3f_{\\:%.3f}^{\\:%.3f}$' % (l[i], l_lo[i], l_hi[i])\n table += ' \\\\\\\\' + '\\n'\n table += '\\t\\t\\hline' + '\\n'\n\n # State relaxation timescales.\n for i in range(nstates-1):\n if (i == 0):\n table += '\\t\\tRelaxation time (%s) ' % time_unit\n t = true_hmm.timescales\n t *= dt\n table += '\\t\\t& $\\\\tau_{%d}$ & $%.3f$' % (i+1, t[i])\n for sampled_hmm in sampled_hmm_list:\n t = sampled_hmm.timescales_mean\n t *= dt\n t_lo, t_hi = sampled_hmm.timescales_conf\n t_lo *= dt; t_hi *= dt\n table += ' & $%.3f_{\\:%.3f}^{\\:%.3f}$' % (t[i], t_lo[i], t_hi[i])\n table += ' 
\\\\\\\\' + '\\n'\n table += '\\t\\t\\hline' + '\\n'\n\n if True:\n table += '\\t\\t\\hline' + '\\n'\n\n # State mean forces.\n for i in range(nstates):\n if (i == 0):\n table += '\\t\\tState %s mean (%s) ' % (obs_name, obs_units)\n m = true_hmm.output_model.means\n table += '\\t\\t& $\\mu_{%d}$ & $%.3f$' % (i+1, m[i])\n for sampled_hmm in sampled_hmm_list:\n m = sampled_hmm.means_mean\n m_lo, m_hi = sampled_hmm.means_conf\n table += ' & $%.3f_{\\:%.3f}^{\\:%.3f}$' % (m[i], m_lo[i], m_hi[i])\n table += ' \\\\\\\\' + '\\n'\n table += '\\t\\t\\hline' + '\\n'\n\n # State force standard deviations.\n for i in range(nstates):\n if (i == 0):\n table += '\\t\\tState %s std dev (%s) ' % (obs_name, obs_units)\n s = true_hmm.output_model.sigmas\n table += '\\t\\t& $s_{%d}$ & $%.3f$' % (i+1, s[i])\n for sampled_hmm in sampled_hmm_list:\n s = sampled_hmm.sigmas_mean\n s_lo, s_hi = sampled_hmm.sigmas_conf\n table += ' & $%.3f_{\\:%.3f}^{\\:%.3f}$' % (s[i], s_lo[i], s_hi[i])\n table += ' \\\\\\\\' + '\\n'\n table += '\\t\\t\\hline' + '\\n'\n\n table += r\"\"\"\\hline\n\\end{tabular*}\n\\end{table*}\n\"\"\"\n\n # Write to file if desired.\n if outfile is not None:\n f = open(outfile,'w')\n f.write(table)\n f.close()\n\n return table", "def generate_latex_table(dictionary,filename,location=\".\"):\n if type(filename) != str:\n raise TypeError('filename should be string')\n if type(dictionary) != dict:\n raise TypeError('dictionary should be dictionary')\n\n head_code = \"\"\"\\\\documentclass{article}\n%In the preamble section include the arabtex and utf8 packages\n\\\\usepackage{arabtex}\n\\\\usepackage{utf8}\n\\\\usepackage{longtable}\n\\\\usepackage{color, colortbl}\n\\\\usepackage{supertabular}\n\\\\usepackage{multicol}\n\\\\usepackage{geometry}\n\\\\geometry{left=.1in, right=.1in, top=.1in, bottom=.1in}\n\n\\\\begin{document}\n\\\\begin{multicols}{6}\n\\\\setcode{utf8}\n\n\\\\begin{center}\"\"\"\n\n tail_code = \"\"\"\\\\end{center}\n\\\\end{multicols}\n\\\\end{document}\"\"\"\n\n begin_table = \"\"\"\\\\begin{tabular}{ P{2cm} P{1cm}}\n\\\\textbf{words} & \\\\textbf{\\\\#} \\\\\\\\\n\\\\hline\n\\\\\\\\[0.01cm]\"\"\"\n end_table= \"\"\"\\\\end{tabular}\"\"\"\n rows_num = 40\n if location != '.':\n filename = location +\"/\"+ filename\n\n try:\n file = open(filename+'.tex', 'w', encoding='utf8')\n file.write(head_code+'\\n')\n n= int(len(dictionary)/rows_num)\n words = [(\"\\\\<\"+word+\"> & \"+str(frequancy)+' \\\\\\\\ \\n') for word, frequancy in dictionary.items()]\n start=0\n end=rows_num\n new_words = []\n for i in range(n):\n new_words = new_words+ [begin_table+'\\n'] +words[start:end] +[end_table+\" \\n\"]\n start=end\n end+=rows_num\n remain_words = len(dictionary) - rows_num*n\n if remain_words > 0:\n new_words += [begin_table+\" \\n\"]+ words[-1*remain_words:]+[end_table+\" \\n\"]\n for word in new_words:\n file.write(word)\n file.write(tail_code)\n file.close()\n return True\n except:\n return False", "def setup_latex_table(self, tabletype, injected):\n self.texfile.write(r\"\\\\renewcommand{\\\\arraystretch}{1.6}\\n\")\n self.texfile.write(r\"\\n\")\n self.texfile.write(r\"\\\\begin{table}[t!]\\n\")\n self.texfile.write(r\" \\\\begin{center}\\n\")\n if tabletype == 'fiducial_fit_params':\n if injected:\n nextline = r\" \\\\begin{tabu} to 1.0\\\\textwidth \"\n nextline += r\"{| X[2.0,c] | X[1,c] | X[1,c] | X[1,c]\"\n nextline += r\" | X[1,c] | X[1,c] | X[1,c] | X[1,c] |}\\n\"\n self.texfile.write(nextline)\n self.texfile.write(r\" \\hline\\n\")\n nextline = r\" 
\\multirow{2}{*}{\\\\textbf{Parameter}} \"\n nextline += r\"& \\multirow{2}{*}{\\\\textbf{Inj}} \"\n nextline += r\"& \\multicolumn{3}{c|}{h0} \"\n nextline += r\"& \\multicolumn{3}{c|}{h1} \"\n nextline += r\"\\\\\\\\ \\cline{3-8}\"\n self.texfile.write(nextline)\n nextline = r\" & & Prior & Fit & \\(\\Delta\\) \"\n nextline += r\"& Prior & Fit & \\(\\Delta\\) \\\\\\\\ \\hline\\n\"\n self.texfile.write(nextline)\n else:\n nextline = r\" \\\\begin{tabu} to 1.0\\\\textwidth \"\n nextline += r\"{| X[c] | X[c] | X[c] |}\\n\"\n self.texfile.write(nextline)\n self.texfile.write(r\" \\hline\\n\")\n self.texfile.write(r\" Parameter & h0 & h1 \\\\\\\\ \\hline\\n\")\n elif tabletype == 'fiducial_fit_metrics':\n nextline = r\" \\\\begin{tabu} to 1.0\\\\textwidth \"\n nextline += r\"{| X[c] | X[c] | X[c] |}\\n\"\n self.texfile.write(nextline)\n self.texfile.write(r\" \\hline\\n\")\n self.texfile.write(r\" h0 & h1 & $\\Delta$ \\\\\\\\ \\hline\\n\")\n else:\n raise ValueError(\n \"This function is only for making fit metric or fit \"\n \"param tables in LaTeX. Got type %s\"%tabletype\n )", "def texify_table(table, labels=None, row_labels=None, align='c'):\n rows = len(table)\n cols = len(table[0])\n if labels is not None and len(labels) != cols:\n raise Exception(\"Invalid argument value: labels.\")\n if row_labels is not None and len(row_labels) != rows:\n raise Exception(\"Invalid argument value: row_labels.\")\n # begin table\n s = \"\\\\begin{tabular}{\"\n if row_labels is not None: s += 'l|'\n s += align * cols\n s += \"}\\n\"\n s += \"\\\\toprule\\n\"\n # header\n if labels is not None:\n if row_labels is not None: s += ' & '\n s += \" & \".join(labels)\n s += \" \\\\\\\\ \\n\"\n s += \"\\\\midrule\\n\"\n # table\n for idx, row in enumerate(table):\n if row_labels is not None: s += row_labels[idx] + \" & \"\n s += \" & \".join(map(str, row))\n s += \" \\\\\\\\ \\n\"\n # end table\n s += \"\\\\bottomrule\\n\"\n s += \"\\\\end{tabular}\" \n return s", "def latex_table():\n \n t = Table.read('../data/stream_origin.fits')\n N = len(t)\n \n f = open('../paper/stream_origin.tex', 'w')\n for i in range(N):\n t_ = t[i]\n for k in t_.colnames:\n if (t_[k]==np.nan) | (t_[k]=='nan'):\n t_[k] = '\\dots'\n #f.write('{:s} & {:s} & {:s} & {:s} & {:.1f}\\\\\\\\ \\n'.format(t_['Name'], t_['host'], t_['progenitor'], t_['type'], t_['feh']))\n line = '{:s} & {:s} & {:s} & {:s} & {:s}\\\\\\\\ \\n'.format(t_['Name'], t_['host'], t_['progenitor'], t_['friends'], t_['type'])\n f.write(line)\n print(line)\n \n f.close()", "def tables(args):\n\n config_file = args.setupfn\n conf_base = os.path.basename(config_file).split('.')[0]\n statfile = os.path.join(args.outputdir,\n \"{}_radvel.stat\".format(conf_base))\n status = load_status(statfile)\n\n assert status.getboolean('mcmc', 'run'), \\\n \"Must run MCMC before making tables\"\n\n P, post = radvel.utils.initialize_posterior(config_file)\n post = radvel.posterior.load(status.get('fit', 'postfile'))\n chains = pd.read_csv(status.get('mcmc', 'chainfile'))\n minafactor = status.get('mcmc', 'minafactor')\n maxarchange = status.get('mcmc', 'maxarchange')\n maxgr = status.get('mcmc', 'maxgr')\n mintz = status.get('mcmc', 'mintz')\n if 'derive' in status.sections() and status.getboolean('derive', 'run'):\n dchains = pd.read_csv(status.get('derive', 'chainfile'))\n chains = chains.join(dchains, rsuffix='_derived')\n derived = True\n else:\n derived = False\n report = radvel.report.RadvelReport(P, post, chains, minafactor, maxarchange, maxgr, mintz, derived=derived)\n tabletex = 
radvel.report.TexTable(report)\n attrdict = {'priors': 'tab_prior_summary', 'rv': 'tab_rv',\n 'params': 'tab_params', 'derived': 'tab_derived',\n 'crit': 'tab_crit'}\n for tabtype in args.type:\n print(\"Generating LaTeX code for {} table\".format(tabtype))\n\n if tabtype == 'ic_compare':\n assert status.has_option('ic_compare', 'ic'), \\\n \"Must run Information Criteria comparison before making comparison tables\"\n\n compstats = eval(status.get('ic_compare', 'ic'))\n report = radvel.report.RadvelReport(\n P, post, chains, minafactor, maxarchange, maxgr, mintz, compstats=compstats\n )\n tabletex = radvel.report.TexTable(report)\n tex = tabletex.tab_comparison()\n elif tabtype == 'rv':\n tex = getattr(tabletex, attrdict[tabtype])(name_in_title=args.name_in_title, max_lines=None)\n elif tabtype == 'crit':\n tex = getattr(tabletex, attrdict[tabtype])(name_in_title=args.name_in_title)\n else:\n if tabtype == 'derived':\n assert status.has_option('derive', 'run'), \\\n \"Must run `radvel derive` before making derived parameter table\"\n assert tabtype in attrdict, 'Invalid Table Type %s ' % tabtype\n tex = getattr(tabletex, attrdict[tabtype])(name_in_title=args.name_in_title)\n\n saveto = os.path.join(\n args.outputdir, '{}_{}.tex'.format(conf_base, tabtype)\n )\n with open(saveto, 'w+') as f:\n f.write(tex)\n\n savestate = {'{}_tex'.format(tabtype): os.path.relpath(saveto)}\n save_status(statfile, 'table', savestate)", "def to_latex_table(self, tab=\" \", caption=\"TODO\", label=\"TODO\"):\n return \"\".join(\n (\n \"\\\\begin{center}\\n\",\n f\"{tab}\\\\begin{{table}}[ht]\\n\",\n f\"{tab*2}\\\\centering\\n\",\n f'{tab*2}\\\\rowcolors{{2}}{{white}}{{gray!25}}\\n'\n f\"{tab*2}\\\\begin{{tabular}}{{crrrrrr}}\\n\",\n (\n f\"{tab*3}\\\\cellcolor[gray]{{0.7}} & \\\\multicolumn{{2}}{{c}}\"\n \"{BT\\\\cellcolor[gray]{0.7}} & \\\\multicolumn{2}{c}{BJ\"\n \"\\\\cellcolor[gray]{0.7}} & \\\\multicolumn{2}{c}\"\n \"{CBJ\\\\cellcolor[gray]{0.7}} \\\\\\\\\\n\"\n ),\n (\n f\"{tab*3}\\\\cellcolor[gray]{{0.7}} Test suite & \"\n \"\\\\multicolumn{1}{c}{\\\\cellcolor[gray]{0.7}Nodes} & \"\n \"\\\\multicolumn{1}{c}{\\\\cellcolor[gray]{0.7}Time(s)} & \"\n \"\\\\multicolumn{1}{c}{\\\\cellcolor[gray]{0.7}Nodes} & \"\n \"\\\\multicolumn{1}{c}{\\\\cellcolor[gray]{0.7}Time(s)} & \"\n \"\\\\multicolumn{1}{c}{\\\\cellcolor[gray]{0.7}Nodes} & \"\n \"\\\\multicolumn{1}{c}{\\\\cellcolor[gray]{0.7}Time(s)}\\\\\\\\\\n\"\n ),\n \"\".join(\n (\n f\"{tab*3}{i} & {bt.nodes_expanded} & {bt.time} \"\n f\"& {bj.nodes_expanded} & {bj.time} & {cbj.nodes_expanded} & \"\n f\"{cbj.time}\\\\\\\\\\n\"\n for i, (bt, bj, cbj) in enumerate(zip(*self.data))\n )\n ),\n f\"{tab*2}\\\\end{{tabular}}\\n\"\n f\"{tab*2}\\\\caption{{{caption}}}\\n\"\n f\"{tab*2}\\\\label{{tab:{label}}}\\n\"\n f\"{tab}\\\\end{{table}}\\n\"\n \"\\\\end{center}\",\n )\n )", "def latex_table(samples, parameter_dict=None, labels=None):\n table = (\n \"\\\\begin{table}[hptb]\\n\\\\begin{ruledtabular}\\n\\\\begin{tabular}\"\n \"{l %s}\\n\" % (\"c \" * len(samples))\n )\n if labels:\n table += (\n \" & \" + \" & \".join(labels)\n )\n table += \"\\\\\\ \\n\\\\hline \\\\\\ \\n\"\n data = {i: i for i in samples[0].keys()}\n if parameter_dict is not None:\n import copy\n\n data = copy.deepcopy(parameter_dict)\n for param in parameter_dict.keys():\n if not all(param in samples_dict.keys() for samples_dict in samples):\n logger.warning(\n \"{} not in list of parameters. 
Not adding to \"\n \"table\".format(param)\n )\n data.pop(param)\n\n for param, desc in data.items():\n table += \"{}\".format(desc)\n for samples_dict in samples:\n median = samples_dict[param].average(type=\"median\")\n confidence = samples_dict[param].confidence_interval()\n table += (\n \" & $%s^{+%s}_{-%s}$\" % (\n np.round(median, 2),\n np.round(confidence[1] - median, 2),\n np.round(median - confidence[0], 2)\n )\n )\n table += \"\\\\\\ \\n\"\n table += (\n \"\\\\end{tabular}\\n\\\\end{ruledtabular}\\n\\\\caption{}\\n\\\\end{table}\"\n )\n return table", "def ppg_table(signals):\n\n # TODO: add more features\n summary = {}\n\n summary[\"PPG_Rate_Mean\"] = np.mean(signals[\"PPG_Rate\"])\n summary[\"PPG_Rate_SD\"] = np.std(signals[\"PPG_Rate\"])\n summary_table = pd.DataFrame(summary, index=[0]) # .transpose()\n\n # Make HTML and Markdown versions\n html = '<h2 style=\"background-color: #D60574\">Summary table</h1>' + summary_table.to_html(\n index=None\n )\n\n try:\n md = summary_table.to_markdown(index=None)\n except ImportError:\n md = summary_table # in case printing markdown export fails\n return html, md", "def insert_into_latex(body, url):\n latex_table_top = r\"\"\"\n% table automatically generated by rubberband, please have a look and check everything\n\\begin{table}\n\\caption{Performance comparison}\n\\label{tbl:rubberband_table}\n\\scriptsize\n\n\"\"\"\n latex_table_bottom = r\"\"\"\n\\end{table}\n\n\"\"\"\n return latex_table_top + body + latex_table_bottom + \"%% \" + url", "def create_latex_table(data, id):\n bd = data['bd']\n sd = data['sd']\n \n filename = 'LatestResults.tex'\n file = r'..\\latex\\tables\\\\' + filename\n\n if os.path.exists(file):\n f_temp = os.path.splitext(file)[0] # without extension\n os.rename(file, f_temp + '_' + id + '.tex')\n\n f = codecs.open(file, 'w', 'utf-8')\n \n f.write('\\n' + r'\\begin{table}' + '\\n')\n f.write(r' \\centering' + '\\n')\n f.write(r' \\caption{Results for each drum instrument with batch sizes 64, 256 and 512.}' + '\\n')\n f.write(r' \\begin{tabular}{l c c c}' + '\\n')\n f.write(r' \\textbf{Batch size} & Metric & BD & SD \\\\' + '\\n')\n f.write(r' \\midrule' + '\\n')\n f.write(r' \\midrule' + '\\n')\n \n for batch_size in BATCHES:\n f.write(' ' + str(batch_size).rstrip('\\n'))\n # 0.805 +- 0.02\n f.write(r' & P & ' + r'$' + '{:.3}'.format(bd[batch_size]['p_mean']) + r' \\pm ' + '{:.3f}'.format(bd[batch_size]['p_std']) + '$' + r' & ' + r'$' + '{:.3}'.format(sd[batch_size]['p_mean']) + r' \\pm ' + '{:.3f}'.format(sd[batch_size]['p_std']) + '$' + r' \\\\' + '\\n')\n f.write(r' & R & ' + r'$' + '{:.3}'.format(bd[batch_size]['r_mean']) + r' \\pm ' + '{:.3f}'.format(bd[batch_size]['r_std']) + '$' + r' & ' + r'$' + '{:.3}'.format(sd[batch_size]['r_mean']) + r' \\pm ' + '{:.3f}'.format(sd[batch_size]['r_std']) + '$' + r' \\\\' + '\\n')\n f.write(r' & F & ' + r'$' + '{:.3}'.format(bd[batch_size]['f_mean']) + r' \\pm ' + '{:.3f}'.format(bd[batch_size]['f_std']) + '$' + r' & ' + r'$' + '{:.3}'.format(sd[batch_size]['f_mean']) + r' \\pm ' + '{:.3f}'.format(sd[batch_size]['f_std']) + '$' + r' \\\\' + '\\n')\n # Don't write horizontal line on the last batch.\n if batch_size != BATCHES[-1]:\n f.write(r' \\midrule' + '\\n')\n\n f.write(r' \\end{tabular}' + '\\n')\n f.write(r' \\label{tab:ResultsTable}' + '\\n')\n f.write(r'\\end{table}' + '\\n')\n f.close()", "def print_table(table):\n for row in table:\n # Header column left justified\n print(\"{:<19}\".format(row[0]), end='')\n # Remaining columns right justified\n for col in 
row[1:]:\n print(\"{:>4}\".format(col), end='')\n print(\"\", end='\\n')", "def make_table(ranked_means):\n fp = open(\"table.tex\", \"w\")\n fp.write(\"\"\"\\\\begin{tabular}{|l|c||l|c|}\n \\\\hline\n \\\\multicolumn{2}{|c||}{Slowest} & \\\\multicolumn{2}{|c|}{Fastest} \\\\\\\\ \\\\hline\n Feature & Rate & Feature & Rate \\\\\\\\ \\\\hline\n\"\"\")\n top_10 = ranked_means[0:10]\n bottom_10 = ranked_means[-10:]\n for ((f_rate, f_name),(s_rate,s_name)) in zip(top_10, bottom_10):\n f_name = f_name.split(\":\")[-1]\n f_name = f_name.rsplit(\" \", 1)[0] if f_name.endswith(\"(V)\") else f_name\n s_name = s_name.split(\":\")[-1]\n s_name = s_name.rsplit(\" \", 1)[0] if s_name.endswith(\"(V)\") else s_name\n fp.write(\" %s & %.2f & %s & %.2f \\\\\\\\ \\n\" % \\\n (f_name, f_rate, s_name, s_rate))\n fp.write(\"\\\\hline\\n\")\n fp.write(\"\\\\end{tabular}\\n\")\n fp.close()\n\n fp = open(\"supp_meaning_table.tex\", \"w\")\n fp.write(\"\"\"\\\\begin{tabular}{|l|c||l|c||l|c||l|c|}\n \\\\hline\n Meaning & Category & Meaning & Category & Meaning & Category & Meaning & Category\\\\\\\\ \\\\hline\n\n\"\"\")\n feature_names = [f.split(\":\")[-1] for (r,f) in ranked_means]\n feature_names.sort(key=lambda s: s.lower())\n col1 = feature_names[0:25]\n col2 = feature_names[25:50]\n col3 = feature_names[50:75]\n col4 = feature_names[75:]\n for a,b,c,d in zip(col1,col2,col3,col4):\n x,y,z,w = [get_meaning_category(i) or \"Excluded\" for i in (a,b,c,d)]\n # Lop off (V)s (we needed them above for get_meaning_category to work)\n a,b,c,d = [f.rsplit(\" \", 1)[0] if f.endswith(\"(V)\") else f for f in (a,b,c,d)]\n fp.write(\"%s & %s & %s & %s & %s & %s & %s & %s\\\\\\\\ \\n\" % (a, x, b, y, c, z, d, w))\n fp.write(\"\\\\hline\\n\")\n fp.write(\"\\\\end{tabular}\\n\")\n fp.close()", "def latex(\r\n samples,\r\n median_pdf_model=True,\r\n sigma=3.0,\r\n name_to_label=True,\r\n include_name=True,\r\n include_quickmath=False,\r\n prefix=\"\",\r\n suffix=\"\"\r\n) -> str:\r\n\r\n values = values_from_samples(samples=samples, median_pdf_model=median_pdf_model)\r\n errors_at_sigma = samples.errors_at_sigma(sigma=sigma, as_instance=False)\r\n\r\n table = []\r\n\r\n for i in range(samples.model.prior_count):\r\n\r\n label_value = frm.parameter_result_latex_from(\r\n parameter_name=samples.model.parameter_names[i],\r\n value=values[i],\r\n errors=errors_at_sigma[i],\r\n superscript=samples.model.superscripts[i],\r\n name_to_label=name_to_label,\r\n include_name=include_name,\r\n include_quickmath=include_quickmath\r\n )\r\n\r\n table.append(f\"{label_value}\")\r\n\r\n table = \"\".join(table)[:-3]\r\n\r\n return f\"{prefix}{table}{suffix}\"", "def to_latex_table(self, parameter_dict=None, save_to_file=None):\n import os\n\n if save_to_file is not None and os.path.isfile(\"{}\".format(save_to_file)):\n raise FileExistsError(\n \"The file {} already exists.\".format(save_to_file)\n )\n\n table = self.latex_table([self.samples_dict], parameter_dict)\n if save_to_file is None:\n print(table)\n elif os.path.isfile(\"{}\".format(save_to_file)):\n logger.warning(\n \"File {} already exists. 
Printing to stdout\".format(save_to_file)\n )\n print(table)\n else:\n with open(save_to_file, \"w\") as f:\n f.writelines([table])", "def format_prettytable(table):\r\n for i, row in enumerate(table.rows):\r\n for j, item in enumerate(row):\r\n table.rows[i][j] = format_output(item)\r\n ptable = table.prettytable()\r\n ptable.hrules = FRAME\r\n ptable.horizontal_char = '.'\r\n ptable.vertical_char = ':'\r\n ptable.junction_char = ':'\r\n return ptable", "def makeTexTable(\n tablefile, caption, sideways=False, footnotetext=None, clearpage=False, pos=\"h!\"\n):\n if sideways:\n tabletype = \"sidewaystable\"\n clearpage = True\n else:\n tabletype = \"table\"\n\n if clearpage:\n clearpagetext = r\"\\clearpage\"\n else:\n clearpagetext = \"\"\n\n if footnotetext is None:\n notes = \"\"\n else:\n notes = footnotetext\n\n tablestring = (\n dedent(\n r\"\"\"\n \\begin{%s}[%s]\n \\rowcolors{1}{CVCWhite}{CVCLightGrey}\n \\caption{%s}\n \\centering\n \\input{%s}\n \\end{%s}\n %s\n %s\n \"\"\"\n )\n % (tabletype, pos, caption, tablefile, tabletype, notes, clearpagetext)\n )\n return tablestring", "def generate_supertable(*subtables):\n DEFAULT_VALUE = '-'\n # Build a set of all \"names\" to appear on the left of the table\n all_keys = set()\n for d in subtables:\n all_keys.update(d['data'].keys())\n\n # Sort the keys so there's a standard order\n all_keys = sorted(list(all_keys))\n # Create a list of table headings to pass to the template...\n table_headings = []\n # ... and a list for the colums, in matching order\n table_data = []\n for d in subtables:\n table_headings.append(d['title'])\n column = []\n for key in all_keys:\n column.append(d['data'].get(key, DEFAULT_VALUE))\n table_data.append(column)\n\n table_rows = []\n for col_number in range(len(all_keys)):\n row = []\n for row_number in range(len(subtables)):\n row.append(\n table_data[row_number][col_number]\n )\n table_rows.append(row)\n\n return generate_table_html(table_headings, all_keys, table_rows)", "def prettyTable(self, heads, rows): \n # First calculate the maximum lengths for each column.\n lengths = map(len, heads)\n for row in rows:\n lengths = map(max, lengths, map(len, row))\n\n # Create a format string for the maximum lengths.\n formatString = (\"|{{:^{}}}\" * len(heads) + \"|\").format(*lengths)\n\n # Print the heads, then the contents.\n headLine = formatString.format(*heads)\n border = \"-\" * len(headLine)\n print(border)\n print(headLine)\n print(border)\n\n # Remake the format string right-justified.\n formatString = (\"|{{:>{}}}\" * len(heads) + \"|\").format(*lengths)\n for row in rows:\n print(formatString.format(*row))\n print(border)", "def out(lam, eng, mat): # {{{1\n print(\"\\\\begin{table}[!htbp]\")\n print(\" \\\\renewcommand{\\\\arraystretch}{1.2}\")\n txt = \" \\\\caption{{\\\\label{{tab:{0}}}properties of {0}}}\"\n # Raw underscores in LaTeX text mode produce “Missing $” errors.\n texlname = lam.name.replace('_', '\\_')\n print(txt.format(texlname))\n print(\" \\\\centering\\\\footnotesize{\\\\rule{0pt}{10pt}\")\n print(\" \\\\tiny calculated by lamprop {}\\\\\\\\[3pt]}}\".format(__version__))\n if eng:\n _engprop(lam)\n if mat:\n _matrices(lam)\n print(\"\\\\end{table}\\n\") # 1}}}", "def make_figure_table(image_files):\n cols = 2\n table_data = []\n row_data = []\n for i, fn in enumerate(image_files):\n row_data.append(p.Image(fn, 3.4 * u.inch, 3.0 * u.inch))\n if (i % cols) == (cols - 1):\n table_data.append(row_data)\n row_data = []\n\n # Determine if there are any images left to print\n if 
len(row_data) != 0:\n for i in range(len(row_data), cols):\n row_data.append(p.Paragraph(\"\", STYLES[\"body_style\"]))\n table_data.append(row_data)\n\n # Style this into a reportlab table and add to the story\n width = 3.75 * u.inch\n table = p.Table(table_data, colWidths=[width, width])\n table.setStyle(\n p.TableStyle(\n [\n (\"ALIGNMENT\", (0, 0), (-1, -1), \"CENTER\"),\n (\"VALIGN\", (0, 0), (-1, -1), \"CENTER\"),\n (\"TOPPADDING\", (0, 0), (-1, -1), 6.0),\n (\"BOTTOMPADDING\", (0, 0), (-1, -1), 6.0),\n ]\n )\n )\n return table", "def print_para_table(s):\n if MODE == 1:\n t = [['Parameter', 'Value', 'Unit'],\n ['Number of bends', NBENDS, '/'], \n ['Width', WIDTH, 'm'],\n ['Depth', DEPTH, 'm'],\n ['Length', LAMBDA*(NBENDS+1), 'm'],\n ['Arc wavelength', LAMBDA, 'm'],\n ['Slope', SLOPE, '/'],\n ['Streamwise resolution', DS, 'm'],\n ['Transverse resolution', np.around(INTERVAL, decimals=4), 'm'],\n ['Streamwise # of pts', s.size + 2*int(LAMBDA/2/DS), '/'],\n ['Transverse # of pts', NUM*2+1, '/']]\n elif MODE == 2:\n if FNAME[0].islower():\n f = FNAME[0].upper() + FNAME[1:]\n else:\n f = FNAME\n t = [['Parameter', 'Value', 'Unit'],\n ['River name', f.rsplit('.', 1)[0], '/'],\n ['Width', WIDTH, 'm'],\n ['Depth', DEPTH, 'm'],\n ['Length', np.round(s[-1], decimals=2), 'm'],\n ['Slope', SLOPE, '/'],\n ['Streamwise resolution', np.round(np.mean(np.diff(s)), decimals=2), 'm'],\n ['Transverse resolution', np.round(INTERVAL, decimals=2), 'm'],\n ['Streamwise # of pts', s.size, '/'],\n ['Transverse # of pts', NUM*2+1, '/']]\n print(tabulate(t, tablefmt='psql', stralign='right', headers='firstrow'))", "def _make_tex_table(self, tabletitle):\r\n stattable = (\r\n r\"\"\"\r\n \\begin{table}[h!]\r\n \\caption{%s}\r\n \\centering\r\n \\begin{tabular}{l l l l l}\r\n \\toprule\r\n \\textbf{Statistic} & \\textbf{Inlet} & \\textbf{Outlet} \\\\\"\"\"\r\n % tabletitle\r\n )\r\n\r\n stats = [\r\n {\"name\": \"Count\", \"attribute\": \"N\", \"rule\": \"top\", \"forceint\": True},\r\n {\"name\": \"Number of NDs\", \"attribute\": \"ND\", \"forceint\": True},\r\n {\"name\": \"Min; Max\", \"attribute\": [\"min\", \"max\"], \"twoval\": True},\r\n {\"name\": \"Mean\", \"attribute\": \"mean\"},\r\n {\r\n \"name\": \"(95\\% confidence interval)\",\r\n \"attribute\": \"mean_conf_interval\",\r\n \"twoval\": True,\r\n \"ci\": True,\r\n \"rule\": \"none\",\r\n },\r\n {\"name\": \"Standard Deviation\", \"attribute\": \"std\"},\r\n {\"name\": \"Log. Mean\", \"attribute\": \"logmean\"},\r\n {\r\n \"name\": \"(95\\% confidence interval)\",\r\n \"attribute\": \"logmean_conf_interval\",\r\n \"twoval\": True,\r\n \"ci\": True,\r\n \"rule\": \"none\",\r\n },\r\n {\"name\": \"Log. Standard Deviation\", \"attribute\": \"logstd\"},\r\n {\"name\": \"Geo. Mean\", \"attribute\": \"geomean\"},\r\n {\r\n \"name\": \"(95\\% confidence interval)\",\r\n \"attribute\": \"geomean_conf_interval\",\r\n \"twoval\": True,\r\n \"ci\": True,\r\n \"rule\": \"none\",\r\n },\r\n {\"name\": \"Coeff. 
of Variation\", \"attribute\": \"cov\"},\r\n {\"name\": \"Skewness\", \"attribute\": \"skew\"},\r\n {\"name\": \"Median\", \"attribute\": \"median\"},\r\n {\r\n \"name\": \"(95\\% confidence interval)\",\r\n \"attribute\": \"median_conf_interval\",\r\n \"twoval\": True,\r\n \"ci\": True,\r\n \"rule\": \"none\",\r\n },\r\n {\"name\": \"Quartiles\", \"attribute\": [\"pctl25\", \"pctl75\"], \"twoval\": True},\r\n {\r\n \"name\": \"Number of Pairs\",\r\n \"attribute\": \"n_pairs\",\r\n \"rule\": \"top\",\r\n \"fromdataset\": True,\r\n \"sigfigs\": 1,\r\n \"forceint\": True,\r\n },\r\n {\r\n \"name\": \"Wilcoxon p-value\",\r\n \"attribute\": \"wilcoxon_p\",\r\n \"fromdataset\": True,\r\n \"pval\": True,\r\n \"tex\": True,\r\n },\r\n {\r\n \"name\": \"Mann-Whitney p-value\",\r\n \"attribute\": \"mannwhitney_p\",\r\n \"fromdataset\": True,\r\n \"pval\": True,\r\n \"tex\": True,\r\n },\r\n ]\r\n for s in stats:\r\n stattable += self._tex_table_row(**s)\r\n\r\n stattable += r\"\"\"\r\n \\bottomrule\r\n \\end{tabular}\r\n \\end{table}\"\"\"\r\n\r\n return stattable + \"\\n\"", "def pprint_table(out, table):\n\n\tcol_paddings = []\n\n\tfor i in range(len(table[0])):\n\t\tcol_paddings.append(get_max_width(table, i))\n\n\tfor row in table:\n\t\t# left col\n\t\tout.write(str(row[0]).ljust(col_paddings[0] + 1))\n\t\t\n\t\t# rest of the cols\n\t\tfor i in range(1, len(row)):\n\t\t\tout.write(str(row[i]).rjust(col_paddings[i] + 2))\n\t\t\n\t\tout.write('\\n')", "def generate_problems_pdf(args):\n contents = generate_table(start_int=args.start_int, end_int=args.end_int, table_type=args.table_type)\n convert_latex_to_pdf(args.filename, contents=contents, view=True)\n remove_temporary_files(args.filename)", "def _intermediary_to_markdown(tables, relationships):\n t = '\\n'.join(t.to_markdown() for t in tables)\n r = '\\n'.join(r.to_markdown() for r in relationships)\n return '{}\\n{}'.format(t, r)", "def end_latex_table(self, tabletype):\n self.texfile.write(r\" \\end{tabu}\\n\")\n self.texfile.write(r\" \\end{center}\\n\")\n self.texfile.write(r\" \\\\vspace{-10pt}\\n\")\n newline = r\" \\caption{shows the fiducial fit \"\n if tabletype == \"fiducial_fit_params\":\n newline += \"parameters\"\n elif tabletype == \"fiducial_fit_metrics\":\n newline += \"metrics\"\n else:\n raise ValueError(\n \"This function is only for ending fit metric or fit \"\n \"param tables in LaTeX. 
Got type %s\"%tabletype\n )\n if self.detector is not None:\n newline += \" obtained with the %s\"%self.detector\n if self.selection is not None:\n newline += \" %s sample\"%self.selection\n if self.selection is not None:\n newline += \" obtained with the %s\"%self.selection\n newline += \" for h0 of %s\"%self.tex_axis_label(\n self.labels.dict['h0_name']\n )\n newline += \" and h1 of %s.\"%self.tex_axis_label(\n self.labels.dict['h1_name']\n )\n if self.labels.dict['data_name'] == '':\n newline += \" The truth is %s.\"%self.tex_axis_label(\n self.labels.dict['data_name']\n )\n else:\n newline += \" This is from an analysis performed on data.\"\n newline += \"}\\n\"\n self.texfile.write(newline)\n newline = r\" \\label{tab:\"\n if self.detector is not None:\n newline += self.detector\n if self.selection is not None:\n newline += self.selection\n newline += \"%stable}\\n\"%tabletype\n self.texfile.write(newline)\n self.texfile.write(r\"\\end{table}\\n\")", "def generate_table(start_int=0, end_int=10, table_type='Addition'):\n lines = [r'\\documentclass{article}',\n r'\\usepackage{geometry}',\n r'\\geometry{landscape,a4paper,total={170mm,257mm},left=10mm,right=10mm,top=10mm}',\n r'\\usepackage{amsmath}',\n r'\\usepackage{amsfonts}',\n r'\\usepackage{amssymb}',\n r'\\usepackage{dcolumn}',\n r'\\newcolumntype{2}{D{.}{}{2.0}}',\n r'\\begin{document}',\n r'\\begin{large}',\n r'\\begin{center}',\n r'{\\Large ' + table_type + r' Table version 0.1\\par}',\n r'\\vspace*{25px}',\n r'\\renewcommand\\arraystretch{1.3}',\n r'\\setlength\\doublerulesep{0pt}',\n r'\\pagenumbering{gobble}',\n r'\\begin{tabular}{r||*{' + str(end_int - start_int + 1) + '}{3|}}']\n\n operator = {'Addition': r'$+$',\n 'Subtraction': r'$-$',\n 'Multiplication': r'$\\times$'}\n\n lines.append(operator[table_type] + ''.join([' & {} '.format(x) for x in range(start_int, end_int + 1)]) + r'\\\\')\n lines.append('\\hline\\hline')\n for i in range(start_int, end_int + 1):\n if table_type == 'Addition':\n lines.append(str(i) + ''.join([' & {} '.format(x + i) for x in range(start_int, end_int + 1)]) + r'\\\\')\n if table_type == 'Subtraction':\n lines.append(str(i) + ''.join([' & {} '.format(x - i) for x in range(start_int, end_int + 1)]) + r'\\\\')\n if table_type == 'Multiplication':\n lines.append(str(i) + ''.join([' & {} '.format(x * i) for x in range(start_int, end_int + 1)]) + r'\\\\')\n lines.append('\\hline')\n\n lines.append(r'\\end{tabular}')\n lines.append(r'\\end{center}')\n lines.append(r'\\end{large}')\n lines.append(r'\\end{document}')\n\n return '\\n'.join(lines)", "def _latex(self, printer):\n return ''.join([\n self._latex_header, '{', printer.doprint(self.args[0]), '}'\n ])", "def print_table(seqids, data, outputfile, separator='\\t'):\n\n tags = data.keys()\n with open(outputfile, 'w') as out:\n out.write(separator.join([\"#Sequence ID\"] + list(tags)) + \"\\n\")\n for s in seqids:\n out.write(s)\n for t in tags:\n out.write(\"{}{}\".format(separator, data[t].get(s, \"\")))\n out.write(\"\\n\")" ]
[ "0.6535216", "0.6499372", "0.6494908", "0.6360739", "0.6058271", "0.605461", "0.60403585", "0.6008605", "0.59048015", "0.5904452", "0.59012544", "0.5884655", "0.58371955", "0.5809221", "0.5790608", "0.57705444", "0.576766", "0.57066625", "0.5678048", "0.56749755", "0.5656739", "0.5630516", "0.5629062", "0.5610359", "0.5570405", "0.55583566", "0.5536824", "0.551351", "0.55069685", "0.54920805" ]
0.8194145
0
Take replay file that was uploaded to ObjectStore and process data and store it to database and link to account
async def parse_replay(request, game): game = game.lower() replay_file = request.files.get("replay") if replay_file: if game == STARCRAFT: basic, result = await SC2Replay.process_replay(replay_file, request.args.get("load_map", False)) if result: # Lets create our db entry basic['private_replay'] = request.args.get('private_replay', False) replay_id = str(uuid.uuid4()) basic["_id"] = replay_id print(replay_id) unique_name = ".".join([replay_id, "SC2Replay"]) basic["replay_object_name"] = unique_name basic["game_name"] = request.args.get("replay_name", datetime.utcnow()) try: success = await request.app.object_storage.add_object(request.app.config.OS_CONTAINER, replay_file, unique_name) if success: # push results to mongoDB mongo = request.app.mongodb # Insert the basic information for the replay await mongo.starcraft_2_replays.info.insert_one(basic) # Insert event data events = dict(result['event']) events.update(basic) print(events) await mongo.starcraft_2_replays.replay_events.insert_one(events) # Insert stats data stats = dict(result['stats']) stats.update(basic) await mongo.starcraft_2_replays.replay_stats.insert_one(stats) return sanic.response.json(basic) except (swift.BluemixSwiftUnavailableError, swift.BluemixSwiftAuthenticationError, swift.BluemixSwiftRequestTimeoutError, bson.errors.InvalidDocument, pymongo.errors.ConnectionFailure): traceback.print_exc() data = { "error": "Internal Server Error", "success": False, "game": STARCRAFT } return sanic.response.json(data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_to_restore(file_name, data):\n urlfetch.fetch(url=config.RESTORE_URL + '?name=' + file_name + '&source=db&packet',\n payload=urllib.urlencode({\"data\": services.event.entity_to_string(data)}),\n method=urlfetch.POST)", "def transfer(file_obj):", "def post_original_to_db( self, data, date_stamp ):\n post_result = 'init'\n try:\n post_result = utility_code.postFileData( identifier=date_stamp, file_data=data, update_type='original_file' )\n log.info( 'original_file post_result, `%s`' % post_result )\n except Exception, e:\n log.error( 'original_file post_result exception is: %s' % e )\n if not post_result == 'success':\n log.debug( 'post_result not \"success\"; but continuing' )\n return", "def upload_process(self, rec_id: str): # pylint: disable=unused-variable\n\n # Process the uploaded file\n if self.connector == \"overtime\":\n importer = action_process_overtime_data(\n get_record_path(rec_id), output=print, show_status=True\n )\n action_commit_overtime_data(importer, output=print)\n else:\n flash(\"Unknown upload file type :(\", \"error\")\n\n flash(\"Data successfully uploaded!\", \"info\")\n\n return redirect(url_for(f\"{self.endpoint}.upload\"))", "def reaper(self):\n if not self.superuser_request:\n self.abort(402, 'uploads must be from an authorized drone')\n with tempfile.TemporaryDirectory(prefix='.tmp', dir=config.get_item('persistent', 'data_path')) as tempdir_path:\n try:\n file_store = files.FileStore(self.request, tempdir_path)\n except files.FileStoreException as e:\n self.abort(400, str(e))\n now = datetime.datetime.utcnow()\n fileinfo = dict(\n name=file_store.filename,\n created=now,\n modified=now,\n size=file_store.size,\n hash=file_store.hash,\n tags=file_store.tags,\n metadata=file_store.metadata\n )\n container = reaperutil.create_container_hierarchy(file_store.metadata)\n f = container.find(file_store.filename)\n target_path = os.path.join(config.get_item('persistent', 'data_path'), util.path_from_hash(fileinfo['hash']))\n if not f:\n file_store.move_file(target_path)\n container.add_file(fileinfo)\n rules.create_jobs(config.db, container.acquisition, 'acquisition', fileinfo)\n elif not file_store.identical(util.path_from_hash(fileinfo['hash']), f['hash']):\n file_store.move_file(target_path)\n container.update_file(fileinfo)\n rules.create_jobs(config.db, container.acquisition, 'acquisition', fileinfo)\n throughput = file_store.size / file_store.duration.total_seconds()\n log.info('Received %s [%s, %s/s] from %s' % (file_store.filename, util.hrsize(file_store.size), util.hrsize(throughput), self.request.client_addr))", "def put_object(self, account, container, object, content):#put a file to server\n \n pass", "def upload_game():\n if (\"game_output\" not in flask.request.values or\n \"users\" not in flask.request.values):\n raise util.APIError(\n 400, message=\"Please provide both the game output and users.\")\n\n game_output = json.loads(flask.request.values[\"game_output\"])\n users = json.loads(flask.request.values[\"users\"])\n challenge = json.loads(flask.request.values.get(\"challenge\", \"null\"))\n\n replay_name = os.path.basename(game_output[\"replay\"])\n if replay_name not in flask.request.files:\n raise util.APIError(\n 400, message=\"Replay file not found in uploaded files.\")\n\n stats = parse_replay(decode_replay(flask.request.files[replay_name]))\n if stats is None:\n raise util.APIError(\n 400, message=\"Replay file cannot be parsed.\")\n\n # Store the replay and any error logs\n replay_key, bucket_class = 
store_game_artifacts(replay_name, users)\n\n with model.engine.begin() as conn:\n total_users = conn.execute(model.total_ranked_users).first()[0]\n # Sort the users to prevent deadlock in the stored_bot for update lock\n for user in sorted(users, key=lambda x: x['user_id']):\n stored_user = conn.execute(\n sqlalchemy.sql.select([\n model.users.c.id.label(\"user_id\"),\n model.users.c.on_email_list,\n model.users.c.github_email.label(\"email\"),\n model.users.c.player_level,\n model.users.c.creation_time,\n model.users.c.username,\n model.organizations.c.organization_name,\n ]).select_from(model.users.join(\n model.organizations,\n model.organizations.c.id == model.users.c.organization_id,\n isouter=True\n )).where(model.users.c.id == user[\"user_id\"])\n ).first()\n\n stored_bot = conn.execute(\n sqlalchemy.sql.select([\n model.bots.c.version_number,\n model.bots.c.language,\n model.bots.c.mu,\n model.bots.c.sigma,\n ], for_update=True).where(\n (model.bots.c.id == user[\"bot_id\"]) &\n (model.bots.c.user_id == user[\"user_id\"])\n )\n ).first()\n\n stored_rank = conn.execute(\n sqlalchemy.sql.select([\n model.ranked_bots_users.c.rank,\n ]).where(\n (model.ranked_bots_users.c.bot_id == user[\"bot_id\"]) &\n (model.ranked_bots_users.c.user_id == user[\"user_id\"])\n )\n ).first()\n\n if not stored_user or not stored_bot:\n raise util.APIError(400, message=\"User or bot doesn't exist\")\n\n # If the user has submitted a new bot in the meanwhile,\n # ignore the game\n if stored_bot[\"version_number\"] != user[\"version_number\"]:\n return util.response_success({\n \"message\": \"User {} has uploaded a new bot, discarding \"\n \"match.\".format(user[\"user_id\"])\n })\n\n user.update(dict(stored_user))\n user.update(dict(stored_bot))\n if stored_rank:\n user[\"leaderboard_rank\"] = stored_rank[\"rank\"]\n user[\"tier\"] = util.tier(stored_rank[\"rank\"], total_users)\n else:\n user[\"leaderboard_rank\"] = total_users\n user[\"tier\"] = util.tier(total_users, total_users)\n\n # Store game results in database\n game_id = store_game_results(conn, game_output, stats,\n replay_key, bucket_class,\n users, challenge)\n # Store game stats in database\n store_game_stats(conn, game_output, stats, game_id, users)\n # Update rankings\n if not challenge:\n update_rankings(conn, users)\n\n return util.response_success()", "def store(self, filename):", "def work(data):\n filepath = data['filepath']\n user = data['user']\n checksum_data = list(filter(lambda x: x['type'] == 'sha256', data['decrypted_checksums']))\n decrypted_checksum = checksum_data[0]['value']\n stable_id = data['accession_id']\n LOG.info(\"Mapping file with path %s and checksum %s to stable_id %s\", filepath, decrypted_checksum, stable_id)\n\n # Remove file from the inbox\n # TODO\n\n db.set_stable_id(filepath, user, decrypted_checksum, stable_id) # That will flag the entry as 'Ready'\n\n LOG.info(\"Stable ID %s mapped to %s\", stable_id, filepath)\n\n # Send message to mark file as completed on the CEGA side\n completed_data = data\n completed_data.pop(\"type\", None)\n LOG.info(f\"Reply message to files.completed: {completed_data}\")\n\n return (completed_data, False)", "def upload(det_file):\n db = DatabaseSession()\n\n try:\n LOG.info(f\"Copying REDCap DET records from {det_file.name}\")\n\n row_count = db.copy_from_ndjson((\"receiving\", \"redcap_det\", \"document\"), det_file)\n\n LOG.info(f\"Received {row_count:,} DET records\")\n LOG.info(\"Committing all changes\")\n db.commit()\n\n except:\n LOG.info(\"Rolling back all changes; the 
database will not be modified\")\n db.rollback()\n raise", "def file_record(db, user_name, original_file_name, target_user=None):\n\n record = db.get_file_record(user_name)\n\n if record is None:\n db.create_file_record(user_name, target_user, original_file_name)\n else:\n db.update_file_record(user_name, target_user, original_file_name)", "def post(self):\n source = 'uploaded by user'\n upload_files = self.get_uploads('file')\n blob_key = upload_files[0].key()\n name = self.request.get('name')\n\n user = users.get_current_user()\n\n username = 'admin'\n date = datetime.datetime.now()\n str_blob_key = str(blob_key)\n key = FileMetadata.get_key_name(username, date, str_blob_key)\n\n ctx = ndb.get_context()\n meta = FileMetadata(key_name=key, parent=_PARENT)\n meta.owner = user\n meta.filename = name\n meta.uploaded_on = date\n meta.source = source\n meta.blobkey = str_blob_key\n meta.put()\n ctx.clear_cache()\n self.redirect('/admin')", "def test_record_update_file(appctx, db, record_with_file_processed, obj_name, content):\n record = CernSearchRecord.get_record(record_with_file_processed.id)\n initial_file_name = \"hello.txt\"\n initial_file = record.files[initial_file_name].obj # type: ObjectVersion\n initial_file_content = record.files_content[initial_file_name].obj # type: ObjectVersion\n\n assert 1 == len(record.files)\n assert 1 == len(record.files_content)\n assert initial_file.file.readable is False\n assert initial_file.deleted is False\n assert initial_file_content.file.readable is True\n\n record.files[obj_name] = BytesIO(content)\n db.session.commit()\n\n # mimic file uploaded flow\n file_uploaded.send(record.files[obj_name].obj)\n\n record = CernSearchRecord.get_record(record.id)\n\n assert record[\"_bucket\"] == record.bucket_id\n assert record[\"_bucket_content\"] == record.bucket_content_id\n\n assert 1 == len(record.files)\n assert 1 == len(record.files_content)\n assert record.files[obj_name].obj.file.readable is False\n assert initial_file_content.file.readable is False\n\n # different file upload creates a delete marker\n if initial_file_name != obj_name:\n with raises(KeyError):\n record.files[initial_file_name]\n with raises(KeyError):\n record.files_content[initial_file_name]\n\n file_1 = record.files_content[obj_name]\n assert obj_name == file_1[\"key\"]\n\n storage = file_1.obj.file.storage() # type: FileStorage\n fp = storage.open(mode=READ_MODE_BINARY)\n\n try:\n assert content.decode() in json.load(fp)[\"content\"]\n finally:\n fp.close()", "def decode_replay(replay_file_obj):\n decoder = zstd.ZstdDecompressor()\n # Rewind to the beginning of the file obj, because\n # gcloud might have read it first\n replay_file_obj.seek(0)\n replay_data = replay_file_obj.read()\n try:\n decoded_data = decoder.decompress(replay_data)\n json_data = json.loads(decoded_data.decode('utf-8').strip())\n return json_data\n except zstd.ZstdError:\n # The replay file can't be decoded.\n return None\n finally:\n # Seek the replay file back to start so we can upload it.\n replay_file_obj.seek(0)", "def record_fileprep(data):\n order_detail = OrderDetail.objects.get(ac_od_id=data['ac_od_id'])\n order_detail.fileprep = data['fileprep']\n return order_detail.save()", "def post(self, request, *args, **kwargs):\n self.create_flow_file_db_entry()\n self.handle_chunk(request)\n return self.return_response(self.flow_file.identifier)", "def post(self):\n\n upload_files = self.get_uploads('file')\n blob_info = upload_files[0]\n self.redirect('/?upload_info=%s' % urllib.quote(blob_info.filename))", 
"def Open(self, file_object):", "def action_import(self):\n ctx = self._context\n attachment_obj = self.env['ir.attachment']\n invoice_obj = self.env['account.invoice']\n storage = attachment_obj._storage()\n filestore = attachment_obj._filestore()\n file_gc = attachment_obj._file_gc()\n indir = self.name#+'/E-Faktur'\n files_in_dir = os.listdir(indir)\n in_dir = []\n for x in files_in_dir:\n r = open(indir+\"/\"+x,'rb').read().encode('base64')\n _logger.info(\"_read_file reading %s\", x)\n if len(x) == 67:\n #_logger.info(\"_read_file valid file efaktur %s\", x)\n faktur_pajak = x.split(\"-\")\n #SEARCH INVOICE YG SUDAH TERFALIDASI DAN ADA FAKTUR PAJAK\n invoice_ids = invoice_obj.search([('nomor_faktur_id','!=',None),('move_id','!=',None),('nomor_faktur_id.number','ilike',faktur_pajak[1][8:])])\n #CARI APAKAH SUDAH TERATTACHMENT DI SISTEM\n attachment_ids = attachment_obj.search([('datas','!=',r),('res_id','in',invoice_ids.ids),('res_model','=','account.invoice'),('name','=',faktur_pajak[1])])\n if not attachment_ids and invoice_ids:\n for invoice in invoice_ids:\n values = {\n 'res_model': 'account.invoice',\n 'company_id': 1,\n 'res_name': invoice.number,#NOMOR INVOICE\n 'datas_fname': x,#NAMA FILE\n 'type': 'binary',\n 'res_id': invoice.id,\n 'name': x,#faktur_pajak[1],\n 'mimetype': 'application/pdf',\n 'store_fname': 'E-Faktur/'+x,\n 'datas': r,\n }\n attachment_obj.create(values)\n _logger.info(\"_uploaded_file %s\", x)", "def _upload(self, variables):\n required_vars = ['container', 'src', 'object']\n variables_dict = self._get_vars(variables, required=required_vars)\n\n container_name = variables_dict.pop('container')\n object_name = variables_dict.pop('object')\n src_path = variables_dict.pop('src')\n\n self._create_container(container_name=container_name)\n with open(src_path, 'rb') as f:\n self.swift.put_object(container_name, object_name, contents=f)\n\n object_data = self.swift.head_object(container_name, object_name)\n self.state_change = True\n return self._facts(facts=[object_data])", "def parse_replaydata(self):\n pass", "def upload():\n # verify user\n email = flask.request.args[\"email\"]\n username = flask.request.args[\"username\"]\n\n file = flask.request.files[\"file\"]\n print(file.filename)\n file_bytestr = file.read()\n\n # query ms api\n emotion = ms_emotion_api(file_bytestr)\n print(emotion)\n if emotion is None:\n return flask.jsonify(error=\"MS API error, possibly no human face\")\n\n # save to mongodb\n saved = mongo.db.images.insert_one({\n \"filename\": file.filename,\n \"content\": file_bytestr,\n \"emotion\": emotion,\n \"date\": datetime.datetime.utcnow(),\n \"user_username\": username,\n \"user_email\": email,\n })\n # print(saved.inserted_id)\n # create user if needed\n mongo.db.users.update_one(filter={\n \"email\": email,\n }, update={\n \"$set\": {\"username\": username},\n # image_ids: list of foreign ids to images\n \"$push\": {\"image_ids\": saved.inserted_id},\n }, upsert=True)\n\n # client resend image_id when reporting music\n emotion[\"image_id\"] = str(saved.inserted_id)\n return flask.jsonify(emotion)", "def process(self, object, from_file=None):\n raise NotImplementedError()", "def _replay_coupon(self, update, context, current_train: Train, image_path):\n self._reply_message(update, str(current_train))\n with open(image_path, 'rb') as qr_image:\n update.message.bot.send_chat_action(chat_id=update.effective_message.chat_id,\n action=ChatAction.UPLOAD_PHOTO)\n update.message.reply_photo(qr_image)\n\n context.user_data['last_train'] = 
current_train.to_dict()", "def upload_finish(self, cloud_file):", "def upload_file_obj_db_s3():\n\n # TODO: upload metadata to database\n temp_engine = create_engine(NEX2_URI)\n session_factory = sessionmaker(\n bind=temp_engine, extension=ZopeTransactionExtension(), expire_on_commit=False)\n db_session = scoped_session(session_factory)\n readme_file_id = None\n file_content_list = file_upload_to_obj()\n try:\n if file_content_list:\n sorted_content = sorted(\n file_content_list, key=itemgetter('file_extension'))\n for item in sorted_content:\n if item['readme_name']:\n readme = db_session.query(Filedbentity).filter(\n Filedbentity.display_name == obj['readme_name']).one_or_none()\n\n if readme is None:\n logging.warning(\n 'unable to find README ' + obj['readme_name'])\n else:\n readme_file_id = readme.dbentity_id\n\n # see if file_meta already exists, else create\n existing_file_meta_data = db_session.query(Filedbentity).filter(\n Filedbentity.display_name == item['display_name']).one_or_none()\n source_id = db_session.query(Source.source_id).filter(\n Source.display_name == item['source']).one_or_none()[0]\n\n d_name = item['display_name']\n f_ext = item['file_extension']\n temp_file_path = get_file_from_path_collection(f_ext, d_name)\n\n if not existing_file_meta_data:\n try:\n data_id = db_session.query(Edam.edam_id).filter(\n Edam.edamid == item['data_edam_id']).one_or_none()[0]\n\n format_id = db_session.query(Edam.edam_id).filter(\n Edam.edamid == item['format_edam_id']).one_or_none()[0]\n topic_id = db_session.query(Edam.edam_id).filter(\n Edam.edamid == item['topic_edam_id']).one_or_none()[0]\n item[\"data_id\"] = data_id\n item[\"format_id\"] = format_id\n item[\"topic_id\"] = topic_id\n item[\"source_id\"] = source_id\n item[\"readme_file_id\"] = readme_file_id\n\n except TypeError:\n logging.error(\n 'invalid EDAM id or source in row ' +\n str(row_num) + ' val in ' + item['data_edam_id'] +\n ', ' + item['format_edam_id'] +\n ', ' + item['topic_edam_id'])\n\n if temp_file_path:\n with open(temp_file_path, 'r') as remote_file:\n upload_file_helper(CREATED_BY, remote_file, item)\n\n db_session.flush()\n else:\n existing_file_meta_data.display_name = item['display_name']\n existing_file_meta_data.description = item['description']\n existing_file_meta_data.status = item['status']\n existing_file_meta_data.is_public = item['is_public']\n existing_file_meta_data.is_in_spell = item['is_in_spell']\n existing_file_meta_data.is_in_browser = item['is_in_browser']\n existing_file_meta_data.source_id = source_id\n\n if temp_file_path:\n with open(temp_file_path, 'r') as remote_file:\n #update file size\n if not existing_file_meta_data.file_size and existing_file_meta_data.s3_url:\n remote_file.seek(0, os.SEEK_END)\n file_size = remote_file.tell()\n remote_file.seek(0)\n existing_file_meta_data.file_size = file_size\n\n if item['file_date']:\n existing_file_meta_data.file_date = item['file_date']\n existing_file_meta_data.year = item['file_date'].year\n existing_file_meta_data.readme_file_id = readme_file_id\n remote_file.seek(0, os.SEEK_END)\n\n #transaction.commit()\n existing_file_meta_data = db_session.query(Filedbentity).filter(\n Filedbentity.display_name == item['display_name']).one_or_none()\n # only upload s3 file if not defined\n if existing_file_meta_data.s3_url is None:\n existing_file_meta_data.upload_file_to_s3(\n remote_file, item['display_name'])\n db_session.flush()\n\n except Exception as e:\n logging.error(\"Exception occurred\", exc_info=True)", "def 
parseUpload(dbconnection, fileName):\n nhContent = ParseText.nohupTranscriptionContent(fileName)\n count = 0\n while count < len(nhContent[0]):\n try:\n rtf = nhContent[0][count]\n transcription = nhContent[1][count].replace(\"'\", \"''\").replace(\"_\", \"\")\n dbID = nhContent[2][count].replace(\".\", \"\")\n duration = nhContent[3][count]\n DatabaseInteract.insertTranscription(dbconnection, rtf, transcription, duration, dbID)\n count += 1\n except:\n print(\"couldnt upload one at index \" + str(count))\n count += 1", "def remote_archiveUpload(self, talk_id, upload_id, role):\n source = yield self.getUpload(upload_id)\n extension = source.splitext()[1]\n\n # TODO: Check if the talk identified by talk_id exists and bind the\n # document to it.\n\n # TODO: Validate the given ``role`` argument (either strictly against a\n # list of known roles or loosely for sanity).\n\n # 2. Construct the final pathname\n version_id = ObjectId()\n basename = str(version_id) + extension\n destination = settings.data_root.child(talk_id).child(role)\n if not destination.exists():\n destination.makedirs()\n destination = destination.child(basename)\n\n # 3. move the file to its destination\n yield threads.deferToThread(source.moveTo, destination)\n\n # 2. Save the info to the database\n asset = Asset(\n _id=version_id,\n archiver_id=self.getID(),\n talk_id=talk_id,\n role=role\n )\n version = AssetVersion(\n version_id=version_id,\n filename=destination\n )\n asset.versions.append(version)\n\n yield asset.save()\n\n # 5. Start the upload triggers\n task = self.processAsset(asset)\n\n # TODO: Define the return value of this method. Shall it be the task,\n # the version_id/asset_id or both?\n defer.returnValue((str(version_id), task.id))", "def run(self):\n # Etapas de execucao:\n # 1 - Baixar o arquivo com os dados\n\n # First we will need a file containing a list of files\n self.log('Loading file list %s' % self.file_url)\n\n # Check if we are forcing some URL\n if self.file_url is None:\n self.log( 'You have to supply the file list. \\nFile list: %s' % self.file_url)\n return\n\n else:\n self.log( 'Loading file %s' % self.file_url.rstrip())\n self.response = self.send(self.file_url.rstrip())\n\n # Store the file and its hash somewhere\n exists = self.hash_control()\n if exists:\n # if The hash exists, abort operation\n return\n\n # 2 - Fazer o parsing do arquivo e armazenar cada registro como um\n # recurso no Ckan em formato RDF. Os campos devem ser armazenados como\n # metadados do recurso\n r = rdf.lightbaseParser()\n\n # Tenta abrir o arquivo e fazer o parsing. 
Se houver erro rejeita e loga\n try:\n # Deve retornar uma coleção de registros para armazenamento\n registros = r.collection(self.response)\n except:\n # Armazena os arquivos que não foram importados com sucesso\n file_dict = {\n 'tmpfile': self.response,\n 'filename': self.response,\n 'errmsg' : traceback.format_exc(),\n 'error_type' : 'FileCollectionError',\n 'package_file' : self.response\n }\n self.log_error(file_dict)\n\n for registro in registros.get('registros'):\n self.datastore(registro['base_name'],registro['rdf_identifier'],registro['rdf_collection'],registro['metadata'],'rdf')\n #print('2222222222222222222222222222: %s' % registro['rdf_identifier'])\n\n # Log import errors\n for registro in registros['import_error']:\n self.log_error(registro)\n #print('111111111111111111111111111111: %s' % registro)", "def upload(self, file_obj):\n file_path = ''\n file_name = file_obj.filename\n file_class, file_type = file_obj.content_type.split('/')\n\n def allowed_file():\n return '.' in file_name and file_name.split('.')[1] in ALLOWED_EXTENSIONS\n\n try:\n log.debug('Try to save file <%s> for user ID: %s', file_name, self.current_user.login)\n\n if not allowed_file():\n log.debug('Filetype not allowed')\n return {'success': False, 'errorMessage': 'Filetype not allowed'}\n\n upload_dir = os.path.join(UPLOAD_FOLDER, self.current_user.login)\n file_path = os.path.join(upload_dir, file_name)\n\n if os.path.isfile(file_path):\n log.debug('File was uploaded already')\n return {'success': False, 'errorMessage': 'File was uploaded already'}\n\n if not os.path.exists(upload_dir):\n log.debug('--> Create path: %s', upload_dir)\n os.makedirs(upload_dir)\n\n # save in File System\n with open(file_path, \"ab\") as f:\n data = file_obj.body\n f.write(bytes(data))\n\n os_f_size = os.stat(file_path).st_size\n\n # check file quota\n if (self.user_api.user_db.used_file_quota + os_f_size) > self.user_api.user_db.file_quota:\n os.remove(file_path)\n log.error('You don\\'t have empty space!')\n return {'success': False, 'errorMessage': 'You don\\'t have empty space!'}\n\n file_db = FileDB()\n file_db.name = file_name\n file_db.type = file_type\n file_db.f_class = file_class\n file_db.size = os_f_size\n file_db.user_id = self.current_user.id\n file_db.date_load = datetime.now().strftime(DATE_FORMAT)\n\n self.db.create(file_db)\n\n log.debug('--> File has been updated in DB.')\n\n # update user\n self.user_api.user_db.used_file_quota += os.stat(file_path).st_size # bytes\n #self.user_api.db.update(self.user_api.user_db)\n\n self.db.commit()\n self.user_api.db.commit()\n\n log.debug('--> User in DB has been updated.')\n\n return {'success': True, 'id': file_db.id}\n except StandardError:\n self.db.session.rollback()\n if os.path.isfile(file_path):\n log.error('File <%s> has been deleted', file_path)\n os.remove(file_path)\n log.exception('Cannot upload file')\n return SERVER_ERROR" ]
[ "0.59654164", "0.5942923", "0.58948725", "0.58847404", "0.5826352", "0.57525676", "0.5647753", "0.56387746", "0.5543896", "0.55360585", "0.5521623", "0.5506403", "0.5493576", "0.5484211", "0.54712117", "0.54331356", "0.5429796", "0.53877634", "0.5359096", "0.5353566", "0.5341829", "0.5311284", "0.52975315", "0.52895296", "0.52578443", "0.5256056", "0.5206786", "0.51986015", "0.51896536", "0.51839703" ]
0.62600714
0
_execute_: map the provided request name to an ID
def execute(self, requestName, conn = None, trans = False):
    self.sql = "SELECT request_id from reqmgr_request WHERE "
    self.sql += "request_name=:request_name"
    binds = {"request_name": requestName}
    reqID = self.dbi.processData(self.sql, binds, conn = conn, transaction = trans)
    result = self.formatOne(reqID)
    if result == []:
        return None
    return result[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _execute(self, _):\r\n pass", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def execute():", "def request(query):", "def perform_action(self, action_id: int) -> None:\r\n ...", "def _send_request(self, code):\n msg_id = uuid4().hex\n message = KernelClient.__create_execute_request(msg_id, code)\n\n # create response-queue and add to map for this msg_id\n self.response_queues[msg_id] = queue.Queue()\n\n self.kernel_socket.send(message)\n\n return msg_id", "def submit(id, host):", "def getEventIDName(*args, **kwargs):\n pass", "def execute(self, response):\n raise NotImplementedError()", "def process_actions(parameter: str, action_name: str) -> str:\n\n try:\n if action_name == \"login\":\n return create_followup_event_data(parameter[2])\n elif action_name == \"get_exam_date\":\n return create_data_response(DatabaseExtractor.get_exam_date(parameter[1]))\n elif action_name == \"get_assessment_form\":\n return create_data_response(DatabaseExtractor.get_assessment_form(parameter[1]))\n elif action_name == \"get_contact_mail\":\n return create_data_response(DatabaseExtractor.get_contact_mail(parameter[1]))\n elif action_name == \"get_contact_name\":\n return create_data_response(DatabaseExtractor.get_contact_name(parameter[1]))\n elif action_name == \"get_contact_phone\":\n return create_data_response(DatabaseExtractor.get_contact_phone(parameter[1]))\n elif action_name == \"get_contact_website\":\n return create_data_response(DatabaseExtractor.get_contact_website(parameter[1]))\n elif action_name == \"get_office\":\n return create_data_response(DatabaseExtractor.get_contact_office(parameter[1]))\n elif action_name == \"get_teaching_form\":\n return create_data_response(DatabaseExtractor.get_teaching_form(parameter[1]))\n elif action_name == \"get_course_name\":\n return create_data_response(DatabaseExtractor.get_course_name(parameter[1]))\n elif action_name == \"get_credit\":\n return create_data_response(DatabaseExtractor.get_credit(parameter[1]))\n elif action_name == \"get_url\":\n return create_data_response(DatabaseExtractor.get_url(parameter[1]))\n elif action_name == \"get_prereq_knowledge\":\n return create_data_response(DatabaseExtractor.get_prereq_knowledge(parameter[1]))\n elif action_name == \"get_course_content\":\n return create_data_response(DatabaseExtractor.get_course_content(parameter[1]))\n elif action_name == \"get_course_material\":\n return create_data_response(DatabaseExtractor.get_course_material(parameter[1]))\n elif action_name == \"get_teaching_form\":\n return create_data_response(DatabaseExtractor.get_teaching_form(parameter[1]))\n elif action_name == \"get_exercise_status\":\n return create_data_response(DatabaseExtractor.get_exercise_status(parameter[1], parameter[0]))\n elif action_name==\"get_exercise_scheme_approval\":\n return create_data_response(DatabaseExtractor.get_exercise_scheme_approval(parameter[1], parameter[0]))\n elif action_name==\"get_exercises_left\":\n return create_data_response(DatabaseExtractor.get_exercises_left(parameter[1], parameter[0]))\n elif action_name == \"get_next_event\":\n return create_data_response(DatabaseExtractor.get_next_event(username=parameter[0]))\n elif action_name == \"get_next_assignment\":\n return create_data_response(DatabaseExtractor.get_next_assignment(username=parameter[0]))\n elif action_name == 
\"get_this_weeks_schedule\":\n return create_data_response(DatabaseExtractor.get_this_week_schedule(username=parameter[0]))\n elif action_name == \"get_next_weeks_schedule\":\n return create_data_response(DatabaseExtractor.get_next_week_schedule(username=parameter[0]))\n elif action_name == \"get_next_weeks_events\":\n return create_data_response(DatabaseExtractor.get_next_weeks_events(username=parameter[0]))\n elif action_name == \"get_next_weeks_assignments\":\n return create_data_response(DatabaseExtractor.get_next_weeks_assignments(username=parameter[0]))\n elif action_name == \"get_this_weeks_assignments\":\n return create_data_response(DatabaseExtractor.get_this_weeks_assignments(username=parameter[0]))\n elif action_name == \"get_this_weeks_events\":\n return create_data_response(DatabaseExtractor.get_this_weeks_events(username=parameter[0]))\n elif action_name == \"get_exam_dates\":\n return create_data_response(DatabaseExtractor.get_exam_dates(username=parameter[0]))\n elif action_name == \"get_days_until_first_exam\":\n return create_data_response(DatabaseExtractor.get_days_until_first_exam(username=parameter[0]))\n elif action_name == \"get_course_codes\":\n return create_data_response(DatabaseExtractor.get_course_codes(username=parameter[0]))\n elif action_name == \"get_course_names\":\n return create_data_response(DatabaseExtractor.get_course_names(username=parameter[0]))\n elif action_name == \"get_number_of_courses\":\n return create_data_response(DatabaseExtractor.get_number_of_courses(username=parameter[0]))\n elif action_name == \"get_today_assignments\":\n return create_data_response(DatabaseExtractor.get_today_assignments(username=parameter[0]))\n elif action_name == \"get_tomorrow_assignments\":\n return create_data_response(DatabaseExtractor.get_tomorrow_assignments(username=parameter[0]))\n elif action_name == \"get_today_events\":\n return create_data_response(DatabaseExtractor.get_today_events(username=parameter[0]))\n elif action_name == \"get_tomorrow_events\":\n return create_data_response(DatabaseExtractor.get_tomorrow_events(username=parameter[0]))\n else:\n return create_data_response(\"I didn't understand anything, you probably broke me :(\")\n\n except:\n return create_data_response(\"Sorry, i can not answer that.\")", "def _cmd(self, name, *args, **kwargs):\n if name in ['FETCH', 'SORT', 'STORE', 'COPY', 'SEARCH']:\n try:\n typ, data = self.m.uid(name, *args)\n except imaplib.IMAP4.error as e:\n raise ImapError(e)\n if typ == \"NO\":\n raise ImapError(data)\n if name == 'FETCH':\n return FetchResponseParser().parse(data)\n return data\n\n try:\n typ, data = self.m._simple_command(name, *args)\n except imaplib.IMAP4.error as e:\n raise ImapError(e)\n if typ == \"NO\":\n raise ImapError(data)\n if 'responses' not in kwargs:\n if name not in self.m.untagged_responses:\n return None\n return self.m.untagged_responses.pop(name)\n res = []\n for r in kwargs['responses']:\n if r not in self.m.untagged_responses:\n return None\n res.append(self.m.untagged_responses.pop(r))\n return res", "def request_execute_plan(\n self,\n location: \"sy.workers.BaseWorker\",\n response_ids: List[Union[str, int]],\n *args,\n **kwargs,\n ) -> object:\n plan_name = f\"plan{self.id}\"\n # args, _, _ = hook_args.unwrap_args_from_function(\n # plan_name, args, {}\n # )\n args = [args, response_ids]\n\n command = (\"execute_plan\", self.id_at_location, args, kwargs)\n\n response = self.owner.send_command(\n message=command, recipient=location, return_ids=response_ids\n )\n response = 
hook_args.hook_response(plan_name, response, wrap_type=FrameworkTensor[0])\n response.garbage_collect_data = False\n return response", "def PostSearchesByID(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def process_id(job_id):\n pass # Not implemented yet", "def taskid(name):\n return \"(select id from event_type where name = '{}')\".format(name)", "def _add_identifiers_to_request(request, response_id):\n meta = request.meta\n meta['__id'] = response_id\n return request.replace(meta=meta)", "def execute(self, name=None, clientRequestToken=None):\n params = {'name': name if name else self.resource_id}\n if clientRequestToken:\n params.update({\"clientRequestToken\": clientRequestToken})\n self.logger.debug('Executing {resource_type} with parameters:'\n ' {params}'.format(resource_type=self.type_name,\n params=params))\n\n return self.client.start_pipeline_execution(**params)", "def execute(self, *args, **kwargs):", "def execute(self, *args, **kwargs):" ]
[ "0.55304587", "0.5463426", "0.5463426", "0.5463426", "0.5463426", "0.5463426", "0.5463426", "0.5463426", "0.5463426", "0.5463426", "0.5463426", "0.5463426", "0.5463426", "0.5370444", "0.5351898", "0.53309184", "0.5321808", "0.5240275", "0.5239346", "0.52391875", "0.52137643", "0.52130556", "0.52076656", "0.5206376", "0.51634085", "0.51417744", "0.5138334", "0.513091", "0.5120133", "0.5120133" ]
0.5936447
0
Use float16 for faster IO during training.
def save_float16_npy(data, path):
    np.save(path, data.astype(np.float16))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_use_bfloat16(configs, use_bfloat16):\n configs[\"train_config\"].use_bfloat16 = use_bfloat16", "def data_type():\n if FLAGS.use_fp16:\n return tf.float16\n else:\n return tf.float32", "def data_type():\n if FLAGS.use_fp16:\n return tf.float16\n else:\n return tf.float32", "def read(reader: BitStreamReader, _index: int) -> float:\n\n return reader.readFloat16()", "def float_to_int_16(x):\n return np.float16(x).view(np.int16)", "def benchmark_fp16_synth_forward_batch16(self):\n params = self._shared_params()._replace(batch_size=16, use_fp16=True)\n self._run_benchmark(params)", "def _float_to_16_bit_sample(value):\n sample = int(32767.0 * value)\n byte0 = sample & 255\n byte1 = (sample >> 8) & 255\n return byte0, byte1", "def to_fp16(self, params: Union[Dict, FrozenDict], mask: Any = None):\n return self._cast_floating_to(params, jnp.float16, mask)", "def dtype_float(dtype: DType):\n return promote_dtypes(dtype, np.float16)", "def test_convert_float16_to_float32(in_dtype):\n check_type_supported(in_dtype)\n\n f16_input = torch.tensor(range(-int(2 ** (16 - 1)), int(2 ** (16 - 1))), dtype=torch.int16).view(in_dtype)\n f32_output = convert_float_to_float32(f16_input)\n\n nan = f16_input.isnan()\n assert torch.all(f32_output[nan].isnan())\n inf = f16_input.isinf()\n assert torch.all(f32_output[inf].isinf())\n other = torch.logical_not(torch.logical_or(nan, inf))\n assert torch.all(f16_input[other] == f32_output[other])", "def cshort(queue=None):\n return np.float16", "def bfloat16_to_float32(tensor):\n if tensor.dtype == tf.bfloat16:\n return tf.cast(tensor, dtype=tf.float32)\n else:\n return tensor", "def benchmark_fp16_synth_forward_batch128(self):\n params = self._shared_params()._replace(batch_size=128, use_fp16=True)\n self._run_benchmark(params)", "def write(writer: BitStreamWriter, value: float) -> None:\n\n writer.writeFloat16(value)", "def benchmark_fp16_xla_synth_forward_batch16(self):\n params = self._shared_params()._replace(\n batch_size=16, use_fp16=True, xla=True)\n self._run_benchmark(params)", "def convert_uint16_to_float_ifneed(self, actual_np, expect_np):\n if actual_np.dtype == np.uint16:\n if expect_np.dtype in [np.float32, np.float64]:\n actual_np = convert_uint16_to_float(actual_np)\n self.rtol = 1.0e-2\n elif actual_np.dtype == np.float16:\n self.rtol = 1.0e-3\n else:\n self.rtol = 1.0e-5\n if (\n expect_np.dtype == np.uint16\n and actual_np.dtype == np.uint16\n ):\n nonlocal atol\n expect_np = convert_uint16_to_float(expect_np)\n actual_np = convert_uint16_to_float(actual_np)\n atol = max(atol, 0.03)\n return actual_np, expect_np", "def _shorts2float(lo_byte_pair, hi_byte_pair):\n\tba = bytearray(struct.pack(\"HH\", lo_byte_pair, hi_byte_pair))\n\t[f] = struct.unpack('f', ba)\n\treturn f", "def test_S2L1C_float32_uint16(self):\n test_dir = os.path.dirname(os.path.realpath(__file__))\n cache_folder = os.path.join(test_dir, 'cache_test')\n\n if os.path.exists(cache_folder):\n shutil.rmtree(cache_folder)\n\n task = SentinelHubInputTask(\n bands_feature=(FeatureType.DATA, 'BANDS'),\n additional_data=[(FeatureType.MASK, 'dataMask')],\n size=self.size,\n maxcc=self.maxcc,\n time_difference=self.time_difference,\n data_collection=DataCollection.SENTINEL2_L1C,\n max_threads=self.max_threads,\n cache_folder=cache_folder\n )\n\n eopatch = task.execute(bbox=self.bbox, time_interval=self.time_interval)\n bands = eopatch[(FeatureType.DATA, 'BANDS')]\n is_data = eopatch[(FeatureType.MASK, 'dataMask')]\n\n self.assertTrue(np.allclose(array_stats(bands), [0.0233, 0.0468, 
0.0252]))\n\n width, height = self.size\n self.assertTrue(bands.shape == (4, height, width, 13))\n self.assertTrue(is_data.shape == (4, height, width, 1))\n self.assertTrue(len(eopatch.timestamp) == 4)\n self.assertTrue(bands.dtype == np.float32)\n\n self.assertTrue(os.path.exists(cache_folder))\n\n # change task's bans_dtype and run it again\n task.bands_dtype = np.uint16\n\n eopatch = task.execute(bbox=self.bbox, time_interval=self.time_interval)\n bands = eopatch[(FeatureType.DATA, 'BANDS')]\n\n self.assertTrue(np.allclose(array_stats(bands), [232.5769, 467.5385, 251.8654]))\n\n self.assertTrue(bands.dtype == np.uint16)\n\n shutil.rmtree(cache_folder)", "def to_bf16(self, params: Union[Dict, FrozenDict], mask: Any = None):\n return self._cast_floating_to(params, jnp.bfloat16, mask)", "def write_float32(self, f: float) -> None:\n self.buffer += struct.pack(\"<f\", f)", "def ts_float32(val):\n return np.float64(val)", "def test_f8_xf16_roundtrip(in_dtype, out_dtype):\n check_type_supported(out_dtype)\n\n @triton.jit\n def copy_kernel(input_ptr, output_ptr, n_elements, BLOCK_SIZE: tl.constexpr):\n offsets = tl.program_id(axis=0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)\n mask = offsets < n_elements\n input = tl.load(input_ptr + offsets, mask=mask)\n output = input\n tl.store(output_ptr + offsets, output, mask=mask)\n\n f8_tensor = torch.tensor(range(-128, 128), dtype=torch.int8, device='cuda')\n # f32_to_f8 doesn't handle nan, so we make sure f8_tensor doesn't contain any nan\n all_exp_ones = (f8_tensor & 0b01111100) == 128 - 2**in_dtype.fp_mantissa_width\n f8_tensor[all_exp_ones] = 0\n f8 = triton.reinterpret(f8_tensor, in_dtype)\n n_elements = f8_tensor.numel()\n xf16 = torch.empty_like(f8_tensor, dtype=out_dtype)\n grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)\n copy_kernel[grid](f8, xf16, n_elements, BLOCK_SIZE=1024)\n\n # exponent_mask = 0b01111100 for float8e5\n # exponent_mask = 0b01111000 for float8e4\n exponent_mask = 0b01111111 ^ ((1 << in_dtype.fp_mantissa_width) - 1)\n normal = torch.logical_and((f8_tensor & exponent_mask) != 0, (f8_tensor & exponent_mask) != exponent_mask)\n ref16 = convert_float_to_float32(f8_tensor, in_dtype)\n # WARN: currently only normal float8s are handled\n assert torch.all(xf16[normal] == ref16[normal])\n\n f8_output_tensor = torch.empty_like(xf16, dtype=torch.int8)\n f8_output = triton.reinterpret(f8_output_tensor, in_dtype)\n copy_kernel[grid](xf16, f8_output, n_elements, BLOCK_SIZE=1024)\n\n assert torch.all(f8_tensor == f8_output_tensor)", "def get_data_type(params):\n return tf.float16 if params.use_fp16 else tf.float32", "def data_convert2float32 (self, data):\r\n data = data.astype(np.float32)\r\n\r\n return data", "def benchmark_fp16_xla_synth_forward_batch128(self):\n params = self._shared_params()._replace(\n batch_size=128, use_fp16=True, xla=True)\n self._run_benchmark(params)", "def __float__(self):\n return float(self.encoded) / (1 << self.frac_bits)", "def test_f16_to_f8_rounding(in_dtype, out_dtype):\n @triton.jit\n def copy_kernel(input_ptr, output_ptr, n_elements, BLOCK_SIZE: tl.constexpr):\n offsets = tl.program_id(axis=0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)\n mask = offsets < n_elements\n input = tl.load(input_ptr + offsets, mask=mask)\n output = input\n tl.store(output_ptr + offsets, output, mask=mask)\n\n i16_input = torch.tensor(range(-int(2 ** (16 - 1)), int(2 ** (16 - 1))), dtype=torch.int16, device='cuda')\n f16_input = i16_input.view(out_dtype)\n n_elements = f16_input.numel()\n f8_output_tensor = 
torch.empty_like(f16_input, dtype=torch.int8)\n f8_output = triton.reinterpret(f8_output_tensor, in_dtype)\n grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),)\n copy_kernel[grid](f16_input, f8_output, n_elements, BLOCK_SIZE=1024)\n\n f16_output = torch.empty_like(f16_input, dtype=out_dtype)\n copy_kernel[grid](f8_output, f16_output, n_elements, BLOCK_SIZE=1024)\n\n abs_error = torch.abs(f16_input - f16_output)\n\n all_f8_vals_tensor = torch.tensor(range(2 ** 8), dtype=torch.uint8, device='cuda')\n all_f8_vals = triton.reinterpret(all_f8_vals_tensor, in_dtype)\n all_f8_vals_in_f16 = torch.empty_like(all_f8_vals_tensor, dtype=out_dtype)\n copy_kernel[grid](all_f8_vals, all_f8_vals_in_f16, n_elements=256, BLOCK_SIZE=1024)\n\n all_finite_f8_vals_in_f16 = all_f8_vals_in_f16[\n torch.isfinite(all_f8_vals_in_f16)\n ]\n\n min_error = torch.min(\n torch.abs(\n f16_input.reshape((-1, 1))\n - all_finite_f8_vals_in_f16.reshape((1, -1))\n ),\n dim=1,\n )[0]\n\n # WARN: only normalized numbers are handled\n f8_normal_min = 1 << in_dtype.fp_mantissa_width # 0b00001000 for float8e4\n f8_normal_max = 0b01111110 if in_dtype == tl.float8e4 else 0b01111011\n f16_min, f16_max, f16_max_minus_1 = convert_float_to_float32(torch.tensor([f8_normal_min, f8_normal_max, f8_normal_max - 1], dtype=torch.int8), in_dtype)\n assert torch.all(torch.isfinite(f16_min))\n assert torch.all(torch.isfinite(f16_max))\n thres_error = f16_max - f16_max_minus_1\n mismatch = torch.logical_and(\n torch.logical_or(abs_error != min_error, abs_error > thres_error), torch.logical_and(torch.isfinite(f16_input), torch.logical_and(torch.abs(f16_input) <= f16_max, torch.abs(f16_input) >= f16_min))\n )\n assert torch.all(\n torch.logical_not(mismatch)\n ), f\"f16_input[mismatch]={f16_input[mismatch]} f16_output[mismatch]={f16_output[mismatch]} abs_error[mismatch]={abs_error[mismatch]} min_error[mismatch]={min_error[mismatch]}\"", "def _shared_params_fp16(self):\n return BenchmarkBase._shared_params(self)._replace(\n model='resnet50_v1.5',\n batch_size=256,\n distortions=False,\n use_fp16=True,\n optimizer='momentum',\n loss_type_to_report='base_loss',\n compute_lr_on_cpu=True,\n single_l2_loss_op=True\n )", "def read(reader: BitStreamReader, _index: int) -> float:\n\n return reader.readFloat64()", "def cfloat(queue=None):\n return np.float32" ]
[ "0.69121385", "0.65502363", "0.65130097", "0.647946", "0.6455792", "0.6408274", "0.63850254", "0.6354032", "0.63375044", "0.63351613", "0.6293508", "0.62589705", "0.6142071", "0.6129017", "0.60476065", "0.6029137", "0.59642065", "0.59619236", "0.5940161", "0.5863883", "0.584219", "0.58120924", "0.5797429", "0.5779282", "0.5775224", "0.57643133", "0.5752109", "0.5737403", "0.5707745", "0.57077396" ]
0.7207755
0
Return whether the Persona object passed into args is in the default componenti property
def has_componente(self, persona):
    return True if persona.pk in self.pks_componenti else False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def XCAFDoc_ShapeTool_IsComponent(*args):\n return _XCAFDoc.XCAFDoc_ShapeTool_IsComponent(*args)", "def IsComponent(*args):\n return _XCAFDoc.XCAFDoc_ShapeTool_IsComponent(*args)", "def __contains__(self, arg):\r\n\r\n return arg in self.grfx[0]", "def particleExists(*args, **kwargs)->bool:\n pass", "def __contains__(self, arg):\n # All arguments should have a default value of some sort\n if arg not in self.args:\n raise AttributeError(\n \"arg {} doesn't exist on {}\".format(arg, self.args)\n )\n # If the value is the sentinel then the argument was not provided AND\n # there is no default\n if getattr(self.args, arg) is NoArgument:\n return False\n\n if isinstance(getattr(self.args, arg), list):\n raise AttributeError(\n \"tried to check for presence of arg {} on {}, which is a \"\n \"list\".format(arg, self.args)\n )\n\n return True", "def is_mandatory_arg(self, arg_name): \n return arg_name in self.get_mandatory_args()", "def object_is_valid_pipeline(o):\n return (o is not None and\n hasattr(o, 'fit') and\n hasattr(o, 'predict') and\n hasattr(o, 'steps'))", "def object_is_valid_pipeline(o):\n return (o is not None and\n hasattr(o, 'fit') and\n hasattr(o, 'predict') and\n hasattr(o, 'steps'))", "def objExists(*args, **kwargs)->bool:\n pass", "def is_command_ancillary(args):\n # pylint: disable=bad-continuation\n if (\n # skip the parent check and only\n # determine if the parameter is present\n is_valid_executes(args, skip=True)\n ):\n return True\n return False", "def SBO_isParticipant(*args):\n return _libsbml.SBO_isParticipant(*args)", "def __contains__(self, component):\n if issubclass(component, Component):\n try:\n my_component = self.type.components[component.interface]\n except KeyError:\n return False\n else:\n return issubclass(my_component, component)\n else:\n return component in self.type.components", "def isinstance_blender_object(self, b_obj):\n # lame and slow, but functional\n return b_obj in Blender.Object.Get()", "def check_args(args):\n for arg in vars(args):\n if getattr(args, arg):\n return True\n return False", "def isParticipant(*args):\n return _libsbml.SBO_isParticipant(*args)", "def class_name_arg_required(args):\n no_class_name_flags = ['list_courses', 'version']\n return not any(\n getattr(args, flag)\n for flag in no_class_name_flags\n )", "def is_call_object_of(self, *args):\n return _ida_hexrays.cexpr_t_is_call_object_of(self, *args)", "def no_params(self) -> bool:\n result = True\n # Fixing issue #92\n if self.properties.parameters:\n return False\n else:\n return True\n # for parameter in self.properties.parameters:\n # if parameter == \"effect\":\n # continue\n # else:\n # result = False\n # break\n # return result", "def ismemberdescriptor(object):\r\n return False", "def is_parameter_present(self, obj):\n val_ref = obj.value_reference\n for p in self.parameters:\n if p.value_reference == val_ref:\n # there is already a parameter in the list with the same value_reference\n logger.error(\"There is already a parameter in the list with the same value reference: {0}\".format(val_ref))\n return True\n return False", "def has_custom_param(plot):\n return Plot.has_custom_param(plot)", "def is_procedure(vba_object):\n if hasattr(vba_object, 'statements'):\n return True\n else:\n return False", "def has_component(self, component):\n return component.name in self.components", "def has_component(self, var_name):\n if var_name in self._var_names:\n return True\n else:\n return False", "def get_is_por_holder(self, obj):\n user = self.context['request'].user\n if 
not user.is_authenticated:\n return False\n # pylint: disable=no-member\n profile = UserProfile.objects.get(user=user)\n\n if profile == obj.club.secy:\n return True\n\n if profile in obj.club.joint_secy.all():\n return True\n\n if profile == obj.club.council.gensec:\n return True\n\n if profile in obj.club.council.joint_gensec.all():\n return True\n\n return False", "def is_function(self):\n return self.args is not None", "def __bool__(self):\n return True if self._name is not None else False", "def arg_validation(arg, cla):\n if is_subclass(cla, arg):\n return arg\n else:\n print(str(arg)+\" is not a valid \" + cla.__module__ + \" name.\")\n sys.exit(2)", "def is_P(self):\n return isinstance(self,P)", "def __bool__(self):\n context, active_obj, actual_mode, mode = self.get_context()\n if not mode: return False\n \n if mode == 'OBJECT':\n return bool(context.selected_objects)\n elif mode == 'EDIT_MESH':\n mesh = active_obj.data\n if actual_mode == 'EDIT_MESH':\n return bool(mesh.total_vert_sel)\n else:\n return any(item.select for item in mesh.vertices)\n elif mode in {'EDIT_CURVE', 'EDIT_SURFACE'}:\n for spline in active_obj.data.splines:\n for item in spline.bezier_points:\n if (item.select_control_point or\n item.select_left_handle or\n item.select_right_handle):\n return True\n for item in spline.points:\n if item.select:\n return True\n elif mode == 'EDIT_METABALL':\n return bool(active_obj.data.elements.active)\n elif mode == 'EDIT_LATTICE':\n return any(item.select for item in active_obj.data.points)\n elif mode == 'EDIT_ARMATURE':\n return any(item.select_head or item.select_tail\n for item in active_obj.data.edit_bones)\n elif mode == 'POSE':\n return any(item.select for item in active_obj.data.bones)\n elif mode == 'PARTICLE':\n # Theoretically, particle keys can be selected,\n # but there seems to be no API for working with this\n pass\n else:\n pass # no selectable elements in other modes\n \n return False" ]
[ "0.6626677", "0.6543731", "0.6026395", "0.60095984", "0.59585935", "0.5927557", "0.5893944", "0.5893944", "0.58926296", "0.5830509", "0.5820323", "0.57275677", "0.5694341", "0.56732976", "0.5672737", "0.5656124", "0.5637087", "0.5625537", "0.5621446", "0.5609848", "0.5569029", "0.5564577", "0.5546229", "0.5536116", "0.5527483", "0.5497136", "0.5474361", "0.54593337", "0.5454757", "0.54544353" ]
0.71104115
0
If a bundle is for native iOS, it has these properties in the Info.plist
def is_info_plist_native(plist):
    return (
        'CFBundleSupportedPlatforms' in plist and
        'iPhoneOS' in plist['CFBundleSupportedPlatforms']
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ios_app_info(self) -> Optional[pulumi.Input['IosAppInfoArgs']]:\n return pulumi.get(self, \"ios_app_info\")", "def system_properties(self):\r\n return dict(self._get_system_properties(self.java))", "def ios_app_info(self) -> 'outputs.IosAppInfoResponse':\n return pulumi.get(self, \"ios_app_info\")", "def _bundle(self):\n # Default to DEFAULT_BUNDLE_NAME\n bundle_path = os.path.join(self.working_dir, DEFAULT_BUNDLE_NAME)\n return self.config['app'].get('bundle', bundle_path)", "def dummyProperties():\n global num_missing_properties\n num_missing_properties += 1 \n return ['main=appinventor.' + DUMMY_USER_NAME + '.' + DUMMY_PROJECT_NAME + '.Screen1\\n',\n 'name=' + DUMMY_PROJECT_NAME + '\\n',\n 'assets=../assets\\n',\n 'source=../src\\n',\n 'build=../build\\n',\n 'versioncode=1\\n',\n 'versionname=1.0\\n',\n 'useslocation=False\\n',\n 'aname=' + DUMMY_PROJECT_NAME + '\\n']", "def __init__(__self__, *,\n bundle_id: str,\n xcode_version: str):\n pulumi.set(__self__, \"bundle_id\", bundle_id)\n pulumi.set(__self__, \"xcode_version\", xcode_version)", "def __init__(__self__, *,\n bundle_id: Optional[pulumi.Input[str]] = None,\n xcode_version: Optional[pulumi.Input[str]] = None):\n if bundle_id is not None:\n pulumi.set(__self__, \"bundle_id\", bundle_id)\n if xcode_version is not None:\n pulumi.set(__self__, \"xcode_version\", xcode_version)", "def select_app():\n panel = Cocoa.NSOpenPanel.openPanel()\n panel.setCanChooseFiles_(True)\n panel.setCanChooseDirectories_(True)\n panel.setResolvesAliases_(True)\n\n if(panel.runModal() == Cocoa.NSOKButton):\n pathArray = panel.filenames()\n path = pathlib.Path(pathArray[0])\n\n plistPath = path /'Contents'/'Info.plist'\n infoFile = plistPath\n\n try:\n appSize = subprocess.check_output(['du', '-shg', str(path)]).split()[0].decode('utf-8')\n n.views['appSize'].setStringValue_(str(appSize))\n except Exception as err:\n print(err)\n\n n.views['appLocation'].setStringValue_(str(path))\n\n try:\n plist = str(infoFile)\n with open(plist, 'rb') as f:\n info = plistlib.load(f)\n\n if 'CFBundleName' in info:\n global collectedName\n collectedName = info['CFBundleName']\n n.views['appName'].setStringValue_(collectedName)\n else:\n n.views['appName'].setStringValue_('')\n\n if 'CFBundleShortVersionString' in info:\n global collectedVersion\n collectedVersion= info['CFBundleShortVersionString']\n n.views['appVersion'].setStringValue_(collectedVersion)\n else:\n n.views['appVersion'].setStringValue_('')\n\n if 'CFBundleIconFile' in info:\n global collectedIcon\n collectedIcon = pathlib.Path(plist).parent / 'Resources' / info['CFBundleIconFile']\n n.views['appIcon'].setStringValue_(str(collectedIcon))\n else:\n n.views['appIcon'].setStringValue_('')\n\n if 'CFBundleIdentifier' in info:\n global collectedIdentifier\n collectedIdentifier = info['CFBundleIdentifier']\n n.views['appIdentifier'].setStringValue_(collectedIdentifier)\n else:\n n.views['appIdentifier'].setStringValue_('')\n\n except Exception as err:\n print('An Error Occured: {0}'.format(err))", "def is_apple():\n return sys.platform == \"darwin\"", "def security_compliance_notification_phones(self):\n if \"securityComplianceNotificationPhones\" in self._prop_dict:\n return self._prop_dict[\"securityComplianceNotificationPhones\"]\n else:\n return None", "def hide_ios(self) -> bool:\n return pulumi.get(self, \"hide_ios\")", "def launch_properties(self) -> Optional[pulumi.Input['BuildpackBindingLaunchPropertiesArgs']]:\n return pulumi.get(self, \"launch_properties\")", "def 
getProductInfo(path=\"./logs/SystemVersion/SystemVersion.plist\"):\n result = {\n \"ProductName\" : None,\n \"ProductionVersion\" : None,\n \"ProductBuildVersion\" : None\n }\n try:\n fd = open(path, 'rb')\n plist = plistlib.load(fd)\n for key in [\"ProductName\", \"ProductVersion\", \"ProductBuildVersion\", \"BuildID\", \"SystemImageID\"]:\n if key in plist.keys():\n result[key] = plist[key]\n else:\n print(\"WARNING: %s not found in %s plist\" % (key, path))\n fd.close()\n except Exception as e:\n print(\"Impossible to parse %s: %s\" % (path, str(e)))\n return result", "def _get_via_app_bundle(self, path: pathlib.Path | str) -> str:\n\n path = pathlib.Path(path) / \"Contents\" / \"Info.plist\"\n\n if not path.exists():\n logger.warning(\n f\"Could not determine application version. Missing: {path}...\"\n )\n return \"?\"\n\n with open(path, \"rb\") as f:\n data = plistlib.load(f)\n\n bundle_short_version: str = data.get(\"CFBundleShortVersionString\", \"?\")\n bundle_version: str = data.get(\"CFBundleVersion\", None)\n\n if bundle_version is None:\n return f\"{bundle_short_version}\"\n\n return f\"{bundle_short_version}-{bundle_version}\"", "def python_software_properties(self):\n self.install_package(\"python-software-properties\")", "def _has_native_dhcp_metadata(self):\n pass", "def custom_properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"custom_properties\")", "def custom_properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"custom_properties\")", "def _get_app_info(self):\n info_plist = None\n\n for data in self.filelist:\n if re.match(self.info_plist_regex, data.filename):\n info_plist = data\n\n if not info_plist:\n self._raise_ipa_error()\n\n info_plist = self.read(info_plist)\n self.app_info = readPlistFromString(info_plist)\n\n return self.app_info", "def getProperties():", "def is_osx():\n return sys.platform == \"darwin\"", "def test_plist_items(tmp_path: Path):\n create_package(tmp_path, source=PLIST_ITEMS_TEST[4])\n output = check_output(\n [sys.executable, \"setup.py\", \"bdist_mac\"],\n text=True,\n cwd=os.fspath(tmp_path),\n )\n print(output)\n # Test that the additional keys were correctly added to the plist.\n sys.path.insert(0, os.fspath(tmp_path))\n data = import_module(\"plist_data\")\n path = f\"{data.BUILD_DIR}/{data.BUNDLE_NAME}.app/Contents/Info.plist\"\n contents = plistlib.loads(tmp_path.joinpath(path).read_bytes())\n assert contents[data.TEST_KEY] == data.TEST_VALUE", "def platform_info(self):\n return self.msg.platform_info", "def is_osx():\r\n return sys.platform == \"darwin\"", "def config_bundle(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"config_bundle\")", "def web_hook_properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"web_hook_properties\")", "def is_jvm_app(self):\r\n return False", "def install_properties(self):\n\n return self._install_properties", "def get_properties():", "def test_test_property():\n\n contents = (\"[Info]\\n\"\n \"sdk = 23\")\n\n testutils.deploy_config_raw(contents)\n\n assert prop.test_prop('info', 'sdk') == 1\n\n testutils.undeploy()\n\n return 0" ]
[ "0.550685", "0.5398409", "0.5094902", "0.5077413", "0.5070626", "0.505988", "0.5054438", "0.49296305", "0.49282697", "0.48895228", "0.48720244", "0.48373243", "0.48084444", "0.48046267", "0.4804201", "0.47875956", "0.47442275", "0.47442275", "0.47250745", "0.4656228", "0.46466342", "0.46392918", "0.46200755", "0.46166146", "0.46164423", "0.45948228", "0.45732585", "0.45700577", "0.45539594", "0.45462084" ]
0.6288776
0
Sign all the dylibs in this directory
def sign_dylibs(self, signer, path):
    for dylib_path in glob.glob(join(path, '*.dylib')):
        dylib = signable.Dylib(self, dylib_path, signer)
        dylib.sign(self, signer)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sign_dylibs(self, cms_signer, path):\n for dylib_path in glob.glob(join(path, '*.dylib')):\n dylib = signable.Dylib(self, dylib_path, cms_signer)\n dylib.sign(self, cms_signer)", "def sign(self, signer):\n # log.debug(\"SIGNING: %s\" % self.path)\n frameworks_path = join(self.path, 'Frameworks')\n if exists(frameworks_path):\n # log.debug(\"SIGNING FRAMEWORKS: %s\" % frameworks_path)\n # sign all the frameworks\n for framework_name in os.listdir(frameworks_path):\n framework_path = join(frameworks_path, framework_name)\n # log.debug(\"checking for framework: %s\" % framework_path)\n try:\n framework = Framework(framework_path)\n # log.debug(\"resigning: %s\" % framework_path)\n framework.resign(signer)\n except NotMatched:\n # log.debug(\"not a framework: %s\" % framework_path)\n continue\n # sign all the dylibs under Frameworks\n self.sign_dylibs(signer, frameworks_path)\n\n # sign any dylibs in the main directory (rare, but it happens)\n self.sign_dylibs(signer, self.path)\n\n plugins_path = join(self.path, 'PlugIns')\n if exists(plugins_path):\n # sign the appex executables\n appex_paths = glob.glob(join(plugins_path, '*.appex'))\n for appex_path in appex_paths:\n plist_path = join(appex_path, 'Info.plist')\n if not exists(plist_path):\n continue\n plist = biplist.readPlist(plist_path)\n appex_exec_path = join(appex_path, plist['CFBundleExecutable'])\n appex = signable.Appex(self, appex_exec_path, signer)\n appex.sign(self, signer)\n\n # then create the seal\n # TODO maybe the app should know what its seal path should be...\n self.seal_path = code_resources.make_seal(self.get_executable_path(),\n self.path)\n # then sign the app\n executable = self.signable_class(self, self.get_executable_path(), signer)\n executable.sign(self, signer)", "def clean_libtomopy(install_prefix='.'):\n install_prefix = os.path.abspath(install_prefix)\n conf = get_config(install_prefix)\n dylib = os.path.abspath(os.path.join(\"..\", 'tomopy', 'sharedlibs', conf.sharedlib))\n clean_files = [dylib]\n for pattern in ('*.o', '*.gcda', '*.gcno', '*.gcov'):\n clean_files.extend(glob.glob(pattern))\n\n for fname in clean_files:\n try:\n os.unlink(fname)\n except OSError:\n logger.info(\"could not clean %s\" % fname)", "def test_sign_app(dummy_command, first_app_with_binaries, tmp_path):\n # Sign the app\n dummy_command.sign_app(\n first_app_with_binaries, identity=\"Sekrit identity (DEADBEEF)\"\n )\n\n # A request has been made to sign all the so and dylib files\n # This acts as a test of the discovery process:\n # * It discovers frameworks\n # * It discovers apps\n # * It discovers Mach-O binaries in various forms and guises\n # * It *doesn't* discover directories\n # * It *doesn't* discover non-Mach-O binaries\n # * It traverses in \"depth first\" order\n app_path = (\n tmp_path\n / \"base_path\"\n / \"build\"\n / \"first-app\"\n / \"macos\"\n / \"app\"\n / \"First App.app\"\n )\n lib_path = app_path / \"Contents\" / \"Resources\"\n frameworks_path = app_path / \"Contents\" / \"Frameworks\"\n dummy_command.tools.subprocess.run.assert_has_calls(\n [\n sign_call(tmp_path, lib_path / \"subfolder\" / \"second_so.so\"),\n sign_call(tmp_path, lib_path / \"subfolder\" / \"second_dylib.dylib\"),\n sign_call(tmp_path, lib_path / \"special.binary\"),\n sign_call(tmp_path, lib_path / \"other_binary\"),\n sign_call(tmp_path, lib_path / \"first_so.so\"),\n sign_call(tmp_path, lib_path / \"first_dylib.dylib\"),\n sign_call(\n tmp_path, lib_path / \"Extras.app\" / \"Contents\" / \"MacOS\" / \"Extras\"\n ),\n 
sign_call(tmp_path, lib_path / \"Extras.app\"),\n sign_call(\n tmp_path,\n frameworks_path / \"Extras.framework\" / \"Resources\" / \"extras.dylib\",\n ),\n sign_call(tmp_path, frameworks_path / \"Extras.framework\"),\n sign_call(tmp_path, app_path),\n ],\n any_order=True,\n )\n\n # Also check that files are not signed after their parent directory has been\n # signed. Reduce the files mentions in the calls to the dummy command\n # to a list of path objects, then ensure that the call to sign any given file\n # does not occur *after* it's parent directory.\n sign_targets = [\n Path(call.args[0][1]) for call in dummy_command.tools.subprocess.run.mock_calls\n ]\n\n parents = set()\n for path in sign_targets:\n # Check parent of path is not in parents\n assert path.parent not in parents\n parents.add(path)", "def resign(self, deep, cms_signer, provisioner):\n # log.debug(\"SIGNING: %s\" % self.path)\n if deep:\n plugins_path = join(self.path, 'PlugIns')\n if exists(plugins_path):\n # sign the appex executables\n appex_paths = glob.glob(join(plugins_path, '*.appex'))\n for appex_path in appex_paths:\n log.debug('working on appex {}'.format(appex_path))\n # Appexes are essentially the same as app bundles, for signing purposes\n # They could be a different class, but there aren't any differences yet noted.\n # They will have the same OS (e.g. iOS, Watch) as their parent\n appex = self.__class__(appex_path)\n appex.resign(deep, cms_signer, provisioner)\n\n frameworks_path = join(self.path, 'Frameworks')\n if exists(frameworks_path):\n # log.debug(\"SIGNING FRAMEWORKS: %s\" % frameworks_path)\n # sign all the frameworks\n for framework_name in os.listdir(frameworks_path):\n framework_path = join(frameworks_path, framework_name)\n # log.debug(\"checking for framework: %s\" % framework_path)\n try:\n framework = Framework(framework_path, self.native_platforms)\n # log.debug(\"resigning: %s\" % framework_path)\n framework.resign(deep, cms_signer, provisioner)\n except NotMatched:\n # log.debug(\"not a framework: %s\" % framework_path)\n continue\n # sign all the dylibs under Frameworks\n self.sign_dylibs(cms_signer, frameworks_path)\n\n # sign any dylibs in the main directory (rare, but it happens)\n self.sign_dylibs(cms_signer, self.path)\n\n # then create the seal\n # TODO maybe the app should know what its seal path should be...\n self.seal_path = code_resources.make_seal(self.get_executable_path(),\n self.path)\n\n # then sign the executable\n executable = self.signable_class(self, self.get_executable_path(), cms_signer)\n executable.sign(self, cms_signer)\n\n log.debug(\"Resigned bundle at <%s>\", self.path)", "def libs(self):\n\n return LibraryList(\"/usr/lib/libSystem.dylib\")", "def _binaries_to_symbolize(self):\n raise NotImplementedError()", "def sign (self):\n print(\"*** signing the inno setup installer ***\")\n pfxfile = r'scripts\\%s.pfx' % self.lname\n if os.path.isfile(pfxfile):\n path = get_windows_sdk_path()\n signtool = os.path.join(path, \"bin\", \"signtool.exe\")\n if os.path.isfile(signtool):\n cmd = [signtool, 'sign', '/f', pfxfile, self.distfile]\n subprocess.check_call(cmd)\n else:\n print(\"No signed installer: signtool.exe not found.\")\n else:\n print(\"No signed installer: certificate %s not found.\" % pfxfile)", "def linking_library_dirs(self):", "def package_app(\n self, app: BaseConfig, sign_app=True, identity=None, adhoc_sign=False, **kwargs\n ):\n if sign_app:\n if adhoc_sign:\n identity = \"-\"\n\n print()\n print(\"[{app.app_name}] Signing app with adhoc 
identity...\".format(app=app))\n else:\n identity = self.select_identity(identity=identity)\n\n print()\n print(\"[{app.app_name}] Signing app with identity {identity}...\".format(\n app=app,\n identity=identity\n ))\n\n for path in itertools.chain(\n self.binary_path(app).glob('**/*.so'),\n self.binary_path(app).glob('**/*.dylib'),\n [self.binary_path(app)],\n ):\n self.sign(\n path,\n entitlements=self.bundle_path(app) / 'Entitlements.plist',\n identity=identity,\n )", "def adduserlibs():\n addlibdir(USERLIBDIR)\n for moduledir in os.listdir(USERLIBDIR):\n if moduledir.endswith('.sikuli'):\n addlibdir(os.path.join(USERLIBDIR, moduledir))", "def addMacOSCodeSignature(filenames):\n\n # Weak signing.\n identity = getMacOSSigningIdentity()\n\n command = [\n \"codesign\",\n \"-s\",\n identity,\n \"--force\",\n \"--deep\",\n \"--preserve-metadata=entitlements\",\n # ,\n ]\n\n if shallUseSigningForNotarization():\n command.append(\"--options=runtime\")\n\n assert type(filenames) is not str\n command.extend(filenames)\n\n with withMadeWritableFileMode(filenames):\n executeToolChecked(\n logger=postprocessing_logger,\n command=command,\n absence_message=_macos_codesign_usage,\n stderr_filter=_filterCodesignErrorOutput,\n )", "def util_sign_release():\n os.chdir(REPO_PATH)\n dr = DebRepo()\n keyname = dr.read_keyname()\n out, err = dr.sign_release(keyname)\n print(out)\n print(err)", "def test_sign_app_with_failure(dummy_command, first_app_with_binaries, tmp_path):\n\n # Sign the app. Signing first_dylib.dylib will fail.\n def _codesign(args, **kwargs):\n if Path(args[1]).name == \"first_dylib.dylib\":\n raise subprocess.CalledProcessError(\n returncode=1, cmd=args, stderr=f\"{args[1]}: Unknown error\"\n )\n\n dummy_command.tools.subprocess.run.side_effect = _codesign\n\n # The invocation will raise an error; however, we can't predict exactly which\n # file will raise an error.\n with pytest.raises(\n BriefcaseCommandError, match=r\"Unable to code sign .*first_dylib\\.dylib\"\n ):\n dummy_command.sign_app(\n first_app_with_binaries, identity=\"Sekrit identity (DEADBEEF)\"\n )\n\n # There has been at least 1 call to sign files. We can't know how many are\n # actually signed, as threads are involved.\n dummy_command.tools.subprocess.run.call_count > 0", "def sign_jars(configs):\n print(\"Using Jenkins job to sign uploaded Jars...\")\n jenkins = Jenkins(configs[\"jenkins\"][\"url\"],\n configs[\"jenkins\"][\"username\"], configs[\"passwords\"][\"jenkins\"])\n sign_jar_job = jenkins[\"sign-jar\"]\n queue = sign_jar_job.invoke(block=True, build_params=configs[\"jenkins\"][\"signJar\"])\n\n if queue.get_build().get_status() == \"SUCCESS\":\n print(\"--Jars are signed successfully!\")\n else:\n raise Exception(\"Failed at jar signing. 
For details, please check \" +\n queue.get_build().get_result_url())", "def _clean_bins():\n rmtree(LIBS_DIR)\n rmtree(BINS_DIR)\n rmtree(HEADERS_DIR)", "def library_dirs(self):", "def GetSymbolBinaries(self, minidump):\n libraries = self._ExtractLibraryNamesFromDump(minidump)\n symbol_binary_dir = self._GetSymbolBinaryDirectory(minidump, libraries)\n if not symbol_binary_dir:\n return []\n\n return [os.path.join(symbol_binary_dir, lib) for lib in libraries]", "def verify():\n verbose = True\n log(\n \"Verifying current directory as a Dallinger experiment...\",\n verbose=verbose,\n )\n ok = verify_package(verbose=verbose)\n if ok:\n log(\"✓ Everything looks good!\", verbose=verbose)\n else:\n log(\"☹ Some problems were found.\", verbose=verbose)", "def on_libRoot(self):\n self.rf_libTree()\n self.rf_libPath()\n self.rf_libFileName()\n self.rf_delInfo()", "def _re_codesign(app_path, signing_identity, provision_path=None):\n bundle_type = PackageType.get_type(app_path)\n logger.debug('Re-codesigning %s...' % (bundle_type,))\n if bundle_type == PackageType.framework or bundle_type == PackageType.dylib:\n _cmd = '/usr/bin/codesign -f -s \"%s\" %s' % (signing_identity, app_path)\n if not safe_check_call(_cmd):\n return False\n return True\n\n code_signature_folder = os.path.join(app_path, '_CodeSignature')\n if os.path.isdir(code_signature_folder):\n shutil.rmtree(code_signature_folder)\n code_signature_file = os.path.join(app_path, 'CodeResources')\n if os.path.isfile(code_signature_file):\n os.remove(code_signature_file)\n\n app_provision_path = os.path.join(app_path, 'embedded.mobileprovision')\n if provision_path:\n shutil.copy(provision_path, app_provision_path)\n\n entitlement_plist_path = os.path.join('/tmp', 'entitlements%s.plist' % int(time.time()))\n if os.path.isfile(entitlement_plist_path):\n os.remove(entitlement_plist_path)\n _cmd = '/usr/libexec/PlistBuddy -x -c \"print :Entitlements \" /dev/stdin <<< ' \\\n '$(security cms -D -i %s) > %s' % (app_provision_path, entitlement_plist_path)\n if not safe_check_call(_cmd):\n return False\n _cmd = \"/usr/libexec/PlistBuddy -c 'Set :get-task-allow true' %s\" % entitlement_plist_path\n if not safe_check_call(_cmd):\n return False\n\n frameworks_path = os.path.join(app_path, 'Frameworks')\n if os.path.isdir(frameworks_path):\n # _cmd = '/usr/bin/codesign -f -s \"%s\" %s/*' % (signing_identity, frameworks_path)\n # if not safe_check_call(_cmd):\n # return False\n for framework in os.listdir(frameworks_path):\n framework_path = os.path.join(frameworks_path, framework)\n _re_codesign_framework(framework_path, signing_identity)\n\n rule_file = os.path.join(app_path, 'ResourceRules.plist')\n if os.path.isfile(rule_file):\n _cmd = '/usr/bin/codesign -f -s \"%s\" ' \\\n '--resource-rules %s ' \\\n '--entitlements %s %s' % (signing_identity, rule_file, entitlement_plist_path, app_path)\n else:\n _cmd = '/usr/bin/codesign -f -s \"%s\" ' \\\n '--no-strict --entitlements %s %s' % (signing_identity, entitlement_plist_path, app_path)\n if not safe_check_call(_cmd):\n return False\n if os.path.isfile(entitlement_plist_path):\n os.remove(entitlement_plist_path)\n logger.debug('Done.')\n return True", "def clean():\n C.libs.clear()\n shutil.rmtree(C.cache_dir, ignore_errors=True)", "def cleanup():\r\n compiledir = theano.config.compiledir\r\n for directory in os.listdir(compiledir):\r\n file = None\r\n try:\r\n try:\r\n filename = os.path.join(compiledir, directory, \"key.pkl\")\r\n file = open(filename, 'rb')\r\n #print file\r\n try:\r\n keydata = 
cPickle.load(file)\r\n for key in list(keydata.keys):\r\n have_npy_abi_version = False\r\n have_c_compiler = False\r\n for obj in flatten(key):\r\n if isinstance(obj, numpy.ndarray):\r\n #Reuse have_npy_abi_version to\r\n #force the removing of key\r\n have_npy_abi_version = False\r\n break\r\n elif isinstance(obj, basestring):\r\n if obj.startswith('NPY_ABI_VERSION=0x'):\r\n have_npy_abi_version = True\r\n elif obj.startswith('c_compiler_str='):\r\n have_c_compiler = True\r\n elif (isinstance(obj, (theano.gof.Op, theano.gof.Type)) and\r\n hasattr(obj, 'c_code_cache_version')):\r\n v = obj.c_code_cache_version()\r\n if v not in [(), None] and v not in key[0]:\r\n #Reuse have_npy_abi_version to\r\n #force the removing of key\r\n have_npy_abi_version = False\r\n break\r\n\r\n if not have_npy_abi_version or not have_c_compiler:\r\n try:\r\n #This can happen when we move the compiledir.\r\n if keydata.key_pkl != filename:\r\n keydata.key_pkl = filename\r\n keydata.remove_key(key)\r\n except IOError, e:\r\n _logger.error(\r\n \"Could not remove file '%s'. To complete \"\r\n \"the clean-up, please remove manually \"\r\n \"the directory containing it.\",\r\n filename)\r\n if len(keydata.keys) == 0:\r\n shutil.rmtree(os.path.join(compiledir, directory))\r\n\r\n except EOFError:\r\n _logger.error(\r\n \"Could not read key file '%s'. To complete \"\r\n \"the clean-up, please remove manually \"\r\n \"the directory containing it.\",\r\n filename)\r\n except IOError:\r\n _logger.error(\r\n \"Could not clean up this directory: '%s'. To complete \"\r\n \"the clean-up, please remove it manually.\",\r\n directory)\r\n finally:\r\n if file is not None:\r\n file.close()", "def dist(self):\n for arch, python in self.python:\n\n # Build the binary\n build_path = os.path.join(\"dist\", f\"safety-{arch}\")\n self.run(f\"{python} -m PyInstaller safety.spec\"\n f\" --distpath {build_path}\")\n\n # There seems to be no way to tell pyinstaller the binary name.\n # This leads to problems with appveyors artifact collector because\n # every binary is named the same.\n #\n # Move them around so they can be picked up correctly\n #\n artifact_path = os.path.join(\n os.getcwd(),\n \"dist\",\n f\"safety-{self.os}-{'i686' if arch == 32 else 'x86_64'}\"\n )\n binary_path = os.path.join(os.getcwd(), build_path, \"safety\")\n if self.os == self.WIN:\n self.run(f\"move {binary_path}.exe {artifact_path}.exe\")\n else:\n self.run(f\"cp {binary_path} {artifact_path}\")", "def _copy_binaries_to_archive(archive: PyfmuArchive) -> PyfmuArchive:\n\n binaries_path = Resources.get().binaries_dir\n\n\n archive_binaries_path = archive.root / 'binaries'\n\n copytree(binaries_path,archive_binaries_path)\n\n # paths\n archive.binaries_dir = archive_binaries_path\n archive.wrapper_win64 = archive.binaries_dir / 'win64' / 'pyfmu.dll'\n archive.wrapper_linux64 = archive.binaries_dir / 'linux64' / 'pyfmu.so'\n\n return archive", "def _unpack_stdlib(self):\n output_dir = self.manager.output_dir\n\n with tempfile.TemporaryDirectory() as td:\n tdp = Path(td)\n self.extract_one(self.app_archive, tdp)\n self.copy_one(tdp / \"package\", output_dir)\n\n self.maybe_timestamp(output_dir)", "def test_dig_sig(self):\n\n for using in [HashTypes.SHA1, HashTypes.SHA2, ]:\n self.do_test_dig_sig(using)", "def rescan_library(self):\n self.scan_dir(self.libpath)", "def sign(self, object):\n pass", "def test_pkglibdir(self):\n self.chck_triple('pkglibdir')" ]
[ "0.8432315", "0.6325134", "0.59998757", "0.5821335", "0.5536926", "0.5455998", "0.53087306", "0.51840645", "0.5171187", "0.5158678", "0.5043617", "0.4983382", "0.49256852", "0.4905008", "0.48366326", "0.48080102", "0.47965342", "0.47516686", "0.47421157", "0.47248307", "0.4704481", "0.46905252", "0.4653203", "0.46346974", "0.46167737", "0.4604531", "0.45787355", "0.45783833", "0.45635626", "0.45624366" ]
0.8295412
1
Sign everything in this bundle, recursing into sub-bundles
def sign(self, signer):
    # log.debug("SIGNING: %s" % self.path)
    frameworks_path = join(self.path, 'Frameworks')
    if exists(frameworks_path):
        # log.debug("SIGNING FRAMEWORKS: %s" % frameworks_path)
        # sign all the frameworks
        for framework_name in os.listdir(frameworks_path):
            framework_path = join(frameworks_path, framework_name)
            # log.debug("checking for framework: %s" % framework_path)
            try:
                framework = Framework(framework_path)
                # log.debug("resigning: %s" % framework_path)
                framework.resign(signer)
            except NotMatched:
                # log.debug("not a framework: %s" % framework_path)
                continue
        # sign all the dylibs under Frameworks
        self.sign_dylibs(signer, frameworks_path)

    # sign any dylibs in the main directory (rare, but it happens)
    self.sign_dylibs(signer, self.path)

    plugins_path = join(self.path, 'PlugIns')
    if exists(plugins_path):
        # sign the appex executables
        appex_paths = glob.glob(join(plugins_path, '*.appex'))
        for appex_path in appex_paths:
            plist_path = join(appex_path, 'Info.plist')
            if not exists(plist_path):
                continue
            plist = biplist.readPlist(plist_path)
            appex_exec_path = join(appex_path, plist['CFBundleExecutable'])
            appex = signable.Appex(self, appex_exec_path, signer)
            appex.sign(self, signer)

    # then create the seal
    # TODO maybe the app should know what its seal path should be...
    self.seal_path = code_resources.make_seal(self.get_executable_path(),
                                              self.path)
    # then sign the app
    executable = self.signable_class(self, self.get_executable_path(), signer)
    executable.sign(self, signer)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resign(self, deep, cms_signer, provisioner):\n # log.debug(\"SIGNING: %s\" % self.path)\n if deep:\n plugins_path = join(self.path, 'PlugIns')\n if exists(plugins_path):\n # sign the appex executables\n appex_paths = glob.glob(join(plugins_path, '*.appex'))\n for appex_path in appex_paths:\n log.debug('working on appex {}'.format(appex_path))\n # Appexes are essentially the same as app bundles, for signing purposes\n # They could be a different class, but there aren't any differences yet noted.\n # They will have the same OS (e.g. iOS, Watch) as their parent\n appex = self.__class__(appex_path)\n appex.resign(deep, cms_signer, provisioner)\n\n frameworks_path = join(self.path, 'Frameworks')\n if exists(frameworks_path):\n # log.debug(\"SIGNING FRAMEWORKS: %s\" % frameworks_path)\n # sign all the frameworks\n for framework_name in os.listdir(frameworks_path):\n framework_path = join(frameworks_path, framework_name)\n # log.debug(\"checking for framework: %s\" % framework_path)\n try:\n framework = Framework(framework_path, self.native_platforms)\n # log.debug(\"resigning: %s\" % framework_path)\n framework.resign(deep, cms_signer, provisioner)\n except NotMatched:\n # log.debug(\"not a framework: %s\" % framework_path)\n continue\n # sign all the dylibs under Frameworks\n self.sign_dylibs(cms_signer, frameworks_path)\n\n # sign any dylibs in the main directory (rare, but it happens)\n self.sign_dylibs(cms_signer, self.path)\n\n # then create the seal\n # TODO maybe the app should know what its seal path should be...\n self.seal_path = code_resources.make_seal(self.get_executable_path(),\n self.path)\n\n # then sign the executable\n executable = self.signable_class(self, self.get_executable_path(), cms_signer)\n executable.sign(self, cms_signer)\n\n log.debug(\"Resigned bundle at <%s>\", self.path)", "def sign(self, object):\n pass", "def sign_dylibs(self, cms_signer, path):\n for dylib_path in glob.glob(join(path, '*.dylib')):\n dylib = signable.Dylib(self, dylib_path, cms_signer)\n dylib.sign(self, cms_signer)", "def package_app(\n self, app: BaseConfig, sign_app=True, identity=None, adhoc_sign=False, **kwargs\n ):\n if sign_app:\n if adhoc_sign:\n identity = \"-\"\n\n print()\n print(\"[{app.app_name}] Signing app with adhoc identity...\".format(app=app))\n else:\n identity = self.select_identity(identity=identity)\n\n print()\n print(\"[{app.app_name}] Signing app with identity {identity}...\".format(\n app=app,\n identity=identity\n ))\n\n for path in itertools.chain(\n self.binary_path(app).glob('**/*.so'),\n self.binary_path(app).glob('**/*.dylib'),\n [self.binary_path(app)],\n ):\n self.sign(\n path,\n entitlements=self.bundle_path(app) / 'Entitlements.plist',\n identity=identity,\n )", "def sign_dylibs(self, signer, path):\n for dylib_path in glob.glob(join(path, '*.dylib')):\n dylib = signable.Dylib(self, dylib_path, signer)\n dylib.sign(self, signer)", "def fetch_jwt_bundles(self) -> JwtBundleSet:", "def resign(self, signer):\n self.sign(signer)\n log.debug(\"Resigned bundle at <%s>\", self.path)", "def sign(self):\r\n self._reset()\r\n if hasattr(self, \"_privateKey\"):\r\n if \"fee\" not in self:\r\n setFees(self)\r\n if self.type == 4:\r\n missings = \\\r\n self.asset[\"multiSignature\"][\"min\"] - \\\r\n len(self.get(\"signature\", []))\r\n if missings:\r\n raise Exception(\"owner signature missing (%d)\" % missings)\r\n self[\"signature\"] = dposlib.core.crypto.getSignature(\r\n self, self._privateKey\r\n )\r\n else:\r\n raise Exception(\"orphan transaction can not 
sign itsef\")", "def resign(self, deep, cms_signer, provisioner):\n # In the typical case, we add entitlements from the pprof into the app's signature\n if not cms_signer.is_adhoc():\n team_id = cms_signer.get_team_id()\n self.provision(team_id, provisioner)\n self.entitle(team_id, provisioner)\n\n # actually resign this bundle now\n super(App, self).resign(deep, cms_signer, provisioner)", "def verify_bundles(request: Request, policy: RequestPolicy, logger: Logger) -> None:\n logger.debug('Begin \"Verify KSR bundles\"')\n\n check_unique_ids(request, policy, logger)\n check_keys_match_zsk_policy(request, policy, logger)\n check_proof_of_possession(request, policy, logger)\n check_bundle_count(request, policy, logger)\n check_cycle_durations(request, policy, logger)\n\n logger.debug('End \"Verify KSR bundles\"')", "def sign (self):\n print(\"*** signing the inno setup installer ***\")\n pfxfile = r'scripts\\%s.pfx' % self.lname\n if os.path.isfile(pfxfile):\n path = get_windows_sdk_path()\n signtool = os.path.join(path, \"bin\", \"signtool.exe\")\n if os.path.isfile(signtool):\n cmd = [signtool, 'sign', '/f', pfxfile, self.distfile]\n subprocess.check_call(cmd)\n else:\n print(\"No signed installer: signtool.exe not found.\")\n else:\n print(\"No signed installer: certificate %s not found.\" % pfxfile)", "def fetch_x509_bundles(self) -> X509BundleSet:", "def sign(self, payload):\n raise NotImplementedError", "def multiSignWithSecret(self, secret):\r\n keys = dposlib.core.crypto.getKeys(secret)\r\n self.multiSignWithKey(keys[\"privateKey\"])", "def gpg_sign(configs, artifact_folder):\n print(\"GPG sign all files in artifact folder...\")\n for file_to_sign in os.listdir(artifact_folder):\n gpg_str = 'gpg --batch --passphrase {0} -ab {1}'.format(\n configs[\"passwords\"][\"gpg\"], os.path.join(artifact_folder, file_to_sign))\n print(\"--\" + gpg_str)\n subprocess.call(gpg_str)", "def sign_partial(self, *partial_signers: Keypair) -> None:\n underlying_signers = [signer.to_solders() for signer in partial_signers]\n self._solders.partial_sign(underlying_signers, self._solders.message.recent_blockhash)", "def _sign_document(self):\n return False", "def sign_jars(configs):\n print(\"Using Jenkins job to sign uploaded Jars...\")\n jenkins = Jenkins(configs[\"jenkins\"][\"url\"],\n configs[\"jenkins\"][\"username\"], configs[\"passwords\"][\"jenkins\"])\n sign_jar_job = jenkins[\"sign-jar\"]\n queue = sign_jar_job.invoke(block=True, build_params=configs[\"jenkins\"][\"signJar\"])\n\n if queue.get_build().get_status() == \"SUCCESS\":\n print(\"--Jars are signed successfully!\")\n else:\n raise Exception(\"Failed at jar signing. For details, please check \" +\n queue.get_build().get_result_url())", "def __gitVerifyBundle(self):\n self.vcs.gitVerifyBundle(self.project.getProjectPath())", "def _re_codesign(app_path, signing_identity, provision_path=None):\n bundle_type = PackageType.get_type(app_path)\n logger.debug('Re-codesigning %s...' 
% (bundle_type,))\n if bundle_type == PackageType.framework or bundle_type == PackageType.dylib:\n _cmd = '/usr/bin/codesign -f -s \"%s\" %s' % (signing_identity, app_path)\n if not safe_check_call(_cmd):\n return False\n return True\n\n code_signature_folder = os.path.join(app_path, '_CodeSignature')\n if os.path.isdir(code_signature_folder):\n shutil.rmtree(code_signature_folder)\n code_signature_file = os.path.join(app_path, 'CodeResources')\n if os.path.isfile(code_signature_file):\n os.remove(code_signature_file)\n\n app_provision_path = os.path.join(app_path, 'embedded.mobileprovision')\n if provision_path:\n shutil.copy(provision_path, app_provision_path)\n\n entitlement_plist_path = os.path.join('/tmp', 'entitlements%s.plist' % int(time.time()))\n if os.path.isfile(entitlement_plist_path):\n os.remove(entitlement_plist_path)\n _cmd = '/usr/libexec/PlistBuddy -x -c \"print :Entitlements \" /dev/stdin <<< ' \\\n '$(security cms -D -i %s) > %s' % (app_provision_path, entitlement_plist_path)\n if not safe_check_call(_cmd):\n return False\n _cmd = \"/usr/libexec/PlistBuddy -c 'Set :get-task-allow true' %s\" % entitlement_plist_path\n if not safe_check_call(_cmd):\n return False\n\n frameworks_path = os.path.join(app_path, 'Frameworks')\n if os.path.isdir(frameworks_path):\n # _cmd = '/usr/bin/codesign -f -s \"%s\" %s/*' % (signing_identity, frameworks_path)\n # if not safe_check_call(_cmd):\n # return False\n for framework in os.listdir(frameworks_path):\n framework_path = os.path.join(frameworks_path, framework)\n _re_codesign_framework(framework_path, signing_identity)\n\n rule_file = os.path.join(app_path, 'ResourceRules.plist')\n if os.path.isfile(rule_file):\n _cmd = '/usr/bin/codesign -f -s \"%s\" ' \\\n '--resource-rules %s ' \\\n '--entitlements %s %s' % (signing_identity, rule_file, entitlement_plist_path, app_path)\n else:\n _cmd = '/usr/bin/codesign -f -s \"%s\" ' \\\n '--no-strict --entitlements %s %s' % (signing_identity, entitlement_plist_path, app_path)\n if not safe_check_call(_cmd):\n return False\n if os.path.isfile(entitlement_plist_path):\n os.remove(entitlement_plist_path)\n logger.debug('Done.')\n return True", "def sign(self, body, external_aad, private_key):", "def test_sign_app(dummy_command, first_app_with_binaries, tmp_path):\n # Sign the app\n dummy_command.sign_app(\n first_app_with_binaries, identity=\"Sekrit identity (DEADBEEF)\"\n )\n\n # A request has been made to sign all the so and dylib files\n # This acts as a test of the discovery process:\n # * It discovers frameworks\n # * It discovers apps\n # * It discovers Mach-O binaries in various forms and guises\n # * It *doesn't* discover directories\n # * It *doesn't* discover non-Mach-O binaries\n # * It traverses in \"depth first\" order\n app_path = (\n tmp_path\n / \"base_path\"\n / \"build\"\n / \"first-app\"\n / \"macos\"\n / \"app\"\n / \"First App.app\"\n )\n lib_path = app_path / \"Contents\" / \"Resources\"\n frameworks_path = app_path / \"Contents\" / \"Frameworks\"\n dummy_command.tools.subprocess.run.assert_has_calls(\n [\n sign_call(tmp_path, lib_path / \"subfolder\" / \"second_so.so\"),\n sign_call(tmp_path, lib_path / \"subfolder\" / \"second_dylib.dylib\"),\n sign_call(tmp_path, lib_path / \"special.binary\"),\n sign_call(tmp_path, lib_path / \"other_binary\"),\n sign_call(tmp_path, lib_path / \"first_so.so\"),\n sign_call(tmp_path, lib_path / \"first_dylib.dylib\"),\n sign_call(\n tmp_path, lib_path / \"Extras.app\" / \"Contents\" / \"MacOS\" / \"Extras\"\n ),\n sign_call(tmp_path, 
lib_path / \"Extras.app\"),\n sign_call(\n tmp_path,\n frameworks_path / \"Extras.framework\" / \"Resources\" / \"extras.dylib\",\n ),\n sign_call(tmp_path, frameworks_path / \"Extras.framework\"),\n sign_call(tmp_path, app_path),\n ],\n any_order=True,\n )\n\n # Also check that files are not signed after their parent directory has been\n # signed. Reduce the files mentions in the calls to the dummy command\n # to a list of path objects, then ensure that the call to sign any given file\n # does not occur *after* it's parent directory.\n sign_targets = [\n Path(call.args[0][1]) for call in dummy_command.tools.subprocess.run.mock_calls\n ]\n\n parents = set()\n for path in sign_targets:\n # Check parent of path is not in parents\n assert path.parent not in parents\n parents.add(path)", "async def test_sign_endorse_recursive(image_config: ImageConfig):\n\n # Stack representation of a ternary tree\n stack = [{\"name\": \"?-Unsigned\", \"image_config\": image_config.clone()}]\n LOGGER.debug(\"Unsigned Canonical Digest: %s\", image_config.get_digest_canonical())\n\n async def append_new_image_config(\n *,\n config: ImageConfig,\n signature_type: SignatureTypes = SignatureTypes.SIGN,\n iteration,\n ):\n action = f\"X{signature_type.name}\"\n signer = FakeSigner(f\"[{iteration}-{action: <8}: {{0}}]\")\n await config.sign(signature_type=signature_type, signer=signer)\n stack.append({\"name\": f\"{iteration}-{action}\", \"image_config\": config})\n\n iterations = 6\n # Breadth first traversal ...\n for i in range(iterations):\n LOGGER.debug(\"Iteration %d\", i)\n for _ in range(len(stack)):\n frame = stack[0]\n LOGGER.debug(\" Checking %s\", frame[\"name\"])\n # Validate the signature / endorsement permutations of the first entry on the stack ...\n signatures = frame[\"image_config\"].get_signature_list()\n\n flat_list = \"\".join([signature.signature for signature in signatures])\n if f\"X{SignatureTypes.RESIGN.name}\" in flat_list:\n # Too lazy to calculate how many signatures were removed ...\n assert len(signatures) <= i\n else:\n assert len(signatures) == i\n\n for sig, signature in enumerate(signatures):\n LOGGER.debug(\" %s\", signature.signature)\n if f\"X{SignatureTypes.ENDORSE.name}\" in signature.signature:\n # Endorsement digests should include all entities of a lower order.\n temp = frame[\"image_config\"].clone()\n temp.set_signature_list(signatures=temp.get_signature_list()[:sig])\n assert signature.digest == temp.get_digest_canonical()\n assert temp.get_digest_canonical() in signature.signature\n else:\n # Signature digests should be independent of the number of signatures.\n # Re-signed images should always contain 1 signature.\n assert signature.digest == image_config.get_digest_canonical()\n assert image_config.get_digest_canonical() in signature.signature\n\n # Unshift the first image configuration, append three more image configurations on to the stack: ...\n # ... one signed ...\n await append_new_image_config(\n config=frame[\"image_config\"].clone(), iteration=i\n )\n # ... one endorsed ...\n await append_new_image_config(\n config=frame[\"image_config\"].clone(),\n signature_type=SignatureTypes.ENDORSE,\n iteration=i,\n )\n # ... 
one resigned ...\n await append_new_image_config(\n config=stack.pop(0).get(\"image_config\"),\n signature_type=SignatureTypes.RESIGN,\n iteration=i,\n )", "def sign(self, privkey):\n seckey = CIoncoinSecret.from_secret_bytes(x(ioncointools.encode_privkey(privkey, \"hex\")))\n\n for i in range(len(self.tx.vin)):\n txin_scriptPubKey = self.tx.vin[i].scriptSig\n sighash = SignatureHash(txin_scriptPubKey, self.tx, i, SIGHASH_ALL)\n sig = seckey.sign(sighash) + struct.pack('<B', SIGHASH_ALL)\n self.tx.vin[i].scriptSig = CScript([sig, seckey.pub])\n\n VerifyScript(self.tx.vin[i].scriptSig, txin_scriptPubKey, self.tx, i, (SCRIPT_VERIFY_P2SH,))", "def signSign(self):\r\n if \"signature\" in self: # or \"signatures\" in self ?\r\n self.pop(\"id\", False)\r\n try:\r\n self[\"signSignature\"] = dposlib.core.crypto.getSignature(\r\n self, self._secondPrivateKey,\r\n exclude_second_sig=True,\r\n )\r\n except AttributeError:\r\n raise Exception(\"no second private Key available\")\r\n else:\r\n raise Exception(\"transaction not signed\")", "def util_sign_release():\n os.chdir(REPO_PATH)\n dr = DebRepo()\n keyname = dr.read_keyname()\n out, err = dr.sign_release(keyname)\n print(out)\n print(err)", "def test_sign_file_deep_sign(dummy_command, tmp_path, capsys):\n # First call raises the deep sign warning; second call succeeds\n dummy_command.tools.subprocess.run.side_effect = mock_codesign(\n [\" code object is not signed at all\", None]\n )\n\n # Sign the file\n dummy_command.sign_file(\n tmp_path / \"base_path\" / \"random.file\", identity=\"Sekrit identity (DEADBEEF)\"\n )\n\n # 2 attempt to codesign was made; the second enabled the deep argument.\n dummy_command.tools.subprocess.run.assert_has_calls(\n [\n sign_call(\n tmp_path,\n tmp_path / \"base_path\" / \"random.file\",\n entitlements=False,\n ),\n sign_call(\n tmp_path,\n tmp_path / \"base_path\" / \"random.file\",\n entitlements=False,\n deep=True,\n ),\n ],\n any_order=False,\n )\n\n # The console includes a warning about the attempt to deep sign\n assert \"... file requires a deep sign; retrying\\n\" in capsys.readouterr().out", "def js_bundle_names(self):\n yield 'djblets-utils'\n yield 'djblets-avatars-config'\n\n for service in self.avatar_service_registry.configurable_services:\n for bundle in service.config_form_class.js_bundle_names:\n yield bundle", "def test_fail_signed_in_wrong_order(self):\n # Swap the signatures from the first and second keys.\n # Note that the keys have security level of 3, so we need to swap\n # out a total of 6 signatures.\n sig_1_1 = self.bundle[1].signature_message_fragment\n sig_1_2 = self.bundle[2].signature_message_fragment\n sig_1_3 = self.bundle[3].signature_message_fragment\n\n sig_2_1 = self.bundle[4].signature_message_fragment\n sig_2_2 = self.bundle[5].signature_message_fragment\n sig_2_3 = self.bundle[6].signature_message_fragment\n\n self.bundle[1].signature_message_fragment = sig_2_1\n self.bundle[2].signature_message_fragment = sig_2_2\n self.bundle[3].signature_message_fragment = sig_2_3\n\n self.bundle[4].signature_message_fragment = sig_1_1\n self.bundle[5].signature_message_fragment = sig_1_2\n self.bundle[6].signature_message_fragment = sig_1_3\n\n validator = BundleValidator(self.bundle)\n\n self.assertFalse(validator.is_valid())\n\n self.assertListEqual(\n validator.errors,\n\n [\n 'Transaction 1 has invalid signature (using 8 fragments).',\n ],\n )", "def sendToSign(cmd):\n\tsubprocess.call([\"./sign.sh\", cmd])" ]
[ "0.5995381", "0.5584379", "0.55598193", "0.55531466", "0.55146366", "0.54876906", "0.52974087", "0.52845377", "0.5166314", "0.51473546", "0.5102368", "0.50621337", "0.50598043", "0.5002848", "0.49882516", "0.49192393", "0.4897663", "0.48194253", "0.48167092", "0.48128116", "0.47864738", "0.47588503", "0.47543335", "0.47540075", "0.47372723", "0.47276503", "0.4723787", "0.46865982", "0.46859807", "0.46829343" ]
0.59542173
1
signs bundle, modifies in place
def resign(self, signer):
    self.sign(signer)
    log.debug("Resigned bundle at <%s>", self.path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sign(self, object):\n pass", "def sign(self, payload):\n raise NotImplementedError", "def _re_codesign(app_path, signing_identity, provision_path=None):\n bundle_type = PackageType.get_type(app_path)\n logger.debug('Re-codesigning %s...' % (bundle_type,))\n if bundle_type == PackageType.framework or bundle_type == PackageType.dylib:\n _cmd = '/usr/bin/codesign -f -s \"%s\" %s' % (signing_identity, app_path)\n if not safe_check_call(_cmd):\n return False\n return True\n\n code_signature_folder = os.path.join(app_path, '_CodeSignature')\n if os.path.isdir(code_signature_folder):\n shutil.rmtree(code_signature_folder)\n code_signature_file = os.path.join(app_path, 'CodeResources')\n if os.path.isfile(code_signature_file):\n os.remove(code_signature_file)\n\n app_provision_path = os.path.join(app_path, 'embedded.mobileprovision')\n if provision_path:\n shutil.copy(provision_path, app_provision_path)\n\n entitlement_plist_path = os.path.join('/tmp', 'entitlements%s.plist' % int(time.time()))\n if os.path.isfile(entitlement_plist_path):\n os.remove(entitlement_plist_path)\n _cmd = '/usr/libexec/PlistBuddy -x -c \"print :Entitlements \" /dev/stdin <<< ' \\\n '$(security cms -D -i %s) > %s' % (app_provision_path, entitlement_plist_path)\n if not safe_check_call(_cmd):\n return False\n _cmd = \"/usr/libexec/PlistBuddy -c 'Set :get-task-allow true' %s\" % entitlement_plist_path\n if not safe_check_call(_cmd):\n return False\n\n frameworks_path = os.path.join(app_path, 'Frameworks')\n if os.path.isdir(frameworks_path):\n # _cmd = '/usr/bin/codesign -f -s \"%s\" %s/*' % (signing_identity, frameworks_path)\n # if not safe_check_call(_cmd):\n # return False\n for framework in os.listdir(frameworks_path):\n framework_path = os.path.join(frameworks_path, framework)\n _re_codesign_framework(framework_path, signing_identity)\n\n rule_file = os.path.join(app_path, 'ResourceRules.plist')\n if os.path.isfile(rule_file):\n _cmd = '/usr/bin/codesign -f -s \"%s\" ' \\\n '--resource-rules %s ' \\\n '--entitlements %s %s' % (signing_identity, rule_file, entitlement_plist_path, app_path)\n else:\n _cmd = '/usr/bin/codesign -f -s \"%s\" ' \\\n '--no-strict --entitlements %s %s' % (signing_identity, entitlement_plist_path, app_path)\n if not safe_check_call(_cmd):\n return False\n if os.path.isfile(entitlement_plist_path):\n os.remove(entitlement_plist_path)\n logger.debug('Done.')\n return True", "def sign(self):\r\n self._reset()\r\n if hasattr(self, \"_privateKey\"):\r\n if \"fee\" not in self:\r\n setFees(self)\r\n if self.type == 4:\r\n missings = \\\r\n self.asset[\"multiSignature\"][\"min\"] - \\\r\n len(self.get(\"signature\", []))\r\n if missings:\r\n raise Exception(\"owner signature missing (%d)\" % missings)\r\n self[\"signature\"] = dposlib.core.crypto.getSignature(\r\n self, self._privateKey\r\n )\r\n else:\r\n raise Exception(\"orphan transaction can not sign itsef\")", "def resign(self, deep, cms_signer, provisioner):\n # log.debug(\"SIGNING: %s\" % self.path)\n if deep:\n plugins_path = join(self.path, 'PlugIns')\n if exists(plugins_path):\n # sign the appex executables\n appex_paths = glob.glob(join(plugins_path, '*.appex'))\n for appex_path in appex_paths:\n log.debug('working on appex {}'.format(appex_path))\n # Appexes are essentially the same as app bundles, for signing purposes\n # They could be a different class, but there aren't any differences yet noted.\n # They will have the same OS (e.g. 
iOS, Watch) as their parent\n appex = self.__class__(appex_path)\n appex.resign(deep, cms_signer, provisioner)\n\n frameworks_path = join(self.path, 'Frameworks')\n if exists(frameworks_path):\n # log.debug(\"SIGNING FRAMEWORKS: %s\" % frameworks_path)\n # sign all the frameworks\n for framework_name in os.listdir(frameworks_path):\n framework_path = join(frameworks_path, framework_name)\n # log.debug(\"checking for framework: %s\" % framework_path)\n try:\n framework = Framework(framework_path, self.native_platforms)\n # log.debug(\"resigning: %s\" % framework_path)\n framework.resign(deep, cms_signer, provisioner)\n except NotMatched:\n # log.debug(\"not a framework: %s\" % framework_path)\n continue\n # sign all the dylibs under Frameworks\n self.sign_dylibs(cms_signer, frameworks_path)\n\n # sign any dylibs in the main directory (rare, but it happens)\n self.sign_dylibs(cms_signer, self.path)\n\n # then create the seal\n # TODO maybe the app should know what its seal path should be...\n self.seal_path = code_resources.make_seal(self.get_executable_path(),\n self.path)\n\n # then sign the executable\n executable = self.signable_class(self, self.get_executable_path(), cms_signer)\n executable.sign(self, cms_signer)\n\n log.debug(\"Resigned bundle at <%s>\", self.path)", "def resign(self, deep, cms_signer, provisioner):\n # In the typical case, we add entitlements from the pprof into the app's signature\n if not cms_signer.is_adhoc():\n team_id = cms_signer.get_team_id()\n self.provision(team_id, provisioner)\n self.entitle(team_id, provisioner)\n\n # actually resign this bundle now\n super(App, self).resign(deep, cms_signer, provisioner)", "def sign(self, signer):\n # log.debug(\"SIGNING: %s\" % self.path)\n frameworks_path = join(self.path, 'Frameworks')\n if exists(frameworks_path):\n # log.debug(\"SIGNING FRAMEWORKS: %s\" % frameworks_path)\n # sign all the frameworks\n for framework_name in os.listdir(frameworks_path):\n framework_path = join(frameworks_path, framework_name)\n # log.debug(\"checking for framework: %s\" % framework_path)\n try:\n framework = Framework(framework_path)\n # log.debug(\"resigning: %s\" % framework_path)\n framework.resign(signer)\n except NotMatched:\n # log.debug(\"not a framework: %s\" % framework_path)\n continue\n # sign all the dylibs under Frameworks\n self.sign_dylibs(signer, frameworks_path)\n\n # sign any dylibs in the main directory (rare, but it happens)\n self.sign_dylibs(signer, self.path)\n\n plugins_path = join(self.path, 'PlugIns')\n if exists(plugins_path):\n # sign the appex executables\n appex_paths = glob.glob(join(plugins_path, '*.appex'))\n for appex_path in appex_paths:\n plist_path = join(appex_path, 'Info.plist')\n if not exists(plist_path):\n continue\n plist = biplist.readPlist(plist_path)\n appex_exec_path = join(appex_path, plist['CFBundleExecutable'])\n appex = signable.Appex(self, appex_exec_path, signer)\n appex.sign(self, signer)\n\n # then create the seal\n # TODO maybe the app should know what its seal path should be...\n self.seal_path = code_resources.make_seal(self.get_executable_path(),\n self.path)\n # then sign the app\n executable = self.signable_class(self, self.get_executable_path(), signer)\n executable.sign(self, signer)", "def sign(self, body, external_aad, private_key):", "def util_sign_release():\n os.chdir(REPO_PATH)\n dr = DebRepo()\n keyname = dr.read_keyname()\n out, err = dr.sign_release(keyname)\n print(out)\n print(err)", "def signSign(self):\r\n if \"signature\" in self: # or \"signatures\" in self 
?\r\n self.pop(\"id\", False)\r\n try:\r\n self[\"signSignature\"] = dposlib.core.crypto.getSignature(\r\n self, self._secondPrivateKey,\r\n exclude_second_sig=True,\r\n )\r\n except AttributeError:\r\n raise Exception(\"no second private Key available\")\r\n else:\r\n raise Exception(\"transaction not signed\")", "def package_app(\n self, app: BaseConfig, sign_app=True, identity=None, adhoc_sign=False, **kwargs\n ):\n if sign_app:\n if adhoc_sign:\n identity = \"-\"\n\n print()\n print(\"[{app.app_name}] Signing app with adhoc identity...\".format(app=app))\n else:\n identity = self.select_identity(identity=identity)\n\n print()\n print(\"[{app.app_name}] Signing app with identity {identity}...\".format(\n app=app,\n identity=identity\n ))\n\n for path in itertools.chain(\n self.binary_path(app).glob('**/*.so'),\n self.binary_path(app).glob('**/*.dylib'),\n [self.binary_path(app)],\n ):\n self.sign(\n path,\n entitlements=self.bundle_path(app) / 'Entitlements.plist',\n identity=identity,\n )", "def test_sign(self):\n self.signer.Sign(b'notadb')\n self.assertTrue(True)", "def add_sign(self):\n if self.is_signed():\n self.remove_sign()\n \n data = self._document.read()\n encrypted = self._encryptor.encrypt_cbc(data, self._init_vector)\n hash_value = encrypted[-16:]\n self._document.write(self._seperator.encode() + hash_value + self._seperator.encode())\n print(\"The document is signed!\")", "def sendToSign(cmd):\n\tsubprocess.call([\"./sign.sh\", cmd])", "def _sign(self, data, salt):\r\n strBuffer = \"\"\r\n # print data.keys()\r\n for k in sorted(data.iterkeys()):\r\n\r\n # Handle the BOOL special case\r\n v = data[k]\r\n if type(v) == bool:\r\n if v:\r\n v = 1\r\n else:\r\n v = 0\r\n data[k] = v\r\n\r\n # Update buffer\r\n strBuffer += \"%s=%s\\n\" % (str(k).lower(), vmcp.myquote(str(v)))\r\n\r\n # Append salt\r\n strBuffer += salt\r\n return strBuffer", "def _sign_document(self):\n return False", "def resign(self, signer, provisioning_profile, alternate_entitlements_path=None):\n\n # TODO all this mucking about with entitlements feels wrong. The entitlements_path is\n # not actually functional, it's just a way of passing it to later stages of signing.\n # Maybe we should determine entitlements data in isign/archive.py or even isign/isign.py,\n # and then embed it into Signer?\n\n # In the typical case, we add entitlements from the pprof into the app's signature\n if alternate_entitlements_path is None:\n # copy the provisioning profile in\n self.provision(provisioning_profile)\n\n entitlements = self.extract_entitlements(provisioning_profile)\n else:\n log.info(\"signing with alternative entitlements: {}\".format(alternate_entitlements_path))\n entitlements = biplist.readPlist(alternate_entitlements_path)\n self.write_entitlements(entitlements)\n\n # actually resign this bundle now\n super(App, self).resign(signer)", "def _sign(self, cert, keypair, certs, crls, flags):\n\n # pylint: disable=W0201\n cms = self.POW_class()\n cms.sign(cert, keypair, self.encode(), certs, crls, self.econtent_oid, flags)\n self.POW = cms", "def sign(cls, upload, location=None):\n path = \"uploader/sign/%s\" % upload[\"id\"]\n kwargs = {\"md5\": upload[\"md5\"], \"location\": location}\n try:\n return Backend.put(path, kwargs, headers=Backend.headers())\n except requests.HTTPError as err:\n if err.response.status_code == 410:\n LOGGER.warning(\"Cannot Touch file %s. 
Already finished \\\n (not active) (410)\", upload[\"id\"])\n raise err\n except:\n raise", "def signWithSecret(self, secret):\r\n self.link(secret)\r\n self.sign()", "def sign (self):\n print(\"*** signing the inno setup installer ***\")\n pfxfile = r'scripts\\%s.pfx' % self.lname\n if os.path.isfile(pfxfile):\n path = get_windows_sdk_path()\n signtool = os.path.join(path, \"bin\", \"signtool.exe\")\n if os.path.isfile(signtool):\n cmd = [signtool, 'sign', '/f', pfxfile, self.distfile]\n subprocess.check_call(cmd)\n else:\n print(\"No signed installer: signtool.exe not found.\")\n else:\n print(\"No signed installer: certificate %s not found.\" % pfxfile)", "def sign(self): \n body_in_list = self.body.split()\n body_in_list.pop()\n self.body = ' '.join(body_in_list)", "def sign(private_key: RsaKey, content: dict) -> None:\n\n signer = PKCS1_v1_5.new(private_key)\n encoded_content = json.dumps(content, sort_keys=True).encode()\n h = SHA256.new(encoded_content)\n signature = signer.sign(h)\n\n return binascii.hexlify(signature).decode('ascii')", "def test_sign_file_unknown_bundle_format(dummy_command, tmp_path, capsys):\n # Raise an error caused by an unknown bundle format during codesign\n dummy_command.tools.subprocess.run.side_effect = mock_codesign(\n \"bundle format unrecognized, invalid, or unsuitable\"\n )\n\n # Sign the file\n dummy_command.sign_file(\n tmp_path / \"base_path\" / \"random.file\",\n identity=\"Sekrit identity (DEADBEEF)\",\n )\n\n # An attempt to codesign was made\n dummy_command.tools.subprocess.run.assert_has_calls(\n [\n sign_call(\n tmp_path,\n tmp_path / \"base_path\" / \"random.file\",\n entitlements=False,\n ),\n ],\n any_order=False,\n )\n\n # The console includes a warning about not needing a signature.\n assert \"... no signature required\\n\" in capsys.readouterr().out", "def _rsa_sign(blob, private_key_pem):\n # Lazy import crypto. It is not available in unit tests outside of sandbox.\n from Crypto.Hash import SHA256\n from Crypto.PublicKey import RSA\n from Crypto.Signature import PKCS1_v1_5\n pkey = RSA.importKey(private_key_pem)\n return PKCS1_v1_5.new(pkey).sign(SHA256.new(blob))", "def sign(self, path, entitlements, identity):\n try:\n print(\"Signing\", path)\n self.subprocess.run(\n [\n 'codesign',\n '--sign', identity,\n '--entitlements', str(entitlements),\n '--deep', str(path),\n '--force',\n '--options', 'runtime',\n ],\n check=True,\n )\n except subprocess.CalledProcessError:\n print()\n raise BriefcaseCommandError(\n \"Unable to code sign {path}.\".format(path=path)\n )", "def add_signature(self, pubkey: PublicKey, signature: Signature) -> None:\n presigner = Presigner(pubkey.to_solders(), signature)\n self._solders.partial_sign([presigner], self._solders.message.recent_blockhash)", "def signSignWithSecondSecret(self, secondSecret):\r\n self.link(None, secondSecret)\r\n self.signSign()", "def sign(self, inputs):\n pass", "def sign(self, inputs):\n pass" ]
[ "0.6849241", "0.65445966", "0.6377147", "0.6371257", "0.63314426", "0.6310475", "0.62609345", "0.61872184", "0.61623085", "0.6129731", "0.61247724", "0.607729", "0.6056486", "0.60021913", "0.59386194", "0.59081775", "0.58716357", "0.5862046", "0.5852787", "0.58021504", "0.57783604", "0.5771533", "0.57405466", "0.5702065", "0.5684372", "0.5680466", "0.56745815", "0.5674524", "0.56527334", "0.56527334" ]
0.66369
1
Given a path to a provisioning profile, return the entitlements encoded therein
def extract_entitlements(provision_path):
    cmd = [
        'smime',
        '-inform', 'der',
        '-verify',    # verifies content, prints verification status to STDERR,
                      # outputs content to STDOUT. In our case, will be an XML plist
        '-noverify',  # accept self-signed certs. Not the opposite of -verify!
        '-in', provision_path
    ]
    # this command always prints 'Verification successful' to stderr.
    (profile_text, err) = openssl_command(cmd, data=None, expect_err=True)
    if err and err.strip() != 'Verification successful':
        log.error('Received unexpected error from openssl: {}'.format(err))
    plist_dict = biplist.readPlistFromString(profile_text)
    if 'Entitlements' not in plist_dict:
        log.debug('failed to get entitlements in provisioning profile')
        raise Exception('could not find Entitlements in {}'.format(provision_path))
    return plist_dict['Entitlements']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def resign(self, signer, provisioning_profile, alternate_entitlements_path=None):\n\n # TODO all this mucking about with entitlements feels wrong. The entitlements_path is\n # not actually functional, it's just a way of passing it to later stages of signing.\n # Maybe we should determine entitlements data in isign/archive.py or even isign/isign.py,\n # and then embed it into Signer?\n\n # In the typical case, we add entitlements from the pprof into the app's signature\n if alternate_entitlements_path is None:\n # copy the provisioning profile in\n self.provision(provisioning_profile)\n\n entitlements = self.extract_entitlements(provisioning_profile)\n else:\n log.info(\"signing with alternative entitlements: {}\".format(alternate_entitlements_path))\n entitlements = biplist.readPlist(alternate_entitlements_path)\n self.write_entitlements(entitlements)\n\n # actually resign this bundle now\n super(App, self).resign(signer)", "def _get_identities_from_provisioning_profile(mpf):\n for identity in mpf[\"DeveloperCertificates\"]:\n if not isinstance(identity, bytes):\n # Old versions of plistlib return the deprecated plistlib.Data type\n # instead of bytes.\n identity = identity.data\n yield _certificate_fingerprint(identity)", "def _parse_mobileprovision_file(mobileprovision_file):\n plist_xml = subprocess.check_output([\n \"security\",\n \"cms\",\n \"-D\",\n \"-i\",\n mobileprovision_file,\n ])\n return plist_from_bytes(plist_xml)", "def profile_files(profile):\n flist = os.listdir(osp.join(profile, 'startup'))\n profile_path = osp.join(osp.abspath('.'), profile)\n return [osp.join(profile_path, 'startup', x) for x in flist]", "def profile_expiry_to_env():\n parser = argparse.ArgumentParser(description=profile_expiry_to_env.__doc__)\n if \"_ARGCOMPLETE\" in os.environ:\n parser.add_argument(\"profile\", help=\"The profile to read expiry info from\").completer = \\\n ChoicesCompleter(read_expiring_profiles())\n argcomplete.autocomplete(parser)\n else:\n parser.add_argument(\"profile\", help=\"The profile to read expiry info from\")\n args = parser.parse_args()\n print_profile_expiry(args.profile)", "def get_own_cert_chain_as_string(self):\n# _log.debug(\"get_own_cert_chain_as_string: node_name={}\".format(self.node_name))\n cert_path = self.get_own_cert_path()\n try:\n cert_chain_str = open(cert_path, 'rt').read()\n return cert_chain_str\n except Exception as err:\n # Certificate not available\n _log.debug(\"No runtime certificate string can be found, err={}\".format(err))\n return None", "def _find_codesign_identity(mobileprovision):\n mpf = _parse_mobileprovision_file(mobileprovision)\n ids_codesign = set(_find_codesign_identities())\n for id_mpf in _get_identities_from_provisioning_profile(mpf):\n if id_mpf in ids_codesign:\n return id_mpf", "def profile_directory_path(request, file):\n return directory_path('profile', file)", "def profileForEncrypt(self, emailAddress):\n kvstring = profileFor(emailAddress)\n # Turn the string into bytes\n kvbytes = kvstring.encode()\n # Encrypt it.\n data = padPkcs7(kvbytes, 16)\n return self.__aes.ecbEncrypt(data)", "def pathinfo():\n info = {}\n pdir = None\n if 'SUZUPROFDIR' in os.environ:\n pdir = os.environ['SUZUPROFDIR']\n elif sys.platform == 'win32':\n try:\n pdir = os.path.join(os.environ['APPDATA'], 'suzu')\n except KeyError:\n pdir = None\n else:\n try:\n pdir = os.path.join(os.environ['HOME'], '.suzu')\n except KeyError:\n pdir = None\n\n if pdir:\n info['profiledir'] = pdir\n info['config'] = os.path.join(pdir, 'config.json')\n\n return 
info\n\n return None", "def load_profiles_from_file(file_path):\r\n profiles = {}\r\n\r\n lastused = \"\"\r\n\r\n if File.Exists(file_path):\r\n try:\r\n with StreamReader(file_path) as xmlfile:\r\n xmldoc = XmlDocument()\r\n xmldoc.Load(xmlfile)\r\n\r\n if xmldoc.DocumentElement.Name == \"Profiles\":\r\n nodes = xmldoc.SelectNodes(\"Profiles/Profile\")\r\n #Individual exported profiles are saved with the document element as Profile\r\n elif xmldoc.DocumentElement.Name == \"Profile\":\r\n nodes = xmldoc.SelectNodes(\"Profile\")\r\n\r\n #Changed from 1.7 to 2.0 to use Profiles/Profile instead of Settings/Setting\r\n elif xmldoc.DocumentElement.Name == \"Settings\":\r\n nodes = xmldoc.SelectNodes(\"Settings/Setting\")\r\n elif xmldoc.DocumentElement.Name == \"Setting\":\r\n nodes = xmldoc.SelectNodes(\"Setting\")\r\n\r\n #No valid root elements\r\n else:\r\n MessageBox.Show(file_path + \" is not a valid Library Organizer profile file.\", \"Not a valid profile file\", MessageBoxButtons.OK, MessageBoxIcon.Error)\r\n return profiles, lastused\r\n\r\n if nodes.Count > 0:\r\n for node in nodes: \r\n profile = Profile()\r\n profile.Name = node.Attributes[\"Name\"].Value\r\n result = profile.load_from_xml(node)\r\n\r\n #Error loading the profile\r\n if result == False:\r\n MessageBox.Show(\"An error occured loading the profile \" + profile.Name + \". That profile has been skipped.\")\r\n\r\n else:\r\n profiles[profile.Name] = profile\r\n\r\n\r\n #Load the last used profile\r\n rootnode = xmldoc.DocumentElement\r\n if rootnode.HasAttribute(\"LastUsed\"):\r\n lastused = rootnode.Attributes[\"LastUsed\"].Value.split(\",\")\r\n\r\n except Exception, ex:\r\n MessageBox.Show(\"Something seems to have gone wrong loading the xml file.\\n\\nThe error was:\\n\" + str(ex), \"Error loading file\", MessageBoxButtons.OK, MessageBoxIcon.Error)\r\n\r\n return profiles, lastused", "def test_sign_file_entitlements(dummy_command, tmp_path):\n # Sign the file with an ad-hoc identity\n dummy_command.sign_file(\n tmp_path / \"base_path\" / \"random.file\",\n identity=\"Sekrit identity (DEADBEEF)\",\n entitlements=tmp_path\n / \"base_path\"\n / \"build\"\n / \"first-app\"\n / \"macos\"\n / \"app\"\n / \"Entitlements.plist\",\n )\n\n # An attempt to codesign was made without the runtime option\n dummy_command.tools.subprocess.run.assert_has_calls(\n [\n sign_call(tmp_path, tmp_path / \"base_path\" / \"random.file\"),\n ],\n any_order=False,\n )", "def get_group_entitlements(self):\n response = self._send(http_method='GET',\n location_id='9bce1f43-2629-419f-8f6c-7503be58a4f3',\n version='6.0-preview.1')\n return self._deserialize('[GroupEntitlement]', self._unwrap_collection(response))", "def get_runtime_certificate_chain_as_string(self):\n# _log.debug(\"get_runtime_certificate_chain_as_string: my_node_name={}\".format(self.node_name))\n try:\n files = os.listdir(os.path.join(self.runtime_dir, \"mine\"))\n with open(self.get_own_cert_path(), 'rb') as f:\n cert_str=f.read()\n return cert_str\n except Exception as err:\n _log.debug(\"Failed to get the runtimes certificate chain, err={}\".format(err))\n raise Exception(\"Failed to get the runtimes certificate chain\")", "def entitlements(self) -> Entitlements:\n return self.__entitlements", "def get_bundle_file():\n if FLAGS.bundle_file is None:\n return None\n else:\n return os.path.expanduser(FLAGS.bundle_file)", "def getProductInfo(path=\"./logs/SystemVersion/SystemVersion.plist\"):\n result = {\n \"ProductName\" : None,\n \"ProductionVersion\" : None,\n 
\"ProductBuildVersion\" : None\n }\n try:\n fd = open(path, 'rb')\n plist = plistlib.load(fd)\n for key in [\"ProductName\", \"ProductVersion\", \"ProductBuildVersion\", \"BuildID\", \"SystemImageID\"]:\n if key in plist.keys():\n result[key] = plist[key]\n else:\n print(\"WARNING: %s not found in %s plist\" % (key, path))\n fd.close()\n except Exception as e:\n print(\"Impossible to parse %s: %s\" % (path, str(e)))\n return result", "def get_profile(path=\"~\"):\n global profiles\n profile = profiles.get(path,None)\n if not profile:\n profile = InitFileConfig(os.path.join(path,\".myradioprofile\"), {} )\n profiles[path] = profile\n return profile", "def import_profiles(file_path):\r\n profiles, lastused = load_profiles_from_file(file_path)\r\n\r\n return profiles", "def read_keys(path):\n with open(path) as walletfile:\n b_keys = walletfile.read()\n p_keys = base64.b64decode(b_keys)\n return pickle.loads(p_keys)", "def _load_profile(self, profile_name, cwd):\n\n profile_path = self.get_profile_path(profile_name, cwd)\n try:\n text = load_user_encoded(profile_path)\n except Exception as e:\n raise ConanException(f\"Cannot load profile:\\n{e}\")\n\n # All profiles will be now rendered with jinja2 as first pass\n base_path = os.path.dirname(profile_path)\n file_path = os.path.basename(profile_path)\n context = {\"platform\": platform,\n \"os\": os,\n \"profile_dir\": base_path,\n \"profile_name\": file_path,\n \"conan_version\": conan_version}\n rtemplate = Environment(loader=FileSystemLoader(base_path)).from_string(text)\n text = rtemplate.render(context)\n\n try:\n return self._recurse_load_profile(text, profile_path)\n except ConanException as exc:\n raise ConanException(\"Error reading '%s' profile: %s\" % (profile_name, exc))", "def loadProfiles():\n with open(userProfilesDir, \"r\") as infile:\n profiles = json.loads(\"\\n\".join(infile.readlines()))\n infile.close()\n return profiles", "def profiles_path(self) -> Path:\n return self._config.data_path / \"hmm\" / \"profiles.hmm\"", "def get_certificate(self, path: Union[bytes, str]) -> str:\n path = _to_bytes_or_null(path)\n certificate = ffi.new(\"char **\")\n ret = lib.Fapi_GetCertificate(self._ctx, path, certificate)\n _chkrc(ret)\n # certificate is guaranteed to be a null-terminated string\n return ffi.string(_get_dptr(certificate, lib.Fapi_Free)).decode()", "def resource_string(self, path):\n\t\tdata = pkg_resources.resource_string(__name__, path)\n\t\treturn data.decode(\"utf8\")", "def resource_string(path):\n data = pkg_resources.resource_string(__name__, path)\n return data.decode(\"utf8\")", "def resource_string(path):\n data = pkg_resources.resource_string(__name__, path)\n return data.decode(\"utf8\")", "def get_current_profile_extent(request):\n\n # Get the path of the yaml\n path = local_profile_directory_path(request)\n\n if os.path.exists(os.path.join(path, APPLICATION_YAML)):\n profile_stream = open(os.path.join(path, APPLICATION_YAML))\n profile_config = yaml.load(profile_stream)\n\n if 'application' not in profile_config:\n # Not a valid config file\n return 'null'\n\n if 'geometry' in profile_config['application']:\n return profile_config['application']['geometry']\n\n # No local or global file found\n return 'null'", "def _grab_instructions(self, job):\n payload = {\n \"repository\": job.repository.name,\n \"provider\": job.repository.provider,\n \"owner\": str(job.repository.owner),\n \"file_path\": job.repository.file_path,\n \"is_organization\": job.repository.is_organization\n }\n\n response = 
requests.get(self.settings.AUTH_FILE_ENDPOINT, json=payload)\n\n if response.status_code >= 400:\n return None\n\n data = response.json()\n content = data.get('content')\n content = self._normalize_job_file(content)\n\n return content", "def _get_path_to_key_file():\n\n if 'private_key_path' not in ctx.node.properties:\n raise NonRecoverableError(\n 'Unable to get key file path, private_key_path not set.')\n\n return os.path.expanduser(ctx.node.properties['private_key_path'])" ]
[ "0.5083228", "0.49126115", "0.49077302", "0.47404766", "0.4691661", "0.4553458", "0.454329", "0.4531003", "0.4473628", "0.44085193", "0.4406375", "0.44050503", "0.439604", "0.43790016", "0.43628758", "0.4326153", "0.43213052", "0.43070796", "0.43030575", "0.42926168", "0.42628294", "0.42610988", "0.42593572", "0.42569873", "0.4256195", "0.42447776", "0.42447776", "0.42376953", "0.42336375", "0.4215439" ]
0.7646078
0
Write entitlements to self.entitlements_path. This actually doesn't matter to the app, it's just used later on by other parts of the signing process.
def write_entitlements(self, entitlements):
    biplist.writePlist(entitlements, self.entitlements_path, binary=False)
    log.debug("wrote Entitlements to {0}".format(self.entitlements_path))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def entitlements(self) -> Entitlements:\n return self.__entitlements", "def resign(self, signer, provisioning_profile, alternate_entitlements_path=None):\n\n # TODO all this mucking about with entitlements feels wrong. The entitlements_path is\n # not actually functional, it's just a way of passing it to later stages of signing.\n # Maybe we should determine entitlements data in isign/archive.py or even isign/isign.py,\n # and then embed it into Signer?\n\n # In the typical case, we add entitlements from the pprof into the app's signature\n if alternate_entitlements_path is None:\n # copy the provisioning profile in\n self.provision(provisioning_profile)\n\n entitlements = self.extract_entitlements(provisioning_profile)\n else:\n log.info(\"signing with alternative entitlements: {}\".format(alternate_entitlements_path))\n entitlements = biplist.readPlist(alternate_entitlements_path)\n self.write_entitlements(entitlements)\n\n # actually resign this bundle now\n super(App, self).resign(signer)", "def resign(self, deep, cms_signer, provisioner):\n # In the typical case, we add entitlements from the pprof into the app's signature\n if not cms_signer.is_adhoc():\n team_id = cms_signer.get_team_id()\n self.provision(team_id, provisioner)\n self.entitle(team_id, provisioner)\n\n # actually resign this bundle now\n super(App, self).resign(deep, cms_signer, provisioner)", "def save(self):\n path = self.user.get_session_path()\n with open(path, 'a', encoding='utf8') as file:\n self.write(file=file)", "def test_sign_file_entitlements(dummy_command, tmp_path):\n # Sign the file with an ad-hoc identity\n dummy_command.sign_file(\n tmp_path / \"base_path\" / \"random.file\",\n identity=\"Sekrit identity (DEADBEEF)\",\n entitlements=tmp_path\n / \"base_path\"\n / \"build\"\n / \"first-app\"\n / \"macos\"\n / \"app\"\n / \"Entitlements.plist\",\n )\n\n # An attempt to codesign was made without the runtime option\n dummy_command.tools.subprocess.run.assert_has_calls(\n [\n sign_call(tmp_path, tmp_path / \"base_path\" / \"random.file\"),\n ],\n any_order=False,\n )", "def save(self):\n if not self.fileKey:\n log.error(\"attempted to save a closed wallet\")\n return\n encrypted = self.fileKey.encrypt(tinyjson.dump(self).encode()).hex()\n w = tinyjson.dump({\n \"keyparams\": self.fileKey.params(),\n \"wallet\": encrypted,\n })\n helpers.saveFile(self.path, w)", "def sign(self, path, entitlements, identity):\n try:\n print(\"Signing\", path)\n self.subprocess.run(\n [\n 'codesign',\n '--sign', identity,\n '--entitlements', str(entitlements),\n '--deep', str(path),\n '--force',\n '--options', 'runtime',\n ],\n check=True,\n )\n except subprocess.CalledProcessError:\n print()\n raise BriefcaseCommandError(\n \"Unable to code sign {path}.\".format(path=path)\n )", "def write_to_file(self):\n \"\"\"\n Saves this app to a file in it's protobuf notation\n This way, it can be parsed using the same constructor as aps received over the internet\n The file extension stands for Protocol buffer Apk INformation\n \"\"\"\n file_name = f'{self.package_name()}({self.version_code()}).pain'\n dir_path = self.path()\n os.makedirs(dir_path, exist_ok=True)\n file_path = os.path.join(dir_path, file_name)\n with open(file_path, 'wb+') as file:\n file.write(self.proto.SerializeToString())\n LOGGER.debug(f'Wrote metadata for {self.package_name()} to {file_path}')\n return file_path", "def save(self):\n # Ensure store path exists\n store_path = self.manager.store_path\n if not os.path.exists(store_path):\n os.makedirs(store_path)\n 
\n # Get filepath\n filename = self._filename\n \n # Write into file\n raw = self.to_json()\n self.service.log.store('Saving %s' % filename)\n f = open(filename, 'w')\n f.write(raw)\n f.close()", "def _write_to_file(dir_path: Text,\n filename: Text,\n content: Text,\n executable: bool = False):\n path = os.path.join(dir_path, filename)\n with open(path, 'w') as f:\n f.write(content)\n if executable:\n st = os.stat(path)\n os.chmod(path, st.st_mode | stat.S_IXUSR)", "def _write_to_file(dir_path: Text,\n filename: Text,\n content: Text,\n executable: bool = False):\n path = os.path.join(dir_path, filename)\n with open(path, 'w') as f:\n f.write(content)\n if executable:\n st = os.stat(path)\n os.chmod(path, st.st_mode | stat.S_IXUSR)", "def write_cert(filename, content):\r\n with open(filename, 'w') as cert_file:\r\n cert_file.write(content)", "def _write_all(self, auths):\n exit_status = 0\n self._verify_keystore()\n oldmask = None\n try:\n oldmask = os.umask(0o77)\n with open(self.keystore_file, \"w\") as filep:\n json.dump(auths, filep)\n os.umask(oldmask)\n except (IOError, OSError):\n if oldmask is not None:\n os.umask(oldmask)\n exit_status = 1\n return exit_status\n return exit_status", "def write(self, target):\n mpath = path.join(self._working_dir, 'manifest.json')\n with open(mpath, 'w') as mani:\n json.dump(self.item, mani)\n\n directory = path.abspath(self._working_dir)\n with zipfile.ZipFile(target, 'w', allowZip64=True) as zip:\n for root, dirs, files in walk(directory):\n for f in files:\n abspath = path.join(root, f)\n relpath = path.relpath(abspath, directory)\n zip.write(abspath, relpath)\n return target", "def write_contents(self):\n dfile = open(os.path.join(self.directory, self.file_name), 'w')\n dfile.write(self.contents.strip())", "def save(self, save_dir):\n path = os.path.join(save_dir, self.name + \".pem\")\n with open(path, \"wb\") as f:\n f.write(self.public_key)", "def save(self, path=None):\n if path is None:\n path = self.path\n try:\n with open(path, 'w') as fd:\n for entry in self:\n fd.write('{}\\n'.format(entry))\n except Exception as e:\n raise SSHKeyError('Error writing {}: {}'.format(path, e))", "def test_write(self):\n temp_file = tempfile.mkstemp()[1]\n try:\n with open(temp_file, \"w+\") as fh:\n self.new_manifest.write(fh)\n tools.eq_(self.new_manifest, load_manifest(temp_file))\n finally:\n os.unlink(temp_file)", "def save(self, pretty=True):\n self.endInstance()\n if pretty:\n _indent(self.root, whitespace=self._whiteSpace)\n tree = ET.ElementTree(self.root)\n tree.write(self.path, encoding=\"utf-8\", method='xml', xml_declaration=True)\n if self.logger:\n self.logger.info(\"Writing %s\", self.path)", "def save(self):\n file = Path(\"config/event_{0}.json\".format(self.name))\n try:\n file.write_text(self.toJSON())\n except Exception as err:\n raise(err)", "def write(self):\n if self.skip_bootloader: # pylint: disable=no-member\n return\n\n if self.update_only: # pylint: disable=no-member\n self.update()\n return\n\n try:\n os.sync()\n self.stage2_device.format.sync(root=util.getTargetPhysicalRoot()) # pylint: disable=no-member\n self.install()\n finally:\n self.write_config() # pylint: disable=no-member", "def Write(self):\n template_mappings = {}\n\n template_file = os.path.join(self._l2tdevtools_path, self._TEMPLATE_FILE)\n file_content = self._GenerateFromTemplate(template_file, template_mappings)\n\n file_content = file_content.encode('utf-8')\n\n with open(self.PATH, 'wb') as file_object:\n file_object.write(file_content)", "def 
save(self):\n\t\tPath(self.PATH).mkdir(parents=True,exist_ok=True)\n\n\t\twith open(self.account_file, \"wb\") as file:\n\t\t\tpickle.dump(self, file)", "def writeLocalEnv(self):\n \n # open file\n f = open(self.installPath + \"/build_env.sh\", 'w')\n \n # write to file\n f.write( 80*'#' + os.linesep + \"# Environment script generated by ilcsoft-install on \" + time.ctime() + os.linesep )\n f.write( \"# for \" + self.name + \" located at [ \" + self.installPath + \" ]\" + os.linesep + 80*'#' + os.linesep )\n\n # global environment variables\n if( len( self.parent.env ) > 0 ):\n f.write( 2*os.linesep + \"#\" + 80*'-' + os.linesep + \"#\" + 5*' ' + \"Global Environment Variables\" + os.linesep \\\n + \"#\" + 80*'-' + os.linesep )\n for k, v in self.parent.env.iteritems():\n f.write( \"export \" + str(k) + \"=\\\"\" + str(v) + \"\\\"\" + os.linesep )\n \n\n # write environment recursively to file\n self.writeEnv(f, [])\n \n\n f.write( \"# --- additional comands ------- \" + os.linesep ) \n print \"\\n ----- adding additional commands to build_env.sh : \\n \"\n for c in self.envcmds:\n f.write( c + os.linesep ) \n print \"\\n ----- adding additional command to build_env.sh \" + c + \"\\n\"\n\n # close file\n f.close()", "def save(self, filename):\n aead_f = open(filename, \"wb\")\n fmt = \"< B I %is %is\" % (pyhsm.defines.YSM_AEAD_NONCE_SIZE, len(self.data))\n version = 1\n packed = struct.pack(fmt, version, self.key_handle, self.nonce, self.data)\n aead_f.write(YHSM_AEAD_File_Marker + packed)\n aead_f.close()", "def _write_requirements(filename, requirements):\n LOG.info(\"Saving requirements to %s.\" % filename)\n with open(filename, \"w\") as f:\n for entity in requirements:\n f.write(str(entity))\n f.write(\"\\n\")", "def save(self):\n self.touch()\n self._lastmtime = self.get_lock_file_mtime()", "def write_key(self):\n\t key = Fernet.generate_key()\n\t with open(\"key.key\", \"wb\") as key_file:\n\t key_file.write(key)", "def add_key(self, device, key):\n if not self.enabled:\n return\n self.keys[device] = key\n fh = open(self.path, \"w\")\n json.dump(self.keys, fh)\n fh.close()\n os.chmod(self.path, 0o600)", "def save_manifest(self, filename: Text) -> None:\r\n file = open(filename, \"w\")\r\n for element in self.elements:\r\n line = element.url\r\n for tag in element.tags:\r\n line += \",\" + tag\r\n file.write(line + \"\\n\")\r\n file.close()" ]
[ "0.54211116", "0.5367477", "0.51338154", "0.51240337", "0.50888306", "0.508606", "0.4937517", "0.48715833", "0.48602337", "0.48598403", "0.48598403", "0.4844882", "0.48447084", "0.4842885", "0.48324612", "0.48164335", "0.47696745", "0.4747149", "0.4736882", "0.46891227", "0.46873337", "0.46822476", "0.46686876", "0.46675608", "0.46604466", "0.465259", "0.46460605", "0.4643608", "0.46412978", "0.4633235" ]
0.7737914
0
This function shows the log-in window
def log_in(self):
    self.clear_screen()
    lbl_log_in = Label(self.root, text="Welcome. Please log in to the system.",
                       font=self.title_font, bg=self.bg_color)
    lbl_log_in.pack(pady=5, padx=10)
    user_name = Label(self.root, text="enter user name", font=self.text_font, bg=self.bg_color)
    user_name.pack(pady=5, padx=10)
    user_name_entry = Entry(self.root, font='Helvetica 14', fg='blue', width=25)
    user_name_entry.pack(pady=5, padx=10)
    password = Label(self.root, text="enter password", font=self.text_font, bg=self.bg_color)
    password.pack(pady=5, padx=10)
    password_entry = Entry(self.root, font='Helvetica 14', fg='blue', width=25, show="*")
    password_entry.pack(pady=5, padx=10)
    passcode = Label(self.root, text="enter passcode", font=self.text_font, bg=self.bg_color)
    passcode.pack(pady=5, padx=10)
    passcode_entry = Entry(self.root, font='Helvetica 14', fg='blue', width=25, show="*")
    passcode_entry.pack(pady=5, padx=10)
    button_enter_log = Button(self.root, text="log in",
                              command=lambda: self.submit_log_in(
                                  user_name_entry, password_entry, passcode_entry))
    button_enter_log.pack(pady=10)
    button_sign_in = Button(self.root, text="Don't have a user? Sign in",
                            command=self.sign_in)
    button_sign_in.pack(pady=10)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log_in(self):\n\t\tpass", "def show(self, window):\r\n\r\n return", "def iniciaUI(self):\n\n self.setGeometry(100,100, 250, 250)\n self.setWindowTitle(\"Login\")\n self.displayWidgets()\n\n self.show()", "def OnShowLog(self, event):\n dlg = LogViewer(self)\n dlg.OnLogRefresh(event)\n dlg.ShowModal()\n dlg.Destroy()", "def log_in(self):\n print('-=' * 12 + \" Log in \" + '-=' * 12)\n mob_num, password = self._input_mob_num('Mobile Number :'), input(\"Password: \")\n self._user = self.auth.log_in(mob_num, password)\n if self._user:\n print(\"you are logged in, Welcome '{}'\".format(self._user.username))\n self.homepage()\n else:\n print(\"Mobile number or/and password is/are Invaild \\n\" + '-=' * 30)\n options = {1: self.log_in, 2: self.logging_page, 3: self.exit}\n print_out = \"(1) Try Again \\n (2) Back to Logging Page \\n (3) Exit\"\n self._take_option(options, print_out)", "def login(self):\n self.new_window = tk.Toplevel(self.acesso)\n Entrar(self.new_window, self.acesso)", "def einloggen(self):\n \n self.c.login(self.username.text(), self.password.text(), \"1\")", "def show_login():\n # Generate a unique session token\n state = ''.join(random.choice(string.ascii_uppercase + string.digits)\n for x in xrange(32))\n login_session['state'] = state\n # return \"The current session state is %s\" % login_session['state']\n return render_template('login.html', STATE=state)", "def logging_page(self):\n print('-=' * 12 + \" Logging Page \" + '-=' * 12)\n options = {1: self.sign_up, 2: self.log_in, 3: self.delete_account, 4: self.exit}\n print_out = \"(1) Sign up \\n (2) Log in \\n (3) Delete Account \\n (4) Exit\"\n return self._take_option(options, print_out)", "def show_window(self):\n self.show()", "def viewLog(self, event):\n logcontent = \"\"\n if Config.GetOption(\"ActLog\") == True:\n\n logFrame = wx.Frame(None, -1, \"View Log\", size=(500, 500))\n panel5 = wx.Panel(logFrame)\n data = wx.richtext.RichTextCtrl(panel5, pos=(0, 0), size=(500,\n 500))\n data.AppendText(Log.ReadLog())\n logFrame.Centre()\n logFrame.Show()\n else:\n\n inform = wx.MessageDialog(None,\n \"The Log is disabled!\\\n \\nEnable it to view.\",\n \"Log Status\", wx.OK)\n inform.ShowModal()", "def show_messages(self):\n self.masterlog.revealme()", "def call(self, **kwargs):\n # Format template\n template = self._cw.vreg.template_env.get_template(\"startup.logged.jinja2\")\n html = template.render(\n header_url=self._cw.data_url(\"creative/img/neurospin.jpg\"),\n moderator=True)\n self.w(html)", "def evt_login(self, event):\n\n # Hide current pane, show PaneMain, then reset the active sizer and call Layout()\n self.parent.Hide()\n self.pane_landing.Show()\n self.parent.parent.SetSizer(self.szr_landing)\n self.parent.parent.Layout()", "def view():\n login_dict = _open_cnfg()\n login_name, login_url, login_api, login_hid = ['Login name'], ['URL'], ['API key'], ['History ID']\n for lgn in login_dict['logins']:\n login_name.append(lgn)\n login_url.append(login_dict['logins'][lgn]['url'])\n login_api.append(login_dict['logins'][lgn]['api_key'])\n login_hid.append(login_dict['logins'][lgn]['hid'])\n click.echo(\"You are currently using active login: \" + click.style(login_dict['active_login'], bold=True))\n utils._tabulate([login_name, login_url, login_api, login_hid])", "def __display_login_info(self):\n print(f'\\nYour card has been created\\n'\n f'Your card number:\\n'\n # f'{self.__card_display()}\\n' # uncomment this line and comment out line below for pretty display\n f'{self.card_number}\\n'\n f'Your card 
PIN:\\n'\n f'{self.__account_pin}\\n', )", "def login():\r\n return render_template(\r\n 'about.html',\r\n title='About',\r\n year=datetime.now().year,\r\n message='Your application description page.'\r\n )", "def show(self,window):\n self.showFunctions(window)", "def call(self, **kwargs):\n # Format template\n template = self._cw.vreg.template_env.get_template(\"startup.logged.jinja2\")\n html = template.render(\n header_url=self._cw.data_url(\"creative/img/neurospin.jpg\"),\n moderator=False)\n self.w(html)", "def do_login(self):\n if self.app.authentication_only:\n self.app.stop()\n else:\n self.set_screen(EXPLORER)", "def ShowLogin():\n current_user = helpers.get_current_user()\n if current_user is None:\n return render_template('login.html')\n else:\n return redirect('/')", "def showLogin():\r\n state = ''.join(random.choice(string.ascii_uppercase + string.digits)\r\n for x in xrange(32))\r\n login_session['state'] = state\r\n return render_template('login.html', STATE=state)", "def show(self):\n self.Show()", "def setup_log_panel(window, src_window=None):\n view = window.create_output_panel(\"YouTubeEditor Log\")\n view.set_read_only(True)\n view.settings().set(\"gutter\", False)\n view.settings().set(\"rulers\", [])\n view.settings().set(\"word_wrap\", False)\n view.settings().set(\"context_menu\", \"YouTubeLog.sublime-menu\")\n\n if src_window:\n src_view = src_window.find_output_panel(\"YouTubeEditor Log\")\n if src_view:\n text = src_view.substr(sublime.Region(0, len(src_view)))\n view.run_command(\"append\", {\n \"characters\": text,\n \"force\": True,\n \"scroll_to_end\": True\n })", "def showLogin():\n state = ''.join(random.choice(string.ascii_uppercase + string.digits)\n for x in xrange(32))\n login_session['state'] = state\n return render_template('login.html', STATE=state)", "def showLogin():\n state = ''.join(random.choice(string.ascii_uppercase + string.digits)\n for x in xrange(32))\n login_session['state'] = state\n return render_template('login.html', STATE=state)", "def evt_login(self, event):\n if self.pair_correct(self.wgt_txt_login_user.GetValue(), self.wgt_txt_login_pass.GetValue()):\n self.parent.Hide()\n self.pane_landing.Show()\n self.parent.parent.SetSizer(self.szr_landing)\n self.parent.parent.Layout()\n else:\n if self.invalid_text == None:\n self.invalid_text = wx.StaticText(self, size=(60, -1), label=\"INVALID USER/PASSKEY PAIR\", style=wx.ALIGN_CENTER)\n self.invalid_text.SetBackgroundColour('red')\n self.szr_login_inner.Add(self.invalid_text, flag=wx.EXPAND)\n self.szr_login_inner.AddSpacer(self.temp_space)\n self.Fit()\n else:\n self.invalid_text.SetLabel(\"C'mon, I said it's not a bloody valid passkey\")\n self.invalid_count += 1\n self.Layout()", "def __configure(self):\n e5App().getObject(\"UserInterface\").showPreferences(\"logViewerPage\")", "def LogIn():\n result, user, date = self.CheckLogin()\n if result:\n # Save the database file and load the program\n self.SaveData()\n self.main_frame.destroy()\n MainWindow.MainWindow(self, user, login_date=date)", "def display():\n return render_template(\"signin.html\")" ]
[ "0.6801101", "0.6755017", "0.6737144", "0.66676944", "0.65847325", "0.6565406", "0.655964", "0.64743286", "0.6425789", "0.64248353", "0.6400713", "0.63787353", "0.6243237", "0.624312", "0.62406", "0.62182665", "0.62177074", "0.6210246", "0.61971325", "0.61968756", "0.61666393", "0.61522853", "0.6143585", "0.6133136", "0.6115753", "0.6115753", "0.61116546", "0.6108483", "0.60847646", "0.60692555" ]
0.685493
0