query (stringlengths 9–9.05k) | document (stringlengths 10–222k) | metadata (dict) | negatives (listlengths 30–30) | negative_scores (listlengths 30–30) | document_score (stringlengths 4–10) | document_rank (stringclasses 2 values)
---|---|---|---|---|---|---|
Merge an existing AbideRegistry into this one, keeping properties already present in this one and only merging properties that don't yet exist. | def merge(self, registry):
for property_name, property_item in registry.items():
if property_name not in self:
self.set_property(property_item) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def copy(self):\n return self.update({})",
"def _merge(self, other: dict):\n self._storage = dict_merge(self._storage, other)",
"def merge(self, other):\n merged = copy.deepcopy(self.__dict__())\n for k, v in other.__dict__():\n if k in merged and getattr(self, k):\n if isinstance(v, (string_types, bool)):\n pass\n else:\n list_of_stuff = merged.get(k, [])\n for entry in v:\n if entry not in list_of_stuff:\n list_of_stuff.append(entry)\n merged[k] = list_of_stuff\n else:\n merged[k] = v\n return CondaEnvironmentProvider(**merged)",
"def copy(self):\n new = super().copy()\n new.drip_cal_config = deepcopy(self.drip_cal_config)\n new.drip_config = deepcopy(self.drip_config)\n new.pipecal_config = deepcopy(self.pipecal_config)\n return new",
"def merge(self: Dict[str, Arg], argument: Arg):\n dest = argument.destination\n if dest in self:\n self[dest].merge_all(argument)\n return\n self[dest] = argument",
"def merge(self, other_config):\n # Make a copy of the current attributes in the config object.\n config_options = copy.copy(self._user_provided_options)\n\n # Merge in the user provided options from the other config\n config_options.update(other_config._user_provided_options)\n\n # Return a new config object with the merged properties.\n return Config(**config_options)",
"def extend(self, other):\n overlap = [key for key in other.defaults if key in self.defaults]\n if overlap:\n raise ValueError(\n \"Duplicate hyperparameter(s): %s\" % \" \".join(overlap))\n new = dict(self.defaults)\n new.update(other.defaults)\n return HyperparameterDefaults(**new)",
"def copy(self, **kwargs):\n # type: (...) -> SalusConfig\n return deepcopy(self).update(**kwargs)",
"def override(self, parent):\n return self.__class__(Cfg._mergedicts(self, parent, True))",
"def extend(self, router):\n self.registry.extend(router.registry)",
"def __add__(self, other):\n self.__dict__.update(other)\n return self",
"def combine(self, existing):\n return self",
"def merge(self, other):\n log.debug('Merging: %s and %s' % (self.serialize(), other.serialize()))\n for k in self.keys():\n for new_item in other[k]:\n if new_item not in self[k]:\n self[k].append(new_item)\n log.debug('Result: %s' % self.serialize())\n return self",
"def __copy__(self):\n d = dict()\n d.update(self.items())\n return d",
"def base_extend(self, R):\n if R not in _Fields:\n raise ValueError('Not a field: '+str(R))\n if self.base_ring() is R:\n return self\n if not R.has_coerce_map_from(self.base_ring()):\n raise ValueError('no natural map from the base ring (=%s) to R (=%s)!'\n % (self.base_ring(), R))\n return self.change_ring(R)",
"def update(self, other):\n _merge_dicts(self, other)",
"def merge(self, other):\n for p in other:\n for key, val in p.items():\n self.contents[key] = val\n\n return self",
"def merge(self, obj):\n pass",
"def copy(self):\n return self.from_dict(self.to_dict(True))",
"def get_rllib_full_config(self):\n return merged_dict(self.get_default_config(), self.get_config())",
"def copy(self) -> AF:\n if self._base == OrderedDict:\n kopied = dict(self)\n else:\n kopied = self._base.copy(self)\n return self.__class__(kopied, use_fuzzy=self.use_fuzzy, dottable=self._dottable)",
"def __add__(self, other):\n merged_profile = super().__add__(other)\n\n # unstruct specific property merging\n merged_profile._empty_line_count = (\n self._empty_line_count + other._empty_line_count)\n merged_profile.memory_size = self.memory_size + other.memory_size\n samples = list(dict.fromkeys(self.sample + other.sample))\n merged_profile.sample = random.sample(list(samples),\n min(len(samples), 5))\n\n # merge profiles\n merged_profile._profile = self._profile + other._profile\n\n return merged_profile",
"def include(self, registry):\n for cls in registry.values():\n db_to_element = {}\n\n props = sorted([(k,v) for k,v in cls.__dict__.items()\n if isinstance(v, Property)]\n , key=lambda p:p[1].instance_idx)\n for prop_name, prop_value in props:\n value_name = prop_value.name\n if value_name:\n db_to_element[value_name] = prop_name\n prop_name = value_name\n else:\n db_to_element[prop_name] = prop_name\n\n self.guard_reserved_words(prop_name, cls)\n\n self.props_from_db[cls] = self.create_props_mapping(db_to_element)\n self.init_broker_for_class(cls)\n self.registry[cls.registry_name] = cls",
"def _merge(self):\n raise NotImplementedError",
"def merge(self, other):\n\n assert self.ins_addr == other.ins_addr\n assert self.type == other.type\n\n o = self.copy()\n o.targets |= other.targets\n\n return o",
"def extend(clself, other):\n clself._cfg_def.extend(other._cfg_def)\n for key, optdef in clself._cfg_def.options.iteritems():\n setattr(clself, key, optdef)",
"def __copy__(self):\n cls = self.__class__\n result = cls.__new__(cls)\n to_copy = {\"_cache\", \"_buffers\", \"_parameters\", \"_modules\"}\n result.__dict__.update(\n {k: v.copy() if k in to_copy else v for k, v in self.__dict__.items()}\n )\n return result",
"def merge(self, a, b, path=None):\n if path is None: path = []\n for key in b:\n if key in a:\n if isinstance(a[key], dict) and isinstance(b[key], dict):\n if key == 'attributes':\n self.merge_attribute_defs(b, a)\n else:\n self.merge(a[key], b[key], path + [str(key)])\n elif a[key] == b[key]:\n pass # same leaf value\n else:\n # raise Exception('Conflict at %s' % '.'.join(path + [str(key)]))\n self.append_or_replace(a,b,key, '/'.join(path + [str(key)]));\n else:\n a[key] = b[key]\n return a",
"def osl_fill_from(self, other):\n #TODO: What about inherited properties?\n for p in self._osl.properties:\n conditional_copy(other, self, p[0])\n return self",
"def merge(a, b):\n if isinstance(a, CONFIG_VALID) \\\n and isinstance(b, CONFIG_VALID):\n # dict update\n if isinstance(a, dict) and isinstance(b, dict):\n a.update(b)\n return a\n # list update\n _a = list(a)\n for x in list(b):\n if x not in _a:\n _a.append(x)\n return _a\n if a and b:\n raise Exception(\"Cannot merge\")\n raise NotImplementedError"
]
| [
"0.55507815",
"0.55414605",
"0.532423",
"0.53020734",
"0.5290227",
"0.5225598",
"0.5142893",
"0.51419336",
"0.51071876",
"0.5073005",
"0.50702655",
"0.50646394",
"0.50576466",
"0.49336034",
"0.49330747",
"0.49016225",
"0.48852488",
"0.48832685",
"0.488003",
"0.4870947",
"0.48627895",
"0.4852947",
"0.4849969",
"0.48485705",
"0.48397645",
"0.48249614",
"0.48167402",
"0.48042205",
"0.47778687",
"0.47745267"
]
| 0.6592531 | 0 |
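The row above documents a keep-existing merge between registries. As an illustration only, here is a minimal sketch of a registry supporting that `merge()` pattern; the real AbideRegistry API (`items()`, `set_property()`, membership test by property name) is assumed from the docstring and may differ.

```python
# Hypothetical stand-in for AbideRegistry, assumed from the docstring above.
class Registry(dict):
    def set_property(self, property_item):
        # index the property by its name
        self[property_item["name"]] = property_item

    def merge(self, registry):
        # keep properties already present; only copy ones we don't have yet
        for property_name, property_item in registry.items():
            if property_name not in self:
                self.set_property(property_item)


ours = Registry()
ours.set_property({"name": "color", "value": "red"})
theirs = Registry()
theirs.set_property({"name": "color", "value": "blue"})
theirs.set_property({"name": "size", "value": 3})
ours.merge(theirs)
print(ours["color"]["value"], ours["size"]["value"])  # -> red 3  (existing entry wins)
```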
Change the number of missing values while fixing k, qi and the size of the dataset. | def get_result_missing(att_trees, data, k=DEFAULT_K, n=DEFAULT_K):
data_back = copy.deepcopy(data)
length = len(data_back)
qi_len = len(data[0]) - 1
raw_missing = raw_missing_record = 0
print "K=%d" % k
for record in data:
flag = False
for value in record:
if value == '*':
raw_missing += 1
flag = True
if flag:
raw_missing_record += 1
# print "Missing Percentage %.2f" % (raw_missing * 100.0 / (length * qi_len)) + '%%'
# each evaluation varies add 5% missing values
check_percentage = [5, 10, 25, 50, 75]
datasets = []
for p in check_percentage:
joint = int(0.01 * p * length * qi_len) - raw_missing
datasets.append(joint)
all_ncp = []
all_rtime = []
all_pollution = []
deletion_all_ncp = []
deletion_all_rtime = []
for i, joint in enumerate(datasets):
ncp = rtime = pollution = 0.0
for j in range(n):
gen_missing_dataset(data, joint)
if __DEBUG:
missing_rate(data)
_, eval_result = mondrian(att_trees, data, k)
data = copy.deepcopy(data_back)
ncp += eval_result[0]
rtime += eval_result[1]
pollution += eval_result[2]
ncp /= n
rtime /= n
pollution /= n
if __DEBUG:
print "check_percentage", check_percentage[i]
print "Add missing %d" % joint
print "Average NCP %0.2f" % ncp + "%"
print "Running time %0.2f" % rtime + "seconds"
print "Missing Pollution = %.2f" % pollution + "%"
print '#' * 30
all_ncp.append(round(ncp, 2))
all_rtime.append(round(rtime, 2))
all_pollution.append(round(pollution, 2))
ncp = rtime = pollution = 0.0
for j in range(n):
gen_missing_dataset(data, joint)
if __DEBUG:
missing_rate(data)
_, eval_result = mondrian_delete_missing(att_trees, data, k)
data = copy.deepcopy(data_back)
ncp += eval_result[0]
rtime += eval_result[1]
ncp /= n
rtime /= n
if __DEBUG:
print "Add missing %d" % joint
print "Average NCP %0.2f" % ncp + "%"
print "Running time %0.2f" % rtime + "seconds"
print "Missing Pollution = %.2f" % pollution + "%"
print '#' * 30
deletion_all_ncp.append(round(ncp, 2))
deletion_all_rtime.append(round(rtime, 2))
print "Mondrian"
print "All NCP", deletion_all_ncp
print "All Running time", deletion_all_rtime
print "Enhanced Mondrian"
print "All NCP", all_ncp
print "All Running time", all_rtime
print "Missing Pollution", all_pollution
print '#' * 30 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pivot_num(data: PandasDF, var: str, performance: str = 'bad_ind',\n n: int = 10, ks: float = True, max_ks_only: float = False) -> PandasDF:\n temp = data.loc[:, [var, performance]].copy()\n temp_missing = temp.loc[temp[var].isnull(), :]\n temp_noMissing = temp.loc[~temp[var].isnull(), :]\n temp_noMissing.sort_values(var, inplace=True)\n length = round(temp_noMissing.shape[0]/n)\n\n group = temp_noMissing.groupby(np.arange(temp_noMissing.shape[0]) // length).apply(\n lambda obj: pd.Series({\n 'var': var,\n 'level': str(obj[var].min()) + ' - ' + str(obj[var].max()),\n 'bad rate': obj[performance].mean(),\n 'count': len(obj[performance])\n }))\n group_missing = pd.DataFrame({\n 'var': var,\n 'level': np.nan,\n 'bad rate': temp_missing[performance].mean(),\n 'count': temp_missing.shape[0],\n 'ks': np.nan\n }, index=[n+1, ])\n # temp = group[['bad rate', 'count']].copy()\n if ks or max_ks_only:\n group['bad'] = [r * c for r, c in zip(group['bad rate'], group['count'])]\n group['cum_bad'] = [sum(group.loc[0:i, 'bad']) for i in range(group.shape[0])]\n group['cum_count'] = [sum(group.loc[0:i, 'count']) for i in range(group.shape[0])]\n group['cum_good'] = [c - b for c, b in zip(group['cum_count'], group['cum_bad'])]\n group['ks'] = [\n (100 * abs(g/group.loc[group.shape[0]-1, 'cum_good'] - b/group.loc[group.shape[0]-1, 'cum_bad']))\n for g, b in zip(group.cum_good, group.cum_bad)]\n max_index = group['ks'].idxmax()\n if max_ks_only:\n return group.loc[[max_index, ], ['var', 'ks']]\n group['ks'] = ['%.1f%%' % x for x in group['ks']]\n group = group.append(group_missing)\n group['bad rate'] = ['%.2f%%' % (x * 100) for x in group['bad rate']]\n\n group.style.applymap(highlight, subset=pd.IndexSlice[max_index, ['ks']])\n\n return group[['var', 'level', 'bad rate', 'count', 'cum_bad', 'cum_good', 'ks']]\n else:\n group = group.append(group_missing[['var', 'level', 'bad rate', 'count']])\n group.rename(columns={'bad rate': 'avg %s' % performance}, inplace=True)\n return group[['var', 'level', 'avg %s' % performance, 'count']]",
"def dummy_data(size):\n add_dummy_data(size)",
"def fine_interpolation_factor(self, n):\n ds = self.data\n ds.metadata[\"fine_interpolation_factor\"] = n\n for da in ds.data_vars.values():\n da.metadata[\"fine_interpolation_factor\"] = n",
"def test_batch_missing_data():\n\n\n def fx(x, dt):\n F = np.array([[1, dt, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, dt],\n [0, 0, 0, 1]], dtype=float)\n\n return np.dot(F, x)\n\n def hx(x):\n return np.array([x[0], x[2]])\n\n\n dt = 0.1\n points = MerweScaledSigmaPoints(4, .1, 2., -1)\n kf = UKF(dim_x=4, dim_z=2, dt=dt, fx=fx, hx=hx, points=points)\n\n\n kf.x = np.array([-1., 1., -1., 1])\n kf.P*=0.0001\n\n zs = []\n for i in range(20):\n z = np.array([i+randn()*0.1, i+randn()*0.1])\n zs.append(z)\n\n zs[2] = None\n Rs = [1]*len(zs)\n Rs[2] = None\n Ms, Ps = kf.batch_filter(zs)",
"def missing_values():\n print('Missings in the train data:', train_data.isnull().sum())",
"def test_daal_pca_bad_no_of_k(self):\n with self.assertRaisesRegexp(Exception, \"k must be less than or equal to number of observation columns\"):\n self.context.daaltk.models.dimreduction.pca.train(self.frame,\n [\"X1\", \"X2\", \"X3\", \"X4\", \"X5\",\n \"X6\", \"X7\", \"X8\", \"X9\", \"X10\"],\n k=11)",
"def n_train(self):\n return self.factors[0].shape[0]",
"def __len__(self):\n return 9 # logsfr_ratios has 6 bins",
"def truncate_sample_size(data,classes,others=None,max_size_given=None,rng=np.random.RandomState(100)): \n u, indices = np.unique(classes,return_inverse=True)\n indices=np.asarray(indices)\n num_u=len(u)\n sample_sizes=[]\n \n # get sample size of each class\n for i in range(num_u):\n sample_size_this=np.sum(indices==i)\n sample_sizes.append(sample_size_this)\n sample_sizes=np.array(sample_sizes,dtype=int)\n \n #size_min=np.amin(sample_sizes) # smallest sample size\n size_max=np.amax(sample_sizes) # largest sample size\n \n if size_max<max_size_given:\n max_size_given=size_max\n sample_sizes[sample_sizes>max_size_given]=max_size_given \n\n indices_all=np.array([],dtype=indices.dtype)\n indices_range=np.array(range(len(indices)))\n \n for i in range(num_u):\n ind_this_num=indices_range[indices==i]\n ind_this_reduced=ind_this_num[rng.choice(len(ind_this_num),size=sample_sizes[i],replace=False)]\n indices_all=np.append(indices_all,ind_this_reduced)\n \n # reduce the data \n data=data[indices_all,:]\n classes=classes[indices_all]\n if np.any(others):\n others=others[indices_all]\n return data,classes,indices_all,others",
"def fill_gaps(st,isi,N=200, max_ratio=1.5, min_freq=10):\n \n # just delete all isis above a certain number\n running_median = RunningFunc(isi,N)\n isi[isi>running_median*max_ratio] = np.nan\n isi[1/(isi)<min_freq] = np.nan\n\n return st, isi",
"def perm4missing(flights, col, N):\n\n return ...",
"def initialize_k_mediods(data, k):\n return random.sample(range(len(data)), k)",
"def decomposition_into_s_n_irreducibles(self, n):\r\n w5 = partitions_list(n)\r\n M5 = form_matrix_yt(w5)\r\n card = math.factorial(n)\r\n vec_dic = {}\r\n for k in range(self.dimension()+1):\r\n D = {}\r\n uu = []\r\n vv = []\r\n p = k \r\n A = self.matrix_simmetric_representate(p)\r\n if (p >0 and (p <= self.dimension())):\r\n null = nullspace(A)\r\n w3 = []\r\n for i in range(len(null[0])):\r\n w = []\r\n for j in range(len(null)):\r\n w.append(null[j][i])\r\n w3.append(w) \r\n null = w3\r\n M = np.matrix(w3, dtype= np.float64).transpose()\r\n Mi = np.linalg.pinv(M)\r\n else:\r\n if (p == 0):\r\n M = A\r\n null = []\r\n for i in range(A.shape[0]):\r\n aux = []\r\n for j in range(A.shape[1]):\r\n aux.append(M[i,j])\r\n null.append(aux)\r\n M = np.matrix(null, dtype=np.float64)\r\n Mi = M\r\n p = k + 1\r\n if (p>0 and (p <= self.dimension())):\r\n A1=self.matrix_simmetric_representate(p)\r\n col = columnspace(A1)\r\n w4 = []\r\n for i in range(len(col[0])):\r\n w = []\r\n for j in range(len(col)):\r\n w.append(col[j][i])\r\n w4.append(w)\r\n col = w4\r\n M1 = np.matrix(w4, dtype=np.float64).transpose()\r\n Mii = np.linalg.pinv(M1)\r\n for h in w5:\r\n p = k \r\n if (p >0 and (p <= self.dimension())):\r\n if (all(elem == 0 for elem in null[0])):\r\n l1 = 0\r\n else:\r\n he = self.basis_group_oriented_p_chains(p) \r\n on1 = np.ones(len(list(he.dic.keys())), dtype=np.float64) \r\n v = P_chains([],[])\r\n v = P_chains(list(he.dic.keys()),on1)\r\n v1 = permutation_in_simplex_test(v, make_permutation(h))\r\n D1={}\r\n c1 = 0\r\n for i in list(v1.dic.keys()):\r\n c2 = 1\r\n for j in list(he.dic.keys()):\r\n if (i == j):\r\n if (v1.dic[i] == he.dic[j]):\r\n D1[c1] = c2\r\n else:\r\n D1[c1] = -c2\r\n c2 = c2 + 1\r\n c1 = c1 + 1\r\n rr = M.shape[0]\r\n cc = M.shape[1]\r\n Ma = np.zeros([rr,cc],dtype=np.float64)\r\n for i in range(rr):\r\n Ma[i,:] = (M[(abs(D1[i])-1),:]*(np.sign(D1[i])))\r\n l1 = 0\r\n for j in range(cc):\r\n l1 = np.dot(Mi[j,:],Ma[:,j])[0,0] + l1\r\n else:\r\n if (p == 0):\r\n he = self.basis_group_oriented_p_chains(p) \r\n on1 = np.ones(len(list(he.dic.keys())), dtype=np.float64) \r\n v = P_chains([],[])\r\n v = P_chains(list(he.dic.keys()),on1)\r\n v1 = permutation_in_simplex_test(v, make_permutation(h))\r\n D1={}\r\n c1 = 0\r\n for i in list(v1.dic.keys()):\r\n c2 = 1\r\n for j in list(he.dic.keys()):\r\n if (i == j):\r\n if (v1.dic[i] == he.dic[j]):\r\n D1[c1] = c2\r\n else:\r\n D1[c1] = -c2\r\n c2 = c2 + 1\r\n c1 = c1 + 1\r\n rr = M.shape[0]\r\n cc = M.shape[1]\r\n Ma = np.zeros([rr,cc],dtype=np.float64)\r\n for i in range(rr):\r\n Ma[i,:] = (M[(abs(D1[i])-1),:]*(np.sign(D1[i])))\r\n l1 = 0\r\n for j in range(cc):\r\n l1 = np.dot(Mi[j,:],Ma[:,j])[0,0] + l1\r\n else:\r\n l1 = 0\r\n p = k + 1\r\n if (p>0 and (p <= self.dimension())):\r\n hi = self.basis_group_oriented_p_chains(p-1) \r\n on1i = np.ones(len(list(hi.dic.keys())), dtype=np.float64) \r\n vi = P_chains([],[])\r\n vi = P_chains(list(hi.dic.keys()),on1i)\r\n v1i = permutation_in_simplex_test(vi, make_permutation(h))\r\n D1i={}\r\n c1 = 0\r\n for i in list(v1i.dic.keys()):\r\n c2 = 1\r\n for j in list(hi.dic.keys()):\r\n if (i == j):\r\n if (v1i.dic[i] == hi.dic[j]):\r\n D1i[c1] = c2\r\n else:\r\n D1i[c1] = -c2\r\n c2 = c2 + 1\r\n c1 = c1 + 1\r\n rr = M1.shape[0]\r\n cc = M1.shape[1]\r\n Mai = np.zeros([rr,cc],dtype=np.float64)\r\n for i in range(rr):\r\n Mai[i,:] = (M1[(abs(D1i[i])-1),:]*(np.sign(D1i[i])))\r\n l2 = 0\r\n for j in range(cc):\r\n l2 = np.dot(Mii[j,:],Mai[:,j])[0,0] + l2\r\n else:\r\n l2 = 0\r\n 
uu.append(l1-l2) \r\n vv.append(size_conjugacy_class(h,n))\r\n for i in range(M5.shape[0]):\r\n Ip = 0\r\n for j in range(M5.shape[1]):\r\n Ip = Ip + M5[i,j]*uu[j]*vv[j]\r\n Ip = Ip/card\r\n D[tuple(w5[i])] = abs(round(Ip))\r\n '''Note that I am using round, only because the results obtained are \r\n not esthetics'''\r\n vec_dic[k] = D\r\n return vec_dic",
"def nachalnye_dannie(pkx, size):\r\n return pkx.sum(axis=0) / size",
"def nits(self):",
"def fill_nans(data):\n for col in data.columns:\n data[col].fillna(-999, inplace=True)",
"def eda_base():\n ######################################\n # Missing Values\n ######################################\n # cat_cols, num_cols, cat_but_car, num_but_cat = grab_col_names(df)\n # Observations: 356255\n # Variables: 122\n # cat_cols: 15\n # num_cols: 67\n # cat_but_car: 1\n # num_but_cat: 39\n global train, test, df\n train = pd.read_csv('datasets/home-credit-default-risk/application_train.csv')\n test = pd.read_csv('datasets/home-credit-default-risk/application_test.csv')\n df = train.append(test).reset_index(drop=True)\n\n df.isnull().sum()\n df.isnull().sum().sum() # 10670198\n df.shape\n # df.dropna(inplace=True)\n # msno.matrix(df.sample(250))\n # plt.show()\n\n df = df[df['CODE_GENDER'] != 'XNA']\n df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace=True)\n\n df[df.columns[df.isnull().any()]]\n\n cat_cols, num_cols, cat_but_car, num_but_cat = grab_col_names(df)\n\n na_cols_cat = [col for col in cat_cols if df[col].isnull().sum() > 0]\n df[na_cols_cat] = df[na_cols_cat].apply(lambda x: x.fillna(x.mode()), axis=0)\n\n na_cols_num = [col for col in num_cols if df[col].isnull().sum() > 0 and \"TARGET\" not in col]\n df[na_cols_num] = df[na_cols_num].apply(lambda x: x.fillna(x.median()), axis=0)\n\n na_cols_cat_but_car = [col for col in cat_but_car if df[col].isnull().sum() > 0]\n df[na_cols_cat_but_car] = df[na_cols_cat_but_car].apply(lambda x: x.fillna(x.mode()), axis=0)\n\n na_cols_num_but_cat = [col for col in num_but_cat if df[col].isnull().sum() > 0 and \"TARGET\" not in col]\n df[na_cols_num_but_cat] = df[na_cols_num_but_cat].apply(lambda x: x.fillna(x.median()), axis=0)\n\n df['OCCUPATION_TYPE'] = df['OCCUPATION_TYPE'].fillna(df['OCCUPATION_TYPE'].mode()[0])\n\n ######################################\n # Feature Engineering\n ######################################\n\n #############################################\n # Outliers\n #############################################\n\n #############################################\n # Label Encoding\n #############################################\n\n #############################################\n # Rare Encoding\n #############################################\n\n #############################################\n # One-Hot Encoding\n #############################################\n df = pd.get_dummies(df, dummy_na=True)\n df.shape\n #############################################\n # Standart Scaler\n #############################################\n\n ######################################\n # Modeling\n ######################################\n global train_df, test_df\n train_df = df[df['TARGET'].notnull()]\n test_df = df[df['TARGET'].isnull()].drop(\"TARGET\", axis=1)\n\n global X, y, X_train, X_test, y_train, y_test\n y = train_df[\"TARGET\"]\n X = train_df.drop([\"SK_ID_CURR\", \"TARGET\"], axis=1)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=1)",
"def __init__(self, k):\n self.k = k\n self.N = 2**self.k",
"def preprocess_dataset(dataset=None, remove_missing=60, remove_empty_rows=True):\n print('feature size before dropping:{}'.format(dataset.shape[1]))\n dataset_after_drop = dataset.dropna(thresh=dataset.shape[0]*remove_missing/100, how='all',axis=1)\n print('feature size after dropping:{}'.format(dataset_after_drop.shape[1]))\n print('row size before dropping:{}'.format(dataset_after_drop.shape[0]))\n if remove_empty_rows is True:\n df_final = dataset_after_drop.dropna(inplace=False).reset_index (drop=True)\n print('row size after dropping:{}'.format(df_final.shape[0]))\n print('---------------')\n print('final shape:{}'.format(df_final.shape))\n return df_final\n else:\n return dataset_after_drop",
"def createlabel(q, n):\n # When using dec2base function make sure to pad the string with the right number of zeros e.g for base 3 dec2base\n # gives 1 rather than 01 if we were dealing with 2 qubits.\n # The number of kraus matrices or labels is n^q\n\n label = []\n for i in range(pow(n, q)):\n label.append(dec2base(i, n))\n\n # Next we make sure that each element in the label list has length the number of qubits if not add a zero\n for x in range(len(label)):\n if len(label[x]) < q:\n label[x] = label[x].zfill(q)\n else:\n break\n return label",
"def reset(self):\n self.sum = [0.] * len(self.topk)\n self.data_num = 0\n self.pfm = [0.] * len(self.topk)",
"def fix_bands_dim_b2t(degen_bands, num_of_bands=30):\n\n tmp = np.array(degen_bands)\n\n # A filter: bands dimension must larger than num_of_bands\n fixed_bands = tmp[0:num_of_bands, :]\n # print(np.shape(fixed_bands))\n return fixed_bands",
"def clean_data(df):\n \n # Put in code here to execute all main cleaning steps:\n # convert missing value codes into NaNs, ...\n count_miss = df.isnull().sum(axis=0).values #find number of nans for each column\n count_miss = [val for val in count_miss]\n \n drop_cols = []\n\n for ind, val in enumerate(count_miss):\n if val > 200000:\n drop_cols.append(ind)\n \n df_drop_cols = list(azdias.columns[drop_cols])\n df = df.drop(df_drop_cols, axis=1)\n \n for col in range(df.shape[1]): #loop through columns\n column_name = df.columns[col] #get column name\n missing_list = feat_info.iloc[col,3] #get missing_or_unknown column from feature info\n missing_list = missing_list.replace('[','') #remove left bracket from string\n missing_list = missing_list.replace(']','') #remove right bracket from string\n missing_list = missing_list.split(',') #split into individual strings\n \n #find data that is natually missing and continue loop to omit\n if missing_list == ['']:\n continue\n \n else:\n for dat_type in missing_list: \n if df[column_name].dtype == 'object': #find values that contain x\n df.loc[df[column_name] == dat_type, column_name] = np.nan #replace x with nan\n \n else:\n dat_type = int(dat_type) #if no x, convert to integer and replace with nan\n df.loc[df[column_name] == dat_type, column_name] = np.nan\n \n # select, re-encode, and engineer column values.\n \n # encode OST_WEST_KZ\n df.loc[df['OST_WEST_KZ'] == 'W','OST_WEST_KZ'] = 0\n df.loc[df['OST_WEST_KZ'] == 'O','OST_WEST_KZ'] = 1\n \n # Re-encode categorical variable(s) to be kept in the analysis.\n \n \n #get list of attributes with type categorical\n feat_info[feat_info['type'] == 'categorical']\n \n cat_new_cols = [] #initialize\n for i in feat_info[feat_info['type'] == 'categorical']['attribute']:\n cat_new_cols.append(i)\n \n for cols in df.columns:\n if cols in cat_new_cols:\n if df[cols].nunique(dropna=True) > 2: #if the number of unique values is greater than 2 \n df = df.drop(cols, axis=1) #drop from the analysis\n print(\"more than 2 categories: {}\".format(cols))\n \n else:\n if not df[cols].unique()[0] > 0:\n #if not df[cols].unique()[0] > 0:\n dummies = pd.get_dummies(df[cols], prefix=cols)\n df = df.drop(cols, axis=1) #create dummy variable\n df = df.join(dummies)\n print(\"transformed to dummy variable: {}\".format(cols))\n \n # create variable: MOVEMENT\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([1,3,5,8,10,12,14]),'MOVEMENT'] = 1\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([2,4,6,7,9,11,13,15]),'MOVEMENT'] = 2\n \n #Capture Decade\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([1,2]), 'DECADE'] = 40\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([3,4]), 'DECADE'] = 50\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([5,6,7]), 'DECADE'] = 60\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([8,9]), 'DECADE'] = 70\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([10,11,12,13]), 'DECADE'] = 80\n df.loc[df['PRAEGENDE_JUGENDJAHRE'].isin([14,15]), 'DECADE'] = 90\n \n df['CAMEO_INTL_2015'] = df['CAMEO_INTL_2015'].astype(float)\n\n # create new variable: WEALTH\n df.loc[df['CAMEO_INTL_2015'].isin([51,52,53,54,55]), 'WEALTH'] = 1\n df.loc[df['CAMEO_INTL_2015'].isin([41,42,43,44,45]), 'WEALTH'] = 2\n df.loc[df['CAMEO_INTL_2015'].isin([31,32,33,34,35]), 'WEALTH'] = 3\n df.loc[df['CAMEO_INTL_2015'].isin([21,22,23,24,25]), 'WEALTH'] = 4\n df.loc[df['CAMEO_INTL_2015'].isin([11,12,13,14,15]), 'WEALTH'] = 5\n \n # create new variable: LIFE_STAGE\n df.loc[df['CAMEO_INTL_2015'].isin([11,21,31,41,51]),'LIFE_STAGE'] = 1\n 
df.loc[df['CAMEO_INTL_2015'].isin([12,22,32,42,52]),'LIFE_STAGE'] = 2\n df.loc[df['CAMEO_INTL_2015'].isin([13,23,33,43,53]),'LIFE_STAGE'] = 3\n df.loc[df['CAMEO_INTL_2015'].isin([14,24,34,44,54]),'LIFE_STAGE'] = 4\n df.loc[df['CAMEO_INTL_2015'].isin([15,25,35,45,55]),'LIFE_STAGE'] = 5\n \n # remove selected columns and rows, ...\n df = df.drop('PRAEGENDE_JUGENDJAHRE', axis=1)\n df = df.drop('CAMEO_INTL_2015',axis=1)\n \n # Return the cleaned dataframe.\n return df",
"def balance_sample_size_increase(data,classes,others=None,max_size_given=None,rng=np.random.RandomState(100)): \n u, indices = np.unique(classes,return_inverse=True)\n indices=np.asarray(indices)\n num_u=len(u)\n sample_sizes=[]\n \n # get sample size of each class\n for i in range(num_u):\n sample_size_this=np.sum(indices==i)\n sample_sizes.append(sample_size_this) \n \n size_max=np.amax(sample_sizes) # largest sample size\n \n if max_size_given and size_max<max_size_given:\n size_max=max_size_given \n \n indices_all=np.array([],dtype=indices.dtype)\n indices_range=np.array(range(len(indices)))\n \n for i in range(num_u):\n ind_this_num=indices_range[indices==i]\n #replacetf=True if sample_sizes[i]<size_max else False\n if sample_sizes[i]>=size_max:\n ind_this_increased=ind_this_num[rng.choice(sample_sizes[i],size=size_max,replace=False)]\n indices_all=np.append(indices_all,ind_this_increased)\n else: # make sure each sample is used at least once\n ind_this_increased=ind_this_num\n ind_this_increased2=ind_this_num[rng.choice(sample_sizes[i],size=size_max-sample_sizes[i],replace=True)]\n indices_all=np.append(indices_all,ind_this_increased)\n indices_all=np.append(indices_all,ind_this_increased2)\n \n # increase the data \n data=data[indices_all]\n classes=classes[indices_all]\n if np.any(others):\n others=others[indices_all]\n return data,classes,others",
"def idealize(self) -> None:\n self.k = np.zeros(6, dtype=float)\n self.p = np.zeros(2, dtype=float)\n self.c = np.zeros(2, dtype=float)",
"def ADM_QCD(nf):\n gamma_QCD_T = 32/3 * np.eye(5)\n gamma_QCD_1 = np.zeros((70,154))\n gamma_QCD_2 = np.hstack((np.zeros((5,70)),gamma_QCD_T,np.zeros((5,79))))\n gamma_QCD_3 = np.zeros((3,154))\n gamma_QCD_4 = np.hstack((np.zeros((5,78)),gamma_QCD_T,np.zeros((5,71))))\n gamma_QCD_5 = np.zeros((71,154))\n gamma_QCD = [np.vstack((gamma_QCD_1, gamma_QCD_2, gamma_QCD_3, gamma_QCD_4, gamma_QCD_5))]\n\n if nf == 5:\n return gamma_QCD\n elif nf == 4:\n return np.delete(np.delete(gamma_QCD, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94, 102, 110, 118, 126, 134, 142, 150], 1)\\\n , [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94, 102, 110, 118, 126, 134, 142, 150], 2)\n elif nf == 3:\n return np.delete(np.delete(gamma_QCD, [5,6, 13,14, 21,22, 29,30, 41,42, 49,50, 57,58, 65,66, 73,74, 81,82,\\\n 93,94, 101,102, 109,110, 117,118, 125,126, 133,134, 141,142, 149,150], 1)\\\n , [5,6, 13,14, 21,22, 29,30, 41,42, 49,50, 57,58, 65,66, 73,74, 81,82,\\\n 93,94, 101,102, 109,110, 117,118, 125,126, 133,134, 141,142, 149,150], 2)\n else:\n raise Exception(\"nf has to be 3, 4 or 5\")",
"def ADM_QED2(nf):\n\n # Mixing of Q_{11}^(7) into Q_{5,f}^(7) and Q_{12}^(7) into Q_{6,f}^(7), adapted from Hill et al. [1409.8290]. \n gamma_gf = -8\n gamma_QED2_gf = np.array([5*[gamma_gf]])\n gamma_QED2_1 = np.zeros((86,154))\n gamma_QED2_2 = np.hstack((np.zeros((1,38)),gamma_QED2_gf,np.zeros((1,111))))\n gamma_QED2_3 = np.hstack((np.zeros((1,46)),gamma_QED2_gf,np.zeros((1,103))))\n gamma_QED2_4 = np.zeros((66,154))\n gamma_QED2 = np.vstack((gamma_QED2_1, gamma_QED2_2, gamma_QED2_3, gamma_QED2_4))\n\n if nf == 5:\n return gamma_QED2\n elif nf == 4:\n return np.delete(np.delete(gamma_QED2, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94, 102, 110, 118, 126, 134, 142, 150], 0)\\\n , [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94, 102, 110, 118, 126, 134, 142, 150], 1)\n elif nf == 3:\n return np.delete(np.delete(gamma_QED2, [5,6, 13,14, 21,22, 29,30, 41,42, 49,50, 57,58, 65,66, 73,74, 81,82,\\\n 93,94, 101,102, 109,110, 117,118, 125,126, 133,134, 141,142, 149,150], 0)\\\n , [5,6, 13,14, 21,22, 29,30, 41,42, 49,50, 57,58, 65,66, 73,74, 81,82,\\\n 93,94, 101,102, 109,110, 117,118, 125,126, 133,134, 141,142, 149,150], 1)\n else:\n raise Exception(\"nf has to be 3, 4 or 5\")",
"def conv_idx_numlist_to_missing(self):\n \n ct = 0\n for indx in self.num_list_to_missing:\n self.list_with_missing[indx] = self.num_list[ct]\n ct += 1",
"def fill_numeric_data(df,neighbors = 2):\r\n imputer = KNNImputer(n_neighbors=neighbors, weights=\"uniform\")\r\n cols = df.columns\r\n filled_array = imputer.fit_transform(df)\r\n df_filled = pd.DataFrame(filled_array, columns = cols)\r\n return df_filled",
"def DealWithMissingValues(data_set: pd.DataFrame):\n data_set.fillna(method=\"pad\", inplace=True)"
]
| [
"0.5625626",
"0.5465201",
"0.54609203",
"0.53622466",
"0.52481824",
"0.5229756",
"0.5151207",
"0.51455116",
"0.5095228",
"0.509389",
"0.5092229",
"0.5091221",
"0.50859225",
"0.50685453",
"0.5050776",
"0.50357234",
"0.50298697",
"0.4998754",
"0.49985066",
"0.49860984",
"0.49819556",
"0.4976853",
"0.49751282",
"0.49710044",
"0.4970843",
"0.49706262",
"0.49657688",
"0.49373764",
"0.49233848",
"0.49217254"
]
| 0.6014288 | 0 |
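The evaluation loop in the document above calls a `gen_missing_dataset()` helper that is not shown. A hedged sketch of what such a helper could look like, assuming `joint` is the number of additional quasi-identifier cells to mark as missing with the `'*'` token the document checks for; the real implementation in the source project may differ.

```python
import random


def gen_missing_dataset(data, joint, missing_token='*'):
    """Hypothetical stand-in for the helper used above: randomly marks
    `joint` additional quasi-identifier cells as missing in place."""
    qi_len = len(data[0]) - 1  # last column treated as the sensitive attribute
    candidates = [(r, c) for r in range(len(data)) for c in range(qi_len)
                  if data[r][c] != missing_token]
    for r, c in random.sample(candidates, max(0, min(joint, len(candidates)))):
        data[r][c] = missing_token
```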
Transforms the given source dict to a target dict based on the implemented ruleset. | def transform(self, source: dict) -> dict:
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def subst_dict(target, source):\n dict = {}\n\n if target:\n def get_tgt_subst_proxy(thing):\n try:\n subst_proxy = thing.get_subst_proxy()\n except AttributeError:\n subst_proxy = thing # probably a string, just return it\n return subst_proxy\n tnl = NLWrapper(target, get_tgt_subst_proxy)\n dict['TARGETS'] = Targets_or_Sources(tnl)\n dict['TARGET'] = Target_or_Source(tnl)\n\n # This is a total cheat, but hopefully this dictionary goes\n # away soon anyway. We just let these expand to $TARGETS\n # because that's \"good enough\" for the use of ToolSurrogates\n # (see test/ToolSurrogate.py) to generate documentation.\n dict['CHANGED_TARGETS'] = '$TARGETS'\n dict['UNCHANGED_TARGETS'] = '$TARGETS'\n else:\n dict['TARGETS'] = NullNodesList\n dict['TARGET'] = NullNodesList\n\n if source:\n def get_src_subst_proxy(node):\n try:\n rfile = node.rfile\n except AttributeError:\n pass\n else:\n node = rfile()\n try:\n return node.get_subst_proxy()\n except AttributeError:\n return node # probably a String, just return it\n snl = NLWrapper(source, get_src_subst_proxy)\n dict['SOURCES'] = Targets_or_Sources(snl)\n dict['SOURCE'] = Target_or_Source(snl)\n\n # This is a total cheat, but hopefully this dictionary goes\n # away soon anyway. We just let these expand to $TARGETS\n # because that's \"good enough\" for the use of ToolSurrogates\n # (see test/ToolSurrogate.py) to generate documentation.\n dict['CHANGED_SOURCES'] = '$SOURCES'\n dict['UNCHANGED_SOURCES'] = '$SOURCES'\n else:\n dict['SOURCES'] = NullNodesList\n dict['SOURCE'] = NullNodesList\n\n return dict",
"def apply(s, transformation_s, target=None):\n if target is None: target = {}\n\n if not isinstance(transformation_s, dict):\n for transformation in transformation_s:\n target = s.apply(transformation, target)\n return target\n\n transformation = transformation_s\n for key, value in transformation.items():\n s._merge_key_into_dict(key, value, target)\n return target",
"def _merge_sources(dest: Dict[str, Any], source: ConfigSource) -> Dict[str, Any]:\n for key, val in source.items():\n if isinstance(val, dict):\n if key in dest:\n dest[key] = _merge_sources(dest[key], val)\n else:\n dest[key] = val.copy()\n else:\n dest[key] = val\n return dest",
"def convert_resnet_state_dict(src_dict):\n dst_dict = {}\n for k, v in src_dict.items():\n toks = k.split('.')\n if k.startswith('layer'):\n assert len(toks[0]) == 6\n res_id = int(toks[0][5]) + 1\n name = '.'.join(['res%d' % res_id] + toks[1:])\n dst_dict[name] = v\n elif k.startswith('fc'):\n continue\n else:\n name = '.'.join(['res1'] + toks)\n dst_dict[name] = v\n return dst_dict",
"def merge_dicts(source, destination):\n for key, value in source.items():\n key = key.lower() if isinstance(key, str) else key\n if isinstance(value, Mapping):\n node = destination.setdefault(key, {})\n merge_dicts(value, node)\n else:\n destination[key] = value\n return destination",
"def merge_dict_recursive(target, src):\r\n for k in src.keys():\r\n if ((k in target and isinstance(target[k], dict) and\r\n isinstance(src[k], collections.Mapping))):\r\n merge_dict_recursive(target[k], src[k])\r\n else:\r\n target[k] = src[k]",
"def convert_state_dict(src_dict, model_dict):\n dst_dict = {}\n res_block_n = np.array([1, 4, 7, 14, 18])\n for k, v in src_dict.items():\n toks = k.split('.')\n id_n = int(toks[1])\n if id_n < 18 and '17.conv.7' not in k and 'classifier' not in k:\n res_n = np.where(res_block_n > id_n)[0][0] + 1\n n = res_n - 2 if res_n >= 2 else 0\n res_n_m = 0 if id_n - res_block_n[n] < 0 else id_n - res_block_n[n]\n toks[0] = 'res%s' % res_n\n toks[1] = '%s' % res_n_m\n name = '.'.join(toks)\n dst_dict[name] = v\n return dst_dict",
"def _apply_transforms(self, inputs: Dict) -> Dict:\n results = inputs.copy()\n inputs = {k: v for k, v in inputs.items() if v is not IgnoreKey}\n outputs = self.transforms(inputs)\n\n if outputs is None:\n raise ValueError(\n f'Transforms wrapped by {self.__class__.__name__} should '\n 'not return None.')\n\n results.update(outputs) # type: ignore\n return results",
"def normalize_dict(from_dict: Dict[str, Any], key_mapping: Dict[str, str]) -> Dict[str, Any]:\n to_dict = {}\n\n for new_key, old_key in key_mapping.items():\n if old_key in from_dict:\n to_dict[new_key] = from_dict[old_key]\n\n return to_dict",
"def map_dict(dictionary, transform):\n return dict(transform(k, v) for k, v in dictionary.items())",
"def _init_from_dictionary(self, from_dictionary, template_model=None):\n\n if not isinstance(from_dictionary, dict):\n raise TypeError(\"from_dictionary must be of type dict, %s \\\n provided\" % from_dictionary.__class__.__name__)\n rewrite_map = None\n if template_model is not None:\n\n rewrite_map = template_model.attribute_rewrite_reverse_map()\n\n if not isinstance(template_model, prestans.types.DataCollection):\n raise TypeError(\"template_model should be a prestans model in AttributeFilter \\\n init (from dictionary), %s provided\" % template_model.__class__.__name__)\n\n for key, value in from_dictionary.iteritems():\n\n target_key = key\n\n #:\n #: Minification support\n #:\n if rewrite_map is not None:\n target_key = rewrite_map[key]\n\n #:\n #: Check to see we can work with the value\n #:\n if not isinstance(value, (bool, dict)):\n raise TypeError(\"AttributeFilter input for key %s must be \\\n boolean or dict, %s provided\" % (key, value.__class__.__name__))\n\n #:\n #: Ensure that the key exists in the template model\n #:\n if template_model is not None and not template_model.has_key(target_key):\n\n unwanted_keys = list()\n unwanted_keys.append(target_key)\n raise prestans.exception.AttributeFilterDiffers(unwanted_keys)\n\n #:\n #: Either keep the value of wrap it up with AttributeFilter\n #:\n if isinstance(value, bool):\n setattr(self, target_key, value)\n elif isinstance(value, dict):\n\n sub_map = None\n if template_model is not None:\n\n sub_map = getattr(template_model, target_key)\n\n #: prestans Array support\n if isinstance(sub_map, prestans.types.Array):\n sub_map = sub_map.element_template\n\n setattr(self, target_key, \\\n AttributeFilter(from_dictionary=value, template_model=sub_map))",
"def preprocess_constraint(self, constraint):\n for source, dests in constraint.items():\n constraint[source] = list(dests)\n return constraint",
"def filter_state_dict(\n dst_state: Dict[str, Union[float, torch.Tensor]],\n src_state: Dict[str, Union[float, torch.Tensor]],\n):\n match_state = {}\n for key, value in src_state.items():\n if key in dst_state and (dst_state[key].size() == src_state[key].size()):\n match_state[key] = value\n else:\n if key not in dst_state:\n logging.warning(\n f\"Filter out {key} from pretrained dict\"\n + \" because of name not found in target dict\"\n )\n else:\n logging.warning(\n f\"Filter out {key} from pretrained dict\"\n + \" because of size mismatch\"\n + f\"({dst_state[key].size()}-{src_state[key].size()})\"\n )\n return match_state",
"def copy_dict(source_dict, diffs):\n result = dict(source_dict)\n result.update(diffs)\n return result",
"def translate_to(common_form, target):\r\n # retrieve the correct translation dictionary\r\n target_dict = get_dict(target)\r\n # recreate the form with the translated keys\r\n target_form = {target_dict[key]: common_form[key]\r\n for key in target_dict.keys()}\r\n return target_form",
"def merge_dicts(self, source, target):\n\n source_keys = set(source.keys())\n target_keys = set(target.keys())\n\n # added\n new = source_keys - target_keys\n for key in new:\n target[key] = source[key]\n\n # removed\n removed = target_keys - source_keys\n\n # changed\n common = target_keys.intersection(source_keys)\n\n diff = set()\n better = 0\n for key in common:\n if not self.ignore_fuzzy:\n fuzzy_source = 'fuzzy' in source[key].flags or \\\n source[key].obsolete\n fuzzy_target = 'fuzzy' in target[key].flags or \\\n target[key].obsolete\n else:\n fuzzy_source = fuzzy_target = False\n\n # sources that are not fuzzy/obsolete and which are of better\n # quality than the existing target keys (empty or fuzzy/obsolete)\n # are taken as non-conflicting but summed as changed\n better_singular = source[key].msgstr and \\\n (not target[key].msgstr or fuzzy_target)\n better_plural = source[key].msgstr_plural and \\\n (not target[key].msgstr_plural or fuzzy_target)\n if not fuzzy_source and (better_singular or better_plural):\n target[key] = source[key]\n better += 1\n elif target[key].msgstr != source[key].msgstr or \\\n target[key].msgstr_plural != source[key].msgstr_plural:\n diff.add(key)\n\n if len(diff) > 0:\n if self.merge_conflict:\n target = None\n elif self.merge_overwrite:\n for key in diff:\n te = target[key]\n se = source[key]\n te.msgstr = se.msgstr\n te.msgid_plural = se.msgid_plural\n te.msgstr_plural = se.msgstr_plural\n te.flags = se.flags\n elif self.merge_skip:\n pass # nothing has to be done\n else:\n self.log(\"We have a bug.\")\n\n return (target, len(new), len(diff) + better, len(removed))",
"def original2target(self) -> Dict[str, str]:\n return {\n self.keywords[i]: self.target_words[i]\n for i in range(len(self.keywords))\n }",
"def merge_dicts(source, destination):\n for key, value in source.items():\n if isinstance(value, dict):\n node = destination.setdefault(key, {})\n merge_dicts(value, node)\n else:\n destination[key] = value\n\n return destination",
"def transform(attrs: dict) -> dict:\n\n pass",
"def makeTargetFieldsDict(tgt_fields):\n global RES, NRES, HOTEL\n out_dict = {}\n for fld in tgt_fields:\n use, suffix = fld.split(\"_SF_\")\n if use in RES:\n act_field = \"RES\"\n elif use in NRES:\n act_field = \"JOB\"\n elif use in HOTEL:\n act_field = \"HOTEL\"\n else:\n # This is an untracked ause\n continue\n share_field = \"shr_{}\".format(use)\n sqft_field = \"{}_sqft\".format(use)\n out_dict[fld] = (act_field, share_field, sqft_field)\n return out_dict",
"def target2original(self) -> Dict[str, str]:\n return {\n self.target_words[i]: self.keywords[i] \n for i in range(len(self.keywords))\n }",
"def _map_input(self, data: Dict,\n mapping: Optional[Dict]) -> Dict[str, Any]:\n\n if mapping is None:\n return data.copy()\n\n def _map(data, m):\n if isinstance(m, dict):\n # m is a dict {inner_key:outer_key, ...}\n return {k_in: _map(data, k_out) for k_in, k_out in m.items()}\n if isinstance(m, (tuple, list)):\n # m is a list or tuple [outer_key1, outer_key2, ...]\n # This is the case when we collect items from the original\n # data to form a list or tuple to feed to the wrapped\n # transforms.\n return m.__class__(_map(data, e) for e in m)\n\n # allow manually mark a key to be ignored by ...\n if m is ...:\n return IgnoreKey\n\n # m is an outer_key\n if self.allow_nonexist_keys:\n return data.get(m, IgnoreKey)\n else:\n return data.get(m)\n\n collected = _map(data, mapping)\n\n # Retain unmapped items\n inputs = data.copy()\n inputs.update(collected)\n\n return inputs",
"def merge_dicts(dest, src):\n\n for k, v in src.items():\n if isinstance(v, collections.Mapping):\n dest_v = dest.get(k, {})\n if not isinstance(dest_v, collections.Mapping):\n msg = \"Attempted to merge {0!r} with {1!r}\".format(dest_v, v)\n raise TypeError(msg)\n\n dest[k] = merge_dicts(dest_v, v)\n else:\n dest[k] = src[k]\n\n return dest",
"def rename_state_dict_keys(source, key_transformation, target=None):\n if target is None:\n target = source\n\n state_dict = torch.load(source)\n # state_dict = state_dict.state_dict() \n new_state_dict = OrderedDict()\n\n for key, value in state_dict.items():\n new_key = key_transformation(key)\n new_state_dict[new_key] = value\n\n torch.save(new_state_dict, target)",
"def _map_merge(dest: \"BaseContainer\", src: \"BaseContainer\") -> None:\n from omegaconf import AnyNode, DictConfig, OmegaConf, ValueNode\n\n assert isinstance(dest, DictConfig)\n assert isinstance(src, DictConfig)\n src_type = src._metadata.object_type\n src_ref_type = get_ref_type(src)\n assert src_ref_type is not None\n\n # If source DictConfig is:\n # - an interpolation => set the destination DictConfig to be the same interpolation\n # - None => set the destination DictConfig to None\n if src._is_interpolation() or src._is_none():\n dest._set_value(src._value())\n _update_types(node=dest, ref_type=src_ref_type, object_type=src_type)\n return\n\n dest._validate_merge(value=src)\n\n def expand(node: Container) -> None:\n rt = node._metadata.ref_type\n val: Any\n if rt is not Any:\n if is_dict_annotation(rt):\n val = {}\n elif is_list_annotation(rt):\n val = []\n else:\n val = rt\n elif isinstance(node, DictConfig):\n val = {}\n else:\n assert False\n\n node._set_value(val)\n\n if (\n src._is_missing()\n and not dest._is_missing()\n and is_structured_config(src_ref_type)\n ):\n # Replace `src` with a prototype of its corresponding structured config\n # whose fields are all missing (to avoid overwriting fields in `dest`).\n src = _create_structured_with_missing_fields(\n ref_type=src_ref_type, object_type=src_type\n )\n\n if (dest._is_interpolation() or dest._is_missing()) and not src._is_missing():\n expand(dest)\n\n for key, src_value in src.items_ex(resolve=False):\n src_node = src._get_node(key, validate_access=False)\n dest_node = dest._get_node(key, validate_access=False)\n\n if isinstance(dest_node, DictConfig):\n dest_node._validate_merge(value=src_node)\n\n missing_src_value = _is_missing_value(src_value)\n\n if (\n isinstance(dest_node, Container)\n and OmegaConf.is_none(dest, key)\n and not missing_src_value\n and not OmegaConf.is_none(src_value)\n ):\n expand(dest_node)\n\n if dest_node is not None and dest_node._is_interpolation():\n target_node = dest_node._dereference_node(\n throw_on_resolution_failure=False\n )\n if isinstance(target_node, Container):\n dest[key] = target_node\n dest_node = dest._get_node(key)\n\n if (\n dest_node is None\n and is_structured_config(dest._metadata.element_type)\n and not missing_src_value\n ):\n # merging into a new node. 
Use element_type as a base\n dest[key] = DictConfig(content=dest._metadata.element_type, parent=dest)\n dest_node = dest._get_node(key)\n\n if dest_node is not None:\n if isinstance(dest_node, BaseContainer):\n if isinstance(src_value, BaseContainer):\n dest_node._merge_with(src_value)\n elif not missing_src_value:\n dest.__setitem__(key, src_value)\n else:\n if isinstance(src_value, BaseContainer):\n dest.__setitem__(key, src_value)\n else:\n assert isinstance(dest_node, ValueNode)\n assert isinstance(src_node, ValueNode)\n # Compare to literal missing, ignoring interpolation\n src_node_missing = src_value == \"???\"\n try:\n if isinstance(dest_node, AnyNode):\n if src_node_missing:\n node = copy.copy(src_node)\n # if src node is missing, use the value from the dest_node,\n # but validate it against the type of the src node before assigment\n node._set_value(dest_node._value())\n else:\n node = src_node\n dest.__setitem__(key, node)\n else:\n if not src_node_missing:\n dest_node._set_value(src_value)\n\n except (ValidationError, ReadonlyConfigError) as e:\n dest._format_and_raise(key=key, value=src_value, cause=e)\n else:\n from omegaconf import open_dict\n\n if is_structured_config(src_type):\n # verified to be compatible above in _validate_merge\n with open_dict(dest):\n dest[key] = src._get_node(key)\n else:\n dest[key] = src._get_node(key)\n\n _update_types(node=dest, ref_type=src_ref_type, object_type=src_type)\n\n # explicit flags on the source config are replacing the flag values in the destination\n flags = src._metadata.flags\n assert flags is not None\n for flag, value in flags.items():\n if value is not None:\n dest._set_flag(flag, value)",
"def map_targets(y, mapping=None):\r\n y_converted = []\r\n\r\n if mapping is None:\r\n y_converted = y\r\n else:\r\n if isinstance(mapping, list) or isinstance(mapping, (np.ndarray, np.generic)):\r\n if isinstance(y[0], list) or isinstance(y[0], (np.ndarray)): # if nested targets\r\n y_converted = y.copy()\r\n\r\n print(\"array of array1\")\r\n\r\n for indy, y_tmp in enumerate(y):\r\n y_converted[indy] = mapping[y_tmp]\r\n else: # if list\r\n print(\"array1\")\r\n\r\n y_converted = np.array(mapping[y])\r\n\r\n elif isinstance(mapping, dict):\r\n if isinstance(y[0], list) or isinstance(y[0], (np.ndarray)): # if nested targets\r\n y_converted = y.copy()\r\n\r\n print(\"array of array2\")\r\n for indy, y_tmp in enumerate(y):\r\n y_converted[indy] = [mapping.get(y_tmp2) for y_tmp2 in y_tmp]\r\n else:\r\n print(\"array2\")\r\n\r\n y_converted = np.array([mapping.get(y_tmp) for y_tmp in y])\r\n else:\r\n raise TypeError('y must be list, ndarray, dict or None')\r\n\r\n return y_converted",
"def rule_convert(source_path, build_path):\n logging.info(\n \"Searching path `{}` for YAML rule definitions to convert ...\".format(\n source_path\n )\n )\n set_logger()\n convert_rules(source_path, build_path)",
"def modify_on(class_reference, from_dict, to_dict, all=False, custom_condition='', custom_function=False):\n _entries = select_from(class_reference, all, custom_condition, **from_dict)\n _modify = 0\n if custom_function:\n for _entry in _entries:\n for _key in to_dict:\n _entry.__dict__['_'+_key] = to_dict[_key](_entry.__dict__['_'+_key])\n _entry.put()\n _modify += 1\n else:\n for _entry in _entries:\n for _key in to_dict:\n _entry.__dict__['_'+_key] = to_dict[_key]\n _entry.put()\n _modify += 1\n return _modify",
"def replace_by_dict(source_string, mapping):\n return_string = copy.copy(source_string)\n for key, value in mapping.items():\n return_string = return_string.replace(key, value)\n return return_string",
"def merge(source, destination):\n for key, value in source.items():\n if isinstance(value, dict):\n # get node or create one\n node = destination.setdefault(key, {})\n merge(value, node)\n else:\n destination[key] = value\n\n return destination"
]
| [
"0.6293355",
"0.6013593",
"0.5974697",
"0.58764845",
"0.5870711",
"0.5825023",
"0.58013415",
"0.5561623",
"0.55460215",
"0.5544287",
"0.55300653",
"0.5520241",
"0.55021775",
"0.54956675",
"0.5470732",
"0.5413718",
"0.5398131",
"0.5393067",
"0.5354263",
"0.5331905",
"0.5330366",
"0.53231597",
"0.5301037",
"0.52932495",
"0.5289123",
"0.5288401",
"0.52671176",
"0.5243192",
"0.52235353",
"0.518703"
]
| 0.7035352 | 0 |
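The document above only declares the `transform()` interface. For illustration, a minimal concrete ruleset-based transformer might look like the sketch below; the rule format (target key mapped to source key) is an assumption, not part of the original.

```python
class KeyMappingTransformer:
    """Hypothetical concrete implementation of the abstract transform():
    the ruleset maps target keys to source keys."""

    def __init__(self, rules: dict):
        self.rules = rules  # e.g. {"full_name": "name", "years": "age"}

    def transform(self, source: dict) -> dict:
        # build the target dict by pulling each mapped key from the source
        return {target_key: source[source_key]
                for target_key, source_key in self.rules.items()
                if source_key in source}


t = KeyMappingTransformer({"full_name": "name", "years": "age"})
print(t.transform({"name": "Ada", "age": 36, "city": "London"}))
# -> {'full_name': 'Ada', 'years': 36}
```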
Searches for the specified node in the target dict object for further processing. | def _node_search(self, mapping: WizardDataMappingBaseEnum, root_node: str, target_dict: dict) -> tuple:
keys = mapping.get_registration_field_reference(root_node).split(".")
max_depth: int = len(keys) - 1
return self._recursive_search(target_dict, keys, max_depth) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def findNode(self, target: hash.hash.Hash):\n for bucket in self.buckets:\n if bucket.inRange(nodeID):\n for node in bucket:\n if node.hash == target:\n return node\n \n return None\n return None",
"def depth_first_search(self, target: Dict) -> Optional[Node]:\n\n def search(current_node: Node):\n flag = True\n for k, v in target.items():\n flag = flag and getattr(current_node, k) == v\n if not flag:\n break\n if flag:\n return current_node\n for child in current_node.children:\n ret = search(child)\n if ret:\n return ret\n return search(self.root_node)",
"def breadth_first_search(self, target: Dict) -> Optional[Node]:\n assist_queue = deque()\n assist_queue.append(self.root_node)\n while assist_queue:\n current_node: Node = assist_queue.popleft()\n flag = True\n for k, v in target.items():\n flag = flag and getattr(current_node, k) == v\n if not flag:\n break\n if flag:\n return current_node\n if current_node.children:\n for child in current_node.children:\n assist_queue.append(child)\n return None",
"def find(self, target):\n try:\n if type(target) is int:\n for key, value in self.index.table.items():\n if value == target:\n return(key)\n elif type(target) is str:\n for key, value in self.index.table.items():\n if key == target:\n return(value)\n except Exception as error:\n print(f\"Error: self.find({target}) -> {error}\")",
"def __getitem__(self, target):\r\n return self.by_target[target]",
"def __getitem__(self, target):\n return self.by_target[target]",
"def query(self, target):\n try:\n if type(target) is int:\n for key, value in self.index.items():\n if value == target:\n return(key)\n elif type(target) is str:\n for key, value in self.index.items():\n if key == target:\n return(value)\n except Exception as error:\n print(f\"Error: self.query({target}) -> {error}\")",
"def lookup(self, key):",
"def _traverse_node_tree(self, cur_node, search_node_list):\n for _, sub_node in cur_node.get_children():\n sub_nodes = []\n self._traverse_node_tree(sub_node, sub_nodes)\n sub_node_dict = {\n 'name': sub_node.node_name,\n 'type': sub_node.node_type,\n 'is_dynamic_shape_node': sub_node.is_dynamic_shape_node,\n 'nodes': sub_nodes\n }\n search_node_list.append(sub_node_dict)",
"def update(self, target, query):\n node = self._data[target]\n name = \"%s node %.8s\" % (node['type'], target)\n\n query.update({\n 'type': node['type'],\n 'model': node['model']\n })\n\n logger.info(\"Validating query\")\n NodeValidator.validate(query)\n\n self._data[target] = dict_update(node, query, name)\n logger.info(\"Updated parameters above of %s\" % name)\n\n return {target: self._data[target]}",
"def _get(self, key, current_node):\n pass",
"def search(self, key):\n if key in self.key_list:\n return (self.nodes)[key]\n return None",
"def findNode(key,nodesList):\n for node in nodesList:\n if node[\"key\"] == key:\n return node\n print(\"Error:: Could not find node with given key\")",
"def _target(path: List[Any], dictionary: Dict[str, Any]) -> Any:\n if not path:\n return dictionary\n current = dictionary\n for key in path:\n try:\n current = current[key]\n except KeyError as error:\n path = \" -> \".join(path)\n raise CertumException(f\"The path '{path}' doesn't exist\") from error\n return current",
"def search(self, target):\n if DEBUG: print('search({})'.format(target))\n\n result = False\n\n cur = self.head\n \n output = \"\\tPath: \"\n \n while cur:\n output += \"{}\".format(cur.val)\n if not cur.next and not cur.below:\n output += \" END\"\n break\n elif cur.next == None or\\\n target < cur.next.val:\n cur = cur.below\n output += \" v \"\n elif cur.next.val == target:\n result = True\n output += \" -> {}! FOUND\".format(target)\n break\n elif target > cur.next.val:\n output += \" -> \"\n cur = cur.next\n else:\n print(\"\\thow did i get here\")\n\n if DEBUG: print(output)\n if DEBUG: print('\\t{}'.format(result))\n return result",
"def search_target(self, search_target):\n\n self._search_target = search_target",
"def search(self, word):\n node = self.root\n for char in word:\n if char in node.dict:\n node = node.dict[char]\n else:\n return False\n if node.end:\n return True\n return False",
"def do_process_pass(self, stage, node, mapping_dict):\n for item in node.keys():\n if item in mapping_dict:\n value = self.process_property(stage, item, node[item], node)\n node[item] = value\n for child in node.get_children():\n self.do_process_pass(stage, child, mapping_dict)",
"def _read_current_value(self, mapping: WizardDataMappingBaseEnum, root_node: str, target_dict: dict) -> any:\n key, node = self._node_search(mapping, root_node, target_dict)\n\n return node[key]",
"def search(self, key, print_path=False):\r\n _, search_node = self.__compare(key, method='search', print_path=print_path)\r\n if not search_node.key:\r\n print(\"Node doesn't exist!\")\r\n else:\r\n print(\"ID: {}\\nValue: {}\\nColor: {}\".format(search_node.key, search_node.value, search_node.get_color()))",
"def search(dictionary_node: Dictionary, previous_row: list):\n\n for current_source_letter in dictionary_node.children:\n current_row = [previous_row[0] + 1]\n\n for i in range(1, len(processed_query_word) + 1):\n value = min(\n previous_row[i] + 1,\n current_row[i - 1] + 1,\n previous_row[i - 1]\n + self._replace(\n current_source_letter, processed_query_word[i - 1]\n ),\n )\n current_row.append(value)\n\n if (\n current_row[-1] <= max_distance\n and dictionary_node.children[current_source_letter].words_at_node\n is not None\n ):\n for word in dictionary_node.children[\n current_source_letter\n ].words_at_node:\n suggestions.append({\"word\": word, \"distance\": current_row[-1]})\n\n if min(current_row) <= max_distance:\n search(dictionary_node.children[current_source_letter], current_row)",
"def find(self, p):\n pass",
"def set_nodes_values(self, node_dict):\n\n # Requires nodes to have type defined in lookup array\n raise Exception(\"Not yet implemented.\")",
"def _search(cls, node, value):\n if node is None:\n return False\n\n if node.value == value:\n return True\n\n return cls._search(node.next_, value)",
"def contains(self, target):\n current = self \n while True:\n if current.value == target:\n return True # Here is base case we are working towards. \n elif target < current.value:\n if current.left is not None:\n current = current.left # this line is our recursive or could be recursive in the while that is working towards base case. \n else:\n return False # it doesn't exist if we have reached a leaf which is the only way we can reach None. \n elif target > current.value:\n if current.right is not None:\n current = current.right #this line is our recursive or could be recursive in the while that is working towards base case.\n else:\n return False # it doesn't exist if we have reached a leaf which is the only way we can reach None. ",
"def FindObject(self, tagged_address):\n raise NotImplementedError",
"def get_node(self, key: str) -> Node:",
"def _targetof(node):\r\n if node is None: return None\r\n return node.target",
"def search_key(cls, key, element):\n if isinstance(element, dict):\n for k, v in element.items():\n if k == key:\n return v\n elif isinstance(v, dict):\n cls.search_key(key, v)\n elif isinstance(v, list):\n cls.search_key(key, v)\n elif isinstance(element, list):\n for obj in element:\n v = cls.search_key(key, obj)\n if v:\n return v",
"def node_mapping(self):\n ..."
]
| [
"0.6458002",
"0.636581",
"0.6159206",
"0.60092944",
"0.57412994",
"0.57050276",
"0.5686447",
"0.56060046",
"0.5532294",
"0.55157566",
"0.54548234",
"0.5422413",
"0.53641313",
"0.53610045",
"0.5332658",
"0.5328531",
"0.53281033",
"0.53203183",
"0.530248",
"0.5295509",
"0.52568734",
"0.51969796",
"0.51740986",
"0.51570654",
"0.51262057",
"0.51163906",
"0.5102084",
"0.508033",
"0.50779945",
"0.50708324"
]
| 0.6678959 | 0 |
Returns a summary of the clustering in the form of which objects were assigned to which cluster and how many samples of each object were assigned to each cluster. | def summarize(k, samples):
cluster_assignments = dict()
sample_assignments = dict()
unique, counts = np.unique(samples[:, 0], return_counts=True)
sample_objects = dict(zip(unique, counts))
for oid in sample_objects:
# count the number of samples that belonged to each cluster for this object
sample_assignments[oid] = dict()
for i, s in enumerate(samples):
if s[0] == oid:
l = k.labels_[i]
if not l in sample_assignments[oid]:
sample_assignments[oid][l] = 0
sample_assignments[oid][l] += 1
largest_label = max(sample_assignments[oid], key=sample_assignments[oid].get)
# add the object to the cluster which it has most belonging samples to
if largest_label not in cluster_assignments:
cluster_assignments[largest_label] = []
cluster_assignments[largest_label].append(oid)
return cluster_assignments, sample_assignments | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_overview(self) -> None:\n print(f\"\\n\\nCluster overview:\")\n all_clusters = self.get_all_clusters()\n print(f\" - Total of {len(all_clusters)} clusters\")\n if all_clusters:\n cluster_lengths = [len(v) for v in all_clusters.values()]\n print(f\" - Average number of cluster-labels: {round(sum(cluster_lengths) / len(cluster_lengths), 2)}\")",
"def get_stats(self):\n stats = \\\n 'cluster: %s\\ncount = %d, size = %d, minvar = %f, avg_dist = %s\\n'\\\n % (self.name, self.count, self.size, self.minvar, self.avg_dist)\n return stats",
"def print_cluster_attributes(self, objects):\n print(\"\\n\")\n print((\"ClusterName\".ljust(35),\":\",objects.ClusterName.value()))\n print((\"Repository Disk\".ljust(35),\":\", \\\n objects.RepositoryDisk.PhysicalVolume[0].VolumeName.value()))\n print(\"\\nNodes in the cluster :\\n-----------------------\")\n for Node in objects.Node.Node :\n print((\"HostName\".ljust(35),\":\",\\\n Node.HostName.value()))\n print((\"PartitionID\".ljust(35),\":\", \\\n Node.PartitionID.value()))\n print()",
"def test_rr_summary_cluster(model, clusters):\n test_result = model.fit(groupvar=clusters, cov_type=\"cluster\").summary()\n assert isinstance(test_result.tables, list)\n assert len(test_result.tables) == 3\n assert len(test_result.extra_txt) > 0",
"def __str__(self):\n return \"Clustering\"",
"def cluster_obs_count(self):\n return(self.merged_data.groupby(\n 'labels').count().transpose().iloc[0, :])",
"def metadata_summary(idx):\n tax_per_cluster = []\n genomes_per_tax = []\n genes_per_genome = []\n for cluster_id,v in idx.items():\n tax_per_cluster.append(len(v.keys()))\n for tax,vv in v.items():\n genomes_per_tax.append(len(vv.keys()))\n for genomeID,gene_ids in vv.items():\n genes_per_genome.append(len(set(gene_ids)))\n sum_stats(tax_per_cluster, 'Clades per cluster')\n sum_stats(genomes_per_tax, 'Gemomes per clade')\n sum_stats(genes_per_genome, 'Genes per genome')",
"def prepare_statistics(self):\n\n # statistics of clustering files\n len0 = len(self.cluster_lists[0])\n len1 = len(self.cluster_lists[1])\n longer_index = 0 if len0 >= len1 else 1\n shorter_index = 1 if len1 <= len0 else 0\n\n percentage_stars = \"%.2f\" % (100.0 * float(self.shared_spec_num)/float(self.cluster_spectra_num[shorter_index]))\n percentage_starlets = \"%.2f\" % (100.0 * float(self.shared_spec_num)/float(self.cluster_spectra_num[longer_index]))\n\n head = \"{0:<25}{1:<20}{2:<20}\\n\".format(\"name\", \"number\", \"description\")\n rows = \"\"\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"stars No.\", self.stars_length, \"in file with less(or equal) clusters: file\" + str(shorter_index))\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"starlets No.\", self.starlets_length, \"in file with more(or equal) clusters: file\" + str(longer_index))\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"identical cluster No.\", self.similarity_dist[10], \"between them\")\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"spectrum No\", self.cluster_spectra_num[shorter_index], \"in stars\")\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"spectrum No\", self.cluster_spectra_num[longer_index], \"in starlets \")\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"shared spectrum No\", self.shared_spec_num, \"between them\")\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"shared spectrum percent\", percentage_stars, \"in stars\")\n rows += \"{0:<25}{1:<20}{2:<20}\\n\".format(\"shared spectrum percent\", percentage_starlets, \"in starlets\")\n self.tables.append(('statistics of files', head, rows))\n\n # distribution of cluster size in stars\n head = '{0:<20}{1:<20}{2:<20}{3:<20}\\n'.format(\"cluster size\",\"No.\", \"percentage\", \"accumulate pecentage\")\n rows = \"\"\n rows += \"{0:<20}{1:<20}\\n\".format(\"%.2f\" % (self.ave_star_size), \"average\")\n accumulate_num = 0\n for key in sorted(self.cluster_size_dist[shorter_index].keys()):\n value = self.cluster_size_dist[shorter_index][key]\n accumulate_num += value\n percent = \"%.2f\" % (100 * value/self.stars_length)\n accum_percent = \"%.2f\" % (100 * accumulate_num/self.stars_length)\n rows += '{0:<20}{1:<20}{2:<20}{3:<20}\\n'.format(key, value, percent, accum_percent)\n self.tables.append(('distribution of cluster size in stars', head, rows))\n \n head = '{0:<20}{1:<20}{2:<20}{3:<20}\\n'.format(\"cluster size\",\"No.\", \"percentage\", \"accumulate pecentage\")\n rows = \"\"\n rows += \"{0:<20}{1:<20}\\n\".format(\"%.2f\" % (self.ave_starlet_size), \"average\")\n accumulate_num = 0\n for key in sorted(self.cluster_size_dist[longer_index].keys()):\n value = self.cluster_size_dist[longer_index][key]\n accumulate_num += value\n percent = \"%.2f\" % (100 * value/self.starlets_length)\n accum_percent = \"%.2f\" % (100 * accumulate_num/self.starlets_length)\n rows += '{0:<20}{1:<20}{2:<20}{3:<20}\\n'.format(key, value, percent, accum_percent)\n self.tables.append(('distribution of cluster size in starlets', head, rows))\n\n # distribution of similarity\n head = \"{0:<20}{1:<20}{2:<20}{3:<20}\\n\".format(\"similarity score\", \"pairs of clusters\", \"percentage(stars)\", \"percentage(starlets)\")\n rows = \"\"\n for key in reversed(sorted(self.similarity_dist.keys())):\n value = self.similarity_dist[key]\n percent_star = \"%.2f\" % (100.0*value/self.stars_length)\n percent_starlet = \"%.2f\" % (100.0*value/self.starlets_length)\n rows += '{0:<20}{1:<20}{2:<20}{3:<20}\\n'.format(key, value, percent_star, percent_starlet)\n 
self.tables.append(('distribution of similarity (identical = 10)', head, rows))\n\n # distribution of star divide factors\n head = '{0:<20}{1:<20}{2:<20}\\n'.format(\"divide factor\",\"No.\",\"percentage\")\n rows = \"\"\n rows += \"{0:<20}{1:<20}\\n\".format(\"%.2f\" % (self.ave_divide_factor_star), \"average\")\n for key in sorted(self.star_divide_factor_dist.keys()):\n value = self.star_divide_factor_dist[key]\n percent_star = \"%.2f\" % (100.0*value/self.stars_length)\n rows += '{0:<20}{1:<20}{2:<20}\\n'.format(key, value, percent_star)\n self.tables.append(('distribution of star divide factors', head, rows))\n\n # distribution of starlet divide factors\n head = '{0:<20}{1:<20}{2:<20}\\n'.format(\"divide factor\",\"No.\",\"percentage\")\n rows = \"\"\n rows += \"{0:<20}{1:<20}\\n\".format(\"%.2f\" % (self.ave_divide_factor_starlet), \"average\")\n for key in sorted(self.starlet_divide_factor_dist.keys()):\n value = self.starlet_divide_factor_dist[key]\n percent_starlet = \"%.2f\" % (100.0*value/self.starlets_length)\n rows += '{0:<20}{1:<20}{2:<20}\\n'.format(key, value, percent_starlet)\n self.tables.append(('distribution of starlet divide factors', head, rows))",
"def assign_labels_to_centroids(self):\n labelled_centroids = []\n for i in range(len(self.final_clusters)):\n labels = map(lambda x: x[0], self.final_clusters[i])\n # pick the most common label\n most_common = Counter(labels).most_common(1)[0][0]\n c = np.round(len([item for item in self.final_clusters[i] if item[0]==1])/len(self.final_clusters[i]),2)\n if c>=0.46:\n most_common = 1.0\n centroid = (most_common, self.final_centroids[i])\n labelled_centroids.append(centroid)\n\n self.labelled_centroids = labelled_centroids\n print(\"cluster_0: \", np.round(len([item for item in self.final_clusters[0] if item[0]==1])/len(self.final_clusters[0]),2), \"size_0: \", len(self.final_clusters[0]))\n print(\"cluster_1: \", np.round(len([item for item in self.final_clusters[1] if item[0]==1])/len(self.final_clusters[1]),2), \"size_1: \", len(self.final_clusters[1]))\n #print(\"cluster_2: \", np.round(len([item for item in self.final_clusters[2] if item[0]==1])/len(self.final_clusters[2]),2), \"size_2: \", len(self.final_clusters[2]))\n #print(\"cluster_3: \", np.round(len([item for item in self.final_clusters[3] if item[0]==1])/len(self.final_clusters[3]),2), \"size_2: \", len(self.final_clusters[3]))",
"def clustering_metrics(clusts, node_assn, node_pred):\n pred_vox = cluster_to_voxel_label(clusts, node_pred)\n true_vox = cluster_to_voxel_label(clusts, node_assn)\n ari = ARI(pred_vox, true_vox)\n ami = AMI(pred_vox, true_vox)\n sbd = SBD(pred_vox, true_vox)\n pur, eff = purity_efficiency(pred_vox, true_vox)\n return ari, ami, sbd, pur, eff",
"def summary(self):\n summary = defaultdict(int)\n\n for r in self.results:\n summary[r.result] += 1\n\n return summary",
"def cluster_means(self):\n if self.evaluate_by is not None:\n return(self.merged_data.groupby(\n 'labels').mean().sort_values(self.evaluate_by).transpose())\n else:\n return(self.merged_data.groupby('labels').mean().transpose())",
"def get_cluster_info(self) -> Dict[str, Any]:\n pass",
"def print_cluster(self, cluster, value):\n total = 0\n ham = 0\n spam = 0\n for message in cluster:\n if self.spamorham[self.ids[message]] == 'ham':\n ham += 1\n elif self.spamorham[self.ids[message]] == 'spam':\n spam += 1\n else:\n print(\"ERROR!\")\n total += 1\n\n print(\"Total number of messages in the {0} cluster: {1}\\n\"\n \"Percentage of SPAM messages in the {2} cluster: {3}\\n\"\n \"Percentage of HAM messages in the {4} cluster: {5}\".format(value, total, value,\n str((float(spam) / total) * 100), value,\n str((float(ham) / total) * 100)))",
"def cluster_counter(self):\n return Counter(self.model.labels_.tolist())",
"def cluster_cal(self):\n self.Cluster = []\n for i in range(self.nodenum):\n neighborhood_node = self.neighbor_node(i)\n Node_num = len(neighborhood_node)\n Count = self.neighbor_edge(neighborhood_node)\n if(Node_num == 0 or Node_num == 1):\n self.Cluster.append(0.5)\n else:\n self.Cluster.append(Count/(Node_num*(Node_num - 1)))\n \n self.cluster_coeff = np.average(self.Cluster)",
"def print_cluster_summary(algo, i):\n assert algo in ['DBSCAN', 'KMeans', 'DBSCAN_topics', 'KMeans_topics']\n \n cluster_df = apps_df.copy()\n cluster_df = cluster_df[cluster_df[algo] == i]\n print('Cluster {} consists out of {} apps.'.format(str(i), str(cluster_df.shape[0])))\n titles = list(cluster_df['title'])\n print('The apps are:\\n {}'.format('\\n\\t'.join(titles)))",
"def clustering_and_visulization(self):\n centroids, _ = kmeans(self.data_mat, self.k)\n idx, _ = vq(self.data_mat, centroids)\n for i in range(self.k):\n\n self.plot_list.append(self.data_mat[idx == i, 0])\n self.plot_list1.append(self.data_mat[idx == i, 1])\n\n for j in range(self.k):\n plot(self.plot_list[j], self.plot_list1[j], self.color_code[j])\n plot(centroids[:, 0], centroids[:, 1], 'sg', markersize=8)\n show()\n for i in range(self.k):\n self.cluster = self.data_mat[idx == i]\n self.clusterlist.append(self.cluster)\n\n for i in range(len(self.clusterlist)):\n self.clusterdict[i] = self.clusterlist[i]\n print(self.clusterdict)\n\n\n self.indexdict = {}\n for i in self.clusterdict:\n self.indexdict[i] = []\n print(len(self.clusterdict))\n for i in range(len(idx)):\n for j in range(len(self.clusterdict)):\n if (self.clusterdict[j][:] == self.data_mat[i]).any():\n self.indexdict[j].append(i)\n print(\"cluster dict of packs\",self.indexdict)\n\n self.drugdict = {}\n for i in self.clusterdict:\n self.drugdict[i] = []\n self.drug=[]\n for i in range(len(self.indexdict.keys())):\n for j in range(len(self.indexdict[i])):\n self.drugdict[i].append(self.df.iloc[self.indexdict[i][j]].to_dict())\n print(\"drugs dict with their frequencies\",self.drugdict)\n clusterdict_from_df_as_drug_non_O_frequency = {}\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs ={}\n for i in self.drugdict:\n clusterdict_from_df_as_drug_non_O_frequency[i] = []\n for i in self.drugdict:\n for j in self.drugdict[i]:\n clusterdict_from_df_as_drug_non_O_frequency[i].append({x: y for x, y in j.items() if y != 0})\n print(\"clusterdict_from_df_as_drug_non_O_frequency\", clusterdict_from_df_as_drug_non_O_frequency)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in clusterdict_from_df_as_drug_non_O_frequency[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i]=list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n\n\n print(\"only drugs only\", clusterdict_of_non_repeated_drugs)\n\n########################################################################################################################\n try:\n common_drug_list = [x for x in clusterdict_of_non_repeated_drugs[0] if x in clusterdict_of_non_repeated_drugs[1]]\n print('\\n')\n print(\"common drug list\", common_drug_list)\n total_frequency_of_drugs_dict = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict[i] = []\n\n for drug in common_drug_list:\n\n for cluster_keys in clusterdict_from_df_as_drug_non_O_frequency.keys():\n temp_list = []\n for cluster_values_as_list in clusterdict_from_df_as_drug_non_O_frequency[cluster_keys]:\n try:\n temp_list.append(cluster_values_as_list[str(drug)])\n except KeyError:\n print(\"\\t\")\n total_frequency_of_drugs_dict[cluster_keys].append(np.sum(temp_list))\n print(\"total drugs frequency\",total_frequency_of_drugs_dict)\n total_frequency_of_drugs_dict_with_drugs = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[i] = []\n temp_list1 = []\n temp_list2 = []\n for keys in self.drugdict.keys():\n temp_list1.append(clusterdict_of_non_repeated_drugs[keys])\n for keys in self.drugdict.keys():\n 
temp_list2.append(total_frequency_of_drugs_dict[keys])\n temp_list3 = []\n for i in temp_list1:\n for j in temp_list2:\n temp_list3.append(dict(zip(i,j)))\n temp_list4 = temp_list3[:2]\n print('\\n')\n for keys in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[keys].append(temp_list4[keys])\n print(\"total frequency with drugs dict\",total_frequency_of_drugs_dict_with_drugs)\n\n final_drugs_in_clusters_dict = {}\n for i in self.drugdict:\n final_drugs_in_clusters_dict[i] = []\n compare_list = []\n for drug in common_drug_list:\n compare_list.append(min(total_frequency_of_drugs_dict_with_drugs[0][0][drug], total_frequency_of_drugs_dict_with_drugs[1][0][drug]))\n print(\"compare list\",compare_list)\n for values in total_frequency_of_drugs_dict_with_drugs.values():\n for key1, value1 in values[0].items():\n if value1 in compare_list:\n\n key2 =values[0].keys()[values[0].values().index(value1)]\n values[0].pop(key2, None)\n\n\n print('final dict with deleted keys', total_frequency_of_drugs_dict_with_drugs)\n\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs = {}\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in total_frequency_of_drugs_dict_with_drugs[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i] = list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n print(\"only drugs\",clusterdict_of_non_repeated_drugs)\n\n final_robot_packs_dict = {}\n for i in self.drugdict:\n final_robot_packs_dict[i] = []\n\n winner_drug_dict = {}\n for i in common_drug_list:\n winner_drug_dict[i] = []\n for drug in common_drug_list:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n winner_drug_dict[str(drug)].append(0)\n if drug in clusterdict_of_non_repeated_drugs[1]:\n winner_drug_dict[str(drug)].append(1)\n print(\"winner drug dict\",winner_drug_dict)\n\n for i in self.indexdict:\n print(i)\n for pack in self.indexdict[i]:\n packdict = self.df.iloc[pack].to_dict()\n packdict_non_0 = {x: y for x, y in packdict.items() if y != 0}\n packdict_non_0_key = packdict_non_0.keys()\n for drug in packdict_non_0_key:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n final_robot_packs_dict[0].append(pack)\n elif drug in clusterdict_of_non_repeated_drugs[1]:\n final_robot_packs_dict[1].append(pack)\n\n final_robot_packs_dict[i].append(pack)\n for commondrugs in winner_drug_dict:\n for winnercluster in winner_drug_dict[commondrugs]:\n if winnercluster==0:\n loosercluster =1\n if winnercluster == 1:\n loosercluster = 0\n if commondrugs in packdict_non_0_key and i==loosercluster:\n try:\n final_robot_packs_dict[i].remove(pack)\n final_robot_packs_dict[winnercluster].append(pack)\n except ValueError:\n print('\\t')\n\n for i in self.indexdict:\n final_robot_packs_dict[i] = set(final_robot_packs_dict[i])\n\n print(\"final which pack which robot dict\",final_robot_packs_dict)\n\n except IndexError:\n print(\"No common drugs\")",
"def k_means(n_clust, data_frame, true_labels):\n k_means = KMeans(n_clusters=n_clust, random_state=123, n_init=30)\n k_means.fit(data_frame)\n c_labels = k_means.labels_\n df = pd.DataFrame({'clust_label': c_labels, 'orig_label': true_labels.tolist()})\n ct = pd.crosstab(df['clust_label'], df['orig_label'])\n y_clust = k_means.predict(data_frame)\n display(ct)\n print('% 9s' % 'inertia homo compl v-meas ARI AMI silhouette')\n print('%i %.3f %.3f %.3f %.3f %.3f %.3f'\n % (k_means.inertia_,\n homogeneity_score(true_labels, y_clust),\n completeness_score(true_labels, y_clust),\n v_measure_score(true_labels, y_clust),\n adjusted_rand_score(true_labels, y_clust),\n adjusted_mutual_info_score(true_labels, y_clust),\n silhouette_score(data_frame, y_clust, metric='euclidean')))",
"def all_cluster_summary_fn(self):\n return op.join(self.combined_dir, 'all.cluster_summary.json')",
"def test_clusters(trained_data, centroids):\n\n for c in range(len(centroids)):\n count_1 = 0\n count_0 = 0\n for p in range(len(trained_data)):\n if trained_data[p][-2] == 0 and trained_data[p][-1] == centroids[c]:\n count_0 += 1\n if trained_data[p][-2] == 1 and trained_data[p][-1] == centroids[c]:\n count_1 += 1\n print (\"Centroid \", c+1, \":\", centroids[c])\n print(\"Number of 1's: \", count_1)\n print(\"Number of 0's: \", count_0)\n print(\"Percent 1's: \", round((count_1/(count_1 + count_0))*100,2))\n print(\"Percent 0's: \", round((count_0 / (count_1 + count_0)) * 100,2))\n print(\"****************\")",
"def analysis_function_num_clusters(self,clustering):\n return len(clustering.clusters)",
"def summarize(self) -> Mapping[str, int]:\n return dict(\n compounds=self.count_compounds(),\n side_effects=self.count_side_effects(),\n indications=self.count_indications(),\n umls=self.count_umls(),\n )",
"def total_cost(clusters):\n inter = 0\n intra = 0\n dm = 0\n for clst in clusters:\n # print clst.label, \"has cost: \", str(clst.inter_cost), str(clst.intra_cost), str(clst.dm_cost)\n inter += clst.inter_cost\n intra += clst.intra_cost\n dm += clst.dm_cost\n total = inter + intra + dm\n #iic = inter + intra\n #print \"inter \" + str(inter) + \" intra \" + str(intra) + \" dm \" + str(dm) + \" total \" + str(total) + \" iic \" + str(iic)\n print str(inter) + \"\\t\" + str(intra) + \"\\t\" + str(dm) + \"\\t\" + str(total) # + \" in \" + str(inr)\n return inter, intra, dm, total",
"def connected_component_statistics(self, printStats=False):\n lengths = self.connected_component_lengths()\n lengthDict = dict(collections.Counter(lengths))\n\n if printStats:\n orderedLengthDict = collections.OrderedDict(sorted(lengthDict.items()))\n numberOfGroups = nx.number_connected_components(self.return_undirected())\n for k, v in orderedLengthDict.iteritems():\n percent = round((100.00*v / numberOfGroups), 2)\n print str(k) + ' nodes: ' + str(v) + ' (' + str(percent) + '%) groups'\n print '-----------------------------------------'\n print 'TOTAL: ' + str(super(SynonymNetwork, self).number_of_nodes()) + ' nodes in network, ' + str(numberOfGroups) + ' distinct groups'\n else:\n return lengthDict",
"def cluster(self):\n\t\tself.index[\"cluster\"] = {}\n\n\t\tfor item in self.index[\"items\"]:\n\t\t\tself.index[\"cluster\"][item] = [{\"weight\" : float(len(set(self.index[\"items\"][item]).intersection( set(self.index[\"items\"][id]))))/float(len(self.index[\"items\"][item])) , \"name\" : id, \"authority\" : set(self.index[\"items\"][item]).intersection( set(self.index[\"items\"][id])) } for id in self.index[\"items\"] if id != item and len(set(self.index[\"items\"][item]).intersection( set(self.index[\"items\"][id]))) >= 1]\n\n\t\treturn self.index",
"def matching_clusterization(self):\n result = []\n self.reclustering(self.groups.copy(deep=True), result)\n self.result = pd.DataFrame(result)\n return self.result.sort_values(by=['cluster_size'], ascending=False)",
"def analysis_function_details(self,clustering):\n return clustering.details",
"def summarize_contacts_nodb(tac_clustered):\n rec_l = []\n for tacnum, cluster in tac_clustered.groupby('group'):\n rec = {'cluster': tacnum}\n \n # Start and stop of cluster\n rec['frame_start'] = cluster['frame'].min()\n rec['frame_stop'] = cluster['frame'].max()\n rec['duration'] = rec['frame_stop'] - rec['frame_start'] + 1\n \n # Mean tip and fol of cluster\n rec['tip_x'] = cluster['tip_x'].mean()\n rec['tip_y'] = cluster['tip_y'].mean()\n rec['fol_x'] = cluster['fol_x'].mean()\n rec['fol_y'] = cluster['fol_y'].mean()\n rec['pixlen'] = np.sqrt(\n (rec['tip_y'] - rec['fol_y']) ** 2 +\n (rec['tip_x'] - rec['fol_x']) ** 2)\n \n rec_l.append(rec)\n contacts_summary = pandas.DataFrame.from_records(rec_l).set_index('cluster')\n \n return contacts_summary",
"def __str__(self):\n return \"Cluster\""
]
| [
"0.7024156",
"0.67413795",
"0.65553635",
"0.6400142",
"0.6399458",
"0.63829273",
"0.63645506",
"0.63288116",
"0.6261311",
"0.62351966",
"0.62264615",
"0.62164265",
"0.61641264",
"0.6156463",
"0.6149437",
"0.6143532",
"0.6119567",
"0.6118515",
"0.61017936",
"0.6093622",
"0.60787666",
"0.6033866",
"0.6024166",
"0.59946644",
"0.59941083",
"0.5989882",
"0.59889644",
"0.59882456",
"0.5984766",
"0.5942803"
]
| 0.7156205 | 0 |
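Illustrative note on the summarize(k, samples) row above: the function assumes a fitted clustering object exposing labels_ (e.g. a scikit-learn KMeans) and a samples array whose first column is an object id. The sketch below re-creates the same bookkeeping on made-up toy data; the data values and the inline re-implementation are assumptions for illustration, not part of the dataset.

import numpy as np
from sklearn.cluster import KMeans

# Toy samples: column 0 is the object id, the remaining columns are features.
samples = np.array([
    [0, 1.0, 1.1],
    [0, 0.9, 1.0],
    [1, 5.0, 5.2],
    [1, 5.1, 4.9],
    [2, 0.8, 1.2],
])

# Cluster only the feature columns; labels_ stays aligned with the sample rows.
k = KMeans(n_clusters=2, n_init=10, random_state=0).fit(samples[:, 1:])

# Same bookkeeping as summarize(k, samples): count, per object, how many of its
# samples fell into each cluster, then assign each object to its majority cluster.
sample_assignments = {}
for oid in np.unique(samples[:, 0]):
    labels_for_oid = k.labels_[samples[:, 0] == oid]
    sample_assignments[int(oid)] = {
        int(label): int((labels_for_oid == label).sum())
        for label in np.unique(labels_for_oid)
    }

cluster_assignments = {}
for oid, counts in sample_assignments.items():
    majority = max(counts, key=counts.get)
    cluster_assignments.setdefault(majority, []).append(oid)

print(cluster_assignments)   # e.g. {0: [0, 2], 1: [1]} (cluster numbering may vary)
print(sample_assignments)    # e.g. {0: {0: 2}, 1: {1: 2}, 2: {0: 1}}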
Temporary function that configures logging to go straight to the console. | def setup_logging():
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console = logging.StreamHandler(sys.stdout)
console.setLevel(logging.DEBUG)
console.setFormatter(formatter)
root = logging.getLogger()
root.addHandler(console)
root.setLevel(logging.DEBUG) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def configure_console_logger ():\n\t\tconsole = logging.StreamHandler()\n\t\tconsole.setLevel(logging.INFO) # Change level for console logger in development mode\n\t\tformatter = logging.Formatter('%(levelname)-8s %(message)s')\n\t\tconsole.setFormatter(formatter)\n\t\tlogging.getLogger('').addHandler(console)",
"def configure_logging(self):\n\n root_logger = logging.getLogger('')\n root_logger.setLevel(logging.DEBUG)\n\n console = logging.StreamHandler()\n console_level = self.LOG_LEVEL_MAP.get(self.options.verbose_level,\n logging.WARNING)\n console.setLevel(console_level)\n formatter = logging.Formatter(config.DEFAULT_MESSAGE_FORMAT)\n console.setFormatter(formatter)\n root_logger.addHandler(console)",
"def __logger_console(self):\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(self.__formatter)\n console_handler.setLevel(logging.INFO)\n self.__logger.addHandler(console_handler)",
"def enable_console():\n global CONSOLE\n if CONSOLE is None:\n # define a Handler which writes messages to sys.stderr\n CONSOLE = logging.StreamHandler()\n CONSOLE.setLevel(logging.DEBUG)\n # set a format which is simpler for console use\n formatter = logging.Formatter('%(levelname)s %(name)s: %(message)s')\n # tell the handler to use this format\n CONSOLE.setFormatter(formatter)\n logger.addHandler(CONSOLE)",
"def setup_logger():\n root = logging.getLogger()\n root.setLevel(LOGGING_LEVEL)\n formatter = logging.Formatter('%(asctime)s - %(message)s')\n ch = logging.StreamHandler(sys.stdout)\n ch.setLevel(LOGGING_LEVEL)\n ch.setFormatter(formatter)\n root.addHandler(ch)",
"def configure_logging(self):\r\n root_logger = logging.getLogger('')\r\n\r\n # Set up logging to a file\r\n root_logger.setLevel(logging.DEBUG)\r\n\r\n # Send higher-level messages to the console via stderr\r\n console = logging.StreamHandler(self.stderr)\r\n console_level = {self.WARNING_LEVEL: logging.WARNING,\r\n self.INFO_LEVEL: logging.INFO,\r\n self.DEBUG_LEVEL: logging.DEBUG,\r\n }.get(self.options.verbose_level, logging.DEBUG)\r\n console.setLevel(console_level)\r\n if logging.DEBUG == console_level:\r\n formatter = logging.Formatter(self.DEBUG_MESSAGE_FORMAT)\r\n else:\r\n formatter = logging.Formatter(self.CONSOLE_MESSAGE_FORMAT)\r\n console.setFormatter(formatter)\r\n root_logger.addHandler(console)\r\n return",
"def configure_logging():\n # console_handler = TTSHandler()\n root = logging.getLogger('node_' + __name__)\n root.setLevel(logging.INFO)\n\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.DEBUG)\n\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n console_handler.setFormatter(formatter)\n root.addHandler(console_handler)\n\n root = logging.getLogger()\n root.addHandler(console_handler)\n # the default formatter just returns the message\n root.setLevel(logging.DEBUG)",
"def configure_logging(self):\n root_logger = logging.getLogger('')\n\n # Set up logging to a file\n root_logger.setLevel(logging.DEBUG)\n\n # Send higher-level messages to the console via stderr\n console = logging.StreamHandler(self.stderr)\n console_level = {self.WARNING_LEVEL: logging.WARNING,\n self.INFO_LEVEL: logging.INFO,\n self.DEBUG_LEVEL: logging.DEBUG,\n }.get(self.options.verbose_level, logging.DEBUG)\n # The default log level is INFO, in this situation, set the\n # log level of the console to WARNING, to avoid displaying\n # useless messages. This equals using \"--quiet\"\n if console_level == logging.INFO:\n console.setLevel(logging.WARNING)\n else:\n console.setLevel(console_level)\n if logging.DEBUG == console_level:\n formatter = logging.Formatter(self.DEBUG_MESSAGE_FORMAT)\n else:\n formatter = logging.Formatter(self.CONSOLE_MESSAGE_FORMAT)\n logging.getLogger('iso8601.iso8601').setLevel(logging.WARNING)\n logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)\n console.setFormatter(formatter)\n root_logger.addHandler(console)\n return",
"def configure_logger():\n logger = logging.getLogger()\n handler = logging.StreamHandler()\n formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(logging.INFO)",
"def set_daemon_log():\n global toconsole\n toconsole = False",
"def enable_console_logging(log):\n\n log.setLevel(logging.DEBUG)\n # create console handler and set level to debug\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n # create formatter\n formatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n # add formatter to ch\n ch.setFormatter(formatter)\n # add ch to logger\n log.addHandler(ch)",
"def setup_logger_console(log_level='info'):\n # Configureer threshold log level DEBUG voor de root logger (i.p.v. WARNING).\n setup_logger()\n\n # Configureer de console handler.\n console_handler = customize_handler(logging.StreamHandler(), log_level)\n # Koppel console handler aan de root logger.\n logging.getLogger('').addHandler(console_handler)\n\n return console_handler",
"def _setup_logging():\n logging.Formatter.converter = time.gmtime\n logging.basicConfig(\n format='%(asctime)s %(message)s',\n level=logging.DEBUG,\n filename='conduit-proxy.log')\n\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n logging.getLogger().addHandler(console)",
"def initialize_logger(self):\n\n # initialize logger\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n # logger console handler\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setLevel(logging.INFO)\n console_handler.setFormatter(logging.Formatter(\"\"))\n logger.addHandler(console_handler)",
"def setup_logging():\n logger = logging.getLogger()\n logger.level = logging.DEBUG\n stream_handler = logging.StreamHandler(sys.stdout)\n logger.addHandler(stream_handler)",
"def _configure_logging(self):\n pass",
"def setup_logging(self):\n console_handler = logging.StreamHandler()\n request_logging.assign_request_filter(console_handler,\n self.additional_fields)\n logging.basicConfig(level=self.level,\n format=self.format_string,\n handlers=[console_handler])\n for handler in logging.root.handlers:\n handler.setFormatter(RedactionFormatter(handler.formatter))\n logger = logging.getLogger(__name__)\n logger.info('Established logging defaults')\n self._setup_log_levels()",
"def main(verbose=False):\n if verbose:\n global VERBOSE\n VERBOSE = True\n verbose_handler = logging.StreamHandler(sys.stdout)\n verbose_handler.setLevel(logging.INFO)\n verbose_handler.setFormatter(log_formatter)\n logger.addHandler(verbose_handler)\n click.echo(\"Logging to {}\\n\".format(LOGFILE))",
"def _prepare_logging():\n log = logging.getLogger(__name__)\n log.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\"%(asctime)s - %(levelname)s - %(message)s\")\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.INFO)\n console_handler.setFormatter(formatter)\n log.addHandler(console_handler)\n return log",
"def setup_logging():\n product_name = \"plasma\"\n logging.setup(cfg.CONF, product_name)\n LOG.info(\"Logging enabled!\")\n LOG.debug(\"command line: %s\", \" \".join(sys.argv))",
"def setupLogging():\n global enabled, dummyInstance\n from pyemma.util.config import conf_values\n args = conf_values['Logging']\n\n if args.enabled:\n if args.tofile and args.file:\n filename = args.file\n else:\n filename = None\n try:\n logging.basicConfig(level=args.level,\n format=args.format,\n datefmt='%d-%m-%y %H:%M:%S',\n filename=filename,\n filemode='a')\n except IOError as ie:\n import warnings\n warnings.warn('logging could not be initialized, because of %s' % ie)\n return\n \"\"\" in case we want to log to both file and stream, add a separate handler\"\"\"\n if args.toconsole and args.tofile:\n ch = logging.StreamHandler()\n ch.setLevel(args.level)\n ch.setFormatter(logging.Formatter(args.format))\n logging.getLogger('').addHandler(ch)\n else:\n dummyInstance = dummyLogger()\n\n enabled = args.enabled",
"def log_to_console(self):\n stream_handler = logging.StreamHandler()\n stream_handler.setLevel(logging.DEBUG)\n stream_handler.setFormatter(self.formatter)\n self.log.addHandler(stream_handler)\n return self",
"def initLogging(self):\n logging.basicConfig(level=self.loglevel, stream=sys.stderr)",
"def setup_logging():\n logging.basicConfig(format='%(levelname)s: %(message)s', level=LOGLEVEL)",
"def set_logger( logger_fn: Callable[[str,str],any] = lambda llvl, msg: sys.stdout.write( \"[%s]: %s\\n\" % (llvl, msg) ) ):\n global LOGGER\n\n LOGGER = logger_fn",
"def setup_global_logging():\n\n global global_logging_started\n\n if global_logging_started:\n return\n\n orig_logger_class = logging.getLoggerClass()\n logging.setLoggerClass(StreamTeeLogger)\n try:\n stdout_logger = logging.getLogger(__name__ + '.stdout')\n stderr_logger = logging.getLogger(__name__ + '.stderr')\n finally:\n logging.setLoggerClass(orig_logger_class)\n\n stdout_logger.setLevel(logging.INFO)\n stderr_logger.setLevel(logging.ERROR)\n stdout_logger.set_stream(sys.stdout)\n stderr_logger.set_stream(sys.stderr)\n sys.stdout = stdout_logger\n sys.stderr = stderr_logger\n\n exception_logger = logging.getLogger(__name__ + '.exc')\n sys.excepthook = LoggingExceptionHook(exception_logger)\n\n logging.captureWarnings(True)\n\n rawinput = 'input'\n builtins._original_raw_input = getattr(builtins, rawinput)\n setattr(builtins, rawinput, global_logging_raw_input)\n\n global_logging_started = True",
"def _setup_cmd_logger():\n logger.setLevel(logging.DEBUG)\n ch = logging.StreamHandler()\n formatter = ColoredFormatter('%(log_color)s[%(levelname)8s] %(message)s%(reset)s')\n ch.setLevel(level=logging.DEBUG)\n ch.setFormatter(formatter)\n logger.addHandler(ch)",
"def configure_logging():\n\n level = logging.INFO\n logging.getLogger().setLevel(level)\n logging.basicConfig(\n level=level,\n format=(\n \"[%(asctime)s][%(levelname)s][%(filename)s:%(lineno)d]\"\n + \"[%(processName)s] %(message)s\"\n ),\n )",
"def configure_logging():\n dictConfig(DEFAULT_LOGGING)\n\n default_formatter = logging.Formatter(\n \"%(asctime)s [%(levelname)s] [PID:%(process)d TID:%(thread)d] [%(filename)s:%(lineno)s in `%(funcName)s`] %(message)s\",\n \"%Y-%m-%d %H:%M:%S\")\n\n # file_handler = logging.handlers.RotatingFileHandler(logfile_path, maxBytes=10485760,backupCount=300, encoding='utf-8')\n # file_handler.setLevel(logging.INFO)\n\n if len(logging.getLogger().handlers) > 0:\n for h in logging.getLogger().handlers:\n if isinstance(h, logging.StreamHandler):\n # Then we found a logger to the terminal\n h.setLevel(logging.DEBUG)\n h.setFormatter(default_formatter)\n\n else:\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.DEBUG)\n console_handler.setFormatter(default_formatter)\n logging.root.addHandler(console_handler)\n\n\n logging.root.setLevel(logging.WARNING)",
"def disable_console():\n logger.removeHandler(CONSOLE)"
]
| [
"0.7744444",
"0.7212908",
"0.71922755",
"0.6954237",
"0.6946993",
"0.6937946",
"0.6893693",
"0.68809277",
"0.68545187",
"0.684831",
"0.6822329",
"0.67961013",
"0.67405695",
"0.67387825",
"0.6733418",
"0.6722712",
"0.6687495",
"0.6649076",
"0.6637922",
"0.6631609",
"0.6619285",
"0.66166186",
"0.65508157",
"0.6544174",
"0.6533465",
"0.65147686",
"0.65114117",
"0.6492962",
"0.6492462",
"0.64720917"
]
| 0.7359638 | 1 |
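Quick usage note on the setup_logging row above: once the handler is attached to the root logger, any module-level logger prints to stdout, but calling the setup twice attaches a second handler and duplicates every line. The snippet below mirrors the document's pattern and adds a guard — the guard and the demo logger name are assumptions, not part of the original.

import logging
import sys

def setup_logging():
    # Mirrors the row above: one DEBUG-level stdout handler on the root logger,
    # but guarded so repeated calls do not stack duplicate handlers.
    root = logging.getLogger()
    if root.handlers:
        return
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    console = logging.StreamHandler(sys.stdout)
    console.setLevel(logging.DEBUG)
    console.setFormatter(formatter)
    root.addHandler(console)
    root.setLevel(logging.DEBUG)

setup_logging()
setup_logging()  # no-op thanks to the guard; without it, every message would print twice
logging.getLogger("demo").debug("goes straight to stdout with timestamp and level")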
Sets default configuration values for pokered. These should eventually be moved into the configuration module. | def configure_for_pokered(config=config):
attrs = {
"version": "red",
"map_dir": os.path.join(config.path, 'maps/'),
"gfx_dir": os.path.join(config.path, 'gfx/tilesets/'),
"to_gfx_name": red_gfx_name,
"block_dir": os.path.join(config.path, 'gfx/blocksets/'), # not used
"block_ext": '.bst', # not used
"palettes_on": False,
"constants_filename": os.path.join(config.path, 'constants.asm'),
"time_of_day": 1,
}
return attrs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def config_init(self):\n\n game_opts = [\n\n # Execution Options\n ('debug',False), # Toggle Debug Messaging\n ('log_path',False), # Turn on logging (w/path)\n ('log_lvl',logging.DEBUG), # Set log level\n\n # World Generation Options\n ('flex_limit',3) # Sets the maximum variance\n\n ]\n\n # Attempts to pull each value from the configuration\n # if not in config, the default value defined above\n # is set instead\n for opt in game_opts:\n try:\n setattr(self,opt[0],self.conf.conf_dict[opt[0]])\n except:\n setattr(self,opt[0],opt[1])\n continue",
"def set_missing_defaults(self):\n if 'pub_options' not in self.config:\n self.config['pub_options'] = {\n 'acknowledge': True,\n 'retain': True\n }\n\n if 'sub_options' not in self.config:\n self.config['sub_options'] = {\n 'get_retained': False\n }\n\n if 'subscribed_topics' not in self.config:\n self.config['subscribed_topics'] = None\n\n if 'replay_events' not in self.config:\n self.config['replay_events'] = False\n\n if 'max_reconnect_retries' not in self.config:\n self.config['max_reconnect_retries'] = 10",
"def setdefaults(self):\n self.config = {\n 'dbuser': Infopage.DEFAULT_DBUSER,\n 'dbname': Infopage.DEFAULT_DBNAME,\n 'dbpassword': Infopage.DEFAULT_DBPASSWORD,\n 'dbhost': Infopage.DEFAULT_DBHOST\n }",
"def load_defaults(self):\n self.set_motor_limits(self.MOTOR_LEFT, self.LEFT_DEFAULT)\n self.set_motor_limits(self.MOTOR_RIGHT, self.RIGHT_DEFAULT)\n self.set_servo(self.SERVO_1, self.MIDPOINT)",
"def load_defaults(self):\n self.set_motor_limits(self.MOTOR_LEFT, self.LEFT_DEFAULT)\n self.set_motor_limits(self.MOTOR_RIGHT, self.RIGHT_DEFAULT)\n self.set_servo(self.SERVO_1, self.MIDPOINT)",
"def load_defaults(self):\n self.set_motor_limits(self.MOTOR_LEFT, self.LEFT_DEFAULT)\n self.set_motor_limits(self.MOTOR_RIGHT, self.RIGHT_DEFAULT)\n self.set_servo(self.SERVO_1, self.MIDPOINT)",
"def set_default_configs(self):\n\n raise Exception(\"Child classes must override set_default_configs().\")",
"def set_defaults(self):\n self.plastic = False\n self.unset_output()\n self.reward = False\n self.patmod = config.impact_modulation_default",
"def setup(self):\n\n default_config = self.read()\n\n self.write(default_config)",
"def defaults(self):\n self.lib.iperf_defaults(self._test)",
"def __init__(self, **user_options):\n self.options = config.default_options.copy()\n self.configure(**user_options)",
"def set_default_parameters(self):\n super().set_default_parameters()",
"def get_default_config(self):\n \n config = {}\n \n # default z_0_hat, zeros, flexible\n config['z_0_hat_option'] = 'flexible'\n config['initial_z_0_hat'] = np.zeros(self.dimension)\n \n # default P_0_hat, identity times a small scalar, flexible\n config['P_0_hat_option'] = 'flexible'\n config['initial_P_0_hat'] = 0.1 * np.eye(self.dimension)\n \n # default A, identity, flexible\n config['AB_option'] = 'flexible'\n config['initial_A'] = np.eye(self.dimension)\n config['initial_B'] = np.zeros((self.dimension, self.control_dimension))\n \n # default Q, identity times a small scalar, flexible\n config['Q_option'] = 'flexible'\n config['initial_Q'] = 0.1 * np.eye(self.dimension)\n \n # default R, identity times a small scalar, flexible\n config['R_option'] = 'flexible'\n config['initial_R'] = 0.1 * np.eye(self.dimension)\n \n # default stopping criteria, threshold 1e-5, num_iterations 1000\n # stop whenever either of the two critieria is reached\n config['threshold'] = 1e-5\n config['num_iterations'] = 1000\n\n return config",
"def build_config(self, config):\n config.setdefaults('Makesmith Settings', {'COMport': 'COM5', 'xPitch': 20, 'openFile': \" \"})",
"def set_default_params(self):\n print('------------------')\n print('Setting default parameters with file ', self.input_file_name)\n if 'ssephem' not in self.__dict__:\n self.__dict__['ssephem'] = 'DE436'\n print('Setting default Solar System Ephemeris: DE436')\n if 'clock' not in self.__dict__:\n self.__dict__['clock'] = None\n print('Setting a default Enterprise clock convention (check the code)')\n if 'setupsamp' not in self.__dict__:\n self.__dict__['setupsamp'] = False\n if 'psrlist' in self.__dict__:\n self.psrlist = np.loadtxt(self.psrlist, dtype=np.unicode_)\n print('Only using pulsars from psrlist')\n else:\n self.__dict__['psrlist'] = []\n print('Using all available pulsars from .par/.tim directory')\n if 'psrcachefile' not in self.__dict__:\n self.psrcachefile = None\n if 'tm' not in self.__dict__:\n self.tm = 'default'\n print('Setting a default linear timing model')\n if 'inc_events' not in self.__dict__:\n self.inc_events = True\n print('Including transient events to specific pulsar models')\n if 'fref' not in self.__dict__:\n self.fref = 1400 # MHz\n print('Setting reference radio frequency to 1400 MHz')\n if 'mcmc_covm_csv' in self.__dict__ and os.path.isfile(self.mcmc_covm_csv):\n print('MCMC jump covariance matrix is available')\n self.__dict__['mcmc_covm'] = pd.read_csv(self.mcmc_covm_csv, index_col=0)\n else:\n self.__dict__['mcmc_covm'] = None\n # Copying default priors from StandardModels/CustomModels object\n # Priors are chosen not to be model-specific because HyperModel\n # (which is the only reason to have multiple models) does not support\n # different priors for different models\n for prior_key, prior_default in self.noise_model_obj().priors.items():\n if prior_key not in self.__dict__.keys():\n self.__dict__[prior_key] = prior_default\n\n # Model-dependent parameters\n for mkey in self.models:\n\n self.models[mkey].modeldict = dict()\n\n print('------------------')",
"def default_config(cls) -> dict:\n return {\n \"observation\": {\n \"type\": \"Kinematics\"\n },\n \"action\": {\n \"type\": \"DiscreteMetaAction\"\n },\n \"simulation_frequency\": 15, # [Hz]\n \"policy_frequency\": 1, # [Hz]\n \"other_vehicles_type\": \"highway_env.vehicle.behavior.IDMVehicle\",\n \"screen_width\": 600, # [px]\n \"screen_height\": 150, # [px]\n \"centering_position\": [0.3, 0.5],\n \"scaling\": 5.5,\n \"show_trajectories\": False,\n \"render_agent\": True,\n \"offscreen_rendering\": os.environ.get(\"OFFSCREEN_RENDERING\", \"0\") == \"1\",\n \"manual_control\": False,\n \"real_time_rendering\": False\n }",
"def set_defaults(self):\n\n for k, v in self.DEFAULTS.items():\n if not getattr(self, k, None):\n setattr(self, k, v)",
"def setup_defaults(self):\n status = self._lib_vscf_ecc.vscf_ecc_setup_defaults(self.ctx)\n VscfStatus.handle_status(status)",
"def default_config():\n return {'grid': {'regular': {'width': 0.05,\n 'wake': {'width': 0.1, 'progression': None},\n 'layers': 50,\n 'thickness': 5,\n 'boundary_layer': { 'initial_thickness': 4.2e-5 }}}}",
"def set_defaults(self, **kw):\n group = kw.pop('group', None)\n for o, v in kw.items():\n self.cfg_fixture.set_default(o, v, group=group)",
"def initConfiguration():\n UTIL.SYS.s_configuration.setDefaults([\n [\"SYS_COLOR_LOG\", \"1\"],\n [\"HOST\", \"127.0.0.1\"],\n [\"NCTRS_TM_SERVER_PORT\", \"2502\"],\n [\"NCTRS_TM_DU_VERSION\", \"V0\"],\n [\"SPACECRAFT_ID\", \"758\"]])",
"def _set_default_options(options):\n\n options_defaults = {\n 'run_storage_base': None,\n 'watch': False,\n 'verbose': True,\n # 'uploader_config': 'uploader_config.toml',\n 'logging_config': 'logging_config.toml',\n 'notify_frequency': 60*24, # daily\n 'skip_bad_permissions': True,\n }\n\n for k, v in options_defaults.items():\n # Tranfer any known values set in options.config to the top level\n # options.\n # Any key not present in the config file gets set to the default value.\n if k not in options.config:\n options[k] = v\n else:\n options[k] = options.config[k]\n del options.config[k]\n\n if options[k] is None:\n options[k] = v\n\n return options",
"def init_config(self):\n pass",
"def _set_config():\n\n\tdebug_msg = \"load default config yaml file\"\n\tlogger.debug(debug_msg)\n\n\tconfig_file_parser(paths.CONFIG_FILE, override_options=True)",
"def set_defaults(self):\r\n for name, option in self.options.iteritems():\r\n if not option.is_required():\r\n self.set_value(name, option, option.default)",
"def defaultPreset (self):\n assert False, \"To be implemented by child\"",
"def __init__(self, defaults=None, default_sec=\"Uncategorized\"):\n super(XFasterConfig, self).__init__(dict_type=OrderedDict)\n self.default_sec = default_sec\n self.add_section(default_sec)\n if defaults is not None:\n self.update(defaults)",
"def init(self, cr):\n param_obj = self.pool.get('ir.config_parameter')\n for key, func in _default_parameters.iteritems():\n ids = param_obj.search(cr, 1, [('key', '=', key)])\n if not ids:\n param_obj.set_param(cr, 1, key, func())",
"def test_set_defaults(self):\r\n self.assertEqual(self.config.values['option1'], 1337)\r\n self.assertNotIn('option2', self.config.values)",
"def _init_config_(self):\n self._config= {}"
]
| [
"0.68206334",
"0.66279453",
"0.64431655",
"0.64198345",
"0.64198345",
"0.64198345",
"0.6298531",
"0.62911874",
"0.62797886",
"0.6273936",
"0.6201167",
"0.61984444",
"0.6187273",
"0.61839336",
"0.6172492",
"0.61234754",
"0.60714173",
"0.60698897",
"0.6066412",
"0.6030387",
"0.6023291",
"0.59717476",
"0.5932747",
"0.5918859",
"0.5901959",
"0.5885238",
"0.58710784",
"0.58697385",
"0.585028",
"0.5843618"
]
| 0.6774129 | 1 |
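Illustrative sketch for the configure_for_pokered row above: the function builds a plain dict of attributes keyed off config.path. How the surrounding project consumes that dict is not shown here, so the apply step below (copying entries onto the config object) and the minimal Config stand-in are assumptions for illustration only.

import os

class Config:
    # Hypothetical stand-in; only the `path` attribute is assumed by the row above.
    def __init__(self, path):
        self.path = path

def apply_attrs(config, attrs):
    # One plausible way to consume the returned dict: copy each entry onto the config.
    for name, value in attrs.items():
        setattr(config, name, value)
    return config

config = Config("/tmp/pokered")
attrs = {
    "version": "red",
    "map_dir": os.path.join(config.path, "maps/"),
    "gfx_dir": os.path.join(config.path, "gfx/tilesets/"),
    "palettes_on": False,
}
apply_attrs(config, attrs)
print(config.version, config.map_dir)  # red /tmp/pokered/maps/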
Sets default configuration values for pokecrystal. These should eventually be moved into the configuration module. | def configure_for_pokecrystal(config=config):
attrs = {
"version": "crystal",
"map_dir": os.path.join(config.path, 'maps/'),
"gfx_dir": os.path.join(config.path, 'gfx/tilesets/'),
"to_gfx_name": lambda x : '%.2d' % x,
"block_dir": os.path.join(config.path, 'tilesets/'),
"block_ext": '_metatiles.bin',
"palettes_on": True,
"palmap_dir": os.path.join(config.path, 'tilesets/'),
"palette_dir": os.path.join(config.path, 'tilesets/'),
"asm_dir": os.path.join(config.path, 'maps/'),
"constants_filename": os.path.join(config.path, 'constants.asm'),
"header_dir": os.path.join(config.path, 'maps/'),
"time_of_day": 1,
}
return attrs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def initConfiguration():\n UTIL.SYS.s_configuration.setDefaults([\n [\"SYS_COLOR_LOG\", \"1\"],\n [\"HOST\", \"127.0.0.1\"],\n [\"NCTRS_TM_SERVER_PORT\", \"2502\"],\n [\"NCTRS_TM_DU_VERSION\", \"V0\"],\n [\"SPACECRAFT_ID\", \"758\"]])",
"def setdefaults(self):\n self.config = {\n 'dbuser': Infopage.DEFAULT_DBUSER,\n 'dbname': Infopage.DEFAULT_DBNAME,\n 'dbpassword': Infopage.DEFAULT_DBPASSWORD,\n 'dbhost': Infopage.DEFAULT_DBHOST\n }",
"def set_default_params(self):\n print('------------------')\n print('Setting default parameters with file ', self.input_file_name)\n if 'ssephem' not in self.__dict__:\n self.__dict__['ssephem'] = 'DE436'\n print('Setting default Solar System Ephemeris: DE436')\n if 'clock' not in self.__dict__:\n self.__dict__['clock'] = None\n print('Setting a default Enterprise clock convention (check the code)')\n if 'setupsamp' not in self.__dict__:\n self.__dict__['setupsamp'] = False\n if 'psrlist' in self.__dict__:\n self.psrlist = np.loadtxt(self.psrlist, dtype=np.unicode_)\n print('Only using pulsars from psrlist')\n else:\n self.__dict__['psrlist'] = []\n print('Using all available pulsars from .par/.tim directory')\n if 'psrcachefile' not in self.__dict__:\n self.psrcachefile = None\n if 'tm' not in self.__dict__:\n self.tm = 'default'\n print('Setting a default linear timing model')\n if 'inc_events' not in self.__dict__:\n self.inc_events = True\n print('Including transient events to specific pulsar models')\n if 'fref' not in self.__dict__:\n self.fref = 1400 # MHz\n print('Setting reference radio frequency to 1400 MHz')\n if 'mcmc_covm_csv' in self.__dict__ and os.path.isfile(self.mcmc_covm_csv):\n print('MCMC jump covariance matrix is available')\n self.__dict__['mcmc_covm'] = pd.read_csv(self.mcmc_covm_csv, index_col=0)\n else:\n self.__dict__['mcmc_covm'] = None\n # Copying default priors from StandardModels/CustomModels object\n # Priors are chosen not to be model-specific because HyperModel\n # (which is the only reason to have multiple models) does not support\n # different priors for different models\n for prior_key, prior_default in self.noise_model_obj().priors.items():\n if prior_key not in self.__dict__.keys():\n self.__dict__[prior_key] = prior_default\n\n # Model-dependent parameters\n for mkey in self.models:\n\n self.models[mkey].modeldict = dict()\n\n print('------------------')",
"def set_defaults(self):\n self.plastic = False\n self.unset_output()\n self.reward = False\n self.patmod = config.impact_modulation_default",
"def load_defaults(self):\n self.set_motor_limits(self.MOTOR_LEFT, self.LEFT_DEFAULT)\n self.set_motor_limits(self.MOTOR_RIGHT, self.RIGHT_DEFAULT)\n self.set_servo(self.SERVO_1, self.MIDPOINT)",
"def load_defaults(self):\n self.set_motor_limits(self.MOTOR_LEFT, self.LEFT_DEFAULT)\n self.set_motor_limits(self.MOTOR_RIGHT, self.RIGHT_DEFAULT)\n self.set_servo(self.SERVO_1, self.MIDPOINT)",
"def load_defaults(self):\n self.set_motor_limits(self.MOTOR_LEFT, self.LEFT_DEFAULT)\n self.set_motor_limits(self.MOTOR_RIGHT, self.RIGHT_DEFAULT)\n self.set_servo(self.SERVO_1, self.MIDPOINT)",
"def config_init(self):\n\n game_opts = [\n\n # Execution Options\n ('debug',False), # Toggle Debug Messaging\n ('log_path',False), # Turn on logging (w/path)\n ('log_lvl',logging.DEBUG), # Set log level\n\n # World Generation Options\n ('flex_limit',3) # Sets the maximum variance\n\n ]\n\n # Attempts to pull each value from the configuration\n # if not in config, the default value defined above\n # is set instead\n for opt in game_opts:\n try:\n setattr(self,opt[0],self.conf.conf_dict[opt[0]])\n except:\n setattr(self,opt[0],opt[1])\n continue",
"def default_config():\n return {'grid': {'regular': {'width': 0.05,\n 'wake': {'width': 0.1, 'progression': None},\n 'layers': 50,\n 'thickness': 5,\n 'boundary_layer': { 'initial_thickness': 4.2e-5 }}}}",
"def set_defaults(self, **kw):\n group = kw.pop('group', None)\n for o, v in kw.items():\n self.cfg_fixture.set_default(o, v, group=group)",
"def defaults(self):\n self.lib.iperf_defaults(self._test)",
"def set_default_configs(self):\n\n raise Exception(\"Child classes must override set_default_configs().\")",
"def config(self, **kw):\n self.cfg_fixture.config(**kw)",
"def defaultconfig(self):\r\n\r\n config_data = {\r\n \"path_to_database\": \"FUDB/FOLLOWUP.DB\",\r\n \"path_to_frontend\": \"FUDB/\",\r\n \"path_to_dcs_info\": \"FUDB/\",\r\n \"path_to_bin\": \"bin/\",\r\n \"path_to_excels_exported_from_database\": \"excels exported/\",\r\n \"path_to_excels_to_be_imported_in_database\": \"excels to be imported/\",\r\n \"path_to_new_opfiles\": \"DC BATCHES IN WORK/0 NEW/\",\r\n \"path_to_batches_unassigned\": \"DC BATCHES IN WORK/1 UNASSIGNED/\",\r\n \"path_to_batches_prepfiles\": \"DC BATCHES IN WORK/2 PREPARED FILES/\",\r\n \"path_to_batches_assigned\": \"DC BATCHES IN WORK/3 ASSIGNED/\",\r\n \"path_to_batches_tobechecked\": \"DC BATCHES IN WORK/4 TO BE CHECKED/\",\r\n \"path_to_batches_tbimported\": \"DC BATCHES IN WORK/5 TO BE IMPORTED/\",\r\n \"path_to_batches_finished\": \"DC BATCHES IN WORK/6 FINISHED/\",\r\n \"path_to_batches_instandby\": \"DC BATCHES IN WORK/7 IN STANDBY/\",\r\n \"path_to_batches_unrecordable\": \"DC BATCHES IN WORK/8 UNRECORDABLE/\",\r\n \"batch_status_options_responsible\": \"PREP. OP FILE, IMPORTATION & SPLIT FILE, RELIABILITY & DATA UPGRADE, CHECK OP FILE, CHECK SPLIT FILE, CHECK FRONT END, **TO BE CHECKED\",\r\n \"batch_status_options_proofreader\": \"OP FILE OK, SPLIT FILE OK, FRONT END OK, **TO BE IMPORTED, **FINISHED, **REWORK, **STANDBY, **UNRECORDABLE\",\r\n \"batch_status_options_overall\": \"ONGOING, STANDBY, FINISHED, UNRECORDABLE\",\r\n \"aircrafts\": \"A300, A300-600, A310, A320, A330, A340, A350, A380\",\r\n \"split_batch_factor\": \"2, 3, 4, 5, 6, 7, 8, 9\",\r\n \"IDlentgh\": \"6\",\r\n \"port\": \"5000\"\r\n }\r\n \r\n if not os.path.isfile(os.path.join(self.cwd, \"config.json\")):\r\n self.func.write_json(config_data, self.cwd, fname=\"config.json\")",
"def default_config(cls) -> dict:\n return {\n \"observation\": {\n \"type\": \"Kinematics\"\n },\n \"action\": {\n \"type\": \"DiscreteMetaAction\"\n },\n \"simulation_frequency\": 15, # [Hz]\n \"policy_frequency\": 1, # [Hz]\n \"other_vehicles_type\": \"highway_env.vehicle.behavior.IDMVehicle\",\n \"screen_width\": 600, # [px]\n \"screen_height\": 150, # [px]\n \"centering_position\": [0.3, 0.5],\n \"scaling\": 5.5,\n \"show_trajectories\": False,\n \"render_agent\": True,\n \"offscreen_rendering\": os.environ.get(\"OFFSCREEN_RENDERING\", \"0\") == \"1\",\n \"manual_control\": False,\n \"real_time_rendering\": False\n }",
"def setDefaults(self) -> None:\n self.night_boundary = -12.0\n self.new_moon_phase_threshold = 20.0",
"def default_configs(cls):\n config = super().default_configs()\n config.update(\n {\n \"entry_type\": \"ft.onto.base_ontology.Document\",\n \"model_name\": \"ktrapeznikov/biobert_v1.1_pubmed_squad_v2\",\n \"question\": \"Where do I live\",\n \"max_answer_len\": 15,\n \"cuda_devices\": -1,\n \"handle_impossible_answer\": False,\n }\n )\n return config",
"def setup(self):\n\n default_config = self.read()\n\n self.write(default_config)",
"def load_standard_parameters(self):\n paradic = {'x':'0',\n 'y':'0',\n 'n_oct':'8',\n 'n_spo':'3',\n 'sigma_min':'0.8',\n 'delta_min':'0.5',\n 'sigma_in':'0.5',\n 'C_DoG':'0.015',\n 'C_edge':'10',\n 'n_bins':'36',\n 'lambda_ori':'1.5',\n 't':'0.8',\n 'n_hist':'4',\n 'n_ori':'8',\n 'lambda_descr':'6',\n 'flag_match':'1',\n 'C_match':'0.6'}\n self.cfg['param']['paradic'] = paradic\n self.cfg.save()",
"def set_default_protein_options(treebuilder):\n treebuilder.options = get_default_options()",
"def __init__(self, **user_options):\n self.options = config.default_options.copy()\n self.configure(**user_options)",
"def setup_defaults(self):\n status = self._lib_vscf_ecc.vscf_ecc_setup_defaults(self.ctx)\n VscfStatus.handle_status(status)",
"def build_config(self, config):\n config.setdefaults('Makesmith Settings', {'COMport': 'COM5', 'xPitch': 20, 'openFile': \" \"})",
"def setDefaultSettings():\n if PLATFORM == 'Windows':\n font = 'Consolas'\n else:\n font = 'Monospace'\n\n preferenceNode = nuke.toNode('preferences')\n # viewer settings\n preferenceNode['maxPanels'].setValue(5)\n preferenceNode['TextureSize'].setValue('2048x2048')\n preferenceNode['viewer_bg_color_3D'].setValue(1280068863)\n preferenceNode['viewer_fg_color_3D'].setValue(4294967295L)\n preferenceNode['Viewer3DControlEmulation'].setValue('Maya')\n preferenceNode['middleButtonPans'].setValue(False)\n preferenceNode['dot_node_scale'].setValue(1.5)\n\n # script editor settings\n preferenceNode['clearOnSuccess'].setValue(False)\n preferenceNode['echoAllCommands'].setValue(True)\n preferenceNode['ScriptEditorFont'].setValue(font)\n preferenceNode['ScriptEditorFontSize'].setValue(12.0)\n preferenceNode['kwdsFgColour'].setValue(2629566719L)\n preferenceNode['stringLiteralsFgColourDQ'].setValue(10354943)\n preferenceNode['stringLiteralsFgColourSQ'].setValue(10354943)\n preferenceNode['commentsFgColour'].setValue(2442236415L)",
"def set_missing_defaults(self):\n if 'pub_options' not in self.config:\n self.config['pub_options'] = {\n 'acknowledge': True,\n 'retain': True\n }\n\n if 'sub_options' not in self.config:\n self.config['sub_options'] = {\n 'get_retained': False\n }\n\n if 'subscribed_topics' not in self.config:\n self.config['subscribed_topics'] = None\n\n if 'replay_events' not in self.config:\n self.config['replay_events'] = False\n\n if 'max_reconnect_retries' not in self.config:\n self.config['max_reconnect_retries'] = 10",
"def get_default_configuration():\n # Pre-configured default values for various parameters:\n default_config = {\n \"name\":\"Transient\",\n \"auto\":True,\n \"ra\":0.0,\n \"dec\":0.0,\n \"radius\":10.0,\n \"resolution\":1.8,\n \"energy\":70.0,\n \"pixsize\": 16,\n \"respcode\":\"czti_Aepix.out\",\n \"txycode\":\"radec2txty.out\",\n \"resppath\":\"pixarea\",\n \"plotfile\":\"plots/localize.pdf\",\n\t \"lc_bin\":5.0,\n\t \"typ\":\"band\",\n\t \"comp_bin\":20,\t\n \"verbose\":True,\n \"do_fit\":True\n }\n required_config = {\n 'l2file':\"_level2.evt\",\n 'infile':\"file.evt\",\n 'mkffile':\"file.mkf\",\n 'trigtime':0.00,\n 'transtart':0.00,\n 'tranend':0.00,\n 'bkg1start':0.00,\n 'bkg1end':0.00,\n 'bkg2start':0.00,\n 'bkg2end':0.00,\n\t 'alpha':0.00,\n\t 'beta':0.00,\n\t 'E0':0.00,\n\t 'A':0.00\n }\n return default_config, required_config",
"def setMplDefaults():\n\n rcParams['figure.dpi'] = 300\n rcParams['figure.figsize'] = (4.5, 3)\n rcParams['savefig.dpi'] = 300\n rcParams['axes.grid'] = True\n rcParams['grid.linewidth'] = 0.5\n rcParams['grid.linestyle'] = ':'\n rcParams['font.family'] = 'Arial', 'Helvetica', 'DejaVu Sans'\n rcParams['font.size'] = 6\n rcParams['lines.markersize'] = 4\n rcParams['lines.linestyle'] = '-'\n rcParams['savefig.transparent'] = False\n rcParams['figure.subplot.bottom'] = 0.15\n rcParams['figure.subplot.top'] = 0.85\n rcParams['figure.subplot.left'] = 0.15\n rcParams['figure.subplot.right'] = 0.9",
"def override_config(self, config_path):\n self.config = p_config.ProsperConfig(config_path)",
"def _set_config():\n\n\tdebug_msg = \"load default config yaml file\"\n\tlogger.debug(debug_msg)\n\n\tconfig_file_parser(paths.CONFIG_FILE, override_options=True)",
"def _create_default_config(self):\n self.options.setdefault('options.admin_passwd', '')\n sys.path.append(self.openerp_dir)\n sys.path.extend([egg.location for egg in self.ws])\n from openerp.tools.config import configmanager\n configmanager(self.config_path).save()"
]
| [
"0.6538164",
"0.65269065",
"0.6479381",
"0.638243",
"0.63516474",
"0.63516474",
"0.63516474",
"0.62303483",
"0.61623925",
"0.6151685",
"0.61513054",
"0.59917414",
"0.5970082",
"0.59475154",
"0.59347796",
"0.5927527",
"0.5883482",
"0.5882603",
"0.5876634",
"0.58725595",
"0.5859741",
"0.5838842",
"0.5828273",
"0.58266085",
"0.5807829",
"0.5751658",
"0.5717748",
"0.5716498",
"0.5710552",
"0.57090646"
]
| 0.6968128 | 0 |
Route for the processing of an essay submitted by POST request | def essay_post_new():
text0 = request.form['text']
essay = {}
status = 200
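    # In debug mode, let any exception from processing propagate so the full traceback is visible.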
    if app.debug:
essay = Flask_process_text(text0)
else:
try:
essay = Flask_process_text(text0)
except Exception as e:
            # Catch any unhandled exception raised by the processing code.
            # TODO: return a friendlier error message that does not expose internal details.
status = 500
            essay = { 'error' : {
                'status' : status,
                'msg' : str(e)}}
return jsonify(essay) , status | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def post(self):",
"def post():\n pass",
"def post(self, request):\n pass",
"def do_POST(self):\r\n self._send_handler_response('POST')",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def do_POST(self): # pylint: disable=invalid-name\n self.handle_request()",
"def post(self, *args, **kwargs):\n return self.handle_post_request()",
"def post(self, *args, **kwargs):\n return self._hit(\"POST\", *args, **kwargs)",
"def post(self, *args, **kwargs):\n self.request(\"post\", *args, **kwargs)",
"def do_POST(self):\r\n self.do_GET()",
"def requestSubmitted(request):",
"def post(self, request):\n return self.serviceHandler.addEvent(request.data)",
"def post(self, url_pattern):\n return self.route(url_pattern, methods=['POST'])",
"def on_post(self, req, resp):\n LOGGER = logging.getLogger()\n \n resp.set_header('Content-Type', 'text/json')\n\n raw_json = req.stream.read().decode('utf-8')\n content = json.loads(raw_json, encoding='utf-8')\n\n try:\n # Parse parameters\n pdf_template = content.get(\"pdf_template\")\n pages = int(content.get(\"pages\"))\n starting_date = content.get(\"date\")\n email = content.get(\"email\")\n font = content.get(\"font\")\n\n a4_diary = create.create_a4_diary(pdf_template,\n pages,\n starting_date,\n email=email,\n font=font)\n\n a5_booklet = create.convert_to_a5_booklet(a4_diary)\n resp.body = json.dumps([str(a4_diary), str(a5_booklet)])\n LOGGER.info(\"Document created {}\".format(pdf_template)) \n except Exception as e:\n LOGGER.error(\"Error creating document\" , exc_info=True)\n raise falcon.HTTPInternalServerError(title=\"Error creating document: \" + str(type(e)),\n description=(str(e) +\n ','.join(traceback.format_tb(e.__traceback__))))",
"def post(self, pattern, handler):\n return self.route(Router.POST, pattern, handler)",
"def post(self):\n # se captura y se parsea a json el body del request recibido por el\n # webhook\n request_body = json.loads(self.request.body)\n\n for body in request_body:\n \"\"\" Evaluar el tipo de evento ya que trae campos diferentes \"\"\"\n logging.info(request_body)\n\n event = str(body['event'])\n correo = str(body['email'])\n numero_folio = str(body['numero_folio'])\n tipo_dte = str(body['tipo_dte'])\n\n logging.info(event)\n\n if event and correo and numero_folio and tipo_dte:\n\n if event == 'processed':\n email_model = EmailModel.search_email(correo, numero_folio, tipo_dte)\n if not email_model == None:\n email_model.smtp_id = body['smtp-id']\n email_model.processed_date = datetime.datetime.fromtimestamp(body['timestamp'])\n email_model.processed_event = event\n email_model.processed_sg_event_id = body['sg_event_id']\n email_model.processed_sg_message_id = body['sg_message_id']\n email_model.correo = str(body['email'])\n email_model.numero_folio = str(body['numero_folio'])\n email_model.tipo_dte = str(body['tipo_dte'])\n email_model.put()\n else:\n e = EmailModel()\n e.smtp_id = body['smtp-id']\n e.processed_date = datetime.datetime.fromtimestamp(body['timestamp'])\n e.processed_event = event\n e.processed_sg_event_id = body['sg_event_id']\n e.processed_sg_message_id = body['sg_message_id']\n e.correo = str(body['email'])\n e.numero_folio = str(body['numero_folio'])\n e.tipo_dte = str(body['tipo_dte'])\n e.put()\n\n elif event == 'delivered':\n email_model = EmailModel.search_email(correo, numero_folio, tipo_dte)\n if not email_model == None:\n email_model.smtp_id = body['smtp-id']\n email_model.delivered_date = datetime.datetime.fromtimestamp(body['timestamp'])\n email_model.delivered_event = event\n email_model.delivered_sg_event_id = body['sg_event_id']\n email_model.delivered_sg_message_id = body['sg_message_id']\n email_model.delivered_response = body['response']\n email_model.correo = str(body['email'])\n email_model.numero_folio = str(body['numero_folio'])\n email_model.tipo_dte = str(body['tipo_dte'])\n email_model.put()\n else:\n e = EmailModel()\n e.smtp_id = body['smtp-id']\n e.delivered_date = datetime.datetime.fromtimestamp(body['timestamp'])\n e.delivered_event = event\n e.delivered_sg_event_id = body['sg_event_id']\n e.delivered_sg_message_id = body['sg_message_id']\n e.delivered_response = body['response']\n e.correo = str(body['email'])\n e.numero_folio = str(body['numero_folio'])\n e.tipo_dte = str(body['tipo_dte'])\n e.put()\n\n elif event == 'open':\n model = EmailModel()\n email_model = EmailModel.search_email(correo, numero_folio, tipo_dte)\n if not email_model == None:\n if email_model.opened_first_date == None:\n email_model.opened_first_date = datetime.datetime.fromtimestamp(body['timestamp'])\n email_model.opened_last_date = datetime.datetime.fromtimestamp(body['timestamp'])\n email_model.opened_event = event\n email_model.opened_ip = body['ip']\n email_model.opened_user_agent = body['useragent']\n email_model.opened_sg_event_id = body['sg_event_id']\n email_model.opened_sg_message_id = body['sg_message_id']\n model.email_add_count(email_model)\n email_model.correo = str(body['email'])\n email_model.numero_folio = str(body['numero_folio'])\n email_model.tipo_dte = str(body['tipo_dte'])\n email_model.put()\n else:\n e = EmailModel()\n if e.opened_first_date == None:\n e.opened_first_date = datetime.datetime.fromtimestamp(body['timestamp'])\n e.opened_last_date = datetime.datetime.fromtimestamp(body['timestamp'])\n e.opened_event = event\n e.opened_ip 
= body['ip']\n e.opened_user_agent = body['useragent']\n e.opened_sg_event_id = body['sg_event_id']\n e.opened_sg_message_id = body['sg_message_id']\n e.email_add_count(e)\n e.correo = str(body['email'])\n e.numero_folio = str(body['numero_folio'])\n e.tipo_dte = str(body['tipo_dte'])\n e.put()\n\n elif event == 'dropped':\n email_model = EmailModel.search_email(correo, numero_folio, tipo_dte)\n if not email_model == None:\n email_model.smtp_id = body['smtp-id']\n email_model.dropped_date = datetime.datetime.fromtimestamp(body['timestamp'])\n email_model.dropped_sg_event_id = body['sg_event_id']\n email_model.dropped_sg_message_id = body['sg_message_id']\n email_model.dropped_reason = body['reason']\n email_model.dropped_event = event\n email_model.correo = str(body['email'])\n email_model.numero_folio = str(body['numero_folio'])\n email_model.tipo_dte = str(body['tipo_dte'])\n email_model.put()\n else:\n e = EmailModel()\n e.smtp_id = body['smtp-id']\n e.dropped_date = datetime.datetime.fromtimestamp(body['timestamp'])\n e.dropped_sg_event_id = body['sg_event_id']\n e.dropped_sg_message_id = body['sg_message_id']\n e.dropped_reason = body['reason']\n e.dropped_event = event\n e.correo = str(body['email'])\n e.numero_folio = str(body['numero_folio'])\n e.tipo_dte = str(body['tipo_dte'])\n e.put()\n\n elif event == 'bounce':\n email_model = EmailModel.search_email(correo, numero_folio, tipo_dte)\n if not email_model == None:\n email_model.bounce_date = datetime.datetime.fromtimestamp(body['timestamp'])\n email_model.bounce_event = event\n email_model.bounce_sg_event_id = body['sg_event_id']\n email_model.bounce_sg_message_id = body['sg_message_id']\n email_model.bounce_reason = body['reason']\n email_model.bounce_status = body['status']\n email_model.bounce_type = body['type']\n email_model.correo = str(body['email'])\n email_model.numero_folio = str(body['numero_folio'])\n email_model.tipo_dte = str(body['tipo_dte'])\n email_model.put()\n else:\n e = EmailModel()\n e.bounce_date = datetime.datetime.fromtimestamp(body['timestamp'])\n e.bounce_event = event\n e.bounce_sg_event_id = body['sg_event_id']\n e.bounce_sg_message_id = body['sg_message_id']\n e.bounce_reason = str(body['reason']).decode(\"utf-8\")\n e.bounce_status = body['status']\n e.bounce_type = body['type']\n e.correo = str(body['email'])\n e.numero_folio = str(body['numero_folio'])\n e.tipo_dte = str(body['tipo_dte'])\n e.put()\n\n elif event == 'unsubscribe':\n email_model = EmailModel.search_email(correo, numero_folio, tipo_dte)\n if not email_model == None:\n email_model.unsubscribe_date = datetime.datetime.fromtimestamp(body['timestamp'])\n email_model.unsubscribe_uid = body['uid']\n email_model.unsubscribe_purchase = body['purchase']\n email_model.unsubscribe_id = body['id']\n email_model.unsubscribe_event = body['event']\n email_model.correo = str(body['email'])\n email_model.numero_folio = str(body['numero_folio'])\n email_model.tipo_dte = str(body['tipo_dte'])\n email_model.put()\n else:\n e = EmailModel()\n e.unsubscribe_date = datetime.datetime.fromtimestamp(body['timestamp'])\n e.unsubscribe_uid = body['uid']\n e.unsubscribe_purchase = body['purchase']\n e.unsubscribe_id = body['id']\n e.unsubscribe_event = body['event']\n e.correo = str(body['email'])\n e.numero_folio = str(body['numero_folio'])\n e.tipo_dte = str(body['tipo_dte'])\n e.put()\n else:\n logging.info('body con campos vacios')",
"def post(self):\n send_slack_log('Entered /slack/submit')\n send_slack_log('Request info:')\n send_slack_log(str(request.form))\n if request.form.get('payload') is None:\n send_slack_log('Invalid request: no payload')\n return\n else:\n return handle_interaction(json.loads(request.form['payload']))"
]
| [
"0.6570866",
"0.65123093",
"0.6362852",
"0.6359819",
"0.63241047",
"0.63241047",
"0.63241047",
"0.63241047",
"0.63241047",
"0.63241047",
"0.63241047",
"0.63241047",
"0.63241047",
"0.63241047",
"0.63241047",
"0.63241047",
"0.63241047",
"0.63241047",
"0.63001585",
"0.6274297",
"0.6242376",
"0.6239611",
"0.6177351",
"0.60859674",
"0.60752136",
"0.5986174",
"0.59689474",
"0.59600776",
"0.59449214",
"0.59004843"
]
| 0.6817374 | 0 |
Class decorator. Takes a dictionary mapping keybind -> signal. If one of the keybinds is detected, the specified signal is sent with the calling object as the only argument. | def signal_map(mapping):
def decorator(cls):
if not hasattr(cls, 'signals'):
return cls
cls.__keymap = mapping
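        # Replace keypress so keys in the map emit their signal; unmapped keys fall through to the parent class.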
def keypress(self, size, key):
if key in self.__keymap:
signal = self.__keymap[key]
logger.debug("Keypress '{}' sent signal '{}'".format(
key, signal))
# Emit signal with self as the only argument
urwid.emit_signal(self, self.__keymap[key], self)
super(cls, self).keypress(size, None)
return True
return super(cls, self).keypress(size, key)
cls.keypress = keypress
return cls
return decorator | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_forwards_args(self):\n seen_kwargs = {}\n\n def callback(**kwargs):\n seen_kwargs.update(kwargs)\n\n SignalHook(self.test_extension, self.signal, callback)\n self.signal.send(sender=self, foo=1, bar=2)\n\n self.assertTrue('foo', seen_kwargs)\n self.assertEqual(seen_kwargs['foo'], 1)\n self.assertTrue('bar', seen_kwargs)\n self.assertEqual(seen_kwargs['bar'], 2)",
"def connect(signal):\n def wrapper(func):\n REGISTRY.setdefault(signal, Signal(signal)).connect(func)\n return func\n return wrapper",
"def update_signal_processing_parameters(self, **kwargs):\n for key, value in kwargs.items():\n if key in self.__dict__:\n self.__dict__[key] = value",
"def signal(self, args):\n pass",
"def receiver(signal, **kwargs):\n def _decorator(func):\n signal.connect(func, **kwargs)\n return func\n return _decorator",
"def send_signal(self, sig):\r\n sig = { 0x01 : \"HUP\",\r\n 0x02 : \"INT\",\r\n 0x03 : \"NEWNYM\",\r\n 0x0A : \"USR1\",\r\n 0x0C : \"USR2\",\r\n 0x0F : \"TERM\" }.get(sig,sig)\r\n self.sendAndRecv(\"SIGNAL %s\\r\\n\"%sig)",
"def wrap(signal_name, sender=dispatcher.Anonymous, *args, **kwargs): # pylint: disable=keyword-arg-before-vararg\n safe = kwargs.pop('safe', False)\n signal_name = signal_name.upper().replace('-', '_')\n send_func = safe_send if safe else send\n try:\n before_signal = globals()['BEFORE_' + signal_name]\n success_signal = globals()['SUCCESSFUL_' + signal_name]\n after_signal = globals()['AFTER_' + signal_name]\n except KeyError:\n raise ValueError('Invalid wrapped signal name: {}'.format(signal_name))\n try:\n send_func(before_signal, sender, *args, **kwargs)\n yield\n send_func(success_signal, sender, *args, **kwargs)\n finally:\n _, exc, _ = sys.exc_info()\n if exc:\n log_error_func(exc)\n send_func(after_signal, sender, *args, **kwargs)",
"def send(signal, *args, **kwargs):\n _dispatcher.send(signal=signal, *args, **kwargs)",
"def set_signal_handlers(cls, signals):\n for sig in signals:\n try:\n original_handler = signal.getsignal(sig)\n if original_handler == cls.signal_handler:\n continue\n signal.signal(sig, cls.signal_handler)\n cls.__signal_handlers[sig] = original_handler\n except Exception as e:\n pass",
"def catchall_signal_handler(*args, **kwargs): \n print(\"Caught signal (in catchall handler) \" + kwargs['dbus_interface'] + \".\" + kwargs['member'])\n for arg in args:\n print(\" \" + str(arg))",
"def __call__(self, *args, **kwargs):\n for key, obj in self._dict.items():\n key[0](obj, *args, **kwargs)",
"def bind(self, keysym, func):\n if type(keysym) == list:\n [self.bind(key, func) for key in keysym]\n elif keysym in self.binds:\n self.binds[keysym].append(func)\n else:\n self.binds[keysym] = [func]",
"def fire(obj, name, *args, **kwargs):\n for func in _signals(obj, name):\n func(*args, **kwargs)",
"def enableSignalDebugging(self, **kwargs: Any) -> None:\n\n def f(*args):\n return None\n connectCall: Callable = kwargs.get('connectCall', f)\n disconnectCall: Callable = kwargs.get('disconnectCall', f)\n emitCall: Callable = kwargs.get('emitCall', f)\n\n def printIt(msg: str) -> Callable:\n\n def call(*args: Any) -> None:\n print(msg, args)\n\n return call\n\n # Monkey-patch.\n\n QtCore.QObject.connect = self._wrapConnect(connectCall)\n QtCore.QObject.disconnect = self._wrapDisconnect(disconnectCall)\n\n def new_emit(self, *args: Any) -> None: # type:ignore\n emitCall(self, *args)\n self._oldEmit(self, *args)\n\n QtCore.QObject.emit = new_emit",
"def _handle_key_event(self, key, modifiers, mapping):\n if key in mapping:\n for callback in mapping[key]:\n callback()",
"def add_signals(self, signals):\n\n self.signals = {**self.signals, **signals} # merge the two",
"def wrapped(signal_name, sender=dispatcher.Anonymous, safe=False):\n @wrapt.decorator\n def signal_wrapped(wrapped_func, _, args, kwargs):\n def signal_wrapper(*args, **kwargs):\n with wrap(signal_name, sender, safe):\n return wrapped_func(*args, **kwargs)\n\n return signal_wrapper(*args, **kwargs)\n\n return signal_wrapped",
"def _send_signals(self, svc_names: List[str], sig: str):\n pass",
"def signal(sig, action): # real signature unknown; restored from __doc__\n pass",
"def __init__(self, *args, **kwargs):\n if len(args) > 0 and isinstance(args[0], BaseSignal):\n # Pretend it is a hs signal, copy axes and metadata\n sdict = args[0]._to_dictionary()\n self.__class__.__init__(self, **sdict)\n else:\n BaseSignal.__init__(self, *args, **kwargs)",
"def set_kwargs(self, kwargs):\n accept = {\"command\" : None,\n \"clicked\" : False,\n \"unclick\" : False,\n \"active\" : True,\n \"key_bindings\" : []}\n for kwarg in kwargs:\n if kwarg in accept:\n accept[kwarg] = kwargs[kwarg]\n self.__dict__.update(accept)",
"def init_func_bind_map(self):\n self.protocol_func_bind_map = {\n \"StartProxy\": self.on_start_proxy,\n \"BindStatus\": self.on_bind_status,\n \"ReqProxy\": self.on_req_proxy,\n \"AuthResp\": self.on_auth_resp,\n \"NewTunnel\": self.on_new_tunnel,\n \"TokenExpired\": self.on_token_expired,\n \"Pong\": self.on_pong,\n \"ResetClientid\": self.on_reset_clientid\n }",
"def press_bound_key(self, event):\n if event.key in self.key_bindings:\n self.press()\n elif (event.key, pg.key.get_mods()) in self.key_bindings:\n self.press()",
"def signalAll(self, signal, startswithname=None):\n for name in self.processes.keys():\n if startswithname is None or name.startswith(startswithname):\n self.signalProcess(signal, name)",
"def signal_callbacks(self):\n for name in self.lookup_dict[self.__class__]:\n yield name, getattr(self, name)",
"def set_handler(key):\n def wrapper(func):\n func.set_key = key\n return func\n\n return wrapper",
"def keypress_signal_from_behaviors_coding_map(self, event):\n self.keyPressEvent(event)",
"def connect_event(self, widget, signal_str, event, **event_map):\n def callback(*args):\n data = AttrDict()\n for key, value in event_map.items():\n data[key] = q_simplify(args[value])\n self.fire(event, data)\n self.connect(widget, signal_str, callback)",
"def __methodDict(cls, _dict):\n baseList = list(cls.__bases__)\n baseList.reverse()\n for _super in baseList:\n __methodDict(_super, _dict)\n for key, value in cls.__dict__.items():\n if type(value) == types.FunctionType:\n _dict[key] = value",
"def make_keyhandler(events):\n def handler(key):\n for k in events:\n if key == simplegui.KEY_MAP[k]:\n events[k]()\n return handler"
]
| [
"0.56830674",
"0.56747514",
"0.56612265",
"0.5526517",
"0.5519064",
"0.55119467",
"0.5493346",
"0.54629815",
"0.53654486",
"0.5305662",
"0.5279558",
"0.52793956",
"0.5256346",
"0.52491015",
"0.5239506",
"0.5238507",
"0.5208698",
"0.52058035",
"0.5129249",
"0.51212853",
"0.5103472",
"0.5091046",
"0.5090852",
"0.50878084",
"0.5073453",
"0.5072991",
"0.50698006",
"0.5038755",
"0.50286126",
"0.5002024"
]
| 0.75173485 | 0 |
Downloads a discovery document for the given api_name and version. This utility assumes that the API for which a discovery document is being retrieved is publicly accessible. However, you may access whitelisted resources for a public API if you are added to its whitelist and specify the associated label. | def DownloadDiscoveryDocument(api_name, version, path=_DEFAULT_DISCOVERY_PATH,
label=None):
credentials = _GetCredentials()
auth_session = AuthorizedSession(credentials)
discovery_service = build('discovery', 'v1')
discovery_rest_url = None
discovery_response = discovery_service.apis().list(
name=api_name).execute()
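  # Search the directory listing for the discovery URL of the requested version.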
if 'items' in discovery_response:
for api in discovery_response['items']:
if api['version'] == version:
discovery_rest_url = api['discoveryRestUrl']
break
if discovery_rest_url:
if label:
# Apply the label query parameter if it exists.
path_params = '&labels=%s' % label
discovery_rest_url += path_params
discovery_response = auth_session.get(discovery_rest_url)
if discovery_response.status_code == 200:
      with open(path, 'w') as handler:
handler.write(discovery_response.text)
else:
raise ValueError('Unable to retrieve discovery document for api name "%s" '
'and version "%s" via discovery URL: %s'
% (api_name, version, discovery_rest_url))
else:
raise ValueError('API with name "%s" and version "%s" was not found.'
% (api_name, version)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def document_api(\n name, version, uri, doc_destination_dir, artifact_destination_dir=DISCOVERY_DOC_DIR\n):\n http = build_http()\n resp, content = http.request(\n uri\n or uritemplate.expand(\n FLAGS.discovery_uri_template, {\"api\": name, \"apiVersion\": version}\n )\n )\n\n if resp.status == 200:\n discovery = json.loads(content)\n service = build_from_document(discovery)\n doc_name = \"{}.{}.json\".format(name, version)\n discovery_file_path = artifact_destination_dir / doc_name\n revision = None\n\n pathlib.Path(discovery_file_path).touch(exist_ok=True)\n\n # Write discovery artifact to disk if revision equal or newer\n with open(discovery_file_path, \"r+\") as f:\n try:\n json_data = json.load(f)\n revision = json_data[\"revision\"]\n except json.JSONDecodeError:\n revision = None\n\n if revision is None or discovery[\"revision\"] >= revision:\n # Reset position to the beginning\n f.seek(0)\n # Write the changes to disk\n json.dump(discovery, f, indent=2, sort_keys=True)\n # Truncate anything left as it's not needed\n f.truncate()\n\n elif resp.status == 404:\n print(\n \"Warning: {} {} not found. HTTP Code: {}\".format(name, version, resp.status)\n )\n return\n else:\n print(\n \"Warning: {} {} could not be built. HTTP Code: {}\".format(\n name, version, resp.status\n )\n )\n return\n\n document_collection_recursive(\n service,\n \"{}_{}.\".format(name, safe_version(version)),\n discovery,\n discovery,\n doc_destination_dir,\n artifact_destination_dir,\n )",
"def document_api_from_discovery_document(\n discovery_url, doc_destination_dir, artifact_destination_dir=DISCOVERY_DOC_DIR\n):\n http = build_http()\n response, content = http.request(discovery_url)\n discovery = json.loads(content)\n\n service = build_from_document(discovery)\n\n name = discovery[\"version\"]\n version = safe_version(discovery[\"version\"])\n\n document_collection_recursive(\n service,\n \"{}_{}.\".format(name, version),\n discovery,\n discovery,\n doc_destination_dir,\n artifact_destination_dir,\n )",
"def ApiFromDiscoveryDoc(self, path):\n\n f = open(os.path.join(os.path.dirname(__file__), 'testdata', path))\n discovery_doc = simplejson.loads(f.read())\n f.close()\n return Api(discovery_doc)",
"def api_get(self, name):\n try:\n r = self._get(['apis', name])\n except requests.HTTPError:\n return None\n else:\n return r",
"def init(api_name, api_version, api_settings_dict, discovery_filename=None):\n\n # Set logging levels so we don't log stuff that doesn't really matter\n logging.getLogger(\"googleapiclient.discovery_cache\").setLevel(logging.ERROR)\n logging.getLogger(\"googleapiclient.discovery\").setLevel(logging.WARNING)\n\n # Import libraries from oauth2client\n try:\n from oauth2client import client\n from oauth2client import file\n from oauth2client import tools\n except ImportError:\n raise ImportError(\n 'GoogleApi requires oauth2client. Please install oauth2client and try again.')\n\n # Set the Google API scope\n scope = 'https://www.googleapis.com/auth/' + api_name\n\n # Parser command-line arguments.\n parent_parsers = [tools.argparser]\n parent_parsers.extend([])\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n parents=parent_parsers)\n flags = parser.parse_args([])\n\n # Name of a file containing the OAuth 2.0 information for this\n # application, including client_id and client_secret, which are found\n # on the API Access tab on the Google APIs\n # Console <http://code.google.com/apis/console>.\n client_secrets = os.path.join(os.path.dirname(__file__),\n api_settings_dict['client_secrets_file'])\n\n # Set up a Flow object to be used if we need to authenticate.\n flow = client.flow_from_clientsecrets(client_secrets,\n scope=scope,\n message=tools.message_if_missing(client_secrets))\n\n # Prepare credentials, and authorize HTTP object with them.\n # If the credentials don't exist or are invalid run through the native client\n # flow. The Storage object will ensure that if successful the good\n # credentials will get written back to a file.\n storage = file.Storage(api_name + '.dat')\n credentials = storage.get()\n if credentials is None or credentials.invalid:\n credentials = tools.run_flow(flow, storage, flags)\n http = credentials.authorize(http=build_http())\n\n if discovery_filename is None:\n # Construct a service object via the discovery service.\n # print('Constructing a service object via the discovery service.')\n service = discovery.build(api_name, api_version, http=http)\n else:\n # Construct a service object using a local discovery document file.\n with open(discovery_filename) as discovery_file:\n service = discovery.build_from_document(\n discovery_file.read(),\n base='https://www.googleapis.com/',\n http=http)\n return service",
"def get_client(\n service: str,\n version: str,\n http: Optional[Union[httplib2.Http, api_httplib.HttpMock]] = None,\n request_builder: Union[\n Type[api_httplib.HttpRequest],\n api_httplib.RequestMockBuilder] = api_httplib.HttpRequest\n) -> discovery.Resource:\n static_discovery = False if isinstance(http, api_httplib.HttpMock) else None\n return discovery.build(\n service,\n version,\n num_retries=_NUMBER_OF_RETRIES,\n http=http,\n requestBuilder=request_builder,\n static_discovery=static_discovery)",
"def get_service(api_name, api_version, scope, client_secrets_path):\n # Parse command-line arguments.\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n parents=[tools.argparser])\n flags = parser.parse_args([])\n\n # Set up a Flow object to be used if we need to authenticate.\n flow = client.flow_from_clientsecrets(\n client_secrets_path, scope=scope,\n message=tools.message_if_missing(client_secrets_path))\n\n # Prepare credentials, and authorize HTTP object with them.\n # If the credentials don't exist or are invalid run through the native client\n # flow. The Storage object will ensure that if successful the good\n # credentials will get written back to a file.\n storage = file.Storage(api_name + '.dat')\n credentials = storage.get()\n if credentials is None or credentials.invalid:\n credentials = tools.run_flow(flow, storage, flags)\n http = credentials.authorize(http=httplib2.Http())\n\n # Build the service object.\n service = build(api_name, api_version, http=http)\n\n return service",
"def url(self, api_name):\n return \"https://%s/api/%s/%s/\" % (self.host, self.api_version, api_name)",
"def get_from_api(url, *, verbose=False):\n vprint = lambda *a, **kwa: print(*a, **kwa) if verbose else None\n\n with open(\"APIKey.txt\", \"r\") as keyFile:\n apiKey=keyFile.readline()\n if apiKey[-1] == '\\n':\n apiKey = apiKey[:-1]\n \n headers = {'X-API-Key': apiKey}\n vprint(\"getting\", url, \"with headers\", headers, \"...\")\n r = requests.get(url, headers=headers)\n vprint(\"...done\")\n return r",
"def getAPIservice(args, name, version, client_secrets_file, scope=None, parents=[], discovery_filename=None):\n if scope is None:\n scope = 'https://www.googleapis.com/auth/' + name\n\n # Parser command-line arguments.\n parent_parsers = [tools.argparser]\n parent_parsers.extend(parents)\n parser = argparse.ArgumentParser(\n description=\"Google API v3 Service Provider\",\n formatter_class=argparse.RawDescriptionHelpFormatter,\n parents=parent_parsers)\n flags = parser.parse_args(args)\n print(\"args = %s\" % (args))\n\n # Name of a file containing the OAuth 2.0 information for this\n # application, including client_id and client_secret, which are found\n # on the API Access tab on the Google APIs\n # Console <http://code.google.com/apis/console>.\n # client_secrets = os.path.join(os.path.dirname(filename),\n # 'client_secrets.json')\n\n # Set up a Flow object to be used if we need to authenticate.\n flow = client.flow_from_clientsecrets(client_secrets_file,\n scope=scope,\n message=tools.message_if_missing(client_secrets_file))\n\n # Prepare credentials, and authorize HTTP object with them.\n # If the credentials don't exist or are invalid run through the native client\n # flow. The Storage object will ensure that if successful the good\n # credentials will get written back to a file.\n storage = file.Storage(name + '.dat')\n credentials = storage.get()\n if credentials is None or credentials.invalid:\n credentials = tools.run_flow(flow, storage, flags)\n http = credentials.authorize(http = httplib2.Http())\n\n if discovery_filename is None:\n # Construct a service object via the discovery service.\n service = discovery.build(name, version, http=http)\n else:\n # Construct a service object using a local discovery document file.\n with open(discovery_filename) as discovery_file:\n service = discovery.build_from_document(\n discovery_file.read(),\n base='https://www.googleapis.com/',\n http=http)\n return (service, flags)",
"def get(host, port=None, version=None):\n port = 8081 if port is None else port\n version = \"v1\" if version is None else version\n\n if version not in VERSIONS.keys():\n raise RestException(f\"Unknown REST API version: {version}\")\n api_client_cls = VERSIONS[version]\n return api_client_cls(host=host, port=port)",
"def get(self, version):\n version_found = False\n api_spec = self._create_api_spec(version)\n for base_api in current_app.appbuilder.baseviews:\n if isinstance(base_api, BaseApi) and base_api.version == version:\n base_api.add_api_spec(api_spec)\n version_found = True\n if version_found:\n return self.response(200, **api_spec.to_dict())\n else:\n return self.response_404()",
"def generate_all_api_documents(\n directory_uri=DIRECTORY_URI,\n doc_destination_dir=BASE,\n artifact_destination_dir=DISCOVERY_DOC_DIR,\n):\n api_directory = collections.defaultdict(list)\n http = build_http()\n resp, content = http.request(directory_uri)\n if resp.status == 200:\n directory = json.loads(content)[\"items\"]\n for api in directory:\n document_api(\n api[\"name\"],\n api[\"version\"],\n api[\"discoveryRestUrl\"],\n doc_destination_dir,\n artifact_destination_dir,\n )\n api_directory[api[\"name\"]].append(api[\"version\"])\n\n # sort by api name and version number\n for api in api_directory:\n api_directory[api] = sorted(api_directory[api])\n api_directory = collections.OrderedDict(\n sorted(api_directory.items(), key=lambda x: x[0])\n )\n\n markdown = []\n for api, versions in api_directory.items():\n markdown.append(\"## %s\" % api)\n for version in versions:\n markdown.append(\n \"* [%s](http://googleapis.github.io/google-api-python-client/docs/dyn/%s_%s.html)\"\n % (version, api, safe_version(version))\n )\n markdown.append(\"\\n\")\n\n with open(doc_destination_dir / \"index.md\", \"w\") as f:\n markdown = \"\\n\".join(markdown)\n f.write(markdown)\n\n else:\n sys.exit(\"Failed to load the discovery document.\")",
"def create_site_retriever_api(client):\n response = client.create_rest_api(\n name='siteretriever',\n description='get site data from a list of sites',\n version='1'\n )\n file_name = \"api_info.pickle\"\n pickle_dictionary_to_file(response, file_name)",
"def _download_project(name, apikey):\n payload = {'apikey': apikey, 'project': name, 'version': 'portia'}\n r = requests.get(DASH_API_URL + 'as/project-slybot.zip', params=payload)\n return r.content",
"def get_api_json():\n with open(join(__DOCS_ROOT, 'api.yaml'), 'r') as file:\n api = yaml.load(file)\n\n return jsonify(api)",
"def GetApiClient(creds, api_service_name=None, api_version=None):\n if api_service_name is None:\n api_service_name = DEFAULT_API_SERVICE_NAME\n if api_version is None:\n api_version = DEFAULT_API_VERSION\n\n base_http_client = httplib2.Http()\n auth_http_client = creds.authorize(base_http_client)\n ab_client = apiclient.discovery.build(api_service_name, api_version,\n http=auth_http_client)\n return ab_client",
"def get_api_info(self, api_version, **kwargs):\n\n all_params = ['api_version']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_api_info\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'api_version' is set\n if ('api_version' not in params) or (params['api_version'] is None):\n raise ValueError(\"Missing the required parameter `api_version` when calling `get_api_info`\")\n\n resource_path = '/api/info/{apiVersion}'.replace('{format}', 'json')\n path_params = {}\n if 'api_version' in params:\n path_params['apiVersion'] = params['api_version']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['privileges', 'apikey']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='ApiInfo',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response",
"def get(self, api_url, timeout=30):\n return self._request('GET', api_url, timeout=timeout)",
"def get_service(credentials_folder, version='v3'):\n credentials = get_credentials(credentials_folder)\n http = credentials.authorize(httplib2.Http(cache=\".cache\"))\n service = discovery.build('drive', version, http=http)\n return service",
"def get_authenticated_service(api_name: str, api_version: str) -> Resource:\n\n if CREDS_FILENAME.exists():\n credentials = Credentials.from_authorized_user_file(str(CREDS_FILENAME))\n # TODO make request to the access token endpoint???\n\n # FIXME verifying token\n # credentials.refresh(requests.Request())\n # print(credentials.token, credentials.expiry)\n\n # idinfo = id_token.verify_oauth2_token(\n # credentials.token, requests.Request(), credentials.client_id)\n\n # if idinfo['iss'] not in ['accounts.google.com',\n # 'https://accounts.google.com']:\n # # CREDS_FILENAME.unlink()\n # raise ValueError('Wrong issuer.')\n\n else:\n flow = InstalledAppFlow.from_client_secrets_file(CLIENT_SECRETS_FILE, SCOPES)\n credentials = flow.run_local_server(\n host=\"localhost\",\n port=8080,\n authorization_prompt_message=\"Please visit this URL: {url}\",\n success_message=\"The auth flow is complete; you may close this window.\",\n open_browser=True,\n )\n\n creds_data = {\n \"token\": None,\n \"refresh_token\": credentials.refresh_token,\n \"token_uri\": credentials.token_uri,\n \"client_id\": credentials.client_id,\n \"client_secret\": credentials.client_secret,\n \"scopes\": credentials.scopes,\n }\n\n with CREDS_FILENAME.open(\"w\") as outfile:\n json.dump(creds_data, outfile)\n\n return build(api_name, api_version, credentials=credentials)",
"def get_api_doc(key=None):\n if key is None:\n return \"\"\n\n elif key in API_FILES:\n file = API_FILES[key]['file']\n realpath = os.path.join(os.path.dirname(__file__), '..', file)\n return _get_file_content(realpath, ignore_undocumented=True)\n\n return ERROR",
"def probe_api():\n\n info = loads(get(url).text)\n return info",
"def fusion_api_get_resource(self, uri, api=None, headers=None):\n if api:\n headers = self.fusion_client._set_req_api_version(api=api)\n elif not headers:\n headers = self.fusion_client._headers\n uri = 'https://%s%s' % (self.fusion_client._host, uri)\n return self.fusion_client.get(uri, headers)",
"def api(self, url, response_checker=default_checker.default_checker):\n return self._api_call(url, response_checker)",
"def _get_spec(test_api_path=None):\n if test_api_path:\n with open(test_api_path) as fp:\n api_spec_dict = json.load(fp)\n else:\n api_spec_dict = requests.get(\"https://hca-dss.czi.technology/v1/swagger.json\").json()\n return api_spec_dict",
"async def get(name, version, results, progress):\n\n buf = BytesIO()\n\n client = httpx.AsyncClient()\n\n async with client.stream(\n \"GET\", f\"https://pydocs.github.io/pkg/{name}-{version}.zip\"\n ) as response:\n total = int(response.headers[\"Content-Length\"])\n\n download_task = progress.add_task(f\"Download {name} {version}\", total=total)\n async for chunk in response.aiter_bytes():\n buf.write(chunk)\n progress.update(download_task, completed=response.num_bytes_downloaded)\n\n if response.status_code != 200:\n results[(name, version)] = None\n else:\n buf.seek(0)\n results[(name, version)] = buf.read()",
"def _query_api(\n master_url=settings.OPENSHIFT_API['NP']['OPENSHIFT_MASTER'],\n api_token=settings.OPENSHIFT_API['NP']['API_TOKEN'],\n endpoint='/oapi/v1/buildconfigs'):\n\n openshift_api_url = 'https://' + master_url\n openshift_api_get_endpoint = openshift_api_url + endpoint\n bearer_token_header = {'Authorization': 'Bearer ' + api_token }\n\n try:\n response = requests.get(openshift_api_get_endpoint,headers=bearer_token_header, timeout=2.0)\n except requests.ConnectTimeout as e:\n logger.error(e)\n return None\n except requests.ConnectionError as e:\n logger.error(e)\n return None\n\n if not response.ok:\n logger.error(response.status_code)\n return None\n else:\n return response",
"def download_file_konfuzio_api(document_id: int, ocr: bool = True, session=konfuzio_session()):\n if ocr:\n url = get_document_ocr_file_url(document_id)\n else:\n url = get_document_original_file_url(document_id)\n\n r = session.get(url)\n\n try:\n r.raise_for_status()\n except Exception:\n if r.status_code != 200:\n logger.exception(\"Requests error\")\n raise FileNotFoundError(json.loads(r.text)[\"detail\"])\n\n content_type = r.headers.get('content-type')\n if content_type not in ['application/pdf', 'image/jpeg', 'image/png', 'image/jpg']:\n raise FileNotFoundError(f'CONTENT TYP of {document_id} is {content_type} and no PDF or image.')\n\n logger.info(f'Downloaded file {document_id} from {KONFUZIO_HOST}.')\n return r.content",
"def _NDL_API(self, api, args, user=None):\n\n if user is None:\n user = os.environ['USER']\n args['username'] = user\n req = urllib.request.Request(self.RESERVATION_SERVER + \"/testlab/API/\" + api)\n if sys.version_info >= (3,3):\n req.data=urllib.parse.urlencode(args).encode('UTF-8')\n else:\n req.data=urllib.urlencode(args).encode('UTF-8')\n\n logger.debug(\"ndl::_NDL_API::request %s\" % req.get_full_url() )\n try:\n response = urllib.request.urlopen(req)\n except urllib.request.HTTPError as e:\n logger.error(\"Error %d from reservation server: %s\" % (e.getcode(), e.readline()))\n raise\n if response.getcode() != 200:\n raise urllib.resuest.HTTPError\n return response.read().decode('UTF-8')"
]
| [
"0.72981805",
"0.6734526",
"0.64512706",
"0.6035877",
"0.5581646",
"0.54990536",
"0.5306303",
"0.52876794",
"0.52130306",
"0.5148488",
"0.5062018",
"0.5056105",
"0.50496435",
"0.5046927",
"0.5018025",
"0.50053316",
"0.4952479",
"0.48979062",
"0.4880371",
"0.4877402",
"0.48739973",
"0.48665357",
"0.48634997",
"0.48522824",
"0.48337764",
"0.48128694",
"0.478482",
"0.47732762",
"0.47622964",
"0.47305477"
]
| 0.85871446 | 0 |
Builds the Google Cloud Pub/Sub service for the specified version. | def GetCloudPubSubService(version):
credentials = _GetCredentials()
service = build('pubsub', version, credentials=credentials)
return service | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_subresource(servicePath,\n version,\n **kargs):\n splits = servicePath.split('.')\n serviceName = splits[0]\n subresources = splits[1:]\n\n client = discovery.build(serviceName, version, **kargs)\n\n for subresource in subresources:\n subresource_fn = getattr(client, subresource)\n client = subresource_fn()\n\n return client",
"def slackbuild_pubsub(data, context):\n global config\n global slack\n\n print(data)\n print(context)\n\n build, template = BuildStatus.toMessage(data, config)\n\n msg = slack.render_message(build, template)\n\n return slack.post_message(msg)",
"def _CreatePubsubClient():\n client = pubsub_client.PubSubClient()\n client.CreateTopic(DEVICE_NOTE_PUBSUB_TOPIC)\n client.CreateTopic(HOST_NOTE_PUBSUB_TOPIC)\n return client",
"def build_service():\n\n\tstore = file.Storage('credentials.json')\n\tcreds = store.get()\n\tif not creds or creds.invalid:\n\t flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)\n\t creds = tools.run_flow(flow, store)\n\tservice = build('gmail', 'v1', http=creds.authorize(Http(disable_ssl_certificate_validation=True)))\n\treturn service",
"def build_gmail_api_v1():\n\n credentials = build_credentials()\n return googleapiclient.discovery.build('gmail', 'v1', credentials=credentials)",
"def __init__(self, app: NDNApp, client_prefix: FormalName, repo_prefix: FormalName):\n self.app = app\n self.client_prefix = client_prefix\n self.repo_prefix = repo_prefix\n self.pb = PubSub(self.app, self.client_prefix)",
"def new_instance(cls,\n version: date,\n service_name: str = DEFAULT_SERVICE_NAME,\n ) -> 'DirectLinkApisV1':\n if version is None:\n raise ValueError('version must be provided')\n\n authenticator = get_authenticator_from_environment(service_name)\n service = cls(\n version,\n authenticator\n )\n service.configure_service(service_name)\n return service",
"def build_service():\r\n creds = None\r\n # The file token.pickle stores the user's access and refresh tokens, and is\r\n # created automatically when the authorization flow completes for the first\r\n # time.\r\n if os.path.exists('token.pickle'):\r\n with open('token.pickle', 'rb') as token:\r\n creds = pickle.load(token)\r\n # If there are no (valid) credentials available, let the user log in.\r\n if not creds or not creds.valid:\r\n if creds and creds.expired and creds.refresh_token:\r\n creds.refresh(Request())\r\n else:\r\n flow = InstalledAppFlow.from_client_secrets_file(\r\n f\"{EMAIL_ACCOUNT_FILE}\", SCOPES)\r\n creds = flow.run_local_server(port=0)\r\n # Save the credentials for the next run\r\n with open('token.pickle', 'wb') as token:\r\n pickle.dump(creds, token)\r\n\r\n service = build('gmail', 'v1', credentials=creds)\r\n return service",
"def main():\n configure_logging()\n\n # Attributes tell us what subscription has been created for us to listen to.\n project = get_metadata('instance/attributes/pubsub_subscription_project')\n service_account = get_metadata('instance/attributes/pubsub_service_account')\n subscription = get_metadata('instance/attributes/pubsub_subscription')\n pubsub = PubSub(service_account=service_account)\n\n while True:\n logging.info('Polling for new messages')\n ack_ids = []\n start_time = time.time()\n response = pubsub.pull(subscription, project)\n for message in response.get('receivedMessages', []):\n ack_ids.append(message['ackId'])\n attributes = message['message'].get('attributes', {})\n message = base64.b64decode(message['message'].get('data', ''))\n logging.info(\n 'Received message: %s\\nAttributes: %s',\n message,\n json.dumps(attributes, indent=2),\n )\n\n if message == 'CONNECT' and attributes.get('swarming_server'):\n if os.path.exists(SWARMING_UPSTART_CONFIG_DEST):\n os.remove(SWARMING_UPSTART_CONFIG_DEST)\n shutil.copy2(SWARMING_UPSTART_CONFIG_SRC, SWARMING_UPSTART_CONFIG_DEST)\n\n if not os.path.exists(SWARMING_BOT_DIR):\n os.mkdir(SWARMING_BOT_DIR)\n chrome_bot = pwd.getpwnam(CHROME_BOT)\n os.chown(SWARMING_BOT_DIR, chrome_bot.pw_uid, chrome_bot.pw_gid)\n\n if os.path.exists(SWARMING_BOT_ZIP):\n # Delete just the zip, not the whole directory so logs are kept.\n os.remove(SWARMING_BOT_ZIP)\n\n bot_code = urllib2.urlopen(urlparse.urljoin(\n attributes.get('swarming_server'), 'bot_code'))\n with open(SWARMING_BOT_ZIP, 'w') as fd:\n shutil.copyfileobj(bot_code, fd)\n os.chown(SWARMING_BOT_ZIP, chrome_bot.pw_uid, chrome_bot.pw_gid)\n\n pubsub.acknowledge(subscription, project, ack_ids)\n subprocess.check_call(['/sbin/shutdown', '-r', 'now'])\n elif message == 'LEASED' and attributes.get('lease_expiration_ts'):\n with open(LEASE_EXPIRATION_FILE, 'w') as f:\n f.write(attributes['lease_expiration_ts'])\n\n if ack_ids:\n pubsub.acknowledge(subscription, project, ack_ids)\n if time.time() - start_time < 1:\n # Iterate at most once per second (chosen arbitrarily).\n time.sleep(1)",
"def gdocs_service(secrets: Dict):\n return build(\n \"docs\", \"v1\", credentials=google_credentials(secrets), cache_discovery=False\n )",
"def create_pubsub_subscription(client, project, topic, name):\n topic_name = pubsub.topic_name(project, topic)\n full_name = pubsub.subscription_name(project, name)\n if client.get_subscription(full_name):\n return\n\n client.create_subscription(full_name, topic_name)",
"def pubsub(self, **kwargs):\n if not self._pubsub:\n self._pubsub = Pubsub(self, **kwargs)\n return self._pubsub",
"def create_service(service, version, creds=None):\n # Instantiate an Http instance\n http = httplib2.Http()\n\n if creds:\n # Authorize the Http instance with the passed credentials\n creds.authorize(http)\n\n return build(service, version, http=http)",
"def from_service_account_json(cls, service_account_path) -> 'PubSubClient':\n client = pubsub.PublisherClient.from_service_account_json(\n filename=service_account_path)\n return cls(client)",
"def _subscribe(self, sub_type: str, sub_version: str, condition: dict, callback) -> str:\n self.__logger.debug(f'subscribe to {sub_type} version {sub_version} with condition {condition}')\n data = {\n 'type': sub_type,\n 'version': sub_version,\n 'condition': condition,\n 'transport': {\n 'method': 'webhook',\n 'callback': f'{self.callback_url}/callback',\n 'secret': self.secret\n }\n }\n r_data = self.__api_post_request(TWITCH_API_BASE_URL + 'eventsub/subscriptions', data=data)\n result = r_data.json()\n error = result.get('error')\n if r_data.status_code == 500:\n raise TwitchBackendException(error)\n if error is not None:\n if error.lower() == 'conflict':\n raise EventSubSubscriptionConflict(result.get('message', ''))\n raise EventSubSubscriptionError(result.get('message'))\n sub_id = result['data'][0]['id']\n self.__add_callback(sub_id, callback)\n if self.wait_for_subscription_confirm:\n timeout = datetime.datetime.utcnow() + datetime.timedelta(\n seconds=self.wait_for_subscription_confirm_timeout)\n while timeout >= datetime.datetime.utcnow():\n if self.__callbacks[sub_id]['active']:\n return sub_id\n asyncio.get_event_loop().run_until_complete(asyncio.sleep(0.01))\n self.__callbacks.pop(sub_id, None)\n raise EventSubSubscriptionTimeout()\n return sub_id",
"def build(_):",
"def format_release_version(version, build_id_to_inject):\n subs = version.split(\".\")\n subs[-1] = build_id_to_inject\n return '.'.join(subs)",
"def __call__(\n self,\n request: pubsub.Subscription,\n *,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Optional[float] = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> pubsub.Subscription:\n\n http_options: List[Dict[str, str]] = [\n {\n \"method\": \"put\",\n \"uri\": \"/v1/{name=projects/*/subscriptions/*}\",\n \"body\": \"*\",\n },\n ]\n request, metadata = self._interceptor.pre_create_subscription(\n request, metadata\n )\n pb_request = pubsub.Subscription.pb(request)\n transcoded_request = path_template.transcode(http_options, pb_request)\n\n # Jsonify the request body\n\n body = json_format.MessageToJson(\n transcoded_request[\"body\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n uri = transcoded_request[\"uri\"]\n method = transcoded_request[\"method\"]\n\n # Jsonify the query params\n query_params = json.loads(\n json_format.MessageToJson(\n transcoded_request[\"query_params\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n )\n query_params.update(self._get_unset_required_fields(query_params))\n\n query_params[\"$alt\"] = \"json;enum-encoding=int\"\n\n # Send the request\n headers = dict(metadata)\n headers[\"Content-Type\"] = \"application/json\"\n response = getattr(self._session, method)(\n \"{host}{uri}\".format(host=self._host, uri=uri),\n timeout=timeout,\n headers=headers,\n params=rest_helpers.flatten_query_params(query_params, strict=True),\n data=body,\n )\n\n # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception\n # subclass.\n if response.status_code >= 400:\n raise core_exceptions.from_http_response(response)\n\n # Return the response\n resp = pubsub.Subscription()\n pb_resp = pubsub.Subscription.pb(resp)\n\n json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)\n resp = self._interceptor.post_create_subscription(resp)\n return resp",
"def build_service():\n creds = None\n\n # the file token.json stores the user's access and refresh tokens, and is \n # created automatically when the authorization flow completes for the first time\n \n if os.path.exists('../creds/token.json'):\n creds = Credentials.from_authorized_user_file('../creds/token.json', SCOPES)\n\n # if there are no (valid) credentials, ask the user to login\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n '../creds/credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n with open('../creds/token.json', 'w') as token:\n token.write(creds.to_json())\n\n service = build('drive', 'v3', credentials=creds)\n return service",
"def pubsub(self) -> Optional[pulumi.Input['PubSubArgs']]:\n return pulumi.get(self, \"pubsub\")",
"def hello_pubsub(event, context):\n pubsub_message = base64.b64decode(event['data']).decode('utf-8')\n print(pubsub_message)\n x = json.loads(pubsub_message)\n vm_name = x[\"jsonPayload\"][\"resource\"][\"name\"]\n vm_zone = x[\"jsonPayload\"][\"resource\"][\"zone\"]\n project_id = x[\"resource\"][\"labels\"][\"project_id\"]\n print(\"vm_name=\"+vm_name)\n print(\"vm_zone=\"+vm_zone)\n print(\"project_id=\"+project_id)\n \n compute = discovery.build('compute', 'v1')\n \n print(\"getting metadata fingerprint\")\n request = compute.instances().get(project= project_id, zone= vm_zone, instance= vm_name)\n response = request.execute()\n pprint(response)\n metadata_fingerprint= response['metadata']['fingerprint']\n print(\"existing metadata fingerprint = \" + metadata_fingerprint)\n\n vm_status=response['status']\n print(\"vm_status = \" + vm_status)\n while vm_status != 'RUNNING' :\n print(\"going to sleep for 1 second...\")\n time.sleep(1)\n request = compute.instances().get(project= project_id, zone= vm_zone, instance= vm_name)\n response = request.execute()\n vm_status=response['status']\n print(\"vm_status = \" + vm_status)\n\n print(\"Setting VM metadata\")\n metadata_body = {\n \"fingerprint\": metadata_fingerprint,\n \"items\": [\n {\n \"key\": \"startup-script-url\",\n \"value\": \"gs://mybucket/my_script.sh\"\n }\n ]\n }\n\n request = compute.instances().setMetadata(project=project_id, zone=vm_zone, instance=vm_name, body=metadata_body)\n response = request.execute()\n pprint(response)\n\n print(\"Restarting VM\")\n request = compute.instances().reset(project=project_id, zone=vm_zone, instance=vm_name)\n response = request.execute()\n pprint(response)",
"def build():\n local('python' + python_version + ' setup.py bdist_egg')",
"def getAPIservice(args, name, version, client_secrets_file, scope=None, parents=[], discovery_filename=None):\n if scope is None:\n scope = 'https://www.googleapis.com/auth/' + name\n\n # Parser command-line arguments.\n parent_parsers = [tools.argparser]\n parent_parsers.extend(parents)\n parser = argparse.ArgumentParser(\n description=\"Google API v3 Service Provider\",\n formatter_class=argparse.RawDescriptionHelpFormatter,\n parents=parent_parsers)\n flags = parser.parse_args(args)\n print(\"args = %s\" % (args))\n\n # Name of a file containing the OAuth 2.0 information for this\n # application, including client_id and client_secret, which are found\n # on the API Access tab on the Google APIs\n # Console <http://code.google.com/apis/console>.\n # client_secrets = os.path.join(os.path.dirname(filename),\n # 'client_secrets.json')\n\n # Set up a Flow object to be used if we need to authenticate.\n flow = client.flow_from_clientsecrets(client_secrets_file,\n scope=scope,\n message=tools.message_if_missing(client_secrets_file))\n\n # Prepare credentials, and authorize HTTP object with them.\n # If the credentials don't exist or are invalid run through the native client\n # flow. The Storage object will ensure that if successful the good\n # credentials will get written back to a file.\n storage = file.Storage(name + '.dat')\n credentials = storage.get()\n if credentials is None or credentials.invalid:\n credentials = tools.run_flow(flow, storage, flags)\n http = credentials.authorize(http = httplib2.Http())\n\n if discovery_filename is None:\n # Construct a service object via the discovery service.\n service = discovery.build(name, version, http=http)\n else:\n # Construct a service object using a local discovery document file.\n with open(discovery_filename) as discovery_file:\n service = discovery.build_from_document(\n discovery_file.read(),\n base='https://www.googleapis.com/',\n http=http)\n return (service, flags)",
"def create_client(self, version=None, unstable=False, **kwargs):\n version_data = self._calculate_version(version, unstable)\n return self._create_client(version_data, **kwargs)",
"def build():",
"def create_pubsub_notification(context, depends_on, status_string):\n\n return [{\n 'name': 'pubsub-notification-{}'.format(status_string),\n 'action': 'gcp-types/pubsub-v1:pubsub.projects.topics.publish',\n 'properties': {\n 'topic':\n context.properties['pubsubTopic'],\n 'messages': [{\n 'attributes': {\n 'projectId': context.properties['projectId'],\n 'status': status_string,\n }\n }]\n },\n 'metadata': {\n # The notification should only run after *all* project-related\n # resources have been deployed.\n 'dependsOn': depends_on,\n # Only trigger the pubsub message when the deployment is created (not on\n # update or delete).\n 'runtimePolicy': ['UPDATE_ALWAYS'],\n },\n }]",
"def from_service_account_info(cls, info: dict, *args, **kwargs):\n return PublisherServiceClient.from_service_account_info.__func__(PublisherServiceAsyncClient, info, *args, **kwargs) # type: ignore",
"def initialize_service():\r\n http = httplib2.Http()\r\n credentials = prepare_credentials()\r\n http = credentials.authorize(http)\r\n return build('analytics', 'v3', http=http)",
"def _build_version(self, version, num_of_digits):\n version = \"{}\".format(version).replace(\".\", \"\").replace(\" \", \"\").strip()\n num_of_digits_to_add = (num_of_digits - len(version))\n version += (\"0\" * num_of_digits_to_add)\n version = int(version)\n return version",
"def GetService(version=_DEFAULT_VERSION, developer_key=None):\n credentials = _GetCredentials()\n\n if version in _REALTIME_BIDDING_VERSIONS:\n # Initialize client for the Real-Time Bidding API.\n service = build(_REALTIME_BIDDING_API_NAME, version, credentials=credentials,\n developerKey=developer_key)\n else:\n raise ValueError('Invalid version provided. Supported versions are: %s'\n % ', '.join(_REALTIME_BIDDING_VERSIONS))\n\n return service"
]
| [
"0.57153666",
"0.5608188",
"0.52320915",
"0.5231218",
"0.5215677",
"0.5124253",
"0.49410287",
"0.4928817",
"0.4889315",
"0.47986013",
"0.47937685",
"0.47748703",
"0.47745627",
"0.4770796",
"0.47336197",
"0.47038734",
"0.47037455",
"0.46962577",
"0.46865353",
"0.46845642",
"0.4682576",
"0.46777144",
"0.46728435",
"0.46614912",
"0.4644941",
"0.4630377",
"0.46275306",
"0.46190694",
"0.46178034",
"0.4598639"
]
| 0.75297475 | 0 |
Builds the realtimebidding service for the specified version. | def GetService(version=_DEFAULT_VERSION, developer_key=None):
credentials = _GetCredentials()
if version in _REALTIME_BIDDING_VERSIONS:
# Initialize client for the Real-Time Bidding API.
service = build(_REALTIME_BIDDING_API_NAME, version, credentials=credentials,
developerKey=developer_key)
else:
raise ValueError('Invalid version provided. Supported versions are: %s'
% ', '.join(_REALTIME_BIDDING_VERSIONS))
return service | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def new_instance(cls,\n version: date,\n service_name: str = DEFAULT_SERVICE_NAME,\n ) -> 'DirectLinkApisV1':\n if version is None:\n raise ValueError('version must be provided')\n\n authenticator = get_authenticator_from_environment(service_name)\n service = cls(\n version,\n authenticator\n )\n service.configure_service(service_name)\n return service",
"def build_subresource(servicePath,\n version,\n **kargs):\n splits = servicePath.split('.')\n serviceName = splits[0]\n subresources = splits[1:]\n\n client = discovery.build(serviceName, version, **kargs)\n\n for subresource in subresources:\n subresource_fn = getattr(client, subresource)\n client = subresource_fn()\n\n return client",
"def __init__(self,\n version: date,\n authenticator: Authenticator = None,\n ) -> None:\n if version is None:\n raise ValueError('version must be provided')\n\n BaseService.__init__(self,\n service_url=self.DEFAULT_SERVICE_URL,\n authenticator=authenticator)\n self.version = version",
"def build():\n\n app = flask.Flask(\"cnc-forge-api\")\n app.api = flask_restful.Api(app)\n\n with open(\"/opt/service/secret/redis.json\", \"r\") as redis_file:\n app.redis = redis.Redis(charset=\"utf-8\", decode_responses=True, **json.loads(redis_file.read()))\n\n app.api.add_resource(Health, '/health')\n app.api.add_resource(Forge, '/forge', '/forge/<id>')\n app.api.add_resource(CnC, '/cnc', '/cnc/<id>')\n\n return app",
"def __init__(self, version=None):\n super(VirtualNetworkApplianceService, self).__init__(\n service_type='virtual-network-appliance',\n version=version\n )",
"def create_client(self, version=None, unstable=False, **kwargs):\n version_data = self._calculate_version(version, unstable)\n return self._create_client(version_data, **kwargs)",
"def build(_):",
"def get_client(\n service: str,\n version: str,\n http: Optional[Union[httplib2.Http, api_httplib.HttpMock]] = None,\n request_builder: Union[\n Type[api_httplib.HttpRequest],\n api_httplib.RequestMockBuilder] = api_httplib.HttpRequest\n) -> discovery.Resource:\n static_discovery = False if isinstance(http, api_httplib.HttpMock) else None\n return discovery.build(\n service,\n version,\n num_retries=_NUMBER_OF_RETRIES,\n http=http,\n requestBuilder=request_builder,\n static_discovery=static_discovery)",
"def __init__(self, baseurl, resmeta=None, version=\"1.0\"):\n super(SLAService, self).__init__(baseurl, \"sla\", version, resmeta)",
"def main():\n cfg.CONF(sys.argv[1:], project='blazar', prog='blazar-api')\n notifier.init()\n service_utils.prepare_service(sys.argv)\n if not CONF.enable_v1_api:\n app = v2_app.make_app()\n else:\n app = wsgi_app.VersionSelectorApplication()\n\n wsgi.server(eventlet.listen((CONF.host, CONF.port), backlog=500), app)",
"def client(\n service_name: str, version: str = \"v1\", secrets: Secrets = None\n) -> Resource:\n credentials = load_credentials(secrets=secrets)\n return build(service_name, version=version, credentials=credentials)",
"def build():",
"def build_api_version(self, build_api_version):\n\n self._build_api_version = build_api_version",
"def cli_build(\n family: str,\n versions: Union[str, List[str]],\n ports: Union[str, List[str]],\n boards: Union[str, List[str]],\n clean: bool,\n force: bool,\n # stub_type: str,\n):\n\n # lists please\n versions = list(versions)\n ports = list(ports)\n boards = list(boards)\n\n # db = get_database(publish_path=CONFIG.publish_path, production=production)\n log.info(f\"Build {family} {versions} {ports} {boards}\")\n\n results = build_multiple(\n family=family,\n versions=versions,\n ports=ports,\n boards=boards,\n production=True, # use production database during build\n force=force,\n clean=clean,\n )\n # log the number of results with no error\n log.info(f\"Built {len([r for r in results if not r['error']])} stubs\")\n print(tabulate(results, headers=\"keys\"))",
"def Client(api_version, *args, **kwargs):\r\n neutron_client = utils.get_client_class(\r\n API_NAME,\r\n api_version,\r\n API_VERSIONS,\r\n )\r\n return neutron_client(*args, **kwargs)",
"def create_service(service, version, creds=None):\n # Instantiate an Http instance\n http = httplib2.Http()\n\n if creds:\n # Authorize the Http instance with the passed credentials\n creds.authorize(http)\n\n return build(service, version, http=http)",
"def get_service(api_name, api_version, scope, client_secrets_path):\n # Parse command-line arguments.\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n parents=[tools.argparser])\n flags = parser.parse_args([])\n\n # Set up a Flow object to be used if we need to authenticate.\n flow = client.flow_from_clientsecrets(\n client_secrets_path, scope=scope,\n message=tools.message_if_missing(client_secrets_path))\n\n # Prepare credentials, and authorize HTTP object with them.\n # If the credentials don't exist or are invalid run through the native client\n # flow. The Storage object will ensure that if successful the good\n # credentials will get written back to a file.\n storage = file.Storage(api_name + '.dat')\n credentials = storage.get()\n if credentials is None or credentials.invalid:\n credentials = tools.run_flow(flow, storage, flags)\n http = credentials.authorize(http=httplib2.Http())\n\n # Build the service object.\n service = build(api_name, api_version, http=http)\n\n return service",
"def GetCloudPubSubService(version):\n credentials = _GetCredentials()\n\n service = build('pubsub', version, credentials=credentials)\n\n return service",
"def __init__(self, # pylint: disable=too-many-arguments, too-many-locals\n service_subscription: \"ServiceSubscription\",\n instance_id: str,\n instance_name: str = None,\n service_type: str = None,\n service_role: str = None,\n environment_context: str = None,\n workload_context: str = None,\n created_at: str = None,\n updated_at: str = None,\n resource_version: str = None,\n description: str = None,\n model_invariant_id: str = None,\n model_version_id: str = None,\n persona_model_version: str = None,\n widget_model_id: str = None,\n widget_model_version: str = None,\n bandwith_total: str = None,\n vhn_portal_url: str = None,\n service_instance_location_id: str = None,\n selflink: str = None,\n orchestration_status: str = None,\n input_parameters: str = None) -> None:\n super().__init__(resource_version=resource_version,\n model_invariant_id=model_invariant_id,\n model_version_id=model_version_id)\n self.service_subscription: \"ServiceSubscription\" = service_subscription\n self.instance_id: str = instance_id\n self.instance_name: str = instance_name\n self.service_type: str = service_type\n self.service_role: str = service_role\n self.environment_context: str = environment_context\n self.workload_context: str = workload_context\n self.created_at: str = created_at\n self.updated_at: str = updated_at\n self.description: str = description\n self.bandwith_total: str = bandwith_total\n self.vhn_portal_url: str = vhn_portal_url\n self.service_instance_location_id: str = service_instance_location_id\n self.selflink: str = selflink\n self.orchestration_status: str = orchestration_status\n self.input_parameters: str = input_parameters\n self.persona_model_version: str = persona_model_version\n self.widget_model_id: str = widget_model_id\n self.widget_model_version: str = widget_model_version\n self._sdc_service: Optional[Service] = None",
"def _build_version(self, version, num_of_digits):\n version = \"{}\".format(version).replace(\".\", \"\").replace(\" \", \"\").strip()\n num_of_digits_to_add = (num_of_digits - len(version))\n version += (\"0\" * num_of_digits_to_add)\n version = int(version)\n return version",
"def get(self, version):\n version_found = False\n api_spec = self._create_api_spec(version)\n for base_api in current_app.appbuilder.baseviews:\n if isinstance(base_api, BaseApi) and base_api.version == version:\n base_api.add_api_spec(api_spec)\n version_found = True\n if version_found:\n return self.response(200, **api_spec.to_dict())\n else:\n return self.response_404()",
"def build_dauth_services(target):\n if target == \"debug\":\n cmd = [\"cargo\", \"build\"]\n elif target == \"release\":\n cmd = [\"cargo\", \"build\", \"--release\"]\n else:\n raise ValueError(\"Invalid target mode: {}\".format(target))\n\n log.info(\"Running build command: %s\", cmd)\n subprocess.run(cmd, check=True, cwd=\"../services\")",
"def build(self, distribution):\n\t\t# get latets rpms from the latest distribution snapshot\n\t\ttry:\n\t\t\tartefact = StorageReader().retrieve({\n\t\t\t\t\"artefact\": ARTEFACT_GOLANG_DISTRIBUTION_SNAPSHOT,\n\t\t\t\t\"distribution\": distribution.json()\n\t\t\t})\n\t\texcept KeyError:\n\t\t\traise KeyError(\"Distribution snapshot for '%s' not found\" % distribution)\n\n\t\tcounter = 0\n\t\tbuilder = DatasetBuilder()\n\n\t\tbuilds = DistributionSnapshot().read(data).builds()\n\t\tbuilds_total = len(builds)\n\t\tbuilds_counter = 0\n\t\tfor pkg in builds:\n\t\t\tbuilds_counter = builds_counter + 1\n\t\t\tlogger.info(\"%s/%s Processing %s\" % (builds_counter, builds_total, builds[pkg][\"build\"]))\n\n\t\t\t# get artefact\n\t\t\tdata = {\n\t\t\t\t\"product\": distribution.product(),\n\t\t\t\t\"distribution\": distribution.version(),\n\t\t\t\t\"build\": {\n\t\t\t\t\t\"name\": builds[pkg][\"build\"],\n\t\t\t\t\t\"rpms\": map(lambda l: {\"name\": l}, builds[pkg][\"rpms\"])\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor rpm in builds[pkg][\"rpms\"]:\n\t\t\t\tartefact_key = {\n\t\t\t\t\t\"artefact\": ARTEFACT_GOLANG_PROJECT_DISTRIBUTION_PACKAGES,\n\t\t\t\t\t\"product\": distribution.product(),\n\t\t\t\t\t\"distribution\": distribution.version(),\n\t\t\t\t\t\"build\": builds[pkg][\"build\"],\n\t\t\t\t\t\"rpm\": rpm,\n\t\t\t\t}\n\n\t\t\t\ttry:\n\t\t\t\t\tartefact = StorageReader().retrieve(artefact_key)\n\t\t\t\texcept KeyError:\n\t\t\t\t\tWorker(\"scandistributionbuild\").setPayload({\n\t\t\t\t\t\t\"product\": product,\n\t\t\t\t\t\t\"distribution\": version,\n\t\t\t\t\t\t\"build\": {\n\t\t\t\t\t\t\t\"name\": builds[pkg][\"build\"],\n\t\t\t\t\t\t\t\"rpms\": builds[pkg][\"rpms\"],\n\t\t\t\t\t\t}\n\t\t\t\t\t}).do()\n\n\t\t\t\ttry:\n\t\t\t\t\tartefact = StorageReader().retrieve(artefact_key)\n\t\t\t\texcept KeyError as e:\n\t\t\t\t\tlogger.error(e)\n\t\t\t\t\tcontinue\n\n\t\t\t\tbuilder.addDistributionArtefact(artefact)\n\n\t\treturn builder.build().dataset()",
"def build(self) -> cern.lsa.domain.settings.Knob:\n ...",
"def build_client(module):\n return drac.DRACClient(module.params['address'],\n module.params['username'],\n module.params['password'])",
"def build(self, *args, **kwargs):\n return",
"def _build(self, **kwargs):",
"def beta_create_KratosService_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):\n request_serializers = {\n ('kratos.KratosService', 'AddRule'): AddRuleRequest.SerializeToString,\n ('kratos.KratosService', 'AddService'): AddServiceRequest.SerializeToString,\n ('kratos.KratosService', 'DeleteRule'): DeleteRuleRequest.SerializeToString,\n ('kratos.KratosService', 'DeleteService'): DeleteServiceRequest.SerializeToString,\n ('kratos.KratosService', 'ResetCounter'): ResetCounterRequest.SerializeToString,\n ('kratos.KratosService', 'Status'): StatusRequest.SerializeToString,\n }\n response_deserializers = {\n ('kratos.KratosService', 'AddRule'): AddRuleResponse.FromString,\n ('kratos.KratosService', 'AddService'): AddServiceResponse.FromString,\n ('kratos.KratosService', 'DeleteRule'): DeleteRuleResponse.FromString,\n ('kratos.KratosService', 'DeleteService'): DeleteServiceResponse.FromString,\n ('kratos.KratosService', 'ResetCounter'): ResetCounterResponse.FromString,\n ('kratos.KratosService', 'Status'): StatusResponse.FromString,\n }\n cardinalities = {\n 'AddRule': cardinality.Cardinality.UNARY_UNARY,\n 'AddService': cardinality.Cardinality.UNARY_UNARY,\n 'DeleteRule': cardinality.Cardinality.UNARY_UNARY,\n 'DeleteService': cardinality.Cardinality.UNARY_UNARY,\n 'ResetCounter': cardinality.Cardinality.UNARY_UNARY,\n 'Status': cardinality.Cardinality.UNARY_UNARY,\n }\n stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)\n return beta_implementations.dynamic_stub(channel, 'kratos.KratosService', cardinalities, options=stub_options)",
"def make_rest_client(\n service_key, options=None,\n app_name=None, app_version=None, version=None,\n **kwargs):\n cloud = get_config(\n service_key=service_key, options=options,\n app_name=app_name, app_version=app_version,\n **kwargs)\n return cloud.get_session_client(service_key, version=version)",
"def build_service():\n\n\tstore = file.Storage('credentials.json')\n\tcreds = store.get()\n\tif not creds or creds.invalid:\n\t flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)\n\t creds = tools.run_flow(flow, store)\n\tservice = build('gmail', 'v1', http=creds.authorize(Http(disable_ssl_certificate_validation=True)))\n\treturn service"
]
| [
"0.5569035",
"0.552676",
"0.5366935",
"0.51349723",
"0.5069962",
"0.50330245",
"0.49777514",
"0.49529108",
"0.49461666",
"0.49356827",
"0.49331987",
"0.4930468",
"0.4911512",
"0.4851224",
"0.48050913",
"0.47942278",
"0.47915474",
"0.4762772",
"0.47495747",
"0.47264645",
"0.47244528",
"0.46911246",
"0.46872652",
"0.46825424",
"0.46810123",
"0.46470878",
"0.46345624",
"0.46314448",
"0.46212843",
"0.46047488"
]
| 0.6628569 | 0 |
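The GetService document in the row above differs from the generic build pattern only in its up-front version check. Below is a self-contained sketch of that guard; the constant and function names are hypothetical, chosen only for illustration.

# Illustrative, self-contained version guard in the style of GetService above.
# _SUPPORTED_VERSIONS and check_version are hypothetical names.
_SUPPORTED_VERSIONS = ('v1',)

def check_version(version):
    if version not in _SUPPORTED_VERSIONS:
        raise ValueError('Invalid version provided. Supported versions are: %s'
                         % ', '.join(_SUPPORTED_VERSIONS))
    return version

print(check_version('v1'))      # -> v1
# check_version('v2')           # would raise ValueError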
Builds a service using the specified discovery document. | def GetServiceFromFile(discovery_file):
credentials = _GetCredentials()
with open(discovery_file, 'r') as handler:
discovery_doc = handler.read()
service = build_from_document(service=discovery_doc, credentials=credentials)
return service | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def document_api_from_discovery_document(\n discovery_url, doc_destination_dir, artifact_destination_dir=DISCOVERY_DOC_DIR\n):\n http = build_http()\n response, content = http.request(discovery_url)\n discovery = json.loads(content)\n\n service = build_from_document(discovery)\n\n name = discovery[\"version\"]\n version = safe_version(discovery[\"version\"])\n\n document_collection_recursive(\n service,\n \"{}_{}.\".format(name, version),\n discovery,\n discovery,\n doc_destination_dir,\n artifact_destination_dir,\n )",
"def build_service():\n creds = None\n\n # the file token.json stores the user's access and refresh tokens, and is \n # created automatically when the authorization flow completes for the first time\n \n if os.path.exists('../creds/token.json'):\n creds = Credentials.from_authorized_user_file('../creds/token.json', SCOPES)\n\n # if there are no (valid) credentials, ask the user to login\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n '../creds/credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n with open('../creds/token.json', 'w') as token:\n token.write(creds.to_json())\n\n service = build('drive', 'v3', credentials=creds)\n return service",
"def build_subresource(servicePath,\n version,\n **kargs):\n splits = servicePath.split('.')\n serviceName = splits[0]\n subresources = splits[1:]\n\n client = discovery.build(serviceName, version, **kargs)\n\n for subresource in subresources:\n subresource_fn = getattr(client, subresource)\n client = subresource_fn()\n\n return client",
"def document_api(\n name, version, uri, doc_destination_dir, artifact_destination_dir=DISCOVERY_DOC_DIR\n):\n http = build_http()\n resp, content = http.request(\n uri\n or uritemplate.expand(\n FLAGS.discovery_uri_template, {\"api\": name, \"apiVersion\": version}\n )\n )\n\n if resp.status == 200:\n discovery = json.loads(content)\n service = build_from_document(discovery)\n doc_name = \"{}.{}.json\".format(name, version)\n discovery_file_path = artifact_destination_dir / doc_name\n revision = None\n\n pathlib.Path(discovery_file_path).touch(exist_ok=True)\n\n # Write discovery artifact to disk if revision equal or newer\n with open(discovery_file_path, \"r+\") as f:\n try:\n json_data = json.load(f)\n revision = json_data[\"revision\"]\n except json.JSONDecodeError:\n revision = None\n\n if revision is None or discovery[\"revision\"] >= revision:\n # Reset position to the beginning\n f.seek(0)\n # Write the changes to disk\n json.dump(discovery, f, indent=2, sort_keys=True)\n # Truncate anything left as it's not needed\n f.truncate()\n\n elif resp.status == 404:\n print(\n \"Warning: {} {} not found. HTTP Code: {}\".format(name, version, resp.status)\n )\n return\n else:\n print(\n \"Warning: {} {} could not be built. HTTP Code: {}\".format(\n name, version, resp.status\n )\n )\n return\n\n document_collection_recursive(\n service,\n \"{}_{}.\".format(name, safe_version(version)),\n discovery,\n discovery,\n doc_destination_dir,\n artifact_destination_dir,\n )",
"def ApiFromDiscoveryDoc(self, path):\n\n f = open(os.path.join(os.path.dirname(__file__), 'testdata', path))\n discovery_doc = simplejson.loads(f.read())\n f.close()\n return Api(discovery_doc)",
"def build_service():\n\n\tstore = file.Storage('credentials.json')\n\tcreds = store.get()\n\tif not creds or creds.invalid:\n\t flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)\n\t creds = tools.run_flow(flow, store)\n\tservice = build('gmail', 'v1', http=creds.authorize(Http(disable_ssl_certificate_validation=True)))\n\treturn service",
"def create_service_object(credentials):\n http_auth = httplib2.Http()\n http_auth = credentials.authorize(http_auth)\n service = discovery.build('analytics', 'v3', http=http_auth)\n return service",
"def _service_object(\n self,\n ports: List[ServicePort],\n service_name: str = None,\n service_type: ServiceType = \"ClusterIP\",\n additional_labels: dict = None,\n additional_selectors: dict = None,\n additional_annotations: dict = None,\n ) -> Service:\n if not service_name:\n service_name = self._app\n labels = {\"app.kubernetes.io/name\": self._app}\n if additional_labels:\n labels.update(additional_labels)\n selector = {\"app.kubernetes.io/name\": self._app}\n if additional_selectors:\n selector.update(additional_selectors)\n return Service(\n apiVersion=\"v1\",\n kind=\"Service\",\n metadata=ObjectMeta(\n namespace=self._namespace,\n name=service_name,\n labels=labels,\n annotations=additional_annotations, # type: ignore[arg-type]\n ),\n spec=ServiceSpec(\n selector=selector,\n ports=ports,\n type=service_type,\n ),\n )",
"def createService(data):\n return Service(data).create()",
"def new_instance(cls,\n version: date,\n service_name: str = DEFAULT_SERVICE_NAME,\n ) -> 'DirectLinkApisV1':\n if version is None:\n raise ValueError('version must be provided')\n\n authenticator = get_authenticator_from_environment(service_name)\n service = cls(\n version,\n authenticator\n )\n service.configure_service(service_name)\n return service",
"def gdocs_service(secrets: Dict):\n return build(\n \"docs\", \"v1\", credentials=google_credentials(secrets), cache_discovery=False\n )",
"def make_service(self, endpoint_type, service_name, **client_kwargs):\n binding = self._make_binding(endpoint_type, service_name)\n service_cache_key = (binding, str(client_kwargs))\n\n if service_cache_key in self._service_cache:\n srvc = self._service_cache[service_cache_key]\n else:\n client = self._make_client(\n endpoint_type,\n service_name,\n **client_kwargs\n )\n srvc = client.create_service(binding, client.wsdl.location)\n self._service_cache[service_cache_key] = srvc\n return srvc",
"def get_service(api_name, api_version, scope, client_secrets_path):\n # Parse command-line arguments.\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n parents=[tools.argparser])\n flags = parser.parse_args([])\n\n # Set up a Flow object to be used if we need to authenticate.\n flow = client.flow_from_clientsecrets(\n client_secrets_path, scope=scope,\n message=tools.message_if_missing(client_secrets_path))\n\n # Prepare credentials, and authorize HTTP object with them.\n # If the credentials don't exist or are invalid run through the native client\n # flow. The Storage object will ensure that if successful the good\n # credentials will get written back to a file.\n storage = file.Storage(api_name + '.dat')\n credentials = storage.get()\n if credentials is None or credentials.invalid:\n credentials = tools.run_flow(flow, storage, flags)\n http = credentials.authorize(http=httplib2.Http())\n\n # Build the service object.\n service = build(api_name, api_version, http=http)\n\n return service",
"def create_service():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file('credentials.json'\n , SCOPES)\n creds = flow.run_local_server(port=9797)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('drive', 'v3', credentials=creds)\n return service",
"def get_client(\n service: str,\n version: str,\n http: Optional[Union[httplib2.Http, api_httplib.HttpMock]] = None,\n request_builder: Union[\n Type[api_httplib.HttpRequest],\n api_httplib.RequestMockBuilder] = api_httplib.HttpRequest\n) -> discovery.Resource:\n static_discovery = False if isinstance(http, api_httplib.HttpMock) else None\n return discovery.build(\n service,\n version,\n num_retries=_NUMBER_OF_RETRIES,\n http=http,\n requestBuilder=request_builder,\n static_discovery=static_discovery)",
"def start_service(self):\n logger = logging.getLogger(self.dkr_name)\n logger.info(\"Starting up service\")\n\n self.start_swarm()\n\n container_spec = docker.types.ContainerSpec(\n image=self.dkr_image,\n command=self.dkr_command,\n env=self.dkr_env\n )\n task_tmpl = docker.types.TaskTemplate(container_spec)\n svc = self.api_client().create_service(\n name=self.dkr_name,\n task_template=task_tmpl)\n\n self.dkr_service = svc",
"def build_service():\r\n creds = None\r\n # The file token.pickle stores the user's access and refresh tokens, and is\r\n # created automatically when the authorization flow completes for the first\r\n # time.\r\n if os.path.exists('token.pickle'):\r\n with open('token.pickle', 'rb') as token:\r\n creds = pickle.load(token)\r\n # If there are no (valid) credentials available, let the user log in.\r\n if not creds or not creds.valid:\r\n if creds and creds.expired and creds.refresh_token:\r\n creds.refresh(Request())\r\n else:\r\n flow = InstalledAppFlow.from_client_secrets_file(\r\n f\"{EMAIL_ACCOUNT_FILE}\", SCOPES)\r\n creds = flow.run_local_server(port=0)\r\n # Save the credentials for the next run\r\n with open('token.pickle', 'wb') as token:\r\n pickle.dump(creds, token)\r\n\r\n service = build('gmail', 'v1', credentials=creds)\r\n return service",
"def get_client(service_account_json):\n api_scopes = ['https://www.googleapis.com/auth/cloud-platform']\n api_version = 'v1'\n discovery_api = 'https://cloudiot.googleapis.com/$discovery/rest'\n service_name = 'cloudiotcore'\n\n credentials = service_account.Credentials.from_service_account_file(\n service_account_json)\n scoped_credentials = credentials.with_scopes(api_scopes)\n\n discovery_url = '{}?version={}'.format(\n discovery_api, api_version)\n\n return discovery.build(\n service_name,\n api_version,\n discoveryServiceUrl=discovery_url,\n credentials=scoped_credentials)",
"def get_service(api_name, api_version, scopes, key_file_location):\n\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n key_file_location, scopes=scopes)\n\n # Build the service object.\n service = build(api_name, api_version, credentials=credentials)\n\n return service",
"def get_service(api_name, api_version, scopes, key_file_location):\r\n\r\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\r\n key_file_location, scopes=scopes)\r\n\r\n # Build the service object.\r\n service = build(api_name, api_version, credentials=credentials)\r\n\r\n return service",
"def service(self, service_name):\n return Service('/'.join((self._url, self._services_path, service_name)))",
"def create_service(service, version, creds=None):\n # Instantiate an Http instance\n http = httplib2.Http()\n\n if creds:\n # Authorize the Http instance with the passed credentials\n creds.authorize(http)\n\n return build(service, version, http=http)",
"def build(self, factory, *factory_args, **factory_kw):\n return self._instantiate(\"\", factory, factory_args, factory_kw)",
"def getAPIservice(args, name, version, client_secrets_file, scope=None, parents=[], discovery_filename=None):\n if scope is None:\n scope = 'https://www.googleapis.com/auth/' + name\n\n # Parser command-line arguments.\n parent_parsers = [tools.argparser]\n parent_parsers.extend(parents)\n parser = argparse.ArgumentParser(\n description=\"Google API v3 Service Provider\",\n formatter_class=argparse.RawDescriptionHelpFormatter,\n parents=parent_parsers)\n flags = parser.parse_args(args)\n print(\"args = %s\" % (args))\n\n # Name of a file containing the OAuth 2.0 information for this\n # application, including client_id and client_secret, which are found\n # on the API Access tab on the Google APIs\n # Console <http://code.google.com/apis/console>.\n # client_secrets = os.path.join(os.path.dirname(filename),\n # 'client_secrets.json')\n\n # Set up a Flow object to be used if we need to authenticate.\n flow = client.flow_from_clientsecrets(client_secrets_file,\n scope=scope,\n message=tools.message_if_missing(client_secrets_file))\n\n # Prepare credentials, and authorize HTTP object with them.\n # If the credentials don't exist or are invalid run through the native client\n # flow. The Storage object will ensure that if successful the good\n # credentials will get written back to a file.\n storage = file.Storage(name + '.dat')\n credentials = storage.get()\n if credentials is None or credentials.invalid:\n credentials = tools.run_flow(flow, storage, flags)\n http = credentials.authorize(http = httplib2.Http())\n\n if discovery_filename is None:\n # Construct a service object via the discovery service.\n service = discovery.build(name, version, http=http)\n else:\n # Construct a service object using a local discovery document file.\n with open(discovery_filename) as discovery_file:\n service = discovery.build_from_document(\n discovery_file.read(),\n base='https://www.googleapis.com/',\n http=http)\n return (service, flags)",
"def get_service(credentials_folder, version='v3'):\n credentials = get_credentials(credentials_folder)\n http = credentials.authorize(httplib2.Http(cache=\".cache\"))\n service = discovery.build('drive', version, http=http)\n return service",
"def get_service():\n \n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n token_path = f\"{sys.path[0]}/creds/token.pickle\"\n if os.path.exists(token_path):\n with open(token_path, 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n cred_path = f\"{sys.path[0]}/creds/credentials.json\"\n flow = InstalledAppFlow.from_client_secrets_file(\n cred_path, SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open(token_path, 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('calendar', 'v3', credentials=creds)\n\n return service",
"def async_get_service_discovery(hass, discovery_info):\n notification_devices = []\n for device_name in discovery_info[ATTR_DISCOVER_DEVICES]:\n device = hass.data[DATA_KNX].xknx.devices[device_name]\n notification_devices.append(device)\n return (\n KNXNotificationService(notification_devices) if notification_devices else None\n )",
"def init(argv, doc, filename, parents=None):\n service = None\n flags = None\n parent_parsers = [tools.argparser, argparser]\n if parents is not None:\n parent_parsers.extend(parents)\n\n parser = argparse.ArgumentParser(\n description=doc,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n parents=parent_parsers)\n flags = parser.parse_args(argv[1:])\n\n auth_path = os.path.dirname(filename)\n client_secrets_path = os.path.join(auth_path, CLIENT_SECRETS_FILE)\n service_account_path = os.path.join(auth_path, SERVICE_ACCOUNT_FILE)\n\n credentials = None\n if os.path.isfile(service_account_path):\n credentials = ServiceAccountCredentials.from_json_keyfile_name(\n service_account_path,\n scopes=API_SCOPE)\n elif os.path.isfile(client_secrets_path):\n message = tools.message_if_missing(client_secrets_path)\n flow = client.flow_from_clientsecrets(client_secrets_path,\n scope=API_SCOPE,\n message=message)\n storage_path = os.path.join(auth_path, SERVICE_NAME + '.dat')\n storage = Storage(storage_path)\n credentials = storage.get()\n if credentials is None or credentials.invalid:\n credentials = tools.run_flow(flow, storage, flags)\n else:\n print('No OAuth2 authentication files found. Checked:', file=sys.stderr)\n print('- %s' % service_account_path, file=sys.stderr)\n print('- %s' % client_secrets_path, file=sys.stderr)\n print('Please read the accompanying documentation.', file=sys.stderr)\n sys.exit(1)\n\n http = credentials.authorize(http=httplib2.Http())\n service = discovery.build(SERVICE_NAME, SERVICE_VERSION, http=http)\n return (service, flags)",
"def create_service(self, service_name, *args, **kwargs):\n\n creator = self._service_creators.get(service_name, None)\n\n if creator is None:\n return None\n\n return creator(*args, **kwargs)",
"def get_gcal_service(credentials):\n app.logger.debug(\"Entering get_gcal_service\")\n http_auth = credentials.authorize(httplib2.Http())\n service = discovery.build('calendar', 'v3', http=http_auth)\n app.logger.debug(\"Returning service\")\n return service"
]
| [
"0.64967304",
"0.6030143",
"0.589598",
"0.5847395",
"0.5738073",
"0.56972104",
"0.56820416",
"0.5582655",
"0.5537132",
"0.55302894",
"0.5434771",
"0.54292744",
"0.5420864",
"0.5398877",
"0.53854674",
"0.5381908",
"0.5316397",
"0.53089416",
"0.5304767",
"0.5292204",
"0.5289053",
"0.527826",
"0.52426225",
"0.5221466",
"0.5207445",
"0.51842797",
"0.51182616",
"0.5107374",
"0.5082331",
"0.5075072"
]
| 0.6653792 | 0 |
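Reading the discovery document from disk, as GetServiceFromFile does in the row above, avoids a round trip to Google's discovery service. A minimal usage sketch follows; 'discovery.json' is a placeholder path and no credentials are attached, both assumptions made only for illustration.

# Hedged sketch: construct a client from a locally cached discovery document.
# 'discovery.json' is a hypothetical path; pass credentials=... as in the code above.
from googleapiclient.discovery import build_from_document

with open('discovery.json') as handler:
    discovery_doc = handler.read()

service = build_from_document(service=discovery_doc)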
Return an inspyred crossover function based on the given function. This function generator takes a function that operates on only two parent candidates to produce an iterable sequence of offspring (typically two). The generator handles the pairing of selected parents and collecting of all offspring. The generated function chooses every odd candidate as a 'mom' and every even as a 'dad' (discounting the last candidate if there is an odd number). For each mom-dad pair, offspring are produced via the `cross` function. | def crossover(cross):
@functools.wraps(cross)
def inspyred_crossover(random, candidates, args):
if len(candidates) % 2 == 1:
candidates = candidates[:-1]
moms = candidates[::2]
dads = candidates[1::2]
children = []
for i, (mom, dad) in enumerate(zip(moms, dads)):
cross.index = i
offspring = cross(random, mom, dad, args)
for o in offspring:
children.append(o)
return children
inspyred_crossover.single_crossover = cross
return inspyred_crossover | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def crossover(cross):\r\n @functools.wraps(cross)\r\n def ecspy_crossover(random, candidates, args):\r\n if len(candidates) % 2 == 1:\r\n candidates = candidates[:-1]\r\n moms = candidates[::2]\r\n dads = candidates[1::2]\r\n children = []\r\n for i, (mom, dad) in enumerate(zip(moms, dads)):\r\n cross.index = i\r\n offspring = cross(random, mom, dad, args)\r\n for o in offspring:\r\n children.append(o)\r\n return children\r\n ecspy_crossover.single_crossover = cross\r\n return ecspy_crossover",
"def _crossover(self, sel):\n offspring = []\n for p1, p2 in sel:\n p1 = copy.deepcopy(p1)\n p2 = copy.deepcopy(p2)\n\n tmp = self.op.crossover(\n copy.deepcopy(p1['individual']),\n copy.deepcopy(p2['individual']))\n if not tmp[0] is None and not tmp[1] is None:\n c1 = {\n 'individual': tmp[0],\n 'fitness': self.op.fitness(tmp[0])\n }\n c2 = {\n 'individual': tmp[1],\n 'fitness': self.op.fitness(tmp[1])\n }\n\n offspring.append(\n c1 if c1['fitness'] < p1['fitness'] else p1)\n offspring.append(\n c2 if c2['fitness'] < p2['fitness'] else p2)\n else:\n offspring.extend((p1, p2))\n return offspring",
"def _create_offspring(self):\n parents = self._select_parents()\n offspring = self._crossover(*parents)\n if (random.uniform(0, 1) < self.mutation_rate):\n self._mutate(offspring)\n return offspring",
"def _crossover(self, best_population, crossover, n_parents=2, method=\"uniform_swap\"):\n if crossover:\n # randomly select parents\n parents_indexes = torch.randint(0, len(best_population), (self.population_size, n_parents),\n device=self.device)\n new_population = torch.zeros(self.population.shape, device=self.device)\n i = 0\n for p_idx in parents_indexes:\n new_population[i] = self._produce_child(best_population[p_idx], method=method)\n i += 1\n else:\n # randomly repeat best individuals\n new_pop_indexes = torch.randint(0, len(best_population), (self.population_size,), device=self.device)\n new_population = best_population[new_pop_indexes]\n return new_population",
"def arithmetic_crossover(random, mom, dad, args):\n ax_alpha = args.setdefault('ax_alpha', 0.5)\n ax_points = args.setdefault('ax_points', None)\n crossover_rate = args.setdefault('crossover_rate', 1.0)\n bounder = args['_ec'].bounder\n children = []\n if random.random() < crossover_rate:\n bro = copy.copy(dad)\n sis = copy.copy(mom)\n if ax_points is None:\n ax_points = list(range(min(len(bro), len(sis))))\n for i in ax_points:\n bro[i] = ax_alpha * mom[i] + (1 - ax_alpha) * dad[i]\n sis[i] = ax_alpha * dad[i] + (1 - ax_alpha) * mom[i]\n bro = bounder(bro, args)\n sis = bounder(sis, args)\n children.append(bro)\n children.append(sis)\n else:\n children.append(mom)\n children.append(dad)\n return children",
"def arithmetic_crossover(random, mom, dad, args):\r\n ax_alpha = args.setdefault('ax_alpha', 0.5)\r\n ax_points = args.setdefault('ax_points', None)\r\n crossover_rate = args.setdefault('crossover_rate', 1.0)\r\n bounder = args['_ec'].bounder\r\n children = []\r\n if random.random() < crossover_rate:\r\n bro = copy.copy(dad)\r\n sis = copy.copy(mom)\r\n if ax_points is None:\r\n ax_points = list(range(min(len(bro), len(sis))))\r\n for i in ax_points:\r\n bro[i] = ax_alpha * mom[i] + (1 - ax_alpha) * dad[i]\r\n sis[i] = ax_alpha * dad[i] + (1 - ax_alpha) * mom[i]\r\n bro = bounder(bro, args)\r\n sis = bounder(sis, args)\r\n children.append(bro)\r\n children.append(sis)\r\n else:\r\n children.append(mom)\r\n children.append(dad)\r\n return children",
"def heuristic_crossover(random, candidates, args):\n crossover_rate = args.setdefault('crossover_rate', 1.0)\n bounder = args['_ec'].bounder\n \n if len(candidates) % 2 == 1:\n candidates = candidates[:-1]\n \n # Since we don't have fitness information in the candidates, we need \n # to make a dictionary containing the candidate and its corresponding \n # individual in the population.\n population = list(args['_ec'].population)\n lookup = dict(zip([pickle.dumps(p.candidate, 1) for p in population], population))\n \n moms = candidates[::2]\n dads = candidates[1::2]\n children = []\n for mom, dad in zip(moms, dads):\n if random.random() < crossover_rate:\n bro = copy.copy(dad)\n sis = copy.copy(mom)\n mom_is_better = lookup[pickle.dumps(mom, 1)] > lookup[pickle.dumps(dad, 1)]\n for i, (m, d) in enumerate(zip(mom, dad)):\n negpos = 1 if mom_is_better else -1\n val = d if mom_is_better else m\n bro[i] = val + random.random() * negpos * (m - d)\n sis[i] = val + random.random() * negpos * (m - d)\n bro = bounder(bro, args)\n sis = bounder(sis, args)\n children.append(bro)\n children.append(sis)\n else:\n children.append(mom)\n children.append(dad)\n return children",
"def general_cross_function(verbosity, function):\r\n ret = 1\r\n first_errors = [False, False]\r\n for count in range(10, 25, 5):\r\n for points in range(5, 10):\r\n for ax_c in range(3, 5):\r\n axes = []\r\n for _ in range(ax_c):\r\n axes.append(((np.random.random_sample() * 2), (3 + np.random.random_sample() * 4)))\r\n population = GeneticAlgorithms.random_population(count, points, axes) # assumes this works\r\n for _ in range(len(population)):\r\n rd1 = np.random.choice(population)\r\n rd2 = np.random.choice(population)\r\n crs = function(rd1, rd2)\r\n if crs.shape != rd1.shape:\r\n ret = 0\r\n if verbosity > 0 and first_errors[0]:\r\n first_errors[0] = True\r\n print(\"ERROR: cross function doesn't return correct shape\")\r\n for i in range(points):\r\n for j in range(ax_c):\r\n if crs[i][j] < min(rd1[i][j], rd2[i][j]) or crs[i][j] > max(rd1[i][j], rd2[i][j]):\r\n ret = 0\r\n if verbosity > 0 and first_errors[1]:\r\n first_errors[1] = True\r\n print(\"ERROR: cross function doesn't return in correct range\")\r\n return ret",
"def doCrossover(self, cross_func, member1, member2):\n\t\tif cross_func in self.crossover_external_data:\n\t\t\treturn cross_func(member1, member2, *(self.crossover_external_data[cross_func]))\n\t\telse:\n\t\t\treturn cross_func(member1, member2)",
"def heuristic_crossover(random, candidates, args):\r\n crossover_rate = args.setdefault('crossover_rate', 1.0)\r\n bounder = args['_ec'].bounder\r\n \r\n if len(candidates) % 2 == 1:\r\n candidates = candidates[:-1]\r\n \r\n # Since we don't have fitness information in the candidates, we need \r\n # to make a dictionary containing the candidate and its corresponding \r\n # individual in the population.\r\n population = list(args['_ec'].population)\r\n lookup = dict(zip([pickle.dumps(p.candidate, 1) for p in population], population))\r\n \r\n moms = candidates[::2]\r\n dads = candidates[1::2]\r\n children = []\r\n for mom, dad in zip(moms, dads):\r\n if random.random() < crossover_rate:\r\n bro = copy.copy(dad)\r\n sis = copy.copy(mom)\r\n mom_is_better = lookup[pickle.dumps(mom, 1)] > lookup[pickle.dumps(dad, 1)]\r\n for i, (m, d) in enumerate(zip(mom, dad)):\r\n negpos = 1 if mom_is_better else -1\r\n val = d if mom_is_better else m\r\n bro[i] = val + random.random() * negpos * (m - d)\r\n sis[i] = val + random.random() * negpos * (m - d)\r\n bro = bounder(bro, args)\r\n sis = bounder(sis, args)\r\n children.append(bro)\r\n children.append(sis)\r\n else:\r\n children.append(mom)\r\n children.append(dad)\r\n return children",
"def n_point_crossover(random, mom, dad, args):\n crossover_rate = args.setdefault('crossover_rate', 1.0)\n num_crossover_points = args.setdefault('num_crossover_points', 1)\n children = []\n if random.random() < crossover_rate:\n num_cuts = min(len(mom)-1, num_crossover_points)\n cut_points = random.sample(range(1, len(mom)), num_cuts)\n cut_points.sort()\n bro = copy.copy(dad)\n sis = copy.copy(mom)\n normal = True\n for i, (m, d) in enumerate(zip(mom, dad)):\n if i in cut_points:\n normal = not normal\n if not normal:\n bro[i] = m\n sis[i] = d\n normal = not normal\n children.append(bro)\n children.append(sis)\n else:\n children.append(mom)\n children.append(dad)\n return children",
"def cross(self, mother, father):\n\n cross_method = 2\n child1 = []\n child2 = []\n if cross_method == 1:\n locus = random.randint(1, self.chromosome_length - 1)\n\n for i in range(self.chromosome_length):\n if i < locus:\n child1.append(mother[i])\n child2.append(father[i])\n else:\n child1.append(father[i])\n child2.append(mother[i])\n else:\n parent = True\n\n locus_count = math.floor(self.chromosome_length / self.cross_rate)\n locus = []\n for i in range(int(locus_count)):\n tmp = 0\n while tmp in locus:\n random.randint(1, self.chromosome_length - 1)\n for i in range(self.chromosome_length):\n if i in locus:\n parent = not parent\n if parent:\n child1.append(mother[i])\n child2.append(father[i])\n else:\n child1.append(father[i])\n child2.append(mother[i])\n\n return [child1, child2]",
"def uniform_crossover(random, mom, dad, args):\n ux_bias = args.setdefault('ux_bias', 0.5)\n crossover_rate = args.setdefault('crossover_rate', 1.0)\n children = []\n if random.random() < crossover_rate:\n bro = copy.copy(dad)\n sis = copy.copy(mom)\n for i, (m, d) in enumerate(zip(mom, dad)):\n if random.random() < ux_bias:\n bro[i] = m\n sis[i] = d\n children.append(bro)\n children.append(sis)\n else:\n children.append(mom)\n children.append(dad)\n return children",
"def cross(self):\n\n for i in range(self.pop_num): # Put in the first pop_num elements of the \"Parents and Sons\" array our entire input population.\n self.par_and_sons[i].A=self.population[i].A.copy()\n\n random.shuffle(self.population) # Shuffle population.\n\n tt=0 # The counter that is needed to implement a non-trivial crossing.\n for s in range(0,self.pop_num,2): # From 0 to pop_num with step 2. That is. here we take pop_num / 2 pairs of parents.\n self.mother.A=self.population[tt+int(self.pop_num/2)].A # Let the last pop_num / 2 individuals of our population be our mothers.\n self.father.A=self.population[tt].A # And let first pop_num / 2 individuals of our population be dads.\n \n tt=tt+1 \n ran=random.random()\n\n for n in range(self.length): # Crossover.\n if random.random()>0.5:\n self.son1.A[n] = self.father.A[n]\n self.son2.A[self.length-1-n] = self.father.A[n]\n self.son3.A[n] = self.mother.A[n]\n self.son4.A[self.length-1-n] = self.mother.A[n]\n else:\n self.son1.A[n] = self.mother.A[n]\n self.son2.A[self.length-1-n] = self.mother.A[n]\n self.son3.A[n] = self.father.A[n]\n self.son4.A[self.length-1-n] = self.father.A[n]\n\n self.par_and_sons[self.pop_num+2*s].A = self.son1.A.copy()\n self.par_and_sons[self.pop_num+2*s+1].A = self.son2.A.copy()\n self.par_and_sons[self.pop_num+2*s+2].A = self.son3.A.copy()\n self.par_and_sons[self.pop_num+2*s+3].A = self.son4.A.copy()",
"def uniform_crossover(random, mom, dad, args):\r\n ux_bias = args.setdefault('ux_bias', 0.5)\r\n crossover_rate = args.setdefault('crossover_rate', 1.0)\r\n children = []\r\n if random.random() < crossover_rate:\r\n bro = copy.copy(dad)\r\n sis = copy.copy(mom)\r\n for i, (m, d) in enumerate(zip(mom, dad)):\r\n if random.random() < ux_bias:\r\n bro[i] = m\r\n sis[i] = d\r\n children.append(bro)\r\n children.append(sis)\r\n else:\r\n children.append(mom)\r\n children.append(dad)\r\n return children",
"def n_point_crossover(random, mom, dad, args):\r\n crossover_rate = args.setdefault('crossover_rate', 1.0)\r\n num_crossover_points = args.setdefault('num_crossover_points', 1)\r\n children = []\r\n if random.random() < crossover_rate:\r\n num_cuts = min(len(mom)-1, num_crossover_points)\r\n cut_points = random.sample(range(1, len(mom)), num_cuts)\r\n cut_points.sort()\r\n bro = copy.copy(dad)\r\n sis = copy.copy(mom)\r\n normal = True\r\n for i, (m, d) in enumerate(zip(mom, dad)):\r\n if i in cut_points:\r\n normal = not normal\r\n if not normal:\r\n bro[i] = m\r\n sis[i] = d\r\n children.append(bro)\r\n children.append(sis)\r\n else:\r\n children.append(mom)\r\n children.append(dad)\r\n return children",
"def partially_matched_crossover(random, mom, dad, args):\n crossover_rate = args.setdefault('crossover_rate', 1.0)\n if random.random() < crossover_rate:\n size = len(mom)\n points = random.sample(range(size), 2)\n x, y = min(points), max(points)\n bro = copy.copy(dad)\n bro[x:y+1] = mom[x:y+1]\n sis = copy.copy(mom)\n sis[x:y+1] = dad[x:y+1]\n for parent, child in zip([dad, mom], [bro, sis]):\n for i in range(x, y+1):\n if parent[i] not in child[x:y+1]:\n spot = i\n while x <= spot <= y:\n spot = parent.index(child[spot])\n child[spot] = parent[i]\n return [bro, sis]\n else:\n return [mom, dad]",
"def simulated_binary_crossover(random, mom, dad, args):\r\n crossover_rate = args.setdefault('crossover_rate', 1.0)\r\n if random.random() < crossover_rate:\r\n di = args.setdefault('sbx_distribution_index', 10)\r\n bounder = args['_ec'].bounder\r\n bro = copy.copy(dad)\r\n sis = copy.copy(mom)\r\n for i, (m, d, lb, ub) in enumerate(zip(mom, dad, bounder.lower_bound, bounder.upper_bound)):\r\n try:\r\n if m > d:\r\n m, d = d, m\r\n beta = 1.0 + 2 * min(m - lb, ub - d) / float(d - m)\r\n alpha = 2.0 - 1.0 / beta**(di + 1.0)\r\n u = random.random() \r\n if u <= (1.0 / alpha):\r\n beta_q = (u * alpha)**(1.0 / float(di + 1.0))\r\n else:\r\n beta_q = (1.0 / (2.0 - u * alpha))**(1.0 / float(di + 1.0))\r\n bro_val = 0.5 * ((m + d) - beta_q * (d - m))\r\n bro_val = max(min(bro_val, ub), lb) \r\n sis_val = 0.5 * ((m + d) + beta_q * (d - m))\r\n sis_val = max(min(sis_val, ub), lb)\r\n if random.random() > 0.5:\r\n bro_val, sis_val = sis_val, bro_val\r\n bro[i] = bro_val\r\n sis[i] = sis_val\r\n except ZeroDivisionError:\r\n # The offspring already have legitimate values for every element,\r\n # so no need to take any special action here.\r\n pass\r\n return [bro, sis]\r\n else:\r\n return [mom, dad]",
"def crossing(self, *args):\n return self.phy2abs.crossing(*args)",
"def partially_matched_crossover(random, mom, dad, args):\r\n crossover_rate = args.setdefault('crossover_rate', 1.0)\r\n if random.random() < crossover_rate:\r\n size = len(mom)\r\n points = random.sample(range(size), 2)\r\n x, y = min(points), max(points)\r\n bro = copy.copy(dad)\r\n bro[x:y+1] = mom[x:y+1]\r\n sis = copy.copy(mom)\r\n sis[x:y+1] = dad[x:y+1]\r\n for parent, child in zip([dad, mom], [bro, sis]):\r\n for i in range(x, y+1):\r\n if parent[i] not in child[x:y+1]:\r\n spot = i\r\n while x <= spot <= y:\r\n spot = parent.index(child[spot])\r\n child[spot] = parent[i]\r\n return [bro, sis]\r\n else:\r\n return [mom, dad]",
"def crossover(f,P_c_min,P_c_max,i,D,V,P,U):\n #ADAPTIVE Crossover\n if f[i] < np.mean(f):\n P_c = P_c_min + (P_c_max-P_c_min)*((f[i]-np.mean(f))/(np.max(f)-np.mean(f)))\n else:\n P_c = P_c_min\n\n delta = np.random.randint(0,D-1) \n for j in np.arange(D):\n if np.random.uniform(0,1) <= P_c or delta == j:\n U[i,j] = V[j]\n else:\n U[i,j]=P[i,j]\n\n return U",
"def simulated_binary_crossover(random, mom, dad, args):\n crossover_rate = args.setdefault('crossover_rate', 1.0)\n if random.random() < crossover_rate:\n di = args.setdefault('sbx_distribution_index', 10)\n bounder = args['_ec'].bounder\n bro = copy.copy(dad)\n sis = copy.copy(mom)\n for i, (m, d, lb, ub) in enumerate(zip(mom, dad, bounder.lower_bound, bounder.upper_bound)):\n try:\n if m > d:\n m, d = d, m\n beta = 1.0 + 2 * min(m - lb, ub - d) / float(d - m)\n alpha = 2.0 - 1.0 / beta**(di + 1.0)\n u = random.random() \n if u <= (1.0 / alpha):\n beta_q = (u * alpha)**(1.0 / float(di + 1.0))\n else:\n beta_q = (1.0 / (2.0 - u * alpha))**(1.0 / float(di + 1.0))\n bro_val = 0.5 * ((m + d) - beta_q * (d - m))\n bro_val = max(min(bro_val, ub), lb) \n sis_val = 0.5 * ((m + d) + beta_q * (d - m))\n sis_val = max(min(sis_val, ub), lb)\n if random.random() > 0.5:\n bro_val, sis_val = sis_val, bro_val\n bro[i] = bro_val\n sis[i] = sis_val\n except ZeroDivisionError:\n # The offspring already have legitimate values for every element,\n # so no need to take any special action here.\n pass\n return [bro, sis]\n else:\n return [mom, dad]",
"def c_test_cross_function(self, function):\r\n return general_cross_function(self.verbosity, function)",
"def c_test_cross_function(self, function):\r\n return general_cross_function(self.verbosity, function)",
"def crossover(self):\n print(' - crossover')\n s = time.time()\n\n # make a list with all index\n tmp_list = list(range(0, self.size))\n while len(tmp_list) > 0:\n candidate_1 = random.choice(tmp_list)\n tmp_list.remove(candidate_1)\n candidate_2 = random.choice(tmp_list)\n tmp_list.remove(candidate_2)\n\n # ceck if the two candidates will crossover\n chance = random.uniform(0, 1)\n if chance <= self.crossover_rate:\n self.crossover_two_candidates(candidate_1, candidate_2)\n\n e = time.time()\n print(\" - time: \", e - s)",
"def crossoverIndividuals(father, mother, bwsFitnessFunction, highIsGood):\n\n #choose depth of crossover point at random\n crossoverDepth = round(random.uniform(1,father.getDepth()))\n\n #get all subtrees of father and mother at that layer of deepness\n fatherNodesAtLayer = father.getNodesAtDepth(crossoverDepth)\n motherNodesAtLayer = mother.getNodesAtDepth(crossoverDepth)\n\n numberOfNodesinLayer = pow(2, crossoverDepth)\n\n #if no fitnessfunction is supplied, use random crossover\n if bwsFitnessFunction is None:\n indexM = round(random.uniform(0,numberOfNodesinLayer - 1))\n indexF = round(random.uniform(0,numberOfNodesinLayer - 1))\n\n #if bws (Best-Worst-Subtree) crossover is used, at crossoverDepth\n #find the best subtree from father and the worst from mother\n else:\n fitnessValuesOfFatherNodes = list(map(bwsFitnessFunction, fatherNodesAtLayer))\n fitnessValuesOfMotherNodes = list(map(bwsFitnessFunction, motherNodesAtLayer))\n\n if highIsGood:\n indexF = fitnessValuesOfFatherNodes.index(max(fitnessValuesOfFatherNodes))\n indexM = fitnessValuesOfMotherNodes.index(min(fitnessValuesOfMotherNodes))\n else:\n indexF = fitnessValuesOfFatherNodes.index(min(fitnessValuesOfFatherNodes))\n indexM = fitnessValuesOfMotherNodes.index(max(fitnessValuesOfMotherNodes))\n\n fatherCrossOverNode = copy.deepcopy(fatherNodesAtLayer[indexF])\n\n #exchange identified crossover nodes\n child = copy.deepcopy(mother)\n child.updateSubTree(crossoverDepth, indexM, fatherCrossOverNode)\n\n return child",
"def blend_crossover(random, mom, dad, args):\n blx_alpha = args.setdefault('blx_alpha', 0.1)\n blx_points = args.setdefault('blx_points', None)\n crossover_rate = args.setdefault('crossover_rate', 1.0)\n bounder = args['_ec'].bounder\n children = []\n if random.random() < crossover_rate:\n bro = copy.copy(dad)\n sis = copy.copy(mom)\n if blx_points is None:\n blx_points = list(range(min(len(bro), len(sis))))\n for i in blx_points:\n smallest, largest = min(mom[i], dad[i]), max(mom[i], dad[i])\n delta = blx_alpha * (largest - smallest)\n bro[i] = smallest - delta + random.random() * (largest - smallest + 2 * delta)\n sis[i] = smallest - delta + random.random() * (largest - smallest + 2 * delta)\n bro = bounder(bro, args)\n sis = bounder(sis, args)\n children.append(bro)\n children.append(sis)\n else:\n children.append(mom)\n children.append(dad)\n return children",
"def crossover(self):\n self.sort_population()\n elite_amount = round(self.elite_rate * self.population_size)\n # preserve from the top\n new_population = [ele for ele in self.population if ele.ttl > 0]\n for individual in new_population:\n if individual.ttl > 0:\n individual.ttl -= 1\n new_population += self.population[:elite_amount]\n\n while len(new_population) < self.population_size:\n # newGene = self.crossBelowCrossRate()\n new_gene, new_gene2 = self.cross_on_arb_seq()\n if random() <= self.mutate_rate:\n self.mutate_append(new_gene)\n new_population.append(new_gene)\n if len(new_population) == self.population_size:\n break\n\n if random() <= self.mutate_rate:\n self.mutate_append(new_gene2)\n new_population.append(new_gene2)\n self.population = new_population",
"def blend_crossover(random, mom, dad, args):\r\n blx_alpha = args.setdefault('blx_alpha', 0.1)\r\n blx_points = args.setdefault('blx_points', None)\r\n crossover_rate = args.setdefault('crossover_rate', 1.0)\r\n bounder = args['_ec'].bounder\r\n children = []\r\n if random.random() < crossover_rate:\r\n bro = copy.copy(dad)\r\n sis = copy.copy(mom)\r\n if blx_points is None:\r\n blx_points = list(range(min(len(bro), len(sis))))\r\n for i in blx_points:\r\n smallest, largest = min(mom[i], dad[i]), max(mom[i], dad[i])\r\n delta = blx_alpha * (largest - smallest)\r\n bro[i] = smallest - delta + random.random() * (largest - smallest + 2 * delta)\r\n sis[i] = smallest - delta + random.random() * (largest - smallest + 2 * delta)\r\n bro = bounder(bro, args)\r\n sis = bounder(sis, args)\r\n children.append(bro)\r\n children.append(sis)\r\n else:\r\n children.append(mom)\r\n children.append(dad)\r\n return children",
"def _apply_crossover(pop, op, pb):\n for i in range(1, len(pop), 2):\n if random.random() < pb:\n pop[i - 1], pop[i] = op(pop[i - 1], pop[i])\n del pop[i - 1].fitness.values\n del pop[i].fitness.values\n return pop"
]
| [
"0.75560653",
"0.59480983",
"0.5935825",
"0.5895034",
"0.5812965",
"0.5759852",
"0.5736582",
"0.5733353",
"0.5718653",
"0.57111734",
"0.57025594",
"0.56801486",
"0.5678542",
"0.5672112",
"0.56708926",
"0.5608201",
"0.552107",
"0.5520685",
"0.5502717",
"0.5495076",
"0.5481087",
"0.54799783",
"0.5425853",
"0.5425853",
"0.54250747",
"0.53557676",
"0.5350474",
"0.53481156",
"0.5311639",
"0.52868336"
]
| 0.7708781 | 0 |
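Because the docstring in the row above comes from the inspyred package, the wrapper it describes can be used directly as a decorator. The sketch below assumes inspyred is installed; swap_halves and the toy parent lists are hypothetical names invented for the example.

# Hedged usage sketch of the pairing decorator described above (requires inspyred).
import random
from inspyred.ec.variators import crossover

@crossover
def swap_halves(rng, mom, dad, args):
    # operate on a single mom-dad pair, return two offspring
    cut = len(mom) // 2
    return [dad[:cut] + mom[cut:], mom[:cut] + dad[cut:]]

rng = random.Random(0)
parents = [[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]]
children = swap_halves(rng, parents, {})
print(children)   # four offspring: one pair per mom-dad pairing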
Return the offspring of uniform crossover on the candidates. This function performs uniform crossover (UX). For each element of the parents, a biased coin is flipped to determine whether the first offspring gets the 'mom' or the 'dad' element. An optional keyword argument in args, ``ux_bias``, determines the bias. | def uniform_crossover(random, mom, dad, args):
ux_bias = args.setdefault('ux_bias', 0.5)
crossover_rate = args.setdefault('crossover_rate', 1.0)
children = []
if random.random() < crossover_rate:
bro = copy.copy(dad)
sis = copy.copy(mom)
for i, (m, d) in enumerate(zip(mom, dad)):
if random.random() < ux_bias:
bro[i] = m
sis[i] = d
children.append(bro)
children.append(sis)
else:
children.append(mom)
children.append(dad)
return children | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def uniform_crossover(random, mom, dad, args):\r\n ux_bias = args.setdefault('ux_bias', 0.5)\r\n crossover_rate = args.setdefault('crossover_rate', 1.0)\r\n children = []\r\n if random.random() < crossover_rate:\r\n bro = copy.copy(dad)\r\n sis = copy.copy(mom)\r\n for i, (m, d) in enumerate(zip(mom, dad)):\r\n if random.random() < ux_bias:\r\n bro[i] = m\r\n sis[i] = d\r\n children.append(bro)\r\n children.append(sis)\r\n else:\r\n children.append(mom)\r\n children.append(dad)\r\n return children",
"def _create_offspring(self):\n parents = self._select_parents()\n offspring = self._crossover(*parents)\n if (random.uniform(0, 1) < self.mutation_rate):\n self._mutate(offspring)\n return offspring",
"def crossover(self, parents):\n\n randomCategory = random.sample(list(ga_.Category), 1)[0]\n randomParent1 = random.sample(parents, 1)[0]\n randomParent2 = None\n for parent in parents:\n if parent != randomParent1:\n randomParent2 = parent\n \n\n # put randomCategory from random parent to the new offpring and the remainder from the second parent\n offspring = ga_.Outfit()\n if randomCategory == ga_.Category.TOP:\n offspring.top = randomParent1.top\n offspring.bottom = randomParent2.bottom\n offspring.shoes = randomParent2.shoes\n offspring.neck = randomParent2.neck\n offspring.handbag = randomParent2.handbag\n elif randomCategory == ga_.Category.BOTTOM:\n offspring.top = randomParent2.top\n offspring.bottom = randomParent1.bottom\n offspring.shoes = randomParent2.shoes\n offspring.neck = randomParent2.neck\n offspring.handbag = randomParent2.handbag\n elif randomCategory == ga_.Category.SHOES:\n offspring.top = randomParent2.top\n offspring.bottom = randomParent2.bottom\n offspring.shoes = randomParent1.shoes\n offspring.neck = randomParent2.neck\n offspring.handbag = randomParent2.handbag\n elif randomCategory == ga_.Category.NECK:\n offspring.top = randomParent2.top\n offspring.bottom = randomParent2.bottom\n offspring.shoes = randomParent2.shoes\n offspring.neck = randomParent1.neck\n offspring.handbag = randomParent2.handbag\n elif randomCategory == ga_.Category.HANDBAG:\n offspring.top = randomParent2.top\n offspring.bottom = randomParent2.bottom\n offspring.shoes = randomParent2.shoes\n offspring.neck = randomParent2.neck\n offspring.handbag = randomParent1.handbag\n\n return offspring",
"def simulated_binary_crossover(random, mom, dad, args):\n crossover_rate = args.setdefault('crossover_rate', 1.0)\n if random.random() < crossover_rate:\n di = args.setdefault('sbx_distribution_index', 10)\n bounder = args['_ec'].bounder\n bro = copy.copy(dad)\n sis = copy.copy(mom)\n for i, (m, d, lb, ub) in enumerate(zip(mom, dad, bounder.lower_bound, bounder.upper_bound)):\n try:\n if m > d:\n m, d = d, m\n beta = 1.0 + 2 * min(m - lb, ub - d) / float(d - m)\n alpha = 2.0 - 1.0 / beta**(di + 1.0)\n u = random.random() \n if u <= (1.0 / alpha):\n beta_q = (u * alpha)**(1.0 / float(di + 1.0))\n else:\n beta_q = (1.0 / (2.0 - u * alpha))**(1.0 / float(di + 1.0))\n bro_val = 0.5 * ((m + d) - beta_q * (d - m))\n bro_val = max(min(bro_val, ub), lb) \n sis_val = 0.5 * ((m + d) + beta_q * (d - m))\n sis_val = max(min(sis_val, ub), lb)\n if random.random() > 0.5:\n bro_val, sis_val = sis_val, bro_val\n bro[i] = bro_val\n sis[i] = sis_val\n except ZeroDivisionError:\n # The offspring already have legitimate values for every element,\n # so no need to take any special action here.\n pass\n return [bro, sis]\n else:\n return [mom, dad]",
"def simulated_binary_crossover(random, mom, dad, args):\r\n crossover_rate = args.setdefault('crossover_rate', 1.0)\r\n if random.random() < crossover_rate:\r\n di = args.setdefault('sbx_distribution_index', 10)\r\n bounder = args['_ec'].bounder\r\n bro = copy.copy(dad)\r\n sis = copy.copy(mom)\r\n for i, (m, d, lb, ub) in enumerate(zip(mom, dad, bounder.lower_bound, bounder.upper_bound)):\r\n try:\r\n if m > d:\r\n m, d = d, m\r\n beta = 1.0 + 2 * min(m - lb, ub - d) / float(d - m)\r\n alpha = 2.0 - 1.0 / beta**(di + 1.0)\r\n u = random.random() \r\n if u <= (1.0 / alpha):\r\n beta_q = (u * alpha)**(1.0 / float(di + 1.0))\r\n else:\r\n beta_q = (1.0 / (2.0 - u * alpha))**(1.0 / float(di + 1.0))\r\n bro_val = 0.5 * ((m + d) - beta_q * (d - m))\r\n bro_val = max(min(bro_val, ub), lb) \r\n sis_val = 0.5 * ((m + d) + beta_q * (d - m))\r\n sis_val = max(min(sis_val, ub), lb)\r\n if random.random() > 0.5:\r\n bro_val, sis_val = sis_val, bro_val\r\n bro[i] = bro_val\r\n sis[i] = sis_val\r\n except ZeroDivisionError:\r\n # The offspring already have legitimate values for every element,\r\n # so no need to take any special action here.\r\n pass\r\n return [bro, sis]\r\n else:\r\n return [mom, dad]",
"def crossover(cross):\n @functools.wraps(cross)\n def inspyred_crossover(random, candidates, args):\n if len(candidates) % 2 == 1:\n candidates = candidates[:-1]\n moms = candidates[::2]\n dads = candidates[1::2]\n children = []\n for i, (mom, dad) in enumerate(zip(moms, dads)):\n cross.index = i\n offspring = cross(random, mom, dad, args)\n for o in offspring:\n children.append(o)\n return children\n inspyred_crossover.single_crossover = cross\n return inspyred_crossover",
"def _crossover(self, sel):\n offspring = []\n for p1, p2 in sel:\n p1 = copy.deepcopy(p1)\n p2 = copy.deepcopy(p2)\n\n tmp = self.op.crossover(\n copy.deepcopy(p1['individual']),\n copy.deepcopy(p2['individual']))\n if not tmp[0] is None and not tmp[1] is None:\n c1 = {\n 'individual': tmp[0],\n 'fitness': self.op.fitness(tmp[0])\n }\n c2 = {\n 'individual': tmp[1],\n 'fitness': self.op.fitness(tmp[1])\n }\n\n offspring.append(\n c1 if c1['fitness'] < p1['fitness'] else p1)\n offspring.append(\n c2 if c2['fitness'] < p2['fitness'] else p2)\n else:\n offspring.extend((p1, p2))\n return offspring",
"def crossover(cross):\r\n @functools.wraps(cross)\r\n def ecspy_crossover(random, candidates, args):\r\n if len(candidates) % 2 == 1:\r\n candidates = candidates[:-1]\r\n moms = candidates[::2]\r\n dads = candidates[1::2]\r\n children = []\r\n for i, (mom, dad) in enumerate(zip(moms, dads)):\r\n cross.index = i\r\n offspring = cross(random, mom, dad, args)\r\n for o in offspring:\r\n children.append(o)\r\n return children\r\n ecspy_crossover.single_crossover = cross\r\n return ecspy_crossover",
"def recombination(parents):\n\n # pick 5 random numbers that add up to 1\n random_values = np.random.dirichlet(np.ones(5),size=1)[0]\n\n # those random values will serve as weights for the genes 2 offspring get (whole arithmetic recombination)\n offspring1 = random_values[0] * parents[0] + random_values[1] * parents[1] + random_values[2] * parents[2] + random_values[3] * parents[3] + \\\n random_values[4] * parents[4]\n\n # repeat for offspring 2\n random_values = np.random.dirichlet(np.ones(5),size=1)[0]\n offspring2 = random_values[0] * parents[0] + random_values[1] * parents[1] + random_values[2] * parents[2] + random_values[3] * parents[3] + \\\n random_values[4] * parents[4]\n\n # the other 2 offspring will come from 4-point crossover\n random_points = np.sort(np.random.randint(1, parents[0].shape[0]-2, 4))\n\n # to make it so that it won't always be p1 who gives the first portion of DNA etc, we shuffle the parents\n np.random.shuffle(parents)\n\n # add the genes together\n offspring3 = np.concatenate((parents[0][0:random_points[0]], parents[1][random_points[0]:random_points[1]], parents[2][random_points[1]:random_points[2]],\\\n parents[3][random_points[2]:random_points[3]], parents[4][random_points[3]:]))\n\n # repeat for offspring 4\n random_points = np.sort(np.random.randint(1, parents[0].shape[0]-2, 4))\n np.random.shuffle(parents)\n offspring4 = np.concatenate((parents[0][0:random_points[0]], parents[1][random_points[0]:random_points[1]], parents[2][random_points[1]:random_points[2]],\\\n parents[3][random_points[2]:random_points[3]], parents[4][random_points[3]:]))\n\n # return the offspring\n return np.concatenate(([offspring1], [offspring2], [offspring3], [offspring4]))",
"def crossing(self, *args):\n return self.phy2abs.crossing(*args)",
"def varAnd(population, toolbox, cxpb, mutpb):\n offspring = [toolbox.clone(ind) for ind in population]\n new_cxpb=cxpb/(cxpb+mutpb)\n new_mutpb=mutpb/(cxpb+mutpb)\n \n #num_cx=int(new_cxpb*len(offspring))\n #num_mu=len(offspring)-num_cx\n #print(new_cxpb, new_mutpb)\n # Apply crossover and mutation on the offspring\n i = 1\n while i < len(offspring):\n if random.random() < new_cxpb:\n if (offspring[i - 1] == offspring[i]):\n offspring[i - 1], = toolbox.mutate(offspring[i - 1])\n offspring[i], = toolbox.mutate(offspring[i])\n else:\n offspring[i - 1], offspring[i] = toolbox.mate(offspring[i - 1], offspring[i])\n del offspring[i - 1].fitness.values, offspring[i].fitness.values\n i = i + 2\n else:\n offspring[i], = toolbox.mutate(offspring[i])\n del offspring[i].fitness.values\n i = i + 1\n return offspring",
"def varAnd(population, toolbox, cxpb, mutpb):\n offspring = [toolbox.clone(ind) for ind in population]\n \n # Apply crossover and mutation on the offspring\n for ind1, ind2 in zip(offspring[::2], offspring[1::2]):\n if random.random() < cxpb:\n toolbox.mate(ind1, ind2)\n del ind1.fitness.values, ind2.fitness.values\n \n for ind in offspring:\n if random.random() < mutpb:\n toolbox.mutate(ind)\n del ind.fitness.values\n \n return offspring",
"def heuristic_crossover(random, candidates, args):\n crossover_rate = args.setdefault('crossover_rate', 1.0)\n bounder = args['_ec'].bounder\n \n if len(candidates) % 2 == 1:\n candidates = candidates[:-1]\n \n # Since we don't have fitness information in the candidates, we need \n # to make a dictionary containing the candidate and its corresponding \n # individual in the population.\n population = list(args['_ec'].population)\n lookup = dict(zip([pickle.dumps(p.candidate, 1) for p in population], population))\n \n moms = candidates[::2]\n dads = candidates[1::2]\n children = []\n for mom, dad in zip(moms, dads):\n if random.random() < crossover_rate:\n bro = copy.copy(dad)\n sis = copy.copy(mom)\n mom_is_better = lookup[pickle.dumps(mom, 1)] > lookup[pickle.dumps(dad, 1)]\n for i, (m, d) in enumerate(zip(mom, dad)):\n negpos = 1 if mom_is_better else -1\n val = d if mom_is_better else m\n bro[i] = val + random.random() * negpos * (m - d)\n sis[i] = val + random.random() * negpos * (m - d)\n bro = bounder(bro, args)\n sis = bounder(sis, args)\n children.append(bro)\n children.append(sis)\n else:\n children.append(mom)\n children.append(dad)\n return children",
"def _undistort_oulu(\n self, xy: np.ndarray, iterations: int = 20, tolerance: Number = 0\n ) -> np.ndarray:\n # Initial guess\n uxy = xy\n for _ in range(iterations):\n r2 = np.sum(uxy ** 2, axis=1)\n if any(self.p) and not any(self.k):\n uxy = xy - self._tangential_distortion(uxy, r2)\n elif any(self.k) and not any(self.k):\n uxy = xy * (1 / self._radial_distortion(r2))\n else:\n uxy = (xy - self._tangential_distortion(uxy, r2)) * (\n 1 / self._radial_distortion(r2)\n )\n if tolerance > 0 and np.all(\n (np.abs(self._distort(uxy) - xy)) < tolerance / self.f.mean()\n ):\n break\n return uxy",
"def heuristic_crossover(random, candidates, args):\r\n crossover_rate = args.setdefault('crossover_rate', 1.0)\r\n bounder = args['_ec'].bounder\r\n \r\n if len(candidates) % 2 == 1:\r\n candidates = candidates[:-1]\r\n \r\n # Since we don't have fitness information in the candidates, we need \r\n # to make a dictionary containing the candidate and its corresponding \r\n # individual in the population.\r\n population = list(args['_ec'].population)\r\n lookup = dict(zip([pickle.dumps(p.candidate, 1) for p in population], population))\r\n \r\n moms = candidates[::2]\r\n dads = candidates[1::2]\r\n children = []\r\n for mom, dad in zip(moms, dads):\r\n if random.random() < crossover_rate:\r\n bro = copy.copy(dad)\r\n sis = copy.copy(mom)\r\n mom_is_better = lookup[pickle.dumps(mom, 1)] > lookup[pickle.dumps(dad, 1)]\r\n for i, (m, d) in enumerate(zip(mom, dad)):\r\n negpos = 1 if mom_is_better else -1\r\n val = d if mom_is_better else m\r\n bro[i] = val + random.random() * negpos * (m - d)\r\n sis[i] = val + random.random() * negpos * (m - d)\r\n bro = bounder(bro, args)\r\n sis = bounder(sis, args)\r\n children.append(bro)\r\n children.append(sis)\r\n else:\r\n children.append(mom)\r\n children.append(dad)\r\n return children",
"def uniformCrossover(self, cl):\n if cons.env.format_data.discrete_action: #Always crossover condition if the phenotype is discrete (if continuous phenotype, half the time phenotype crossover is performed instead)\n self_specified_atts = copy.deepcopy(self.specified_attributes)\n cl_specified_atts = copy.deepcopy(cl.specified_attributes)\n probability = 0.5 #Equal probability for attribute alleles to be exchanged.\n\n #Make list of attribute references appearing in at least one of the parents.-----------------------------\n combined_atts = []\n for i in self_specified_atts:\n combined_atts.append(i)\n for i in cl_specified_atts:\n if i not in combined_atts:\n combined_atts.append(i)\n elif not cons.env.format_data.attribute_info[i][0]: #Attribute specified in both parents, and the attribute is discrete (then no reason to cross over)\n combined_atts.remove(i)\n combined_atts.sort()\n #--------------------------------------------------------------------------------------------------------\n changed = False;\n for att in combined_atts: #Each condition specifies different attributes, so we need to go through all attributes in the dataset.\n att_info = cons.env.format_data.attribute_info[att]\n #-----------------------------\n ref = 0\n #if att in self.specified_attributes:\n if att in self_specified_atts:\n ref += 1\n #if att in cl.specified_attributes:\n if att in cl_specified_atts:\n ref += 1\n #-----------------------------\n\n if ref == 0: #Attribute not specified in either condition (Attribute type makes no difference)\n print(\"Error: UniformCrossover!\")\n pass\n\n elif ref == 1: #Attribute specified in only one condition - do probabilistic switch of whole attribute state (Attribute type makes no difference)\n if att in self_specified_atts and random.random() > probability:\n i = self.specified_attributes.index(att) #reference to the position of the attribute in the rule representation\n cl.condition.append(self.condition.pop(i)) #Take attribute from self and add to cl\n cl.specified_attributes.append(att)\n self.specified_attributes.remove(att)\n changed = True #Remove att from self and add to cl\n\n\n if att in cl_specified_atts and random.random() < probability:\n i = cl.specified_attributes.index(att) #reference to the position of the attribute in the rule representation\n self.condition.append(cl.condition.pop(i)) #Take attribute from self and add to cl\n self.specified_attributes.append(att)\n cl.specified_attributes.remove(att)\n changed = True #Remove att from cl and add to self.\n\n\n else: #Attribute specified in both conditions - do random crossover between state alleles. 
The same attribute may be specified at different positions within either classifier\n #-------------------------------------------------------\n # CONTINUOUS ATTRIBUTE\n #-------------------------------------------------------\n if att_info[0]:\n i_cl1 = self.specified_attributes.index(att) #pairs with self (classifier 1)\n i_cl2 = cl.specified_attributes.index(att) #pairs with cl (classifier 2)\n tmp_key = random.randint(0,3) #Make random choice between 4 scenarios, Swap minimums, Swap maximums, Self absorbs cl, or cl absorbs self.\n if tmp_key == 0: #Swap minimum\n temp = self.condition[i_cl1][0]\n self.condition[i_cl1][0] = cl.condition[i_cl2][0]\n cl.condition[i_cl2][0] = temp\n elif tmp_key == 1: #Swap maximum\n temp = self.condition[i_cl1][1]\n self.condition[i_cl1][1] = cl.condition[i_cl2][1]\n cl.condition[i_cl2][1] = temp\n else: #absorb range\n all_list = self.condition[i_cl1] + cl.condition[i_cl2]\n new_min = min(all_list)\n new_max = max(all_list)\n if tmp_key == 2: #self absorbs cl\n self.condition[i_cl1] = [new_min,new_max]\n #Remove cl\n cl.condition.pop(i_cl2)\n cl.specified_attributes.remove(att)\n else: #cl absorbs self\n cl.condition[i_cl2] = [new_min,new_max]\n #Remove self\n self.condition.pop(i_cl1)\n self.specified_attributes.remove(att)\n #-------------------------------------------------------\n # DISCRETE ATTRIBUTE\n #-------------------------------------------------------\n else:\n pass\n tmp_list1 = copy.deepcopy(self_specified_atts)\n tmp_list2 = copy.deepcopy(cl.specified_attributes)\n tmp_list1.sort()\n tmp_list2.sort()\n if changed and (tmp_list1 == tmp_list2):\n changed = False\n\n if self.action != cl.action and random.random() > probability:\n # Switch phenotypes of 2 classifiers if GA is run in match set\n temp = self.action\n self.action = cl.action\n cl.action = temp\n changed = True\n return changed\n #-------------------------------------------------------\n # CONTINUOUS PHENOTYPE CROSSOVER\n #-------------------------------------------------------\n elif random.random() < 0.5:\n return self.actionCrossover(cl)",
"def _crossover(self, best_population, crossover, n_parents=2, method=\"uniform_swap\"):\n if crossover:\n # randomly select parents\n parents_indexes = torch.randint(0, len(best_population), (self.population_size, n_parents),\n device=self.device)\n new_population = torch.zeros(self.population.shape, device=self.device)\n i = 0\n for p_idx in parents_indexes:\n new_population[i] = self._produce_child(best_population[p_idx], method=method)\n i += 1\n else:\n # randomly repeat best individuals\n new_pop_indexes = torch.randint(0, len(best_population), (self.population_size,), device=self.device)\n new_population = best_population[new_pop_indexes]\n return new_population",
"def unbias_var(w=None,N_eff=None,avoid_pathological=False):\n if N_eff is None:\n N_eff = 1/(w@w)\n if avoid_pathological and weight_degeneracy(w):\n ub = 1 # Don't do in case of weights collapse\n else:\n ub = 1/(1 - 1/N_eff) # =N/(N-1) if w==ones(N)/N.\n return ub",
"def laplace_crossover(random, mom, dad, args):\n crossover_rate = args.setdefault('crossover_rate', 1.0)\n if random.random() < crossover_rate:\n bounder = args['_ec'].bounder\n a = args.setdefault('lx_location', 0)\n b = args.setdefault('lx_scale', 0.5)\n bro = copy.copy(dad)\n sis = copy.copy(mom)\n for i, (m, d) in enumerate(zip(mom, dad)):\n u = random.random()\n if random.random() <= 0.5:\n beta = a - b * math.log(u)\n else:\n beta = a + b * math.log(u)\n bro[i] = m + beta * abs(m - d)\n sis[i] = d + beta * abs(m - d)\n bro = bounder(bro, args)\n sis = bounder(sis, args)\n return [bro, sis]\n else:\n return [mom, dad]",
"def crossover(self, parents: ChromList) -> ChromList:\n raise NotImplementedError",
"def crossover(self):\n self.sort_population()\n elite_amount = round(self.elite_rate * self.population_size)\n # preserve from the top\n new_population = [ele for ele in self.population if ele.ttl > 0]\n for individual in new_population:\n if individual.ttl > 0:\n individual.ttl -= 1\n new_population += self.population[:elite_amount]\n\n while len(new_population) < self.population_size:\n # newGene = self.crossBelowCrossRate()\n new_gene, new_gene2 = self.cross_on_arb_seq()\n if random() <= self.mutate_rate:\n self.mutate_append(new_gene)\n new_population.append(new_gene)\n if len(new_population) == self.population_size:\n break\n\n if random() <= self.mutate_rate:\n self.mutate_append(new_gene2)\n new_population.append(new_gene2)\n self.population = new_population",
"def laplace_crossover(random, mom, dad, args):\r\n crossover_rate = args.setdefault('crossover_rate', 1.0)\r\n if random.random() < crossover_rate:\r\n bounder = args['_ec'].bounder\r\n a = args.setdefault('lx_location', 0)\r\n b = args.setdefault('lx_scale', 0.5)\r\n bro = copy.copy(dad)\r\n sis = copy.copy(mom)\r\n for i, (m, d) in enumerate(zip(mom, dad)):\r\n u = random.random()\r\n if random.random() <= 0.5:\r\n beta = a - b * math.log(u)\r\n else:\r\n beta = a + b * math.log(u)\r\n bro[i] = m + beta * abs(m - d)\r\n sis[i] = d + beta * abs(m - d)\r\n bro = bounder(bro, args)\r\n sis = bounder(sis, args)\r\n return [bro, sis]\r\n else:\r\n return [mom, dad]",
"def uniform_crossover(\n self, mating_pop_dict, test=False, pairs=[], crossover_prob={}\n ):\n\n print('Performing crossovers')\n\n # Initialises dictionary of child networks\n crossover_pop_dict = OrderedDict()\n\n if test is False:\n # Selects pairs of networks at random to crossover with each other\n network_num = list(mating_pop_dict.keys())\n random.shuffle(network_num)\n network_num = iter(network_num) # Do not merge with line below,\n # and do not introduce any lines of code between them!\n network_num = list(zip(network_num, network_num))\n else:\n network_num = pairs\n\n # Performs uniform crossover\n for index, network_pair in enumerate(network_num):\n network_num_1 = network_pair[0]\n network_num_2 = network_pair[1]\n mate_1 = copy.deepcopy(mating_pop_dict[network_num_1])\n mate_2 = copy.deepcopy(mating_pop_dict[network_num_2])\n\n for node in list(mate_1.nodes):\n type_1 = mate_1.nodes()[node]['type']\n type_2 = mate_2.nodes()[node]['type']\n if type_1 != type_2:\n raise TypeError(\n 'Difference between type of {} in {} ({} = {}; {} ='\n ' {}) - should be identical'.format(node, network_pair,\n network_num_1, type_1, network_num_2, type_2)\n )\n if type_1 == 'loop':\n continue\n\n if test is False:\n random_number = random.uniform(0, 1)\n else:\n random_number = crossover_prob[index][node]\n\n if random_number <= self.crossover_prob:\n # Copy to prevent these dictionaries from updating when the\n # node attributes are updated in the code below (otherwise\n # both nodes will be assigned the same identity as the node\n # in mate_1, instead of the node identities being crossed\n # over)\n mate_1_node_attributes = copy.deepcopy(mate_1.nodes()[node])\n mate_2_node_attributes = copy.deepcopy(mate_2.nodes()[node])\n # mate_1.nodes()[node] = {} does not work, get\n # TypeError: 'NodeView' object does not support item assignment\n for attribute in list(mate_1.nodes()[node].keys()):\n del mate_1.nodes()[node][attribute]\n for attribute in list(mate_2.nodes()[node].keys()):\n del mate_2.nodes()[node][attribute]\n nx.set_node_attributes(mate_1, values={node: mate_2_node_attributes})\n nx.set_node_attributes(mate_2, values={node: mate_1_node_attributes})\n\n crossover_pop_dict[network_num_1] = mate_1\n crossover_pop_dict[network_num_2] = mate_2\n\n return crossover_pop_dict",
"def arithmetic_crossover(random, mom, dad, args):\n ax_alpha = args.setdefault('ax_alpha', 0.5)\n ax_points = args.setdefault('ax_points', None)\n crossover_rate = args.setdefault('crossover_rate', 1.0)\n bounder = args['_ec'].bounder\n children = []\n if random.random() < crossover_rate:\n bro = copy.copy(dad)\n sis = copy.copy(mom)\n if ax_points is None:\n ax_points = list(range(min(len(bro), len(sis))))\n for i in ax_points:\n bro[i] = ax_alpha * mom[i] + (1 - ax_alpha) * dad[i]\n sis[i] = ax_alpha * dad[i] + (1 - ax_alpha) * mom[i]\n bro = bounder(bro, args)\n sis = bounder(sis, args)\n children.append(bro)\n children.append(sis)\n else:\n children.append(mom)\n children.append(dad)\n return children",
"def offspring_zygosity(parent_1, parent_2):\n genotype_1, genotype_2 = tuple(parent_1), tuple(parent_2)\n offspring = it.product(genotype_1, genotype_2)\n\n homozygous_dominant, heterozygous, homozygous_recessive = 0, 0, 0\n\n for allele_1, allele_2 in offspring:\n dominant = (allele_1.isupper(), allele_2.isupper())\n if all(dominant):\n homozygous_dominant += 1\n elif not any(dominant):\n homozygous_recessive += 1\n else:\n heterozygous += 1\n\n return (homozygous_dominant, heterozygous, homozygous_recessive)",
"def crossover(self):\n print(' - crossover')\n s = time.time()\n\n # make a list with all index\n tmp_list = list(range(0, self.size))\n while len(tmp_list) > 0:\n candidate_1 = random.choice(tmp_list)\n tmp_list.remove(candidate_1)\n candidate_2 = random.choice(tmp_list)\n tmp_list.remove(candidate_2)\n\n # ceck if the two candidates will crossover\n chance = random.uniform(0, 1)\n if chance <= self.crossover_rate:\n self.crossover_two_candidates(candidate_1, candidate_2)\n\n e = time.time()\n print(\" - time: \", e - s)",
"def ciou(pred, target, eps=1e-7):\n # overlap\n lt = torch.max(pred[:, :2], target[:, :2])\n rb = torch.min(pred[:, 2:], target[:, 2:])\n wh = (rb - lt).clamp(min=0)\n overlap = wh[:, 0] * wh[:, 1]\n\n # union\n ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])\n ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])\n union = ap + ag - overlap + eps\n\n # IoU\n ious = overlap / union\n\n # enclose area\n enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])\n enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])\n enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)\n\n cw = enclose_wh[:, 0]\n ch = enclose_wh[:, 1]\n\n c2 = cw**2 + ch**2 + eps\n\n b1_x1, b1_y1 = pred[:, 0], pred[:, 1]\n b1_x2, b1_y2 = pred[:, 2], pred[:, 3]\n b2_x1, b2_y1 = target[:, 0], target[:, 1]\n b2_x2, b2_y2 = target[:, 2], target[:, 3]\n\n w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps\n w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps\n\n left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4\n right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4\n rho2 = left + right\n\n factor = 4 / math.pi**2\n v = factor * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)\n\n # CIoU\n cious = ious - (rho2 / c2 + v**2 / (1 - ious + v))\n return cious",
"def generate_offspring(self, parents, generation, pool=None):\n # Do this only the first time, when the first N agents are generated\n if self.initial_pop:\n self.initial_pop = False\n # We do not store the agents of the initial pop in the archive. Just use them to init the emitters\n # Init emitter population with all agents in the initial population.\n for agent in parents:\n self.emitters_pop.append(OptimizingEmitter(agent['genome'], agent['id'], 0.5, self.bounds, self.params))\n\n # Now select emitter to use\n self.emitter_idx = np.argmin([em.stored for em in self.emitters_pop]) # Select emitter that generated the least solutions\n\n offsprings = Population(self.params, init_size=0, name='offsprings')\n for i in range(self.params.emitter_population): # The batch is the pop size\n off = self.agent_template.copy() # Get new agent\n off['genome'] = self.emitters_pop[self.emitter_idx].ask()\n off['parent'] = self.emitters_pop[self.emitter_idx].id\n off['ancestor'] = self.emitters_pop[self.emitter_idx].id\n offsprings.add(off)\n\n offs_ids = parents.agent_id + np.array(range(len(offsprings))) # Calculate offs IDs\n offsprings['id'] = offs_ids # Update offs IDs\n offsprings['born'] = [generation] * offsprings.size\n parents.agent_id = max(offs_ids) + 1 # This saves the maximum ID reached till now\n return offsprings",
"def blend_crossover(random, mom, dad, args):\n blx_alpha = args.setdefault('blx_alpha', 0.1)\n blx_points = args.setdefault('blx_points', None)\n crossover_rate = args.setdefault('crossover_rate', 1.0)\n bounder = args['_ec'].bounder\n children = []\n if random.random() < crossover_rate:\n bro = copy.copy(dad)\n sis = copy.copy(mom)\n if blx_points is None:\n blx_points = list(range(min(len(bro), len(sis))))\n for i in blx_points:\n smallest, largest = min(mom[i], dad[i]), max(mom[i], dad[i])\n delta = blx_alpha * (largest - smallest)\n bro[i] = smallest - delta + random.random() * (largest - smallest + 2 * delta)\n sis[i] = smallest - delta + random.random() * (largest - smallest + 2 * delta)\n bro = bounder(bro, args)\n sis = bounder(sis, args)\n children.append(bro)\n children.append(sis)\n else:\n children.append(mom)\n children.append(dad)\n return children",
"def arithmetic_crossover(random, mom, dad, args):\r\n ax_alpha = args.setdefault('ax_alpha', 0.5)\r\n ax_points = args.setdefault('ax_points', None)\r\n crossover_rate = args.setdefault('crossover_rate', 1.0)\r\n bounder = args['_ec'].bounder\r\n children = []\r\n if random.random() < crossover_rate:\r\n bro = copy.copy(dad)\r\n sis = copy.copy(mom)\r\n if ax_points is None:\r\n ax_points = list(range(min(len(bro), len(sis))))\r\n for i in ax_points:\r\n bro[i] = ax_alpha * mom[i] + (1 - ax_alpha) * dad[i]\r\n sis[i] = ax_alpha * dad[i] + (1 - ax_alpha) * mom[i]\r\n bro = bounder(bro, args)\r\n sis = bounder(sis, args)\r\n children.append(bro)\r\n children.append(sis)\r\n else:\r\n children.append(mom)\r\n children.append(dad)\r\n return children"
]
| [
"0.61603284",
"0.57518995",
"0.51962733",
"0.51483315",
"0.512234",
"0.4940673",
"0.4929111",
"0.49239802",
"0.48817232",
"0.48575172",
"0.48472595",
"0.4789144",
"0.47818902",
"0.47776258",
"0.47735626",
"0.47145042",
"0.46981046",
"0.46507037",
"0.46464217",
"0.46441185",
"0.46210808",
"0.46142644",
"0.46114713",
"0.45980737",
"0.4576859",
"0.4570398",
"0.45668867",
"0.45653874",
"0.45628986",
"0.45563835"
]
| 0.62081856 | 0 |
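For reference, a minimal standalone sketch of how the uniform_crossover entry above behaves when called directly. The function body is reproduced from the entry; the driver (the seed, the parent lists, and the ux_bias value) is purely illustrative, and Python's standard random module stands in for the random-like object an evolutionary framework would normally pass in.

```python
# Minimal sketch: exercising the uniform crossover operator from the entry above.
import copy
import random as rng

def uniform_crossover(random, mom, dad, args):
    # Reproduced verbatim from the entry above.
    ux_bias = args.setdefault('ux_bias', 0.5)
    crossover_rate = args.setdefault('crossover_rate', 1.0)
    children = []
    if random.random() < crossover_rate:
        bro = copy.copy(dad)
        sis = copy.copy(mom)
        for i, (m, d) in enumerate(zip(mom, dad)):
            if random.random() < ux_bias:
                bro[i] = m
                sis[i] = d
        children.append(bro)
        children.append(sis)
    else:
        children.append(mom)
        children.append(dad)
    return children

if __name__ == '__main__':
    rng.seed(0)                      # illustrative seed
    mom = [0, 0, 0, 0, 0, 0]         # illustrative parents
    dad = [1, 1, 1, 1, 1, 1]
    # With ux_bias=0.5 each position of the first child comes from mom or dad
    # with equal probability; the second child receives the complementary allele.
    print(uniform_crossover(rng, mom, dad, {'ux_bias': 0.5}))
```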
Return the offspring of arithmetic crossover on the candidates. This function performs arithmetic crossover (AX), which is similar to a generalized weighted averaging of the candidate elements. The allele of each parent is weighted by the ax_alpha keyword argument, and the allele of the complement parent is weighted by 1 - ax_alpha. This averaging is only done on the alleles listed in the ax_points keyword argument. If this argument is ``None``, then all alleles are used. This means that if this function is used with all default values, then offspring are simple averages of their parents. This function also makes use of the bounder function as specified in the EC's ``evolve`` method. | def arithmetic_crossover(random, mom, dad, args):
ax_alpha = args.setdefault('ax_alpha', 0.5)
ax_points = args.setdefault('ax_points', None)
crossover_rate = args.setdefault('crossover_rate', 1.0)
bounder = args['_ec'].bounder
children = []
if random.random() < crossover_rate:
bro = copy.copy(dad)
sis = copy.copy(mom)
if ax_points is None:
ax_points = list(range(min(len(bro), len(sis))))
for i in ax_points:
bro[i] = ax_alpha * mom[i] + (1 - ax_alpha) * dad[i]
sis[i] = ax_alpha * dad[i] + (1 - ax_alpha) * mom[i]
bro = bounder(bro, args)
sis = bounder(sis, args)
children.append(bro)
children.append(sis)
else:
children.append(mom)
children.append(dad)
return children | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _crossover(self, sel):\n offspring = []\n for p1, p2 in sel:\n p1 = copy.deepcopy(p1)\n p2 = copy.deepcopy(p2)\n\n tmp = self.op.crossover(\n copy.deepcopy(p1['individual']),\n copy.deepcopy(p2['individual']))\n if not tmp[0] is None and not tmp[1] is None:\n c1 = {\n 'individual': tmp[0],\n 'fitness': self.op.fitness(tmp[0])\n }\n c2 = {\n 'individual': tmp[1],\n 'fitness': self.op.fitness(tmp[1])\n }\n\n offspring.append(\n c1 if c1['fitness'] < p1['fitness'] else p1)\n offspring.append(\n c2 if c2['fitness'] < p2['fitness'] else p2)\n else:\n offspring.extend((p1, p2))\n return offspring",
"def arithmetic_crossover(random, mom, dad, args):\r\n ax_alpha = args.setdefault('ax_alpha', 0.5)\r\n ax_points = args.setdefault('ax_points', None)\r\n crossover_rate = args.setdefault('crossover_rate', 1.0)\r\n bounder = args['_ec'].bounder\r\n children = []\r\n if random.random() < crossover_rate:\r\n bro = copy.copy(dad)\r\n sis = copy.copy(mom)\r\n if ax_points is None:\r\n ax_points = list(range(min(len(bro), len(sis))))\r\n for i in ax_points:\r\n bro[i] = ax_alpha * mom[i] + (1 - ax_alpha) * dad[i]\r\n sis[i] = ax_alpha * dad[i] + (1 - ax_alpha) * mom[i]\r\n bro = bounder(bro, args)\r\n sis = bounder(sis, args)\r\n children.append(bro)\r\n children.append(sis)\r\n else:\r\n children.append(mom)\r\n children.append(dad)\r\n return children",
"def crossing(self, *args):\n return self.overlap(*args, type='point')",
"def _cross_over(self,mp,cross_rate,eta):",
"def points_earned(self):\n delta_counts = self.alive_counts - self.initial_counts\n points = self.points_table * delta_counts\n points = points.reshape(-1,72) # unravel the points for easier sum\n return np.sum(points, axis=1) + super().current_points()",
"def _create_offspring(self):\n parents = self._select_parents()\n offspring = self._crossover(*parents)\n if (random.uniform(0, 1) < self.mutation_rate):\n self._mutate(offspring)\n return offspring",
"def crossover(self, parents):\n\n randomCategory = random.sample(list(ga_.Category), 1)[0]\n randomParent1 = random.sample(parents, 1)[0]\n randomParent2 = None\n for parent in parents:\n if parent != randomParent1:\n randomParent2 = parent\n \n\n # put randomCategory from random parent to the new offpring and the remainder from the second parent\n offspring = ga_.Outfit()\n if randomCategory == ga_.Category.TOP:\n offspring.top = randomParent1.top\n offspring.bottom = randomParent2.bottom\n offspring.shoes = randomParent2.shoes\n offspring.neck = randomParent2.neck\n offspring.handbag = randomParent2.handbag\n elif randomCategory == ga_.Category.BOTTOM:\n offspring.top = randomParent2.top\n offspring.bottom = randomParent1.bottom\n offspring.shoes = randomParent2.shoes\n offspring.neck = randomParent2.neck\n offspring.handbag = randomParent2.handbag\n elif randomCategory == ga_.Category.SHOES:\n offspring.top = randomParent2.top\n offspring.bottom = randomParent2.bottom\n offspring.shoes = randomParent1.shoes\n offspring.neck = randomParent2.neck\n offspring.handbag = randomParent2.handbag\n elif randomCategory == ga_.Category.NECK:\n offspring.top = randomParent2.top\n offspring.bottom = randomParent2.bottom\n offspring.shoes = randomParent2.shoes\n offspring.neck = randomParent1.neck\n offspring.handbag = randomParent2.handbag\n elif randomCategory == ga_.Category.HANDBAG:\n offspring.top = randomParent2.top\n offspring.bottom = randomParent2.bottom\n offspring.shoes = randomParent2.shoes\n offspring.neck = randomParent2.neck\n offspring.handbag = randomParent1.handbag\n\n return offspring",
"def overlap_ss(Ax, Ay, Az, Cx, Cy, Cz, alpha_bra, alpha_ket, c1, c2):\n A = np.array([Ax, Ay, Az])\n C = np.array([Cx, Cy, Cz])\n alpha_sum = alpha_bra + alpha_ket\n return c1 * c2 * (np.pi / alpha_sum)**(3/2) * np.exp((-alpha_bra * alpha_ket * np.dot(A-C, A-C)) / alpha_sum)",
"def crossover(self):\n self.sort_population()\n elite_amount = round(self.elite_rate * self.population_size)\n # preserve from the top\n new_population = [ele for ele in self.population if ele.ttl > 0]\n for individual in new_population:\n if individual.ttl > 0:\n individual.ttl -= 1\n new_population += self.population[:elite_amount]\n\n while len(new_population) < self.population_size:\n # newGene = self.crossBelowCrossRate()\n new_gene, new_gene2 = self.cross_on_arb_seq()\n if random() <= self.mutate_rate:\n self.mutate_append(new_gene)\n new_population.append(new_gene)\n if len(new_population) == self.population_size:\n break\n\n if random() <= self.mutate_rate:\n self.mutate_append(new_gene2)\n new_population.append(new_gene2)\n self.population = new_population",
"def crossing(self, *args):\n return self.phy2abs.crossing(*args)",
"def onepoint_crossover(p_0, p_1, within_used=True):\n # Get the chromosomes\n c_p_0, c_p_1 = p_0.genome, p_1.genome\n # Uniformly generate crossover points. If within_used==True,\n # points will be within the used section.\n if within_used:\n max_p_0, max_p_1 = p_0.used_codons, p_1.used_codons\n else:\n max_p_0, max_p_1 = len(c_p_0), len(c_p_1)\n pt_p_0, pt_p_1 = random.randint(1, max_p_0), random.randint(1, max_p_1)\n # Make new chromosomes by crossover: these slices perform copies\n if random.random() < CROSSOVER_PROBABILITY:\n c_0 = c_p_0[:pt_p_0] + c_p_1[pt_p_1:]\n c_1 = c_p_1[:pt_p_1] + c_p_0[pt_p_0:]\n else:\n c_0, c_1 = c_p_0[:], c_p_1[:]\n # Put the new chromosomes into new individuals\n return [Individual(c_0), Individual(c_1)]",
"def recombination(parents):\n\n # pick 5 random numbers that add up to 1\n random_values = np.random.dirichlet(np.ones(5),size=1)[0]\n\n # those random values will serve as weights for the genes 2 offspring get (whole arithmetic recombination)\n offspring1 = random_values[0] * parents[0] + random_values[1] * parents[1] + random_values[2] * parents[2] + random_values[3] * parents[3] + \\\n random_values[4] * parents[4]\n\n # repeat for offspring 2\n random_values = np.random.dirichlet(np.ones(5),size=1)[0]\n offspring2 = random_values[0] * parents[0] + random_values[1] * parents[1] + random_values[2] * parents[2] + random_values[3] * parents[3] + \\\n random_values[4] * parents[4]\n\n # the other 2 offspring will come from 4-point crossover\n random_points = np.sort(np.random.randint(1, parents[0].shape[0]-2, 4))\n\n # to make it so that it won't always be p1 who gives the first portion of DNA etc, we shuffle the parents\n np.random.shuffle(parents)\n\n # add the genes together\n offspring3 = np.concatenate((parents[0][0:random_points[0]], parents[1][random_points[0]:random_points[1]], parents[2][random_points[1]:random_points[2]],\\\n parents[3][random_points[2]:random_points[3]], parents[4][random_points[3]:]))\n\n # repeat for offspring 4\n random_points = np.sort(np.random.randint(1, parents[0].shape[0]-2, 4))\n np.random.shuffle(parents)\n offspring4 = np.concatenate((parents[0][0:random_points[0]], parents[1][random_points[0]:random_points[1]], parents[2][random_points[1]:random_points[2]],\\\n parents[3][random_points[2]:random_points[3]], parents[4][random_points[3]:]))\n\n # return the offspring\n return np.concatenate(([offspring1], [offspring2], [offspring3], [offspring4]))",
"def zeroCrossing(self,evap_threshold):\r\n\t\tself.splitBaseline =(np.mean(self.splitData[0:10]))\t\r\n\t\tsplit_max_index = np.argmax(self.splitData)\r\n\t\tsplit_min_index = np.argmin(self.splitData)\r\n\r\n\t\tif split_max_index >= split_min_index:\r\n\t\t\treturn self.zeroCrossingPosSlope(evap_threshold)\r\n\t\t\r\n\t\tif split_max_index < split_min_index:\r\n\t\t\treturn self.zeroCrossingNegSlope(evap_threshold)",
"def crossover(cross):\r\n @functools.wraps(cross)\r\n def ecspy_crossover(random, candidates, args):\r\n if len(candidates) % 2 == 1:\r\n candidates = candidates[:-1]\r\n moms = candidates[::2]\r\n dads = candidates[1::2]\r\n children = []\r\n for i, (mom, dad) in enumerate(zip(moms, dads)):\r\n cross.index = i\r\n offspring = cross(random, mom, dad, args)\r\n for o in offspring:\r\n children.append(o)\r\n return children\r\n ecspy_crossover.single_crossover = cross\r\n return ecspy_crossover",
"def crossover_simulated_binary(\n self,\n parent1,\n parent2,\n eta: float\n ):\n # Calculate Gamma (Eq. 9.11)\n rand = self.rng.random(parent1.shape)\n gamma = np.empty(parent1.shape)\n gamma[rand <= 0.5] = (2 * rand[rand <= 0.5]) ** (1.0 / (eta + 1)) # First case of equation 9.11\n gamma[rand > 0.5] = (1.0 / (2.0 * (1.0 - rand[rand > 0.5]))) ** (1.0 / (eta + 1)) # Second case\n\n # Calculate Child 1 chromosome (Eq. 9.9)\n offspring1 = 0.5 * ((1 + gamma) * parent1 + (1 - gamma) * parent2)\n # Calculate Child 2 chromosome (Eq. 9.10)\n offspring2 = 0.5 * ((1 - gamma) * parent1 + (1 + gamma) * parent2)\n\n return offspring1, offspring2",
"def get_bprop_acos(self):\n input_grad = G.ACosGrad()\n\n def bprop(x, out, dout):\n dx = input_grad(x, dout)\n return (dx,)\n return bprop",
"def crossover(self, candidates):\n xver = []\n for par1, par2 in candidates:\n n = min(par1.enc_path.shape[0], par2.enc_path.shape[0])\n x_point = np.random.randint(0, n - 1)\n child = Path()\n child.enc_path = np.vstack((par1.enc_path[0:x_point], par2.enc_path[x_point:]))\n xver.append(child)\n return xver",
"def crossSelf(self, e=1e-10):\n results = []\n l = len(self.segments)\n for i in range(l):\n for j in range(i + 1, l):\n point = self.segments[i].crossSegment(self.segments[j])\n if point:\n if point in self.points:\n results.append(point)\n return results",
"def heuristic_crossover(random, candidates, args):\r\n crossover_rate = args.setdefault('crossover_rate', 1.0)\r\n bounder = args['_ec'].bounder\r\n \r\n if len(candidates) % 2 == 1:\r\n candidates = candidates[:-1]\r\n \r\n # Since we don't have fitness information in the candidates, we need \r\n # to make a dictionary containing the candidate and its corresponding \r\n # individual in the population.\r\n population = list(args['_ec'].population)\r\n lookup = dict(zip([pickle.dumps(p.candidate, 1) for p in population], population))\r\n \r\n moms = candidates[::2]\r\n dads = candidates[1::2]\r\n children = []\r\n for mom, dad in zip(moms, dads):\r\n if random.random() < crossover_rate:\r\n bro = copy.copy(dad)\r\n sis = copy.copy(mom)\r\n mom_is_better = lookup[pickle.dumps(mom, 1)] > lookup[pickle.dumps(dad, 1)]\r\n for i, (m, d) in enumerate(zip(mom, dad)):\r\n negpos = 1 if mom_is_better else -1\r\n val = d if mom_is_better else m\r\n bro[i] = val + random.random() * negpos * (m - d)\r\n sis[i] = val + random.random() * negpos * (m - d)\r\n bro = bounder(bro, args)\r\n sis = bounder(sis, args)\r\n children.append(bro)\r\n children.append(sis)\r\n else:\r\n children.append(mom)\r\n children.append(dad)\r\n return children",
"def estimation_of_distribution_variation(random, candidates, args):\r\n num_offspring = args.setdefault('num_offspring', 1)\r\n bounder = args['_ec'].bounder\r\n\r\n cs_copy = list(candidates)\r\n num_genes = max([len(x) for x in cs_copy])\r\n genes = [[x[i] for x in cs_copy] for i in range(num_genes)]\r\n mean = [float(sum(x)) / float(len(x)) for x in genes]\r\n stdev = [sum([(x - m) ** 2 for x in g]) / float(len(g) - 1) for g, m in zip(genes, mean)]\r\n offspring = []\r\n for _ in range(num_offspring):\r\n child = copy.copy(cs_copy[0])\r\n for i, (m, s) in enumerate(zip(mean, stdev)):\r\n child[i] = m + random.gauss(0, s)\r\n child = bounder(child, args)\r\n offspring.append(child)\r\n\r\n return offspring",
"def externalEnergy(self, controlpoints):\n # compute the factor the energy of each control points get's weighed with\n external = 0.0\n if len(self.controlpoints) > 0:\n factor = float(1)/len(self.controlpoints)\n else:\n factor = 1\n \n # check if the given controlpoints are equal to the current ones\n if np.equal(controlpoints, self.controlpoints).all():\n # take the current normals\n normals = self.normals\n else:\n # otherwise calculate the according normals\n spline = Spline()\n spline.addControlPoints(*controlpoints)\n normals = spline.normals\n \n # ACHTUNG! hier müssen die Normalen zur Berechnung gedreht werden,\n # falls flip es vorgibt\n if self.flip:\n normals = map(lambda n: rotateVector(n, angle=pi), normals)\n \n # only remember each external control point energy if the given control points are\n # the snakes current control points\n memorize_energies = np.equal(controlpoints, self.controlpoints).all()\n # reset the controlpointenergy list if necessary\n if memorize_energies:\n self.ext_energies = []\n \n # sum up the energies at the single control points multiplied by the inverse\n # of the number of control points\n for i in range(len(controlpoints)):\n point = controlpoints[i]\n \n# if len(normals) > 0:\n# normal = normals[i]\n# else:\n# normal = None\n normal = normals[i]\n \n pointenergy = self.ExternalEnergy.getEnergy(point, iteration=self.iteration, normal=normal)\n # check wether to save the point energy\n if memorize_energies:\n #self.ext_energies.append(self.ExternalEnergy.getEnergy(point))\n self.ext_energies.append(pointenergy)\n external += pointenergy * factor\n return external",
"def heuristic_crossover(random, candidates, args):\n crossover_rate = args.setdefault('crossover_rate', 1.0)\n bounder = args['_ec'].bounder\n \n if len(candidates) % 2 == 1:\n candidates = candidates[:-1]\n \n # Since we don't have fitness information in the candidates, we need \n # to make a dictionary containing the candidate and its corresponding \n # individual in the population.\n population = list(args['_ec'].population)\n lookup = dict(zip([pickle.dumps(p.candidate, 1) for p in population], population))\n \n moms = candidates[::2]\n dads = candidates[1::2]\n children = []\n for mom, dad in zip(moms, dads):\n if random.random() < crossover_rate:\n bro = copy.copy(dad)\n sis = copy.copy(mom)\n mom_is_better = lookup[pickle.dumps(mom, 1)] > lookup[pickle.dumps(dad, 1)]\n for i, (m, d) in enumerate(zip(mom, dad)):\n negpos = 1 if mom_is_better else -1\n val = d if mom_is_better else m\n bro[i] = val + random.random() * negpos * (m - d)\n sis[i] = val + random.random() * negpos * (m - d)\n bro = bounder(bro, args)\n sis = bounder(sis, args)\n children.append(bro)\n children.append(sis)\n else:\n children.append(mom)\n children.append(dad)\n return children",
"def crossover_binary(parent1: numpy.ndarray, parent2: numpy.ndarray, eta: float):\n # Ref: https://github.com/Chrispresso/SnakeAI/blob/master/genetic_algorithm/crossover.py\n rand = numpy.random.random(size=parent1.shape)\n gamma = numpy.empty(shape=parent1.shape)\n gamma[rand <= 0.5] = (2 * rand[rand <= 0.5]) ** (1.0 / (eta + 1)) # First case of equation 9.11\n gamma[rand > 0.5] = (1.0 / (2.0 * (1.0 - rand[rand > 0.5]))) ** (1.0 / (eta + 1)) # Second case\n\n # Calculate Child 1 chromosome (Eq. 9.9)\n chromosome1 = 0.5 * ((1 + gamma) * parent1 + (1 - gamma) * parent2)\n # Calculate Child 2 chromosome (Eq. 9.10)\n chromosome2 = 0.5 * ((1 - gamma) * parent1 + (1 + gamma) * parent2)\n\n return chromosome1, chromosome2",
"def crossover(self, parents: ChromList) -> ChromList:\n raise NotImplementedError",
"def generate_offspring(self, parents, generation, pool=None):\n # Do this only the first time, when the first N agents are generated\n if self.initial_pop:\n self.initial_pop = False\n # We do not store the agents of the initial pop in the archive. Just use them to init the emitters\n # Init emitter population with all agents in the initial population.\n for agent in parents:\n self.emitters_pop.append(OptimizingEmitter(agent['genome'], agent['id'], 0.5, self.bounds, self.params))\n\n # Now select emitter to use\n self.emitter_idx = np.argmin([em.stored for em in self.emitters_pop]) # Select emitter that generated the least solutions\n\n offsprings = Population(self.params, init_size=0, name='offsprings')\n for i in range(self.params.emitter_population): # The batch is the pop size\n off = self.agent_template.copy() # Get new agent\n off['genome'] = self.emitters_pop[self.emitter_idx].ask()\n off['parent'] = self.emitters_pop[self.emitter_idx].id\n off['ancestor'] = self.emitters_pop[self.emitter_idx].id\n offsprings.add(off)\n\n offs_ids = parents.agent_id + np.array(range(len(offsprings))) # Calculate offs IDs\n offsprings['id'] = offs_ids # Update offs IDs\n offsprings['born'] = [generation] * offsprings.size\n parents.agent_id = max(offs_ids) + 1 # This saves the maximum ID reached till now\n return offsprings",
"def iterate(self):\n ret = super(ExpandableAlgorithm, self).pre_iteration()\n if ret is None:\n return None\n active, passive, neighbors, features_active, features_passive = ret\n params = [features_active, features_passive]\n if self._post_args:\n params += self._post_args\n s = self._overlap_function(*params)\n if self.condition_axelrod:\n if self.__condition_axelrod(s, features_active, features_passive):\n return True\n if self.condition_centola:\n if self.__condition_centola(s, active, passive, neighbors):\n return True",
"def asymtotic_approx(self, zero_crossings):\n x = np.sqrt(2 * np.log(zero_crossings))\n return x + 0.5772 / x",
"def aerosols(self):\n raise NotImplementedError(\"Subclass must implement\")",
"def zeroCrossingPosSlope(self, evap_threshold):\r\n\t\tself.splitBaseline = np.mean(self.splitData[0:10])\r\n\t\tsplit_max_index = np.argmax(self.splitData)\r\n\t\tsplit_min_index = np.argmin(self.splitData[0:split_max_index])\r\n\t\tsplit_max_value = self.splitData[split_max_index]\r\n\t\tsplit_min_value = self.splitData[split_min_index]\r\n\r\n\t\tif (self.splitBaseline-split_min_value) >= evap_threshold and (split_max_value-self.splitBaseline) >=evap_threshold: #avoid particles evaporating before the notch position can be properly determined (details in Taylor et al. 10.5194/amtd-7-5491-2014)\r\n\t\t\ttry:\r\n\t\t\t\tfor index in range(split_min_index, split_max_index+1): #go to max +1 because 'range' function is not inclusive\r\n\t\t\t\t\tif self.splitData[index] < self.splitBaseline:\r\n\t\t\t\t\t\tvalue_zero_cross_neg = float(self.splitData[index])\r\n\t\t\t\t\t\tindex_zero_cross_neg = index\r\n\t\t\t\t\tif self.splitData[index] >= self.splitBaseline:\r\n\t\t\t\t\t\tvalue_zero_cross_pos = float(self.splitData[index])\r\n\t\t\t\t\t\tindex_zero_cross_pos = index\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\tzero_crossing = index+((value_zero_cross_pos-self.splitBaseline)*(index_zero_cross_pos-index_zero_cross_neg))/(value_zero_cross_pos-value_zero_cross_neg) \r\n\t\t\texcept:\r\n\t\t\t\tzero_crossing = -1 \r\n\t\t\t\t\r\n\t\telse:\r\n\t\t\tzero_crossing = -2 \r\n\t\t\r\n\t\tself.zeroCrossingPos = zero_crossing\r\n\t\treturn zero_crossing",
"def mutateAndRecombine(self,instances):\n if self.verbose:\n print(\"MUTATING AND RECOMBINING\")\n print(\"Initial instances:\")\n for i in instances: \n print(i.lsystem.niterations)\n print(i)\n\n crossedOffspring = self.evolver.crossover(instances)\n if self.verbose:\n print(\"After crossover:\")\n for i in crossedOffspring: print(i)\n mutatedOffspring = self.evolver.mutate(crossedOffspring) # TODO: make sure that no fitness is used there, so that we do not perform more operations than needed\n if self.verbose:\n print(\"After mutation:\")\n for i in mutatedOffspring: print(i)\n return mutatedOffspring"
]
| [
"0.53981245",
"0.5329241",
"0.51723456",
"0.5150698",
"0.5106049",
"0.49826026",
"0.49341378",
"0.47726318",
"0.47720823",
"0.4746147",
"0.47242197",
"0.46623877",
"0.46561575",
"0.4653956",
"0.46342048",
"0.46092457",
"0.45876846",
"0.4583629",
"0.45682684",
"0.4557402",
"0.45553547",
"0.45387068",
"0.4535348",
"0.4522081",
"0.4503456",
"0.44930762",
"0.44862145",
"0.44849947",
"0.44715247",
"0.44476116"
]
| 0.5358517 | 1 |
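Similarly, a minimal sketch of calling the arithmetic_crossover entry above directly. The function body is reproduced from the entry; the SimpleNamespace with a pass-through bounder is an illustrative stand-in for the args['_ec'].bounder that an evolutionary framework would normally supply, and the parent values are made up. With the default ax_alpha of 0.5, both children come out as the element-wise average of the parents.

```python
# Minimal sketch: exercising the arithmetic crossover operator from the entry above.
import copy
import random as rng
from types import SimpleNamespace

def arithmetic_crossover(random, mom, dad, args):
    # Reproduced verbatim from the entry above.
    ax_alpha = args.setdefault('ax_alpha', 0.5)
    ax_points = args.setdefault('ax_points', None)
    crossover_rate = args.setdefault('crossover_rate', 1.0)
    bounder = args['_ec'].bounder
    children = []
    if random.random() < crossover_rate:
        bro = copy.copy(dad)
        sis = copy.copy(mom)
        if ax_points is None:
            ax_points = list(range(min(len(bro), len(sis))))
        for i in ax_points:
            bro[i] = ax_alpha * mom[i] + (1 - ax_alpha) * dad[i]
            sis[i] = ax_alpha * dad[i] + (1 - ax_alpha) * mom[i]
        bro = bounder(bro, args)
        sis = bounder(sis, args)
        children.append(bro)
        children.append(sis)
    else:
        children.append(mom)
        children.append(dad)
    return children

if __name__ == '__main__':
    rng.seed(1)
    # Pass-through bounder: a stand-in for the real bounder an EC would provide.
    fake_ec = SimpleNamespace(bounder=lambda candidate, args: candidate)
    mom, dad = [0.0, 2.0, 4.0], [10.0, 8.0, 6.0]   # illustrative parents
    # With the default ax_alpha of 0.5 both children are the element-wise
    # average of the parents: [5.0, 5.0, 5.0].
    print(arithmetic_crossover(rng, mom, dad, {'_ec': fake_ec}))
```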
Return the offspring of blend crossover on the candidates. This function performs blend crossover (BLX), which is similar to arithmetic crossover with a bit of mutation. It creates offspring whose values are chosen randomly from a range bounded by the parent alleles but that is also extended by some amount proportional to the blx_alpha keyword argument. It is this extension of the range that provides the additional exploration. This averaging is only done on the alleles listed in the blx_points keyword argument. If this argument is ``None``, then all alleles are used. This function also makes use of the bounder function as specified in the EC's ``evolve`` method. | def blend_crossover(random, mom, dad, args):
blx_alpha = args.setdefault('blx_alpha', 0.1)
blx_points = args.setdefault('blx_points', None)
crossover_rate = args.setdefault('crossover_rate', 1.0)
bounder = args['_ec'].bounder
children = []
if random.random() < crossover_rate:
bro = copy.copy(dad)
sis = copy.copy(mom)
if blx_points is None:
blx_points = list(range(min(len(bro), len(sis))))
for i in blx_points:
smallest, largest = min(mom[i], dad[i]), max(mom[i], dad[i])
delta = blx_alpha * (largest - smallest)
bro[i] = smallest - delta + random.random() * (largest - smallest + 2 * delta)
sis[i] = smallest - delta + random.random() * (largest - smallest + 2 * delta)
bro = bounder(bro, args)
sis = bounder(sis, args)
children.append(bro)
children.append(sis)
else:
children.append(mom)
children.append(dad)
return children | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def blend_crossover(random, mom, dad, args):\r\n blx_alpha = args.setdefault('blx_alpha', 0.1)\r\n blx_points = args.setdefault('blx_points', None)\r\n crossover_rate = args.setdefault('crossover_rate', 1.0)\r\n bounder = args['_ec'].bounder\r\n children = []\r\n if random.random() < crossover_rate:\r\n bro = copy.copy(dad)\r\n sis = copy.copy(mom)\r\n if blx_points is None:\r\n blx_points = list(range(min(len(bro), len(sis))))\r\n for i in blx_points:\r\n smallest, largest = min(mom[i], dad[i]), max(mom[i], dad[i])\r\n delta = blx_alpha * (largest - smallest)\r\n bro[i] = smallest - delta + random.random() * (largest - smallest + 2 * delta)\r\n sis[i] = smallest - delta + random.random() * (largest - smallest + 2 * delta)\r\n bro = bounder(bro, args)\r\n sis = bounder(sis, args)\r\n children.append(bro)\r\n children.append(sis)\r\n else:\r\n children.append(mom)\r\n children.append(dad)\r\n return children",
"def _create_offspring(self):\n parents = self._select_parents()\n offspring = self._crossover(*parents)\n if (random.uniform(0, 1) < self.mutation_rate):\n self._mutate(offspring)\n return offspring",
"def wind_turbines_blend_crossover(random, mom, dad, args):\n blx_alpha = args.setdefault('blx_alpha', 0.1)\n blx_points = args.setdefault('blx_points', None)\n crossover_rate = args.setdefault('crossover_rate', 0.2)\n bounder = args['_ec'].bounder\n children = []\n if random.random() < crossover_rate:\n bro = copy.copy(dad)\n sis = copy.copy(mom)\n if blx_points is None:\n blx_points = list(range(min(len(bro), len(sis))))\n for i in blx_points:\n smallest, largest = min(mom[i], dad[i]), max(mom[i], dad[i])\n delta = blx_alpha * (largest - smallest)\n bro[i] = smallest - delta + random.random() * (largest - smallest + 2 * delta)\n sis[i] = smallest - delta + random.random() * (largest - smallest + 2 * delta)\n bro = bounder(bro, args)\n sis = bounder(sis, args)\n children.append(bro)\n children.append(sis)\n else:\n children.append(mom)\n children.append(dad)\n return children",
"def _crossover(self, sel):\n offspring = []\n for p1, p2 in sel:\n p1 = copy.deepcopy(p1)\n p2 = copy.deepcopy(p2)\n\n tmp = self.op.crossover(\n copy.deepcopy(p1['individual']),\n copy.deepcopy(p2['individual']))\n if not tmp[0] is None and not tmp[1] is None:\n c1 = {\n 'individual': tmp[0],\n 'fitness': self.op.fitness(tmp[0])\n }\n c2 = {\n 'individual': tmp[1],\n 'fitness': self.op.fitness(tmp[1])\n }\n\n offspring.append(\n c1 if c1['fitness'] < p1['fitness'] else p1)\n offspring.append(\n c2 if c2['fitness'] < p2['fitness'] else p2)\n else:\n offspring.extend((p1, p2))\n return offspring",
"def crossover(self):\n self.sort_population()\n elite_amount = round(self.elite_rate * self.population_size)\n # preserve from the top\n new_population = [ele for ele in self.population if ele.ttl > 0]\n for individual in new_population:\n if individual.ttl > 0:\n individual.ttl -= 1\n new_population += self.population[:elite_amount]\n\n while len(new_population) < self.population_size:\n # newGene = self.crossBelowCrossRate()\n new_gene, new_gene2 = self.cross_on_arb_seq()\n if random() <= self.mutate_rate:\n self.mutate_append(new_gene)\n new_population.append(new_gene)\n if len(new_population) == self.population_size:\n break\n\n if random() <= self.mutate_rate:\n self.mutate_append(new_gene2)\n new_population.append(new_gene2)\n self.population = new_population",
"def simulated_binary_crossover(random, mom, dad, args):\n crossover_rate = args.setdefault('crossover_rate', 1.0)\n if random.random() < crossover_rate:\n di = args.setdefault('sbx_distribution_index', 10)\n bounder = args['_ec'].bounder\n bro = copy.copy(dad)\n sis = copy.copy(mom)\n for i, (m, d, lb, ub) in enumerate(zip(mom, dad, bounder.lower_bound, bounder.upper_bound)):\n try:\n if m > d:\n m, d = d, m\n beta = 1.0 + 2 * min(m - lb, ub - d) / float(d - m)\n alpha = 2.0 - 1.0 / beta**(di + 1.0)\n u = random.random() \n if u <= (1.0 / alpha):\n beta_q = (u * alpha)**(1.0 / float(di + 1.0))\n else:\n beta_q = (1.0 / (2.0 - u * alpha))**(1.0 / float(di + 1.0))\n bro_val = 0.5 * ((m + d) - beta_q * (d - m))\n bro_val = max(min(bro_val, ub), lb) \n sis_val = 0.5 * ((m + d) + beta_q * (d - m))\n sis_val = max(min(sis_val, ub), lb)\n if random.random() > 0.5:\n bro_val, sis_val = sis_val, bro_val\n bro[i] = bro_val\n sis[i] = sis_val\n except ZeroDivisionError:\n # The offspring already have legitimate values for every element,\n # so no need to take any special action here.\n pass\n return [bro, sis]\n else:\n return [mom, dad]",
"def cross_on_arb_seq(self, slmax=6):\n p1_index = randint(0, floor(self.population_size * self.cross_rate)-1)\n p2_index = randint(0, floor(self.population_size * self.cross_rate)-1)\n gene_of_p1 = self.population[p1_index]\n gene_of_p2 = self.population[p2_index]\n\n p1_begin = myrandint(0, len(gene_of_p1)-1)\n p1_end = p1_begin + myrandint(1, int_min(slmax, len(gene_of_p1)-p1_begin))\n p2_begin = myrandint(0, len(gene_of_p2)-1)\n p2_end = p2_begin + myrandint(1, int_min(slmax, len(gene_of_p2)-p2_begin))\n new_chromosome = []\n new_chromosome += gene_of_p1.chromosome[:p1_begin]\n new_chromosome += gene_of_p2.chromosome[p2_begin:p2_end]\n new_chromosome += gene_of_p1.chromosome[p1_end:]\n new_chromosome2 = []\n new_chromosome2 += gene_of_p2.chromosome[:p2_begin]\n new_chromosome2 += gene_of_p1.chromosome[p1_begin:p1_end]\n new_chromosome2 += gene_of_p2.chromosome[p2_end:]\n self.remove_repeatable(new_chromosome)\n self.remove_repeatable(new_chromosome2)\n return Gene(chromosome=new_chromosome), Gene(chromosome=new_chromosome2)",
"def heuristic_crossover(random, candidates, args):\n crossover_rate = args.setdefault('crossover_rate', 1.0)\n bounder = args['_ec'].bounder\n \n if len(candidates) % 2 == 1:\n candidates = candidates[:-1]\n \n # Since we don't have fitness information in the candidates, we need \n # to make a dictionary containing the candidate and its corresponding \n # individual in the population.\n population = list(args['_ec'].population)\n lookup = dict(zip([pickle.dumps(p.candidate, 1) for p in population], population))\n \n moms = candidates[::2]\n dads = candidates[1::2]\n children = []\n for mom, dad in zip(moms, dads):\n if random.random() < crossover_rate:\n bro = copy.copy(dad)\n sis = copy.copy(mom)\n mom_is_better = lookup[pickle.dumps(mom, 1)] > lookup[pickle.dumps(dad, 1)]\n for i, (m, d) in enumerate(zip(mom, dad)):\n negpos = 1 if mom_is_better else -1\n val = d if mom_is_better else m\n bro[i] = val + random.random() * negpos * (m - d)\n sis[i] = val + random.random() * negpos * (m - d)\n bro = bounder(bro, args)\n sis = bounder(sis, args)\n children.append(bro)\n children.append(sis)\n else:\n children.append(mom)\n children.append(dad)\n return children",
"def heuristic_crossover(random, candidates, args):\r\n crossover_rate = args.setdefault('crossover_rate', 1.0)\r\n bounder = args['_ec'].bounder\r\n \r\n if len(candidates) % 2 == 1:\r\n candidates = candidates[:-1]\r\n \r\n # Since we don't have fitness information in the candidates, we need \r\n # to make a dictionary containing the candidate and its corresponding \r\n # individual in the population.\r\n population = list(args['_ec'].population)\r\n lookup = dict(zip([pickle.dumps(p.candidate, 1) for p in population], population))\r\n \r\n moms = candidates[::2]\r\n dads = candidates[1::2]\r\n children = []\r\n for mom, dad in zip(moms, dads):\r\n if random.random() < crossover_rate:\r\n bro = copy.copy(dad)\r\n sis = copy.copy(mom)\r\n mom_is_better = lookup[pickle.dumps(mom, 1)] > lookup[pickle.dumps(dad, 1)]\r\n for i, (m, d) in enumerate(zip(mom, dad)):\r\n negpos = 1 if mom_is_better else -1\r\n val = d if mom_is_better else m\r\n bro[i] = val + random.random() * negpos * (m - d)\r\n sis[i] = val + random.random() * negpos * (m - d)\r\n bro = bounder(bro, args)\r\n sis = bounder(sis, args)\r\n children.append(bro)\r\n children.append(sis)\r\n else:\r\n children.append(mom)\r\n children.append(dad)\r\n return children",
"def estimation_of_distribution_variation(random, candidates, args):\r\n num_offspring = args.setdefault('num_offspring', 1)\r\n bounder = args['_ec'].bounder\r\n\r\n cs_copy = list(candidates)\r\n num_genes = max([len(x) for x in cs_copy])\r\n genes = [[x[i] for x in cs_copy] for i in range(num_genes)]\r\n mean = [float(sum(x)) / float(len(x)) for x in genes]\r\n stdev = [sum([(x - m) ** 2 for x in g]) / float(len(g) - 1) for g, m in zip(genes, mean)]\r\n offspring = []\r\n for _ in range(num_offspring):\r\n child = copy.copy(cs_copy[0])\r\n for i, (m, s) in enumerate(zip(mean, stdev)):\r\n child[i] = m + random.gauss(0, s)\r\n child = bounder(child, args)\r\n offspring.append(child)\r\n\r\n return offspring",
"def simulated_binary_crossover(random, mom, dad, args):\r\n crossover_rate = args.setdefault('crossover_rate', 1.0)\r\n if random.random() < crossover_rate:\r\n di = args.setdefault('sbx_distribution_index', 10)\r\n bounder = args['_ec'].bounder\r\n bro = copy.copy(dad)\r\n sis = copy.copy(mom)\r\n for i, (m, d, lb, ub) in enumerate(zip(mom, dad, bounder.lower_bound, bounder.upper_bound)):\r\n try:\r\n if m > d:\r\n m, d = d, m\r\n beta = 1.0 + 2 * min(m - lb, ub - d) / float(d - m)\r\n alpha = 2.0 - 1.0 / beta**(di + 1.0)\r\n u = random.random() \r\n if u <= (1.0 / alpha):\r\n beta_q = (u * alpha)**(1.0 / float(di + 1.0))\r\n else:\r\n beta_q = (1.0 / (2.0 - u * alpha))**(1.0 / float(di + 1.0))\r\n bro_val = 0.5 * ((m + d) - beta_q * (d - m))\r\n bro_val = max(min(bro_val, ub), lb) \r\n sis_val = 0.5 * ((m + d) + beta_q * (d - m))\r\n sis_val = max(min(sis_val, ub), lb)\r\n if random.random() > 0.5:\r\n bro_val, sis_val = sis_val, bro_val\r\n bro[i] = bro_val\r\n sis[i] = sis_val\r\n except ZeroDivisionError:\r\n # The offspring already have legitimate values for every element,\r\n # so no need to take any special action here.\r\n pass\r\n return [bro, sis]\r\n else:\r\n return [mom, dad]",
"def blend_crossover(parent_1, parent_2):\n\talpha = 0.5 # ref Eshelmann & Schafer\n\n\tchild_genotype = np.zeros((parent_1.num_genes,))\n\tfor i in range(parent_1.num_genes):\n\t\tdifference = abs(parent_1.genotype[i] - parent_2.genotype[i])\n\t\tbound_1 = min(parent_1.genotype[i], parent_2.genotype[i]) - alpha * difference\n\t\tbound_2 = max(parent_1.genotype[i], parent_2.genotype[i]) + alpha * difference\n\t\tchild_genotype[i] = np.random.uniform(bound_1, bound_2)\n\n\treturn child_genotype",
"def loopy_belief_propagation(tests, groups,\n base_infection_rate,\n sensitivity, specificity,\n min_iterations, max_iterations,\n atol):\n n_groups, n_patients = groups.shape\n if np.size(groups) == 0:\n if np.size(base_infection_rate) == 1: # only one rate\n marginal = base_infection_rate * np.ones(n_patients)\n return marginal, 0\n elif np.size(base_infection_rate) == n_patients:\n return base_infection_rate, 0\n else:\n raise ValueError(\"Improper size for vector of base infection rates\")\n\n mu = -jax.scipy.special.logit(base_infection_rate)\n\n groups_size = np.sum(groups, axis=1)\n sensitivity = utils.select_from_sizes(sensitivity, groups_size)\n specificity = utils.select_from_sizes(specificity, groups_size)\n gamma0 = np.log(sensitivity + specificity - 1) - np.log(1 - sensitivity)\n gamma1 = np.log(sensitivity + specificity - 1) - np.log(sensitivity)\n gamma = tests * gamma1 + (1 - tests) * gamma0\n test_sign = 1 - 2 * tests[:, np.newaxis]\n\n # Initialization\n alphabeta = np.zeros((2, n_groups, n_patients))\n alpha_beta_iteration = [alphabeta, 0]\n\n # return marginal from alphabeta\n def marginal_from_alphabeta(alphabeta):\n beta_bar = np.sum(alphabeta[1, :, :], axis=0)\n return jax.scipy.special.expit(-beta_bar - mu)\n\n # lbp loop\n def lbp_loop(_, alphabeta):\n alpha = alphabeta[0, :, :]\n beta = alphabeta[1, :, :]\n\n # update alpha\n beta_bar = np.sum(beta, axis=0)\n alpha = jax.nn.log_sigmoid(beta_bar - beta + mu)\n alpha *= groups\n\n # update beta\n alpha_bar = np.sum(alpha, axis=1, keepdims=True)\n beta = np.log1p(test_sign *\n np.exp(-alpha + alpha_bar + gamma[:, np.newaxis]))\n beta *= groups\n return np.stack((alpha, beta), axis=0)\n\n def cond_fun(alpha_beta_iteration):\n alphabeta, iteration = alpha_beta_iteration\n marginal = marginal_from_alphabeta(alphabeta)\n marginal_plus_one_iteration = marginal_from_alphabeta(\n lbp_loop(0, alphabeta))\n converged = np.allclose(marginal, marginal_plus_one_iteration, atol=atol)\n return (not converged) and (iteration < max_iterations)\n\n def body_fun(alpha_beta_iteration):\n alphabeta, iteration = alpha_beta_iteration\n alphabeta = jax.lax.fori_loop(0, min_iterations, lbp_loop, alphabeta)\n iteration += min_iterations\n return [alphabeta, iteration]\n\n # Run LBP while loop\n while cond_fun(alpha_beta_iteration):\n alpha_beta_iteration = body_fun(alpha_beta_iteration)\n\n alphabeta, _ = alpha_beta_iteration\n\n # Compute two consecutive marginals\n marginal = marginal_from_alphabeta(alphabeta)\n marginal_plus_one_iteration = marginal_from_alphabeta(lbp_loop(0, alphabeta))\n\n return marginal, np.amax(np.abs(marginal - marginal_plus_one_iteration))",
"def blend2(*args, autoAnchor: bool=True, autoNormal: bool=True, caching: bool=True,\n flipLeftNormal: bool=False, flipRightNormal: bool=False, leftAnchor: Union[float,\n bool]=0.0, leftEnd: Union[float, bool]=1.0, leftStart: Union[float, bool]=0.0,\n multipleKnots: bool=True, nodeState: Union[int, bool]=0, positionTolerance:\n Union[float, bool]=0.1, reverseLeft: bool=False, reverseRight: bool=False,\n rightAnchor: Union[float, bool]=0.0, rightEnd: Union[float, bool]=1.0, rightStart:\n Union[float, bool]=0.0, tangentTolerance: Union[float, bool]=0.1,\n constructionHistory: bool=True, crvsInFirstRail: Union[int, bool]=0, name: AnyStr=\"\",\n object: bool=True, polygon: int=0, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[List[AnyStr], Any]:\n pass",
"def arithmetic_crossover(random, mom, dad, args):\n ax_alpha = args.setdefault('ax_alpha', 0.5)\n ax_points = args.setdefault('ax_points', None)\n crossover_rate = args.setdefault('crossover_rate', 1.0)\n bounder = args['_ec'].bounder\n children = []\n if random.random() < crossover_rate:\n bro = copy.copy(dad)\n sis = copy.copy(mom)\n if ax_points is None:\n ax_points = list(range(min(len(bro), len(sis))))\n for i in ax_points:\n bro[i] = ax_alpha * mom[i] + (1 - ax_alpha) * dad[i]\n sis[i] = ax_alpha * dad[i] + (1 - ax_alpha) * mom[i]\n bro = bounder(bro, args)\n sis = bounder(sis, args)\n children.append(bro)\n children.append(sis)\n else:\n children.append(mom)\n children.append(dad)\n return children",
"def arithmetic_crossover(random, mom, dad, args):\r\n ax_alpha = args.setdefault('ax_alpha', 0.5)\r\n ax_points = args.setdefault('ax_points', None)\r\n crossover_rate = args.setdefault('crossover_rate', 1.0)\r\n bounder = args['_ec'].bounder\r\n children = []\r\n if random.random() < crossover_rate:\r\n bro = copy.copy(dad)\r\n sis = copy.copy(mom)\r\n if ax_points is None:\r\n ax_points = list(range(min(len(bro), len(sis))))\r\n for i in ax_points:\r\n bro[i] = ax_alpha * mom[i] + (1 - ax_alpha) * dad[i]\r\n sis[i] = ax_alpha * dad[i] + (1 - ax_alpha) * mom[i]\r\n bro = bounder(bro, args)\r\n sis = bounder(sis, args)\r\n children.append(bro)\r\n children.append(sis)\r\n else:\r\n children.append(mom)\r\n children.append(dad)\r\n return children",
"def blendShapeEnvelopeOff():\n obj = cmds.ls(selection = True)\n history = cmds.listHistory(obj)\n bsHistory = cmds.ls(history, type = 'blendShape')\n for bs in bsHistory:\n cmds.setAttr(bs+'.'+'envelope',0.0) #note not changing blend target weights",
"def laplace_crossover(random, mom, dad, args):\n crossover_rate = args.setdefault('crossover_rate', 1.0)\n if random.random() < crossover_rate:\n bounder = args['_ec'].bounder\n a = args.setdefault('lx_location', 0)\n b = args.setdefault('lx_scale', 0.5)\n bro = copy.copy(dad)\n sis = copy.copy(mom)\n for i, (m, d) in enumerate(zip(mom, dad)):\n u = random.random()\n if random.random() <= 0.5:\n beta = a - b * math.log(u)\n else:\n beta = a + b * math.log(u)\n bro[i] = m + beta * abs(m - d)\n sis[i] = d + beta * abs(m - d)\n bro = bounder(bro, args)\n sis = bounder(sis, args)\n return [bro, sis]\n else:\n return [mom, dad]",
"def laplace_crossover(random, mom, dad, args):\r\n crossover_rate = args.setdefault('crossover_rate', 1.0)\r\n if random.random() < crossover_rate:\r\n bounder = args['_ec'].bounder\r\n a = args.setdefault('lx_location', 0)\r\n b = args.setdefault('lx_scale', 0.5)\r\n bro = copy.copy(dad)\r\n sis = copy.copy(mom)\r\n for i, (m, d) in enumerate(zip(mom, dad)):\r\n u = random.random()\r\n if random.random() <= 0.5:\r\n beta = a - b * math.log(u)\r\n else:\r\n beta = a + b * math.log(u)\r\n bro[i] = m + beta * abs(m - d)\r\n sis[i] = d + beta * abs(m - d)\r\n bro = bounder(bro, args)\r\n sis = bounder(sis, args)\r\n return [bro, sis]\r\n else:\r\n return [mom, dad]",
"def cal_bl(self, offset, size):\n blbegin = offset / conf.blsize\n blend = (offset + size - 1) / conf.blsize + 1\n blnum = range(blbegin, blend)\n\n blfrom = [offset % conf.blsize, ]\n blfrom.extend([0 for i in range(len(blnum) - 1)])\n\n blto = [conf.blsize for i in range(len(blnum) - 1)]\n least = (offset + size) % conf.blsize\n\n if least == 0:\n least = conf.blsize\n blto.append(least)\n\n return zip(blnum, blfrom, blto)",
"def crossover(self, parents):\n\n randomCategory = random.sample(list(ga_.Category), 1)[0]\n randomParent1 = random.sample(parents, 1)[0]\n randomParent2 = None\n for parent in parents:\n if parent != randomParent1:\n randomParent2 = parent\n \n\n # put randomCategory from random parent to the new offpring and the remainder from the second parent\n offspring = ga_.Outfit()\n if randomCategory == ga_.Category.TOP:\n offspring.top = randomParent1.top\n offspring.bottom = randomParent2.bottom\n offspring.shoes = randomParent2.shoes\n offspring.neck = randomParent2.neck\n offspring.handbag = randomParent2.handbag\n elif randomCategory == ga_.Category.BOTTOM:\n offspring.top = randomParent2.top\n offspring.bottom = randomParent1.bottom\n offspring.shoes = randomParent2.shoes\n offspring.neck = randomParent2.neck\n offspring.handbag = randomParent2.handbag\n elif randomCategory == ga_.Category.SHOES:\n offspring.top = randomParent2.top\n offspring.bottom = randomParent2.bottom\n offspring.shoes = randomParent1.shoes\n offspring.neck = randomParent2.neck\n offspring.handbag = randomParent2.handbag\n elif randomCategory == ga_.Category.NECK:\n offspring.top = randomParent2.top\n offspring.bottom = randomParent2.bottom\n offspring.shoes = randomParent2.shoes\n offspring.neck = randomParent1.neck\n offspring.handbag = randomParent2.handbag\n elif randomCategory == ga_.Category.HANDBAG:\n offspring.top = randomParent2.top\n offspring.bottom = randomParent2.bottom\n offspring.shoes = randomParent2.shoes\n offspring.neck = randomParent2.neck\n offspring.handbag = randomParent1.handbag\n\n return offspring",
"def varAnd(population, toolbox, cxpb, mutpb):\n offspring = [toolbox.clone(ind) for ind in population]\n new_cxpb=cxpb/(cxpb+mutpb)\n new_mutpb=mutpb/(cxpb+mutpb)\n \n #num_cx=int(new_cxpb*len(offspring))\n #num_mu=len(offspring)-num_cx\n #print(new_cxpb, new_mutpb)\n # Apply crossover and mutation on the offspring\n i = 1\n while i < len(offspring):\n if random.random() < new_cxpb:\n if (offspring[i - 1] == offspring[i]):\n offspring[i - 1], = toolbox.mutate(offspring[i - 1])\n offspring[i], = toolbox.mutate(offspring[i])\n else:\n offspring[i - 1], offspring[i] = toolbox.mate(offspring[i - 1], offspring[i])\n del offspring[i - 1].fitness.values, offspring[i].fitness.values\n i = i + 2\n else:\n offspring[i], = toolbox.mutate(offspring[i])\n del offspring[i].fitness.values\n i = i + 1\n return offspring",
"def bakeClip(*args, blend: List[int, int]=None, clipIndex: Union[int, List[int]]=0,\n keepOriginals: bool=True, name: AnyStr=\"\", **kwargs)->AnyStr:\n pass",
"def test_blend_preset(self):\n (x_points_init, x_weights_init, x_rotations_init, x_translations_init,\n y_blended_points_init) = test_helpers.generate_preset_test_lbs_blend()\n\n x_points = tf.convert_to_tensor(value=x_points_init)\n x_weights = tf.convert_to_tensor(value=x_weights_init)\n x_rotations = tf.convert_to_tensor(value=x_rotations_init)\n x_translations = tf.convert_to_tensor(value=x_translations_init)\n y_blended_points = tf.convert_to_tensor(value=y_blended_points_init)\n\n y = linear_blend_skinning.blend(x_points, x_weights, x_rotations,\n x_translations)\n\n self.assertAllClose(y_blended_points, y)",
"def learning_proposal(self):\n\n n, s = self.X_select.shape\n\n beta_hat = self.observed_MLE\n\n perturbed_beta = beta_hat.copy()\n nidx = np.random.choice(np.arange(s), min(3, s), replace=False)\n for idx in nidx:\n scale = np.random.choice(self.scales, 1)\n perturbed_beta[idx] += (scale * np.random.standard_normal() *\n np.sqrt(self._beta_cov[idx, idx]))\n \n linpred = self.X_select.dot(perturbed_beta)\n prob = normal_dbn.cdf(linpred)\n perturbed_Y = np.random.binomial(1, prob)\n\n perturbed_MLE = probit_MLE(self.X, perturbed_Y, self.observed_outcome)[0]\n return perturbed_MLE, perturbed_Y",
"def blendShape(*args, after: bool=True, afterReference: bool=True, automatic: bool=True,\n before: bool=True, copyDelta: List[int, int, int]=None, copyInBetweenDelta:\n List[int, int, int, int]=None, deformerTools: bool=True, envelope: Union[float,\n bool]=1.0, exclusive: Union[AnyStr, bool]=\"\", export: AnyStr=\"\", exportTarget:\n Union[List[int, int], List[List[int, int]]]=None, flipTarget: Union[List[int,\n int], List[List[int, int]]]=None, frontOfChain: bool=True, geometry:\n Union[AnyStr, List[AnyStr], bool]=\"\", geometryIndices: bool=True,\n ignoreSelected: bool=True, ip: AnyStr=\"\", inBetween: bool=True, inBetweenIndex:\n int=0, inBetweenType: AnyStr=\"\", includeHiddenSelections: bool=False,\n mergeSource: Union[int, List[int]]=0, mergeTarget: int=0, mirrorDirection: int=0,\n mirrorTarget: Union[List[int, int], List[List[int, int]]]=None, name: AnyStr=\"\",\n normalizationGroups: bool=True, origin: AnyStr=\"\", parallel: bool=True, prune:\n bool=True, remove: Union[bool, List[bool]]=True, resetTargetDelta:\n Union[List[int, int], List[List[int, int]]]=None, split: bool=True,\n suppressDialog: bool=True, symmetryAxis: Union[AnyStr, bool]=\"\", symmetryEdge:\n Union[AnyStr, List[AnyStr], bool]=\"\", symmetrySpace: Union[int, bool]=0,\n tangentSpace: bool=True, target: Union[List[AnyStr, int, AnyStr, float],\n List[List[AnyStr, int, AnyStr, float]], bool]=None, topologyCheck: bool=True,\n transform: Union[AnyStr, bool]=\"\", weight: Union[List[int, float], List[List[int,\n float]], bool]=None, weightCount: Union[int, bool]=0, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[List[AnyStr], Any]:\n pass",
"def define_breeding_chance_by_score(self, crossover):\r\n breeding_chance = []\r\n zones_num = self.breeding_rules.chance_zones\r\n zones = range(zones_num)\r\n zone_size = int(len(crossover) / zones_num)\r\n first_chance = int(self.breeding_rules.first_zone_chance * 100)\r\n last_chance = int(self.breeding_rules.last_zone_chance * 100)\r\n chance_step = int((last_chance - first_chance) / zones_num)\r\n chances = range(first_chance, last_chance, chance_step)\r\n for zone, chance in zip(zones, chances):\r\n chance = chance / 100\r\n for _ in range(zone_size):\r\n breeding_chance.append(chance)\r\n len_diff = len(breeding_chance) - len(crossover)\r\n if 0 < len_diff:\r\n breeding_chance = breeding_chance[:len(breeding_chance) - len_diff]\r\n if 0 > len_diff:\r\n len_diff = -len_diff\r\n breeding_chance.extend(breeding_chance[-1] for _ in range(len_diff))\r\n chance_sum = sum(breeding_chance)\r\n breeding_chance = [chance/chance_sum for chance in breeding_chance]\r\n return breeding_chance",
"def varAnd(population, toolbox, cxpb, mutpb):\n offspring = [toolbox.clone(ind) for ind in population]\n \n # Apply crossover and mutation on the offspring\n for ind1, ind2 in zip(offspring[::2], offspring[1::2]):\n if random.random() < cxpb:\n toolbox.mate(ind1, ind2)\n del ind1.fitness.values, ind2.fitness.values\n \n for ind in offspring:\n if random.random() < mutpb:\n toolbox.mutate(ind)\n del ind.fitness.values\n \n return offspring",
"def crossing(self, *args):\n return self.overlap(*args, type='point')",
"def batch_sample_beta(self):\n c_contexts = self.context[self.iter]\n\n old_beta = self.beta[self.iter]\n new_beta = -1\n proposal_sd = .1\n while new_beta <= 0:\n new_beta = random.gauss(mu = old_beta, sigma = proposal_sd)\n \n # set up to calculate the g densities for both the old and new beta values\n log_g_old = -1 * old_beta # which is np.log(np.exp(-1 * old_beta))\n log_g_new = -1 * new_beta # similar as above\n\n # derive contexts from breakpoints arrangement\n context_dict = self.make_context_dict(c_contexts)\n for context in context_dict.keys():\n log_g_old += math.lgamma(self.support_size * old_beta) \\\n - math.lgamma(self.support_size * old_beta + len(context_dict[context]))\n log_g_new += math.lgamma(self.support_size * new_beta) \\\n - math.lgamma(self.support_size * new_beta + len(context_dict[context]))\n \n for y in self.support:\n log_g_old += math.lgamma(context_dict[context].count(y) + old_beta) - math.lgamma(old_beta)\n log_g_new += math.lgamma(context_dict[context].count(y) + new_beta) - math.lgamma(new_beta)\n\n # compute candidate densities q for old and new beta\n # since the proposal distribution is normal this step is not needed\n log_q_old = 0#np.log(dnorm(old_beta, loc = new_beta, scale = proposal_sd))\n log_q_new = 0#np.log(dnorm(new_beta, loc = old_beta, scale = proposal_sd)) \n \n # compute the moving probability\n moving_prob = min(1, np.exp((log_g_new + log_q_old) - (log_g_old + log_q_new)))\n \n u = random.uniform(0,1)\n if u < moving_prob: self.beta[self.iter] = new_beta\n return self.beta[self.iter]"
]
| [
"0.60011",
"0.563031",
"0.55039793",
"0.5291128",
"0.52086353",
"0.517744",
"0.5123646",
"0.51229113",
"0.51227796",
"0.51224315",
"0.51028264",
"0.5100048",
"0.5081637",
"0.50476974",
"0.50207806",
"0.5018427",
"0.5007083",
"0.49650475",
"0.4952013",
"0.49350655",
"0.48709446",
"0.48413086",
"0.4834381",
"0.4828658",
"0.48012355",
"0.474098",
"0.47408703",
"0.47343037",
"0.47145444",
"0.47076222"
]
| 0.6025638 | 0 |
Return the offspring of simulated binary crossover on the candidates. This function performs simulated binary crossover (SBX), following the implementation in NSGA-II | def simulated_binary_crossover(random, mom, dad, args):
crossover_rate = args.setdefault('crossover_rate', 1.0)
if random.random() < crossover_rate:
di = args.setdefault('sbx_distribution_index', 10)
bounder = args['_ec'].bounder
bro = copy.copy(dad)
sis = copy.copy(mom)
for i, (m, d, lb, ub) in enumerate(zip(mom, dad, bounder.lower_bound, bounder.upper_bound)):
try:
if m > d:
m, d = d, m
beta = 1.0 + 2 * min(m - lb, ub - d) / float(d - m)
alpha = 2.0 - 1.0 / beta**(di + 1.0)
u = random.random()
if u <= (1.0 / alpha):
beta_q = (u * alpha)**(1.0 / float(di + 1.0))
else:
beta_q = (1.0 / (2.0 - u * alpha))**(1.0 / float(di + 1.0))
bro_val = 0.5 * ((m + d) - beta_q * (d - m))
bro_val = max(min(bro_val, ub), lb)
sis_val = 0.5 * ((m + d) + beta_q * (d - m))
sis_val = max(min(sis_val, ub), lb)
if random.random() > 0.5:
bro_val, sis_val = sis_val, bro_val
bro[i] = bro_val
sis[i] = sis_val
except ZeroDivisionError:
# The offspring already have legitimate values for every element,
# so no need to take any special action here.
pass
return [bro, sis]
else:
return [mom, dad] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def simulated_binary_crossover(random, mom, dad, args):\r\n crossover_rate = args.setdefault('crossover_rate', 1.0)\r\n if random.random() < crossover_rate:\r\n di = args.setdefault('sbx_distribution_index', 10)\r\n bounder = args['_ec'].bounder\r\n bro = copy.copy(dad)\r\n sis = copy.copy(mom)\r\n for i, (m, d, lb, ub) in enumerate(zip(mom, dad, bounder.lower_bound, bounder.upper_bound)):\r\n try:\r\n if m > d:\r\n m, d = d, m\r\n beta = 1.0 + 2 * min(m - lb, ub - d) / float(d - m)\r\n alpha = 2.0 - 1.0 / beta**(di + 1.0)\r\n u = random.random() \r\n if u <= (1.0 / alpha):\r\n beta_q = (u * alpha)**(1.0 / float(di + 1.0))\r\n else:\r\n beta_q = (1.0 / (2.0 - u * alpha))**(1.0 / float(di + 1.0))\r\n bro_val = 0.5 * ((m + d) - beta_q * (d - m))\r\n bro_val = max(min(bro_val, ub), lb) \r\n sis_val = 0.5 * ((m + d) + beta_q * (d - m))\r\n sis_val = max(min(sis_val, ub), lb)\r\n if random.random() > 0.5:\r\n bro_val, sis_val = sis_val, bro_val\r\n bro[i] = bro_val\r\n sis[i] = sis_val\r\n except ZeroDivisionError:\r\n # The offspring already have legitimate values for every element,\r\n # so no need to take any special action here.\r\n pass\r\n return [bro, sis]\r\n else:\r\n return [mom, dad]",
"def _create_offspring(self):\n parents = self._select_parents()\n offspring = self._crossover(*parents)\n if (random.uniform(0, 1) < self.mutation_rate):\n self._mutate(offspring)\n return offspring",
"def _crossover(self, sel):\n offspring = []\n for p1, p2 in sel:\n p1 = copy.deepcopy(p1)\n p2 = copy.deepcopy(p2)\n\n tmp = self.op.crossover(\n copy.deepcopy(p1['individual']),\n copy.deepcopy(p2['individual']))\n if not tmp[0] is None and not tmp[1] is None:\n c1 = {\n 'individual': tmp[0],\n 'fitness': self.op.fitness(tmp[0])\n }\n c2 = {\n 'individual': tmp[1],\n 'fitness': self.op.fitness(tmp[1])\n }\n\n offspring.append(\n c1 if c1['fitness'] < p1['fitness'] else p1)\n offspring.append(\n c2 if c2['fitness'] < p2['fitness'] else p2)\n else:\n offspring.extend((p1, p2))\n return offspring",
"def crossover(self):\n print(' - crossover')\n s = time.time()\n\n # make a list with all index\n tmp_list = list(range(0, self.size))\n while len(tmp_list) > 0:\n candidate_1 = random.choice(tmp_list)\n tmp_list.remove(candidate_1)\n candidate_2 = random.choice(tmp_list)\n tmp_list.remove(candidate_2)\n\n # ceck if the two candidates will crossover\n chance = random.uniform(0, 1)\n if chance <= self.crossover_rate:\n self.crossover_two_candidates(candidate_1, candidate_2)\n\n e = time.time()\n print(\" - time: \", e - s)",
"def cross_on_arb_seq(self, slmax=6):\n p1_index = randint(0, floor(self.population_size * self.cross_rate)-1)\n p2_index = randint(0, floor(self.population_size * self.cross_rate)-1)\n gene_of_p1 = self.population[p1_index]\n gene_of_p2 = self.population[p2_index]\n\n p1_begin = myrandint(0, len(gene_of_p1)-1)\n p1_end = p1_begin + myrandint(1, int_min(slmax, len(gene_of_p1)-p1_begin))\n p2_begin = myrandint(0, len(gene_of_p2)-1)\n p2_end = p2_begin + myrandint(1, int_min(slmax, len(gene_of_p2)-p2_begin))\n new_chromosome = []\n new_chromosome += gene_of_p1.chromosome[:p1_begin]\n new_chromosome += gene_of_p2.chromosome[p2_begin:p2_end]\n new_chromosome += gene_of_p1.chromosome[p1_end:]\n new_chromosome2 = []\n new_chromosome2 += gene_of_p2.chromosome[:p2_begin]\n new_chromosome2 += gene_of_p1.chromosome[p1_begin:p1_end]\n new_chromosome2 += gene_of_p2.chromosome[p2_end:]\n self.remove_repeatable(new_chromosome)\n self.remove_repeatable(new_chromosome2)\n return Gene(chromosome=new_chromosome), Gene(chromosome=new_chromosome2)",
"def crossover(x1,x2):\n for chromo in x1.chromosomes:\n result_chromos = [np.zeros((chromo.shape))]\n #result_chromos = [np.zeros((chromo.shape)) for chromo in x1.chromosomes]\n i = 0\n for j in range(len(x1.chromosomes[i])):\n for k in range(len(x1.chromosomes[i][j])):\n if(np.random.rand(1) < 0.5):\n result_chromos[i][j][k] = x1.chromosomes[i][j][k]\n else:\n result_chromos[i][j][k] = x2.chromosomes[i][j][k]\n if(np.random.rand(1)< 0.8):#at 0.3 very agressive\n result_chromos[i][j][k] += -0.05 + np.random.rand(1)*0.1\n return result_chromos",
"def crossover(self):\n self.sort_population()\n elite_amount = round(self.elite_rate * self.population_size)\n # preserve from the top\n new_population = [ele for ele in self.population if ele.ttl > 0]\n for individual in new_population:\n if individual.ttl > 0:\n individual.ttl -= 1\n new_population += self.population[:elite_amount]\n\n while len(new_population) < self.population_size:\n # newGene = self.crossBelowCrossRate()\n new_gene, new_gene2 = self.cross_on_arb_seq()\n if random() <= self.mutate_rate:\n self.mutate_append(new_gene)\n new_population.append(new_gene)\n if len(new_population) == self.population_size:\n break\n\n if random() <= self.mutate_rate:\n self.mutate_append(new_gene2)\n new_population.append(new_gene2)\n self.population = new_population",
"def crossover(cross):\n @functools.wraps(cross)\n def inspyred_crossover(random, candidates, args):\n if len(candidates) % 2 == 1:\n candidates = candidates[:-1]\n moms = candidates[::2]\n dads = candidates[1::2]\n children = []\n for i, (mom, dad) in enumerate(zip(moms, dads)):\n cross.index = i\n offspring = cross(random, mom, dad, args)\n for o in offspring:\n children.append(o)\n return children\n inspyred_crossover.single_crossover = cross\n return inspyred_crossover",
"def _apply_crossover(pop, op, pb):\n for i in range(1, len(pop), 2):\n if random.random() < pb:\n pop[i - 1], pop[i] = op(pop[i - 1], pop[i])\n del pop[i - 1].fitness.values\n del pop[i].fitness.values\n return pop",
"def crossover(cross):\r\n @functools.wraps(cross)\r\n def ecspy_crossover(random, candidates, args):\r\n if len(candidates) % 2 == 1:\r\n candidates = candidates[:-1]\r\n moms = candidates[::2]\r\n dads = candidates[1::2]\r\n children = []\r\n for i, (mom, dad) in enumerate(zip(moms, dads)):\r\n cross.index = i\r\n offspring = cross(random, mom, dad, args)\r\n for o in offspring:\r\n children.append(o)\r\n return children\r\n ecspy_crossover.single_crossover = cross\r\n return ecspy_crossover",
"def varAnd(population, toolbox, cxpb, mutpb):\n offspring = [toolbox.clone(ind) for ind in population]\n new_cxpb=cxpb/(cxpb+mutpb)\n new_mutpb=mutpb/(cxpb+mutpb)\n \n #num_cx=int(new_cxpb*len(offspring))\n #num_mu=len(offspring)-num_cx\n #print(new_cxpb, new_mutpb)\n # Apply crossover and mutation on the offspring\n i = 1\n while i < len(offspring):\n if random.random() < new_cxpb:\n if (offspring[i - 1] == offspring[i]):\n offspring[i - 1], = toolbox.mutate(offspring[i - 1])\n offspring[i], = toolbox.mutate(offspring[i])\n else:\n offspring[i - 1], offspring[i] = toolbox.mate(offspring[i - 1], offspring[i])\n del offspring[i - 1].fitness.values, offspring[i].fitness.values\n i = i + 2\n else:\n offspring[i], = toolbox.mutate(offspring[i])\n del offspring[i].fitness.values\n i = i + 1\n return offspring",
"def _crossover(self, best_population, crossover, n_parents=2, method=\"uniform_swap\"):\n if crossover:\n # randomly select parents\n parents_indexes = torch.randint(0, len(best_population), (self.population_size, n_parents),\n device=self.device)\n new_population = torch.zeros(self.population.shape, device=self.device)\n i = 0\n for p_idx in parents_indexes:\n new_population[i] = self._produce_child(best_population[p_idx], method=method)\n i += 1\n else:\n # randomly repeat best individuals\n new_pop_indexes = torch.randint(0, len(best_population), (self.population_size,), device=self.device)\n new_population = best_population[new_pop_indexes]\n return new_population",
"def segmented_crossover(\n self, mating_pop_dict, test=False, pairs=[], crossover_prob={}\n ):\n\n print('Performing crossovers')\n\n # Initialises dictionary of child networks\n crossover_pop_dict = OrderedDict()\n\n if test is False:\n # Selects pairs of networks at random to crossover with each other\n network_num = list(mating_pop_dict.keys())\n random.shuffle(network_num)\n network_num = iter(network_num) # Do not merge with line below,\n # and do not introduce any lines of code between them!\n network_num = list(zip(network_num, network_num))\n else:\n network_num = pairs\n\n # Performs segmented crossover\n for index, network_pair in enumerate(network_num):\n network_num_1 = network_pair[0]\n network_num_2 = network_pair[1]\n mate_1 = copy.deepcopy(mating_pop_dict[network_num_1])\n mate_2 = copy.deepcopy(mating_pop_dict[network_num_2])\n\n swap = False\n for node in list(mate_1.nodes):\n type_1 = mate_1.nodes()[node]['type']\n type_2 = mate_2.nodes()[node]['type']\n if type_1 != type_2:\n raise TypeError(\n 'Difference between type of {} in {} ({} = {}; {} ='\n ' {}) - should be identical'.format(node, network_pair,\n network_num_1, type_1, network_num_2, type_2)\n )\n if type_1 == 'loop':\n continue\n\n if test is False:\n random_number = random.uniform(0, 1)\n else:\n random_number = crossover_prob[index][node]\n\n if swap is False:\n if random_number <= self.swap_start_prob:\n swap = True\n else:\n swap = False\n elif swap is True:\n if random_number <= self.swap_stop_prob:\n swap = False\n else:\n swap = True\n\n if swap is True:\n # Copy to prevent these dictionaries from updating when the\n # node attributes are updated in the code below (otherwise\n # both nodes will be assigned the same identity as the node\n # in mate_1, instead of the node identities being crossed\n # over)\n mate_1_attributes = copy.deepcopy(mate_1.nodes()[node])\n mate_2_attributes = copy.deepcopy(mate_2.nodes()[node])\n # mate_1.nodes()[node] = {} does not work, get\n # TypeError: 'NodeView' object does not support item assignment\n for attribute in list(mate_1.nodes()[node].keys()):\n del mate_1.nodes()[node][attribute]\n for attribute in list(mate_2.nodes()[node].keys()):\n del mate_2.nodes()[node][attribute]\n nx.set_node_attributes(mate_1, values={node: mate_2_attributes})\n nx.set_node_attributes(mate_2, values={node: mate_1_attributes})\n\n crossover_pop_dict[network_num_1] = mate_1\n crossover_pop_dict[network_num_2] = mate_2\n\n return crossover_pop_dict",
"def heuristic_crossover(random, candidates, args):\r\n crossover_rate = args.setdefault('crossover_rate', 1.0)\r\n bounder = args['_ec'].bounder\r\n \r\n if len(candidates) % 2 == 1:\r\n candidates = candidates[:-1]\r\n \r\n # Since we don't have fitness information in the candidates, we need \r\n # to make a dictionary containing the candidate and its corresponding \r\n # individual in the population.\r\n population = list(args['_ec'].population)\r\n lookup = dict(zip([pickle.dumps(p.candidate, 1) for p in population], population))\r\n \r\n moms = candidates[::2]\r\n dads = candidates[1::2]\r\n children = []\r\n for mom, dad in zip(moms, dads):\r\n if random.random() < crossover_rate:\r\n bro = copy.copy(dad)\r\n sis = copy.copy(mom)\r\n mom_is_better = lookup[pickle.dumps(mom, 1)] > lookup[pickle.dumps(dad, 1)]\r\n for i, (m, d) in enumerate(zip(mom, dad)):\r\n negpos = 1 if mom_is_better else -1\r\n val = d if mom_is_better else m\r\n bro[i] = val + random.random() * negpos * (m - d)\r\n sis[i] = val + random.random() * negpos * (m - d)\r\n bro = bounder(bro, args)\r\n sis = bounder(sis, args)\r\n children.append(bro)\r\n children.append(sis)\r\n else:\r\n children.append(mom)\r\n children.append(dad)\r\n return children",
"def _cross_over(self,mp,cross_rate,eta):",
"def test_bias(self):\n # Folder must be root to load in make_net properly\n if os.getcwd().split('\\\\')[-1] == 'tests': os.chdir('..')\n \n # Create parents\n cfg = Config()\n gene1, gene2 = get_gru_node_gene(0, cfg.genome)\n for _ in range(100):\n gene3 = gene1.crossover(other=gene2, cfg=cfg.genome, ratio=0.5)\n self.assertEqual(gene3.bias, 0)",
"def inversion_crossover(self, pop):\n children, tmpNonComb, used = ([] for i in range(3))\n for i in range(0, int(len(pop) * self.fracElite), 1):\n r = int(rand() * len(pop))\n while r == i:\n r = int(rand() * len(pop))\n\n if sum(self.cID + self.dID + self.iID) != 0:\n nonComb1 = pop[i][:np.where(self.cID + self.dID + self.iID == 1)[0][(-1)] + 1]\n nonComb2 = pop[r][:np.where(self.cID + self.dID + self.iID == 1)[0][(-1)] + 1]\n if sum(self.xID) != 0:\n comb1 = pop[i][:np.where(self.xID == 1)[0][(-1)] + 1]\n comb2 = pop[r][:np.where(self.xID == 1)[0][(-1)] + 1]\n if sum(self.cID + self.dID + self.iID) != 0:\n c = int(rand() * len(nonComb1))\n if rand() > 0.5:\n tmpNonComb.append(np.array(nonComb1[0:c + 1].tolist() + nonComb2[c + 1:].tolist()))\n else:\n tmpNonComb.append(np.array(nonComb2[0:c + 1].tolist() + nonComb1[c + 1:].tolist()))\n used.append(i)\n if sum(self.xID) != 0:\n c = int(rand() * len(comb1))\n for c1 in range(c, len(comb1), 1):\n d2 = (contains_sublist(comb2, comb1[c1]) + 1) % len(comb1)\n d1 = contains_sublist(comb1, comb2[d2])\n c2 = contains_sublist(comb2, comb1[((d1 + 1) % len(comb1))]) % len(comb1)\n tmp1 = cp.copy(comb1)\n if c1 < d1:\n tmp1[(c1 + 1):(d1 + 1)] = list(reversed(tmp1[c1 + 1:d1 + 1]))\n else:\n tmp1[d1:c1] = list(reversed(tmp1[d1:c1]))\n tmp2 = cp.copy(comb2)\n if c2 < d2:\n tmp2[c2:d2] = list(reversed(tmp2[c2:d2]))\n else:\n tmp2[(d2 + 1):(c2 + 1)] = list(reversed(tmp2[d2 + 1:c2 + 1]))\n if sum(self.cID + self.dID + self.iID) == 0 and sum(self.xID) != 0:\n children.append(tmp1)\n children.append(tmp2)\n elif sum(self.cID + self.dID + self.iID) != 0 and sum(self.xID) != 0:\n children.append(np.concatenate(tmpNonComb[(-1)], tmp1))\n children.append(np.concatenate(tmpNonComb[(-1)], tmp2))\n used.append(i)\n used.append(r)\n\n if sum(self.cID + self.dID + self.iID) != 0 and sum(self.xID) == 0:\n children = tmpNonComb\n return (\n children, used)",
"def heuristic_crossover(random, candidates, args):\n crossover_rate = args.setdefault('crossover_rate', 1.0)\n bounder = args['_ec'].bounder\n \n if len(candidates) % 2 == 1:\n candidates = candidates[:-1]\n \n # Since we don't have fitness information in the candidates, we need \n # to make a dictionary containing the candidate and its corresponding \n # individual in the population.\n population = list(args['_ec'].population)\n lookup = dict(zip([pickle.dumps(p.candidate, 1) for p in population], population))\n \n moms = candidates[::2]\n dads = candidates[1::2]\n children = []\n for mom, dad in zip(moms, dads):\n if random.random() < crossover_rate:\n bro = copy.copy(dad)\n sis = copy.copy(mom)\n mom_is_better = lookup[pickle.dumps(mom, 1)] > lookup[pickle.dumps(dad, 1)]\n for i, (m, d) in enumerate(zip(mom, dad)):\n negpos = 1 if mom_is_better else -1\n val = d if mom_is_better else m\n bro[i] = val + random.random() * negpos * (m - d)\n sis[i] = val + random.random() * negpos * (m - d)\n bro = bounder(bro, args)\n sis = bounder(sis, args)\n children.append(bro)\n children.append(sis)\n else:\n children.append(mom)\n children.append(dad)\n return children",
"def crossover(NN1, NN2, p_c, p_m):\n if np.random.choice([0, 1], p=[1-p_c, p_c]):\n return nn.mate_neural_nets(NN1, NN2, p_m)\n else:\n return np.random.choice([NN1, NN2])",
"def cross(self):\n\n for i in range(self.pop_num): # Put in the first pop_num elements of the \"Parents and Sons\" array our entire input population.\n self.par_and_sons[i].A=self.population[i].A.copy()\n\n random.shuffle(self.population) # Shuffle population.\n\n tt=0 # The counter that is needed to implement a non-trivial crossing.\n for s in range(0,self.pop_num,2): # From 0 to pop_num with step 2. That is. here we take pop_num / 2 pairs of parents.\n self.mother.A=self.population[tt+int(self.pop_num/2)].A # Let the last pop_num / 2 individuals of our population be our mothers.\n self.father.A=self.population[tt].A # And let first pop_num / 2 individuals of our population be dads.\n \n tt=tt+1 \n ran=random.random()\n\n for n in range(self.length): # Crossover.\n if random.random()>0.5:\n self.son1.A[n] = self.father.A[n]\n self.son2.A[self.length-1-n] = self.father.A[n]\n self.son3.A[n] = self.mother.A[n]\n self.son4.A[self.length-1-n] = self.mother.A[n]\n else:\n self.son1.A[n] = self.mother.A[n]\n self.son2.A[self.length-1-n] = self.mother.A[n]\n self.son3.A[n] = self.father.A[n]\n self.son4.A[self.length-1-n] = self.father.A[n]\n\n self.par_and_sons[self.pop_num+2*s].A = self.son1.A.copy()\n self.par_and_sons[self.pop_num+2*s+1].A = self.son2.A.copy()\n self.par_and_sons[self.pop_num+2*s+2].A = self.son3.A.copy()\n self.par_and_sons[self.pop_num+2*s+3].A = self.son4.A.copy()",
"def crossover(obj1, obj2):\n\n assert obj1.structure == obj2.structure, 'The structures of the two brains are different'\n assert obj1.activation_function == obj2.activation_function, 'The activation functions of the two brains are different'\n\n new_brain = Brain((obj1.structure), activation_function=obj1.activation_function)\n\n for i in range(obj1.number_of_transitions):\n shape = obj1.weights[i].shape\n weights1 = obj1.weights[i].flatten()\n weights2 = obj2.weights[i].flatten()\n biases1 = obj1.biases[i]\n biases2 = obj2.biases[i]\n weights_combined = []\n biases_combined = []\n for j in range(len(weights1)):\n if np.random.uniform(0, 1) < 0.5:\n weights_combined.append(weights1[j])\n else:\n weights_combined.append(weights2[j])\n for j in range(len(biases1)):\n if np.random.uniform(0, 1) < 0.5:\n biases_combined.append(biases1[j])\n else:\n biases_combined.append(biases2[j])\n new_brain.weights[i] = np.asarray(weights_combined).reshape(shape)\n new_brain.biases[i] = np.asarray(biases_combined)\n\n return new_brain",
"def create_offspring_min(population, parent_sel, tsp, num_offspring):\n offspring = []\n while len(offspring) < num_offspring:\n parents = []\n if parent_sel == \"fps\":\n rel_fit = rel_fit_min(population, tsp)\n parents = select_fps(population, rel_fit)\n elif parent_sel == \"bintour\":\n parents = select_bin_tour_min(population, tsp)\n elif parent_sel == \"rank\":\n parents = rank_select_min(population, tsp)\n elif parent_sel == \"trunc\":\n parents = select_trunc_min(population, num_offspring, tsp)\n elif parent_sel == \"rand\":\n parents = select_random(population)\n child = []\n pot_parents = random.choices(parents, k=2)\n child = crossover(pot_parents)\n offspring.append(child)\n return offspring",
"def old_mutate(self, offspring):\n # this mutation function will use gray code\n for o in offspring:\n for (idx,_) in enumerate(o):\n before_mutation = o[idx]\n gray = self.binary_to_gray(before_mutation)\n if random.random() < self.mutation_chance:\n gray = gray ^ 1\n if random.random() < self.mutation_chance:\n gray = gray ^ 2\n if random.random() < self.mutation_chance:\n gray = gray ^ 4\n \n o[idx] = self.gray_to_binary(gray)\n \n return offspring",
"def generate_offspring(self, parents, generation, pool=None):\n # Do this only the first time, when the first N agents are generated\n if self.initial_pop:\n self.initial_pop = False\n # We do not store the agents of the initial pop in the archive. Just use them to init the emitters\n # Init emitter population with all agents in the initial population.\n for agent in parents:\n self.emitters_pop.append(OptimizingEmitter(agent['genome'], agent['id'], 0.5, self.bounds, self.params))\n\n # Now select emitter to use\n self.emitter_idx = np.argmin([em.stored for em in self.emitters_pop]) # Select emitter that generated the least solutions\n\n offsprings = Population(self.params, init_size=0, name='offsprings')\n for i in range(self.params.emitter_population): # The batch is the pop size\n off = self.agent_template.copy() # Get new agent\n off['genome'] = self.emitters_pop[self.emitter_idx].ask()\n off['parent'] = self.emitters_pop[self.emitter_idx].id\n off['ancestor'] = self.emitters_pop[self.emitter_idx].id\n offsprings.add(off)\n\n offs_ids = parents.agent_id + np.array(range(len(offsprings))) # Calculate offs IDs\n offsprings['id'] = offs_ids # Update offs IDs\n offsprings['born'] = [generation] * offsprings.size\n parents.agent_id = max(offs_ids) + 1 # This saves the maximum ID reached till now\n return offsprings",
"def crossOver(self, x, y):\n if random.uniform(0, 1) < self.probCrossOver:\n # generate berapa banyak perpindahan\n pindah = random.randint(0, self.panjangKromosom-1)\n for i in range(pindah):\n # melakukan swap nilai x dan y\n x[i], y[i] = y[i], x[i]\n return [x, y]",
"def expected_offspring(c1, c2, c3, c4, c5, c6):\n\tprob_c1 = 1\n\tprob_c2 = 1\n\tprob_c3 = 1\n\tprob_c4 = 0.75\n\tprob_c5 = 0.5\n\tprob_c6 = 0\n\n\tresult = 0\n\n\tfor i in range(1, c1 + 1):\n\t\tresult = result + prob_c1 * 2\n\t\n\tfor i in range(1, c2 + 1):\n\t\tresult = result + prob_c2 * 2\n\t\n\tfor i in range(1, c3 + 1):\n\t\tresult = result + prob_c3 * 2\n\n\tfor i in range(1, c4 + 1):\n\t\tresult = result + prob_c4 * 2\n\n\tfor i in range(1, c5 + 1):\n\t\tresult = result + prob_c5 * 2\n\t\n\tfor i in range(1, c6 + 1):\n\t\tresult = result + prob_c6 * 2\n\n\treturn result",
"def crossover(chromosome_1, chromosome_2):\n (x1, y1) = (randrange(col_count), randrange(row_count))\n (x2, y2) = (randrange(x1+1, col_count+1), randrange(y1+1, row_count+1))\n def mate(chromosome_1, chromosome_2):\n used = set(chromosome_1[x+y*col_count] for x in range(x1, x2) for y in range(y1, y2))\n not_used = (allele for allele in chromosome_2 if allele not in used)\n return [chromosome_1[x+y*col_count] if x1 <= x < x2 and y1 <= y < y2 else next(not_used) for y in range(row_count) for x in range(col_count)]\n return (mate(chromosome_1, chromosome_2), mate(chromosome_2, chromosome_1))",
"def crossing(self, *args):\n return self.phy2abs.crossing(*args)",
"def crossOver(self):\n # copy all the chromosomes from the current generation to a regular python list\n # start with an empty list\n lstChromosomes = []\n # loop through all the items in the queue\n while not self.generation.empty():\n # take a chromosome off the queue\n chromosome = self.generation.get()\n # append the chromosome to the list\n lstChromosomes.append(chromosome)\n # create an empty priority queue for the new generation\n newGeneration = PriorityQueue()\n # cross-over all chromosomes in turn - start with the beginning of the list\n for chrom1Index in range(0, len(lstChromosomes)-1):\n # cross-over with all chromosomes that come after it\n for chrom2Index in range(chrom1Index, len(lstChromosomes)):\n # get the chromosomes we are crossing over\n chrom1 = lstChromosomes[chrom1Index]\n chrom2 = lstChromosomes[chrom2Index]\n # perform the cross-over operation\n xOver = chrom1.crossOver(chrom2)\n # create two new chromosome objects\n newChrom1 = self.chromosomeClass()\n newChrom2 = self.chromosomeClass()\n # set their genes to the values created by crossover operation\n newChrom1.genes = xOver[0]\n newChrom2.genes = xOver[1]\n # save the new chromosomes we just created\n newGeneration.put(newChrom1)\n newGeneration.put(newChrom2)\n # save all the original chromosomes\n for chromosome in lstChromosomes:\n newGeneration.put(chromosome)\n # keep track of all the chromosomes we create\n lstChromosomes = []\n # keep track of how many we are keeping\n chromosomesKept = 0\n # as long as we haven't added more chromosomes than the population is supposed to have\n # and we have more chromosomes to add...\n while chromosomesKept < self.populationSize and not newGeneration.empty():\n # take a chromosome off the new generation queue\n newChromosome = newGeneration.get()\n # have we seen this chromosome before?\n if (not newChromosome in lstChromosomes):\n # store it in our list of chromosomes\n lstChromosomes.append(newChromosome)\n # store it in the queue in the chromosome\n self.generation.put(newChromosome)\n # increase our count of chromosomes kept\n chromosomesKept += 1\n # as long as we haven't added more chromosomes than the population is supposed to have, create\n # random chromosomes\n while chromosomesKept < self.populationSize:\n # create a random chromosome\n newChromosome = self.chromosomeClass()\n # have we seen this chromosome before?\n if (not newChromosome in lstChromosomes):\n # store it in our list of chromosomes\n lstChromosomes.append(newChromosome)\n # store it in the queue in the chromosome\n self.generation.put(newChromosome)\n # increase our count of chromosomes kept\n chromosomesKept += 1",
"def varAnd(population, toolbox, cxpb, mutpb):\n offspring = [toolbox.clone(ind) for ind in population]\n \n # Apply crossover and mutation on the offspring\n for ind1, ind2 in zip(offspring[::2], offspring[1::2]):\n if random.random() < cxpb:\n toolbox.mate(ind1, ind2)\n del ind1.fitness.values, ind2.fitness.values\n \n for ind in offspring:\n if random.random() < mutpb:\n toolbox.mutate(ind)\n del ind.fitness.values\n \n return offspring"
]
| [
"0.6887202",
"0.66483",
"0.61619645",
"0.60364294",
"0.57141507",
"0.5652131",
"0.5621282",
"0.54595256",
"0.54372764",
"0.5420452",
"0.537418",
"0.5370365",
"0.5336791",
"0.5334947",
"0.53227204",
"0.53013736",
"0.5299903",
"0.52882814",
"0.52813506",
"0.5236538",
"0.5230506",
"0.52107",
"0.5199515",
"0.5118239",
"0.51080513",
"0.5092557",
"0.50576854",
"0.5050707",
"0.5037683",
"0.5021683"
]
| 0.6978185 | 0 |
Return the offspring of Laplace crossover on the candidates. This function performs Laplace crossover (LX), following the implementation specified in (Deep and Thakur, "A new crossover operator for real coded genetic algorithms," Applied Mathematics and Computation, Volume 188, Issue 1, May 2007, pp. 895-911). This function also makes use of the bounder function as specified in the EC's ``evolve`` method. | def laplace_crossover(random, mom, dad, args):
crossover_rate = args.setdefault('crossover_rate', 1.0)
if random.random() < crossover_rate:
bounder = args['_ec'].bounder
a = args.setdefault('lx_location', 0)
b = args.setdefault('lx_scale', 0.5)
bro = copy.copy(dad)
sis = copy.copy(mom)
for i, (m, d) in enumerate(zip(mom, dad)):
u = random.random()
if random.random() <= 0.5:
beta = a - b * math.log(u)
else:
beta = a + b * math.log(u)
bro[i] = m + beta * abs(m - d)
sis[i] = d + beta * abs(m - d)
bro = bounder(bro, args)
sis = bounder(sis, args)
return [bro, sis]
else:
return [mom, dad] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _crossover(self, sel):\n offspring = []\n for p1, p2 in sel:\n p1 = copy.deepcopy(p1)\n p2 = copy.deepcopy(p2)\n\n tmp = self.op.crossover(\n copy.deepcopy(p1['individual']),\n copy.deepcopy(p2['individual']))\n if not tmp[0] is None and not tmp[1] is None:\n c1 = {\n 'individual': tmp[0],\n 'fitness': self.op.fitness(tmp[0])\n }\n c2 = {\n 'individual': tmp[1],\n 'fitness': self.op.fitness(tmp[1])\n }\n\n offspring.append(\n c1 if c1['fitness'] < p1['fitness'] else p1)\n offspring.append(\n c2 if c2['fitness'] < p2['fitness'] else p2)\n else:\n offspring.extend((p1, p2))\n return offspring",
"def lombs(x, y):\n # Calculate curvature. \n curv = curvature(x, y)\n steps = np.sqrt(np.diff(x, axis=0)**2 + np.diff(y, axis=0)**2)[:-1]\n arc = np.cumsum(steps)\n # Calculate LS.\n ls_f, ls_p = LombScargle(arc, curv).autopower()\n return ls_f, ls_p",
"def laplace_crossover(random, mom, dad, args):\r\n crossover_rate = args.setdefault('crossover_rate', 1.0)\r\n if random.random() < crossover_rate:\r\n bounder = args['_ec'].bounder\r\n a = args.setdefault('lx_location', 0)\r\n b = args.setdefault('lx_scale', 0.5)\r\n bro = copy.copy(dad)\r\n sis = copy.copy(mom)\r\n for i, (m, d) in enumerate(zip(mom, dad)):\r\n u = random.random()\r\n if random.random() <= 0.5:\r\n beta = a - b * math.log(u)\r\n else:\r\n beta = a + b * math.log(u)\r\n bro[i] = m + beta * abs(m - d)\r\n sis[i] = d + beta * abs(m - d)\r\n bro = bounder(bro, args)\r\n sis = bounder(sis, args)\r\n return [bro, sis]\r\n else:\r\n return [mom, dad]",
"def _create_offspring(self):\n parents = self._select_parents()\n offspring = self._crossover(*parents)\n if (random.uniform(0, 1) < self.mutation_rate):\n self._mutate(offspring)\n return offspring",
"def calc_Laplace_Polynom(self, L, K):\n # N, M, Fin = self.X.get_shape()\n M = int(L.shape[0])\n # Rescale Laplacian and store as a TF sparse tensor. Copy to not modify the shared L.\n\n L = scipy.sparse.csr_matrix(L)\n L = graph.rescale_L(L, lmax=2)\n polynomials = []\n if K > 1:\n # only rank 2 for sparse_tensor_dense_matmul\n T0 = scipy.sparse.identity(M, dtype=np.float32, format=\"csr\")\n I = scipy.sparse.identity(M, dtype=np.float32, format=\"csr\")\n T1 = L\n # polynomials.extend([I, T1]) # the first matrix is I matrix\n polynomials = scipy.sparse.hstack([I.reshape(M * M, 1), T1.reshape(M * M, 1)])\n for k in range(2, K):\n T2 = 2 * L * T1 - T0 #\n polynomials = scipy.sparse.hstack([polynomials, T2.reshape(M * M, 1)])\n T0, T1 = T1, T2\n return polynomials",
"def Laplace_evidence(self):\n A = self.Laplace_covariance()\n try:\n hld = np.sum(np.log(np.diag(jitchol(A)[0])))\n except:\n return np.nan\n return 0.5*self._get_params().size*np.log(2*np.pi) + self.log_likelihood() - hld",
"def Laplace_evidence(self):\r\n A = self.Laplace_covariance()\r\n try:\r\n hld = np.sum(np.log(np.diag(jitchol(A)[0])))\r\n except:\r\n return np.nan\r\n return 0.5 * self._get_params().size * np.log(2 * np.pi) + self.log_likelihood() - hld",
"def crossover(cross):\r\n @functools.wraps(cross)\r\n def ecspy_crossover(random, candidates, args):\r\n if len(candidates) % 2 == 1:\r\n candidates = candidates[:-1]\r\n moms = candidates[::2]\r\n dads = candidates[1::2]\r\n children = []\r\n for i, (mom, dad) in enumerate(zip(moms, dads)):\r\n cross.index = i\r\n offspring = cross(random, mom, dad, args)\r\n for o in offspring:\r\n children.append(o)\r\n return children\r\n ecspy_crossover.single_crossover = cross\r\n return ecspy_crossover",
"def crossover(self):\n self.sort_population()\n elite_amount = round(self.elite_rate * self.population_size)\n # preserve from the top\n new_population = [ele for ele in self.population if ele.ttl > 0]\n for individual in new_population:\n if individual.ttl > 0:\n individual.ttl -= 1\n new_population += self.population[:elite_amount]\n\n while len(new_population) < self.population_size:\n # newGene = self.crossBelowCrossRate()\n new_gene, new_gene2 = self.cross_on_arb_seq()\n if random() <= self.mutate_rate:\n self.mutate_append(new_gene)\n new_population.append(new_gene)\n if len(new_population) == self.population_size:\n break\n\n if random() <= self.mutate_rate:\n self.mutate_append(new_gene2)\n new_population.append(new_gene2)\n self.population = new_population",
"def crossover(cross):\n @functools.wraps(cross)\n def inspyred_crossover(random, candidates, args):\n if len(candidates) % 2 == 1:\n candidates = candidates[:-1]\n moms = candidates[::2]\n dads = candidates[1::2]\n children = []\n for i, (mom, dad) in enumerate(zip(moms, dads)):\n cross.index = i\n offspring = cross(random, mom, dad, args)\n for o in offspring:\n children.append(o)\n return children\n inspyred_crossover.single_crossover = cross\n return inspyred_crossover",
"def Laplace_covariance(self):\n #TODO add in the prior contributions for MAP estimation\n #TODO fix the hessian for tied, constrained and fixed components\n if hasattr(self, 'log_likelihood_hessian'):\n A = -self.log_likelihood_hessian()\n\n else:\n print \"numerically calculating hessian. please be patient!\"\n x = self._get_params()\n def f(x):\n self._set_params(x)\n return self.log_likelihood()\n h = ndt.Hessian(f)\n A = -h(x)\n self._set_params(x)\n # check for almost zero components on the diagonal which screw up the cholesky\n aa = np.nonzero((np.diag(A)<1e-6) & (np.diag(A)>0.))[0]\n A[aa,aa] = 0.\n return A",
"def crossover(self):\n print(' - crossover')\n s = time.time()\n\n # make a list with all index\n tmp_list = list(range(0, self.size))\n while len(tmp_list) > 0:\n candidate_1 = random.choice(tmp_list)\n tmp_list.remove(candidate_1)\n candidate_2 = random.choice(tmp_list)\n tmp_list.remove(candidate_2)\n\n # ceck if the two candidates will crossover\n chance = random.uniform(0, 1)\n if chance <= self.crossover_rate:\n self.crossover_two_candidates(candidate_1, candidate_2)\n\n e = time.time()\n print(\" - time: \", e - s)",
"def Laplace_covariance(self):\r\n # TODO add in the prior contributions for MAP estimation\r\n # TODO fix the hessian for tied, constrained and fixed components\r\n if hasattr(self, 'log_likelihood_hessian'):\r\n A = -self.log_likelihood_hessian()\r\n\r\n else:\r\n print \"numerically calculating Hessian. please be patient!\"\r\n x = self._get_params()\r\n def f(x):\r\n self._set_params(x)\r\n return self.log_likelihood()\r\n h = ndt.Hessian(f) # @UndefinedVariable\r\n A = -h(x)\r\n self._set_params(x)\r\n # check for almost zero components on the diagonal which screw up the cholesky\r\n aa = np.nonzero((np.diag(A) < 1e-6) & (np.diag(A) > 0.))[0]\r\n A[aa, aa] = 0.\r\n return A",
"def cross_correlate_lorentzian(signal: np.ndarray, window_size=25, **kwargs):\n params = {\n \"x0\": 0,\n \"gamma\": 0.25,\n \"I\": 1\n }\n if kwargs:\n params.update(**kwargs)\n # Create a template of the second derivative Lorentzian profile for\n # x-correlating with the spectrum\n temp_x = np.linspace(-5, 5, window_size)\n temp_y = sec_deriv_lorentzian(temp_x, **params)\n # Cross-correlate with the Lorentzian profile; \"same\" mode ensures\n # it is the same length as the original signal for easy indexing\n corr_signal = np.correlate(signal, temp_y, mode=\"same\")\n return corr_signal",
"def a_test2_laplace():\n model = ARIMAX(formula=\"y ~ x1 + x2\", data=data, ar=1, ma=1, family=Exponential())\n x = model.fit('Laplace')\n assert(len(model.latent_variables.z_list) == 5)\n lvs = np.array([i.value for i in model.latent_variables.z_list])\n assert(len(lvs[np.isnan(lvs)]) == 0)",
"def laplacian(self, array_in):\r\n\r\n # Call-through to Laplacian operator, already computed\r\n return self.laplace_op*array_in",
"def XtoL(self, x):\n lc = np.zeros(3)\n \n lc[0] = (x[0]-self.x0[0])/self.dh[0];\n lc[1] = (x[1]-self.x0[1])/self.dh[1];\n lc[2] = (x[2]-self.x0[2])/self.dh[2];\n \n return lc",
"def _get_Laplacian_matrix(self, X):\n self.laplacian_mat, self.laplacian_sym_mat, self.laplacian_weights = self.laplacian.compute_laplacian(\n self.get_Affinity_matrix(X)\n )",
"def lax_wendroff(CFL, uold, unew):\n\n unew[1:-1] = uold[1:-1] - (CFL * 0.5) * (uold[2:] - uold[:-2]) +\\\n (CFL * CFL * 0.5) * (uold[2:] - 2.0 * uold[1:-1] + uold[:-2])\n\n unew[0] = uold[0] - (CFL * 0.5) * (uold[1] - uold[-1]) +\\\n (CFL * CFL * 0.5) * (uold[1] - 2.0 * uold[0] + uold[-1])\n\n unew[-1] = uold[-1] - (CFL * 0.5) * (uold[0] - uold[-2]) +\\\n (CFL * CFL * 0.5) * (uold[0] - 2.0 * uold[-1] + uold[-2])\n\n return unew",
"def single_point_crossover(population):\r\n global decryption_key\r\n\r\n decryption_key += single_point_crossover_del\r\n\r\n new_population = []\r\n for i in range(0, len(population) - 1, 2):\r\n candidate1 = population[i]\r\n candidate2 = population[i + 1]\r\n\r\n # chromosomes have the same length\r\n # choose a random point\r\n length = len(candidate1)\r\n crossover_point = random.randint(0, length - 1)\r\n\r\n decryption_key += str(crossover_point) + \"|\"\r\n\r\n offspring1 = candidate2[0: crossover_point] + candidate1[crossover_point:]\r\n offspring2 = candidate1[0: crossover_point] + candidate2[crossover_point:]\r\n new_population.append(offspring1)\r\n new_population.append(offspring2)\r\n\r\n # append last chromosome if odd population size\r\n if len(population) % 2 == 1:\r\n new_population.append(population[len(population) - 1])\r\n\r\n decryption_key += single_point_crossover_del\r\n\r\n return new_population",
"def _calculate_ll(self, x):\n observation_log_probs = self._observation_log_probs(x, mask=None)\n forward_log_probs = self._forward(observation_log_probs)\n log_likelihood = logsumexp(\n forward_log_probs[forward_log_probs.shape[0] - 1, :].numpy())\n return log_likelihood",
"def solve_laplace_equation(\n grid: GridBase, bc: \"BoundariesData\", label: str = \"Solution to Laplace's equation\"\n) -> ScalarField:\n rhs = ScalarField(grid, data=0)\n return solve_poisson_equation(rhs, bc=bc, label=label)",
"def test_laplace():\n f = np.asarray([\n [0.99, 1.0, 0.5],\n [0.69, 0.6, 0.6]])\n R = common_metrics.laplace(f, maximise=True)\n expected = np.asarray(\n [0.83, 0.63])\n assert np.allclose(R, expected)\n R = common_metrics.laplace(f, maximise=False)\n expected = np.asarray(\n [-0.83, -0.63])\n assert np.allclose(R, expected)",
"def toy_linear_1d_classification_laplace(seed=default_seed, optimize=True, plot=True, axes=None):\n\n try:import pods\n except ImportError:print('pods unavailable, see https://github.com/sods/ods for example datasets')\n data = pods.datasets.toy_linear_1d_classification(seed=seed)\n Y = data['Y'][:, 0:1]\n Y[Y.flatten() == -1] = 0\n\n likelihood = GPy.likelihoods.Bernoulli()\n laplace_inf = GPy.inference.latent_function_inference.Laplace()\n kernel = GPy.kern.RBF(1)\n\n # Model definition\n m = GPy.core.GP(data['X'], Y, kernel=kernel, likelihood=likelihood, inference_method=laplace_inf)\n\n # Optimize\n if optimize:\n try:\n print(\"Pre opt\")\n print(m)\n m.optimize('bfgs', messages=1)\n print(\"Laplace opt 1\")\n print(m)\n m.optimize('bfgs', messages=1)\n print(\"Laplace opt 2\")\n print(m)\n m.optimize('bfgs', messages=1)\n print(\"Laplace opt 3\")\n print(m)\n except Exception as e:\n return m\n\n # Plot\n if plot:\n from matplotlib import pyplot as plt\n if axes is None:\n fig, axes = plt.subplots(2, 1)\n m.plot_f(ax=axes[0])\n m.plot(ax=axes[1])\n\n print(m)\n return m",
"def cross_network(self, x_0):\n x_l = x_0\n for i in range(self.cross_layer_num):\n xl_w = torch.tensordot(x_l, self.cross_layer_w[i], dims=([1], [0]))\n xl_dot = (x_0.transpose(0, 1) * xl_w).transpose(0, 1)\n x_l = xl_dot + self.cross_layer_b[i] + x_l\n return x_l",
"def a_test_laplace():\n model = ARIMAX(formula=\"y ~ x1\", data=data, ar=1, ma=1, family=Exponential())\n x = model.fit('Laplace')\n assert(len(model.latent_variables.z_list) == 4)\n lvs = np.array([i.value for i in model.latent_variables.z_list])\n assert(len(lvs[np.isnan(lvs)]) == 0)",
"def refit_pll_opt(model,data):\n data = toPM(data);\n import scipy.optimize\n from scipy.sparse import triu\n def to_vector(L,h):\n return np.hstack((h,triu(L,k=1).tocoo().data))\n def from_vector(x):\n h = x[:len(model.h)];\n tmp = triu(model.L,k=1).tocoo();\n L = csr((x[len(model.h):],(tmp.row,tmp.col)),shape=model.L.shape)\n return L+L.T,h\n def f0(x0):\n L,h = from_vector(x0)\n return -__pll(L,h,data).mean()\n def jac(x0):\n L,h = from_vector(x0)\n return -to_vector(*__dpll(L,h,data))\n\n x0 = to_vector(model.L,model.h)\n res = scipy.optimize.minimize(f0,x0, method='BFGS',jac=jac)\n #print(\"Success? \",res.success)\n model.L,model.h = from_vector(res.x)\n return res",
"def V_lopass(V, R_S, C, L, R_L, f):\n # current in circuit\n I = V/(R_S + Z_lopass(C, L, R_L, f))\n # voltage across circuit\n V_out = V - I*R_S\n I_C = V_out/Xcap(C, f)\n I_L = V_out/Z_low(L, R_L, f)\n V_L = I_L*R_L\n return V_L",
"def LL(self, X_test, y_test):\n\t\tif X_test.ndim == 1:\n\t\t\tX_test = np.reshape(X_test, (X_test.shape[0],1))\n\t\treturn self.loss(self.V, [X_test, y_test, 0.]) * X_test.shape[0]",
"def ml_kl_loss(self, simulation, c1 = 1.0, ndims = 2, ehigh=1e5, emax = 1e10, turnover=200):\n loss = MLKL(c1, simulation, ndims, ehigh, emax, turnover)\n return loss.lossFunction"
]
| [
"0.58404624",
"0.57481474",
"0.57417303",
"0.57125723",
"0.56641746",
"0.5645168",
"0.5642679",
"0.5457934",
"0.5456284",
"0.54418725",
"0.53179973",
"0.5301446",
"0.52797896",
"0.52770644",
"0.5192121",
"0.5158112",
"0.51519334",
"0.5137148",
"0.51297754",
"0.51172304",
"0.5095923",
"0.5030536",
"0.50281745",
"0.50058866",
"0.49860853",
"0.49825868",
"0.4965177",
"0.49622887",
"0.49556053",
"0.49384934"
]
| 0.57764274 | 1 |
Parse key=value=type command line args that come in a list. Returns a dictionary of options with the correct types. | def parse_options(option_list: List[str]) -> Dict[str, Union[int, float, str]]:
d = dict()
for o in option_list:
o = o.split('=')
if len(o) != 3:
raise OptionParsingError("Not enough elements in the parsed options. Need 3 elements.")
key = o[0]
val = o[1]
if o[2] not in type_mappings:
raise OptionParsingError(f"Unknown option type {o[2]}.")
type_func = type_mappings[o[2]]
d.update({key: type_func(val)})
return d | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _parse_arg_list(self):\n\t\targ_list = {}\n\t\tfor arg in getopt.getopt(sys.argv[1:], 'c:r:j:d')[0]:\n\t\t\targ_list[arg[0][1:]] = arg[1]\n\t\n\t\treturn arg_list",
"def parse_kwargs(kwargs_list: List[str]) -> Dict[str, Any]:\n\n kwargs_dict = {}\n\n for kwarg in kwargs_list:\n key = kwarg[2:].split('=')[0]\n value = '='.join(kwarg.split('=')[1:])\n\n try:\n if re.match(r'^(-)?[0-9]+$', value):\n value = int(value)\n\n elif re.match(r'^(-)?[0-9]*.[0-9]+$', value) or re.match(r'^(-)?[0-9]*(\\.)?[0-9]+e(-|\\+)[0-9]+$', value):\n value = float(value)\n\n elif re.match(r'^\\[.*]$', value) or re.match(r'^\\{.*}$', value):\n value = json.loads(value)\n\n elif value.lower() in ('true', 'false'):\n value = value.lower() == 'true'\n\n elif value.lower() == 'none':\n value = None\n\n except:\n logging.warning(f'Could not automatically parse argument \"{key}.\" Its type will remain string.')\n\n kwargs_dict[key] = value\n\n return kwargs_dict",
"def _parse_args(argv):\n result = {}\n for arg in argv:\n k, v = arg.split(\"=\")\n result[k] = v\n return result",
"def crude_arg_parser(args=sys.argv):\n args_dict = {}\n key = None\n for e in args[1:]:\n if e[:2] == '--':\n if key:\n args_dict[key] = True # Switch arg\n key = e[2:]\n elif key:\n args_dict[key] = e\n key = None\n\n return args_dict",
"def process_cli_config_args(config_args:List[str]) -> Dict:\n # assert len(config_args) % 3 == 0, \\\n # \"You should pass config args in [--config.arg_name arg_value arg_type] format\"\n assert len(config_args) % 2 == 0, \\\n \"You should pass config args in [--config.arg_name arg_value] format\"\n arg_names = [config_args[i] for i in range(0, len(config_args), 2)]\n arg_values = [config_args[i] for i in range(1, len(config_args), 2)]\n\n result = {}\n\n for name, value in zip(arg_names, arg_values):\n assert name.startswith(CONFIG_ARG_PREFIX), \\\n f\"Argument {name} is unkown and does not start with `config.` prefix. Cannot parse it.\"\n\n result[name[len(CONFIG_ARG_PREFIX):]] = infer_type_and_convert(value)\n\n return result",
"def params_commandline(lista):\n if len(lista)%2!=0:\n print('Error: The number of parameter names and values does not match')\n sys.exit()\n dict={}\n for i in range(0,len(lista),2):\n key=lista[i]\n if type(key)!=type(''):\n raise 'Keyword not string!'\n #replace commas in case they're present\n if key[0]=='-':key=key[1:]\n lista[i+1]=replace(lista[i+1],',',' ')\n values=tuple(split(lista[i+1]))\n if len(values)<1:\n mensaje='No value(s) for parameter '+key\n raise mensaje\n dict[key]=values\n if len(dict[key])==1: dict[key]=dict[key][0]\n return dict",
"def parse_key_value_pairs(arg_string):\n try:\n return {key: value for (key, value) in [tuple(str(arg).split('=', 1)) for arg in arg_string]}\n except ValueError:\n raise click.ClickException(\"argument string must be in the form x=y\")",
"def load_cli_kwargs(kwargs_list, delimiter='='):\n kwargs = {}\n for kv in kwargs_list:\n k, v = kv.split(delimiter, 1)\n kwargs[k] = v\n return kwargs",
"def parse(args: list, keyword_set: set) -> dict:\n parsed_dict = {'': []}\n while args:\n keyword = get_keyword(arg=args[0], keyword_set=keyword_set)\n\n if keyword is not None:\n args.pop(0)\n keyword_name = keyword.keyword_name\n\n if keyword_name in parsed_dict:\n raise necrobot.exception.DoubledArgException(keyword=keyword.keyword)\n\n if keyword.param_for is not None:\n parsed_dict[keyword_name] = [keyword.keyword]\n else:\n parsed_dict[keyword_name] = []\n num_args_pulled = 0\n while num_args_pulled < keyword.num_args:\n if not args:\n raise necrobot.exception.NumParametersException(\n keyword=keyword,\n num_expected=keyword.num_args,\n num_given=num_args_pulled\n )\n else:\n num_args_pulled += 1\n parsed_dict[keyword_name].append(args[0])\n args.pop(0)\n else:\n parsed_dict[''].append(args[0])\n args.pop(0)\n\n return parsed_dict",
"def parse_options(self,arg_str,opt_str,*long_opts,**kw):\n\n mode = kw.get('mode','string')\n list_all = kw.get('list_all',0)\n\n opts,args = getopt(arg_str.split(),opt_str,*long_opts)\n odict = {}\n for o,a in opts:\n if o.startswith('--'):\n o = o[2:]\n else:\n o = o[1:]\n try:\n odict[o].append(a)\n except AttributeError:\n odict[o] = [odict[o],a]\n except KeyError:\n if list_all:\n odict[o] = [a]\n else:\n odict[o] = a\n opts = Struct(odict)\n\n if mode == 'string':\n args = ' '.join(args)\n elif mode == 'list':\n pass\n else:\n raise ValueError,'incorrect mode given:'+`mode`\n return opts,args",
"def parse_params(params):\n def isoption(x):\n return x.startswith('-')\n solo_flags = []\n arg_flags = dict()\n i = 0\n while i < len(params):\n if not isoption(params[i]):\n raise ValueError('\"' + params[i] + '\" does not look like an option.')\n if i == len(params) - 1 or isoption(params[i+1]):\n solo_flags.append(params[i])\n i += 1\n continue\n else:\n arg_flags[params[i]] = process_arg(params[i+1])\n i += 2\n continue\n return solo_flags, arg_flags",
"def parseCommandLine(argv):\n parameters = {}\n for p in argv[1:]: # skip 0th element (module name)\n pair = split(p, '=', 1)\n if (2 != len(pair)):\n print 'bad parameter: %s (had no equals sign for pairing)' % p\n sys.exit()\n else:\n parameters[pair[0]] = pair[1]\n return parameters",
"def _parse_args(self, args : dict):\n result = {}\n for key, value in args.items():\n if key in self._subparsers:\n # if it's a list, it is because it's a preset\n if isinstance(value, list):\n result[key] = value[0]\n else:\n result[key] = self._subparsers[key]._parse_args(value)\n elif key in self._actions:\n result[key] = self._actions[key](value)\n else:\n raise ValueError(f\"Unknown argument {key}\")\n\n return result",
"def _parse_config_args(args):\r\n config_dict = dict()\r\n for config_str in args:\r\n try:\r\n components = config_str.split('=')\r\n if len(components) >= 2:\r\n config_dict[components[0]] = \"=\".join(components[1:])\r\n\r\n except:\r\n print \"Warning: could not interpret config value '{0}'\".format(config_str)\r\n pass\r\n\r\n return config_dict",
"def parse_args_dict(args=None):\n return vars(parse_args(args))",
"def parse_cmd_parameters_(args):\n # create argument parser\n parser = ArgumentParser(description=\"Script to build search index for ChatBot\")\n set_default_arguments_(parser)\n # parse options and transform them into common dictionary\n options = vars(parser.parse_args(args))\n # remove options with None values (if any)\n options = {k: v for k, v in options.items() if v is not None}\n return options",
"def test_parsingValues(self):\n argV = (\"--fooint 912 --foofloat -823.1 \"\n \"--eggint 32 --eggfloat 21\").split()\n self.usage.parseOptions(argV)\n self.failUnlessEqual(self.usage.opts['fooint'], 912)\n self.assert_(isinstance(self.usage.opts['fooint'], int))\n self.failUnlessEqual(self.usage.opts['foofloat'], -823.1)\n self.assert_(isinstance(self.usage.opts['foofloat'], float))\n self.failUnlessEqual(self.usage.opts['eggint'], 32)\n self.assert_(isinstance(self.usage.opts['eggint'], int))\n self.failUnlessEqual(self.usage.opts['eggfloat'], 21.)\n self.assert_(isinstance(self.usage.opts['eggfloat'], float))",
"def _parse(self, args):\r\n\r\n ordered = []\r\n opt_full = dict()\r\n opt_abbrev = dict()\r\n\r\n args = args + [''] # Avoid out of range\r\n i = 0\r\n\r\n while i < len(args) - 1:\r\n arg = args[i]\r\n arg_next = args[i+1]\r\n if arg.startswith('--'):\r\n if arg_next.startswith('-'):\r\n raise ValueError('{} lacks value'.format(arg))\r\n else:\r\n opt_full[arg[2:]] = arg_next\r\n i += 2\r\n elif arg.startswith('-'):\r\n if arg_next.startswith('-'):\r\n raise ValueError('{} lacks value'.format(arg))\r\n else:\r\n opt_abbrev[arg[1:]] = arg_next\r\n i += 2\r\n else:\r\n ordered.append(arg)\r\n i += 1\r\n \r\n return ordered, opt_full, opt_abbrev",
"def get_command_line_options(args):\n options = dict()\n if args.option is not None:\n for o in args.option:\n b = o.split(':')\n if len(b) != 2:\n raise Error(\"Bad option `%s'\" % o)\n if b[1] == 'False':\n options[b[0]] = False\n elif b[1] == 'True':\n options[b[0]] = True\n else:\n options[b[0]] = b[1]\n return options",
"def readArgs(args):\n params = {}\n for k in args.keys():\n k2 = k.replace(\"<\", \"\").replace(\">\", \"\").replace(\"-\", \"\")\n try: # Convert strings to int or floats when required\n params[k2] = int(args[k])\n except:\n try:\n params[k2] = float(args[k])\n except:\n try:\n params[k2] = str2bool(args[k])\n except:\n params[k2] = args[k]\n return params",
"def parseOpts(self):\n\n for opt in self.opts:\n var, val = opt.split('=', 1)\n try:\n val = int(val)\n except ValueError:\n try:\n val = float(val)\n except ValueError:\n # just a string\n pass\n self[var] = val",
"def parse_arguments(args: List[Dict]) -> 'Dict[str, Argument]':\n if not args:\n return {}\n result = {}\n for a in args:\n if not a:\n continue\n arg = Argument(a)\n result[arg.name] = arg\n return result",
"def parse_args(self):\n parsed, _ = self.parser.parse_args()\n final = {}\n append = getattr(parsed, self.append_option)\n subtract = getattr(parsed, self.subtract_option)\n for option in self.all_options():\n name = option.dest\n if name is not None:\n value = getattr(parsed, name)\n default = self.defaults.get(name)\n if append and option.get_opt_string() in self.appendable:\n value = self.append(option, value)\n elif subtract and option.get_opt_string() in self.appendable:\n value = self.subtract(option, value)\n if value is None:\n value = default\n if value is None:\n value = raw_input(\"Please enter '%s': \" % option.help)\n self[name] = value\n return self",
"def parse_command_line():\n try:\n opts, args = getopt.getopt(sys.argv[1:],\n \"ni:ht:\",\n [\"dry-run\", \"interval=\", \"help\", \"timestamp=\"])\n except getopt.error, msg:\n print msg\n print \"for help use --help.\"\n sys.exit(2)\n\n options = {}\n\n for o, a in opts:\n if o in (\"-h\", \"--help\"):\n print __doc__\n sys.exit(0)\n elif o in (\"-n\", \"--dry-run\"):\n sys.exit(4) # not yet supported...\n elif o in (\"-i\", \"--interval\"):\n options['interval'] = int(a)\n elif o in (\"-t\", \"--timestamp\"):\n options['timestamp'] = a\n else:\n sys.exit(3) # how did we get here?\n # And what are left as args must be our filter list.\n options['sieves'] = args\n return options",
"def parse_options(type):\n # TODO: conflict_handler='resolve' is really required ??\n parser = ArgumentParser(conflict_handler='resolve')\n if type == 'backup':\n for name, description in _get_parameters_backup().items():\n parser.add_argument('--{}'.format(name),\n help=description, required=True)\n elif type == 'restore':\n for name, description in _get_parameters_restore().items():\n if name in _get_parameters_restore_optional().keys():\n \tparser.add_argument('--{}'.format(name), help=description, required=False)\n else:\n parser.add_argument('--{}'.format(name), help=description, required=True)\n elif type == 'blob_operation':\n for name, description in _get_parameters_blob_operation().items():\n parser.add_argument('--{}'.format(name),\n help=description, required=True)\n else:\n raise Exception('Use either \\'backup\\' or \\'restore\\' as type.')\n\n for key, credentials in _get_parameters_credentials().items():\n for name, description in credentials.items():\n parser.add_argument('--{}'.format(name), help=description)\n configuration = vars(parser.parse_args())\n assert configuration['type'] == 'online' or configuration['type'] == 'offline', \\\n '--type must be \\'online\\' or \\'offline\\''\n return configuration",
"def parse_args():\n # Argument objects\n argument_objects = [\n FindInterfaceArg(),\n InterfaceArg(),\n NaughtyCountArg(),\n FirewallArg(),\n ModelTypeArg(),\n LogArg(),\n ]\n\n # Create the parser and parse the args\n parser = create_parser(argument_objects)\n parsed_args = parser.parse_args()\n options = {}\n\n # Parse all of the options\n for obj in argument_objects:\n if not obj.process_argument(parsed_args, options):\n parser.print_usage()\n exit()\n\n return options",
"def parse_options(argv):\n parser = OptionParser(\"Usage: %prog [options] host [...]\")\n parser.add_option('-t', '--type',\n help='specify key type TYPE (rsa or dsa)',\n metavar='TYPE')\n parser.add_option('-p', '--port',\n help='specify port number PORT',\n metavar='PORT')\n (options, hosts) = parser.parse_args(argv)\n if not options.port:\n port = DEFAULT_SSH_PORT\n else:\n if not options.port.isdigit():\n print >> sys.stderr, 'Invalid port number %s' % options.port\n parser.print_help()\n sys.exit(2)\n port = int( options.port )\n if not options.type:\n key_types = [ 'ssh-rsa', 'ssh-dss' ]\n elif options.type == 'rsa':\n key_types = [ 'ssh-rsa' ]\n elif options.type == 'dsa':\n key_types = [ 'ssh-dss' ]\n else:\n print >> sys.stderr, 'Invalid key type %s' % options.type\n parser.print_help()\n sys.exit(2)\n if not hosts:\n parser.print_help()\n sys.exit(2)\n return ( key_types, hosts, port )",
"def arglist_parse_to_dict(arg_l):\n\n prop_d = {}\n for prop in arg_l:\n if len(prop) == 2:\n prop_l = prop\n elif ':' in prop:\n prop_l = prop.split(':')\n elif '=' in prop:\n prop_l = prop.split('=')\n else:\n exit( \"==> ERROR: invalid config. Use '=' or ':'.\" )\n if not len(prop_l) == 2:\n exit( \"==> ERROR: invalid config. Use one '=' per setting.\" )\n prop_d[prop_l[0]] = prop_l[1]\n return prop_d",
"def _arg_parse(self, **options) -> Dict[str, Any]:\n extra_options = dict()\n for key, value in options.items():\n private_key = f\"__{key}\"\n if hasattr(self, private_key):\n setattr(self, private_key, value)\n else:\n extra_options[key] = value\n\n return extra_options",
"def parse_args(argparser_args):\n return {k: v for k, v in vars(argparser_args).items() if v is not None}"
]
| [
"0.69291884",
"0.6713729",
"0.66098994",
"0.654922",
"0.6441218",
"0.6433154",
"0.64158493",
"0.6372566",
"0.6308097",
"0.62898046",
"0.6267641",
"0.62230957",
"0.6189013",
"0.6167051",
"0.6166115",
"0.616541",
"0.61501485",
"0.6109313",
"0.6103926",
"0.6084132",
"0.6078017",
"0.60777915",
"0.60342646",
"0.6032482",
"0.6003914",
"0.5993896",
"0.5987567",
"0.5971595",
"0.59616",
"0.59603333"
]
| 0.7624646 | 0 |
Encode an option dict into command line strings that cascade_at can understand. Returns a list of strings that can be passed to the command line. | def encode_options(options: Dict[str, Union[str, float, int]]) -> List[str]:
d = list()
rev_dict = {v: k for k, v in type_mappings.items()}
for k, v in options.items():
t = type(v)
if t not in rev_dict:
raise OptionParsingError(f"Unknown option type {t}.")
arg = f'{k}={v}={rev_dict[t]}'
d.append(arg)
return d | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_options(options, return_list=True):\n\n cmd_options = []\n\n for key, value in options.items():\n\n if value is not None:\n txt = f\"--{key} {value}\"\n else:\n txt = f\"--{key}\"\n\n cmd_options.append(txt)\n\n if return_list:\n return cmd_options\n\n cmd_options = \" \".join(cmd_options)\n\n return cmd_options",
"def __buildOptionString ( self ):\n\n #-- 1 --\n result = []\n\n #-- 2 --\n # [ result +:= strings defining each element of self.switchSpecs\n # as getopt.getopt requires it ]\n for sw in self.switchSpecs:\n #-- 2 body --\n # [ if sw is a SwitchArg ->\n # result +:= a string defining sw as getopt.getopt\n # requires it ]\n if sw.takesValue:\n result.append ( \"%s:\" % sw.letter )\n else:\n result.append ( sw.letter )\n\n #-- 3 --\n # [ return the strings in result, concatenated ]\n return \"\".join ( result )",
"def retrieve_options(env):\n\n options = []\n if env.core != -1:\n options.extend([\"--core {}\".format(env.core)])\n if env.mtor != 4:\n options.extend([\"--mtor {}\".format(env.mtor)])\n if env.n != 1000:\n options.extend([\"--n {}\".format(env.n)])\n if env.forcefield != \"OPLS2005\":\n options.extend([\"--force {}\".format(env.forcefield)])\n if env.mae_lig:\n options.extend([\"--mae_charges\"])\n if env.gridres != 10:\n options.extend([\"--gridres {}\".format(env.gridres)])\n return \" \".join(options)",
"def _getOptions(self):\n args = []\n for iname, value in self.options:\n args.append('-' + iname)\n if value != 'true':\n args.append(value)\n return args",
"def listopt(opt, f=None):\n args = vars(opt)\n\n if f is not None:\n f.write('------------ Options -------------\\n')\n else:\n print('------------ Options -------------')\n\n for k, v in sorted(args.items()):\n if f is not None:\n f.write('%s: %s\\n' % (str(k), str(v)))\n else:\n print('%s: %s' % (str(k), str(v)))\n\n if f is not None:\n f.write('-------------- End ----------------\\n')\n else:\n print('-------------- End ----------------')",
"def listopt(opt, f=None):\n args = vars(opt)\n\n if f is not None:\n f.write('------------ Options -------------\\n')\n else:\n print('------------ Options -------------')\n\n for k, v in sorted(args.items()):\n if f is not None:\n f.write('%s: %s\\n' % (str(k), str(v)))\n else:\n print('%s: %s' % (str(k), str(v)))\n\n if f is not None:\n f.write('-------------- End ----------------\\n')\n else:\n print('-------------- End ----------------')",
"def configToCliArguments(config):\n if not isinstance(config, dict):\n raise TypeError(\"Expected dict for config\")\n\n args = []\n for key, value in config.items():\n if value == None:\n args.append(f\"--{key}\")\n continue\n\n if isinstance(value, list):\n value = \",\".join(value)\n args.append(f\"--{key}={value}\")\n\n return args",
"def GenerateToolArgStrings(options):\n # Preparing dnstreexport\n dnstreeexport_array = [options.tree_export]\n dnstreeexport_array.extend(['-c', options.config_file])\n if( options.force ):\n dnstreeexport_array.append('--force')\n if( options.quiet ):\n dnstreeexport_array.append('--quiet')\n dnstreeexport_arg_string = ' '.join(dnstreeexport_array)\n\n # Preparing dnscheckconfig\n dnscheckconfig_array = [options.check_config]\n dnscheckconfig_array.extend(['-i', '%s' % options.id])\n dnscheckconfig_array.extend(['--config-file', options.config_file])\n if( options.named_checkzone ):\n dnscheckconfig_array.extend(['-z', options.named_checkzone])\n if( options.named_checkconf ):\n dnscheckconfig_array.extend(['-c', options.named_checkconf])\n if( not options.quiet ):\n dnscheckconfig_array.append('-v')\n dnscheckconfig_arg_string = ' '.join(dnscheckconfig_array)\n\n # Preparing dnsservercheck\n dnsservercheck_array = [options.server_check]\n dnsservercheck_array.extend(['--export-config'])\n dnsservercheck_array.extend(['-c', options.config_file])\n dnsservercheck_array.extend(['-i', '%s' % options.id])\n dnsservercheck_arg_string = ' '.join(dnsservercheck_array)\n\n # Preparing dnsconfigsync\n dnsconfigsync_array = [options.config_sync]\n dnsconfigsync_array.extend(['--export-config'])\n dnsconfigsync_array.extend(['-i', '%s' % options.id])\n dnsconfigsync_array.extend(['-c', options.config_file])\n if( options.ssh_id ):\n dnsconfigsync_array.extend(['--ssh-id', options.ssh_id])\n if( options.rndc_exec ):\n dnsconfigsync_array.extend(['--rndc-exec', options.rndc_exec])\n if( options.rndc_port ):\n dnsconfigsync_array.extend(['--rndc-port', options.rndc_port])\n if( options.rndc_key ):\n dnsconfigsync_array.extend(['--rndc-key', options.rndc_key])\n if( options.rndc_conf ):\n dnsconfigsync_array.extend(['--rndc-conf', options.rndc_conf])\n dnsconfigsync_arg_string = ' '.join(dnsconfigsync_array)\n\n # Preparing dnsquerycheck\n dnsquerycheck_array = [options.query_check]\n dnsquerycheck_array.extend(['--export-config'])\n dnsquerycheck_array.extend(['-c', options.config_file])\n dnsquerycheck_array.extend(['-i', '%s' % options.id])\n dnsquerycheck_array.extend(['-n', '%s' % options.number])\n dnsquerycheck_array.extend(['-p', '%s' % options.port])\n dnsquerycheck_arg_string = ' '.join(dnsquerycheck_array)\n\n return [dnstreeexport_arg_string,\n dnscheckconfig_arg_string,\n dnsservercheck_arg_string,\n dnsconfigsync_arg_string, \n dnsquerycheck_arg_string]",
"def _dict_to_args(self, arg_dict):\n if arg_dict:\n yield \"--{}=data:application/json;charset=utf-8,{}\".format(\n self._CONFIG_FLAG.name,\n urllib.parse.quote(json_encode(arg_dict, pretty=False), encoding=\"utf-8\")\n )",
"def params_to_args(**params):\n\targs = []\n\tkeys = get_sorted_keys(params)\n\tfor k in keys:\n\t\tif params[k] == False:\n\t\t\tcontinue\n\t\targs.append('--'+k)\n\t\tif params[k] == True:\n\t\t\tcontinue\n\t\t\n\t\tif isinstance(params[k], str):\n\t\t\targs.append(params[k])\n\t\t\tcontinue\n\t\ttry:\n\t\t\targs.extend([str(v) for v in params[k]])\n\t\texcept:\n\t\t\targs.append(str(params[k]))\n\treturn args",
"def commandline_options():\n parser = argparse.ArgumentParser(\n description='ocn_diags_generator: CESM wrapper python program for Ocean Diagnostics packages.')\n\n parser.add_argument('--backtrace', action='store_true',\n help='show exception backtraces as extra debugging '\n 'output')\n\n parser.add_argument('--debug', action='store_true',\n help='extra debugging output')\n\n #parser.add_argument('--config', nargs=1, required=True, help='path to config file')\n\n options = parser.parse_args()\n return options",
"def print_options(order_list, option_list):\n menu = ''\n for order, text in zip(order_list, option_list):\n menu += (str(order) + ' - ' + text + '\\n')\n return menu",
"def myst_options(options):\n num_options = len(options.keys())\n myst_options = []\n if num_options == 0:\n return myst_options\n elif num_options < 2: # TODO parameterise this in conf.py\n for option, option_val in options.items():\n myst_options.append(\":{}: {}\".format(option, option_val).rstrip())\n return myst_options\n else:\n myst_options.append(\"---\")\n for item in sorted(options.keys()):\n myst_options.append(\"{}: {}\".format(item, options[item]))\n myst_options.append(\"---\")\n return myst_options",
"def dict2argstr(args_dict):\n arg_str = \"\"\n for arg, value in args_dict.items():\n if value is not None:\n arg_str += \" --{} {}\".format(str(arg), str(value))\n return arg_str",
"def encode_commands(command_list: List[str]) -> List[str]:\n return ['-'.join(x.split(' ')) for x in command_list]",
"def gen_command(process):\n cmd = \"{} \".format(process.name)\n for o in process.options.opt_list:\n i = 0\n opt = \"\"\n for el in o: \n if el and el != \"input\" and el != \"output\" and i != 3:\n opt += str(el)\n if opt[-1] != \"=\" and opt[-1] != \"'\": # command without space\n opt += \" \" # space\n i += 1\n cmd += opt\n return cmd",
"def dumps(self):\n result = []\n pkg_options_dumps = self._package_options.dumps()\n if pkg_options_dumps:\n result.append(pkg_options_dumps)\n for pkg_pattern, pkg_option in sorted(self._deps_package_options.items()):\n dep_pkg_option = pkg_option.dumps(scope=pkg_pattern)\n if dep_pkg_option:\n result.append(dep_pkg_option)\n return \"\\n\".join(result)",
"def combine_options(options):\n solns = [\"\"]\n for option_set in options:\n option_strs = []\n for s in solns:\n for op in option_set:\n option_strs.append(s + op)\n solns = option_strs\n return solns",
"def build_multiple_options(self):\n multiple_options = \"\"\n for index, option in self.options.iteritems():\n multiple_options += \"\\n[\" + index + \"] \" + option\n multiple_options += \"\\n\"\n return multiple_options",
"def cmdline(self, executable, options, task, rlimits):\n data_model_param = get_data_model_from_task(task, {ILP32: \"-m32\", LP64: \"-m64\"})\n print(options)\n if data_model_param and not any(\n option.startswith(\"--clang-options=\") for option in options\n ):\n options += [\"--clang-options=\" + data_model_param]\n\n if task.property_file:\n options += [\"--svcomp-property\", task.property_file]\n else:\n raise UnsupportedFeatureException(\n \"SMACK can't execute without a property file.\"\n )\n\n options += [task.single_input_file]\n\n return [executable] + options",
"def optionxform(self, optionstr):\r\n return optionstr",
"def optionxform(self, optionstr):\r\n return optionstr",
"def to_cli_args(args):\n args_dict = vars(args)\n\n cli_args = []\n for key, value in AgentArgs.ALL_OPTIONS.items():\n if value[\"action\"] == \"store_true\":\n if args_dict[key.replace(\"-\", \"_\")]:\n cli_args.append(\"--\" + key)\n else:\n cli_args.extend((\"--\" + key, args_dict[key.replace(\"-\", \"_\")]))\n return cli_args",
"def build_options(self):\n opts = [\n \"-k rpm.rpmva=off\",\n \"-k apache.log=True\",\n ]\n\n sensitive_keys = {\n self._engine_plugin: 'sensitive_keys',\n 'ovirt_engine_dwh': 'dwh_sensitive_keys',\n }\n if self.configuration['include_sensitive_data']:\n for plugin in sensitive_keys:\n self.configuration[sensitive_keys[plugin]] = ':'\n\n for plugin in sensitive_keys:\n if self.configuration.get(sensitive_keys[plugin]):\n opts.append(\n '-k {plugin}.sensitive_keys={keys}'.format(\n plugin=plugin,\n keys=self.configuration.get(sensitive_keys[plugin]),\n )\n )\n\n if self.configuration.get(\"ticket_number\"):\n opts.append(\n \"--ticket-number=%s\" % self.configuration.get(\"ticket_number\")\n )\n\n if self.sos_version < '30':\n opts.append('--report')\n\n if self.configuration.get(\"log_size\"):\n opts.append(\n \"--log-size=%s\" %\n self.configuration.get('log_size')\n )\n else:\n if self.sos_version < '30':\n opts.append('--report')\n opts.append(\"-k general.all_logs=True\")\n elif self.sos_version < '32':\n opts.append(\"-k logs.all_logs=True\")\n else:\n opts.append(\"--all-logs\")\n\n if self.configuration.get(\"upload\"):\n opts.append(\"--upload=%s\" % self.configuration.get(\"upload\"))\n return \" \".join(opts)",
"def _Beamlet_options(self):\n cmd = _Beamlet._Beamlet_options(self) + \" --anadir={0},{1},{2}\".format(self._anara, self._anadec, self._coordsys)\n return cmd",
"def cli(obj):\n for k, v in obj.items():\n if isinstance(v, list):\n v = ', '.join(v)\n click.echo(f'{k:20}: {v}')",
"def encode_options(options):\n last_number = 0\n packed = []\n for opt in sorted_options(options):\n delta = opt.number - last_number\n last_number = opt.number\n pvalue = opt.packed_value\n (od, odx) = _optionint_helper.option_encoding(delta)\n (ol, olx) = _optionint_helper.option_encoding(len(pvalue))\n encoded = struct.pack(str('B'), (od << 4) | ol)\n encoded += odx + olx + pvalue\n packed.append(encoded)\n return b''.join(packed)",
"def list_opts():\n return [('ironic_lib', utils_opts)]",
"def printOptions(opts,subject_ids,session_ids,task_list, run_list, acq, rec):\n uname = os.popen('uname -s -n -r').read()\n print \"\\n\"\n print \"* Pipeline started at \"+time.strftime(\"%c\")+\"on \"+uname\n print \"* Command line is : \\n \"+str(sys.argv)+\"\\n\"\n print \"* The source directory is : \"+opts.sourceDir\n print \"* The target directory is : \"+opts.targetDir+\"\\n\"\n print \"* Data-set Subject ID(s) is/are : \"+str(', '.join(subject_ids))+\"\\n\"\n # print \"* PET conditions : \"+ ','.join(opts.condiList)+\"\\n\"\n print \"* Sessions : \", session_ids, \"\\n\"\n print \"* Tasks : \" , task_list , \"\\n\"\n print \"* Runs : \" , run_list , \"\\n\"\n print \"* Acquisition : \" , acq , \"\\n\"\n print \"* Reconstruction : \" , rec , \"\\n\"",
"def build_cmdline():\n\tcmd=optparse.OptionParser(version=__version__)\n\tcmd.add_option('-c', '', dest='config_fname',type=\"string\", help='WHM/WHMCS configuration file', metavar=\"FILE\")\n\tcmd.add_option('-s', '', dest=\"whm_section\", type=\"string\", help=\"WHM server to use. Specify section name. eg: -s ds01\", metavar=\"SERVER\")\n\tcmd.add_option('','--search', action=\"store\", dest='search', type=\"string\", help=\"Search client by DNS domain name or cPanel username\", metavar=\"STRING\")\n\tcmd.add_option('-d', '', dest='whmcs_deptid', type=\"int\", help=\"WHMCS Department ID\", metavar=\"INT\") \n\tcmd.add_option('-m', '', dest='whmcs_ticketmsg_fname', type=\"string\", help=\"WHMCS abuse ticket template file\", metavar='FILE')\n\tcmd.add_option('-r', '', dest='whm_suspendmsg_fname', type=\"string\", help='cPanel account suspension reason template file', metavar='FILE')\n\tcmd.add_option('-f', '', dest='whmcs_proofmsg_fname', type=\"string\", help='Abuse proof file which will be appended to abuse ticket message', metavar='FILE')\n\tcmd.add_option('', '--subject', dest='whmcs_subject', type=\"string\", help='Specify abuse ticket subject title.', metavar=\"STRING\")\n\tcmd.add_option('-y', '--allyes', dest='allyes', action=\"store_true\", default=False, help='Assume yes as an answer to any question which would be asked')\n\treturn cmd"
]
| [
"0.6672936",
"0.6576634",
"0.64632386",
"0.6266173",
"0.61365074",
"0.61365074",
"0.612159",
"0.60841817",
"0.6073305",
"0.6043996",
"0.59815985",
"0.59466445",
"0.59257406",
"0.58806074",
"0.586232",
"0.5844024",
"0.58317894",
"0.5820227",
"0.5800517",
"0.5799547",
"0.5799502",
"0.5799502",
"0.5787573",
"0.57711416",
"0.57153845",
"0.56999236",
"0.5694455",
"0.56899816",
"0.5682806",
"0.56804883"
]
| 0.7084254 | 0 |
Parse the dismod commands that come from command line arguments in a list. Returns a list of commands that dismod can understand. | def parse_commands(command_list: List[str]) -> List[str]:
return [' '.join(x.split('-')) for x in command_list] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_commands(self, commands: List[str]):",
"def cmd_list(args):",
"def parse_commands(args):\n\n # command flags, mapped to their function.\n commands = {\n '-l': {\n 'aliases': ['--list'],\n 'func': do_list,\n 'kwargs': None,\n },\n '-h': {\n 'aliases': ['--help'],\n 'func': print_usage,\n 'kwargs': {'retcode': 0},\n },\n }\n\n # Setup list of known flags...\n commandflags = list(commands.keys())\n aliasesgen = (commands[cmdflag]['aliases'] for cmdflag in commands.keys())\n for cmdaliases in aliasesgen:\n commandflags += cmdaliases\n commandflags.sort()\n\n # Function to retrive a flag by its name, or alias.\n def get_flag(flagname):\n \"\"\" Retrieve a flag function by name, or alias. \"\"\"\n if flagname in commands.keys():\n return commands[flagname]\n else:\n for cmdflag in commands.keys():\n if flagname in commands[cmdflag]['aliases']:\n return commands[cmdflag]\n\n # wrun commands must come before any script file.\n commandargs = []\n while ((args) and (args[0].startswith('-'))):\n commandargs.append(args.pop(0))\n\n # Retrieve functions for flags, and call them..\n for cmdarg in commandargs:\n if cmdarg in commandflags:\n # known flag, retrieve the function for it and call it.\n command = get_flag(cmdarg)\n commandkw = command['kwargs']\n commandfunc = command['func']\n if commandkw:\n print_debug('Running command option function with args...')\n commandfunc(**commandkw)\n else:\n print_debug('Running command option function...')\n commandfunc()\n else:\n # unknown flag!\n print_fail((\n 'Unknown flag given!: {}\\n'\n 'Run with --help for usage instructions.'\n ).format(cmdarg))\n\n # Return args without any 'wrun command flags'.\n return args",
"def parse_commands(self) -> list:\n\n command = self.path.split(\"?\")[1]\n commands = command.split(\"&\")\n\n return commands",
"def commands(self, *ignored):\n return [command.rsplit(\"_\").pop() for command in dir(self) if command.startswith(\"command_\")]",
"def _get_commands(self) -> list:\n return [i[1] for i in inspect.getmembers(self, predicate=lambda i: hasattr(i, \"is_cmd\"))]",
"def get_commands_list() -> list:\n return open(\"data/metadata/commands.list.txt\", \"r\").read().split(\"\\n\")",
"def processCommandList():\n\n try:\n # Assume that maya.cmds.about and maya.cmds.internalVar are already registered\n #\n commandListPath = os.path.realpath( os.environ[ 'MAYA_LOCATION' ] )\n platform = maya.cmds.about( os=True )\n commandListPath = os.path.join( commandListPath, commandListLocations[platform], 'commandList' )\n\n file = open( commandListPath, 'r' )\n for line in file:\n commandName, library = line.split()\n if not commandName in maya.cmds.__dict__:\n maya.cmds.__dict__[commandName] = __makeStubFunc( commandName, library )\n except:\n sys.stderr.write(\"Unable to process commandList %s\" % commandListPath)\n raise",
"def main_list(args):\n return list_commands(args.directory)",
"def getCmdList():\n return [obj for name, obj in inspect.getmembers(sys.modules[__name__]) \n if inspect.isclass(obj) and issubclass(obj, Cmd)][1:]",
"def _parse_command(self, cmd):\n if isinstance(cmd, list):\n args = [str(x) for x in cmd]\n assert args\n else:\n args = shlex.split(cmd)\n return args",
"def _command_as_list(module_name):\n parts = module_name.split('.')\n for part in COMMANDS_PACKAGE_NAME.split('.'):\n if parts[0] == part:\n parts = parts[1:]\n return [SCRIPT_COMMAND] + parts",
"def parseCmdLine(cmdLine):\n files=[]\n modifiers=[]\n for i in range(len(cmdLine)):\n arg = cmdLine[i]\n if arg[:2] != '--':\n files = cmdLine[i:]\n return (modifiers, files)\n \n arg = arg[2:]\n parts = arg.split('=',1)\n modifiers.append((parts[0], parts[1]))\n return (modifiers, files)",
"def _register_commands(self):\n cmds = []\n cmd_help = CommandParser(\"help\", \"Show help for a command.\")\n cmd_help.add_argument(\n \"command\",\n nargs=\"*\",\n help=\"The command to get help for. Specify multiple names to get help for subcommands.\",\n )\n cmd_help.add_argument(\"-m\", \"--module\", help=\"List all commands from the given module\")\n cmd_help.add_argument(\n \"-f\",\n \"--full\",\n action=\"store_true\",\n help='Include descriptions in the \"all\" help output.',\n )\n cmds.append(cmd_help)\n\n target_mod = CommandParser()\n target_mod.add_argument(\"module\", nargs=\"+\", help=\"Target module(s)\")\n target_mod.add_argument(\n \"-p\",\n \"--protocol\",\n action=\"store_const\",\n const=\"protocol\",\n default=\"feature\",\n dest=\"mtype\",\n help=\"Target is a protocol module\",\n )\n cmd_module = CommandParser(\"module\", \"Manage and query ZeroBot modules\")\n add_subcmd = cmd_module.make_adder(metavar=\"OPERATION\", dest=\"subcmd\", required=True)\n add_subcmd(\"load\", description=\"Load a module\", parents=[target_mod])\n add_subcmd(\"reload\", description=\"Reload a module\", parents=[target_mod])\n subcmd_list = add_subcmd(\"list\", description=\"List available modules\")\n subcmd_list.add_argument(\"-l\", \"--loaded\", action=\"store_true\", help=\"Only loaded modules\")\n list_group = subcmd_list.add_mutually_exclusive_group()\n default_categories = [\"protocol\", \"feature\"]\n list_group.add_argument(\n \"-f\",\n \"--feature\",\n action=\"store_const\",\n const=[\"feature\"],\n dest=\"category\",\n default=default_categories,\n help=\"Only feature modules\",\n )\n list_group.add_argument(\n \"-p\",\n \"--protocol\",\n action=\"store_const\",\n const=[\"protocol\"],\n dest=\"category\",\n default=default_categories,\n help=\"Only protocol modules\",\n )\n add_subcmd(\"info\", description=\"Show module information\", parents=[target_mod])\n cmds.append(cmd_module)\n\n save_reload_args = CommandParser()\n save_reload_args.add_argument(\n \"config_file\",\n nargs=\"*\",\n help=\"Name of config file (without .toml extension). Omit to affect all loaded config files.\",\n )\n set_reset_args = CommandParser()\n set_reset_args.add_argument(\"config_file\", help=\"Name of config file (without .toml extension)\")\n cmd_config = CommandParser(\"config\", \"Manage configuration\")\n add_subcmd = cmd_config.make_adder(metavar=\"OPERATION\", dest=\"subcmd\", required=True)\n add_subcmd(\"save\", description=\"Save config files to disk\", parents=[save_reload_args])\n subcmd_savenew = add_subcmd(\"savenew\", description=\"Save config file to a new path\")\n subcmd_savenew.add_argument(\"config_file\", help=\"Name of config file (without .toml extension)\")\n subcmd_savenew.add_argument(\"new_path\", help=\"The path to save the config file to\")\n add_subcmd(\n \"reload\",\n description=\"Reload config files from disk\",\n parents=[save_reload_args],\n )\n subcmd_set = add_subcmd(\"set\", description=\"Modify config settings\", parents=[set_reset_args])\n subcmd_set.add_argument(\n \"key_path\",\n help=\"The config key to set. Subkeys are separated by dots, e.g. 'Core.Backup.Filename'\",\n )\n subcmd_set.add_argument(\"value\", nargs=\"?\", help=\"The new value. Omit to show the current value.\")\n subcmd_reset = add_subcmd(\n \"reset\",\n description=\"Reset config settings to last loaded value\",\n parents=[set_reset_args],\n )\n subcmd_reset.add_argument(\n \"key_path\",\n nargs=\"?\",\n help=(\n \"The config key to set. Subkeys are separated by dots, \"\n \"e.g. 
'Core.Backup.Filename'. If omitted, the entire \"\n \"config will be reset.\"\n ),\n )\n subcmd_reset.add_argument(\n \"-d\",\n \"--default\",\n action=\"store_true\",\n help=\"Set the key to its default value instead. Effectively unsets a config key.\",\n )\n cmds.append(cmd_config)\n\n cmd_version = CommandParser(\"version\", \"Show version information\")\n cmds.append(cmd_version)\n\n cmd_restart = CommandParser(\"restart\", \"Restart ZeroBot.\")\n cmd_restart.add_argument(\"msg\", nargs=\"*\", help=\"Message sent to protocol modules as a reason\")\n cmds.append(cmd_restart)\n\n cmd_quit = CommandParser(\"quit\", \"Shut down ZeroBot.\")\n cmd_quit.add_argument(\"msg\", nargs=\"*\", help=\"Message sent to protocol modules as a reason\")\n cmds.append(cmd_quit)\n\n cmd_wait = CommandParser(\"wait\", \"Execute a command after a delay\")\n cmd_wait.add_argument(\n \"delay\",\n help=\"Amount of time to delay. Accepts the following modifier suffixes: 'ms', 's' (default), 'm', 'h'.\",\n )\n cmd_wait.add_argument(\"command\", help=\"Command to delay\")\n cmd_wait.add_argument(\"args\", nargs=argparse.REMAINDER, help=\"Command arguments\")\n cmds.append(cmd_wait)\n\n cmd_cancel = CommandParser(\"cancel\", \"Cancel a waiting command\")\n cancel_group = cmd_cancel.add_mutually_exclusive_group()\n cancel_group.add_argument(\"id\", type=int, nargs=\"?\", help=\"The ID of a waiting command\")\n cancel_group.add_argument(\"-l\", \"--list\", action=\"store_true\", help=\"List currently waiting commands\")\n cmds.append(cmd_cancel)\n\n cmd_backup = CommandParser(\"backup\", \"Create a database backup\")\n cmd_backup.add_argument(\"name\", type=Path, help=\"Backup filename\")\n cmds.append(cmd_backup)\n\n self.command_register(\"core\", *cmds)",
"def extract_commands(self):\n # import pdb; pdb.set_trace()\n left_i = 0\n right_i = 1\n commands = {}\n cmd = self.cmd\n\n if not cmd:\n return\n while left_i < len(cmd):\n sub_cmd = cmd[left_i:right_i]\n if sub_cmd in self.action_list:\n arg_len, arguments = self.extract_command_arguments(right_i)\n commands[sub_cmd] = arguments\n left_i = right_i + arg_len\n right_i = left_i + 1\n else:\n left_i, right_i = self.update_i(left_i, right_i)\n return commands",
"def parse_cli(argv):\n possible_remits = ['pandoc', 'panzer', 'diff']\n sourcelist = list()\n remit = None\n if len(argv) < 2 or argv[1] not in possible_remits:\n print(__doc__)\n sys.exit(1)\n if len(argv) >= 2:\n remit = argv[1]\n if len(argv) > 2:\n sourcelist = argv[2:]\n return remit, sourcelist",
"def parse_command_list(config_str):\n return [command for command in config_str.splitlines() if command]",
"def load_command_list(filename=None):\n contents = None\n if filename:\n logger.debug('Attempting to read commands from \"{}\"'.format(filename))\n with open(filename, 'r') as fp:\n contents = fp.read().strip()\n\n if not contents:\n contents = ''\n\n # Split data as lines (ignore empty)\n return [l.strip().upper() for l in contents.split('\\n') if l.strip() != '']",
"def get_cmds(progfile, command):\n if not progfile: # `cmd` from argument\n return [command]\n else: # `cmd` from file (actually a Python module)\n from importlib import import_module\n mod_name = progfile\n\n try:\n import_module(mod_name)\n return getattr(sys.modules[mod_name], 'cmds', [])\n except ImportError:\n raise ImportError(u\"no file named '%s.py'\" % mod_name.replace(u'.', u'/'))",
"def list_commands(self, ctx):\n commands = []\n for filename in os.listdir(cmd_folder):\n if filename.endswith('.py') and filename.startswith('cmd_'):\n commands.append(filename[4:-3])\n commands.sort()\n return commands",
"def argv(self) -> List[str]:\n if self.command:\n rtn = [utils.strip_quotes(self.command)]\n for cur_token in self.arg_list:\n rtn.append(utils.strip_quotes(cur_token))\n else:\n rtn = []\n\n return rtn",
"def cmdline(self, args=()):\n cmds = [self._interpreter.binary]\n cmds.append(self._pex)\n cmds.extend(args)\n return cmds",
"def list_commands(self):\n response = self.do_command('list_commands')\n stripped = [s for s in (t.strip() for t in response.split(\"\\n\"))]\n return [s for s in stripped if is_well_formed_gtp_word(s)]",
"def cmdline(self, args=()):\r\n cmds = [self._interpreter.binary]\r\n cmds.append(self._pex)\r\n cmds.extend(args)\r\n return cmds",
"def commands(self) -> List[Command]:\n return []",
"def _commands(cli):\n res = []\n for n, t in inspect.getmembers(cli):\n if _is_command(t, cli):\n res.append(t)\n sorted(res, key=lambda f: f.__name__.lower())\n return res",
"def commands(self):\n if 'Commands' in self:\n return self['Commands']\n\n text = self['Body']\n commands = []\n cmd = [\"\", \"\"]\n isCmd = False\n isArg = False\n isComment = False\n for i in range(len(text)):\n # don't parse the commented lines\n # ignore everyline starting with '>'\n if text[i] == '>':\n j = i-1\n while text[j] in (' ', '\\t'):\n j -= 1\n if text[j] == '\\n':\n isComment = True\n elif text[i] == '\\n':\n isComment = False\n if isComment:\n if isArg:\n cmd[1] += text[i]\n continue\n\n if text[i-1] != '\\\\' and text[i:i+2] == '._' and (isCmd or isArg):\n isArg = False\n commands.append(cmd)\n cmd = [\"\", \"\"]\n elif isCmd:\n if text[i] == ' ':\n isArg = True\n isCmd = False\n else:\n cmd[0] += text[i]\n elif isArg:\n if text[i:i+3] in ('\\\\._', '\\\\_.'):\n pass\n else:\n cmd[1] += text[i]\n elif text[i-1] != '\\\\' and text[i-1:i+1] == '_.':\n isCmd = True\n\n return commands",
"def _commands(self) -> Dict[str, List[str]]:\r\n pass",
"async def adding_command_list(self):\n command_aliases=['anime','fun','mod','nekogif'] #This includes the aliases and the cog names\n #NOTE: fun command added\n for i in self.bot.commands:\n self.commands.append(i.name)\n \n for i in command_aliases:\n self.commands.append(i)",
"def list_command(ctx: Any) -> None:\n pass"
]
| [
"0.68276083",
"0.6675786",
"0.66491014",
"0.6639968",
"0.66223323",
"0.651876",
"0.65133154",
"0.64849234",
"0.64712006",
"0.6466917",
"0.63955957",
"0.63948977",
"0.6363335",
"0.6358228",
"0.6328111",
"0.63193023",
"0.6306998",
"0.62842536",
"0.6258902",
"0.6255466",
"0.6252229",
"0.6248148",
"0.62432915",
"0.62426174",
"0.62175643",
"0.6187287",
"0.6137669",
"0.61217165",
"0.6096536",
"0.6079555"
]
| 0.7127569 | 0 |
Converts an argument to a flag, like model_version_id to --model-version-id. | def _arg_to_flag(name: str) -> str:
arg = '-'.join(name.split('_'))
return f'--{arg}' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _flag_to_arg(flag: str) -> str:\n arg = flag.split('--')[1].split('-')\n arg = '_'.join(arg)\n return arg",
"def _arg_to_command(k: str, v: Optional[Union[str, int, float]] = None):\n command = _arg_to_flag(k)\n if v is not None:\n command += f' {v}'\n return command",
"def flag_key(arg: Argument) -> List[Union[int, str]]:\n # Setup\n ret: List[Union[int, str]] = []\n x = sort_candidate(arg)\n # Long-style flags win over short-style ones, so the first item of\n # comparison is simply whether the flag is a single character long (with\n # non-length-1 flags coming \"first\" [lower number])\n ret.append(1 if len(x) == 1 else 0)\n # Next item of comparison is simply the strings themselves,\n # case-insensitive. They will compare alphabetically if compared at this\n # stage.\n ret.append(x.lower())\n # Finally, if the case-insensitive test also matched, compare\n # case-sensitive, but inverse (with lowercase letters coming first)\n inversed = \"\"\n for char in x:\n inversed += char.lower() if char.isupper() else char.upper()\n ret.append(inversed)\n return ret",
"def to_modelica(arg):\n # Check for strings and booleans\n if isinstance(arg, str):\n return '\\\\\"' + arg + '\\\\\"'\n elif isinstance(arg, bool):\n if arg is True:\n return 'true'\n else:\n return 'false'\n try:\n return '{' + \", \".join(to_modelica(x) for x in arg) + '}'\n except TypeError:\n return repr(arg)",
"def _flag():\n current_flag = _flag.flag\n _flag.flag <<= 1\n return current_flag",
"def _bool_to_int(self, bool_arg):\n if bool_arg == True:\n return 1\n else:\n return 0",
"def getArg(flag):\n try:\n a = sys.argv[sys.argv.index(flag) + 1]\n except:\n return \"\"\n else:\n return a",
"def normalize_flags(argv: List[str]) -> List[str]:\n bolean_flag_patern = re.compile(r'--[\\w_]+=(true|false)')\n\n def _normalize_flag(arg: str) -> str:\n if not bolean_flag_patern.match(arg):\n return arg\n if arg.endswith('=true'):\n return arg[: -len('=true')] # `--flag=true` -> `--flag`\n elif arg.endswith('=false'):\n # `--flag=false` -> `--noflag`\n return '--no' + arg[len('--') : -len('=false')]\n else:\n raise AssertionError(f'Unrecognized arg: {arg}')\n\n return [_normalize_flag(a) for a in argv]",
"def convert_arg((arg, attrs, mode, typ, name)):\n iorname = name\n return iorname, (arg, attrs, mode, typ, name)",
"def _prep_bool_arg(arg):\n return bool(strtobool(str(arg)))",
"def convertbinary(value, argument):\n\n if argument == 'to':\n return bin(value)\n elif argument == 'from':\n return format(value)\n raise ValueError(\"Invalid argument specified.\")",
"def arg_to_boolean(arg: str) -> Optional[bool]:\n return argToBoolean(arg) if arg else None",
"def convert_arg(node):\n if isinstance(node, ast.Name):\n return node.id\n else:\n return convert_literal_node(node)",
"def TransformFlags(self) -> _n_2_t_0[bool]:",
"def flags(self) -> UserFlag:",
"def getFlag(self, flag) -> bool:\n ...",
"def GetJflag(cmdline):\n\n for i in range(len(cmdline)):\n if (cmdline[i] == '-j' and i + 1 < len(cmdline)\n and cmdline[i + 1].isdigit()):\n return int(cmdline[i + 1])\n\n if (cmdline[i].startswith('-j') and cmdline[i][len('-j'):].isdigit()):\n return int(cmdline[i][len('-j'):])",
"def flag(flag_parameter: int):\n try:\n clear_parameter = clear(flag_parameter)\n flag = JapaneseFlag(clear_parameter)\n flag.create_full_pic()\n print(flag)\n return flag\n except ArgumentError as err:\n print('ArgumentError: {}'.format(err))\n except Exception as err:\n print('Application error: {}!\\n Please, contact support'.format(err))",
"def single_flag_name(self):\n return self.enum_class.to_representation(self.flags.name)",
"def arg_name(name):\n return \"--\" + name.replace('_', '-')",
"def preprocess_bools(args):\n for arg in args:\n if type(args[arg]) == bool:\n args[arg] = int(args[arg])\n return args",
"def process_bool_arg(arg):\n if isinstance(arg, bool):\n return arg\n elif isinstance(arg, basestring):\n if arg.lower() in [\"true\", \"1\"]:\n return True\n elif arg.lower() in [\"false\", \"0\"]:\n return False",
"def __process_xx_switch_arg(self, argument):\n _method_name = '__process_xx_switch_arg'\n\n match = self.__xx_args_switch_regex.match(argument)\n xarg = match.group(2)\n on_or_off = match.group(1)\n if on_or_off == '+':\n on_or_off_text = 'on'\n else:\n on_or_off_text = 'off'\n\n if 'switch' not in self.__xx_args:\n self.__xx_args['switch'] = OrderedDict()\n self._logger.finer('WLSDPLY-08304', argument, xarg, on_or_off_text,\n class_name=self._class_name, method_name=_method_name)\n self.__xx_args['switch'][xarg] = on_or_off",
"def __getVersionArg(self, version):\n if version == \"WORKING\":\n return None\n else:\n return str(version)",
"def str2bool(v):\n if v.lower() == 'true':\n return True\n elif v.lower() == 'false':\n return False\n raise argparse.ArgumentTypeError('Boolean value expected.')",
"def cdef_to_gccflag(self, define):\n\t\timport re\n\t\ttry:\n\t\t\tm = re.search(r\"#define (\\w+) (.+)\", define)\n\t\t\tidentifier = m.group(1)\n\t\t\treplacement = m.group(2)\n\t\texcept:\n\t\t\ttry:\n\t\t\t\tm = re.search(r\"#define (\\w+)\", define)\n\t\t\t\tidentifier = m.group(1)\n\t\t\texcept:\n\t\t\t\treturn None\n\n\t\tif 'replacement' in locals():\n\t\t\treturn \"-D%s=%s\" % (identifier, replacement)\n\t\telse:\n\t\t\treturn \"-D%s\" % identifier",
"def getId(*args):",
"def getId(*args):",
"def getId(*args):",
"def getId(*args):"
]
| [
"0.6590305",
"0.5745334",
"0.55384004",
"0.54860014",
"0.52809024",
"0.52305204",
"0.5170556",
"0.5114146",
"0.50949305",
"0.5084797",
"0.5046727",
"0.5032376",
"0.49962172",
"0.49942017",
"0.49607122",
"0.49438405",
"0.491969",
"0.4874299",
"0.48694623",
"0.48176888",
"0.48037282",
"0.47940764",
"0.47936434",
"0.47651058",
"0.475466",
"0.47306508",
"0.47189912",
"0.47189912",
"0.47189912",
"0.47189912"
]
| 0.70808184 | 0 |
Splits a flag that looks like --model-version-id into an argument that looks like model_version_id. | def _flag_to_arg(flag: str) -> str:
arg = flag.split('--')[1].split('-')
arg = '_'.join(arg)
return arg | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _arg_to_flag(name: str) -> str:\n arg = '-'.join(name.split('_'))\n return f'--{arg}'",
"def _parse_flags(self, flags):\n s = ''\n for flag in flags:\n if len(s):\n s += ' | '\n s += 'gf.sim.VariableFlag.%s' % (flag)\n if len(s):\n return s\n else:\n return '0'",
"def get_version_specific_arguments(self, version: str):\n result = []\n semversion = semver.VersionInfo.parse(version)\n\n # Extended database names were introduced in 3.9.0\n if self.supports_extended_names:\n result += [\"--args.all.database.extended-names-databases=true\"]\n\n # Telemetry was introduced in 3.11.0\n if (semversion.major == 3 and semversion.minor >= 11) or (semversion.major > 3):\n result += [\"--all.server.telemetrics-api=false\"]\n\n # Column cache\n if (\n self.cfg.enterprise\n and semver.compare(version, \"3.9.5\") >= 0\n and semver.compare(version, \"3.10.0\") != 0\n and semver.compare(version, \"3.10.1\") != 0\n ):\n result += [\"--args.all.arangosearch.columns-cache-limit=10000\"]\n\n return result",
"def _get_build_flags(cmdline: str) -> Tuple[Tuple[str, ...], Tuple[str, ...]]:\n cmdlist = cmdline.split()\n labels = [arg for arg in cmdlist if arg.startswith(\"//\")]\n build_flags = [arg for arg in cmdlist if not arg.startswith(\"//\")]\n return (tuple(labels), tuple(build_flags))",
"def normalize_flags(argv: List[str]) -> List[str]:\n bolean_flag_patern = re.compile(r'--[\\w_]+=(true|false)')\n\n def _normalize_flag(arg: str) -> str:\n if not bolean_flag_patern.match(arg):\n return arg\n if arg.endswith('=true'):\n return arg[: -len('=true')] # `--flag=true` -> `--flag`\n elif arg.endswith('=false'):\n # `--flag=false` -> `--noflag`\n return '--no' + arg[len('--') : -len('=false')]\n else:\n raise AssertionError(f'Unrecognized arg: {arg}')\n\n return [_normalize_flag(a) for a in argv]",
"def split_ver(v):\n return [int(x) for x in v.split('.')]",
"def split_args(args):\n double_dash_pos = [i for i, x in enumerate(args) if x == '--']\n if not double_dash_pos:\n return (args, [])\n else:\n double_dash_pos = double_dash_pos[0]\n return (args[:double_dash_pos], args[double_dash_pos+1:])",
"def model_version_type_ids(hybridizer):\n if hybridizer:\n mts = [3]\n else:\n mts = [1, 2]\n return ', '.join([str(x) for x in mts])",
"def get_parsed_flags():\n return Flags.parsed_args",
"def _command_version(self, name):\n last_remove_version = 0\n for feature in self.command_removed_by_feature[name]:\n last_remove_version = max(last_remove_version, feature['number'])\n\n earliest_non_remove_version_number = 9999\n\n for feature in self.command_required_by_feature[name]:\n number = feature['number']\n if number > last_remove_version:\n if number < earliest_non_remove_version_number:\n earliest_non_remove_version_number = number\n return feature['name']\n\n for extension in self.command_required_by_extension[name]:\n extension_name = extension['name']\n if extension_name in self.extensions_to_collect:\n return extension_name\n\n return '?'",
"def split_package_id(id):\n return id.split(\";\", 4)",
"def parseCmdLine(cmdLine):\n files=[]\n modifiers=[]\n for i in range(len(cmdLine)):\n arg = cmdLine[i]\n if arg[:2] != '--':\n files = cmdLine[i:]\n return (modifiers, files)\n \n arg = arg[2:]\n parts = arg.split('=',1)\n modifiers.append((parts[0], parts[1]))\n return (modifiers, files)",
"def split_version(v):\n try:\n s = [int(x) for x in v.split('.')]\n except ValueError:\n return []\n return s",
"def read_flags():\n return flag_args",
"def _handle_long_form(element):\n if len(element) <= 2:\n # then it can't possibly start with \"--\"\n raise CmdLineException(\"Invalid: \" + element)\n tokens = element.split(\"=\", 1)\n if len(tokens) == 2 and tokens[1].isspace():\n tokens[1] = None\n return tokens",
"def GetJflag(cmdline):\n\n for i in range(len(cmdline)):\n if (cmdline[i] == '-j' and i + 1 < len(cmdline)\n and cmdline[i + 1].isdigit()):\n return int(cmdline[i + 1])\n\n if (cmdline[i].startswith('-j') and cmdline[i][len('-j'):].isdigit()):\n return int(cmdline[i][len('-j'):])",
"def _arg_to_command(k: str, v: Optional[Union[str, int, float]] = None):\n command = _arg_to_flag(k)\n if v is not None:\n command += f' {v}'\n return command",
"def test_arg_flaglistOneFlag(self):\n flag = b\"flag\"\n parsed, rest = self.server.arg_flaglist(flag)\n self.assertEqual(parsed, [flag])\n self.assertFalse(rest)",
"def _pre_process(version, separators, ignore_case):\n if ignore_case:\n version = version.lower()\n return [int(x) if x.isdigit()\n else [int(y) if y.isdigit()\n else y for y in re.findall(\"\\d+|[a-zA-Z]+\", x)]\n for x in re.split(separators, version)]",
"def materialize_import_flags(self, model: common_definitions.Model) -> List[str]:\n return utils.substitute_flag_vars(\n flags=self.import_flags, ENTRY_FUNCTION=model.entry_function\n )",
"def default_version_splitter(instring):\n return instring.split()[-1]",
"def command_version(data):\n new_version = data.get('version')\n if new_version == None:\n return\n\n version = new_version # save for error message\n new_version = bigsh.desc_version_to_path_elem(new_version)\n\n # skip version change is this is the current version.\n if bigsh.desc_version == new_version:\n return\n \n # temporary.\n if bigsh.desc_version == 'bigdb' and new_version == 'version200':\n return\n\n # see if the requested version exists\n if not bigsh.command_packages_exists(new_version):\n print 'No command description group for version %s' % version\n return\n\n # run 'env [envriron_vars] ... bigcli.py'\n command = ['env']\n command.append('BIGCLI_COMMAND_VERSION=%s' % version)\n command.append('BIGCLI_STARTING_MODE=config')\n # (other env variables persist)\n if os.path.exists('/opt/bigswitch/cli/bin/bigcli'):\n # controller VM\n command.append('/opt/bigswitch/cli/bin/bigcli')\n else:\n # developer setup\n base = os.path.dirname(__file__)\n command.append(os.path.join(base, 'bigcli.py'))\n\n # bigsh.options.init ?\n if bigsh.options.init:\n command.append('--init')\n\n # dump the command descriptions, and read a new set.\n # open a subshell with a new command version\n subprocess.call(command, cwd=os.environ.get(\"HOME\"))\n\n return",
"def parse_bld_args(self, args: argparse.Namespace) -> RepoBuildArgs:",
"def split_specstring_into_ops_and_versions(spec):\n specset = pip._vendor.packaging.specifiers.SpecifierSet(spec)\n ops_and_versions = []\n\n for spec in specset._specs:\n ops_and_versions.append([spec.operator, spec.version])\n \n return ops_and_versions",
"def split_command_input(command):\n args = command.split(' ', 1)\n if len(args) > 1:\n return args[0], args[1]\n return args[0], ''",
"def get_args(args):\n arglist = []\n for idx, arg in enumerate(args):\n if arg.startswith('-'):\n break\n arglist.append(arg)\n\n if len(arglist) == 0:\n raise ValueError(\"Flag is not allowed\")\n return idx, arglist",
"def __get_xx_switch_args(self, incremental_result):\n result = incremental_result\n if 'switch' in self.__xx_args:\n xx_switch_args = self.__xx_args['switch']\n for key, value in xx_switch_args.iteritems():\n if len(result) > 0:\n result += ' '\n result += '-XX:' + value + key\n return result",
"def _split_name(name):\n name_split = name.split('_view_')\n view_num = None\n if(len(name_split) > 1):\n view_num = int(name_split[1])\n optimizer_key = ''\n fp16_key = ''\n if name_split[0].startswith('Moment_1'):\n optimizer_key = 'Moment_1_'\n elif name_split[0].startswith('Moment_2'):\n optimizer_key = 'Moment_2_'\n elif name_split[0].startswith('Update_Count'):\n optimizer_key = 'Update_Count_'\n elif name_split[0].endswith('_fp16'):\n fp16_key = '_fp16'\n param_name = name_split[0]\n if optimizer_key != '':\n param_name = param_name.split(optimizer_key)[1]\n param_name = param_name.split('_fp16')[0]\n return param_name, optimizer_key, view_num, fp16_key",
"def split_command_input(command):\n command.lower()\n args = command.split(' ', 1)\n if len(args) > 1:\n return args[0], args[1]\n return args[0], ''",
"def split_action_id (id):\n assert isinstance(id, basestring)\n split = id.split ('.', 1)\n toolset = split [0]\n name = ''\n if len (split) > 1:\n name = split [1]\n return (toolset, name)"
]
| [
"0.5780041",
"0.5571187",
"0.5545629",
"0.5502681",
"0.54060006",
"0.5399714",
"0.52989393",
"0.5282142",
"0.5276059",
"0.52680475",
"0.5203346",
"0.51975304",
"0.51257795",
"0.50831616",
"0.5066755",
"0.50496227",
"0.5044899",
"0.50396246",
"0.49897784",
"0.4975387",
"0.49657848",
"0.49485594",
"0.49474186",
"0.4941405",
"0.49215353",
"0.48985323",
"0.48881575",
"0.48877102",
"0.48857468",
"0.48850274"
]
| 0.64214563 | 0 |
Convert an argument name into an "empty" placeholder that is filled in later. Used by the jobmon TaskTemplate. E.g. takes something like "model_version_id" and converts it to "{model_version_id}" | def _arg_to_empty(name: str) -> str:
arg = "{" + name + "}"
return arg | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_arg_name(self, arg, variable_name):",
"def arg_name(name):\n return \"--\" + name.replace('_', '-')",
"def argument(arg, default):\n return \"{0}={1}\".format(arg, default) if default else arg",
"def variable_argument(self):\n if self.is_variadic():\n if self.args[-1] == '...':\n # An unnamed variable argument replaces __VA_ARGS__\n return \"__VA_ARGS__\"\n else:\n # Strip '...' from argument name\n return self.args[-1][:-3]\n else:\n return None",
"def _visit_arg_with_default(self, arg: ast.arg, default: ast.AST | None) -> str:\n name = self.visit(arg)\n if default:\n if arg.annotation:\n name += \" = %s\" % self.visit(default)\n else:\n name += \"=%s\" % self.visit(default)\n return name",
"def format_arg(arg_name: str, value: Any, max_length: int = 200) -> str:\n return \"{arg_name}={value}\".format(\n arg_name=arg_name, value=trim_string(repr(value), max_length=max_length)\n )",
"def get_arg(self, name):\n return getattr(self.args, f\"{self.key}_{self.alias}_{name}\")",
"def _getArgStr(self):\n return \"name=%r\" % (self.name)",
"def _name(self):\n return self.arguments[0].split('(')[0]",
"def opt(self, arg: SeField[Any]) -> str:\n if is_bare_opt(arg.type):\n return f\"{arg.varname} if {arg.varname} is not None else None\"\n else:\n inner = arg[0]\n inner.name = arg.varname\n return f\"({self.render(inner)}) if {arg.varname} is not None else None\"",
"def _name_from_args(func, _, params):\n return \"{}_{}\".format(func.__name__, \"_\".join(str(arg) for arg in params.args))",
"def _name(self):\n return self._arguments[0].split('(')[0]",
"def convert_arg((arg, attrs, mode, typ, name)):\n iorname = name\n return iorname, (arg, attrs, mode, typ, name)",
"def _arg_to_flag(name: str) -> str:\n arg = '-'.join(name.split('_'))\n return f'--{arg}'",
"def get_argument(self, name):\n val = self.arguments.get(name)\n if val:\n return val[0]\n return None",
"def name(self):\n\t\treturn self.args[0]",
"def get_default_arg():\n\n arg = 'cog:C_cog_space_GRP world:parts_GRP trueWorld:noXform_GRP '\n return arg",
"def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", six.ensure_str(param_name))\n if m is not None:\n param_name = m.group(1)\n return param_name",
"def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", six.ensure_str(param_name))\n if m is not None:\n param_name = m.group(1)\n return param_name",
"def convert_arg(node):\n if isinstance(node, ast.Name):\n return node.id\n else:\n return convert_literal_node(node)",
"def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name",
"def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name",
"def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name",
"def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name",
"def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name",
"def job_string(args):\n name = ''\n if args.pruneStart:\n name += str(args.pruneStart)\n if args.dihed:\n name += '_dihed_'\n else:\n name += 'xyz_'\n if args.nonH:\n name += 'nonH_'\n else:\n name += 'H_'\n if args.division:\n name += args.division\n if args.pruneFinish:\n name += str(args.pruneFinish)\n return name",
"def _get_variable_name(param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name",
"def get_argument_module_name(arg, dim):\n return \"arg_%s_dim%s\" % (arg.name, dim)",
"def get_name(self) -> str:\n # read the original value passed by the command\n name = self.raw_param.get(\"name\")\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return name",
"def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name"
]
| [
"0.6888572",
"0.66297895",
"0.6515924",
"0.62020886",
"0.61162794",
"0.60101515",
"0.59318465",
"0.59016836",
"0.5827356",
"0.57620907",
"0.5743553",
"0.57284564",
"0.5666781",
"0.56305546",
"0.56073815",
"0.55728793",
"0.5566801",
"0.55580854",
"0.55580854",
"0.5548567",
"0.5529238",
"0.5529238",
"0.5529238",
"0.5529238",
"0.5529238",
"0.5516921",
"0.55107",
"0.54995817",
"0.54985934",
"0.54880995"
]
| 0.7613815 | 0 |
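Reader's note (illustration only, not part of the dataset row above): a minimal, hypothetical sketch of how the _arg_to_empty snippet in that row could be used to assemble a jobmon-style command template. The build_template wrapper and the flag spelling are assumptions introduced here, not something the dataset defines.

def _arg_to_empty(name: str) -> str:
    # copied from the document field of the row above
    arg = "{" + name + "}"
    return arg

def build_template(script: str, arg_names) -> str:
    # hypothetical wrapper: pairs each name with a --dashed flag and a placeholder
    parts = [script]
    for name in arg_names:
        flag = "--" + "-".join(name.split("_"))
        parts.append(f"{flag} {_arg_to_empty(name)}")
    return " ".join(parts)

# build_template("run_model", ["model_version_id"])
# -> "run_model --model-version-id {model_version_id}"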
Takes a key (k) and a value (v) and turns them into a command-line argument, e.g. k=model_version, v=1 returns "--model-version 1". If the value is None, returns just the flag as a template command rather than the full command | def _arg_to_command(k: str, v: Optional[Union[str, int, float]] = None):
command = _arg_to_flag(k)
if v is not None:
command += f' {v}'
return command | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def command_friendly_kv_pair(dict):\n # subprocess.run expects parameters to be in the foo=bar format. We build this format here and return a list\n output = []\n for key, value in dict.items():\n output.append('%s=%s' % (key, value))\n return output",
"def get_key_command(client: KeyVaultClient, args: dict[str, Any]) -> CommandResults:\n vault_name = args['vault_name']\n key_name = args['key_name']\n key_version = args.get('key_version', '')\n\n response = client.get_key_request(vault_name, key_name, key_version)\n cloned_response = copy.deepcopy(response)\n outputs = copy.deepcopy(response)\n outputs['attributes'] = convert_time_attributes_to_iso(outputs['attributes'])\n outputs['key_vault_name'] = vault_name\n\n readable_key_info = convert_key_info_to_readable(cloned_response['key'])\n readable_attrib = convert_attributes_to_readable(cloned_response['attributes'])\n\n readable_output = tableToMarkdown(f'{key_name} Information',\n {**readable_key_info, **readable_attrib},\n ['key_id', 'enabled', 'json_web_key_type', 'key_operations', 'create_time',\n 'update_time',\n 'expiry_time'],\n removeNull=True,\n headerTransform=string_to_table_header)\n\n command_results = CommandResults(\n outputs_prefix='AzureKeyVault.Key',\n outputs_key_field='kid',\n outputs=outputs,\n raw_response=response,\n readable_output=readable_output,\n ignore_auto_extract=True\n )\n\n return command_results",
"def get_key_value_command():\n # Get Args needed for the command\n incident = demisto.args().get('id', get_investigation_id())\n key = demisto.args().get('key')\n # Search Collection for incident_id and key\n search = incident + '.key'\n result = COLLECTION.find_one({search: key}, {'_id': False})\n value = result[incident].get('value')\n contents = {\n 'Incident': incident,\n 'Key': key,\n 'Value': value,\n 'Modified': result.get(incident).get('modified')\n }\n human_readable = tableToMarkdown('The key and value that is stored for the incident', contents)\n ec = {'MongoDB.Entry(val.Key === obj.Key)': contents}\n return human_readable, ec, {}",
"def cli(arg_dict):\n\n keys = list(arg_dict.keys())\n for key in keys:\n v = arg_dict[key]\n usr_args_ls = sys.argv\n for ind in range(len(usr_args_ls)):\n val = usr_args_ls[ind]\n if val == \"-\" + key[0] or val == \"--\" + key:\n if type(v).__name__ == \"bool\":\n v = not v\n else:\n v = usr_args_ls[ind + 1]\n\n arg_dict[key] = v",
"def command_version(data):\n new_version = data.get('version')\n if new_version == None:\n return\n\n version = new_version # save for error message\n new_version = bigsh.desc_version_to_path_elem(new_version)\n\n # skip version change is this is the current version.\n if bigsh.desc_version == new_version:\n return\n \n # temporary.\n if bigsh.desc_version == 'bigdb' and new_version == 'version200':\n return\n\n # see if the requested version exists\n if not bigsh.command_packages_exists(new_version):\n print 'No command description group for version %s' % version\n return\n\n # run 'env [envriron_vars] ... bigcli.py'\n command = ['env']\n command.append('BIGCLI_COMMAND_VERSION=%s' % version)\n command.append('BIGCLI_STARTING_MODE=config')\n # (other env variables persist)\n if os.path.exists('/opt/bigswitch/cli/bin/bigcli'):\n # controller VM\n command.append('/opt/bigswitch/cli/bin/bigcli')\n else:\n # developer setup\n base = os.path.dirname(__file__)\n command.append(os.path.join(base, 'bigcli.py'))\n\n # bigsh.options.init ?\n if bigsh.options.init:\n command.append('--init')\n\n # dump the command descriptions, and read a new set.\n # open a subshell with a new command version\n subprocess.call(command, cwd=os.environ.get(\"HOME\"))\n\n return",
"def version_template(self) -> Optional[pulumi.Input['CryptoKeyVersionTemplateArgs']]:\n return pulumi.get(self, \"version_template\")",
"def get_key(command):\n return command.split(\" \")[1]",
"def cli_show_version(ctx, _, value):\n if not value or ctx.resilient_parsing:\n return\n\n show_versions()\n\n ctx.exit()",
"def get_commandlinearg(self, keyname, defaultval=None):\n if (hasattr(self.commandlineargs,keyname)):\n val = getattr(self.commandlineargs,keyname)\n if (val != None):\n return val\n try:\n # try to access commandline args as dictionary\n return self.commandlineargs[keyname]\n except:\n pass\n # return default val\n return defaultval",
"def kV(*args):\n # Getter\n if len(args) == 0:\n return CheckForError(lib.Generators_Get_kV())\n\n # Setter\n Value, = args\n CheckForError(lib.Generators_Set_kV(Value))",
"def get_key_plain():\n if len(sys.argv) != 2:\n exit(\"Usage: python vigenere.py k\")\n\n # get plaintext\n user_input = input(\"plaintext: \")\n \n return sys.argv[1], user_input",
"def write_key_value_command():\n # Get Args needed for the command\n timestamp = datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%S+00:00\")\n incident = demisto.args().get('id', get_investigation_id())\n key = demisto.args().get('key')\n value = demisto.args().get('value')\n logjson = {\n incident: {\n 'modified': timestamp,\n 'key': key,\n 'value': value\n }\n }\n # Check for previous record/document\n search = incident + '.key'\n cursor = COLLECTION.find_one({search: key})\n # If no record\n if not cursor:\n # Add to MongoDB\n result = COLLECTION.insert_one(logjson)\n entry_id = result.inserted_id\n context = {\n 'ID': str(entry_id),\n 'Incident': incident,\n 'Modified': timestamp,\n 'Key': key,\n 'Value': value\n }\n ec = {\n 'MongoDB.Entry(val.ID === obj.ID)': context\n }\n return f'Incident \"{incident}\" - key/value collection - 1 document added', ec, {}\n\n # Modify Existing Record\n object_id = cursor.get('_id')\n COLLECTION.update_one(\n {'_id': object_id},\n {'$set': {\n incident: {\n 'key': key,\n 'value': value,\n 'modified': timestamp\n }\n }}\n )\n context = {\n 'ID': str(object_id),\n 'Incident': incident,\n 'Modified': timestamp,\n 'Key': key,\n 'Value': value\n }\n ec = {\n 'MongoDB.Entry(val.ID === obj.ID)': context\n }\n return f'Incident \"{incident}\" - key/value collection - 1 document updated', ec, {}",
"def update_cmd(self, key, update_value):\r\n\t\tif self._iscommand(key):\r\n\t\t\tCOMMAND_NAME[key] = update_value\r\n\t\t\tself._writer(self._str_converter(COMMAND_NAME))\r\n\t\telse:\r\n\t\t\tprint(key, 'no existe')\r\n\t\t\treturn 'ERROR'",
"def main(argv): \n if len(argv) < 2:\n print 'generate.py -k <k-value> -o <outputfile>'\n exit(1)\n argv = argv[1:]\n k = 0\n outputfile = ''\n try:\n opts, args = getopt.getopt(argv,\"hk:o:\",[\"k-value=\",\"ofile=\"])\n except getopt.GetoptError:\n print 'generate.py -k <k-value> -o <outputfile>'\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print 'generate.py -k <k-value> -o <outputfile>'\n sys.exit()\n elif opt in (\"-k\", \"--kval\"):\n k = int(arg)\n elif opt in (\"-o\", \"--ofile\"):\n outputfile = arg\n generate(k, outputfile)",
"def buildCommandModel ( switchSpecs, posSpecs ):\n\n #-- 1 --\n result = []\n\n #-- 2 --\n # [ result +:= strings representing the options in switchSpecs ]\n for switch in switchSpecs:\n result.append ( \"-%s\" % switch.letter )\n\n #-- 3 --\n # [ result +:= strings representing the keys in posSpecs ]\n for pos in posSpecs:\n if pos.optional:\n result.append ( \"[%s]\" % pos.key )\n else:\n result.append ( pos.key )\n if pos.repeated:\n result.append ( \"...\" )\n\n #-- 4 --\n # [ return the concatenation of the strings in result with single\n # spaces between them ]\n return \" \".join ( result )",
"def GetKey(self, version_number):\n return self.dict[str(version_number)]",
"def _print_version(ctx: click.Context, _, value: str):\n if not value or ctx.resilient_parsing:\n return\n\n click.echo(__version__)\n ctx.exit()",
"def command_line(self, cmdargs):\n\n\tcommand = cmdargs.pop(0)\n\targs = []\n\tkwargs = {}\n\n\tif command in self.command_table:\n\t cmdopts = self.command_table[command]\n\telse:\n\t raise RuntimeError, 'unknown command: %s' % command\n\n\titeration_kludge = cmdopts.get('__FAKE_ITERATION_KLUDGE__', 0)\n\n\tif iteration_kludge == 0:\n\t for x in cmdargs:\n\t\ttry:\n\t\t i = x.index('=')\n\t\t k = x[0:i]\n\t\t v = x[i+1:]\n\t\t kwargs[k] = v\n\t\texcept ValueError:\n\t\t args.append(x)\n\t print self.call(command, *args, **kwargs), # TRAILING COMMA REQ'D\n\telif iteration_kludge == 1:\n\t for x in cmdargs:\n\t\targs[0] = x\n\t\tprint self.call(command, *args, **kwargs) # ALLOW KLUDGE NEWLINE\n\telif iteration_kludge == 2:\n\t args[0] = cmdargs.pop(0)\n\t for x in cmdargs:\n\t\targs[1] = x\n\t\tprint self.call(command, *args, **kwargs) # ALLOW KLUDGE NEWLINE",
"def fmt_option_key(key, value):\n if value is None:\n return \"\"\n return f\"{key}={value}\"",
"def get_arg(key, default=None):\n # type: (str, Any) -> Any\n if default is None:\n default = \"\"\n return plugin.args.get(key, [default])[0]",
"def get_arg(key, default=None):\n # type: (str, Any) -> Any\n if default is None:\n default = \"\"\n return plugin.args.get(key, [default])[0]",
"def show_version(ctx, param, value):\n if not value or ctx.resilient_parsing:\n return\n click.echo('Zap AppImage utility')\n click.echo('version: {}'.format(__version__))\n ctx.exit()",
"def _set_version(args: Any):\n if args['msc']:\n version = 'msc'\n elif args['nx']:\n version = 'nx'\n elif args['optistruct']:\n version = 'optistruct'\n elif args['nasa95']:\n version = 'nasa95'\n elif args['mystran']:\n version = 'mystran'\n else:\n version = None\n args['version'] = version\n del args['msc'], args['nx'], args['nasa95'], args['mystran'], args['optistruct']",
"def run_makemkv(cmd, logfile):\n\n logging.debug(f\"Ripping with the following command: {cmd}\")\n try:\n # need to check output for '0 titles saved'\n subprocess.run(f\"{cmd} >> {logfile}\", capture_output=True, shell=True, check=True)\n except subprocess.CalledProcessError as mkv_error:\n raise MakeMkvRuntimeError(mkv_error) from mkv_error",
"def exec_init(self, key, value, **_):\n return value",
"def version_template(self) -> pulumi.Output['outputs.CryptoKeyVersionTemplateResponse']:\n return pulumi.get(self, \"version_template\")",
"def select_cmd():\r\n help_dict = {'1': \"Create LZ, GMA/TPL, \"\r\n \"replace stage files in <ISO path>//stage directory, rebuild ISO\",\r\n '2': \"Create LZ, GMA/TPL, \"\r\n \"replace stage files in <ISO path>//stage directory\",\r\n '3': \"Create LZ, GMA/TPL\",\r\n '4': \"Create .lz.raw\",\r\n '5': \"Compress .lz.raw\",\r\n '6': \"Create LZ\",\r\n '7': \"Create GMA/TPL\",\r\n '8': \"Replace stage files in <ISO path>//stage directory, run GCR\",\r\n '9': \"Rebuild ISO\"\r\n }\r\n\r\n for h_key, h_value in help_dict.items():\r\n print(\"{} ----> {}\".format(h_key, h_value))\r\n\r\n while True:\r\n cmd_input = input(\"\\nEnter command: \")\r\n if cmd_input == \"\":\r\n print(\"\\nInvalid command! Try again.\")\r\n\r\n elif cmd_input.lower() not in help_dict.keys():\r\n print(\"\\nInvalid command! Try again.\")\r\n\r\n else:\r\n return cmd_input.lower()",
"def usage(cls):\n return {\n 'name': 'version',\n 'args': '<version name>',\n 'desc': 'selects the current release version'\n }",
"def get_version_key(self, version):\n if self._generic_only:\n return GENERIC_VERSION\n else:\n self.check_version_exists(version)\n return version",
"def version_command(context):\n click.echo(\"Sveetoy Commandline {}\".format(\n __version__,\n ))"
]
| [
"0.6279236",
"0.6083482",
"0.57848155",
"0.5633048",
"0.5611898",
"0.55644727",
"0.55293155",
"0.5340225",
"0.53104573",
"0.5288793",
"0.5262455",
"0.5247463",
"0.51920956",
"0.51567817",
"0.5098501",
"0.50756335",
"0.5070354",
"0.5051675",
"0.503861",
"0.50332874",
"0.50332874",
"0.5025259",
"0.5003886",
"0.4985648",
"0.4968297",
"0.49636495",
"0.49373865",
"0.49358523",
"0.4931403",
"0.49295613"
]
| 0.68132955 | 0 |
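Reader's note (illustration only, not part of the dataset row above): exercising the _arg_to_command snippet together with the _arg_to_flag helper that appears among the negatives of an earlier row; pairing the two is an assumption made for this demo.

from typing import Optional, Union

def _arg_to_flag(name: str) -> str:
    # taken from a negative example earlier in this section
    arg = '-'.join(name.split('_'))
    return f'--{arg}'

def _arg_to_command(k: str, v: Optional[Union[str, int, float]] = None):
    # copied from the document field of the row above
    command = _arg_to_flag(k)
    if v is not None:
        command += f' {v}'
    return command

print(_arg_to_command('model_version', 1))  # --model-version 1
print(_arg_to_command('make_plots'))        # --make-plots (flag only, template form)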
A class that performs operations on a list of _Argument objects. | def __init__(self, arg_list: List[_Argument]):
self.arg_list: List[_Argument] = arg_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_args(args):\n run_list_args(args)",
"def create_argument_list(self):\n raise NotImplementedError",
"def parse_list_args(args):\n\n args.image_transformers = parse_transformers(args.image_transformers)\n args.tensor_transformers = parse_transformers(args.tensor_transformers)\n args.test_image_transformers = parse_transformers(args.test_image_transformers)\n args.test_tensor_transformers = parse_transformers(args.test_tensor_transformers)\n\n args.block_layout = parse_block_layout(args.block_layout)",
"def arg_comprehension(*args):\n\n my_list = [args for argument in args]\n print(my_list)",
"def cmd_list(args):",
"def __init__(self, inlist):\n\n self.args = []\n while len(inlist) != 0:\n added = 0\n for i, v in enumerate(self.args):\n if ((str(inlist[0][1]) == str(self.args[i][1])) and\n (str(inlist[0][2]) == str(self.args[i][2]))):\n self.args[i] = (self.args[i][0] +\n inlist[0][0], inlist[0][1], inlist[0][2])\n inlist.remove(inlist[0])\n added = 1\n break\n if added != 1:\n self.args.append(inlist[0])\n inlist.remove(inlist[0])\n i = 0\n # This code is to remove empty parts from the list\n while i < len(self.args):\n if ((self.args[i][0] == 0) | (self.args[i][1] == 0) |\n (self.args[i][2] == 0)):\n self.args.remove(self.args[i])\n i -= 1\n i += 1",
"def _input_args(self, args: List[str]):\n assert self._call is None, f\"You need to specify all inputs before calling `{self._call}`\"\n assert isinstance(args, list), f\"{args} is a {type(args)}, expected a list of strings!\"\n assert len(args) > 0, f\"Expected a non-empty argument list!\"\n assert all(isinstance(a, str) for a in args), f\"Expected a list of strings, not {[type(a) for a in args]}!\"\n # all arguments could potentially be filenames that we write to, so let's just add them\n self._write_files |= set(args)\n # add dummy argument zero\n args = [\"\"] + args\n # allocate args in memory\n arg_strings = [self._str(a, \"arg\") for a in args]\n # allocate a pointer array for argv\n self.data += [f\"argv: .word \" + \" \".join(\"0\" for _ in range(len(args)))]\n # load argc and argv\n self._args += [\"\", \"# argument count in a0\", f\"li a0, {len(args)}\"]\n self._args += [\"\", \"# load pointers to argument strings into argv\", f\"la a1, argv\"]\n for ii, aa in enumerate(arg_strings):\n self._args += [f\"la t1, {aa}\", f\"sw t1, {ii * 4}(a1)\"]",
"def arglist(self) -> List:\n return self.argv[1:]",
"def f(*args):\n alist = [a() for a in args]\n print(alist)",
"def parse(self, arg_list):\n\n if self._meta.ignore_unknown_arguments is True:\n args, unknown = self.parse_known_args(arg_list)\n self.parsed_args = args\n self.unknown_args = unknown\n else:\n args = self.parse_args(arg_list)\n self.parsed_args = args\n return self.parsed_args",
"def create_arg_list(self):\n\n sim = self.sim\n\n py_kernel_args = sim.kernel_args # Python variables that are passed into the kernel\n gen_kernel_args = sim.ctx_info['kernel_arguments'] # A list of needed kernel arguments from kernel autogen (Mako)\n\n list_for_kernel = gen_kernel_args[self.short_name]\n\n python_args_needed = [z[0] for z in list_for_kernel]\n\n self.arg_list = [py_kernel_args[z] for z in python_args_needed]\n\n # Loop over the arg_list...if the argument is a function, call it!\n for i in range(len(self.arg_list)):\n value = self.arg_list[i]\n if inspect.isfunction(value):\n self.arg_list[i] = value()\n\n additional_cl_args = [sim.queue, self.kernel_global_size, self.kernel_local_size]\n\n self.arg_list = additional_cl_args + self.arg_list",
"def __arg_list(self):\n args = []\n try:\n arg = self.__arg()\n args.append(arg)\n if arg.token.endswith(\"...\"):\n return args\n\n while True:\n self.match_value(Punctuator, \",\")\n\n arg = self.__arg()\n if arg.token.endswith(\"...\"):\n return args\n\n args.append(arg)\n except ParseError:\n return args",
"def execute(*args):",
"def arg_list():\n arg_list = [\n ['-d', '--domain', 'Specify the domain you are using'],\n ['-t', '--template-path', 'Specify template path'],\n ['-s', '--secrets-path', 'Specify template path'],\n ['-p', '--project', 'Specify a project name'],\n ['-c', '--cloud-platform', 'Specify the platform used'],\n ['-so', '--secrets-only', 'Generate secrets only'],\n ['-db', '--database-host', 'Specify the database host'],\n ['-dbc', '--database-connection-name', 'Specify the database connection name (GCP)'],\n ['-sbn', '--storage-bucket-name', 'Specify storage bucket name'],\n ['-sb', '--storage-backend', 'Specify storage backend s3/gcp/filesystem'],\n ['--acm', '--aws-cert-arn', 'Specify AWS ACM'],\n ['--sg-id', '--aws-alg-sg-id', 'Specify AWS SG ID'],\n ['--sentry', '--senty-dsn', 'Specify Sentry DSN'],\n ['-e', '--environment', 'Specify environment'],\n ['-g', '--gather', 'enable Gather yes or no'],\n ['--cm', '--cert-manager', 'Using cert manager?'],\n ['-m', '--modules', 'Aether modules i.e odk,ui,sync'],\n ['-r', '--redis-url', 'Redis endpoint for CouchDB sync'],\n ['-cdb', '--couchdb-url', 'Redis endpoint for CouchDB sync'],\n ['-gc', '--google-client-id', ' Google client ID for CouchDB sync']\n ]\n return arg_list",
"def _inlined_and_lined_list_args(self, options):\n raise RuntimeError(\"To be implemented by deriving class\")",
"def _run_args(cls, args: Optional[List[str]] = None):\n parser = cls.setup_args()\n opt = parser.parse_args(args=args)\n return cls._run_from_parser_and_opt(opt, parser)",
"def _generate_run_args(self, args_list, kwargs):\n return _get_args_for_run(self, args_list, kwargs)",
"def parse_arguments(args):",
"def _process_args(self, largs, rargs, values):\n while rargs:\n arg = rargs[0]\n try:\n if arg[0:2] == \"--\" and len(arg) > 2:\n # process a single long option (possibly with value(s))\n # the superclass code pops the arg off rargs\n self._process_long_opt(rargs, values)\n elif arg[:1] == \"-\" and len(arg) > 1:\n # process a cluster of short options (possibly with\n # value(s) for the last one only)\n # the superclass code pops the arg off rargs\n self._process_short_opts(rargs, values)\n else:\n # it's either a non-default option or an arg\n # either way, add it to the args list so we can keep\n # dealing with options\n del rargs[0]\n raise Exception\n except:\n largs.append(arg) # pylint: disable-msg=W0702",
"def add_args(self):\n raise NotImplementedError",
"def AddArguments(cls, argument_group):",
"def arguments(*args):\n def decorate(func):\n func.arguments = args\n return func\n return decorate",
"def _process_args(self, largs, rargs, values):\n while rargs:\n arg = rargs[0]\n try:\n if arg[0:2] == \"--\" and len(arg) > 2:\n # process a single long option (possibly with value(s))\n # the superclass code pops the arg off rargs\n self._process_long_opt(rargs, values)\n elif arg[:1] == \"-\" and len(arg) > 1:\n # process a cluster of short options (possibly with\n # value(s) for the last one only)\n # the superclass code pops the arg off rargs\n self._process_short_opts(rargs, values)\n else:\n # it's either a non-default option or an arg\n # either way, add it to the args list so we can keep\n # dealing with options\n del rargs[0]\n raise Exception\n except:\n largs.append(arg)",
"def parse_args(args):\n # If called as a main function, this processes command line arguments\n # as main. If this is called as part of an action\n if isinstance(args, list):\n parser = argparse.ArgumentParser(description=description)\n else:\n parser = args\n # add required parameters for this application\n parser.add_argument(\"operands\",\n nargs='+',\n type=float,\n help=\"List of operands.\")\n # add options for this application\n parser.add_argument(\n '-v',\n '--verbose',\n dest=\"loglevel\",\n help=\"set loglevel to INFO\",\n action='store_const',\n const=logging.INFO)\n parser.add_argument(\n '-vv',\n '--very-verbose',\n dest=\"loglevel\",\n help=\"set loglevel to DEBUG\",\n action='store_const',\n const=logging.DEBUG)\n if isinstance(args, list):\n return parser.parse_args(args)",
"def run_args(args, methods):\n if not args:\n return False\n valuable_args = {k for k, v in args.__dict__.items() if v}\n arg_methods = {methods[a] for a in valuable_args if a in methods}\n for method in arg_methods:\n method(args)",
"def add_args(*args):\n num_args = len(args)\n for i in range(0,num_args,2):\n add_arg(args[i], args[i+1])",
"def check_and_run_arguments(self, method_name = \"\", argument_list = [] ):\n\t\tcomplicate_argument_list = []\n\t\tdir_list = dir(self)\n\t\tif method_name not in dir_list:\n\t\t\tprint( f\"EasygoParser only has these methods: {dir_list}\" )\n\t\t\treturn False\n\t\tif method_name in complicate_argument_list and -1 == argument_list[0].find( \"filename=\" ):\n\t\t\tprint( f\"Method {method_name} of EasygoParser needs complicate arguments and can ONLY read arguments from a file\" )\n\n\t\targuments_dict = {}\n\t\tif -1 < argument_list[0].find( \"filename=\" ):\n\t\t\tfilename_list = argument_list[0].split(\"=\")\n\t\t\tif 2 != len( filename_list ):\n\t\t\t\tprint( f\"use filename=xxxx to let this script understand the argument file name\" )\n\t\t\t\treturn False\n\t\t\targuments_dict = self.read_argument_file( filename = filename_list[1], method_name = method_name )\n\t\t\tif 1 > len( arguments_dict ):\n\t\t\t\tprint( f\"Warning! There is no argument in file {filename_list[1]}\" )\n\t\telse:\n\t\t\tfor one in argument_list:\n\t\t\t\tif -1 == one.find( \"=\" ):\n\t\t\t\t\tprint( f\"argument {one} needs = to separate key and value\" )\n\t\t\t\t\treturn False\n\t\t\t\ttemp_list = one.split(\"=\")\n\t\t\t\tif 2 != len( temp_list ):\n\t\t\t\t\tprint( f\"argument {one} needs ONLY one = to separate key and value\" )\n\t\t\t\t\treturn False\n\t\t\t\targuments_dict[ temp_list[0] ] = temp_list[1]\n\t\tmethodcaller( method_name, **arguments_dict )(self)\n\t\t# https://blog.csdn.net/pythondafahao/article/details/79616294\n\t\t# https://blog.csdn.net/chenjinyu_tang/article/details/8136841\n\t\t# https://www.cnblogs.com/2bjiujiu/p/7289961.html\n\t\t# https://docs.python.org/3/library/operator.html\n\t\t# https://python3-cookbook.readthedocs.io/zh_CN/latest/c08/p20_call_method_on_object_by_string_name.html\n\t\t# https://blog.csdn.net/mrqingyu/article/details/84403924\n\t\t# https://stackoverflow.com/questions/3061/calling-a-function-of-a-module-by-using-its-name-a-string",
"def __arg_list(self):\n arg = self.__arg()\n args = [arg]\n try:\n while not self.eol():\n self.match_value(Punctuator, \",\")\n arg = self.__arg()\n args.append(arg)\n except ParseError:\n pass\n return args",
"def __call__(self, args):",
"def __init__(self, *args, **kwargs):\n list.__init__(self, args)\n self.flopsstr = None\n self.flops = None\n\n if not isinstance(self[0], Name):\n self[0] = Name(self[0])\n\n # infer and compile flops, min, max, attr\n self.init_lambdas(kwargs)\n\n # lookup for fast argument selection\n self.argtypelookup = {}"
]
| [
"0.6630469",
"0.63799804",
"0.6223992",
"0.61640614",
"0.61035067",
"0.6083365",
"0.60742",
"0.6046261",
"0.60256857",
"0.600408",
"0.59836245",
"0.5931386",
"0.5906878",
"0.5883545",
"0.5882717",
"0.58796144",
"0.587377",
"0.58663076",
"0.5862648",
"0.58600634",
"0.58468693",
"0.5836343",
"0.5832118",
"0.5831384",
"0.5805115",
"0.5803464",
"0.5797161",
"0.57783973",
"0.57698536",
"0.57652795"
]
| 0.6529208 | 1 |
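Reader's note (illustration only): the _Argument type referenced by the row above is not defined anywhere in this section, so the sketch below fakes a minimal stand-in exposing only the _flag and _parser_kwargs attributes that the next row's _to_parser reads; the ArgumentList class name is likewise a hypothetical choice.

from dataclasses import dataclass, field
from typing import Any, Dict, List

@dataclass
class _Argument:
    # minimal stand-in: only the attributes consumed downstream
    _flag: str
    _parser_kwargs: Dict[str, Any] = field(default_factory=dict)

class ArgumentList:
    def __init__(self, arg_list: List[_Argument]):
        self.arg_list: List[_Argument] = arg_list

args = ArgumentList([
    _Argument('--model-version-id', {'type': int, 'required': True}),
    _Argument('--make-plots', {'action': 'store_true'}),
])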
Converts the list of arguments to an ArgumentParser. | def _to_parser(self) -> ArgumentParser:
parser = ArgumentParser()
for arg in self.arg_list:
parser.add_argument(arg._flag, **arg._parser_kwargs)
return parser | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def arg_parse():\n p = ap.ArgumentParser()\n p.add_argument()\n return p.parse_args()",
"def build_argument_parser():\n description=\"A simple tool to batch rename given files.\"\n parser = ArgumentParser(description=description)\n parser.add_argument(\"-i\", \"--input-list\", required=False,\n help=\"the path to the input list file.\")\n parser.add_argument(\"-p\", \"--glob-pattern\", default=DEFAULT_GLOB_PATTERN,\n help=\"a glob pattern to filter input files.\")\n return parser",
"def parse(self, arg_list):\n\n if self._meta.ignore_unknown_arguments is True:\n args, unknown = self.parse_known_args(arg_list)\n self.parsed_args = args\n self.unknown_args = unknown\n else:\n args = self.parse_args(arg_list)\n self.parsed_args = args\n return self.parsed_args",
"def parse_arguments():\n parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)\n parser.add_argument(\n \"-v\",\n \"--verbosity\",\n dest=\"verbosity\",\n choices=(\"DEBUG\", \"INFO\", \"WARN\", \"ERROR\", \"CRITICAL\"),\n default=\"ERROR\",\n help=\"Verbosity/Log level. Defaults to ERROR\",\n )\n parser.add_argument(\n \"-l\", \"--logfile\", dest=\"logfile\", help=\"Store log to this file.\"\n )\n parser.add_argument(\n \"--username\",\n dest=\"username\",\n required=True,\n help=\"GitHub username.\",\n )\n parser.add_argument(\n \"--pat\",\n dest=\"pat\",\n required=True,\n help=\"GitHub PAT.\",\n )\n return parser",
"def command_line_argument_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(\n description=description(),\n epilog=epilog(),\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n return parser",
"def command_line_argument_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(\n description=description(),\n epilog=epilog(),\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n return parser",
"def argumentsParser(args):\n\targuments = []\n\tif args.find('\"') > -1:\n\t\tt_arguments = args.split('\"')\n\t\tfor a in t_arguments:\n\t\t\tif a == '' or a == ' ':\n\t\t\t\tpass\n\t\t\telif a[-1] == ' ':\n\t\t\t\targuments.append(a[:-1])\n\t\t\telse:\n\t\t\t\targuments.append(a)\n\telif args.find(\"'\") > -1:\n\t\tt_arguments = args.split(\"'\")\n\t\tfor a in t_arguments:\n\t\t\tif a == '' or a == ' ':\n\t\t\t\tpass\n\t\t\telif a[-1] == ' ':\n\t\t\t\targuments.append(a[:-1])\n\t\t\telse:\n\t\t\t\targuments.append(a)\n\telif args == ' ':\n\t\tpass\n\telse:\n\t\targuments = args.split(' ')\n\treturn arguments",
"def parser(cls, *args, **kwargs):\n\n parser = ArgumentParser(*args, **kwargs)\n parser.add_argument('-a', \"--address\",\n help=\"Force entry point address\", default=None)\n parser.add_argument('-b', \"--dumpblocs\", action=\"store_true\",\n help=\"Log disasm blocks\")\n parser.add_argument('-z', \"--singlestep\", action=\"store_true\",\n help=\"Log single step\")\n parser.add_argument('-d', \"--debugging\", action=\"store_true\",\n help=\"Debug shell\")\n parser.add_argument('-g', \"--gdbserver\", type=int,\n help=\"Listen on port @port\")\n parser.add_argument(\"-j\", \"--jitter\",\n help=\"Jitter engine. Possible values are: gcc (default), tcc, llvm, python\",\n default=\"gcc\")\n parser.add_argument(\n '-q', \"--quiet-function-calls\", action=\"store_true\",\n help=\"Don't log function calls\")\n parser.add_argument('-i', \"--dependencies\", action=\"store_true\",\n help=\"Load PE and its dependencies\")\n\n for base_cls in cls._classes_():\n base_cls.update_parser(parser)\n return parser",
"def command_line_parse(iargs=None):\n\n parser = create_parser()\n inps = parser.parse_args(args=iargs)\n\n return inps",
"def parse_args():\n import argparse\n\n #argument\n parser =argparse.ArgumentParser()\n\n parser.add_argument('--in_list', help = 'path to input list.')\n parser.add_argument('--out_list', help = 'path for saving list.')\n args = parser.parse_args()\n\n return args",
"def create_parser(argument_objects):\n # Create the arg parser\n parser = argparse.ArgumentParser(\n description=\"Welcome to deepdos, the machine learning/ai based ddos analysis/mitigation service\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n\n for obj in argument_objects:\n obj.register_argument(parser)\n\n return parser",
"def parse_args(self, args: List[str]) -> Namespace:\n parser = self._to_parser()\n args = parser.parse_args(args)\n if hasattr(args, 'dm_commands'):\n if args.dm_commands is not None:\n args.dm_commands = parse_commands(args.dm_commands)\n else:\n args.dm_commands = list()\n if hasattr(args, 'dm_options'):\n if args.dm_options is not None:\n args.dm_options = parse_options(args.dm_options)\n else:\n args.dm_options = dict()\n LOG.debug(f\"Arguments: {args}.\")\n return args",
"def _parse_arguments():\n import argparse\n\n parser = argparse.ArgumentParser(description=__doc__)\n\n parser.add_argument(\n 'list_of_files', type=str,\n help='Input ASCII file with a list of files to be downloaded')\n\n return parser.parse_args()",
"def _parse_args(self, cmd_line_list):\n parser = ArgumentParser()\n parser.add_argument('--yaml', help='yaml file specifying config to run')\n args = parser.parse_args(cmd_line_list)\n return vars(args)",
"def parse_arguments(args, parent_parser=[]):\n return ((), args)",
"def parseArguments(args=None):\n\n # parse command line arguments\n parser = argparse.ArgumentParser(description='collection creator')\n parser.add_argument( 'config_file', action=\"store\" )\n parser.add_argument( 'out_path', action=\"store\" )\n\n return parser.parse_args(args)",
"def _parse_args():\n parser = argparse.ArgumentParser(description='Pure-python command-line calculator.')\n\n parser.add_argument('EXPRESSION', action=\"store\", type=str, help=\"expression string to evaluate\")\n parser.add_argument('-m', '--use-modules', nargs='+', action=\"store\", dest=\"MODULE\", type=str,\n help=\"additional modules to use\")\n\n return parser.parse_args()",
"def parse_args(args=None):\n\n parser = argparse.ArgumentParser(description=ds.ARGPARSER['description'])\n parser.add_argument('input',\n help=ds.ARGPARSE_INPUT['help'])\n parser.add_argument('output',\n nargs='?',\n help=ds.ARGPARSE_OUTPUT['help'],\n default=ds.ARGPARSE_OUTPUT['default'])\n parser.add_argument('-X', '--overwrite',\n help=ds.ARGPARSE_OVERWRITE['help'],\n action='store_true')\n parser.add_argument('-e', '--extensions',\n nargs='+',\n default=ds.ARGPARSE_EXTENSION['default'],\n help=ds.ARGPARSE_EXTENSION['help'])\n parser.add_argument('-w', '--wrapper',\n help=ds.ARGPARSE_WRAPPER['help'],\n default=ds.ARGPARSE_WRAPPER['default'], )\n parser.add_argument('-v', '--verbose',\n help=ds.ARGPARSE_VERBOSE['help'],\n action='store_true')\n parser.add_argument('-r', '-R',\n help=ds.ARGPARSE_RECURSIVE['help'],\n action='store_true',\n dest='recursive')\n parser.add_argument('--version',\n action='version',\n version=ah.__version__)\n\n if args is not None:\n return parser.parse_args(args)\n else:\n return parser.parse_args()",
"def _get_parserobj(self, option_list):\n if '--version' in self.parselines[0]:\n if 'optparse' == self.parser_type:\n parser = OptionParser(version=\"dummy\")\n else:\n parser = ArgumentParser(\n version='dummy',\n formatter_class=RawDescriptionHelpFormatter)\n else:\n if 'optparse' == self.parser_type:\n parser = OptionParser()\n else:\n parser = ArgumentParser(\n formatter_class=RawDescriptionHelpFormatter)\n for opt in option_list:\n if opt['short'] and self.parser_type is 'optparse':\n parser.add_option(opt['short'], opt['long'],\n metavar=opt['metavar'],\n help=opt['help'].strip())\n elif not opt['short'] and self.parser_type is 'optparse':\n parser.add_option(opt['long'],\n metavar=opt['metavar'],\n help=opt['help'].strip())\n elif opt['short'] and self.parser_type is 'argparse':\n parser.add_argument(opt['short'], opt['long'],\n metavar=opt['metavar'],\n help=opt['help'].strip())\n elif not opt['short'] and self.parser_type is 'argparse':\n parser.add_argument(opt['long'],\n metavar=opt['metavar'],\n help=opt['help'].strip())\n else:\n raise InvalidParserTypeError(\"Invalid paresr type.\")\n return parser",
"def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('n_iter',\n help='number of iteration',\n type=int)\n parser.add_argument('n_processes',\n help='number of processes',\n type=int)\n parser.add_argument('method',\n help='mutual exclusion method')\n parser.add_argument('duration',\n help='Duration of each process',\n type=float)\n return parser.parse_args()",
"def from_argparse_args(cls, args: Union[Namespace, ArgumentParser], **kwargs):\n return from_argparse_args(cls, args, **kwargs)",
"def parse_arguments():\n parser = argparse.ArgumentParser(prog='AdapterRunner', description='Adapter Runner Application')\n parser.add_argument('-a', '--application', action='store', dest='app_name', help='Application Name',\n metavar='<application_name>')\n parser.add_argument('-fi', '--fetch_interval', action='store', dest='fetch_stats_interval', help='Fetch Stats Interval',\n metavar='<fetch_interval in seconds>')\n return parser.parse_args()",
"def argparser():\n parser = create_parser()\n args, unparsed = parser.parse_known_args()\n\n return args, unparsed",
"def parse_args(args):\n # If called as a main function, this processes command line arguments\n # as main. If this is called as part of an action\n if isinstance(args, list):\n parser = argparse.ArgumentParser(description=description)\n else:\n parser = args\n # add required parameters for this application\n parser.add_argument(\"operands\",\n nargs='+',\n type=float,\n help=\"List of operands.\")\n # add options for this application\n parser.add_argument(\n '-v',\n '--verbose',\n dest=\"loglevel\",\n help=\"set loglevel to INFO\",\n action='store_const',\n const=logging.INFO)\n parser.add_argument(\n '-vv',\n '--very-verbose',\n dest=\"loglevel\",\n help=\"set loglevel to DEBUG\",\n action='store_const',\n const=logging.DEBUG)\n if isinstance(args, list):\n return parser.parse_args(args)",
"def build_parser():\n parser = argparse.ArgumentParser(usage='$ python recentfeed.py http://domain.com/rss/',\n description='''Takes a list of URLs passed as args.\n Returns the items published today unless otherwise specified.''',\n epilog='')\n parser.add_argument(\"-v\", \"--verbose\", dest=\"verbose\", default=False, action=\"store_true\")\n parser.add_argument(\"-d\", \"--days\", dest=\"days\", default=0, action=\"count\")\n parser.add_argument(\"-o\", \"--output\", dest=\"output\", default=\"html\", type=str)\n parser.add_argument(\"urls\", action=\"append\", nargs=\"*\")\n return parser",
"def create_arguments_parser():\n description = \"Statically analyse SBML files for modelling errors\"\n parent_arg_parser = rate_checker_sbml.create_arguments_parser()\n parser = argparse.ArgumentParser(description=description,\n parents=[parent_arg_parser])\n return parser",
"def build_arg_parser():\n\n main = ArgumentParser(description='AMFinder command-line arguments.',\n allow_abbrev=False,\n formatter_class=RawTextHelpFormatter)\n\n subparsers = main.add_subparsers(dest='run_mode', required=True,\n help='action to be performed.')\n\n _ = training_subparser(subparsers)\n _ = prediction_subparser(subparsers)\n _ = diagnostic_subparser(subparsers)\n\n return main",
"def _create_parser(self):\n default_options = self._create_defaults()\n\n all_categories = ['build', 'whitespace']\n\n mock_stderr = self._MockStdErr()\n\n return ArgumentParser(\n all_categories=all_categories,\n base_filter_rules=[],\n default_options=default_options,\n mock_stderr=mock_stderr,\n usage='test usage')",
"def _parse_args():\n parser = argparse.ArgumentParser(description=\"\")\n #parser.add_argument(\"args\", metavar=\"N\", type=str, nargs=\"*\", help=\"Positional arguments.\")\n #parser.add_argument(\"\", dest=\"\", type=\"\", default=, help=)\n #parser.add_argument(\"--version\", action=\"version\", version=\"<the version>\")\n\n return parser.parse_args()",
"def parse_args():\n parser = ArgumentParser(\n description=\"This is a script for auto apply ipex optimization.\"\n \"\\n################################# Basic usage ############################# \\n\"\n \"\\n 1. Apply ipex optimization with fp32 data type\\n\"\n \"\\n >>> python -m intel_extension_for_pytorch.cpu.auto_ipex python_script args \\n\"\n \"\\n 2. Apply ipex optimization with bf16 data type\\n\"\n \"\\n >>> python -m intel_extension_for_pytorch.cpu.auto_ipex --dtype bfloat16 python_script args \\n\",\n formatter_class=RawTextHelpFormatter,\n )\n\n add_auto_ipex_params(parser, auto_ipex_default_enabled=True)\n\n # positional\n parser.add_argument(\n \"program\",\n type=str,\n help=\"The full path to the proram/script to be launched. \"\n \"followed by all the arguments for the script\",\n )\n # rest from the training program\n parser.add_argument(\"program_args\", nargs=REMAINDER)\n return parser.parse_args()"
]
| [
"0.6689526",
"0.66418236",
"0.65175176",
"0.6508834",
"0.6451397",
"0.6451397",
"0.64353776",
"0.642825",
"0.64202136",
"0.6392615",
"0.6363931",
"0.63528883",
"0.6325844",
"0.6324088",
"0.6311919",
"0.6261066",
"0.6239911",
"0.62153864",
"0.61830664",
"0.6181541",
"0.61752915",
"0.6174586",
"0.6168599",
"0.6159659",
"0.6152048",
"0.6127797",
"0.6119321",
"0.6111359",
"0.6107365",
"0.6090719"
]
| 0.7810521 | 0 |
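Reader's note (illustration only): wiring the _to_parser snippet from the row above onto the stand-in ArgumentList/_Argument classes sketched earlier, just to show the argparse call pattern; this is not a claim about the real class in the source project.

from argparse import ArgumentParser

class ArgumentList:
    def __init__(self, arg_list):
        self.arg_list = arg_list

    def _to_parser(self) -> ArgumentParser:
        # copied from the document field of the row above
        parser = ArgumentParser()
        for arg in self.arg_list:
            parser.add_argument(arg._flag, **arg._parser_kwargs)
        return parser

# e.g., with the _Argument stand-in from the previous sketch:
# ArgumentList([_Argument('--model-version-id', {'type': int})])._to_parser() \
#     .parse_args(['--model-version-id', '1'])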
Parses a list of argument strings into an argument Namespace using ArgumentParser.parse_args(). Also decodes any dismod commands and options present. | def parse_args(self, args: List[str]) -> Namespace:
parser = self._to_parser()
args = parser.parse_args(args)
if hasattr(args, 'dm_commands'):
if args.dm_commands is not None:
args.dm_commands = parse_commands(args.dm_commands)
else:
args.dm_commands = list()
if hasattr(args, 'dm_options'):
if args.dm_options is not None:
args.dm_options = parse_options(args.dm_options)
else:
args.dm_options = dict()
LOG.debug(f"Arguments: {args}.")
return args | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_arguments(args):",
"def _parse_args(self, cmd_line_list):\n parser = ArgumentParser()\n parser.add_argument('--yaml', help='yaml file specifying config to run')\n args = parser.parse_args(cmd_line_list)\n return vars(args)",
"def _parse_args(self):\n parser = argparse.ArgumentParser()\n _, args = parser.parse_known_args()\n self.args = [a for a in args if a != '']",
"def parse(self, arg_list):\n\n if self._meta.ignore_unknown_arguments is True:\n args, unknown = self.parse_known_args(arg_list)\n self.parsed_args = args\n self.unknown_args = unknown\n else:\n args = self.parse_args(arg_list)\n self.parsed_args = args\n return self.parsed_args",
"def process_command_line_arguments() -> Namespace:\n\n parser = build_parser()\n arguments = parser.parse_args()\n\n return arguments",
"def Parse(self, args):\n unparsed = []\n\n skip_parse = False\n\n for arg in args:\n if arg == '--':\n skip_parse = True\n continue\n\n if skip_parse:\n unparsed.append(arg)\n continue\n\n match = RE_FLAG.match(arg)\n if match is None:\n unparsed.append(arg)\n continue\n\n key = match.group(1)\n value = match.group(2)\n\n if key not in self._defs:\n unparsed.append(arg)\n continue\n\n self._defs[key].Parse(value)\n\n self._unparsed = tuple(unparsed)\n return True",
"def parse_args(args=None):\n return AP.parse_args(args=args)",
"def _parse(self, args):\r\n\r\n ordered = []\r\n opt_full = dict()\r\n opt_abbrev = dict()\r\n\r\n args = args + [''] # Avoid out of range\r\n i = 0\r\n\r\n while i < len(args) - 1:\r\n arg = args[i]\r\n arg_next = args[i+1]\r\n if arg.startswith('--'):\r\n if arg_next.startswith('-'):\r\n raise ValueError('{} lacks value'.format(arg))\r\n else:\r\n opt_full[arg[2:]] = arg_next\r\n i += 2\r\n elif arg.startswith('-'):\r\n if arg_next.startswith('-'):\r\n raise ValueError('{} lacks value'.format(arg))\r\n else:\r\n opt_abbrev[arg[1:]] = arg_next\r\n i += 2\r\n else:\r\n ordered.append(arg)\r\n i += 1\r\n \r\n return ordered, opt_full, opt_abbrev",
"def parseArgs(self, args, **vars):\n argList = []\n for token in self.argLexer.finditer(args):\n for tokenType, tokenValue in list(token.groupdict().items()):\n if tokenValue is not None:\n argList.append(getattr(self, 'argtoken_' +\n tokenType)(tokenValue, vars))\n return argList",
"def _parse_args(self, prepared_args):\n pass",
"def parse_args(self, args):\n (options, args) = optparse.OptionParser.parse_args(self, args)\n return options",
"def parse_args(self, args):\n raise Exception(\"Not implemented\")",
"def parse_args(args=None):\n\t\treturn _get_args_parser().parse_args(args)",
"def parse_arguments(args: list = None) -> Dict[str, str]:\n arg_parser = argparse.ArgumentParser(description=\"Console command to crypt \"\n \"and decrypt texts using \"\n \"classic methods. It also \"\n \"performs crypto attacks \"\n \"against those methods.\\n\",\n epilog=\"Follow cifra development at: \"\n \"<https://github.com/dante-signal31/cifra>\")\n cifra_subparsers = arg_parser.add_subparsers(help=\"Available modes\",\n dest=\"mode\",\n required=True)\n # DICTIONARY MANAGEMENT.\n dictionary_parser = cifra_subparsers.add_parser(name=\"dictionary\",\n help=\"Manage dictionaries to \"\n \"perform crypto attacks.\")\n dictionary_actions_subparser = dictionary_parser.add_subparsers(help=\"Action to perform.\",\n dest=\"action\")\n # DICTIONARY CREATION.\n dictionary_create_parser = dictionary_actions_subparser.add_parser(name=\"create\",\n help=\"Create a dictionary of unique words.\")\n dictionary_create_parser.add_argument(\"dictionary_name\",\n type=str,\n help=\"Name for the dictionary to create.\",\n metavar=\"NEW_DICTIONARY_NAME\")\n dictionary_create_parser.add_argument(\"-i\", \"--initial_words_file\",\n type=_check_is_file,\n help=\"Optionally you can load in the dictionary words located in a text file\",\n metavar=\"PATH_TO FILE_WITH_WORDS\")\n # DICTIONARY REMOVAL.\n dictionary_delete_parser = dictionary_actions_subparser.add_parser(name=\"delete\",\n help=\"Remove an existing dictionary.\")\n dictionary_delete_parser.add_argument(\"dictionary_name\",\n type=str,\n help=\"Name for the dictionary to delete.\",\n metavar=\"DICTIONARY_NAME_TO_DELETE\")\n # DICTIONARY UPDATING.\n dictionary_update_parser = dictionary_actions_subparser.add_parser(name=\"update\",\n help=\"Add words to an existing dictionary.\")\n dictionary_update_parser.add_argument(\"dictionary_name\",\n type=str,\n help=\"Name for the dictionary to update with additional words.\",\n metavar=\"DICTIONARY_NAME_TO_UPDATE\")\n dictionary_update_parser.add_argument(\"words_file\",\n type=_check_is_file,\n help=\"Pathname to a file with words to add to dictionary\",\n metavar=\"PATH_TO_FILE_WITH_WORDS\")\n # DICTIONARY LISTING.\n _ = dictionary_actions_subparser.add_parser(name=\"list\",\n help=\"Show existing dictionaries.\")\n # CIPHER MANAGEMENT.\n cipher_parser = cifra_subparsers.add_parser(name=\"cipher\",\n help=\"Cipher a text using a key.\")\n cipher_parser.add_argument(\"algorithm\",\n choices=CIPHERING_ALGORITHMS,\n type=str,\n help=\"Algorithm to use to cipher.\",\n metavar=\"ALGORITHM_NAME\")\n cipher_parser.add_argument(\"key\",\n type=str,\n help=\"Key to use to cipher.\",\n metavar=\"CIPHERING_KEY\")\n cipher_parser.add_argument(\"file_to_cipher\",\n type=_check_is_file,\n help=\"Path to file with text to cipher.\",\n metavar=\"FILE_TO_CIPHER\")\n cipher_parser.add_argument(\"-o\", \"--ciphered_file\",\n type=str,\n help=\"Path to output file to place ciphered text. 
If not used then\"\n \"ciphered text will be dumped to console.\",\n metavar=\"OUTPUT_CIPHERED_FILE\")\n cipher_parser.add_argument(\"-c\", \"--charset\",\n type=str,\n help=f\"Default charset is: {cifra.cipher.common.DEFAULT_CHARSET}, but you can set here \"\n f\"another.\",\n metavar=\"CHARSET\")\n # DECIPHERING MANAGEMENT\n decipher_parser = cifra_subparsers.add_parser(name=\"decipher\",\n help=\"Decipher a text using a key.\")\n decipher_parser.add_argument(\"algorithm\",\n choices=CIPHERING_ALGORITHMS,\n type=str,\n help=\"Algorithm to use to decipher.\",\n metavar=\"ALGORITHM_NAME\")\n decipher_parser.add_argument(\"key\",\n type=str,\n help=\"Key to use to decipher.\",\n metavar=\"CIPHERING_KEY\")\n decipher_parser.add_argument(\"file_to_decipher\",\n type=_check_is_file,\n help=\"Path to file with text to decipher.\",\n metavar=\"FILE_TO_DECIPHER\")\n decipher_parser.add_argument(\"-o\", \"--deciphered_file\",\n type=str,\n help=\"Path to output file to place deciphered text. If not used then\"\n \"deciphered text will be dumped to console.\",\n metavar=\"OUTPUT_DECIPHERED_FILE\")\n decipher_parser.add_argument(\"-c\", \"--charset\",\n type=str,\n help=f\"Default charset is: {cifra.cipher.common.DEFAULT_CHARSET}, but you can set here \"\n f\"another.\",\n metavar=\"CHARSET\")\n # ATTACK MANAGEMENT\n attack_parser = cifra_subparsers.add_parser(name=\"attack\",\n help=\"Attack a ciphered text to get its plain text\")\n attack_parser.add_argument(\"algorithm\",\n choices=CIPHERING_ALGORITHMS,\n type=str,\n help=\"Algorithm to attack.\",\n metavar=\"ALGORITHM_NAME\")\n attack_parser.add_argument(\"file_to_attack\",\n type=_check_is_file,\n help=\"Path to file with text to attack.\",\n metavar=\"FILE_TO_ATTACK\")\n attack_parser.add_argument(\"-o\", \"--deciphered_file\",\n type=str,\n help=\"Path to output file to place deciphered text. If not used then\"\n \"deciphered text will be dumped to console.\",\n metavar=\"OUTPUT_DECIPHERED_FILE\")\n attack_parser.add_argument(\"-c\", \"--charset\",\n type=str,\n help=f\"Default charset is: {cifra.cipher.common.DEFAULT_CHARSET}, but you can set here \"\n f\"another.\",\n metavar=\"CHARSET\")\n\n parsed_arguments = vars(arg_parser.parse_args(args))\n filtered_parser_arguments = {key: value for key, value in parsed_arguments.items()\n if value is not None}\n return filtered_parser_arguments",
"def __parse_args(self):\n _method_name = '__parse_args'\n\n if self.__raw_args is not None and len(self.__raw_args) > 0:\n if isinstance(self.__raw_args, list):\n arguments = self.__raw_args\n else:\n arguments = self.__raw_args.split()\n\n for argument in arguments:\n if self.__client_server_regex.match(argument):\n self.__client_server_args.append(argument)\n elif self.__x_args_size_regex.match(argument):\n self.__process_x_size_arg(argument)\n elif self.__x_args_value_regex.match(argument):\n self.__process_x_value_arg(argument)\n elif self.__x_args_other_regex.match(argument):\n self.__process_x_other_arg(argument)\n elif self.__xx_args_switch_regex.match(argument):\n self.__process_xx_switch_arg(argument)\n elif self.__xx_args_value_regex.match(argument):\n self.__process_xx_value_arg(argument)\n elif self.__sys_props_regex.match(argument):\n self.__process_sys_prop_arg(argument)\n else:\n self._logger.finer('WLSDPLY-08300', argument, class_name=self._class_name, method_name=_method_name)\n self.__unsorted_args.append(argument)",
"def parse_args(args):\n # If called as a main function, this processes command line arguments\n # as main. If this is called as part of an action\n if isinstance(args, list):\n parser = argparse.ArgumentParser(description=description)\n else:\n parser = args\n # add required parameters for this application\n parser.add_argument(\"operands\",\n nargs='+',\n type=float,\n help=\"List of operands.\")\n # add options for this application\n parser.add_argument(\n '-v',\n '--verbose',\n dest=\"loglevel\",\n help=\"set loglevel to INFO\",\n action='store_const',\n const=logging.INFO)\n parser.add_argument(\n '-vv',\n '--very-verbose',\n dest=\"loglevel\",\n help=\"set loglevel to DEBUG\",\n action='store_const',\n const=logging.DEBUG)\n if isinstance(args, list):\n return parser.parse_args(args)",
"def argumentsParser(args):\n\targuments = []\n\tif args.find('\"') > -1:\n\t\tt_arguments = args.split('\"')\n\t\tfor a in t_arguments:\n\t\t\tif a == '' or a == ' ':\n\t\t\t\tpass\n\t\t\telif a[-1] == ' ':\n\t\t\t\targuments.append(a[:-1])\n\t\t\telse:\n\t\t\t\targuments.append(a)\n\telif args.find(\"'\") > -1:\n\t\tt_arguments = args.split(\"'\")\n\t\tfor a in t_arguments:\n\t\t\tif a == '' or a == ' ':\n\t\t\t\tpass\n\t\t\telif a[-1] == ' ':\n\t\t\t\targuments.append(a[:-1])\n\t\t\telse:\n\t\t\t\targuments.append(a)\n\telif args == ' ':\n\t\tpass\n\telse:\n\t\targuments = args.split(' ')\n\treturn arguments",
"def __parse_args(self):\n for argument in self.args:\n source_arg = re.match(\"^(--source=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n input_arg = re.match(\"^(--input=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n stats_arg = re.match(\"^(--stats=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n help_arg = re.match(\"^--help$\", argument)\n vars_arg = re.match(\"^--vars$\", argument)\n insts_arg = re.match(\"^--insts$\", argument)\n if source_arg:\n self.sourceFile = source_arg.group(2)\n self.passedArgs.append(\"source\")\n elif input_arg:\n self.inputFile = input_arg.group(2)\n self.passedArgs.append(\"input\")\n elif help_arg:\n print(\"napoveda\")\n sys.exit(0)\n elif stats_arg:\n self.statsFile = stats_arg.group(2)\n self.passedArgs.append(\"stats\")\n elif vars_arg:\n self.passedArgs.append(\"vars\")\n if self.first_stat_arg is None:\n self.first_stat_arg = \"vars\"\n elif insts_arg:\n self.passedArgs.append(\"insts\")\n if self.first_stat_arg is None:\n self.first_stat_arg = \"insts\"\n else:\n raise ArgError(\"Unknown argument or format of the argument! (\" + argument + \")\")",
"def _parse_args():\n parser = argparse.ArgumentParser(description='Pure-python command-line calculator.')\n\n parser.add_argument('EXPRESSION', action=\"store\", type=str, help=\"expression string to evaluate\")\n parser.add_argument('-m', '--use-modules', nargs='+', action=\"store\", dest=\"MODULE\", type=str,\n help=\"additional modules to use\")\n\n return parser.parse_args()",
"def parseArgs(arguments=None):\n\tparser = generateParser(None)\n\treturn parser.parse_known_args(arguments)",
"def parse_args(args=None):\n\n parser = argparse.ArgumentParser(description=ds.ARGPARSER['description'])\n parser.add_argument('input',\n help=ds.ARGPARSE_INPUT['help'])\n parser.add_argument('output',\n nargs='?',\n help=ds.ARGPARSE_OUTPUT['help'],\n default=ds.ARGPARSE_OUTPUT['default'])\n parser.add_argument('-X', '--overwrite',\n help=ds.ARGPARSE_OVERWRITE['help'],\n action='store_true')\n parser.add_argument('-e', '--extensions',\n nargs='+',\n default=ds.ARGPARSE_EXTENSION['default'],\n help=ds.ARGPARSE_EXTENSION['help'])\n parser.add_argument('-w', '--wrapper',\n help=ds.ARGPARSE_WRAPPER['help'],\n default=ds.ARGPARSE_WRAPPER['default'], )\n parser.add_argument('-v', '--verbose',\n help=ds.ARGPARSE_VERBOSE['help'],\n action='store_true')\n parser.add_argument('-r', '-R',\n help=ds.ARGPARSE_RECURSIVE['help'],\n action='store_true',\n dest='recursive')\n parser.add_argument('--version',\n action='version',\n version=ah.__version__)\n\n if args is not None:\n return parser.parse_args(args)\n else:\n return parser.parse_args()",
"def parse_args(self):\n return self.__process_args__(self.parser.parse_args())",
"def parse_arguments(self, args, words, start_word_index, scopes,\n arg_data, fields, actions, prefix_matches, command):\n\n if len(args) == 0:\n return [[0, [], scopes, arg_data, fields, actions]]\n\n parse_results = []\n\n arg = args[0]\n\n if _is_string(arg):\n arg = {'token': arg}\n\n remaining_args = args[1:]\n\n arg_scopes = [arg] + scopes\n arg_parse_results = []\n\n # Get the attributes we need from the arg\n # FIXME: Should possibly get rid of the 'data' mechanism\n # and handle it via a custom data handler\n choices = arg.get('choices')\n nested_args = arg.get('args')\n if nested_args:\n # Convert the nested argument list into a choices argument with\n # a single choice, so that we can leverage the code below that\n # handles choices\n if choices:\n raise error.CommandDescriptionError('An argument can\\'t have both '\n '\"choices\" and \"args\" attributes', command)\n choices = (nested_args,)\n\n # Determine whether or not this argument is optional.\n # Default to making arguments optional for no commands, except for if\n # it's a choices argument. In that case it will probably be ambiguous\n # about which fields should be reset to the default values, so we just\n # don't try to handle that.\n optional_name = 'optional-for-no' if self.is_no_command else 'optional'\n #optional_default_value = self.is_no_command\n #optional_name = 'optional'\n optional_default_value = False\n #optional = arg.get(optional_name, optional_default_value)\n # FIXME: Disabling the special handling of optional arguments for no\n # command. That's causing spurious completions to be included. Not sure\n # how to fix that right now. Do we really need the special optional\n # handling anyway? Does Cisco actually support that.\n # For example, being able to use \"no ip address\" rather than\n # \"no ip address 192.168.2.2 255.255.255.0\". 
I haven't actually tried\n # both forms on a Cisco switch to see what it does.\n optional = arg.get(optional_name, optional_default_value)\n\n # Check to see if this arg overrides either the command type or action\n # Note that we don't want to set the \"actions\" variable with the\n # updated actions yet until we know that the current argument\n # actually matched against the command words and wasn't an optional\n # argument that was skipped.\n arg_scopes, arg_actions = self.check_command_type_and_actions(\n arg, arg_scopes, actions)\n\n if choices:\n if not _is_list(choices):\n raise error.CommandDescriptionError('\"choices\" argument must be a list '\n 'or tuple of argument descriptions from which to choose',\n command)\n\n for choice in choices:\n choice_args = _get_choice_args(choice)\n choice_arg_scopes = arg_scopes\n choice_actions = list(arg_actions)\n choice_prefix_matches = list(prefix_matches)\n if isinstance(choice, collections.Mapping):\n choice_arg_scopes = [choice] + choice_arg_scopes\n choice_optional = choice.get(optional_name, False)\n if choice_optional:\n optional = True\n choice_arg_scopes, choice_actions = \\\n self.check_command_type_and_actions(\n choice, choice_arg_scopes, choice_actions)\n choice_arg_data = dict(arg_data)\n choice_fields = list(fields)\n\n choice_parse_results = self.parse_arguments(choice_args,\n words, start_word_index, choice_arg_scopes,\n choice_arg_data, choice_fields, choice_actions,\n choice_prefix_matches, command)\n for choice_parse_result in choice_parse_results:\n words_matched = choice_parse_result[0]\n new_arg_data = choice_parse_result[3]\n # FIXME: Not sure if the code below is the best way to\n # handle things, but the idea is that we want to detect\n # the case where any of the choices in a choice block\n # is composed of all optional arguments. In that case\n # the overall choice block thus becomes optional. The\n # reason we propagate the optional attribute is that if\n # there are multiple choices that consist entirely of\n # optional arguments then we'd get mlutiple redundant\n # matches with exactly the same arg_data and prefix_matches\n # which would lead to an ambiguous command when we \n # process the results at the end. So by not adding a\n # result for each of those cases and instead just adding\n # a single result for the overall choice block.\n # The other thing we need to do is distinguish between\n # optional args and default args which will both lead to\n # cases where words_matched == 0. For the default arg\n # case though we will add the match in the nested call\n # since it will have changes to the arg_data which are\n # significant in the processing of the command action.\n # Since we've already added a result, we don't want to\n # set the overall choice to be optional or else again\n # we'll get multiple amibuous results. The way we detect\n # that case is if the arg_data from the parse_result is\n # different than the arg_data that was passed in. So\n # that's why we use the following test.\n if words_matched == 0 and new_arg_data == arg_data:\n # FIXME: I don't think this will work correctly\n # if/when we support default values for args. In that\n # case the choice may have matched 0 words, but it\n # may have updated the arg_data with some default\n # argument values, which we'll if we don't add the\n # parse_result at this point. 
Need to think more\n # about this.\n optional = True\n else:\n arg_parse_results.append(choice_parse_result)\n else:\n token = arg.get('token')\n field = arg.get('field')\n arg_type = arg.get('type')\n tag = arg.get('tag')\n default = self.get_default_value(arg)\n \n tag_prefix_match = None\n parsed_tag = False\n is_match = True\n words_matched = 0\n results = None\n\n # First try to parse the tag if there is one\n if tag and len(words) > 0:\n word = words[0]\n if tag.lower().startswith(word.lower()):\n if tag.lower() != word.lower():\n tag_prefix_match = [start_word_index+words_matched, tag]\n words_matched += 1\n parsed_tag = True\n else:\n self.handle_parse_error(\"Unexpected argument at \\\"%s\\\"\" % word,\n start_word_index, CommandHandler.UNEXPECTED_TOKEN_PRIORITY, tag)\n is_match = False\n\n # Handle incomplete argument matching\n if is_match:\n if words_matched < len(words):\n word = words[words_matched]\n else:\n self.handle_incomplete_command(arg, arg_scopes,\n arg_data, fields, parsed_tag, command)\n if default:\n word = default\n else:\n self.handle_parse_error(\"Unexpected end of command\",\n start_word_index + words_matched,\n CommandHandler.UNEXPECTED_END_OF_ARGUMENTS_PRIORITY)\n is_match = False\n\n # Handle the argument value\n if is_match:\n if token:\n if token.lower().startswith(word.lower()):\n value = True if arg_type == 'boolean' else token\n results = [(value, token)]\n else:\n self.handle_parse_error(\n \"Unexpected argument at \\\"%s\\\"\" % word,\n start_word_index + words_matched,\n CommandHandler.UNEXPECTED_TOKEN_PRIORITY, token)\n is_match = False\n else:\n # Check that the argument is valid\n try:\n results = validate_argument(arg, word, arg_data, arg_scopes, command)\n except error.ArgumentValidationError, e:\n expected_tokens = e.expected_tokens\n if expected_tokens:\n if _is_string(expected_tokens):\n expected_tokens = (expected_tokens,)\n self.handle_parse_error(str(e),\n start_word_index + words_matched,\n CommandHandler.UNEXPECTED_TOKEN_PRIORITY,\n expected_tokens)\n else:\n self.handle_parse_error(str(e),\n start_word_index + words_matched,\n CommandHandler.VALIDATION_ERROR_PRIORITY)\n is_match = False\n\n if is_match:\n assert results is not None\n assert _is_list(results)\n assert len(results) > 0\n # If we reach here we've successfully matched the word. The word\n # may have come from the commands words or it may have come from\n # the default value for the argument. We only want to bump the\n # words_matched in the former case, which is why we need to check\n # against the length of the words array. Note that we don't want\n # to bump words_matched in the code above where we get it from \n # the command words, because then the word offset we pass to\n # handle_parse_error would be off by 1 if the validation fails.\n if words_matched < len(words):\n words_matched += 1\n data = arg.get('data')\n arg_data_handler = _lookup_in_scopes('data-handler', arg_scopes)\n self.handle_first_matched_result(command)\n\n for result in results:\n value, match_token = result\n new_arg_data = dict(arg_data)\n if data:\n new_arg_data.update(data)\n # XXX should the mode passed in here to the handler be\n # the mode of the command, or the current mode ?\n # (mode-of-the-command in case its a higher submode push)\n if arg_data_handler:\n invocation_scope = {\n # FIXME: The 'name' attribute is deprecated. 
Remove once\n # everything's been converted.\n 'name': field,\n 'field': field,\n 'value': value,\n 'data': new_arg_data,\n 'is-no-command': self.is_no_command,\n 'current-mode-path': bigsh.run.finder.mode_stack.get_current_mode_path(),\n 'current-mode-obj-id': bigsh.run.finder.mode_stack.get_current_mode_obj()\n }\n new_arg_scopes = [invocation_scope] + arg_scopes\n try:\n result = _call_proc(arg_data_handler,\n argument_data_handler_registry, new_arg_scopes,\n command)\n except Exception, e:\n # XXX ought to not manage parameter exceptions for _call_proc\n if debug.cli():\n print _line(), 'Backtrace'\n traceback.print_exc()\n self.handle_parse_error(str(e),\n start_word_index + words_matched,\n CommandHandler.VALIDATION_ERROR_PRIORITY)\n return parse_results\n elif field is not None:\n new_arg_data[field] = value\n\n self.handle_matched_result(command, result, arg_scopes)\n\n # FIXME: Do we still need the separate fields dict?\n # If so, I don't think this is actually correct, since\n # we want fields to not necessarily be kept exactly in\n # sync with arg_data. Need to think about this more.\n new_fields = new_arg_data.keys()\n new_prefix_matches = list(prefix_matches)\n if tag_prefix_match:\n new_prefix_matches.append(tag_prefix_match)\n if len(match_token) > len(word):\n new_prefix_matches.append(\n [start_word_index+words_matched-1, match_token])\n arg_parse_results.append([words_matched, new_prefix_matches,\n arg_scopes, new_arg_data, new_fields, arg_actions])\n\n if optional:\n arg_parse_results.append([0, prefix_matches, scopes,\n arg_data, fields, actions])\n\n for arg_parse_result in arg_parse_results:\n (words_matched, prefix_matches, arg_scopes, arg_data,\n fields, actions) = arg_parse_result\n remaining_words = words[words_matched:]\n remaining_parse_results = self.parse_arguments(\n remaining_args, remaining_words,\n start_word_index + words_matched, scopes, arg_data,\n fields, actions, prefix_matches, command)\n # The first item in each tuple is the words consumed, but\n # that's relative to the remaining args that we passed to\n # it. For the parse results from this invocation of\n # parse args we also need to include the counts of the args\n # that we've already parsed plus the args that were parsed\n # for the current choice.\n for parse_result in remaining_parse_results:\n parse_result[0] += words_matched\n# parse_prefix_matches = parse_result[1]\n# for match in parse_prefix_matches:\n# match[0] += words_matched\n parse_result[1] = prefix_matches + parse_result[1]\n parse_results.append(parse_result)\n\n return parse_results",
"def parse_arguments():\n # shift away script name\n scriptname=sys.argv[0]\n shift()\n ncl_cmd=list()\n quali_cmd=list()\n id_cmd=list() \n while(len(sys.argv)>0):\n carg = sys.argv[0]\n shift()\n if(carg == \"--nucleotide\"):\n ncl_cmd = mungeArgs(sys.argv)\n elif(carg == \"--quality\"):\n quali_cmd = mungeArgs(sys.argv)\n elif(carg == \"--id\" ):\n id_cmd = mungeArgs(sys.argv)\n elif(carg in [\"-h\", \"--help\"]):\n usage()\n else:\n usage(error=True)\n # Excess arguments which are not processed \n if(len(sys.argv) > 0):\n sys.stdout.write(\"Excess arguments!\\n\")\n sys.stdout.flush()\n usage(error=True)\n\n # external modules rely on non-empty argv array, \n # re-append the script name as first command line argument\n sys.argv.append(scriptname)\n return (id_cmd, ncl_cmd, quali_cmd)",
"def _parse_cli_opts(self, args):\n self._args = args\n for opt, group in self._all_cli_opts():\n opt._add_to_cli(self._oparser, group)\n\n return self._parse_config_files()",
"def _parse_options(self, force_args=None):\r\n argv = sys.argv[1:] if force_args is None else force_args\r\n if argv and argv[0] in self._commands:\r\n self._command = argv.pop(0)\r\n else:\r\n self._command = None\r\n parser = self._construct_full_parser()\r\n self._option_values, self._argv = parser.parse(self._add_default_options(argv))",
"def parse_args(argv: list[str]) -> argparse.Namespace:\n os_release = platform.freedesktop_os_release()\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-c\",\n \"--chroot\",\n default=os_release[\"VERSION_CODENAME\"],\n help=\"Specify a chroot or active session to use. (default: use current distribution\"\n \" codename '%(default)s')\",\n )\n parser.add_argument(\n \"-d\",\n \"--directory\",\n default=os.getcwd(),\n help=\"Change to directory inside the chroot before running the command or login shell.\"\n \" Use the current directory if it exists in the chroot. Otherwise fall back to the user's\"\n \" home directory (and create the directory if it is missing).\",\n )\n parser.add_argument(\n \"-u\",\n \"--user\",\n default=getpass.getuser(),\n help=\"Run as a different user. The default is to run as %(default)s and fallback to root\"\n \" if that user does not exist in the chroot.\",\n )\n parser.add_argument(\n \"-p\",\n \"--packages\",\n default=[],\n action=\"append\",\n help=\"List of comma- or space-separated packages that should be installed\"\n \" without recommends. Can be specified multiple times.\",\n )\n parser.add_argument(\n \"--ppa\",\n default=[],\n action=\"append\",\n help=\"PPA APT sources that should be added. Can be specified multiple times.\",\n )\n parser.add_argument(\n \"-e\", \"--enable-proposed\", action=\"store_true\", help=\"Enable -proposed pocket.\"\n )\n parser.add_argument(\n \"--proposed-uri\",\n default=\"http://archive.ubuntu.com/ubuntu\",\n help=\"Sources list URI to use for -proposed (default: %(default)s)\",\n )\n parser.add_argument(\n \"--proposed-components\",\n default=\"main,universe\",\n help=\"List of comma- or space-separated components to use for -proposed\"\n \" (default: %(default)s)\",\n )\n\n args = parser.parse_args(argv)\n args.packages = [p for packages in args.packages for p in re.split(\"[, ]\", packages)]\n args.proposed_components = re.split(\"[, ]\", args.proposed_components)\n\n return args",
"def parse_list_args(args):\n\n args.image_transformers = parse_transformers(args.image_transformers)\n args.tensor_transformers = parse_transformers(args.tensor_transformers)\n args.test_image_transformers = parse_transformers(args.test_image_transformers)\n args.test_tensor_transformers = parse_transformers(args.test_tensor_transformers)\n\n args.block_layout = parse_block_layout(args.block_layout)",
"def parse_args(argv: List[str]) -> argparse.Namespace:\n parser = argparse.ArgumentParser(\n description=('Change or add the juju channel to the bundles '\n 'for the charm.'),\n epilog=(\"Either pass the directory of the charm, or be in that \"\n \"directory when the script is called.\"))\n parser.add_argument('dir', nargs='?',\n help=\"Optional directory argument\")\n group = parser.add_mutually_exclusive_group(required=True)\n parser.add_argument('--bundle',\n dest='bundles',\n action='append',\n type=Path,\n metavar='FILE',\n help=('Path to a bundle file to update. '\n 'May be repeated for multiple files to update'))\n group.add_argument('--channel', '-c',\n dest='channel',\n type=str.lower,\n metavar='CHANNEL',\n help=('If present, adds channel spec to openstack '\n 'charms. Must use --remove-channel if this is '\n 'not supplied.')),\n group.add_argument('--remove-channel',\n dest=\"remove_channel\",\n help=(\"Remove the channel specifier. Don't use with \"\n \"--channel.\"),\n action='store_true')\n group.add_argument('--branch', '-b',\n dest='branches',\n action='append',\n metavar='BRANCH',\n type=str.lower,\n help=('If present, adds a channel spec to known charms '\n 'in the lp-builder-config/*.yaml files using the '\n 'branch to map to the charmhub spec. If the '\n 'branch is not found, then the charm is ignored. '\n 'May be repeated for multiple branches to test '\n 'against.'))\n parser.add_argument('--ignore-track', '-i',\n dest='ignore_tracks',\n action='append',\n metavar=\"IGNORE\",\n type=str.lower,\n help=('Ignore this track. e.g. if '\n '\"--ignore-track lastest\" is used, then any '\n 'track/<channel> will be ignored if the track '\n 'is \"latest\". This is only useful when used '\n 'with the \"--branch\" argument. Note that the '\n 'match is done via \"starts_with\" so that, for '\n 'example, any \"latest\" track can be matched '\n 'against.'))\n parser.add_argument('--ensure-charmhub',\n dest='ensure_charmhub',\n action='store_true',\n default=False,\n help=('If set to True, then cs:~.../ prefixes of '\n 'charms will be switched to ch:<charm>'))\n parser.add_argument('--disable-local-overlay',\n dest='disable_local_overlay',\n action='store_true',\n default=False,\n help=('If set to True, then ensure that '\n '\"local_overlay_enabled: False\" are in the '\n 'bundles.'))\n parser.add_argument('--set-local-charm',\n dest='set_local_charm',\n action='store_true',\n default=False,\n help=('If set to True, then the local charm, as '\n 'determined by the charmcraft.yaml file is set '\n 'to the ../../(../)<charm>.charm'))\n parser.add_argument('--enforce-edge',\n dest='enforce_edge',\n action='store_true',\n default=False,\n help=('If set to True, then ensure that the channel '\n 'is set to <track>/edge regardless of how it is '\n 'set in the lp-build-config.'))\n parser.add_argument('--log', dest='loglevel',\n type=str.upper,\n default='INFO',\n choices=('DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL'),\n help='Loglevel')\n parser.set_defaults(channel=None,\n remove_channel=False,\n loglevel='INFO')\n return parser.parse_args(argv)",
"def _parse_args(self, args : dict):\n result = {}\n for key, value in args.items():\n if key in self._subparsers:\n # if it's a list, it is because it's a preset\n if isinstance(value, list):\n result[key] = value[0]\n else:\n result[key] = self._subparsers[key]._parse_args(value)\n elif key in self._actions:\n result[key] = self._actions[key](value)\n else:\n raise ValueError(f\"Unknown argument {key}\")\n\n return result"
]
| [
"0.7105361",
"0.67964506",
"0.6789828",
"0.67334574",
"0.6713294",
"0.67111045",
"0.6589387",
"0.6570861",
"0.65562314",
"0.6522016",
"0.65109265",
"0.6507887",
"0.6491577",
"0.6470998",
"0.6409037",
"0.63932204",
"0.63873875",
"0.6382316",
"0.6366575",
"0.6359192",
"0.63561946",
"0.63507164",
"0.6342974",
"0.63371193",
"0.6318095",
"0.62980026",
"0.6288879",
"0.62840575",
"0.62487966",
"0.622736"
]
| 0.74812925 | 0 |
Creates a template of arguments from an argument list. Will return something that looks like "{argument1} {argument2}" | def template(self) -> str:
arguments = []
for arg in self.arg_list:
flag = arg._flag
arg = _flag_to_arg(flag)
placeholder = _arg_to_empty(arg)
arguments.append(placeholder)
return ' '.join(arguments) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def args_template():\n def required_arg_template_pat():\n return ( \n (c.paren(var_or_atomics() + opt_colon_sort_meta())) |\n var_or_atomic()\n )\n return (brace_noassign().possibly() + required_arg_template_pat().many())",
"def _create_arguments(self, args):\n assert isinstance(args, (list, tuple))\n\n arguments = []\n index = 0\n for arg in args:\n assert isinstance(arg, (list, tuple))\n assert len(arg) == 2 or len(arg) == 3\n\n identifier = arg[0]\n if isinstance(arg[1], str):\n idl_type = self._create_type(\n arg[1], is_optional=(len(arg) == 3))\n else:\n idl_type = arg[1]\n\n default_value = None\n if len(arg) == 3:\n default_value = self._create_literal_constant(arg[2])\n\n arguments.append(\n Argument.IR(\n identifier,\n index=index,\n idl_type=idl_type,\n default_value=default_value))\n\n index += 1\n\n return arguments",
"def string_factory(list_of_dicts):\n result = []\n for item in range(len(list_of_dicts)):\n result.append(template.format(**list_of_dicts[item]))\n return result",
"def generate_arg_string(enum_tuple_list):\n format_string_list = []\n for tup in enum_tuple_list:\n\n format_string_list.append(tup[0]+\":\"+tup[1]+\" \")\n individualFormatString = tup[0]\n format_string_list.append(individualFormatString)\n format_string_list.append(\":\")\n\n arg_string = \"\".join(format_string_list)\n arg_string = arg_string[0:-1]\n return arg_string",
"def argument_list_quote(arguments):\n args = []\n for arg in arguments:\n args.append(argument_quote(arg))\n return '\"%s\"' % ' '.join(args)",
"def format_template(template, *args):\n return textwrap.dedent(template % args).strip()",
"def create_usdzconvert_arguments(args: list) -> list:\n usdz_converter_path = current_app.config.get('USDZ_CONVERTER_PATH') / \\\n current_app.config.get('USDZ_CONVERTER_SCRIPT_PATH')\n\n arguments = [_get_converter_interpreter_arg(),\n usdz_converter_path.resolve().as_posix()]\n\n for arg in args:\n arguments.append(arg)\n\n return arguments",
"def format_arguments(*args, **kwargs) -> str:\n return ', '.join(chain([f'{arg!r}' for arg in args],\n [f'{key}={value!r}' for key, value in kwargs.items()]))",
"def generate_arg_and_kwags():\n def gen_func(\n #df: DataSource,\n option: List[list],\n style: List[dict]\n )->List[Tuple[list, dict]]:\n\n if len(option) != len(style):\n raise SystemError(\"option and style must be same size list.\")\n\n arg_and_kwarg = []\n for o, s in zip(option, style):\n arg = [*o]\n kwargs = s\n arg_and_kwarg.append((arg, kwargs))\n return arg_and_kwarg\n return gen_func",
"def _generate_run_args(self, args_list, kwargs):\n return _get_args_for_run(self, args_list, kwargs)",
"def make_args(self, args):\n result_str = \"?\"\n for k, v in args.iteritems():\n result_str = result_str + k + \"=\" + v + \"&\"\n return result_str",
"def arguments_pattern(arguments):\n pattern = []\n \n # reserved keywords for composite commands\n reserved_keywords = (\"to\", \"with\", \">\", \"<\", \"=\", \"apartment\", \"type\")\n \n # check the type of each argument and create a pattern\n for arg in arguments:\n if arg in reserved_keywords:\n pattern.append(arg)\n continue\n \n arg_type = argument_type(arg)\n \n if arg_type == float:\n pattern.append(\"float\")\n elif arg_type == int:\n pattern.append(\"int\")\n else: \n pattern.append(\"string\")\n \n # remove the keywords from the arguments to be able to handle them\n for reserved in reserved_keywords:\n if reserved in arguments:\n arguments.remove(reserved)\n \n # return the pattern as a string\n return \" \".join(pattern)",
"def _quote_arguments(args):\n return map(lambda x: '\"{}\"'.format(x) if ' ' in x else '{}'.format(x), args)",
"def _template(inlist):\n from collections import OrderedDict\n if isinstance(inlist, str):\n inlist = [inlist]\n\n templates = []\n for item in reversed(inlist):\n templates.append(output_space(item))\n\n return OrderedDict(reversed(OrderedDict(templates).items()))",
"def template_function(self, node, ordered_functions):\n oldoptions = node.options\n headers_typedef = collections.OrderedDict()\n\n # targs - ast.TemplateArgument\n for iargs, targs in enumerate(node.template_arguments):\n new = node.clone()\n ordered_functions.append(new)\n self.append_function_index(new)\n\n new._generated = \"cxx_template\"\n\n fmt = new.fmtdict\n if targs.fmtdict:\n fmt.update(targs.fmtdict)\n\n # Use explicit template_suffix if provide.\n # If single template argument, use type's explicit_suffix\n # or the unqualified flat_name.\n # Multiple template arguments, use sequence number.\n if fmt.template_suffix:\n pass\n elif len(targs.asts) == 1:\n ntypemap = targs.asts[0].typemap\n if ntypemap.template_suffix:\n fmt.template_suffix = ntypemap.template_suffix\n else:\n fmt.template_suffix = \"_\" + ntypemap.flat_name\n else:\n fmt.template_suffix = \"_\" + str(iargs)\n\n new.cxx_template = {}\n fmt.CXX_template = targs.instantiation # ex. <int>\n\n # Gather headers required by template arguments.\n for targ in targs.asts:\n ntypemap = targ.typemap\n headers_typedef[ntypemap.name] = ntypemap\n\n self.push_instantiate_scope(new, targs)\n\n if new.ast.template_argument:\n iast = getattr(self.instantiate_scope, new.ast.template_argument)\n new.ast = new.ast.instantiate(node.ast.instantiate(iast))\n # Generics cannot differentiate on return type\n new.options.F_create_generic = False\n\n # Replace templated arguments.\n # arg - declast.Declaration\n newparams = []\n for arg in new.ast.declarator.params:\n if arg.template_argument:\n iast = getattr(self.instantiate_scope, arg.template_argument)\n newparams.append(arg.instantiate(iast))\n else:\n newparams.append(arg)\n new.ast.declarator.params = newparams\n self.pop_instantiate_scope()\n\n new.gen_headers_typedef = headers_typedef\n # Do not process templated node, instead process\n # generated functions above.\n node.wrap.clear()",
"def update_with_template_args(args, list_args=None):\r\n if not args.get('--template'):\r\n return\r\n\r\n list_args = list_args or []\r\n\r\n template_path = args.pop('--template')\r\n if not os.path.exists(template_path):\r\n raise ArgumentError(\r\n 'File does not exist [-t | --template] = %s'\r\n % template_path)\r\n\r\n config = configparser.ConfigParser()\r\n ini_str = '[settings]\\n' + open(\r\n os.path.expanduser(template_path), 'r').read()\r\n ini_fp = StringIO(ini_str)\r\n config.readfp(ini_fp)\r\n\r\n # Merge template options with the options passed in\r\n for key, value in config.items('settings'):\r\n option_key = '--%s' % key\r\n if option_key in list_args:\r\n value = value.split(',')\r\n if not args.get(option_key):\r\n args[option_key] = value",
"def arg_comprehension(*args):\n\n my_list = [args for argument in args]\n print(my_list)",
"def arg_list():\n arg_list = [\n ['-d', '--domain', 'Specify the domain you are using'],\n ['-t', '--template-path', 'Specify template path'],\n ['-s', '--secrets-path', 'Specify template path'],\n ['-p', '--project', 'Specify a project name'],\n ['-c', '--cloud-platform', 'Specify the platform used'],\n ['-so', '--secrets-only', 'Generate secrets only'],\n ['-db', '--database-host', 'Specify the database host'],\n ['-dbc', '--database-connection-name', 'Specify the database connection name (GCP)'],\n ['-sbn', '--storage-bucket-name', 'Specify storage bucket name'],\n ['-sb', '--storage-backend', 'Specify storage backend s3/gcp/filesystem'],\n ['--acm', '--aws-cert-arn', 'Specify AWS ACM'],\n ['--sg-id', '--aws-alg-sg-id', 'Specify AWS SG ID'],\n ['--sentry', '--senty-dsn', 'Specify Sentry DSN'],\n ['-e', '--environment', 'Specify environment'],\n ['-g', '--gather', 'enable Gather yes or no'],\n ['--cm', '--cert-manager', 'Using cert manager?'],\n ['-m', '--modules', 'Aether modules i.e odk,ui,sync'],\n ['-r', '--redis-url', 'Redis endpoint for CouchDB sync'],\n ['-cdb', '--couchdb-url', 'Redis endpoint for CouchDB sync'],\n ['-gc', '--google-client-id', ' Google client ID for CouchDB sync']\n ]\n return arg_list",
"def make_args_string(args):\n if not args:\n return \"\"\n\n if not isinstance(args, six.string_types):\n assert isinstance(args, (list, tuple))\n args = \" \".join([str(x) for x in args])\n args_text = \"{0}\".format(args)\n return args_text.strip()",
"def _make_formatter(*args, **kwargs):\n # pylint: disable = no-else-return\n\n assert not(args and kwargs)\n\n if args:\n # tuples are given for the whole command string but applied per token.\n # We need to supply only the tuples which are needed for the current\n # token.\n args = list(args[::-1])\n pcents = _re.compile(r'%[^%]').findall\n\n def formatter(value):\n \"\"\" Tuple formatter \"\"\"\n count = len(pcents(value))\n torepl = []\n while len(torepl) < count:\n torepl.append(args.pop())\n return value % tuple(torepl)\n return formatter\n\n elif kwargs:\n return lambda x: x % kwargs\n\n return lambda x: x",
"def formatargvalues(args, varargs, varkw, locals,\r\n formatarg=str,\r\n formatvarargs=lambda name: '*' + name,\r\n formatvarkw=lambda name: '**' + name,\r\n formatvalue=lambda value: '=' + repr(value),\r\n join=joinseq):\r\n def convert(name, locals=locals,\r\n formatarg=formatarg, formatvalue=formatvalue):\r\n return formatarg(name) + formatvalue(locals[name])\r\n specs = []\r\n for i in range(len(args)):\r\n specs.append(strseq(args[i], convert, join))\r\n if varargs:\r\n specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))\r\n if varkw:\r\n specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))\r\n return '(' + string.join(specs, ', ') + ')'",
"def tmap(app, args):\n tmpl = Template(app, args.template, args.template_args)\n for a in args.template_args:\n print(a)",
"def templateargs(self, target_jar, confs=None):\r\n raise NotImplementedError()",
"def construct_statement(*args):\n\n INPUT_STATEMENT = \"\"\n for statement in args:\n INPUT_STATEMENT += statement\n \n\n return INPUT_STATEMENT",
"def expand(*templates: Strings, **kwargs: Strings) -> List[str]:\n formats = flatten(*templates)\n results: List[str] = []\n data: Dict[str, Any] = {}\n\n def _collect(items: List[Tuple[str, Strings]]) -> None:\n if len(items) == 0:\n for template in formats:\n results.append(template.format(**data))\n else:\n name, values = items[0]\n for value in flatten(values):\n data[name] = value\n _collect(items[1:])\n\n _collect(list(kwargs.items()))\n\n return results",
"def _represent_args(*args, **kwargs):\n argument_strings = [repr(a) for a in args]\n keyword_strings = [\"=\".join((k, repr(v))) for k, v in kwargs.items()]\n return \", \".join(argument_strings + keyword_strings)",
"def arg_maker(self, args):\n added_lines = ''\n arg_line = ''\n for arg in args:\n if arg == 'user':\n added_lines += '\\n\\tuser = self.username'\n arg_line += ' user,'\n if arg == 'cwd':\n added_lines += '\\n\\tcwd = self.fs.cwd'\n arg_line += ' cwd,'\n if arg == 'table':\n added_lines += '\\n\\ttable = self.users_database[1]'\n arg_line += ' table,'\n if arg == 'database':\n added_lines += '\\n\\tdatabase = self.users_database[0]'\n arg_line += ' database,'\n if arg == 'args':\n arg_line += ' line,'\n arg_line = arg_line[1:-1]\n return added_lines, arg_line",
"def create_list_string(list_):\n return f\"[{' '.join(list_)}]\"",
"def escape_args(self, *args):\n\t\treturn tuple((self.escape(arg) for arg in args))",
"def template_string(template, **kwargs):\n\n temp = Template(template)\n return temp.render(**kwargs)"
]
| [
"0.65616566",
"0.6183756",
"0.6144895",
"0.60093826",
"0.5927253",
"0.58037114",
"0.5793754",
"0.57438946",
"0.5647408",
"0.56372666",
"0.56073415",
"0.5587228",
"0.5582654",
"0.5552702",
"0.55442554",
"0.54972726",
"0.5460326",
"0.5448793",
"0.5448093",
"0.5413476",
"0.5407082",
"0.53951913",
"0.53404355",
"0.53330183",
"0.53076845",
"0.52766365",
"0.5274318",
"0.52563",
"0.52390724",
"0.52046967"
]
| 0.6726828 | 0 |
Returns the current range of valid values | def getRange(self) -> Tuple[int, int]:
return self.validator().bottom(), self.validator().top() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _full_value_range(self):\n min_value, max_value = self._raw_data.data_range\n return max_value - min_value",
"def get_range(cls, data: tuple or list) -> float:\n cls._data_validation(data)\n max_ = cls.get_max(data)\n min_ = cls.get_min(data)\n return float(max_ - min_)",
"def get_range(self) -> tuple[int, int]:\n return self.range_from, self.range_to",
"def __validate(self, value: int, extend_range: bool):\n if extend_range:\n bottom, top = self.getRange()\n self.setRange(min(value, bottom), max(value, top))\n return numpy.clip(value, *self.getRange())",
"def isRangeValid(self) -> bool:\n ...",
"def get_range(self):\n return time_to_range(self.get_time())",
"def range(self):\n return self.timerange()",
"def _validate_val_range(self, proposal):\n val_range = proposal[\"value\"]\n if len(val_range) != 2:\n raise traitlets.TraitError(\"val_range must be of length 2.\")\n if val_range[0] > val_range[1]:\n raise traitlets.TraitError(\n \"val_range[0] must be smaller than val_range[1].\"\n )\n return val_range",
"def _value_in_bounds(self, vals):\n return (self._min_in_bounds(vals[0]), self._max_in_bounds(vals[1]))",
"def range(self) -> Tuple[Union[int, float], Union[int, float]]:\n return self._range",
"def get_range(lst):\n return float(max(lst)) - float(min(lst))",
"def detect_range(self) -> Union[int, float]:\n return self.proto.detect_range",
"def getRange(self):\n return self.range",
"def _value_in_bounds(self, val):\n val = self._stepped_value(val)\n\n if val <= self.valmin:\n if not self.closedmin:\n return\n val = self.valmin\n elif val >= self.valmax:\n if not self.closedmax:\n return\n val = self.valmax\n\n if self.slidermin is not None and val <= self.slidermin.val:\n if not self.closedmin:\n return\n val = self.slidermin.val\n\n if self.slidermax is not None and val >= self.slidermax.val:\n if not self.closedmax:\n return\n val = self.slidermax.val\n return val",
"def data_range(x):\n return max(x)-min(x)",
"def range(self):\n lows, highs = [], []\n for graph in self._graphs.values():\n low, high = graph.range()\n lows.append(low)\n highs.append(high)\n return (min(lows), max(highs))",
"def min_range(self):\n return self._min_range",
"def bcRange(self):\n\t\treturn fabs(self.Upper - self.Lower)",
"def range(self) -> ty.Tuple[float, float]:\r\n ...",
"def get_valid_fret_range(history, *, dist_range, guitar):\n min_w = guitar.max_fret\n max_w = guitar.min_fret\n for fret in history:\n min_w = min(min_w, fret)\n max_w = max(max_w, fret)\n min_x = max(max_w - dist_range, guitar.min_fret)\n max_x = min(min_w + dist_range, guitar.max_fret)\n return min_x, max_x",
"def range(self):\n return self.range_array",
"def range_(self):\n return self.bset.range_",
"def get_bounds():\n return [0.00], [1.00]",
"def bounds(self):\n return self.xmin, self.xmax, self.ymin, self.ymax",
"def _computeRangeFromData(data):\n if data is None:\n return None\n\n dataRange = min_max(data, min_positive=True, finite=True)\n if dataRange.minimum is None: # Only non-finite data\n return None\n\n if dataRange is not None:\n min_positive = dataRange.min_positive\n if min_positive is None:\n min_positive = float('nan')\n return dataRange.minimum, min_positive, dataRange.maximum",
"def _validate(self, value, **options):\n\n # this is a workaround to get the correct values of accepted min and max in\n # case they are callable and producing different results on each call.\n current_values = dict()\n current_values[self.CURRENT_MAX_KEY] = None\n current_values[self.CURRENT_MIN_KEY] = None\n options[self.CURRENT_VALUE_KEY] = current_values\n try:\n super()._validate(value, **options)\n except (self.maximum_value_error, self.minimum_value_error):\n equality_min = ''\n equality_max = ''\n\n inclusive_maximum = options.get('inclusive_maximum')\n if inclusive_maximum is None:\n inclusive_maximum = self.inclusive_maximum\n\n inclusive_minimum = options.get('inclusive_minimum')\n if inclusive_minimum is None:\n inclusive_minimum = self.inclusive_minimum\n\n if inclusive_minimum is not False:\n equality_min = self.inclusive_minimum_value_message\n\n if inclusive_maximum is not False:\n equality_max = self.inclusive_maximum_value_message\n\n current_min = current_values.get(self.CURRENT_MIN_KEY)\n if current_min is None:\n current_min = self.accepted_minimum\n\n current_max = current_values.get(self.CURRENT_MAX_KEY)\n if current_max is None:\n current_max = self.accepted_maximum\n\n raise self.range_value_error(self.range_value_message.format(\n param_name=self._get_field_name(**options),\n lower=self._get_representation(current_min),\n upper=self._get_representation(current_max),\n or_equal_min=equality_min, or_equal_max=equality_max))",
"def limits(self):\n\n\t\treturn [\n\t\t\tmin(self.xvalues),\n\t\t\tmax(self.xvalues),\n\t\t\tmin(self.yvalues),\n\t\t\tmax(self.yvalues)]",
"def get_valueRange(self):\n return \"0-{}\".format(self.resource.get_size() - 1)",
"def _get_energy_range(self):\n\n e0_min = self.network.isomers[0].E0\n e0_max = e0_min\n\n for isomer in self.network.isomers[1:]:\n E0 = isomer.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for reactant in self.network.reactants:\n E0 = reactant.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for product in self.network.products:\n E0 = product.E0\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n for rxn in self.network.path_reactions:\n E0 = rxn.transition_state.conformer.E0.value_si\n if E0 < e0_min:\n e0_min = E0\n if E0 > e0_max:\n e0_max = E0\n\n return e0_min, e0_max",
"def range(self):\n return self._upper - self._lower"
]
| [
"0.7685519",
"0.70607877",
"0.70454717",
"0.6991528",
"0.6965289",
"0.6962066",
"0.6859699",
"0.68507016",
"0.68431336",
"0.6823089",
"0.68209",
"0.6793608",
"0.66680664",
"0.6633512",
"0.66233736",
"0.6601982",
"0.65988374",
"0.6583819",
"0.6581041",
"0.65713406",
"0.6556152",
"0.6556028",
"0.6551214",
"0.6522887",
"0.65058124",
"0.6505044",
"0.6504823",
"0.6504433",
"0.65009856",
"0.6496733"
]
| 0.73728305 | 1 |
Handle change of histogram range from the range slider | def __rangeChanged(self, first, second):
tooltip = "Histogram range:\n[%g, %g]" % (first, second)
self.__rangeSlider.setToolTip(tooltip)
self.__rangeLabel.setToolTip(tooltip) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def autoHistogramRange(self):\n self.vb.enableAutoRange(self.vb.XAxis, True)\n self.vb.enableAutoRange(self.vb.YAxis, True)\n # self.range = None\n # self.updateRange()\n # self.vb.setMouseEnabled(False, False)\n\n # def updateRange(self):\n # self.vb.autoRange()\n # if self.range is not None:\n # self.vb.setYRange(*self.range)\n # vr = self.vb.viewRect()\n\n # self.region.setBounds([vr.top(), vr.bottom()])",
"def __updateHistogramFromControls(self, *args):\n\n hist = self.getHistogram(copy=False)\n if hist is not None:\n count, edges = hist\n if (len(count) == self.__nbinsLineEdit.getValue() and\n (edges[0], edges[-1]) == self.__rangeSlider.getValues()):\n return # Nothing has changed\n\n self._updateFromItem()",
"def update_view_to_bins(self, *args):\n with delay_callback(self, 'x_min', 'x_max'):\n self.x_min = self.hist_x_min\n self.x_max = self.hist_x_max",
"def update_view_to_bins(self, *args):\n with delay_callback(self, 'x_min', 'x_max'):\n self.x_min = self.hist_x_min\n self.x_max = self.hist_x_max",
"def update_bins_to_view(self, *args):\n with delay_callback(self, 'hist_x_min', 'hist_x_max'):\n if self.x_max > self.x_min:\n self.hist_x_min = self.x_min\n self.hist_x_max = self.x_max\n else:\n self.hist_x_min = self.x_max\n self.hist_x_max = self.x_min",
"def update_bins_to_view(self, *args):\n with delay_callback(self, 'hist_x_min', 'hist_x_max'):\n if self.x_max > self.x_min:\n self.hist_x_min = self.x_min\n self.hist_x_max = self.x_max\n else:\n self.hist_x_min = self.x_max\n self.hist_x_max = self.x_min",
"def update_plot_preview(self):\n\n min_val = self.range_slider.first_position\n max_val = self.range_slider.second_position\n img_data = self.normalize_histogram(min_val, max_val)\n new_hist = self.calc_histogram(img_data)\n\n self.hist_canvas.axes.clear()\n self.hist_canvas.axes.bar(range(256), self.original_hist, color='b', alpha=0.7)\n self.hist_canvas.axes.bar(range(256), new_hist, color='g', alpha=0.7)\n self.hist_canvas.draw()\n\n self.current_img_data = img_data",
"def updateRanges(self):\n # work directly on the histograms, no copy\n for i in range(len(self)):\n self[i].updateRange()",
"def update_histo_frame():\n min_histo.text = str(MIN_RANGE_F) # Display the legend\n max_histo.text = str(MAX_RANGE_F)\n\n histogram = np.zeros(GRID_AXIS) # Clear histogram accumulation array\n # Collect camera data and calculate the histogram\n for _row in range(0, GRID_AXIS):\n for _col in range(0, GRID_AXIS):\n histo_index = int(map_range(GRID_DATA[_col, _row], 0, 1, 0, GRID_AXIS - 1))\n histogram[histo_index] = histogram[histo_index] + 1\n\n histo_scale = np.max(histogram) / (GRID_AXIS - 1)\n if histo_scale <= 0:\n histo_scale = 1\n\n # Display the histogram\n for _col in range(0, GRID_AXIS):\n for _row in range(0, GRID_AXIS):\n if histogram[_col] / histo_scale > GRID_AXIS - 1 - _row:\n image_group[((_row * GRID_AXIS) + _col)].fill = index_to_rgb(\n round((_col / GRID_AXIS), 3)\n )\n else:\n image_group[((_row * GRID_AXIS) + _col)].fill = BLACK",
"def range_callback(data):\n global D\n D.ranges = data.ranges",
"def _spin_changed(self, event):\n val = event.GetValue()\n if val < self.minval:\n self.minval = val\n elif val > self.orig_min:\n self.minval = self.orig_min\n if val > self.maxval:\n self.maxval = val\n elif val < self.orig_max:\n self.maxval = self.orig_max\n self.slider.SetValue(100*(val-self.minval)/(self.maxval-self.minval))\n if self.handler:\n self.handler(event)\n event.Skip()",
"def set_colormap_range(self):\n cmin = self.settingsWidget.ui.colormap_min\n cmax = self.settingsWidget.ui.colormap_max\n region = self.plot.getHistogramWidget().region\n\n if(self.sender() == region):\n cmin.setText(str(region.getRegion()[0]))\n cmax.setText(str(region.getRegion()[1]))\n return\n\n # Sometimes the values in the lineEdits are\n # not proper floats so we get ValueErrors\n try:\n # If necessary swap min and max\n if(float(cmin.text()) > float(cmax.text())):\n _tmp = cmin.text()\n cmin.setText(cmax.text())\n cmax.setText(_tmp)\n\n region = [float(cmin.text()), float(cmax.text())]\n self.plot.getHistogramWidget().region.setRegion(region)\n except ValueError:\n return",
"def slider_changed(self):\n freq_index = self.ui.frequencySlider.value()\n freq = self.psd.freqs[freq_index]\n self.ui.fmin.setText(str(freq))\n self.ui.fmax.setText(str(freq))\n self.value_changed()",
"def updateRange(self):\n if self.autoFollow:\n self.xrange = self.param.activeRange()\n self.xrange = self.xrange # call getter & setter again to verify limits",
"def setHistogramRange(self, mn, mx, padding=0.1):\n self.vb.enableAutoRange(self.vb.YAxis, False)\n if self.orientation == 'horizontal':\n self.vb.setXRange(mn, mx, padding)\n elif self.orientation == 'vertical':\n self.vb.setYrange(mn, mx, padding)\n # mn -= d*padding\n # mx += d*padding\n # self.range = [mn,mx]\n # self.updateRange()\n # self.vb.setMouseEnabled(False, True)\n # self.region.setBounds([mn,mx])",
"def setRange(self, range):\n\t\tself.range = range\n\t\tself.slider.setMinimum(0.0)\n\t\tself.slider.setMaximum(100.0)\n\t\tself.spinbox.setRange(self.range[0], self.range[1])\n\n\t\tdiff = self.range[1] - self.range[0]\n\t\tif diff <= 1:\n\t\t\tself.spinbox.setSingleStep(0.01)",
"def updateThresholdValues (self, DoubleSlider, Node, ThMax ):\n DoubleSlider.Slider.minimum = 0\n DoubleSlider.SpinBoxL.setRange(0,ThMax)\n DoubleSlider.Slider.maximum = ThMax\n DoubleSlider.SpinBoxR.setRange(0,ThMax)\n if ThMax!=0:\n DisplayNode = Node.GetScalarVolumeDisplayNode()\n LowerThreshold = DisplayNode.GetLowerThreshold()\n UpperThreshold = DisplayNode.GetUpperThreshold()\n DoubleSlider.Slider.minimumValue = LowerThreshold\n DoubleSlider.Slider.maximumValue = UpperThreshold \n DoubleSlider.SpinBoxL.blockSignals(True)\n DoubleSlider.SpinBoxR.blockSignals(True)\n DoubleSlider.SpinBoxL.value = LowerThreshold\n DoubleSlider.SpinBoxR.value = UpperThreshold\n DoubleSlider.SpinBoxL.blockSignals(False)\n DoubleSlider.SpinBoxR.blockSignals(False)",
"def _updateFromItem(self):\n item = self.getItem()\n\n if item is None:\n self.reset()\n return\n\n if not isinstance(item, self._SUPPORTED_ITEM_CLASS):\n _logger.error(\"Unsupported item\", item)\n self.reset()\n return\n\n # Compute histogram and stats\n array = item.getValueData(copy=False)\n\n if array.size == 0:\n self.reset()\n return\n\n xmin, xmax = min_max(array, min_positive=False, finite=True)\n if xmin is None or xmax is None: # All not finite data\n self.reset()\n return\n guessed_nbins = min(1024, int(numpy.sqrt(array.size)))\n\n # bad hack: get 256 bins in the case we have a B&W\n if numpy.issubdtype(array.dtype, numpy.integer):\n if guessed_nbins > xmax - xmin:\n guessed_nbins = xmax - xmin\n guessed_nbins = max(2, guessed_nbins)\n\n # Set default nbins\n self.__nbinsLineEdit.setDefaultValue(guessed_nbins, extend_range=True)\n # Set slider range: do not keep the range value, but the relative pos.\n previousPositions = self.__rangeSlider.getPositions()\n if xmin == xmax: # Enlarge range is none\n if xmin == 0:\n range_ = -0.01, 0.01\n else:\n range_ = sorted((xmin * .99, xmin * 1.01))\n else:\n range_ = xmin, xmax\n\n self.__rangeSlider.setRange(*range_)\n self.__rangeSlider.setPositions(*previousPositions)\n\n histogram = Histogramnd(\n array.ravel().astype(numpy.float32),\n n_bins=max(2, self.__nbinsLineEdit.getValue()),\n histo_range=self.__rangeSlider.getValues(),\n )\n if len(histogram.edges) != 1:\n _logger.error(\"Error while computing the histogram\")\n self.reset()\n return\n\n self.setHistogram(histogram.histo, histogram.edges[0])\n self.resetZoom()\n self.setStatistics(\n min_=xmin,\n max_=xmax,\n mean=numpy.nanmean(array),\n std=numpy.nanstd(array),\n sum_=numpy.nansum(array))",
"def ct_slider_value_changed(self):\n for (x, slider) in enumerate(self.sliders):\n # for x in range(0, len(self.sliders)):\n # slider = self.sliders[x]\n slider_value = float(slider.value()) / float(slider.maximum())\n # Use an square function for easier opacity adjustments\n converted_value = slider_value * slider_value * slider_value\n self.render_widget.sectionsOpacity[x] = converted_value\n\n self.render_widget.update()",
"def set_range(self, **rangekwargs):\n\n if 'xrange' in rangekwargs.keys(): \n xrange = rangekwargs['xrange']\n else: \n xrange = [-50.0, 50.0] # (default)\n\n if 'yrange' in rangekwargs.keys(): \n yrange = rangekwargs['yrange']\n else: \n yrange = [0.0, 1.25 * self.hist_max]\n\n self.sub.set_xlim(xrange) \n self.sub.set_ylim(yrange) \n\n self.sub.set_xlabel(r\"$\\mathtt{d_{LOS}}$ (Mpc/h)\", fontsize=20)\n\n return None",
"def numBinsChanged(self, val):\n self.numBins = val",
"def histogram(self):\r\n channel = self.ui.channel_selection.itemData(self.ui.channel_selection.currentIndex())\r\n\r\n #create a window, the reference must be stored, because the window\r\n #gets destroyed when its reference is garbage collected\r\n #make plotWindow a list and append to that if multiple windows should be possible\r\n title = \"histogram of {:s} channel\".format(self.ui.channel_selection.currentText())\r\n self.plotWindow = pyguitools.SimplePlotWindow(name = title)\r\n self.plotWindow.ax1.hist(self.npImg[self.ui.y0.value():self.ui.y1.value(),\r\n self.ui.x0.value():self.ui.x1.value(), \r\n channel].flatten(),\r\n bins=self.settings[\"histogramm bins\"],\r\n range=(self.settings[\"histogramm min\"],self.settings[\"histogramm max\"]))\r\n self.plotWindow.ax1.set_xlim(self.settings[\"histogramm min\"],self.settings[\"histogramm max\"]) \r\n self.plotWindow.show()",
"def simple_slider_value_changed(self):\n slider_value = float(self.sliders_simple_widget.value()) \\\n / float(self.sliders_simple_widget.maximum())\n self.render_widget.lowerBound = self.render_widget.minimum \\\n + (self.render_widget.maximum - self.render_widget.minimum) * slider_value\n self.render_widget.update()",
"def set_visualization_range(self, start: int, end: int):\n self.__range = (start, end)",
"def mip_slider_value_changed(self):\n min_value = float(self.min_slider.value()) / float(self.min_slider.maximum())\n max_value = float(self.max_slider.value()) / float(self.max_slider.maximum())\n\n self.render_widget.mipMin = self.render_widget.minimum \\\n + (self.render_widget.maximum - self.render_widget.minimum) * min_value\n self.render_widget.mipMax = self.render_widget.minimum \\\n + (self.render_widget.maximum - self.render_widget.minimum) * max_value\n\n self.render_widget.update()",
"def _changeDisplayRange(self):\n try:\n newrange = float(str(self._wmin.text())), float(str(self._wmax.text()))\n except ValueError:\n return\n self._rc.setDisplayRange(*newrange)",
"def value_changed(self):\n from ..backend.util import get_index_freq\n\n try:\n fmin = float(self.ui.fmin.text())\n except ValueError:\n fmin = self.psd.freqs[0]\n\n try:\n fmax = float(self.ui.fmax.text())\n except ValueError:\n fmax = self.psd.freqs[-1]\n self.f_index_min, self.f_index_max = get_index_freq(\n self.psd.freqs, fmin, fmax)\n try:\n self.vmax = float(self.ui.vmax.text())\n except ValueError:\n self.vmax = None\n try:\n self.vmin = float(self.ui.vmin.text())\n except ValueError:\n self.vmin = None\n\n self.log = self.ui.displayLog.checkState()\n self.plot_psd()",
"def __init__(self, parent):\n\n super().__init__()\n\n self.color_depth = parent.color_depth\n self.original_hist = parent.calc_histogram()['b']\n self.img_data = parent.data.copy()\n self.current_img_data = None\n\n self.init_ui(self, [self.img_data.min(), self.img_data.max()])\n self.label_txt.setText(\"Choose the range for normalization:\")\n self.setWindowTitle(\"Normalize\")\n\n self.range_slider.left_value_changed.connect(self.update_left_value)\n self.range_slider.right_value_changed.connect(self.update_right_value)\n self.range_slider.range_chagned.connect(self.update_plot_preview)\n\n self.update_left_value()\n self.update_right_value()\n self.update_plot_preview()",
"def rb_callback(self, data):\n self.range_n_bearing_vals = data.range_n_bearing",
"def _sync_range(self, args, rkey, pkey, rwidget):\n rwidget.blockSignals(True)\n try:\n self._sync_range_hilow(args, rkey, rwidget)\n self._sync_range_points(args, pkey, rwidget)\n self._sync_unit(args, rwidget)\n finally:\n rwidget.blockSignals(False)"
]
| [
"0.7538808",
"0.73559135",
"0.73497653",
"0.73497653",
"0.73337156",
"0.73337156",
"0.72392315",
"0.6810438",
"0.66320217",
"0.66224724",
"0.658371",
"0.65811986",
"0.65531963",
"0.6504793",
"0.64732856",
"0.6411091",
"0.6354699",
"0.6342652",
"0.62566984",
"0.62514645",
"0.61842245",
"0.6151693",
"0.61389667",
"0.6093732",
"0.60279936",
"0.6025392",
"0.6001187",
"0.59964234",
"0.5967287",
"0.5917417"
]
| 0.7936009 | 0 |
Update histogram and stats from the item | def _updateFromItem(self):
item = self.getItem()
if item is None:
self.reset()
return
if not isinstance(item, self._SUPPORTED_ITEM_CLASS):
_logger.error("Unsupported item", item)
self.reset()
return
# Compute histogram and stats
array = item.getValueData(copy=False)
if array.size == 0:
self.reset()
return
xmin, xmax = min_max(array, min_positive=False, finite=True)
if xmin is None or xmax is None: # All not finite data
self.reset()
return
guessed_nbins = min(1024, int(numpy.sqrt(array.size)))
# bad hack: get 256 bins in the case we have a B&W
if numpy.issubdtype(array.dtype, numpy.integer):
if guessed_nbins > xmax - xmin:
guessed_nbins = xmax - xmin
guessed_nbins = max(2, guessed_nbins)
# Set default nbins
self.__nbinsLineEdit.setDefaultValue(guessed_nbins, extend_range=True)
# Set slider range: do not keep the range value, but the relative pos.
previousPositions = self.__rangeSlider.getPositions()
if xmin == xmax: # Enlarge range is none
if xmin == 0:
range_ = -0.01, 0.01
else:
range_ = sorted((xmin * .99, xmin * 1.01))
else:
range_ = xmin, xmax
self.__rangeSlider.setRange(*range_)
self.__rangeSlider.setPositions(*previousPositions)
histogram = Histogramnd(
array.ravel().astype(numpy.float32),
n_bins=max(2, self.__nbinsLineEdit.getValue()),
histo_range=self.__rangeSlider.getValues(),
)
if len(histogram.edges) != 1:
_logger.error("Error while computing the histogram")
self.reset()
return
self.setHistogram(histogram.histo, histogram.edges[0])
self.resetZoom()
self.setStatistics(
min_=xmin,
max_=xmax,
mean=numpy.nanmean(array),
std=numpy.nanstd(array),
sum_=numpy.nansum(array)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_values(self):\n # have to reset params to 0 when recalculating\n self.total_weight = self.total_price = self.total_value = self.total_fitness = 0\n for index, value in enumerate(self.item_stats):\n if value == 1:\n self.total_weight += self.items[index].weight\n self.total_price += self.items[index].price\n self.total_value += self.items[index].value\n self.total_fitness += self.items[index].fitness",
"def updateItem(self, value):\n self.value = value\n self.age = 0\n self.freq += 1",
"def updatestats(self):\n result = self.statsfromcounts(self.hypCountByScenario)\n self.pScenario = result[\"p\"]\n self.scenarioEntropy = result[\"entropy\"]",
"def update(self):\n\n self.stats = statistics.get()\n self.ticker = exchangerates.get_ticker()",
"def _augment_item_hist(item_df, event_df):\n return item_df.join(\n event_df[event_df['_holdout']==0]\n .groupby('ITEM_ID').size().to_frame('_hist_len')\n ).fillna({'_hist_len': 0})",
"def update_bars(self):\n raise NotImplementedError(\"Should implement update_bars()\")",
"def update_item(self, table, item):",
"def __updateHistogramFromControls(self, *args):\n\n hist = self.getHistogram(copy=False)\n if hist is not None:\n count, edges = hist\n if (len(count) == self.__nbinsLineEdit.getValue() and\n (edges[0], edges[-1]) == self.__rangeSlider.getValues()):\n return # Nothing has changed\n\n self._updateFromItem()",
"def update_stats(self, idx, key):\n\n stats = self.stats\n if not stats.has_key(idx):\n stats[idx] = {}\n if stats[idx].has_key(key):\n stats[idx][key] += 1\n else:\n stats[idx][key] = 1",
"def update_plot_preview(self):\n\n min_val = self.range_slider.first_position\n max_val = self.range_slider.second_position\n img_data = self.normalize_histogram(min_val, max_val)\n new_hist = self.calc_histogram(img_data)\n\n self.hist_canvas.axes.clear()\n self.hist_canvas.axes.bar(range(256), self.original_hist, color='b', alpha=0.7)\n self.hist_canvas.axes.bar(range(256), new_hist, color='g', alpha=0.7)\n self.hist_canvas.draw()\n\n self.current_img_data = img_data",
"def update_bar(self, bar):\n self.count += 1\n if not self.inited and self.count >= self.size:\n self.inited = True\n\n self.open_array[:-1] = self.open_array[1:]\n self.high_array[:-1] = self.high_array[1:]\n self.low_array[:-1] = self.low_array[1:]\n self.close_array[:-1] = self.close_array[1:]\n self.volume_array[:-1] = self.volume_array[1:]\n self.time_array[:-1] = self.time_array[1:]\n self.extra_array[:-1] = self.extra_array[1:]\n self.range_array[:-1] = self.range_array[1:]\n\n self.open_array[-1] = bar.open_price\n self.high_array[-1] = bar.high_price\n self.low_array[-1] = bar.low_price\n self.close_array[-1] = bar.close_price\n self.volume_array[-1] = bar.volume\n self.time_array[-1] = bar.datetime\n self.extra_array[-1] = {\"pattern\":[]}\n if self.count > 1:\n self.range_array[:-1] = self.range_array[1:]\n self.range_array[-1] = round(self.close_array[-1] / self.close_array[-2] - 1, 6)\n else:\n self.range_array[-1] = 0",
"def updateBar(self):\n pass",
"def updateRanges(self):\n # work directly on the histograms, no copy\n for i in range(len(self)):\n self[i].updateRange()",
"def __add_to_hist(self):\n pieces, _ = self.get_pieces()\n self.hist.append([pieces, self.current_dice, self.current_player, self.round])",
"def update(self, item, outcome, timestamp):\n\n system, current_deck = self.deck_of_item[item]\n new_deck = max(1, current_deck + 2 * outcome - 1)\n\n self.deck_of_item[item] = (system, new_deck)\n if current_deck >= 1:\n self.items_of_deck[(system, current_deck)].remove(item)\n if new_deck <= self.num_decks:\n self.items_of_deck[(system, new_deck)].add(item)\n\n self.latest_timestamp_of_item[item] = timestamp",
"def process_new_items(self, new_items):\n self.items_hat = np.hstack([self.items_hat, new_items])",
"def push_histogram(self, data):\n # Loop through bands of this tile\n for i, dat in enumerate(data):\n # Create histogram for new data with the same bins\n new_hist = numpy.histogram(dat['data'], bins=self.hist_bins[i])\n # Add counts of this tile to band metadata histogram\n self.hist_values[i] += new_hist[0]",
"def histogram(original, updated, bins=None, main=\"\", save=None, log=False):\n #Lengths of score lists must be identical, assume in same order\n assert len(original) == len(original)\n\n #Set up bins:\n if bins is not None and bins > 0:\n imoprovements = {(-1,-1):0}\n for i in xrange(0, len(original), bins):\n improvements[(0,i+bins)] = 0\n else:\n improvements = {(-1,-1):0, (-5,0):0, (0,1):0, (1,25):0, (25,50):0, (50,75):0, (75,100):0, (100,125):0, (125,150):0, (150,200):0, (200,300):0, (300,400):0, (500,10000):0} #defaultdict(int)\n \n #Calcualte improvements\n for o, u in izip(original, updated):\n if o>u: \n improvements[(-1,-1)] += 1\n continue\n for lower, upper in improvements:\n if lower <= int(u-o) < upper:\n improvements[(lower,upper)] += 1\n break\n keys = sorted(improvements.keys(), key=lambda x:x[0])\n values = [improvements[r] for r in keys]\n\n fig, ax = plt.subplots()\n ax.set_title(main)\n ax.set_xlabel(\"Improvement (updated-original) bitscores\")\n ax.set_ylabel(\"log(Frequency)\")\n #ax.set_yscale('log')\n\n width = 1.0\n #ax.set_xticks(np.arange(len(improvements)))\n #ax.set_xticklabels([l for l, u in keys])\n bar(ax, np.arange(len(improvements)), values, log=log,\n annotate=True, grid='y', xticklabels=[l for l, u in keys])\n\n if save is None:\n plt.show()\n else:\n plt.savefig(save)",
"def update_frequencies():\n pass",
"def update_stats(self):\n\n self.raw_data_index = 0\n start_time = time.time()\n data_frame = load_data(self.selections)\n user_stats = get_user_stats(data_frame)\n\n self.time_stats_data.config(text=get_time_stats(data_frame))\n self.station_stats_data.config(text=get_station_stats(data_frame))\n self.trip_stats_data.config(text=get_trip_duration_stats(data_frame))\n\n self.user_stats_data.config(text=user_stats[0])\n self.gender_stats_data.config(text=user_stats[1])\n self.age_stats_data.config(text=user_stats[2])\n self.status.config(\n text=f\"Updated statistics in {round((time.time() - start_time), 2)} seconds. Modify filters using left radio buttons as desired...\"\n )",
"def update_stats(self):\n self.stats = []\n for index in range(self.priority_list.count()):\n self.stats.append(self.priority_list.item(index).text())",
"def updateItem(self, object):\n pass",
"def updateColorItem(self, item, itemColor): \n self.avatarConfiguration[item] = itemColor\n self.paintAvatarItem(item)",
"def _update(self, bandit): \n \n bandit_logs = self.logging[bandit]\n bandit = bandit.id\n if not bandit_logs['actions']:\n estimate = 0 # if not taken till now then 0 is assigned\n actions = 0\n else:\n estimate = bandit_logs['reward'] / bandit_logs['actions'] # if not assigned\n actions = bandit_logs['actions']\n self.mu[bandit] = (self.mu_pri[bandit]/self.var_pri[bandit] + actions*estimate/self.var0)/(actions/self.var0 + 1/self.var_pri[bandit])\n self.var[bandit] = 1/(actions/self.var0 + 1/self.var[bandit])",
"def update_data(self):\n\n # Update all plots in the figure\n self.data = self.model.measurements.get_bokeh_vis_data()\n self.source.stream(self.data, len(self.data))\n self.line_source.stream(self.data[self.data.agent_type == 'system'])\n self.school_dropdown_func()\n\n # Update the utility histograms\n self.update_histograms()\n\n # Update the composition histograms\n to_update = [self.neighbourhood_composition_quads, \n self.school_composition_quads, self.distance_quads]\n\n for quads in to_update:\n\n # Grab the new data\n if quads == self.neighbourhood_composition_quads:\n hist_data = self.composition_data(agent_type='neighbourhood')\n elif quads == self.school_composition_quads:\n hist_data = self.composition_data(agent_type='school')\n else:\n hist_data = self.composition_data(agent_type='household')\n\n # Update the bars and edges\n for group in hist_data.keys():\n\n hist, edges = np.histogram(hist_data[group],\n density=True,\n bins=20)\n\n # Update histogram\n quads[group].data_source.data['top'] = hist\n quads[group].data_source.data['left'] = edges[:-1]\n quads[group].data_source.data['right'] = edges[1:]",
"def update_stats(self, step):\n self.dynamic.progressBar.setValue(\n float(step) / float(self.meas_max_volt / self.steps) * 100\n )",
"def _update(self, bandit): \n \n bandit_logs = self.logging[bandit]\n bandit = bandit.id\n estimate = bandit_logs['reward'] / bandit_logs['actions'] # if not assigned\n actions = bandit_logs['actions']\n self.mu[bandit] = (self.mu_pri[bandit]/self.var_pri[bandit] + actions*estimate/self.var0)/(actions/self.var0 + 1/self.var_pri[bandit])\n self.var[bandit] = 1/(actions/self.var0 + 1/self.var[bandit])",
"def update_percent(self):",
"def update(self):\n inventoryJson = self.__agent__.getInventoryJson()\n itemsLeft = len(inventoryJson) != 0\n itemTypesInObservation = []\n itemsAdded = []\n itemsDeleted = []\n\n # Loop over all item types in the observation\n while (itemsLeft):\n itemType = inventoryJson[0][\"type\"]\n itemTypesInObservation.append(itemType)\n numOfItemInObs = inventoryJson[0][\"quantity\"]\n\n if itemType not in self.__inventory__: # Add an array of ids for this item type if it was never discovered\n self.__inventory__[itemType] = []\n numOfItemInInv = len(self.__inventory__[itemType])\n\n for i in range(1, len(inventoryJson)): # Loop over remaining items, and for each item of matching type, add to counter\n if inventoryJson[i][\"type\"] == itemType:\n numOfItemInObs += inventoryJson[i][\"quantity\"]\n inventoryJson = [item for item in inventoryJson if item[\"type\"] != itemType] # Remove all of those inventory items\n \n if numOfItemInObs > numOfItemInInv: # Add more items with unique id of this type to inventory\n for i in range(numOfItemInInv, numOfItemInObs):\n newItem = self.addItem(itemType)\n itemsAdded.append(newItem)\n elif numOfItemInObs < numOfItemInInv: # Remove some items of this type from inventory\n for i in range(numOfItemInObs, numOfItemInInv):\n if len(self.__inventory__[itemType]) > 0:\n lostItem = self.__inventory__[itemType].pop(0)\n itemsDeleted.append(lostItem)\n\n # Only perform another iteration if there are more items of different types that we have not yet checked\n if len(inventoryJson) == 0:\n itemsLeft = False\n \n # For any items in the inventory that was not in the observation, set the quantity to 0\n for itemType in self.__inventory__:\n if itemType not in itemTypesInObservation:\n self.__inventory__[itemType].clear()\n\n return (itemsAdded, itemsDeleted)",
"def __init__(self, iterable=None):\n super(Histogram, self).__init__()\n self.types = 0 # the number of distinct item types in this histogram\n self.tokens = 0 # the total count of all item tokens in this histogram\n if iterable:\n self.update(iterable)"
]
| [
"0.6690862",
"0.6283491",
"0.597615",
"0.59533566",
"0.5937633",
"0.5907779",
"0.5881812",
"0.58777666",
"0.5872015",
"0.5857613",
"0.5854136",
"0.58169454",
"0.5816622",
"0.58079773",
"0.5781084",
"0.57798415",
"0.5769755",
"0.57676035",
"0.57232594",
"0.570378",
"0.5695487",
"0.5674231",
"0.5642966",
"0.56370693",
"0.56107336",
"0.56058127",
"0.55926234",
"0.55908877",
"0.55861527",
"0.5556202"
]
| 0.7574821 | 0 |
Synchronises selected item with plot widget. | def _updateSelectedItem(self):
plot = self.plot
if plot is not None:
selected = plot.selection().getSelectedItems()
# Give priority to image over scatter
for klass in (items.ImageBase, items.Scatter):
for item in selected:
if isinstance(item, klass):
# Found a matching item, use it
self.getHistogramWidget().setItem(item)
return
self.getHistogramWidget().setItem(None) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_selection(self):\n raise NotImplementedError",
"def plot_changed(self):\n self.plotType = self.ui.selectPlotType.currentText()\n self.value_changed()",
"def slotSelectFromPlot(self, selectDict):\n logger.info(' -->> emit signalSelectFromPlot ScatterPlotMainWindow !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n self.signalSelectFromPlot.emit(selectDict)\n\n self.mySetStatusBar(f\"User selected: {selectDict['path']}\")",
"def collection_selected(self):\n self.processgraph.set_collection(str(self.collectionBox.currentText()))\n self.reload_processgraph_view()",
"def update_plot_det_items(self,param):\n items=param.value()['selected']\n self.settings.child('scan_options', 'plot_from').setOpts(limits=items)",
"def _on_plot_selection(self, event=None):\n if event is not None:\n combo = event.GetEventObject()\n event.Skip()\n else:\n combo = self.cb_plotpanel\n selection = combo.GetSelection()\n\n if combo.GetValue() != 'None':\n panel = combo.GetClientData(selection)\n self.parent.on_set_plot_focus(panel)",
"def OnSelect(self,event):\r\n index = event.GetSelection()\r\n item = self.items[index]\r\n self.data.select(item)\r\n if self.gInfoBox:\r\n self.gInfoBox.DiscardEdits()\r\n self.gInfoBox.SetValue(self.data.getInfo(item))",
"def on_selected(self):\n self.colour = self.selected_colour\n self.is_selected = True\n self.redraw()",
"def updateSelectionArea(self):\n self.machine.setSelectionArea(self.points,\n fill='hatch',\n color=self.machine.color)\n eventDict = prepareDrawingSignal('drawingProgress',\n 'polygon',\n self.points,\n self.machine.parameters)\n self.machine.plot.notify(**eventDict)",
"def select_items(self):\n\n self.listWidget.currentItem().setSelected(True)\n self.items_selected = self.listWidget.selectedItems()\n\n if self.frame_ordering == \"quality\":\n self.indices_selected = [self.quality_sorted_indices[self.listWidget.row(item)] for item\n in self.items_selected]\n self.frame_index = self.indices_selected[0]\n self.quality_index = self.rank_indices[self.frame_index]\n else:\n self.indices_selected = [self.listWidget.row(item) for item in self.items_selected]\n self.frame_index = self.indices_selected[0]\n self.quality_index = self.rank_indices[self.frame_index]\n\n self.synchronize_slider()",
"def _selectInd(self, ind):\n logger.info(f'plotNumber:{self.plotNumber} ind: {ind}')\n if ind > len(self.plotDf)-1:\n return\n xVal = self.plotDf.at[ind, self.stateDict['xStat']]\n yVal = self.plotDf.at[ind, self.stateDict['yStat']]\n if self.scatterPlotSelection is not None:\n logger.info(f' setting scatterPlotSelection x:{xVal} y:{yVal}')\n self.scatterPlotSelection.set_data(xVal, yVal)\n self.fig.canvas.draw()",
"def update_plot():\n pass",
"def item_selected(self, _widget, _idx):\n # get item title\n self.sel_fmt = str(self.types_list.getItemNameAt(_idx))\n \n # enable \"ok\" button if any item selected\n self.button_ok.setEnabled(True)\n # update editor checkbox\n self.checkbox_edit.setStateCheck(False)\n self.checkbox_edit.setEnabled(self._formats[self.sel_fmt][1])",
"def set_item_selection(self, item):\n self._set_item_selection(item.encode())",
"def toggle_select(self):\r\n if not len(self.items):\r\n return\r\n item = self.items[self.item_sel]\r\n if item in self.selected:\r\n self.selected.remove(item)\r\n else:\r\n self.selected.append(item)\r\n self.do_paint()",
"def on_selected_new_item(self, item):\n pass",
"def update_view(self, selected):\n pass",
"def updateSelection(self, selectionItem):\n self.currentLayerData = self.layers[self.getCurrentRow()]\n if self.model.indexFromItem(self.jobRow) == selectionItem.indexes()[0]:\n # Job Row is selected. Update selection to the last selected or first layer.\n if self.selected is None:\n self.selected = self.jobRow.child(0)\n self.setSelectedFromItem(self.selected)\n else:\n currentRow = self.getCurrentRow()\n self.selected = self.jobRow.child(currentRow)\n self.selectionChanged.emit(self.currentLayerData)",
"def _on_item_selection_changed(self, event):\n item = event.GetItem()\n if item is not None:\n self._model.change_value(event.GetColumn(), item)",
"def setDataSelection(self, selection):\n pass",
"def selection_changed(self):\n self.emit('selection_changed')",
"def vue_data_item_selected(self, event):\n viewer_id, selected_items = event['id'], event['selected_items']\n\n self._update_selected_data_items(viewer_id, selected_items)",
"def selected(self, item):\n self.elementoSeleccionado = item",
"def _update_data(self, selected):\n if selected.row() != self.datasets.index:\n self.datasets.index = selected.row()\n self.datasets.update_current()\n self._update_main()",
"def update_selection(self):\n\n # clear all boxes\n self.clear_boxes()\n self.draw_figure(self.s)\n\n # update temperature list\n if self.Data[self.s]['T_or_MW'] == \"T\":\n self.temperatures = np.array(self.Data[self.s]['t_Arai']) - 273.\n else:\n self.temperatures = np.array(self.Data[self.s]['t_Arai'])\n\n self.T_list = [\"%.0f\" % T for T in self.temperatures]\n self.tmin_box.SetItems(self.T_list)\n self.tmax_box.SetItems(self.T_list)\n self.tmin_box.SetValue(\"\")\n self.tmax_box.SetValue(\"\")\n self.Blab_window.SetValue(\n \"%.0f\" % (float(self.Data[self.s]['pars']['lab_dc_field']) * 1e6))\n if \"saved\" in self.Data[self.s]['pars']:\n self.pars = self.Data[self.s]['pars']\n self.update_GUI_with_new_interpretation()\n self.Add_text(self.s)\n self.write_sample_box()",
"def detachFromPlotItem(self):\n raise NotImplementedError() # TODO",
"def apply_selection(self, rv, index, is_selected):\r\n self.selected = is_selected",
"def wrapSelectedPlots(self, window, allifnone=True):\n curves = []\n if self.__selectedCurves:\n curves = self.__selectedCurves\n elif allifnone:\n curves = self.itemList()\n for item in curves:\n xdata = numpy.array(item.data().xData())\n ydata = numpy.array(item.data().yData())\n xdata = xdata % window\n item.setData(xdata, ydata)\n self.replot()",
"def trigger_item_shared(self, checked):\n if self.selected_item and checked != self.selected_item.shared:\n if self.selected_item.shared:\n self.selected_item.unshare()\n else:\n self.selected_item.share()\n self.controller.item_shared(self.selected_item)",
"def onPick(self, event):\n\n modifiers = QtWidgets.QApplication.keyboardModifiers()\n isShift = modifiers == QtCore.Qt.ShiftModifier\n\n logger.info(f'isShift:{isShift}')\n line = event.artist\n\n # filter out clicks on 'Annotation' used by mplcursors\n try:\n # when Scatter, line is 'PathCollection', a list of (x,y)\n offsets = line.get_offsets()\n except (AttributeError) as e:\n return\n\n ind = event.ind # ind is a list []\n if len(ind)==0:\n return\n ind = ind[0]\n\n # ind is the ith element in (x,y) list of offsets\n # ind 10 (0 based) is index 11 (1 based) in table list\n logger.info(f' selected from plot ind:{ind}, offsets values are {offsets[ind]}')\n selectDict = self.getAnnotation(ind)\n\n # to do, just put copy of state dict ???\n selectDict['plotType'] = self.stateDict['plotType']\n selectDict['dataType'] = self.stateDict['dataType']\n\n selectDict['isShift'] = isShift\n\n #\n # emit\n logger.info(f' -->> signalSelectFromPlot.emit()')\n for _k, _v in selectDict.items():\n logger.info(f' {_k}: {_v}')\n self.signalSelectFromPlot.emit(selectDict)"
]
| [
"0.64624995",
"0.6395857",
"0.6296318",
"0.6237512",
"0.62184864",
"0.6191139",
"0.61861914",
"0.61574066",
"0.6152853",
"0.6127451",
"0.61113685",
"0.6080558",
"0.60571474",
"0.60452074",
"0.60305876",
"0.60287696",
"0.60277885",
"0.6020022",
"0.60172886",
"0.5983049",
"0.5979111",
"0.59604335",
"0.5949271",
"0.59401476",
"0.59329545",
"0.59233797",
"0.59086967",
"0.59052277",
"0.5884695",
"0.5849066"
]
| 0.7954543 | 0 |
Returns the widget displaying the histogram | def getHistogramWidget(self):
return self._getToolWindow() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def histogram(self):\r\n channel = self.ui.channel_selection.itemData(self.ui.channel_selection.currentIndex())\r\n\r\n #create a window, the reference must be stored, because the window\r\n #gets destroyed when its reference is garbage collected\r\n #make plotWindow a list and append to that if multiple windows should be possible\r\n title = \"histogram of {:s} channel\".format(self.ui.channel_selection.currentText())\r\n self.plotWindow = pyguitools.SimplePlotWindow(name = title)\r\n self.plotWindow.ax1.hist(self.npImg[self.ui.y0.value():self.ui.y1.value(),\r\n self.ui.x0.value():self.ui.x1.value(), \r\n channel].flatten(),\r\n bins=self.settings[\"histogramm bins\"],\r\n range=(self.settings[\"histogramm min\"],self.settings[\"histogramm max\"]))\r\n self.plotWindow.ax1.set_xlim(self.settings[\"histogramm min\"],self.settings[\"histogramm max\"]) \r\n self.plotWindow.show()",
"def histogram(self):\n return self._hist",
"def get_hist(self):\n return self.hist",
"def get_histogram(self):\n n_rows = self.df.shape[0]\n if n_rows > 250:\n fig, ax = plt.subplots()\n ax.hist(self.df[self.col_name], bins=50)\n else: \n fig, ax = plt.subplots()\n ax.hist(self.df[self.col_name], bins=int(round(n_rows/5,0)))\n return fig",
"def getHistogram(self, copy: bool=True):\n for item in self.getPlotWidget().getItems():\n if item.getName() == 'histogram':\n return (item.getValueData(copy=copy),\n item.getBinEdgesData(copy=copy))\n else:\n return None",
"def getHistogram(self) -> Optional[numpy.ndarray]:\n histogram = self.getHistogramWidget().getHistogram()\n return None if histogram is None else histogram[0]",
"def makeHistogram(values, numBins, xLabel, yLabel, title=None):",
"def mostrar_histograma(self):\n histograma = self.imagem_core.get_histograma()\n\n graph = Graph(\n xlabel='Tom de Cinza',\n ylabel='Quantidade de tons',\n padding=5,\n xmin=0,\n xmax=max(histograma.keys()),\n ymin=0,\n ymax=max(histograma.values())\n )\n plot = MeshLinePlot()\n plot.points = histograma.items()\n graph.add_plot(plot)\n self.widgets_dinamicos.append(graph)\n self.add_widget(graph)",
"def _create_histogram_plot(self, source):\n # figure\n kwargs = {\n \"plot_height\": 300,\n \"height_policy\": \"fit\",\n \"plot_width\": 300,\n \"title\": self._histogram_title,\n \"css_classes\": [self._histogram]\n }\n p = default_figure(kwargs)\n\n # histogram\n fcolor = self.plot_design.fill_color\n p.quad(top=self._hist_source_data,\n bottom=0,\n left=self._hist_source_left_edges,\n right=self._hist_source_right_edges,\n source=source,\n fill_color=fcolor,\n line_color=fcolor\n )\n\n # plot specific styling\n p.y_range.start = 0\n p.yaxis.visible = False\n p.xaxis.ticker = BasicTicker(desired_num_ticks=5)\n p.xaxis.formatter = NumeralTickFormatter(format=\"0.[0]\")\n\n return p",
"def drawHist(data, xLabel, unit, binSize, title):\n mean = np.mean(data)\n median = np.median(data)\n mode = stats.mode(data)[0].astype(float)\n \n q1, q3 = np.percentile(data, [25, 75])\n iqr = q3 - q1\n sigma = np.std(data)\n \n \n bins = np.arange(min(data), max(data) + 1, binSize)\n plt.style.use('dark_background')\n fig, ax = plt.subplots(figsize=(12,7))\n plt.hist(data, bins=bins, histtype='bar') \n plt.title(title)\n plt.xlabel(xLabel + \" \" + unit)\n plt.ylabel('count')\n ymax = ax.get_ylim()[1]\n ax.vlines(mean, 0, ymax, color='red', label='mean')\n ax.vlines(mean-sigma, 0, ymax, color='red', linestyle='--', \n label='mean +/- std')\n ax.vlines(mean+sigma, 0, ymax, color='red', linestyle='--')\n plt.legend()\n plt.show()\n \n print(\"Einheit: \", unit)\n print(\"Minimum: \", round(data.min(),3))\n print(\"Maximum: \", round(data.max(),3))\n print(\"Mittelwert: \", round(mean,3))\n print(\"Median: \", round(median,3))\n print(\"Modus: \", round(mode[0],3))\n print(\"Standardabweichung: \", round(sigma, 3))\n print(\"1. Quartil: \", round(q1,3))\n print(\"3. Quartil: \", round(q3,3))\n print(\"Quartilsdifferenz: \", round(iqr,3))",
"def plot_hist(self):\n labels = [self.get_class_str(action, obj)\n for (action, obj, subj, rec, beg, end) in self.action_clips]\n visualize.plot_hist(labels, proportion=True)",
"def getHistogram( self, img):\n bins = 256\n range_scale = [0,254]\n nivel_transparencia = 0.5\n plt.hist(img.ravel(),bins,range_scale, label=\"histogram\", alpha=nivel_transparencia);\n plt.legend(loc='upper right')\n plt.show()",
"def display(\n self, start=None, stop=None, step=None, format=\"%14.4f\", show_empty=False\n ):\n collection = self.collect(start, stop, step)\n print(\"Histogram\")\n print(\"=========\")\n size = len(format % 0)\n sformat = \"%\" + str(size) + \"s\"\n columns = [\"Range Start\", \"Range End\", \"Count\", \"Bins\"]\n formats = [sformat % s for s in columns]\n print(*formats)\n print(\"-\" * (size * 4 + 3))\n total = 0.0\n for row in collection:\n count = row[\"count\"]\n total += count\n if show_empty or count > 0:\n print(\n format % row[\"value_start\"],\n format % row[\"value_stop\"],\n format % count,\n (\n sformat\n % (\"[%s-%s]\" % (row[\"bin_index_start\"], row[\"bin_index_stop\"]))\n ),\n )\n print(\"-\" * (size * 4 + 3))\n print((\"Total: \" + format) % total)",
"def create_histogram(self, i):\n # styling\n sns.set(style=\"whitegrid\")\n font = {'weight': 'normal'}\n plt.rc('font', **font)\n plt.rc('axes', labelsize=25) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=25) # fontsize of the tick labels\n plt.rc('ytick', labelsize=25)\n fig, ax = plt.subplots(1, 1, figsize=(5, 5), dpi=100)\n try:\n if self.dtype_is_object() or self.num_of_values() <= 15:\n if self.num_of_values() > 15:\n data = pd.to_numeric(self.data, errors='coerce')\n plot = sns.distplot(data.dropna())\n else:\n plot = sns.countplot(self.remove_nan_values())\n else:\n plot = sns.distplot(self.remove_nan_values())\n plot.set(xlabel='', ylabel='')\n except Exception:\n plt.text(0.5, 0.5, f'Unable to plot', ha='center', va='center', transform=ax.transAxes, fontsize=16)\n if not os.path.isdir('hist_images'):\n os.mkdir('hist_images')\n plt.savefig(f'hist_images/histogram{i}.png', bbox_inches='tight')\n plt.close()\n plt.clf()",
"def histogram_plot(self):\n if not self.HAS_PYQT5:\n return\n data = self.histogram_data\n if data is None or len(data) == 0:\n return\n\n # start up plot viewer if needed\n if self.plotviewer is None or not self.plotviewer.isVisible():\n self.plotviewer = MatplotlibPlot()\n\n self.plotviewer.setWindowTitle('Histogram')\n self.plotviewer.plot_layout = 'rows'\n self.plotviewer.share_axes = self.plot_parameters['share_axes']\n self.plotviewer.plot(data)\n self.plotviewer.set_scroll('bottom')\n self.plotviewer.show()\n self.plotviewer.raise_()",
"def getWidget(self):",
"def hist(self):\r\n plt.hist(self.data_array, bins='auto', density=False, facecolor='b')\r\n plt.title(self.column_name)\r\n plt.savefig(self.column_name + \".svg\")\r\n plt.close()",
"def get_histogram(self, column):\n\n df_histogram = pd.DataFrame(self.serie)\n #histogramcol = alt.Chart(df_histogram).mark_bar().encode(alt.X(column, bin=True), y='count()')\n histogramcol = alt.Chart(df_histogram).mark_bar().encode(alt.X(column, bin=alt.Bin(maxbins=50)),y='count()')\n return histogramcol",
"def GetHistogram(self, label: 'short') -> \"itkHistogramD_Pointer\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF3ISS3_GetHistogram(self, label)",
"def GetHistogram(self, label: 'short') -> \"itkHistogramD_Pointer\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS3ISS3_GetHistogram(self, label)",
"def visualize(self):\n self.dataFrame.hist()\n plt.show()",
"def GetHistogram(self, label: 'unsigned short') -> \"itkHistogramD_Pointer\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF3IUS3_GetHistogram(self, label)",
"def plotHist(self):\n X = []\n Y = []\n for item in self.hist.items():\n X.append(int(item[0]))\n Y.append(int(item[1]))\n plt.bar(X,Y, align='center')\n plt.xticks([1,2,3,4,5,6,7])\n plt.ylim(0,len(self.responses))\n plt.title(self.text)\n plt.xlabel('Number of Responses')\n plt.ylabel('Value of Response')\n for x, y in zip(X, Y):\n plt.text(x, y, str(y), ha='center', va='bottom')\n plt.show()",
"def makeHistogram(values, numBins, xLabel, yLabel, title=None):\n pylab.hist(values, bins = numBins)\n pylab.xlabel(xLabel)\n pylab.ylabel(yLabel)\n if not title == None:\n pylab.title(title)\n pylab.show()",
"def histogram(self, data):\n histogram_keys = ['bins', 'color', 'alpha', 'label']\n histogram_config = self.config.filter(histogram_keys, prefix='histogram_')\n\n _, _, bar = self.ax.hist(data, **histogram_config)\n\n return [bar]",
"def GetHistogram(self, label: 'unsigned short') -> \"itkHistogramD_Pointer\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS3IUS3_GetHistogram(self, label)",
"def distribution_magnitude_histogram(cur, var, table, label):\n x = select(cur,var, table)\n print(\"Number of entries: \", len(x))\n print(\"Maximum: \", max(x))\n print(\"Minimum: \", min(x))\n \n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n ax.set_xlabel(\"Sentiment Magnitude\")\n ax.set_ylabel(\"Number of Sentences\")\n fig.suptitle(label)\n ax.hist(x, bins = 20)\n plt.show()",
"def GetHistogram(self, label: 'short') -> \"itkHistogramD_Pointer\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterISS3ISS3_GetHistogram(self, label)",
"def makeHistogram(values, numBins, xLabel, yLabel, title=None):\r\n pylab.hist(values, bins = numBins)\r\n pylab.xlabel(xLabel)\r\n pylab.ylabel(yLabel)\r\n if title != None:\r\n pylab.title(title)\r\n pylab.show()",
"def update_histo_frame():\n min_histo.text = str(MIN_RANGE_F) # Display the legend\n max_histo.text = str(MAX_RANGE_F)\n\n histogram = np.zeros(GRID_AXIS) # Clear histogram accumulation array\n # Collect camera data and calculate the histogram\n for _row in range(0, GRID_AXIS):\n for _col in range(0, GRID_AXIS):\n histo_index = int(map_range(GRID_DATA[_col, _row], 0, 1, 0, GRID_AXIS - 1))\n histogram[histo_index] = histogram[histo_index] + 1\n\n histo_scale = np.max(histogram) / (GRID_AXIS - 1)\n if histo_scale <= 0:\n histo_scale = 1\n\n # Display the histogram\n for _col in range(0, GRID_AXIS):\n for _row in range(0, GRID_AXIS):\n if histogram[_col] / histo_scale > GRID_AXIS - 1 - _row:\n image_group[((_row * GRID_AXIS) + _col)].fill = index_to_rgb(\n round((_col / GRID_AXIS), 3)\n )\n else:\n image_group[((_row * GRID_AXIS) + _col)].fill = BLACK"
]
| [
"0.7061277",
"0.69501203",
"0.68699145",
"0.6706122",
"0.6534066",
"0.65103215",
"0.6489858",
"0.6466796",
"0.639807",
"0.63101625",
"0.62911344",
"0.6211254",
"0.61960334",
"0.61696315",
"0.61635846",
"0.61367637",
"0.6123929",
"0.6083236",
"0.6079656",
"0.6065166",
"0.60495555",
"0.6024803",
"0.6021307",
"0.60204214",
"0.6015875",
"0.60073584",
"0.600563",
"0.60028696",
"0.5989144",
"0.5989112"
]
| 0.82228994 | 0 |
A dual-income ruleset which uses simple rules where possible, intended mainly for testing. | def alice(
partner1_salary_compound_rate: float,
partner1_salary_plateau: float,
partner2_salary_compound_rate: float,
partner2_salary_plateau: float,
base_spending: float,
spending_luxury_compound_rate: float,
rrsp_interest_rate: float,
tfsa_interest_rate: float,
):
ruleset_func = ruleset.get_couple_ruleset(
salary_rules.get_compound_plateau(
partner1_salary_compound_rate, partner1_salary_plateau
),
salary_rules.get_compound_plateau(
partner2_salary_compound_rate, partner2_salary_plateau
),
couple_spending_rules.get_luxury_over_basic(
base_spending, spending_luxury_compound_rate
),
couple_savings_rules.get_equalizing_rrsp_only_split(),
rrsp_interest_rate,
tfsa_interest_rate,
)
return ruleset_func | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_rules():",
"def __init__(self, left_rule, right_rule):\n Rule.__init__(self)\n self.__subrules = [left_rule, right_rule]",
"def __init__(self, left_rule, right_rule):\n Rule.__init__(self)\n self.__autorule = None\n self.__subrules = [left_rule, right_rule]",
"def rules(self):\n self.rule1 = min(self.location_is_lessDemand, self.area_is_small, self.unfunishing)\n self.rule2 = min(self.location_is_lessDemand, max(self.area_is_small, self.area_is_average), self.access_is_good)\n self.rule3 = min(self.location_is_veryHighDemand, self.area_is_average, self.fac_is_low, self.access_is_average)\n self.rule4 = min(self.location_is_veryLessDemand, self.area_is_verysmall, self.fully_funishing)\n self.rule5 = min(self.location_is_lessDemand, self.fac_is_average, max(self.area_is_small, self.area_is_average))\n self.rule6 = min(max(self.location_is_lessDemand, self.location_is_averageDemand), self.access_is_good)\n self.rule7 = min(self.location_is_lessDemand, self.access_is_good, self.area_is_large, self.partially_funishing)\n self.rule8 = min(self.location_is_highDemand, self.access_is_good, max(self.bed_is_less, self.bath_is_average))\n self.rule9 = min(self.location_is_veryHighDemand, self.area_is_large, self.unfunishing)\n self.rule10 = min(self.access_is_good, self.area_is_average, (1 - self.unfunishing))\n self.rule11 = min(self.access_is_good, self.area_is_large, self.partially_funishing, self.bed_is_more, self.bath_is_more)",
"def scenario_scheme_revenue_rule(_m, y, s):\r\n\r\n # Net revenue obtained from thermal generators\r\n thermal = sum(\r\n m.p[g, y, s, t] * (m.EMISSIONS_RATE[g] - m.baseline[y]) * m.permit_price[y] for g in m.G_THERM for t in\r\n m.T)\r\n\r\n # Net revenue obtained from candidate renewable generators (existing renewables considered ineligible)\r\n renewables = sum(\r\n m.p[g, y, s, t] * (- m.baseline[y] * m.permit_price[y]) for g in m.G_C_WIND.union(m.G_C_SOLAR) for t in\r\n m.T)\r\n\r\n return m.RHO[y, s] * (thermal + renewables)",
"def test_rule_equalto():",
"def getRules(self):\n self._rules = {}\n _RuleSet = self._sets[self._currentSet - 1 ]\n for oneSet in _RuleSet :\n \n if len(oneSet) < 2 : \n pass \n \n for x in range(1, max(floor(len(oneSet) / 2),2) ):\n \n comb = combinations(oneSet, x)\n for item in comb:\n remaining = tuple(x for x in oneSet if x not in item)\n self._rules[(item,remaining)] = 0\n self._rules[(remaining,item)] = 0",
"def test_rule(self):\n rule = self.rule\n cases = [\n (\n \"A membrane that is part of a nucleus. Blah.\",\n \"membrane\",\n \"is part of a nucleus\",\n \"Blah.\",\n 0,\n ),\n (\n \"A membrane which is part of a nucleus. Blah.\",\n \"membrane\",\n \"is part of a nucleus\",\n \"Blah.\",\n 0,\n ),\n (\"A membrane that is part of a nucleus\", \"membrane\", \"is part of a nucleus\", None, 0),\n (\"A foo or a bar\", None, None, None, 2),\n (\"A membranXX that is part of a nucleXX\", \"membranXX\", \"is part of a nucleXX\", None, 2),\n (\n \"A foo membrane that is part of a nucleus\",\n \"foo membrane\",\n \"is part of a nucleus\",\n None,\n 1,\n ),\n (\n \"A nuclear membrane is a membrane that is part of a nucleus\",\n \"membrane\",\n \"is part of a nucleus\",\n None,\n 1,\n ),\n ]\n ldef = LogicalDefinitionAxiom(\n definedClassId=NUCLEAR_MEMBRANE,\n genusIds=[MEMBRANE],\n restrictions=[ExistentialRestrictionExpression(propertyId=PART_OF, fillerId=NUCLEUS)],\n )\n for case in cases:\n tdef, genus, differentia, gloss, expected_results = case\n pdef = rule.process_text_definition(tdef)\n self.assertEqual(genus, pdef.genus_text)\n self.assertEqual(differentia, pdef.differentia_text)\n self.assertEqual(gloss, pdef.gloss)\n results = list(rule.check_against_logical_definition(self.oi, pdef, ldef))\n self.assertEqual(\n expected_results,\n len(results),\n f\"check_against_logical_definition unexpected; Case: {case}\",\n )\n results = list(rule.evaluate(self.oi))\n for result in results:\n print(result)",
"def ko_rule(self):\n pass",
"def test():\n def showRules(rules):\n for r in rules:\n t,s,c = r\n print t,\"|\",s,c\n return \"\"\n ruletable = GentileRuleTable()\n from testdata.testtree import testtree\n testtree.appendXToTree()\n fetcher = GentileRuleFetcher(testtree, ruletable)\n print \"--- convertTokenToSourceString ---\"\n source14 = fetcher.convertTokensToSourceString(testtree.tree.node(14))\n source7 = fetcher.convertTokensToSourceString(testtree.tree.node(7))\n print 14, source14\n print 7, source7\n fetcher.initJoints()\n print \"--- findExactlyMatchingRules ---\"\n print 14, showRules(fetcher.findExactlyMatchingRules(14))\n print 7, showRules(fetcher.findExactlyMatchingRules(7))\n print \"--- findMergedMatchingRules ---\"\n print 14, showRules(fetcher.findMergedMatchingRules(14))\n print 7, showRules(fetcher.findMergedMatchingRules(7))\n print 1, showRules(fetcher.findMergedMatchingRules(1))\n print \"--- findRecontructMatchingRules ---\"\n print 14, showRules(fetcher.findRecontructMatchingRules(23))\n print \"--- findDepravedMatchingRules ---\"\n print 23, showRules(fetcher.findDepravedMatchingRules(23))\n print 7, showRules(fetcher.findDepravedMatchingRules(7))\n print 27, showRules(fetcher.findDepravedMatchingRules(27))",
"def generateRules(singleCovering, decisions):\n tempCovering = tupleToDict(singleCovering)\n tempDecisions = tupleToDict(decisions)\n\n coverDF = pd.DataFrame(tempCovering)\n decisionsDF = pd.DataFrame(tempDecisions)\n\n combinedDF = pd.concat([coverDF, decisionsDF], axis=1)\n\n ruleDF = combinedDF[combinedDF.iloc[:,-1] != 'madhu']\n # ruleDF = ruleDF.drop_duplicates()\n conceptblockDF = ruleDF.copy(deep=True)\n del conceptblockDF['class']\n\n ruleDict = conceptblockDF.T.to_dict().values()\n ruleTuple = dictToTuple(ruleDict)\n\n\n ruleset = set(ruleDF.index.values)\n\n for i in range(len(ruleTuple)):\n listofsets = []\n count = 0\n\n for j in range(len(ruleTuple[i])):\n # collect the cases that are satisfying a rule from the ruleTuple\n listofsets.append(set(combinedDF[combinedDF[ruleTuple[i][j][0]] == ruleTuple[i][j][1]].index.values))\n\n for m in range(len(listofsets)):\n if (len(listofsets) > 1):\n # drop the first condition from the rule\n appendlast = listofsets.pop(0)\n\n # compute the case Numbers thar are satifying the ruleTUple\n u = set.intersection(*listofsets)\n\n if (not u.issubset(ruleset)):\n # Check whether the remaining attributes satisfy the cases\n # if not append the condition to the attribute list\n listofsets.append(appendlast)\n elif(len(ruleTuple[i]) > 1):\n # if yes remove the dropped attribute from the list\n ruleTuple[i].pop(m-count)\n count = count + 1\n\n return list(set([tuple(i) for i in ruleTuple]))",
"def __init__(self, rules):\n self.rules = rules\n\n self._rhs_rules = defaultdict(list)\n self._rhs_unary_rules = defaultdict(list)\n\n self._nonterm = set(rule.lhs for rule in rules)\n self._term = set(token for rhs in chain(rule.rhs for rule in rules)\n for token in rhs if token not in self._nonterm)\n\n for rule in rules:\n _, rhs, _ = rule\n self._rhs_rules[rhs].append(rule)\n\n for rhs_rules in self._rhs_rules.values():\n rhs_rules.sort(key=lambda r: r.log_prob, reverse=True)\n\n self._is_cnf = all(len(rule.rhs) == 1\n or (len(rule.rhs) == 2\n and all(s in self._nonterm for s in rule.rhs))\n for rule in self.rules)",
"def define_expressions(m):\r\n\r\n def dual_objective_expression_rule(_m):\r\n \"\"\"Expression for dual objective function\"\"\"\r\n\r\n # Build limits\r\n t_1 = sum(- (m.mu_2[z, y] * m.SOLAR_BUILD_LIMITS[z]) - (m.mu_3[z, y] * m.WIND_BUILD_LIMITS[z]) - (\r\n m.mu_4[z, y] * m.STORAGE_BUILD_LIMITS[z]) for z in m.Z for y in m.Y)\r\n\r\n # Min power output\r\n t_2 = sum(\r\n m.sigma_1[g, y, s, t] * m.P_MIN[g] for g in m.G.difference(m.G_STORAGE) for y in m.Y for s in m.S for t\r\n in m.T)\r\n\r\n # Max power - existing generators\r\n t_3 = sum(\r\n - m.sigma_2[g, y, s, t] * m.P_MAX[g] * (1 - m.F[g, y]) for g in m.G_E_THERM for y in m.Y for s in m.S\r\n for t in m.T)\r\n\r\n # Max power - existing wind\r\n t_4 = sum(\r\n - m.sigma_4[g, y, s, t] * m.Q_W[g, y, s, t] * m.P_MAX[g] * (1 - m.F[g, y]) for g in m.G_E_WIND for y in\r\n m.Y for s in m.S for t in m.T)\r\n\r\n # Max power - existing solar\r\n t_5 = sum(\r\n - m.sigma_6[g, y, s, t] * m.Q_S[g, y, s, t] * m.P_MAX[g] * (1 - m.F[g, y]) for g in m.G_E_SOLAR for y in\r\n m.Y for s in m.S for t in m.T)\r\n\r\n # Max power - existing hydro\r\n t_6 = sum(\r\n - m.sigma_8[g, y, s, t] * m.P_H[g, y, s, t] * (1 - m.F[g, y]) for g in m.G_E_HYDRO for y in m.Y for s in\r\n m.S for t in m.T)\r\n\r\n # Max charging power - existing storage\r\n t_7 = sum(\r\n - m.sigma_11[g, y, s, t] * m.P_IN_MAX[g] * (1 - m.F[g, y]) for g in m.G_E_STORAGE for y in m.Y for s in\r\n m.S for t in m.T)\r\n\r\n # Max discharging power - existing storage\r\n t_8 = sum(\r\n - m.sigma_13[g, y, s, t] * m.P_OUT_MAX[g] * (1 - m.F[g, y]) for g in m.G_E_STORAGE for y in m.Y for s in\r\n m.S for t in m.T)\r\n\r\n # Max energy - existing storage units\r\n t_9 = sum(\r\n - m.sigma_16[g, y, s, t] * m.Q_MAX[g] for g in m.G_E_STORAGE for y in m.Y for s in m.S for t in m.T)\r\n\r\n # Min energy - interval end\r\n t_10 = sum(m.sigma_18[g, y, s] * m.Q_END_MIN[g] for g in m.G_STORAGE for y in m.Y for s in m.S)\r\n\r\n # Max energy - interval end\r\n t_11 = sum(- m.sigma_19[g, y, s] * m.Q_END_MAX[g] for g in m.G_STORAGE for y in m.Y for s in m.S)\r\n\r\n # Ramp-up constraint - generators\r\n t_12 = sum(\r\n - m.sigma_20[g, y, s, t] * m.RR_UP[g] for g in m.G_THERM.union(m.G_E_HYDRO) for y in m.Y for s in m.S\r\n for t in m.T)\r\n\r\n # Ramp-up constraint - initial power output - generators\r\n t_13 = sum(\r\n - m.sigma_20[g, y, s, m.T.first()] * m.P0[g, y, s] for g in m.G_THERM.union(m.G_E_HYDRO) for y in m.Y\r\n for s in m.S)\r\n\r\n # Ramp-down constraint - generators\r\n t_18 = sum(\r\n - m.sigma_23[g, y, s, t] * m.RR_DOWN[g] for g in m.G_THERM.union(m.G_E_HYDRO) for y in m.Y for s in m.S\r\n for t in m.T)\r\n\r\n # Ramp-down constraint - initial power output - generators\r\n t_19 = sum(\r\n m.sigma_23[g, y, s, m.T.first()] * m.P0[g, y, s] for g in m.G_THERM.union(m.G_E_HYDRO) for y in m.Y for\r\n s in m.S)\r\n\r\n # Min powerflow\r\n t_24 = sum(m.sigma_27[l, y, s, t] * m.POWERFLOW_MIN[l] for l in m.L for y in m.Y for s in m.S for t in m.T)\r\n\r\n # Max powerflow\r\n t_25 = sum(\r\n - m.sigma_28[l, y, s, t] * m.POWERFLOW_MAX[l] for l in m.L for y in m.Y for s in m.S for t in m.T)\r\n\r\n # Demand\r\n t_26 = sum(m.lamb[z, y, s, t] * m.DEMAND[z, y, s, t] for z in m.Z for y in m.Y for s in m.S for t in m.T)\r\n\r\n # Initial storage unit energy\r\n t_27 = sum(m.zeta_1[g, y, s, m.T.first()] * m.Q0[g, y, s] for g in m.G_STORAGE for y in m.Y for s in m.S)\r\n\r\n return (t_1 + t_2 + t_3 + t_4 + t_5 + t_6 + t_7 + t_8 + t_9 + t_10 + t_11 + t_12 + t_13 + t_18 + t_19 + t_24\r\n + t_25 + t_26 + 
t_27)\r\n\r\n # Dual objective expression\r\n m.DUAL_OBJECTIVE_EXPRESSION = Expression(rule=dual_objective_expression_rule)\r\n\r\n def scenario_revenue_rule(_m, y, s):\r\n \"\"\"Total revenue collected from wholesale electricity sales\"\"\"\r\n\r\n if y != m.Y.last():\r\n # Scaling factor\r\n scaling_factor = m.DELTA[y] * m.RHO[y, s]\r\n\r\n # Revenue from electricity sales (wholesale) = $/MWh x MWh\r\n return sum((m.lamb[z, y, s, t] / scaling_factor) * m.DEMAND[z, y, s, t] * m.RHO[y, s] for z in m.Z\r\n for t in m.T)\r\n\r\n else:\r\n # Scaling factor\r\n scaling_factor = m.DELTA[y] * m.RHO[y, s] * (1 + (1 / m.INTEREST_RATE))\r\n\r\n # Revenue from electricity sales (wholesale)\r\n return sum((m.lamb[z, y, s, t] / scaling_factor) * m.DEMAND[z, y, s, t] * m.RHO[y, s] for z in m.Z\r\n for t in m.T)\r\n\r\n # Revenue from wholesale electricity sales for each scenario\r\n m.SCENARIO_REVENUE = Expression(m.Y, m.S, rule=scenario_revenue_rule)\r\n\r\n def scenario_average_price_rule(_m, y, s):\r\n \"\"\"Average price for a given scenario\"\"\"\r\n\r\n return m.SCENARIO_REVENUE[y, s] / m.SCENARIO_DEMAND[y, s]\r\n\r\n # Scenario demand weighted average wholesale price\r\n m.SCENARIO_AVERAGE_PRICE = Expression(m.Y, m.S, rule=scenario_average_price_rule)\r\n\r\n def year_average_price_rule(_m, y):\r\n \"\"\"Average price for a given year\"\"\"\r\n\r\n # Total revenue\r\n return sum(m.SCENARIO_REVENUE[y, s] for s in m.S) / sum(m.SCENARIO_DEMAND[y, s] for s in m.S)\r\n\r\n # Year demand weighted average wholesale price\r\n m.YEAR_AVERAGE_PRICE = Expression(m.Y, rule=year_average_price_rule)\r\n\r\n return m",
"def scenario_revenue_rule(_m, y, s):\r\n\r\n if y != m.Y.last():\r\n # Scaling factor\r\n scaling_factor = m.DELTA[y] * m.RHO[y, s]\r\n\r\n # Revenue from electricity sales (wholesale) = $/MWh x MWh\r\n return sum((m.lamb[z, y, s, t] / scaling_factor) * m.DEMAND[z, y, s, t] * m.RHO[y, s] for z in m.Z\r\n for t in m.T)\r\n\r\n else:\r\n # Scaling factor\r\n scaling_factor = m.DELTA[y] * m.RHO[y, s] * (1 + (1 / m.INTEREST_RATE))\r\n\r\n # Revenue from electricity sales (wholesale)\r\n return sum((m.lamb[z, y, s, t] / scaling_factor) * m.DEMAND[z, y, s, t] * m.RHO[y, s] for z in m.Z\r\n for t in m.T)",
"def getSpecRules(self, rhs):\n if rhs not in self.itemSet:\n print('Please input a term contain in the term-set !')\n return None\n \n rules = dict()\n for key, value in self.freqSet.items():\n for item in value:\n if rhs.issubset(item) and len(item) > 1:\n item_supp = self.getSupport(item)\n item = item.difference(rhs)\n conf = item_supp / self.getSupport(item)\n if conf >= self.minConf:\n rules[item] = conf\n return rules",
"def makeRule(self, datapoint):\n\t\t\n\t\tant = []\n\t\tcons = []\n\t\tmembershipsFactors = []\n\n\t\top = \"and\"\n\n\t\t# define antecedent\n\t\tfor i, inp in enumerate(self.inputs):\n\t\t\tmemb = inp.calculate_memberships(datapoint[:-len(self.outputs)][i])\n\t\t\tmaxInMemb = (-1, \"\")\n\t\t\tfor key in memb:\n\t\t\t\tif (memb[key] > maxInMemb[0]):\n\t\t\t\t\tmaxInMemb = (memb[key], key)\n\t\t\tant.append(maxInMemb[1])\n\t\t\tmembershipsFactors.append(maxInMemb[0])\n\n\t\t# define consequent\n\t\tfor i, outp in enumerate(self.outputs):\n\t\t\tmemb = outp.calculate_memberships(datapoint[-len(self.outputs):][i])\n\t\t\tmaxInMemb = (-1, \"\")\n\t\t\tfor key in memb:\n\t\t\t\tif (memb[key] > maxInMemb[0]):\n\t\t\t\t\tmaxInMemb = (memb[key], key)\n\t\t\tcons.append(maxInMemb[1])\n\t\t\tmembershipsFactors.append(maxInMemb[0])\n\n\t\t# increase counter to keep track of amount of rules\n\t\tself.counter += 1\n\n\t\t# if (np.product(membershipsFactors) > 1.0):\n\t\t# \tprint membershipsFactors, np.product(membershipsFactors) (debug)\n\n\t\t# return the new rule and it's degree\n\t\treturn basic.Rule(self.counter, ant, op, cons[0], self.andMeth, self.orMeth), np.product(membershipsFactors)",
"def get_subset_rule(test_rule, rules):\n for rule in rules:\n if rule.action.is_chained() or rule.action.is_return():\n continue\n if is_subset(rule, test_rule):\n return rule\n return None",
"def test_dq_rules(self,DQ):\r\n pass",
"def generateAssociationRule(freqSet):",
"def test_RULE_90():\n\tk, outputs = 3, [0,1,0,1,1,0,1,0]\n\t# Prime Implicants\n\ttrue_pi0s = set(['020','121'])\n\ttrue_pi1s = set(['021','120'])\n\n\ttdt0, tdt1 = make_transition_density_tables(k=k, outputs=outputs)\n\tpi0s, pi1s = find_implicants_qm(tdt0) , find_implicants_qm(tdt1)\n\n\tassert (pi0s == true_pi0s) , ('Prime Implicants for 0 does not match. %s != %s' % (pi0s,true_pi0s))\n\tassert (pi1s == true_pi1s) , ('Prime Implicants for 1 does not match. %s != %s' % (pi1s,true_pi1s))\n\t# Two Symbols\n\ttrue_ts0s = [('121',[],[[0,2]]),('020',[],[[0,2]])]\n\ttrue_ts1s = [('120',[[0,2]],[])]\n\n\tts0s,ts1s = find_two_symbols_v2(k=k, prime_implicants=pi0s) , find_two_symbols_v2(k=k, prime_implicants=pi1s)\n\n\tassert (ts0s == true_ts0s) , ('Two Symbol for 0 does not match. %s != %s' % (ts0s,true_ts0s))\n\tassert (ts1s == true_ts1s) , ('Two Symbol for 1 does not match. %s != %s' % (ts1s,true_ts1s))",
"def get_rules(self, exclude_zero_coef=False, subregion=None):\n\n n_features = len(self.coef) - len(self.rules_without_feature_names_)\n rule_ensemble = list(self.rules_without_feature_names_)\n output_rules = []\n ## Add coefficients for linear effects\n for i in range(0, n_features):\n if self.lin_standardise:\n coef = self.coef[i] * self.friedscale.scale_multipliers[i]\n else:\n coef = self.coef[i]\n if subregion is None:\n importance = abs(coef) * self.stddev[i]\n else:\n subregion = np.array(subregion)\n importance = sum(abs(coef) * abs([x[i] for x in self.winsorizer.trim(subregion)] - self.mean[i])) / len(\n subregion)\n output_rules += [(self.feature_names_[i], 'linear', coef, 1, importance)]\n\n ## Add rules\n for i in range(0, len(self.rules_without_feature_names_)):\n rule = rule_ensemble[i]\n coef = self.coef[i + n_features]\n\n if subregion is None:\n importance = abs(coef) * (rule.support * (1 - rule.support)) ** (1 / 2)\n else:\n rkx = self.transform(subregion, [rule])[:, -1]\n importance = sum(abs(coef) * abs(rkx - rule.support)) / len(subregion)\n\n output_rules += [(rule.__str__(), 'rule', coef, rule.support, importance)]\n rules = pd.DataFrame(output_rules, columns=[\"rule\", \"type\", \"coef\", \"support\", \"importance\"])\n if exclude_zero_coef:\n rules = rules.ix[rules.coef != 0]\n return rules",
"def __init__(self, rules):\n self.rules = set(rules)\n self.products = []",
"def test_a1_check_rules(self):\n # Has rule\n rule = logic.check_rules(1, 1)\n self.assertEqual(rule, 1)\n rule = logic.check_rules(1, 2)\n self.assertEqual(rule, -1)\n rule = logic.check_rules(1, 4)\n self.assertEqual(rule, 3)\n rule = logic.check_rules(0, 3)\n self.assertEqual(rule, 4)\n rule = logic.check_rules(1, 8)\n self.assertEqual(rule, 3)\n rule = logic.check_rules(1, 0)\n self.assertEqual(rule, 1)\n\n # No rule match\n rule = logic.check_rules(0, 1)\n self.assertEqual(rule, -1)\n rule = logic.check_rules(0, 0)\n self.assertEqual(rule, -1)\n with self.assertRaises(ValueError):\n rule = logic.check_rules(1, -1)\n with self.assertRaises(ValueError):\n rule = logic.check_rules(1, 9)",
"def make_rules(UI):\n \n Conditionals = Conditional_Database(UI)\n location = UI.location\n \n \n Rules = []\n if location in ['Rio de Janeiro']:\n\n Rules.append(SDlib.Rule('Initial Closures', 1, \n func = lambda policy_input: Conditionals.Br_Rule1func(policy_input)))\n Rules.append(SDlib.Rule('Additional Closures', 2, \n func = lambda policy_input: Conditionals.Br_Rule2func(policy_input)))\n \n Rules.append(SDlib.Rule('Complete Lockdown', 3, \n func = lambda policy_input: Conditionals.Br_Rule3func(policy_input)))\n \n Rules.append(SDlib.Rule('Re-open Some Businesses', 4, \n func = lambda policy_input: Conditionals.Br_Rule4func(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing', 5, \n func = lambda policy_input: Conditionals.Br_Rule5func(policy_input)))\n Rules.append(SDlib.Rule('Order More Ventilators', 6, \n func = lambda policy_input: Conditionals.Br_Rule6func(policy_input)))\n Rules.append(SDlib.Rule('Pay More for Ventilators to Accelerate Delivery', 7, \n func = lambda policy_input: Conditionals.Br_Rule7func(policy_input)))\n\n elif location in ['Indonesia']:\n #National\n Rules.append(SDlib.Rule('Implement Some Restrictions Nationwide', 1, \n func = lambda policy_input: Conditionals.In_Rule1func(policy_input)))\n Rules.append(SDlib.Rule('Implement High Restrictions Nationwide', 2, \n func = lambda policy_input: Conditionals.In_Rule2func(policy_input)))\n Rules.append(SDlib.Rule('Relax Some Restrictions Nationwide', 3, \n func = lambda policy_input: Conditionals.In_Rule3func(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing Nationwide', 4, \n func = lambda policy_input: Conditionals.In_Rule4func(policy_input))) \n #Java\n Rules.append(SDlib.Rule('Implement Some Restrictions Java - Zonal', 5, \n func = lambda policy_input: Conditionals.In_Rule1func_j(policy_input)))\n Rules.append(SDlib.Rule('Implement High Restrictions Java - Zonal', 6, \n func = lambda policy_input: Conditionals.In_Rule2func_j(policy_input)))\n Rules.append(SDlib.Rule('Relax Some Restrictions Java - Zonal', 7, \n func = lambda policy_input: Conditionals.In_Rule3func_j(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing Java - Zonal', 8, \n func = lambda policy_input: Conditionals.In_Rule4func_j(policy_input))) \n\n #Sulawesi\n Rules.append(SDlib.Rule('Implement Some Restrictions Sulawesi - Zonal', 9, \n func = lambda policy_input: Conditionals.In_Rule1func_s(policy_input)))\n Rules.append(SDlib.Rule('Implement High Restrictions Sulawesi - Zonal', 10, \n func = lambda policy_input: Conditionals.In_Rule2func_s(policy_input)))\n Rules.append(SDlib.Rule('Relax Some Restrictions Sulawesi - Zonal', 11, \n func = lambda policy_input: Conditionals.In_Rule3func_s(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing Sulawesi - Zonal', 12, \n func = lambda policy_input: Conditionals.In_Rule4func_s(policy_input))) \n\n elif location in ['Chile']:\n Rules.append(SDlib.Rule('Initial Closures', 1, \n func = lambda policy_input: Conditionals.Ch_Rule1func(policy_input)))\n Rules.append(SDlib.Rule('Additional Closures', 2, \n func = lambda policy_input: Conditionals.Ch_Rule2func(policy_input)))\n \n Rules.append(SDlib.Rule('Complete Lockdown', 3, \n func = lambda policy_input: Conditionals.Ch_Rule3func(policy_input)))\n \n Rules.append(SDlib.Rule('Re-open Some Businesses', 4, \n func = lambda policy_input: Conditionals.Ch_Rule4func(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing', 5, \n func = lambda 
policy_input: Conditionals.Ch_Rule5func(policy_input)))\n Rules.append(SDlib.Rule('Order More Ventilators', 6, \n func = lambda policy_input: Conditionals.Ch_Rule6func(policy_input)))\n Rules.append(SDlib.Rule('Pay More for Ventilators to Accelerate Delivery', 7, \n func = lambda policy_input: Conditionals.Ch_Rule7func(policy_input)))\n \n elif location in ['Santiago']:\n Rules.append(SDlib.Rule('Initial Closures', 1, \n func = lambda policy_input: Conditionals.Sa_Rule1func(policy_input)))\n Rules.append(SDlib.Rule('Additional Closures', 2, \n func = lambda policy_input: Conditionals.Sa_Rule2func(policy_input)))\n \n Rules.append(SDlib.Rule('Complete Lockdown', 3, \n func = lambda policy_input: Conditionals.Sa_Rule3func(policy_input)))\n \n Rules.append(SDlib.Rule('Re-open Some Businesses', 4, \n func = lambda policy_input: Conditionals.Sa_Rule4func(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing', 5, \n func = lambda policy_input: Conditionals.Sa_Rule5func(policy_input)))\n \n if location in ['Querétaro']:\n\n Rules.append(SDlib.Rule('Initial Closures', 1, \n func = lambda policy_input: Conditionals.Br_Rule1func(policy_input)))\n Rules.append(SDlib.Rule('Additional Closures', 2, \n func = lambda policy_input: Conditionals.Br_Rule2func(policy_input)))\n \n Rules.append(SDlib.Rule('Complete Lockdown', 3, \n func = lambda policy_input: Conditionals.Br_Rule3func(policy_input)))\n \n Rules.append(SDlib.Rule('Re-open Some Businesses', 4, \n func = lambda policy_input: Conditionals.Br_Rule4func(policy_input)))\n Rules.append(SDlib.Rule('Relax Mandatory Social Distancing', 5, \n func = lambda policy_input: Conditionals.Br_Rule5func(policy_input)))\n Rules.append(SDlib.Rule('Order More Ventilators', 6, \n func = lambda policy_input: Conditionals.Br_Rule6func(policy_input)))\n Rules.append(SDlib.Rule('Pay More for Ventilators to Accelerate Delivery', 7, \n func = lambda policy_input: Conditionals.Br_Rule7func(policy_input)))\n\n return Rules",
"def dual_objective_expression_rule(_m):\r\n\r\n # Build limits\r\n t_1 = sum(- (m.mu_2[z, y] * m.SOLAR_BUILD_LIMITS[z]) - (m.mu_3[z, y] * m.WIND_BUILD_LIMITS[z]) - (\r\n m.mu_4[z, y] * m.STORAGE_BUILD_LIMITS[z]) for z in m.Z for y in m.Y)\r\n\r\n # Min power output\r\n t_2 = sum(\r\n m.sigma_1[g, y, s, t] * m.P_MIN[g] for g in m.G.difference(m.G_STORAGE) for y in m.Y for s in m.S for t\r\n in m.T)\r\n\r\n # Max power - existing generators\r\n t_3 = sum(\r\n - m.sigma_2[g, y, s, t] * m.P_MAX[g] * (1 - m.F[g, y]) for g in m.G_E_THERM for y in m.Y for s in m.S\r\n for t in m.T)\r\n\r\n # Max power - existing wind\r\n t_4 = sum(\r\n - m.sigma_4[g, y, s, t] * m.Q_W[g, y, s, t] * m.P_MAX[g] * (1 - m.F[g, y]) for g in m.G_E_WIND for y in\r\n m.Y for s in m.S for t in m.T)\r\n\r\n # Max power - existing solar\r\n t_5 = sum(\r\n - m.sigma_6[g, y, s, t] * m.Q_S[g, y, s, t] * m.P_MAX[g] * (1 - m.F[g, y]) for g in m.G_E_SOLAR for y in\r\n m.Y for s in m.S for t in m.T)\r\n\r\n # Max power - existing hydro\r\n t_6 = sum(\r\n - m.sigma_8[g, y, s, t] * m.P_H[g, y, s, t] * (1 - m.F[g, y]) for g in m.G_E_HYDRO for y in m.Y for s in\r\n m.S for t in m.T)\r\n\r\n # Max charging power - existing storage\r\n t_7 = sum(\r\n - m.sigma_11[g, y, s, t] * m.P_IN_MAX[g] * (1 - m.F[g, y]) for g in m.G_E_STORAGE for y in m.Y for s in\r\n m.S for t in m.T)\r\n\r\n # Max discharging power - existing storage\r\n t_8 = sum(\r\n - m.sigma_13[g, y, s, t] * m.P_OUT_MAX[g] * (1 - m.F[g, y]) for g in m.G_E_STORAGE for y in m.Y for s in\r\n m.S for t in m.T)\r\n\r\n # Max energy - existing storage units\r\n t_9 = sum(\r\n - m.sigma_16[g, y, s, t] * m.Q_MAX[g] for g in m.G_E_STORAGE for y in m.Y for s in m.S for t in m.T)\r\n\r\n # Min energy - interval end\r\n t_10 = sum(m.sigma_18[g, y, s] * m.Q_END_MIN[g] for g in m.G_STORAGE for y in m.Y for s in m.S)\r\n\r\n # Max energy - interval end\r\n t_11 = sum(- m.sigma_19[g, y, s] * m.Q_END_MAX[g] for g in m.G_STORAGE for y in m.Y for s in m.S)\r\n\r\n # Ramp-up constraint - generators\r\n t_12 = sum(\r\n - m.sigma_20[g, y, s, t] * m.RR_UP[g] for g in m.G_THERM.union(m.G_E_HYDRO) for y in m.Y for s in m.S\r\n for t in m.T)\r\n\r\n # Ramp-up constraint - initial power output - generators\r\n t_13 = sum(\r\n - m.sigma_20[g, y, s, m.T.first()] * m.P0[g, y, s] for g in m.G_THERM.union(m.G_E_HYDRO) for y in m.Y\r\n for s in m.S)\r\n\r\n # Ramp-down constraint - generators\r\n t_18 = sum(\r\n - m.sigma_23[g, y, s, t] * m.RR_DOWN[g] for g in m.G_THERM.union(m.G_E_HYDRO) for y in m.Y for s in m.S\r\n for t in m.T)\r\n\r\n # Ramp-down constraint - initial power output - generators\r\n t_19 = sum(\r\n m.sigma_23[g, y, s, m.T.first()] * m.P0[g, y, s] for g in m.G_THERM.union(m.G_E_HYDRO) for y in m.Y for\r\n s in m.S)\r\n\r\n # Min powerflow\r\n t_24 = sum(m.sigma_27[l, y, s, t] * m.POWERFLOW_MIN[l] for l in m.L for y in m.Y for s in m.S for t in m.T)\r\n\r\n # Max powerflow\r\n t_25 = sum(\r\n - m.sigma_28[l, y, s, t] * m.POWERFLOW_MAX[l] for l in m.L for y in m.Y for s in m.S for t in m.T)\r\n\r\n # Demand\r\n t_26 = sum(m.lamb[z, y, s, t] * m.DEMAND[z, y, s, t] for z in m.Z for y in m.Y for s in m.S for t in m.T)\r\n\r\n # Initial storage unit energy\r\n t_27 = sum(m.zeta_1[g, y, s, m.T.first()] * m.Q0[g, y, s] for g in m.G_STORAGE for y in m.Y for s in m.S)\r\n\r\n return (t_1 + t_2 + t_3 + t_4 + t_5 + t_6 + t_7 + t_8 + t_9 + t_10 + t_11 + t_12 + t_13 + t_18 + t_19 + t_24\r\n + t_25 + t_26 + t_27)",
"def test_create_rule(self):\n pass",
"def _parse_rules(self, model, comp, node):\n parent = node\n formulas = {}\n # Create variables with assignment rules (all except derivatives)\n node = dom_child(parent, 'assignmentRule')\n while node:\n var = self._convert_name(str(node.getAttribute('variable')).strip())\n if var in comp:\n self.log('Parsing assignment rule for <' + str(var) + '>.')\n var = comp[var]\n var.set_rhs(parse_mathml_rhs(\n dom_child(node, 'math'), comp, self))\n else:\n raise SBMLError('Assignment found for unknown parameter: \"'\n + var + '\".')\n node = dom_next(node, 'assignmentRule')\n # Create variables with rate rules (states)\n node = dom_child(parent, 'rateRule')\n while node:\n var = self._convert_name(str(node.getAttribute('variable')).strip())\n if var in comp:\n self.log('Parsing rate rule for <' + var + '>.')\n var = comp[var]\n ini = var.rhs()\n ini = ini.eval() if ini else 0\n var.promote(ini)\n var.set_rhs(parse_mathml_rhs(\n dom_child(node, 'math'), comp, self))\n else:\n raise SBMLError('Derivative found for unknown parameter: <'\n + var + '>.')\n node = dom_next(node, 'rateRule')",
"def __add__(self, second_rule):\n return AndRule(self, second_rule)",
"def hrules(self):\n ...",
"def test_RULE_110():\n\tk, outputs = 3, [0,1,1,1,0,1,1,0]\n\n\ttrue_pi0s = set(['200','111'])\n\ttrue_pi1s = set(['021','201','012','210'])\n\n\ttdt0, tdt1 = make_transition_density_tables(k=k, outputs=outputs)\n\tpi0s, pi1s = find_implicants_qm(tdt0) , find_implicants_qm(tdt1)\n\n\tassert (pi0s == true_pi0s) , ('Prime Implicants for 0 does not match. %s != %s' % (pi0s,true_pi0s))\n\tassert (pi1s == true_pi1s) , ('Prime Implicants for 1 does not match. %s != %s' % (pi1s,true_pi1s))\n\t# Two Symbols\n\ttrue_ts0s = [('200',[],[[1,2]]),('111',[],[[0,1,2]])]\n\ttrue_ts1s = [('201',[[0,1]],[]),('012',[[1,2]],[]),('201',[[1,2]],[]),('012',[[0,2]],[])]\n\n\tts0s,ts1s = find_two_symbols_v2(k=k, prime_implicants=pi0s) , find_two_symbols_v2(k=k, prime_implicants=pi1s)\n\n\tassert (ts0s == true_ts0s) , ('Two Symbol for 0 does not match. %s != %s' % (ts0s,true_ts0s))\n\tassert (ts1s == true_ts1s) , ('Two Symbol for 1 does not match. %s != %s' % (ts1s,true_ts1s))"
]
| [
"0.5862188",
"0.5724831",
"0.5615715",
"0.5579088",
"0.54993427",
"0.54069495",
"0.5384174",
"0.532144",
"0.53145355",
"0.53075856",
"0.5280832",
"0.5273101",
"0.5262033",
"0.52355766",
"0.5225133",
"0.52012074",
"0.5176728",
"0.51510364",
"0.51488656",
"0.51320904",
"0.51302737",
"0.51236176",
"0.5090344",
"0.50575644",
"0.50526047",
"0.5050327",
"0.50460196",
"0.5044658",
"0.50269175",
"0.5009003"
]
| 0.60019565 | 0 |
Save trainable variables in the model to an npz file with the current value of each variable in tf.trainable_variables(). | def save_trainable_variables(self, sess, savefn):
    state = getattr(self, 'state', {})
    utils.train.save_trainable_variables(
        sess, savefn, self._scope, **state) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save(self, model_out_file):\n\t\tvariables_dict = {v.name: v for v in tf.global_variables()}\n\t\tvalues_dict = self.sess.run(variables_dict)\n\t\tnp.savez(open(model_out_file, 'wb'), **values_dict)",
"def savable_variables(self):\n return tf.global_variables()",
"def save(filename, vars):\n p = {}\n for var in vars:\n p[var.name] = var\n\n # 3. Evaluate all tensors at once\n keys = list(p.keys())\n values = tf.get_default_session().run([p[k] for k in keys])\n p = dict(zip(keys, values))\n\n # 3. Write.\n np.savez(filename, **p)",
"def save_variables(self, sess=None):\n if self.is_built:\n self.saver.save(sess=sess, save_path=self.checkpoint_dir)\n else:\n raise RuntimeError(\"You must build the model before save model's variables.\")",
"def save_network(session, tf_variables, file_path):\n variable_values = session.run(tf_variables)\n with open(file_path, mode='wb') as f:\n pickle.dump(variable_values, f)",
"def load_trainable_variables (self, sess, savefn):\r\n self.state = utils.train.load_trainable_variables(sess, savefn)",
"def save_params_to_pickle_file(session: tf.Session,\n params_filename: Text) -> None:\n params = {}\n for var in tf.trainable_variables():\n params[var.name] = var.eval(session=session)\n with open(params_filename, 'wb') as f:\n pickle.dump(params, f, pickle.HIGHEST_PROTOCOL)",
"def save(self):\n\n self.saver.save(self.sess, self.path + '/tensorflow-model', global_step=self.counter.count)",
"def save_npz(save_dict={}, name='model.npz'):\n rename_dict = {}\n for k, value in enumerate(save_dict):\n rename_dict.update({'param'+str(k) : value.eval()})\n np.savez(name, **rename_dict)\n print('Model is saved to: %s' % name)",
"def save_model(self, filename='model.pt'):\n checkpoint = {\n 'input_size': self.linear_layers[0].in_features,\n 'output_size': self.linear_layers[-1].out_features,\n 'hidden_layers': [layer.out_features for layer in self.linear_layers[:-1]],\n 'state_dict': self.state_dict()}\n torch.save(checkpoint, filename)",
"def save_checkpoint_numpy(self,\n session: tf.Session,\n global_step: int):\n\n checkpoints_path = pathlib.Path(self.info.checkpoint_path)\n if not checkpoints_path.exists():\n checkpoints_path.mkdir()\n\n checkpoint_path = checkpoints_path / str(global_step)\n assert not checkpoint_path.exists()\n\n checkpoint_path.mkdir()\n\n values = session.run(self.__variables)\n\n # Save each variable\n files = {}\n for variable, value in zip(self.__variables, values):\n file_name = tf_util.variable_name_to_file_name(\n tf_util.strip_variable_name(variable.name))\n file_name = f\"{file_name}.npy\"\n files[variable.name] = file_name\n\n np.save(str(checkpoint_path / file_name), value)\n\n # Save header\n with open(str(checkpoint_path / \"info.json\"), \"w\") as f:\n f.write(json.dumps(files))\n\n # Delete old checkpoints\n for c in checkpoints_path.iterdir():\n if c.name != checkpoint_path.name:\n util.rm_tree(c)",
"def list_trainable_variables(self):\n self.variables_to_train = None\n if not self.train_encoder:\n self.variables_to_train = list(set(tf.trainable_variables()) - set(self.autoencoder_variables))",
"def save_trained_model(self, filename):\n d = self.pack_npz()\n with open(filename, 'wb') as f:\n np.savez(f, base_str=super(SpatialGP, self).__repr_base_params__(), **d)",
"def write_model_data(model, filename):\n data = lasagne.layers.get_all_param_values(model)\n filename = os.path.join('./', filename)\n filename = '%s.%s' % (filename, 'params')\n with open(filename, 'w+') as f:\n pickle.dump(data, f)",
"def save_model(self):\n self.pred_net.save((self.save_path / \"iqn_pred_net\").absolute().as_posix())\n self.target_net.save((self.save_path / \"iqn_target_net\").absolute().as_posix())",
"def save_network(self, sess, filename):\n dir_path = os.path.dirname(os.path.realpath(__file__))\n dir_path += '/Models/'\n dir_path += filename\n saver = tf.train.Saver()\n saver.save(sess, dir_path)",
"def save_state(self, file):\n np.savez(file, z_mn=self.z_mn, theta=self.theta, phi=self.phi,\n z_best=self.z_best, ll_best=self.ll_best, log=self.log)",
"def save_training(self):\n\n filename = str(hashlib.sha1(str(self.training_data).encode(\"utf-8\"))\n .hexdigest())\n path = \"./training/\" + filename + \".json\"\n\n data = {\n \"states\": self.states,\n \"transitions\": self.transitions,\n \"matrix\": self.matrix.tolist()\n }\n\n with open(path, \"w\") as outfile:\n json.dump(data, outfile)",
"def write_model_data(model, filename):\n data = lasagne.layers.get_all_param_values(model)\n filename = os.path.join('./', filename)\n filename = '%s.%s' % (filename, PARAM_EXTENSION)\n with open(filename, 'w') as f:\n pickle.dump(data, f)",
"def save_checkpoint_manual(model: LFADS, path: str):\n model_wts = [v.numpy() for v in model.trainable_variables]\n optim_wts = model.optimizer.get_weights()\n checkpoint = {\"model\": model_wts, \"optimizer\": optim_wts}\n with open(path, \"wb\") as fout:\n pickle.dump(checkpoint, fout)",
"def log_trainable_variables(self):\n var_names = list(self.trainable_variables.keys())\n self.logger.log_trainable_variables(var_names)",
"def save_trained_parameters(sess, graph, keys):\n \n output_graph_def = graph_util.convert_variables_to_constants(\n sess, graph.as_graph_def(), [flags.final_tensor_name])\n with gfile.FastGFile(flags.output_graph, 'wb') as f:\n f.write(output_graph_def.SerializeToString())\n with gfile.FastGFile(flags.output_labels, 'w') as f:\n f.write('\\n'.join(keys) + '\\n')",
"def save_model(self):\n\n self.check_model()\n\n with open(self.filename, 'wb') as file:\n pickle.dump({'model': self.model, 'vec': self.vectorizer, 'vec_data': self.vectorized_data,\n 'df': self.df_topic_keywords}, file)",
"def save_model_checkpoint(model, optimizer, global_step, epoch_info, file_name):\n output = {\n \"model\" : model.state_dict(),\n \"optimizer\" : optimizer.state_dict(),\n \"global_step\" : global_step + 1,\n \"epoch_info\" : epoch_info\n }\n torch.save(output, file_name)",
"def save_data_to_disk(self):\n Omega_M = self.theta_fid[0]\n for key in self.data.keys():\n np.save(f'./preloaded_data/{Omega_M}_{self.delta_theta[0]}_{key}.npy', self.data[key])",
"def save(estimator, path):\n saver = tf.train.Saver()\n if \"/\" not in path:\n path = \"./\" + path\n saver.save(estimator.sess, path + \".ckpt\")\n\n save_dr = directRanker()\n for key in estimator.get_params():\n # ToDo: Need to be fixed to also restore the cost function\n if key == \"cost\":\n save_dr.__setattr__(key, None)\n else:\n save_dr.__setattr__(key, estimator.get_params()[key])\n\n with open(path + \".pkl\", 'wb') as output:\n pickle.dump(save_dr, output, 0)",
"def _save_model(graph_or_sess):\r\n if isinstance(graph_or_sess, tf.Graph):\r\n ops = graph_or_sess.get_operations()\r\n for op in ops:\r\n if 'variable' in op.type.lower():\r\n raise ValueError('Please input a frozen graph (no variables). Or pass in the session object.')\r\n\r\n with graph_or_sess.as_default():\r\n sess = tf.Session(config=configProto)\r\n\r\n fake_var = tf.Variable([0.0], name=\"fake_var\")\r\n sess.run(tf.global_variables_initializer())\r\n else:\r\n sess=graph_or_sess\r\n\r\n PATH = os.path.join(\"model\", \"tmp-model\")\r\n make_dir(path = os.path.dirname(PATH))\r\n saver = tf.train.Saver()\r\n #i should deal with the case in which sess is closed.\r\n saver.save(sess, PATH)\r\n\r\n if isinstance(graph_or_sess, tf.Graph):\r\n sess.close()\r\n\r\n return PATH + \".meta\"",
"def variable_progression():\n\t# files = glob.glob('parameter_checkpoints/epoch-*[!.meta]')\n\tfiles = glob.glob('parameter_checkpoints/epoch-*')\n\n\t# reorder epochs by 'human order' otherwise it would order it as 1,110,12,...\n\t# http://stackoverflow.com/questions/5967500/how-to-correctly-sort-a-string-with-a-number-inside\n\tdef atoi(text):\n\t return int(text) if text.isdigit() else text\n\n\tdef natural_keys(text):\n\t '''\n\t alist.sort(key=natural_keys) sorts in human order\n\t http://nedbatchelder.com/blog/200712/human_sorting.html\n\t (See Toothy's implementation in the comments)\n\t '''\n\t return [ atoi(c) for c in re.split('(\\d+)', text) ]\n\n\tfiles.sort(key=natural_keys)\n\n\tx, W, bh, bv = rbm.get_variables()\n\ttrainable_vars = [W, bh, bv]\n\n\tsaver = tf.train.Saver(trainable_vars)\t# restore the weights and biases of the trained model\n\n\tweights = []\n\tbhs = []\n\tbvs = []\n\twith tf.Session() as sess:\n\t\tinit = tf.initialize_all_variables()\t\n\t\tsess.run(init)\n\t\t# iterate through each saved epoch checkpoint, and add the W, bh, and bv matrices to their\n\t\t# respective lists\n\t\tfor f in files:\n\t\t\tsaver.restore(sess, f)\t\t# load the saved weights and biases from a given epoch checkpoint file\n\t\t\tweights.append(W.eval())\t\n\t\t\tbhs.append(bh.eval())\n\t\t\tbvs.append(bv.eval())\n\n\treturn weights, bhs, bvs",
"def get_variables_to_train(flags):\n if flags.trainable_scopes is None:\n # print(tf.trainable_variables())\n return tf.trainable_variables()\n else:\n scopes = [scope.strip() for scope in flags.trainable_scopes.split(',')]\n\n variables_to_train = []\n for scope in scopes:\n variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)\n variables_to_train.extend(variables)\n return variables_to_train",
"def _variables_to_save(self):\n save_vars = self.weights + self.biases\n save_names = [_.name for _ in save_vars]\n save_dict = {name: var for name, var in zip(save_names, save_vars)}\n return save_dict"
]
| [
"0.77411115",
"0.7342082",
"0.7287824",
"0.7103885",
"0.688335",
"0.6768757",
"0.66715765",
"0.66602504",
"0.65303904",
"0.64213234",
"0.6400361",
"0.6381975",
"0.6329674",
"0.6326982",
"0.62663496",
"0.6257402",
"0.62162197",
"0.6214965",
"0.62076235",
"0.61964625",
"0.61960953",
"0.6194839",
"0.6177405",
"0.61773753",
"0.6166384",
"0.6166159",
"0.6126703",
"0.6117842",
"0.6105535",
"0.6087731"
]
| 0.774512 | 0 |
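The record above defers to a utils.train helper that is not shown anywhere in the dump. Below is a minimal sketch of what such a helper might do for a TF1-style session, following the np.savez pattern several of the negatives use; the body and the extra_state handling are assumptions, not the actual library implementation.

import numpy as np
import tensorflow as tf

def save_trainable_variables(sess, savefn, scope=None, **extra_state):
    # evaluate every trainable variable in the scope once, keyed by its name
    var_list = tf.trainable_variables(scope=scope)
    values = sess.run({v.name: v for v in var_list})
    # extra_state lets callers persist non-variable bookkeeping alongside the weights
    np.savez(savefn, **values, **extra_state)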
Load trainable variables from a saved file. | def load_trainable_variables(self, sess, savefn):
self.state = utils.train.load_trainable_variables(sess, savefn) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _load_training_data(self):\n self._save_training_data()",
"def load_params_from_pickle_file(session: tf.Session,\n params_filename: Text) -> None:\n with open(params_filename, 'rb') as f:\n params = pickle.load(f)\n for var in tf.trainable_variables():\n session.run(var.assign(params[var.name]))",
"def load_variables(cls):\n cls._variablesDict = fileops.get_json_dict(cls.get_variables_filepath())",
"def load(self, sess, file_path, verbose=True):\n if(verbose): print(\"Loading model from: \" + str(file_path))\n self.tf_saver.restore(sess, file_path)\n if(verbose): print(\"Done!\")",
"def load_auditory_model_vars(self, sess):\n self.sess = sess\n for network_key in sorted(self.config_recognition_networks.keys()):\n fn_ckpt = self.config_recognition_networks[network_key]['fn_ckpt']\n saver0 = self.config_recognition_networks[network_key]['saver0']\n saver1 = self.config_recognition_networks[network_key]['saver1']\n print('Loading `{}` variables from {}'.format(network_key, fn_ckpt))\n saver0.restore(self.sess, fn_ckpt)\n saver1.restore(self.sess, fn_ckpt)\n self.vars_loaded = True",
"def load(self):\n self.word2vec, self.img2sentence, self.word_freq, self.num_words, self.word2idx, self.idx2word = pickle.load(open(self.save_file, 'rb'))",
"def load(filename, vars, batch_size=10):\n p = np.load(filename)\n ops = []\n feed_dict = {}\n\n with tf.variable_scope('load'):\n for var in vars:\n if var.name not in p.keys():\n continue\n\n # Create placeholder.\n placeholder = tf.placeholder(var.dtype)\n feed_dict[placeholder] = p[var.name]\n\n # Create assign op for normal vars.\n ops.append(tf.assign(var, placeholder, validate_shape=False).op)\n\n if ops:\n for ofs in range(0, len(ops), batch_size):\n tf.get_default_session().run(ops[ofs:ofs+batch_size], feed_dict)",
"def load_checkpoint(self, file):\n \"\"\"Load \"\"\"\n chkpnt = torch.load(file)\n self.load_state_dict(chkpnt['model_state_dict'])",
"def load_variables_from_checkpoint(sess, start_checkpoint):\n saver = tf.train.Saver(tf.global_variables())\n saver.restore(sess, start_checkpoint)",
"def load_variables_from_checkpoint(sess, start_checkpoint):\n saver = tf.train.Saver(tf.global_variables())\n saver.restore(sess, start_checkpoint)",
"def save_trainable_variables (self , sess , savefn):\r\n state = getattr (self , 'state' , {})\r\n utils.train.save_trainable_variables(\r\n sess, savefn, self._scope, **state )",
"def load_data_pickle(self, load_full=False):\n self.train = pd.read_pickle('../input/train_mod.pkl')\n self.test = pd.read_pickle('../input/test_mod.pkl')\n if load_full:\n self.train_full = pd.read_pickle('../input/train_full_mod.pkl')",
"def load_network(self, sess, filename):\n dir_path = os.path.dirname(os.path.realpath(__file__))\n dir_path += '/Models/'\n dir_path += filename\n saver = tf.train.Saver()\n saver.restore(sess, dir_path)",
"def load_training_data(file_path):\n return load_data(file_path)",
"def load(self):\n\n X_train, y_train, X_test, y_test, variable_types, name = _load_data(\n self.task_id)\n\n self.X_train = X_train\n self.y_train = y_train\n self.X_test = X_test\n self.y_test = y_test\n self.variable_types = variable_types\n self.name = name\n\n return self.X_train, self.y_train, self.X_test, self.y_test",
"def load_training(self):\n path = \"./training/\" + self.training + \".json\"\n\n data = {}\n\n with open(path, \"r\") as infile:\n data = json.load(infile)\n\n self.states = data[\"states\"]\n self.transitions = data[\"transitions\"]\n self.matrix = data[\"matrix\"]",
"def load_classification_parameters(sess, ckpt_file, scope):\n class_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope)\n class_saver = tf.train.Saver(var_list=map_to_scope(class_vars))\n class_saver.restore(sess, ckpt_file)",
"def load_cleaned_data(self):\n try:\n self.train = pd.read_pickle('../input/train_clean.pkl')\n self.test = pd.read_pickle('../input/test_clean.pkl')\n except FileNotFoundError:\n self.load_raw_data()",
"def load_data(\n self, file_path: str = os.path.join(os.getcwd(), \"data_breast_cancer.p\")\n ) -> None:\n with open(file_path, \"rb\") as file:\n data = pickle.load(file)\n self.x_train, self.y_train = data[\"x_train\"], data[\"y_train\"]\n self.x_test, self.y_test = data[\"x_test\"], data[\"y_test\"]",
"def load(self, model_in_file):\n\t\tself.build_model()\n\n\t\tnpz_data = np.load(model_in_file)\n\t\tvalues_dict = {f: npz_data[f] for f in npz_data.files}\n\t\tops = []\n\t\tfor v in tf.global_variables():\n\t\t\tif v.name in values_dict:\n\t\t\t\tops.append(v.assign(values_dict[v.name]))\n\n\t\t#pdb.set_trace()\n\t\t#ops = [v.assign(values_dict[v.name]) for v in tf.global_variables()]\n\t\tself.sess.run(ops)",
"def load (self, filename) :\n\t\tserialFile = open (filename, \"rb\")\n\t\tself.production_rules = pickle.load (serialFile)\n\t\tself.unitrelation = pickle.load (serialFile)\n\t\tself.labels = pickle.load (serialFile)\n\t\tself.keeper = pickle.load (serialFile)\n\t\tself.strnodes = pickle.load(serialFile)\n\t\tself.tokens = pickle.load (serialFile)\n\t\tserialFile.close()",
"def load_model(self, file_name):\n with open(file_name, 'rb') as file:\n self.lin_reg = pickle.load(file)",
"def load(self, filename):\n with open(filename, 'rb') as f:\n data = pickle.load(f)\n # Set biases and weights\n self.W_input_to_hidden = data['wi']\n self.W_hidden_to_output = data['wo']",
"def read_variables(fpath):\n from tensorflow.python import pywrap_tensorflow\n reader = pywrap_tensorflow.NewCheckpointReader(fpath)\n var_to_shape_map = reader.get_variable_to_shape_map()\n variables = {}\n for k in var_to_shape_map.keys():\n variables[k] = reader.get_tensor(k)\n return variables",
"def load(self, filename):\n with open(filename, 'rb') as f:\n data = pickle.load(f)\n # Set biases and weights\n self.W_input_to_hidden = data['wi']\n self.W_hidden_to_hidden = data['wh']\n self.W_hidden_to_output = data['wo']",
"def load_model_params(self, full_path):\n \n print(\"Loading model parameters from %s\"%full_path)\n with open (full_path, 'rb') as f:\n \n self.theta = cPickle.load(f)\n \n if self.num_hidden == True or (self.num_hidden > 0):\n \n self.W, self.b, self.bhid = self.theta\n \n else:\n \n self.W, self.b = self.theta",
"def train(self, trainfile):",
"def load_training_set():\n global training_set\n f = gzip.open('mnist.pkl.gz', 'rb')\n train, valid, test = cPickle.load(f)\n [training_set, training_labels] = train\n [validation_set, validation_labels] = valid\n [testing_set, testing_labels] = test\n training_set = np.concatenate((training_set, validation_set))\n f.close()\n np.random.shuffle(training_set)",
"def _loadTrain(self, features, labels):\n\t\tself.trainX_, self.trainY_, self.trainLabel_ = self.__load(features, labels)",
"def load_model(self, ckpt_fn):\n checkpoint = torch.load(ckpt_fn)\n self.net_.load_state_dict(checkpoint[\"model\"])\n self.optimizer_.load_state_dict(checkpoint[\"optimizer\"])\n self.epoch_ = checkpoint[\"epoch\"]\n self.global_step_ = checkpoint[\"global_step\"]\n self.model_samples_ = deque(checkpoint[\"model_samples\"])\n self.sampler.load_state_dict(checkpoint[\"sampler_state\"])\n self.ais_loss.load_state_dict(checkpoint[\"ais_state\"])\n self.replay_prob = checkpoint[\"replay_prob\"]\n self.max_replay = checkpoint[\"max_replay\"]"
]
| [
"0.71960396",
"0.70627034",
"0.6982785",
"0.69725084",
"0.6921597",
"0.6917487",
"0.6774719",
"0.6695076",
"0.6676079",
"0.6676079",
"0.66635466",
"0.6660492",
"0.6598899",
"0.6594415",
"0.6560092",
"0.6553832",
"0.6492643",
"0.64879227",
"0.6473616",
"0.64683986",
"0.64595157",
"0.6428966",
"0.6413619",
"0.64041543",
"0.63882434",
"0.63835734",
"0.63720375",
"0.63623315",
"0.63333327",
"0.63304234"
]
| 0.82551587 | 0 |
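The loading counterpart, again an assumed sketch rather than the real utils.train.load_trainable_variables: each stored array is assigned back to the trainable variable with the matching name, and anything left over in the archive is returned as state.

import numpy as np
import tensorflow as tf

def load_trainable_variables(sess, savefn):
    # savefn is expected to include the .npz extension here
    archive = np.load(savefn)
    by_name = {v.name: v for v in tf.trainable_variables()}
    # assign stored arrays back to the matching variables in one run call
    sess.run([tf.assign(by_name[k], archive[k]) for k in archive.files if k in by_name])
    # leftover entries are treated as saved state and handed back to the caller
    return {k: archive[k] for k in archive.files if k not in by_name}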
Call func by selecting appropriate arguments from an argdict as well as kwargs | def callFuncBasedOnDict(func, argdict, **kwargs):
if argdict is None:
argdict = {}
seldict = selectArgsFromDict(func, argdict)
if kwargs is not None:
seldict.update(kwargs)
return func(**seldict) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def selectArgsFromDict(func, argdict):\n return dict([(i, argdict[i]) for i in getArgs(func) if i in argdict])",
"def kwargs(kwargs):\n run_kwargs(kwargs)",
"def exec_params(call, *args, **kwargs):\n arg_spec = getattr(call, '_argspec', None)\n if arg_spec and not arg_spec.keywords:\n kwargs = {key: value for key, value in kwargs.iteritems()\n if key in arg_spec.args}\n return call(*args, **kwargs)",
"def apply(self, func):\r\n return func(**self.kwargs)",
"def __call__(self, *ar, **kw):\n\t\tkw = {**self.default_kw, **kw} # add any default keywords\n\t\tkw = {k:v for k,v in kw.items() if self.is_kwarg_valid(k)} # remove non valid keywords (keywords that are not in base func)\n\n\t\t# selectively get the kwargs according to the user\n\t\tif self.ignore_kw == \"ALL\":\n\t\t\tkw = {}\n\t\telif type(self.ignore_kw) == list:\n\t\t\tkw = {k:v for k,v in kw.items() if not k in self.ignore_kw}\n\t\telse:\n\t\t\traise Exception(\"self.ignore_kw must be list or ALL, but is:\", self.ignore_kw)\n\t\t\n\n\t\tassert self.check(ar, is_check_verbose=True), \"Checks have failed on given parameters %s for %s\"%(ar, self.__class__.__name__)\n\t\treturn self.base_func(*self.additional_check(ar), **kw)",
"def fn(*args, **kwargs):\n pass",
"def _run_kwargs(cls, kwargs: Dict[str, Any]):\n parser = cls.setup_args()\n opt = parser.parse_kwargs(**kwargs)\n return cls._run_from_parser_and_opt(opt, parser)",
"def expand_call(kargs):\n func = kargs['func']\n del kargs['func']\n out = func(**kargs)\n return out",
"def fun_par_dict(fun: Callable, *args):\n if len(args) > 0:\n return fun(*args[:-1], **args[-1])\n else:\n return fun()",
"def test_onearg_and_keyword(self):\n varargs = (12,)\n kwargs = {'default' : 13}\n method = getattr(self.foo,'f_onearg_and_default')\n var_dict = reassign_function_arguments(method, varargs, kwargs)\n self.assert_(var_dict['arg1'] == 12)\n self.assert_(var_dict['default'] == 13)\n self.assert_(len(var_dict) == 2)",
"def kwargs_from_call(param_names: List[str], kwdefaults: Dict[str, Any], args: Tuple[Any, ...],\n kwargs: Dict[str, Any]) -> MutableMapping[str, Any]:\n # (Marko Ristin, 2020-12-01)\n # Insert _ARGS and _KWARGS preemptively even if they are not needed by any contract.\n # This makes the code logic much simpler since we do not explicitly check if a contract would\n # need them, though it might incur a subtle computational overhead\n # (*e.g.*, when the contracts do not need them or don't use any argument at all).\n # We need to have a concrete issue where profiling helps us determine if this is a real\n # bottleneck or not and not optimize for no real benefit.\n resolved_kwargs = {'_ARGS': args, '_KWARGS': kwargs}\n\n # Set the default argument values as condition parameters.\n for param_name, param_value in kwdefaults.items():\n resolved_kwargs[param_name] = param_value\n\n # Override the defaults with the values actually supplied to the function.\n for i, func_arg in enumerate(args):\n if i < len(param_names):\n resolved_kwargs[param_names[i]] = func_arg\n else:\n # Silently ignore call arguments that were not specified in the function.\n # This way we let the underlying decorated function raise the exception\n # instead of frankensteining the exception here.\n\n # It seems that this line can not be covered,\n # see https://github.com/nedbat/coveragepy/issues/1041.\n # The branch was covered manually in ``tests.test_checkers``.\n pass # pragma: no cover\n\n for key, val in kwargs.items():\n resolved_kwargs[key] = val\n\n return resolved_kwargs",
"def myfunc(*args, **kwargs):\n print('I would like {} {}'.format(args[0], kwargs['food']))",
"def test_020_kwargs(self):\n caller = self.get_caller([KwargsTaskOverride])\n self.assertEqual([\"A\", \"B\"], caller(\"A\", \"B\"))",
"def chooseArgs(function_args, user_args):\r\n return {x: user_args[x] for x in user_args if x in function_args}",
"def test_star_args_with_dict():\n arg_dict = {'visited_color': 'orange',\n 'link_color': 'yellow',\n 'back_color': 'red',\n 'fore_color': 'blue'}\n assert arguments.fun_star_params(**arg_dict) == ('orange', 'yellow',\n 'red', 'blue')",
"def test_kw_args_with_dict():\n arg_dict = {'visited_color': 'blue',\n 'link_color': 'red',\n 'back_color': 'yellow',\n 'fore_color': 'orange'}\n assert arguments.fun_opt_kw_params(**arg_dict) == ('orange', 'yellow',\n 'red', 'blue')",
"def _call(self, args):\n a = args.split(' ', 1)\n if a:\n getattr(self, a[0])(*a[1:])",
"def test_kw_args_with_tuple_and_dict():\n arg_tuple = ('orange', 'yellow')\n arg_dict = {'visited_color': 'blue',\n 'link_color': 'red'}\n\n assert arguments.fun_opt_kw_params(*arg_tuple, **arg_dict) == ('orange',\n 'yellow',\n 'red',\n 'blue')",
"def _call_cupy(func, args, kwargs):\n\n return func(*args, **kwargs)",
"def call(self, *args, **kwargs):",
"def _execute(self, input_args, available_args):\n\n if len(available_args) == 0: # We've reached the bottom of the recursive stack, execute function\n doExecute = True\n if self.behavior == 'sampled':\n if random.random() > self.sampled_thresh:\n doExecute = False\n\n if doExecute:\n if self.args_as_dict: # this passes ONE argument to the function which is the dictionary\n self.func(input_args)\n else:\n self.func(**input_args) # this calls the function with arguments specified in the dictionary\n\n # get all keys\n keys = available_args.keys()\n keys_to_remove = []\n\n for i, key in enumerate(keys):\n values = available_args.get(key)\n\n # this is a list of possible inputs so iterate over it. Strings are iterable in python so filter out\n if isinstance(values, collections.Iterable) and not isinstance(values, str):\n # first, augment available_args so it no longer contains keys that we have already carried over\n keys_to_remove.append(key)\n for k in keys_to_remove:\n available_args.pop(k)\n\n for value in values:\n input_args[key] = value\n self._execute(input_args, available_args)\n\n available_args[key] = values # replace values so they can be used in the next iterative call\n break # don't do any more iterations after we handled the first key with multiple choices\n input_args[key] = values\n keys_to_remove.append(key)\n if (i+1) == len(keys): # we've reached the final item in the available args\n self._execute(input_args, {})",
"def apply_on_each_func_args_sig(func,\n cur_args,\n cur_kwargs,\n sig, # type: Signature\n func_to_apply,\n func_to_apply_params_dict):\n\n # match the received arguments with the signature to know who is who\n bound_values = sig.bind(*cur_args, **cur_kwargs)\n\n # add the default values in here to get a full list\n apply_defaults(bound_values)\n\n for att_name, att_value in bound_values.arguments.items():\n if att_name in func_to_apply_params_dict.keys():\n # value = a normal value, or cur_kwargs as a whole\n func_to_apply(att_value, func_to_apply_params_dict[att_name], func, att_name)\n\n # The behaviour below is removed as it is too complex to explain\n # else:\n # if sig.parameters[att_name].kind == Parameter.VAR_KEYWORD:\n # # if the attribute is variable-length keyword argument we can try to find a matching key inside it\n # # each item is handled independently (if func signature contains the kw args names such as a, b)\n # for name, value in att_value.items():\n # if name in func_to_apply_params_dict.keys():\n # func_to_apply(value, func_to_apply_params_dict[name], func, name)",
"def arg_use():\n def args_use(*args):\n # The different of args and *args.\n print(\"*args value is {}\".format(*args)) \n print(\"args value is {}\".format(args))\n for arg in args:\n print(arg)\n def kwargs_use(*args, **kwargs):\n # *kwargs get kwargs key(s)\n print(\"*kwargs value is %s\", *kwargs)\n print(\"kwargs value is %s, type is %s\" %(kwargs, type(kwargs)))\n for kwarg in kwargs:\n print(kwarg)\n arg_str = \"abc\"\n arg_list = [1, 2, 3]\n arg_dict = {'name': \"Cai\", 'age': 24}\n args_use(arg_str, arg_list)\n kwargs_use(arg_str, arg_dict, user='CAI', id=23)\n kwargs_use(arg_str, **{'name': \"Cai\", 'age': 24}, user='CAI', id=23)",
"def test_named_params(self):\n varargs = ()\n kwargs = {'arg1' : \"arg1_val\", 'default' : \"default_val\"}\n method = getattr(self.foo,'f_onearg_and_default')\n var_dict = reassign_function_arguments(method, varargs, kwargs)\n self.assertEquals(kwargs, var_dict)",
"def myfunc(**kwargs):\n if 'fruit' in kwargs:\n print('My fruit of choice is {}'.format(kwargs['fruit']))\n else:\n print('I did not find any fruit here')",
"def func(*args, **kwargs):\n return call(*args, **kwargs) # pylint: disable = E1102",
"def set_func_args(self, *args, **kwargs):\n self._func_args = args \n self._func_kw_args = kwargs",
"def run(self):\n self.fn(*self.args, **self.kwargs)",
"def call(func, args):\n assert hasattr(func, '__call__'), 'Cannot call func: {}'.format(\n func.__name__)\n is_func = isinstance(func, FunctionType)\n raw_func = func if is_func else func.__class__.__call__\n hints = collections.defaultdict(lambda: Any, get_type_hints(raw_func))\n params, vararg, kwarg = get_signature(raw_func)\n params += [vararg, kwarg]\n keyword_args = {}\n positional_args = ()\n for k, nk, v in _normalize(args):\n if nk == vararg:\n hints[nk] = Tuple[hints[nk], ...]\n elif nk not in params and kwarg in hints:\n hints[nk] = hints[kwarg]\n try:\n value = cast(hints[nk], v)\n except exc.CastError as e:\n six.raise_from(exc.InvalidCliValueError(k, v), e)\n if nk == vararg:\n positional_args = value\n elif (nk in params or kwarg) and (\n nk not in keyword_args or keyword_args[nk] is None):\n keyword_args[nk] = value\n return func(*positional_args, **keyword_args)",
"def _handle_func_args(func, *args, **kwargs):\n if not isinstance(func, (types.FunctionType, types.MethodType)):\n raise RuntimeError('fn {} is not function or method'.format(func))\n if kwargs:\n bound_arguments = inspect.signature(func).bind(*args, **kwargs)\n bound_arguments.apply_defaults()\n args = bound_arguments.args\n kwargs = bound_arguments.kwargs\n\n positional_args = 0\n default_args = 0\n has_var = False\n for value in inspect.signature(func).parameters.values():\n if value.kind is inspect.Parameter.VAR_POSITIONAL or value.kind is inspect.Parameter.VAR_KEYWORD:\n has_var = True\n if value.kind is inspect.Parameter.POSITIONAL_OR_KEYWORD:\n if value.default is inspect.Parameter.empty:\n positional_args += 1\n else:\n default_args += 1\n\n if has_var:\n return args, kwargs\n\n if len(args) < positional_args:\n raise TypeError(f\"Function {func.__name__} needs {positional_args} positional argument, but got {len(args)}.\")\n if len(args) > positional_args + default_args:\n raise TypeError(f\"Function {func.__name__} needs {positional_args} positional argument and {default_args} \"\n f\"default argument, total {positional_args + default_args}, but got {len(args)}.\")\n return args, kwargs"
]
| [
"0.7247215",
"0.7148424",
"0.6686485",
"0.6509613",
"0.6478772",
"0.6449938",
"0.6368157",
"0.6364912",
"0.624615",
"0.62181324",
"0.620267",
"0.61353123",
"0.6101103",
"0.6094938",
"0.6090208",
"0.59868765",
"0.59685",
"0.59656274",
"0.59288245",
"0.59287405",
"0.5921486",
"0.5917609",
"0.5900504",
"0.5898911",
"0.5893225",
"0.58900833",
"0.5882319",
"0.5862605",
"0.5859495",
"0.58594024"
]
| 0.8205471 | 0 |
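The wrapper in this record leans on selectArgsFromDict/getArgs helpers that the negatives only hint at. A self-contained sketch of the whole chain built on the standard inspect module (the helper bodies are assumptions), plus a small call-through showing that unused keys are dropped and explicit kwargs win:

import inspect

def getArgs(func):
    return list(inspect.signature(func).parameters)

def selectArgsFromDict(func, argdict):
    return {k: argdict[k] for k in getArgs(func) if k in argdict}

def callFuncBasedOnDict(func, argdict, **kwargs):
    seldict = selectArgsFromDict(func, argdict or {})
    seldict.update(kwargs)
    return func(**seldict)

def area(width, height, scale=1):
    return width * height * scale

# 'color' is silently filtered out; the explicit scale keyword overrides the dict
print(callFuncBasedOnDict(area, {'width': 3, 'height': 4, 'color': 'red'}, scale=2))  # 24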
full test to determine whether a function is callable given arguments | def isCallableWithArgs(func, argdict):
return not missingArgs(func, argdict) and not invalidArgs(func, argdict) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_callable(func: Any) -> bool:\n # noinspection PyTypeChecker\n return isinstance(func, (types.FunctionType, types.BuiltinFunctionType,\n types.MethodType, functools.partial))",
"def is_callable(o):\n return callable(o)",
"def is_callable(o):\n return isinstance(o, collections.Callable)",
"def is_callable(obj):\n return callable(obj)",
"def callable(obj):\n return bool(_PyCallable_Check(_py_object(obj)))",
"def _handle_callable(value: Any, annotation_args: Tuple[List[Any], Any]) -> bool:\n\n if not isinstance(value, Callable):\n return False\n\n # Here, we wish to compare a given callable with the annotation provided.\n # The only way to verify this information is through the type hints of the function.\n # Note that the `Callable` syntax does not indicate optional or keyword arguments,\n # so those are ignored if present.\n param_annotations, return_annotation = annotation_args\n signature = inspect.signature(function)\n indicated_return_annotation = signature.return_annotation\n\n # have to write functions to convert between `typing` and builtin\n if indicated_return_annotation != return_annotation:\n return False\n \n print(\"callable functionality WIP\")\n pass",
"def callable(obj): # pylint: disable=redefined-builtin\n return bool(PyCallable_Check(py_object(obj)))",
"def all_fn(*args) -> bool:\n for arg in args:\n if not isinstance(arg, (FunctionType, partial)):\n return False\n\n return True",
"def is_function(self):\n return self.args is not None",
"def is_callable_type(typevar: Union[Callable, callable, TypeVar]) -> bool:\n if typevar == callable or typevar == Callable:\n return True\n # This return is split in 2 parts to calm down pycharms static analyzer.\n if hasattr(typevar, \"__origin__\"):\n # noinspection PyUnresolvedReferences\n return typevar.__origin__ == Callable.__origin__\n return False",
"def is_call_arg_of(self, *args):\n return _ida_hexrays.cexpr_t_is_call_arg_of(self, *args)",
"def isfunction(object):\r\n return isinstance(object, types.FunctionType)",
"def is_function(obj):\n return isinstance(obj, (types.FunctionType, types.MethodType,\n types.LambdaType))",
"def is_fn(self, argno: int, argc: int) -> '_Checker':\n t = self.arg_types[argno]\n if not isinstance(t, FunctionType):\n raise XlsTypeError(\n self.span, t, None,\n 'Want argument {} to be a function; got {}'.format(argno, t))\n if len(t.params) != argc:\n raise XlsTypeError(\n self.span, t, None,\n 'Want argument {} to be a function with {} parameters; got {}'.format(\n argno, argc, t))\n return self",
"def is_partial_like(func: Callable):\n return (\n hasattr(func, 'func')\n and hasattr(func, 'args')\n and hasattr(func, 'keywords')\n and isinstance(func.args, tuple)\n )",
"def is_valid_function(self, paras):\n if len(paras) != 0:\n return True\n return True",
"def isValidFunction(self):\n for token in self.value:\n if token.type == 'defFunction' or token.type == 'callFunction':\n if token.value.split('(')[0] == self.name:\n return False\n return True",
"def try_bind(func: Callable, *args: Any, **kwargs: Any) -> bool:\n try:\n inspect.signature(func).bind(*args, **kwargs)\n except TypeError:\n return False\n else:\n return True",
"def test_interpolator_is_callable(name):\n interpolator = interpolation.get_interpolator(name)\n assert callable(interpolator)",
"def is_callable(self, name, method):\r\n return name in self._registry and self._registry[name].method == method",
"def is_lambda(fun):\n return isinstance(fun, type(LAMBDA)) and fun.__name__ == LAMBDA.__name__",
"def isroutine(object):\r\n return (isbuiltin(object)\r\n or isfunction(object)\r\n or ismethod(object)\r\n or ismethoddescriptor(object))",
"def is_no_python_compiled_callable(\n no_python_callable: Callable, raise_error: bool = False\n):\n is_no_python_callable = hasattr(no_python_callable, \"signatures\")\n if raise_error and not is_no_python_callable:\n raise ValueError(\n f\"The callable provided must be no_python compiled. The callable that \"\n f\"caused\"\n f\"this error is named {no_python_callable.__name__}\"\n )\n\n return is_no_python_callable",
"def has_func(cls, obj, *args):\n methods = dir(obj)\n matched = [x for x in args if x in methods]\n return len(matched) == len(args)",
"def test_from_callable(self):\n def func(a: int = 0):\n return a\n fsig = FSignature.from_callable(func)\n assert len(fsig.parameters) == 1\n assert fsig.parameters['a'] == FParameter(\n kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,\n name='a',\n interface_name='a',\n default=0,\n type=int,\n )",
"def is_compatible(self, function, arguments):",
"def is_function(self, function: str) -> bool:\n return function in self.function_converter",
"def _can_perform_call(self, node, args, keywords):\n return (\n getattr(node, \"starargs\", None) is None\n and getattr(node, \"kwargs\", None) is None\n and all(isinstance(arg, KnownValue) for arg in args)\n and all(isinstance(arg, KnownValue) for _, arg in keywords)\n )",
"def getCallable():",
"def _check_test(\n test: Callable[[np.ndarray, np.ndarray], np.ndarray[bool]]\n | Callable[[np.ndarray, np.ndarray, int], np.ndarray[bool]],\n) -> Callable[[np.ndarray, np.ndarray, int], np.ndarray[bool]]:\n signature = inspect.signature(test)\n if \"level\" in signature.parameters:\n return test\n\n def test_with_level(x, y, *, level: int):\n return test(x, y)\n\n return test_with_level"
]
| [
"0.8043053",
"0.769143",
"0.76113504",
"0.7497088",
"0.7310708",
"0.7201597",
"0.7183681",
"0.6882035",
"0.68759084",
"0.6750539",
"0.6558839",
"0.65530676",
"0.65278536",
"0.6495978",
"0.64895743",
"0.6446316",
"0.6437389",
"0.6419986",
"0.641831",
"0.6383968",
"0.6358628",
"0.62974757",
"0.6269665",
"0.6264798",
"0.62429553",
"0.62242526",
"0.6215236",
"0.61667603",
"0.6149444",
"0.6127822"
]
| 0.80176526 | 1 |
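isCallableWithArgs composes missingArgs with an invalidArgs check like the one that appears among the negatives; a minimal inspect-based version of that second half, with a quick usage check (the helper body is assumed):

import inspect

def invalidArgs(func, argdict):
    spec = inspect.getfullargspec(func)
    if spec.varkw:
        # a **kwargs catch-all means every key is acceptable
        return set()
    return set(argdict) - set(spec.args)

def greet(name, greeting='hi'):
    return '{} {}'.format(greeting, name)

print(invalidArgs(greet, {'name': 'Ada', 'volume': 11}))  # {'volume'}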
function to tell which required arguments are missing from argdict | def missingArgs(func, argdict):
return set(getRequiredArgs(func)).difference(argdict) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_required_arg(*args):\n for a in args:\n if a not in arg_dict:\n return a",
"def GetMissingArguments(self):\n return []",
"def test_missing_arg_repr():\n argspec = inspect.getfullargspec(exceptional.wrap)\n assert repr(argspec.kwonlydefaults[\"message\"]) == \"<MISSING>\"",
"def test_process_args_should_reject_missing_units(self, arg_dict):\n with pytest.raises(KeyError):\n change_resolution.process_args(arg_dict)",
"def arg_err(self,func):\n print 'Error in arguments:'\n print inspect.getdoc(func)",
"def invalid_args(func, argdict):\r\n args, _, keywords, _ = inspect.getargspec(func)\r\n if keywords:\r\n return set() # All accepted\r\n return set(argdict) - set(args)",
"def invalidArgs(func, argdict):\n args, varargs, varkw, defaults = inspect.getargspec(func)\n if varkw:\n return set() # All accepted\n return set(argdict) - set(args)",
"def _check_args(self, args):\n if len(args) == 0:\n print(\"No parameters provided.\")\n return False\n else:\n return True",
"def _check_args(self, args_):\n\n pass",
"def test_map_args_invalid():\n pass",
"def test_pype_get_arguments_name_empty():\n context = Context({'pype': {'name': None}})\n\n with pytest.raises(KeyInContextHasNoValueError) as err_info:\n pype.get_arguments(context)\n\n assert repr(err_info.value) == (\n \"KeyInContextHasNoValueError(\\\"pypyr.steps.pype ['pype']['name'] \"\n \"exists but is empty.\\\",)\")",
"def test_args(self):\n args = forge.args\n assert isinstance(args, forge._signature.VarPositional)\n assert args.name == 'args'\n assert args.converter is None\n assert args.validator is None",
"def test_missing_args(self, test, x, y, z=3, _private_arg=3): # noqa: D213, D407",
"def __check_args(self):\n self.__check_args_type()\n self.__check_args_val()",
"def getPositionalArgs():",
"def _validate_args(self, args):\r\n invalid_args = [k for k in self.required_params if args.get(k) is None]\r\n if invalid_args:\r\n raise ArgumentError('Missing required options: %s'\r\n % ','.join(invalid_args))",
"def test_noarg(self):\n varargs = ()\n kwargs = {}\n method = getattr(self.foo,'f_noarg')\n var_dict = reassign_function_arguments(method, varargs, kwargs)\n self.assert_(var_dict == {})",
"def bad_args(args):\n PARSER.print_help()\n exit(0)",
"def test_kwargs_not_false_positive(*args, **kwargs):\n 'Hello John Doe {0[0]}'.format(args)\n 'Hello {0[name]}'.format(kwargs)",
"def _verify_arguments(self, kwargs: dict[str, Any]):\n geom_stat_args = kwargs.keys() | self._stat._kwargs.keys()\n unknown = (\n geom_stat_args\n - self.aesthetics()\n - self.DEFAULT_PARAMS.keys() # geom aesthetics\n - self._stat.aesthetics() # geom parameters\n - self._stat.DEFAULT_PARAMS.keys() # stat aesthetics\n - { # stat parameters\n \"data\",\n \"mapping\",\n \"show_legend\", # layer parameters\n \"inherit_aes\",\n \"raster\",\n }\n ) # layer parameters\n if unknown:\n msg = (\n \"Parameters {}, are not understood by \"\n \"either the geom, stat or layer.\"\n )\n raise PlotnineError(msg.format(unknown))",
"def filter_args_dict(self, args):\n return dict((k,v) for (k,v) in viewitems(args) if self.has_arg(k))",
"def test_map_args_all_none():\n pass",
"def test_missing_args(self, test, x, y, z=3, t=1, _private=0): # noqa: D213, D407",
"def test_single_arg_missing(self):\n from plone.api.exc import MissingParameterError\n _func = required_parameters('arg1')(undecorated_func)\n with self.assertRaises(MissingParameterError):\n _func()",
"def validate_args(args):\n if args['i'] is None:\n print('# -i required', file=sys.stderr)\n exit(9)\n\n return args",
"def valid_args(args):\n return args is not None and len(args) > 0",
"def raise_on_kwargs_not_empty(kwargs):\n if kwargs:\n raise SyntaxWarning(f\"Unknown arguments: {kwargs}\")",
"def cmd_missing_arg(self):\n self.respond(\"501 Syntax error: command needs an argument.\")",
"def _parse_from_dict(self, arg_dict):\n missing = {}\n for key, value in arg_dict.items():\n flag_desc = self._defs.get(key.replace(\"-\", \"_\"), None)\n if flag_desc is None:\n missing[key] = value\n else:\n flag_desc.parse(value)\n return missing",
"def check_args(args, iam='gfind', allow_no_coords=False):\n\n args = gargs.check_common_args(args, iam, allow_no_coords=allow_no_coords)\n\n return args"
]
| [
"0.73363334",
"0.7244634",
"0.70374715",
"0.6803898",
"0.67266184",
"0.67114115",
"0.6554523",
"0.6470076",
"0.6388287",
"0.6375971",
"0.63278836",
"0.63259083",
"0.63241726",
"0.63233143",
"0.6309832",
"0.63031167",
"0.62901896",
"0.62558925",
"0.6230833",
"0.6225469",
"0.619034",
"0.61806643",
"0.6172443",
"0.61595637",
"0.61563706",
"0.6146625",
"0.61416453",
"0.6141377",
"0.614004",
"0.60895383"
]
| 0.7362744 | 0 |
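missingArgs in turn presupposes a getRequiredArgs helper that returns the parameters carrying no default value. One plausible inspect-based version and the resulting behaviour (the names follow the record, the implementation is an assumption):

import inspect

def getRequiredArgs(func):
    params = inspect.signature(func).parameters.values()
    # keep only positional parameters that have no default
    return [p.name for p in params
            if p.default is inspect.Parameter.empty
            and p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD)]

def missingArgs(func, argdict):
    return set(getRequiredArgs(func)).difference(argdict)

def connect(host, port, timeout=30):
    pass

print(missingArgs(connect, {'host': 'localhost'}))                 # {'port'}
print(missingArgs(connect, {'host': 'localhost', 'port': 8080}))   # set()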
This function classifies objects from a video dict with the cvlib Python package. | def classify_objects(video_dict, params, paths, vid_time_length=10, make_videos=True):
yolo_dict = {}
for video_num, (name, video) in enumerate(video_dict.items()):
        print('Classifying video {}/{}.'.format(video_num + 1, len(video_dict)))
yolo_dict[name] = {}
# loop over frames of video and store in lists
obj_bounds = []
obj_labels = []
obj_label_confidences = []
cap_cvlib = []
for i in range(video.shape[0]):
frame = video[i, :, :, :]
# apply object detection
bbox, label, conf = cv.detect_common_objects(frame, confidence=params['detection_confidence_threshold'],
model=params['detection_model'])
obj_bounds.append(bbox)
obj_labels.append([l.replace('motorcycle', 'motorbike') for l in label])
obj_label_confidences.append(conf)
# draw bounding box over detected objects
if make_videos:
img_cvlib = draw_bbox(frame, bbox, label, conf)
cap_cvlib.append(img_cvlib)
# write video to local file
if make_videos:
cap_cvlib_npy = np.asarray(cap_cvlib)
local_mp4_path_out = paths['processed_video'] + name
imageio.mimwrite(local_mp4_path_out, cap_cvlib_npy, fps=int(video.shape[0] / vid_time_length))
yolo_dict[name]['bounds'] = obj_bounds
yolo_dict[name]['labels'] = obj_labels
yolo_dict[name]['confidences'] = obj_label_confidences
frame_level_df = yolo_output_df(yolo_dict)
return frame_level_df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def vis_detections_video(im, class_name, dets, csv_file, csv, frame_id, thresh=0.5):\n nms_max_overlap = 0.6\n metric = nn_matching.NearestNeighborDistanceMetric(\"cosine\", 0.2, 100)\n tracker = Tracker(metric)\n detections = []\n scores = []\n h, w, _ = im.shape\n thick = int((h + w) // 300)\n inds = np.where(dets[:, -1] >= thresh)[0]\n if len(inds) == 0:\n return im\n for i in inds:\n scores.append(dets[i, -1])\n\n for i in inds:\n bbox = dets[i, :4]\n boxResults = process_box(bbox, scores, h, w, thresh)\n if boxResults is None:\n continue\n left, right, top, bot, mess, max_indx, confidence = boxResults\n detections.append(np.array([left,top,right-left,bot-top]).astype(np.float64))\n scores.append(confidence)\n\n scores = np.array(scores)\n detections = np.array(detections)\n features = deep_sort_encode(im, detections.copy())\n detections = [Detection(bbox, score, feature) for bbox,score, feature in zip(detections,scores, features)]\n # Run non-maxima suppression.\n boxes = np.array([d.tlwh for d in detections])\n scores = np.array([d.confidence for d in detections])\n indices = prep.non_max_suppression(boxes, nms_max_overlap, scores)\n detections = [detections[i] for i in indices]\n tracker.predict()\n tracker.update(detections)\n trackers = tracker.tracks\n for track in trackers:\n if not track.is_confirmed() or track.time_since_update > 1:\n continue\n bbox = track.to_tlbr()\n id_num = str(track.track_id)\n csv.writerow([frame_id,id_num,int(bbox[0]),int(bbox[1]),int(bbox[2])-int(bbox[0]),int(bbox[3])-int(bbox[1])])\n csv_file.flush()\n cv2.rectangle(im, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),(0,255,255), thick//3)\n cv2.putText(im, id_num,(int(bbox[0]), int(bbox[1]) - 12),0, 1e-3 * h, (255,255,255),thick//6)\n # cv2.rectangle(im,(bbox[0],bbox[1]),(bbox[2],bbox[3]),(0,0,255),2)\n # cv2.rectangle(im,(int(bbox[0]),int(bbox[1])-10),(int(bbox[0]+200),int(bbox[1])+10),(10,10,10),-1)\n # cv2.putText(im, id_num,(int(bbox[0]),int(bbox[1]-2)),cv2.FONT_HERSHEY_SIMPLEX,.45,(255,255,255))#,cv2.CV_AA)\n return im",
"def vis_detections_video(im, class_name, dets, thresh=0.5):\n\n inds = np.where(dets[:, -1] >= thresh)[0]\n if len(inds) == 0:\n return im\n\n for i in inds:\n bbox = dets[i, :4]\n score = dets[i, -1]\n cv2.rectangle(im,(bbox[0],bbox[1]),(bbox[2],bbox[3]),(0,0,255),2)\n cv2.rectangle(im,(int(bbox[0]),int(bbox[1])-10),(int(bbox[0]+200),int(bbox[1])+10),(10,10,10),-1)\n cv2.putText(im,'{:s} {:.3f}'.format(class_name, score),(int(bbox[0]),int(bbox[1]-2)),cv2.FONT_HERSHEY_SIMPLEX,.45,(255,255,255))#,cv2.CV_AA)\n return im",
"def demo_video(sess, net, im, csv_file, csv, frame_id):\n timer = Timer()\n timer.tic()\n scores, boxes = im_detect(sess, net, im)\n timer.toc()\n # print ('Detection took {:.3f}s for '\n # '{:d} object proposals').format(timer.total_time, boxes.shape[0])\n\n # Visualize detections for each class\n CONF_THRESH = 0.75\n\n NMS_THRESH = 0.2\n for cls_ind, cls in enumerate(CLASSES[1:]):\n cls_ind += 1 # because we skipped background\n cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\n\n cls_scores = scores[:, cls_ind]\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32)\n keep = nms(dets, NMS_THRESH)\n dets = dets[keep, :]\n if(cls == 'person'):\n im=vis_detections_video(im, cls, dets, csv_file, csv, frame_id, thresh=CONF_THRESH)\n #cv2.imwrite(os.path.join('output',str(time.time())+'.jpg'),im)\n cv2.imshow('ret',im)\n \n cv2.waitKey(20)",
"def __init__(self, video_w, video_h, video_fps, tracking_result):\n self.font = cv2.FONT_HERSHEY_SIMPLEX\n self.fontScale = 1\n self.thickness = 2\n self.thicknessUpdate = 3\n self.color = (238, 221, 192) # A surfrider color\n self.icons = get_icons()\n self.classes_to_icons = {'bottles':self.icons[0], 'fragments':self.icons[1], 'others':self.icons[2]}\n self.video_w = video_w\n self.video_h = video_h\n self.video_fps = video_fps\n self.tracking_result = tracking_result\n self.detection_image_size = (1024, 768)\n self.frames_to_boxes_dict = None\n self.frames_to_update_hud = None",
"def video_to_features(vid):\n ext = Extractor()\n return [ext.extract(frame) for frame in vid]",
"def __init__(self, resource_path, resource_id, params=None):\r\n\r\n self.anal_results = {} # Dictionary with analysis results\r\n\r\n self.anal_times = {} # Dictionary with times for analysis\r\n\r\n self.cloth_threshold = 0 # Threshold for clothing recognition\r\n\r\n self.cut_idxs = [] # List of frame indexes where new shots begin\r\n\r\n self.detected_faces = [] # List of detected faces\r\n\r\n # List of tracked faces not considered\r\n self.disc_tracked_faces = []\r\n\r\n self.faces_nr = {} # Number of faces for each frame\r\n\r\n self.frame_list = [] # List of frame paths\r\n\r\n self.frames_in_models = {} # Frames used in face models\r\n\r\n self.fps = 0 # Frame rate of video in frames per second\r\n\r\n self.hist_diffs = [] # List with histogram differences\r\n\r\n self.nose_pos_list = [] # List with nose positions\r\n\r\n self.params = params\r\n\r\n self.progress = 0 # Progress in analyzing video\r\n\r\n self.recognized_faces = [] # List of recognized faces\r\n\r\n self.resource_id = resource_id # Id of resource being analyzed\r\n\r\n # Name of resource being analyzed\r\n self.resource_name = os.path.basename(resource_path)\r\n\r\n # Path of resource being analyzed\r\n self.resource_path = resource_path\r\n\r\n self.saved_frames = 0 # Number of saved and analyzed frames\r\n\r\n self.track_threshold = 0 # Threshold for tracking interruption\r\n\r\n self.tracked_faces = [] # List of tracked faces\r\n\r\n self.video_frames = 0 # Number of original frames in video\r\n\r\n # Setup directories and files with results\r\n\r\n file_name = self.resource_id + '.YAML'\r\n\r\n # Directory for this video\r\n video_indexing_path = c.VIDEO_INDEXING_PATH\r\n\r\n # Absolute paths provided by user\r\n video_params_file_path = None\r\n frames_path = None\r\n aligned_faces_path = None\r\n face_tracking_file_path = None\r\n face_models_dir_path = None\r\n cloth_models_dir_path = None\r\n frames_in_models_file_path = None\r\n\r\n if params is not None:\r\n if c.VIDEO_INDEXING_PATH_KEY in params:\r\n video_indexing_path = params[c.VIDEO_INDEXING_PATH_KEY]\r\n if ce.VIDEO_PARAMS_FILE_PATH_KEY in params:\r\n video_params_file_path = params[ce.VIDEO_PARAMS_FILE_PATH_KEY]\r\n if ce.FRAMES_PATH_KEY in params:\r\n frames_path = params[ce.FRAMES_PATH_KEY]\r\n if ce.FACES_PATH_KEY in params:\r\n aligned_faces_path = params[ce.FACES_PATH_KEY]\r\n if ce.FACE_TRACKING_FILE_PATH_KEY in params:\r\n face_tracking_file_path = params[ce.FACE_TRACKING_FILE_PATH_KEY]\r\n if ce.FACE_MODELS_DIR_PATH_KEY in params:\r\n face_models_dir_path = params[ce.FACE_MODELS_DIR_PATH_KEY]\r\n if ce.CLOTH_MODELS_DIR_PATH_KEY in params:\r\n cloth_models_dir_path = params[ce.CLOTH_MODELS_DIR_PATH_KEY]\r\n if ce.FRAMES_IN_MODELS_PATH_KEY in params:\r\n frames_in_models_file_path = params[\r\n ce.FRAMES_IN_MODELS_PATH_KEY]\r\n\r\n self.video_path = os.path.join(\r\n video_indexing_path, resource_id, c.FACE_EXTRACTION_DIR)\r\n\r\n # Directory for frame_list\r\n if frames_path is None:\r\n self.frames_path = os.path.join(self.video_path, c.FRAMES_DIR)\r\n else:\r\n self.frames_path = frames_path\r\n\r\n # File with frame list\r\n self.frames_file_path = os.path.join(self.frames_path, file_name)\r\n\r\n # Directory for detection results\r\n self.det_path = os.path.join(self.video_path, c.FACE_DETECTION_DIR)\r\n\r\n # Directory for aligned faces\r\n if aligned_faces_path is None:\r\n self.align_path = os.path.join(self.det_path, c.ALIGNED_FACES_DIR)\r\n else:\r\n self.align_path = aligned_faces_path\r\n\r\n # File with detection results\r\n 
self.det_file_path = os.path.join(self.det_path, file_name)\r\n\r\n # Directory for tracking results\r\n self.track_path = os.path.join(self.video_path, c.FACE_TRACKING_DIR)\r\n\r\n # File with tracking results\r\n if face_tracking_file_path is None:\r\n self.track_file_path = os.path.join(self.track_path, file_name)\r\n else:\r\n self.track_file_path = face_tracking_file_path\r\n\r\n # Directory for face models\r\n if face_models_dir_path is None:\r\n self.face_models_path = os.path.join(\r\n self.video_path, c.FACE_MODELS_DIR)\r\n else:\r\n self.face_models_path = face_models_dir_path\r\n\r\n # Directory for cloth models\r\n if cloth_models_dir_path is None:\r\n self.cloth_models_path = os.path.join(\r\n self.video_path, c.CLOTH_MODELS_DIR)\r\n else:\r\n self.cloth_models_path = cloth_models_dir_path\r\n\r\n # File with list of frames in models\r\n if frames_in_models_file_path is None:\r\n self.frames_in_models_file_path = os.path.join(\r\n self.video_path, c.FRAMES_IN_MODELS_FILE)\r\n else:\r\n self.frames_in_models_file_path = frames_in_models_file_path\r\n\r\n # File with nose positions\r\n self.nose_pos_file_path = os.path.join(\r\n self.video_path, c.NOSE_POSITIONS_FILE)\r\n\r\n # Directory for clustering results\r\n self.cluster_path = os.path.join(\r\n self.video_path, c.PEOPLE_CLUSTERING_DIR)\r\n\r\n # Directory with files with clustering results\r\n self.cluster_files_path = os.path.join(self.cluster_path, c.YAML_FILES_DIR)\r\n\r\n # File with number of faces in each frame\r\n self.faces_nr_path = os.path.join(\r\n self.video_path, c.FACES_NR_IN_FRAMES_FILE)\r\n\r\n # Directory with complete annotations\r\n self.compl_ann_path = os.path.join(\r\n self.video_path, c.FACE_ANNOTATION_DIR)\r\n\r\n # Directory with simple annotations\r\n self.simple_ann_path = os.path.join(\r\n self.video_path, c.FACE_SIMPLE_ANNOTATION_DIR)\r\n\r\n # File with parameters\r\n if video_params_file_path is None:\r\n params_file_name = self.resource_id + '_parameters.YAML'\r\n self.params_file_path = os.path.join(\r\n self.video_path, params_file_name)\r\n else:\r\n self.params_file_path = video_params_file_path\r\n\r\n # File with times used for analysis\r\n analysis_file_name = self.resource_id + '_analysis_times.YAML'\r\n\r\n self.analysis_file_path = os.path.join(\r\n self.video_path, analysis_file_name)",
"def draw_obj(img, video_frame):\n dets = []\n contours, hierarchy = cv.findContours(img, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)\n dst = np.zeros(img.shape, dtype=np.uint8)\n if len(contours) > 20000:\n print(len(contours))\n return dets\n for i in range(len(contours)):\n cv.drawContours(dst, contours, i, (255, 0, 0), 1)\n cnt = contours[i]\n area = cv.contourArea(cnt)\n if area <= 60:\n continue\n x, y, w, h = cv.boundingRect(cnt)\n\n if x < 1 and y < 1:\n continue\n cv.rectangle(video_frame, (x, y), (x + w, y + h), (0, 0, 255), 2)\n dets.append([x, y, x + w, y + h, 0.9])\n dets = np.asarray(dets)\n\n cv.imshow('bbox', video_frame)\n\n return dets",
"def __init__(self, dataPath, transformImage=None):\r\n self.dataPath = dataPath\r\n self.transformImage = transformImage\r\n self.videos = sorted(os.listdir(self.dataPath))\r\n self.query = 'cast.json'\r\n self.cand = 'candidate.json'\r\n self.queryDir = 'cast'\r\n self.candDir = 'candidates'\r\n self.datas = []\r\n self.labels = []\r\n self.iden = {}\r\n total = 0\r\n for index in range(len(self.videos)):\r\n with open(os.path.join(self.dataPath, self.videos[index], self.query)) as f:\r\n query = OrderedDict(sorted(json.load(f).items(), key=lambda d:d[0]))\r\n with open(os.path.join(self.dataPath, self.videos[index], self.cand)) as f:\r\n cand = OrderedDict(sorted(json.load(f).items(), key=lambda d:d[0]))\r\n for qk,qv in query.items():\r\n if qv not in self.iden:\r\n self.iden[qv] = total\r\n total += 1\r\n self.datas.append(qk)\r\n self.labels.append(self.iden[qv])\r\n for ck,cv in cand.items():\r\n if qv == cv:\r\n self.datas.append(ck)\r\n self.labels.append(self.iden[qv])\r\n #total += 1\r\n self.len = len(self.datas)",
"def detect_video(yolo_v3_model, video_path, batch_frames, output_path, train_input_size, classes_file_path, \n score_threshold, iou_threshold, num_of_anchor_bbox, strides, anchors, show = False, \n rectangle_colors = ''):\n \n # obtain number of classes\n num_of_classes = len(read_class_names(classes_file_path))\n \n # obtain VideoCapture object \n vid = cv2.VideoCapture(video_path)\n \n # obtain width, height and fps of video\n # by default VideoCapture returns float instead of int\n width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = int(vid.get(cv2.CAP_PROP_FPS))\n\n # obtain video codec\n codec = cv2.VideoWriter_fourcc(*'XVID')\n \n # obtain output_path\n # output_path must be .mp4\n out = cv2.VideoWriter(output_path, codec, fps+1, (width, height)) \n\n # create list to store images\n images = []\n \n # variable to track frame\n frame = 0 \n \n while True:\n \n try:\n \n # grabs, decodes and returns the next video frame\n _, image = vid.read()\n \n # append original image to original_images list\n images.append(image[:])\n \n # increment frame\n frame += 1\n \n \n # if current frame is less than batch_frames\n if frame < batch_frames:\n \n # move to next frame \n continue\n \n # iterate over images in chronological order (last image is image of interest to put bbox)\n for x in range(batch_frames):\n \n # convert original image to grayscale \n image = cv2.cvtColor(images[-batch_frames + x + 1], cv2.COLOR_BGR2RGB)\n \n # preprocess image\n image = transform_images(image[:], train_input_size)\n \n # obtain concat frame if none exist\n if x == 0: \n \n concat_image = image[:]\n \n # concatenate subsequent frames to concat_image\n else:\n \n concat_image = np.concatenate((concat_image, image), axis = -1)\n \n except:\n \n break\n \n # add batch dimensions to concatenated image \n concat_image = concat_image[np.newaxis, ...].astype(np.float32)\n \n # create constant tensor from concatenated image and feed it to yolo_v3_model\n batched_input = tf.constant(concat_image)\n yolo_output = yolo_v3_model(batched_input)\n\n # list to store bboxes from respective scales\n pred_bbox = []\n\n # iterate over 3 scales\n for i in range(3):\n\n # decode resepctive yolo_output from each scale\n pred_result = decode(yolo_output = yolo_output[i], num_of_anchor_bbox = num_of_anchor_bbox, \n classes = num_of_classes, strides = strides, anchors = anchors, index = i)\n\n # append to pred_bbox\n pred_bbox.append(pred_result)\n \n # obtain results of shape (:, 5 + num_classes), i.e all bboxes\n pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]\n \n # concatenate all bboxes from all scales\n pred_bbox = tf.concat(pred_bbox, axis = 0)\n\n # post process all bboxes using latest image in orignal_images\n bboxes = postprocess_boxes(pred_bbox, images[-1], train_input_size, score_threshold)\n\n # non maximal supression for bboxes\n bboxes = nms(bboxes, iou_threshold, method = 'nms')\n\n # draw bbox on latest image in orignal_images\n image = draw_bbox(images[-1], bboxes, classes_file_path, rectangle_colors = rectangle_colors)\n \n # save image frame to video path if path to save is given\n if output_path != '': out.write(image)\n \n # display image frame (i.e play video) if show is true \n if show:\n \n # show the image\n cv2.imshow('output', image)\n \n # if q key is presssed\n if cv2.waitKey(25) & 0xFF == ord(\"q\"):\n \n # end session\n cv2.destroyAllWindows()\n \n # break out of while loop\n break\n \n # When everything done, release the 
capture\n vid.release()\n cv2.destroyAllWindows()",
"def save_distinct_ImgObj(video_str, folder, frames_skipped: int = 0, check_blurry: bool = True,\n hessian_threshold: int = 2500, ensure_min=True):\n\n ensure_path(folder + \"/jpg\")\n\n frames_skipped += 1\n\n if video_str == \"webcam\":\n video_str = 0\n cap = cv2.VideoCapture(video_str)\n # cap= cv2.VideoCapture(0)\n # cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 200)\n # cap.set(cv2.CAP_PROP_FRAME_WIDTH, 200)\n\n distinct_frames = DistinctFrames()\n i = 0\n a = None\n b = None\n check_next_frame = False\n i_prev = 0 # the last i which was stored\n\n detector = cv2.xfeatures2d_SURF.create(hessian_threshold)\n\n ret, frame = cap.read()\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n cv2.imshow('frame', gray)\n keypoints, descriptors = detector.detectAndCompute(gray, None)\n\n a = (len(keypoints), descriptors, serialize_keypoints(keypoints), gray.shape)\n img_obj = ImgObj(a[0], a[1], i, a[2], a[3])\n save_to_memory(img_obj, 'image' + str(i) + '.pkl', folder)\n cv2.imwrite(folder + '/jpg/image' + str(i) + '.jpg', gray)\n distinct_frames.add_img_obj(img_obj)\n i_of_a=0\n while True:\n ret, frame = cap.read()\n if ret:\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n if i % frames_skipped != 0 and not check_next_frame:\n i = i + 1\n continue\n\n cv2.imshow('frame', gray)\n # print(i)\n\n if check_blurry:\n if is_blurry_grayscale(gray):\n check_next_frame = True\n print(\"frame \" + str(i) + \" skipped as blurry\")\n i = i + 1\n continue\n check_next_frame = False\n\n keypoints, descriptors = detector.detectAndCompute(gray, None)\n b = (len(keypoints), descriptors, serialize_keypoints(keypoints), gray.shape)\n if len(keypoints)<100:\n print(\"frame \"+str(i)+ \" skipped as \"+str(len(keypoints))+\" <100\")\n i = i+1\n continue\n import matcher as mt\n image_fraction_matched, min_good_matches = mt.SURF_returns(a, b, 2500, 0.7, True)\n if image_fraction_matched == -1:\n check_next_frame = True\n i=i+1\n continue\n check_next_frame = False\n if 0< image_fraction_matched < 0.1 or min_good_matches<50 or (ensure_min and i - i_prev > 50):\n img_obj2 = ImgObj(b[0], b[1], i, b[2], b[3])\n print(str(image_fraction_matched)+ \" fraction match between \"+str(i_of_a)+\" and \"+ str(i))\n save_to_memory(img_obj2, 'image' + str(i) + '.pkl', folder)\n cv2.imwrite(folder + '/jpg/image' + str(i) + '.jpg', gray)\n distinct_frames.add_img_obj(img_obj2)\n a = b\n i_of_a=i\n i_prev = i\n\n i = i + 1\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n else:\n break\n\n print(\"Created distinct frames object\")\n cap.release()\n cv2.destroyAllWindows()\n distinct_frames.calculate_time()\n return distinct_frames",
"def detect(self):\n # process the input video and get the attributes:\n self.process_video()\n\n # build a rcnn/ yolov5 predictor:\n self.build_predictor()\n\n \n # assert not os.path.isfile(args.output_file), \"File with the name %s already exists\"%args.output_file\n # build the writer with same attributes:\n self.vid_writer = cv2.VideoWriter(self.output, self.fourcc, self.fps, (self.w, self.h))\n\n # inference time:\n start = time.time()\n print(\"Started inference\\n\")\n \n # progress bar using tqdm:\n pbar = tqdm(total=self.nframes)\n\n while(self.cap.isOpened()):\n ret, frame = self.cap.read()\n if ret == False:\n break # when the last frame is read \n\n # different formats of results:\n if self.library == \"yolov5\":\n # predict and bring the outputs to cpu:\n results = self.predictor(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) # convert to RGB\n predictions = results.xyxy[0].cpu()\n # find the instance indices with person:\n person_idx = predictions[:,5] == self.label_dict[\"person\"]\n # extract the corresponding boxes and scores:\n boxes = predictions[person_idx,:4].numpy()\n probs = predictions[person_idx,4].numpy()\n\n if self.library == \"detectron2\":\n # predict and bring the outputs to cpu:\n results = self.predictor(frame) # RGB conversion done automatically in detectron\n predictions = results[\"instances\"].to(\"cpu\")\n # find the instance indices with person:\n person_idx = [predictions.pred_classes == self.label_dict[\"person\"]]\n # extract the corresponding boxes and scores:\n boxes = predictions.pred_boxes[person_idx].tensor.numpy()\n probs = predictions.scores[person_idx].numpy()\n\n # draw boxes and write the frame to the video:\n if len(boxes): # check whether there are predictions\n box_frame = self.draw_person_boxes(frame, boxes, probs)\n else:\n box_frame = frame\n self.vid_writer.write(box_frame)\n\n pbar.update(1)\n pbar.close()\n\n # release the video capture object and write object:\n self.cap.release()\n self.vid_writer.release()\n\n print(\"Inferene on the video file took %0.3f seconds\"%(time.time()-start))",
"def __init__(self, dataPath, transformImage=None):\r\n self.dataPath = dataPath\r\n self.transformImage = transformImage\r\n self.videos = sorted(os.listdir(self.dataPath))\r\n self.query = 'cast.json'\r\n self.cand = 'candidate.json'\r\n self.queryDir = 'cast'\r\n self.candDir = 'candidates'\r\n self.datas = []\r\n self.domains = []\r\n self.labels = []\r\n self.iden = {}\r\n total = 0\r\n for index in range(len(self.videos)):\r\n with open(os.path.join(self.dataPath, self.videos[index], self.query)) as f:\r\n query = OrderedDict(sorted(json.load(f).items(), key=lambda d:d[0]))\r\n with open(os.path.join(self.dataPath, self.videos[index], self.cand)) as f:\r\n cand = OrderedDict(sorted(json.load(f).items(), key=lambda d:d[0]))\r\n for qk,qv in query.items():\r\n if qv not in self.iden:\r\n self.iden[qv] = total\r\n total += 1\r\n for ck,cv in cand.items():\r\n if qv == cv:\r\n self.domains.append(qk)\r\n self.datas.append(ck)\r\n self.labels.append(self.iden[qv])\r\n #total += 1\r\n self.len = len(self.datas)",
"def __init__(self, video: cv2.VideoCapture):\n self.video = video",
"def testVideoOnObjectDetection(testVideo1, testVideo2, label):\n \n this_dir = os.path.abspath(os.path.join(os.getcwd(), '../objectDetection/testing/'))\n \n print('****************************************************************************************************')\n print('getenv: ', os.getcwd())\n print(\"this_dir: \", this_dir)\n print('labelmap: ', os.path.abspath(os.path.join(this_dir, \"..\", \"training/labelmap.pbtxt\")))\n print('****************************************************************************************************')\n \n GRAPH_PATH = os.path.abspath(os.path.join(this_dir, \"..\", \"inference_graph/frozen_inference_graph.pb\"))\n LABEL_PATH = os.path.abspath(os.path.join(this_dir, \"..\", \"training/labelmap.pbtxt\"))\n \n video1 = cv2.VideoCapture(testVideo1)\n video2 = cv2.VideoCapture(testVideo2)\n \n coors = objectDetection.coordinates.coordinates()\n obj_detect = hand_detection.Object_Detection(coors, GRAPH_PATH, LABEL_PATH, video1, video2, Verbose=True)\n \n results = []\n \n while(video1.isOpened() and video2.isOpened()):\n output = obj_detect.Detect()\n if output is None: break\n else: results.append(output)\n \n cv2.destroyAllWindows()\n \n print(results)\n print([result for result in results])\n correct = CheckWrong([result[\"video1\"][\"classes\"] for result in results], label)\n \n assert correct == True\n \n return",
"def process_cvat_xml(xml_file, image_dir, output_dir):\n KNOWN_TAGS = {'box', 'image', 'attribute'}\n #output_dir = os.path.join(output_dir, \"Annotations\")\n os.makedirs(output_dir, exist_ok=True)\n cvat_xml = etree.parse(xml_file)\n\n basename = os.path.splitext( os.path.basename( xml_file ) )[0]\n\n tracks= cvat_xml.findall( './/track' )\n\n if (tracks is not None) and (len(tracks) > 0):\n frames = {}\n\n for track in tracks:\n trackid = int(track.get(\"id\"))\n label = track.get(\"label\")\n boxes = track.findall( './box' )\n for box in boxes:\n frameid = int(box.get('frame'))\n outside = int(box.get('outside'))\n ## occluded and pose are not tested within tracks\n occluded = 0 ## Default if not found\n if 'occluded' in box.attrib: ## this is an attribute of 'box' element\n occluded = int(box.get('occluded'))\n pose = 'Unspecified'\n for attr in box.findall('attribute'):\n if (attr.get('name') == 'type'): ## Used for view type\n pose = attr.text\n #keyframe = int(box.get('keyframe')) #currently unused\n xtl = float(box.get('xtl'))\n ytl = float(box.get('ytl'))\n xbr = float(box.get('xbr'))\n ybr = float(box.get('ybr'))\n \n frame = frames.get( frameid, {} )\n \n if outside == 0:\n frame[ trackid ] = { 'xtl': xtl, 'ytl': ytl, 'xbr': xbr, 'ybr': ybr, 'label': label,\n 'pose': pose, 'truncated': occluded }\n\n frames[ frameid ] = frame\n\n width = int(cvat_xml.find('.//original_size/width').text)\n height = int(cvat_xml.find('.//original_size/height').text)\n\n # Spit out a list of each object for each frame\n for frameid in sorted(frames.keys()):\n print( frameid )\n\n image_name = \"%s_%08d.jpg\" % (basename, frameid) ## KM: Revisit this for tracks. Hardcoded?\n image_path = os.path.join(image_dir, image_name)\n if not os.path.exists(image_path):\n log.warn('{} image cannot be found. Is `{}` image directory correct?'.\n format(image_path, image_dir))\n writer = Writer(image_path, width, height)\n\n frame = frames[frameid]\n\n objids = sorted(frame.keys())\n\n for objid in objids:\n\n box = frame[objid]\n\n label = box.get('label')\n occluded = box.get('occluded')\n pose = box.get('pose')\n xmin = float(box.get('xtl'))\n ymin = float(box.get('ytl'))\n xmax = float(box.get('xbr'))\n ymax = float(box.get('ybr'))\n\n writer.addObject(label, xmin, ymin, xmax, ymax, pose, occluded)\n\n anno_name = os.path.basename(os.path.splitext(image_name)[0] + '.xml')\n anno_dir = os.path.dirname(os.path.join(output_dir, image_name))\n os.makedirs(anno_dir, exist_ok=True)\n writer.save(os.path.join(anno_dir, anno_name))\n\n else:\n for img_tag in cvat_xml.findall('image'):\n ## Discard path component; we expect user to provide path to images directory.\n ## It is probably easier for users to provide full path to images directory\n ## rather than having to figure out how much of the path is embedded in the XML\n ## as a relative or absolute path by CVAT.\n image_name = os.path.basename(img_tag.get('name'))\n width = img_tag.get('width')\n height = img_tag.get('height')\n image_path = os.path.join(image_dir, image_name)\n if not os.path.exists(image_path):\n log.warn('{} image cannot be found. 
Is `{}` image directory correct?'.\n format(image_path, image_dir))\n writer = Writer(image_path, width, height)\n\n unknown_tags = {x.tag for x in img_tag.iter()}.difference(KNOWN_TAGS)\n if unknown_tags:\n log.warn('Ignoring tags for image {}: {}'.format(image_path, unknown_tags))\n\n for box in img_tag.findall('box'):\n label = box.get('label')\n occluded = 0 ## Default if not found\n if 'occluded' in box.attrib: ## this is an attribute of 'box' element\n occluded = int(box.get('occluded'))\n pose = 'Unspecified' ## Default if not found\n for attr in box.findall('attribute'):\n if (attr.get('name') == 'type'): ## Used for view type\n pose = attr.text\n\n xmin = float(box.get('xtl'))\n ymin = float(box.get('ytl'))\n xmax = float(box.get('xbr'))\n ymax = float(box.get('ybr'))\n\n writer.addObject(label, xmin, ymin, xmax, ymax, pose, occluded)\n\n anno_name = os.path.basename(os.path.splitext(image_name)[0] + '.xml')\n anno_dir = output_dir #os.path.dirname(os.path.join(output_dir, image_name))\n os.makedirs(anno_dir, exist_ok=True)\n #print(\"Writing {} (image: {})\".format(anno_name, image_name))\n writer.save(os.path.join(anno_dir, anno_name))",
"def wrangle_video_record(json_obj: dict):\n entry_dict = {}\n for key, value in get_final_key_paths(\n json_obj, '', True, black_list=['localized', 'thumbnails'],\n final_keys_only=True):\n if key in video_keys_and_columns: # converting camelCase to underscore\n new_key = []\n for letter in key:\n if letter.isupper():\n new_key.append('_' + letter.lower())\n else:\n new_key.append(letter)\n key = ''.join(new_key)\n if key == 'relevant_topic_ids':\n value = list(set(value)) # due to duplicate parent topic ids\n elif key == 'duration':\n value = convert_duration(value)\n elif key == 'published_at':\n value = value.replace('T', ' ')\n elif key == 'actual_start_time':\n key = 'stream'\n value = 'true'\n elif key in ['view_count', 'dislike_count', 'like_count',\n 'comment_count']:\n value = int(value)\n entry_dict[key] = value\n\n return entry_dict",
"def determine_object_class(self, components_detected):\n for subimage, components in components_detected.items():\n\n for component in components:\n\n if component.class_id == 0:\n component.object_name = \"insl\" # Insulator\n\n elif component.class_id == 1:\n component.object_name = \"dump\" # Vibration dumper\n\n else:\n component.object_name = \"pillar\"",
"def func(frame):\n nonlocal net\n\t\n prevh, prevw, _= frame.shape\n\n wscale = prevw / 480\n hscale = prevh / 320\n\n frame = cv2.resize(frame, (480, 320))\n frame = jetson.utils.cudaFromNumpy(frame)\n detections = net.Detect(frame)\n ret = [(d.ClassID, d.Top*hscale, d.Left*wscale, d.Right*wscale, d.Bottom*hscale) for d in detections]\n print(ret)\n return ret",
"def detect_from_video(config: Dict):\n video = config['inference']['video_input']['video_input_path']\n vp = VideoProcessing(video=video)\n vp.generate_frames(export_path=config['inference']['video_input']['video_to_frames_export_path'])\n if config['inference']['video_input']['video_to_frames_export_path'] == config['inference']['predicted_frames_export_path']:\n print(\"[Warning]... You have given Video to frame path same as prediction output path /nPredicted output will overwrite video to frame\")\n img_height = config['inference']['img_height']\n img_width = config['inference']['img_width']\n model = ssd_300(image_size=(img_height, img_width, 3),\n n_classes=config['inference']['n_classes'],\n mode='inference',\n l2_regularization=0.0005,\n scales=[0.1, 0.2, 0.37, 0.54, 0.71, 0.88, 1.05], # The scales for MS COCO are [0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05]\n aspect_ratios_per_layer=[[1.0, 2.0, 0.5],\n [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n [1.0, 2.0, 0.5],\n [1.0, 2.0, 0.5]],\n two_boxes_for_ar1=True,\n steps=[8, 16, 32, 64, 100, 300],\n offsets=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5],\n clip_boxes=False,\n variances=[0.1, 0.1, 0.2, 0.2],\n normalize_coords=True,\n subtract_mean=[123, 117, 104],\n swap_channels=[2, 1, 0],\n confidence_thresh=0.5,\n iou_threshold=0.45,\n top_k=200,\n nms_max_output_size=400)\n\n # Load the trained weights into the model.\n weights_path = config['inference']['weights_path']\n\n model.load_weights(weights_path, by_name=True)\n \n # Working with image\n all_images = glob.glob(f\"{config['inference']['video_input']['video_to_frames_export_path']}/*/*\")\n \n # Setting Up Prediction Threshold\n confidence_threshold = config['inference']['confidence_threshold']\n \n # Setting Up Classes (Note Should be in same order as in training)\n classes = config['inference']['classes']\n \n vp.existsFolder(f\"{config['inference']['predicted_frames_export_path']}/{video.split('.')[0]}\")\n # Working with image\n for current_img in tqdm(all_images):\n current_img_name = current_img.split('/')[-1]\n orig_image = cv2.imread(current_img)\n input_images = [] # Store resized versions of the images here\n img = image.load_img(current_img, target_size=(img_height, img_width))\n img = image.img_to_array(img) \n input_images.append(img)\n input_images = np.array(input_images)\n \n # Prediction\n y_pred = model.predict(input_images)\n\n # Using threshold\n y_pred_thresh = [y_pred[k][y_pred[k,:,1] > confidence_threshold] for k in range(y_pred.shape[0])]\n \n # Drawing Boxes\n for box in y_pred_thresh[0]:\n xmin = box[2] * orig_image.shape[1] / img_width\n ymin = box[3] * orig_image.shape[0] / img_height\n xmax = box[4] * orig_image.shape[1] / img_width\n ymax = box[5] * orig_image.shape[0] / img_height\n \n label = f\"{classes[int(box[0])]}: {box[1]:.2f}\"\n cv2.rectangle(orig_image, (int(xmin), int(ymin)), (int(xmax),int(ymax)), (255, 0, 0), 2)\n cv2.putText(orig_image, label, (int(xmin), int(ymin)), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2, cv2.LINE_AA)\n cv2.imwrite(f\"{config['inference']['predicted_frames_export_path']}/{video.split('.')[0]}/{current_img_name}\", orig_image)\n \n # Creating video\n vp.generate_video(import_path=config['inference']['predicted_frames_export_path'],\n export_path=config['inference']['video_input']['video_output_path'])",
"def detect_vehicles_video(video_name,\n classifier=None,\n scaler=None,\n tracker=None,\n decision=DECISION_THRESHOLD,\n mining=False,\n view=False,\n show=False):\n #video_out_name = \"project_out.mp4\"\n video_out_name = \"project_out.mp4\"\n cap = cv2.VideoCapture(video_name)\n\n fourcc = cv2.VideoWriter_fourcc(*'H264')\n out = cv2.VideoWriter(video_out_name, fourcc, 20.0, (1280, 720))\n frame_count = 0\n\n if mining:\n save_path = SAVE_PATH\n else:\n save_path = None\n while cap.isOpened():\n ret, frame = cap.read()\n frame_count += 1\n #if (frame_count % 50 != 0):\n # continue\n if frame is not None:\n prefix = 'f' + str(frame_count)\n print(\"video frame: \", frame_count)\n out_frame = detect_vehicles_image(\n cv2.cvtColor(frame, cv2.COLOR_BGR2RGB),\n classifier=classifier,\n scaler=scaler,\n decision=decision,\n tracker=tracker,\n save_path=save_path,\n prefix=prefix,\n view=view, show=show)\n out_frame_bgr = cv2.cvtColor(out_frame, cv2.COLOR_RGB2BGR)\n if view is not None:\n cv2.imshow('Vehicle Dectection and Tracking',\n out_frame_bgr)\n out.write(out_frame_bgr)\n else: # frame is None\n break\n if cv2.waitKey(2) & 0xFF == ord('q'):\n break\n cap.release()\n cv2.destroyAllWindows()",
"def __init__(self, dataPath, transformImage=None, mix=False):\r\n if not mix:\r\n self.dataPath = dataPath[0]\r\n self.videos = sorted(glob.glob(os.path.join(self.dataPath,'*')))\r\n else:\r\n self.dataPath = dataPath[0]\r\n self.videos = sorted(glob.glob(os.path.join(dataPath[0],'*'))+glob.glob(os.path.join(dataPath[1],'*')))\r\n self.transformImage = transformImage\r\n self.query = 'cast.json'\r\n self.cand = 'candidate.json'\r\n self.queryDir = 'cast'\r\n self.candDir = 'candidates'\r\n self.datas = []\r\n self.labels = []\r\n self.iden = {}\r\n total = 0\r\n for index in range(len(self.videos)):\r\n with open(os.path.join(self.videos[index], self.query)) as f:\r\n query = OrderedDict(sorted(json.load(f).items(), key=lambda d:d[1]))\r\n with open(os.path.join(self.videos[index], self.cand)) as f:\r\n cand = OrderedDict(sorted(json.load(f).items(), key=lambda d:d[1]))\r\n for qk,qv in query.items():\r\n if qv not in self.iden:\r\n self.iden[qv] = total\r\n total += 1\r\n # self.datas.append(qk)\r\n # self.labels.append(self.iden[qv])\r\n data = []\r\n label = []\r\n data.append(qk)\r\n label.append(self.iden[qv])\r\n for ck,cv in cand.items():\r\n if qv == cv:\r\n data.append(ck)\r\n label.append(self.iden[qv])\r\n\r\n datas = [data[i:i+4] for i in range(0,len(data),4)]\r\n labels = [label[i:i+4] for i in range(0,len(label),4)]\r\n for i in range(len(datas)):\r\n if len(datas[i])==4:\r\n self.datas.append(datas[i])\r\n self.labels.append(labels[i])\r\n # print(self.labels,'\\n')\r\n #total += 1\r\n self.classnum=total\r\n self.len = len(self.datas)",
"def parse():\n all_players = list(FACE_IMAGE_LOCATIONS.keys())\n face_encodings = VideoParser.__load_faces_encodings(all_players)\n player_occurrences = VideoParser.__get_player_occurrences(all_players, face_encodings)\n VideoParser.__save_parsed_video(player_occurrences)",
"def detect_and_draw(self, img, videosettings):\n if videosettings:\n haar_scale = 1.2\n min_neighbors = 2\n haar_flags = cv.CV_HAAR_DO_CANNY_PRUNING\n else:\n haar_scale = 1.1\n min_neighbors = 3\n haar_flags = 0\n min_size = (7, 7)\n image_scale = 2.4\n\n # allocate temporary images\n gray = cv.CreateImage((img.width,img.height), 8, 1)\n small_img = cv.CreateImage((cv.Round(img.width / image_scale),\n cv.Round (img.height / image_scale)), 8, 1)\n\n # convert color input image to grayscale\n cv.CvtColor(img, gray, cv.CV_BGR2GRAY)\n\n # scale input image for faster processing\n cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)\n\n cv.EqualizeHist(small_img, small_img)\n\n if self.cascade:\n store = cv.CreateMemStorage(1024)\n faces = cv.HaarDetectObjects(small_img, self.cascade, store, haar_scale, min_neighbors, haar_flags, min_size)\n normfaces = []\n if faces:\n for ((x, y, w, h), n) in faces:\n # the input to cv.HaarDetectObjects was resized, so scale the\n # bounding box of each face and convert it to two CvPoints\n pt1 = (int(x * image_scale), int(y * image_scale))\n pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))\n cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)\n #normfaces.append(((int(round(x*image_scale)), int(round(y*image_scale)), int(round(w*image_scale)), int(round(h*image_scale))),n))\n\n center_x = (int(round(w*image_scale)/2) + int(round(x*image_scale)))\n center_y = (int(round(h*image_scale)/2) + int(round(y*image_scale)))\n angle_x = self.transformer.get_x_angle(center_x)\n # hardcoded speed according to the size of the face in the image\n speed_y = self.transformer.get_speed(w, h)\n normfaces.append( ( (center_x, center_y), (speed_y, angle_x), (w*h), n, w, h) )\n if self.verbose:\n cv.ShowImage(\"Haar Detector rotation\" + str(self.rotation) , img)\n \n cv.WaitKey(10)\n return normfaces",
"def detect_objects(interpreter, image):\n set_input_tensor(interpreter, image)\n interpreter.invoke()\n\n # Get all output details\n #boxes = get_output_tensor(interpreter, 0)\n classes = get_output_tensor(interpreter, 1)\n scores = get_output_tensor(interpreter, 2)\n #count = int(get_output_tensor(interpreter, 3))\n\n #results = []\n #for i in range(count):\n # if scores[i] >= threshold:\n # result = {\n # #'bounding_box': boxes[i],\n # 'class_id': classes[i],\n # 'score': scores[i]\n # }\n # results.append(result)\n \n \n #print(\"detection results:\\n\" + str(results))\n #return results\n return np.array([int(_class) for _class in classes]), np.array(scores)",
"def __call__(self, video_path, per_frames = 1 , offset = None):\n \n cap = cv2.VideoCapture(video_path)\n \n if not cap.isOpened():\n raise Exception(\"Video file does not exist or is invalid\")\n\n \n if offset:\n cap.set(cv2.CAP_PROP_POS_MSEC, offset)\n \n \n info = []\n\n while cap.isOpened():\n ret, frame = cap.read()\n if ret:\n if cap.get(cv2.CAP_PROP_POS_FRAMES) % per_frames == 0:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n faces_info = self.detect_faces_from_image(frame,\n desired_width=224, desired_height=224) \n if faces_info:\n for element in faces_info:\n face_img = image.img_to_array(element[1])\n\n face_img = utils.preprocess_input(face_img, version=1)\n face_img = np.expand_dims(face_img, axis=0)\n\n features = self.vgg_feature_extractor.predict(face_img)\n label = self.gender_svm.predict(features)[0]\n decision_value = round(self.gender_svm.decision_function(features)[0], 3)\n\n bounding_box = element[0][0]\n detection_score = round(element[5], 3)\n bbox_length = bounding_box.bottom() - bounding_box.top()\n\n info.append([\n cap.get(cv2.CAP_PROP_POS_FRAMES), bounding_box, (bbox_length, bbox_length), label,\n decision_value, detection_score\n ])\n\n else:\n break\n cap.release()\n info = pd.DataFrame.from_records(info, columns = ['frame', 'bb', 'size','label', 'decision', 'conf'])\n return info",
"def process_video(weights_path,video_path,output_path,margins=40,facenet_threshold=.985,euclidean_distance_threshold = 120.0):\n with torch.no_grad():\n mtcnn = MTCNN(image_size= 256, margin = 0)\n model = Model.VGGFace_Extractor().to(device)\n model.load_state_dict(torch.load(weights_path))\n model.eval()\n cap = cv2.VideoCapture(video_path)\n rotateCode = check_rotation(video_path)\n fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')\n out = cv2.VideoWriter(output_path, fourcc, 20.0, (int(cap.get(3)), int(cap.get(4))))\n ret, frame1 = cap.read()\n hsv = np.zeros_like(frame1)\n hsv[..., 1] = 255\n i = 0\n while (cap.isOpened()):\n i += 1\n ret, frame2 = cap.read()\n if not (ret): break\n if rotateCode is not None:\n frame2 = correct_rotation(frame2, rotateCode)\n\n boxes, probs = mtcnn.detect(frame2)\n img_draw = frame2.copy()\n img_draw = Image.fromarray(img_draw)\n draw = ImageDraw.Draw(img_draw)\n if boxes is not None:\n names = []\n distances_difference = []\n for (box, point) in zip(boxes, probs):\n \"\"\" Loop from the extract_face method from facenet_pytorch\"\"\"\n\n if point < facenet_threshold: continue\n margin = margins\n image_size = 256\n margin = [\n margin * (box[2] - box[0]) / (image_size - margin),\n margin * (box[3] - box[1]) / (image_size - margin),\n ]\n raw_image_size = get_size(img_draw)\n box = [\n int(max(box[0] - margin[0] / 2, 0)),\n int(max(box[1] - margin[1] / 2, 0)),\n int(min(box[2] + margin[0] / 2, raw_image_size[0])),\n int(min(box[3] + margin[1] / 2, raw_image_size[1])),\n ]\n\n face = img_draw.crop(box).copy().resize((image_size, image_size), Image.BILINEAR).convert(\"RGB\")\n features_1 = model(utils.preprocess(face,device).reshape(-1, 3, 224, 224))\n images_path = \"individuals_extracted/\"\n data_path = os.path.join(images_path, '*pt')\n files = glob.glob(data_path)\n name = \"Unknown\"\n best_distance = euclidean_distance_threshold + 5\n for k,f1 in enumerate(files):\n features = torch.load(f1)\n distance = utils.euclidean_distance(features,features_1)\n if distance < euclidean_distance_threshold and distance < best_distance:\n best_distance = distance\n name = re.sub('_[1-9]*[.]*[a-zA-Z]*', '', f1.replace(images_path,\"\"))\n\n names.append(name)\n distances_difference.append(best_distance)\n\n for (box, point,name,distances) in zip(boxes, probs,names,distances_difference):\n if point < facenet_threshold or name == \"Unknown\": continue\n draw.rectangle(box.tolist(), width=4)\n draw.text(box.tolist(), name, font=ImageFont.truetype(\"Keyboard.ttf\",40))\n\n k = cv2.waitKey(3) & 0xff\n if k == 27:\n break\n out.write(np.asarray(img_draw))\n\n out.release()\n cap.release()\n cv2.destroyAllWindows()",
"def voc_pred_process(pred_data, val_cls, recs):\n num_classes = config.num_classes\n cls_img_ids = {}\n cls_bboxes = {}\n cls_scores = {}\n classes = {}\n cls_npos = {}\n for cls in val_cls:\n if cls == 'background':\n continue\n class_recs = {}\n npos = 0\n for imagename in imagenames:\n R = [obj for obj in recs[imagename] if obj['name'] == cls]\n bbox = np.array([x['bbox'] for x in R])\n difficult = np.array([x['difficult'] for x in R]).astype(np.bool)\n det = [False] * len(R)\n npos = npos + sum(~difficult)\n class_recs[imagename] = {'bbox': bbox,\n 'difficult': difficult,\n 'det': det}\n cls_npos[cls] = npos\n classes[cls] = class_recs\n cls_img_ids[cls] = []\n cls_bboxes[cls] = []\n cls_scores[cls] = []\n\n for sample in pred_data:\n pred_boxes = sample['boxes']\n box_scores = sample['box_scores']\n img_id = sample['img_id']\n h, w = sample['image_shape']\n\n final_boxes = []\n final_label = []\n final_score = []\n\n for c in range(1, num_classes):\n class_box_scores = box_scores[:, c]\n score_mask = class_box_scores > config.min_score\n class_box_scores = class_box_scores[score_mask]\n class_boxes = pred_boxes[score_mask] * [h, w, h, w]\n\n if score_mask.any():\n nms_index = apply_nms(class_boxes, class_box_scores, config.nms_threshold, config.max_boxes)\n class_boxes = class_boxes[nms_index]\n class_box_scores = class_box_scores[nms_index]\n\n final_boxes += class_boxes.tolist()\n final_score += class_box_scores.tolist()\n final_label += [c] * len(class_box_scores)\n\n for loc, label, score in zip(final_boxes, final_label, final_score):\n cls_img_ids[val_cls[label]].append(img_id)\n cls_bboxes[val_cls[label]].append([loc[1], loc[0], loc[3], loc[2]])\n cls_scores[val_cls[label]].append(score)\n return classes, cls_img_ids, cls_bboxes, cls_scores, cls_npos",
"def classify_face(im):\n faces_death = get_encoded_faces_deaths()\n faces_arrested = get_encoded_faces_arrested()\n faces_wanted = get_encoded_faces_wanted()\n\n faces_encoded_death = list(faces_death.values())\n known_face_names_death = list(faces_death.keys())\n\n faces_encoded_arrested = list(faces_arrested.values())\n known_face_names_arrested = list(faces_arrested.keys())\n\n faces_encoded_wanted = list(faces_wanted.values())\n known_face_names_wanted = list(faces_wanted.keys())\n\n img = cv2.imread(im, 1)\n face_locations = face_recognition.face_locations(img)\n unknown_face_encodings = face_recognition.face_encodings(img,face_locations)\n face_names = []\n find_in_db(im,known_face_names_death,unknown_face_encodings,face_names,faces_encoded_death,\"unnatural_death_images/unnatural_death_images\")\n find_in_db(im,known_face_names_arrested,unknown_face_encodings,face_names,faces_encoded_arrested,\"ArrestPerson_images\")\n find_in_db(im,known_face_names_wanted,unknown_face_encodings,face_names,faces_encoded_wanted,\"wanted\")",
"def tagVideo(modelpath, videopath, outputPath=None): \n model = get_model_instance_segmentation(3)\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n # model.load_state_dict(torch.load(modelpath, map_location=device), strict=False)\n model.load_state_dict(torch.load(modelpath, map_location=device))\n model = model.to(device)\n model.eval()\n\n \n data_transform = transforms.Compose([\n ToPILImage(),\n transforms.ToTensor(), \n ])\n\n\n if outputPath:\n writer = FFmpegWriter(str(outputPath))\n \n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.namedWindow('main', cv2.WINDOW_NORMAL)\n labels = ['No mask', 'Mask']\n labelColor = [(10, 0, 255), (10, 255, 0)]\n img_count = 0\n outputDir = os.path.dirname(os.path.realpath(outputPath))\n frame_count = 0\n boundingBoxes = []\n for frame in vreader(str(videopath)):\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n print('Frame:', frame_count)\n\n if frame_count%30==0:\n frameTensor = data_transform(frame)\n frameTensor = torch.unsqueeze(frameTensor, 0).to(device)\n output = model(frameTensor)\n boundingBoxes = plot_image_new(frame, frameTensor[0], output[0]) \n \n if len(boundingBoxes)>0:\n for bb in boundingBoxes:\n cv2.rectangle(frame,\n (bb[0], bb[1]),\n (bb[2], bb[3]),\n (54, 66, 227),\n thickness=2)\n\n cv2.imshow('main', frame)\n if outputPath:\n writer.writeFrame(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n frame_count += 1\n if outputPath:\n writer.close()\n cv2.destroyAllWindows()",
"def find_cars_video(input_path, output_path, clf, hyperparams, box_color=None, debug=False):\n v = VideoProcessor(clf, hyperparams, box_color)\n v.process_video(input_path, output_path, debug)"
]
| [
"0.6015281",
"0.601183",
"0.5993276",
"0.5792634",
"0.5756552",
"0.5669971",
"0.56536233",
"0.5638236",
"0.56043786",
"0.5590023",
"0.5587217",
"0.55868554",
"0.55619556",
"0.5552709",
"0.5552561",
"0.5544587",
"0.55440867",
"0.549236",
"0.5492263",
"0.54880214",
"0.54424584",
"0.5438063",
"0.54378784",
"0.5434901",
"0.5392022",
"0.5388163",
"0.5385139",
"0.5381662",
"0.5377557",
"0.5348522"
]
| 0.730892 | 0 |
Moves the tooltip in the world frame by the given x,y,z / roll,pitch,yaw. Returns True on success | def move_tip(x=0., y=0., z=0., roll=0., pitch=0., yaw=0.):
transform = PyKDL.Frame(PyKDL.Rotation.RPY(pitch, roll, yaw),
PyKDL.Vector(-x, -y, -z))
tip_pose = get_tip_pose()
tip_pose_kdl = posemath.fromMsg(tip_pose)
final_pose = toMsg(tip_pose_kdl * transform)
arm_commander.set_start_state_to_current_state()
arm_commander.set_pose_targets([final_pose])
plan = arm_commander.plan()
if not arm_commander.execute(plan):
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def move( self, event ):\n self.lastMotion = time()\n if self.follow == False: # If the follow flag is not set, motion within the widget will make the ToolTip dissapear\n self.withdraw()\n self.visible = 1\n self.geometry( '+%i+%i' % ( event.x_root+10, event.y_root+10 ) ) # Offset the ToolTip 10x10 pixes southwest of the pointer\n try:\n self.msgVar.set( self.msgFunc() ) # Try to call the message function. Will not change the message if the message function is None or the message function fails\n except:\n pass\n self.after( int( self.delay * 1000 ), self.show )",
"def move(self, event):\n self.lastMotion = time()\n # If the follow flag is not set, motion within the\n # widget will make the ToolTip disappear\n #\n if self.follow is False:\n self.withdraw()\n self.visible = 1\n\n # Offset the ToolTip 10x10 pixes southwest of the pointer\n self.geometry('+%i+%i' % (event.x_root+20, event.y_root-10))\n try:\n # Try to call the message function. Will not change\n # the message if the message function is None or\n # the message function fails\n self.msgVar.set(self.msgFunc())\n except:\n pass\n self.after(int(self.delay * 1000), self.show)",
"def move(self, event):\r\n self.lastMotion = time()\r\n # If the follow flag is not set, motion within the\r\n # widget will make the ToolTip disappear\r\n #\r\n if self.follow is False:\r\n self.withdraw()\r\n self.visible = 1\r\n\r\n # Offset the ToolTip 10x10 pixes southwest of the pointer\r\n self.geometry('+%i+%i' % (event.x_root+20, event.y_root-10))\r\n try:\r\n # Try to call the message function. Will not change\r\n # the message if the message function is None or\r\n # the message function fails\r\n self.msgVar.set(self.msgFunc())\r\n except:\r\n pass\r\n self.after(int(self.delay * 1000), self.show)",
"def move(self, event):\r\n self.lastMotion = time()\r\n # If the follow flag is not set, motion within the\r\n # widget will make the ToolTip disappear\r\n #\r\n if self.follow is False:\r\n self.withdraw()\r\n self.visible = 1\r\n\r\n # Offset the ToolTip 10x10 pixes southwest of the pointer\r\n self.geometry('+%i+%i' % (event.x_root+20, event.y_root-10))\r\n try:\r\n # Try to call the message function. Will not change\r\n # the message if the message function is None or\r\n # the message function fails\r\n self.msgVar.set(self.msgFunc())\r\n except:\r\n pass\r\n self.after(int(self.delay * 1000), self.show)",
"def mouse_move(self, pos):\n if (self.setup_type == \"position\"):\n x, y = pos\n self.canvas.move(x, y)",
"def move( self, event ):\n self.lastMotion = time()\n if self.follow == False: # If the follow flag is not set, motion within the widget will make the ToolTip dissapear\n self.withdraw()\n self.visible = 1\n\n root = self.parent\n root = self.parent\n\n # parent_name = self.winfo_parent()\n # root = self._nametowidget(parent_name)\n\n \n # pa = re.split(r'(\\D)', root.geometry())\n # pt = re.split(r'(\\D)', self.geometry())\n #pm = re.split(r'(\\D)', self.master.geometry())\n #print \"root: \", pa\n #print \"tool: \", self.geometry()\n #print \"pm: \", self.wdgt.geometry()\n #print \"mouse: \", event.x_root, event.y_root\n #print \"mouser: \", event.x, event.y\n \n xCan = event.x_root - self.parent.winfo_rootx()\n yCan = event.y_root - self.parent.winfo_rooty()\n #print \"mouser2: \", xCan, yCan\n \n \n \n #if pa[5] == '-':\n # limit_x = int(pa[0]) - int(pa[6]) \n # print \"minus\"\n #else:\n #limit_x = int(pa[0]) + int(pa[4]) \n #if root.state() == 'zoomed':\n # limit_x = int(pa[0])\n #print \"lim: \", limit_x\n \n self.geometry( '+%i+%i' % ( event.x_root+10, event.y_root+10 ) ) # Offset the ToolTip 10x10 pixes southwest of the pointer\n \n # if xCan > (limit_x-int(pt[0])):\n # #print \"xxx\"\n # self.geometry( '+%i+%i' % ( event.x_root-int(pt[0]), event.y_root+10 ) ) # Offset the ToolTip 10x10 pixes southwest of the pointer\n # else:\n # self.geometry( '+%i+%i' % ( event.x_root+10, event.y_root+10 ) ) # Offset the ToolTip 10x10 pixes southwest of the pointer\n # try:\n # self.msgVar.set( self.msgFunc() ) # Try to call the message function. Will not change the message if the message function is None or the message function fails\n # except:\n # pass\n self.after( int( self.delay * 1000 ), self.show )",
"def showTooltip(self, label): \n self.tooltipWindow = ocempgui.widgets.TooltipWindow (label)\n x, y = pygame.mouse.get_pos ()\n self.tooltipWindow.topleft = x + 8, y - 5\n self.tooltipWindow.depth = 99 # Make it the topmost widget.\n self.tooltipWindow.zOrder = 30000\n self.window.add_child(self.tooltipWindow)",
"def send_hover_setpoint(self, vx, vy, yaw_rate, z):\n self._send_hover_setpoint_client( vx, vy, yaw_rate, z)",
"def on_mouse_move(self, event):\n\n # self.view = 1 * np.eye(4, dtype=np.float32)\n # self.model = 1 * np.eye(4, dtype=np.float32)\n\n # self.translate -= event.delta[1]\n # self.translate = max(-1, self.translate)\n # print(event.delta[1])\n # print(self.translate)\n # self.view = translate((0, 0, -self.translate))\n # self.game_program['u_view'] = self.view\n # self.game_program['u_size'] = 5 / self.translate\n # self.view = (0.1*self.translate*np.eye(4, dtype=np.float32)) + self.view\n # self.model = (0.1*self.translate*np.eye(4, dtype=np.float32)) + self.model\n # print(self.view)\n\n # self.game_program['u_model'] = self.model\n # self.game_program['u_view'] = self.view\n\n x, y = event.pos\n #print(x, y)\n self.x_offset, self.y_offset = x - self.last_x, - (y - self.last_y)\n self.last_x, self.last_y = x, y\n self.x_offset *= self.sensitivity\n self.y_offset *= self.sensitivity\n\n self.yaw, self.pitch = self.yaw - self.x_offset, self.pitch + self.y_offset\n self.rot_y(self.yaw * np.pi / 180)\n self.rot_x(self.pitch * np.pi / 180)\n\n self.view = np.dot(self.rot_mat_y, self.rot_mat_x)\n self.game_program['u_view'] = self.view\n\n self.update()",
"def move(self, z, add_trace=True):\n # Project neutrons onto perpendicular plane at the sample position\n dz = z - self.z\n self.x = dz*tan(self.angle) + self.x\n self.z = z\n if add_trace:\n self.add_trace()",
"def _move(self, event):\n if self._current_tower.get_value() > self._coins:\n return\n\n #move the shadow tower to mouse position\n position = event.x, event.y\n self._current_tower.position = position\n\n legal, grid_path = self._game.attempt_placement(position)\n\n #find the best path and covert positions to pixel positions\n path = [self._game.grid.cell_to_pixel_centre(position)\n for position in grid_path.get_shortest()]\n\n #Task 1.2 (Tower placement): Draw the tower preview here\n self._view.draw_preview(self._current_tower, legal)\n self._view.draw_path(path)",
"def mousePosition(self):",
"def move(self):\n \n self.position = self.wander()",
"def OnMove(self, event):\n self.SetStatusText(\"%.2f, %.2f\"%tuple(event.Coords))\n event.Skip()",
"def update_tip_pose(self):\n world_pose_eef = get_link_pose(self.body, self.EEF_LINK_INDEX)\n wTe = get_matrix_from_pose_2d(world_pose_eef) # world_T_eef\n world_pose_eef = get_link_pose(self.body, self.TIP_LINK_INDEX)\n wTt = get_matrix_from_pose_2d(world_pose_eef) # world_T_tip\n self.eTt = np.matmul(np.linalg.inv(wTe), wTt)\n self.tTe = np.linalg.inv(self.eTt)",
"def mouse_move_callback(self, event):\n # TODO drag and drop figuriek\n print(\"moving at \", event.x + self.offset_x, event.y + self.offset_y)",
"def moveBasedOnRetreatAction(self, time_passed):\n cpos = self.toScreenCoordinate()\n mpos = pygame.mouse.get_pos()\n toMouse = Vector2.from_points(cpos,mpos)\n toMouse.normalize()\n rheading = -toMouse\n \n heading = self.heading\n angle_between = heading.angle_between(rheading)\n if angle_between>=-30 and angle_between<=30:\n return\n \n distance = time_passed * self.speed\n movement = rheading * distance\n x = movement.get_x()\n y = movement.get_y()\n if not self.checkCollision(x, y) and self.checkValidCoord(x, y):\n self.move(x, y)",
"def on_mouse_motion(self, x, y, delta_x, delta_y):\n\n print(x)\n print(y)\n print(delta_x)\n print(delta_y)\n\n\n #self.manage_crosshair()\n \n \n\n #self.crosshair_sprite.center_x += delta_x\n #self.crosshair_sprite.center_y += delta_y\n\n\n self.crosshair_relative_xoffset += delta_x\n self.crosshair_relative_yoffset += delta_y",
"def move(self, dx, dy):\r\n self._location_x += dx\r\n self._location_y += dy\r\n print(world.tile_exists(self._location_x, self._location_y).intro_text())",
"def move(self):\n if self._z >= 75:\n a = random.random()\n print(str(a))\n if a < 0.2:\n self._z += 1\n if a > 0.2 and a < 0.9:\n self._z -= 1\n if a > 0.9:\n self._z = self._z\n else: \n self._z -= 1\n \n b = random.random()\n print(str(b))\n if b < 0.1:\n self._y += 1\n if b > 0.1 and b < 0.2:\n self._y -= 1\n if b > 0.2 and b < 0.25:\n self._x -= 1\n if b > 0.25:\n self._x += 1",
"def set_pub_robot_pose(self, x, y, yaw):\r\n self.publisher_robot.set_pose_by_center(x, y, yaw)",
"def move(point):\n # wrapper just so we don't have to import pymouse separately\n m = PyMouse()\n m.move(*point)",
"def point_at(obj, target, roll=0):\n\tif not isinstance(target, mathutils.Vector):\n\t\ttarget = mathutils.Vector(target)\n\tloc = obj.location\n\t# direction points from the object to the target\n\tdirection = target - loc\n\n\tquat = direction.to_track_quat('-Z', 'Y')\n\n\t# /usr/share/blender/scripts/addons/add_advanced_objects_menu/arrange_on_curve.py\n\tquat = quat.to_matrix().to_4x4()\n\trollMatrix = mathutils.Matrix.Rotation(roll, 4, 'Z')\n\n\t# remember the current location, since assigning to obj.matrix_world changes it\n\tloc = loc.to_tuple()\n\tobj.matrix_world = quat * rollMatrix\n\tobj.location = loc",
"def adjust_tello_position(offset_x, offset_y, offset_z):\n if not -90 <= offset_x <= 90 and offset_x is not 0:\n if offset_x < 0:\n drone.rotate_ccw(20)\n elif offset_x > 0:\n drone.rotate_cw(20)\n \n if not -70 <= offset_y <= 70 and offset_y is not -30:\n if offset_y < 0:\n drone.move_up(15)\n elif offset_y > 0:\n drone.move_down(20)\n \n if not 15000 <= offset_z <= 30000 and offset_z is not 0:\n if offset_z < 15000:\n drone.move_forward(20)\n elif offset_z > 30000:\n drone.move_backward(20)",
"def point_at(obj, target, roll=0):\n if not isinstance(target, mathutils.Vector):\n target = mathutils.Vector(target)\n loc = obj.location\n # direction points from the object to the target\n direction = target - loc\n\n quat = direction.to_track_quat('-Z', 'Y')\n\n # /usr/share/blender/scripts/addons/add_advanced_objects_menu/arrange_on_curve.py\n quat = quat.to_matrix().to_4x4()\n rollMatrix = mathutils.Matrix.Rotation(roll, 4, 'Z')\n\n # remember the current location, since assigning to obj.matrix_world changes it\n loc = loc.to_tuple()\n obj.matrix_world = quat * rollMatrix\n obj.location = loc",
"def move(self, x, y, z):\n oldx, oldy, oldz = self.position\n self.position = (oldx + x, oldy + y, oldz + z)",
"def look_at(self, frame_id, x, y, z):\n # TODO: Create goal\n # TODO: Fill out the goal (we recommend setting min_duration to 1 second)\n # TODO: Send the goal\n # TODO: Wait for result\n goal = control_msgs.msg.PointHeadGoal()\n goal.min_duration = rospy.Duration(1)\n goal.target.header.frame_id = frame_id\n goal.target.point.x = x\n goal.target.point.y = y\n goal.target.point.z = z\n self.head_client.send_goal_and_wait(goal)",
"def mouse_move(self, obj, event):\n last_pos = self.iren.GetLastEventPosition()\n next_pos = self.iren.GetEventPosition()\n last_disp_coords = np.asarray([last_pos[0], last_pos[1], 0])\n next_disp_coords = np.asarray([next_pos[0], next_pos[1], 0])\n last_world_coords = self.display_to_world(last_disp_coords)\n next_world_coords = self.display_to_world(next_disp_coords)\n world_direction = (last_world_coords - next_world_coords)[0]\n\n if world_direction > 0:\n direction = 'forwards'\n elif world_direction < 0:\n direction = 'backwards'\n else:\n direction = 'none'\n\n if self.cone_dir == 'start':\n if direction == 'backwards':\n self.start_base_x += .5\n if self.start_base_x.is_integer():\n ind = str(int(self.start_base_x))\n isvalid = self.gaps.set_dragged_start(ind)\n if isvalid:\n self.ren_win.Render()\n else:\n self.start_base_x -= .5\n return\n\n elif direction == 'forwards':\n if self.start_base_x > 0:\n self.start_base_x -= .5\n if self.start_base_x.is_integer():\n ind = str(int(self.start_base_x))\n self.gaps.set_dragged_start(ind)\n self.ren_win.Render()\n\n if self.cone_dir == 'end':\n if direction == 'backwards':\n if self.end_base_x > 0:\n self.end_base_x -= .5\n if self.end_base_x.is_integer():\n ind = str(int(self.end_base_x))\n self.gaps.set_dragged_end(ind)\n self.ren_win.Render()\n\n elif direction == 'forwards':\n self.end_base_x += .5\n if self.end_base_x.is_integer():\n ind = str(int(self.end_base_x))\n isvalid = self.gaps.set_dragged_end(ind)\n if isvalid:\n self.ren_win.Render()\n else:\n self.end_base_x -= .5\n return",
"def hoverMoveEvent(self, event):\n activeTool = self._activeTool()\n toolMethodName = str(activeTool) + \"HoverMove\"\n if hasattr(self, toolMethodName):\n getattr(self, toolMethodName)(event.pos())",
"def run_prank():\n d = display.Display()\n num_screens = d.screen_count()\n s = d.screen(random.randrange(0, num_screens) )\n root = s.root\n root.warp_pointer(random.randrange(0, s.width_in_pixels), random.randrange(0, s.height_in_pixels) )\n d.sync()\n return (True, 'It looks like you are using a mouse. Let me help you move it')"
]
| [
"0.6089259",
"0.6000828",
"0.5988017",
"0.5988017",
"0.5411746",
"0.5384831",
"0.53588617",
"0.5325385",
"0.5292513",
"0.5253395",
"0.51829153",
"0.5145922",
"0.51455396",
"0.5140263",
"0.5134566",
"0.51338595",
"0.5131408",
"0.5099384",
"0.50469345",
"0.50339395",
"0.5030658",
"0.500792",
"0.5002727",
"0.4997354",
"0.49870688",
"0.49751118",
"0.4971047",
"0.49590194",
"0.49558425",
"0.4947538"
]
| 0.70308006 | 0 |
Throws ValueError if data is not list, tuple, or None | def _data_validation(data):
if isinstance(data, (list, tuple, type(None))) is not True:
raise ValueError(f"data must be tuple, list, or None, "
f"data type is '{type(data).__name__}'. "
f"Iterable data cannot be empty.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def checkTrainData(cls, data):\n\n if data == None or len(data) == 0:\n raise Exception(\"No data\")\n\n if type(data[0]) != tuple:\n raise Exception(\"Not a list of tuples\")\n\n if len(data[0]) != 2 and type(data[0][0]) != str and type(data[0][1]) != list:\n raise Exception(\"Not a tuple of (String, [data])\")\n\n length = len(data[0][1])\n\n for tup in data:\n if len(tup) != 2 and type(tup[0]) != str and type(tup[1]) != list:\n raise Exception(\"Not a tuple of (String, [data])\")\n\n if len(tup[1]) != length:\n raise Exception(\"Not all elements have the same amount of data\")",
"def _check_and_transform_input(self, data):\n if isinstance(data, list):\n if np.array(data).shape == (len(data),):\n if len(data) == 1:\n data = np.array(data).reshape(1, 1)\n data = np.array(data).reshape(len(data), 1)\n else:\n data = np.concatenate(data).reshape(len(data), -1)\n else:\n raise TypeError('Input data should be of type list, but found type {}'.format(type(data)))\n\n return data",
"def _isvalid(self, data):\n if data is None:\n return False\n elif isinstance(data, (list,tuple)):\n if len(data) <= 0:\n return False\n else:\n return True\n elif isinstance(data, (np.ndarray)):\n if data.size <= 0:\n return False\n else:\n return True\n elif not data:\n return False\n else:\n return True",
"def _raise_on_error(data: Union[list, dict]) -> None:\n if isinstance(data, list) and data:\n data = data[0]\n\n if isinstance(data, dict) and \"error\" in data:\n raise_error(data[\"error\"])",
"def _match_shape_to_data_any(data: Union[Any, List[Any]]) -> list:\n if isinstance(data, list):\n return data\n return [data]",
"def is_tuple_or_list(value):\n return isinstance(value, list) or isinstance(value, tuple)",
"def unpack_validation_data(validation_data, raise_if_ambiguous=True):\n if (isinstance(validation_data, (iterator_ops.Iterator,\n iterator_ops.IteratorBase,\n data_types.DatasetV2,\n data_utils.Sequence))\n or not hasattr(validation_data, '__len__')):\n val_x = validation_data\n val_y = None\n val_sample_weight = None\n elif len(validation_data) == 2:\n try:\n val_x, val_y = validation_data # pylint: disable=unpacking-non-sequence\n val_sample_weight = None\n except ValueError:\n val_x, val_y, val_sample_weight = validation_data, None, None\n elif len(validation_data) == 3:\n try:\n val_x, val_y, val_sample_weight = validation_data # pylint: disable=unpacking-non-sequence\n except ValueError:\n val_x, val_y, val_sample_weight = validation_data, None, None\n else:\n if raise_if_ambiguous:\n raise ValueError(\n 'When passing a `validation_data` argument, '\n 'it must contain either 2 items (x_val, y_val), '\n 'or 3 items (x_val, y_val, val_sample_weights), '\n 'or alternatively it could be a dataset or a '\n 'dataset or a dataset iterator. '\n 'However we received `validation_data=%s`' % validation_data)\n val_x, val_y, val_sample_weight = validation_data, None, None\n return val_x, val_y, val_sample_weight",
"def data_missing(data):\n return type(data)._from_sequence([None, data[0]])",
"def isTuple(data):\n\ttry:\n\t\tfrom types import TupleType\n\t\tif type(data) == TupleType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type((0,0)):\n\t\t\treturn True\n\treturn False",
"def check_data_is_format(data):\n try:\n data_lst = data\n if not isinstance(data, list):\n data_lst = json.loads(data)\n\n for data in data_lst:\n if not isinstance(data, dict):\n raise ValueError(\"data contains not dict\")\n\n for key in data.keys():\n check_type(key)\n except ValueError as e:\n logging.error(\"data format check error %s\" % e)\n return False, None\n except Exception as e:\n logging.error(\"data format check unknown error %s\" % e)\n return False, None\n else:\n return True, data_lst",
"def validate_data(self, data):\n # TODO use schema\n assert \"file_contents\" in data, data\n assert \"type\" in data, data",
"def _checkData(data: Sequence[HistoryElement]):\r\n if not all(x.timeStamp for x in data):\r\n raise ValueError(\"At least one element in data doesn't have a TimeStamp\")",
"def ex_list(data):\n return tuple(data)",
"def _validate_value(self, val):\r\n if type(val) in (int, long, float, str, unicode, ):\r\n return val\r\n if isinstance(val, tuple) or isinstance(val, frozenset):\r\n for i in val:\r\n self._validate_value(i)\r\n return val\r\n raise TypeError(\r\n \"Only number/strings and tuples/frozensets allowed here.\",\r\n )",
"def test_old_data_format_error(self):\n assert_raises(ValueError, get_data, self.testv1)",
"def list_typecheck(val, name, msg):\n if type(val) != Pair and val != Nil():\n raise SnekEvaluationError(name + \" error: \" + msg)",
"def f_supports(self, data):\n dtype = type(data)\n if dtype is tuple or dtype is list and len(data) == 0:\n return True # ArrayParameter does support empty tuples\n elif dtype is np.ndarray and data.size == 0 and data.ndim == 1:\n return True # ArrayParameter supports empty numpy arrays\n else:\n return super(ArrayParameter, self).f_supports(data)",
"def validateListOfSomething(asValues, aoNilValues = tuple([[], None]), fAllowNull = True):\n if asValues in aoNilValues or (not asValues and not fAllowNull):\n return (asValues, None if fAllowNull else 'Mandatory.')\n\n if not isinstance(asValues, list):\n return (asValues, 'Invalid data type (%s).' % (type(asValues),));\n\n asValues = list(asValues); # copy the list.\n if asValues:\n oType = type(asValues[0]);\n for i in range(1, len(asValues)):\n if type(asValues[i]) is not oType: # pylint: disable=unidiomatic-typecheck\n return (asValues, 'Invalid entry data type ([0]=%s vs [%d]=%s).' % (oType, i, type(asValues[i])) );\n\n return (asValues, None);",
"def _maybe_dt_data(self, data, feature_names, feature_types,\n meta=None, meta_type=None):\n if meta and data.shape[1] > 1:\n raise ValueError(\n 'DataTable for label or weight cannot have multiple columns')\n if meta:\n # below requires new dt version\n # extract first column\n data = data.to_numpy()[:, 0].astype(meta_type)\n return data, None, None\n\n data_types_names = tuple(lt.name for lt in data.ltypes)\n bad_fields = [data.names[i]\n for i, type_name in enumerate(data_types_names)\n if type_name not in self.dt_type_mapper]\n if bad_fields:\n msg = \"\"\"DataFrame.types for data must be int, float or bool.\n Did not expect the data types in fields \"\"\"\n raise ValueError(msg + ', '.join(bad_fields))\n\n if feature_names is None and meta is None:\n feature_names = data.names\n\n # always return stypes for dt ingestion\n if feature_types is not None:\n raise ValueError(\n 'DataTable has own feature types, cannot pass them in.')\n feature_types = np.vectorize(self.dt_type_mapper2.get)(\n data_types_names)\n\n return data, feature_names, feature_types",
"def processPrintableData(data:tuple):\n try:\n msg, value = data\n except Exception as e:\n raise Exception(f\"Expected 2 items, got {len(data)} items\")\n print(f\"{msg}\\n\\tValue: {value}\")",
"def _validate_from_plain(data: Sequence[Sequence],\n columns: Sequence[str],\n dtypes: Sequence[str],\n row_wise: bool):\n\n if row_wise:\n # assert equal number of elements across rows\n row_lenghts = {len(row) for row in data}\n if len(row_lenghts) > 1:\n raise ValueError(\"Input data has varying number of values per \"\n \"row. Please check provided input data\")\n\n # assert equal number of columns and elements per row\n row_lenghts.add(len(columns))\n if len(row_lenghts) > 1:\n raise ValueError(\n \"Number of columns has to equal the number of \"\n \"values per row. Please check column names and \"\n \"provided input data.\")\n\n # assert equal number of dtypes and elements per row\n row_lenghts.add(len(dtypes))\n if len(row_lenghts) > 1:\n raise ValueError(\"Number of dtypes has to equal the number of \"\n \"values per row. Please check dtypes and \"\n \"provided input data.\")\n\n else:\n # assert equal number of elements across columns\n col_lengths = {len(col) for col in data}\n if len(col_lengths) > 1:\n raise ValueError(\"Input data has varying number of values per \"\n \"columns. Please check provided input data\")\n\n # assert equal number of columns in data, column names and dtypes\n col_count = len(columns)\n if col_count != len(data):\n raise ValueError(\"Input data and column names have different \"\n \"amount of columns. Please check provided \"\n \"input data\")\n\n if col_count != len(dtypes):\n raise ValueError(\"Input data and dtypes have different \"\n \"amount of columns. Please check provided \"\n \"input data\")",
"def _check_input_for_asarray(array_like):\n if isinstance(array_like, (Tensor, list, tuple, int, float, bool, onp.ndarray)):\n return True\n raise TypeError(\"input data must be `int`, `float`, `bool`, `Tensor`, `list`, `tuple`\" + \\\n f\"or numpy.ndarray, but got {type(array_like)}\")",
"def test_read_data_processed(model_data):\n assert len(model_data) == 6 and type(model_data) is tuple",
"def validate_tuple(validator, data):\n if type(data) is not tuple:\n return False\n if len(validator) != len(data):\n return False\n # all elements must be valid\n return all(imap(validate_common, validator, data))",
"def check_data_shape(self, data_shape):\n if not len(data_shape) == 2:\n raise ValueError('data_shape should have length 2')\n if not data_shape[0] == 1:\n raise ValueError('This iterator expects inputs to have 1 channels.')",
"def __init__(self,data):\n\t\tself.data = tuple([tuple(x) if isiterable(x) else (x,) for x in data])\n\t\tself.rows = len(self.data)\n\t\tself.cols = len(self.data[0]) if len(self.data)>0 else 0",
"def _convert_data(self, data):\n if isinstance(data, Tensor):\n data = data.asnumpy()\n elif isinstance(data, list):\n data = np.array(data)\n elif isinstance(data, np.ndarray):\n pass\n else:\n raise TypeError('Input data type must be tensor, list or numpy.ndarray')\n return data",
"def try_float(data):\n try:\n return float(data)\n except (ValueError, TypeError ):\n return data",
"def test_invalid_op_inputs_with_wrong_types(self, data, description):\n with self.assertRaises(TypeError, msg=description):\n tfx.get_op(data, tf.Graph())",
"def _handle_input_data(data):\n data = np.asarray(data)\n if np.ndim(data) == 1:\n d_rows = 1\n d_cols = len(data)\n data = data.reshape((1, data.shape[0]))\n elif np.ndim(data) == 2:\n d_rows = data.shape[0]\n d_cols = data.shape[1]\n else:\n raise ValueError(\"Incorrect dimensionality of data. Must be <= 2\")\n return data, d_rows, d_cols"
]
| [
"0.7660926",
"0.64638644",
"0.6422923",
"0.6404551",
"0.63229305",
"0.62484825",
"0.6218231",
"0.616253",
"0.6160331",
"0.6153625",
"0.61400735",
"0.6136175",
"0.61123484",
"0.61044014",
"0.6084617",
"0.5957655",
"0.5912516",
"0.58761597",
"0.58723783",
"0.5868495",
"0.5853898",
"0.5848841",
"0.5837813",
"0.5836258",
"0.58339566",
"0.58302104",
"0.58236945",
"0.581262",
"0.5803171",
"0.5803074"
]
| 0.85903543 | 0 |
Return a tuple of data's quartile information (Q1, Q2, Q3, IQR). If you arrange all values in the dataset from smallest to largest, Q2 is the middlemost value (the median). If you divide the dataset in half at this median and find the middlemost value in each of these halves, Q1 is the middlemost value of the first half and Q3 is the middlemost value of the second half, neither half of which includes Q2. The interquartile range (IQR) is the number of units between Q1 and Q3, i.e. Q3 - Q1. | def get_quartile_data(cls, data: tuple or list) -> tuple:
cls._data_validation(data)
# Sort the data
sorted_data = sorted(list(data))
# Get q2, which is the median
q2 = cls.get_median(data)
first_half_data = list()
second_half_data = list()
# add to first half until median, then add to second half
for i in range(len(sorted_data)):
# if less than q2, first half
if sorted_data[i] < q2:
first_half_data.append(sorted_data[i])
            # if greater than q2, second half, skips q2
elif sorted_data[i] > q2:
second_half_data.append(sorted_data[i])
# use median method on halves to get quartiles
q1 = cls.get_median(first_half_data)
q3 = cls.get_median(second_half_data)
iqr = q3-q1
return q1, q2, q3, iqr | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_quartile_data(cls, data: tuple or list) -> tuple:\n cls._data_validation(data)\n from math import floor\n # Sort the data\n n = cls.get_n(data)\n if n == 0:\n # Empty dataset, returns zeroes\n return 0, 0, 0, 0\n sorted_data = sorted(list(data))\n n_is_odd = True if n % 2 == 1 else False\n\n # Get middle index\n odd_middle_index = floor(n / 2)\n even_upper_index = floor(n / 2)\n even_lower_index = floor(n / 2) - 1\n\n # Get q2, which is the median\n q2 = cls.get_median(data)\n first_half_data = list()\n second_half_data = list()\n\n # add to first half until median, then add to second half\n if n_is_odd:\n for i in range(n):\n if i < odd_middle_index:\n first_half_data.append(sorted_data[i])\n # note how if index = middle_index, skips\n elif i > odd_middle_index:\n second_half_data.append(sorted_data[i])\n else:\n for i in range(n):\n if i <= even_lower_index:\n first_half_data.append(sorted_data[i])\n # note how if index = middle_index, skips\n else:\n second_half_data.append(sorted_data[i])\n # use median method on halves to get quartiles\n q1 = cls.get_median(first_half_data)\n q3 = cls.get_median(second_half_data)\n iqr = q3 - q1\n return q1, q2, q3, iqr",
"def Quartiles(data):\n q = np.percentile(data, [25, 50, 75])\n\n return q[0], q[1], q[2]",
"def IQR(data):\n return percentile(data, 75) - percentile(data, 25)",
"def test_get_quartile_data(self):\n LOWER = 0\n MID = 1\n UPPER = 2\n number_of_simulations = 100\n expected_lower = 25\n expected_middle = 50\n expected_upper = 75\n quartile_tuple = investment_growth.get_quartile_data(number_of_simulations)\n self.assertEqual(expected_lower, quartile_tuple[LOWER])\n self.assertEqual(expected_middle, quartile_tuple[MID])\n self.assertEqual(expected_upper, quartile_tuple[UPPER])",
"def data_quartiles(self):\n data = []\n for graph in self._graphs.values():\n data += graph.data.values()\n data.sort()\n datalen = len(data)\n return(data[0], data[datalen/4], data[datalen/2],\n data[3*datalen/4], data[-1])",
"def quartiles(x, percentile):\n length = len(x)\n\n if percentile == 25:\n center = length // 4\n elif percentile == 75:\n center = length // 2 + length // 4\n\n x.sort()\n\n if length % 2 == 0:\n return (x[center - 1] + x[center]) / 2\n else:\n return x[center]",
"def test_profiled_quantiles(self):\n\n # this data has 4 bins, range of 3\n # with equal bin size, each bin has the width of 0.75\n\n data = [\"1.0\", \"2.0\", \"3.0\", \"4.0\"]\n df = pd.Series(data)\n profiler = FloatColumn(df.name)\n profiler.update(df)\n profile = profiler.profile\n\n est_quartiles = profile['quantiles']\n est_Q1 = est_quartiles[249]\n est_Q2 = est_quartiles[499]\n est_Q3 = est_quartiles[749]\n\n data_to_num = [float(item) for item in data]\n exact_Q1 = np.percentile(data_to_num, 25)\n exact_Q2 = np.percentile(data_to_num, 50)\n exact_Q3 = np.percentile(data_to_num, 75)\n\n self.assertEqual(est_Q1, exact_Q1)\n self.assertEqual(est_Q2, exact_Q2)\n self.assertEqual(est_Q3, exact_Q3)",
"def IQR(x):\r\n\r\n x.sort()\r\n # split values into lower and upper portions at the median\r\n odd = len(x) % 2\r\n midpoint = int(len(x) / 2)\r\n if odd:\r\n low_vals = x[:midpoint]\r\n high_vals = x[midpoint + 1:]\r\n else: # if even\r\n low_vals = x[:midpoint]\r\n high_vals = x[midpoint:]\r\n # find the median of the low and high values\r\n min_val = median(low_vals)\r\n max_val = median(high_vals)\r\n return min_val, max_val",
"def quantileValues(data, device):\n r = pd.DataFrame([])\n if isinstance(data, pd.DataFrame) or isinstance(data, pd.Series):\n minValue = data.min()\n maxValue = data.max()\n q1 = data.quantile(0.25)\n # q2 = tmp['deltaSeconds'].quantile(0.5)\n q3 = data.quantile(0.75)\n QR = q3 - q1\n upper = 1.5 * QR + q3\n lower = q1 - 1.5 * QR\n\n elif isinstance(data, np.ndarray):\n minValue = data.min()\n maxValue = data.max()\n q1 = np.percentile(data, 25)\n # q2 = np.percentile(data, 50)\n q3 = np.percentile(data, 75)\n QR = q3 - q1\n upper = 1.5 * QR + q3\n lower = q1 - 1.5 * QR\n r = (r.append(pd.DataFrame({'mac': device.mac, 'minValue': minValue,\n 'maxValue': maxValue, 'lower': lower,\n 'upper': upper, 'q1': q1, 'q3': q3},\n index=[0]), ignore_index=True))\n return r",
"def iqr(data, cols, t=1.5):\n Q1 = data[cols].quantile(0.25)\n Q3 = data[cols].quantile(0.75)\n IQR = Q3-Q1\n low_bound = {}\n upp_bound = {}\n for col in list(IQR.index):\n low_bound[col] = Q1[col]-t*IQR[col]\n upp_bound[col] = Q3[col]+t*IQR[col]\n return {\"low_b\": low_bound, \"upp_b\": upp_bound}",
"def calcrange_quartile(data, log=False):\n if not isinstance(data, numpy.ndarray):\n data = numpy.array(data)\n if log:\n data = data[data > 0.]\n\n if len(data) == 0:\n if log: return 0.1, 1.\n else: return 0., 1.\n\n data = numpy.sort(data)\n q1 = data[int(math.floor(0.25*len(data)))]\n q3 = data[int(math.floor(0.75*len(data)))]\n if log:\n return q1 / (q3 - q1), q3 * (q3 - q1)\n else:\n return q1 - (q3 - q1), q3 + (q3 - q1)",
"def quartile(db: pd.DataFrame, col: str) -> pd.DataFrame:\n _, bins = pd.qcut(x=db[col],\n q=[0.25, 0.75], # [0.0, 0.25, 0.5, 0.75, 1.0],\n retbins=True, duplicates='drop')\n q1 = bins[0] # lower (first) quartile\n q3 = bins[1] # upper (third) quartile\n iqr = q3 - q1 # InterQuartile Range\n lower_fence = (q1 - 1.5 * iqr)\n upper_fence = (q3 + 1.5 * iqr)\n db.loc[db[col] < lower_fence, col] = q1\n db.loc[db[col] > upper_fence, col] = q3\n return db",
"def getquantile(df, low=0.1, high=0.9):\n q1 = df.quantile(low)\n q3 = df.quantile(high)\n print(q1)\n print(q3)\n return df[df<q1],df[df>q3]",
"def _quantile(data, quantile):\r\n index = quantile * (len(data) - 1)\r\n bottom_index = int(floor(index))\r\n top_index = int(ceil(index))\r\n\r\n difference = index - bottom_index\r\n output = (1 - difference) * \\\r\n data[bottom_index] + difference * data[top_index]\r\n\r\n return output",
"def find_outliers_IQR(data): \n \n res = data.describe()\n q1 = res['25%']\n q3 = res['75%']\n thresh = 1.5*(q3-q1)\n idx_outliers =(data < (q1-thresh)) | (data > (q3+thresh))\n return idx_outliers",
"def test_quantile(self):\r\n\r\n # suffle the data to be sure, it is getting sorted\r\n sample_data = array(range(1, 11))\r\n shuffle(sample_data)\r\n\r\n # regular cases\r\n expected_output = [1.9, 2.8, 3.25, 5.5, 7.75, 7.93]\r\n list_of_quantiles = [0.1, 0.2, 0.25, 0.5, 0.75, 0.77]\r\n output = quantile(sample_data, list_of_quantiles)\r\n assert_almost_equal(expected_output, output)\r\n\r\n sample_data = array([42, 32, 24, 57, 15, 34, 83, 24, 60, 67, 55, 17,\r\n 83, 17, 80, 65, 14, 34, 39, 53])\r\n list_of_quantiles = [0.5]\r\n output = quantile(sample_data, list_of_quantiles)\r\n assert_almost_equal(output, median(sample_data))\r\n\r\n # quantiles must be between [0, 1]\r\n with self.assertRaises(AssertionError):\r\n output = quantile(sample_data, [0.1, 0.2, -0.1, 2, 0.3, 0.5])\r\n\r\n # quantiles must be a list or a numpy array\r\n with self.assertRaises(AssertionError):\r\n output = quantile(sample_data, 1)\r\n\r\n # the data must be a list or a numpy array\r\n with self.assertRaises(AssertionError):\r\n output = quantile(1, [0])",
"def Quantile(data, q, precision=1.0):\n N, bins = np.histogram(data, bins=precision*np.sqrt(len(data)))\n norm_cumul = 1.0*N.cumsum() / len(data)\n\n for i in range(0, len(norm_cumul)):\n if norm_cumul[i] > q:\n return bins[i]",
"def test__quantile(self):\r\n # regular cases\r\n sample_data = array(range(25, 42))\r\n assert_almost_equal(_quantile(sample_data, 0.5), median(sample_data))\r\n\r\n # sorted data is assumed for this function\r\n sample_data = sorted(\r\n array([0.17483293, 0.99891939, 0.81377467, 0.8137437,\r\n 0.51990174, 0.35521497, 0.98751461]))\r\n assert_almost_equal(_quantile(sample_data, 0.10), 0.283062154)",
"def get_IQR(lst):\n return (float(np.percentile(lst, 75)) - float(np.percentile(lst, 25)))",
"def interquartile_range(x):\n return (quantile(x,0.75) - quantile(x,0.25))",
"def quantiles(x, qlist=[2.5, 25, 50, 75, 97.5]):\n # Make a copy of trace\n x = x.copy()\n\n # For multivariate node\n if x.ndim > 1:\n # Transpose first, then sort, then transpose back\n sx = np.transpose(np.sort(np.transpose(x)))\n else:\n # Sort univariate node\n sx = np.sort(x)\n\n try:\n # Generate specified quantiles\n quants = [sx[int(len(sx) * q / 100.0)] for q in qlist]\n\n return dict(zip(qlist, quants))\n\n except IndexError:\n print(\"Too few elements for quantile calculation\")",
"def get_i_of_q(data, nbins=256):\n simulation = get_simulation()\n result = simulation.result()\n axes_limits = ba.get_axes_limits(result, ba.AxesUnits.QSPACE)\n shape = data.shape\n x = np.linspace(axes_limits[0], axes_limits[1], shape[0])\n y = np.linspace(axes_limits[2], axes_limits[3], shape[1])\n\n # xx, yy = np.meshgrid (x, y)\n # q = np.sqrt(xx**2 + yy**2)\n # result = np.array([q.flatten(), data.flatten()]).transpose()\n result = []\n for i in range(shape[0]):\n for j in range(shape[1]):\n q = np.sqrt(x[i]**2 + y[j]**2)\n result.append([q, data[i, j]])\n result = np.array(result)\n # sort data\n result = result[result[:, 0].argsort()]\n\n # bin data\n bins = np.linspace(0.0, np.max(q), nbins)\n indices = np.digitize(result[:, 0], bins)\n\n a = []\n for i in range(bins.size):\n idx = np.where(indices==i)\n if idx[0].size > 0:\n a.append([bins[i], np.mean(result[:, 1][idx])])\n return np.array(a)",
"def quantiles(self, q: int):\n return self.dist.quantiles(q)",
"def interquartile_range(xs: List[float]) -> float:\n return quantile(xs, 0.75) - quantile(xs, 0.25)",
"def make_quartiles(s, q1, q3):\n return graph_objs.Scatter(\n x=[s, s],\n y=[q1, q3],\n text=['lower-quartile: ' + '{:0.2f}'.format(q1),\n 'upper-quartile: ' + '{:0.2f}'.format(q3)],\n mode='lines',\n line=graph_objs.Line(\n width=4,\n color='rgb(0,0,0)'\n ),\n hoverinfo='text'\n )",
"def computeNumClass(self):\n # Get the number of data\n n = len(self.data)\n # For IQR\n # First, compute the position of the first and third quartile\n fQPos = ( (n - 1) / 4 ) + 1\n tQPos = ( (3 * (n - 1)) / 4 ) + 1\n # Get the quartiles\n firstQ = 0.0\n thirdQ = 0.0\n if fQPos == round(fQPos):\n firstQ = self.data[int(fQPos)]\n else:\n up = round(fQPos)\n firstQ = self.data[up - 1] + ((self.data[up] - self.data[up - 1]) / 4.0)\n if tQPos == round(tQPos):\n thirdQ = self.data[int(tQPos)]\n else:\n up = round(tQPos)\n thirdQ = self.data[up - 1] + (3 * (self.data[up] - self.data[up - 1]) / 4.0)\n # Compute the IQR\n IQR = thirdQ - firstQ\n # Compute the number of classes and its length\n self.numBins = int(2 * IQR * m.pow(n, -1/3))\n self.computeBinWidth()",
"def interquartile_range(magnitudes):\n num_obs = magnitudes.shape[0]\n per_25 = int(num_obs / 4.0)\n\n q1 = magnitudes[per_25]\n q3 = magnitudes[num_obs - per_25]\n\n return (q3 - q1)[0]",
"def get_percentile(self, q):\n raise NotImplementedError(\"This is an abstract method and needs to be implemented in derived classes.\")",
"def performance_quantiles(data, performance_measure):\n quantiles = pd.qcut(x=data[performance_measure], q=4, labels=['q1', 'q2', 'q3', 'q4'])\n bins = quantiles.to_frame(name=performance_measure + '_quantiles')\n data_quantiles = pd.merge(data, bins, right_index=True, left_index=True)\n data_quantiles.dropna(inplace=True)\n data_quantiles.sort_values(performance_measure + '_quantiles', inplace=True)\n return data_quantiles",
"def quantile(self, q, *, axis=0, **kwargs) -> \"Dataset\":\n return self._quantile(q, axis=axis, func=np.quantile, **kwargs)"
]
| [
"0.86814016",
"0.7956387",
"0.73629045",
"0.7266403",
"0.72327554",
"0.6956993",
"0.6757972",
"0.668319",
"0.6638791",
"0.66310585",
"0.6606817",
"0.659038",
"0.6587118",
"0.6565645",
"0.65437573",
"0.6538354",
"0.6502364",
"0.64743066",
"0.64359903",
"0.63698876",
"0.636948",
"0.63541955",
"0.6283211",
"0.6256492",
"0.62240076",
"0.6189307",
"0.6177793",
"0.61569035",
"0.6142549",
"0.6125611"
]
| 0.8612872 | 1 |
Return a tuple of all outliers in the dataset. Outliers are defined as data points which are not within 1.5 IQRs of Q1 or Q3. If remove_outliers=True, instead returns the data with outliers removed. Lower Outlier Limit = Q1 - (1.5 * IQR), Upper Outlier Limit = Q3 + (1.5 * IQR) | def get_outlier_data(cls, data: tuple or list, remove_outliers=False) -> tuple:
cls._data_validation(data)
q1, q2, q3, iqr = cls.get_quartile_data(data)
data_without_outliers = list()
outliers_list = list()
lower_out_bound, upper_out_bound = q1 - 1.5*iqr, q3 + 1.5*iqr
for i in range(len(data)):
if lower_out_bound <= data[i] <= upper_out_bound:
data_without_outliers.append(data[i])
else:
outliers_list.append(data[i])
if remove_outliers:
return tuple(data_without_outliers)
else:
return tuple(outliers_list) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filter_outliers(data): \n \n idx_out = find_outliers_IQR(data)\n \n cleaned = data[~idx_out].copy()\n\n # print(f'There were {idx_out.sum()} outliers.')\n \n return cleaned",
"def get_outlier_data(\n cls, data: tuple or list, remove_outliers=False\n ) -> tuple:\n cls._data_validation(data)\n q1, _, q3, iqr = cls.get_quartile_data(data)\n if (q1, _, q3, iqr) == (0, 0, 0, 0):\n # getting outliers from empty set, return empty\n return tuple()\n data_without_outliers = list()\n outliers_list = list()\n lower_out_bound, upper_out_bound = q1 - 1.5*iqr, q3 + 1.5*iqr\n for i in range(len(data)):\n if lower_out_bound <= data[i] <= upper_out_bound:\n data_without_outliers.append(data[i])\n else:\n outliers_list.append(data[i])\n if remove_outliers:\n return tuple(data_without_outliers)\n else:\n return tuple(outliers_list)",
"def find_outliers_IQR(data): \n \n res = data.describe()\n q1 = res['25%']\n q3 = res['75%']\n thresh = 1.5*(q3-q1)\n idx_outliers =(data < (q1-thresh)) | (data > (q3+thresh))\n return idx_outliers",
"def get_outliers(self):\n out = Outliers()\n out.set_data(self.data)\n out.run()\n return out.get_value('outliers')",
"def outlier_vars(data, show_plot=False):\n \n outliers = [] \n Q1 = data.quantile(0.25)\n Q3 = data.quantile(0.75)\n IQR = Q3 - Q1\n num_data = data.select_dtypes(include='number')\n result = dict ((((num_data < (Q1 - 1.5 * IQR)) | (num_data > (Q3 + 1.5 * IQR)))==True).any())\n for k,v in result.items():\n if v == True: \n outliers.append(k)\n if show_plot:\n pair_plot = sns.pairplot(data[outliers]);\n print(f'{result},\\n\\n Visualization of outlier columns')\n return pair_plot\n else:\n return data[outliers]",
"def remove_outliers(self, data, min_p= 25, max_p= 75, cut= '', skewed= False):\n data_c = [ d for d in data if d ]\n q25, q75 = np.nanpercentile(data_c, min_p), np.nanpercentile(data_c, max_p)\n cut_off = (q75 - q25) * cut\n lower, upper = q25-cut_off, q75+cut_off\n\n if skewed==True:\n q50 = np.nanpercentile(data_c, 50)\n lower , upper = q25-(q50-q25)*cut , q75+(q75-q50)*cut\n\n median = np.nanmedian(data_c)\n cleaned, outliers = [],[]\n\n for d in np.asarray(data):\n if d >= lower and d <= upper:\n cleaned.append(d)\n outliers.append(np.nan)\n elif np.isnan(d):\n cleaned.append(np.nan)\n outliers.append(np.nan)\n else:\n cleaned.append(np.nan)\n outliers.append(d)\n return cleaned, outliers, lower, upper, median",
"def identify_outliers(x):\n outliers = np.array([])\n\n IQR = iqr(x)\n low_cut = np.percentile(x,25) - 1.5*IQR\n high_cut = np.percentile(x,75) + 1.5*IQR\n\n for sub in x.index:\n if x.loc[sub] < low_cut or x.loc[sub] > high_cut:\n # outliers = np.append(outliers,np.asarray(x == i).nonzero()[0])\n outliers = np.append(outliers,sub)\n\n return outliers",
"def find_outliers(data, method='iqr'):\n\n if method=='iqr':\n # Finding the interquartile range\n q1 = data.quantile(.25)\n q3 = data.quantile(.75)\n iqr = q3-q1\n\n upper = q3 + iqr*1.5\n lower = q1 - iqr*1.5\n elif method=='std':\n std = data.std()\n lower = data.mean() - 3*std\n upper = data.mean() + 3*std\n else:\n raise ValueError(\"Invalid value for 'method' passed\")\n\n\n return lower, upper",
"def filter_outliers(self, df, outlier):\n return df[~outlier].reset_index(drop=True)",
"def remove_outliers(data):\n upper_boundary = np.quantile(data, 0.992)\n lower_boundary = np.quantile(data, 0.008)\n selection = data[(data > lower_boundary) & (data < upper_boundary)]\n standard_dev = np.std(selection)\n median = np.median(selection)\n data[(median + 4.5 * standard_dev < data) | (data < median - 4.5 * standard_dev)] = median\n return data",
"def outlier_hunt(df):\n outlier_indices = []\n\n # iterate over features(columns)\n for col in df.columns.tolist():\n # 1st quartile (25%)\n Q1 = np.percentile(df[col], 1)\n\n # 3rd quartile (75%)\n Q3 = np.percentile(df[col], 99)\n\n # Interquartile rrange (IQR)\n IQR = Q3 - Q1\n\n # outlier step\n outlier_step = 1.5 * IQR\n\n # Determine a list of indices of outliers for feature col\n outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index\n\n # append the found outlier indices for col to the list of outlier indices\n outlier_indices.extend(outlier_list_col)\n\n # select observations containing more than 2 outliers\n outlier_indices = Counter(outlier_indices)\n multiple_outliers = list(k for k, v in outlier_indices.items() if v >= 2)\n\n return multiple_outliers",
"def is_outlier(incoming_data):\r\n outlier_df = \\\r\n incoming_data[incoming_data.apply(\r\n lambda x: np.abs(x - x.mean()) / x.std() > 3).all(axis=1)]\r\n return not outlier_df.empty",
"def filterOutlier(data_list,z_score_threshold=3):\n\t# n = len(data_list)\n\t# z_score_threshold = (n-1)/np.sqrt(n)\n\tdata = np.array(data_list)\n\tmedian = np.median(data)\n\tdeviation = np.median([np.abs(x - median) for x in data])\n\tz_scores = [0.675*(x - median)/deviation for x in data]\n\tdata_out = data[np.where(np.abs(z_scores) < z_score_threshold)].tolist()\n\toutput = data_out if len(data_out) > 0 else data_list\n\treturn output",
"def drop_outliers_for(feature, samples):\n return [s for s in samples if not feature.is_outlier(s)]",
"def detect_outliers(data, tolerance=2):\n medians = data.rolling(5, center=True).median()\n lowerq = data.rolling(5, center=True).quantile(.75)\n upperq = data.rolling(5, center=True).quantile(.25)\n iqrs = np.abs(upperq - lowerq)\n diffs = np.abs(data - medians)\n outliers = pd.Series(diffs > (tolerance * iqrs))\n return outliers, sum(outliers)",
"def rem_outliers(s):\n s_mean = s.mean()\n s_std = s.std()\n s_min = s_mean - 3 * s_std\n s_max = s_mean + 3 * s_std\n return s.loc[(s_min < s.loc[:]) & (s.loc[:] < s_max)].index.to_list()",
"def remove_outlier(data, Nstd=2, mask= None): #---- remove extreme data\r\n M = data.shape[0]; \r\n if mask is None:\r\n mask = np.ones((M,M)); # if mask not existed\r\n for k in range(0,M): mask[k,k]= 0; # create one and remove diagnol\r\n N = np.sum(mask); # total effective data number \r\n sumx= np.sum(data* mask);\r\n mean= sumx/ N; # new mean\r\n sum_square = np.sum(((data-mean)*mask)**2); #\r\n std = np.sqrt( sum_square/ (N-1) ); # new standard deviation\r\n #--- ---\r\n larger = data > (mean+ Nstd*std); # data too large\r\n smaller= data < (mean- Nstd*std); # data too small\r\n maskh = mask.copy();\r\n maskh[larger] = 0; maskh[smaller]= 0; # remove outlier data\r\n return maskh, mean",
"def reject_outliers(self, data, m=2):\n std = np.std(data)\n return data[abs(data - np.median(data)) < m * std]",
"def remove_outliers(value, remove_outlier):\n try:\n if len(value) > 0:\n percent = float(remove_outlier)\n value = value.dropna().astype(\"float64\")\n q75, q25 = np.percentile(\n value, [percent, 100 - percent], interpolation=\"linear\"\n )\n iqr = q75 - q25\n value = value[value >= (q25 - 1.5 * iqr)]\n value = value[value <= (q75 + 1.5 * iqr)]\n value.reset_index(drop=True)\n return value\n except:\n raise",
"def list_outliers(data, m=100.):\n p99 = numpy.percentile(data, 99)\n p1 = numpy.percentile(data, 1)\n p50 = numpy.median(data)\n # p50 to p99 is 2.32635 sigma\n rSig = (p99-p1)/(2*2.32635)\n return numpy.unique(data[numpy.abs(data - p50) > rSig*m])",
"def get_outliers(a_dataframe):\n outliers_list = []\n for category in a_dataframe.dtypes.keys():\n try:\n column = a_dataframe.loc[:, category]\n mean = np.mean(column) # check if category is numeric\n except TypeError:\n pass\n else:\n # print_hist(column, category)\n st_dev = np.std(column)\n limit_hi = mean + 2 * st_dev\n limit_lo = mean - 2 * st_dev\n flag_bad = (column < limit_lo) | (column > limit_hi)\n if category != \"fnlwgt\": # skip 'fnlwgt' var. 'cos I'll delete it\n outliers_list.append(flag_bad)\n num_outliers = sum(flag_bad)\n print_stats(category, column,\n limit_hi, limit_lo,\n num_outliers\n )\n\n return outliers_list",
"def detect_outliers(df):\n outlier_indices = {}\n # iterate over features(columns)\n for col in df.columns:\n # 1st quartile (25%)\n Q1 = np.percentile(df[col].dropna(), 25)\n # 3rd quartile (75%)\n Q3 = np.percentile(df[col].dropna(), 75)\n # Interquartile range (IQR)\n IQR = Q3 - Q1\n\n # outlier step\n outlier_step = 1.5 * IQR\n\n # Determine a list of indices of outliers for feature col\n outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index.to_list()\n\n # append the found outlier indices for col to the list of outlier indices\n outlier_indices[col]=outlier_list_col\n if outlier_list_col:\n Box_plots(df[col],col)\n return outlier_indices",
"def filter_outliers(data: pd.Series, std: int=3) -> pd.Series:\n return data[(data - data.mean()).abs() <= (std * data.std())]",
"def drop_outliers(data, cols, t=1.5):\n iqr_d = iqr(data, cols, t)\n for col in cols:\n return data[~((data[col]< iqr_d[\"low_b\"][col]) | (data[col]> iqr_d[\"upp_b\"][col]))]",
"def remove_outliers(self, data, sd_val):\n data = data.dropna()\n data = data[(np.abs(stats.zscore(data)) < float(sd_val)).all(axis=1)]\n return data",
"def remove_outliers(lst):\n slst = sorted(lst)\n three_iqr = 3 * get_IQR(lst)\n low_boundary = float(np.percentile(lst, 25)) - three_iqr\n high_boundary = float(np.percentile(lst, 75)) + three_iqr\n\n return filter(lambda x: x >= low_boundary and x <= high_boundary, slst)",
"def remove_outliers(y, x, ids):\n # Compute first and third quartiles and the Interquartile range\n q1 = np.percentile(x, 25, axis=0)\n q3 = np.percentile(x, 75, axis=0)\n iqr = q3 - q1\n # Set to True any entry outside the Interquartile range\n mask = (x >= q1 - 1.5 * iqr) & (x <= q3 + 1.5 * iqr)\n\n # Only filter out features with values that are spread over a range bigger than threshold_range\n # i.e. if the difference between the minimum value and the maximum value is bigger than threshold_range\n threshold_range = 10\n # Set to False any feature with range bigger than threshold\n col_mask = (x.max(axis=0) - x.min(axis=0)) < threshold_range\n mask = mask | col_mask\n row_mask = mask.all(axis=1) # sets to False rows containing any outliers\n\n return y[row_mask], x[row_mask], ids[row_mask]",
"def _remove_outliers(self, boxes):\n\n filtered_boxes = []\n for bc in boxes:\n w = bc[2] - bc[0]\n h = bc[3] - bc[1]\n if bc[1] < 450 and w > 32 and h > 32:\n filtered_boxes.append(bc)\n elif bc[1] > 450 and w > 64 and h > 64:\n filtered_boxes.append(bc)\n\n return np.array(filtered_boxes)",
"def remove_outliers(self, std_tol=1.5):\r\n from lsst.analysis import outlier\r\n for tnum in numpy.unique(self.data[\"tiles\"]):\r\n self.decimate(outlier.valid(self, self.data[\"tiles\"]==tnum, std_tol=std_tol))",
"def reject_outliers_arg(data,nSigma):\n criterion = ( (data[:] < (data[:].mean() + data[:].std() * nSigma)) &\n (data[:] > (data[:].mean() - data[:].std() * nSigma)) )\n ind = np.array(np.where(criterion))[0]\n \n return ind"
]
| [
"0.8190102",
"0.81744343",
"0.75078195",
"0.7411929",
"0.73364514",
"0.72968316",
"0.7266782",
"0.71986395",
"0.71556485",
"0.7138233",
"0.7055311",
"0.7044442",
"0.69663453",
"0.69504064",
"0.6867932",
"0.68141323",
"0.68134224",
"0.6811384",
"0.67942876",
"0.6792364",
"0.6788436",
"0.67860883",
"0.67855066",
"0.6783654",
"0.67773336",
"0.67588323",
"0.67306983",
"0.670713",
"0.6659586",
"0.6659292"
]
| 0.8220814 | 0 |
Return a tuple of data's quartile information (Q1, Q2, Q3, IQR). If you arrange all values in the dataset from smallest to largest, Q2 is the middlemost value (the median). If you divide the dataset in half at this median and find the middlemost value in these halves, Q1 is the middlemost value of the first half and Q3 is the middlemost value of the second half, neither half of which includes Q2. The interquartile range (IQR) is the number of units between Q1 and Q3, i.e. Q3 - Q1. | def get_quartile_data(cls, data: tuple or list) -> tuple:
cls._data_validation(data)
from math import floor
# Sort the data
n = cls.get_n(data)
if n == 0:
# Empty dataset, returns zeroes
return 0, 0, 0, 0
sorted_data = sorted(list(data))
n_is_odd = True if n % 2 == 1 else False
# Get middle index
odd_middle_index = floor(n / 2)
even_upper_index = floor(n / 2)
even_lower_index = floor(n / 2) - 1
# Get q2, which is the median
q2 = cls.get_median(data)
first_half_data = list()
second_half_data = list()
# add to first half until median, then add to second half
if n_is_odd:
for i in range(n):
if i < odd_middle_index:
first_half_data.append(sorted_data[i])
# note how if index = middle_index, skips
elif i > odd_middle_index:
second_half_data.append(sorted_data[i])
else:
for i in range(n):
if i <= even_lower_index:
first_half_data.append(sorted_data[i])
# note how if index = middle_index, skips
else:
second_half_data.append(sorted_data[i])
# use median method on halves to get quartiles
q1 = cls.get_median(first_half_data)
q3 = cls.get_median(second_half_data)
iqr = q3 - q1
return q1, q2, q3, iqr | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_quartile_data(cls, data: tuple or list) -> tuple:\n cls._data_validation(data)\n # Sort the data\n sorted_data = sorted(list(data))\n # Get q2, which is the median\n q2 = cls.get_median(data)\n first_half_data = list()\n second_half_data = list()\n # add to first half until median, then add to second half\n for i in range(len(sorted_data)):\n # if less than q2, first half\n if sorted_data[i] < q2:\n first_half_data.append(sorted_data[i])\n # if greather than q2, second half, skips q2\n elif sorted_data[i] > q2:\n second_half_data.append(sorted_data[i])\n # use median method on halves to get quartiles\n q1 = cls.get_median(first_half_data)\n q3 = cls.get_median(second_half_data)\n iqr = q3-q1\n return q1, q2, q3, iqr",
"def Quartiles(data):\n q = np.percentile(data, [25, 50, 75])\n\n return q[0], q[1], q[2]",
"def IQR(data):\n return percentile(data, 75) - percentile(data, 25)",
"def test_get_quartile_data(self):\n LOWER = 0\n MID = 1\n UPPER = 2\n number_of_simulations = 100\n expected_lower = 25\n expected_middle = 50\n expected_upper = 75\n quartile_tuple = investment_growth.get_quartile_data(number_of_simulations)\n self.assertEqual(expected_lower, quartile_tuple[LOWER])\n self.assertEqual(expected_middle, quartile_tuple[MID])\n self.assertEqual(expected_upper, quartile_tuple[UPPER])",
"def data_quartiles(self):\n data = []\n for graph in self._graphs.values():\n data += graph.data.values()\n data.sort()\n datalen = len(data)\n return(data[0], data[datalen/4], data[datalen/2],\n data[3*datalen/4], data[-1])",
"def quartiles(x, percentile):\n length = len(x)\n\n if percentile == 25:\n center = length // 4\n elif percentile == 75:\n center = length // 2 + length // 4\n\n x.sort()\n\n if length % 2 == 0:\n return (x[center - 1] + x[center]) / 2\n else:\n return x[center]",
"def test_profiled_quantiles(self):\n\n # this data has 4 bins, range of 3\n # with equal bin size, each bin has the width of 0.75\n\n data = [\"1.0\", \"2.0\", \"3.0\", \"4.0\"]\n df = pd.Series(data)\n profiler = FloatColumn(df.name)\n profiler.update(df)\n profile = profiler.profile\n\n est_quartiles = profile['quantiles']\n est_Q1 = est_quartiles[249]\n est_Q2 = est_quartiles[499]\n est_Q3 = est_quartiles[749]\n\n data_to_num = [float(item) for item in data]\n exact_Q1 = np.percentile(data_to_num, 25)\n exact_Q2 = np.percentile(data_to_num, 50)\n exact_Q3 = np.percentile(data_to_num, 75)\n\n self.assertEqual(est_Q1, exact_Q1)\n self.assertEqual(est_Q2, exact_Q2)\n self.assertEqual(est_Q3, exact_Q3)",
"def IQR(x):\r\n\r\n x.sort()\r\n # split values into lower and upper portions at the median\r\n odd = len(x) % 2\r\n midpoint = int(len(x) / 2)\r\n if odd:\r\n low_vals = x[:midpoint]\r\n high_vals = x[midpoint + 1:]\r\n else: # if even\r\n low_vals = x[:midpoint]\r\n high_vals = x[midpoint:]\r\n # find the median of the low and high values\r\n min_val = median(low_vals)\r\n max_val = median(high_vals)\r\n return min_val, max_val",
"def quantileValues(data, device):\n r = pd.DataFrame([])\n if isinstance(data, pd.DataFrame) or isinstance(data, pd.Series):\n minValue = data.min()\n maxValue = data.max()\n q1 = data.quantile(0.25)\n # q2 = tmp['deltaSeconds'].quantile(0.5)\n q3 = data.quantile(0.75)\n QR = q3 - q1\n upper = 1.5 * QR + q3\n lower = q1 - 1.5 * QR\n\n elif isinstance(data, np.ndarray):\n minValue = data.min()\n maxValue = data.max()\n q1 = np.percentile(data, 25)\n # q2 = np.percentile(data, 50)\n q3 = np.percentile(data, 75)\n QR = q3 - q1\n upper = 1.5 * QR + q3\n lower = q1 - 1.5 * QR\n r = (r.append(pd.DataFrame({'mac': device.mac, 'minValue': minValue,\n 'maxValue': maxValue, 'lower': lower,\n 'upper': upper, 'q1': q1, 'q3': q3},\n index=[0]), ignore_index=True))\n return r",
"def iqr(data, cols, t=1.5):\n Q1 = data[cols].quantile(0.25)\n Q3 = data[cols].quantile(0.75)\n IQR = Q3-Q1\n low_bound = {}\n upp_bound = {}\n for col in list(IQR.index):\n low_bound[col] = Q1[col]-t*IQR[col]\n upp_bound[col] = Q3[col]+t*IQR[col]\n return {\"low_b\": low_bound, \"upp_b\": upp_bound}",
"def calcrange_quartile(data, log=False):\n if not isinstance(data, numpy.ndarray):\n data = numpy.array(data)\n if log:\n data = data[data > 0.]\n\n if len(data) == 0:\n if log: return 0.1, 1.\n else: return 0., 1.\n\n data = numpy.sort(data)\n q1 = data[int(math.floor(0.25*len(data)))]\n q3 = data[int(math.floor(0.75*len(data)))]\n if log:\n return q1 / (q3 - q1), q3 * (q3 - q1)\n else:\n return q1 - (q3 - q1), q3 + (q3 - q1)",
"def quartile(db: pd.DataFrame, col: str) -> pd.DataFrame:\n _, bins = pd.qcut(x=db[col],\n q=[0.25, 0.75], # [0.0, 0.25, 0.5, 0.75, 1.0],\n retbins=True, duplicates='drop')\n q1 = bins[0] # lower (first) quartile\n q3 = bins[1] # upper (third) quartile\n iqr = q3 - q1 # InterQuartile Range\n lower_fence = (q1 - 1.5 * iqr)\n upper_fence = (q3 + 1.5 * iqr)\n db.loc[db[col] < lower_fence, col] = q1\n db.loc[db[col] > upper_fence, col] = q3\n return db",
"def getquantile(df, low=0.1, high=0.9):\n q1 = df.quantile(low)\n q3 = df.quantile(high)\n print(q1)\n print(q3)\n return df[df<q1],df[df>q3]",
"def _quantile(data, quantile):\r\n index = quantile * (len(data) - 1)\r\n bottom_index = int(floor(index))\r\n top_index = int(ceil(index))\r\n\r\n difference = index - bottom_index\r\n output = (1 - difference) * \\\r\n data[bottom_index] + difference * data[top_index]\r\n\r\n return output",
"def find_outliers_IQR(data): \n \n res = data.describe()\n q1 = res['25%']\n q3 = res['75%']\n thresh = 1.5*(q3-q1)\n idx_outliers =(data < (q1-thresh)) | (data > (q3+thresh))\n return idx_outliers",
"def test_quantile(self):\r\n\r\n # suffle the data to be sure, it is getting sorted\r\n sample_data = array(range(1, 11))\r\n shuffle(sample_data)\r\n\r\n # regular cases\r\n expected_output = [1.9, 2.8, 3.25, 5.5, 7.75, 7.93]\r\n list_of_quantiles = [0.1, 0.2, 0.25, 0.5, 0.75, 0.77]\r\n output = quantile(sample_data, list_of_quantiles)\r\n assert_almost_equal(expected_output, output)\r\n\r\n sample_data = array([42, 32, 24, 57, 15, 34, 83, 24, 60, 67, 55, 17,\r\n 83, 17, 80, 65, 14, 34, 39, 53])\r\n list_of_quantiles = [0.5]\r\n output = quantile(sample_data, list_of_quantiles)\r\n assert_almost_equal(output, median(sample_data))\r\n\r\n # quantiles must be between [0, 1]\r\n with self.assertRaises(AssertionError):\r\n output = quantile(sample_data, [0.1, 0.2, -0.1, 2, 0.3, 0.5])\r\n\r\n # quantiles must be a list or a numpy array\r\n with self.assertRaises(AssertionError):\r\n output = quantile(sample_data, 1)\r\n\r\n # the data must be a list or a numpy array\r\n with self.assertRaises(AssertionError):\r\n output = quantile(1, [0])",
"def Quantile(data, q, precision=1.0):\n N, bins = np.histogram(data, bins=precision*np.sqrt(len(data)))\n norm_cumul = 1.0*N.cumsum() / len(data)\n\n for i in range(0, len(norm_cumul)):\n if norm_cumul[i] > q:\n return bins[i]",
"def test__quantile(self):\r\n # regular cases\r\n sample_data = array(range(25, 42))\r\n assert_almost_equal(_quantile(sample_data, 0.5), median(sample_data))\r\n\r\n # sorted data is assumed for this function\r\n sample_data = sorted(\r\n array([0.17483293, 0.99891939, 0.81377467, 0.8137437,\r\n 0.51990174, 0.35521497, 0.98751461]))\r\n assert_almost_equal(_quantile(sample_data, 0.10), 0.283062154)",
"def get_IQR(lst):\n return (float(np.percentile(lst, 75)) - float(np.percentile(lst, 25)))",
"def interquartile_range(x):\n return (quantile(x,0.75) - quantile(x,0.25))",
"def quantiles(x, qlist=[2.5, 25, 50, 75, 97.5]):\n # Make a copy of trace\n x = x.copy()\n\n # For multivariate node\n if x.ndim > 1:\n # Transpose first, then sort, then transpose back\n sx = np.transpose(np.sort(np.transpose(x)))\n else:\n # Sort univariate node\n sx = np.sort(x)\n\n try:\n # Generate specified quantiles\n quants = [sx[int(len(sx) * q / 100.0)] for q in qlist]\n\n return dict(zip(qlist, quants))\n\n except IndexError:\n print(\"Too few elements for quantile calculation\")",
"def get_i_of_q(data, nbins=256):\n simulation = get_simulation()\n result = simulation.result()\n axes_limits = ba.get_axes_limits(result, ba.AxesUnits.QSPACE)\n shape = data.shape\n x = np.linspace(axes_limits[0], axes_limits[1], shape[0])\n y = np.linspace(axes_limits[2], axes_limits[3], shape[1])\n\n # xx, yy = np.meshgrid (x, y)\n # q = np.sqrt(xx**2 + yy**2)\n # result = np.array([q.flatten(), data.flatten()]).transpose()\n result = []\n for i in range(shape[0]):\n for j in range(shape[1]):\n q = np.sqrt(x[i]**2 + y[j]**2)\n result.append([q, data[i, j]])\n result = np.array(result)\n # sort data\n result = result[result[:, 0].argsort()]\n\n # bin data\n bins = np.linspace(0.0, np.max(q), nbins)\n indices = np.digitize(result[:, 0], bins)\n\n a = []\n for i in range(bins.size):\n idx = np.where(indices==i)\n if idx[0].size > 0:\n a.append([bins[i], np.mean(result[:, 1][idx])])\n return np.array(a)",
"def quantiles(self, q: int):\n return self.dist.quantiles(q)",
"def interquartile_range(xs: List[float]) -> float:\n return quantile(xs, 0.75) - quantile(xs, 0.25)",
"def make_quartiles(s, q1, q3):\n return graph_objs.Scatter(\n x=[s, s],\n y=[q1, q3],\n text=['lower-quartile: ' + '{:0.2f}'.format(q1),\n 'upper-quartile: ' + '{:0.2f}'.format(q3)],\n mode='lines',\n line=graph_objs.Line(\n width=4,\n color='rgb(0,0,0)'\n ),\n hoverinfo='text'\n )",
"def computeNumClass(self):\n # Get the number of data\n n = len(self.data)\n # For IQR\n # First, compute the position of the first and third quartile\n fQPos = ( (n - 1) / 4 ) + 1\n tQPos = ( (3 * (n - 1)) / 4 ) + 1\n # Get the quartiles\n firstQ = 0.0\n thirdQ = 0.0\n if fQPos == round(fQPos):\n firstQ = self.data[int(fQPos)]\n else:\n up = round(fQPos)\n firstQ = self.data[up - 1] + ((self.data[up] - self.data[up - 1]) / 4.0)\n if tQPos == round(tQPos):\n thirdQ = self.data[int(tQPos)]\n else:\n up = round(tQPos)\n thirdQ = self.data[up - 1] + (3 * (self.data[up] - self.data[up - 1]) / 4.0)\n # Compute the IQR\n IQR = thirdQ - firstQ\n # Compute the number of classes and its length\n self.numBins = int(2 * IQR * m.pow(n, -1/3))\n self.computeBinWidth()",
"def interquartile_range(magnitudes):\n num_obs = magnitudes.shape[0]\n per_25 = int(num_obs / 4.0)\n\n q1 = magnitudes[per_25]\n q3 = magnitudes[num_obs - per_25]\n\n return (q3 - q1)[0]",
"def get_percentile(self, q):\n raise NotImplementedError(\"This is an abstract method and needs to be implemented in derived classes.\")",
"def performance_quantiles(data, performance_measure):\n quantiles = pd.qcut(x=data[performance_measure], q=4, labels=['q1', 'q2', 'q3', 'q4'])\n bins = quantiles.to_frame(name=performance_measure + '_quantiles')\n data_quantiles = pd.merge(data, bins, right_index=True, left_index=True)\n data_quantiles.dropna(inplace=True)\n data_quantiles.sort_values(performance_measure + '_quantiles', inplace=True)\n return data_quantiles",
"def quantile(self, q, *, axis=0, **kwargs) -> \"Dataset\":\n return self._quantile(q, axis=axis, func=np.quantile, **kwargs)"
]
| [
"0.86127615",
"0.79561895",
"0.7363667",
"0.7266413",
"0.7234658",
"0.6957703",
"0.67579246",
"0.6683327",
"0.6639408",
"0.6631373",
"0.6607943",
"0.65904045",
"0.65866876",
"0.65661675",
"0.6544473",
"0.6537689",
"0.65024775",
"0.64734846",
"0.6436739",
"0.6370607",
"0.63698196",
"0.6355802",
"0.62835723",
"0.6257612",
"0.62238985",
"0.61916584",
"0.61788064",
"0.6157706",
"0.6143707",
"0.61250097"
]
| 0.8681264 | 0 |
Return the sample variance (s²) of the data set as a float. If is_population=True, returns the population variance (σ²) instead. | def get_var(cls, data: tuple or list, is_population=False) -> float:
cls._data_validation(data)
mean = cls.get_mean(data)
variance = float()
n = cls.get_n(data)
for each_item in data:
variance += (each_item - mean) ** 2
# Checks whether is a population or sample
if is_population:
variance = variance / n
else:
variance = variance / (n - 1)
return float(variance) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_population_variance(self):\n\t\treturn self.variables.get('population_variance')",
"def variance(self):\n return self.properties.get('variance')",
"def variance(self):\r\n\t\t_mean = sum(self.sample)/len(self.sample)\r\n\t\treturn sum(map(lambda x: (x - _mean)**2, self.sample))/(len(self.sample) - 1)",
"def variance(self):\n sum_sqdif = 0 # initialize sum of squared differences\n # Calculate sum of squared differences\n for site in self.sites:\n sqdif = (site.siteZmArea - self.meanZmArea()) ** 2\n sum_sqdif = sqdif + sum_sqdif \n # Standard Deviation\n stddev = ((1 / ( float(self.ni) - 1 )) * sum_sqdif ) ** 0.5\n # Variance\n var = stddev ** 2\n return var",
"def variance(self):\n observations_raw = input(\"Observations: \").split()\n observations = [int(elem) for elem in observations_raw]\n observations_squared = sum([num**2 for num in observations])\n aggregate_squared = sum(observations)**2\n n = len(observations)\n mean = sum(observations)/n\n variance = (observations_squared - (aggregate_squared/n))/(n-1)\n print(f\"Variance is: {variance}\")\n return variance, mean",
"def variance(L, is_sample=0):\n\tm = mean(L)\n\treturn sum((x-m)**2 for x in L) / (len(L) - is_sample)",
"def variance(self, sample=True):\n distance_squared = list(map(lambda x: (x - sum(self.data)/self.size)**2, self.data))\n\n if sample == True:\n variance = sum(distance_squared)/(self.size - 1)\n if sample == False: \n variance = sum(distance_squared)/(self.size)\n return variance",
"def representations_variance(self):\n self.assert_sampled()\n return [z.variance().numpy() for z in self.representations]",
"def get_population_variance(iterable):\n mean = get_mean(iterable)\n squares_of_differences = [(value - mean) ** 2 for value in iterable]\n return get_mean(squares_of_differences)",
"def getVariance(self):\n return self.__variance",
"def variance(dataset):\n avg = sum(dataset)/len(dataset)\n v = 0.0\n for data in dataset:\n v += (data - avg) * (data - avg)\n v = v / len(dataset)\n return v",
"def GetVarianceOutput(self, *args):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIUL3_GetVarianceOutput(self, *args)",
"def GetVarianceOutput(self, *args):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIUL2_GetVarianceOutput(self, *args)",
"def GetVarianceOutput(self, *args):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIF3_GetVarianceOutput(self, *args)",
"def GetVarianceOutput(self, *args):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIF2_GetVarianceOutput(self, *args)",
"def GetVarianceOutput(self, *args):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterID3_GetVarianceOutput(self, *args)",
"def var(self):\n return self._reduce_for_stat_function(F.variance, only_numeric=True)",
"def calc_variances(ds):\n if ds.size <= 1:\n print 'Fail: not enough items for calculation %d' % ds.size\n return 0,1\n obs_var = ((ds.storage - ds.storage.sum()/ds.size)**2).sum()/(ds.size-1)\n rep_var = ds.var.sum()/ds.size\n return obs_var,rep_var",
"def variance(data, m=None):\n n, ss = _SS(data, m)\n if n < 2:\n raise ValueError('sample variance or standard deviation'\n ' requires at least two data points')\n return ss/(n-1)",
"def GetVarianceOutput(self, *args):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIUS3_GetVarianceOutput(self, *args)",
"def variance(data, xbar=None):\n if iter(data) is data:\n data = list(data)\n data_len = len(data)\n if data_len < 2:\n raise StatisticsError('variance requires at least two data points')\n return _ss(data, xbar) / (data_len - 1)",
"def variance(num_energies, num_samples):\n fixed_header = (\n 1*8 # SSID\n + 4*8 # SCET Coarse time\n + 2*8 # SCET Fine time\n + 2*8 # Integration time\n + 1*8 # Samples per variance\n + 4*8 # Detector mask\n + 4*8 # Energy mask\n + 4 # Spare\n + 12 # Pixel mask\n + 1 # Spare\n + 1 # Comp Schema variance S\n + 3 # Comp Schema variance K\n + 3 # Comp Schema variance M\n + 2*8 # Number of data points\n )\n\n variable = (\n num_samples*1*8. # Number data points\n )\n\n return fixed_header, variable",
"def GetVarianceOutput(self, *args):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterID2_GetVarianceOutput(self, *args)",
"def variance(self):\n return self.sigma",
"def GetVarianceOutput(self, *args):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIUC2_GetVarianceOutput(self, *args)",
"def GetVarianceOutput(self, *args):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIUC3_GetVarianceOutput(self, *args)",
"def variance( values, sample=False ):\n mean_val = mean_value( values )\n n_val = len( values ) -1 if sample else len( values )\n return sum( [ j**2 for j in [ i - mean_val for i in values ] ] ) / n_val",
"def variance(data):\n differences = data - np.mean(data)\n diff_sq = differences ** 2\n variance = np.mean(diff_sq)\n\n return variance",
"def variance(data):\n differences = data - np.mean(data)\n diff_sq = differences ** 2\n variance = np.mean(diff_sq)\n\n return variance",
"def variance(self):\n if self.dirty:\n self._finalize()\n return self.vvar"
]
| [
"0.71327597",
"0.67674506",
"0.6685351",
"0.65718824",
"0.6553213",
"0.65447384",
"0.6540613",
"0.6483999",
"0.6459404",
"0.6353432",
"0.6345209",
"0.63438183",
"0.63338304",
"0.6325998",
"0.6280821",
"0.6251935",
"0.62394226",
"0.6235735",
"0.62145925",
"0.6191499",
"0.6191446",
"0.61905223",
"0.61846155",
"0.61721534",
"0.61468613",
"0.61362845",
"0.6118783",
"0.6104432",
"0.6104432",
"0.60757023"
]
| 0.73430026 | 0 |
Return a tuple of the differences between two dependent data sets. Note that this method assumes that the two data sets are dependent and of equal sample sizes, i.e. this method is meaningless when applied to two independent data sets. | def get_data_diff(cls, data1: tuple, data2: tuple) -> tuple:
cls._data_validation(data1)
cls._data_validation(data2)
data1_n = StatMe.get_n(data1)
data2_n = StatMe.get_n(data2)
if data1_n != data2_n:
raise ValueError(f"Samples are not of equal length.\n"
f"Items in 'data1' = {data1_n}\n"
f"Items in 'data2' = {data2_n}")
else:
return_list = list()
for i in range(data1_n):
x1 = data1[i]
x2 = data2[i]
return_list.append(x1 - x2)
return tuple(return_list) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def diff(A,B):\n #A-B\n x = list(set(A) - set(B))\n #B-A\n y = list(set(B) - set(A))\n return x, y",
"def set_difference(set_a, set_b):\n \n diff = set_b - set_a\n \n return diff",
"def diff(self, x0, x1):\n nq, nv, nx = self.model.nq, self.model.nv, self.nx\n assert (x0.shape == (nx, ) and x1.shape == (nx, ))\n q0 = x0[:nq]\n q1 = x1[:nq]\n v0 = x0[-nv:]\n v1 = x1[-nv:]\n dq = pinocchio.difference(self.model, a2m(q0), a2m(q1))\n return np.concatenate([dq.flat, v1 - v0])",
"def get_list_difference(self, set_one, set_two):\n s1 = set(set_one)\n s2 = set(set_two)\n return list(s1.difference(s2))",
"def get_new_diff(new_data, old_data):\n\n diff = list(set(new_data) - set(old_data))\n return diff",
"def _dif_(x, y):\n _check_(x, y)\n return [i - j for (i, j) in zip(x, y)]",
"def diff(self, x1, x2):\n return x2 - x1",
"def diff(before: list, after: list) -> (list, list):\n additions = [item for item in after if item not in before]\n removals = [item for item in before if item not in after]\n return additions, removals",
"def difference(a, b):\r\n return list(set(b).difference(set(a)))",
"def diff(xs, ys):\n return [x for x in xs if x not in ys]",
"def diff(self, x1, x2):\n raise NotImplementedError(\"Not implemented yet.\")",
"def array_diff(a, b):",
"def diff(self, x1, x2):\n return self.State.diff(x1, x2)",
"def difference(A, B, *C):\n return setutils(\"difference\", A, B, *C)",
"def get_list_diff(list1, list2):\n\n list3 = list(np.setdiff1d(list1,list2))\n return(list3)",
"def get_diff_states(states, states2):\n return [v2 - v for (k, v), (k2, v2) in zip(states.items(), states2.items())]",
"def set_diff(seq0, seq1):\n return list(set(seq0) - set(seq1))",
"def get_diff_weights(weights, weights2):\n return [w2 - w for (w, w2) in zip(weights, weights2)]",
"def differences_between_systems(self, other_systems: Sequence['System']) -> Tuple[\n Tuple[Set['Package'], Set['Package']], List[Tuple[Set['Package'], Set['Package']]]]:\n\n differences_tuples = []\n own_packages = set(self.all_packages_dict.values())\n\n for other_system in other_systems:\n current_difference_tuple = (set(), set())\n differences_tuples.append(current_difference_tuple)\n other_packages = set(other_system.all_packages_dict.values())\n difference = own_packages ^ other_packages\n\n for differ in difference:\n if differ not in own_packages:\n current_difference_tuple[0].add(differ)\n else:\n current_difference_tuple[1].add(differ)\n\n first_return_tuple = (\n set.intersection(*[difference_tuple[0] for difference_tuple in differences_tuples]),\n set.intersection(*[difference_tuple[1] for difference_tuple in differences_tuples])\n )\n\n return_list = []\n\n for difference_tuple in differences_tuples:\n current_tuple = (set(), set())\n return_list.append(current_tuple)\n\n for installed_package in difference_tuple[0]:\n if installed_package not in first_return_tuple[0]:\n current_tuple[0].add(installed_package)\n\n for uninstalled_package in difference_tuple[1]:\n if uninstalled_package not in first_return_tuple[1]:\n current_tuple[1].add(uninstalled_package)\n\n return first_return_tuple, return_list",
"def _xgetdiff(first, second, is_common=False, **kwds):\n # Prepare for numeric comparisons.\n _isnum = lambda x: isinstance(x, Number) and not isnan(x)\n first_isnum = _isnum(first)\n second_isnum = _isnum(second)\n\n # Numeric vs numeric.\n if first_isnum and second_isnum:\n difference = first - second\n return xDeviation(difference, second, **kwds)\n\n # Numeric vs empty (or not found).\n if first_isnum and (not second or second is _xNOTFOUND):\n if second is _xNOTFOUND:\n second = None\n\n difference = first - 0\n return xDeviation(difference, second, **kwds)\n\n # Empty (or not found) vs numeric.\n if (not first or first is _xNOTFOUND) and second_isnum:\n if first is _xNOTFOUND:\n first = None\n\n if second == 0:\n difference = first\n else:\n difference = 0 - second\n return xDeviation(difference, second, **kwds)\n\n # Object vs _xNOTFOUND.\n if second is _xNOTFOUND:\n return xExtra(first, **kwds)\n\n # _xNOTFOUND vs object.\n if first is _xNOTFOUND:\n return xMissing(second, **kwds)\n\n # All other pairs of objects.\n if is_common:\n return xInvalid(first, **kwds)\n return xInvalid(first, second, **kwds)",
"def diff(self, other):\n err = []\n if self.header != other.header:\n err.append((self.header, other.header))\n for section in ('questions', 'rr', 'auth', 'ar'):\n if section == 'questions':\n k = lambda x: tuple(map(str, (x.qname, x.qtype)))\n else:\n k = lambda x: tuple(map(str, (x.rname, x.rtype, x.rdata)))\n a = dict([(k(rr), rr) for rr in getattr(self, section)])\n b = dict([(k(rr), rr) for rr in getattr(other, section)])\n sa = set(a)\n sb = set(b)\n for e in sorted(sa.intersection(sb)):\n if a[e] != b[e]:\n err.append((a[e], b[e]))\n for e in sorted(sa.difference(sb)):\n err.append((a[e], None))\n for e in sorted(sb.difference(sa)):\n err.append((None, b[e]))\n return err",
"def setdiff(self, other):\n\n return self.intersect(other, op=np.setdiff1d)",
"def absolute_difference(new_data, old_data):\n diff = 0\n assert len(new_data) == len(old_data)\n for new, old in zip(new_data, old_data):\n diff += np.sum(np.abs(new-old))\n return diff",
"def difference(self, other):\n diff_set = Set()\n\n for bucket in self.buckets:\n for element in bucket.iterate():\n if not other.contains(element):\n diff_set.add(element)\n return diff_set",
"def test_diff(self):\n _ff_source = FlatfileDataset(_filename=os.path.join(Test_Resource_Dir, \"csv_source.csv\"),\n _has_header=True, _delimiter=\";\", _csv_dialect=\"excel-tab\",\n _quoting=\"MINIMAL\", _quotechar='\"')\n _dataset_source = _ff_source.load()\n _ff_dest = FlatfileDataset(_filename=os.path.join(Test_Resource_Dir, \"csv_dest_orig.csv\"),\n _has_header=True, _delimiter=\";\", _csv_dialect=\"excel-tab\",\n _quoting=\"MINIMAL\", _quotechar='\"')\n _dataset_dest = _ff_dest.load()\n # print(str(_dataset_dest))\n _missing_left, _missing_right, _difference, _sorted = compare(_dataset_source, _dataset_dest, [0], True)\n self.assertEqual(_missing_left,\n [[9, 7, ['7844', 'TURNER', 'SALESMAN', '7698', '1981-09-08 00:00:00', '1500', '', '30']],\n [12, 12, ['7999', 'BORJESSON', 'HACKER', '7839', '2013-01-01', '99999', '', '10']]],\n 'Missing left differs')\n self.assertEqual(_missing_right,\n [[6, 6, ['7782', 'CLARK', 'MANAGER', '7839', '1981-06-09 00:00:00', '2450', '', '10']],\n [7, 6, ['7788', 'SCOTT', 'ANALYST', '7566', '1982-12-09 00:00:00', '3000', '', '20']]],\n 'Missing right differs')\n\n self.assertEqual(_difference,\n [\n [0, 0, ['7369', 'SMITH', 'CLERK', '7902', '1980-12-17 00:00:00', '800', '', '20'],\n ['7369', 'SMITH', 'CLERK', '7902', '1980-12-17 00:00:00', '700', '', '20']],\n [1, 1, ['7499', 'ALLEN', 'SALE;SMAN', '7698', '1981-02-20 00:00:00', '1600', '300', '30'],\n ['7499', 'ALLEN', 'SALESMAN', '7698', '1981-02-20 00:00:00', '1600', '300', '30']],\n [8, 6, ['7839', 'KING', 'PRESIDENT ', '', '1981-11-17 00:00:00', '5000', '', '10'],\n ['7839', 'KING', 'PRESIDENT', '', '1981-11-17 00:00:00', '4500', '', '10']],\n [9, 8, ['7876', 'ADAMS', 'CLERK', '7788', '1983-01-12 00:00:00', '1100,5', '', '20'],\n ['7876', 'ADAMS', 'CLERK', '7788', '1983-01-12 00:00:00', '1100', '', '20']]\n ], 'Difference differs')",
"def difference(stack):\n assertArity(stack, 2)\n rhs, lhs = stack.pop(), stack.pop()\n assertType(lhs, Set)\n assertType(rhs, Set)\n return Set(lhs - rhs)",
"def get_resulting_diffs():\n diff_dirpath = application.join_abs_path(\n EMPTY_TEST_DIR, application.OUTPUT_DIR_NAME)\n diffleft_filename = application.join_abs_path(\n diff_dirpath, application.OUTPUT_DIFF_LEFT_FILENAME)\n diffright_filename = application.join_abs_path(\n diff_dirpath, application.OUTPUT_DIFF_RIGHT_FILENAME)\n\n diff_left = read_gzip_file_lines_into_set(diffleft_filename)\n diff_right = read_gzip_file_lines_into_set(diffright_filename)\n\n return diff_left, diff_right",
"def minusRes(res1, res2):\n return [(x - y) for x, y in zip(res1, res2)]",
"def create_pair_differences(self):\n\n # Create an empty dataframe of pair differences, we will append this later.\n pair_string_names = []\n pair_price_diff = []\n\n for pair in self.__pairs_data:\n # Choose both stocks from each pair\n stock_symbol_1 = pair[0]\n stock_symbol_2 = pair[1]\n\n # Create a string that symbolizes the pair and add it to a list of strings\n pair_string = str(stock_symbol_1) + '-' + str(stock_symbol_2)\n pair_string_names.append(pair_string)\n\n # Get both stock prices from the price dataset\n stock_price1 = self.__price_data[stock_symbol_1]\n stock_price2 = self.__price_data[stock_symbol_2]\n pair_diff = stock_price2 - stock_price1\n pair_price_diff.append(pair_diff)\n\n # Concat all the pairs into the pair differences attribute in class and set column names\n self.__pair_diff = pd.concat([pd.Series(pair_prices) for pair_prices in pair_price_diff], axis=1)\n self.__pair_diff.columns = pair_string_names\n\n return self.__pair_diff",
"def get_diff_for_otu_maps(otu_map1, otu_map2):\r\n\r\n otus1 = set(otu_map1.keys())\r\n otus2 = set(otu_map2.keys())\r\n ids1 = set([x for otu in otus1 for x in otu_map1[otu]])\r\n ids2 = set([x for otu in otus2 for x in otu_map2[otu]])\r\n\r\n return ids1 - ids2, ids2 - ids1"
]
| [
"0.69509035",
"0.68752706",
"0.6504726",
"0.65008557",
"0.642601",
"0.63917977",
"0.63459766",
"0.62474805",
"0.6239011",
"0.62191683",
"0.6194459",
"0.61915225",
"0.61788774",
"0.6174397",
"0.61711997",
"0.6162345",
"0.6144045",
"0.60510117",
"0.6031635",
"0.59960496",
"0.5993003",
"0.59880817",
"0.5986344",
"0.598207",
"0.59691554",
"0.59547585",
"0.59480643",
"0.5942872",
"0.59281176",
"0.59269595"
]
| 0.77125275 | 0 |
Return the pooled variance between the first and second data sets | def get_var_pool(cls, data1: tuple, data2: tuple) -> float:
cls._data_validation(data1)
cls._data_validation(data2)
n1 = cls.get_n(data1)
var1 = cls.get_var(data1)
n2 = cls.get_n(data2)
var2 = cls.get_var(data2)
return ((n1 - 1) * var1 + (n2 - 1) * var2) / (n1 + n2 - 2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pooled_sample_variance(sample1, sample2):\n deg_freedom = len(sample1) + len(sample2) - 2\n mean1 = statistics.mean(sample1)\n squares1 = ((x - mean1) ** 2 for x in sample1)\n mean2 = statistics.mean(sample2)\n squares2 = ((x - mean2) ** 2 for x in sample2)\n\n return (math.fsum(squares1) + math.fsum(squares2)) / float(deg_freedom)",
"def test_two_pop_unknown_var_ind(data1_: tuple, data2_: tuple):\n x_bar = cls.get_mean(data1_)\n y_bar = cls.get_mean(data2_)\n var_pool = cls.get_var_pool(data1_, data2_)\n n_x = cls.get_n(data1_)\n n_y = cls.get_n(data2_)\n return (x_bar - y_bar) / sqrt(var_pool / n_x + var_pool / n_y)",
"def std_meandiff_pooledvar(self):\n # this uses ``_var`` to use ddof=0 for formula\n\n d1 = self.d1\n d2 = self.d2\n # could make var_pooled into attribute\n var_pooled = (\n (d1.sumsquares + d2.sumsquares)\n /\n # (d1.nobs - d1.ddof + d2.nobs - d2.ddof))\n (d1.nobs - 1 + d2.nobs - 1)\n )\n return np.sqrt(var_pooled * (1.0 / d1.nobs + 1.0 / d2.nobs))",
"def calc_variances(ds):\n if ds.size <= 1:\n print 'Fail: not enough items for calculation %d' % ds.size\n return 0,1\n obs_var = ((ds.storage - ds.storage.sum()/ds.size)**2).sum()/(ds.size-1)\n rep_var = ds.var.sum()/ds.size\n return obs_var,rep_var",
"def compute_variance(\n self,\n parameters: NDArray,\n resids: NDArray,\n sigma2: NDArray,\n backcast: Union[float, NDArray],\n var_bounds: NDArray,\n ) -> NDArray:",
"def test_variance(self):\n self.assertEqual(variance(list1, sample=False), np.var(list1))\n self.assertEqual(variance(list1), np.var(list1, ddof=1))",
"def variance(dataset):\n avg = sum(dataset)/len(dataset)\n v = 0.0\n for data in dataset:\n v += (data - avg) * (data - avg)\n v = v / len(dataset)\n return v",
"def _compute_covariance(self, lc1, lc2):\n return np.cov(lc1.counts, lc2.counts)[0][1]",
"def test_two_pop_known_var_ind(data1_: tuple, data2_: tuple):\n x_bar = cls.get_mean(data1_)\n y_bar = cls.get_mean(data2_)\n var_x = cls.get_var(data1_, is_population=True)\n var_y = cls.get_var(data2_, is_population=True)\n n_x = cls.get_n(data1_)\n n_y = cls.get_n(data2_)\n return (x_bar - y_bar) / sqrt(var_x / n_x + var_y / n_y)",
"def variance_scorer(x, y):\n scores = [np.var(column) for column in x.T]\n return scores, np.array([np.NaN]*len(scores))",
"def test_profiled_mean_and_variance(self):\n\n def mean(df):\n total = 0\n for item in df:\n total += item\n return total / len(df)\n\n def var(df):\n var = 0\n mean_df = mean(df)\n for item in df:\n var += (item - mean_df) ** 2\n return var / (len(df) - 1)\n\n def batch_variance(mean_a, var_a, count_a, mean_b, var_b, count_b):\n delta = mean_b - mean_a\n m_a = var_a * (count_a - 1)\n m_b = var_b * (count_b - 1)\n M2 = m_a + m_b + delta ** 2 * count_a * count_b / (\n count_a + count_b)\n return M2 / (count_a + count_b - 1)\n\n data = np.linspace(-5, 5, 11).tolist()\n df1 = pd.Series(data)\n\n data = np.linspace(-3, 2, 11).tolist()\n df2 = pd.Series(data)\n\n data = np.full((10,), 1)\n df3 = pd.Series(data)\n\n num_profiler = FloatColumn(df1.name)\n num_profiler.update(df1.apply(str))\n\n self.assertEqual(mean(df1), num_profiler.mean)\n self.assertEqual(var(df1), num_profiler.variance)\n self.assertEqual(np.sqrt(var(df1)), num_profiler.stddev)\n\n variance = batch_variance(\n mean_a=num_profiler.mean, var_a=num_profiler.variance,\n count_a=num_profiler.match_count,\n mean_b=mean(df2), var_b=var(df2), count_b=df2.count()\n )\n num_profiler.update(df2.apply(str))\n df = pd.concat([df1, df2])\n self.assertEqual(mean(df), num_profiler.mean)\n self.assertEqual(variance, num_profiler.variance)\n self.assertEqual(np.sqrt(variance), num_profiler.stddev)\n\n variance = batch_variance(\n mean_a=num_profiler.mean, var_a=num_profiler.variance,\n count_a=num_profiler.match_count,\n mean_b=mean(df3), var_b=var(df3), count_b=df3.count()\n )\n num_profiler.update(df3.apply(str))\n\n df = pd.concat([df1, df2, df3])\n self.assertEqual(mean(df), num_profiler.mean)\n self.assertEqual(variance, num_profiler.variance)\n self.assertEqual(np.sqrt(variance), num_profiler.stddev)",
"def sample_variance(self, x_dict={}):\n raise NotImplementedError()",
"def f_test_var(data1,data2):\n var1, var2 = np.var(data1,ddof = 1),np.var(data2,ddof = 1)\t# compute variance\n df1, df2, = len(data1) - 1, len(data2) - 1\t\t# compute degrees of freedom\n if var1 > var2:\n\tprob = 2. * f.cdf(var1/var2,df1,df2)\n else:\n\tprob = 2. * f.cdf(var2/var1,df2,df1)\n if prob > 1.:\n\treturn 2. - prob\n else:\n\treturn prob",
"def variance(self):\n return 1 / self.count() * sum((number-self.average())**2 for number in self.numbers)",
"def _variance(self, features):\n return np.mean(np.var(features.reshape((features.shape[0], -1)), axis=1))",
"def variance(self):\r\n\t\t_mean = sum(self.sample)/len(self.sample)\r\n\t\treturn sum(map(lambda x: (x - _mean)**2, self.sample))/(len(self.sample) - 1)",
"def _variance(self,gp):\r\n return self.variance",
"def reconstructions_variance(self):\n self.assert_sampled()\n return [[j.variance().numpy() for j in i] for i in self._reconstructions]",
"def variance(data, xbar=None):\n if iter(data) is data:\n data = list(data)\n data_len = len(data)\n if data_len < 2:\n raise StatisticsError('variance requires at least two data points')\n return _ss(data, xbar) / (data_len - 1)",
"def explained_variance_score(y_true, y_pred, *, sample_weight=..., multioutput=...):\n ...",
"def dp_variance(data, lower=None, upper=None, mechanism=\"Automatic\", privacy_usage=None, finite_sample_correction=True, **kwargs):\n return Component(\n \"DPVariance\",\n arguments={\n 'data': Component.of(data),\n 'lower': Component.of(lower),\n 'upper': Component.of(upper)\n },\n options={\n 'mechanism': mechanism,\n 'privacy_usage': serialize_privacy_usage(privacy_usage),\n 'finite_sample_correction': finite_sample_correction\n },\n constraints=kwargs)",
"def variance(self, mean=None):\n raise NotImplementedError",
"def variance(self):\n sum_sqdif = 0 # initialize sum of squared differences\n # Calculate sum of squared differences\n for site in self.sites:\n sqdif = (site.siteZmArea - self.meanZmArea()) ** 2\n sum_sqdif = sqdif + sum_sqdif \n # Standard Deviation\n stddev = ((1 / ( float(self.ni) - 1 )) * sum_sqdif ) ** 0.5\n # Variance\n var = stddev ** 2\n return var",
"def variance( values, sample=False ):\n mean_val = mean_value( values )\n n_val = len( values ) -1 if sample else len( values )\n return sum( [ j**2 for j in [ i - mean_val for i in values ] ] ) / n_val",
"def variance(df, cols, dummy_col, generated_feature_name, params=None): \n group_cols = cols[:-1]\n calc_col = cols[-1]\n group = df[cols].groupby(by=group_cols)[[calc_col]].var().reset_index().rename(index=str, columns={calc_col: generated_feature_name}).fillna(0)\n dtype = {x: df[x].dtype for x in group_cols if x in df.columns.values}\n dtype[generated_feature_name] = utils.set_type(group[generated_feature_name], 'float')\n _df = df.merge(group.astype(dtype), on=group_cols, how='left')\n r = _df[[generated_feature_name]].copy()\n del dtype, _df, group\n gc.collect()\n module_logger.debug('feature generated: {}'.format(generated_feature_name))\n return r",
"def pooled_covariance_matrix(x, y, bessel=True):\n if bessel:\n n1, n2 = bessel_correction(x, y)\n else:\n try:\n n1 = x.shape[0]\n n1 = n1.compute()\n n2 = y.shape[0]\n n2 = n2.compute()\n except AttributeError:\n n1 = x.shape[0]\n n2 = y.shape[0]\n try:\n s1 = n1 * x.cov().compute()\n\n except AttributeError:\n s1 = n1 * np.cov(x, rowvar=False)\n try:\n s2 = n2 * y.cov().compute()\n except AttributeError:\n s2 = n2 * np.cov(y, rowvar=False)\n s = (s1 + s2) / (n1 + n2)\n\n return s",
"def variance(self):\n raise RuntimeError(\"Needs to be implemented in base class\")",
"def calculate_covariance(column1: pd.Series, column2: pd.Series) -> np.float64:\n\n cov = column1.cov(column2)\n return cov",
"def postfit_covariance(self) -> NONEARRAY:\n pass",
"def variance(self, sample=True):\n distance_squared = list(map(lambda x: (x - sum(self.data)/self.size)**2, self.data))\n\n if sample == True:\n variance = sum(distance_squared)/(self.size - 1)\n if sample == False: \n variance = sum(distance_squared)/(self.size)\n return variance"
]
| [
"0.7066996",
"0.64504707",
"0.64277244",
"0.640207",
"0.6249601",
"0.6203304",
"0.59842765",
"0.5964988",
"0.5925388",
"0.5816522",
"0.5807429",
"0.5756509",
"0.56738883",
"0.5667132",
"0.56528944",
"0.562759",
"0.56016576",
"0.55566925",
"0.554267",
"0.5518305",
"0.55033016",
"0.5497004",
"0.54952806",
"0.549406",
"0.546881",
"0.5450373",
"0.544373",
"0.5429634",
"0.54274315",
"0.5408731"
]
| 0.6641171 | 1 |
Convert actual df into t_table lookup df. For n > 150, returns 999 (z-score lookup value for t-table). | def _get_lookup_df(cls, df_data: tuple or list, df_is_population=False) -> int:
cls._data_validation(df_data)
n = cls.get_n(df_data)
df = n-1
if df >= 150 or df_is_population:
return 999
elif df <= 30:
return df
else:
# stores the previous key in the loop
last_key = 0
# all the valid df lookup values for the t-table
list_of_lookup_dfs = sorted(cls.t_table.keys())
for lookup_df in range(len(list_of_lookup_dfs)):
current_key = list_of_lookup_dfs[lookup_df]
if current_key == df:
return df
elif max(current_key, df) == current_key:
return last_key
else:
last_key = current_key | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def prepare_titanic_data(df):\n\n df.embark_town.fillna('Other', inplace=True)\n\n # Drop deck and embarked_town\n df.drop(columns=['deck', 'embark_town'], inplace=True)\n\n # Encoding: Objects (Categorical Variables) to Numeric\n # Use sklearn's LabelEncoder\n encoder = LabelEncoder()\n\n # Set Unknown and encode Embarked column to numbers\n # 2 == \"S\" == Southampton == 644 people\n # 0 == \"C\" == Cherbourg == 168 people\n # 1 == \"Q\" == Queenstown == 77 people\n # 3 == \"Unknown\" == 2 people\n df.embarked.fillna('Unknown', inplace=True)\n encoder.fit(df.embarked)\n df.embarked = encoder.transform(df.embarked)\n\n # Encode the Class (first class, second, etc...)\n # First class == 0\n # Second class == 1\n # Third class == 2\n encoder.fit(df[\"class\"])\n df[\"class_encoded\"] = encoder.transform(df[\"class\"])\n\n # Encode gender\n # male == 1 == 577 records\n # female == 0 == 314 records\n encoder.fit(df.sex)\n df[\"sex_encoded\"] = encoder.transform(df.sex)\n\n # Handle the 177 records with missing age values\n average_age = df.age.mean()\n df.age.fillna(average_age, inplace=True)\n\n scaler = MinMaxScaler()\n scaler.fit(df[['fare']])\n df[\"fare_scaled\"] = scaler.transform(df[['fare']])\n\n scaler = MinMaxScaler()\n scaler.fit(df[['age']])\n df[\"age_scaled\"] = scaler.transform(df[['age']])\n\n # Set the index to the passenger id\n df = df.set_index(\"passenger_id\")\n return df",
"def create_table(f, geoinfo):\n bounds_cols = xb_points + yb_points\n df = pd.read_csv(f, delimiter=\";\", index_col=\"INDEX_RC\")\n df[duration_name] = parse_duration_level(f)\n df = df.join(geoinfo[[\"X_CENT_GEO\", \"Y_CENT_GEO\", \"Col\", \"Row\"]])\n df = df.rename(columns={\"Col\": x, \"Row\": y, \"X_CENT_GEO\": lon, \"Y_CENT_GEO\": lat})\n return df",
"def dd_valley_value_map_nb(record, ts):\n return ts[record['valley_idx'], record['col']]",
"def generateLookupTable(base = None, verbose = True):\n \n pool = Pool(cpu_count());\n lut = pool.map(matchIndex, range(2**26),chunksize=2**26/8/cpu_count());\n \n return np.array(lut, dtype = bool);",
"def get_tm_mapping(mapping):\n df = mapping[['slot', 'row', 'col', 'xpix', 'ypix']]\n f_rowcol = lambda v: v.values[0] // 8\n f = dict(row=f_rowcol, col=f_rowcol, xpix='mean', ypix='mean')\n df = df.groupby('slot').agg(f, as_index=False).reset_index()\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', UserWarning)\n df.metadata = mapping.metadata\n df.metadata['n_rows'] = df['row'].max() + 1\n df.metadata['n_columns'] = df['col'].max() + 1\n df.metadata['size'] *= 8\n return df",
"def create_smarter_lookup_table(self, y=0.95):\n # First determine an approximate starting point for the lookup taqble by halving the max value till the point \n # where the cdf value is less than the cdf value we are looking for\n xold = self.xmax\n xnew = self.xmax\n y_calc = self.exgauss_cdf(xnew)\n while y_calc > y:\n xold = xnew\n xnew = xnew/2.\n y_calc = self.exgauss_cdf(xnew)\n \n # Make sure the interval over which this is being constructed is okay\n npts = 10. # Number of data pts in case the interval xold-xnew is smaller than self.dx\n if xold-xnew < self.dx:\n dx = int((xold-xnew)/npts)\n else: \n dx = self.dx\n # Now start building the lookup table from the value of x\n return self.exgauss_cdf_nparray(range(int(xnew),int(xold), dx)).tolist(), range(int(xnew),int(xold), dx)",
"def TSRANK(A: pd.DataFrame, n) -> pd.DataFrame:\r\n At = pivot_table(A)\r\n for i in range(len(At.columns)):\r\n At.iloc[:, i] = At.iloc[:, i].rolling(\r\n n).apply(lambda x: (np.argsort(x)[-1]+1)/n)\r\n res = stack_table(At)\r\n return res",
"def dl_tier(self, tier):\n\n tier_df = pd.DataFrame()\n\n for t in self.tier_tables[tier]:\n\n for y in self.years:\n\n df = get_GHGRP_records(y, t)\n\n tier_df = tier_df.append(df, sort=True, ignore_index=True)\n\n tier_df.columns = [x.lower() for x in tier_df.columns]\n\n # Fix issues with natural gas HHV reporting\n # Other fuel HHVs were exammined manually. There's a wide range for\n # wood and wood residuals, but not other fuels.\n if tier == 't2_hhv':\n\n tier_df['high_heat_value'] = \\\n tier_df.high_heat_value.astype('float32')\n\n natgas_st_index = tier_df[\n (tier_df.fuel_type == 'Natural Gas (Weighted U.S. Average)') &\n (tier_df.high_heat_value_uom == 'mmBtu/short ton')\n ].index\n\n tier_df.loc[natgas_st_index, 'high_heat_value_uom'] = 'mmBtu/scf'\n\n m_index = tier_df[\n (tier_df.fuel_type == 'Natural Gas (Weighted U.S. Average)') &\n (tier_df.high_heat_value.between(1, 1.2))\n ].index\n\n tier_df.high_heat_value.update(\n tier_df.loc[m_index, 'high_heat_value'].divide(1000)\n )\n\n drop_index = tier_df[\n (tier_df.fuel_type == 'Natural Gas (Weighted U.S. Average)') &\n (tier_df.high_heat_value.between(0.0012, 0.0014))\n ].index\n\n tier_df = tier_df[~tier_df.index.isin(drop_index)]\n\n return tier_df",
"def tdist95conf_level(df):\n df = int(round(df))\n highest_table_df = len(_T_DIST_95_CONF_LEVELS)\n if df >= 200:\n return 1.960\n if df >= 100:\n return 1.984\n if df >= 80:\n return 1.990\n if df >= 60:\n return 2.000\n if df >= 50:\n return 2.009\n if df >= 40:\n return 2.021\n if df >= highest_table_df:\n return _T_DIST_95_CONF_LEVELS[highest_table_df - 1]\n return _T_DIST_95_CONF_LEVELS[df]",
"def fg_day_compare_features_n(cls, n):\n\n def func(df):\n val = cls.day_compare_features(df)\n return [np_shift(item, n, 0) for item in val]\n\n return func",
"def helper_create_data(n=500):\n N1 = list(np.random.exponential(3, n))\n N2 = list(np.random.normal(2, 2, n))\n N3 = list(np.random.normal(10, 3, n))\n N4 = list(np.random.exponential(2, n))\n C1 = list(np.random.binomial(1, 0.7, n))\n C2 = list(np.random.poisson(1, n))\n C3 = list(np.random.binomial(5, 0.4, n))\n a = ['cat', 'dog', 'lion']\n C4 = list(np.random.choice(a, n))\n df = pd.DataFrame({\n 'C1': C1,\n 'C2': C2,\n 'C3': C3,\n 'N1': N1,\n 'N2': N2,\n 'N3': N3,\n 'N4': N4,\n 'C4': C4\n })\n rows = list(np.random.randint(0, n, 20))\n cols = list(np.random.randint(0, 7, 5))\n df.iloc[rows, cols] = np.nan\n\n return df",
"def create_N_table_lookup(N=None,\n alphabet=['0', '1'],\n n_repeats=3,\n namer=lambda i: \"t{}\".format(i + 1),\n seed=123):\n np.random.seed(seed)\n inputs = np.array(list(''.join(letters)\n for letters in itertools.product(alphabet, repeat=n_repeats)))\n iter_outputs = itertools.permutations(inputs)\n if N is not None:\n iter_outputs = np.array(list(iter_outputs))\n indices = np.random.choice(range(len(iter_outputs)), size=N, replace=False)\n iter_outputs = iter_outputs[indices]\n return [pd.Series(data=outputs, index=inputs, name=namer(i)) for i, outputs in enumerate(iter_outputs)]",
"def __create_ttable(self):\n # Currently assume equiprobable distribution\n # Indexed by previous interval and current interval\n ttable = {}\n\n len_ = len(self.scale)\n\n kv = []\n for pitch in PitchRange:\n for interval in IntervalRange:\n kv.append(((interval, pitch), self.__create_trans(interval,pitch)))\n\n ttable = dict(kv)\n #pdb.set_trace()\n self.ttable = ttable",
"def t_score_lookup(samples, confidence, xBar, s, display=False):\n try:\n if confidence > 1:\n confidence = confidence / 100.0\n print(f\"Converting confidence interval to {confidence}\")\n\n n = len(samples)\n if n >= 30:\n raise tablelookup_Error(\"Use the Z score instead. Sample size is greater than 30!\")\n # Lookup t value at given confidence interval\n t = scipy.stats.t.ppf((1 + confidence) / 2.0, n-1)\n\n # Calculate lower and upper boundaries \n lowerBound = xBar - (t * (s / (n **.5)))\n upperBound = xBar + (t * ( s / (n **.5)))\n \n if display == True:\n print(f\"T Score at {confidence} confidence: {t}\")\n print(f\"Mean is within range {lowerBound}, {upperBound} with {confidence} confidence!\")\n return lowerBound, upperBound\n \n except ValueError:\n print(\"Confidence Interval must be a numeric value\")\n except tablelookup_Error:\n print(\"Use the Z score instead. Sample size is greater than 30!\")\n except:\n print (\"Unexpected error:\", sys.exc_info()[0])\n raise",
"def get_unit_tally_at_t(dict_of_dataframes, t = np.inf):\n df = dict_of_dataframes['units']\n df['died_at'] = df['died_at'].apply(lambda x:np.inf if x is None else x)\n\n # We select the units that never died, belonnging to either player\n df = df[df['died_at'] >= t+1]\n df = df[df['finished_at'] < t]\n df = df[df['race']!= 'Neutral']\n\n # We prefix the unit names with the player id, and unit race initial (T,Z or P)\n tally = df[['owner','race','name']]\\\n .apply(lambda x: str(x['owner'])+x['race'][0]+'_'+ x['name'], axis=1)\\\n .value_counts()\n return tally",
"def t_score(colloc_count, c_pattern, c_lw, n):\r\n score = (colloc_count - (c_pattern * c_lw) / n) / (sqrt(colloc_count))\r\n return score",
"def predict_table(tree: dict, df: pd.DataFrame, col_name: str = 'predicted') -> pd.DataFrame:\r\n df = df.copy()\r\n df[col_name] = np.nan\r\n for j in range(df.shape[0]):\r\n df.iloc[j, -1] = predict_record(tree, df.iloc[j])\r\n\r\n df_predict = df\r\n return df_predict",
"def _generate_tabular(lookup_table, interpolation='linear', points_unit=u.pix, **kwargs):\n if not isinstance(lookup_table, u.Quantity):\n raise TypeError(\"lookup_table must be a Quantity.\") # pragma: no cover\n\n ndim = lookup_table.ndim\n TabularND = tabular_model(ndim, name=f\"Tabular{ndim}D\")\n\n # The integer location is at the centre of the pixel.\n points = [(np.arange(size) - 0) * points_unit for size in lookup_table.shape]\n if len(points) == 1:\n points = points[0]\n\n kwargs = {\n 'bounds_error': False,\n 'fill_value': np.nan,\n 'method': interpolation,\n **kwargs\n }\n\n t = TabularND(points, lookup_table, **kwargs)\n\n # TODO: Remove this when there is a new gWCS release\n # Work around https://github.com/spacetelescope/gwcs/pull/331\n t.bounding_box = None\n\n return t",
"def tally_target(state):\r\n\tif type(state)!=list:\r\n\t\tstate = [state]\r\n\tcounty_dict = load_county_dicts(state)\r\n\ttract_dict = load_tract_dicts(state)\r\n\r\n\ttarget = pd.DataFrame(columns = ['state','county','tract'])\r\n\tr = 0\r\n\tfor key in tract_dict.keys():\r\n\t\tfor v in tract_dict[key]:\r\n\t\t\ttarget.loc[r] = fips_dict[key[0]], key[1], v\r\n\t\t\tr +=1\r\n\r\n\ttarget = target.sort_values(by = target.columns.tolist())\r\n\r\n\treturn target.reset_index(drop = True)",
"def rt(n=1, df=1, loc=0, scale=1, ncp=None):\n # ==========================================================================\n return t.rvs(df=df, loc=loc, scale=scale, size=n)",
"def calc_raws(df, unit='leader'):\n\n df['distrust'] = df['HDIS']/(df['HDIS']+df['LDIS'])\n df['task'] = df['HTASK']/(df['HTASK']+df['LTASK'])\n df['bace'] = df['IC']/(df['IC']+df['EC'])\n df['igb'] = df['HBIAS']/(df['HBIAS']+df['LBIAS'])\n df['sc'] = df['HSC']/(df['HSC']+df['LSC'])\n df['cc'] = df['HCC']/(df['HCC']+df['LCC'])\n df['power'] = df['HPWR']/(df['HPWR']+df['LPWR'])\n\n df['i1'] = df.apply(i1_func, axis=1)\n df['i2'] = df.apply(i2_func, axis=1)\n df['i3'] = df.apply(i3_func, axis=1)\n\n df['i4a'] = df.apply(i4a_func, axis=1)\n df['i4b'] = df.apply(i4b_func, axis=1)\n df['i5ap'] = df.apply(i5ap_func, axis=1)\n df['i5pr'] = df.apply(i5pr_func, axis=1)\n df['i5re'] = df.apply(i5re_func, axis=1)\n df['i5op'] = df.apply(i5op_func, axis=1)\n df['i5th'] = df.apply(i5th_func, axis=1)\n df['i5pu'] = df.apply(i5pu_func, axis=1)\n\n df['p1'] = df.apply(p1_func, axis=1)\n df['p2'] = df.apply(p2_func, axis=1)\n df['p3'] = df.apply(p3_func, axis=1)\n df['p4'] = df.apply(p4_func, axis=1)\n df['p5'] = df.apply(p5_func, axis=1)\n\n if unit == 'leader':\n\n keep = ['firstname', 'lastname', 'name', 'Ccode', 'vcount', 'distrust', 'task',\n 'bace', 'igb', 'sc', 'cc', 'power', 'i1', 'i2', 'i3', 'i4a', 'i4b',\n 'i5ap', 'i5pr', 'i5re', 'i5op', 'i5th', 'i5pu', 'p1', 'p2', 'p3', 'p4',\n 'p5']\n\n df = df[keep]\n\n elif unit == 'year':\n\n keep = ['firstname', 'lastname', 'name', 'year', 'Ccode', 'vcount', 'distrust', 'task',\n 'bace', 'igb', 'sc', 'cc', 'power', 'i1', 'i2', 'i3', 'i4a', 'i4b',\n 'i5ap', 'i5pr', 'i5re', 'i5op', 'i5th', 'i5pu', 'p1', 'p2', 'p3', 'p4',\n 'p5']\n\n df = df[keep]\n\n elif unit == 'month':\n\n keep = ['firstname', 'lastname', 'name', 'yr_month','Ccode', 'vcount', 'distrust', 'task',\n 'bace', 'igb', 'sc', 'cc', 'power', 'i1', 'i2', 'i3', 'i4a', 'i4b',\n 'i5ap', 'i5pr', 'i5re', 'i5op', 'i5th', 'i5pu', 'p1', 'p2', 'p3', 'p4',\n 'p5']\n\n df = df[keep]\n\n df['year'] = df['yr_month'].apply(lambda x: x.split('-')[0])\n df['month'] = df['yr_month'].apply(lambda x: x.split('-')[1])\n\n elif unit == 'quarter':\n\n keep = ['firstname', 'lastname', 'name', 'yr_quarter', 'Ccode', 'vcount', 'distrust', 'task',\n 'bace', 'igb', 'sc', 'cc', 'power', 'i1', 'i2', 'i3', 'i4a', 'i4b',\n 'i5ap', 'i5pr', 'i5re', 'i5op', 'i5th', 'i5pu', 'p1', 'p2', 'p3', 'p4',\n 'p5']\n\n df = df[keep]\n\n df['year'] = df['yr_quarter'].apply(lambda x: x.split('-')[0])\n df['quarter'] = df['yr_quarter'].apply(lambda x: x.split('-')[1])\n\n return df",
"def make_tree_col(df, tnums):\n tree_inds = np.where(df.scale.values == 1)[0]\n tree_inds = np.append(tree_inds, df.shape[0])\n return np.repeat(tnums, np.diff(tree_inds))",
"def range_table(n: int, *, parallelism: int = -1) -> Dataset[ArrowRow]:\n return read_datasource(\n RangeDatasource(), parallelism=parallelism, n=n, block_format=\"arrow\"\n )",
"def fake_t2(incol):\n keys = [\n 'M-dwarf', 'KN', 'AGN', 'SLSN-I',\n 'RRL', 'Mira', 'SNIax', 'TDE',\n 'SNIa', 'SNIbc', 'SNIa-91bg',\n 'mu-Lens-Single', 'EB', 'SNII'\n ]\n values = [0.0] * len(keys)\n out = {k: v for k, v in zip(keys, values)}\n return pd.Series([out] * len(incol))",
"def tisa(date, period=36):\r\n m = ['M1 ', 'M2 ', 'M3 ', 'M4 ', 'M5 ', 'M6 ', 'M7 ', 'M8 ']\r\n d = [t.strftime(\"%Y%m\") for t in pd.date_range(start=date, periods=period / 3, freq='-3M')]\r\n req = \"select SourceId, YrMn, ElementCode, ElementValue from PI_IndValues \\\r\n where ElementCode in ({}) and YrMn in ({}) and SourceCode==' '\" \\\r\n .format(','.join([\"'\" + j + \"'\" for j in m]), ','.join([str(dd) for dd in d]))\r\n t = pd.read_sql(req, engine)\r\n tisa2 = pd.DataFrame()\r\n\r\n for u in t.SourceId.unique():\r\n try:\r\n M = [t.ElementValue[(t.ElementCode == el) & (t.SourceId == u)].values.sum() for el in m]\r\n tisa = (M[0] + M[1] + M[2] + M[4] + M[5] + M[7]) / (M[3] + M[6]) * 2E5\r\n tisa2 = tisa2.append(pd.DataFrame(data={'LocId': [u], 'PeriodEndYrMn': [d[0]], 'ResultsValue': [tisa]}))\r\n except:\r\n pass\r\n return tisa2",
"def CC_WEI_MAX_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','CC_WEI_MAX']]\n Feature_DF.loc[:,'CC_WEI_MAX_TRS'] = Feature_DF.loc[:,'CC_WEI_MAX'].pow(9/5)\n Feature_DF = Feature_DF.loc[:,['HNAME','CC_WEI_MAX_TRS']]\n\n return Feature_DF",
"def create_dummies(df):",
"def OD_CR_FAVT_TRS(Dataframe):\n\n Feature_DF = Dataframe.loc[:,['HNAME','OD_CR_FAVT']]\n Feature_DF.loc[:,'OD_CR_FAVT_TRS'] = Feature_DF.loc[:,'OD_CR_FAVT'].pow(10)\n Feature_DF = Feature_DF.loc[:,['HNAME','OD_CR_FAVT_TRS']]\n\n return Feature_DF",
"def create_test_df():\n test_df = pd.DataFrame({'id': [i for i in range(1, 1001)], 'member_id': [\n 10 * i for i in range(1, 1001)]})\n test_df['na_col'] = np.nan\n test_df['id_na'] = test_df.id\n test_df.loc[1:3, 'id_na'] = np.nan\n test_df['constant_col'] = 'constant'\n test_df['constant_col_num'] = 0\n test_df['character_factor'] = [\n choice(list('ABCDEFG')) for _ in range(1000)]\n test_df['num_factor'] = [choice([1, 2, 3, 4]) for _ in range(1000)]\n test_df['nearzerovar_variable'] = 'most_common_value'\n test_df.loc[0, 'nearzerovar_variable'] = 'one_value'\n test_df['binary_variable'] = [choice([0, 1]) for _ in range(1000)]\n test_df['character_variable'] = [str(i) for i in range(1000)]\n test_df['duplicated_column'] = test_df.id\n test_df['many_missing_70'] = [1] * 300 + [np.nan] * 700\n test_df['character_variable_fillna'] = ['A'] * \\\n 300 + ['B'] * 200 + ['C'] * 200 + [np.nan] * 300\n test_df['numeric_variable_fillna'] = [1] * 400 + [3] * 400 + [np.nan] * 200\n test_df['num_variable'] = 100.0\n test_df['int_factor_10'] = [choice(range(10)) for _ in range(1000)]\n test_df['outlier'] = normal(size=1000)\n test_df.loc[[1, 10, 100], 'outlier'] = [999, 3, 999]\n test_df['outlier_na'] = test_df['outlier']\n test_df.loc[[300, 500], 'outlier_na'] = np.nan\n test_df['datetime'] = pd.date_range('1/1/2015', periods=1000, freq='H')\n test_df['None_100'] = [1] * 900 + [None] * 100\n test_df['None_na_200'] = [1] * 800 + [None] * 100 + [np.nan] * 100\n test_df['character_variable_up1'] = ['A'] * 500 + ['B'] * 200 + ['C'] * 300\n test_df['character_variable_up2'] = ['A'] * 500 + ['B'] * 200 + ['D'] * 300\n test_df['other_na'] = ['Missing'] * 100 + ['missing'] * 100 + ['N/a'] * 100 + \\\n ['NA'] * 100 + ['na'] * 100 + ['n/a'] * 100 + ['Not Available'] * 100 + \\\n ['Unknown'] * 100 + ['do_not_touch'] * 200\n return test_df",
"def convert_trsp_index(geneDictNonCoding, df, TR_index_dict):\n\n\n\tgeneDictCanon = OrderedDict()\n\t\n\tfor gene in geneDictNonCoding:\n\t\ttrDF = df.iloc[geneDictNonCoding[gene][0]:geneDictNonCoding[gene][1]]\n\t\ttrDFz = trDF.reset_index(drop=True)\n\t\t\n\t\ttrCount = 0\n\t\ttrDictLoc = OrderedDict()\n\t\t\n\t\tfor i in range(len(trDFz)):\n\t\t\tif trDFz.loc[i, 'feature'] == 'transcript':\n\t\t\t\ttr = trDFz.loc[i, 'transcript_id']\n\t\t\t\ttrdict = parse_entry(tr)\n\t\t\t\ttrName = trdict['transcript_id'][0]\n\t\t\t\ttrDictLoc[trName] = [trDFz.loc[i, 'chromStart'], trDFz.loc[i, 'chromEnd']]\n\t\t\t\ttrCount += 1\n\t\t\n\t\tif trCount > 1:\n# print gene, \"more than 1 trsp !!! \\n\"\n\t\t\t\n\t\t\trangeDict = OrderedDict() ## store the ranges, and take the longest\n\t\t\tfor key in trDictLoc:\n\t\t\t\ttrRange = len(range(int(trDictLoc[key][0]),int(trDictLoc[key][1])))\n\t\t\t\trangeDict[key] = trRange\n\t\t\t\t\n\t\t\tv=list(rangeDict.values())\n\t\t\tk=list(rangeDict.keys())\n\t\t\ttrOut = k[v.index(max(v))]\n# print trOut\n\t\t\tgeneDictCanon[trOut] = [gene, TR_index_dict[trOut]]\n\t\t\t\n\t\t\t\n\n\t\telse: ## for genes with single transcripts\n\t\t\ttrOut = trDictLoc.keys()[0]\n\t\t\tgeneDictCanon[trOut] = [gene, TR_index_dict[trOut]]\n\treturn geneDictCanon"
]
| [
"0.54173565",
"0.53379613",
"0.52356905",
"0.52255005",
"0.51306707",
"0.51279414",
"0.511602",
"0.51081264",
"0.5105641",
"0.5036905",
"0.50213903",
"0.50027704",
"0.49914676",
"0.4990226",
"0.49587485",
"0.4940191",
"0.4919737",
"0.49194276",
"0.49128762",
"0.4908752",
"0.49070922",
"0.49007726",
"0.48857722",
"0.4857347",
"0.48559305",
"0.4847605",
"0.48306227",
"0.4829209",
"0.48215106",
"0.48175427"
]
| 0.5578196 | 0 |
Return alpha(\u03b1), determined by CL and tailedness. \u03b1 = 1 - CL; \u03b1 = (1 - CL) / 2 for two-tailed tests, to account for both possible extreme tails in the distribution. | def _get_alpha(cls, cl: float, tail: str):
alpha = (1 - cl) / 2 if tail == "two" else (1 - cl)
return round(alpha, 3) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def alpha(self,k1,k2,c):\n if k1 == 0.:\n return 0.\n else:\n return 1. + k2*c/k1",
"def _tstat_alpha(self):\n return _handle_ab(self._tstat_all, self.use_const)[0]",
"def clAlphaT(self):\n A = 2 * pi * self.aspectRatioT\n B = (self.aspectRatioT * self.betaT / self.airfoilEffT)**2\n C = 1 + ((tan(radians(self.sweep50T)))**2) / (self.betaT**2)\n return A / (2 + sqrt(4 + B * C))",
"def nalpha(self) -> int:\n return self._core.nalpha()",
"def _alpha(self):\n return _handle_ab(self.solution, self.use_const)[0]",
"def comp_alpha(self):\n pass",
"def alpha(self) -> float:\n return self._alpha",
"def alpha(self):\n return self._alpha",
"def alpha(self):\n return self._alpha",
"def alpha(self):\n return self._alpha",
"def correctalpha(desiredalpha, level):\n \n correctedalpha = 1 - (1 - desiredalpha) ** (1.0 / level)\n \n return correctedalpha",
"def tstat_alpha(self):\n return self._tstat_alpha",
"def alpha_from_c2(c2p, c2s):\n return 0.55 * ((c2s/c2p) / 2.)**(-1./3.)",
"def alpha(self) -> float:\n return self.intrinsic_matrix[0, 1]",
"def alpha(asset, strategy):\n up = asset['forward_returns'][asset['forward_returns'] > 0]\n down = asset['forward_returns'][asset['forward_returns'] < 0]\n bh_alpha = np.sum(up) / np.abs(np.sum(down))\n\n strat_returns = asset['forward_returns'][strategy]\n up = strat_returns[strat_returns > 0]\n down = strat_returns[strat_returns < 0]\n strat_alpha = np.sum(up) / np.abs(np.sum(down))\n\n _alpha = (strat_alpha / bh_alpha) - 1\n return _alpha",
"def alpha(self):\r\n return self.unif[17]",
"def optimal_alpha():\n\n # When I checked all of alphas, -0.01 was the best\n alpha = -0.01\n # np.random.choice([-0.06, -0.01, 0.04, 0.1])\n return alpha",
"def Alpha(self):\r\n return self._alpha",
"def comp_alphas(self):\n Rbo = self.get_Rbo()\n\n # alpha_Tt is the angle of the tooth to have the correct top width\n alpha_Tt = 2 * float(arcsin(self.W3 / (2 * Rbo)))\n\n # alpha_0 + alpha_Tt = slot_ptich\n # Zs * (alpha_0+alpha_Tt) = 2 pi\n alpha_0 = 2 * pi / self.Zs - alpha_Tt\n\n if self.is_outwards():\n # alpha_Tb is the angle of the tooth to have the correct bottom width\n alpha_Tb = 2 * float(arcsin(self.W3 / (2 * (Rbo + self.H2))))\n else:\n alpha_Tb = 2 * float(arcsin(self.W3 / (2 * (Rbo - self.H2))))\n\n # Zs * (alpha_2+alpha_Tb) = 2 pi\n alpha_2 = 2 * pi / self.Zs - alpha_Tb\n\n return (alpha_0, alpha_2)",
"def alpha_interpretation(self, alpha_score):\r\n if alpha_score <= 0.667:\r\n alpha_interpretation = 'Unreliable agreement'\r\n elif 0.667 < alpha_score < 0.81:\r\n alpha_interpretation = 'Acceptable agreement'\r\n elif 0.80 < alpha_score <= 1:\r\n alpha_interpretation = 'Substantial agreement'\r\n elif alpha_score == 1:\r\n alpha_interpretation = 'Perfect agreement'\r\n return alpha_interpretation",
"def tt_entails(kb, alpha):\n assert not variables(alpha)\n return tt_check_all(kb, alpha, prop_symbols(kb & alpha), {})",
"def get_alpha():\n norm = np.sqrt((self.position[1] - y0)**2 + (self.position[0] - x0)**2)\n if norm:\n return np.arccos((self.position[0] - x0)/norm) * (1 - 2*(self.position[1] > y0))\n return 0",
"def tx_alpha(cf, af, cb, ab):\n\n return round_int(\n abs(\n cf * (af * RGB_CHANNEL_SCALE) + cb * (ab * RGB_CHANNEL_SCALE) * (1 - (af * RGB_CHANNEL_SCALE))\n )\n ) & 0xFF",
"def test_compute_alphas(self):\n\t\tdetails = self.watcher.analyze(layers=[self.second_layer], pool=False, randomize=False, plot=False, mp_fit=False, pl_package=WW_POWERLAW)\n\t\t#d = self.watcher.get_details(results=results)\n\t\ta = details.alpha.to_numpy()\n\t\tself.assertAlmostEqual(a[0],1.74859, places=3)\n\t\tself.assertAlmostEqual(a[1],1.66595, places=3)\n\t\tself.assertAlmostEqual(a[3],1.43459, places=3)",
"def cointegration_test(df, alpha=0.05):\n \n out = coint_johansen(df,-1,5)\n d = {'0.90':0, '0.95':1, '0.99':2}\n traces = out.lr1\n cvts = out.cvt[:, d[str(1-alpha)]]\n def adjust(val, length= 6): return str(val).ljust(length)\n\n # Summary\n print('Name :: Test Stat > C(95%) => Signif \\n', '--'*20)\n for col, trace, cvt in zip(df.columns, traces, cvts):\n print(adjust(col), ':: ', adjust(round(trace,2), 9), \">\", adjust(cvt, 8), ' => ' , trace > cvt)",
"def alpha(self) -> float:\n return float(self.tk_ref.wm_attributes('-alpha'))",
"def superobl_alpha(k):\n if k%2==0:\n return 2**(-(k - 1))*(1 - 1/(k + 1)**2)**(k/2)\n else:\n return 2**(-(k - 1))*(1 - 1/k**2)**((k - 1)/2)",
"def pvalue_alpha(self):\n return self._pvalue_alpha",
"def test_xx1_thr(self):\n u_spec = leabra.UnitSpec()\n u_spec.act_thr = 0.25\n u = leabra.Unit(spec=u_spec)\n self.assertEqual(u_spec.xx1(-0.1), 0.0)\n self.assertTrue(0.0 < u_spec.xx1(0.1))\n self.assertEqual(u_spec.noisy_xx1(-0.1), 0.0)\n self.assertTrue(0.1 < u_spec.noisy_xx1(0.1))",
"def testAlphaZeroNllsMatchACauchyDistribution(self):\n x = jnp.linspace(-10, 10, 1000)\n scale = 1.7\n nll = self.variant(self._distribution.nllfun)(x, 0, scale)\n nll_true = -scipy.stats.cauchy(0, scale * jnp.sqrt(2)).logpdf(x)\n chex.assert_tree_all_close(nll, nll_true, atol=1e-5, rtol=1e-5)"
]
| [
"0.60391426",
"0.5964379",
"0.59364223",
"0.5921598",
"0.5899678",
"0.5892448",
"0.5811773",
"0.57884306",
"0.57884306",
"0.57884306",
"0.5761492",
"0.57613426",
"0.57104206",
"0.5643682",
"0.56156635",
"0.5586468",
"0.5562589",
"0.5561338",
"0.5516102",
"0.5499588",
"0.5403682",
"0.5393024",
"0.53877187",
"0.53768206",
"0.53517663",
"0.53508985",
"0.5350045",
"0.5332505",
"0.5310431",
"0.5307091"
]
| 0.70785445 | 0 |
Provides metadata for the UserInterfaceType plugin. This gives human-readable information on the plugin, dependency resolution information, and tells the plugin system what this plugin can do. | def metadata():
return {
"name": "User Interface Type",
"description": "Defines a type of plug-in that communicates with the user by showing information to the user and allowing the user to control the application.",
"version": 2,
"dependencies": {},
"type": { #This is a "plug-in type" plug-in.
"type_name": "userinterface",
"api": userinterfacetype.user_interface,
"validate_metadata": validate_metadata
}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_interface_info(self): # real signature unknown; restored from __doc__\n pass",
"def get_plugin_interface(self):",
"def metadata(self) -> interface.Metadata:\n return cast(interface.Metadata, self._interfaces[interface.Metadata])",
"def plugin_data(self) -> global___SummaryMetadata.PluginData:",
"def get_interface_metadata(cls, default=None):\n return metadata.get_metadata(cls, METADATA_KEY, default)",
"def plugin_info():\n\n return {\n 'name': 'MAX31865 Async plugin',\n 'version': '1.0',\n 'mode': 'async',\n 'type': 'south',\n 'interface': '1.0',\n 'config': _DEFAULT_CONFIG\n }",
"def get_plugin_info(self, handle):\n plugin_type = c_int()\n name = create_string_buffer(256)\n ver = c_uint()\n ckresult(\n _dll.FMOD_System_GetPluginInfo(\n self._ptr, handle, byref(plugin_type), byref(name), 256, byref(ver)\n )\n )\n return so(\n type=PLUGINTYPE(plugin_type.value), name=name.value, version=ver.value\n )",
"def fill_plugin_metadata(cls, plugin, metadata):\n metadata['plugin_id'] = plugin.id\n metadata['plugin_version'] = plugin.version\n metadata['hot_pluggable'] = plugin.is_hotpluggable",
"def plugin_info():\n return {\n 'name': 'Playback',\n 'version': '2.1.0',\n 'mode': 'async',\n 'type': 'south',\n 'interface': '1.0',\n 'config': _DEFAULT_CONFIG\n }",
"def metadata(cls):\n return {\n 'id': constants.WHOLE_REPO_PROFILER_ID,\n 'display_name': _('Profiler to install entire puppet repo'),\n 'types' : [constants.TYPE_PUPPET_MODULE]\n }",
"def get_user_metadata(self, instance, image_meta, nets_conf):\n\n props = image_meta.get('properties')\n user_metadata = {}\n if 'agent_type' in props:\n # TODO: check privileges agent vm creation according\n # to the agent type\n\n # add the data to connect to the neutron/nova services\n rabbit_hosts = None\n if cfg.CONF.hybrid_driver.external_rabbit_host:\n rabbit_hosts = cfg.CONF.hybrid_driver.external_rabbit_host\n else:\n for rabbit_host in cfg.CONF.oslo_messaging_rabbit.rabbit_hosts:\n if rabbit_hosts:\n rabbit_hosts = '%s, %s' % (rabbit_hosts, rabbit_host)\n else:\n rabbit_hosts = rabbit_host\n user_metadata.update({\n 'rabbit_userid': cfg.CONF.oslo_messaging_rabbit.rabbit_userid,\n 'rabbit_password': cfg.CONF.oslo_messaging_rabbit.rabbit_password,\n 'rabbit_hosts': rabbit_hosts,\n 'host': instance.uuid,\n # be careful to create the VM with the interface in a good order\n 'network_mngt_interface': 'eth0',\n 'network_data_interface': 'eth1',\n 'network_vms_interface': 'eth2',\n })\n for net_conf in nets_conf:\n d = net_conf['device']\n if net_conf['mode'].startswith('dhcp'):\n user_metadata[d] = 'dhcp'\n else:\n if d == 'eth0' and 'gateway' in net_conf:\n user_metadata['provider_gateway'] = net_conf['gateway']\n user_metadata[d] = 'manual'\n user_metadata[d + '_ip'] = net_conf['ip']\n user_metadata[d + '_netmask'] = net_conf['netmask']\n user_metadata[d + '_size'] = get_nsize(net_conf['netmask'])\n hyper_agent_vif_driver = HYPER_AGENT_DRIVER[props['agent_type']]\n user_metadata['container_image_uri'] = self._replace_in_uri(\n image_meta, 'container_image_uri')\n user_metadata['container_rootfs_uri'] = self._replace_in_uri(\n image_meta, 'container_rootfs_uri')\n user_metadata['hyper_agent_vif_driver'] = hyper_agent_vif_driver\n user_metadata['network_device_mtu'] = 1500\n user_metadata['network_device_mtu_overhead'] = 50\n else:\n return user_metadata\n\n LOG.debug('user_metadata=%s' % user_metadata)\n return user_metadata",
"def _get_interface_type(self):\n return self.__interface_type",
"def user_info(self):\n response = self.query('user_info')\n return response",
"def metadata(self): # -> list[Unknown]:\n ...",
"def metadata(self): # -> list[Unknown]:\n ...",
"def plugin_info():\n\n return {\n 'name': 'Enviro pHAT Poll Plugin',\n 'version': '1.7.0',\n 'mode': 'poll',\n 'type': 'south',\n 'interface': '1.0',\n 'config': _DEFAULT_CONFIG\n }",
"def userinfo(self, **kwargs):\n metadata = self.load_server_metadata()\n resp = self.get(metadata['userinfo_endpoint'], **kwargs)\n resp.raise_for_status()\n data = resp.json()\n return UserInfo(data)",
"def validate_metadata(user_interface_metadata):\n\tif \"userinterface\" not in user_interface_metadata:\n\t\traise luna.plugins.MetadataValidationError(\"This is not a user interface plug-in.\")\n\trequired_functions = {\"join\", \"start\", \"stop\"}\n\ttry:\n\t\tif not required_functions <= user_interface_metadata[\"userinterface\"].keys():\n\t\t\traise luna.plugins.MetadataValidationError(\"The user interface specifies no functions {function_names}.\".format(function_names=\", \".join(required_functions - user_interface_metadata[\"userinterface\"].keys())))\n\t\tfor function_name in required_functions:\n\t\t\tif not callable(user_interface_metadata[\"userinterface\"][function_name]): #Each must be a callable object (such as a function).\n\t\t\t\traise luna.plugins.MetadataValidationError(\"The {function_name} metadata entry is not callable.\".format(function_name=function_name))\n\texcept (AttributeError, TypeError): #Not a dictionary.\n\t\traise luna.plugins.MetadataValidationError(\"The user interface metadata is not a dictionary.\")",
"def load_details(self):\n response = self._server._api_request(\"GET\", \"/plugins/plugin/%d\" % self.id, \"\")\n if response is not None:\n self.id = response[\"id\"]\n self.name = response[\"name\"]\n self.family_name = response[\"family_name\"]\n self.attributes = response[\"attributes\"]\n return True",
"def plugin_info():\n return {\n 'name': 'Pandas CSV Reader',\n 'version': '1.7.0',\n 'mode': 'poll',\n 'type': 'south',\n 'interface': '1.0',\n 'config': _DEFAULT_CONFIG\n }",
"def metadata_for_plugin(self, plugin):\r\n if plugin in self._plugins:\r\n return self._plugins[plugin]\r\n else:\r\n fp = None\r\n metadata = None\r\n info_file = plugin + INFO_FILE_EXTENSION\r\n try:\r\n fp = open(os.path.join(self.path, info_file), 'r')\r\n metadata = json.load(fp)\r\n except Exception as e:\r\n self.log.exception('Exception caught while loading plugin metadata: %s' % e)\r\n raise e\r\n finally:\r\n if fp:\r\n fp.close()\r\n return metadata",
"def interface(self):\n\n data = ['[Interface]']\n for item in INTERFACE_KEYS:\n value = getattr(self, item, None)\n if value:\n data.append(value)\n\n return '''\n'''.join(data)",
"def metadata(self): # -> None:\n ...",
"def userinfo(self):\n return self._userinfo",
"def meta(self):\n raise NotImplementedError",
"def user_info(self) -> UserInfo:\n return self.__userInfo",
"def backend_getInterface(self):\n\t\treturn describeInterface(self)",
"def info_handler(userdata, *args):\n\t\tinfo = database.devinfo(userdata[\"cursor\"], args[0])\n\t\t\n\t\tif info is None:\n\t\t\tprint(\"can't find user \" + args[0])\n\t\t\treturn\n\t\t\n\t\tstype, connected, status = info\n\t\t\n\t\tprint(shlex.quote((\"+\" if connected else \"-\") + stype), end=\" \")\n\t\tprint(shlex.quote(status))",
"def interfaceType(self): # pylint: disable=invalid-name\n return self.interface_type",
"def plugin_description(self):\n return self.__plugin_description"
]
| [
"0.61903876",
"0.5972374",
"0.57847023",
"0.5763439",
"0.57444113",
"0.5743814",
"0.57142943",
"0.5705293",
"0.5666131",
"0.5616112",
"0.5563328",
"0.542009",
"0.5387956",
"0.53818756",
"0.53818756",
"0.53782296",
"0.53117156",
"0.5289842",
"0.5267869",
"0.52673215",
"0.52365845",
"0.523471",
"0.51897395",
"0.5140217",
"0.51254123",
"0.50892246",
"0.50834465",
"0.50830203",
"0.504333",
"0.50326556"
]
| 0.7924266 | 0 |
Validates whether the specified metadata is valid for user interface plugins. User interface metadata must have a ``userinterface`` entry, which must | def validate_metadata(user_interface_metadata):
if "userinterface" not in user_interface_metadata:
raise luna.plugins.MetadataValidationError("This is not a user interface plug-in.")
required_functions = {"join", "start", "stop"}
try:
if not required_functions <= user_interface_metadata["userinterface"].keys():
raise luna.plugins.MetadataValidationError("The user interface specifies no functions {function_names}.".format(function_names=", ".join(required_functions - user_interface_metadata["userinterface"].keys())))
for function_name in required_functions:
if not callable(user_interface_metadata["userinterface"][function_name]): #Each must be a callable object (such as a function).
raise luna.plugins.MetadataValidationError("The {function_name} metadata entry is not callable.".format(function_name=function_name))
except (AttributeError, TypeError): #Not a dictionary.
raise luna.plugins.MetadataValidationError("The user interface metadata is not a dictionary.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def metadata():\n\treturn {\n\t\t\"name\": \"User Interface Type\",\n\t\t\"description\": \"Defines a type of plug-in that communicates with the user by showing information to the user and allowing the user to control the application.\",\n\t\t\"version\": 2,\n\t\t\"dependencies\": {},\n\n\t\t\"type\": { #This is a \"plug-in type\" plug-in.\n\t\t\t\"type_name\": \"userinterface\",\n\t\t\t\"api\": userinterfacetype.user_interface,\n\t\t\t\"validate_metadata\": validate_metadata\n\t\t}\n\t}",
"def validateMetadata(self, cur, hist):\n raise NotImplementedError(\"missing validateMetadata() method\")",
"def validate_metadata(self, metadata: Dict[str, dict]):\n encoder = NWBMetaDataEncoder()\n # The encoder produces a serialiazed object so we de serialized it for comparison\n serialized_metadata = encoder.encode(metadata)\n decoded_metadata = json.loads(serialized_metadata)\n validate(instance=decoded_metadata, schema=self.get_metadata_schema())\n if self.verbose:\n print(\"Metadata is valid!\")",
"def validate_metadata(self):\n metadata = self.get_client_metadata()\n\n return True",
"def check_metadata(metadata):\n message = 'The given metadata contains unsupported types.'\n assert all([item['type'] in ['category', 'value'] for item in metadata['details']]), message",
"def is_plugin_data(cls, attributes):\n return attributes.get('metadata', {}).get('class') == 'plugin'",
"def test_has_plugin_fields(self, three_phase_handler, pp_valid):\n assert three_phase_handler._has_plugin_fields(pp_valid)",
"def test_metadata_invalid(aquarius_instance):\n result, errors = aquarius_instance.validate_metadata(\n {\"some_dict\": \"that is invalid\"}\n )\n assert result is False\n assert errors[0][\"message\"] == \"'main' is a required property\"",
"def isTrackPluginValid(*args, **kwargs):\n pass",
"def _check_args_motif(args):\n if args.install:\n # -n/--name must be specified\n if not args.name:\n _exit(1, \"motifscan motif --install: error: argument -n/--name \"\n \"is required\")\n # check conflict between local model and remote mode\n if args.remote and args.pfm_files:\n _exit(1, \"motifscan motif --install: error: argument -r/--remote \"\n \"is not allowed with argument -i\")\n # -i must be specified in local mode\n if not args.remote:\n if not args.pfm_files:\n _exit(1, \"motifscan motif --install: error: argument -i is \"\n \"required\")\n # check the input files are existed\n for path in args.pfm_files:\n if not os.path.isfile(path):\n _exit(1, f\"motifscan motif --install: error: file not \"\n f\"found: {path}\")\n if args.build:\n # -g/--genome must be specified\n if not args.genome:\n _exit(1, \"motifscan motif --build: error: argument -g/--genome \"\n \"is required\")",
"def verify_interface(dut, **kwargs):\n if not kwargs.get(\"interface_name\"):\n st.log(\"Interface name not provided\")\n return False\n interface_name = kwargs.get(\"interface_name\")\n cli_type = st.get_ui_type(dut, **kwargs)\n cli_type = \"klish\" if cli_type in [\"rest-put\", \"rest-patch\"] else cli_type\n output = show_interface(dut, interface_name=interface_name, cli_type=cli_type)\n if output:\n for data in output:\n if data[\"interface\"] == interface_name:\n st.log(\"Parsing data for interface {}\".format(interface_name))\n if \"sampling_rate\" in kwargs:\n if str(data[\"sampling_rate\"]) != str(kwargs[\"sampling_rate\"]):\n st.log(\"Sampling rate verification failed ..\")\n return False\n if \"admin_status\" in kwargs:\n if data[\"admin_status\"] != kwargs[\"admin_status\"]:\n st.log(\"Admin status verification failed ..\")\n return False\n st.log(\"Verification successful ...\")\n return True\n else:\n st.log(\"Show output not found ...\")\n return False",
"def _validate(self):\n if not self._contents.has_key('type'):\n raise ValidationFailed(\"Metadata file %s contains no type field\" % (self._filename))\n \n if not self._contents.has_key('version'):\n raise ValidationFailed(\"Metadata file %s contains no version field\" %\n (self._filename))",
"def validate_plugin_uid(registry, plugin_uid):\n return plugin_uid in get_registered_plugin_uids(registry, flattern=True)",
"def validate_plugin(plugin):\n # list_name holds all plugin names, also the disabled ones (they won't do\n # anything as they are set as 'blocked' on pluggy)\n if plugin not in current_app.pluggy.list_name():\n raise FlaskBBCLIError(\"Plugin {} not found.\".format(plugin), fg=\"red\")\n return True",
"def validate_form_wizard_handler_plugin_uid(plugin_uid):\n return validate_plugin_uid(form_wizard_handler_plugin_registry, plugin_uid)",
"def test_check_metadata_fields(self):\n contents = self.read_metadata_contents()\n family = Metadata.get_family_metadata(contents)\n\n keys = [(\"name\", str), (\"postScriptName\", str),\n (\"fullName\", str), (\"style\", str),\n (\"weight\", int), (\"filename\", str),\n (\"copyright\", str)]\n\n missing = set([])\n unknown = set([])\n\n for j, itemtype in keys:\n\n for font_metadata in family.fonts:\n if j not in font_metadata:\n missing.add(j)\n\n for k in font_metadata:\n if k not in map(lambda x: x[0], keys):\n unknown.add(k)\n\n if unknown:\n msg = 'METADATA.json \"fonts\" property has unknown items [%s]'\n self.fail(msg % ', '.join(unknown))\n\n if missing:\n msg = 'METADATA.json \"fonts\" property items missed [%s] items'\n self.fail(msg % ', '.join(missing))",
"def _validate_args(self, args):\r\n invalid_args = [k for k in self.required_params if args.get(k) is None]\r\n if invalid_args:\r\n raise ArgumentError('Missing required options: %s'\r\n % ','.join(invalid_args))\r\n\r\n if all([args['--userdata'], args['--userfile']]):\r\n raise ArgumentError('[-u | --userdata] not allowed with '\r\n '[-F | --userfile]')\r\n\r\n if args['--hourly'] in FALSE_VALUES:\r\n args['--hourly'] = False\r\n\r\n if args['--monthly'] in FALSE_VALUES:\r\n args['--monthly'] = False\r\n\r\n if all([args['--hourly'], args['--monthly']]):\r\n raise ArgumentError('[--hourly] not allowed with [--monthly]')\r\n\r\n if not any([args['--hourly'], args['--monthly']]):\r\n raise ArgumentError('One of [--hourly | --monthly] is required')\r\n\r\n image_args = [args['--os'], args['--image']]\r\n if all(image_args):\r\n raise ArgumentError('[-o | --os] not allowed with [--image]')\r\n\r\n if not any(image_args):\r\n raise ArgumentError('One of [--os | --image] is required')\r\n\r\n if args['--userfile']:\r\n if not os.path.exists(args['--userfile']):\r\n raise ArgumentError(\r\n 'File does not exist [-u | --userfile] = %s'\r\n % args['--userfile'])",
"def validate_form_handler_plugin_uid(plugin_uid):\n return validate_plugin_uid(form_handler_plugin_registry, plugin_uid)",
"def metadata_validate(self):\n # Set path to `service_schema` stored in the `resources` directory from cwd of `mpe_service.py`\n current_path = Path(__file__).parent\n relative_path = '../../snet/snet_cli/resources/service_schema'\n path_to_schema = (current_path / relative_path).resolve()\n with open(path_to_schema, 'r') as f:\n schema = json.load(f)\n metadata = load_mpe_service_metadata(self.args.metadata_file)\n try:\n validate(instance=metadata.m, schema=schema)\n except Exception as e:\n docs = \"http://snet-cli-docs.singularitynet.io/service.html\"\n error_message = f\"\\nVisit {docs} for more information.\"\n if e.validator == 'required':\n raise ValidationError(e.message + error_message)\n elif e.validator == 'minLength':\n raise ValidationError(f\"`{e.path[-1]}` -> cannot be empty.\" + error_message)\n elif e.validator == 'minItems':\n raise ValidationError(f\"`{e.path[-1]}` -> minimum 1 item required.\" + error_message)\n elif e.validator == 'type':\n raise ValidationError(f\"`{e.path[-1]}` -> {e.message}\" + error_message)\n elif e.validator == 'enum':\n raise ValidationError(f\"`{e.path[-1]}` -> {e.message}\" + error_message)\n elif e.validator == 'additionalProperties':\n if len(e.path) != 0:\n raise ValidationError(f\"{e.message} in `{e.path[-2]}`.\" + error_message)\n else:\n raise ValidationError(f\"{e.message} in main object.\" + error_message)\n else:\n exit(\"OK. Ready to publish.\")",
"def validate_integration_form_handler_plugin_uid(plugin_uid):\n return validate_plugin_uid(\n integration_form_handler_plugin_registry,\n plugin_uid\n )",
"def metaDataInputAvailable(inputType, inputKey):\n # Check if it is on metadata:\n # FIXME How can I do that using objKeyStore??\n flag = False\n from RecExConfig.InputFilePeeker import inputFileSummary\n metaItemList=inputFileSummary.get('metadata_itemsList')\n if ( '%s#%s' % (inputType, inputKey) ) in metaItemList:\n flag = True\n mlog.verbose((\"metaItemList does have ContainerType input %s with \"\n \"key %s.\"), inputType, inputKey)\n else:\n mlog.verbose((\"metaItemList does NOT have ContainerType input %s with \"\n \"key %s.\"), inputType, inputKey)\n return flag",
"def test_206_manila_plugin_to_manila(self):\n u.log.debug(\"Checking the {{ metadata.package }}:\"\n \"manila relation data...\")\n manila_generic = self.manila_generic_sentry\n relation = ['manila-plugin', 'manila:manila-plugin']\n expected = {\n 'private-address': u.valid_ip,\n '_configuration_data': u.not_null,\n '_name': 'generic'\n }\n ret = u.validate_relation_data(manila_generic, relation, expected)\n if ret:\n message = u.relation_error('manila {{ metadata.package }}', ret)\n amulet.raise_status(amulet.FAIL, msg=message)\n u.log.debug('OK')",
"def test_exposeInterfaces(self):\n if self.plugin is None:\n return\n\n cs = settings.Settings()\n results = self.plugin.exposeInterfaces(cs)\n if results is None or not results:\n return\n\n # each plugin should return a list\n self.assertIsInstance(results, list)\n for result in results:\n # Make sure that all elements in the list satisfy the constraints of the\n # hookspec\n self.assertIsInstance(result, tuple)\n self.assertEqual(len(result), 3)\n\n order, interface, kwargs = result\n\n self.assertIsInstance(order, (int, float))\n self.assertTrue(issubclass(interface, interfaces.Interface))\n self.assertIsInstance(kwargs, dict)",
"def validate_form_element_plugin_uid(plugin_uid):\n return validate_plugin_uid(form_element_plugin_registry, plugin_uid)",
"def _validate_admission_plugins(custom_plugins):\n # There are some plugins required by the system\n # if the plugins is specified manually, these ones might\n # be missed. We will add these automatically so the user\n # does not need to keep track of them\n required_plugins = ['NodeRestriction']\n for plugin in required_plugins:\n if plugin not in custom_plugins:\n custom_plugins = custom_plugins + \",\" + plugin\n return custom_plugins",
"def validate_plugin_configuration(cls, plugin_configuration: \"PluginConfiguration\"):\n missing_fields = []\n configuration = plugin_configuration.configuration\n configuration = {item[\"name\"]: item[\"value\"] for item in configuration}\n if not configuration[\"instance_id\"]:\n missing_fields.append(\"Instance Id\")\n if not configuration[\"secret_key\"]:\n missing_fields.append(\"Secret Key\")\n\n if plugin_configuration.active and missing_fields:\n error_msg = (\n \"To enable a plugin, you need to provide values for the \"\n \"following fields: \"\n )\n raise ValidationError(error_msg + \", \".join(missing_fields))",
"def _verifyPlugin(self, new_plugin):\n try:\n assert(new_plugin.id is not None)\n assert(new_plugin.version is not None)\n assert(new_plugin.name is not None)\n assert(new_plugin.framework_version is not None)\n except (AssertionError,KeyError):\n \n return False\n return True",
"def test_205_manila_to_manila_plugin(self):\n u.log.debug(\"Checking the manila:{{ metadata.package }}\"\n \" relation data...\")\n manila = self.manila_sentry\n relation = ['manila-plugin',\n '{{ metadata.package }}:manila-plugin']\n expected = {\n 'private-address': u.valid_ip,\n '_authentication_data': u.not_null,\n }\n ret = u.validate_relation_data(manila, relation, expected)\n if ret:\n message = u.relation_error('manila {{ metadata.package }}', ret)\n amulet.raise_status(amulet.FAIL, msg=message)\n u.log.debug('OK')",
"def init_metadata(user_model: Any) -> Dict:\n # meta_user: load metadata defined in the user_model instance\n if hasattr(user_model, \"init_metadata\"):\n try:\n meta_user = user_model.init_metadata()\n except SeldonNotImplementedError:\n meta_user = {}\n pass\n else:\n meta_user = {}\n\n if not isinstance(meta_user, dict):\n logger.error(\"init_metadata must return dict\")\n meta_user = {}\n\n # meta_env: load metadata from environmental variable\n try:\n meta_env = yaml.safe_load(os.environ.get(\"MODEL_METADATA\", \"{}\"))\n except yaml.YAMLError as e:\n logger.error(f\"Reading metadata from MODEL_METADATA env variable failed: {e}\")\n meta_env = {}\n\n meta = {**meta_user, **meta_env}\n\n try:\n return validate_model_metadata(meta)\n except SeldonInvalidMetadataError as e:\n logger.error(f\"Metadata validation error\\n{e}\")\n logger.error(f\"Failed to validate metadata {meta}\")\n return None",
"def validate_integration_form_element_plugin_uid(plugin_uid):\n return validate_plugin_uid(\n integration_form_element_plugin_registry,\n plugin_uid\n )"
]
| [
"0.5880484",
"0.57338774",
"0.5612077",
"0.54693663",
"0.5427157",
"0.5378694",
"0.5303779",
"0.52518684",
"0.522419",
"0.5138454",
"0.5125952",
"0.5111557",
"0.50984967",
"0.5088108",
"0.5084488",
"0.5055832",
"0.5045968",
"0.50456053",
"0.50423694",
"0.49870116",
"0.49784318",
"0.49295062",
"0.49182275",
"0.49048483",
"0.48697153",
"0.4859647",
"0.48416135",
"0.484007",
"0.48039082",
"0.47995338"
]
| 0.84427905 | 0 |
Add pipe failure to the leaks dict. | def pipe_failure(self, tube, fluid=None, N_welds=1, max_flow=None):
# If fluid not defined use fluid of the Source
fluid = fluid or self.fluid
# Failure rate coefficients; Piping failure rate is per unit of length,
# weld is dependent on number of welds, pipe OD and wall thickness
failure_rate_coeff = {'Piping': (tube.L, 1),
'Pipe weld': (tube.OD / tube.wall,
N_welds)}
# Piping and weld leaks as per Table 2
for cause in ['Piping', 'Pipe weld']:
for mode in TABLE_2[cause].keys():
if tube.D > 2 or mode != 'Large leak': # Large leak only for D > 2"
name = f'{cause} {mode.lower()}: {tube}, ' + \
f'{tube.L.to(ureg.ft):.3g~}'
temp_tube = copy(tube)
# Average path for the flow will be half of piping length
# for gas piping
temp_tube.L = tube.L / 2
fr_coef = failure_rate_coeff[cause][0]
N_events = failure_rate_coeff[cause][1]
if mode == 'Rupture':
failure_rate = fr_coef * TABLE_2[cause][mode]
# For rupture calculate flow through available
# pipe area
area = tube.area
else:
failure_rate = fr_coef * \
TABLE_2[cause][mode]['Failure rate']
area = TABLE_2[cause][mode]['Area']
if area > tube.area:
logger.warning('Leak area cannot be larger'
' than pipe area.')
continue
q_std = Source._leak_flow(temp_tube, area, fluid)
if max_flow is not None:
fluid_NTP = fluid.copy()
fluid_NTP.update_kw(P=ht.P_NTP, T=ht.T_NTP)
q_std_max = max_flow / fluid_NTP.Dmass
q_std = min(q_std, q_std_max)
self.leaks.append(
self._make_leak(name, failure_rate, q_std, N_events)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def failure_mode(self, name, failure_rate, q_std, N=1):\n self.leaks.append(\n self._make_leak(name, failure_rate, q_std, N))",
"def auditmemallocfail(self) :\n\t\ttry :\n\t\t\treturn self._auditmemallocfail\n\t\texcept Exception as e:\n\t\t\traise e",
"def auditportallocfail(self) :\n\t\ttry :\n\t\t\treturn self._auditportallocfail\n\t\texcept Exception as e:\n\t\t\traise e",
"def add_failed_package(self, unit, traceback):\n self.packages_error_count += 1\n self.packages_individual_errors = self.packages_individual_errors or {}\n error_key = '%s-%s-%s' % (unit.unit_key['name'], unit.unit_key['version'], unit.unit_key['author'])\n self.packages_individual_errors[error_key] = reporting.format_traceback(traceback)",
"def flange_failure(self, Pipe, fluid=None, N=1):\n # TODO Make leak and rupture areas adjustable, add info to docstring\n table = TABLE_2['Flange, reinforced gasket']\n area_cases = {\n 'Leak': table['Leak']['Area'],\n 'Rupture': Pipe.area}\n for mode in table:\n name = f'Flange {mode.lower()}: {Pipe}'\n if isinstance(table[mode], dict):\n failure_rate = table[mode]['Failure rate']\n else:\n failure_rate = table[mode]\n area = area_cases[mode]\n # TODO move this and gas leak check to separate method\n if area > Pipe.area:\n logger.warning('Leak area cannot be larger'\n ' than pipe area.')\n continue\n # If fluid not defined use fluid of the Source\n fluid = fluid or self.fluid\n q_std = Source._leak_flow(Pipe, area, fluid)\n self.leaks.append(\n self._make_leak(name, failure_rate, q_std, N))",
"def errConnectionLost(self):\n self.logger('stderr closed by process %d' % self._pid)",
"def recordStderr(self, test, errput):\n if errput:\n test = proto_test(test)\n self.stderr_errput[test] = errput",
"def dump_leak_report(self, leaked_objects):\n\n logger.info(\"-----------------------------------------------\")\n logger.error(\"Test failure. {} objects have leaked:\".format(len(leaked_objects)))\n logger.info(\"(Default text format is <type(obj): str(obj)>\")\n\n id_to_name_map = {}\n\n # first, map IDs for leaked objects. We display these slightly differently because it\n # makes tracking inter-leak references a little easier.\n for leak in leaked_objects:\n id_to_name_map[leak.object_id] = leak\n\n # if the object has a `__dict__` attribute, then map the ID of that dictionary\n # back to the object also.\n if leak.get_object() and hasattr(leak.get_object(), \"__dict__\"):\n dict_id = id(leak.get_object().__dict__)\n id_to_name_map[dict_id] = leak\n\n # Second, go through all objects and map IDs for those (unless we've done them already).\n # In this step, we add mappings for objects and their `__dict__` attributes, but we\n # don't add `dict` objects yet. This is because we don't know if any `dict` is a user-\n # created dictionary or if it's a `__dict__`. If it's a `__dict__`, we add it here and\n # point it to the owning object. If it's just a `dict`, we add it in the last loop\n # through\n for obj in gc.get_objects():\n object_id = id(obj)\n\n if not isinstance(obj, dict):\n if object_id not in id_to_name_map:\n id_to_name_map[object_id] = TrackedObject(obj)\n\n if hasattr(obj, \"__dict__\"):\n dict_id = id(obj.__dict__)\n if dict_id not in id_to_name_map:\n id_to_name_map[dict_id] = id_to_name_map[object_id]\n\n # Third, map IDs for all dicts that we haven't done yet.\n for obj in gc.get_objects():\n object_id = id(obj)\n\n if isinstance(obj, dict):\n if object_id not in id_to_name_map:\n id_to_name_map[object_id] = TrackedObject(obj)\n\n already_reported = set()\n objects_to_report = leaked_objects.copy()\n\n # keep track of all 3 generations in handy local variables. These are here\n # for developers who might be looking at leaks inside of pdb.\n gen0 = []\n gen1 = []\n gen2 = []\n\n for generation_storage, generation_name in [\n (gen0, \"generation 0: objects that leaked\"),\n (gen1, \"generation 1: objects that refer to leaked objects\"),\n (gen2, \"generation 2: objects that refer to generation 1\"),\n ]:\n next_set_of_objects_to_report = set()\n if len(objects_to_report):\n logger.info(\"-----------------------------------------------\")\n logger.info(generation_name)\n\n # Add our objects to our generation-specific list. This helps\n # developers looking at bugs inside pdb because they can just look\n # at `gen0[0].get_object()` to see the first leaked object, etc.\n generation_storage.extend(objects_to_report)\n\n for obj in objects_to_report:\n if obj in already_reported:\n logger.info(\"already reported: {}\".format(obj.object_name))\n else:\n logger.info(\"object: {}\".format(obj.object_name))\n if not obj.get_object():\n logger.info(\" not recursing\")\n else:\n for referrer in gc.get_referrers(obj.get_object()):\n if (\n isinstance(referrer, dict)\n and referrer.get(\"dict\", None) == obj.get_object()\n ):\n # This is the dict from a TrackedObject object. 
Skip it.\n pass\n else:\n object_id = id(referrer)\n if object_id in id_to_name_map:\n logger.info(\n \" referred by: {}\".format(id_to_name_map[object_id])\n )\n next_set_of_objects_to_report.add(id_to_name_map[object_id])\n else:\n logger.info(\n \" referred by Non-object: {}\".format(\n get_printable_object_name(referrer)\n )\n )\n already_reported.add(obj)\n\n logger.info(\n \"Total: {} objects, referred to by {} objects\".format(\n len(objects_to_report), len(next_set_of_objects_to_report)\n )\n )\n objects_to_report = next_set_of_objects_to_report\n\n logger.info(\"-----------------------------------------------\")\n logger.info(\"Leaked objects are available in local variables: gen0, gen1, and gen2\")\n logger.info(\"for the 3 generations of leaks. Use the get_object method to retrieve\")\n logger.info(\"the actual objects\")\n logger.info(\"eg: us gen0[0].get_object() to get the first leaked object\")\n logger.info(\"-----------------------------------------------\")\n assert False, \"Test failure. {} objects have leaked:\".format(len(leaked_objects))",
"def error_pipe(self, mail):\n\t\tif self.caching:\n\t\t\tself.failed_mails.append(mail)\n\t\telif self.failure_path:\n\t\t\tself.output_mail(open(os.path.normpath(self.failure_path), \"a\"), mail)",
"def pressure_vessel_failure(self, q_std_rupture, fluid=None):\n # If fluid not defined use fluid of the Source\n fluid = fluid or self.fluid\n for case, parameters in TABLE_2['Vessel, pressure'].items():\n name = 'Pressure vessel ' + case\n if isinstance(parameters, dict):\n area = parameters['Area']\n failure_rate = parameters['Failure rate']\n q_std = Source._leak_flow(ht.piping.Pipe(1, L=0*ureg.m), area,\n fluid)\n else:\n failure_rate = parameters\n q_std = q_std_rupture\n self.leaks.append(\n self._make_leak(name, failure_rate, q_std, 1))",
"def _make_leak(self, name, failure_rate, q_std, N):\n N_events = N * self.N\n tau = self.volume/q_std\n total_failure_rate = N_events*failure_rate\n total_failure_rate.ito(1/ureg.hr)\n return (name, total_failure_rate, q_std, tau.to(ureg.min), N_events)",
"def get_leaks(self, packet_string: str, leaks_based_on_ner: dict) -> [bool, list]:\n mystring = urllib.parse.unquote_plus(packet_string)\n x = re.sub(\"[^a-zA-Z0-9,\\.]\", \" \", mystring)\n doc = self.nlp(x)\n\n\n for ent in doc.ents:\n if ent.label_ in leaks_based_on_ner:\n leaks_based_on_ner[ent.label_] = leaks_based_on_ner[ent.label_] + [ent.text]\n else:\n leaks_based_on_ner[ent.label_] = [ent.text]",
"def errReceived(self, data):\n\n if self._fired:\n return\n self._stderr.append(data)\n if data.find('\\n') == -1:\n return\n # This expects tcpdump to output an line like\n # tcpdump: listening on eth1, link-type EN10MB (Ethernet), capture size 96 bytes\n # as first output on stderr ...\n stderr = \"\".join(self._stderr)\n self._fired = True\n if re.search(\"listening on.*link-type\", stderr):\n self._deferred.callback((True, None, stderr))\n else:\n self._deferred.callback((False, None, stderr))",
"def addFailure(self, test, err):\r\n self.failures.append((test, self._exc_info_to_string(err, test)))\r\n self._mirrorOutput = True",
"def dewar_insulation_failure(self, q_std):\n failure_rate = TABLE_1['Dewar']['Loss of vacuum']\n self.leaks.append(\n self._make_leak('Dewar insulation failure', failure_rate, q_std, 1))",
"def inject_failure(self):\n # Inject a failure only if there's a process running\n self.BqLog(\"Starting failure injection\")\n while len(self.circQ) > 0 or (self.currentProc and self.currentProc.workLeft > 0):\n t = time_to_failure()\n self.BqLog(\"Inject the next failure after %d seconds\" % (t))\n if t == 0:\n continue\n yield self.env.timeout(t)\n if len(self.circQ) >= 0 and \\\n self.currentProc.workLeft > 0:\n # Only break the machine if it is currently computing,\n # and if current proc is not restarting\n self.BqLog(\"Injecting a failure in %s\" % (self.currentProc.name))\n self.numFailures += 1\n self.process.interrupt(cause=\"failure\")",
"def addFailure(self, test, err):\n self.failures.append((proto_test(test), proto_error(err)))",
"def append_record_failure():\n\t\tpass",
"def addFailure(self, test, err, capt=None):\n exc_type, exc_val, tb = err\n tb = ''.join(traceback.format_exception(\n exc_type,\n exc_val if isinstance(exc_val, exc_type) else exc_type(exc_val),\n tb\n ))\n name = id_split(test.id())\n group = self.report_data[name[0]]\n self.stats['failures'] += 1\n group.stats['failures'] += 1\n group.tests.append({\n 'name': name[-1],\n 'failed': True,\n 'errtype': nice_classname(err[0]),\n 'message': exc_message(err),\n 'tb': tb,\n })",
"def _checkMemLeak(self):\n\t\t### Memory leak code:\n\t\t#self.stats['memlist'].append(mem.mySize()/1024)\n\t\tself.stats['memlist'].append(mem.active())\n\t\tmemfree = mem.free()\n\t\tminavailmem = 64*1024; # 64 MB, size of one image\n\t\tif(memfree < minavailmem):\n\t\t\tapDisplay.printError(\"Memory is low (\"+str(int(memfree/1024))+\"MB): there is probably a memory leak\")\n\n\t\tif(self.stats['count'] > 15):\n\t\t\tmemlist = self.stats['memlist'][-15:]\n\t\t\tn = len(memlist)\n\t\t\t\n\t\t\tgain = (memlist[n-1] - memlist[0])/1024.0\n\t\t\tsumx = n*(n-1.0)/2.0\n\t\t\tsumxsq = n*(n-1.0)*(2.0*n-1.0)/6.0\n\t\t\tsumy = 0.0; sumxy = 0.0; sumysq = 0.0\n\t\t\tfor i in range(n):\n\t\t\t\tvalue = float(memlist[i])/1024.0\n\t\t\t\tsumxy += float(i)*value\n\t\t\t\tsumy += value\n\t\t\t\tsumysq += value**2\n\t\t\t###\n\t\t\tstdx = math.sqrt(n*sumxsq - sumx**2)\n\t\t\tstdy = math.sqrt(n*sumysq - sumy**2)\n\t\t\trho = float(n*sumxy - sumx*sumy)/float(stdx*stdy+1e-6)\n\t\t\tslope = float(n*sumxy - sumx*sumy)/float(n*sumxsq - sumx*sumx)\n\t\t\tmemleak = rho*slope\n\t\t\t###\n\t\t\tif(self.stats['memleak'] > 3 and slope > 20 and memleak > 512 and gain > 2048):\n\t\t\t\tapDisplay.printError(\"Memory leak of \"+str(round(memleak,2))+\"MB\")\n\t\t\telif(memleak > 32):\n\t\t\t\tself.stats['memleak'] += 1\n\t\t\t\tapDisplay.printWarning(\"substantial memory leak \"+str(round(memleak,2))+\"MB\")\n\t\t\t\tprint \"(\",str(n),round(slope,5),round(rho,5),round(gain,2),\")\"",
"def drop_failed(self, item, line_reference, reason=''):\n logger.warning(\n f'Dropping failed {line_reference} from import job \"{self.job}\" run {self.timestamp}: {reason}'\n )\n self.failed_items.append({\n 'id': getattr(item, 'identifier', line_reference),\n 'timestamp': datetimestamp(digits_only=False),\n 'title': getattr(item, 'title', ''),\n 'uri': getattr(item, 'uri', ''),\n 'reason': reason\n })",
"def procFail(proc):\n\tif 'a' in proc.config._notify.when['pipeline']:\n\t\tlogger.debug('Notifying process fails')\n\t\tEMAIL.send('proc', proc, 'abort')",
"def patch_broken_pipe_error():\n from socketserver import BaseServer\n\n handle_error = BaseServer.handle_error\n\n def my_handle_error(self, request, client_address):\n type, err, tb = sys.exc_info()\n # there might be better ways to detect the specific erro\n if repr(err) == \"error(32, 'Broken pipe')\":\n pass\n else:\n handle_error(self, request, client_address)\n\n BaseServer.handle_error = my_handle_error",
"def failed(self) -> None:\n self.failure_count += 1",
"def leak2(addr, length):\n fake_str_addr = heap_base + 0x170\n fake_str = p64(length) + p64(addr)\n evl(b'l=\"' + fake_str + b'\"') # will be at offset 0xb0 from heap start\n\n for i in range(12):\n evl('{}'.format(i))\n\n evl('a={}+x'.format(fake_str_addr))\n return readstrvar('a')[0:length]",
"def leak_shellcode(remote, shellcode):\n assert len(shellcode) == 3\n alloc_addr = get_current_allocation_addr(remote)\n send_receive(remote, '\\x93' + shellcode) # Start with xchg eax, ebx to leak us\n return alloc_addr + 6",
"def patch_broken_pipe_error():\n\timport sys\n\tfrom SocketServer import BaseServer\n\tfrom wsgiref import handlers\n\n\thandle_error = BaseServer.handle_error\n\tlog_exception = handlers.BaseHandler.log_exception\n\n\tdef is_broken_pipe_error():\n\t\t_, err, _ = sys.exc_info()\n\t\tnum = err.errno if hasattr(err, 'errno') else 0\n\t\treturn num == 32\n\n\tdef my_handle_error(self, request, client_address):\n\t\tif not is_broken_pipe_error():\n\t\t\thandle_error(self, request, client_address)\n\t\telse:\n\t\t\tprint 'broken pipe'\n\n\tdef my_log_exception(self, exc_info):\n\t\tif not is_broken_pipe_error():\n\t\t\tlog_exception(self, exc_info)\n\n\tBaseServer.handle_error = my_handle_error\n\thandlers.BaseHandler.log_exception = my_log_exception",
"def test_is_memleak(self):\n subprocess.call(\n [\"g++\", \"-g\", \"test/with_leak.cc\", \"-o\", \"test/leaky.out\"])\n self.assertTrue(uut.is_memleak(\"test/leaky.out\"))\n subprocess.call(\n [\"g++\", \"-g\", \"test/without_leak.cc\", \"-o\",\n \"test/not_leaky.out\"])\n self.assertFalse(uut.is_memleak(\"test/not_leaky.out\"))",
"def addFailure(self, test, err):\n self.failure_count += 1\n self.total_count += 1\n unittest.TestResult.addFailure(self, test, err)\n _, _exc_str = self.failures[-1]\n output = self.complete_output()\n self.result.append((self.__class__.FAIL, test, output, _exc_str))\n if self.verbosity > 1:\n sys.stderr.write('F ')\n sys.stderr.write(str(test))\n sys.stderr.write('\\n')\n else:\n sys.stderr.write('F')",
"def add_failure(self):\n failure_time = time.time()\n\n if not self.first_failure_time:\n self.first_failure_time = failure_time\n\n self.failures.append(failure_time)"
]
| [
"0.5732793",
"0.53959626",
"0.534181",
"0.53402734",
"0.5289784",
"0.5119834",
"0.5101168",
"0.507846",
"0.50469095",
"0.5033066",
"0.5027118",
"0.5009161",
"0.50034285",
"0.49901775",
"0.49738032",
"0.49625376",
"0.49494156",
"0.4929026",
"0.4914285",
"0.4890131",
"0.48821697",
"0.47957247",
"0.47843108",
"0.47813675",
"0.47697103",
"0.47539324",
"0.4751826",
"0.4743501",
"0.4742045",
"0.47336954"
]
| 0.5879053 | 0 |
Add dewar insulation failure to the leaks dict. Store failure rate, flow rate and expected time duration of the failure event for the dewar insulation failure. Based on FESHM 4240. Failure modes are analyzed by the `Volume.odh` method. | def dewar_insulation_failure(self, q_std):
failure_rate = TABLE_1['Dewar']['Loss of vacuum']
self.leaks.append(
self._make_leak('Dewar insulation failure', failure_rate, q_std, 1)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def failure_mode(self, name, failure_rate, q_std, N=1):\n self.leaks.append(\n self._make_leak(name, failure_rate, q_std, N))",
"def _make_leak(self, name, failure_rate, q_std, N):\n N_events = N * self.N\n tau = self.volume/q_std\n total_failure_rate = N_events*failure_rate\n total_failure_rate.ito(1/ureg.hr)\n return (name, total_failure_rate, q_std, tau.to(ureg.min), N_events)",
"def test_loss_hook(self, losses):\n self.runinfo[\"dev_losses\"].append(losses)",
"def u_tube_failure(self, outer_tube, inner_tube, L, use_rate,\n fluid=None, N=1):\n # TODO Make areas adjustable, add info to docstring\n flow_path_cases = {'Small event': ht.piping.Annulus(outer_tube.ID,\n inner_tube.OD,\n L=L),\n 'Large event': outer_tube}\n for mode in TABLE_1['U-Tube change']:\n flow_path = flow_path_cases[mode]\n name = f'U-Tube {mode.lower()}: {flow_path}'\n failure_rate = TABLE_1['U-Tube change'][mode] * \\\n use_rate\n area = flow_path.area\n # TODO move this and gas leak check to separate method\n if area > outer_tube.area:\n logger.warning('Leak area cannot be larger'\n ' than outer tube area.')\n continue\n # If fluid not defined use fluid of the Source\n fluid = fluid or self.fluid\n q_std = Source._leak_flow(flow_path, area, fluid)\n self.leaks.append(\n self._make_leak(name, failure_rate, q_std, N))",
"def calc_loss_heat_recovery (self):\n hr_used = self.cd['heat recovery operational']\n self.loss_heat_recovery = 0\n if hr_used:# == 'Yes':\n self.loss_heat_recovery = self.electric_diesel_reduction * \\\n (self.comp_specs['percent heat recovered'] / 100.0)\n #~ print 'self.loss_heat_recovery',self.loss_heat_recovery",
"def pipe_failure(self, tube, fluid=None, N_welds=1, max_flow=None):\n # If fluid not defined use fluid of the Source\n fluid = fluid or self.fluid\n # Failure rate coefficients; Piping failure rate is per unit of length,\n # weld is dependent on number of welds, pipe OD and wall thickness\n failure_rate_coeff = {'Piping': (tube.L, 1),\n 'Pipe weld': (tube.OD / tube.wall,\n N_welds)}\n # Piping and weld leaks as per Table 2\n for cause in ['Piping', 'Pipe weld']:\n for mode in TABLE_2[cause].keys():\n if tube.D > 2 or mode != 'Large leak': # Large leak only for D > 2\"\n name = f'{cause} {mode.lower()}: {tube}, ' + \\\n f'{tube.L.to(ureg.ft):.3g~}'\n temp_tube = copy(tube)\n # Average path for the flow will be half of piping length\n # for gas piping\n temp_tube.L = tube.L / 2\n fr_coef = failure_rate_coeff[cause][0]\n N_events = failure_rate_coeff[cause][1]\n if mode == 'Rupture':\n failure_rate = fr_coef * TABLE_2[cause][mode]\n # For rupture calculate flow through available\n # pipe area\n area = tube.area\n else:\n failure_rate = fr_coef * \\\n TABLE_2[cause][mode]['Failure rate']\n area = TABLE_2[cause][mode]['Area']\n if area > tube.area:\n logger.warning('Leak area cannot be larger'\n ' than pipe area.')\n continue\n q_std = Source._leak_flow(temp_tube, area, fluid)\n if max_flow is not None:\n fluid_NTP = fluid.copy()\n fluid_NTP.update_kw(P=ht.P_NTP, T=ht.T_NTP)\n q_std_max = max_flow / fluid_NTP.Dmass\n q_std = min(q_std, q_std_max)\n self.leaks.append(\n self._make_leak(name, failure_rate, q_std, N_events))",
"def pressure_vessel_failure(self, q_std_rupture, fluid=None):\n # If fluid not defined use fluid of the Source\n fluid = fluid or self.fluid\n for case, parameters in TABLE_2['Vessel, pressure'].items():\n name = 'Pressure vessel ' + case\n if isinstance(parameters, dict):\n area = parameters['Area']\n failure_rate = parameters['Failure rate']\n q_std = Source._leak_flow(ht.piping.Pipe(1, L=0*ureg.m), area,\n fluid)\n else:\n failure_rate = parameters\n q_std = q_std_rupture\n self.leaks.append(\n self._make_leak(name, failure_rate, q_std, 1))",
"def calc_reduction_diesel_used (self):\n self.reduction_diesel_used = self.diesel_equiv_captured - \\\n self.loss_heat_recovery\n #~ print 'self.reduction_diesel_used',self.reduction_diesel_used",
"def auditmemallocfailrate(self) :\n\t\ttry :\n\t\t\treturn self._auditmemallocfailrate\n\t\texcept Exception as e:\n\t\t\traise e",
"def _snps_failed_report(write: bool=False, miss_threshold: float=0.2,\n maf_threshold: float=0.00001, hwe_threshold: float=1e-6,\n lmissfile: str=\"plink.lmiss\", maffile: str=\"MAF_check.frq\",\n hwefile: str=\"plink.hwe\"):\n snps = {}\n ids_list = []\n lmiss = pd.read_csv(lmiss_file, delimiter=\" \", skipinitialspace=True)\n\n missing_snps = lmiss.loc[lmiss['F_MISS'] > miss_threshold]\n snps['missing_snps'] = missing_snps['SNP'].tolist()\n ids_list.append(missing_snps['SNP'].tolist())\n # print(\"total missing snps failed: \", len(missing_snps['SNP'].tolist()))\n\n # MAF\n maf = pd.read_csv(maf_file, delimiter=\" \", skipinitialspace=True)\n rare = maf.loc[maf['MAF'] < maf_threshold]\n snps['maf'] = rare['SNP'].tolist()\n ids_list.append(rare['SNP'].tolist())\n # print(\"total maf snps failed: \", len(rare['SNP'].tolist()))\n\n # HWE departures\n hardy = pd.read_csv(hwe_file, delimiter=\" \", skipinitialspace=True)\n hwe_failed = hardy.loc[hardy['P'] < hwe_threshold]\n snps['hwe'] = hwe_failed['SNP'].tolist()\n ids_list.append(hwe_failed['SNP'].tolist())\n # print(\"total hwe snps failed: \", len(hwe_failed['SNP'].tolist()))\n\n # graph everything\n tests = ['SNP Missingness', 'Minor Allele Frequency', 'Outlying HWE']\n fail_counts = [len(missing_snps['SNP'].tolist()), len(rare['SNP'].tolist()), len(hwe_failed['SNP'].tolist())]\n total_fails = set(x for l in ids_list for x in l)\n # print(\"total fails: \", len(total_fails))\n\n fig = plt.figure(figsize=(8,6))\n plt.tight_layout()\n plt.bar(x=tests, height=fail_counts)\n plt.title(\"SNPs failing QC checks (total: {}/{})\".format(len(total_fails), lmiss.shape[0]))\n plt.xlabel(\"QC Test\")\n plt.ylabel(\"Number of SNPs\")\n plt.tick_params(axis='x', rotation=90)\n\n if write:\n write_fail_file(snps, \"failed_snps_ids\")\n\n return fig",
"def test_debt_target_not_expired_when_new_issue_added(self):\n measurement = self.measurement(\n self.metric(accept_debt=True, debt_target=\"100\", issue_ids=[\"FOO-41\", \"FOO-42\"]),\n count={\"debt_target\": \"100\"},\n issue_status=[{\"status_category\": \"done\", \"issue_id\": \"FOO-41\"}],\n )\n self.assertFalse(measurement.debt_target_expired())",
"def print_leaks(self):\n for key in sorted(self.leaks.keys()):\n print('Failure mode: '+key)\n print('Failure rate: {:.2~}'.format(self.leaks[key][0]))\n print('Flow rate: {:.2~}'.format(\n self.leaks[key][1].to(ureg.ft**3/ureg.min)))\n print('Event duration: {:.2~}'.format(self.leaks[key][2]))\n print()",
"def add_leaks_to_submission(predictions):\n leaked_df = pd.read_feather(\"data/leak/leak.feather\")\n leaked_df.rename(columns={\"meter_reading\": \"leaked_reading\"}, inplace=True)\n leaked_df.loc[leaked_df[\"leaked_reading\"] < 0, \"leaked_reading\"] = 0\n leaked_df = leaked_df[leaked_df[\"building_id\"] != 245]\n leaked_df[\"timestamp\"] = leaked_df[\"timestamp\"].dt.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n test_df = pd.read_csv(\"data/raw/test.csv\")\n\n test_df = test_df.merge(leaked_df, left_on=[\"building_id\", \"meter\", \"timestamp\"],\n right_on=[\"building_id\", \"meter\", \"timestamp\"], how=\"left\")\n test_df[\"meter_reading\"] = predictions\n test_df[\"meter_reading\"] = np.where(test_df[\"leaked_reading\"].isna(),\n test_df[\"meter_reading\"], test_df[\"leaked_reading\"])\n\n return test_df[\"meter_reading\"]",
"def auditportallocfailrate(self) :\n\t\ttry :\n\t\t\treturn self._auditportallocfailrate\n\t\texcept Exception as e:\n\t\t\traise e",
"def test_accept_missing_sources_as_tech_debt_expired(self):\n metric = Metric(\n self.DATA_MODEL,\n {\"addition\": \"sum\", \"type\": \"tests\", \"accept_debt\": True, \"debt_end_date\": \"2020-01-01\"},\n METRIC_ID,\n )\n measurement = self.measurement(metric)\n self.assertIsNone(measurement.status())",
"def calc_lost_heat_recovery (self):\n if not self.cd['heat recovery operational']:\n\n self.lost_heat_recovery = [0]\n else:\n gen_eff = self.cd[\"diesel generation efficiency\"]\n self.lost_heat_recovery = \\\n (self.generation / gen_eff )* .10",
"def flange_failure(self, Pipe, fluid=None, N=1):\n # TODO Make leak and rupture areas adjustable, add info to docstring\n table = TABLE_2['Flange, reinforced gasket']\n area_cases = {\n 'Leak': table['Leak']['Area'],\n 'Rupture': Pipe.area}\n for mode in table:\n name = f'Flange {mode.lower()}: {Pipe}'\n if isinstance(table[mode], dict):\n failure_rate = table[mode]['Failure rate']\n else:\n failure_rate = table[mode]\n area = area_cases[mode]\n # TODO move this and gas leak check to separate method\n if area > Pipe.area:\n logger.warning('Leak area cannot be larger'\n ' than pipe area.')\n continue\n # If fluid not defined use fluid of the Source\n fluid = fluid or self.fluid\n q_std = Source._leak_flow(Pipe, area, fluid)\n self.leaks.append(\n self._make_leak(name, failure_rate, q_std, N))",
"def update_timeout_penalties_by_error(penalty_dict):\n if penalty_dict and isinstance(penalty_dict, dict):\n _TIMEOUT_PENALTIES_BY_ERR_NO.update(penalty_dict)",
"def deviation(delta, mean_reliability, experiment, disks, chunk_count, spread_factor, threshold_recovery):\n \n # Approximated file loss probability\n reliability_mean_value = probability_at_least_d_fail_equal_reliability(threshold_recovery, chunk_count, mean_reliability)\n \n\n # Exact probability\n reliabilities = [element for element in \n file_loss_delta_matrix(delta, mean_reliability, experiment, disks, chunk_count, spread_factor, \n threshold_recovery)]\n \n return abs(mean(reliabilities) - reliability_mean_value)/abs(mean(reliabilities))",
"def addFailure(self, test, err):\n\n super(ForceBalanceTestResult, self).addFailure(test,err)\n self.logger.warning(\"\\r\\x1b[31;1m\" + \"FAIL\" + \"\\x1b[0m \" + test.shortDescription() + \"\\n\")\n\n errorMessage = self.buildErrorMessage(test, err)\n\n for line in errorMessage.splitlines():\n self.logger.warning(\"\\t >\\t\" + line + \"\\n\")",
"def test_debt_target_expired(self):\n measurement = self.measurement(\n self.metric(accept_debt=True, debt_target=\"100\", issue_ids=[\"FOO-40\"]),\n count={\"debt_target\": \"100\"},\n issue_status=[{\"status_category\": \"done\", \"issue_id\": \"FOO-40\"}],\n )\n self.assertTrue(measurement.debt_target_expired())",
"def report(self, brief=True, sens=None):\n self.fail_modes.sort(key=lambda x: x.phi, reverse=True)\n sens = sens or SHOW_SENS\n title = f'ODH report for {self}'\n padding = len(title) + 10\n print('#'*padding)\n print(title)\n print('-'*padding)\n if brief:\n print('Printing brief ODH report')\n print(f'Only leaks with Fatality rate > {sens} are shown')\n for f_mode in self.fail_modes:\n if f_mode.phi >= sens or not brief:\n print()\n print(f' Source: {f_mode.source.name}')\n print(f' Failure: {f_mode.name}')\n print(f' Fatality rate: {f_mode.phi.to(1/ureg.hr):.2~}')\n print(f' Building is powered: {not f_mode.outage}')\n print(f' Oxygen concentration: {f_mode.O2_conc:.0%}, '\n f'{f_mode.O2_conc/0.21:.0%} percent of norm')\n print(f' Leak failure rate: {f_mode.leak_fr:.3g~}')\n print(' ODH protection PFD: '\n f'{(f_mode.P_i/f_mode.leak_fr).to(ureg.dimensionless):.2~}')\n print(f' Total failure rate: {f_mode.P_i.to(1/ureg.hr):.2~}')\n print(f' Leak rate: {f_mode.q_leak:.2~}')\n print(f' Event duration: {f_mode.tau:.2~}')\n print(f' Fans working: {f_mode.N_fan}')\n print(f' Fan rate: {f_mode.Q_fan:.2~}')\n print(f' Fatality prob: {f_mode.F_i:.0%}')",
"def test_prometheus_rule_failures():\n prometheus = ocs_ci.utility.prometheus.PrometheusAPI()\n alerts_response = prometheus.get(\n \"alerts\", payload={\"silenced\": False, \"inhibited\": False}\n )\n assert alerts_response.ok is True\n alerts = alerts_response.json()[\"data\"][\"alerts\"]\n log.info(f\"Prometheus Alerts: {alerts}\")\n assert constants.ALERT_PROMETHEUSRULEFAILURES not in [\n alert[\"labels\"][\"alertname\"] for alert in alerts\n ]",
"def odh(self, sources, power_outage=False):\n self.fail_modes = []\n # Probability of power failure in the building:\n # PFD_power if no outage, 1 if there is outage\n PFD_power_build = (power_outage or\n TABLE_1['Electrical Power Failure']['Demand rate'])\n # Calculate fatality rates for each source\n for source in sources:\n for leak in source.leaks:\n leak_failure_rate = leak[0]\n if leak_failure_rate is not None: # None for constant leak\n self._fatality_no_response(source, leak, source.sol_PFD,\n PFD_power_build)\n self._fatality_fan_powered(source, leak, source.sol_PFD,\n PFD_power_build)",
"def auditnsballocfailrate(self) :\n\t\ttry :\n\t\t\treturn self._auditnsballocfailrate\n\t\texcept Exception as e:\n\t\t\traise e",
"def test_drc_lvs_decks(self) -> None:\n import hammer_config\n\n tech_dir, tech_dir_base = HammerToolTestHelpers.create_tech_dir(\"dummy28\")\n tech_json_filename = os.path.join(tech_dir, \"dummy28.tech.json\")\n\n def add_drc_lvs_decks(in_dict: Dict[str, Any]) -> Dict[str, Any]:\n out_dict = deepdict(in_dict)\n out_dict.update({\"drc decks\": [\n {\"tool name\": \"hammer\", \"deck name\": \"a_nail\", \"path\": \"/path/to/hammer/a_nail.drc.rules\"},\n {\"tool name\": \"chisel\", \"deck name\": \"some_wood\", \"path\": \"/path/to/chisel/some_wood.drc.rules\"},\n {\"tool name\": \"hammer\", \"deck name\": \"head_shark\", \"path\": \"/path/to/hammer/head_shark.drc.rules\"}\n ]})\n out_dict.update({\"lvs decks\": [\n {\"tool name\": \"hammer\", \"deck name\": \"a_nail\", \"path\": \"/path/to/hammer/a_nail.lvs.rules\"},\n {\"tool name\": \"chisel\", \"deck name\": \"some_wood\", \"path\": \"/path/to/chisel/some_wood.lvs.rules\"},\n {\"tool name\": \"hammer\", \"deck name\": \"head_shark\", \"path\": \"/path/to/hammer/head_shark.lvs.rules\"}\n ]})\n return out_dict\n\n HammerToolTestHelpers.write_tech_json(tech_json_filename, add_drc_lvs_decks)\n sys.path.append(tech_dir_base)\n tech = self.get_tech(hammer_tech.HammerTechnology.load_from_dir(\"dummy28\", tech_dir))\n tech.cache_dir = tech_dir\n\n tool = DummyTool()\n tool.technology = tech\n database = hammer_config.HammerDatabase()\n tool.set_database(database)\n\n self.maxDiff = None\n self.assertEqual(tech.get_drc_decks_for_tool(\"hammer\"),\n [DRCDeck(tool_name=\"hammer\", name=\"a_nail\", path=\"/path/to/hammer/a_nail.drc.rules\"),\n DRCDeck(tool_name=\"hammer\", name=\"head_shark\", path=\"/path/to/hammer/head_shark.drc.rules\")\n ])\n\n self.assertEqual(tech.get_lvs_decks_for_tool(\"hammer\"),\n [LVSDeck(tool_name=\"hammer\", name=\"a_nail\", path=\"/path/to/hammer/a_nail.lvs.rules\"),\n LVSDeck(tool_name=\"hammer\", name=\"head_shark\", path=\"/path/to/hammer/head_shark.lvs.rules\")\n ])\n\n self.assertEqual(tech.get_drc_decks_for_tool(\"chisel\"),\n [DRCDeck(tool_name=\"chisel\", name=\"some_wood\", path=\"/path/to/chisel/some_wood.drc.rules\")])\n\n self.assertEqual(tech.get_lvs_decks_for_tool(\"chisel\"),\n [LVSDeck(tool_name=\"chisel\", name=\"some_wood\", path=\"/path/to/chisel/some_wood.lvs.rules\")])",
"def verify_deviation(\n config: dict,\n type: str,\n stage: str,\n host_id: str = None,\n osd_id: int = None,\n status: str = None,\n) -> bool:\n acting_total_size = (\n acting_total_raw_use\n ) = acting_total_data = acting_total_avail = 0\n stored_data_kb = config[\"WI\"] * 4 * 1024\n pre_osd_df_stats = config[\"pre_osd_df_stats\"]\n post_osd_df_stats = config[\"post_osd_df_stats\"]\n out_osd_df_stats = config[\"out_osd_df_stats\"]\n acting_pg_set = config[\"acting_pg_set\"]\n host_osd_map = config[\"host_osd_map\"]\n deviation_multiplier = {\n \"node\": {\"iops\": 1.07, \"out\": 1.09},\n \"osd\": {\"iops\": 1.07, \"out\": 0},\n \"summary\": {\"iops\": 1.08, \"out\": 1.08},\n }\n\n if osd_id is None and type == \"node\" and stage == \"out\" and status != \"new\":\n osd_id = host_osd_map[host_id][\"iops\"]\n dev_factor = (\n 1.1\n if type == \"node\" and status == \"ignored\"\n else deviation_multiplier[type][stage]\n )\n\n for x in acting_pg_set:\n acting_total_size += pre_osd_df_stats[x][\"kb\"]\n acting_total_raw_use += pre_osd_df_stats[x][\"kb_used\"]\n acting_total_data += pre_osd_df_stats[x][\"kb_used_data\"]\n acting_total_avail += pre_osd_df_stats[x][\"kb_avail\"]\n\n try:\n if type == \"node\":\n if stage == \"iops\":\n log.info(f\"Stats verification for node {host_id} post I/Os\")\n log.info(\n f\"SIZE: {pre_osd_df_stats[host_id]['kb']} == {post_osd_df_stats[host_id]['kb']}\"\n )\n assert (\n pre_osd_df_stats[host_id][\"kb\"] == post_osd_df_stats[host_id][\"kb\"]\n )\n log.info(\n f\"RAW USE: {(pre_osd_df_stats[host_id]['kb_used'] + stored_data_kb) * dev_factor} \"\n f\">= {post_osd_df_stats[host_id]['kb_used']}\"\n )\n assert (\n int(\n (pre_osd_df_stats[host_id][\"kb_used\"] + stored_data_kb)\n * dev_factor\n )\n >= post_osd_df_stats[host_id][\"kb_used\"]\n )\n log.info(\n f\"DATA: {(pre_osd_df_stats[host_id]['kb_used_data'] + stored_data_kb) * dev_factor}\"\n f\" >= {post_osd_df_stats[host_id]['kb_used_data']}\"\n )\n assert (\n int(\n (pre_osd_df_stats[host_id][\"kb_used_data\"] + stored_data_kb)\n * dev_factor\n )\n >= post_osd_df_stats[host_id][\"kb_used_data\"]\n )\n log.info(\n f\"AVAIL: {pre_osd_df_stats[host_id]['kb_avail'] / 1048576}\"\n f\" ~= {post_osd_df_stats[host_id]['kb_avail'] / 1048576}\"\n )\n assert int(pre_osd_df_stats[host_id][\"kb_avail\"] / 1048576) == int(\n post_osd_df_stats[host_id][\"kb_avail\"] / 1048576\n )\n elif stage == \"out\" and status == \"old\":\n log.info(f\"Stats verification for node {host_id} after OSDs are out\")\n log.info(\n f\"SIZE: {(post_osd_df_stats[host_id]['kb'] - pre_osd_df_stats[osd_id]['kb']) * dev_factor}\"\n f\" >= {out_osd_df_stats[host_id]['kb']}\"\n )\n assert (\n int(\n (\n post_osd_df_stats[host_id][\"kb\"]\n - pre_osd_df_stats[osd_id][\"kb\"]\n )\n * dev_factor\n )\n >= out_osd_df_stats[host_id][\"kb\"]\n )\n log.info(\n f\"RAW USE:\"\n f\" {(post_osd_df_stats[host_id]['kb_used'] - pre_osd_df_stats[osd_id]['kb_used']) * dev_factor}\"\n f\" >= {out_osd_df_stats[host_id]['kb_used']}\"\n )\n assert (\n int(\n (\n post_osd_df_stats[host_id][\"kb_used\"]\n - pre_osd_df_stats[osd_id][\"kb_used\"]\n )\n * dev_factor\n )\n >= out_osd_df_stats[host_id][\"kb_used\"]\n )\n value = (\n post_osd_df_stats[host_id][\"kb_used_data\"]\n - pre_osd_df_stats[osd_id][\"kb_used_data\"]\n ) * dev_factor\n log.info(\n f\"DATA: {value}\" f\" >= {out_osd_df_stats[host_id]['kb_used_data']}\"\n )\n assert (\n int(\n (\n post_osd_df_stats[host_id][\"kb_used_data\"]\n - pre_osd_df_stats[osd_id][\"kb_used_data\"]\n )\n * dev_factor\n )\n >= 
out_osd_df_stats[host_id][\"kb_used_data\"]\n )\n log.info(\n f\"AVAIL:\"\n f\" {(post_osd_df_stats[host_id]['kb_avail'] - pre_osd_df_stats[osd_id]['kb_avail']) * dev_factor}\"\n f\" >= {out_osd_df_stats[host_id]['kb_avail']}\"\n )\n assert (\n int(\n (\n post_osd_df_stats[host_id][\"kb_avail\"]\n - pre_osd_df_stats[osd_id][\"kb_avail\"]\n )\n * dev_factor\n )\n >= out_osd_df_stats[host_id][\"kb_avail\"]\n )\n elif stage == \"out\" and status == \"new\":\n log.info(f\"Stats verification for node {host_id} post OSDs are out\")\n log.info(\n f\"SIZE: {post_osd_df_stats[host_id]['kb']} == {out_osd_df_stats[host_id]['kb']}\"\n )\n assert (\n post_osd_df_stats[host_id][\"kb\"] == out_osd_df_stats[host_id][\"kb\"]\n )\n log.info(\n f\"RAW USE: {(post_osd_df_stats[host_id]['kb_used'] + stored_data_kb) * dev_factor}\"\n f\" >= {out_osd_df_stats[host_id]['kb_used']}\"\n )\n assert (\n int(\n (post_osd_df_stats[host_id][\"kb_used\"] + stored_data_kb)\n * dev_factor\n )\n >= out_osd_df_stats[host_id][\"kb_used\"]\n )\n log.info(\n f\"DATA: {(post_osd_df_stats[host_id]['kb_used_data'] + stored_data_kb) * dev_factor}\"\n f\" >= {out_osd_df_stats[host_id]['kb_used_data']}\"\n )\n assert (\n int(\n (post_osd_df_stats[host_id][\"kb_used_data\"] + stored_data_kb)\n * dev_factor\n )\n >= out_osd_df_stats[host_id][\"kb_used_data\"]\n )\n log.info(\n f\"AVAIL: {post_osd_df_stats[host_id]['kb_avail'] / 1048576}\"\n f\" ~= {out_osd_df_stats[host_id]['kb_avail'] / 1048576}\"\n )\n assert int(post_osd_df_stats[host_id][\"kb_avail\"] / 1048576) == int(\n out_osd_df_stats[host_id][\"kb_avail\"] / 1048576\n )\n elif stage == \"out\" and status == \"ignored\":\n log.info(f\"Stats verification for node {host_id} post OSDs are out\")\n log.info(\n f\"SIZE: {(post_osd_df_stats[host_id]['kb'] - post_osd_df_stats[osd_id]['kb']) * dev_factor}\"\n f\" >= {out_osd_df_stats[host_id]['kb']}\"\n )\n assert (\n int(\n (\n post_osd_df_stats[host_id][\"kb\"]\n - post_osd_df_stats[osd_id][\"kb\"]\n )\n * dev_factor\n )\n >= out_osd_df_stats[host_id][\"kb\"]\n )\n log.info(\n f\"RAW USE: {(post_osd_df_stats[host_id]['kb_used'] - post_osd_df_stats[osd_id]['kb_used']) * 1.08}\"\n f\" >= {out_osd_df_stats[host_id]['kb_used']}\"\n )\n assert (\n int(\n (\n post_osd_df_stats[host_id][\"kb_used\"]\n - post_osd_df_stats[osd_id][\"kb_used\"]\n )\n * 1.08\n )\n >= out_osd_df_stats[host_id][\"kb_used\"]\n )\n value = (\n post_osd_df_stats[host_id][\"kb_used_data\"]\n - post_osd_df_stats[osd_id][\"kb_used_data\"]\n ) * dev_factor\n log.info(\n f\"DATA: {value}\" f\" >= {out_osd_df_stats[host_id]['kb_used_data']}\"\n )\n assert (\n int(\n (\n post_osd_df_stats[host_id][\"kb_used_data\"]\n - post_osd_df_stats[osd_id][\"kb_used_data\"]\n )\n * dev_factor\n )\n >= out_osd_df_stats[host_id][\"kb_used_data\"]\n )\n log.info(\n f\"AVAIL: {(post_osd_df_stats[host_id]['kb_avail'] - post_osd_df_stats[osd_id]['kb']) * dev_factor}\"\n f\" >= {out_osd_df_stats[host_id]['kb_avail']}\"\n )\n assert (\n int(\n (\n post_osd_df_stats[host_id][\"kb_avail\"]\n - post_osd_df_stats[osd_id][\"kb\"]\n )\n * dev_factor\n )\n >= out_osd_df_stats[host_id][\"kb_avail\"]\n )\n log.info(f\"Stats verification completed for {type} {host_id}: PASS\")\n elif type == \"osd\":\n if stage == \"iops\" and status == \"old\":\n log.info(f\"Stats verification for OSD {osd_id} post I/Os\")\n log.info(\n f\"SIZE: {pre_osd_df_stats[osd_id]['kb']} == {post_osd_df_stats[osd_id]['kb']}\"\n )\n assert pre_osd_df_stats[osd_id][\"kb\"] == post_osd_df_stats[osd_id][\"kb\"]\n log.info(\n f\"RAW 
USE: {(pre_osd_df_stats[osd_id]['kb_used'] + stored_data_kb) * dev_factor}\"\n f\" >= {post_osd_df_stats[osd_id]['kb_used']}\"\n )\n assert (\n int(\n (pre_osd_df_stats[osd_id][\"kb_used\"] + stored_data_kb)\n * dev_factor\n )\n >= post_osd_df_stats[osd_id][\"kb_used\"]\n )\n log.info(\n f\"DATA: {(pre_osd_df_stats[osd_id]['kb_used_data'] + stored_data_kb) * dev_factor}\"\n f\" >= {post_osd_df_stats[osd_id]['kb_used_data']}\"\n )\n assert (\n int(\n (pre_osd_df_stats[osd_id][\"kb_used_data\"] + stored_data_kb)\n * dev_factor\n )\n >= post_osd_df_stats[osd_id][\"kb_used_data\"]\n )\n log.info(\n f\"AVAIL: {pre_osd_df_stats[osd_id]['kb_avail'] / 1048576}\"\n f\" ~= {post_osd_df_stats[osd_id]['kb_avail'] / 1048576}\"\n )\n assert int(pre_osd_df_stats[osd_id][\"kb_avail\"] / 1048576) == int(\n post_osd_df_stats[osd_id][\"kb_avail\"] / 1048576\n )\n if stage == \"iops\" and status == \"new\":\n log.info(f\"Stats verification for OSD {osd_id} post I/Os\")\n log.info(\n f\"SIZE: {post_osd_df_stats[osd_id]['kb']} == {out_osd_df_stats[osd_id]['kb']}\"\n )\n assert post_osd_df_stats[osd_id][\"kb\"] == out_osd_df_stats[osd_id][\"kb\"]\n log.info(\n f\"RAW USE: {(post_osd_df_stats[osd_id]['kb_used'] + stored_data_kb) * dev_factor}\"\n f\" >= {out_osd_df_stats[osd_id]['kb_used']}\"\n )\n assert (\n int(\n (post_osd_df_stats[osd_id][\"kb_used\"] + stored_data_kb)\n * dev_factor\n )\n >= out_osd_df_stats[osd_id][\"kb_used\"]\n )\n log.info(\n f\"DATA: {(post_osd_df_stats[osd_id]['kb_used_data'] + stored_data_kb) * dev_factor}\"\n f\" >= {out_osd_df_stats[osd_id]['kb_used_data']}\"\n )\n assert (\n int(\n (post_osd_df_stats[osd_id][\"kb_used_data\"] + stored_data_kb)\n * dev_factor\n )\n >= out_osd_df_stats[osd_id][\"kb_used_data\"]\n )\n log.info(\n f\"AVAIL: {post_osd_df_stats[osd_id]['kb_avail'] / 1048576}\"\n f\" ~= {out_osd_df_stats[osd_id]['kb_avail'] / 1048576}\"\n )\n assert int(post_osd_df_stats[osd_id][\"kb_avail\"] / 1048576) == int(\n out_osd_df_stats[osd_id][\"kb_avail\"] / 1048576\n )\n if stage == \"out\":\n log.info(f\"Stats verification for OSD {osd_id} post OSDs are OUT\")\n log.info(\n f\"Reweight: {out_osd_df_stats[osd_id]['reweight']} | \"\n f\"SIZE: {out_osd_df_stats[osd_id]['kb']} | \"\n f\"RAW USE: {out_osd_df_stats[osd_id]['kb_used']} | \"\n f\"DATA: {out_osd_df_stats[osd_id]['kb_used_data']} | \"\n f\"AVAIL: {out_osd_df_stats[osd_id]['kb_avail']}\"\n )\n assert (\n out_osd_df_stats[osd_id][\"reweight\"]\n == out_osd_df_stats[osd_id][\"kb\"]\n == out_osd_df_stats[osd_id][\"kb_used\"]\n == out_osd_df_stats[osd_id][\"kb_used_data\"]\n == out_osd_df_stats[osd_id][\"kb_avail\"]\n == 0\n )\n log.info(f\"OUT OSD {osd_id} stats: PASS\")\n log.info(f\"Stats verification completed for {type} {osd_id}: PASS\")\n elif type == \"summary\":\n if stage == \"iops\":\n log.info(\"Summary Stats verification post I/Os\")\n log.info(\n f\"TOTAL SIZE: {pre_osd_df_stats['summary']['total_kb']}\"\n f\" == {post_osd_df_stats['summary']['total_kb']}\"\n )\n assert (\n pre_osd_df_stats[\"summary\"][\"total_kb\"]\n == post_osd_df_stats[\"summary\"][\"total_kb\"]\n )\n log.info(\n f\"TOTAL RAW USE: {(pre_osd_df_stats['summary']['total_kb_used'] + stored_data_kb * 3) * dev_factor}\"\n f\" >= {post_osd_df_stats['summary']['total_kb_used']}\"\n )\n assert (\n pre_osd_df_stats[\"summary\"][\"total_kb_used\"] + stored_data_kb * 3\n ) * dev_factor >= post_osd_df_stats[\"summary\"][\"total_kb_used\"]\n log.info(\n f\"TOTAL DATA: \"\n f\"{(pre_osd_df_stats['summary']['total_kb_used_data'] + stored_data_kb * 3) * 
dev_factor}\"\n f\" >= {post_osd_df_stats['summary']['total_kb_used_data']}\"\n )\n assert (\n pre_osd_df_stats[\"summary\"][\"total_kb_used_data\"]\n + stored_data_kb * 3\n ) * dev_factor >= post_osd_df_stats[\"summary\"][\"total_kb_used_data\"]\n log.info(\n f\"TOTAL AVAIL: {pre_osd_df_stats['summary']['total_kb_avail']}\"\n f\" ~= {post_osd_df_stats['summary']['total_kb_avail']}\"\n )\n assert int(\n pre_osd_df_stats[\"summary\"][\"total_kb_avail\"] / 1048576\n ) == int(post_osd_df_stats[\"summary\"][\"total_kb_avail\"] / 1048576)\n log.info(\"Summary Stats verification post I/Os: PASSED\")\n elif stage == \"out\":\n log.info(\"Summary Stats verification after OSDs are OUT\")\n log.info(\n f\"TOTAL SIZE: {post_osd_df_stats['summary']['total_kb'] - acting_total_size}\"\n f\" == {out_osd_df_stats['summary']['total_kb']}\"\n )\n assert (\n post_osd_df_stats[\"summary\"][\"total_kb\"] - acting_total_size\n == out_osd_df_stats[\"summary\"][\"total_kb\"]\n )\n log.info(\n f\"TOTAL RAW USE: \"\n f\"{(post_osd_df_stats['summary']['total_kb_used'] - acting_total_raw_use) * dev_factor}\"\n f\" >= {out_osd_df_stats['summary']['total_kb_used']}\"\n )\n assert (\n post_osd_df_stats[\"summary\"][\"total_kb_used\"] - acting_total_raw_use\n ) * dev_factor >= out_osd_df_stats[\"summary\"][\"total_kb_used\"]\n log.info(\n f\"TOTAL DATA:\"\n f\" {(post_osd_df_stats['summary']['total_kb_used_data'] - acting_total_data) * dev_factor}\"\n f\" >= {out_osd_df_stats['summary']['total_kb_used_data']}\"\n )\n assert (\n post_osd_df_stats[\"summary\"][\"total_kb_used_data\"]\n - acting_total_data\n ) * dev_factor >= out_osd_df_stats[\"summary\"][\"total_kb_used_data\"]\n log.info(\n f\"TOTAL AVAIL: {post_osd_df_stats['summary']['total_kb_avail'] - acting_total_avail}\"\n f\" ~= {out_osd_df_stats['summary']['total_kb_avail']}\"\n )\n assert int(\n (\n post_osd_df_stats[\"summary\"][\"total_kb_avail\"]\n - acting_total_avail\n )\n / 1048576\n ) == int(out_osd_df_stats[\"summary\"][\"total_kb_avail\"] / 1048576)\n log.info(\"Summary Stats verification after OSD are OUT: PASSED\")\n except Exception as E:\n log.info(\"^FAILED\")\n log.error(f\"Verification failed with exception: {E.__doc__}\")\n log.exception(E)\n return False\n\n return True",
"def delcomperrsessallocfailrate(self) :\n\t\ttry :\n\t\t\treturn self._delcomperrsessallocfailrate\n\t\texcept Exception as e:\n\t\t\traise e",
"def log_manager(self, source):\n if self.fail_count[source]:\n if not (self.dname.split('.')[-1] in self.ofr_list):\n time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n self.ofr_list.append(self.dname.split('.')[-1])\n log = str(time) + '|' + self.dname.split('.')[-1] + '|' + self.error_code\n self.sys_chans['fail'].setValue(1)\n self.sys_info_d['ofr'].setValue(json.dumps(self.ofr_list))\n self.sys_info_d['logs'].setValue(log)\n\n if self.dname.split('.')[-1] == 'WG1_2':\n if self.error_code == 'U_out_of_range':\n print('WG1_2_err', self.ps_error, self.ofr_list, self.fail_count)\n elif self.dname.split('.')[-1] == 'WG1_2':\n if self.error_code == 'U_out_of_range':\n print('WG1_2_still_out', self.ps_error, self.ofr_list, self.fail_count)\n s = 0\n for k, v in self.fail_count.items():\n s += v\n if not s:\n if self.dname.split('.')[-1] in self.ofr_list:\n time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n self.ofr_list.delete(self.dname.split('.')[-1])\n log = str(time) + '|' + self.dname.split('.')[-1] + '|' + 'PS IS RUNNING'\n self.sys_chans['fail'].setValue(0)\n self.sys_info_d['ofr'].setValue(json.dumps(self.ofr_list))\n self.sys_info_d['logs'].setValue(log)\n else:\n log = ''\n for k, v in self.fail_count.items():\n if v:\n log = log + k + '|'\n log = log[:-1]\n # self.sys_chans['errcode'].setValue(json.dumps(log))",
"def perf_mon_collection_failure_reason(self, perf_mon_collection_failure_reason):\n\n self._perf_mon_collection_failure_reason = perf_mon_collection_failure_reason"
]
| [
"0.6150373",
"0.5749372",
"0.5595032",
"0.5496742",
"0.5341827",
"0.53159404",
"0.51895225",
"0.51661247",
"0.5164078",
"0.5149587",
"0.5141537",
"0.50924855",
"0.505334",
"0.5052884",
"0.5009815",
"0.5003419",
"0.49941376",
"0.49664646",
"0.49133098",
"0.49062636",
"0.48815998",
"0.48813778",
"0.48760533",
"0.4867822",
"0.48615012",
"0.48452625",
"0.48385438",
"0.4826641",
"0.47990808",
"0.47990006"
]
| 0.69461983 | 0 |
Add U-Tube failure to the leaks dict. Store failure rate, flow rate and expected time duration of the failure event for the U-Tube failure. Based on FESHM 4240. Failure modes are analyzed by the `Volume.odh` method. | def u_tube_failure(self, outer_tube, inner_tube, L, use_rate,
fluid=None, N=1):
# TODO Make areas adjustable, add info to docstring
flow_path_cases = {'Small event': ht.piping.Annulus(outer_tube.ID,
inner_tube.OD,
L=L),
'Large event': outer_tube}
for mode in TABLE_1['U-Tube change']:
flow_path = flow_path_cases[mode]
name = f'U-Tube {mode.lower()}: {flow_path}'
failure_rate = TABLE_1['U-Tube change'][mode] * \
use_rate
area = flow_path.area
# TODO move this and gas leak check to separate method
if area > outer_tube.area:
logger.warning('Leak area cannot be larger'
' than outer tube area.')
continue
# If fluid not defined use fluid of the Source
fluid = fluid or self.fluid
q_std = Source._leak_flow(flow_path, area, fluid)
self.leaks.append(
self._make_leak(name, failure_rate, q_std, N)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def failure_mode(self, name, failure_rate, q_std, N=1):\n self.leaks.append(\n self._make_leak(name, failure_rate, q_std, N))",
"def dewar_insulation_failure(self, q_std):\n failure_rate = TABLE_1['Dewar']['Loss of vacuum']\n self.leaks.append(\n self._make_leak('Dewar insulation failure', failure_rate, q_std, 1))",
"def pipe_failure(self, tube, fluid=None, N_welds=1, max_flow=None):\n # If fluid not defined use fluid of the Source\n fluid = fluid or self.fluid\n # Failure rate coefficients; Piping failure rate is per unit of length,\n # weld is dependent on number of welds, pipe OD and wall thickness\n failure_rate_coeff = {'Piping': (tube.L, 1),\n 'Pipe weld': (tube.OD / tube.wall,\n N_welds)}\n # Piping and weld leaks as per Table 2\n for cause in ['Piping', 'Pipe weld']:\n for mode in TABLE_2[cause].keys():\n if tube.D > 2 or mode != 'Large leak': # Large leak only for D > 2\"\n name = f'{cause} {mode.lower()}: {tube}, ' + \\\n f'{tube.L.to(ureg.ft):.3g~}'\n temp_tube = copy(tube)\n # Average path for the flow will be half of piping length\n # for gas piping\n temp_tube.L = tube.L / 2\n fr_coef = failure_rate_coeff[cause][0]\n N_events = failure_rate_coeff[cause][1]\n if mode == 'Rupture':\n failure_rate = fr_coef * TABLE_2[cause][mode]\n # For rupture calculate flow through available\n # pipe area\n area = tube.area\n else:\n failure_rate = fr_coef * \\\n TABLE_2[cause][mode]['Failure rate']\n area = TABLE_2[cause][mode]['Area']\n if area > tube.area:\n logger.warning('Leak area cannot be larger'\n ' than pipe area.')\n continue\n q_std = Source._leak_flow(temp_tube, area, fluid)\n if max_flow is not None:\n fluid_NTP = fluid.copy()\n fluid_NTP.update_kw(P=ht.P_NTP, T=ht.T_NTP)\n q_std_max = max_flow / fluid_NTP.Dmass\n q_std = min(q_std, q_std_max)\n self.leaks.append(\n self._make_leak(name, failure_rate, q_std, N_events))",
"def _make_leak(self, name, failure_rate, q_std, N):\n N_events = N * self.N\n tau = self.volume/q_std\n total_failure_rate = N_events*failure_rate\n total_failure_rate.ito(1/ureg.hr)\n return (name, total_failure_rate, q_std, tau.to(ureg.min), N_events)",
"def record_failure(self, now=None) -> None:\n logging.info('Recording failure at %r', now or int(time.time()))\n self.failure_timestamp = now or int(time.time())\n self.put()",
"def add_failure(self):\n failure_time = time.time()\n\n if not self.first_failure_time:\n self.first_failure_time = failure_time\n\n self.failures.append(failure_time)",
"def pressure_vessel_failure(self, q_std_rupture, fluid=None):\n # If fluid not defined use fluid of the Source\n fluid = fluid or self.fluid\n for case, parameters in TABLE_2['Vessel, pressure'].items():\n name = 'Pressure vessel ' + case\n if isinstance(parameters, dict):\n area = parameters['Area']\n failure_rate = parameters['Failure rate']\n q_std = Source._leak_flow(ht.piping.Pipe(1, L=0*ureg.m), area,\n fluid)\n else:\n failure_rate = parameters\n q_std = q_std_rupture\n self.leaks.append(\n self._make_leak(name, failure_rate, q_std, 1))",
"def addFailure(self, test, err):\n self.failures.append((proto_test(test), proto_error(err)))",
"def addFailure(self, test, err):\n\n super(ForceBalanceTestResult, self).addFailure(test,err)\n self.logger.warning(\"\\r\\x1b[31;1m\" + \"FAIL\" + \"\\x1b[0m \" + test.shortDescription() + \"\\n\")\n\n errorMessage = self.buildErrorMessage(test, err)\n\n for line in errorMessage.splitlines():\n self.logger.warning(\"\\t >\\t\" + line + \"\\n\")",
"def addFailure(self, test, err):\n test.status = \"failed\"\n self._addError(test, err)",
"def addFailure(self, test, err, capt=None, tb_info=None):\n taken = self._timeTaken()\n tb = ''.join(traceback.format_exception(*err))\n self.xunitstats[1] += 1\n try:\n id=test.shortDescription()\n if id is None:\n id = test.id()\n except AttributeError:\n id=''\n id = id.split('.')\n name = self._quoteattr(id[-1])\n systemout = ''\n# if test.capturedOutput is not None:\n# systemout = '<system-out><![CDATA['+escape_cdata(str(test.capturedOutput))+']]></system-out>'\n xml = \"\"\"<testcase classname=%(cls)s name=%(name)s time=\"%(taken)f\">\n%(systemout)s\n<failure type=%(errtype)s message=%(message)s><![CDATA[%(tb)s]]>\n</failure></testcase>\n\"\"\" %{'cls': self._quoteattr('.'.join(id[:-1])), 'name': self._quoteattr(name), 'taken': taken, 'errtype': self._quoteattr(nice_classname(err[0])), 'message': self._quoteattr(exc_message(err)), 'tb': escape_cdata(tb), 'systemout':systemout}\n self.addstream(xml)",
"def addFailure(self, test, err):\n self.failure_count += 1\n self.total_count += 1\n unittest.TestResult.addFailure(self, test, err)\n _, _exc_str = self.failures[-1]\n output = self.complete_output()\n self.result.append((self.__class__.FAIL, test, output, _exc_str))\n if self.verbosity > 1:\n sys.stderr.write('F ')\n sys.stderr.write(str(test))\n sys.stderr.write('\\n')\n else:\n sys.stderr.write('F')",
"async def test_failed_samples(self):\n self.set_source_parameter(\"test_result\", [\"failed\"])\n response = await self.collect(get_request_json_return_value=self.JMETER_JSON)\n self.assert_measurement(response, value=\"6\", entities=[])",
"def addFailure(self, test, err, test_time=None):\n # Special case: Catch Twisted's skips that come thtrough as failures\n # and treat them as skips instead\n if len(err.traceback_lines) == 1:\n if err.traceback_lines[0].startswith(\"UnsupportedTrialFeature\"):\n reason = eval(err.traceback_lines[0][25:])[1]\n self.addSkip(test, reason)\n return\n\n test = proto_test(test)\n if test_time:\n test.test_time = str(test_time)\n err = proto_error(err)\n self.failures.append((test, err))\n self.all_errors.append((test, self.colors.error, \"Failure\", err))\n self._reportOutcome(test, \"F\", self.colors.failing, err)",
"def create_failure(test, time, failure):\n info = _TestInfo(test, time)\n info._failure = failure\n return info",
"def addFailure(self, test, err):\r\n self.failures.append((test, self._exc_info_to_string(err, test)))\r\n self._mirrorOutput = True",
"def print_leaks(self):\n for key in sorted(self.leaks.keys()):\n print('Failure mode: '+key)\n print('Failure rate: {:.2~}'.format(self.leaks[key][0]))\n print('Flow rate: {:.2~}'.format(\n self.leaks[key][1].to(ureg.ft**3/ureg.min)))\n print('Event duration: {:.2~}'.format(self.leaks[key][2]))\n print()",
"def addFailure(self, test, err, capt=None):\n exc_type, exc_val, tb = err\n tb = ''.join(traceback.format_exception(\n exc_type,\n exc_val if isinstance(exc_val, exc_type) else exc_type(exc_val),\n tb\n ))\n name = id_split(test.id())\n group = self.report_data[name[0]]\n self.stats['failures'] += 1\n group.stats['failures'] += 1\n group.tests.append({\n 'name': name[-1],\n 'failed': True,\n 'errtype': nice_classname(err[0]),\n 'message': exc_message(err),\n 'tb': tb,\n })",
"def test_prometheus_rule_failures():\n prometheus = ocs_ci.utility.prometheus.PrometheusAPI()\n alerts_response = prometheus.get(\n \"alerts\", payload={\"silenced\": False, \"inhibited\": False}\n )\n assert alerts_response.ok is True\n alerts = alerts_response.json()[\"data\"][\"alerts\"]\n log.info(f\"Prometheus Alerts: {alerts}\")\n assert constants.ALERT_PROMETHEUSRULEFAILURES not in [\n alert[\"labels\"][\"alertname\"] for alert in alerts\n ]",
"def test_loss_hook(self, losses):\n self.runinfo[\"dev_losses\"].append(losses)",
"def test_manager_reports_duration_including_failure():\n\n httpretty.register_uri(\n httpretty.GET,\n \"http://test.com/long_failed\",\n body=httpretty_body_that_waits_and_returns(0.5, None),\n )\n\n report_path = os.path.join(REPORT_DIR, \"duration_report_with_failure.json\")\n spintest(\n [\"http://test.com\"],\n [\n # Fails but does not retry and is ignored\n {\"method\": \"GET\", \"route\": \"/long_failed\", \"delay\": 0, \"ignore\": True},\n ],\n generate_report=report_path,\n )\n spintest_reports = read_report(report_path)\n\n first_task_report = spintest_reports[0][\"reports\"][0]\n assert 0.5 <= first_task_report[\"duration_sec\"] <= 0.6\n\n total_duration = spintest_reports[0][\"total_duration_sec\"]\n assert 0.5 <= total_duration <= 0.6",
"def addFailure(self, result):\n result.addFailure(self, (Exception, Exception(), None))\n # Since TAP will not provide assertion data, clean up the assertion\n # section so it is not so spaced out.\n test, err = result.failures[-1]\n result.failures[-1] = (test, \"\")",
"def test_add_failure_details(self):\n self.protocol.addFailure(\n self.test, details=self.sample_tb_details)\n self.assertThat([\n compat._b((\"failure: %s [ multipart\\n\"\n \"Content-Type: text/plain\\n\"\n \"something\\n\"\n \"F\\r\\nserialised\\nform0\\r\\n\"\n \"Content-Type: \"\n \"text/x-traceback;charset=utf8,language=python\\n\"\n \"traceback\\n\" + _remote_exception_str_chunked +\n \"]\\n\") % self.test.id()),\n compat._b((\"failure: %s [ multipart\\n\"\n \"Content-Type: text/plain\\n\"\n \"something\\n\"\n \"F\\r\\nserialised\\nform0\\r\\n\"\n \"Content-Type: \"\n \"text/x-traceback;language=python,charset=utf8\\n\"\n \"traceback\\n\" + _remote_exception_str_chunked +\n \"]\\n\") % self.test.id()),\n ],\n matchers.Contains(self.io.getvalue())),",
"def perf_mon_collection_failure_reason(self, perf_mon_collection_failure_reason):\n\n self._perf_mon_collection_failure_reason = perf_mon_collection_failure_reason",
"def flange_failure(self, Pipe, fluid=None, N=1):\n # TODO Make leak and rupture areas adjustable, add info to docstring\n table = TABLE_2['Flange, reinforced gasket']\n area_cases = {\n 'Leak': table['Leak']['Area'],\n 'Rupture': Pipe.area}\n for mode in table:\n name = f'Flange {mode.lower()}: {Pipe}'\n if isinstance(table[mode], dict):\n failure_rate = table[mode]['Failure rate']\n else:\n failure_rate = table[mode]\n area = area_cases[mode]\n # TODO move this and gas leak check to separate method\n if area > Pipe.area:\n logger.warning('Leak area cannot be larger'\n ' than pipe area.')\n continue\n # If fluid not defined use fluid of the Source\n fluid = fluid or self.fluid\n q_std = Source._leak_flow(Pipe, area, fluid)\n self.leaks.append(\n self._make_leak(name, failure_rate, q_std, N))",
"def snmpqosqos_error_libqos_api_failuresrate(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_error_libqos_api_failuresrate\n\t\texcept Exception as e:\n\t\t\traise e",
"def log_failure(self, request):\n self.log_file.write(self.TYPE_FAILURE + \",%f,,,%f,,\\n\" %\n (float(request.resources[0]['amount']),\n float(request.offer)))",
"def test_add_failure(self):\n self.protocol.addFailure(\n self.test, pysubunit.RemoteError(compat._u(\"boo qux\")))\n self.assertEqual(\n self.io.getvalue(),\n compat._b(\n ('failure: %s [\\n' +\n _remote_exception_str + ': boo qux\\n]\\n')\n % self.test.id()))",
"def failure(self):\n self.logger.debug(\"Logging failure for %s\", self.key)\n self.failures = self.driver.failure(self.key)",
"def get_failure_rate(self) -> float:\n return self.failurerate"
]
| [
"0.62996244",
"0.6081853",
"0.5967863",
"0.59589446",
"0.57245386",
"0.5637934",
"0.56091994",
"0.5550118",
"0.55276215",
"0.5523503",
"0.5457722",
"0.54345715",
"0.54227185",
"0.540919",
"0.5392647",
"0.53509164",
"0.5341924",
"0.53290635",
"0.528553",
"0.5253357",
"0.52019227",
"0.5127696",
"0.51075196",
"0.50968415",
"0.50958776",
"0.5083227",
"0.5065942",
"0.50607055",
"0.5053975",
"0.5044706"
]
| 0.7077728 | 0 |
Add pressure vessel failure to the leaks dict. Store failure rate, flow rate and expected time duration of the event for the pressure vessel failure. Based on FESHM 4240. Failure modes are analyzed by the `Volume.odh` method. | def pressure_vessel_failure(self, q_std_rupture, fluid=None):
# If fluid not defined use fluid of the Source
fluid = fluid or self.fluid
for case, parameters in TABLE_2['Vessel, pressure'].items():
name = 'Pressure vessel ' + case
if isinstance(parameters, dict):
area = parameters['Area']
failure_rate = parameters['Failure rate']
q_std = Source._leak_flow(ht.piping.Pipe(1, L=0*ureg.m), area,
fluid)
else:
failure_rate = parameters
q_std = q_std_rupture
self.leaks.append(
self._make_leak(name, failure_rate, q_std, 1)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def u_tube_failure(self, outer_tube, inner_tube, L, use_rate,\n fluid=None, N=1):\n # TODO Make areas adjustable, add info to docstring\n flow_path_cases = {'Small event': ht.piping.Annulus(outer_tube.ID,\n inner_tube.OD,\n L=L),\n 'Large event': outer_tube}\n for mode in TABLE_1['U-Tube change']:\n flow_path = flow_path_cases[mode]\n name = f'U-Tube {mode.lower()}: {flow_path}'\n failure_rate = TABLE_1['U-Tube change'][mode] * \\\n use_rate\n area = flow_path.area\n # TODO move this and gas leak check to separate method\n if area > outer_tube.area:\n logger.warning('Leak area cannot be larger'\n ' than outer tube area.')\n continue\n # If fluid not defined use fluid of the Source\n fluid = fluid or self.fluid\n q_std = Source._leak_flow(flow_path, area, fluid)\n self.leaks.append(\n self._make_leak(name, failure_rate, q_std, N))",
"def failure_mode(self, name, failure_rate, q_std, N=1):\n self.leaks.append(\n self._make_leak(name, failure_rate, q_std, N))",
"def pipe_failure(self, tube, fluid=None, N_welds=1, max_flow=None):\n # If fluid not defined use fluid of the Source\n fluid = fluid or self.fluid\n # Failure rate coefficients; Piping failure rate is per unit of length,\n # weld is dependent on number of welds, pipe OD and wall thickness\n failure_rate_coeff = {'Piping': (tube.L, 1),\n 'Pipe weld': (tube.OD / tube.wall,\n N_welds)}\n # Piping and weld leaks as per Table 2\n for cause in ['Piping', 'Pipe weld']:\n for mode in TABLE_2[cause].keys():\n if tube.D > 2 or mode != 'Large leak': # Large leak only for D > 2\"\n name = f'{cause} {mode.lower()}: {tube}, ' + \\\n f'{tube.L.to(ureg.ft):.3g~}'\n temp_tube = copy(tube)\n # Average path for the flow will be half of piping length\n # for gas piping\n temp_tube.L = tube.L / 2\n fr_coef = failure_rate_coeff[cause][0]\n N_events = failure_rate_coeff[cause][1]\n if mode == 'Rupture':\n failure_rate = fr_coef * TABLE_2[cause][mode]\n # For rupture calculate flow through available\n # pipe area\n area = tube.area\n else:\n failure_rate = fr_coef * \\\n TABLE_2[cause][mode]['Failure rate']\n area = TABLE_2[cause][mode]['Area']\n if area > tube.area:\n logger.warning('Leak area cannot be larger'\n ' than pipe area.')\n continue\n q_std = Source._leak_flow(temp_tube, area, fluid)\n if max_flow is not None:\n fluid_NTP = fluid.copy()\n fluid_NTP.update_kw(P=ht.P_NTP, T=ht.T_NTP)\n q_std_max = max_flow / fluid_NTP.Dmass\n q_std = min(q_std, q_std_max)\n self.leaks.append(\n self._make_leak(name, failure_rate, q_std, N_events))",
"def dewar_insulation_failure(self, q_std):\n failure_rate = TABLE_1['Dewar']['Loss of vacuum']\n self.leaks.append(\n self._make_leak('Dewar insulation failure', failure_rate, q_std, 1))",
"def _make_leak(self, name, failure_rate, q_std, N):\n N_events = N * self.N\n tau = self.volume/q_std\n total_failure_rate = N_events*failure_rate\n total_failure_rate.ito(1/ureg.hr)\n return (name, total_failure_rate, q_std, tau.to(ureg.min), N_events)",
"def flange_failure(self, Pipe, fluid=None, N=1):\n # TODO Make leak and rupture areas adjustable, add info to docstring\n table = TABLE_2['Flange, reinforced gasket']\n area_cases = {\n 'Leak': table['Leak']['Area'],\n 'Rupture': Pipe.area}\n for mode in table:\n name = f'Flange {mode.lower()}: {Pipe}'\n if isinstance(table[mode], dict):\n failure_rate = table[mode]['Failure rate']\n else:\n failure_rate = table[mode]\n area = area_cases[mode]\n # TODO move this and gas leak check to separate method\n if area > Pipe.area:\n logger.warning('Leak area cannot be larger'\n ' than pipe area.')\n continue\n # If fluid not defined use fluid of the Source\n fluid = fluid or self.fluid\n q_std = Source._leak_flow(Pipe, area, fluid)\n self.leaks.append(\n self._make_leak(name, failure_rate, q_std, N))",
"def test_loss_hook(self, losses):\n self.runinfo[\"dev_losses\"].append(losses)",
"def addFailure(self, test, err):\n\n super(ForceBalanceTestResult, self).addFailure(test,err)\n self.logger.warning(\"\\r\\x1b[31;1m\" + \"FAIL\" + \"\\x1b[0m \" + test.shortDescription() + \"\\n\")\n\n errorMessage = self.buildErrorMessage(test, err)\n\n for line in errorMessage.splitlines():\n self.logger.warning(\"\\t >\\t\" + line + \"\\n\")",
"def auditportallocfailrate(self) :\n\t\ttry :\n\t\t\treturn self._auditportallocfailrate\n\t\texcept Exception as e:\n\t\t\traise e",
"def _leak_flow(cls, tube, area, fluid):\n d = (4*area/math.pi)**0.5 # diameter for the leak opening\n exit_ = ht.piping.Exit(d)\n TempPiping = ht.piping.Piping(fluid)\n TempPiping.add(\n tube,\n exit_,\n )\n if area != tube.area:\n Hole = ht.piping.Orifice(d)\n TempPiping.insert(1, Hole)\n m_dot = TempPiping.m_dot(ht.P_NTP)\n fluid_NTP = fluid.copy()\n fluid_NTP.update_kw(P=ht.P_NTP, T=ht.T_NTP)\n q_std = m_dot / fluid_NTP.Dmass\n return q_std",
"def _fatality_fan_powered(self, source, leak, sol_PFD, PFD_power_build):\n (failure_mode_name, leak_failure_rate, q_leak, tau, N) = leak\n for (P_fan, Q_fan, N_fan) in self.Fan_flowrates:\n # Probability of power on, ODH system working, and m number of fans\n # with flow rate Q_fan on.\n P_response = (1-PFD_power_build) * (1-self.PFD_ODH) * \\\n sol_PFD * P_fan\n P_i = leak_failure_rate * P_response\n O2_conc = conc_vent(self.volume, q_leak, Q_fan, tau)\n F_i = self._fatality_prob(O2_conc)\n phi_i = P_i*F_i\n f_mode = failure_mode(phi_i, source, failure_mode_name, O2_conc,\n leak_failure_rate, P_i, F_i,\n PFD_power_build == 1, q_leak, tau, Q_fan,\n N_fan, N)\n self.fail_modes.append(f_mode)",
"def _snps_failed_report(write: bool=False, miss_threshold: float=0.2,\n maf_threshold: float=0.00001, hwe_threshold: float=1e-6,\n lmissfile: str=\"plink.lmiss\", maffile: str=\"MAF_check.frq\",\n hwefile: str=\"plink.hwe\"):\n snps = {}\n ids_list = []\n lmiss = pd.read_csv(lmiss_file, delimiter=\" \", skipinitialspace=True)\n\n missing_snps = lmiss.loc[lmiss['F_MISS'] > miss_threshold]\n snps['missing_snps'] = missing_snps['SNP'].tolist()\n ids_list.append(missing_snps['SNP'].tolist())\n # print(\"total missing snps failed: \", len(missing_snps['SNP'].tolist()))\n\n # MAF\n maf = pd.read_csv(maf_file, delimiter=\" \", skipinitialspace=True)\n rare = maf.loc[maf['MAF'] < maf_threshold]\n snps['maf'] = rare['SNP'].tolist()\n ids_list.append(rare['SNP'].tolist())\n # print(\"total maf snps failed: \", len(rare['SNP'].tolist()))\n\n # HWE departures\n hardy = pd.read_csv(hwe_file, delimiter=\" \", skipinitialspace=True)\n hwe_failed = hardy.loc[hardy['P'] < hwe_threshold]\n snps['hwe'] = hwe_failed['SNP'].tolist()\n ids_list.append(hwe_failed['SNP'].tolist())\n # print(\"total hwe snps failed: \", len(hwe_failed['SNP'].tolist()))\n\n # graph everything\n tests = ['SNP Missingness', 'Minor Allele Frequency', 'Outlying HWE']\n fail_counts = [len(missing_snps['SNP'].tolist()), len(rare['SNP'].tolist()), len(hwe_failed['SNP'].tolist())]\n total_fails = set(x for l in ids_list for x in l)\n # print(\"total fails: \", len(total_fails))\n\n fig = plt.figure(figsize=(8,6))\n plt.tight_layout()\n plt.bar(x=tests, height=fail_counts)\n plt.title(\"SNPs failing QC checks (total: {}/{})\".format(len(total_fails), lmiss.shape[0]))\n plt.xlabel(\"QC Test\")\n plt.ylabel(\"Number of SNPs\")\n plt.tick_params(axis='x', rotation=90)\n\n if write:\n write_fail_file(snps, \"failed_snps_ids\")\n\n return fig",
"def addFailure(self, test, err):\n test.status = \"failed\"\n self._addError(test, err)",
"def update_timeout_penalties_by_error(penalty_dict):\n if penalty_dict and isinstance(penalty_dict, dict):\n _TIMEOUT_PENALTIES_BY_ERR_NO.update(penalty_dict)",
"def addFailure(self, test, err):\n self.failures.append((proto_test(test), proto_error(err)))",
"def report(self, brief=True, sens=None):\n self.fail_modes.sort(key=lambda x: x.phi, reverse=True)\n sens = sens or SHOW_SENS\n title = f'ODH report for {self}'\n padding = len(title) + 10\n print('#'*padding)\n print(title)\n print('-'*padding)\n if brief:\n print('Printing brief ODH report')\n print(f'Only leaks with Fatality rate > {sens} are shown')\n for f_mode in self.fail_modes:\n if f_mode.phi >= sens or not brief:\n print()\n print(f' Source: {f_mode.source.name}')\n print(f' Failure: {f_mode.name}')\n print(f' Fatality rate: {f_mode.phi.to(1/ureg.hr):.2~}')\n print(f' Building is powered: {not f_mode.outage}')\n print(f' Oxygen concentration: {f_mode.O2_conc:.0%}, '\n f'{f_mode.O2_conc/0.21:.0%} percent of norm')\n print(f' Leak failure rate: {f_mode.leak_fr:.3g~}')\n print(' ODH protection PFD: '\n f'{(f_mode.P_i/f_mode.leak_fr).to(ureg.dimensionless):.2~}')\n print(f' Total failure rate: {f_mode.P_i.to(1/ureg.hr):.2~}')\n print(f' Leak rate: {f_mode.q_leak:.2~}')\n print(f' Event duration: {f_mode.tau:.2~}')\n print(f' Fans working: {f_mode.N_fan}')\n print(f' Fan rate: {f_mode.Q_fan:.2~}')\n print(f' Fatality prob: {f_mode.F_i:.0%}')",
"def print_leaks(self):\n for key in sorted(self.leaks.keys()):\n print('Failure mode: '+key)\n print('Failure rate: {:.2~}'.format(self.leaks[key][0]))\n print('Flow rate: {:.2~}'.format(\n self.leaks[key][1].to(ureg.ft**3/ureg.min)))\n print('Event duration: {:.2~}'.format(self.leaks[key][2]))\n print()",
"def record_failure(self, now=None) -> None:\n logging.info('Recording failure at %r', now or int(time.time()))\n self.failure_timestamp = now or int(time.time())\n self.put()",
"def addFailure(self, test, err):\r\n self.failures.append((test, self._exc_info_to_string(err, test)))\r\n self._mirrorOutput = True",
"def _fatality_no_response(self, source, leak, sol_PFD,\n PFD_power_build):\n (failure_mode_name, leak_failure_rate, q_leak, tau, N) = leak\n P_no_response = float(PFD_power_build) * sol_PFD + \\\n (1-PFD_power_build)*self.PFD_ODH\n P_i = leak_failure_rate * P_no_response\n Q_fan = self.vent_rate\n O2_conc = conc_vent(self.volume, q_leak, Q_fan, tau)\n F_i = self._fatality_prob(O2_conc)\n phi_i = P_i*F_i\n f_mode = failure_mode(phi_i, source, failure_mode_name, O2_conc,\n leak_failure_rate, P_i, F_i,\n PFD_power_build == 1, q_leak, tau, Q_fan, 0, N)\n self.fail_modes.append(f_mode)",
"def add_failure(self):\n failure_time = time.time()\n\n if not self.first_failure_time:\n self.first_failure_time = failure_time\n\n self.failures.append(failure_time)",
"def addFailure(self, test, err):\n self.failure_count += 1\n self.total_count += 1\n unittest.TestResult.addFailure(self, test, err)\n _, _exc_str = self.failures[-1]\n output = self.complete_output()\n self.result.append((self.__class__.FAIL, test, output, _exc_str))\n if self.verbosity > 1:\n sys.stderr.write('F ')\n sys.stderr.write(str(test))\n sys.stderr.write('\\n')\n else:\n sys.stderr.write('F')",
"def addFailure(self, test, err, test_time=None):\n # Special case: Catch Twisted's skips that come thtrough as failures\n # and treat them as skips instead\n if len(err.traceback_lines) == 1:\n if err.traceback_lines[0].startswith(\"UnsupportedTrialFeature\"):\n reason = eval(err.traceback_lines[0][25:])[1]\n self.addSkip(test, reason)\n return\n\n test = proto_test(test)\n if test_time:\n test.test_time = str(test_time)\n err = proto_error(err)\n self.failures.append((test, err))\n self.all_errors.append((test, self.colors.error, \"Failure\", err))\n self._reportOutcome(test, \"F\", self.colors.failing, err)",
"def odh(self, sources, power_outage=False):\n self.fail_modes = []\n # Probability of power failure in the building:\n # PFD_power if no outage, 1 if there is outage\n PFD_power_build = (power_outage or\n TABLE_1['Electrical Power Failure']['Demand rate'])\n # Calculate fatality rates for each source\n for source in sources:\n for leak in source.leaks:\n leak_failure_rate = leak[0]\n if leak_failure_rate is not None: # None for constant leak\n self._fatality_no_response(source, leak, source.sol_PFD,\n PFD_power_build)\n self._fatality_fan_powered(source, leak, source.sol_PFD,\n PFD_power_build)",
"def log_manager(self, source):\n if self.fail_count[source]:\n if not (self.dname.split('.')[-1] in self.ofr_list):\n time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n self.ofr_list.append(self.dname.split('.')[-1])\n log = str(time) + '|' + self.dname.split('.')[-1] + '|' + self.error_code\n self.sys_chans['fail'].setValue(1)\n self.sys_info_d['ofr'].setValue(json.dumps(self.ofr_list))\n self.sys_info_d['logs'].setValue(log)\n\n if self.dname.split('.')[-1] == 'WG1_2':\n if self.error_code == 'U_out_of_range':\n print('WG1_2_err', self.ps_error, self.ofr_list, self.fail_count)\n elif self.dname.split('.')[-1] == 'WG1_2':\n if self.error_code == 'U_out_of_range':\n print('WG1_2_still_out', self.ps_error, self.ofr_list, self.fail_count)\n s = 0\n for k, v in self.fail_count.items():\n s += v\n if not s:\n if self.dname.split('.')[-1] in self.ofr_list:\n time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n self.ofr_list.delete(self.dname.split('.')[-1])\n log = str(time) + '|' + self.dname.split('.')[-1] + '|' + 'PS IS RUNNING'\n self.sys_chans['fail'].setValue(0)\n self.sys_info_d['ofr'].setValue(json.dumps(self.ofr_list))\n self.sys_info_d['logs'].setValue(log)\n else:\n log = ''\n for k, v in self.fail_count.items():\n if v:\n log = log + k + '|'\n log = log[:-1]\n # self.sys_chans['errcode'].setValue(json.dumps(log))",
"def addFailure(self, test, err, capt=None):\n exc_type, exc_val, tb = err\n tb = ''.join(traceback.format_exception(\n exc_type,\n exc_val if isinstance(exc_val, exc_type) else exc_type(exc_val),\n tb\n ))\n name = id_split(test.id())\n group = self.report_data[name[0]]\n self.stats['failures'] += 1\n group.stats['failures'] += 1\n group.tests.append({\n 'name': name[-1],\n 'failed': True,\n 'errtype': nice_classname(err[0]),\n 'message': exc_message(err),\n 'tb': tb,\n })",
"def create_failure(test, time, failure):\n info = _TestInfo(test, time)\n info._failure = failure\n return info",
"def log_failure(self, request):\n self.log_file.write(self.TYPE_FAILURE + \",%f,,,%f,,\\n\" %\n (float(request.resources[0]['amount']),\n float(request.offer)))",
"def auditmemallocfailrate(self) :\n\t\ttry :\n\t\t\treturn self._auditmemallocfailrate\n\t\texcept Exception as e:\n\t\t\traise e",
"def add_leaks_to_submission(predictions):\n leaked_df = pd.read_feather(\"data/leak/leak.feather\")\n leaked_df.rename(columns={\"meter_reading\": \"leaked_reading\"}, inplace=True)\n leaked_df.loc[leaked_df[\"leaked_reading\"] < 0, \"leaked_reading\"] = 0\n leaked_df = leaked_df[leaked_df[\"building_id\"] != 245]\n leaked_df[\"timestamp\"] = leaked_df[\"timestamp\"].dt.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n test_df = pd.read_csv(\"data/raw/test.csv\")\n\n test_df = test_df.merge(leaked_df, left_on=[\"building_id\", \"meter\", \"timestamp\"],\n right_on=[\"building_id\", \"meter\", \"timestamp\"], how=\"left\")\n test_df[\"meter_reading\"] = predictions\n test_df[\"meter_reading\"] = np.where(test_df[\"leaked_reading\"].isna(),\n test_df[\"meter_reading\"], test_df[\"leaked_reading\"])\n\n return test_df[\"meter_reading\"]"
]
| [
"0.5895279",
"0.58741015",
"0.58721024",
"0.57350564",
"0.5734858",
"0.5273407",
"0.5273209",
"0.5161501",
"0.5152299",
"0.50805694",
"0.50692445",
"0.50475574",
"0.50008637",
"0.50005174",
"0.49996737",
"0.49666536",
"0.49650532",
"0.49615368",
"0.49581695",
"0.49567917",
"0.4919076",
"0.4909518",
"0.48937544",
"0.4893057",
"0.4885846",
"0.48772898",
"0.486589",
"0.48601317",
"0.48535812",
"0.48267847"
]
| 0.666339 | 0 |
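Editorial note (not part of the dataset rows above or below): the pressure_vessel_failure record, together with the `_make_leak` helper visible among its negatives, stores every failure mode as a (name, total failure rate, flow rate, duration, event count) tuple. The following is a minimal, unit-free sketch of that bookkeeping under the assumption that the names mirror the dataset's code; the real implementation uses `pint` quantities (`ureg`) and the `ht.piping` module, which are omitted here.

```python
class SourceSketch:
    """Unit-free stand-in for the Source class seen in the dataset rows."""

    def __init__(self, volume_m3, N=1):
        self.volume = volume_m3   # standard gas volume of the source
        self.N = N                # number of identical sources
        self.leaks = []           # list of failure-mode tuples

    def _make_leak(self, name, failure_rate_per_hr, q_std_m3_per_hr, N):
        N_events = N * self.N
        tau_hr = self.volume / q_std_m3_per_hr            # expected release duration
        total_failure_rate = N_events * failure_rate_per_hr
        return (name, total_failure_rate, q_std_m3_per_hr, tau_hr, N_events)

    def failure_mode(self, name, failure_rate_per_hr, q_std_m3_per_hr, N=1):
        self.leaks.append(self._make_leak(name, failure_rate_per_hr, q_std_m3_per_hr, N))


src = SourceSketch(volume_m3=10.0)
src.failure_mode('Pressure vessel rupture', failure_rate_per_hr=5e-9, q_std_m3_per_hr=120.0)
print(src.leaks[0])
```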
Add a general failure mode to the leaks list. Store the failure rate, flow rate and expected time duration of the failure event. Failure modes are analyzed by the `Volume.odh` method. | def failure_mode(self, name, failure_rate, q_std, N=1):
self.leaks.append(
self._make_leak(name, failure_rate, q_std, N)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _make_leak(self, name, failure_rate, q_std, N):\n N_events = N * self.N\n tau = self.volume/q_std\n total_failure_rate = N_events*failure_rate\n total_failure_rate.ito(1/ureg.hr)\n return (name, total_failure_rate, q_std, tau.to(ureg.min), N_events)",
"def test_loss_hook(self, losses):\n self.runinfo[\"dev_losses\"].append(losses)",
"def dewar_insulation_failure(self, q_std):\n failure_rate = TABLE_1['Dewar']['Loss of vacuum']\n self.leaks.append(\n self._make_leak('Dewar insulation failure', failure_rate, q_std, 1))",
"def u_tube_failure(self, outer_tube, inner_tube, L, use_rate,\n fluid=None, N=1):\n # TODO Make areas adjustable, add info to docstring\n flow_path_cases = {'Small event': ht.piping.Annulus(outer_tube.ID,\n inner_tube.OD,\n L=L),\n 'Large event': outer_tube}\n for mode in TABLE_1['U-Tube change']:\n flow_path = flow_path_cases[mode]\n name = f'U-Tube {mode.lower()}: {flow_path}'\n failure_rate = TABLE_1['U-Tube change'][mode] * \\\n use_rate\n area = flow_path.area\n # TODO move this and gas leak check to separate method\n if area > outer_tube.area:\n logger.warning('Leak area cannot be larger'\n ' than outer tube area.')\n continue\n # If fluid not defined use fluid of the Source\n fluid = fluid or self.fluid\n q_std = Source._leak_flow(flow_path, area, fluid)\n self.leaks.append(\n self._make_leak(name, failure_rate, q_std, N))",
"def auditmemallocfailrate(self) :\n\t\ttry :\n\t\t\treturn self._auditmemallocfailrate\n\t\texcept Exception as e:\n\t\t\traise e",
"def fail(self, measRecord, error=None):\n if error is None:\n self.flagHandler.handleFailure(measRecord)\n else:\n self.flagHandler.handleFailure(measRecord, error.cpp)",
"def addFailure(self, test, err):\n\n super(ForceBalanceTestResult, self).addFailure(test,err)\n self.logger.warning(\"\\r\\x1b[31;1m\" + \"FAIL\" + \"\\x1b[0m \" + test.shortDescription() + \"\\n\")\n\n errorMessage = self.buildErrorMessage(test, err)\n\n for line in errorMessage.splitlines():\n self.logger.warning(\"\\t >\\t\" + line + \"\\n\")",
"def pipe_failure(self, tube, fluid=None, N_welds=1, max_flow=None):\n # If fluid not defined use fluid of the Source\n fluid = fluid or self.fluid\n # Failure rate coefficients; Piping failure rate is per unit of length,\n # weld is dependent on number of welds, pipe OD and wall thickness\n failure_rate_coeff = {'Piping': (tube.L, 1),\n 'Pipe weld': (tube.OD / tube.wall,\n N_welds)}\n # Piping and weld leaks as per Table 2\n for cause in ['Piping', 'Pipe weld']:\n for mode in TABLE_2[cause].keys():\n if tube.D > 2 or mode != 'Large leak': # Large leak only for D > 2\"\n name = f'{cause} {mode.lower()}: {tube}, ' + \\\n f'{tube.L.to(ureg.ft):.3g~}'\n temp_tube = copy(tube)\n # Average path for the flow will be half of piping length\n # for gas piping\n temp_tube.L = tube.L / 2\n fr_coef = failure_rate_coeff[cause][0]\n N_events = failure_rate_coeff[cause][1]\n if mode == 'Rupture':\n failure_rate = fr_coef * TABLE_2[cause][mode]\n # For rupture calculate flow through available\n # pipe area\n area = tube.area\n else:\n failure_rate = fr_coef * \\\n TABLE_2[cause][mode]['Failure rate']\n area = TABLE_2[cause][mode]['Area']\n if area > tube.area:\n logger.warning('Leak area cannot be larger'\n ' than pipe area.')\n continue\n q_std = Source._leak_flow(temp_tube, area, fluid)\n if max_flow is not None:\n fluid_NTP = fluid.copy()\n fluid_NTP.update_kw(P=ht.P_NTP, T=ht.T_NTP)\n q_std_max = max_flow / fluid_NTP.Dmass\n q_std = min(q_std, q_std_max)\n self.leaks.append(\n self._make_leak(name, failure_rate, q_std, N_events))",
"def pressure_vessel_failure(self, q_std_rupture, fluid=None):\n # If fluid not defined use fluid of the Source\n fluid = fluid or self.fluid\n for case, parameters in TABLE_2['Vessel, pressure'].items():\n name = 'Pressure vessel ' + case\n if isinstance(parameters, dict):\n area = parameters['Area']\n failure_rate = parameters['Failure rate']\n q_std = Source._leak_flow(ht.piping.Pipe(1, L=0*ureg.m), area,\n fluid)\n else:\n failure_rate = parameters\n q_std = q_std_rupture\n self.leaks.append(\n self._make_leak(name, failure_rate, q_std, 1))",
"def print_leaks(self):\n for key in sorted(self.leaks.keys()):\n print('Failure mode: '+key)\n print('Failure rate: {:.2~}'.format(self.leaks[key][0]))\n print('Flow rate: {:.2~}'.format(\n self.leaks[key][1].to(ureg.ft**3/ureg.min)))\n print('Event duration: {:.2~}'.format(self.leaks[key][2]))\n print()",
"def perf_mon_collection_failure_reason(self, perf_mon_collection_failure_reason):\n\n self._perf_mon_collection_failure_reason = perf_mon_collection_failure_reason",
"def addFailure(self, test, err):\n test.status = \"failed\"\n self._addError(test, err)",
"def test_extra_status(self):\n\n def handle(event):\n ds = Dataset()\n ds.Status = 0xFFF0\n ds.ErrorComment = \"Some comment\"\n ds.ErrorID = 12\n return ds\n\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n ae.network_timeout = 5\n ae.add_supported_context(BasicFilmSession)\n scp = ae.start_server(\n (\"localhost\", 11112), block=False, evt_handlers=[(evt.EVT_N_DELETE, handle)]\n )\n\n ae.add_requested_context(BasicFilmSession)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n\n status = assoc.send_n_delete(BasicFilmSession, \"1.2.840.10008.5.1.1.40.1\")\n assert status.Status == 0xFFF0\n assert status.ErrorComment == \"Some comment\"\n assert status.ErrorID == 12\n assoc.release()\n assert assoc.is_released\n\n scp.shutdown()",
"def snmpqosqos_error_libqos_api_failuresrate(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_error_libqos_api_failuresrate\n\t\texcept Exception as e:\n\t\t\traise e",
"def auditportallocfailrate(self) :\n\t\ttry :\n\t\t\treturn self._auditportallocfailrate\n\t\texcept Exception as e:\n\t\t\traise e",
"def _fatality_fan_powered(self, source, leak, sol_PFD, PFD_power_build):\n (failure_mode_name, leak_failure_rate, q_leak, tau, N) = leak\n for (P_fan, Q_fan, N_fan) in self.Fan_flowrates:\n # Probability of power on, ODH system working, and m number of fans\n # with flow rate Q_fan on.\n P_response = (1-PFD_power_build) * (1-self.PFD_ODH) * \\\n sol_PFD * P_fan\n P_i = leak_failure_rate * P_response\n O2_conc = conc_vent(self.volume, q_leak, Q_fan, tau)\n F_i = self._fatality_prob(O2_conc)\n phi_i = P_i*F_i\n f_mode = failure_mode(phi_i, source, failure_mode_name, O2_conc,\n leak_failure_rate, P_i, F_i,\n PFD_power_build == 1, q_leak, tau, Q_fan,\n N_fan, N)\n self.fail_modes.append(f_mode)",
"def addFailure(self, test, err):\n self.failure_count += 1\n self.total_count += 1\n unittest.TestResult.addFailure(self, test, err)\n _, _exc_str = self.failures[-1]\n output = self.complete_output()\n self.result.append((self.__class__.FAIL, test, output, _exc_str))\n if self.verbosity > 1:\n sys.stderr.write('F ')\n sys.stderr.write(str(test))\n sys.stderr.write('\\n')\n else:\n sys.stderr.write('F')",
"def test_prometheus_rule_failures():\n prometheus = ocs_ci.utility.prometheus.PrometheusAPI()\n alerts_response = prometheus.get(\n \"alerts\", payload={\"silenced\": False, \"inhibited\": False}\n )\n assert alerts_response.ok is True\n alerts = alerts_response.json()[\"data\"][\"alerts\"]\n log.info(f\"Prometheus Alerts: {alerts}\")\n assert constants.ALERT_PROMETHEUSRULEFAILURES not in [\n alert[\"labels\"][\"alertname\"] for alert in alerts\n ]",
"def test_launch_failures_hw(self):\n self.test_launch_failures()",
"def update_timeout_penalties_by_error(penalty_dict):\n if penalty_dict and isinstance(penalty_dict, dict):\n _TIMEOUT_PENALTIES_BY_ERR_NO.update(penalty_dict)",
"def addFailure(self, test, err, capt=None):\n exc_type, exc_val, tb = err\n tb = ''.join(traceback.format_exception(\n exc_type,\n exc_val if isinstance(exc_val, exc_type) else exc_type(exc_val),\n tb\n ))\n name = id_split(test.id())\n group = self.report_data[name[0]]\n self.stats['failures'] += 1\n group.stats['failures'] += 1\n group.tests.append({\n 'name': name[-1],\n 'failed': True,\n 'errtype': nice_classname(err[0]),\n 'message': exc_message(err),\n 'tb': tb,\n })",
"def useFailures(self):\n self.setupTests(tests = self.failures)",
"def create_failure(test, time, failure):\n info = _TestInfo(test, time)\n info._failure = failure\n return info",
"def testNoFailureFlag(self):\n schema = lsst.afw.table.SourceTable.makeMinimalSchema()\n\n # This is a FlagDefinition structure like a plugin might have\n flagDefs = FlagDefinitionList()\n FIRST = flagDefs.add(\"1st error\", \"this is the first failure type\")\n SECOND = flagDefs.add(\"2nd error\", \"this is the second failure type\")\n fh = FlagHandler.addFields(schema, \"test\", flagDefs)\n # Check to be sure that the FlagHandler was correctly initialized\n for index in range(len(flagDefs)):\n self.assertEqual(flagDefs.getDefinition(index).name, fh.getFlagName(index))\n\n catalog = lsst.afw.table.SourceCatalog(schema)\n\n # Now check to be sure that all of the known failures set the bits correctly\n record = catalog.addNew()\n fh.handleFailure(record)\n self.assertFalse(fh.getValue(record, FIRST.number))\n self.assertFalse(fh.getValue(record, SECOND.number))\n record = catalog.addNew()\n\n record = catalog.addNew()\n error = MeasurementError(FIRST.doc, FIRST.number)\n fh.handleFailure(record, error.cpp)\n self.assertTrue(fh.getValue(record, FIRST.number))\n self.assertFalse(fh.getValue(record, SECOND.number))\n\n record = catalog.addNew()\n error = MeasurementError(SECOND.doc, SECOND.number)\n fh.handleFailure(record, error.cpp)\n self.assertFalse(fh.getValue(record, FIRST.number))\n self.assertTrue(fh.getValue(record, SECOND.number))",
"def test_is_exipred_use_base(self):\n expired_pass = DoorPassFactory.create(device=self.device, use_limit=2, uses=2)\n good_pass = DoorPassFactory.create(device=self.device, use_limit=2, uses=0)\n self.assertTrue(expired_pass.is_expired())\n self.assertFalse(good_pass.is_expired())",
"def report(self, brief=True, sens=None):\n self.fail_modes.sort(key=lambda x: x.phi, reverse=True)\n sens = sens or SHOW_SENS\n title = f'ODH report for {self}'\n padding = len(title) + 10\n print('#'*padding)\n print(title)\n print('-'*padding)\n if brief:\n print('Printing brief ODH report')\n print(f'Only leaks with Fatality rate > {sens} are shown')\n for f_mode in self.fail_modes:\n if f_mode.phi >= sens or not brief:\n print()\n print(f' Source: {f_mode.source.name}')\n print(f' Failure: {f_mode.name}')\n print(f' Fatality rate: {f_mode.phi.to(1/ureg.hr):.2~}')\n print(f' Building is powered: {not f_mode.outage}')\n print(f' Oxygen concentration: {f_mode.O2_conc:.0%}, '\n f'{f_mode.O2_conc/0.21:.0%} percent of norm')\n print(f' Leak failure rate: {f_mode.leak_fr:.3g~}')\n print(' ODH protection PFD: '\n f'{(f_mode.P_i/f_mode.leak_fr).to(ureg.dimensionless):.2~}')\n print(f' Total failure rate: {f_mode.P_i.to(1/ureg.hr):.2~}')\n print(f' Leak rate: {f_mode.q_leak:.2~}')\n print(f' Event duration: {f_mode.tau:.2~}')\n print(f' Fans working: {f_mode.N_fan}')\n print(f' Fan rate: {f_mode.Q_fan:.2~}')\n print(f' Fatality prob: {f_mode.F_i:.0%}')",
"def test_running_measure_failing_checks(processor, measure_with_tools):\n measure_with_tools.pre_hooks['dummy'].fail_check = True\n processor.start_measure(measure_with_tools)\n\n process_and_assert(getattr, (processor, 'active'))\n\n process_and_join_thread(processor._thread)\n assert measure_with_tools.status == 'FAILED'\n assert 'checks' in measure_with_tools.infos\n m = processor.plugin.workbench.get_manifest('test.measure')\n assert not m.find('runtime_dummy1').collected\n assert not m.find('runtime_dummy2').collected",
"def constant_leak(self, name, q_std, N=1):\n # Failure rate assumes the volume instantly refilled\n # after being completely emptied, and continues release\n # Failure rate for constant leak doesn't depend on N or self.N\n # Dividing by self.N*N to undo _make_leak multiplication\n failure_rate = q_std/(self.volume*self.N*N)\n self.leaks.append(\n self._make_leak(name, failure_rate, N*q_std, N))",
"def addFailure(self, test, err, test_time=None):\n # Special case: Catch Twisted's skips that come thtrough as failures\n # and treat them as skips instead\n if len(err.traceback_lines) == 1:\n if err.traceback_lines[0].startswith(\"UnsupportedTrialFeature\"):\n reason = eval(err.traceback_lines[0][25:])[1]\n self.addSkip(test, reason)\n return\n\n test = proto_test(test)\n if test_time:\n test.test_time = str(test_time)\n err = proto_error(err)\n self.failures.append((test, err))\n self.all_errors.append((test, self.colors.error, \"Failure\", err))\n self._reportOutcome(test, \"F\", self.colors.failing, err)",
"def flange_failure(self, Pipe, fluid=None, N=1):\n # TODO Make leak and rupture areas adjustable, add info to docstring\n table = TABLE_2['Flange, reinforced gasket']\n area_cases = {\n 'Leak': table['Leak']['Area'],\n 'Rupture': Pipe.area}\n for mode in table:\n name = f'Flange {mode.lower()}: {Pipe}'\n if isinstance(table[mode], dict):\n failure_rate = table[mode]['Failure rate']\n else:\n failure_rate = table[mode]\n area = area_cases[mode]\n # TODO move this and gas leak check to separate method\n if area > Pipe.area:\n logger.warning('Leak area cannot be larger'\n ' than pipe area.')\n continue\n # If fluid not defined use fluid of the Source\n fluid = fluid or self.fluid\n q_std = Source._leak_flow(Pipe, area, fluid)\n self.leaks.append(\n self._make_leak(name, failure_rate, q_std, N))"
]
| [
"0.55107766",
"0.5364057",
"0.5345816",
"0.5153793",
"0.50932765",
"0.50351113",
"0.502037",
"0.49866393",
"0.49547714",
"0.4933598",
"0.4926014",
"0.4904773",
"0.48874328",
"0.48584226",
"0.4838802",
"0.48380056",
"0.47908464",
"0.47816858",
"0.47497073",
"0.4739524",
"0.4725641",
"0.47214267",
"0.47129935",
"0.46908718",
"0.4682348",
"0.4679693",
"0.4676013",
"0.46759456",
"0.46658456",
"0.46610177"
]
| 0.68871087 | 0 |
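Editorial note (not part of the dataset rows): the failure_mode record above is consumed by `Volume.odh`-style methods visible among the negatives (`_fatality_no_response`, `_fatality_fan_powered`), which multiply a leak's failure rate by the probability that mitigation fails and by a fatality probability derived from the resulting oxygen concentration. The sketch below illustrates that chain; `conc_vent` is written here as the standard well-mixed spill model and `fatality_prob` is a simple placeholder curve, both assumptions for illustration rather than the dataset's exact implementations.

```python
import math

def conc_vent(volume, q_leak, q_fan, tau):
    # Assumed well-mixed model: O2 fraction after a spill of duration tau into a
    # volume ventilated at q_fan, starting from 21% O2 (all rates in consistent units).
    return 0.21 / (q_leak + q_fan) * (q_fan + q_leak * math.exp(-(q_leak + q_fan) * tau / volume))

def fatality_prob(o2_conc):
    # Placeholder dose-response: no risk above 18% O2, certain fatality below 8.8%,
    # linear in between (the dataset's _fatality_prob may differ).
    if o2_conc >= 0.18:
        return 0.0
    if o2_conc <= 0.088:
        return 1.0
    return (0.18 - o2_conc) / (0.18 - 0.088)

# "No response" branch: leak occurs and the ODH mitigation fails to respond.
leak_failure_rate = 1e-6        # 1/hr, from a failure_mode() entry
p_no_response = 1e-2            # PFD of the ODH system / building power
volume, q_leak, q_fan, tau = 1000.0, 200.0, 100.0, 2.0   # m^3, m^3/hr, m^3/hr, hr

o2 = conc_vent(volume, q_leak, q_fan, tau)
phi = leak_failure_rate * p_no_response * fatality_prob(o2)
print(f"O2 fraction: {o2:.3f}, fatality rate: {phi:.2e} per hr")
```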
Calculate leak flow/release for a given piping element. For this calculation the gas is assumed to have no pressure loss on entry, since the entry geometry can differ between installations; this keeps the analysis simple and conservative. For the full pipe rupture case it is usually assumed that the release area is equal to the tube cross-section area. For other leak cases, the hole in the piping is considered to be a square-edged orifice. | def _leak_flow(cls, tube, area, fluid):
d = (4*area/math.pi)**0.5 # diameter for the leak opening
exit_ = ht.piping.Exit(d)
TempPiping = ht.piping.Piping(fluid)
TempPiping.add(
tube,
exit_,
)
if area != tube.area:
Hole = ht.piping.Orifice(d)
TempPiping.insert(1, Hole)
m_dot = TempPiping.m_dot(ht.P_NTP)
fluid_NTP = fluid.copy()
fluid_NTP.update_kw(P=ht.P_NTP, T=ht.T_NTP)
q_std = m_dot / fluid_NTP.Dmass
return q_std | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pipe_failure(self, tube, fluid=None, N_welds=1, max_flow=None):\n # If fluid not defined use fluid of the Source\n fluid = fluid or self.fluid\n # Failure rate coefficients; Piping failure rate is per unit of length,\n # weld is dependent on number of welds, pipe OD and wall thickness\n failure_rate_coeff = {'Piping': (tube.L, 1),\n 'Pipe weld': (tube.OD / tube.wall,\n N_welds)}\n # Piping and weld leaks as per Table 2\n for cause in ['Piping', 'Pipe weld']:\n for mode in TABLE_2[cause].keys():\n if tube.D > 2 or mode != 'Large leak': # Large leak only for D > 2\"\n name = f'{cause} {mode.lower()}: {tube}, ' + \\\n f'{tube.L.to(ureg.ft):.3g~}'\n temp_tube = copy(tube)\n # Average path for the flow will be half of piping length\n # for gas piping\n temp_tube.L = tube.L / 2\n fr_coef = failure_rate_coeff[cause][0]\n N_events = failure_rate_coeff[cause][1]\n if mode == 'Rupture':\n failure_rate = fr_coef * TABLE_2[cause][mode]\n # For rupture calculate flow through available\n # pipe area\n area = tube.area\n else:\n failure_rate = fr_coef * \\\n TABLE_2[cause][mode]['Failure rate']\n area = TABLE_2[cause][mode]['Area']\n if area > tube.area:\n logger.warning('Leak area cannot be larger'\n ' than pipe area.')\n continue\n q_std = Source._leak_flow(temp_tube, area, fluid)\n if max_flow is not None:\n fluid_NTP = fluid.copy()\n fluid_NTP.update_kw(P=ht.P_NTP, T=ht.T_NTP)\n q_std_max = max_flow / fluid_NTP.Dmass\n q_std = min(q_std, q_std_max)\n self.leaks.append(\n self._make_leak(name, failure_rate, q_std, N_events))",
"def bern_metric(pipe_diameter, delta_p, pipe_length):\n fr_c = 0.003 # assuming Reynolds number is 10**5 and pipe material is smooth copper\n fr_reyn = 0.046 / (reynolds_num(pipe_diameter, delta_p, pipe_length) ** 0.2) # Taitel and Dukler approximation\n rho = 1000 # density of water @ 4 deg celsius (kg/m**3)\n\n v = math.sqrt((2 * delta_p) / (rho * (4 * fr_reyn * (pipe_length / pipe_diameter) - 1)))\n flow_rate_turb = v * ((math.pi / 4) * (pipe_diameter ** 2))\n\n return flow_rate_turb, v",
"def pois_metric(pipe_diameter, delta_p, pipe_length):\n mu = 0.001 # water @ 25 degrees C\n pois = mu * 10\n flow_rate_lam = (math.pi * (pipe_diameter ** 4) * delta_p) / (128 * pois * pipe_length)\n\n return flow_rate_lam",
"def flange_failure(self, Pipe, fluid=None, N=1):\n # TODO Make leak and rupture areas adjustable, add info to docstring\n table = TABLE_2['Flange, reinforced gasket']\n area_cases = {\n 'Leak': table['Leak']['Area'],\n 'Rupture': Pipe.area}\n for mode in table:\n name = f'Flange {mode.lower()}: {Pipe}'\n if isinstance(table[mode], dict):\n failure_rate = table[mode]['Failure rate']\n else:\n failure_rate = table[mode]\n area = area_cases[mode]\n # TODO move this and gas leak check to separate method\n if area > Pipe.area:\n logger.warning('Leak area cannot be larger'\n ' than pipe area.')\n continue\n # If fluid not defined use fluid of the Source\n fluid = fluid or self.fluid\n q_std = Source._leak_flow(Pipe, area, fluid)\n self.leaks.append(\n self._make_leak(name, failure_rate, q_std, N))",
"def right_boundary(linkp, pn, H0, V0, H, V, links1, p, pump, valve, dt,\n H10, V10, utype, dtype,\n friction, dVdt, dVdx, dVdt10, dVdx10):\n\n # Properties of current pipe\n link1 = [p[abs(i)-1] for i in links1]\n f = linkp.roughness # unitless\n D = linkp.diameter # m\n g = 9.8 # m/s^2\n a = linkp.wavev # m/s\n n = linkp.number_of_segments # spatial discretization\n KD = linkp.roughness_height\n\n # inner nodes\n if friction == 'steady':\n H[1:-1], V[1:-1] = inner_node_steady(linkp, H0, V0, dt, g)\n elif friction == 'quasi-steady':\n H[1:-1], V[1:-1] = inner_node_quasisteady(linkp, H0, V0, dt, g)\n else:\n H[1:-1], V[1:-1] = inner_node_unsteady(linkp, H0, V0, dt, g,\n dVdx, dVdt)\n\n # Pipe start (inner boundary conditions)\n V1 = V10; H1 = H10 # upstream node\n V2 = V0[1]; H2 = H0[1] # downstream node\n dVdx1 = dVdx10 ; dVdx2 = dVdx[0]\n dVdt1 = dVdt10 ; dVdt2 = dVdt[1]\n if utype[0] == 'Pipe':\n if linkp.start_node.transient_node_type == 'SurgeTank':\n shape = linkp.start_node.tank_shape\n H[0], V[0], Qs = surge_tank(shape, link1, linkp,\n H1, V1, H2, V2, dt, g, 0, np.sign(links1), [-1],\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n linkp.start_node.water_level = H[0]\n linkp.start_node.tank_flow = Qs\n if linkp.start_node.transient_node_type == 'Chamber':\n shape = linkp.start_node.tank_shape\n H[0], V[0], Qs, zp = air_chamber(shape, link1, linkp,\n H1, V1, H2, V2, dt, g, 0, np.sign(links1), [-1],\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n linkp.start_node.water_level = zp\n linkp.start_node.tank_flow = Qs\n\n else:\n elev = linkp.start_node.elevation\n emitter_coeff = linkp.start_node.emitter_coeff + linkp.start_node.demand_coeff\n block_per = linkp.start_node.block_per\n H[0], V[0] = add_leakage(emitter_coeff, block_per,link1, linkp, elev,\n H1, V1, H2, V2, dt, g, 0, np.sign(links1), [-1],\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n\n elif utype[0] == 'Pump':\n pumpc = pump[0]\n H[0], V[0] = pump_node(pumpc, link1, linkp,\n H1, V1, H2, V2, dt, g, 0, np.sign(links1), [-1],\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n elif utype[0] == 'Valve':\n valvec = valve[0]\n H[0], V[0] = valve_node(valvec, link1, linkp,\n H1, V1, H2, V2, dt, g, 0, np.sign(links1), [-1],\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n\n # Pipe end (outer boundary conditions )\n V1 = V0[n-1]; H1 = H0[n-1]\n dVdx1 = dVdx[n-1]\n dVdt1 = dVdt[n-1]\n if dtype[0] == 'Reservoir' or dtype[0] == 'Tank':\n H[n], V[n] = rev_end (H1, V1, H[n], n, a, g, f, D, dt,\n KD, friction, dVdx1, dVdt1)\n if dtype[0] == 'Valve':\n H[n], V[n] = valve_end (H1, V1, V[n], n, a, g, f, D, dt,\n KD, friction, dVdx1, dVdt1)\n if dtype[0] == 'Junction':\n elev = linkp.end_node.elevation\n H[n], V[n] = dead_end (linkp ,H1, V1, elev, n, a, g, f, D, dt,\n KD, friction, dVdx1, dVdt1)\n\n\n return H, V",
"def get_flow(self, pressure_drop, coeff): \n flow = coeff * pressure_drop**0.5\n return flow",
"def bern_max_metric(pipe_diameter, delta_p):\n\n rho = 1000 # density of water kg/m^3\n flow_rate_max = ((math.pi * (pipe_diameter**2)) / 4) * math.sqrt((2 * delta_p) / rho)\n\n return flow_rate_max",
"def calc_per_flow_link_utilisation(self, flow: Tuple[int, int],\n demand: float,\n routing: np.ndarray) -> np.ndarray:\n edge_mapping = {edge: i for i, edge in\n enumerate(sorted(self.graph.edges))}\n\n link_utilisation = np.zeros(self.num_edges)\n node_flow = np.zeros(self.num_nodes) # the flow stored at a node\n node_flow[flow[0]] = demand\n\n to_explore = [flow[0]]\n while to_explore:\n current_node = to_explore.pop(0)\n current_flow = node_flow[current_node]\n\n # this is the flow destination node so we absorb all flow\n if current_node == flow[1]:\n node_flow[current_node] = 0.0\n continue\n\n # push the flow at this node over all edges\n for edge in self.graph.out_edges(current_node):\n edge_index = edge_mapping[edge]\n ratio = routing[edge_index]\n flow_to_send = ratio * current_flow\n # only send flow if greater than epsilon (so no 'infinite' loops)\n if flow_to_send > 1.e-8:\n node_flow[edge[1]] += ratio * current_flow\n # all important step, update our output\n link_utilisation[edge_index] += ratio * current_flow\n # have updated the dst so add it to the list of things to do\n to_explore.append(edge[1])\n # we've moved all the flow from this node now, so reset back to zero\n node_flow[current_node] = 0.0\n\n return link_utilisation",
"def test_pump_bypass_on_reverse_flow(use_numba):\n net = pandapipes.create_empty_network(\"net\", add_stdtypes=True)\n\n j1 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283.15)\n j2 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283.15)\n j3 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283.15)\n j4 = pandapipes.create_junction(net, pn_bar=5, tfluid_k=283.15)\n\n pandapipes.create_pipe(net, j1, j2, std_type='125_PE_80_SDR_11', k_mm=1., length_km=10)\n pandapipes.create_pipe(net, j3, j4, std_type='125_PE_80_SDR_11', k_mm=1., length_km=12)\n pandapipes.create_ext_grid(net, j1, 5, 283.15, type=\"p\")\n pandapipes.create_pump(net, j2, j3, std_type='P1')\n pandapipes.create_source(net, j4, 0.02333)\n\n pandapipes.create_fluid_from_lib(net, \"hgas\", overwrite=True)\n\n pandapipes.pipeflow(net, stop_condition=\"tol\", iter=3, friction_model=\"nikuradse\",\n mode=\"hydraulics\", transient=False, nonlinear_method=\"automatic\",\n tol_p=1e-4, tol_v=1e-4, use_numba=use_numba)\n\n assert net.res_pump.deltap_bar.isin([0]).all()\n assert np.isclose(net.res_junction.loc[1, \"p_bar\"], net.res_junction.loc[2, \"p_bar\"])",
"def __CalculateDispenseHeightFor(self, vial, volumeToTransport_uL, tipLength, \\\n usingFreeAirDispense):\n dispenseHeight = 0\n \n if usingFreeAirDispense or self.__TipIsForbiddenToEnter(vial):\n dispenseHeight = vial.getBasePosition() - tipLength - vial.getMaxHeight() \n else: \n dispenseHeight = vial.getBasePosition() - tipLength - \\\n vial.getMeniscusAfterAdding(volumeToTransport_uL) + \\\n float(self._m_Settings[Instrument.WickingDispenseDepthLabel]) \n\n #self.__logger.logDebug( \"@@@@@@@@@@@@@@@@@@@@@@@@@@@ __CalculateDispenseHeightFor %f %f %f %f %f \" % (usingFreeAirDispense, \\\n # vial.getBasePosition() , tipLength , vial.getMaxHeight(),\\\n # self.__ClipHeight (dispenseHeight, vial, tipLength)))\n \n return self.__ClipHeight (dispenseHeight, vial, tipLength)",
"def Piping(T_in, p_in, m_dot, d_inner, l_pipe, f, epsilon_pipe, T_shield, N):\r\n\r\n ## Estimation of the influence of the arcs\r\n # Calculation according to VDI Heatatlas 2013\r\n # Assumption isoenthalpic flow\r\n state_Arc = FlowRestriction(T_in, p_in, m_dot, d_inner, f)\r\n p_Arc = state_Arc.get(\"p\")\r\n T_Arc = state_Arc.get(\"T\")\r\n\r\n ## Estimation of the influence of thermal radiation on the compressible flow\r\n\r\n # Emission coefficent for an enclosed vessel\r\n # Assuming much bigger hot surface -> emissivity of hot surface doesnt matter anymore, just the cold one\r\n # Thus the simple equation can be used\r\n q_pipe = epsilon_pipe * sp.constants.Stefan_Boltzmann * (T_shield**4 - T_Arc**4) #W\r\n\r\n # Calling of the function SimplePipe\r\n state_out = SimplePipe(T_Arc, p_Arc, m_dot, d_inner, l_pipe, N, 0, q_pipe)\r\n #Transfer results\r\n p_out = state_out.get(\"p\")\r\n T_out = state_out.get(\"T\")\r\n h_out = state_out.get(\"h\")\r\n state_out = {\"h\": h_out, \"T\": T_out, \"p\": p_out}\r\n\r\n return state_out",
"def inner_pipe (linkp, pn, dt, links1, links2, utype, dtype, p,\n H0, V0, H, V, H10, V10, H20, V20, pump, valve,\n friction, dVdt, dVdx,\n dVdt10, dVdx10, dVdt20, dVdx20):\n\n # Properties of current pipe\n g = 9.8 # m/s^2\n link1 = [p[abs(i)-1] for i in links1]\n link2 = [p[abs(i)-1] for i in links2]\n n = linkp.number_of_segments # spatial discretization\n\n # inner nodes\n if friction == 'steady':\n H[1:-1], V[1:-1] = inner_node_steady(linkp, H0, V0, dt, g)\n elif friction == 'quasi-steady':\n H[1:-1], V[1:-1] = inner_node_quasisteady(linkp, H0, V0, dt, g)\n else:\n H[1:-1], V[1:-1] = inner_node_unsteady(linkp, H0, V0, dt, g,\n dVdx, dVdt)\n\n # Pipe start\n V1 = V10; H1 = H10 #list\n V2 = V0[1]; H2 = H0[1]\n dVdx1 = dVdx10 ; dVdt1 = dVdt10\n dVdx2 = dVdx[0]; dVdt2 = dVdt[1]\n\n if utype[0] == 'Pipe':\n if linkp.start_node.transient_node_type == 'SurgeTank':\n shape = linkp.start_node.tank_shape\n H[0], V[0], Qs = surge_tank(shape, link1, linkp,\n H1, V1, H2, V2, dt, g, 0, np.sign(links1), [-1],\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n linkp.start_node.water_level = H[0]\n linkp.start_node.tank_flow = Qs\n elif linkp.start_node.transient_node_type == 'Chamber':\n shape = linkp.start_node.tank_shape\n H[0], V[0], Qs, zp = air_chamber(shape, link1, linkp,\n H1, V1, H2, V2, dt, g, 0, np.sign(links1), [-1],\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n linkp.start_node.water_level = zp\n linkp.start_node.tank_flow = Qs\n else:\n elev = linkp.start_node.elevation\n emitter_coeff = linkp.start_node.emitter_coeff + linkp.start_node.demand_coeff\n block_per = linkp.start_node.block_per\n H[0], V[0] = add_leakage(emitter_coeff, block_per, link1, linkp, elev,\n H1, V1, H2, V2, dt, g, 0, np.sign(links1), [-1],\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n elif utype[0] == 'Pump':\n pumpc = pump[0]\n H[0], V[0] = pump_node(pumpc, link1, linkp,\n H1, V1, H2, V2, dt, g, 0, np.sign(links1), [-1],\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n elif utype[0] == 'Valve':\n valvec = valve[0]\n H[0], V[0] = valve_node(valvec, link1, linkp,\n H1, V1, H2, V2, dt, g, 0, np.sign(links1), [-1],\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n\n # Pipe end\n V1 = V0[n-1]; H1 = H0[n-1]\n V2 = V20; H2 = H20\n dVdx1 = dVdx[n-1] ; dVdt1 = dVdt[n-1]\n dVdx2 = dVdx20; dVdt2 = dVdt20\n if dtype[0] == 'Pipe':\n if linkp.end_node.transient_node_type == 'SurgeTank':\n shape = linkp.end_node.tank_shape\n H[n], V[n], Qs = surge_tank(shape, linkp, link2,\n H1, V1, H2, V2, dt, g, n, [1], np.sign(links2),\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n linkp.end_node.water_level = H[n]\n linkp.end_node.tank_flow = Qs\n elif linkp.end_node.transient_node_type == 'Chamber':\n shape = linkp.end_node.tank_shape\n H[n], V[n], Qs,zp = air_chamber(shape, linkp, link2,\n H1, V1, H2, V2, dt, g, n, [1], np.sign(links2),\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n linkp.end_node.water_level = zp\n linkp.end_node.tank_flow = Qs\n else:\n elev = linkp.end_node.elevation\n emitter_coeff = linkp.end_node.emitter_coeff + linkp.end_node.demand_coeff\n block_per = linkp.end_node.block_per\n H[n], V[n] = add_leakage(emitter_coeff, block_per,linkp, link2, elev,\n H1, V1, H2, V2, dt, g, n, [1], np.sign(links2),\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n elif dtype[0] == 'Pump':\n pumpc = pump[1]\n H[n], V[n] = pump_node(pumpc, linkp, link2,\n H1, V1, H2, V2, dt, g, n, [1], np.sign(links2),\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n\n elif dtype[0] == 'Valve':\n valvec = valve[1]\n H[n], V[n] = valve_node(valvec, linkp, link2,\n H1, V1, H2, V2, dt, g, n, [1], 
np.sign(links2),\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n return H, V",
"def pressure_leakage_at_t(self, time):\n t_d = self.param[\"res_k\"]*time\n t_d /= self.param[\"res_porosity\"]\n t_d /= self.param[\"viscosity\"]\n t_d /= self.param[\"compressibility\"]\n t_d /= self.param[\"frac_length\"]**2\n\n x_d = np.array([float(i)/self.number_of_segments \\\n for i in range(self.number_of_segments)])\n\n full_solution = gaver_stehfest(t_d, self.laplace_solution)\n\n return (zip(x_d*self.param[\"frac_length\"], \n full_solution[:-1]), \n full_solution[-1])",
"def run_cumulative_pipeline_damage(self):\n\t\t\"\"\" PWP1 = brittle\n\t\t\tPWP2 = ductile \"\"\"\n\n\t\trt = [100, 250, 500, 1000, 2500, 5000, 10000]\n\t\t# rt = [100]\n\n\t\tfor rt_val in rt:\n\t\t\tprint('\\tmc_pipe_dmg: cumulative rt_{}' .format(rt_val))\n\t\t\t# --- reading in damage results from above analysis\n\t\t\teq_damage_results_csv = os.path.join(self.mc_path, \n\t\t\t\t\t\t\t\t\t\t\t\t 'pipe_DS_eq_{}yr_{}.csv' \n\t\t\t\t\t\t\t\t\t\t\t\t .format(rt_val, retrofit_key))\n\t\t\ttsu_damage_results_csv = os.path.join(self.mc_path, \n\t\t\t\t\t\t\t\t\t\t\t\t 'pipe_DS_tsu_{}yr_{}.csv'\n\t\t\t\t\t\t\t\t\t\t\t\t .format(rt_val, retrofit_key))\n\t\t\teq_df = pd.read_csv(eq_damage_results_csv)\n\t\t\ttsu_df = pd.read_csv(tsu_damage_results_csv)\n\n\t\t\teq_df.set_index('guid', inplace=True)\n\t\t\ttsu_df.set_index('guid', inplace=True)\n\n\t\t\tcolumn_keys = list(eq_df.columns)\n\n\t\t\tcum_df = np.logical_or(eq_df.values, tsu_df.values).astype(int)\n\t\t\tcum_df = pd.DataFrame(cum_df, index=eq_df.index, columns=column_keys)\n\t\t\t\n\n\t\t\tresult_name = os.path.join(self.mc_path, \n\t\t\t\t\t\t\t\t\t 'pipe_DS_cumulative_{}yr_{}.csv' \n\t\t\t\t\t\t\t\t\t\t.format(rt_val, retrofit_key))\n\n\t\t\tcum_df.to_csv(result_name, index=True)",
"def process_flowrate_data(volume, time, delta_P):\n\n\t#a simple least squares residual objective function\n\tdef obj_fcn(flow_coeff, volume, max_delta_P, time, delta_P):\n\n\t\tmodeled_press = np.array([\n\t\t\tmodel_pressure(flow_coeff, volume, t, max_delta_P) for t in time\n\t\t])\n\t\tmodeled_press = modeled_press.flatten()\n\t\tdelta_P = np.array(delta_P)\n\n\t\tres = np.sqrt(np.sum((delta_P - modeled_press)**2))\n\n\t\treturn res\n\n\t#maximum delta P (just use the last few data points)\n\tmax_delta_P = np.median(delta_P[-10:])\n\n\tresult = minimize(\n\t\tobj_fcn,\n\t\t5.0,\n\t\targs=(volume, max_delta_P, time, delta_P),\n\t\tmethod='Nelder-Mead',\n\t\toptions={'maxiter':1000, 'disp':True}\n\t)\n\n\tflow_coeff = result['x'][0]\n\n\tmax_flow = (IDEAL_GAS * TEST_TEMPERATURE / STD_TEMPERATURE) / flow_coeff\n\n\treturn {\n\t\t'flow_coeff': flow_coeff,\n\t\t'max_delta_P': max_delta_P,\n\t\t'max_flow': max_flow\n\t}",
"def __CalculateAspirationHeightFor(self, vial, volumeToTransport_uL, tipLength):\n print \"### CWJ - Label %s, basePos : %d, tipLen :%d\"%( vial.getLabel(), vial.getBasePosition(), tipLength) \n print \"bdr - __CalculateAspirationHeightFor basepsn, tiplen, meniscus, asp_depthlbl\", vial.getBasePosition(),tipLength , vial.getMeniscusAfterRemoving(volumeToTransport_uL) ,float(self._m_Settings[Instrument.AspirationDepthLabel])\n aspirationHeight = vial.getBasePosition() - tipLength - vial.getMeniscusAfterRemoving(volumeToTransport_uL) + float(self._m_Settings[Instrument.AspirationDepthLabel])\n print \"bdr - __CalculateAspirationHeightFor height\", aspirationHeight\n return self.__ClipHeight (aspirationHeight, vial, tipLength, True)",
"def compute_flowrate(self, boundary_pressures, windkessel_pressures = None):\n # Extract the relevant pressures from the inputs.\n pin = boundary_pressures['in']\n pout = boundary_pressures['out']\n if windkessel_pressures is None:\n part = self.pressure['art']\n pven = self.pressure['ven']\n else:\n part = windkessel_pressures['art']\n pven = windkessel_pressures['ven']\n\n # Extract relevant model parameters.\n rven = self.parameters['venous_resistance']\n rart = self.parameters['arterial_resistance']\n rper = self.parameters['peripheral_resistance']\n\n # Compute the mitral, aortic, and peripheral flowrates.\n q = {'ven': max((pven - pout)/rven, 0.0),\n 'art': max((pin - part)/rart, 0.0),\n 'per': (part - pven)/rper}\n\n # Compute the LVAD flowrate if this model has an LVAD defined.\n if self._lvad is not None:\n # assume that inflow pressure is left ventricular pressure if LVAD is present\n cavity_pressures = {'lv': pin}\n windkessel_pressures = {'art': part,\n 'ven': pven}\n q['lvad'] = self._lvad.compute_flowrate(windkessel_pressures,\n cavity_pressures)['lvad']\n\n return q",
"def calc_lp(self, demands: Demand, routing: Routing) -> float:\n epsilon = self.epsilon\n\n # Create the linear solver with the GLOP backend.\n solver = pywraplp.Solver('flow_utilisation_lp',\n pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)\n\n ## VARIABLES\n # Flow variables, the amount of flow along each edge\n # stored as a list flow_variables[ith_flow][jth_edge]\n flow_variables = []\n for i in range(len(self.commodities)):\n flow_variable_edges = []\n for j in range(len(self.edges)):\n flow_variable_edges.append(\n solver.NumVar(0, solver.infinity(), '({},{})'.format(i, j)))\n flow_variables.append(flow_variable_edges)\n\n ## CONSTRAINTS\n # Flow from source constraint (net flow must equal demand)\n conservation_source_constraints = []\n for i, commodity in enumerate(self.commodities):\n # create constraint\n constraint_i = solver.Constraint(demands[i] - epsilon,\n demands[i] + epsilon,\n '(source,{})'.format(i))\n for edge_index in [self.edge_index_dict[edge] for edge in\n self.graph.out_edges(commodity[0])]:\n # out flow is positive\n constraint_i.SetCoefficient(flow_variables[i][edge_index], 1)\n for edge_index in [self.edge_index_dict[edge] for edge in\n self.graph.in_edges(commodity[0])]:\n # in flow is negative\n constraint_i.SetCoefficient(flow_variables[i][edge_index], -1)\n conservation_source_constraints.append(constraint_i)\n\n # Flow to sink constraint (in flow must equal demand, out must be zero)\n conservation_sink_constraints = []\n for i, commodity in enumerate(self.commodities):\n # create in flow constraint\n constraint_i_in = solver.Constraint(-demands[i] - epsilon,\n -demands[i] + epsilon,\n '(sink_in,{})'.format(i))\n for edge_index in [self.edge_index_dict[edge] for edge in\n self.graph.in_edges(commodity[1])]:\n # in flow is negative\n constraint_i_in.SetCoefficient(flow_variables[i][edge_index],\n -1)\n conservation_sink_constraints.append(constraint_i_in)\n\n constraint_i_out = solver.Constraint(0, 0,\n '(sink_out,{})'.format(i))\n for edge_index in [self.edge_index_dict[edge] for edge in\n self.graph.out_edges(commodity[1])]:\n # out flow is positive\n constraint_i_out.SetCoefficient(flow_variables[i][edge_index],\n 1)\n conservation_sink_constraints.append(constraint_i_out)\n\n # Flow at transit node constraint (net flow must be zero)\n conservation_transit_constraints = []\n for i, commodity in enumerate(self.commodities):\n constraints_flow_i = []\n for j in range(self.graph.number_of_nodes()):\n if j != commodity[0] and j != commodity[1]:\n # create constraint\n constraint_j = solver.Constraint(-epsilon, +epsilon,\n '(transit,{},{})'.format(i,\n j))\n for edge_index in [self.edge_index_dict[edge] for edge in\n self.graph.out_edges(j)]:\n # out flow is positive\n constraint_j.SetCoefficient(\n flow_variables[i][edge_index],\n 1)\n for edge_index in [self.edge_index_dict[edge] for edge in\n self.graph.in_edges(j)]:\n # in flow is negative\n constraint_j.SetCoefficient(\n flow_variables[i][edge_index],\n -1)\n constraints_flow_i.append(constraint_j)\n conservation_transit_constraints.append(constraints_flow_i)\n\n # Flow splitting at transit constraints (edge flow must be correct split of\n # in flow)\n splitting_ratio_constraints = []\n for i, commodity in enumerate(self.commodities):\n constraints_flow_i = []\n for j in range(self.graph.number_of_nodes()):\n # Sink has not such constraint and we handle source differently\n if j != commodity[1] and j != commodity[0]:\n in_edges = [self.edge_index_dict[edge] for edge in\n self.graph.in_edges(j)]\n out_edges = 
[self.edge_index_dict[edge] for edge in\n self.graph.out_edges(j)]\n\n # separate constraint for split of each out_edge taking into\n # account all in_edges\n for out_edge_index in out_edges:\n # create constraint\n constraint_edge = \\\n solver.Constraint(-epsilon, +epsilon,\n '(split,{},{},{})'.format(\n i, j,\n out_edge_index))\n split_ratio = routing[i][out_edge_index]\n # flow on out edge\n constraint_edge.SetCoefficient(\n flow_variables[i][out_edge_index], 1)\n for in_edge_index in in_edges:\n # should equal sum of flow on all in edges scaled by\n # split ratio\n constraint_edge.SetCoefficient(\n flow_variables[i][in_edge_index],\n -1 * split_ratio)\n constraints_flow_i.append(constraint_edge)\n splitting_ratio_constraints.append(constraints_flow_i)\n\n # Flow splitting at source constraints (edge flow must be correct split of\n # in flow + demand)\n source_splitting_constraints = []\n for i, commodity in enumerate(self.commodities):\n constraints_flow_i = []\n in_edges = [self.edge_index_dict[edge] for edge in\n self.graph.in_edges(commodity[0])]\n out_edges = [self.edge_index_dict[edge] for edge in\n self.graph.out_edges(commodity[0])]\n for out_edge_index in out_edges:\n # create constraint\n split_ratio = routing[i][out_edge_index]\n split_demand = split_ratio * demands[i]\n constraint_edge = \\\n solver.Constraint(split_demand - epsilon,\n split_demand + epsilon,\n '(split,{},{},{})'.format(i, j,\n out_edge_index))\n # flow on out edge\n constraint_edge.SetCoefficient(\n flow_variables[i][out_edge_index], 1)\n for in_edge_index in in_edges:\n # should equal sum of flow on all in edges scaled by split ratio\n constraint_edge.SetCoefficient(\n flow_variables[i][in_edge_index],\n -1 * split_ratio)\n constraints_flow_i.append(constraint_edge)\n source_splitting_constraints.append(constraints_flow_i)\n\n solver.Solve()\n\n result_status = solver.Solve()\n\n utilisation = np.zeros(\n (len(self.commodities), self.graph.number_of_edges()))\n # # extract the actual routing. Useful for debugging, maybe use to bootstrap\n # assignment = np.zeros(\n # (len(self.commodities), self.graph.number_of_edges()))\n\n # if routing is really that bad, just bail and give a sad result\n if result_status == solver.NOT_SOLVED or result_status == solver.INFEASIBLE:\n return 1.0\n\n for i in range(len(self.commodities)):\n for j in range(self.graph.number_of_edges()):\n utilisation[i][j] = flow_variables[i][j].solution_value() / \\\n self.edges[j][2]['weight']\n # assignment[i][j] = flow_variables[i][j].solution_value()\n\n return np.max(np.sum(utilisation, axis=0))",
"def _fatality_fan_powered(self, source, leak, sol_PFD, PFD_power_build):\n (failure_mode_name, leak_failure_rate, q_leak, tau, N) = leak\n for (P_fan, Q_fan, N_fan) in self.Fan_flowrates:\n # Probability of power on, ODH system working, and m number of fans\n # with flow rate Q_fan on.\n P_response = (1-PFD_power_build) * (1-self.PFD_ODH) * \\\n sol_PFD * P_fan\n P_i = leak_failure_rate * P_response\n O2_conc = conc_vent(self.volume, q_leak, Q_fan, tau)\n F_i = self._fatality_prob(O2_conc)\n phi_i = P_i*F_i\n f_mode = failure_mode(phi_i, source, failure_mode_name, O2_conc,\n leak_failure_rate, P_i, F_i,\n PFD_power_build == 1, q_leak, tau, Q_fan,\n N_fan, N)\n self.fail_modes.append(f_mode)",
"def update_flow(self):\n start_nodes = []\n end_nodes = []\n capacities = []\n # (1): add all edges (u, v) with capacity ub-lb\n B = self.get_max_lb()*(self.num_edges() - len(self) + 2)\n for arc in self.arc_info.keys():\n if self.arc_info[arc][\"upper_bound\"] == float('inf'):\n self.arc_info[arc][\"upper_bound\"] = B\n for arc in self.arc_info.keys():\n start_nodes.append(self.arc_info[arc][\"start\"])\n end_nodes.append(self.arc_info[arc][\"destin\"])\n capacities.append(int(self.arc_info[arc][\"upper_bound\"]\\\n - self.arc_info[arc][\"lower_bound\"]))\n # (2): add edge (t, s) with capacity B\n # B = max_lb * (m - n + 2)\n B = self.get_max_lb()*(self.num_edges() - len(self) + 2)\n if B == 0:\n #B = float('inf')\n B = 100000\n start_nodes.append(self.sink())\n end_nodes.append(self.source())\n capacities.append(int(B))\n # (3): for all verts, if exc > 0, add edge (s', v) with capacity exc(v),\n # and if exc < 0, add edge(s', v) with capacity -exc(v)\n s_prime = max(self.vertices) + 1\n t_prime = max(self.vertices) + 2\n print(\"s'={}, t'={}\".format(s_prime, t_prime))\n for v in self:\n #print(\"vert {} in arcs: {}\".format(v,\n # self.in_arcs_lists[v]))\n # compute exc: lower bounds of in - lower bounds of out\n sum_lb_in = 0\n for in_arc in self.in_arcs_lists[v]:\n sum_lb_in += self.arc_info[in_arc][\"lower_bound\"]\n sum_lb_out = 0\n #print(\"vert {} out arcs: {}\".format(v,\n # self.out_arcs_lists[v]))\n for out_arc in self.out_arcs_lists[v]:\n sum_lb_out += self.arc_info[out_arc][\"lower_bound\"]\n exc = sum_lb_in - sum_lb_out\n #print(\"exc is {}\".format(exc))\n if exc > 0:\n start_nodes.append(s_prime)\n end_nodes.append(v)\n capacities.append(int(exc))\n else:\n start_nodes.append(v)\n end_nodes.append(t_prime)\n capacities.append(int(-exc))\n # solve maxflow\n #print(\"s' is {} and t' is {}\".format(s_prime, t_prime))\n max_flow = pywrapgraph.SimpleMaxFlow()\n for u, v, cap in zip(start_nodes, end_nodes, capacities):\n #print(\"Adding edge {}, {} with cap {}\".format(u,v,cap))\n max_flow.AddArcWithCapacity(u, v, cap)\n success = True\n if max_flow.Solve(s_prime, t_prime) == max_flow.OPTIMAL:\n #print('Max flow: {}'.format( max_flow.OptimalFlow()))\n #print(' Arc Flow / Capacity')\n for i in range(max_flow.NumArcs()):\n # print('%1s -> %1s %3s / %3s' % (\n # max_flow.Tail(i),\n # max_flow.Head(i),\n # max_flow.Flow(i),\n # max_flow.Capacity(i)))\n # check that (s', v) edges are saturated (once we find a false,\n # stay false forever)\n if success:\n if max_flow.Tail(i) == s_prime:\n success = max_flow.Flow(i) == max_flow.Capacity(i)\n else:\n success = False\n print('There was an issue with the max flow input.')\n if success:\n # update the flows to be the flow found from maxflow problem\n for i in range(max_flow.NumArcs()):\n # if this is an original arc, update the flow\n if max_flow.Tail(i) != s_prime \\\n and max_flow.Head(i) != t_prime \\\n and not (max_flow.Tail(i) == self.sink() \\\n and max_flow.Head(i) == self.source()):\n # update arc\n start = max_flow.Tail(i)\n destin = max_flow.Head(i)\n arc = self.get_arc(start, destin)\n new_flow = self.arc_info[arc][\"lower_bound\"] + max_flow.Flow(i)\n old_flow = self.arc_info[arc][\"weight\"]\n self.arc_info[arc][\"weight\"] = new_flow\n #print(\"Edge {} {} adjusted from {} to {}\".format(\n # start,\n # destin,\n # old_flow,\n # new_flow\n # ))\n self.check_conservation_of_flow() # check that solution is valid\n return True\n else:\n return False",
"def left_boundary(linkp, pn, H, V, H0, V0, links2, p, pump, valve, dt,\n H20, V20, utype, dtype,\n friction, dVdt, dVdx, dVdt20, dVdx20) :\n\n link2 = [p[abs(i)-1] for i in links2]\n # Properties of current pipe\n f = linkp.roughness # unitless\n D = linkp.diameter # m\n g = 9.8 # m/s^2\n a = linkp.wavev # m/s\n n = linkp.number_of_segments # spatial discretization\n KD = linkp.roughness_height\n\n # inner nodes\n if friction == 'steady':\n H[1:-1], V[1:-1] = inner_node_steady(linkp, H0, V0, dt, g)\n elif friction == 'quasi-steady':\n H[1:-1], V[1:-1] = inner_node_quasisteady(linkp, H0, V0, dt, g)\n else:\n H[1:-1], V[1:-1] = inner_node_unsteady(linkp, H0, V0, dt, g,\n dVdx, dVdt)\n\n # Pipe start (outer boundayr conditions)\n V2 = V0[1]; H2 = H0[1]\n dVdx2 = dVdx[0]; dVdt2= dVdt[1]\n if utype[0] == 'Reservoir' or utype[0] == 'Tank':\n H[0], V[0] = rev_end (H2, V2, H[0], 0, a, g, f, D, dt,\n KD, friction, dVdx2, dVdt2)\n elif utype[0] == 'Valve':\n H[0], V[0] = valve_end (H2, V2, V[0], 0, a, g, f, D, dt,\n KD, friction, dVdx2, dVdt2)\n elif utype[0] == 'Junction':\n elev = linkp.start_node.elevation\n H[0], V[0] = dead_end (linkp , H2, V2, elev, 0, a, g, f, D, dt,\n KD, friction, dVdx2, dVdt2)\n elif utype[0] == 'Pump': #source pump\n H[0], V[0] = source_pump(pump[0], linkp, H2, V2, dt, g, [-1],\n friction, dVdx2, dVdt2)\n\n # Pipe end (inner boundary conditions)\n V1 = V0[n-1]; H1 = H0[n-1] # upstream node\n V2 = V20; H2 = H20 # downstream nodes\n dVdx1 = dVdx[n-1] ; dVdx2 = dVdx20\n dVdt1 = dVdt[n-1] ; dVdt2 = dVdt20\n\n if dtype[0] == 'Pipe':\n if linkp.end_node.transient_node_type == 'SurgeTank':\n shape = linkp.end_node.tank_shape\n H[n], V[n], Qs = surge_tank(shape, linkp, link2,\n H1, V1, H2, V2, dt, g, n, [1], np.sign(links2),\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n linkp.end_node.water_level = H[n]\n linkp.end_node.tank_flow = Qs\n\n elif linkp.end_node.transient_node_type == 'Chamber':\n shape = linkp.end_node.tank_shape\n H[n], V[n], Qs, zp = air_chamber(shape, linkp, link2,\n H1, V1, H2, V2, dt, g, n, [1], np.sign(links2),\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n linkp.end_node.water_level = zp\n linkp.end_node.tank_flow = Qs\n else:\n elev = linkp.end_node.elevation\n emitter_coeff = linkp.end_node.emitter_coeff + linkp.end_node.demand_coeff\n block_per = linkp.end_node.block_per\n H[n], V[n] = add_leakage(emitter_coeff, block_per,linkp, link2, elev,\n H1, V1, H2, V2, dt, g, n, [1], np.sign(links2),\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n\n elif dtype[0] == 'Pump':\n pumpc = pump[1]\n H[n], V[n] = pump_node(pumpc, linkp, link2,\n H1, V1, H2, V2, dt, g, n, [1], np.sign(links2),\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n\n elif dtype[0] == 'Valve':\n valvec = valve[1]\n if links2 == []:\n H[n], V[n] = valve_end (H1, V1, V[n], n, a, g, f, D, dt,\n KD, friction, dVdx1, dVdt1)\n else:\n H[n], V[n] = valve_node(valvec, linkp, link2,\n H1, V1, H2, V2, dt, g, n, [1], np.sign(links2),\n friction, dVdx1, dVdx2, dVdt1, dVdt2)\n\n elif dtype[0] == 'Junction':\n elev = linkp.end_node.elevation\n H[n], V[n] = dead_end (linkp, H1, V1, elev, n, a, g, f, D, dt,\n KD, friction, dVdx1, dVdt1)\n\n return H, V",
"def run_pipeline_damage(self, haz_type):\n\t\trt = [100, 250, 500, 1000, 2500, 5000, 10000]\n\n\t\t# Seaside pipes\n\t\tpipe_dataset_id = \"5d2666b5b9219c3c5595ee65\"\n\n\t\tif haz_type == 'eq':\n\t\t\thazard_type = \"earthquake\"\n\t\t\trt_hazard_dict = {100: \"5dfa4058b9219c934b64d495\", \n\t\t\t\t\t\t\t 250: \"5dfa41aab9219c934b64d4b2\",\n\t\t\t\t\t\t\t 500: \"5dfa4300b9219c934b64d4d0\",\n\t\t\t\t\t\t\t 1000: \"5dfa3e36b9219c934b64c231\",\n\t\t\t\t\t\t\t 2500: \"5dfa4417b9219c934b64d4d3\", \n\t\t\t\t\t\t\t 5000: \"5dfbca0cb9219c101fd8a58d\",\n\t\t\t\t\t\t\t 10000: \"5dfa51bfb9219c934b68e6c2\"}\n\n\t\t\tfragility_key = \"pgv\"\n\n\t\t\t# seaside pipe fragility mappng for EQ\n\t\t\tmapping_id = \"5b47c227337d4a38464efea8\"\n\t\t\tpipeline_dmg = PipelineDamageRepairRate(self.client)\n\n\t\telif haz_type == 'tsu':\n\n\t\t\thazard_type = \"tsunami\"\n\t\t\trt_hazard_dict = {100: \"5bc9e25ef7b08533c7e610dc\", \n\t\t\t\t\t\t\t 250: \"5df910abb9219cd00cf5f0a5\",\n\t\t\t\t\t\t\t 500: \"5df90e07b9219cd00ce971e7\",\n\t\t\t\t\t\t\t 1000: \"5df90137b9219cd00cb774ec\",\n\t\t\t\t\t\t\t 2500: \"5df90761b9219cd00ccff258\",\n\t\t\t\t\t\t\t 5000: \"5df90871b9219cd00ccff273\",\n\t\t\t\t\t\t\t 10000: \"5d27b986b9219c3c55ad37d0\"}\n\t\t\tfragility_key = \"Non-Retrofit inundationDepth Fragility ID Code\"\n\n\t\t\t# seaside pipe fragility mappng for tsunami\n\t\t\tmapping_id = \"5d320a87b9219c6d66398b45\"\n\t\t\tpipeline_dmg = PipelineDamage(self.client)\n\n\n\t\t# test tsunami pipeline\n\t\tpipeline_dmg.load_remote_input_dataset(\"pipeline\", pipe_dataset_id)\n\t\tpipeline_dmg.set_parameter(\"mapping_id\", mapping_id)\n\t\tpipeline_dmg.set_parameter(\"hazard_type\", hazard_type)\n\t\tpipeline_dmg.set_parameter(\"fragility_key\",fragility_key)\n\t\tpipeline_dmg.set_parameter(\"num_cpu\", 1)\n\n\t\tfor rt_val in rt:\n\t\t\tprint('\\tpipe_dmg: {} rt_{}' .format(haz_type, rt_val))\n\t\t\tresult_name = os.path.join(self.pipe_output_path, \n\t\t\t\t\t\t\t\t\t 'pipe_{}_{}yr_dmg' \n\t\t\t\t\t\t\t\t\t .format(haz_type, rt_val))\n\t\t\thazard_id = rt_hazard_dict[rt_val]\n\n\t\t\tpipeline_dmg.set_parameter(\"hazard_id\", hazard_id)\n\t\t\tpipeline_dmg.set_parameter(\"result_name\",result_name)\n\n\t\t\t# Run pipeline damage analysis\n\t\t\tresult = pipeline_dmg.run_analysis()",
"def test_optical_flow_warp_flyingchairs(self):\n self.single_warp_test_helper('pwcnet/warp/test_data/06530_flow.flo', 'pwcnet/warp/test_data/06530_img1.ppm',\n 'pwcnet/warp/test_data/06530_img2.ppm', 0.031)",
"def _split(self):\r\n \r\n temp = [self.upstream.demand]\r\n for item, p in zip(self.downstream, self.priority):\r\n temp.append(item.supply/p)\r\n \r\n flow = min(temp) # total flow\r\n \r\n self.upstream.outflow = flow\r\n \r\n for item, p in zip(self.downstream, self.priority):\r\n item.inflow = p * flow",
"def generate_calliope_supply(sources, timeframe, es_name):\n\n for source in sources:\n if len(source.outputs) > 1:\n msg = (\n f\"Supply '{source.uid.name}' has multple outputs: {list(source.outputs)}. \"\n f\"This can't be handled by Calliope \"\n f\"the way Tessif handleds them. \"\n )\n logger.warning(msg)\n\n if source.uid.name.lower() == 'supply':\n # conflict with calliope parent tech. Name change only affects\n # yaml and native calliope post processing. Tessif will sort out the previous name.\n source_name = f'{list(source.outputs)[0]}_{source.uid.name}'\n else:\n source_name = source.uid.name\n\n outputs, costs, grp_constraint = dict(), dict(), dict()\n\n for output_ in source.outputs:\n outputs['constraints'] = dict( # setting the defaults (might be adjusted in parse_flow_parameters)\n {\n 'energy_prod': True,\n 'resource': 'inf',\n 'resource_unit': 'energy',\n }\n )\n outputs['constraints'].update(\n parse_flow_parameters(source, output_, len(timeframe)))\n costs['costs'] = parse_cost_parameters(source, output_)\n\n if source.timeseries:\n timeseries_max = np.array(\n source.timeseries[output_].max).astype(float)\n timeseries_min = np.array(\n source.timeseries[output_].min).astype(float)\n timeseries = timeseries_max\n\n if (timeseries_max == timeseries_min).all():\n outputs['constraints'].update({'force_resource': True})\n\n if source.flow_rates[f'{output_}'].max == float('inf'):\n flow_max = max(timeseries)\n outputs['constraints']['energy_cap_min'] = float(flow_max)\n outputs['constraints']['energy_cap_max'] = float(flow_max)\n elif source.flow_rates[f'{output_}'].max == 0:\n msg = (\n f\"Source '{source.uid.name}' has flow_rates.max of 0 \"\n f\"and a timeseries given. Calliope handles timeseries relative \"\n f\"to flow_rates.max. 
Falling back to timeseries max instead of flow_rates max\"\n )\n logger.warning(msg)\n # assign this new so there doesnt need to be big adjustments for this case\n flow_max = max(timeseries)\n elif outputs['constraints']['energy_cap_min'] == float('inf'):\n flow_max = max(timeseries)\n else:\n flow_max = source.flow_rates[f'{output_}'].max\n\n if float(outputs['constraints']['energy_cap_min']) <= float(max(timeseries)):\n outputs['constraints']['energy_cap_min'] = float(\n max(timeseries))\n if not source.expandable[output_]:\n outputs['constraints']['energy_cap_max'] = float(\n max(timeseries))\n flow_max = max(timeseries)\n\n for i in range(len(timeseries)):\n timeseries[i] = timeseries[i] / flow_max\n timeseries = pd.DataFrame({'': timeframe, f'{source_name}': timeseries})\n\n timeseries.to_csv(\n os.path.join(\n write_dir, 'Calliope', f'{es_name}', 'timeseries_data', f'{source_name}.csv'), index=False)\n\n outputs['constraints'].update({'resource': f'file={source_name}.csv:{source_name}'})\n\n outputs['constraints'].update({'resource_unit': f'energy_per_cap'})\n outputs['constraints'].pop('energy_cap_min_use')\n\n if outputs['constraints']['energy_cap_min'] == float('inf'):\n outputs['constraints']['energy_cap_min'] = float(flow_max)\n\n if float(source.accumulated_amounts[output_].max) != float('inf'):\n grp_constraint.update({\n f'{source_name}_accumulated_amounts_max': dict(\n techs=[source_name],\n carrier_prod_max={f'{output_}': float(source.accumulated_amounts[output_].max)})\n })\n if float(source.accumulated_amounts[output_].min) != float(0):\n grp_constraint.update({\n f'{source_name}_accumulated_amounts_min': dict(\n techs=[source_name],\n carrier_prod_min={f'{output_}': float(source.accumulated_amounts[output_].min)})\n })\n\n if outputs['constraints']['energy_cap_min'] == float('inf'):\n outputs['constraints']['energy_cap_min'] = float(0)\n\n supply = dict()\n # giving the uid information that cant get recreated on any other way\n uid = f'{source.uid.name}.{source.uid.region}.{source.uid.sector}.{source.uid.carrier}.{source.uid.node_type}'\n\n supply[f'{source_name}'] = dict(\n essentials=dict(\n name=uid,\n # only needed for visualisation in native calliope tools\n color=str('#FF7700'),\n parent='supply',\n carrier_out=list(source.outputs)[0],\n ),\n )\n\n supply[f'{source_name}'].update(outputs)\n supply[f'{source_name}'].update(costs)\n\n # creating the location in which the storage is called\n loc = dict({\n f'{source_name} location': {\n 'coordinates': {'lat': float(source.uid.latitude), 'lon': float(source.uid.longitude)},\n 'techs': {f'{source_name}': None},\n }})\n\n yield supply, loc, grp_constraint",
"def clean_flow_cycles(flows: dict):\n for e, flow in flows.items():\n if flow > 0:\n reversed_e = (e[1], e[0], *e[2:])\n if reversed_e in flows and flows[reversed_e] > 0:\n reversed_flow = flows[reversed_e]\n cycle_flow = min(flow, reversed_flow)\n flows[e] -= cycle_flow\n flows[reversed_e] -= cycle_flow",
"def set_flow_corrected(self):\n self.exh.temp_v_press_fit = (\n np.polyfit(self.exh.pressure_drop[0:4],\n self.exh.T_array[0:4], 2) ) \n self.flow_data.T_hx = np.polyval(self.exh.temp_v_press_fit,\n self.flow_data.pressure_drop) \n self.flow_data.flow = ( self.flow_data.flow_trash *\n self.flow_data.T_hx / self.flow_data.T )",
"def FlowRestriction(T_in, p_in, m_dot_out, d_inner, f):\r\n\r\n # Cross section\r\n A_cross = (np.pi/4)*(d_inner**2)\r\n\r\n # Assumption isenthalpic flow!\r\n h_in = hp.HeCalc(9, 0, 1, p_in, 2, T_in, 1) #J/kg\r\n\r\n # Iteration for the calculation of p_out even though the influence is probably negligible\r\n # I checked it and for 20 bar it really is negligible\r\n dp = 0.0\r\n p_out = 0.0\r\n for i in range(5):\r\n p_out = p_in - dp\r\n T_out = hp.HeCalc(2, 0, 1, p_out, 9, h_in, 1)\r\n Rho_out = hp.HeCalc(3, 0, 1, p_out, 2, T_out, 1) #kg/m³\r\n # Velocity of the outgoing flow\r\n u_out = m_dot_out/(A_cross*Rho_out) #m/s\r\n\r\n # Calculation of the dp with Bernoulli equation and resistance coefficient (see VDI Heatatlas 2013)\r\n dp = f * Rho_out * 0.5 * u_out**2\r\n\r\n\r\n h_out = hp.HeCalc(9, 0, 1, p_out, 2, T_out, 1)\r\n state_out = {\"h\": h_out, \"T\": T_out, \"p\": p_out}\r\n return state_out",
"def test_detector1pipeline4(_bigdata):\n step = Detector1Pipeline()\n step.save_calibrated_ramp = True\n step.ipc.skip = True\n step.persistence.skip = True\n step.jump.rejection_threshold = 4.0\n step.ramp_fit.save_opt = False\n step.output_file = 'jw84600007001_02101_00001_nrs1_rate.fits'\n step.run(_bigdata+'/pipelines/jw84600007001_02101_00001_nrs1_uncal.fits')\n\n # Compare ramp product\n n_ramp = 'jw84600007001_02101_00001_nrs1_ramp.fits'\n h = fits.open( n_ramp )\n n_ref = _bigdata+'/pipelines/jw84600007001_02101_00001_nrs1_ramp_ref.fits'\n href = fits.open( n_ref )\n newh = fits.HDUList([h['primary'],h['sci'],h['err'],h['groupdq'],h['pixeldq']])\n newhref = fits.HDUList([href['primary'],href['sci'],href['err'],href['groupdq'],h['pixeldq']])\n result = fits.diff.FITSDiff(newh,\n newhref,\n ignore_keywords = ['DATE','CAL_VER','CAL_VCS','CRDS_VER','CRDS_CTX'],\n rtol = 0.00001\n )\n assert result.identical, result.report()\n\n # Compare countrate image product\n n_cr = 'jw84600007001_02101_00001_nrs1_rate.fits'\n h = fits.open( n_cr )\n n_ref = _bigdata+'/pipelines/jw84600007001_02101_00001_nrs1_rate_ref.fits'\n href = fits.open( n_ref )\n newh = fits.HDUList([h['primary'],h['sci'],h['err'],h['dq']])\n newhref = fits.HDUList([href['primary'],href['sci'],href['err'],href['dq']])\n result = fits.diff.FITSDiff(newh,\n newhref,\n ignore_keywords = ['DATE','CAL_VER','CAL_VCS','CRDS_VER','CRDS_CTX'],\n rtol = 0.00001\n )\n assert result.identical, result.report()",
"def _add_bal(self):\n\n c = self.components\n p = self.pipes\n\n # TODO No mass flow reversal yet\n if self.temperature_driven:\n\n lines = self.params['lines'].v()\n\n self.block.mix_temp = Var(self.TIME, lines)\n\n def _temp_bal_incoming(b, t, l):\n\n incoming_comps = collections.defaultdict(list)\n incoming_pipes = collections.defaultdict(list)\n\n for name, comp in c.items():\n if value(comp.get_mflo(t)) >= 0:\n incoming_comps['supply'].append(name)\n else:\n incoming_comps['return'].append(name)\n\n for name, pipe in p.items():\n if value(pipe.get_edge_mflo(self.name, t)) >= 0:\n incoming_pipes['supply'].append(name)\n else:\n incoming_pipes['return'].append(name)\n # Zero mass flow rate:\n if value(\n sum(c[comp].get_mflo(t) for comp in incoming_comps[l]) + \\\n sum(p[pipe].get_edge_mflo(self.name, t) for pipe in\n incoming_pipes[l])) == 0:\n # mixed temperature is average of all joined pipes, actual value should not matter,\n # because packages in pipes of this time step will have zero size and components do not take over\n # mixed temperature in case there is no mass flow\n\n return b.mix_temp[t, l] == (\n sum(c[comp].get_temperature(t, l) for comp in c) +\n sum(p[pipe].get_temperature(self.name, t, l) for\n pipe in p)) / (\n len(p) + len(c))\n\n\n else: # mass flow rate through the node\n return (sum(\n c[comp].get_mflo(t) for comp in incoming_comps[l]) +\n sum(p[pipe].get_edge_mflo(self.name, t) for pipe in\n incoming_pipes[l])) * b.mix_temp[t, l] == \\\n sum(c[comp].get_mflo(t) * c[comp].get_temperature(t,\n l)\n for comp in incoming_comps[l]) + \\\n sum(p[pipe].get_edge_mflo(self.name, t) * p[\n pipe].get_edge_temperature(self.name, t, l)\n for pipe in incoming_pipes[l])\n\n self.block.def_mixed_temp = Constraint(self.TIME,\n lines,\n rule=_temp_bal_incoming)\n\n def _temp_bal_outgoing(b, t, l, comp):\n\n outgoing_comps = collections.defaultdict(list)\n outgoing_pipes = collections.defaultdict(list)\n\n for name, comp_obj in c.items():\n if comp_obj.get_mflo(t) >= 0:\n outgoing_comps['return'].append(name)\n else:\n outgoing_comps['supply'].append(name)\n\n for name, pipe_obj in p.items():\n if pipe_obj.get_edge_mflo(self.name, t) >= 0:\n outgoing_pipes['return'].append(name)\n else:\n outgoing_pipes['supply'].append(name)\n\n if t == 0:\n return Constraint.Skip\n if comp in outgoing_pipes[l]:\n return p[comp].get_edge_temperature(self.name, t, l) == \\\n b.mix_temp[t, l]\n elif comp in outgoing_comps[l]:\n return c[comp].get_temperature(t, l) == b.mix_temp[t, l]\n else:\n return Constraint.Skip\n\n self.block.outgoing_temp_comps = Constraint(self.TIME,\n lines,\n c.keys(),\n rule=_temp_bal_outgoing)\n self.block.outgoing_temp_pipes = Constraint(self.TIME,\n lines,\n p.keys(),\n rule=_temp_bal_outgoing)\n\n elif self.repr_days is None:\n\n def _heat_bal(b, t):\n return 0 == sum(\n self.components[i].get_heat(t) for i in self.components) \\\n + sum(\n pipe.get_edge_heat(self.name, t) for pipe in p.values())\n\n self.block.ineq_heat_bal = Constraint(self.TIME,\n rule=_heat_bal)\n\n def _mass_bal(b, t):\n return 0 == sum(\n self.components[i].get_mflo(t) for i in self.components) \\\n + sum(\n pipe.get_edge_mflo(self.name, t) for pipe in p.values())\n\n self.block.ineq_mass_bal = Constraint(self.TIME,\n rule=_mass_bal)\n\n else:\n def _heat_bal(b, t, c):\n return 0 == sum(\n self.components[i].get_heat(t, c) for i in\n self.components) \\\n + sum(\n pipe.get_edge_heat(self.name, t, c) for pipe in p.values())\n\n self.block.ineq_heat_bal = Constraint(self.TIME, self.REPR_DAYS,\n 
rule=_heat_bal)\n\n def _mass_bal(b, t, c):\n return 0 == sum(\n self.components[i].get_mflo(t, c) for i in\n self.components) \\\n + sum(\n pipe.get_edge_mflo(self.name, t, c) for pipe in p.values())\n\n self.block.ineq_mass_bal = Constraint(self.TIME, self.REPR_DAYS,\n rule=_mass_bal)"
]
| [
"0.6381292",
"0.5661798",
"0.5626378",
"0.54133904",
"0.5259986",
"0.5236037",
"0.52057636",
"0.5197794",
"0.5169742",
"0.51044613",
"0.5099405",
"0.5072147",
"0.50551397",
"0.5022336",
"0.50140405",
"0.49782217",
"0.49536902",
"0.4939579",
"0.49342322",
"0.489776",
"0.48742488",
"0.4850784",
"0.4806796",
"0.47758335",
"0.47483826",
"0.4743606",
"0.4738785",
"0.47324836",
"0.47293544",
"0.4723838"
]
| 0.7203139 | 0 |
Format failure rate, flow rate and expected time duration of the failure event for a leak. | def _make_leak(self, name, failure_rate, q_std, N):
N_events = N * self.N
tau = self.volume/q_std
total_failure_rate = N_events*failure_rate
total_failure_rate.ito(1/ureg.hr)
return (name, total_failure_rate, q_std, tau.to(ureg.min), N_events) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def formatFailure(self, test, err):\n return self.formatError(test, err)",
"def test_manager_reports_duration_including_failure():\n\n httpretty.register_uri(\n httpretty.GET,\n \"http://test.com/long_failed\",\n body=httpretty_body_that_waits_and_returns(0.5, None),\n )\n\n report_path = os.path.join(REPORT_DIR, \"duration_report_with_failure.json\")\n spintest(\n [\"http://test.com\"],\n [\n # Fails but does not retry and is ignored\n {\"method\": \"GET\", \"route\": \"/long_failed\", \"delay\": 0, \"ignore\": True},\n ],\n generate_report=report_path,\n )\n spintest_reports = read_report(report_path)\n\n first_task_report = spintest_reports[0][\"reports\"][0]\n assert 0.5 <= first_task_report[\"duration_sec\"] <= 0.6\n\n total_duration = spintest_reports[0][\"total_duration_sec\"]\n assert 0.5 <= total_duration <= 0.6",
"def get_failure_rate(self) -> float:\n return self.failurerate",
"def failure_mode(self, name, failure_rate, q_std, N=1):\n self.leaks.append(\n self._make_leak(name, failure_rate, q_std, N))",
"def print_leaks(self):\n for key in sorted(self.leaks.keys()):\n print('Failure mode: '+key)\n print('Failure rate: {:.2~}'.format(self.leaks[key][0]))\n print('Flow rate: {:.2~}'.format(\n self.leaks[key][1].to(ureg.ft**3/ureg.min)))\n print('Event duration: {:.2~}'.format(self.leaks[key][2]))\n print()",
"def u_tube_failure(self, outer_tube, inner_tube, L, use_rate,\n fluid=None, N=1):\n # TODO Make areas adjustable, add info to docstring\n flow_path_cases = {'Small event': ht.piping.Annulus(outer_tube.ID,\n inner_tube.OD,\n L=L),\n 'Large event': outer_tube}\n for mode in TABLE_1['U-Tube change']:\n flow_path = flow_path_cases[mode]\n name = f'U-Tube {mode.lower()}: {flow_path}'\n failure_rate = TABLE_1['U-Tube change'][mode] * \\\n use_rate\n area = flow_path.area\n # TODO move this and gas leak check to separate method\n if area > outer_tube.area:\n logger.warning('Leak area cannot be larger'\n ' than outer tube area.')\n continue\n # If fluid not defined use fluid of the Source\n fluid = fluid or self.fluid\n q_std = Source._leak_flow(flow_path, area, fluid)\n self.leaks.append(\n self._make_leak(name, failure_rate, q_std, N))",
"def addFailure(self, test, err):\n\n super(ForceBalanceTestResult, self).addFailure(test,err)\n self.logger.warning(\"\\r\\x1b[31;1m\" + \"FAIL\" + \"\\x1b[0m \" + test.shortDescription() + \"\\n\")\n\n errorMessage = self.buildErrorMessage(test, err)\n\n for line in errorMessage.splitlines():\n self.logger.warning(\"\\t >\\t\" + line + \"\\n\")",
"def format_fail(self, *args):\n if self._pretty:\n return self.format_multiline_message(*args, color='red', start='[FAIL] ', multiline=' ~~ ')\n return self.format_multiline_message(*args)",
"def auditmemallocfailrate(self) :\n\t\ttry :\n\t\t\treturn self._auditmemallocfailrate\n\t\texcept Exception as e:\n\t\t\traise e",
"def auditportallocfailrate(self) :\n\t\ttry :\n\t\t\treturn self._auditportallocfailrate\n\t\texcept Exception as e:\n\t\t\traise e",
"def auditnsballocfailrate(self) :\n\t\ttry :\n\t\t\treturn self._auditnsballocfailrate\n\t\texcept Exception as e:\n\t\t\traise e",
"def annotate_error_diff(desc, stderr_e, stderr_a, stderr_e_strp, stderr_a_strp):\n id_str= \"%s_\" % desc\n result[id_str + \"stderr_expected\"] = stderr_e\n result[id_str + \"stderr_actual\"] = stderr_a\n result[id_str + \"stderr_expected_stripped\"]= stderr_e_strp\n result[id_str + \"stderr_actual_stripped\"] = stderr_a_strp\n result[id_str + \"stderr_stripped_diff\"] = '\\n'.join( difflib.ndiff( stderr_e_strp.splitlines(),\n stderr_a_strp.splitlines() ))\n result.fail(\"Expected error output from %s does not match actual error output.\" % desc)",
"def monitor_usage(records_output_csv, total_time, delay_time):\n total_time *= 60.0 # convert to seconds\n get_usage(total_time, delay_time, records_output_csv)",
"def formatError(self, test, err):\n test.capturedOutput = output = self.buffer\n self._buf = None\n if not output:\n # Don't return None as that will prevent other\n # formatters from formatting and remove earlier formatters\n # formats, instead return the err we got\n return err\n ec, ev, tb = err\n return (ec, self.addCaptureToErr(ev, output), tb)",
"def addFailure(self, test, err, capt=None, tb_info=None):\n taken = self._timeTaken()\n tb = ''.join(traceback.format_exception(*err))\n self.xunitstats[1] += 1\n try:\n id=test.shortDescription()\n if id is None:\n id = test.id()\n except AttributeError:\n id=''\n id = id.split('.')\n name = self._quoteattr(id[-1])\n systemout = ''\n# if test.capturedOutput is not None:\n# systemout = '<system-out><![CDATA['+escape_cdata(str(test.capturedOutput))+']]></system-out>'\n xml = \"\"\"<testcase classname=%(cls)s name=%(name)s time=\"%(taken)f\">\n%(systemout)s\n<failure type=%(errtype)s message=%(message)s><![CDATA[%(tb)s]]>\n</failure></testcase>\n\"\"\" %{'cls': self._quoteattr('.'.join(id[:-1])), 'name': self._quoteattr(name), 'taken': taken, 'errtype': self._quoteattr(nice_classname(err[0])), 'message': self._quoteattr(exc_message(err)), 'tb': escape_cdata(tb), 'systemout':systemout}\n self.addstream(xml)",
"def pipe_failure(self, tube, fluid=None, N_welds=1, max_flow=None):\n # If fluid not defined use fluid of the Source\n fluid = fluid or self.fluid\n # Failure rate coefficients; Piping failure rate is per unit of length,\n # weld is dependent on number of welds, pipe OD and wall thickness\n failure_rate_coeff = {'Piping': (tube.L, 1),\n 'Pipe weld': (tube.OD / tube.wall,\n N_welds)}\n # Piping and weld leaks as per Table 2\n for cause in ['Piping', 'Pipe weld']:\n for mode in TABLE_2[cause].keys():\n if tube.D > 2 or mode != 'Large leak': # Large leak only for D > 2\"\n name = f'{cause} {mode.lower()}: {tube}, ' + \\\n f'{tube.L.to(ureg.ft):.3g~}'\n temp_tube = copy(tube)\n # Average path for the flow will be half of piping length\n # for gas piping\n temp_tube.L = tube.L / 2\n fr_coef = failure_rate_coeff[cause][0]\n N_events = failure_rate_coeff[cause][1]\n if mode == 'Rupture':\n failure_rate = fr_coef * TABLE_2[cause][mode]\n # For rupture calculate flow through available\n # pipe area\n area = tube.area\n else:\n failure_rate = fr_coef * \\\n TABLE_2[cause][mode]['Failure rate']\n area = TABLE_2[cause][mode]['Area']\n if area > tube.area:\n logger.warning('Leak area cannot be larger'\n ' than pipe area.')\n continue\n q_std = Source._leak_flow(temp_tube, area, fluid)\n if max_flow is not None:\n fluid_NTP = fluid.copy()\n fluid_NTP.update_kw(P=ht.P_NTP, T=ht.T_NTP)\n q_std_max = max_flow / fluid_NTP.Dmass\n q_std = min(q_std, q_std_max)\n self.leaks.append(\n self._make_leak(name, failure_rate, q_std, N_events))",
"def auditlog32errsyslogallocnsbfailrate(self) :\n\t\ttry :\n\t\t\treturn self._auditlog32errsyslogallocnsbfailrate\n\t\texcept Exception as e:\n\t\t\traise e",
"def test_manager_reports_duration_including_delays_and_retries():\n\n httpretty.register_uri(\n httpretty.GET,\n \"http://test.com/long_500\",\n body=httpretty_body_that_waits_and_returns(0.1, [500, {}, \"Hello!\"]),\n )\n\n report_path = os.path.join(REPORT_DIR, \"duration_report_with_delay_and_retry.json\")\n spintest(\n [\"http://test.com\"],\n [\n # Errors and retries once with 1sec delay\n {\"method\": \"GET\", \"route\": \"/long_500\", \"retry\": 1, \"delay\": 1},\n ],\n generate_report=report_path,\n )\n spintest_reports = read_report(report_path)\n\n first_task_report = spintest_reports[0][\"reports\"][0]\n assert 1.2 <= first_task_report[\"duration_sec\"] <= 1.3\n\n total_duration = spintest_reports[0][\"total_duration_sec\"]\n assert 1.2 <= total_duration <= 1.3",
"async def test_failed_samples(self):\n self.set_source_parameter(\"test_result\", [\"failed\"])\n response = await self.collect(get_request_json_return_value=self.JMETER_JSON)\n self.assert_measurement(response, value=\"6\", entities=[])",
"def log_failure(self, request):\n self.log_file.write(self.TYPE_FAILURE + \",%f,,,%f,,\\n\" %\n (float(request.resources[0]['amount']),\n float(request.offer)))",
"def dewar_insulation_failure(self, q_std):\n failure_rate = TABLE_1['Dewar']['Loss of vacuum']\n self.leaks.append(\n self._make_leak('Dewar insulation failure', failure_rate, q_std, 1))",
"def test_runner_long_duration(caplog, replay_rate):\n\n caplog.set_level(logging.INFO)\n\n path = 'test_data/test_data.parquet'\n time_column = 'requesttimestamp'\n start_date = datetime.datetime(2020, 7, 10, 0, 1, 0)\n end_date = datetime.datetime(2020, 7, 10, 0, 5, 0)\n replay_rate = replay_rate\n bootstrap_servers = 'kafka:9092'\n topic = 'test_stream_2'\n\n fileconnector = ParquetFileConnector(path=path, time_column=time_column, \n start_date=start_date, end_date=end_date)\n\n fileconnector.startup_checks()\n\n publisher = KafkaPublisher(\n bootstrap_servers=bootstrap_servers,\n topic=topic\n )\n\n runner = CentralRunner(db_connection=fileconnector, \n output_system=publisher, \n start_time=start_date, \n end_time=end_date,\n replay_rate=replay_rate )\n\n start = time.perf_counter()\n \n runner.run()\n\n #publisher.close()\n\n end = time.perf_counter()\n\n code_time = end - start\n\n period_duration = (end_date - datetime.timedelta(seconds=replay_rate) - start_date).total_seconds()\n\n assert abs(code_time - period_duration/replay_rate) < 1\n\n #assert int(code_time) == (end_date - start_date).total_seconds() * replay_rate",
"def error_rate():\n query = \"\"\"select to_char(date, 'FMMonth DD, YYYY') as date,\n round(error_req::numeric/total_req*100, 2) as error_rate\n from daily_errorreq_totalreq\n where round(error_req::numeric/total_req*100, 2) > 1.00\"\"\"\n result_table = execute_query(query)\n\n # generate a report from table_to_report function\n report = table_to_report(result_table, '%')\n return \"Days Where Over 1% of Requests Leading to Errors:\\n\" + report",
"def record_failure(self, now=None) -> None:\n logging.info('Recording failure at %r', now or int(time.time()))\n self.failure_timestamp = now or int(time.time())\n self.put()",
"def pretty_str(self, unit_time):\n return \"{}: {} left and {}% done\".format(self.name, get_time_str(self.time * unit_time), round((self.total_time - self.time) / self.total_time * 100))",
"def _print_async_failure_report(terminalreporter, failed, validated):\n\n terminalreporter.line(\"\")\n terminalreporter.write_sep(\"=\", \"ASYNC FAILURE\" if len(failed) == 1 else \"ASYNC FAILURES\")\n\n # Create a tree filename > functions > fails\n files = collections.defaultdict(lambda: collections.defaultdict(list))\n for fail in failed:\n filename, _, function = fail.get_test_source_info()\n files[filename][function].append(fail)\n\n for filename, functions in files.items():\n\n for function, fails in functions.items():\n terminalreporter.write_sep(\"_\", function, red=True, bold=True)\n terminalreporter.line(\"\")\n fails = sorted(fails, key=lambda v: v.frame.lineno)\n\n latest_frame = fails[-1].frame\n lines, function_line = inspect.findsource(latest_frame[0])\n\n lines = [f\" {line[:-1]}\" for line in lines] # add padding, remove endline\n\n for fail in fails:\n line = lines[fail.frame.lineno - 1]\n lines[fail.frame.lineno - 1] = \"E\" + line[1:]\n\n lines = lines[function_line : latest_frame.lineno]\n\n for line in lines:\n terminalreporter.line(line, red=line.startswith(\"E\"), bold=line.startswith(\"E\"))\n\n terminalreporter.line(\"\")\n logs = []\n for fail in fails:\n filename = filename.replace(\"/app/\", \"\")\n\n terminalreporter.write(filename, bold=True, red=True)\n terminalreporter.line(\n f\":{fail.frame.lineno}: {m(fail.message)} not validated on {fail._interface} interface\"\n )\n\n logs += fail.logs\n\n if len(logs) != 0:\n terminalreporter.write_sep(\"-\", \"Captured stdout call\")\n f = get_log_formatter()\n for log in logs:\n terminalreporter.line(f.format(log))\n\n terminalreporter.line(\"\")\n terminalreporter.write_sep(\"=\", \"short async test summary info\")\n\n for filename, functions in files.items():\n for function, fails in functions.items():\n filename, klass, function = fails[0].get_test_source_info()\n terminalreporter.line(f\"FAILED {filename}::{klass}::{function} - {len(fails)} fails\")\n\n msg = \", \".join(\n [\n terminalreporter._tw.markup(f\"{len(failed)} failed\", red=True, bold=True),\n terminalreporter._tw.markup(f\"{len(validated)} passed\", green=True, bold=True),\n ]\n )\n\n terminalreporter.write_sep(\"=\", msg, fullwidth=terminalreporter._tw.fullwidth + 23, red=True)\n\n terminalreporter.line(\"\")",
"def flange_failure(self, Pipe, fluid=None, N=1):\n # TODO Make leak and rupture areas adjustable, add info to docstring\n table = TABLE_2['Flange, reinforced gasket']\n area_cases = {\n 'Leak': table['Leak']['Area'],\n 'Rupture': Pipe.area}\n for mode in table:\n name = f'Flange {mode.lower()}: {Pipe}'\n if isinstance(table[mode], dict):\n failure_rate = table[mode]['Failure rate']\n else:\n failure_rate = table[mode]\n area = area_cases[mode]\n # TODO move this and gas leak check to separate method\n if area > Pipe.area:\n logger.warning('Leak area cannot be larger'\n ' than pipe area.')\n continue\n # If fluid not defined use fluid of the Source\n fluid = fluid or self.fluid\n q_std = Source._leak_flow(Pipe, area, fluid)\n self.leaks.append(\n self._make_leak(name, failure_rate, q_std, N))",
"def _log_failed(cls, count):\n MetricLogger.log_metric(FUNCTION_NAME, MetricLogger.SQS_FAILED_RECORDS, count)",
"def submit_errors_metric(lambda_context):\n if not are_enhanced_metrics_enabled():\n return\n\n lambda_metric(\n \"{}.errors\".format(ENHANCED_METRICS_NAMESPACE_PREFIX),\n 1,\n tags=get_enhanced_metrics_tags(lambda_context),\n )",
"def test_log_time(time_taken, capsys, test_df):\n\n @log_step(time_taken=time_taken)\n def do_nothing(df, *args, **kwargs):\n return df\n\n test_df.pipe(do_nothing)\n\n captured = capsys.readouterr()\n print_statements = captured.out.split(\"\\n\")\n\n assert (\"time=\" in print_statements[0]) == time_taken"
]
| [
"0.6140188",
"0.5897113",
"0.585269",
"0.5849131",
"0.5757911",
"0.5635867",
"0.55230266",
"0.55227655",
"0.55061984",
"0.5505019",
"0.54786354",
"0.54182583",
"0.5416461",
"0.5405261",
"0.5389772",
"0.53731793",
"0.5275777",
"0.52040815",
"0.519116",
"0.51494825",
"0.5131447",
"0.5105165",
"0.50949216",
"0.5089083",
"0.5083708",
"0.5081333",
"0.5050622",
"0.5038529",
"0.50328594",
"0.5031199"
]
| 0.6359496 | 0 |
Combine several ODH sources sharing volume. Can be used for failure modes affecting several sources in parallel. | def combine(name, sources):
fluid = ht.ThermState(sources[0].fluid.name, T=ht.T_NTP, P=ht.P_NTP)
if all([source.fluid.name == fluid.name for source in sources]):
total_volume = sum([source.volume for source in sources])
return Source(name, fluid, total_volume)
else:
print('\nAll volumes should contain the same fluid')
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def perform_combination(sonar_model, input_paths, output_path, engine):\n # TODO: there should be compression option for the combined file too...\n\n def coerce_type(ds, group):\n if group == 'Beam':\n if sonar_model == 'EK80':\n ds['transceiver_software_version'] = ds['transceiver_software_version'].astype('<U10')\n ds['channel_id'] = ds['channel_id'].astype('<U50')\n elif sonar_model == 'EK60':\n ds['gpt_software_version'] = ds['gpt_software_version'].astype('<U10')\n ds['channel_id'] = ds['channel_id'].astype('<U50')\n\n print(f\"{dt.now().strftime('%H:%M:%S')} combining files...\")\n\n # TODO: add in the documentation that the Top-level and Sonar groups are\n # combined by taking values (attributes) from the first file\n # Combine Top-level group, use values from the first file\n with xr.open_dataset(input_paths[0], engine=engine) as ds_top:\n io.save_file(ds_top, path=output_path, mode='w', engine=engine)\n\n # Combine Sonar group, use values from the first file\n with xr.open_dataset(input_paths[0], group='Sonar', engine=engine) as ds_sonar:\n io.save_file(ds_sonar, path=output_path, mode='a', engine=engine, group='Sonar')\n\n # Combine Provenance group,\n ds_prov = assemble_combined_provenance(input_paths)\n io.save_file(ds_prov, path=output_path, mode='a', engine=engine, group='Provenance')\n\n # TODO: Put the following in docs:\n # Right now we follow xr.combine_by_coords default to only combine files\n # with nicely monotonically varying ping_time/location_time/mru_time.\n # However we know there are lots of problems with pings going backward in time for EK60/EK80 files,\n # and we will need to clean up data before calling merge.\n # Combine Beam\n with xr.open_mfdataset(input_paths, group='Beam',\n concat_dim='ping_time', data_vars='minimal', engine=engine) as ds_beam:\n coerce_type(ds_beam, 'Beam')\n io.save_file(ds_beam.chunk({'range_bin': DEFAULT_CHUNK_SIZE['range_bin'],\n 'ping_time': DEFAULT_CHUNK_SIZE['ping_time']}), # these chunk sizes are ad-hoc\n path=output_path, mode='a', engine=engine, group='Beam')\n\n # Combine Environment group\n with xr.open_mfdataset(input_paths, group='Environment',\n concat_dim='ping_time', data_vars='minimal', engine=engine) as ds_env:\n io.save_file(ds_env.chunk({'ping_time': DEFAULT_CHUNK_SIZE['ping_time']}),\n path=output_path, mode='a', engine=engine, group='Environment')\n\n # Combine Platform group\n if sonar_model == 'AZFP':\n with xr.open_mfdataset(input_paths, group='Platform',\n combine='nested', # nested since this is more like merge and no dim to concat\n compat='identical', engine=engine) as ds_plat:\n io.save_file(ds_plat, path=output_path, mode='a', engine=engine, group='Platform')\n elif sonar_model == 'EK60':\n with xr.open_mfdataset(input_paths, group='Platform',\n concat_dim=['location_time', 'ping_time'],\n data_vars='minimal', engine=engine) as ds_plat:\n io.save_file(ds_plat.chunk({'location_time': DEFAULT_CHUNK_SIZE['ping_time'],\n 'ping_time': DEFAULT_CHUNK_SIZE['ping_time']}),\n path=output_path, mode='a', engine=engine, group='Platform')\n elif sonar_model in ['EK80', 'EA640']:\n with xr.open_mfdataset(input_paths, group='Platform',\n concat_dim=['location_time', 'mru_time'],\n data_vars='minimal', engine=engine) as ds_plat:\n io.save_file(ds_plat.chunk({'location_time': DEFAULT_CHUNK_SIZE['ping_time'],\n 'mru_time': DEFAULT_CHUNK_SIZE['ping_time']}),\n path=output_path, mode='a', engine=engine, group='Platform')\n\n # Combine Platform/NMEA group\n if sonar_model in ['EK60', 'EK80', 'EA640']:\n with 
xr.open_mfdataset(input_paths, group='Platform/NMEA',\n concat_dim='location_time', data_vars='minimal', engine=engine) as ds_nmea:\n io.save_file(ds_nmea.chunk({'location_time': DEFAULT_CHUNK_SIZE['ping_time']}).astype('str'),\n path=output_path, mode='a', engine=engine, group='Platform/NMEA')\n\n # Combine Vendor-specific group\n if sonar_model == 'AZFP':\n with xr.open_mfdataset(input_paths, group='Vendor',\n concat_dim=['ping_time', 'frequency'],\n data_vars='minimal', engine=engine) as ds_vend:\n io.save_file(ds_vend, path=output_path, mode='a', engine=engine, group='Vendor')\n else:\n with xr.open_mfdataset(input_paths, group='Vendor',\n combine='nested', # nested since this is more like merge and no dim to concat\n compat='no_conflicts', data_vars='minimal', engine=engine) as ds_vend:\n io.save_file(ds_vend, path=output_path, mode='a', engine=engine, group='Vendor')\n\n # TODO: print out which group combination errors out and raise appropriate error\n\n print(f\"{dt.now().strftime('%H:%M:%S')} all files combined into {output_path}\")",
"def transfer(self, source, dest, volume, one_source=False, one_tip=False,\n mix_after=False, mix_before=False, mix_vol=None,\n repetitions=10, flowrate=\"100:microliter/second\"):\n source = WellGroup(source)\n dest = WellGroup(dest)\n opts = []\n if len(source.wells) > 1 and len(dest.wells) == 1:\n dest = WellGroup(dest.wells * len(source.wells))\n if isinstance(volume,str) or isinstance(volume, Unit):\n volume = [Unit.fromstring(volume)] * len(dest.wells)\n elif isinstance(volume, list) and len(volume) == len(dest.wells):\n volume = map(lambda x: Unit.fromstring(x), volume)\n else:\n raise RuntimeError(\"Unless the same volume of liquid is being \"\n \"transferred to each destination well, each \"\n \"destination well must have a corresponding \"\n \"volume\")\n if (len(volume) != len (dest.wells)) and (len(dest.wells) != len(volume)) and not one_source:\n raise RuntimeError(\"To transfer liquid from multiple wells \"\n \"containing the same source, set one_source to \"\n \"True. Otherwise, you must specify the same \"\n \"number of source and destinationi wells to \"\n \"do a one-to-one transfer.\")\n elif one_source:\n sources = []\n for idx, d in enumerate(dest.wells):\n for s in source.wells:\n while s.volume > volume[idx] and (len(sources) < len(dest.wells)):\n sources.append(s)\n s.volume -= volume[idx]\n source = WellGroup(sources)\n\n for s,d,v in list(zip(source.wells, dest.wells, volume)):\n if mix_after and not mix_vol:\n mix_vol = v\n if v > Unit(900, \"microliter\"):\n diff = Unit.fromstring(vol) - Unit(900, \"microliter\")\n self.transfer(s, d, \"900:microliter\", mix_after,\n mix_vol, repetitions, flowrate)\n self.transfer(s, d, diff, one_source, one_tip, mix_after,\n mix_vol, repetitions, flowrate)\n xfer = {\n \"from\": s,\n \"to\": d,\n \"volume\": v\n }\n if mix_before:\n xfer[\"mix_before\"] = {\n \"volume\": mix_vol,\n \"repetitions\": repetitions,\n \"speed\": flowrate\n }\n if mix_after:\n xfer[\"mix_after\"] = {\n \"volume\": mix_vol,\n \"repetitions\": repetitions,\n \"speed\": flowrate\n }\n opts.append(xfer)\n if d.volume:\n d.volume += v\n else:\n d.volume = v\n if s.volume:\n s.volume -= v\n if one_tip:\n self.append(Pipette([{\"transfer\": opts}]))\n else:\n for x in opts:\n self.pipette([{\"transfer\": [x]}])",
"def run(self):\n # Get data objects (in a dict) from the controller process \n dataDict = self.controller.recv()\n self.orderedStreams = dataDict['orderedStreams']\n\n ID = None\n data = None\n output_compressed = set()\n output_normal = set()\n while self.clients:\n result = self.resultQ.get()\n if result is None:\n self.clients -= 1\n continue\n ID, data = result\n for pDict in data:\n if pDict['gzipped']:\n for filename in pDict['files']:\n output_compressed.add(filename)\n else:\n for filename in pDict['files']:\n output_normal.add(filename)\n for filename in pDict['files']:\n self.outputfiles.add(filename)\n \n self.integrateStats(data)\n self.Counter.value += len(data)\n\n # Now concatenate any output files together\n if self.heartbeat is not None:\n self.heartbeat.message(\"Beginning file block merging..\", True)\n\n fcount = 0\n blkavg = 0\n for extension in ('sam', \n 'pp.sam',\n '1.fastq',\n '2.fastq',\n 'pp.1.fastq',\n 'pp.2.fastq',\n 'sh.fastq',\n 'sh.pp.fastq'):\n fc,ba = self.concatenate(output_compressed, extension, do_gzip=True)\n fcount += fc\n blkavg += ba\n fc,ba = self.concatenate(output_normal, extension, do_gzip=False)\n fcount += fc\n blkavg += ba\n\n if self.heartbeat is not None and fcount > 0:\n self.heartbeat.message(\n \"Merged %d blocks (avg) in each of %d output files\" % \n (int(round(blkavg * 1.0 / fcount)), fcount), True)\n \n\n # Send updated data (stats mainly) via the pipe directly back to\n # the MPController object, close filehandles and finish up.\n self.updateObjectsToController()",
"def distribute(self, source, dest, volume, allow_carryover=False,\n mix_before=False, mix_vol=None, repetitions=10,\n flowrate=\"100:microliter/second\"):\n opts = {}\n dists = self.fill_wells(dest, source, volume)\n groups = []\n for d in dists:\n opts = {}\n if mix_before:\n if not mix_vol:\n raise RuntimeError(\"No mix volume specified for \"\n \"mix_before\")\n opts[\"mix_before\"] = {\n \"volume\": mix_vol,\n \"repetitions\": repetitions,\n \"speed\": flowrate\n }\n if allow_carryover:\n opts[\"allow_carryover\"] = allow_carryover\n opts[\"from\"] = d[\"from\"]\n opts[\"to\"] = d[\"to\"]\n groups.append(\n {\"distribute\": opts}\n )\n\n self.pipette(groups)",
"def read_combine_elia_activated_energy(path,status):\r\n #loop, read in and combine all data files into one \"combined_data\"\r\n i=0\r\n dfsprice = []\r\n dfsvol = []\r\n data_files_price = glob.glob(path + 'ActivatedEnergyPrices*')\r\n data_files_volume = glob.glob(path + 'ActivatedEnergyVolumes*')\r\n print(str(datetime.datetime.utcnow()) + \" amount of files to combine: \" + str(len(data_files_volume)+len(data_files_price)))\r\n \r\n for file1 in data_files_price:\r\n i=i+1\r\n print(str(datetime.datetime.utcnow()) + \" processing file number: \"+ str(i))\r\n df1 = read_elia_activated_energy_prices(file1,status)\r\n dfsprice.append(df1)\r\n combined_data_price = pd.concat(dfsprice, axis = 0)\r\n \r\n #remove \"NRV in MW\" column, because it is duplicate \r\n combined_data_price = combined_data_price.drop(combined_data_price.columns[7], axis=1)\r\n \r\n for file2 in data_files_volume:\r\n i=i+1\r\n print(str(datetime.datetime.utcnow()) + \" processing file number: \"+ str(i))\r\n df2 = read_elia_activated_energy_volumes(file2,status)\r\n dfsvol.append(df2)\r\n combined_data_vol = pd.concat(dfsvol, axis = 0)\r\n \r\n result = pd.concat([combined_data_price, combined_data_vol], axis=1)\r\n result.reset_index(inplace=True)\r\n result[\"Timestamp\"]=pd.to_datetime(result[\"Timestamp\"],format=(\"%d/%m/%Y %H:%M\"))\r\n result=result.set_index(\"Timestamp\")\r\n print(str(datetime.datetime.utcnow()) + \" finished\")\r\n return result",
"def features_combine():\n\n\n\t# PROCESSING AUDIO",
"def _load_sources(self):\n self.point_sources= []\n if os.path.exists(os.path.join(self.folder,'pickle.zip')):\n pzip = zipfile.ZipFile(os.path.join(self.folder,'pickle.zip'))\n files = ['pickle/HP12_%04d.pickle' %i for i in range(1728)]\n assert all(f in pzip.namelist() for f in files), 'Improper model zip file'\n opener = pzip.open\n else:\n files = glob.glob(os.path.join(self.folder, 'pickle', '*.pickle'))\n files.sort()\n opener = open\n self.nside = int(np.sqrt(len(files)/12))\n if len(files) != 12*self.nside**2:\n msg = 'Number of pickled ROI files, %d, found in folder %s, not consistent with HEALpix' \\\n % (len(files),os.path.join(self.folder, 'pickle'))\n raise Exception(msg)\n \n ####self.global_sources = sources.GlobalSourceList() # allocate list to index parameters for global sources\n self.extended_sources=[] # list of unique extended sources\n self.changed=set() # to keep track of extended models that are different from catalog\n moved=0\n nfreed = 0\n self.tagged=set()\n source_names =[]\n for i,file in enumerate(files):\n p = pickle.load(opener(file))\n index = int(os.path.splitext(file)[0][-4:])\n assert i==index, 'logic error: file name %s inconsistent with expected index %d' % (file, i)\n roi_sources = p.get('sources', {}) # don't know why this needed\n extended_names = {} if (self.__dict__.get('extended_catalog') is None) else self.extended_catalog.names\n for key,item in roi_sources.items():\n if key in extended_names: continue\n if key in source_names:\n #if not self.quiet: print ('SkyModel warning: source with name %s in ROI %d duplicates previous entry: ignored'%(key, i))\n continue\n source_names.append(key)\n skydir = item['skydir']\n if self.update_positions is not None:\n ellipse = item.get('ellipse', None)\n ts = item['ts']\n if ellipse is not None and not np.any(np.isnan(ellipse)) :\n fit_ra, fit_dec, a, b, ang, qual, delta_ts = ellipse\n if qual<5 and a < 0.2 and \\\n ts>self.update_positions and delta_ts>0.1:\n skydir = SkyDir(float(fit_ra),float(fit_dec))\n moved +=1\n self.tagged.add(i)\n \n ps = sources.PointSource(name=key,\n skydir=skydir, model= sources.convert_model(item['model']),\n ts=item['ts'],band_ts=item['band_ts'], index=index)\n if sources.validate(ps,self.nside, self.filter):\n self._check_position(ps) # check that it is not coincident with previous source(warning for now?)\n self.point_sources.append( ps)\n # make a list of extended sources used in the model \n names = p.get('diffuse_names')\n for name, oldmodel in zip(names, p['diffuse']):\n model = sources.convert_model(oldmodel) # convert from old Model version if necessary \n key = name.split('_')[0]\n if key in self.diffuse_dict:\n self.diffuse_dict.add_model(index, name, model)\n elif self.extended_catalog_name=='ignore': \n continue\n else:\n try:\n es = self.extended_catalog.lookup(name) if self.extended_catalog is not None else None\n except Exception as msg:\n print ('Skymodel: Failed to create model for %s' %name)\n raise\n if es is None:\n #raise Exception( 'Extended source %s not found in extended catalog' %name)\n print ('SkyModel warning: Extended source %s not found in extended catalog, removing' %name)\n continue\n if self.hpindex(es.skydir)!=index: continue\n \n if es.model.name!=model.name:\n if name not in self.changed:\n if not self.quiet: print ('SkyModel warning: catalog model %s changed from %s for source %s: keeping change'%\\\n (es.model.name, model.name, name))\n self.changed.add(name)\n es.smodel=es.model=model #update with current fit values always\n if 
sources.validate(es,self.nside, self.filter): #lambda x: True): \n self.extended_sources.append(es)\n # check for new extended sources not yet in model\n self._check_for_extended()\n if self.update_positions and moved>0:\n print ('updated positions of %d sources, healpix ids in tagged' % moved)",
"def merge_datasets(dslist):\n # We use a variant of our fast stitching routine\n # So first create a sorted list of angles and source files\n container = []\n print 'Passed %d datasets for merging ' % len(dslist)\n proc_info = \"\"\"This dataset was created by collating points from multiple datasets. Data reduction \n information for the individual source datasets is as follows:\"\"\"\n title_info = \"Merge:\"\n for num,dataset in enumerate(dslist):\n storage_info = zip(dataset.axes[0],dataset.storage,dataset.var.storage)\n container.extend(storage_info)\n proc_info += \"\\n\\n===Dataset %s===\\n\" % str(dataset.title)\n try:\n proc_info += dataset.harvest_metadata(\"CIF\")[\"_pd_proc_info_data_reduction\"]\n except KeyError:\n pass\n title_info = title_info + dataset.title + ':'\n # So we have a list of angle,intensity,variance triples which we sort on angle\n container = sorted(container, key=lambda(angle,intensity,variance):angle)\n angles = map(lambda (a,b,c):a,container)\n intensities = map(lambda (a,b,c):b,container)\n variances = map(lambda (a,b,c):c,container)\n rs = Dataset(intensities)\n rs.var = variances\n rs.axes[0] = angles\n rs.axes[0].title = 'Two theta (degrees)'\n rs.title = title_info\n # Add metadata\n AddCifMetadata.add_standard_metadata(rs)\n rs.add_metadata(\"_pd_proc_info_data_reduction\",proc_info,\"CIF\")\n return rs",
"def calculator_build_events_from_sources(self):\n gsims_by_trt = self.calculator.csm.full_lt.get_gsims_by_trt()\n print('FetchOpenQuake: self.calculator.csm.src_groups = ')\n print(self.calculator.csm.src_groups)\n sources = self.calculator.csm.get_sources()\n print('FetchOpenQuake: sources = ')\n print(sources)\n for src in sources:\n src.nsites = 1 # avoid 0 weight\n src.num_ruptures = src.count_ruptures()\n maxweight = sum(sg.weight for sg in self.calculator.csm.src_groups) / (\n self.calculator.oqparam.concurrent_tasks or 1)\n print('FetchOpenQuake: weights = ')\n print([sg.weight for sg in self.calculator.csm.src_groups])\n print('FetchOpenQuake: maxweight = ')\n print(maxweight)\n eff_ruptures = general.AccumDict(accum=0) # trt => potential ruptures\n calc_times = general.AccumDict(accum=np.zeros(3, np.float32)) # nr, ns, dt\n allargs = []\n if self.calculator.oqparam.is_ucerf():\n # manage the filtering in a special way\n for sg in self.calculator.csm.src_groups:\n for src in sg:\n src.src_filter = self.calculator.srcfilter\n srcfilter = calc.filters.nofilter # otherwise it would be ultra-slow\n else:\n srcfilter = self.calculator.srcfilter\n logging.info('Building ruptures')\n for sg in self.calculator.csm.src_groups:\n if not sg.sources:\n continue\n logging.info('Sending %s', sg)\n par = self.calculator.param.copy()\n par['gsims'] = gsims_by_trt[sg.trt]\n for src_group in sg.split(maxweight):\n allargs.append((src_group, srcfilter, par))\n\n smap = [] \n for curargs in allargs:\n smap.append(calc.stochastic.sample_ruptures(curargs[0], curargs[1], curargs[2]))\n\n print('smap = ')\n print(smap)\n self.calculator.nruptures = 0\n mon = self.calculator.monitor('saving ruptures')\n for tmp in smap:\n dic = next(tmp)\n print(dic)\n # NB: dic should be a dictionary, but when the calculation dies\n # for an OOM it can become None, thus giving a very confusing error\n if dic is None:\n raise MemoryError('You ran out of memory!')\n rup_array = dic['rup_array']\n if len(rup_array) == 0:\n continue\n if dic['calc_times']:\n calc_times += dic['calc_times']\n if dic['eff_ruptures']:\n eff_ruptures += dic['eff_ruptures']\n with mon:\n n = len(rup_array)\n rup_array['id'] = np.arange(\n self.calculator.nruptures, self.calculator.nruptures + n)\n self.calculator.nruptures += n\n hdf5.extend(self.calculator.datastore['ruptures'], rup_array)\n hdf5.extend(self.calculator.datastore['rupgeoms'], rup_array.geom)\n\n if len(self.calculator.datastore['ruptures']) == 0:\n raise RuntimeError('No ruptures were generated, perhaps the '\n 'investigation time is too short')\n\n # must be called before storing the events\n self.calculator.store_rlz_info(eff_ruptures) # store full_lt\n self.calculator.store_source_info(calc_times)\n imp = commonlib.calc.RuptureImporter(self.calculator.datastore)\n print('self.calculator.datastore.getitem(ruptures)')\n print(self.calculator.datastore.getitem('ruptures'))\n with self.calculator.monitor('saving ruptures and events'):\n imp.import_rups_events(self.calculator.datastore.getitem('ruptures')[()], getters.get_rupture_getters)",
"def combine( rasterio_rst_1, rasterio_rst_2, combine_list, output_filename ):\n\tmeta = rasterio_rst_1.meta\n\n\twith rasterio.open( output_filename, \n\t\t\t\t\t\tmode='w', \n\t\t\t\t\t\tdriver='GTiff', \n\t\t\t\t\t\twidth=meta['width'], \n\t\t\t\t\t\theight=meta['height'], \n\t\t\t\t\t\tcount=meta['count'], \n\t\t\t\t\t\tdtype=meta['dtype'], \n\t\t\t\t\t\tnodata=meta['nodata'], \n\t\t\t\t\t\tcrs=meta['crs'], \n\t\t\t\t\t\ttransform=meta['transform'] ) as out_rst:\n\n\t\tassert len(set(rasterio_rst_1.block_shapes)) == 1\n\n\t\tfor idx,window in rasterio_rst_1.block_windows( 1 ):\n\t\t\tout_band = out_rst.read_band( 1, window=window ) \n\t\t\tout_band[ out_band != 0 ] = 0\n\t\t\trst1_band = rasterio_rst_1.read_band( 1, window=window )\n\t\t\trst2_band = rasterio_rst_2.read_band( 1, window=window )\n\t\t\t\n\t\t\tfor comb in combine_list:\n\t\t\t\tout_band[ np.logical_and( rst1_band == comb[0], rst2_band == comb[1] ) ] = comb[2]\n\t\t\t\n\t\t\tout_rst.write_band( 1, out_band, window=window )\n\treturn rasterio.open( output_filename )",
"def combine_data_main(data1,data2,lookup,foutput):\n\n # Get the maximum number of ortholog probesets we'll have to append\n max_orthologs = 0\n for probe_set_id in data1.keys():\n max_orthologs = max(max_orthologs,len(lookup(probe_set_id)))\n logging.debug(\"Max_orthologs = %d\" % max_orthologs)\n \n # Write header line\n line = [data1.header()]\n for i in range(1,max_orthologs+1):\n logging.debug(\"Adding header set #%d\" % i)\n for item in data2.header().split('\\t'): line.append(\"%s_%s\" % (item,i))\n foutput.write(\"%s\\n\" % '\\t'.join(line))\n\n # Append data\n for probe_set_id in data1.keys():\n # Build line to output to file\n line = [data1.fetch(probe_set_id)]\n # Get the corresponding ortholog probe set ID(s)\n logging.debug(\"Processing probe set ID %s\" % probe_set_id)\n for ortholog_probe_set_id in lookup(probe_set_id):\n ortholog_data = data2.fetch(ortholog_probe_set_id)\n if ortholog_data is not None:\n line.append(ortholog_data)\n # Write line to file\n foutput.write(\"%s\\n\" % '\\t'.join(line))",
"def resolve(labeled_volumes, volume_dim, voxel_dim, expansion_params, optics_params):\n #Create volume\n volumes = []\n #Resolve each channel one by one\n #Make sure they're sorted by name for consistency\n channels = sorted(optics_params['channels'].keys())\n for channel in channels:\n print \"Resolving {}\".format(channel)\n channel_vol = np.zeros(volume_dim, np.uint32)\n channel_params = optics_params['channels'][channel]\n #Each fluorophore may produce photons in the given channel\n for fluorophore in labeled_volumes:\n #Merge parameters\n params = optics_params.copy()\n params.update(channel_params)\n #Compute photon count\n mean_photon = mean_photons(fluorophore, **params)\n #Only spend time convolving if the fluorophore is not orthogonal to\n #this channel\n if mean_photon > 0:\n fluo_vol = np.zeros(volume_dim, np.float64)\n Z, X, Y = np.nonzero(labeled_volumes[fluorophore])\n photons = np.random.poisson(mean_photon, size = len(Z)).astype(np.uint32)\n photons = np.multiply(labeled_volumes[fluorophore][Z, X, Y], photons)\n np.add.at(fluo_vol, (Z, X, Y), photons)\n #Convolve with point spread\n psf_vol = psf_volume(voxel_dim, expansion_params['factor'], fluorophore, **params)\n (d, w, h) = psf_vol.shape\n #Resize fluo_vol for convolution\n fluo_vol = np.pad(fluo_vol, ((d / 2, d /2), (w / 2, w /2), (h / 2, h / 2)), 'reflect')\n channel_vol += np.round(fftconvolve(fluo_vol, psf_vol, 'valid')).astype(np.uint32)\n #Add noise\n channel_vol += baseline_volume(channel_vol.shape, **optics_params)\n #Optical scaling\n channel_vol = scale(channel_vol, voxel_dim, expansion_params['factor'], **optics_params)\n #Normalize\n channel_vol = normalize(channel_vol)\n volumes.append(channel_vol)\n\n return volumes",
"def combine_host():\n lines = []\n for path in [google_hosts, my_hosts]:\n with open(path, 'r') as f:\n lines += f.readlines()\n with open(output_hosts, 'w') as f:\n f.writelines(line for line in lines)",
"def _cat_multi_vol_zip(src, dst):\n concat_cmd = \"zip -s 0 {} --out {}\".format(src, dst)\n os.system(concat_cmd)",
"def apply_merge(volume, volumes, merge_directions):\n \n def get_new_volume(volume, lowcorner):\n v2 = get_volume(lowcorner)\n if v2 != None:\n return merge_volumes(volume, v2)\n else:\n return volume\n\n def get_volume(lowcorner):\n if not isinstance(lowcorner, tuple):\n raise TypeError() # required for \"==\"\n\n for i in range(len(volumes)):\n v = volumes[i]\n if v.p1 == lowcorner:\n logger.debug(\"\\tMerging volume with low corner %s\", v.p1)\n return volumes.pop(i)\n \n logger.warning(\"\\tNo volume to merge with\")\n return None\n\n import copy\n\n logger.debug(\"\\t== Function == apply_merge\")\n\n p1, p2 = volume.get_corners()\n logger.debug(\"\\tTargetting volume with low corner %s\", p1)\n\n if len(merge_directions) == 1:\n if Axes.k in merge_directions:\n p1_target = list(copy.deepcopy(p1))\n p1_target[Axes.k.value] = p2[Axes.k.value]\n new_volume = get_new_volume(volume, tuple(p1_target))\n\n elif Axes.j in merge_directions:\n p1_target = list(copy.deepcopy(p1))\n p1_target[Axes.j.value] = p2[Axes.j.value]\n new_volume = get_new_volume(volume, tuple(p1_target))\n\n elif Axes.i in merge_directions:\n p1_target = list(copy.deepcopy(p1))\n p1_target[Axes.i.value] = p2[Axes.i.value]\n new_volume = get_new_volume(volume, tuple(p1_target))\n\n elif len(merge_directions) == 2:\n logger.debug(\"\\tMerge directions: %s\", merge_directions)\n axis1, axis2 = merge_directions\n\n p1_target = list(copy.deepcopy(p1))\n p1_target[axis1.value] = p2[axis1.value]\n volume_axis1 = get_new_volume(volume, tuple(p1_target))\n\n new_volume_axis1 = apply_merge(volume_axis1, volumes, [axis2])\n new_volume_axis2 = apply_merge(volume, volumes, [axis2])\n new_volume = merge_volumes(new_volume_axis1, new_volume_axis2)\n\n elif len(merge_directions) == 3:\n logger.debug(\"\\tMerge directions %s\", merge_directions)\n axis1, axis2, axis3 = merge_directions\n \n p1_target = list(copy.deepcopy(p1))\n p1_target[axis1.value] = p2[axis1.value]\n volume_axis1 = get_new_volume(volume, tuple(p1_target))\n\n new_vol1 = apply_merge(volume, volumes, [axis2, axis3])\n new_vol2 = apply_merge(volume_axis1, volumes, [axis2, axis3])\n new_volume = merge_volumes(new_vol1, new_vol2)\n\n else:\n raise ValueError()\n\n logger.debug(\"\\tEnd\")\n return new_volume",
"def test_adding_sources():\n s1 = magpy.magnet.Cuboid()\n s2 = magpy.magnet.Cylinder()\n s3 = magpy.magnet.CylinderSegment()\n s4 = magpy.magnet.Sphere()\n s5 = magpy.current.Loop()\n s6 = magpy.current.Line()\n s7 = magpy.misc.Dipole()\n x1 = magpy.Sensor()\n c1 = magpy.Collection()\n c2 = magpy.Collection()\n\n for obj in [s1, s2, s3, s4, s5, s6, s7, x1, c1]:\n c2.add(obj)\n\n strs = \"\"\n for src in c2:\n strs += str(src)[:3]\n\n assert strs == \"CubCylCylSphLooLinDipSenCol\"",
"def combine_netCDF_rh_cmip6(directory, model):\n\n # Make a list of the files in the above folder to loop through\n list_files = glob.glob(directory)\n list_files = np.array(list_files)\n newlist = np.sort(list_files)\n\n # Make a cubelist to add each file (cube) to\n Cubelist = iris.cube.CubeList([])\n\n # loop for each file in newlist\n for i in range(0, len(newlist)):\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', FutureWarning)\n warnings.simplefilter('ignore', UserWarning)\n \n # Load cube\n cube = iris.load_cube(newlist[i])\n \n # matching all standard names\n cube.standard_name = 'heterotrophic_respiration_carbon_flux'\n\n # matching cube metadata\n if i == 0:\n metadata1 = cube.metadata\n else:\n cube.metadata = metadata1\n \n # creating latitude and longitude bounds\n if model=='IPSL-CM6A-LR' or model=='CNRM-ESM2-1':\n cube.coord('latitude').guess_bounds()\n cube.coord('longitude').guess_bounds()\n \n # removing time attributes\n if model=='IPSL-CM6A-LR':\n cube.coord('time').attributes.pop('time_origin')\n \n # Append this cube to the cubelist\n Cubelist.append(cube)\n\n # matching attributes\n unify_time_units(Cubelist)\n equalise_attributes(Cubelist)\n # Concatenate each cube in cubelist together to make one data file (cube)\n new_cube = Cubelist.concatenate_cube()\n \n for cube in Cubelist:\n lon_bounds = Cubelist[0].coord('longitude').bounds\n cube.coord('longitude').bounds = lon_bounds\n\n for i, cube in enumerate(Cubelist):\n if cube.coord('time').units == Cubelist[0].coord('time').units:\n pass\n else:\n print(i)\n\n return new_cube",
"def merge(self , station = '' , datasets = ''):\n \n \n \n a = self.initialize_data( station = station, datasets = datasets ) # reading the input files \n dummy = self.merge_all_data() \n logging.info('*** Finished merging, now writing the output netCDF file ***' ) \n a = self.write_merged_file()\n logging.info('*** Done writing the output ! ***')\n return True\n \n \n \"\"\"\n try:\n a = self.initialize_data( station = station, datasets = datasets ) # reading the input files \n dummy = self.merge_all_data() \n logging.info('*** Finished merging, now writing the output netCDF file ***' ) \n a = self.write_merged_file()\n logging.info('*** Done writing the output ! ***')\n return True\n except:\n print('Failed: ' , station )\n return False \n \"\"\"",
"def combine(files, output):\n # read all files\n bxrs = [h5py.File(f,'r') for f in files]\n # some paths we might care about & will copy\n metadata_paths = [\n '3BRecInfo/3BRecVars/MaxVolt',\n '3BRecInfo/3BRecVars/MinVolt',\n '3BRecInfo/3BRecVars/BitDepth',\n '3BRecInfo/3BRecVars/SignalInversion',\n '3BRecInfo/3BRecVars/SamplingRate',\n '3BRecInfo/3BRecVars/ExperimentType',\n '3BRecInfo/3BMeaChip/NRows',\n '3BRecInfo/3BMeaChip/NCols',\n '3BRecInfo/3BMeaChip/Layout',\n '3BRecInfo/3BMeaChip/MeaType',\n '3BRecInfo/3BMeaSystem/FwVersion',\n '3BRecInfo/3BMeaSystem/HwVersion',\n '3BRecInfo/3BMeaSystem/System'\n ]\n\n # count n_frames, n_samples from each file\n # also verify that key metadata matches\n n_frames = bxrs[0]['3BRecInfo/3BRecVars/NRecFrames'][0]\n n_samples = [bxrs[0]['3BData/Raw'].shape[0]]\n sampling_rate = bxrs[0]['3BRecInfo/3BRecVars/SamplingRate'][0]\n print(\"checking that all brw files have matching metadata\")\n for b in bxrs[1:]:\n for m in metadata_paths:\n try:\n if len(bxrs[0][m])==1:\n assert bxrs[0][m][:] == b[m][:]\n else:\n assert np.all(bxrs[0][m][:] == b[m][:])\n except Exception as E:\n logger.warn(f\"\"\"metadata does not match for {m}:\n found {bxrs[0][m]} and {b[m]}\n \"\"\")\n n_frames += b['3BRecInfo/3BRecVars/NRecFrames'][0]\n n_samples.append(b[\"3BData/Raw\"].shape[0])\n print(f\"combined duration: {n_frames/sampling_rate/60:.2f} minutes\")\n\n out_bxr = h5py.File(output, \"w\")\n # copy metadata\n bxrs[0].visititems(partial(glia.copy_metadata, copy_to=out_bxr))\n\n # copy data\n out_bxr['3BRecInfo/3BRecVars/NRecFrames'] = [n_frames]\n out_bxr['nSamplesPerRecording'] = n_samples\n tot_samples = sum(n_samples)\n assert np.isclose(tot_samples/n_frames, 4096) #4096 channels\n \n # copy raw data\n raw_dtype = bxrs[0][\"3BData/Raw\"].dtype\n dset = out_bxr.create_dataset(\"3BData/Raw\", (tot_samples,),\n dtype=raw_dtype)\n start_sample = 0\n max_chunk = int(1e8) # <1GiB \n for i, b in enumerate(bxrs):\n print(f\"Copying {files[i]}\")\n end_sample = start_sample+n_samples[i]\n for s in tqdm(range(0,n_samples[i],max_chunk)):\n e = min(s+max_chunk, end_sample)\n dset[start_sample+s:start_sample+e] = b[\"3BData/Raw\"][s:e]\n start_sample = end_sample\n\n # cleanup\n out_bxr.close()\n [b.close() for b in bxrs]",
"def combine_latest(source: Observable[Any]) -> Observable[Any]:\n\n sources = (source,) + others\n\n return reactivex.combine_latest(*sources)",
"def combine_images(args):\n\n # Read all images into a cube (TODO: think about the RAM)\n with fits.open(args.input[0]) as im0:\n lx, ly = im0[0].data.shape\n ref_hdr = im0[0].header\n\n headers = [fits.open(im_name)[0].header for im_name in args.input]\n cube = numpy.ma.zeros((len(args.input), lx, ly))\n cube.mask = numpy.zeros_like(cube.data)\n for ii, im_name in enumerate(args.input):\n with astroim.Astroim(im_name) as im:\n cube.data[ii, :,:] = im.chips[0].data\n if im.chips[0].mask is not None:\n cube.mask[ii,:,:] = im.chips[0].mask\n\n # Scale images\n scale_functions = {\"median\": numpy.ma.median,\n \"mean\": numpy.ma.mean,\n \"mode\": scipy.stats.mstats.mode,\n \"none\": lambda x: 1}\n for ii, im_name in enumerate(args.input):\n func = scale_functions[args.scale.lower()]\n cube[ii,:,:] /= func(cube[ii,:,:])\n\n\n # Reproject all images to the ref_hdr\n for ii, _ in enumerate(args.input):\n if ii == 0:\n continue\n cube.data[ii,:,:], footprint = reproject_interp((cube.data[ii,:,:], headers[ii]), ref_hdr)\n cube.mask[ii,:,:], footprint = reproject_interp((cube.mask[ii,:,:], headers[ii]), ref_hdr)\n #whr = numpy.isnan(cube.data[ii,:,:])\n #cube.mask[ii,:,:][whr] = True\n\n # Do average\n average_functions = {\"median\": numpy.ma.median, \"mean\": numpy.ma.mean, \"sum\": numpy.ma.sum}\n func = average_functions[args.average.lower()]\n final_image = func(cube, axis=0)\n ref_hdr[\"NCOMBINE\"] = len(args.input)\n\n mask_name = utilities.replace_extension(args.output, \".fits.msk\")\n mask_name_header = utilities.replace_extension(os.path.basename(args.output), \".fits.msk\")\n ref_hdr[\"MASK\"] = mask_name_header\n fits.writeto(args.output, final_image.data, ref_hdr, clobber=True )\n fits.writeto(mask_name, numpy.array(final_image.mask, dtype=int), clobber=True)\n\n return args.output",
"def combine_all(self):\n combined = copy.deepcopy(self.train)\n\n def _combine_data(data):\n for img_path, pid, camid in data:\n\n if pid in self._junk_pids:\n continue\n #pdb.set_trace()\n pid = self.dataset_name + \"_\" + str(pid)\n camid = self.dataset_name + \"_\" + str(camid)\n combined.append((img_path, pid, camid))\n\n _combine_data(self.query)\n _combine_data(self.gallery)\n\n self.train = combined\n self.num_train_pids = self.get_num_pids(self.train)",
"def _load_sources(self):\n ss_dir = SteelScriptDir('AppResponse', 'files')\n\n for svc in [PACKETS_REPORT_SERVICE_NAME,\n GENERAL_REPORT_SERVICE_NAME]:\n svc_version = self.appresponse.versions[svc]\n sw_version = (self.appresponse.get_info()['sw_version']\n .replace(' ', ''))\n sources_filename = ('{}-sources-{}-{}.pcl'\n .format(svc, svc_version, sw_version))\n sources_file = ss_dir.get_data(sources_filename)\n\n sources_file.read()\n\n if not sources_file.data:\n svcdef = self.appresponse.find_service(svc)\n\n # sources is a list of dictionaries\n sources = svcdef.bind('sources').execute('get').data['items']\n\n # the whole set of sources for current service\n all_sources = {}\n\n for source in sources:\n cols = source['columns']\n source['columns'] = \\\n OrderedDict(sorted(zip(map(lambda x: x['id'], cols),\n cols)))\n source['filters_on_metrics'] = \\\n source['capabilities']['filters_on_metrics']\n if 'granularities' not in source:\n source['granularities'] = None\n\n all_sources[source['name']] = source\n\n if source['name'] in report_source_to_groups:\n self._sources[source['name']] = source\n\n # source_file writes the whole set of sources to disk\n sources_file.data = all_sources\n sources_file.write()\n logger.debug(\"Wrote sources data into {}\"\n .format(sources_filename))\n else:\n logger.debug(\"Loading sources data from {}\"\n .format(sources_filename))\n # Only load valid sources based on settings\n for k, v in sources_file.data.iteritems():\n if k in report_source_to_groups:\n self._sources[k] = v\n\n return",
"def combine_device_addrs(*args, **kwargs):\n return _uhd_swig.combine_device_addrs(*args, **kwargs)",
"def match_source_blend_isochrones(params,source,blend,log):\n\n if 'none' in str(params['isochrone_file']).lower():\n log.info('No input file with isochrone data provided, skipping isochrone analysis.')\n\n else:\n log.info('\\n')\n log.info('Analysing isochrones for source star\\n')\n star_data = isochrone_utilities.analyze_isochrones(source.gr_0,source.ri_0,\n params['isochrone_file'],\n log=log)\n source.mass = star_data[0]\n source.sig_mass = star_data[1]\n source.teff = star_data[2]\n source.sig_teff = star_data[3]\n source.logg = star_data[4]\n source.sig_logg = star_data[5]\n source.estimate_luminosity_class(log=log)\n\n log.info('\\n')\n log.info('Analysing isochrones for blend\\n')\n\n star_data = isochrone_utilities.analyze_isochrones(blend.gr_0,blend.ri_0,\n params['isochrone_file'],\n log=log)\n blend.mass = star_data[0]\n blend.sig_mass = star_data[1]\n blend.teff = star_data[2]\n blend.sig_teff = star_data[3]\n blend.logg = star_data[4]\n blend.sig_logg = star_data[5]\n blend.estimate_luminosity_class(log=log)\n\n return source, blend",
"def concat_vsource_sink_csv(csv_fn1,csv_fn2,merged_source_sink_in,\n csv_type,csv_merged,freq='infer',how='left'):\n # merged_source_sink_in: the merged source_sink.in or source_sink.yaml file \n # where the data sources are from csv_fn1, csv_fn2. \n if merged_source_sink_in.endswith('yaml'):\n df_sources,df_sinks = read_source_sink_yaml(merged_source_sink_in)\n elif merged_source_sink_in.endswith('in'):\n df_sources,df_sinks = read_source_sink_in(merged_source_sink_in)\n else:\n raise NotImplementedError(\n 'merged_source_sink_in can either be .yaml or .in file')\n if csv_type == 'sources':\n sites = df_sources.index\n elif csv_type == 'sink':\n sites = df_sinks.index\n else:\n raise NotImplementedError('csv_type can either be sources or sinks')\n th1 = read_source_sink_csv(csv_fn1)\n th2 = read_source_sink_csv(csv_fn2)\n if freq=='infer':\n if th1.index.freq!=th2.index.freq:\n print(\"th1 and th2 has different frequency\")\n else:\n th1 = th1.asfreq(freq)\n th2 = th2.asfreq(freq)\n th_merged = th1.join(th2,how=how,rsuffix='r').drop(columns=['datetimer'])\n th_merged = th_merged.fillna(-9999.0)\n cols = np.append(['datetime'],sites)\n th_merged = th_merged[cols] #rearrange the array to have the same order as defined in merged_source_sink_in\n th_merged['datetime'] = np.datetime_as_string(th_merged.index.values,'h')\n write_source_sink_csv(th_merged,csv_merged)",
"def add_source_and_vol (well_list):\n\n source = input('From which source well will you be shooting in this region? ')\n\n vol = int(input('How much volume (nL) do you want to shoot in this region? '))\n\n if vol%25 != 0: #modulo needs to be 0 for multiple of 25\n raise ValueError('This number is not compatible with the Echo, please enter a multiple of 25 nL.')\n\n return (source, vol, well_list)",
"def office_distribute_adjoint_sources(parser, args, params):\n parser.parse_known_args(args)\n control.distribute_adjoint_sources(params)",
"def combine(img_list, output_file=None,\n method='average', weights=None, scale=None, mem_limit=16e9,\n clip_extrema=False, nlow=1, nhigh=1,\n minmax_clip=False, minmax_clip_min=None, minmax_clip_max=None,\n sigma_clip=False,\n sigma_clip_low_thresh=3, sigma_clip_high_thresh=3,\n sigma_clip_func=ma.mean, sigma_clip_dev_func=ma.std,\n dtype=None, combine_uncertainty_function=None,\n overwrite_output=False, **ccdkwargs):\n if not isinstance(img_list, list):\n # If not a list, check whether it is a numpy ndarray or string of\n # filenames separated by comma\n if isinstance(img_list, np.ndarray):\n img_list = img_list.tolist()\n elif isinstance(img_list, str) and (',' in img_list):\n img_list = img_list.split(',')\n else:\n try:\n # Maybe the input can be made into a list, so try that\n img_list = list(img_list)\n except TypeError:\n raise ValueError(\n \"unrecognised input for list of images to combine.\")\n\n # Select Combine function to call in Combiner\n if method == 'average':\n combine_function = 'average_combine'\n elif method == 'median':\n combine_function = 'median_combine'\n elif method == 'sum':\n combine_function = 'sum_combine'\n else:\n raise ValueError(\"unrecognised combine method : {0}.\".format(method))\n\n # First we create a CCDObject from first image for storing output\n if isinstance(img_list[0], CCDData):\n ccd = img_list[0].copy()\n else:\n # User has provided fits filenames to read from\n ccd = CCDData.read(img_list[0], **ccdkwargs)\n\n if dtype is None:\n dtype = np.float64\n\n # Convert the master image to the appropriate dtype so when overwriting it\n # later the data is not downcast and the memory consumption calculation\n # uses the internally used dtype instead of the original dtype. #391\n if ccd.data.dtype != dtype:\n ccd.data = ccd.data.astype(dtype)\n\n # If the template image doesn't have an uncertainty, add one, because the\n # result always has an uncertainty.\n if ccd.uncertainty is None:\n ccd.uncertainty = StdDevUncertainty(np.zeros_like(ccd.data))\n\n # If the template doesn't have a mask, add one, because the result may have\n # a mask\n if ccd.mask is None:\n ccd.mask = np.zeros_like(ccd.data, dtype=bool)\n\n size_of_an_img = _calculate_size_of_image(ccd,\n combine_uncertainty_function)\n\n no_of_img = len(img_list)\n\n # Set a memory use factor based on profiling\n if method == 'median':\n memory_factor = 3\n else:\n memory_factor = 2\n\n memory_factor *= 1.3\n\n # determine the number of chunks to split the images into\n no_chunks = int((memory_factor * size_of_an_img * no_of_img) / mem_limit) + 1\n if no_chunks > 1:\n log.info('splitting each image into {0} chunks to limit memory usage '\n 'to {1} bytes.'.format(no_chunks, mem_limit))\n xs, ys = ccd.data.shape\n\n # Calculate strides for loop\n xstep, ystep = _calculate_step_sizes(xs, ys, no_chunks)\n\n # Dictionary of Combiner properties to set and methods to call before\n # combining\n to_set_in_combiner = {}\n to_call_in_combiner = {}\n\n # Define all the Combiner properties one wants to apply before combining\n # images\n if weights is not None:\n to_set_in_combiner['weights'] = weights\n\n if scale is not None:\n # If the scale is a function, then scaling function need to be applied\n # on full image to obtain scaling factor and create an array instead.\n if callable(scale):\n scalevalues = []\n for image in img_list:\n if isinstance(image, CCDData):\n imgccd = image\n else:\n imgccd = CCDData.read(image, **ccdkwargs)\n\n scalevalues.append(scale(imgccd.data))\n\n 
to_set_in_combiner['scaling'] = np.array(scalevalues)\n else:\n to_set_in_combiner['scaling'] = scale\n\n if clip_extrema:\n to_call_in_combiner['clip_extrema'] = {'nlow': nlow,\n 'nhigh': nhigh}\n\n if minmax_clip:\n to_call_in_combiner['minmax_clipping'] = {'min_clip': minmax_clip_min,\n 'max_clip': minmax_clip_max}\n\n if sigma_clip:\n to_call_in_combiner['sigma_clipping'] = {\n 'low_thresh': sigma_clip_low_thresh,\n 'high_thresh': sigma_clip_high_thresh,\n 'func': sigma_clip_func,\n 'dev_func': sigma_clip_dev_func}\n\n # Finally Run the input method on all the subsections of the image\n # and write final stitched image to ccd\n for x in range(0, xs, xstep):\n for y in range(0, ys, ystep):\n xend, yend = min(xs, x + xstep), min(ys, y + ystep)\n ccd_list = []\n for image in img_list:\n if isinstance(image, CCDData):\n imgccd = image\n else:\n imgccd = CCDData.read(image, **ccdkwargs)\n\n # Trim image and copy\n # The copy is *essential* to avoid having a bunch\n # of unused file references around if the files\n # are memory-mapped. See this PR for details\n # https://github.com/astropy/ccdproc/pull/630\n ccd_list.append(imgccd[x:xend, y:yend].copy())\n\n # Create Combiner for tile\n tile_combiner = Combiner(ccd_list, dtype=dtype)\n\n # Set all properties and call all methods\n for to_set in to_set_in_combiner:\n setattr(tile_combiner, to_set, to_set_in_combiner[to_set])\n for to_call in to_call_in_combiner:\n getattr(tile_combiner, to_call)(**to_call_in_combiner[to_call])\n\n # Finally call the combine algorithm\n combine_kwds = {}\n if combine_uncertainty_function is not None:\n combine_kwds['uncertainty_func'] = combine_uncertainty_function\n\n comb_tile = getattr(tile_combiner, combine_function)(**combine_kwds)\n\n # add it back into the master image\n ccd.data[x:xend, y:yend] = comb_tile.data\n if ccd.mask is not None:\n ccd.mask[x:xend, y:yend] = comb_tile.mask\n if ccd.uncertainty is not None:\n ccd.uncertainty.array[x:xend, y:yend] = comb_tile.uncertainty.array\n # Free up memory to try to stay under user's limit\n del comb_tile\n del tile_combiner\n del ccd_list\n\n # Write fits file if filename was provided\n if output_file is not None:\n ccd.write(output_file, overwrite=overwrite_output)\n\n return ccd",
"def create_om_sources(self): #TODO: Prob. should make file names specifiable\n if isinstance(self.sources, connectivity_module.Connectivity):\n sources_file = self._tvb_connectivity_to_txt(\"sources.txt\")\n om_sources = om.Matrix()\n elif isinstance(self.sources, surfaces_module.Cortex):\n sources_file = self._tvb_surface_to_tri(\"sources.tri\")\n om_sources = om.Mesh()\n else:\n LOG.error(\"sources must be either a Connectivity or Cortex.\")\n\n om_sources.load(sources_file)\n return om_sources"
]
| [
"0.5694566",
"0.56148976",
"0.54036075",
"0.53820014",
"0.5315765",
"0.52329516",
"0.5208108",
"0.5146966",
"0.513335",
"0.5095876",
"0.5057739",
"0.5045067",
"0.5022017",
"0.500303",
"0.49761328",
"0.49656466",
"0.49650297",
"0.4956805",
"0.4940756",
"0.49406424",
"0.48968565",
"0.48876444",
"0.4869968",
"0.48280343",
"0.48275173",
"0.48251376",
"0.48238224",
"0.4818706",
"0.48149648",
"0.48079085"
]
| 0.6918295 | 0 |
Calculate the ODH fatality rate for the given `Source`s. For each leak of each source, ODH conditions are analyzed and fatality rates are calculated. The results are collected in the fail_modes list. | def odh(self, sources, power_outage=False):
self.fail_modes = []
# Probability of power failure in the building:
# PFD_power if no outage, 1 if there is outage
PFD_power_build = (power_outage or
TABLE_1['Electrical Power Failure']['Demand rate'])
# Calculate fatality rates for each source
for source in sources:
for leak in source.leaks:
leak_failure_rate = leak[0]
if leak_failure_rate is not None: # None for constant leak
self._fatality_no_response(source, leak, source.sol_PFD,
PFD_power_build)
self._fatality_fan_powered(source, leak, source.sol_PFD,
PFD_power_build) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _fatality_fan_powered(self, source, leak, sol_PFD, PFD_power_build):\n (failure_mode_name, leak_failure_rate, q_leak, tau, N) = leak\n for (P_fan, Q_fan, N_fan) in self.Fan_flowrates:\n # Probability of power on, ODH system working, and m number of fans\n # with flow rate Q_fan on.\n P_response = (1-PFD_power_build) * (1-self.PFD_ODH) * \\\n sol_PFD * P_fan\n P_i = leak_failure_rate * P_response\n O2_conc = conc_vent(self.volume, q_leak, Q_fan, tau)\n F_i = self._fatality_prob(O2_conc)\n phi_i = P_i*F_i\n f_mode = failure_mode(phi_i, source, failure_mode_name, O2_conc,\n leak_failure_rate, P_i, F_i,\n PFD_power_build == 1, q_leak, tau, Q_fan,\n N_fan, N)\n self.fail_modes.append(f_mode)",
"def _fatality_no_response(self, source, leak, sol_PFD,\n PFD_power_build):\n (failure_mode_name, leak_failure_rate, q_leak, tau, N) = leak\n P_no_response = float(PFD_power_build) * sol_PFD + \\\n (1-PFD_power_build)*self.PFD_ODH\n P_i = leak_failure_rate * P_no_response\n Q_fan = self.vent_rate\n O2_conc = conc_vent(self.volume, q_leak, Q_fan, tau)\n F_i = self._fatality_prob(O2_conc)\n phi_i = P_i*F_i\n f_mode = failure_mode(phi_i, source, failure_mode_name, O2_conc,\n leak_failure_rate, P_i, F_i,\n PFD_power_build == 1, q_leak, tau, Q_fan, 0, N)\n self.fail_modes.append(f_mode)",
"def _average_duration_of_issues(self, *metric_source_ids: str) -> int:\n for query_id in metric_source_ids:\n try:\n self._metric_source.sum_for_all_issues(query_id, self._get_days_in_progress, self._extra_info_data)\n except ValueError:\n return -1 # Error already logged in utils.eval_json\n days = self.__sum_days(self._extra_info_data)\n stories = self.__count_stories(self._extra_info_data)\n return days / stories if stories > 0 else -1",
"def summary_source(classes_fold_score_list, classes_periods, classes): \n scores = []\n for idx in range(len(classes_periods)):\n temp = pd.concat([classes_fold_score_list[idx], classes_periods[idx].source], axis=1)\n temp = temp.groupby(['source', 'catalog']).size().unstack(fill_value=0).T\n scores.append(temp)\n score_df = pd.concat(scores, keys=classes)\n score_df.index.set_levels([\"Wrong\", \"Right\", \"Multiply\"], \n level=1,\n inplace=True)\n score_df = (score_df.T.fillna(0))\n dividend = score_df.iloc[:, score_df.columns.get_level_values(1)==\"Right\"].T.droplevel(-1).T\n divisor = score_df.T.groupby(level=0).sum().T.loc[:,[\"RRL\",\"Ceph\",\"LPV\",\"DSCT\",\"EB\"]]\n return dividend.divide(divisor).round(2).mean(axis=1)",
"def report(self, brief=True, sens=None):\n self.fail_modes.sort(key=lambda x: x.phi, reverse=True)\n sens = sens or SHOW_SENS\n title = f'ODH report for {self}'\n padding = len(title) + 10\n print('#'*padding)\n print(title)\n print('-'*padding)\n if brief:\n print('Printing brief ODH report')\n print(f'Only leaks with Fatality rate > {sens} are shown')\n for f_mode in self.fail_modes:\n if f_mode.phi >= sens or not brief:\n print()\n print(f' Source: {f_mode.source.name}')\n print(f' Failure: {f_mode.name}')\n print(f' Fatality rate: {f_mode.phi.to(1/ureg.hr):.2~}')\n print(f' Building is powered: {not f_mode.outage}')\n print(f' Oxygen concentration: {f_mode.O2_conc:.0%}, '\n f'{f_mode.O2_conc/0.21:.0%} percent of norm')\n print(f' Leak failure rate: {f_mode.leak_fr:.3g~}')\n print(' ODH protection PFD: '\n f'{(f_mode.P_i/f_mode.leak_fr).to(ureg.dimensionless):.2~}')\n print(f' Total failure rate: {f_mode.P_i.to(1/ureg.hr):.2~}')\n print(f' Leak rate: {f_mode.q_leak:.2~}')\n print(f' Event duration: {f_mode.tau:.2~}')\n print(f' Fans working: {f_mode.N_fan}')\n print(f' Fan rate: {f_mode.Q_fan:.2~}')\n print(f' Fatality prob: {f_mode.F_i:.0%}')",
"def report_invalid_sources(self):\n if not self.invalid_sources:\n return\n total = sum(self.invalid_sources[s] for s in self.invalid_sources)\n self.logger.info(\n \"Dropping %d messages with invalid sources: %s\",\n total,\n \", \".join(\"%s: %s\" % (s, self.invalid_sources[s])\n for s in self.invalid_sources)\n )\n self.invalid_sources = defaultdict(int)",
"def abc_reject_analyse(obs):\n def closest(lst, K):\n lst = np.asarray(lst)\n idx = (np.abs(lst - K)).argmin()\n return idx\n failure_results = [1, 1]\n suffixes = ('', '_hm')\n w = np.ones(1000)\n if (os.path.exists('%s/abc_reject.pkl' % obs.results_dir) and\n os.path.exists('%s/abc_reject_hm.pkl' % obs.results_dir)):\n for test in range(2):\n with open('%s/abc_reject%s.pkl' % (obs.results_dir, suffixes[test]), 'rb') as pfile:\n results = pickle.load(pfile)\n params = pd.DataFrame([(r.scout_prob, r.survival_prob) for r in results],\n columns=('scout prob', 'survival prob'))\n X, Y, PDF = pyabc.visualization.kde.kde_2d(params, w, x=\"scout prob\", y=\"survival prob\")\n x_idx = closest(X[0], obs.parameters.scout_prob)\n y_idx = closest([y[0] for y in Y], obs.parameters.survival_prob)\n posterior = PDF[y_idx][x_idx]\n ratio = posterior / np.amax(PDF)\n if ratio > 0.5:\n failure_results[test] = 0\n return failure_results",
"def test_US_metrics(df):\n # Limit to only most recent day by source\n df_max_date = df.groupby(['data_source'])['date'].max().reset_index()\n latest_df = df.merge(df_max_date,\n how='inner',\n on=['data_source','date'])\n\n us = df.loc[df['country_or_region'] == 'US']\n latest_us = latest_df.loc[latest_df['country_or_region'] == 'US']\n\n LOGGER.info(f\"US Total Cases: {latest_us['running_total_cases'].sum()}\")\n LOGGER.info(f\"US Total Deaths: {latest_us['running_total_deaths'].sum()}\")\n\n assert latest_us['running_total_cases'].sum() == us['daily_new_cases'].sum()\n assert latest_us['running_total_deaths'].sum() == us['daily_new_deaths'].sum()",
"def consistency_check(self):\n\n for mode in [\"instantaneous\", \"infinite_duration\", \"fixed_duration\"]:\n for key, value in getattr(self.modes, mode).sources.items():\n dim = self.dimensions\n for axis in [\"x\", \"y\", \"z\"]:\n par = getattr(value, axis)\n bound = min(getattr(dim, axis))\n if isinstance(par, list):\n for item in par:\n if item < 0 or item > bound:\n raise ConsistencyError(\n f\"{mode} source {key} x position is \"\n f\"outside space domain (0, {bound}).\")\n else:\n if par < 0 or par > bound:\n raise ConsistencyError(\n f\"{mode} source {key} x position is \"\n f\"outside space domain (0, {bound}).\")\n\n for mode in [\"instantaneous\", \"infinite_duration\"]:\n for key, value in getattr(self.modes, mode).sources.items():\n if isinstance(value.time, list):\n for item in value.time:\n if item > self.total_time:\n raise ConsistencyError(\n f\"{mode} source {key} time is \"\n f\"outside time domain [0, {self.total_time}).\")\n else:\n if value.time > self.total_time:\n raise ConsistencyError(\n f\"{mode} source {key} time is \"\n f\"outside time domain [0, {self.total_time}).\")\n\n for key, value in getattr(self.modes, \"fixed_duration\").sources.items():\n if isinstance(value.start_time, list):\n for item in value.start_time:\n if item > self.total_time:\n raise ConsistencyError(\n f\"{mode} source {key} start time is \"\n f\"outside time domain [0, {self.total_time}).\")\n else:\n if value.start_time > self.total_time:\n raise ConsistencyError(\n f\"{mode} source {key} start time is \"\n f\"outside time domain [0, {self.total_time}).\")\n\n if isinstance(value.end_time, list):\n for item in value.end_time:\n if item > self.total_time:\n raise ConsistencyError(\n f\"{mode} source {key} end time is \"\n f\"outside time domain [0, {self.total_time}).\")\n else:\n if value.end_time > self.total_time:\n raise ConsistencyError(\n f\"{mode} source {key} end time is \"\n f\"outside time domain [0, {self.total_time}).\")\n \n dims = [\"x\", \"y\", \"z\"]\n for plane in self.models.eddy_diffusion.monitor_locations.planes.values():\n dim = [axis for axis in dims if axis not in str(plane.axis)][0]\n if isinstance(plane.distance, list):\n if max(plane.distance) > min(getattr(self.dimensions, dim)):\n raise ConsistencyError(f\"{plane} is outside the space domain\")\n else:\n if plane.distance > min(getattr(self.dimensions, dim)):\n raise ConsistencyError(f\"{plane} is outside the space domain\")\n\n for key, point in self.models.eddy_diffusion.monitor_locations.points.items():\n for dim in dims:\n if getattr(point, dim) > min(getattr(self.dimensions, dim)):\n raise ConsistencyError(\n f\"{key}'s {dim} value, is outside space domain \"\n f\"(0, {getattr(self.dimensions, dim)})\")\n\n for key, line in self.models.eddy_diffusion.monitor_locations.lines.items():\n for dim in dims:\n if getattr(line.point, dim) > min(getattr(self.dimensions, dim)):\n raise ConsistencyError(\n f\"{key}'s {dim} value, is outside space domain \"\n f\"(0, {getattr(self.dimensions, dim)})\")\n\n thresh = self.thresholds\n if len(thresh.concentration) > 5 or len(thresh.exposure) > 5:\n raise ConsistencyError(f\"Cannot exceed more than 5 thresholds\")\n \n line_number = self.models.eddy_diffusion.lines_plots.number\n if isinstance(line_number, list):\n for item in line_number:\n if item > self.time_samples:\n raise ConsistencyError(\n f\"The number of requested line plots ({line_number}) cannot exceed the \"\n f\"number of time samples ({self.time_samples}).\")\n else:\n if line_number > 
self.time_samples:\n raise ConsistencyError(\n f\"The number of requested line plots ({line_number}) cannot exceed the \"\n f\"number of time samples ({self.time_samples}).\")\n \n contour_number = self.models.eddy_diffusion.planes_plots.number\n if isinstance(contour_number, list):\n for item in contour_number:\n if item > self.time_samples:\n raise ConsistencyError(\n f\"The number of requested contour plots ({item}) cannot exceed the \"\n f\"number of time samples ({self.time_samples}.)\")\n else:\n if contour_number > self.time_samples:\n raise ConsistencyError(\n f\"The number of requested contour plots ({contour_number}) cannot exceed the \"\n f\"number of time samples ({self.time_samples}).\")",
"def pull_dalys(cause_ids, nonfatal_cause_ids, location_ids, ages, sexes, index_cols):\n if len(cause_ids) + len(nonfatal_cause_ids) == 0:\n raise Exception(\"Must select at least one fatal or nonfatal cause_id\")\n \n #init empty dfs\n ylds, ylls = pd.DataFrame(), pd.DataFrame()\n \n if len(nonfatal_cause_ids)>0:\n ylds = get_draws(\n gbd_id_type='cause_id',\n gbd_id=cause_ids,\n source='como',\n measure_id=3,\n metric_id=3, # only available as rate\n location_id=location_ids,\n year_id=2019,\n age_group_id=ages,\n sex_id=sexes,\n gbd_round_id=6,\n status='best',\n decomp_step='step5',\n ).set_index(index_cols + ['cause_id'])\n ylds = ylds.drop(columns=[c for c in ylds.columns if 'draw' not in c])\n\n #convert rate to count\n pop = get_population(\n location_id=location_ids,\n year_id=2019,\n age_group_id=ages,\n sex_id=sexes,\n gbd_round_id=6,\n decomp_step='step4').set_index(index_cols)\n for i in list(range(0, 1000)):\n ylds[f'draw_{i}'] = ylds[f'draw_{i}'] * pop['population']\n else:\n print(\"No nonfatal ids selected; returning ylls only\")\n \n if len(cause_ids)>0:\n ylls = get_draws(\n gbd_id_type='cause_id',\n gbd_id=cause_ids,\n source='codcorrect',\n measure_id=4,\n metric_id=1,\n location_id=location_ids,\n year_id=2019,\n age_group_id=ages,\n sex_id=sexes,\n gbd_round_id=6,\n status='latest',\n decomp_step='step5',\n ).set_index(index_cols + ['cause_id']).replace(np.nan, 0)\n ylls = ylls.drop(columns=[c for c in ylls.columns if 'draw' not in c])\n else:\n print(\"No fatal ids selected; returning ylds only\")\n \n return ylls + ylds",
"def check_causes(\n self,\n cause_metrics: List[CauseMetricBase],\n grouped_data: List[BreakdownGroupedData],\n no_of_days: int,\n breakdown: str,\n ) -> Tuple[Optional[recommendation_enums_union], Optional[float], Optional[str]]:\n\n # TODO extract this logic to a method and use twice\n for cause_metric in cause_metrics:\n for metric_clause in cause_metric.metric_clauses:\n metric_name = metric_clause.metric_field.name\n\n metric_data = get_group_data_from_list(grouped_data, metric_name, no_of_days)\n\n # When Cost Per Result is None.\n if metric_data is None:\n return None, None, None\n\n no_of_breakdowns = 1\n if metric_clause.is_divided_by_no_breakdowns:\n no_of_breakdowns = len({s for s in metric_data.get_breakdowns()}) - 1\n\n if metric_data.get_breakdown_datapoints(TOTAL_KEY) is None:\n continue\n\n reference_data = metric_data.get_breakdown_total(TOTAL_KEY) / no_of_breakdowns\n\n current_data = metric_data.get_breakdown_total(breakdown)\n if current_data == INVALID_METRIC_VALUE:\n return None, None, None\n\n trend = self.trend(reference_data, current_data)\n variance = self.variance(reference_data, current_data, trend)\n\n if variance >= metric_clause.variance_percentage:\n return cause_metric.output, variance, metric_name\n\n return None, None, None",
"def log_manager(self, source):\n if self.fail_count[source]:\n if not (self.dname.split('.')[-1] in self.ofr_list):\n time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n self.ofr_list.append(self.dname.split('.')[-1])\n log = str(time) + '|' + self.dname.split('.')[-1] + '|' + self.error_code\n self.sys_chans['fail'].setValue(1)\n self.sys_info_d['ofr'].setValue(json.dumps(self.ofr_list))\n self.sys_info_d['logs'].setValue(log)\n\n if self.dname.split('.')[-1] == 'WG1_2':\n if self.error_code == 'U_out_of_range':\n print('WG1_2_err', self.ps_error, self.ofr_list, self.fail_count)\n elif self.dname.split('.')[-1] == 'WG1_2':\n if self.error_code == 'U_out_of_range':\n print('WG1_2_still_out', self.ps_error, self.ofr_list, self.fail_count)\n s = 0\n for k, v in self.fail_count.items():\n s += v\n if not s:\n if self.dname.split('.')[-1] in self.ofr_list:\n time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n self.ofr_list.delete(self.dname.split('.')[-1])\n log = str(time) + '|' + self.dname.split('.')[-1] + '|' + 'PS IS RUNNING'\n self.sys_chans['fail'].setValue(0)\n self.sys_info_d['ofr'].setValue(json.dumps(self.ofr_list))\n self.sys_info_d['logs'].setValue(log)\n else:\n log = ''\n for k, v in self.fail_count.items():\n if v:\n log = log + k + '|'\n log = log[:-1]\n # self.sys_chans['errcode'].setValue(json.dumps(log))",
"def test_sources_not_ok_on_connection_error(self):\n measurement = self.measurement(\n self.metric(),\n sources=[\n {\n \"source_uuid\": SOURCE_ID,\n \"value\": None,\n \"total\": None,\n \"parse_error\": None,\n \"connection_error\": \"Oops!\",\n },\n {\n \"source_uuid\": SOURCE_ID2,\n \"value\": \"7\",\n \"total\": \"100\",\n \"parse_error\": None,\n \"connection_error\": None,\n },\n ],\n )\n self.assertFalse(measurement.sources_ok())",
"def do_countystats(df: pd.DataFrame):\n\tasof = df.date.max()\n\tdfstats: pd.DataFrame = df.loc[df.date==asof]\n\tdfstats.set_index('fips', drop=False, append=False, inplace=True)\n\tfipslist = list(dfstats.fips.unique())\n\n\tfor x in iter(fipslist):\n\t\tpriormth: dt.date = asof - dt.timedelta(days=30)\n\t\ttry:\n\t\t\tprior_row = df.loc[(str(x), priormth)]\n\t\texcept KeyError:\n\t\t\tdfstats.at[dfstats['fips']==x, 'cases_30'] = None\n\t\t\tdfstats.at[dfstats['fips']==x, 'deaths_30'] = None\n\t\telse:\n\t\t\tdfstats.at[dfstats['fips']==x, 'cases_30'] = prior_row['cases']\n\t\t\tdfstats.at[dfstats['fips']==x, 'deaths_30'] = prior_row['deaths']\n\t\tpriormth: dt.date = asof - dt.timedelta(days=60)\n\t\ttry:\n\t\t\tprior_row = df.loc[(str(x), priormth)]\n\t\texcept KeyError:\n\t\t\tdfstats.at[dfstats['fips']==x, 'cases_60'] = None\n\t\t\tdfstats.at[dfstats['fips']==x, 'deaths_60'] = None\n\t\telse:\n\t\t\tdfstats.at[dfstats['fips']==x, 'cases_60'] = prior_row['cases']\n\t\t\tdfstats.at[dfstats['fips']==x, 'deaths_60'] = prior_row['deaths']\n\t\tpriormth: dt.date = asof - dt.timedelta(days=90)\n\t\ttry:\n\t\t\tprior_row = df.loc[(str(x), priormth)]\n\t\texcept KeyError:\n\t\t\tdfstats.at[dfstats['fips']==x, 'cases_90'] = None\n\t\t\tdfstats.at[dfstats['fips']==x, 'deaths_90'] = None\n\t\telse:\n\t\t\tdfstats.at[dfstats['fips']==x, 'cases_90'] = prior_row['cases']\n\t\t\tdfstats.at[dfstats['fips']==x, 'deaths_90'] = prior_row['deaths']\n\n\tdfstats.set_index('fips')\n\tdfstats.sort_index()\n\tdfstats['caserate'] = (dfstats['cases']/ dfstats['pop'])* 100\n\tdfstats['caserate'] = dfstats['caserate'].round(2)\n\tdfstats['ddtodc'] = ((dfstats['deaths']-dfstats['deaths_30'])/\n\t\t\t\t\t\t(dfstats['cases']-dfstats['cases_30']))\n\tdfstats['ddtodc'] = dfstats['ddtodc'].round(2)\n\tdfstats['ddtodc30'] = ((dfstats['deaths_30'] - dfstats['deaths_60'])/\n\t\t\t\t\t\t(dfstats['cases_30'] - dfstats['cases_60']))\n\tdfstats['ddtodc30'] = dfstats['ddtodc30'].round(2)\n\tdfstats['ddtodc60'] = ((dfstats['deaths_60'] - dfstats['deaths_90'])/\n\t (dfstats['cases_60'] - dfstats['cases_90']))\n\tdfstats['ddtodc60'] = dfstats['ddtodc60'].round(2)\n\treturn dfstats",
"def test_missing_source(self):\n metric = MetricSourceAgeMetricUnderTest(self.__subject, project=domain.Project())\n self.assertEqual('missing_source', metric.status())",
"async def test_minimum_severity(self):\n self.set_source_parameter(\"severities\", [\"medium\", \"high\"])\n response = await self.collect(get_request_json_return_value=self.vulnerabilities_json)\n self.assert_measurement(response, value=\"0\", entities=[])",
"def calc_reduction_diesel_used (self):\n self.reduction_diesel_used = self.diesel_equiv_captured - \\\n self.loss_heat_recovery\n #~ print 'self.reduction_diesel_used',self.reduction_diesel_used",
"def process(self, source0):\n # Step HSL_Threshold0:\n self.__hsl_threshold_input = source0\n (self.hsl_threshold_output) = self.__hsl_threshold(self.__hsl_threshold_input, self.__hsl_threshold_hue, self.__hsl_threshold_saturation, self.__hsl_threshold_luminance)\n\n # Step Find_Contours0:\n self.__find_contours_input = self.hsl_threshold_output\n (self.find_contours_output) = self.__find_contours(self.__find_contours_input, self.__find_contours_external_only)",
"def _ground_truth_calculation(\n discount_factor,\n target_log_policy,\n behaviour_log_policy,\n rewards,\n target_value,\n clip_rho_threshold,\n clip_cs_threshold,\n ):\n vs = []\n seq_len = len(target_log_policy)\n\n importance_sampling = target_log_policy - behaviour_log_policy\n rhos = np.exp(importance_sampling)\n\n # Truncated importance sampling\n cs = np.minimum(rhos, clip_cs_threshold)\n clipped_rhos = np.minimum(rhos, clip_rho_threshold)\n\n # Inefficient method close to the iterative formulation\n for s in range(seq_len):\n v_s = np.copy(target_value[s]) # Very important copy.\n for t in range(s, seq_len):\n v_s += (\n pow(discount_factor, t - s)\n * np.prod(cs[s:t], axis=0)\n * clipped_rhos[t]\n * (\n rewards[t]\n + discount_factor * target_value[t + 1]\n - target_value[t]\n )\n )\n vs.append(v_s)\n vs = np.stack(vs, axis=0)\n return vs",
"def test_sources_not_ok_on_config_error(self):\n measurement = self.measurement(\n self.metric(metric_type=\"sentiment\"),\n sources=[\n {\"source_uuid\": SOURCE_ID, \"value\": \"5\", \"total\": \"100\", \"parse_error\": None, \"connection_error\": None},\n {\n \"source_uuid\": SOURCE_ID2,\n \"value\": \"7\",\n \"total\": \"100\",\n \"parse_error\": None,\n \"connection_error\": None,\n },\n ],\n )\n self.assertFalse(measurement.sources_ok())",
"def test_value_with_missing_source(self):\n metric = MetricSourceAgeMetricUnderTest(self.__subject,\n project=domain.Project(metric_sources={domain.MetricSource: []}))\n self.assertEqual(-1, metric.value())",
"def quality_checks(ds):\n parameters = ['barometric_pressure', 'relative_humidity', 'air_temperature', 'longwave_irradiance',\n 'precipitation', 'shortwave_irradiance', 'sea_surface_temperature', 'sea_surface_conductivity',\n 'sea_surface_salinity', 'eastward_wind_velocity', 'northward_wind_velocity']\n for p in parameters:\n # The primary failure mode of the METBK is to repeat the last value it received from a sensor.\n # Use the IOOS QARTOD flat line test to identify these cases (consider it suspect if it repeats\n # for 20+ minutes and failed if it repeats for 35+ minutes).\n flags = qartod.flat_line_test(ds[p].values, ds['time'].values, 1200, 2100, 0.00001)\n\n # The secondary failure mode occurs when the METBK logger sets values to a NaN if no sensor data is available.\n # In the case of the sea surface conductivity and temperature data, different values are used to represent\n # missing data. Specifically, the values are set to a 0.0 and -5.0, respectively. In either case, (NaNs or\n # 0.0 and -5.0) set the QC flag to 9 to indicate \"Missing\" data, and then convert the 0.0 and -5.0 values to\n # a NaN to avoid propagating false numbers into subsequent calculations (e.g. salinity or heat flux).\n if p == 'sea_surface_temperature':\n m = ds[p] < -4.0 # use a floating point value just above -5\n flags[m] = 9\n ds[p][m] = np.nan\n ds['sea_surface_salinity'][m] = np.nan\n elif p == 'sea_surface_conductivity':\n m = ds[p] < 0.5 # use a floating point value just above 0\n flags[m] = 9\n ds[p][m] = np.nan\n ds['sea_surface_salinity'][m] = np.nan\n else:\n m = np.isnan(ds[p])\n flags[m] = 9\n\n # add the qc_flags to the dataset, rolling up the results into a single value\n qc_summary = p + '_qc_summary_flag'\n if qc_summary in ds.variables:\n # add the new test results to the existing QC summary results\n qc = ds[qc_summary]\n flags = np.array([flags, qc.values])\n ds[qc_summary] = ('time', flags.max(axis=0, initial=1))\n else:\n # create a new QC summary variable\n ds[qc_summary] = ('time', flags)\n\n # set up the attributes for the new variable\n ds[qc_summary].attrs = dict({\n 'long_name': '%s QC Summary Flag' % ds[p].attrs['long_name'],\n 'standard_name': 'aggregate_quality_flag',\n 'comment': ('Summary quality flag combining the results of the instrument-specific quality tests with '\n 'existing OOI QC tests, if available, to create a single QARTOD style aggregate quality flag'),\n 'flag_values': np.array([1, 2, 3, 4, 9]),\n 'flag_meanings': 'pass not_evaluated suspect_or_of_high_interest fail missing'\n })",
"def analyze_coverage(results, outcomes, allow_list, full_coverage):\n available = check_test_cases.collect_available_test_cases()\n for key in available:\n hits = outcomes[key].hits() if key in outcomes else 0\n if hits == 0 and key not in allow_list:\n if full_coverage:\n results.error('Test case not executed: {}', key)\n else:\n results.warning('Test case not executed: {}', key)\n elif hits != 0 and key in allow_list:\n # Test Case should be removed from the allow list.\n if full_coverage:\n results.error('Allow listed test case was executed: {}', key)\n else:\n results.warning('Allow listed test case was executed: {}', key)",
"def theils_u(x,\n y,\n nan_strategy=_REPLACE,\n nan_replace_value=_DEFAULT_REPLACE_VALUE):\n\n print(x.name + ' to ' + y.name + ' with Theils U')\n\n if nan_strategy == _REPLACE:\n x, y = replace_nan_with_value(x, y, nan_replace_value)\n elif nan_strategy == _DROP:\n x, y = remove_incomplete_samples(x, y)\n\n contingency = pd.crosstab(x, y)\n c, p, dof, expected = ss.chi2_contingency(contingency)\n\n s_xy = conditional_entropy(x, y)\n x_counter = Counter(x)\n total_occurrences = sum(x_counter.values())\n p_x = list(map(lambda n: n / total_occurrences, x_counter.values()))\n s_x = ss.entropy(p_x)\n if s_x == 0:\n return 1, 0\n else:\n return (s_x - s_xy) / s_x, p, r'$U$'",
"def calc_uncertainty(self):\n y = self.y\n y_true = self.y_true\n j_lim = self.j_lim\n Nj = self.Nj\n if issubclass(y.dtype.type, np.integer):\n # Categorial: percentage of wrong classes\n uncertainty_global = np.count_nonzero(y_true != y)/self.N\n uncertainty_group = np.empty(self.J)\n for j in range(self.J):\n uncertainty_group[j] = (\n np.count_nonzero(\n y_true[j_lim[j]:j_lim[j+1]] != y[j_lim[j]:j_lim[j+1]]\n ) / Nj[j]\n )\n else:\n # Continuous: R squared\n sst = np.sum(np.square(y - np.mean(y)))\n sse = np.sum(np.square(y - y_true))\n uncertainty_global = 1 - sse/sst\n uncertainty_group = np.empty(self.J)\n for j in range(self.J):\n sst = np.sum(np.square(\n y[j_lim[j]:j_lim[j+1]] - np.mean(y[j_lim[j]:j_lim[j+1]])\n ))\n sse = np.sum(np.square(\n y[j_lim[j]:j_lim[j+1]] - y_true[j_lim[j]:j_lim[j+1]]\n ))\n uncertainty_group[j] = 1 - sse/sst\n return uncertainty_global, uncertainty_group",
"def _check_over_counting_category(\n self,\n category: \"HierarchicalCategory\",\n source_categorization: \"Categorization\",\n descendants: dict[str, set[str]],\n ) -> typing.Optional[OverCountingProblem]:\n\n # A(c)\n ancestral_set = set(category.ancestors)\n ancestral_set.add(category)\n\n # PA_S(c)\n relevant_rules = self.relevant_rules(\n categories=ancestral_set,\n source_categorization=source_categorization,\n simple_sums_only=True,\n )\n # TODO: for now, only use rules that don't have aux categories\n relevant_rules = [rule for rule in relevant_rules if not rule.is_restricted]\n projected_ancestral_set: list[set[HierarchicalCategory]] = []\n for rule in relevant_rules:\n if source_categorization == self.categorization_a:\n fc = rule.factors_categories_b\n else:\n fc = rule.factors_categories_a\n target_categories = {cat for cat, factor in fc.items() if factor == 1}\n projected_ancestral_set.append(target_categories) # type: ignore\n\n if not projected_ancestral_set: # trivial\n return None\n\n # for performance, use codes (which are guaranteed to be unique within a\n # categorization) for the comparisons here\n projected_ancestral_set_codes = [\n {c.codes[0] for c in group} for group in projected_ancestral_set\n ]\n\n # hull(PA_S(c))\n hull: set[str] = set().union(*projected_ancestral_set_codes)\n\n # L(PA_S(c))\n leave_node_groups = [\n m\n for m in projected_ancestral_set\n if self._leave_node_group(m, hull, descendants)\n ]\n\n leave_hull = set().union(*leave_node_groups)\n largest = max(leave_node_groups, key=len)\n\n if len(leave_hull) != len(largest):\n return OverCountingProblem(\n category=category,\n rules=relevant_rules,\n leave_node_groups=leave_node_groups,\n )\n else:\n return None",
"def catch_up(employed_adults_directory, covid_pol_directory):\n # load in df\n try:\n df = pd.read_csv(employed_adults_directory, index_col=0)\n except Exception as e:\n print(str(e))\n print(\"Please enter the correct directory for employed_adults_apr2020_jul2020.csv\")\n \n # create target variable\n target = df.PREMPNOT_y.apply(job_loss_categorization)\n \n # append target to df\n df['target'] = target\n \n # drop future data\n to_drop = [column for column in df.columns if \"_y\" in column]\n df = df.drop(columns=to_drop)\n \n # remove _x from columns\n df.columns = [column.split(\"_\")[0] for column in df.columns]\n \n # add IND_ID_FINAL and HH_ID\n df = clean_CPS_df(df)\n \n # feature list\n feature_list = [\n 'HEHOUSUT', # type of housing unit to dummy \n 'HWHHWGT', # Household weight\n 'GESTFIPS', # state codes\n \"GTMETSTA\", # Metropolitan or not \n 'HEFAMINC', # total family income \n \"HRNUMHOU\", # total number of people living in the house hold\n 'HRHTYPE', # household type eg civilian or married etc\n 'PRTAGE', # person's age\n 'PEMARITL', # marital status\n 'PESEX', # gender 1 == male, 2 == female\n 'PEEDUCA', # level of education see dict for coding\n 'PTDTRACE', # race composition of the house. See data dict\n \"PEHSPNON\", # hispanic or not hispanic\n 'PENATVTY', # country of birth ie US born or not\n \"PRCITSHP\", # citezen status\n \"PRINUSYR\", # Year since immigration -1== us born, else coded by decade\n \"PRHRUSL\", # Hours at work, dummy into full time or not full time\n \"HUBUS\", # Does anyone have a business or a farm? are you a business owner?\n \"PEMJOT\", # Do you have more than 1 job?\n \"PEMJNUM\", # how many jobs do you have?\n \"PEHRFTPT\", # Do you normally spend more than 35 hours a week at your main job?\n \"PEHRRSN2\", # what is the main reason you do not want to work 35 hours. Speaks to motivation of keeping job.\n \"PEHRACTT\", # sum of hours worked between all jobs\n \"PRAGNA\", # Agricultural industry yes or no\n \"PRNMCHLD\", # number of children less than 18 years old (-1 not a parent, 0-99 number of children)\n \"PECYC\", # How much college credit in years has the reference person recieved?\n \"PECERT1\", # Do you have a professional certification issued at state or federal level.\n \"PRMJIND1\", # industry cat\n \"PRMJOCC1\", # occupation cat\n 'target',\n 'HH_ID',\n 'IND_ID_FINAL'\n ]\n \n # subset the data frame with our desired columns\n df = df[feature_list]\n \n \n # dummy var list for transformation\n list_of_dummyvars = [\n 'PRCITSHP',\n 'PEHRRSN2',\n 'PRMJIND1',\n 'PRMJOCC1',\n \"PESEX\",\n \"PEEDUCA\",\n \"PTDTRACE\", \n \"PEHSPNON\",\n\n ]\n \n # Binning/transforming variables\n df = feature_transformations(df)\n \n # Dummying variables\n df = feature_dummies(df, list_of_dummyvars)\n \n # add political and covid geolocation data\n try:\n df = merge_on_fip(df, covid_pol_directory)\n except Exception as e:\n print(str(e))\n print(\"Please add the correct directory for covid_pol.csv\")\n \n return df",
"def snmpqosqos_cfy_udp_unknownrate(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_cfy_udp_unknownrate\n\t\texcept Exception as e:\n\t\t\traise e",
"def get_cases():\n # Deprecated warning\n url = \"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/\"\n warnings.warn(\"This function is deprecated. Use get_data_jhu instead; see tutorials at <https://github.com/PayneLab/covid19pandas/tree/master/docs/>.\", DeprecatedWarning, stacklevel=2)\n print(\"These data were obtained from Johns Hopkins University (https://github.com/CSSEGISandData/COVID-19).\")\n return _get_table(url, \"time_series_covid19_confirmed_global.csv\", source=\"jhu\", update=True)",
"def _failed_tests(self, metric_source_id: str) -> int:\n return self.__test_count(metric_source_id, 'failed')"
]
| [
"0.639882",
"0.61853623",
"0.5198145",
"0.48865837",
"0.4809793",
"0.4745529",
"0.47022194",
"0.4641142",
"0.4628895",
"0.45942694",
"0.4469745",
"0.4404132",
"0.43621033",
"0.43478933",
"0.43275335",
"0.43183514",
"0.431352",
"0.4298347",
"0.4293593",
"0.42874545",
"0.4277915",
"0.42576307",
"0.42549756",
"0.42065433",
"0.42062032",
"0.419913",
"0.41794398",
"0.41747278",
"0.41451198",
"0.41417825"
]
| 0.7015947 | 0 |
Calculate the fatality rate in the volume for ODH protection failure. Calculate the failure rate of a leak occurring with no safety response, due either to power failure and isolation solenoid failure, or to power on and ODH system failure. O2 concentration is limited only by the amount of inert gas the source has. Fans are not operational. Adds calculation results to the fail_modes list. | def _fatality_no_response(self, source, leak, sol_PFD,
PFD_power_build):
(failure_mode_name, leak_failure_rate, q_leak, tau, N) = leak
P_no_response = float(PFD_power_build) * sol_PFD + \
(1-PFD_power_build)*self.PFD_ODH
P_i = leak_failure_rate * P_no_response
Q_fan = self.vent_rate
O2_conc = conc_vent(self.volume, q_leak, Q_fan, tau)
F_i = self._fatality_prob(O2_conc)
phi_i = P_i*F_i
f_mode = failure_mode(phi_i, source, failure_mode_name, O2_conc,
leak_failure_rate, P_i, F_i,
PFD_power_build == 1, q_leak, tau, Q_fan, 0, N)
self.fail_modes.append(f_mode) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _fatality_fan_powered(self, source, leak, sol_PFD, PFD_power_build):\n (failure_mode_name, leak_failure_rate, q_leak, tau, N) = leak\n for (P_fan, Q_fan, N_fan) in self.Fan_flowrates:\n # Probability of power on, ODH system working, and m number of fans\n # with flow rate Q_fan on.\n P_response = (1-PFD_power_build) * (1-self.PFD_ODH) * \\\n sol_PFD * P_fan\n P_i = leak_failure_rate * P_response\n O2_conc = conc_vent(self.volume, q_leak, Q_fan, tau)\n F_i = self._fatality_prob(O2_conc)\n phi_i = P_i*F_i\n f_mode = failure_mode(phi_i, source, failure_mode_name, O2_conc,\n leak_failure_rate, P_i, F_i,\n PFD_power_build == 1, q_leak, tau, Q_fan,\n N_fan, N)\n self.fail_modes.append(f_mode)",
"def odh(self, sources, power_outage=False):\n self.fail_modes = []\n # Probability of power failure in the building:\n # PFD_power if no outage, 1 if there is outage\n PFD_power_build = (power_outage or\n TABLE_1['Electrical Power Failure']['Demand rate'])\n # Calculate fatality rates for each source\n for source in sources:\n for leak in source.leaks:\n leak_failure_rate = leak[0]\n if leak_failure_rate is not None: # None for constant leak\n self._fatality_no_response(source, leak, source.sol_PFD,\n PFD_power_build)\n self._fatality_fan_powered(source, leak, source.sol_PFD,\n PFD_power_build)",
"def calculate_criticality(self, item_hr):\n _error_code = 0\n _msg = 'RAMSTK SUCCESS: Calculating failure mode {0:d} criticality.'.\\\n format(self.mode_id)\n\n if item_hr < 0.0:\n _error_code = 2010\n _msg = _(u\"RAMSTK ERROR: Item hazard rate has a negative value.\")\n raise OutOfRangeError(_msg)\n if not 0.0 <= self.mode_ratio <= 1.0:\n _error_code = 2010\n _msg = _(\n u\"RAMSTK ERROR: Failure mode ratio is outside the range of \"\n u\"[0.0, 1.0].\")\n raise OutOfRangeError(_msg)\n if self.mode_op_time < 0.0:\n _error_code = 2010\n _msg = _(u\"Failure mode operating time has a negative value.\")\n raise OutOfRangeError(_msg)\n if not 0.0 <= self.effect_probability <= 1.0:\n _error_code = 2010\n _msg = _(u\"Failure effect probability is outside the range \"\n u\"[0.0, 1.0].\")\n raise OutOfRangeError(_msg)\n\n self.mode_hazard_rate = item_hr * self.mode_ratio\n self.mode_criticality = self.mode_hazard_rate \\\n * self.mode_op_time * self.effect_probability\n\n if self.mode_hazard_rate < 0.0:\n _error_code = 2010\n _msg = _(u\"Failure mode hazard rate has a negative value.\")\n raise OutOfRangeError(_msg)\n if self.mode_criticality < 0.0:\n _error_code = 2010\n _msg = _(u\"Failure mode criticality has a negative value.\")\n raise OutOfRangeError(_msg)\n\n return _error_code, _msg",
"def calc_loss_heat_recovery (self):\n hr_used = self.cd['heat recovery operational']\n self.loss_heat_recovery = 0\n if hr_used:# == 'Yes':\n self.loss_heat_recovery = self.electric_diesel_reduction * \\\n (self.comp_specs['percent heat recovered'] / 100.0)\n #~ print 'self.loss_heat_recovery',self.loss_heat_recovery",
"def set_defensive_ratio(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n if bx[\"minutes\"] > 0:\n opp_fga = opp_team[\"t2p_int\"] + opp_team[\"t3p_int\"]\n opp_fgm = opp_team[\"t2p_conv\"] + opp_team[\"t3p_conv\"]\n try:\n dor = Decimal(opp_team[\"reb_of\"] / (opp_team[\"reb_of\"] + team[\"reb_def\"]))\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n dor = 0\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n dor = 0\n\n try:\n dfg = Decimal(opp_fgm / opp_fga)\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n dfg = 0\n try:\n fmwt = Decimal((dfg * (1 - dor)) / (dfg * (1 - dor) + (1 - dfg) * dor))\n except:\n fmwt = 0\n stops1 = bx[\"steals\"] + bx[\"block_shots\"] * fmwt * (1 - Decimal('1.07') * dor) + bx[\"reb_def\"] * (1 - fmwt)\n\n try:\n stops2 = (Decimal((opp_fga - opp_fgm - team[\"block_shots\"]) / team[\"minutes\"]) * fmwt * (1 - Decimal('1.07') * dor) + Decimal((opp_team[\"turnovers\"] - team[\"steals\"]) / team[\"minutes\"])) * bx[\"minutes\"] + Decimal(bx[\"fouls_cm\"] / team[\"fouls_cm\"]) * Decimal('0.4') * opp_team[\"tl_int\"] * (1 - Decimal(opp_team[\"tl_conv\"] / opp_team[\"tl_int\"]))**2\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n stops2 = 0\n except InvalidOperation:\n print(BCOLORS.WARNING + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n stops2 = 0\n\n stops = stops1 + stops2\n poss = self.get_team_possessions()\n if bx[\"minutes\"] > 0:\n stop_percentage = (float(stops) * float(opp_team[\"minutes\"])) / (float(poss) * float(bx[\"minutes\"]))\n else:\n stop_percentage = 0.00\n opp_points = opp_team[\"t2p_conv\"] * 2 + opp_team[\"t3p_conv\"] * 3 + opp_team[\"tl_conv\"]\n team_defensive_rating = 100 * (float(opp_points) / poss)\n try:\n d_pts_per_scposs = float(opp_points) / (float(opp_fgm) + (1 - (1 - (float(opp_team[\"tl_conv\"]) / float(opp_team[\"tl_int\"])))**2) * float(opp_team[\"tl_int\"])*0.4)\n result = Decimal(team_defensive_rating) + Decimal('0.2') * (100 * Decimal(d_pts_per_scposs) * (1 - Decimal(stop_percentage)) - Decimal(team_defensive_rating))\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n d_pts_per_scposs = 0\n result = 0.00\n\n\n\n # print(\"dor: \" + str(dor))\n # print(\"dfg: \" + str(dfg))\n # print(\"fmwt: \" + str(fmwt))\n # print(\"stops1: \" + str(stops1))\n # print(\"stops2: \" + str(stops2))\n # print(\"stops: \" + str(stops))\n # print(\"poss: \" + str(poss))\n # print(\"stop_percentage: \" + str(stop_percentage))\n # print(\"opp_points: \" + str(opp_points))\n # print(\"team_defensive_rating: \" + str(team_defensive_rating))\n # print(\"d_pts_per_scposs: \" + str(d_pts_per_scposs))\n # print(\"drtg: \" + str(result) + \"\\n\")\n else:\n result = 0.00\n self.drtg = \"%.2f\" % round(result, 2)",
"def calc_diesel_equiv_captured (self):\n if self.generation_wind_proposed == 0:\n excess_percent = 0\n else:\n excess_percent = self.excess_energy / self.generation_wind_proposed\n excess_captured_percent = excess_percent * \\\n (self.cd['percent excess energy capturable'] / 100.0)\n if self.comp_specs['secondary load']:\n net_excess_energy = excess_captured_percent * \\\n self.generation_wind_proposed\n else:\n net_excess_energy = 0\n\n #~ conversion = 0.99/0.138/0.8/293\n conversion = self.cd['efficiency electric boiler']/ \\\n (1/constants.mmbtu_to_gal_HF)/ \\\n self.cd['efficiency heating oil boiler']/\\\n (constants.mmbtu_to_kWh)\n self.diesel_equiv_captured = net_excess_energy * conversion\n\n #~ print 'self.diesel_equiv_captured ',self.diesel_equiv_captured",
"def set_offensive_ratio(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n if bx[\"minutes\"] > 0 and (bx[\"t2p_int\"] + bx[\"t3p_int\"]) > 0:\n fgm = bx[\"t2p_conv\"] + bx[\"t3p_conv\"]\n fga = bx[\"t2p_int\"] + bx[\"t3p_int\"]\n team_fgm = team[\"t2p_conv\"] + team[\"t3p_conv\"]\n team_fga = team[\"t2p_int\"] + team[\"t3p_int\"]\n team_points = team[\"t2p_conv\"]*2 + team[\"t3p_conv\"]*3 + team[\"tl_conv\"]\n points = bx[\"t2p_conv\"]*2 + bx[\"t3p_conv\"]*3 + bx[\"tl_conv\"]\n\n try:\n qAST = (Decimal(bx[\"minutes\"] / (team[\"minutes\"] / 5)) * (Decimal('1.14') * Decimal((team[\"assists\"] - bx[\"assists\"]) / team_fgm))) + \\\n Decimal((((team[\"assists\"] / team[\"minutes\"]) * bx[\"minutes\"] * 5 - bx[\"assists\"]) / ((team_fgm / team[\"minutes\"]) * bx[\"minutes\"] * 5 - fgm)) * (1 - (bx[\"minutes\"] / (team[\"minutes\"] / 5))))\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n qAST = 1\n except InvalidOperation:\n print(BCOLORS.WARNING + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n qAST = 1\n\n fg_part = fgm * (1 - Decimal('0.5') * Decimal((points - bx[\"tl_conv\"]) / (2 * fga)) * qAST)\n\n try:\n ast_part = Decimal('0.5') * Decimal(((team_points - team[\"tl_conv\"]) - (points - bx[\"tl_conv\"])) / (2*(team_fga - fga))) * bx[\"assists\"]\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n ast_part = 0\n\n if bx[\"tl_int\"] > 0:\n ft_part = Decimal(1 - (1 - (bx[\"tl_conv\"] / bx[\"tl_int\"]))**2) * Decimal('0.4') * bx[\"tl_int\"]\n else:\n ft_part = 0\n team_scoring_poss = Decimal(team_fgm + Decimal(1 - (1 - (team[\"tl_conv\"] / team[\"tl_int\"]))**2) * team[\"tl_int\"] * Decimal('0.4'))\n try:\n team_orb_percentage = Decimal(team[\"reb_of\"] / (team[\"reb_of\"] + ((opp_team[\"reb_def\"] + opp_team[\"reb_of\"]) - opp_team[\"reb_of\"])))\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n team_orb_percentage = 0\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n team_orb_percentage = 0\n\n team_play_percentage = Decimal(team_scoring_poss / (team_fga + team[\"tl_int\"] * Decimal('0.4') + team[\"turnovers\"]))\n try:\n team_orb_weight = ((1 - team_orb_percentage) * team_play_percentage) / ((1 - team_orb_percentage) * team_play_percentage + team_orb_percentage * (1 - team_play_percentage))\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n team_orb_weight = 0\n\n orb_part = bx[\"reb_of\"] * team_orb_weight * team_play_percentage\n\n fg_x_poss = (fga - fgm) * (1 - Decimal('1.07') * team_orb_percentage)\n if bx[\"tl_conv\"] > 0:\n ft_x_poss = Decimal((1 - (bx[\"tl_conv\"] / bx[\"tl_int\"]))**2) * Decimal('0.4') * bx[\"tl_int\"]\n else:\n ft_x_poss = Decimal(1 - (bx[\"tl_conv\"] / 1)**2) * Decimal('0.4') * bx[\"tl_int\"]\n try:\n sc_poss = (fg_part + ast_part + ft_part) * (1 - (team[\"reb_of\"] / team_scoring_poss) * team_orb_weight * team_play_percentage) + orb_part\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n sc_poss =0\n\n tot_poss = sc_poss + fg_x_poss + ft_x_poss + bx[\"turnovers\"]\n\n pprod_fg_part = 2 * (fgm + Decimal('0.5') * bx[\"t3p_conv\"]) * (1 - Decimal('0.5') * Decimal((points - bx[\"tl_conv\"]) / (2 * fga)) * qAST)\n\n try:\n pprod_ast_part = 2 * ((team_fgm - fgm + Decimal('0.5') * (team[\"t3p_conv\"] - bx[\"t3p_conv\"])) / 
(team_fgm - fgm)) * Decimal('0.5') * Decimal(((team_points - team[\"tl_conv\"]) - (points - bx[\"tl_conv\"])) / (2 * (team_fga - fga))) * bx[\"assists\"]\n except:\n pprod_ast_part = 0\n\n pprod_orb_part = bx[\"reb_of\"] * team_orb_weight * team_play_percentage * (team_points / (team_fgm + Decimal(1 - (team[\"tl_conv\"] / team[\"tl_int\"])**2) * Decimal('0.4') * team[\"tl_int\"]))\n try:\n pprod = (pprod_fg_part + pprod_ast_part + bx[\"tl_conv\"]) * (1 - (team[\"reb_of\"] / team_scoring_poss) * team_orb_weight * team_play_percentage) + pprod_orb_part\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n pprod = 0\n\n try:\n result = 100 * (pprod / tot_poss)\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n result = 0\n\n # print(\"fgm: \" + str(fgm))\n # print(\"fga: \" + str(fga))\n # print(\"team_fgm: \" + str(team_fgm))\n # print(\"team_fga: \" + str(team_fga))\n # print(\"team_points: \" + str(team_points))\n # print(\"points: \" + str(points))\n # print(\"qAST: \" + str(qAST))\n # print(\"fg_part: \" + str(fg_part))\n # print(\"ast_part: \" + str(ast_part))\n # print(\"ft_part: \" + str(ft_part))\n # print(\"team_scoring_poss: \" + str(team_scoring_poss))\n # print(\"team_orb_percentage: \" + str(team_orb_percentage))\n # print(\"team_play_percentage: \" + str(team_play_percentage))\n # print(\"team_orb_weight: \" + str(team_orb_weight))\n # print(\"orb_part: \" + str(orb_part))\n # print(\"fg_x_poss: \" + str(fg_x_poss))\n # print(\"ft_x_poss: \" + str(ft_x_poss))\n # print(\"sc_poss: \" + str(sc_poss))\n # print(\"tot_poss: \" + str(tot_poss))\n # print(\"pprod_fg_part: \" + str(pprod_fg_part))\n # print(\"pprod_ast_part: \" + str(pprod_ast_part))\n # print(\"pprod_orb_part: \" + str(pprod_orb_part))\n # print(\"pprod: \" + str(pprod))\n # print(\"result: \" + str(result) + \"\\n\")\n else:\n result = 0.00\n\n self.ortg = \"%.2f\" % round(result, 2)\n if Decimal(self.ortg) < 0 or Decimal(self.ortg) >= 1000:\n \"\"\"For one game, maybe we've got a negative result or one so big, so, for just only a game, we get the ORTG \n using team's formula\"\"\"\n print(BCOLORS.OKBLUE + \"ORTG negativo o superior a 1000 para jugadora => recalculamos a través de la fórmula de equipo\" + BCOLORS.ENDC)\n bx = self.get_standard_stats()\n result = round((bx[\"t2p_conv\"]*2 + bx[\"t3p_conv\"]*3 + bx[\"tl_conv\"])/self.get_team_possessions(), 2)\n self.ortg = \"%.2f\" % result",
"def calc_lost_heat_recovery (self):\n if not self.cd['heat recovery operational']:\n\n self.lost_heat_recovery = [0]\n else:\n gen_eff = self.cd[\"diesel generation efficiency\"]\n self.lost_heat_recovery = \\\n (self.generation / gen_eff )* .10",
"def get_failure_rate(self) -> float:\n return self.failurerate",
"def _fatality_prob(self, O2_conc):\n if O2_conc >= 0.18: # Lowest oxygen concentration above 18%\n Fi = 0\n elif O2_conc <= 0.088: # 8.8% of oxygen is assumed to be 100% fatal\n Fi = 1\n else:\n # Fi formula, reverse engineered using 8.8% and 18% thresholds\n Fi = 10**(6.5-76*O2_conc)\n return Fi",
"def failed_per_hour(self):\r\n return (3600.*(self.circ_failed+self.strm_failed))/self.current_uptime()",
"def report(self, brief=True, sens=None):\n self.fail_modes.sort(key=lambda x: x.phi, reverse=True)\n sens = sens or SHOW_SENS\n title = f'ODH report for {self}'\n padding = len(title) + 10\n print('#'*padding)\n print(title)\n print('-'*padding)\n if brief:\n print('Printing brief ODH report')\n print(f'Only leaks with Fatality rate > {sens} are shown')\n for f_mode in self.fail_modes:\n if f_mode.phi >= sens or not brief:\n print()\n print(f' Source: {f_mode.source.name}')\n print(f' Failure: {f_mode.name}')\n print(f' Fatality rate: {f_mode.phi.to(1/ureg.hr):.2~}')\n print(f' Building is powered: {not f_mode.outage}')\n print(f' Oxygen concentration: {f_mode.O2_conc:.0%}, '\n f'{f_mode.O2_conc/0.21:.0%} percent of norm')\n print(f' Leak failure rate: {f_mode.leak_fr:.3g~}')\n print(' ODH protection PFD: '\n f'{(f_mode.P_i/f_mode.leak_fr).to(ureg.dimensionless):.2~}')\n print(f' Total failure rate: {f_mode.P_i.to(1/ureg.hr):.2~}')\n print(f' Leak rate: {f_mode.q_leak:.2~}')\n print(f' Event duration: {f_mode.tau:.2~}')\n print(f' Fans working: {f_mode.N_fan}')\n print(f' Fan rate: {f_mode.Q_fan:.2~}')\n print(f' Fatality prob: {f_mode.F_i:.0%}')",
"def calc_error_prop_ef(self, tier2_hhv, tier3):\n\n #Import boostrap data (by year and fuel) for hhv and tier 3 data\n hhv_boot = pd.read_csv('../calculation_data/hhv_uncertainty.csv',\n index_col=[0,1])\n\n mw_boot = pd.read_csv('../calculation_data/mw_uncertainty.csv',\n index_col=[0,1])\n\n cc_boot = pd.read_csv('../calculation_data/cc_uncertainty.csv',\n index_col=[0,1])\n\n def calc_sq_std(df):\n \"\"\"\n Calculate the square of the standard deviation divided by\n mean. Returns a dataframe.\n \"\"\"\n\n sq_std = df.dropna()\n\n sq_std = (df['std'].divide(df['mean']))**2\n\n sq_std.name = 'sq_std'\n\n sq_std = pd.DataFrame(sq_std)\n\n sq_std = sq_std.dropna()\n\n return sq_std\n\n gas_scf_to_kg = pd.DataFrame()\n\n for f in ['Natural Gas (Weighted U.S. Average)', 'Fuel Gas']:\n\n scf_df = pd.DataFrame(tier3['molecular_weight'].xs(\n f, level='FUEL_TYPE'\n ))\n\n scf_df['FUEL_TYPE'] = f\n\n gas_scf_to_kg = gas_scf_to_kg.append(scf_df)\n\n gas_scf_to_kg.set_index('FUEL_TYPE', append=True, inplace=True)\n\n error_prop = pd.merge(\n calc_sq_std(hhv_boot), calc_sq_std(cc_boot),\n left_index=True, right_index=True, how='inner',\n suffixes=['_hhv', '_C']\n )\n\n # Data reporting began in 2014 and coverage is spotty.\n # Repeat 2015 data for 2010 - 2014.\n error_prop.drop(2014, axis=0, level=0, inplace=True)\n\n for y in range(2010, 2015):\n\n new_year = error_prop.loc[2015].reset_index()\n\n new_year['reporting_year'] = y\n\n error_prop = error_prop.append(\n new_year.set_index(['reporting_year', 'fuel_type'])\n )\n\n error_prop.sort_index(level=0, inplace=True)\n\n # Include error of molecular weight of fuel gas and natural gas\n error_prop = pd.merge(error_prop, calc_sq_std(mw_boot),\n left_index=True, right_index=True, how='outer')\n\n error_prop.rename(columns={'sq_std': 'sq_std_mw'}, inplace=True)\n\n error_prop['final_uncert'] = error_prop.sq_std_hhv.add(\n error_prop.sq_std_C\n ).add(\n error_prop.sq_std_mw, fill_value=0\n )\n\n error_prop.dropna(subset=['sq_std_hhv'], axis=0, inplace=True)\n\n # Calculate kg-mol per SCF for natural gas and fuel gas. SCF defined in\n # SI units as 101.560 kPa, 288.706 K, 0.02316847 m3. 
Ideal gas\n # constant is 8.314 kPa*m3/(kg-mol*K)\n scf_per_kgmol = (8.314 * 288.706) / (101.560 * 0.028316847)\n\n conv_dict = {'percent by weight, expressed as a decimal fraction': \\\n 907.185, 'kg C per kg': scf_per_kgmol,\n 'kg C per gallon': 1}\n\n t2t3_efs = pd.DataFrame(index=error_prop.index,\n columns=['reported_mean'])\n\n for fuel in error_prop.index:\n\n if 'Gas' in fuel:\n\n t2t3_efs.loc[fuel, 'reported_mean'] = (tier2_hhv.xs(\n fuel, level='FUEL_TYPE'\n ).hhv_wa.mean() * scf_per_kgmol / \\\n tier3.xs(fuel, level='FUEL_TYPE').molecular_weight.mean() / \\\n tier3.xs(fuel, level='FUEL_TYPE').carbon_content.mean() * \\\n (12/44))**-1\n\n\n if 'Oil' in fuel:\n\n t2t3_efs.loc[fuel, 'reported_mean'] = tier3.xs(\n fuel, level='FUEL_TYPE'\n ).carbon_content.mean() / \\\n tier2_hhv.xs(fuel, level='FUEL_TYPE').hhv_wa.mean() * \\\n (44/12)\n\n else:\n\n t2t3_efs.loc[fuel, 'reported_mean'] = tier3.xs(\n fuel, level='FUEL_TYPE'\n ).carbon_content.mean() / 100 / \\\n tier2_hhv.xs(fuel, level='FUEL_TYPE').hhv_wa.mean() * \\\n (44/12)\n\n t2t3_efs = pd.merge(t2t3_efs, error_prop[['final_uncert']],\n left_index=True, right_index=True,\n how='inner')\n\n t2t3_efs.rename(columns={'mean': 'kgCO2_per_mmBtu'}, inplace=True)\n\n # Create column for the uncertainty amount in kg CO2/mmBtu (+/-)\n t2t3_efs['ef_plus_minus'] = t2t3_efs.kgCO2_per_mmBtu.multiply(\n t2t3_efs.final_uncert\n )\n\n return error_prop, mw_boot",
"def calc_excess_energy (self):\n #~ print sorted(self.cd.keys())\n self.excess_energy = \\\n (self.generation_wind_proposed - self.transmission_losses) * \\\n (self.cd['percent excess energy'] / 100.0)\n #~ print 'self.excess_energy',self.excess_energy",
"def _cost_caught_by_police(self):\n if self.fine_frequency != 0:\n if self.number_of_courses % self.fine_frequency == 0 and self.number_of_courses != 0:\n if self.number_of_courses % self.fine_frequency_paid_by_driver == 0 and self.number_of_courses != 0:\n self.fine_paid_number_of_courses += 1\n fine_value = np.random.choice([100, 200, 500], p=[0.25, 0.4, 0.35])\n self.total_penalty_points += self._add_penalty_points() # adding penalty points\n return fine_value\n else:\n return 0\n else:\n return 0\n else:\n return 0",
"def systcpconnfailrate(self) :\n\t\ttry :\n\t\t\treturn self._systcpconnfailrate\n\t\texcept Exception as e:\n\t\t\traise e",
"def abc_reject_analyse(obs):\n def closest(lst, K):\n lst = np.asarray(lst)\n idx = (np.abs(lst - K)).argmin()\n return idx\n failure_results = [1, 1]\n suffixes = ('', '_hm')\n w = np.ones(1000)\n if (os.path.exists('%s/abc_reject.pkl' % obs.results_dir) and\n os.path.exists('%s/abc_reject_hm.pkl' % obs.results_dir)):\n for test in range(2):\n with open('%s/abc_reject%s.pkl' % (obs.results_dir, suffixes[test]), 'rb') as pfile:\n results = pickle.load(pfile)\n params = pd.DataFrame([(r.scout_prob, r.survival_prob) for r in results],\n columns=('scout prob', 'survival prob'))\n X, Y, PDF = pyabc.visualization.kde.kde_2d(params, w, x=\"scout prob\", y=\"survival prob\")\n x_idx = closest(X[0], obs.parameters.scout_prob)\n y_idx = closest([y[0] for y in Y], obs.parameters.survival_prob)\n posterior = PDF[y_idx][x_idx]\n ratio = posterior / np.amax(PDF)\n if ratio > 0.5:\n failure_results[test] = 0\n return failure_results",
"def snmpqosqos_error_libqos_api_failuresrate(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_error_libqos_api_failuresrate\n\t\texcept Exception as e:\n\t\t\traise e",
"def calc_reduction_diesel_used (self):\n self.reduction_diesel_used = self.diesel_equiv_captured - \\\n self.loss_heat_recovery\n #~ print 'self.reduction_diesel_used',self.reduction_diesel_used",
"def test_function_fuel_sum(data, mode_constrained, space_heating_enduses):\n fuel_in = 0\n fuel_in_solid_fuel = 0\n fuel_in_gas = 0\n fuel_in_elec = 0\n fuel_in_oil = 0\n fuel_in_heat = 0\n fuel_in_hydrogen = 0\n fuel_in_biomass = 0\n\n fuel_heating_all_fueltypes = 0\n fuel_heating_gas = 0\n tot_heating = 0\n #mode_constrained = True #SCRAP\n\n for region in data['rs_fuel_disagg']:\n for enduse in data['rs_fuel_disagg'][region]:\n fuel_in += np.sum(data['rs_fuel_disagg'][region][enduse])\n fuel_in_heat += np.sum(data['rs_fuel_disagg'][region][enduse][data['lookups']['fueltypes']['heat']])\n\n if mode_constrained == False and enduse in space_heating_enduses: #Exclude inputs for heating\n tot_heating += np.sum(data['rs_fuel_disagg'][region][enduse])\n #pass\n else:\n fuel_in_elec += np.sum(data['rs_fuel_disagg'][region][enduse][data['lookups']['fueltypes']['electricity']])\n fuel_in_gas += np.sum(data['rs_fuel_disagg'][region][enduse][data['lookups']['fueltypes']['gas']])\n fuel_in_hydrogen += np.sum(data['rs_fuel_disagg'][region][enduse][data['lookups']['fueltypes']['hydrogen']])\n fuel_in_oil += np.sum(data['rs_fuel_disagg'][region][enduse][data['lookups']['fueltypes']['oil']])\n fuel_in_solid_fuel += np.sum(data['rs_fuel_disagg'][region][enduse][data['lookups']['fueltypes']['solid_fuel']])\n fuel_in_biomass += np.sum(data['rs_fuel_disagg'][region][enduse][data['lookups']['fueltypes']['biomass']])\n \n for region in data['ss_fuel_disagg']:\n for enduse in data['ss_fuel_disagg'][region]:\n for sector in data['ss_fuel_disagg'][region][enduse]:\n fuel_in += np.sum(data['ss_fuel_disagg'][region][enduse][sector])\n fuel_in_heat += np.sum(data['ss_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['heat']])\n\n if mode_constrained == False and enduse in space_heating_enduses:\n tot_heating += np.sum(data['ss_fuel_disagg'][region][enduse][sector])\n else:\n fuel_in_elec += np.sum(data['ss_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['electricity']])\n fuel_in_gas += np.sum(data['ss_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['gas']])\n fuel_in_hydrogen += np.sum(data['ss_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['hydrogen']])\n fuel_in_oil += np.sum(data['ss_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['oil']])\n fuel_in_solid_fuel += np.sum(data['ss_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['solid_fuel']])\n fuel_in_biomass += np.sum(data['ss_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['biomass']])\n \n for region in data['is_fuel_disagg']:\n for enduse in data['is_fuel_disagg'][region]:\n for sector in data['is_fuel_disagg'][region][enduse]:\n fuel_in += np.sum(data['is_fuel_disagg'][region][enduse][sector])\n fuel_in_heat += np.sum(data['is_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['heat']])\n\n if mode_constrained == False and enduse in space_heating_enduses:\n tot_heating += np.sum(data['is_fuel_disagg'][region][enduse][sector])\n else:\n fuel_in_elec += np.sum(data['is_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['electricity']])\n fuel_in_gas += np.sum(data['is_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['gas']])\n fuel_in_hydrogen += np.sum(data['is_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['hydrogen']])\n fuel_in_oil += np.sum(data['is_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['oil']])\n fuel_in_solid_fuel += 
np.sum(data['is_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['solid_fuel']])\n fuel_in_biomass += np.sum(data['is_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['biomass']])\n \n return fuel_in, fuel_in_biomass, fuel_in_elec, fuel_in_gas, fuel_in_heat, fuel_in_hydrogen, fuel_in_solid_fuel, fuel_in_oil, tot_heating",
"def calculate_reliability(data):\n\n\tsuccess_ratio = data['success'].sum() * 1.0 / len(data)\n\tprint \"Reliability: {}\".format(success_ratio)",
"def convergence_check(self):\n air = self.air_alias.val\n flue_gas = self.fuel_alias.val + '_fg'\n fuel = self.fuel_alias.val\n\n for c in self.outl:\n if not c.fluid.val_set[air]:\n if c.fluid.val[air] > 0.95:\n c.fluid.val[air] = 0.95\n if c.fluid.val[air] < 0.5:\n c.fluid.val[air] = 0.5\n\n if not c.fluid.val_set[flue_gas]:\n if c.fluid.val[flue_gas] > 0.5:\n c.fluid.val[flue_gas] = 0.5\n if c.fluid.val[flue_gas] < 0.05:\n c.fluid.val[flue_gas] = 0.05\n\n if not c.fluid.val_set[fuel]:\n if c.fluid.val[fuel] > 0:\n c.fluid.val[fuel] = 0\n\n c.target.propagate_fluid_to_target(c, c.target)\n\n for i in self.inl:\n if i.m.val_SI < 0 and not i.m.val_set:\n i.m.val_SI = 0.01\n\n for c in self.outl:\n if c.m.val_SI < 0 and not c.m.val_set:\n c.m.val_SI = 10\n c.target.propagate_fluid_to_target(c, c.target)\n\n if self.lamb.val < 1 and not self.lamb.is_set:\n self.lamb.val = 2",
"def component_parallel_failure_rate_calculation(self, parallel_fr_list):\n failure_rate_of_parallel_value = float(0.0)\n result = 0.0\n\n if parallel_fr_list:\n for item in parallel_fr_list:\n if float(item) != 0.0 and str(item) != \"None\":\n failure_rate_of_parallel_value += float(item)\n\n result = float(failure_rate_of_parallel_value) * float(self.parallel_count_calculate_func(len(parallel_fr_list)))\n\n else:\n result = float(failure_rate_of_parallel_value)\n\n return float(result)",
"def pipe_failure(self, tube, fluid=None, N_welds=1, max_flow=None):\n # If fluid not defined use fluid of the Source\n fluid = fluid or self.fluid\n # Failure rate coefficients; Piping failure rate is per unit of length,\n # weld is dependent on number of welds, pipe OD and wall thickness\n failure_rate_coeff = {'Piping': (tube.L, 1),\n 'Pipe weld': (tube.OD / tube.wall,\n N_welds)}\n # Piping and weld leaks as per Table 2\n for cause in ['Piping', 'Pipe weld']:\n for mode in TABLE_2[cause].keys():\n if tube.D > 2 or mode != 'Large leak': # Large leak only for D > 2\"\n name = f'{cause} {mode.lower()}: {tube}, ' + \\\n f'{tube.L.to(ureg.ft):.3g~}'\n temp_tube = copy(tube)\n # Average path for the flow will be half of piping length\n # for gas piping\n temp_tube.L = tube.L / 2\n fr_coef = failure_rate_coeff[cause][0]\n N_events = failure_rate_coeff[cause][1]\n if mode == 'Rupture':\n failure_rate = fr_coef * TABLE_2[cause][mode]\n # For rupture calculate flow through available\n # pipe area\n area = tube.area\n else:\n failure_rate = fr_coef * \\\n TABLE_2[cause][mode]['Failure rate']\n area = TABLE_2[cause][mode]['Area']\n if area > tube.area:\n logger.warning('Leak area cannot be larger'\n ' than pipe area.')\n continue\n q_std = Source._leak_flow(temp_tube, area, fluid)\n if max_flow is not None:\n fluid_NTP = fluid.copy()\n fluid_NTP.update_kw(P=ht.P_NTP, T=ht.T_NTP)\n q_std_max = max_flow / fluid_NTP.Dmass\n q_std = min(q_std, q_std_max)\n self.leaks.append(\n self._make_leak(name, failure_rate, q_std, N_events))",
"def odh_class(self):\n if self.phi < 1e-7/ureg.hr:\n return 0\n elif self.phi < 1e-5/ureg.hr:\n return 1\n elif self.phi < 1e-3/ureg.hr:\n return 2\n else:\n # TODO add a custom exception for ODH > 2\n print('ODH fatality rate is too high. Please, check calculations')\n return None",
"def errorChecks(self):\n stop_calculation = False\n found_error = False\n errors = {\"Info\": [], \"Critical\": []}\n error_types = []\n ori_images = 0\n of_images = 0\n depth_images = 0\n back_of_images = 0\n\n if os.path.exists(self.savePathJoin(\"Images\")):\n ori_images = len(\n listDirectory(self.savePathJoin(\"Images\"), extension=\"png\")\n )\n # Check image folder\n if self.img_exist and not os.path.exists(self.savePathJoin(\"Images\")):\n if os.path.exists(self.user[\"Video\"]):\n errors[\"Info\"].append(\n \"Images folder {0} doesn't exist -> Recreate it and recalculate optical flow and depth estimations\".format(\n self.savePathJoin(\"Images\")\n )\n )\n error_types.append(\"NoImages\")\n else:\n stop_calculation = True\n errors[\"Critical\"].append(\n (\n \"Images folder {0} and video file {1} don't exist -> Stopping run\".format(\n self.savePathJoin(\"Images\"), self.user[\"Video\"]\n )\n )\n )\n elif self.img_exist and os.path.exists(self.user[\"Video\"]):\n errors[\"Info\"].append(\n \"Both the video {0} and Images folder {1} exist -> using Images folder by default\".format(\n self.user[\"Video\"], self.savePathJoin(\"Images\")\n )\n )\n elif not self.img_exist and not os.path.isfile(self.user[\"Video\"]):\n stop_calculation = True\n errors[\"Critical\"].append(\n (\n \"Images folder {0} and video file {1} don't exist -> Stopping run\".format(\n self.savePathJoin(\"Images\"), self.user[\"Video\"]\n )\n )\n )\n\n # Check video file\n if self.user[\"Video\"] != \"\" and not os.path.isfile(self.user[\"Video\"]):\n if os.path.exists(self.savePathJoin(\"Images\")):\n errors[\"Info\"].append(\n (\n \"Video file {0} doesn't exist -> Using images in the Images folder instead\".format(\n self.user[\"Video\"]\n )\n )\n )\n else:\n stop_calculation = True\n errors[\"Critical\"].append(\n (\n \"Images folder {0} and video file {1} don't exist -> Stopping run\".format(\n self.savePathJoin(\"Images\"), self.user[\"Video\"]\n )\n )\n )\n elif os.path.isfile(self.user[\"Video\"]) and os.path.exists(\n self.savePathJoin(\"Images\")\n ):\n pass\n\n # Check optical flow\n if self.of_exist and not os.path.exists(self.savePathJoin(\"Of\")):\n errors[\"Info\"].append(\n (\n \"Optical flow folder {0} doesn't exist -> Recalculating optical flow\".format(\n self.savePathJoin(\"Of\")\n )\n )\n )\n error_types.append(\"NoOf\")\n elif self.of_exist:\n of_images = len(listDirectory(self.savePathJoin(\"Of\"), extension=\"png\"))\n if of_images != ori_images - 1 and ori_images != 0:\n errors[\"Info\"].append(\n (\n \"Optical flow image number {0} doesn't match video image number {1} - 1 -> Recalculating optical flow\".format(\n of_images, ori_images\n )\n )\n )\n error_types.append(\"NoOf\")\n\n # Check backward optical flow\n if self.back_of_exist and not os.path.exists(self.savePathJoin(\"Back_Of\")):\n errors[\"Info\"].append(\n (\n \"Backward optical flow folder {0} doesn't exist -> Recalculating backward optical flow\".format(\n self.savePathJoin(\"Back_Of\")\n )\n )\n )\n error_types.append(\"NoOf\")\n elif self.back_of_exist:\n back_of_images = len(\n listDirectory(self.savePathJoin(\"Back_Of\"), extension=\"png\")\n )\n if back_of_images != of_images:\n errors[\"Info\"].append(\n (\n \"Backward optical flow image number {0} doesn't match optical flow image number {1} -> Recalculating backward optical flow\".format(\n back_of_images, of_images\n )\n )\n )\n error_types.append(\"NoOf\")\n\n # Check depth estimation\n if self.depth_exist and not os.path.exists(self.savePathJoin(\"Depth\")):\n 
errors[\"Info\"].append(\n (\n \"Depth folder {0} doesn't exist -> Recalculating depth\".format(\n self.savePathJoin(\"Depth\")\n )\n )\n )\n error_types.append(\"NoDepth\")\n elif self.depth_exist:\n depth_images = len(\n listDirectory(self.savePathJoin(\"Depth\"), extension=\"png\")\n )\n if depth_images != ori_images and ori_images != 0:\n errors[\"Info\"].append(\n (\n \"Depth image number {0} doesn't match video image number {1} -> Recalculating depth\".format(\n depth_images, ori_images\n )\n )\n )\n error_types.append(\"NoDepth\")\n\n # Check ground truth\n if self.gt_exist and not os.path.isfile(self.user[\"GT\"]):\n errors[\"Info\"].append(\n (\n \"Ground Truth file {0} doesn't exist -> File won't be used\".format(\n self.user[\"GT\"]\n )\n )\n )\n error_types.append(\"NoGT\")\n\n # Check super pixel labels\n if (\n self.super_pixel_method != \"\"\n and os.path.exists(\n os.path.join(self.savePathJoin(\"Super_Pixel\"), self.super_pixel_method)\n )\n and ori_images != 0\n and len(\n listDirectory(\n os.path.join(\n self.savePathJoin(\"Super_Pixel\"), self.super_pixel_method\n ),\n extension=\".npy\",\n )\n )\n != ori_images\n ):\n errors[\"Info\"].append(\n (\n \"Super pixel label number {0} doesn't match image number {1} -> Recalculating super pixel labels\".format(\n len(\n listDirectory(\n os.path.join(\n self.savePathJoin(\"Super_Pixel\"),\n self.super_pixel_method,\n ),\n extension=\".npy\",\n )\n ),\n ori_images,\n )\n )\n )\n error_types.append(\"LabelError\")\n\n # Check object detection\n if self.ui.c_object_detection.isChecked() and os.path.exists(\n self.savePathJoin(\"ObjectDetection\")\n ):\n if (\n len(\n listDirectory(\n self.savePathJoin(\"ObjectDetection\"), extension=\".png\"\n )\n )\n != ori_images\n ):\n errors[\"Info\"].append(\n \"Object Detection image number {0} doesn't match image number of video {1} -> Recalculating object detection\".format(\n len(\n listDirectory(\n self.savePathJoin(\"ObjectDetection\"), extension=\".png\"\n )\n ),\n ori_images,\n )\n )\n error_types.append(\"ObDetError\")\n elif (\n len(\n listDirectory(\n self.savePathJoin(\"ObjectDetection\"), extension=\".npy\"\n )\n )\n != ori_images\n ):\n errors[\"Info\"].append(\n \"Object Detection numpy array number {0} doesn't match image number of video {1} -> Recalculating object detection\".format(\n len(\n listDirectory(\n self.savePathJoin(\"ObjectDetection\"), extension=\".npy\"\n )\n ),\n ori_images,\n )\n )\n error_types.append(\"ObDetError\")\n\n answer = \"\"\n if len(errors[\"Info\"]) > 0 and len(errors[\"Critical\"]) == 0:\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Information)\n msg.setText(\n \"Some calculations might not run the way you expect them.\\nIn show details check the right side of the arrows to see what will happen.\"\n )\n msg.setWindowTitle(\"Information\")\n all_info = \"\"\n for info in errors[\"Info\"]:\n all_info += info + \"\\n\\n\"\n msg.setDetailedText(all_info)\n msg.setStandardButtons(QMessageBox.Ok | QMessageBox.Abort)\n answer = msg.exec_()\n elif len(errors[\"Critical\"]) > 0:\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Critical)\n msg.setText(\n \"Found critical error\\nCouldn't start run, see show details for more information\"\n )\n msg.setWindowTitle(\"Critical Error\")\n all_info = \"\"\n for info in errors[\"Critical\"]:\n all_info += info + \"\\n\"\n msg.setDetailedText(all_info)\n msg.setStandardButtons(QMessageBox.Abort)\n answer = msg.exec_()\n\n if answer != int(\"0x00040000\", 16):\n for ty in error_types:\n logging.info(\"Solve 
error: {0}\".format(ty))\n if ty == \"NoImage\":\n self.img_exist = False\n self.of_exist = False\n self.back_of_exist = False\n self.depth_exist = False\n elif ty == \"NoOf\":\n self.of_exist = False\n self.back_of_exist = False\n elif ty == \"NoDepth\":\n self.depth_exist = False\n elif ty == \"NoGT\":\n self.gt_exist = False\n self.user[\"GT\"] = \"\"\n elif ty == \"LabelError\":\n self.create_super_pixel_label = True\n shutil.rmtree(\n os.path.join(\n self.savePathJoin(\"Super_Pixel\"), self.super_pixel_method\n )\n )\n elif ty == \"ObDetError\":\n self.object_detection_dir_exist = False\n shutil.rmtree(self.savePathJoin(\"ObjectDetection\"))\n\n return answer == int(\"0x00040000\", 16) or stop_calculation",
"def failure_threshold(self) -> Optional[int]:\n return pulumi.get(self, \"failure_threshold\")",
"def failure_threshold(self) -> Optional[int]:\n return pulumi.get(self, \"failure_threshold\")",
"def get_alcohol_loss():\n F_ton_per_yr = LAOs.wastewater.get_flow('ton/day', fatty_alcohols)\n return F_ton_per_yr * LAOs.LAOs_tea.operating_days",
"def isothermal_rates(variables, time):\n \n TiCl4, O2, T = variables\n \n k1 = A1*np.exp(-Ea/(R*T)) * 1e-3\n\n k2 = A2*np.exp(-Ea/(R*T)) * 1e-3\n \n if O2 - (k1+k2*np.sqrt(O2))*TiCl4*end_time/steps <= 0 or np.isclose(0,O2) :\n return (0, 0, 0)\n \n else:\n rate_TiCl4 = -(k1+k2*np.sqrt(O2))*TiCl4\n rate_O2 = -(k1+k2*np.sqrt(O2))*TiCl4\n \n return (rate_TiCl4, rate_O2, 0)"
]
| [
"0.60235065",
"0.5961136",
"0.58948976",
"0.5526284",
"0.55170983",
"0.53160745",
"0.5297183",
"0.5253064",
"0.51723504",
"0.51687574",
"0.5096973",
"0.5010972",
"0.49728975",
"0.49670574",
"0.4959852",
"0.49482557",
"0.48530757",
"0.48435175",
"0.4823226",
"0.4820656",
"0.48118454",
"0.48079997",
"0.47886422",
"0.47818327",
"0.47766647",
"0.4772087",
"0.4697501",
"0.4697501",
"0.46944457",
"0.46634552"
]
| 0.6028143 | 0 |
Calculate fatality rates for fan failure on demand, i.e. the case where the ODH system responds and the fans are powered but some of the fans fail on demand. See wiki for further explanation. Calculation results are added to the fail_modes list. | def _fatality_fan_powered(self, source, leak, sol_PFD, PFD_power_build):
(failure_mode_name, leak_failure_rate, q_leak, tau, N) = leak
for (P_fan, Q_fan, N_fan) in self.Fan_flowrates:
            # Probability that power is on, the ODH system is working, and N_fan
            # fans are on, providing a combined flow rate Q_fan.
P_response = (1-PFD_power_build) * (1-self.PFD_ODH) * \
sol_PFD * P_fan
P_i = leak_failure_rate * P_response
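            # Oxygen concentration reached in the volume for this leak rate, fan flow and event duration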
O2_conc = conc_vent(self.volume, q_leak, Q_fan, tau)
F_i = self._fatality_prob(O2_conc)
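            # phi_i: this failure mode's contribution to the fatality rate (event rate P_i times fatality probability F_i)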
phi_i = P_i*F_i
f_mode = failure_mode(phi_i, source, failure_mode_name, O2_conc,
leak_failure_rate, P_i, F_i,
PFD_power_build == 1, q_leak, tau, Q_fan,
N_fan, N)
self.fail_modes.append(f_mode) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _fan_fail(self):\n # TODO add fans with different volumetric rates (see report as well)\n Fail_rate = self.lambda_fan\n Fan_flowrates = []\n for m in range(self.N_fans+1):\n # Probability of exactly m units starting\n P_m_fan_work = prob_m_of_n(m, self.N_fans, self.Test_period,\n Fail_rate)\n flowrate = self.Q_fan*m\n if flowrate == Q_('0 m**3/min'):\n flowrate = self.vent_rate\n Fan_flowrates.append((P_m_fan_work, flowrate, m))\n self.Fan_flowrates = Fan_flowrates",
"def odh(self, sources, power_outage=False):\n self.fail_modes = []\n # Probability of power failure in the building:\n # PFD_power if no outage, 1 if there is outage\n PFD_power_build = (power_outage or\n TABLE_1['Electrical Power Failure']['Demand rate'])\n # Calculate fatality rates for each source\n for source in sources:\n for leak in source.leaks:\n leak_failure_rate = leak[0]\n if leak_failure_rate is not None: # None for constant leak\n self._fatality_no_response(source, leak, source.sol_PFD,\n PFD_power_build)\n self._fatality_fan_powered(source, leak, source.sol_PFD,\n PFD_power_build)",
"def _fatality_no_response(self, source, leak, sol_PFD,\n PFD_power_build):\n (failure_mode_name, leak_failure_rate, q_leak, tau, N) = leak\n P_no_response = float(PFD_power_build) * sol_PFD + \\\n (1-PFD_power_build)*self.PFD_ODH\n P_i = leak_failure_rate * P_no_response\n Q_fan = self.vent_rate\n O2_conc = conc_vent(self.volume, q_leak, Q_fan, tau)\n F_i = self._fatality_prob(O2_conc)\n phi_i = P_i*F_i\n f_mode = failure_mode(phi_i, source, failure_mode_name, O2_conc,\n leak_failure_rate, P_i, F_i,\n PFD_power_build == 1, q_leak, tau, Q_fan, 0, N)\n self.fail_modes.append(f_mode)",
"def calculate_criticality(self, item_hr):\n _error_code = 0\n _msg = 'RAMSTK SUCCESS: Calculating failure mode {0:d} criticality.'.\\\n format(self.mode_id)\n\n if item_hr < 0.0:\n _error_code = 2010\n _msg = _(u\"RAMSTK ERROR: Item hazard rate has a negative value.\")\n raise OutOfRangeError(_msg)\n if not 0.0 <= self.mode_ratio <= 1.0:\n _error_code = 2010\n _msg = _(\n u\"RAMSTK ERROR: Failure mode ratio is outside the range of \"\n u\"[0.0, 1.0].\")\n raise OutOfRangeError(_msg)\n if self.mode_op_time < 0.0:\n _error_code = 2010\n _msg = _(u\"Failure mode operating time has a negative value.\")\n raise OutOfRangeError(_msg)\n if not 0.0 <= self.effect_probability <= 1.0:\n _error_code = 2010\n _msg = _(u\"Failure effect probability is outside the range \"\n u\"[0.0, 1.0].\")\n raise OutOfRangeError(_msg)\n\n self.mode_hazard_rate = item_hr * self.mode_ratio\n self.mode_criticality = self.mode_hazard_rate \\\n * self.mode_op_time * self.effect_probability\n\n if self.mode_hazard_rate < 0.0:\n _error_code = 2010\n _msg = _(u\"Failure mode hazard rate has a negative value.\")\n raise OutOfRangeError(_msg)\n if self.mode_criticality < 0.0:\n _error_code = 2010\n _msg = _(u\"Failure mode criticality has a negative value.\")\n raise OutOfRangeError(_msg)\n\n return _error_code, _msg",
"def set_defensive_ratio(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n if bx[\"minutes\"] > 0:\n opp_fga = opp_team[\"t2p_int\"] + opp_team[\"t3p_int\"]\n opp_fgm = opp_team[\"t2p_conv\"] + opp_team[\"t3p_conv\"]\n try:\n dor = Decimal(opp_team[\"reb_of\"] / (opp_team[\"reb_of\"] + team[\"reb_def\"]))\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n dor = 0\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n dor = 0\n\n try:\n dfg = Decimal(opp_fgm / opp_fga)\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n dfg = 0\n try:\n fmwt = Decimal((dfg * (1 - dor)) / (dfg * (1 - dor) + (1 - dfg) * dor))\n except:\n fmwt = 0\n stops1 = bx[\"steals\"] + bx[\"block_shots\"] * fmwt * (1 - Decimal('1.07') * dor) + bx[\"reb_def\"] * (1 - fmwt)\n\n try:\n stops2 = (Decimal((opp_fga - opp_fgm - team[\"block_shots\"]) / team[\"minutes\"]) * fmwt * (1 - Decimal('1.07') * dor) + Decimal((opp_team[\"turnovers\"] - team[\"steals\"]) / team[\"minutes\"])) * bx[\"minutes\"] + Decimal(bx[\"fouls_cm\"] / team[\"fouls_cm\"]) * Decimal('0.4') * opp_team[\"tl_int\"] * (1 - Decimal(opp_team[\"tl_conv\"] / opp_team[\"tl_int\"]))**2\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n stops2 = 0\n except InvalidOperation:\n print(BCOLORS.WARNING + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n stops2 = 0\n\n stops = stops1 + stops2\n poss = self.get_team_possessions()\n if bx[\"minutes\"] > 0:\n stop_percentage = (float(stops) * float(opp_team[\"minutes\"])) / (float(poss) * float(bx[\"minutes\"]))\n else:\n stop_percentage = 0.00\n opp_points = opp_team[\"t2p_conv\"] * 2 + opp_team[\"t3p_conv\"] * 3 + opp_team[\"tl_conv\"]\n team_defensive_rating = 100 * (float(opp_points) / poss)\n try:\n d_pts_per_scposs = float(opp_points) / (float(opp_fgm) + (1 - (1 - (float(opp_team[\"tl_conv\"]) / float(opp_team[\"tl_int\"])))**2) * float(opp_team[\"tl_int\"])*0.4)\n result = Decimal(team_defensive_rating) + Decimal('0.2') * (100 * Decimal(d_pts_per_scposs) * (1 - Decimal(stop_percentage)) - Decimal(team_defensive_rating))\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n d_pts_per_scposs = 0\n result = 0.00\n\n\n\n # print(\"dor: \" + str(dor))\n # print(\"dfg: \" + str(dfg))\n # print(\"fmwt: \" + str(fmwt))\n # print(\"stops1: \" + str(stops1))\n # print(\"stops2: \" + str(stops2))\n # print(\"stops: \" + str(stops))\n # print(\"poss: \" + str(poss))\n # print(\"stop_percentage: \" + str(stop_percentage))\n # print(\"opp_points: \" + str(opp_points))\n # print(\"team_defensive_rating: \" + str(team_defensive_rating))\n # print(\"d_pts_per_scposs: \" + str(d_pts_per_scposs))\n # print(\"drtg: \" + str(result) + \"\\n\")\n else:\n result = 0.00\n self.drtg = \"%.2f\" % round(result, 2)",
"def component_parallel_failure_rate_calculation(self, parallel_fr_list):\n failure_rate_of_parallel_value = float(0.0)\n result = 0.0\n\n if parallel_fr_list:\n for item in parallel_fr_list:\n if float(item) != 0.0 and str(item) != \"None\":\n failure_rate_of_parallel_value += float(item)\n\n result = float(failure_rate_of_parallel_value) * float(self.parallel_count_calculate_func(len(parallel_fr_list)))\n\n else:\n result = float(failure_rate_of_parallel_value)\n\n return float(result)",
"def calc_loss_heat_recovery (self):\n hr_used = self.cd['heat recovery operational']\n self.loss_heat_recovery = 0\n if hr_used:# == 'Yes':\n self.loss_heat_recovery = self.electric_diesel_reduction * \\\n (self.comp_specs['percent heat recovered'] / 100.0)\n #~ print 'self.loss_heat_recovery',self.loss_heat_recovery",
"def failed_per_hour(self):\r\n return (3600.*(self.circ_failed+self.strm_failed))/self.current_uptime()",
"def fanofactor(spiketrains, warn_tolerance=0.1 * pq.ms):\n # Build array of spike counts (one per spike train)\n spike_counts = np.array([len(st) for st in spiketrains])\n\n # Compute FF\n if all(count == 0 for count in spike_counts):\n # empty list of spiketrains reaches this branch, and NaN is returned\n return np.nan\n\n if all(isinstance(st, neo.SpikeTrain) for st in spiketrains):\n if not is_time_quantity(warn_tolerance):\n raise TypeError(\"'warn_tolerance' must be a time quantity.\")\n durations = [(st.t_stop - st.t_start).simplified.item()\n for st in spiketrains]\n durations_min = min(durations)\n durations_max = max(durations)\n if durations_max - durations_min > warn_tolerance.simplified.item():\n warnings.warn(\"Fano factor calculated for spike trains of \"\n \"different duration (minimum: {_min}s, maximum \"\n \"{_max}s).\".format(_min=durations_min,\n _max=durations_max))\n\n fano = spike_counts.var() / spike_counts.mean()\n return fano",
"def unocc_fan_operation(self, dx_result):\n avg_duct_stcpr = 0\n percent_on = 0\n fanstat_on = [(fan[0].hour, fan[1]) for fan in self.fanstat_values if int(fan[1]) == 1]\n fanstat = [(fan[0].hour, fan[1]) for fan in self.fanstat_values]\n hourly_counter = []\n\n for counter in range(24):\n fan_on_count = [fan_status_time[1] for fan_status_time in fanstat_on if fan_status_time[0] == counter]\n fan_count = [fan_status_time[1] for fan_status_time in fanstat if fan_status_time[0] == counter]\n if len(fan_count):\n hourly_counter.append(fan_on_count.count(1)/len(fan_count)*100)\n else:\n hourly_counter.append(0)\n\n if self.sched_time:\n if self.fanstat_values:\n percent_on = (len(fanstat_on)/len(self.fanstat_values)) * 100.0\n if self.stcpr_arr:\n avg_duct_stcpr = mean(self.stcpr_arr)\n\n if percent_on > self.unocc_time_threshold:\n msg = 'Supply fan is on during unoccupied times.'\n dx_msg = 63.1\n else:\n if avg_duct_stcpr < self.unocc_stp_threshold:\n msg = 'No problems detected for schedule diagnostic.'\n dx_msg = 60.0\n else:\n msg = ('Fan status show the fan is off but the duct static '\n 'pressure is high, check the functionality of the '\n 'pressure sensor.')\n dx_msg = 64.2\n else:\n msg = 'No problems detected for schedule diagnostic.'\n dx_msg = 60.0\n\n if dx_msg != 64.2:\n for _hour in range(24):\n push_time = self.timestamp[0].date()\n push_time = datetime.combine(push_time, datetime.min.time())\n push_time = push_time.replace(hour=_hour)\n dx_table = {SCHED_RCX + DX: 60.0}\n if hourly_counter[_hour] > self.unocc_time_threshold:\n dx_table = {SCHED_RCX + DX: dx_msg}\n table_key = create_table_key(self.sched_file_name_id, push_time)\n dx_result.insert_table_row(table_key, dx_table)\n else:\n push_time = self.timestamp[0].date()\n table_key = create_table_key(self.sched_file_name_id, push_time)\n dx_result.insert_table_row(table_key, {SCHED_RCX + DX: dx_msg})\n dx_result.log(msg, logging.INFO)\n return dx_result",
"def set_offensive_ratio(self):\n bx = self.get_standard_stats()\n team = self.get_team_stats()\n opp_team = self.get_opp_team_stats()\n if bx[\"minutes\"] > 0 and (bx[\"t2p_int\"] + bx[\"t3p_int\"]) > 0:\n fgm = bx[\"t2p_conv\"] + bx[\"t3p_conv\"]\n fga = bx[\"t2p_int\"] + bx[\"t3p_int\"]\n team_fgm = team[\"t2p_conv\"] + team[\"t3p_conv\"]\n team_fga = team[\"t2p_int\"] + team[\"t3p_int\"]\n team_points = team[\"t2p_conv\"]*2 + team[\"t3p_conv\"]*3 + team[\"tl_conv\"]\n points = bx[\"t2p_conv\"]*2 + bx[\"t3p_conv\"]*3 + bx[\"tl_conv\"]\n\n try:\n qAST = (Decimal(bx[\"minutes\"] / (team[\"minutes\"] / 5)) * (Decimal('1.14') * Decimal((team[\"assists\"] - bx[\"assists\"]) / team_fgm))) + \\\n Decimal((((team[\"assists\"] / team[\"minutes\"]) * bx[\"minutes\"] * 5 - bx[\"assists\"]) / ((team_fgm / team[\"minutes\"]) * bx[\"minutes\"] * 5 - fgm)) * (1 - (bx[\"minutes\"] / (team[\"minutes\"] / 5))))\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n qAST = 1\n except InvalidOperation:\n print(BCOLORS.WARNING + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n qAST = 1\n\n fg_part = fgm * (1 - Decimal('0.5') * Decimal((points - bx[\"tl_conv\"]) / (2 * fga)) * qAST)\n\n try:\n ast_part = Decimal('0.5') * Decimal(((team_points - team[\"tl_conv\"]) - (points - bx[\"tl_conv\"])) / (2*(team_fga - fga))) * bx[\"assists\"]\n except ZeroDivisionError:\n print(BCOLORS.WARNING + \"Error: División por cero\" + BCOLORS.ENDC)\n ast_part = 0\n\n if bx[\"tl_int\"] > 0:\n ft_part = Decimal(1 - (1 - (bx[\"tl_conv\"] / bx[\"tl_int\"]))**2) * Decimal('0.4') * bx[\"tl_int\"]\n else:\n ft_part = 0\n team_scoring_poss = Decimal(team_fgm + Decimal(1 - (1 - (team[\"tl_conv\"] / team[\"tl_int\"]))**2) * team[\"tl_int\"] * Decimal('0.4'))\n try:\n team_orb_percentage = Decimal(team[\"reb_of\"] / (team[\"reb_of\"] + ((opp_team[\"reb_def\"] + opp_team[\"reb_of\"]) - opp_team[\"reb_of\"])))\n except ZeroDivisionError:\n print(BCOLORS.FAIL + \"Error: División por cero\" + BCOLORS.ENDC)\n team_orb_percentage = 0\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n team_orb_percentage = 0\n\n team_play_percentage = Decimal(team_scoring_poss / (team_fga + team[\"tl_int\"] * Decimal('0.4') + team[\"turnovers\"]))\n try:\n team_orb_weight = ((1 - team_orb_percentage) * team_play_percentage) / ((1 - team_orb_percentage) * team_play_percentage + team_orb_percentage * (1 - team_play_percentage))\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n team_orb_weight = 0\n\n orb_part = bx[\"reb_of\"] * team_orb_weight * team_play_percentage\n\n fg_x_poss = (fga - fgm) * (1 - Decimal('1.07') * team_orb_percentage)\n if bx[\"tl_conv\"] > 0:\n ft_x_poss = Decimal((1 - (bx[\"tl_conv\"] / bx[\"tl_int\"]))**2) * Decimal('0.4') * bx[\"tl_int\"]\n else:\n ft_x_poss = Decimal(1 - (bx[\"tl_conv\"] / 1)**2) * Decimal('0.4') * bx[\"tl_int\"]\n try:\n sc_poss = (fg_part + ast_part + ft_part) * (1 - (team[\"reb_of\"] / team_scoring_poss) * team_orb_weight * team_play_percentage) + orb_part\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n sc_poss =0\n\n tot_poss = sc_poss + fg_x_poss + ft_x_poss + bx[\"turnovers\"]\n\n pprod_fg_part = 2 * (fgm + Decimal('0.5') * bx[\"t3p_conv\"]) * (1 - Decimal('0.5') * Decimal((points - bx[\"tl_conv\"]) / (2 * fga)) * qAST)\n\n try:\n pprod_ast_part = 2 * ((team_fgm - fgm + Decimal('0.5') * (team[\"t3p_conv\"] - bx[\"t3p_conv\"])) / 
(team_fgm - fgm)) * Decimal('0.5') * Decimal(((team_points - team[\"tl_conv\"]) - (points - bx[\"tl_conv\"])) / (2 * (team_fga - fga))) * bx[\"assists\"]\n except:\n pprod_ast_part = 0\n\n pprod_orb_part = bx[\"reb_of\"] * team_orb_weight * team_play_percentage * (team_points / (team_fgm + Decimal(1 - (team[\"tl_conv\"] / team[\"tl_int\"])**2) * Decimal('0.4') * team[\"tl_int\"]))\n try:\n pprod = (pprod_fg_part + pprod_ast_part + bx[\"tl_conv\"]) * (1 - (team[\"reb_of\"] / team_scoring_poss) * team_orb_weight * team_play_percentage) + pprod_orb_part\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n pprod = 0\n\n try:\n result = 100 * (pprod / tot_poss)\n except InvalidOperation:\n print(BCOLORS.FAIL + \"Error: Invalid Operation\" + BCOLORS.ENDC)\n result = 0\n\n # print(\"fgm: \" + str(fgm))\n # print(\"fga: \" + str(fga))\n # print(\"team_fgm: \" + str(team_fgm))\n # print(\"team_fga: \" + str(team_fga))\n # print(\"team_points: \" + str(team_points))\n # print(\"points: \" + str(points))\n # print(\"qAST: \" + str(qAST))\n # print(\"fg_part: \" + str(fg_part))\n # print(\"ast_part: \" + str(ast_part))\n # print(\"ft_part: \" + str(ft_part))\n # print(\"team_scoring_poss: \" + str(team_scoring_poss))\n # print(\"team_orb_percentage: \" + str(team_orb_percentage))\n # print(\"team_play_percentage: \" + str(team_play_percentage))\n # print(\"team_orb_weight: \" + str(team_orb_weight))\n # print(\"orb_part: \" + str(orb_part))\n # print(\"fg_x_poss: \" + str(fg_x_poss))\n # print(\"ft_x_poss: \" + str(ft_x_poss))\n # print(\"sc_poss: \" + str(sc_poss))\n # print(\"tot_poss: \" + str(tot_poss))\n # print(\"pprod_fg_part: \" + str(pprod_fg_part))\n # print(\"pprod_ast_part: \" + str(pprod_ast_part))\n # print(\"pprod_orb_part: \" + str(pprod_orb_part))\n # print(\"pprod: \" + str(pprod))\n # print(\"result: \" + str(result) + \"\\n\")\n else:\n result = 0.00\n\n self.ortg = \"%.2f\" % round(result, 2)\n if Decimal(self.ortg) < 0 or Decimal(self.ortg) >= 1000:\n \"\"\"For one game, maybe we've got a negative result or one so big, so, for just only a game, we get the ORTG \n using team's formula\"\"\"\n print(BCOLORS.OKBLUE + \"ORTG negativo o superior a 1000 para jugadora => recalculamos a través de la fórmula de equipo\" + BCOLORS.ENDC)\n bx = self.get_standard_stats()\n result = round((bx[\"t2p_conv\"]*2 + bx[\"t3p_conv\"]*3 + bx[\"tl_conv\"])/self.get_team_possessions(), 2)\n self.ortg = \"%.2f\" % result",
"def calc_lost_heat_recovery (self):\n if not self.cd['heat recovery operational']:\n\n self.lost_heat_recovery = [0]\n else:\n gen_eff = self.cd[\"diesel generation efficiency\"]\n self.lost_heat_recovery = \\\n (self.generation / gen_eff )* .10",
"def failure_mode(self, name, failure_rate, q_std, N=1):\n self.leaks.append(\n self._make_leak(name, failure_rate, q_std, N))",
"def verify_no_cable_errors(self):\n i = 0\n for dpid in self.dpids:\n i += 1\n labels = {'dp_id': '0x%x' % int(dpid), 'dp_name': 'faucet-%u' % i}\n self.assertEqual(\n 0, self.scrape_prometheus_var(\n var='stack_cabling_errors_total', labels=labels, default=None))\n self.assertGreater(\n self.scrape_prometheus_var(\n var='stack_probes_received_total', labels=labels), 0)",
"def get_failure_rate(self) -> float:\n return self.failurerate",
"def measure_fock(self, modes, select=None, **kwargs):\n return self.circuit.measure_fock(self._remap_modes(modes), select=select)",
"def error_f(self, df_list):\n diffs = [\n abs(df[\"Actual\"] - df[f\"{self._num2str(num)}_phase\"]).sum()\n for (num, df) in enumerate(df_list, start=1)\n ]\n return sum(diffs)",
"def calculate_average_run_accuracy(self):\n overall_true_rate, true_positive_rate, true_negative_rate, false_positive_rate, false_negative_rate, true_positive_rate_cutoff, true_negative_rate_cutoff, \\\n false_positive_rate_cutoff, false_negative_rate_cutoff, unclassified_cutoff, matthews_correlation_coefficient, brier_score, auc_score, fit_time, hmeasure = [0] * 15\n balanced_accuracy_arr = []\n auc_arr = []\n hmeasure_arr = []\n brier_score_arr = []\n fit_time_arr = []\n mcc_arr = []\n true_positive_arr = []\n true_negative_arr = []\n false_positive_arr = []\n false_negative_arr = []\n\n count = 0\n for result_dictionary in self.errors:\n for z in range(len(result_dictionary[\"balanced_accuracy_arr\"])):\n overall_true_rate += result_dictionary[\"balanced_accuracy_arr\"][z]\n true_positive_rate += result_dictionary[\"true_positive_rate_arr\"][z]\n true_negative_rate += result_dictionary[\"true_negative_rate_arr\"][z]\n false_positive_rate += result_dictionary[\"false_positive_rate_arr\"][z]\n false_negative_rate += result_dictionary[\"false_negative_rate_arr\"][z]\n matthews_correlation_coefficient += result_dictionary[\"mcc_arr\"][z]\n auc_score += result_dictionary[\"auc_arr\"][z]\n brier_score += result_dictionary[\"brier_score_arr\"][z]\n fit_time += result_dictionary[\"fit_time_arr\"][z]\n hmeasure += result_dictionary[\"hmeasure_arr\"][z]\n count += 1\n\n true_positive_rate_cutoff += result_dictionary[\"avg_true_positive_rate_with_prob_cutoff\"]\n true_negative_rate_cutoff += result_dictionary[\"avg_true_negative_rate_with_prob_cutoff\"]\n false_positive_rate_cutoff += result_dictionary[\"avg_false_positive_rate_with_prob_cutoff\"]\n false_negative_rate_cutoff += result_dictionary[\"avg_false_negative_rate_with_prob_cutoff\"]\n unclassified_cutoff += result_dictionary[\"avg_false_negative_rate_with_prob_cutoff\"]\n balanced_accuracy_arr += result_dictionary[\"balanced_accuracy_arr\"]\n hmeasure_arr += result_dictionary[\"hmeasure_arr\"]\n auc_arr += result_dictionary[\"auc_arr\"]\n brier_score_arr += result_dictionary[\"brier_score_arr\"]\n fit_time_arr += result_dictionary[\"fit_time_arr\"]\n mcc_arr += result_dictionary[\"mcc_arr\"]\n true_positive_arr += result_dictionary[\"true_positive_rate_arr\"]\n true_negative_arr += result_dictionary[\"true_negative_rate_arr\"]\n false_positive_arr += result_dictionary[\"false_positive_rate_arr\"]\n false_negative_arr += result_dictionary[\"false_negative_rate_arr\"]\n\n avg_run_results = [None] * 31\n avg_run_results[0] = matthews_correlation_coefficient / float(count)\n avg_run_results[1] = brier_score / float(count)\n avg_run_results[2] = overall_true_rate / float(count)\n avg_run_results[3] = true_positive_rate / float(count)\n avg_run_results[4] = true_negative_rate / float(count)\n avg_run_results[5] = false_positive_rate / float(count)\n avg_run_results[6] = false_negative_rate / float(count)\n avg_run_results[7] = true_positive_rate_cutoff / float(len(self.errors))\n avg_run_results[8] = true_negative_rate_cutoff / float(len(self.errors))\n avg_run_results[9] = false_positive_rate_cutoff / float(len(self.errors))\n avg_run_results[10] = false_negative_rate_cutoff / float(len(self.errors))\n avg_run_results[11] = unclassified_cutoff / float(len(self.errors))\n avg_run_results[12] = fit_time / float(count)\n avg_run_results[14] = balanced_accuracy_arr\n avg_run_results[15] = auc_score / float(count)\n avg_run_results[16] = auc_arr\n avg_run_results[17] = brier_score_arr\n avg_run_results[18] = fit_time_arr\n avg_run_results[19] = 
mcc_arr\n avg_run_results[13] = self.calculate_std_deviation(balanced_accuracy_arr)\n avg_run_results[20] = self.calculate_std_deviation(mcc_arr)\n avg_run_results[21] = self.calculate_std_deviation(brier_score_arr)\n avg_run_results[22] = self.calculate_std_deviation(auc_arr)\n avg_run_results[23] = self.calculate_std_deviation(fit_time_arr)\n avg_run_results[24] = self.calculate_std_deviation(true_positive_arr)\n avg_run_results[25] = self.calculate_std_deviation(true_negative_arr)\n avg_run_results[26] = self.calculate_std_deviation(false_positive_arr)\n avg_run_results[27] = self.calculate_std_deviation(false_negative_arr)\n avg_run_results[28] = hmeasure / float(count)\n avg_run_results[29] = self.calculate_std_deviation(hmeasure_arr)\n avg_run_results[30] = hmeasure_arr\n\n return avg_run_results",
"def rules(self):\n self.rule1 = min(self.location_is_lessDemand, self.area_is_small, self.unfunishing)\n self.rule2 = min(self.location_is_lessDemand, max(self.area_is_small, self.area_is_average), self.access_is_good)\n self.rule3 = min(self.location_is_veryHighDemand, self.area_is_average, self.fac_is_low, self.access_is_average)\n self.rule4 = min(self.location_is_veryLessDemand, self.area_is_verysmall, self.fully_funishing)\n self.rule5 = min(self.location_is_lessDemand, self.fac_is_average, max(self.area_is_small, self.area_is_average))\n self.rule6 = min(max(self.location_is_lessDemand, self.location_is_averageDemand), self.access_is_good)\n self.rule7 = min(self.location_is_lessDemand, self.access_is_good, self.area_is_large, self.partially_funishing)\n self.rule8 = min(self.location_is_highDemand, self.access_is_good, max(self.bed_is_less, self.bath_is_average))\n self.rule9 = min(self.location_is_veryHighDemand, self.area_is_large, self.unfunishing)\n self.rule10 = min(self.access_is_good, self.area_is_average, (1 - self.unfunishing))\n self.rule11 = min(self.access_is_good, self.area_is_large, self.partially_funishing, self.bed_is_more, self.bath_is_more)",
"def compute(self, failures):\n pass",
"def test_failure(database):\n # Create a 12 character random fain\n fain_1 = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for _ in range(12))\n fain_2 = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for _ in range(12))\n fain_3 = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for _ in range(12))\n fain_4 = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for _ in range(12))\n\n # Simple addition that doesn't add up right\n af_1_row_1 = AwardFinancialFactory(transaction_obligated_amou=1100, fain=fain_1, allocation_transfer_agency=None)\n af_1_row_2 = AwardFinancialFactory(transaction_obligated_amou=11, fain=fain_1.lower(),\n allocation_transfer_agency=None)\n # Incorrect addition based on assistance type in AFA\n af_2 = AwardFinancialFactory(transaction_obligated_amou=9999, fain=fain_2, allocation_transfer_agency=None)\n # Don't ignore when ATA and AID match\n af_3 = AwardFinancialFactory(transaction_obligated_amou=1111, fain=fain_3, allocation_transfer_agency='good',\n agency_identifier='good')\n # Not ignored with TOA of 0\n af_4 = AwardFinancialFactory(transaction_obligated_amou=0, fain=fain_4, allocation_transfer_agency='good',\n agency_identifier='good')\n\n # Sum of this fain doesn't add up to af fain sum\n afa_1_row_1 = AwardFinancialAssistanceFactory(fain=fain_1, federal_action_obligation=-1100,\n original_loan_subsidy_cost=None, record_type='2')\n afa_1_row_2 = AwardFinancialAssistanceFactory(fain=fain_1.lower(), federal_action_obligation=-10,\n original_loan_subsidy_cost=None, record_type='3')\n # Both of these rows use the column that isn't filled in for summing so neither results in the correct number\n afa_2_row_1 = AwardFinancialAssistanceFactory(fain=fain_2, federal_action_obligation=-9999,\n original_loan_subsidy_cost=None, record_type='2')\n afa_2_row_2 = AwardFinancialAssistanceFactory(fain=fain_2, federal_action_obligation=None,\n original_loan_subsidy_cost=-9999, assistance_type='07',\n record_type='3')\n # This shouldn't be ignored\n afa_3 = AwardFinancialAssistanceFactory(fain=fain_3, federal_action_obligation=0, original_loan_subsidy_cost=None,\n record_type='2')\n # Shouldn't be ignored with a TOA of 0\n afa_4 = AwardFinancialAssistanceFactory(fain=fain_4, federal_action_obligation=1, original_loan_subsidy_cost=None,\n record_type='2')\n\n errors = number_of_errors(_FILE, database, models=[af_1_row_1, af_1_row_2, af_2, af_3, af_4, afa_1_row_1,\n afa_1_row_2, afa_2_row_1, afa_2_row_2, afa_3, afa_4])\n assert errors == 4",
"def _process_failures(self, target):\n crash_synopses = self._fuzz_data_logger.failed_test_cases.get(self._fuzz_data_logger.all_test_cases[-1], [])\n if len(crash_synopses) > 0:\n self._fuzz_data_logger.open_test_step(\"Failure summary\")\n\n # retrieve the primitive that caused the crash and increment it's individual crash count.\n self.crashing_primitives[self.fuzz_node.mutant] = self.crashing_primitives.get(self.fuzz_node.mutant, 0) + 1\n self.crashing_primitives[self.fuzz_node] = self.crashing_primitives.get(self.fuzz_node, 0) + 1\n\n # print crash synopsis\n if len(crash_synopses) > 1:\n # Prepend a header if > 1 failure report, so that they are visible from the main web page\n synopsis = \"({0} reports) {1}\".format(len(crash_synopses), \"\\n\".join(crash_synopses))\n else:\n synopsis = \"\\n\".join(crash_synopses)\n self.procmon_results[self.total_mutant_index] = crash_synopses\n self._fuzz_data_logger.log_info(synopsis)\n\n if self.fuzz_node.mutant is not None and \\\n self.crashing_primitives[self.fuzz_node] >= self._crash_threshold_node:\n skipped = self.fuzz_node.num_mutations() - self.fuzz_node.mutant_index\n self._skip_current_node_after_current_test_case = True\n self._fuzz_data_logger.open_test_step(\n \"Crash threshold reached for this request, exhausting {0} mutants.\".format(skipped))\n self.total_mutant_index += skipped\n self.fuzz_node.mutant_index += skipped\n elif self.fuzz_node.mutant is not None and \\\n self.crashing_primitives[self.fuzz_node.mutant] >= self._crash_threshold_element:\n if not isinstance(self.fuzz_node.mutant, primitives.Group)\\\n and not isinstance(self.fuzz_node.mutant, blocks.Repeat):\n skipped = self.fuzz_node.mutant.num_mutations() - self.fuzz_node.mutant.mutant_index\n self._skip_current_element_after_current_test_case = True\n self._fuzz_data_logger.open_test_step(\n \"Crash threshold reached for this element, exhausting {0} mutants.\".format(skipped))\n self.total_mutant_index += skipped\n self.fuzz_node.mutant_index += skipped\n\n self.restart_target(target)\n return True\n else:\n return False",
"def error_calculation_test(self):\n dataOrg = [[1,1], [2,2], [3,3], [4,4], [5,5], [6,6], [7,8], [7.3, 5], [8, 0], [9,10]]\n dataCalc = [[1,3], [2,5], [3,0], [4,3], [5,5], [6.1,6], [7,3], [7.3, 5], [8, 0], [9,9]]\n # abs difference: 2 3 3 1 0 NA 5 0 NA 1\n # local errors: 200 150 200 50 0 NA 125 0 NA 20\n # sum: 745\n\n tsOrg = TimeSeries.from_twodim_list(dataOrg)\n tsCalc = TimeSeries.from_twodim_list(dataCalc)\n\n wmape = WeightedMeanAbsolutePercentageError()\n wmape.initialize(tsOrg, tsCalc)\n assert str(wmape.get_error())[:6] == \"93.125\"",
"def errorChecks(self):\n stop_calculation = False\n found_error = False\n errors = {\"Info\": [], \"Critical\": []}\n error_types = []\n ori_images = 0\n of_images = 0\n depth_images = 0\n back_of_images = 0\n\n if os.path.exists(self.savePathJoin(\"Images\")):\n ori_images = len(\n listDirectory(self.savePathJoin(\"Images\"), extension=\"png\")\n )\n # Check image folder\n if self.img_exist and not os.path.exists(self.savePathJoin(\"Images\")):\n if os.path.exists(self.user[\"Video\"]):\n errors[\"Info\"].append(\n \"Images folder {0} doesn't exist -> Recreate it and recalculate optical flow and depth estimations\".format(\n self.savePathJoin(\"Images\")\n )\n )\n error_types.append(\"NoImages\")\n else:\n stop_calculation = True\n errors[\"Critical\"].append(\n (\n \"Images folder {0} and video file {1} don't exist -> Stopping run\".format(\n self.savePathJoin(\"Images\"), self.user[\"Video\"]\n )\n )\n )\n elif self.img_exist and os.path.exists(self.user[\"Video\"]):\n errors[\"Info\"].append(\n \"Both the video {0} and Images folder {1} exist -> using Images folder by default\".format(\n self.user[\"Video\"], self.savePathJoin(\"Images\")\n )\n )\n elif not self.img_exist and not os.path.isfile(self.user[\"Video\"]):\n stop_calculation = True\n errors[\"Critical\"].append(\n (\n \"Images folder {0} and video file {1} don't exist -> Stopping run\".format(\n self.savePathJoin(\"Images\"), self.user[\"Video\"]\n )\n )\n )\n\n # Check video file\n if self.user[\"Video\"] != \"\" and not os.path.isfile(self.user[\"Video\"]):\n if os.path.exists(self.savePathJoin(\"Images\")):\n errors[\"Info\"].append(\n (\n \"Video file {0} doesn't exist -> Using images in the Images folder instead\".format(\n self.user[\"Video\"]\n )\n )\n )\n else:\n stop_calculation = True\n errors[\"Critical\"].append(\n (\n \"Images folder {0} and video file {1} don't exist -> Stopping run\".format(\n self.savePathJoin(\"Images\"), self.user[\"Video\"]\n )\n )\n )\n elif os.path.isfile(self.user[\"Video\"]) and os.path.exists(\n self.savePathJoin(\"Images\")\n ):\n pass\n\n # Check optical flow\n if self.of_exist and not os.path.exists(self.savePathJoin(\"Of\")):\n errors[\"Info\"].append(\n (\n \"Optical flow folder {0} doesn't exist -> Recalculating optical flow\".format(\n self.savePathJoin(\"Of\")\n )\n )\n )\n error_types.append(\"NoOf\")\n elif self.of_exist:\n of_images = len(listDirectory(self.savePathJoin(\"Of\"), extension=\"png\"))\n if of_images != ori_images - 1 and ori_images != 0:\n errors[\"Info\"].append(\n (\n \"Optical flow image number {0} doesn't match video image number {1} - 1 -> Recalculating optical flow\".format(\n of_images, ori_images\n )\n )\n )\n error_types.append(\"NoOf\")\n\n # Check backward optical flow\n if self.back_of_exist and not os.path.exists(self.savePathJoin(\"Back_Of\")):\n errors[\"Info\"].append(\n (\n \"Backward optical flow folder {0} doesn't exist -> Recalculating backward optical flow\".format(\n self.savePathJoin(\"Back_Of\")\n )\n )\n )\n error_types.append(\"NoOf\")\n elif self.back_of_exist:\n back_of_images = len(\n listDirectory(self.savePathJoin(\"Back_Of\"), extension=\"png\")\n )\n if back_of_images != of_images:\n errors[\"Info\"].append(\n (\n \"Backward optical flow image number {0} doesn't match optical flow image number {1} -> Recalculating backward optical flow\".format(\n back_of_images, of_images\n )\n )\n )\n error_types.append(\"NoOf\")\n\n # Check depth estimation\n if self.depth_exist and not os.path.exists(self.savePathJoin(\"Depth\")):\n 
errors[\"Info\"].append(\n (\n \"Depth folder {0} doesn't exist -> Recalculating depth\".format(\n self.savePathJoin(\"Depth\")\n )\n )\n )\n error_types.append(\"NoDepth\")\n elif self.depth_exist:\n depth_images = len(\n listDirectory(self.savePathJoin(\"Depth\"), extension=\"png\")\n )\n if depth_images != ori_images and ori_images != 0:\n errors[\"Info\"].append(\n (\n \"Depth image number {0} doesn't match video image number {1} -> Recalculating depth\".format(\n depth_images, ori_images\n )\n )\n )\n error_types.append(\"NoDepth\")\n\n # Check ground truth\n if self.gt_exist and not os.path.isfile(self.user[\"GT\"]):\n errors[\"Info\"].append(\n (\n \"Ground Truth file {0} doesn't exist -> File won't be used\".format(\n self.user[\"GT\"]\n )\n )\n )\n error_types.append(\"NoGT\")\n\n # Check super pixel labels\n if (\n self.super_pixel_method != \"\"\n and os.path.exists(\n os.path.join(self.savePathJoin(\"Super_Pixel\"), self.super_pixel_method)\n )\n and ori_images != 0\n and len(\n listDirectory(\n os.path.join(\n self.savePathJoin(\"Super_Pixel\"), self.super_pixel_method\n ),\n extension=\".npy\",\n )\n )\n != ori_images\n ):\n errors[\"Info\"].append(\n (\n \"Super pixel label number {0} doesn't match image number {1} -> Recalculating super pixel labels\".format(\n len(\n listDirectory(\n os.path.join(\n self.savePathJoin(\"Super_Pixel\"),\n self.super_pixel_method,\n ),\n extension=\".npy\",\n )\n ),\n ori_images,\n )\n )\n )\n error_types.append(\"LabelError\")\n\n # Check object detection\n if self.ui.c_object_detection.isChecked() and os.path.exists(\n self.savePathJoin(\"ObjectDetection\")\n ):\n if (\n len(\n listDirectory(\n self.savePathJoin(\"ObjectDetection\"), extension=\".png\"\n )\n )\n != ori_images\n ):\n errors[\"Info\"].append(\n \"Object Detection image number {0} doesn't match image number of video {1} -> Recalculating object detection\".format(\n len(\n listDirectory(\n self.savePathJoin(\"ObjectDetection\"), extension=\".png\"\n )\n ),\n ori_images,\n )\n )\n error_types.append(\"ObDetError\")\n elif (\n len(\n listDirectory(\n self.savePathJoin(\"ObjectDetection\"), extension=\".npy\"\n )\n )\n != ori_images\n ):\n errors[\"Info\"].append(\n \"Object Detection numpy array number {0} doesn't match image number of video {1} -> Recalculating object detection\".format(\n len(\n listDirectory(\n self.savePathJoin(\"ObjectDetection\"), extension=\".npy\"\n )\n ),\n ori_images,\n )\n )\n error_types.append(\"ObDetError\")\n\n answer = \"\"\n if len(errors[\"Info\"]) > 0 and len(errors[\"Critical\"]) == 0:\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Information)\n msg.setText(\n \"Some calculations might not run the way you expect them.\\nIn show details check the right side of the arrows to see what will happen.\"\n )\n msg.setWindowTitle(\"Information\")\n all_info = \"\"\n for info in errors[\"Info\"]:\n all_info += info + \"\\n\\n\"\n msg.setDetailedText(all_info)\n msg.setStandardButtons(QMessageBox.Ok | QMessageBox.Abort)\n answer = msg.exec_()\n elif len(errors[\"Critical\"]) > 0:\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Critical)\n msg.setText(\n \"Found critical error\\nCouldn't start run, see show details for more information\"\n )\n msg.setWindowTitle(\"Critical Error\")\n all_info = \"\"\n for info in errors[\"Critical\"]:\n all_info += info + \"\\n\"\n msg.setDetailedText(all_info)\n msg.setStandardButtons(QMessageBox.Abort)\n answer = msg.exec_()\n\n if answer != int(\"0x00040000\", 16):\n for ty in error_types:\n logging.info(\"Solve 
error: {0}\".format(ty))\n if ty == \"NoImage\":\n self.img_exist = False\n self.of_exist = False\n self.back_of_exist = False\n self.depth_exist = False\n elif ty == \"NoOf\":\n self.of_exist = False\n self.back_of_exist = False\n elif ty == \"NoDepth\":\n self.depth_exist = False\n elif ty == \"NoGT\":\n self.gt_exist = False\n self.user[\"GT\"] = \"\"\n elif ty == \"LabelError\":\n self.create_super_pixel_label = True\n shutil.rmtree(\n os.path.join(\n self.savePathJoin(\"Super_Pixel\"), self.super_pixel_method\n )\n )\n elif ty == \"ObDetError\":\n self.object_detection_dir_exist = False\n shutil.rmtree(self.savePathJoin(\"ObjectDetection\"))\n\n return answer == int(\"0x00040000\", 16) or stop_calculation",
"def _cost_caught_by_police(self):\n if self.fine_frequency != 0:\n if self.number_of_courses % self.fine_frequency == 0 and self.number_of_courses != 0:\n if self.number_of_courses % self.fine_frequency_paid_by_driver == 0 and self.number_of_courses != 0:\n self.fine_paid_number_of_courses += 1\n fine_value = np.random.choice([100, 200, 500], p=[0.25, 0.4, 0.35])\n self.total_penalty_points += self._add_penalty_points() # adding penalty points\n return fine_value\n else:\n return 0\n else:\n return 0\n else:\n return 0",
"def processInfidelity(chiIdeal, chiActual):\n return 1 - processFidelity(chiIdeal, chiActual)",
"def testFailed(self):\r\n failedExprKeys = list(self.__testFailedExpressions.keys())\r\n for i in range(len(failedExprKeys)):\r\n for expr in self.__testFailedExpressions[failedExprKeys[i]]:\r\n self.__Calculator.setExpression(expr)\r\n self.__Calculator.calculateResult()\r\n self.assertEqual(self.__testErrors[failedExprKeys[i]], self.__Calculator.getError())",
"def zero_failures(self) -> bool:\n return abs(self.failurerate) < 1e-7",
"def test_function_fuel_sum(data, mode_constrained, space_heating_enduses):\n fuel_in = 0\n fuel_in_solid_fuel = 0\n fuel_in_gas = 0\n fuel_in_elec = 0\n fuel_in_oil = 0\n fuel_in_heat = 0\n fuel_in_hydrogen = 0\n fuel_in_biomass = 0\n\n fuel_heating_all_fueltypes = 0\n fuel_heating_gas = 0\n tot_heating = 0\n #mode_constrained = True #SCRAP\n\n for region in data['rs_fuel_disagg']:\n for enduse in data['rs_fuel_disagg'][region]:\n fuel_in += np.sum(data['rs_fuel_disagg'][region][enduse])\n fuel_in_heat += np.sum(data['rs_fuel_disagg'][region][enduse][data['lookups']['fueltypes']['heat']])\n\n if mode_constrained == False and enduse in space_heating_enduses: #Exclude inputs for heating\n tot_heating += np.sum(data['rs_fuel_disagg'][region][enduse])\n #pass\n else:\n fuel_in_elec += np.sum(data['rs_fuel_disagg'][region][enduse][data['lookups']['fueltypes']['electricity']])\n fuel_in_gas += np.sum(data['rs_fuel_disagg'][region][enduse][data['lookups']['fueltypes']['gas']])\n fuel_in_hydrogen += np.sum(data['rs_fuel_disagg'][region][enduse][data['lookups']['fueltypes']['hydrogen']])\n fuel_in_oil += np.sum(data['rs_fuel_disagg'][region][enduse][data['lookups']['fueltypes']['oil']])\n fuel_in_solid_fuel += np.sum(data['rs_fuel_disagg'][region][enduse][data['lookups']['fueltypes']['solid_fuel']])\n fuel_in_biomass += np.sum(data['rs_fuel_disagg'][region][enduse][data['lookups']['fueltypes']['biomass']])\n \n for region in data['ss_fuel_disagg']:\n for enduse in data['ss_fuel_disagg'][region]:\n for sector in data['ss_fuel_disagg'][region][enduse]:\n fuel_in += np.sum(data['ss_fuel_disagg'][region][enduse][sector])\n fuel_in_heat += np.sum(data['ss_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['heat']])\n\n if mode_constrained == False and enduse in space_heating_enduses:\n tot_heating += np.sum(data['ss_fuel_disagg'][region][enduse][sector])\n else:\n fuel_in_elec += np.sum(data['ss_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['electricity']])\n fuel_in_gas += np.sum(data['ss_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['gas']])\n fuel_in_hydrogen += np.sum(data['ss_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['hydrogen']])\n fuel_in_oil += np.sum(data['ss_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['oil']])\n fuel_in_solid_fuel += np.sum(data['ss_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['solid_fuel']])\n fuel_in_biomass += np.sum(data['ss_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['biomass']])\n \n for region in data['is_fuel_disagg']:\n for enduse in data['is_fuel_disagg'][region]:\n for sector in data['is_fuel_disagg'][region][enduse]:\n fuel_in += np.sum(data['is_fuel_disagg'][region][enduse][sector])\n fuel_in_heat += np.sum(data['is_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['heat']])\n\n if mode_constrained == False and enduse in space_heating_enduses:\n tot_heating += np.sum(data['is_fuel_disagg'][region][enduse][sector])\n else:\n fuel_in_elec += np.sum(data['is_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['electricity']])\n fuel_in_gas += np.sum(data['is_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['gas']])\n fuel_in_hydrogen += np.sum(data['is_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['hydrogen']])\n fuel_in_oil += np.sum(data['is_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['oil']])\n fuel_in_solid_fuel += 
np.sum(data['is_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['solid_fuel']])\n fuel_in_biomass += np.sum(data['is_fuel_disagg'][region][enduse][sector][data['lookups']['fueltypes']['biomass']])\n \n return fuel_in, fuel_in_biomass, fuel_in_elec, fuel_in_gas, fuel_in_heat, fuel_in_hydrogen, fuel_in_solid_fuel, fuel_in_oil, tot_heating",
"def component_serial_failure_rate_calculation(cls, serial_fr_list):\n failure_rate_of_serial_value = 0.0\n\n if serial_fr_list:\n for item in serial_fr_list:\n if float(item) != 0.0 and str(item) != \"None\":\n failure_rate_of_serial_value += float(item)\n\n return float(failure_rate_of_serial_value)"
]
| [
"0.5976335",
"0.5800688",
"0.5793206",
"0.5278259",
"0.51749235",
"0.51479864",
"0.50197566",
"0.4980716",
"0.4886527",
"0.48797902",
"0.48643532",
"0.48379484",
"0.48011443",
"0.47967914",
"0.47813833",
"0.47725174",
"0.47659168",
"0.47551543",
"0.4754042",
"0.47385153",
"0.47090274",
"0.47009668",
"0.47005132",
"0.46887252",
"0.46696386",
"0.46404296",
"0.46359363",
"0.4633086",
"0.4632221",
"0.46145865"
]
| 0.677475 | 0 |
Calculate the fatality probability for a given oxygen concentration. The equation is fitted from the FESHM 4240 plot. | def _fatality_prob(self, O2_conc):
        if O2_conc >= 0.18:  # No fatality expected at or above 18% oxygen
Fi = 0
        elif O2_conc <= 0.088:  # 8.8% oxygen or below is assumed to be 100% fatal
Fi = 1
else:
# Fi formula, reverse engineered using 8.8% and 18% thresholds
Fi = 10**(6.5-76*O2_conc)
return Fi | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pr_dominant_offpring(offspring_zygosity):\n\n homozygous_dominant, heterozygous, homozygous_recessive = offspring_zygosity\n\n total = homozygous_dominant + heterozygous + homozygous_recessive\n dominant = homozygous_dominant + heterozygous\n\n pr_dominant = dominant / total\n\n return pr_dominant",
"def u_inf_crit(self, z0):\n # Get h_P, which is independent of the crossflow velocity\n h_P = self.h_P(z0)\n \n # Define an objective function for root finding\n def residual(us):\n \"\"\"\n Residual for use in root finding to find u_inf_crit\n \n Returns the difference h_S - h_P, which should be zero at the \n critical cross-flow velocity.\n \n \"\"\"\n return self.h_S(z0, us) - h_P\n \n # Return the critical crossflow velocity\n return fsolve(residual, 0.0001)[0]",
"def eclogite_foliated():\n\n rho = 3300.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 203.45; C[0,1] = 67.76; C[0,2] = 64.47; C[0,3] = 0.08; C[0,4] = 1.9; C[0,5] = -0.4\n C[1,0] = C[0,1]; C[1,1] = 220.58; C[1,2] = 63.65; C[1,3] = 0.46; C[1,4] = 0.59; C[1,5] = 0.06\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 189.75; C[2,3] = 0.13; C[2,4] = 0.95; C[2,5] = -0.2\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 66.32; C[3,4] = -0.27; C[3,5] = 0.73\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 65.77; C[4,5] = -0.02\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 70.75\n\n return C, rho",
"def h_P(self, z0):\n # Get the governing variables\n (B, N, u_slip, u_inf) = self.get_variables(z0, 0.)\n \n # Compute U_N\n U_N = u_slip / (B * N)**(1./4.)\n \n # Compute the correlation equation\n return 5.2 * np.exp(-(U_N - 1.8)**2 / 10.24) * (B / N**3)**(1./4.)",
"def fOilDensity(APIGravity, GasOilRatioOFU, GasGravity, Temperature, Pressure):\t\n\tT = Temperature\n\tP = Pressure / 145.038 # converts psia to MPa.\n\tGasOilRatio=GasOilRatioOFU*(28.3168/158.9873) # Converts scf/bbl to l/l\n\n\t# A reference density that can be used to characterize an oil Rho_0 is measured\n\t# at 15.6 degC and atmospheric pressure.\n\tRho_0 = 141.5 / (APIGravity + 131.5)\n\n\t# B_0 is a volume factor derived by Standing (1962)\n\tB_0 = 0.972 + 0.00038 * ((2.4 * GasOilRatio * ((GasGravity/Rho_0)**0.5) + T + 1.78)**1.175)\n\n\t# True densities of live oils are also calculated using B_0, but\n\t# the mass of dissolved gas must be included.\n\tRho_G = (Rho_0 + 0.0012*GasGravity*GasOilRatio) / B_0\n\n\t# The pressure dependence is comparatively small and the published data for density at\n\t# pressure pp can be described by the polynomial\n\tRho_GP = Rho_G + (0.00277*P - 1.71e-7*(P**3)) * ((Rho_G - 1.15)**2) + (3.49e-4*P)\n\n\t# The effect of temperature is larger, and one of the most\n\t# common expressions used to calculate the in-situ density\n\t# was developed by Dodson and Standing (1945).\n\t# Rho_T = Rho_P / (0.972 + 0.000381 * ((T + 17.78) ** 1.175))\n\t# This is accounted for in the B_0 and Rho_G terms which collapse when GasOilRation = 0\n\n\treturn Rho_GP",
"def const_violation( Cmx ):\n \n gp = 0.01 * ( Cmx - 0.1069 )\n \n phi = max( 0 , gp)\n \n return phi",
"def Vega_zero_Jy(self):\n c = 1e-8 * Constants.c.to('m/s').value\n f = 1e5 / c * (self.lpivot.to('AA').value ** 2 *\n self.Vega_zero_flux.to('erg*s**-1*cm**-2*AA**-1').value)\n return f * Unit('Jy')",
"def diffusive_heat_flux(discr, eos, cv, j):\n if isinstance(eos, MixtureEOS):\n h_alpha = eos.species_enthalpies(cv)\n return sum(h_alpha.reshape(-1, 1) * j)\n return 0",
"def dynamic_viscosity_of_air(self) -> float:\n\n return (1.458 * (10 ** (-6)) * (self.ambient_temperature**1.5)) / (\n self.ambient_temperature + 110.4\n )",
"def dry_snow_density(self):\n return (self.rho - self.h2o_vol * RHO_W0) / \\\n (1 - self.h2o_vol * RHO_W0 / RHO_ICE)",
"def atmospheric_ion_neutral_collision_frequency(self):\n nu = 3.8e-11*self.msis[\"nn\"]\n return nu",
"def P_nu_of(rho_cgs, temp_mev, munu_mev, rho_trap=10 ** 12.5):\n eta_nu = munu_mev / temp_mev\n\n # First term\n P_nu = 4.0 * numpy.pi / 3.0 * temp_mev ** 4.0 / hc_mevcm ** 3.0\n # Fermi integral term\n P_nu *= 21.0 / 60.0 * numpy.pi ** 4.0 \\\n + 0.5 * eta_nu ** 2 * (numpy.pi ** 2 + 0.5 * eta_nu ** 2)\n # Decoupling at lower densities\n P_nu *= numpy.exp(-rho_trap / rho_cgs)\n\n # convert from MeV to erg and return\n return P_nu * (CGS_EV * 1.0e6)",
"def CE(self, p_true, p_model):\n return np.sum(-np.array(p_true)*np.log2(np.array(p_model)))",
"def fluxonium_potential(self):\n return -0.5*(self.Ej * ((1+self.d)*cos(self.phis - 2. * pi * self.phi - 2. * pi * self.phiL) + (1-self.d)*cos(self.phis-2. * pi * self.phiL))) + self.El/2. * (self.phis) ** 2\n #return -0.5*(self.Ej * cos(self.phis - 2. * pi * self.phi) + self.Ej * cos(self.phis)) + self.El/2. * (self.phis-self.phiL)** 2",
"def uncertainty(self):\n return self.uncertainty_per_deg * self.temp * self.n_atoms",
"def prada(self):\n scale_factor = 1.0 / (1.0 + self.snapshot.header.redshift)\n r200c_physical = self.r200c * scale_factor / 1000.0 # units Mpc\n\n v200 = (\n (self.snapshot.const.G * self.m200c)\n / r200c_physical\n * self.snapshot.const.Mpc ** 2\n / 1000.0 ** 2\n ) ** 0.5 # units km/s\n\n def y(x, vmax, v200):\n func = np.log(1 + x) - (x / (1 + x))\n return ((0.216 * x) / func) ** 0.5 - (vmax / v200)\n\n concentration = np.zeros((len(self.vmax)))\n for halo in range(self.N_halos):\n if v200[halo] > self.vmax[halo]:\n concentration[halo] = -9999.0\n else:\n try:\n concentration[halo] = newton(\n y, x0=5.0, args=(self.vmax[halo], v200[halo])\n )\n except:\n concentration[halo] = -9999.0\n\n return concentration",
"def _compute_Z_conditionnal_density(self,Y):\n proba_cond = np.exp(self._estimate_weighted_log_prob(Y)) # Pi_k * g_k(yi)\n s = proba_cond.sum(axis=1)[:,np.newaxis] # sum_k (Pi_k * g_k(yi))\n return proba_cond / s #On normalise",
"def get_variables(self, z0, u_inf):\n # Get the ambient data from the CTD profile\n Ta, Sa, P = self.profile.get_values(z0, ['temperature', 'salinity',\n 'pressure'])\n rho = seawater.density(Ta, Sa, P)\n \n # Compute the properties of each dispersed-phase particle\n us = np.zeros(len(self.particles))\n rho_p = np.zeros(len(self.particles))\n m_p = np.zeros(len(self.particles))\n B_p = np.zeros(len(self.particles))\n for i in range(len(self.particles)):\n m0 = self.particles[i].m0\n T0 = self.particles[i].T0\n m_p[i] = np.sum(m0) * self.particles[i].nb0\n if m_p[i] > 0.:\n # Particles exist, get properties. Make sure the algorithm \n # uses the dirty bubble properties since this is supposed\n # to be the rise velocity averaged over the whole plume.\n us[i], rho_p[i]= self.particles[i].properties(m0, T0, P, Sa, \n Ta, np.inf)[0:2]\n B_p[i] = (rho - rho_p[i]) / rho * 9.81 * (m_p[i] / rho_p[i])\n else:\n # Particles dissolved, set to ambient conditions\n us[i] = 0.\n rho_p[i] = rho\n B_p[i] = 0.\n \n # Select the correct slip velocity\n u_slip = us[0]\n for i in range(len(self.particles) - 1):\n if B_p[i+1] > B_p[i]:\n u_slip = us[i+1]\n \n # Compute the total buoyancy flux\n B = np.sum(B_p)\n \n # Get the ambient buoyancy frequency\n N = self.profile.buoyancy_frequency(z0)\n \n # Return the governing parameters\n return (B, N, u_slip, u_inf)",
"def get_production_factor(self, temp_atmosphere):\n a1 = self.damages_terms[0]\n a2 = self.damages_terms[1]\n a3 = self.damages_terms[2]\n pf = self.params.prod_frac\n return ne.evaluate('1 - pf * (1 - 1 / (1 + a1 * temp_atmosphere + a2 * temp_atmosphere ** a3))')",
"def P0(self):\n if not self.isVaild():\n pass\n temp = ((self.r()**self.C)*self.C) / \\\n (math.factorial(self.C)*(self.C - self.r()))\n temp2 = 0\n for i in range(0, self.C):\n temp3 = ((self.r())**i)/(math.factorial(i))\n temp2 += temp3\n return 1.0/(temp2 + temp)",
"def non_specificity(self):\n result = 0\n for focal, value in self.items():\n if focal.cardinal > 0:\n result += value * math.log(focal.cardinal, 2)\n return round(result, 6)",
"def carbon_prime(C,p,p0):\r\n \r\n if p > p0:\r\n return C\r\n else:\r\n return .03",
"def p_value(beta_hat_j, sigma_hat_j):\n if beta_hat_j > 0:\n return 2 - (1 * norm.cdf(beta_hat_j / sigma_hat_j))\n else:\n return 2 * norm.cdf(beta_hat_j / sigma_hat_j)",
"def get_exact_poisson_uncertainty(x:float, nsigmas:float=1) -> float:\n # see formula at:\n # https://en.wikipedia.org/wiki/Poisson_distribution#Confidence_interval\n pl = stats.norm.cdf(-1*nsigmas, loc=0, scale=1)\n pu = stats.norm.cdf(1*nsigmas, loc=0, scale=1)\n\n lb = stats.chi2.ppf(pl, 2*x)/2 if x!= 0 else 0\n ub = stats.chi2.ppf(pu, 2*(x+1))/2 \n\n # average err is almost equal to sqrt(x)+0.5\n err = (ub-lb)/2\n\n return err",
"def CFHX(m_dot, p_nominal, epsilon, p_HP_in, T_HP_in, p_LP_in, T_LP_in):\r\n\r\n\r\n # Data from Aleks:\r\n # Length CFHX = 22 cm\r\n # D_in = 23.6 mm, D_out 40.5 mm\r\n # T range: 40 K - 5 K\r\n # m_dot = 0.5 g/s\r\n # p = 1 bar\r\n # Effectiveness: 97.4 %\r\n # dp_HP = 4.8 mbar (= dp23)\r\n # dp_LP = 5 mbar (= dp78)\r\n\r\n # Geometry of the CFHX\r\n A_HP = 0.25 * np.pi * 0.0236**2 #m²\r\n A_LP = 0.25 * np.pi * (0.0405**2 - 0.0236**2) #m²\r\n\r\n\r\n ## Calculation of the outgoing pressure with the scaled pressure drops\r\n\r\n # Scaling of the pressure drop with the Darcy--Weisbach equation\r\n # dp = f *L/D_i * 0.5 * Rho * u**2\r\n dp_HP_Aleks = 4.8e2 #Pa\r\n dp_LP_Aleks = 5.0e2 #Pa\r\n # Mean density with the arithmetic mean of the temperature range values\r\n Rho_Aleks = hp.HeCalc(3, 0, 1, 1e5, 2, 0.5*(40+5), 1) #kg/m³\r\n u_HP_Aleks = 0.5e-3/(A_HP*Rho_Aleks) #m/s\r\n u_LP_Aleks = 0.5e-3/(A_LP*Rho_Aleks) #m/s\r\n\r\n # Mean density of the two inlet temperatures and the nominal pressure to be able to compare the dp\r\n Rho = hp.HeCalc(3, 0, 1, p_nominal, 2, 0.5*(T_HP_in + T_LP_in), 1) #kg/m³\r\n u_HP = m_dot/(A_HP*Rho) #m/s\r\n u_LP = m_dot/(A_LP*Rho) #m/s\r\n\r\n # Actual scaling\r\n dp_HP = Rho/Rho_Aleks * u_HP**2/u_HP_Aleks**2 * dp_HP_Aleks #Pa\r\n dp_LP = Rho/Rho_Aleks * u_LP**2/u_LP_Aleks**2 * dp_LP_Aleks #Pa\r\n\r\n # Calculation of the outgoing pressure with the scaled pressure drops\r\n p_HP_out = p_HP_in - dp_HP #Pa\r\n p_LP_out = p_LP_in - dp_LP #Pa\r\n\r\n\r\n ## Calculation of the outgoing temperatures using the effectiveness\r\n # Asumming that the effectiveness is the same for both the HP and the LP side!\r\n\r\n # Check which stream restricts the heat exchange -> Pinch point\r\n # See \"Compact heat exchangers\" by Kays, London : Chapter 7\r\n dh_HP_max = hp.HeCalc(9, 0, 1, p_HP_in, 2, T_HP_in, 1) - hp.HeCalc(9, 0, 1, p_HP_out, 2, T_LP_in, 1)\r\n dh_LP_max = hp.HeCalc(9, 0, 1, p_LP_out, 2, T_HP_in, 1) - hp.HeCalc(9, 0, 1, p_LP_in, 2, T_LP_in, 1)\r\n\r\n # The maximum possible heat transfer corresponds to the restricting one\r\n dh_max = min(dh_HP_max, dh_LP_max)\r\n\r\n # Calculating the specific enthalpy with all known pressures and temperatures\r\n h_HP_in = hp.HeCalc(9, 0, 1, p_HP_in, 2, T_HP_in, 1) #J/kg\r\n h_LP_in = hp.HeCalc(9, 0, 1, p_LP_in, 2, T_LP_in, 1) #J/kg\r\n\r\n # Calculating the outgoing enthalpies\r\n h_HP_out = h_HP_in - epsilon * dh_max #J/kg\r\n h_LP_out = h_LP_in + epsilon * dh_max #J/kg\r\n\r\n # Calculation of the temperatures dependend on the specific enthalpy and the pressure\r\n T_HP_out = hp.HeCalc(2, 0, 1, p_HP_out, 9, h_HP_out, 1) #K\r\n T_LP_out = hp.HeCalc(2, 0, 1, p_LP_out, 9, h_LP_out, 1) #K\r\n\r\n # Cross check the dp scaling\r\n # print(\"u_HP_Aleks\", u_HP_Aleks)\r\n # print(\"u_HP\", u_HP)\r\n # print(\"Rho_Aleks\", Rho_Aleks)\r\n # print(\"Rho\", Rho)\r\n # print(\"dp_HP\", dp_HP)\r\n # print(\"dp_HP/dp_HP_Aleks \", dp_HP/dp_HP_Aleks)\r\n # print(\"dp_LP/dp_LP_Aleks \", dp_LP/dp_LP_Aleks)\r\n\r\n # Output of the results\r\n state_out = {\"h_HP\": h_HP_out, \"T_HP\": T_HP_out, \"p_HP\": p_HP_out,\r\n \"h_LP\": h_LP_out, \"T_LP\": T_LP_out, \"p_LP\": p_LP_out}\r\n return state_out",
"def F_nu_mu_1(self, x, E_p):\n\n L = np.log(E_p/1e3)\n\n B_prime = 1.75 + 0.204*L + 0.010*L*L\n beta_prime = 1./(1.67 + 0.111*L + 0.0038*L*L)\n k_prime = 1.07 - 0.086*L + 0.002*L*L\n\n y = x/0.427 #1-(m_mu/m_pi)^2\n\n # mask values for x>0.427 due to numerical problems\n # F should drop off rapidly but has a pole increses afterwards\n y = np.ma.masked_greater_equal(y, 1)\n\n logY = np.log(y)\n y_beta = y**beta_prime\n logY_beta = logY**beta_prime\n\n A = B_prime * logY / y\n B = ( 1-y_beta ) / ( 1. + k_prime*y_beta*(1-y_beta) )\n C = ( 4*beta_prime*y_beta) / ( 1-y_beta )\n D = (4*k_prime*beta_prime*y_beta * (1-2*y_beta) ) / ( 1 + k_prime*y_beta * (1-y_beta) )\n\n F = A*B**4. * ( 1./logY - C - D )\n try:\n return F.filled(0.)\n except:\n return F",
"def eggleton_formula(mass_ratio):\n\n two_third = mass_ratio**(2.0/3.0)\n one_third = mass_ratio**(1.0/3.0)\n return 0.49 * two_third / ( 0.6 * two_third + numpy.log(1.0 + one_third))",
"def dominant_probability(homozygous_dominant: int, heterozygous: int, homozygous_recessive :int):\n d, h, r = homozygous_dominant, heterozygous, homozygous_recessive\n all_ = d + h + r\n result = d * (d + 2 * h + 2 * r - 1) + h * (0.75 * h + r - 0.75)\n result /= all_ * (all_ - 1)\n return result",
"def u_crit(state, sys):\n s = state[0]\n i = state[1]\n tau = scipy.interpolate.interp1d(sys.tau.s, sys.tau.i, kind = \"cubic\")\n phi = scipy.interpolate.interp1d(sys.phi.s, sys.phi.i, kind = \"cubic\")\n cc = scipy.interpolate.interp1d(sys.commutation_curve[0],\n sys.commutation_curve[1],\n kind = \"cubic\")\n if i > sys.imax:\n return sys.umax\n if s <= sys.commutation_curve[0][-1]:\n #print(\"Case 1\")\n if s < sys.sbar or i < tau(s):\n return 0\n return sys.umax\n elif s > sys.commutation_curve[0][-1] and s < sys.commutation_curve[0][0]:\n #print(\"Case 2\")\n if ((i > tau(s)) and (i < cc(s))) or (i > sys.imax):\n return sys.umax\n elif i > cc(s) and i < sys.imax:\n return 0\n else:\n return 0\n else:\n #print(\"Case 3\")\n if i > sys.imax:\n return sys.umax\n elif s > sys.sstar and i > phi(s):\n return sys.umax\n return 0",
"def get_pressure_coefficient(self):\n depth = self.params[\"Measured_Pressure\"][\"depth\"]\n coef = self.params[\"Measured_Pressure\"][\"coef\"]\n pres = self.params[\"Measured_Pressure\"][\"data\"]\n if depth and not coef and pres:\n hydro = hydrostatic_pressure(self.depth,\n kelly_bushing=self.kelly_bushing,\n depth_w=self.water_depth)\n coef_data = list()\n for dp, pr in zip(depth, pres):\n idx = np.searchsorted(self.depth, dp)\n coef_data.append(pr / hydro[idx])\n log = Log()\n log.depth = depth\n log.data = coef_data\n return log\n else:\n log = Log()\n log.depth = depth\n log.data = coef\n return log"
]
| [
"0.58419925",
"0.58070016",
"0.56442904",
"0.5618727",
"0.56060743",
"0.55602694",
"0.5558371",
"0.55557066",
"0.5547927",
"0.5523127",
"0.5514237",
"0.5470292",
"0.54664814",
"0.546445",
"0.54536456",
"0.54408586",
"0.5437179",
"0.54363346",
"0.5434748",
"0.54204845",
"0.5416267",
"0.5395649",
"0.5380651",
"0.53775555",
"0.5370778",
"0.53540665",
"0.53521085",
"0.53485596",
"0.53443563",
"0.53404725"
]
| 0.7696928 | 0 |
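For quick reference, here is a minimal standalone sketch of the piecewise fatality-probability curve implemented in the `_fatality_prob` record above. It assumes a plain float oxygen mole fraction instead of a class attribute; the function name and the demo loop are illustrative only and not part of the source package.

```python
# Standalone sketch of the piecewise FESHM 4240 fatality-probability curve
# from the record above (names are illustrative, not from the source package).

def fatality_probability(o2_conc: float) -> float:
    """Return the fatality probability for an oxygen mole fraction."""
    if o2_conc >= 0.18:      # no fatality expected at or above 18% oxygen
        return 0.0
    if o2_conc <= 0.088:     # 8.8% oxygen or below is taken as 100% fatal
        return 1.0
    # Log-linear fit between the two thresholds: Fi = 10**(6.5 - 76*C)
    return 10 ** (6.5 - 76 * o2_conc)


if __name__ == "__main__":
    for conc in (0.21, 0.18, 0.15, 0.10, 0.088):
        print(f"O2 = {conc:.1%} -> Fi = {fatality_probability(conc):.3g}")
```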
Calculate the ODH class as defined in FESHM 4240. Returns the ODH class as an int. | def odh_class(self):
if self.phi < 1e-7/ureg.hr:
return 0
elif self.phi < 1e-5/ureg.hr:
return 1
elif self.phi < 1e-3/ureg.hr:
return 2
else:
# TODO add a custom exception for ODH > 2
            print('ODH fatality rate is too high. Please check calculations')
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def DAM_class_level(self, class_entity: und.Ent):\n if \"Interface\" in class_entity.kindname():\n return 2.0\n\n private_variables = len(class_entity.ents(\"Define\", \"Java Variable Private Member\"))\n protected_variables = len(class_entity.ents(\"Define\", \"Java Variable Protected Member\"))\n default_variables = len(class_entity.ents(\"Define\", \"Java Variable Default Member\"))\n public_variables = len(class_entity.ents(\"Define\", \"Java Variable Public Member\"))\n\n try:\n enum_ = private_variables + protected_variables\n denum_ = private_variables + protected_variables + default_variables + public_variables\n ratio = enum_ / denum_\n except ZeroDivisionError:\n # logger.error('ZeroDivisionError in computing QMOOD DAM metric.')\n ratio = 2.0\n return 1. + ratio",
"def _get_odd_class(self):\n return '_1jca28'",
"def _get_class(self, obj):\n\n object_type = obj.object_type\n\n 'Background class'\n object_class = 0\n\n # Don't care classes\n if object_type in ['DontCare', 'Person_sitting'] or obj.truncation > 0.75 or obj.occlusion > 1:\n object_class = 1\n\n # Vehicle classes\n elif object_type in ['Car', 'Van']:\n object_class = 2\n\n # Pedestrian class\n elif object_type in ['Pedestrian']: # TODO: Consider change this with ==\n object_class = 3\n\n # Cyclist class\n elif object_type in ['Cyclist']: # TODO: Consider change this with ==\n object_class = 4\n\n return object_class",
"def CAMC_class_level(self, class_entity: und.Ent):\n if \"Interface\" in class_entity.kindname():\n return 2.\n\n percentage = class_entity.metric(['PercentLackOfCohesion']).get('PercentLackOfCohesion', 0)\n\n if percentage is None:\n percentage = 0\n cohesion_ = 1. - (percentage / 100.)\n # print(class_entity.longname(), cohesion_)\n return 1. + round(cohesion_, 5)",
"def extract_redundancy_factor(oclass):\n match = re.search(\"EC_[0-9]+P([0-9])+\", oclass)\n if match:\n return int(match.group(1))\n match = re.search(\"RP_([0-9]+)\", oclass)\n if match:\n return int(match.group(1)) - 1\n return 0",
"def get_nh_type(self):\n return int(self.get('nhr_type'))",
"def estimate_class(self, observation: np.ndarray) -> int:\n neighbor_classes, distances = self.get_neighbor_classes(observation)\n weights = 1 / np.square(distances)\n classes = np.unique(neighbor_classes)\n class_weight = [sum(weights[neighbor_classes == neighbor_class]) for neighbor_class in classes]\n return classes[np.argmax(class_weight)]",
"def device_class(self):\n unit = get_uom_from_status(self._device.status)\n if unit == HS_UNIT_LUX:\n return DEVICE_CLASS_ILLUMINANCE\n elif unit == HS_UNIT_CELSIUS or unit == HS_UNIT_FAHRENHEIT:\n return DEVICE_CLASS_TEMPERATURE\n elif unit == HS_UNIT_A or unit == HS_UNIT_AMPERES:\n return DEVICE_CLASS_CURRENT\n elif unit == HS_UNIT_KW:\n return DEVICE_CLASS_POWER\n elif unit == HS_UNIT_KWH:\n return DEVICE_CLASS_ENERGY\n elif unit == HS_UNIT_V or unit == HS_UNIT_VOLTS:\n return DEVICE_CLASS_VOLTAGE\n elif unit == HS_UNIT_W or unit == HS_UNIT_WATTS:\n return DEVICE_CLASS_POWER\n return None",
"def NOM_class_level(self, class_entity: und.Ent):\n if class_entity is not None:\n # print(class_entity.metric(['CountDeclMethod']).get('CountDeclMethod', 0))\n # kind_filter = 'Java Method ~Unknown ~Unresolved ~Jar ~Library ~Constructor ~Implicit ~Lambda ~External'\n # method_list = class_entity.ents('Define', kind_filter)\n # counter = 0\n # for method_ in method_list:\n # if method_.metric(['Cyclomatic']).get('Cyclomatic', 0) > 1:\n # counter += 1\n # return counter\n if \"Interface\" in class_entity.kindname():\n return 0\n # wmc = class_entity.metric(['SumCyclomatic']).get('SumCyclomatic', 0)\n wmc2 = class_entity.metric(['SumCyclomaticModified']).get('SumCyclomaticModified', 0)\n # print(class_entity.longname(), wmc, wmc2)\n return wmc2\n return 0",
"def get_class_checker_and_chain_length(nwClass):\n ClassChecker = ClassAllNetworks\n length = 1\n if nwClass == \"TC\":\n ClassChecker = ClassTreeChild\n length = 3\n elif nwClass == \"SF\":\n ClassChecker = ClassStackFree\n length = 3\n elif nwClass == \"O\":\n ClassChecker = ClassOrchard\n length = 3\n elif nwClass == \"TB\":\n ClassChecker = ClassTreeBased\n length = 2\n return ClassChecker, length",
"def get_data_parity_number(self, oclass):\n if 'EC' not in oclass:\n self.log.error(\"Provide EC Object type only and not %s\",\n str(oclass))\n return 0\n\n tmp = re.findall(r'\\d+', oclass)\n return {'data': tmp[0], 'parity': tmp[1]}",
"def get_mpg_class(mpg):\n\n if(mpg >= 45):\n return 10\n elif(mpg >= 37 and mpg < 45):\n return 9\n elif(mpg >= 31 and mpg < 37):\n return 8\n elif(mpg >= 27 and mpg < 31):\n return 7\n elif(mpg >= 24 and mpg < 27):\n return 6\n elif(mpg >= 20 and mpg < 24):\n return 5\n elif(mpg >= 17 and mpg < 20):\n return 4\n elif(mpg >= 15 and mpg < 17):\n return 3\n elif(mpg >= 14 and mpg < 15):\n return 2\n else:\n return 1",
"def num_class(self):\r\n return self._num_class",
"def assignClass(self):\n classes = {}\n classes['en'] = 0\n classes['nl'] = 0\n assignedClass = \"\"\n\n for record in self.data:\n if record[-1] == 'en':\n classes['en'] += 1\n elif record[-1] == 'nl':\n classes['nl'] += 1\n\n max = 0\n for key in classes.keys():\n # get max class\n if max < classes[key]:\n max = classes[key]\n assignedClass = key\n\n self.enClass = classes['en']\n self.nlClass = classes['nl']\n\n return assignedClass",
"def class_uc(x):\r\n if Class(x) == \"G\" :\r\n return 1\r\n else :\r\n if Class(x) == \"I\" :\r\n return 2\r\n else :\r\n return 0",
"def CIS_class_level(self, class_entity: und.Ent):\n if \"Interface\" in class_entity.kindname():\n value = class_entity.metric(['CountDeclMethodAll']).get('CountDeclMethodAll', 0)\n else:\n value = class_entity.metric(['CountDeclMethodPublic']).get('CountDeclMethodPublic', 0)\n # public_methods = len(class_entity.ents(\"Define\", \"Java Method Public Member\"))\n\n if value is None:\n value = 0.\n # print(class_entity.longname(), value)\n return value",
"def class_num(self) -> int:\n return int(np.argmax(self.class_scores))",
"def sound_horizon_Class(self):\n if 'classy' not in sys.modules:\n warnings.warn(\"Class not installed, using a custom function to compute sound horizon (not precise)\")\n return self.r_s_drag()\n else:\n params = {\n 'A_s': self.As,\n 'n_s': self.ns, \n 'h': self.h,\n 'omega_b': self.Omega_b*self.h**2.,\n 'omega_cdm': self.Omega_cdm*self.h**2.,\n 'Omega_k': self.Omega_K,\n 'Omega_fld': self.Omega_lambda,\n 'w0_fld': self.w0,\n 'wa_fld': self.wa,\n 'N_ur': self.massless_nu,\n 'N_ncdm': self.massive_nu}\n if self.massive_nu != 0:\n params['m_ncdm'] = ''\n params['T_ncdm'] = ''\n for im, m in enumerate(self.M_nu):\n params['m_ncdm'] += '%.8f, ' %(m)\n params['T_ncdm'] += '%.8f, ' %(self.Gamma_nu)\n params['m_ncdm'] = params['m_ncdm'][:-2]\n params['T_ncdm'] = params['T_ncdm'][:-2]\n\n cosmo = Class()\n cosmo.set(params)\n cosmo.compute()\n\n rs = cosmo.rs_drag()*cosmo.h()\n\n cosmo.struct_cleanup()\n cosmo.empty()\n\n return rs",
"def DCC_class_level(self, class_entity: und.Ent):\n others = list()\n if \"Interface\" in class_entity.kindname():\n return 0\n\n for ref in class_entity.refs(\"Define\", \"Variable\"):\n if ref.ent().type() in self.all_classes:\n others.append(ref.ent().type())\n\n kind_filter = \"Method ~Unknown ~Jar ~Library ~Constructor ~Implicit ~Lambda ~External\"\n for ref in class_entity.refs(\"Define\", kind_filter):\n for ref2 in ref.ent().refs(\"Java Define\", \"Java Parameter\"):\n if ref2.ent().type() in self.all_classes:\n others.append(ref2.ent().type())\n\n for ref in class_entity.refs(\"Define\", kind_filter):\n for ref2 in ref.ent().refs(\"Java Use Return\"):\n if ref2.ent().type() in self.all_classes:\n others.append(ref2.ent().type())\n\n return len(set(others))",
"def classes_calculations(input):\n counts, _ = np.histogram(input, bins=int(\n input.max() + 1), range=(0, int(input.max())))\n return np.nonzero(counts)[0]",
"def H2(self) -> int:\n return self.raw_measure()[0]",
"def get_num_classes(self):",
"def _get_cls_out_channels(self):\n # Class numbers (k) + objectness (1)\n return self.num_classes",
"def find_class(complex,heme,nucleotide,control, steroid):\n\n if complex in heme:\n return 0\n elif complex in nucleotide:\n return 1\n elif complex in control :\n return 2\n elif steroid in control :\n return 3",
"def class_hp(my_class):\n class_dictionary = {'barbarian': roll_die(1, 12), 'bard': roll_die(1, 8), 'cleric': roll_die(1, 8),\n 'druid': roll_die(1, 8), 'fighter': roll_die(1, 10), 'monk': roll_die(1, 8),\n 'paladin': roll_die(1, 10), 'ranger': roll_die(1, 10), 'rogue': roll_die(1, 8),\n 'sorcerer': roll_die(1, 6), 'warlock': roll_die(1, 8), 'wizard': roll_die(1, 6),\n 'blood hunter': roll_die(1, 10)} # Created a dictionary with the appropriate class die\n if my_class in class_dictionary.keys(): # If my_class is listed, it will return the corresponding class die\n return class_dictionary[my_class] # Don't need an else statement, as user must pick a listed class to get here",
"def hsv_classify(img):\n hue_to_count, _ = get_hsv_hist(img)\n dominant_hue = max(hue_to_count, key=hue_to_count.get)\n\n all_hues = cfg.ALL_HUE_VALUES.keys()\n all_hues.sort()\n\n class_num = all_hues.index(dominant_hue)\n return class_num",
"def percent_to_class(prc, fair):\n assert len(prc) == 1, \"Should be only one column.\"\n prc = prc[0]\n\n # Threshold between fair and unfair.\n tsh_fair = 0.1\n\n dif = (fair - prc) / fair\n if dif < -1 * tsh_fair:\n # We are much higher than fair.\n cls = 2\n elif -1 * tsh_fair <= dif <= tsh_fair:\n # We are fair.\n cls = 1\n elif tsh_fair < dif:\n # We are much lower than fair.\n cls = 0\n else:\n assert False, \"This should never happen.\"\n return cls",
"def get_weight_class(weight):\n\n if(weight >= 3500):\n return 5\n elif(weight >= 3000 and weight < 3500):\n return 4\n elif(weight >= 2500 and weight < 3000):\n return 3\n elif(weight >= 2000 and weight < 2500):\n return 2\n else:\n return 1",
"def get_HA(HA_):\n HA, Self, HA_nt, HA_vt, HA_zt = 0, 0, 0, 0, 0\n if HA_ == \"HA\":\n HA = 1\n if HA_ == \"Self\":\n Self = 1\n if HA_ == \"nt\":\n HA_nt = 1\n if HA_ == \"vt\":\n HA_vt = 1\n if HA_ == \"zt\":\n HA_zt = 1\n\n return HA, Self, HA_nt, HA_vt, HA_zt",
"def percent_to_class(prc, fair):\n assert len(prc) == 1, \"Should be only one column.\"\n prc = prc[0]\n\n # Threshold between fair and unfair.\n tsh_fair = 0.1\n # Threshold between unfair and very unfair.\n tsh_unfair = 0.4\n\n dif = (fair - prc) / fair\n if dif < -1 * tsh_unfair:\n # We are much higher than fair.\n cls = 4\n elif -1 * tsh_unfair <= dif < -1 * tsh_fair:\n # We are not that much higher than fair.\n cls = 3\n elif -1 * tsh_fair <= dif <= tsh_fair:\n # We are fair.\n cls = 2\n elif tsh_fair < dif <= tsh_unfair:\n # We are not that much lower than fair.\n cls = 1\n elif tsh_unfair < dif:\n # We are much lower than fair.\n cls = 0\n else:\n assert False, \"This should never happen.\"\n return cls"
]
| [
"0.61941946",
"0.6073732",
"0.6039141",
"0.59284633",
"0.5847515",
"0.5661549",
"0.56504405",
"0.5536489",
"0.5534853",
"0.55204695",
"0.5511418",
"0.55110085",
"0.54881746",
"0.54308605",
"0.53876334",
"0.53790814",
"0.53224164",
"0.525254",
"0.5241441",
"0.5236536",
"0.52161103",
"0.52135736",
"0.519624",
"0.5181662",
"0.5174322",
"0.51183647",
"0.50930905",
"0.50846475",
"0.5077151",
"0.50701964"
]
| 0.76211125 | 0 |
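As a companion to the `odh_class` record above, this is a minimal sketch of the same FESHM 4240 class thresholds applied to a plain fatality rate expressed in 1/hr rather than a pint quantity; the function name and float-based interface are assumptions of this sketch.

```python
# Minimal sketch of the ODH class thresholds from the record above,
# using a plain fatality rate in 1/hr instead of a pint quantity.

from typing import Optional


def odh_class_from_phi(phi_per_hr: float) -> Optional[int]:
    """Map a total fatality rate (1/hr) to an ODH class, or None if out of range."""
    if phi_per_hr < 1e-7:
        return 0
    if phi_per_hr < 1e-5:
        return 1
    if phi_per_hr < 1e-3:
        return 2
    return None  # rates above 1e-3/hr fall outside ODH classes 0-2


if __name__ == "__main__":
    for phi in (5e-9, 3e-6, 2e-4, 1e-2):
        print(f"phi = {phi:.1e}/hr -> ODH class {odh_class_from_phi(phi)}")
```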
Print a report for failure modes and effects. The report is sorted by fatality rate in descending order. | def report(self, brief=True, sens=None):
self.fail_modes.sort(key=lambda x: x.phi, reverse=True)
sens = sens or SHOW_SENS
title = f'ODH report for {self}'
padding = len(title) + 10
print('#'*padding)
print(title)
print('-'*padding)
if brief:
print('Printing brief ODH report')
print(f'Only leaks with Fatality rate > {sens} are shown')
for f_mode in self.fail_modes:
if f_mode.phi >= sens or not brief:
print()
print(f' Source: {f_mode.source.name}')
print(f' Failure: {f_mode.name}')
print(f' Fatality rate: {f_mode.phi.to(1/ureg.hr):.2~}')
print(f' Building is powered: {not f_mode.outage}')
print(f' Oxygen concentration: {f_mode.O2_conc:.0%}, '
                      f'{f_mode.O2_conc/0.21:.0%} of norm')
print(f' Leak failure rate: {f_mode.leak_fr:.3g~}')
print(' ODH protection PFD: '
f'{(f_mode.P_i/f_mode.leak_fr).to(ureg.dimensionless):.2~}')
print(f' Total failure rate: {f_mode.P_i.to(1/ureg.hr):.2~}')
print(f' Leak rate: {f_mode.q_leak:.2~}')
print(f' Event duration: {f_mode.tau:.2~}')
print(f' Fans working: {f_mode.N_fan}')
print(f' Fan rate: {f_mode.Q_fan:.2~}')
print(f' Fatality prob: {f_mode.F_i:.0%}') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print_fails(self,result,cause=False,detail=False):\n fails = result.get_fails()\n if fails:\n print ('=== FAILS '+('='*60))\n for fail in fails:\n print (fail.id)\n if cause:\n print (' ',fail.get_cause())\n if detail:\n for key in ['ISQL_stripped_diff','Python_stripped_diff',\n 'ISQL_stderr_stripped_diff',\n 'Python_stderr_stripped_diff']:\n if fail.has_key(key):\n print ('-' * 70)\n print ('%s:' % key)\n print (as_utf8(fail[key]))\n print ()",
"def print_report():\n with open(report_path) as f:\n report = json.load(f)\n\n tests = report['tests']\n\n num_passed = 0\n num_failed = 0\n failures = []\n ok = '\\033[32m' + 'ok' + '\\033[0m' # green 'ok'\n failed = '\\033[31m' + 'FAILED' + '\\033[0m' # red 'FAILED'\n for test in tests:\n name = os.path.relpath(test['filename'], samples_path)\n print('import', name, '... ', end='')\n if test['result'] == 'PASSED':\n print(ok, \"(%.4f s)\" % test['timeElapsed'])\n num_passed += 1\n else:\n print(failed)\n print(test['error'])\n num_failed += 1\n failures.append(name)\n\n if failures:\n print('\\nfailures:')\n for name in failures:\n print(' ', name)\n\n result = ok if num_failed == 0 else failed\n print(\n '\\ntest result: %s. %d passed; %d failed\\n' %\n (result, num_passed, num_failed)\n )\n\n exit_code = 0 if num_failed == 0 else 3\n sys.exit(exit_code)",
"def print_failed(failed: list, spaces: int = 8) -> str:\n lines = []\n for key, val in sorted(failed.items()):\n if key != 'failed-cutoff':\n lines.append(f'{spaces * \" \"}{key.replace(\"-\", \" \").title()}: {len(val)}')\n return \"\\n\".join(lines) if lines else \"\"",
"def report(self):\n self.report_status()\n print\n self.report_charset()\n print\n self.report_key()\n print\n self.report_keyset()",
"def report(self):\n for c in self._call_chain:\n print c.title\n print '=' * len(c.title)\n c.report()\n print",
"def print_results(self) -> None:\n print(\"=\" * 70, file=sys.stderr)\n total = 0.0\n max_points = 0.0\n for problem in self.problems:\n total += problem.run_tests()\n max_points += problem.max_grade\n print(f\"Total Grade: {total}/{max_points}\", file=sys.stderr)",
"def error_report():\n db, c = connect(DBNAME)\n c.execute(\"select to_char(time,'FMMonth DD, YYYY') as date, \"\n \"round((sum(case when status = '200 OK' \"\n \"then 0 else 1 end)::decimal / count(*)) * 100,2) \"\n \"as percent_error from log group by date \"\n \"having (sum(case when status = '200 OK' \"\n \"then 0 else 1 end)::decimal / count(*)) * 100 > 1\")\n error_table = c.fetchall()\n db.close()\n print \"\\nDates on Which Over 1% of Requests Led to Errors:\"\n for error in error_table:\n if __name__ == '__main__':\n print str(error[0]) + \" - \" + str(error[1]) + \"%\"",
"def output_report(x, y, clf, test_flag=False):\n\n # Calculate recall at k time series\n y, y_prob, y_pred, recall_at_k, avg_recall = calculate_recall_at_k_time_series(x, y, clf)\n # Output confusion_matrix\n print(\"Confusion matrix: \\n\", confusion_matrix(y, y_pred))\n # Output recall_at_k\n print(\"Recall at {}, {}, {}, {}, {}: \".format(5, 10, 20, 50, 100), end=' ')\n for top in [5, 10, 20, 50, 100]:\n if top == 100:\n print(str(round(recall_at_k[top - 1], 2)) + \" accordingly\")\n else:\n print(\"{}, \".format(round(recall_at_k[top - 1], 2)), end='')\n print(\"Average recalls over 100: \", round(avg_recall, 2))\n if test_flag:\n _, recall_at_k, _, _, _ = calculate_recall_at_k(y_prob[:, 1], y, k_max=y.shape[0])\n print(\n \"Positions of escalation flags: \", ([1] if recall_at_k[0] != 0 else []) +\n [i + 1 for i in range(1, len(recall_at_k)) if\n recall_at_k[i] != recall_at_k[i - 1]])",
"def report_table(self, filename='ODH_report'):\n table = []\n header = ['Source', 'Failure', 'Event failure rate, 1/hr', '# of',\n 'Total failure rate, 1/hr', 'Leak rate, SCFM',\n '# fans working', 'Fan rate, SCFM', 'Event duration, min',\n 'Oxygen concentration', 'Fatality prob', 'Case prob',\n 'Fatality rate, 1/hr']\n # 'Total failure rate', 'ODH protection PFD', 'Building is powered'\n table.append(header)\n self.fail_modes.sort(key=lambda x: x.source.name)\n for f_mode in self.fail_modes:\n table.append([\n f_mode.source.name,\n f_mode.name,\n (f_mode.leak_fr/f_mode.N).m_as(1/ureg.hr),\n f_mode.N,\n f_mode.leak_fr.m_as(1/ureg.hr),\n f_mode.q_leak.m_as(ureg.ft**3/ureg.min),\n f_mode.N_fan,\n f_mode.Q_fan.m_as(ureg.ft**3/ureg.min),\n f_mode.tau.m_as(ureg.min),\n f_mode.O2_conc,\n f_mode.F_i,\n f_mode.P_i/f_mode.leak_fr,\n f_mode.phi.m_as(1/ureg.hr)])\n filename += '.xlsx'\n with xlsxwriter.Workbook(filename) as workbook:\n header_format = workbook.add_format({'bold': True,\n 'font_size': 12,\n 'bottom': 3})\n worksheet = workbook.add_worksheet()\n col_width = [len(x) for x in table[0]]\n for row_n, row in enumerate(table):\n for col_n, data in enumerate(row):\n worksheet.write(row_n, col_n, data)\n if col_n in (0, 1, 10):\n # For source names, failure names\n # and 'Total failure rate'\n col_width[col_n] = max(col_width[col_n], len(str(data)))\n sci_format = workbook.add_format({'num_format': '0.00E+00'},)\n flow_format = workbook.add_format({'num_format': '#'},)\n percent_format = workbook.add_format({'num_format': '0%'},)\n number_format = workbook.add_format({'num_format': '0'},)\n worksheet.set_row(0, None, header_format)\n worksheet.set_column(2, 2, None, sci_format)\n worksheet.set_column(4, 4, None, sci_format)\n worksheet.set_column(5, 5, None, flow_format)\n worksheet.set_column(8, 8, None, sci_format)\n worksheet.set_column(9, 9, None, percent_format)\n worksheet.set_column(10, 12, None, sci_format)\n # Writing total/summary\n N_rows = len(table)\n N_cols = len(table[0])\n worksheet.write(N_rows+1, N_cols-2, 'Total fatality rate, 1/hr')\n worksheet.write(N_rows+1, N_cols-1,\n self.phi.m_as(1/ureg.hr))\n worksheet.write(N_rows+2, N_cols-2, 'ODH class')\n worksheet.write(N_rows+2, N_cols-1, self.odh_class(),\n number_format)\n # Autofit column width\n for col_n, width in enumerate(col_width):\n adj_width = width - 0.005 * width**2\n worksheet.set_column(col_n, col_n, adj_width)\n # Adding usability\n worksheet.conditional_format(\n 1, N_cols-1, N_rows-1, N_cols-1,\n {'type': '3_color_scale', 'min_color': '#008000',\n 'max_color': '#FF0000'})\n worksheet.freeze_panes(1, 0)",
"def printReport(self):\n\t\tself.app.printflush('Fetched: ' + str(self.fetched_count), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Processes: ' + str(self.processes), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Updated: ' + str(self.updated_count), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Average page load time: ' + str(self.average_time), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Returned with code: ' + repr(self.code_statistics), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Closing Processes... ', self.app.IGNORE_EXIT_FLAG)",
"def print_report(self):\n print '=' * 20 + ' %s ' % self.label + '=' * 20\n print '%-20s%5s\\t%4s\\t%4s\\t%4s\\t%4s' % (\n 'Hand' + '=' * 16, '#', 'Frac', 'W', 'Tie', 'L')\n for hand, result_dict in self.counts.iteritems():\n total_for_hand = sum(result_dict.itervalues())\n if total_for_hand == 0:\n win_frac = 0.0\n tie_frac = 0.0\n loss_frac = 0.0\n else:\n win_frac = float(result_dict[WIN_RESULT])/total_for_hand\n tie_frac = float(result_dict[TIE_RESULT])/total_for_hand\n loss_frac = float(\n result_dict[LOSS_RESULT])/total_for_hand\n print '%-20s%5d\\t%0.3f\\t%0.3f\\t%0.3f\\t%0.3f' % (\n hand, total_for_hand, float(total_for_hand)/self.total_items,\n win_frac, tie_frac, loss_frac)",
"def report_short_table(self, sens=None):\n self.fail_modes.sort(key=lambda x: x.phi, reverse=True)\n sens = sens or SHOW_SENS\n table = [[\"Failure mode\", \"Fans on\", \"O_2\", \"Duration, min\", \"\\\\phi_i\"]]\n table.append(None)\n for f_mode in self.fail_modes:\n if f_mode.phi >= sens:\n row = []\n row.append(f'{f_mode.source.name} {f_mode.name}')\n row.append(f'{f_mode.N_fan}')\n row.append(f'{f_mode.O2_conc:.0%}')\n row.append(f'{f_mode.tau.m_as(ureg.min):,.1f}')\n row.append(f'{f_mode.phi.m_as(1/ureg.hr):.2}')\n table.append(row)\n return table",
"def final_report(self):\n print('Final Count for', self.reason, self.successes, 'of', self.tests, 'tests passed')",
"def printErrors(self):\n if self.dots:\n self.stream.writeln()\n\n # Skipped Test Report\n if not self.args.no_skip_report:\n for test, reason in self.skipped:\n self.stream.writeln(\n \"\\n{} {} - {}\".format(\n self.colors.blue(\"Skipped\"),\n self.colors.bold(test.dotted_name),\n reason,\n )\n )\n\n # Captured output for non-failing tests\n if not self.args.quiet_stdout:\n failing_tests = {x[0] for x in self.all_errors}\n for test in list(self.stdout_output) + list(self.stderr_errput):\n if test not in failing_tests:\n self.displayStdout(test)\n self.displayStderr(test)\n\n # Actual tracebacks and captured output for failing tests\n for test, color_func, outcome, err in self.all_errors:\n # Header Line\n self.stream.writeln(\n f\"\\n{color_func(outcome)} in {self.colors.bold(test.dotted_name)}\"\n )\n\n # Traceback\n if not self.args.no_tracebacks:\n relevant_frames = []\n for i, frame in enumerate(err.traceback_lines):\n # Python2 tracebacks containing unicode need some special handling\n # This doesn't always make it readable, but at least it doesn't\n # crash\n if sys.version_info[0] == 2: # pragma: no cover\n try:\n \"\".join([frame]) # intentionally trigger exceptions\n except UnicodeDecodeError:\n frame = frame.decode(\"utf-8\")\n debug(\n \"\\n\"\n f\"{'*' * 30}Frame {i}:{'*' * 30}\\n\" + self.colors.yellow(frame),\n level=3,\n )\n # Ignore useless frames\n if self.verbose < 4:\n if frame.strip() == \"Traceback (most recent call last):\":\n continue\n # Done with this frame, capture it.\n relevant_frames.append(frame)\n self.stream.write(\"\".join(relevant_frames))\n\n # Captured output for failing tests\n self.displayStdout(test)\n self.displayStderr(test)",
"def print_untested(self,result,cause=False):\n untested = result.get_untested()\n if untested:\n print ('=== UNTESTED '+('='*59))\n for u in untested:\n print (u.id)\n if cause:\n print (' ',u.get_cause())",
"def print_model_quality_report(pred_path: str, ground_path: str):\n predictions = np.load(pred_path).argmax(axis=1)\n groundtruth = pd.read_csv(ground_path).open_channels.values\n groups = pd.read_csv(ground_path).group.values\n\n print(\"Macro F1 score, F1 scores and confusion matrix per group:\")\n for group in range(6):\n pred = predictions[groups == group]\n true = groundtruth[groups == group]\n print(f\"Group {group} macro F1 score, F1 scores and confusion matrix:\")\n print(f1_score(true, pred, average='macro'))\n print(f1_score(true, pred, average=None))\n print(confusion_matrix(true, pred, normalize='true').round(3))\n print()\n\n print(\"Batch 5 macro F1 score, F1 scores and confusion matrix:\")\n pred = predictions[2_000_000:2_500_000]\n true = groundtruth[2_000_000:2_500_000]\n print(f1_score(true, pred, average='macro'))\n print(f1_score(true, pred, average=None))\n print(confusion_matrix(true, pred, normalize='true').round(3))\n print()\n\n print(\"Batch 9 macro F1 score, F1 scores and confusion matrix:\")\n pred = predictions[4_500_000:5_000_000]\n true = groundtruth[4_500_000:5_000_000]\n print(f1_score(true, pred, average='macro'))\n print(f1_score(true, pred, average=None))\n print(confusion_matrix(true, pred, normalize='true').round(3))\n print()\n\n print(\"Overall OOF macro F1 score, F1 scores and confusion matrix:\")\n print(f1_score(groundtruth[:5_000_000], predictions[:5_000_000], average='macro'))\n print(f1_score(groundtruth[:5_000_000], predictions[:5_000_000], average=None))\n print(confusion_matrix(groundtruth[:5_000_000], predictions[:5_000_000], normalize='true').round(3))\n print()",
"def report():\n pass",
"def print_tests_results(self):\n\n for test in self.test_report:\n for detail in test:\n print detail + ': ', test[detail]",
"def test_max_reports_per_category(self):\n self._check_initialized()\n configuration = self._style_checker_configuration()\n error_handler = self._error_handler(configuration)\n\n confidence = 5\n\n # First call: usual reporting.\n self._call_error_handler(error_handler, confidence)\n self.assertEquals(1, self._error_count)\n self.assertEquals(1, len(self._error_messages))\n self.assertEquals(self._error_messages,\n [\"foo.h(100): message [whitespace/tab] [5]\\n\"])\n\n # Second call: suppression message reported.\n self._call_error_handler(error_handler, confidence)\n # The \"Suppressing further...\" message counts as an additional\n # message (but not as an addition to the error count).\n self.assertEquals(2, self._error_count)\n self.assertEquals(3, len(self._error_messages))\n self.assertEquals(self._error_messages[-2],\n \"foo.h(100): message [whitespace/tab] [5]\\n\")\n self.assertEquals(self._error_messages[-1],\n \"Suppressing further [whitespace/tab] reports \"\n \"for this file.\\n\")\n\n # Third call: no report.\n self._call_error_handler(error_handler, confidence)\n self.assertEquals(3, self._error_count)\n self.assertEquals(3, len(self._error_messages))",
"def report(self):\n tolerance_name = self.tolerance_name()\n valid_tolerance_names = self.valid_tolerance_names()\n\n if tolerance_name not in self.valid_tolerance_names():\n error_message = f'{tolerance_name} is not a valid tolerance '\n error_message += f'({\",\".join(valid_tolerance_names)})'\n raise ValueError(error_message)\n\n vulnerabilities = Vulnerabilities(self.show_all())\n unused_allowed_vulnerabilities = self.vulnerabilities_allowed_list()\n filename = self.filename()\n\n if filename:\n stream = open(filename)\n grype_data = json.load(stream)\n stream.close()\n else:\n grype_data = json.load(sys.stdin)\n\n logging.debug(\n f'Tolerance is {self.tolerance_name()} '\n + f'({self.tolerance_level()})'\n )\n logging.debug(f\"Grype version {grype_data['descriptor']['version']}\")\n\n for match in grype_data['matches']:\n vulnerability = match['vulnerability']\n artifact = match['artifact']\n vulnerability_name = artifact['name']\n vulnerability_installed = artifact['version']\n vulnerability_id = vulnerability['id']\n vulnerability_severity = vulnerability['severity']\n level = self.tolerance_name2level(vulnerability_severity)\n\n if vulnerability_id in unused_allowed_vulnerabilities:\n unused_allowed_vulnerabilities.remove(vulnerability_id)\n\n if vulnerability_id in self.vulnerabilities_allowed_list():\n allowed = True\n else:\n allowed = False\n\n if level <= self.tolerance_level() and not self.show_all():\n add_vulnerability = False\n elif level <= self.tolerance_level() and self.show_all():\n add_vulnerability = True\n elif level > self.tolerance_level():\n if not allowed:\n add_vulnerability = True\n self.max_severity_level(level)\n elif allowed and self.show_all():\n add_vulnerability = True\n else:\n add_vulnerability = False\n\n if add_vulnerability:\n vulnerabilities.add(\n vulnerability_name,\n vulnerability_installed,\n vulnerability_id,\n vulnerability_severity,\n allowed\n )\n\n print(vulnerabilities)\n\n if len(unused_allowed_vulnerabilities):\n for vulnerability_id in unused_allowed_vulnerabilities:\n msg = f'\"{vulnerability_id}\" is in the allowed list '\n msg += 'but not found in the scan!'\n logging.warning(msg)\n\n logging.debug(\n f'Max severity level found was {self.max_severity_level()}.')\n\n if self.max_severity_level() <= self.tolerance_level():\n return 0\n else:\n return self.max_severity_level()",
"def print_failures(failures):\n if failures:\n print(\"\\n({}) Failure{}:\".format(len(failures),\n \"s\" if len(failures) != 1 else \"\"))\n for f in failures:\n print(\"[{}:{}] In {}: {}\".format(\n f.filename, f.lineno, f.case, f.data),\n end='')\n print(\" (\\\"{}\\\")\".format(f.alt) if f.alt else \"\")\n print()",
"def report(self, results):\n self.notice(\"Test Report\\n\")\n\n for count, group in enumerate(results, 1):\n results = (self._format_test(test, res) for test, res in group)\n results = (', ').join(results)\n self.notice(\"Test group %s:\\t%s\" % (count, results))\n\n self.divider()",
"def report(self):\r\n print(\"\".join(self.memory), self.error, self.steps)",
"def print_results(self):\n for test_cases in self._tests:\n for test_case in test_cases:\n print('{} ...ok'.format(test_case.get_name()))\n return 0",
"def _print_report(self, result: Dict[str, Dict[str, float]]):\n\n print('\\n\\tprecision recall f1_score num')\n for type_ in self.types:\n print(type_, end='\\t')\n print('{0: .3f}'.format(result[type_]['precision']), end=' ')\n print('{0: .3f}'.format(result[type_]['recall']), end=' ')\n print('{0: .3f}'.format(result[type_]['f1_score']), end=' ')\n print('{0: d}'.format(result[type_]['num']), end='\\n')",
"def _atexit_print_fn():\r\n to_sum = []\r\n for ps in _atexit_print_list:\r\n if ps.fct_callcount or ps.compile_time > 0:\r\n ps.summary(file=_atexit_print_file,\r\n n_ops_to_print=config.profiling.n_ops,\r\n n_apply_to_print=config.profiling.n_apply)\r\n if not isinstance(ps, ScanProfileStats):\r\n to_sum.append(ps)\r\n else:\r\n #TODO print the name if there is one!\r\n print 'Skipping empty Profile'\r\n if len(to_sum) > 1:\r\n # Make a global profile\r\n cum = copy.copy(to_sum[0])\r\n cum.message = \"Sum of all printed profiles at exit excluding Scan op profile.\"\r\n for ps in to_sum[1:]:\r\n for attr in [\"compile_time\", \"fct_call_time\", \"fct_callcount\",\r\n \"vm_call_time\", \"optimizer_time\", \"linker_time\",\r\n \"validate_time\"]:\r\n setattr(cum, attr, getattr(cum, attr) + getattr(ps, attr))\r\n\r\n #merge dictonary\r\n for attr in [\"apply_time\", \"apply_callcount\",\r\n \"apply_cimpl\", \"variable_shape\", \"variable_strides\"]:\r\n cum_attr = getattr(cum, attr)\r\n for key, val in getattr(ps, attr).iteritems():\r\n assert key not in cum_attr\r\n cum_attr[key] = val\r\n\r\n if cum.optimizer_profile and ps.optimizer_profile:\r\n merge = cum.optimizer_profile[0].merge_profile(\r\n cum.optimizer_profile[1],\r\n ps.optimizer_profile[1])\r\n cum.optimizer_profile = (cum.optimizer_profile[0], merge)\r\n else:\r\n cum.optimizer_profile = None\r\n\r\n cum.summary(file=_atexit_print_file,\r\n n_ops_to_print=config.profiling.n_ops,\r\n n_apply_to_print=config.profiling.n_apply)",
"def print_summary(self):\n #outcomes = self.get_outcomes()\n #passes = 'Passes: %i' % sum(1 for outcome in outcomes if outcome == Result.PASS)\n #untested = 'Untested: %i' % sum(1 for outcome in outcomes if outcome == Result.UNTESTED)\n #errors = 'Errors: %i' % sum(1 for outcome in outcomes if outcome == Result.ERROR)\n #fails = 'Fails: %i' % sum(1 for outcome in outcomes if outcome == Result.FAIL)\n print('')\n print ('Passes: %i' % self.get_pass_count())\n print ('Fails: %i' % self.get_fail_count())\n print ('Errors: %i' % self.get_error_count())\n print ('Untested: %i' % self.get_untested_count())\n print ('Skipped: %i' % self.get_skipped_count())",
"def report():\r\n print(\"Water: \" + str(resources[\"water\"]) + \"ml\")\r\n print(\"Milk: \" + str(resources[\"milk\"]) + \"ml\")\r\n print(\"Coffee: \" + str(resources[\"coffee\"]) + \"g\")\r\n print(\"Money: $\" + str(money))",
"def print_diagnostic(df, price_z_score_threshold = 10, time_z_score_threshold = 3):\n print(df.info())\n print_has_time_stamp_duplicates(df)\n scan_missing_values(df)\n print_time_interval_stats(df, time_z_score_threshold)\n outlier_scanning(df, price_z_score_threshold)\n scan_invalid_ask_order(df)\n scan_invalid_bid_order(df)\n scan_bid_higher_than_ask(df)\n scan_zero_or_negative_quantity(df)\n scan_valid_price_and_quantity(df)\n\n print_limit_quantity_stats(df)\n print_last_trade_quantity_stats(df)",
"def generate_report():\n\n # Fetch the top 3 most viewed articles and number of views and print them\n articles_query = get_articles_query()\n popular_articles = execute_query(articles_query)\n print_top_articles(popular_articles)\n\n # Fetch the most popular authors and print them\n authors_query = get_authors_query()\n popular_authors = execute_query(authors_query)\n print_authors(popular_authors)\n\n # Print the days when there were more than 1% errors in HTTP requests\n errors_query = get_errorData_query()\n error_data = execute_query(errors_query)\n print_error_data(error_data)"
]
| [
"0.6206705",
"0.59326273",
"0.58146775",
"0.5759397",
"0.57398576",
"0.5687681",
"0.56708777",
"0.56109154",
"0.55646807",
"0.5552639",
"0.5552112",
"0.55355096",
"0.5464556",
"0.5440957",
"0.5431149",
"0.54164475",
"0.54160124",
"0.540715",
"0.5400654",
"0.54003173",
"0.5395628",
"0.53808016",
"0.5379085",
"0.5346461",
"0.5341781",
"0.53321433",
"0.53111786",
"0.53099835",
"0.530978",
"0.5302557"
]
| 0.6278852 | 0 |
Prepare a short table for failure modes and effects. The report is sorted by fatality rate in descending order. | def report_short_table(self, sens=None):
self.fail_modes.sort(key=lambda x: x.phi, reverse=True)
sens = sens or SHOW_SENS
table = [["Failure mode", "Fans on", "O_2", "Duration, min", "\\phi_i"]]
table.append(None)
for f_mode in self.fail_modes:
if f_mode.phi >= sens:
row = []
row.append(f'{f_mode.source.name} {f_mode.name}')
row.append(f'{f_mode.N_fan}')
row.append(f'{f_mode.O2_conc:.0%}')
row.append(f'{f_mode.tau.m_as(ureg.min):,.1f}')
row.append(f'{f_mode.phi.m_as(1/ureg.hr):.2}')
table.append(row)
return table | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_table(type_, test_type, device_name, thresholds):\n x = PrettyTable() \n x.field_names = [device_name] + thresholds\n \n \"Chrome,\" + test_type + \",\" + str(notAfter_date) + \",\" + thresholds[index], \",fail\"\n \n ##read all Chromep entries\n ##get all test_type rows\n ##loop rows\n ##show table",
"def report_table(self, filename='ODH_report'):\n table = []\n header = ['Source', 'Failure', 'Event failure rate, 1/hr', '# of',\n 'Total failure rate, 1/hr', 'Leak rate, SCFM',\n '# fans working', 'Fan rate, SCFM', 'Event duration, min',\n 'Oxygen concentration', 'Fatality prob', 'Case prob',\n 'Fatality rate, 1/hr']\n # 'Total failure rate', 'ODH protection PFD', 'Building is powered'\n table.append(header)\n self.fail_modes.sort(key=lambda x: x.source.name)\n for f_mode in self.fail_modes:\n table.append([\n f_mode.source.name,\n f_mode.name,\n (f_mode.leak_fr/f_mode.N).m_as(1/ureg.hr),\n f_mode.N,\n f_mode.leak_fr.m_as(1/ureg.hr),\n f_mode.q_leak.m_as(ureg.ft**3/ureg.min),\n f_mode.N_fan,\n f_mode.Q_fan.m_as(ureg.ft**3/ureg.min),\n f_mode.tau.m_as(ureg.min),\n f_mode.O2_conc,\n f_mode.F_i,\n f_mode.P_i/f_mode.leak_fr,\n f_mode.phi.m_as(1/ureg.hr)])\n filename += '.xlsx'\n with xlsxwriter.Workbook(filename) as workbook:\n header_format = workbook.add_format({'bold': True,\n 'font_size': 12,\n 'bottom': 3})\n worksheet = workbook.add_worksheet()\n col_width = [len(x) for x in table[0]]\n for row_n, row in enumerate(table):\n for col_n, data in enumerate(row):\n worksheet.write(row_n, col_n, data)\n if col_n in (0, 1, 10):\n # For source names, failure names\n # and 'Total failure rate'\n col_width[col_n] = max(col_width[col_n], len(str(data)))\n sci_format = workbook.add_format({'num_format': '0.00E+00'},)\n flow_format = workbook.add_format({'num_format': '#'},)\n percent_format = workbook.add_format({'num_format': '0%'},)\n number_format = workbook.add_format({'num_format': '0'},)\n worksheet.set_row(0, None, header_format)\n worksheet.set_column(2, 2, None, sci_format)\n worksheet.set_column(4, 4, None, sci_format)\n worksheet.set_column(5, 5, None, flow_format)\n worksheet.set_column(8, 8, None, sci_format)\n worksheet.set_column(9, 9, None, percent_format)\n worksheet.set_column(10, 12, None, sci_format)\n # Writing total/summary\n N_rows = len(table)\n N_cols = len(table[0])\n worksheet.write(N_rows+1, N_cols-2, 'Total fatality rate, 1/hr')\n worksheet.write(N_rows+1, N_cols-1,\n self.phi.m_as(1/ureg.hr))\n worksheet.write(N_rows+2, N_cols-2, 'ODH class')\n worksheet.write(N_rows+2, N_cols-1, self.odh_class(),\n number_format)\n # Autofit column width\n for col_n, width in enumerate(col_width):\n adj_width = width - 0.005 * width**2\n worksheet.set_column(col_n, col_n, adj_width)\n # Adding usability\n worksheet.conditional_format(\n 1, N_cols-1, N_rows-1, N_cols-1,\n {'type': '3_color_scale', 'min_color': '#008000',\n 'max_color': '#FF0000'})\n worksheet.freeze_panes(1, 0)",
"def report(self, brief=True, sens=None):\n self.fail_modes.sort(key=lambda x: x.phi, reverse=True)\n sens = sens or SHOW_SENS\n title = f'ODH report for {self}'\n padding = len(title) + 10\n print('#'*padding)\n print(title)\n print('-'*padding)\n if brief:\n print('Printing brief ODH report')\n print(f'Only leaks with Fatality rate > {sens} are shown')\n for f_mode in self.fail_modes:\n if f_mode.phi >= sens or not brief:\n print()\n print(f' Source: {f_mode.source.name}')\n print(f' Failure: {f_mode.name}')\n print(f' Fatality rate: {f_mode.phi.to(1/ureg.hr):.2~}')\n print(f' Building is powered: {not f_mode.outage}')\n print(f' Oxygen concentration: {f_mode.O2_conc:.0%}, '\n f'{f_mode.O2_conc/0.21:.0%} percent of norm')\n print(f' Leak failure rate: {f_mode.leak_fr:.3g~}')\n print(' ODH protection PFD: '\n f'{(f_mode.P_i/f_mode.leak_fr).to(ureg.dimensionless):.2~}')\n print(f' Total failure rate: {f_mode.P_i.to(1/ureg.hr):.2~}')\n print(f' Leak rate: {f_mode.q_leak:.2~}')\n print(f' Event duration: {f_mode.tau:.2~}')\n print(f' Fans working: {f_mode.N_fan}')\n print(f' Fan rate: {f_mode.Q_fan:.2~}')\n print(f' Fatality prob: {f_mode.F_i:.0%}')",
"def fail_table(self):\n return self._table(self.missing())",
"def _build_sort1_table(key_itime, keys_map, header_dict,\n form, form_results, form_resultsi,\n disp_dict, stress_dict, strain_dict, force_dict,\n strain_energy_dict, gpstress_dict, log):\n is_results = False\n form_resultsi_subcase = []\n #for key, value in header_dict.items():\n #print(key, value)\n # (isubcase, analysis_code, sort_method,\n # count, ogs, superelement_adaptivity_index) = key\n key_itime0 = key_itime[0]\n key0 = key_itime0[0]\n # (isubcase, analysis_code, sort_method,\n # count, ogs, superelement_adaptivity_index, pval_step) = key\n subcase_id_old = key0[0]\n count_old = key0[3]\n ogs_old = key0[4]\n subtitle_old = key0[5]\n subtitle_old, label_old, superelement_adaptivity_index_old, unused_pval_step_old = keys_map[key0]\n del label_old\n del superelement_adaptivity_index_old\n\n # now that we have the data built, we put it in the form\n # in sorted order\n #\n # TODO: consider pval_step\n for key, itime in key_itime:\n # (isubcase, analysis_code, sort_method,\n # count, ogs, superelement_adaptivity_index, pval_step) = key\n #print('key =', key)\n subcase_id = key[0]\n count = key[3]\n ogs = key[4]\n #print('*ogs =', ogs)\n #subtitle = key[4]\n try:\n subtitle, unused_label, superelement_adaptivity_index, unused_pval_step = keys_map[key]\n except Exception:\n subcase_id = subcase_id_old\n subtitle = subtitle_old + '?'\n superelement_adaptivity_index = '?'\n raise\n\n #print('key =', key)\n if subcase_id != subcase_id_old or subtitle != subtitle_old or ogs != ogs_old:\n count_str = '' if count == 0 else ' ; opt_count=%s' % count_old\n ogs_str = '' if ogs == 0 else '; OGS=%s' % ogs_old\n subcase_str = 'Subcase %s; %s%s%s%s' % (\n subcase_id_old, subtitle_old, superelement_adaptivity_index, count_str, ogs_str)\n #print(subcase_str)\n res = (\n subcase_str.rstrip('; '),\n None,\n form_resultsi_subcase\n )\n form_resultsi.append(res)\n form_resultsi_subcase = []\n subcase_id_old = subcase_id\n subtitle_old = subtitle\n count_old = count\n ogs_old = ogs\n\n\n try:\n header = header_dict[(key, itime)]\n except KeyError: # this hits for strain energy\n msg = 'Missing (key, itime) in header_dict\\n'\n msg += ' key=%s\\n' % str(key)\n\n (subcase, analysis_code, sort_method,\n count, ogs, superelement_adaptivity_index, pval_step) = key\n msg += f' subcase={subcase}\\n'\n msg += f' analysis_code={analysis_code}\\n'\n msg += f' sort_method={sort_method}\\n'\n msg += f' count={count}\\n'\n msg += f' ogs={ogs}\\n'\n msg += f' superelement_adaptivity_index={superelement_adaptivity_index!r}\\n'\n msg += f' pval_step={pval_step!r}\\n'\n\n msg += ' itime=%s\\n' % itime\n msg += ' %s\\n' % str((key, itime))\n msg += 'Possible (key, time):\\n'\n for keyi in header_dict:\n msg += ' %s\\n' % str(keyi)\n #print(msg.rstrip())\n #print('expected = (%s, %r)\\n' % (str(key), itime))\n log.error(msg.rstrip() + '\\n')\n #self.log.error('expected = (%s, %r)\\n' % (str(key), itime))\n continue\n #raise KeyError(msg)\n try:\n header = header.strip()\n except Exception:\n print('header = %r' % header)\n raise\n\n\n form_outi = []\n form_out = (header, None, form_outi)\n disp_formi = disp_dict[(key, itime)]\n stress_formi = stress_dict[(key, itime)]\n strain_formi = strain_dict[(key, itime)]\n force_formi = force_dict[(key, itime)]\n strain_energy_formi = strain_energy_dict[(key, itime)]\n gpstress_formi = gpstress_dict[(key, itime)]\n if disp_formi:\n form_outi += disp_formi\n #form_outi.append(('Disp', None, disp_formi))\n if stress_formi:\n form_outi.append(('Stress', None, stress_formi))\n is_results 
= True\n if strain_formi:\n form_outi.append(('Strain', None, strain_formi))\n is_results = True\n if force_formi:\n form_outi.append(('Force', None, force_formi))\n is_results = True\n if strain_energy_formi:\n form_outi.append(('Strain Energy', None, strain_energy_formi))\n is_results = True\n if gpstress_formi:\n form_outi.append(('Grid Point Stresses', None, gpstress_formi))\n is_results = True\n\n if form_outi:\n is_results = True\n form_resultsi_subcase.append(form_out)\n #break\n\n #print(\"subcase_id = \", subcase_id)\n if subcase_id:\n count_str = '' if count == 0 else ' ; opt_count=%s' % count_old\n ogs_str = '' if ogs == 0 else '; OGS=%s' % ogs_old\n subcase_str = 'Subcase %s; %s%s%s' % (subcase_id, subtitle, count_str, ogs_str)\n #print('*', subcase_str)\n res = (\n subcase_str.strip('; '),\n None,\n form_resultsi_subcase\n )\n form_resultsi.append(res)\n assert len(form_out) > 0, form_out\n form_resultsi_subcase = []\n\n if is_results:\n form.append(form_results)\n assert len(form_out) > 0, form_out\n #print('formi =', formi)\n #print('form_out =', form_out)\n #print('form_resultsi =', form_resultsi)\n #print('form_results =', form_results)\n #print(form)\n #if len(formi):\n #form.append(form0)\n #print(form)\n #aa\n #print('form', form)\n #print('form_results =', form_results)\n return form",
"def _generate_expected_summary_table():\n expected_summary = SummaryTable()\n # 1 pub/send per default emit period\n expected_summary.increment_pub()\n expected_summary.increment_send()\n return expected_summary",
"def _generate_expected_summary_table():\n expected_summary = SummaryTable()\n # 1 pub/send per default emit period\n expected_summary.increment_pub()\n expected_summary.increment_send()\n return expected_summary",
"def lint(self):\n report = OrderedDict()\n\n for index, scale in self.scales.iterrows():\n inputs, outputs = create_activations(scale)\n inf = inputs[0].format[:4]\n outf = outputs[0].format[:4]\n is_qdq = ('Int8' in inputs[0].format) ^ ('Int8' in outputs[0].format)\n if is_qdq:\n dq = 'Int8' in inputs[0].format\n role = \"Quantize\" if not dq else \"Dequanitize\"\n report[scale.Name] = OrderedDict({\n 'name': scale.Name,\n 'type conversion': f\"{inf} -> {outf}\",\n 'hazard': f\"Unfused {role} layer\",\n 'mitigation': f\"Check why the {role} layer is not fused\",\n 'help': f\"Unfused Quantize/Dequantize nodes are wasteful and \"\n \"should be avoided. Quantize nodes may be necessary \"\n \"for quantizing inputs.\"\n })\n\n df = pd.DataFrame.from_dict(report, orient='index')\n return df",
"def table_summary():\n \n t = dict()\n t['name'] = get_names()\n t['Name'] = [get_properties(name)['label'] for name in t['name']]\n N = len(t['name'])\n \n # host\n t['host'] = ['Sagittarius', 'Sagittarius', 'none', 'Gaia-Sausage-Enceladus', 'Sagittarius', 'Sequoia / Arjuna / I\\'itoi', 'Sequoia / Arjuna', np.nan, np.nan, 'Sequoia / Arjuna', 'Gaia-Sausage-Enceladus', 'Sequoia / Arjuna', 'Helmi / Wukong', 'Helmi / Wukong', 'Sagittarius', 'in situ / Helmi / Wukong', 'Helmi / Wukong', 'Cetus', 'Cetus', 'Sagittarius', 'Sequoia / Arjuna / I\\'itoi', 'Cetus', 'Sequoia / Arjuna / I\\'itoi']\n \n # progenitor\n t['progenitor'] = [np.nan, np.nan, 'itself', 'NGC 5139', 'NGC 4590', np.nan, 'NGC 3201', '(Wukong / Helmi)', '(Wukong / Helmi)', np.nan, np.nan, np.nan, np.nan, 'NGC 5024', np.nan, 'NGC 5272', 'NGC 5024', 'NGC 5824', 'NGC 5824', np.nan, np.nan, np.nan, np.nan]\n \n # progenitor type\n t['type'] = ['DG' if name in ['elqui', 'indus', 'jhelum'] else 'GC' for name in t['name']]\n \n # metallicity\n t['feh'] = [-2.4, -2.4, -2.2, -1.5, -2.16, -2.3, -1.5, -2.1, -2.1, -1.6, -1.95, -1.6, -2.7, np.nan, -1.7, -1.1, -2.7, -1.9, np.nan, np.nan, -2.2, np.nan, -1.9]\n \n # associations\n t['friends'] = ['ATLAS', 'Aliqa Uma', np.nan, np.nan, np.nan, np.nan, np.nan, 'Jhelum', 'Indus', np.nan, np.nan, np.nan, np.nan, 'Sylgr', np.nan, np.nan, 'Ravi', 'Turbio', 'Triangulum', np.nan, np.nan, np.nan, np.nan]\n \n tout = Table(t)\n tout.pprint()\n tout.write('../data/stream_origin.fits', overwrite=True)",
"def _buildtable(self):\n\n tabrows = []\n\n for i, (expid, exfiles) in enumerate(self._exposure_files.items()):\n specflux_b, specflux_r, specflux_z = [], [], []\n tab = None\n\n if len(exfiles) == 0:\n continue\n\n print(expid)\n for exfile in exfiles:\n print(exfile)\n hdu = fits.open(exfile)\n\n # The following tables are present in the redux sframes and the\n # nightwatch qcframes.\n wave = hdu['WAVELENGTH'].data\n\n # However, in the nightwatch files the wavelength data are a\n # table of size nfiber x nwavelength.\n if self._filetype == 'nightwatch':\n if wave.ndim > 1:\n wave = wave[0]\n\n fluxhead = hdu['FLUX'].header\n fluxdata = hdu['FLUX'].data\n ivardata = hdu['IVAR'].data\n fibermap = hdu['FIBERMAP'].data\n exptime = fluxhead['EXPTIME']\n if not np.all(self._unditherfa['FIBER'] ==\n np.arange(len(self._unditherfa))):\n raise ValueError('weird fiberassign file format!')\n fibermap = self._unditherfa[fibermap['FIBER']]\n\n target_id = fibermap['TARGETID']\n target_ra = fibermap['TARGET_RA']\n target_dec = fibermap['TARGET_DEC']\n fiber = fibermap['FIBER']\n objtype = fibermap['OBJTYPE']\n flux_g = fibermap['FLUX_G']\n flux_r = fibermap['FLUX_R']\n flux_z = fibermap['FLUX_Z']\n x, y = [fibermap['FIBERASSIGN_{}'.format(val)] for val in ('X', 'Y')]\n\n camera = fluxhead['CAMERA'][0].upper()\n\n if getattr(self, '_deltara', None) is not None:\n dra = self._deltara[i]*np.ones(len(fiber))\n ddec = self._deltadec[i]*np.ones(len(fiber))\n elif self._dithertype == 'telescope':\n dithra = self._ditherfa['target_ra']\n dithdec = self._ditherfa['target_dec']\n udithra = self._unditherfa['target_ra']\n udithdec = self._unditherfa['target_dec']\n ontarget = ((self._ditherfa['targetid'] ==\n self._unditherfa['targetid']) &\n (self._ditherfa['objtype'] == 'TGT'))\n dfiberra = (dithra-udithra)*np.cos(np.radians(udithdec))*60*60\n dfiberdec = (dithdec-udithdec)*60*60\n if not np.all(self._ditherfa['FIBER'] ==\n np.arange(len(self._ditherfa))):\n raise ValueError('unexpected shape of dither file')\n dfiberra[~ontarget] = np.nan\n dfiberdec[~ontarget] = np.nan\n dfiberra = dfiberra[fiber]\n dfiberdec = dfiberdec[fiber]\n wcs = self.lookup_wcs(fluxhead['MJD-OBS'])\n centralwcs = self._central_wcs\n if (~np.isfinite(centralwcs['cenra'][1]) or\n ~np.isfinite(centralwcs['cendec'][1])):\n raise ValueError('central pointing ra/dec is NaN!')\n dtelra = (wcs['cenra'][1]-centralwcs['cenra'][1])\n dtelra *= np.cos(np.radians(centralwcs['cendec'][1]))\n dteldec = wcs['cendec'][1]-centralwcs['cendec'][1]\n dra = dfiberra + dtelra*60*60\n ddec = dfiberdec + dteldec*60*60\n if np.all(~np.isfinite(dra)):\n print('warning: no good telescope offset for %s' %\n exfile)\n else:\n raise ValueError('not implemented')\n \n for j, fiber_id in enumerate(fiber):\n flux = fluxdata[j]\n ivar = ivardata[j]\n if not np.any(ivar > 0):\n specflux = 0\n specflux_ivar = 0\n else:\n meanivar = np.mean(ivar[ivar > 0])\n mask = ivar > meanivar / 100\n specflux = np.trapz(flux*mask, wave)\n specflux_ivar = 1./np.sum(ivar[mask]**-1)\n # Schlegel: sum over correct wavelengths, all three\n # filters, plus 11 pixel median filter to reject\n # cosmics.\n # will require being better about reading in\n # the spectrographs together.\n tabrows.append((expid, exptime,\n target_id[j], target_ra[j], target_dec[j],\n fiber[j], objtype[j],\n flux_g[j], flux_r[j], flux_z[j],\n specflux, specflux_ivar, camera,\n dra[j], ddec[j],\n x[j], y[j]))\n\n tab = Table(rows=tabrows,\n names=('EXPID', 'EXPTIME',\n 'TARGETID', 'TARGET_RA', 'TARGET_DEC',\n 
'FIBER', 'OBJTYPE',\n 'FLUX_G', 'FLUX_R', 'FLUX_Z',\n 'SPECTROFLUX', 'SPECTROFLUX_IVAR', 'CAMERA',\n 'DELTA_X_ARCSEC', 'DELTA_Y_ARCSEC',\n 'XFOCAL', 'YFOCAL'),\n meta={'EXTNAME' : 'DITHER',\n 'TILEID' : '{}'.format(self._tileid)})\n\n return tab",
"def _prepare_summary_table(self, raw_stats_paths, descr_paths):\n\n sum_tbl = OrderedDict()\n sum_tbl[\"Title\"] = OrderedDict()\n for res in self.rsts:\n sum_tbl[res.reportid] = OrderedDict()\n\n # Add tool information.\n key = \"tool_info\"\n sum_tbl[\"Title\"][key] = \"Data collection tool\"\n for res in self.rsts:\n sum_tbl[res.reportid][key] = f\"{res.info['toolname'].capitalize()} version \" \\\n f\"{res.info['toolver']}\"\n\n # Add datapoint counts.\n key = \"datapoints_cnt\"\n sum_tbl[\"Title\"][key] = \"Datapoints Count\"\n for res in self.rsts:\n sum_tbl[res.reportid][key] = len(res.df.index)\n\n # Add measurement resolution.\n for res in self.rsts:\n key = \"device_resolution\"\n resolution = res.info.get(\"resolution\")\n if resolution:\n sum_tbl[\"Title\"][key] = \"Device Resolution\"\n sum_tbl[res.reportid][key] = f\"{resolution}ns\"\n\n # Add links to the raw statistics directories.\n if raw_stats_paths:\n key = \"raw_stats\"\n sum_tbl[\"Title\"][key] = \"Raw statistics\"\n for res in self.rsts:\n path = raw_stats_paths.get(res.reportid, \"Not available\")\n sum_tbl[res.reportid][key] = path\n\n # Add links to the descriptions.\n if descr_paths:\n key = \"descr\"\n sum_tbl[\"Title\"][key] = \"Test description\"\n for res in self.rsts:\n path = descr_paths.get(res.reportid, \"Not available\")\n sum_tbl[res.reportid][key] = path\n\n return sum_tbl",
"def lint(self):\n report = OrderedDict()\n\n for index, slice in self.slices.iterrows():\n inputs, outputs = create_activations(slice)\n inf = inputs[0].format[:4]\n outf = outputs[0].format[:4]\n if inf != outf:\n mitigation = \"\"\n if \"INT8\" in [inf, outf]:\n mitigation = \"Consider adding quantization around float operations.\"\n report[slice.Name] = OrderedDict({\n 'name': slice.Name,\n 'type conversion': f\"{inf} -> {outf}\",\n 'shape conversion': f\"{inputs[0].shape} -> {outputs[0].shape}\",\n 'hazard': \"Slice layer is converting operand data type.\",\n 'mitigation': mitigation,\n 'help': \"Conversions between float32 and float16 are a red \"\n \"flag, as are conversions between float32/16 <=> INT8.\"\n })\n\n df = pd.DataFrame.from_dict(report, orient='index')\n return df",
"def human_comparison_table(self, feature_slice=6):\n conditions = [ERCondition.DOMAIN, ERCondition.EXPERT, ERCondition.RANDOM]\n df = pd.read_pickle(self.path_final_evaluation_aucs)\n table = EffectSizeMatrix(df, conditions, remove_null=True, rename_columns=True)\n data = table.get_result_df()\n latex = table.get_latex()\n print(latex)\n return data",
"def generate_table(self):\n states = self.get_canonical_collection()\n # self.print_canonical_collection(states)\n table = [{} for _ in range(len(states))]\n\n for index in range(len(states)):\n state = states[index]\n first_rule_cnt = 0\n second_rule_cnt = 0\n third_rule_cnt = 0\n beta = []\n for prod in state:\n dot_index = prod[1].index('.')\n alpha = prod[1][:dot_index]\n beta = prod[1][dot_index + 1:]\n if len(beta) != 0:\n first_rule_cnt += 1\n else:\n if prod[0] != 'S1':\n second_rule_cnt += 1\n production_index = self.grammar.P.index((prod[0], alpha))\n elif alpha == [self.grammar.S[0]]:\n third_rule_cnt += 1\n if first_rule_cnt == len(state):\n table[index]['action'] = 'shift'\n\n elif second_rule_cnt == len(state):\n table[index]['action'] = 'reduce ' + str(production_index)\n\n elif third_rule_cnt == len(state):\n table[index]['action'] = 'acc'\n else:\n conflict_msg = 'Conflict! State I' + str(index) + ': ' + str(state) + '\\nSymbol: ' + beta[0]\n raise (Exception(conflict_msg))\n for symbol in self.grammar.N + self.grammar.E: # the goto part of the table\n next_state = self.go_to(state, symbol)\n if next_state in states:\n table[index][symbol] = states.index(next_state)\n # print(\"table\", table)\n return table",
"def _prepare_stats_table(self, pinfos):\n\n stats_tbl = OrderedDict()\n stats_tbl[\"Title\"] = OrderedDict()\n for res in self.rsts:\n stats_tbl[res.reportid] = OrderedDict()\n\n for pinfo in pinfos:\n for colname in (pinfo[\"colname\"], pinfo[\"xcolname\"]):\n if colname in stats_tbl[\"Title\"]:\n continue\n\n # Each column name is represented by a row in the statistics table. Fill the \"Title\"\n # column.\n title_dict = stats_tbl[\"Title\"][colname] = OrderedDict()\n defs = self._refdefs.info[colname]\n\n if defs.get(\"unit\") == \"nanosecond\":\n # Convert nanoseconds to microseconds.\n unit = \"us\"\n else:\n unit = defs.get(\"short_unit\", \"\")\n\n title_dict[\"colname\"] = colname\n if unit:\n title_dict[\"colname\"] += f\", {unit}\"\n title_dict[\"coldescr\"] = defs[\"descr\"]\n\n title_dict[\"funcs\"] = OrderedDict()\n for funcname in self._stats_funcs:\n if funcname in self.rsts[0].cstats[colname]:\n title_dict[\"funcs\"][funcname] = RORawResult.get_stat_func_descr(funcname)\n\n # Now fill the values for each result.\n for res in self.rsts:\n res_dict = stats_tbl[res.reportid][colname] = OrderedDict()\n res_dict[\"funcs\"] = OrderedDict()\n\n for funcname in title_dict[\"funcs\"]:\n val = res.cstats[colname][funcname]\n fmt = \"{}\"\n if defs.get(\"unit\") == \"nanosecond\" and \"index\" not in funcname:\n val /= 1000\n fmt = \"{:.2f}\"\n if defs[\"type\"] == \"float\":\n fmt = \"{:.2f}\"\n\n fdict = res_dict[\"funcs\"][funcname] = OrderedDict()\n fdict[\"val\"] = fmt.format(val)\n fdict[\"raw_val\"] = val\n\n if self._refres.reportid == res.reportid:\n fdict[\"hovertext\"] = \"This is the reference result, other results \" \\\n \"are compared to this one.\"\n continue\n\n ref_fdict = stats_tbl[self._refres.reportid][colname][\"funcs\"][funcname]\n change = val - ref_fdict[\"raw_val\"]\n if ref_fdict[\"raw_val\"]:\n percent = (change / ref_fdict[\"raw_val\"]) * 100\n else:\n percent = change\n change = fmt.format(change) + unit\n percent = \"{:.1f}%\".format(percent)\n fdict[\"hovertext\"] = f\"Change: {change} ({percent})\"\n\n return stats_tbl",
"def build_table(self):\n if len(self._abslines) == 0:\n return\n comp_tbl = QTable()\n comp_tbl.add_column(Column([iline.wrest.to(u.AA).value for iline in self._abslines]*u.AA,name='wrest'))\n for attrib in ['z', 'flagN', 'N', 'Nsig']:\n comp_tbl.add_column(Column([iline.attrib[attrib] for iline in self._abslines], name=attrib))\n # Return\n return comp_tbl",
"def __str__(self) -> str:\n header = [(\"Computation\", \"Time\"), (\"Error Term\", \"Draws\")]\n values = [format_seconds(self.computation_time), self.draws]\n if self.fp_iterations.sum() > 0 or self.contraction_evaluations.sum() > 0:\n header.extend([(\"Fixed Point\", \"Iterations\"), (\"Contraction\", \"Evaluations\")])\n values.extend([self.fp_iterations.sum(), self.contraction_evaluations.sum()])\n return format_table(header, values, title=\"Optimal Instrument Results Summary\")",
"def make_performance_table(self):\n table = Table()\n table.add_column(\"Classifier\", ratio=25)\n table.add_column(\"Score\", ratio=10, justify=\"center\", no_wrap=True)\n table.add_column(\"Params\", ratio=25, no_wrap=False)\n table.add_column(\"Model ID\",ratio=40, no_wrap=True)\n\n for name, stuff in self.trainer.performance.items():\n score, params, hash_id = stuff\n style = \"bold green\" if name == self.trainer.best_classifier__name else \"\"\n best_one = \" ***\" if name == self.trainer.best_classifier__name else \"\"\n \n table.add_row(\n str(name),\n str(np.round(score, 3)), \n str(params), \n f\"{str(hash_id)}{best_one}\",\n style=style)\n \n return table",
"def CheckWarningFlags(data_table):\n\n df = pd.DataFrame.from_dict(data_table)\n reason_dict_list = []\n\n # Checking EPS Growth positive gradient\n df['EPS Growth'] = df['EPS Growth'].map(lambda x: x.rstrip('%')).astype(float) / 100\n if df.loc[df['EPS Growth'].diff(-1) < 0].Year.tolist():\n warning_data = df.loc[df['EPS Growth'].diff(-1) < 0].Year.tolist()\n eps_string = ''\n\n for year in range(len(warning_data)-1, -1, -1):\n eps_string = eps_string + str(warning_data[year]) + ', '\n\n reason_dict_list.append(dict(reason=f'Há redução na taxa de crescimento em {eps_string}'))\n\n # Checking ROE mean\n df['ROE'] = df['ROE'].map(lambda x: float(x))\n if df.ROE.mean() < 0.15:\n reason_dict_list.append(dict(reason=f'A média do ROE é de {df.ROE.mean():.2f}, menor que 0,15'))\n\n # Checking ROA mean\n df['ROA'] = df['ROA'].map(lambda x: float(x))\n if df.ROA.mean() < 0.07:\n reason_dict_list.append(dict(reason=f'A média do ROA é de {df.ROA.mean():.2f}, menor que 0,07'))\n\n # Checking Long Term Debt is < 5 * net income\n df['Total Long Term Debt'] = df['Total Long Term Debt'].map(lambda x: x.replace(',', '')).astype(float)\n df['Net Income'] = df['Net Income'].map(lambda x: x.replace(',', '')).astype(float)\n\n if df['Total Long Term Debt'].head(1).values[0] > 5 * df['Net Income'].head(1).values[0]:\n reason_dict_list.append(dict(reason=f'A Dívida de Longo Prazo maior que cinco vezes o Lucro Líquido.'))\n\n return reason_dict_list",
"def _status_sort(self):\n s = 0\n if self.status == 'failed':\n s += 10\n if self.image_status == 'diff':\n s += 3\n elif self.image_status == 'missing':\n s += 4\n if self.hash_status == 'diff':\n s += 1\n elif self.hash_status == 'missing':\n s += 5\n return f\"{s:02.0f}\"",
"def _generate_report(self):\n total_duration = 0.0\n total_nb_tests = 0\n total_nb_success = 0\n nb_modules = 0\n payload = []\n\n res_table = prettytable.PrettyTable(\n padding_width=2,\n field_names=['Module', 'Duration', 'nb. Test Run', 'Success'])\n res_table.align['Module'] = \"l\"\n res_table.align['Duration'] = \"r\"\n res_table.align['Success'] = \"r\"\n\n # for each scenario we draw a row for the table\n for item in self.summary:\n if item['task_status'] is True:\n nb_modules += 1\n total_duration += item['overall_duration']\n total_nb_tests += item['nb_tests']\n total_nb_success += item['nb_success']\n try:\n success_avg = 100 * item['nb_success'] / item['nb_tests']\n except ZeroDivisionError:\n success_avg = 0\n success_str = f\"{success_avg:0.2f}%\"\n duration_str = time.strftime(\"%H:%M:%S\",\n time.gmtime(item['overall_duration']))\n res_table.add_row([item['test_name'], duration_str,\n item['nb_tests'], success_str])\n payload.append({'module': item['test_name'],\n 'details': {'duration': item['overall_duration'],\n 'nb tests': item['nb_tests'],\n 'success rate': success_str,\n 'success': item['success'],\n 'failures': item['failures']}})\n\n total_duration_str = time.strftime(\"%H:%M:%S\",\n time.gmtime(total_duration))\n try:\n self.result = 100 * total_nb_success / total_nb_tests\n except ZeroDivisionError:\n self.result = 100\n success_rate = f\"{self.result:0.2f}\"\n success_rate_str = str(success_rate) + '%'\n res_table.add_row([\"\", \"\", \"\", \"\"])\n res_table.add_row([\"TOTAL:\", total_duration_str, total_nb_tests,\n success_rate_str])\n\n LOGGER.info(\"Rally Summary Report:\\n\\n%s\\n\", res_table.get_string())\n LOGGER.info(\"Rally '%s' success_rate is %s%% in %s/%s modules\",\n self.case_name, success_rate, nb_modules,\n len(self.summary))\n self.details['summary'] = {'duration': total_duration,\n 'nb tests': total_nb_tests,\n 'nb success': success_rate}\n self.details[\"modules\"] = payload",
"def _get_summary_table(token, run_id):\n # return (success_or_only_flakiness, failed_test_summary_table)\n return summarize.summarize_logs(dir=_LOG_OUTPUT_DIR, markdown=True)",
"def prepare_table(self):\n i = 0\n for item in ['DN[-]', 'd_out[mm]', 'tl_trub[mm]', 'roztec_trub[mm]', 'delka[mm]', 'roztec_prep[mm]', 'vyska_prep[mm]']:\n self.table.insertColumn(i)\n self.table.setHorizontalHeaderItem(i, QTableWidgetItem(item))\n i += 1\n for item in ['tl_prep[mm]','pocet_prep[-]', 'pocet_trub[-]', 'TP[m/s]', 'MZP[m/s]', 'vykon [W]',\n 'tlak_ztraty[Pa]', 'hmotnost[kg]']:\n self.table.insertColumn(i)\n self.table.setHorizontalHeaderItem(i, QTableWidgetItem(item))\n i += 1",
"def lint(self):\n report = OrderedDict()\n\n for index, reformat in self.reformats.iterrows():\n inputs, outputs = create_activations(reformat)\n inf = inputs[0].format[:4]\n outf = outputs[0].format[:4]\n if inf != outf:\n mitigation = \"\"\n if \"INT8\" in [inf, outf]:\n mitigation = \"Consider adding quantization around float operations.\"\n report[reformat.Name] = OrderedDict({\n 'name': reformat.Name,\n 'origin': reformat['attr.origin'],\n 'type conversion': f\"{inf} -> {outf}\",\n 'shape conversion': f\"{inputs[0].shape} -> {outputs[0].shape}\",\n 'hazard': \"Reformat layer is converting operand data type.\",\n 'mitigation': mitigation,\n 'help': \"Conversions between float32 and float16 are a red \"\n \"flag, as are conversions between float32/16 and INT8.\"\n })\n\n df = pd.DataFrame.from_dict(report, orient='index')\n return df",
"def breakdown_resources(self):\n print('Resources breakdown:')\n headers = ['Faction', 'Power', 'Leech', 'Coins', 'Ore', 'Knowledge', 'QIC', 'Power Tokens']\n rows = []\n for faction, stats in self.faction_stats.items():\n rows.append([\n faction,\n stats.power,\n stats.leech,\n stats.coins,\n stats.ore,\n stats.knowledge,\n stats.qic,\n stats.pt,\n ])\n print(tabulate(rows, headers=headers))",
"def __profile_to_table(data):\n output = [[\"condition\", \"mean\", \"min\", \"max\"]]\n order = data[\"order\"]\n\n for o in order:\n try:\n values = data[\"data\"][o]\n output.append(\n [o, str(mean(values)), str(min(values)), str(max(values))]\n )\n except Exception as e:\n print(e)\n\n return \"\\n\".join([\"\\t\".join(l) for l in output])",
"def get_result_table_and_info(cls):\n winning_dict = cls.get_winning_topics()\n winning_topics = winning_dict['winning_topics']\n runoff_poll_warning = winning_dict['runoff_poll_warning']\n\n # Create table\n result_table = []\n all_categories = sorted(Category.objects.all(), key=attrgetter('sum_of_votes', 'weight'), reverse=True)\n for category in all_categories:\n category_hoechstzahls = filter(lambda hoechstzahl: hoechstzahl.topic.category == category, cls.all_hoechstzahls)\n category_hoechstzahls.sort(key=lambda hoechstzahl: hoechstzahl.value, reverse=True)\n runoff_poll_warning = second_runoff_poll_check(runoff_poll_warning, category_hoechstzahls, winning_topics)\n category_hoechstzahls += (max(config['openslides_topicvoting_posts'], 3) - len(category_hoechstzahls)) * [None]\n result_table.append(category_hoechstzahls)\n\n # Return table and flags as dictionary\n return {'result_table': result_table,\n 'winning_topics': winning_topics,\n 'runoff_poll_warning': runoff_poll_warning,\n 'topic_post_warning': winning_dict['topic_post_warning']}",
"def updateTableOutcomes(self):\n if not self.data or not self.predictors or not self.outvar:\n return\n\n classification = self.outvar.varType == orange.VarTypes.Discrete\n\n # sindx is the column where these start\n sindx = len(self.data.domain.variables)\n col = sindx\n showprob = self.showProb and len(self.selectedClasses)\n fmt = \"%%1.%df\" % self.precision\n if self.showClass or (classification and showprob):\n for (cid, c) in enumerate(self.predictors.values()):\n if classification:\n for (i, d) in enumerate(self.data):\n (cl, p) = c(d, orange.GetBoth)\n\n self.classifications[i].append(cl)\n s = \"\"\n if showprob:\n s = \" : \".join([fmt % p[k] for k in self.selectedClasses])\n if self.showClass: s += \" -> \"\n if self.showClass: s += \"%s\" % str(cl)\n self.table.setItem(self.rindx[i], col, QTableWidgetItem(s))\n print s, self.rindx[i], col\n else:\n # regression\n for (i, d) in enumerate(self.data):\n cl = c(d)\n self.classifications[i].append(cl)\n self.table.setItem(self.rindx[i], col, QTableWidgetItem(str(cl)))\n col += 1\n else:\n for i in range(len(self.data)):\n for c in range(len(self.predictors)):\n self.table.setItem(self.rindx[i], col+c, QTableWidgetItem(''))\n col += len(self.predictors)\n\n for i in range(sindx, col):\n if self.showClass or (classification and self.showProb):\n self.table.showColumn(i)\n## self.table.adjustColumn(i)\n else:\n self.table.hideColumn(i)",
"def tables(args):\n\n config_file = args.setupfn\n conf_base = os.path.basename(config_file).split('.')[0]\n statfile = os.path.join(args.outputdir,\n \"{}_radvel.stat\".format(conf_base))\n status = load_status(statfile)\n\n assert status.getboolean('mcmc', 'run'), \\\n \"Must run MCMC before making tables\"\n\n P, post = radvel.utils.initialize_posterior(config_file)\n post = radvel.posterior.load(status.get('fit', 'postfile'))\n chains = pd.read_csv(status.get('mcmc', 'chainfile'))\n minafactor = status.get('mcmc', 'minafactor')\n maxarchange = status.get('mcmc', 'maxarchange')\n maxgr = status.get('mcmc', 'maxgr')\n mintz = status.get('mcmc', 'mintz')\n if 'derive' in status.sections() and status.getboolean('derive', 'run'):\n dchains = pd.read_csv(status.get('derive', 'chainfile'))\n chains = chains.join(dchains, rsuffix='_derived')\n derived = True\n else:\n derived = False\n report = radvel.report.RadvelReport(P, post, chains, minafactor, maxarchange, maxgr, mintz, derived=derived)\n tabletex = radvel.report.TexTable(report)\n attrdict = {'priors': 'tab_prior_summary', 'rv': 'tab_rv',\n 'params': 'tab_params', 'derived': 'tab_derived',\n 'crit': 'tab_crit'}\n for tabtype in args.type:\n print(\"Generating LaTeX code for {} table\".format(tabtype))\n\n if tabtype == 'ic_compare':\n assert status.has_option('ic_compare', 'ic'), \\\n \"Must run Information Criteria comparison before making comparison tables\"\n\n compstats = eval(status.get('ic_compare', 'ic'))\n report = radvel.report.RadvelReport(\n P, post, chains, minafactor, maxarchange, maxgr, mintz, compstats=compstats\n )\n tabletex = radvel.report.TexTable(report)\n tex = tabletex.tab_comparison()\n elif tabtype == 'rv':\n tex = getattr(tabletex, attrdict[tabtype])(name_in_title=args.name_in_title, max_lines=None)\n elif tabtype == 'crit':\n tex = getattr(tabletex, attrdict[tabtype])(name_in_title=args.name_in_title)\n else:\n if tabtype == 'derived':\n assert status.has_option('derive', 'run'), \\\n \"Must run `radvel derive` before making derived parameter table\"\n assert tabtype in attrdict, 'Invalid Table Type %s ' % tabtype\n tex = getattr(tabletex, attrdict[tabtype])(name_in_title=args.name_in_title)\n\n saveto = os.path.join(\n args.outputdir, '{}_{}.tex'.format(conf_base, tabtype)\n )\n with open(saveto, 'w+') as f:\n f.write(tex)\n\n savestate = {'{}_tex'.format(tabtype): os.path.relpath(saveto)}\n save_status(statfile, 'table', savestate)",
"def make_table(ranked_means):\n fp = open(\"table.tex\", \"w\")\n fp.write(\"\"\"\\\\begin{tabular}{|l|c||l|c|}\n \\\\hline\n \\\\multicolumn{2}{|c||}{Slowest} & \\\\multicolumn{2}{|c|}{Fastest} \\\\\\\\ \\\\hline\n Feature & Rate & Feature & Rate \\\\\\\\ \\\\hline\n\"\"\")\n top_10 = ranked_means[0:10]\n bottom_10 = ranked_means[-10:]\n for ((f_rate, f_name),(s_rate,s_name)) in zip(top_10, bottom_10):\n f_name = f_name.split(\":\")[-1]\n f_name = f_name.rsplit(\" \", 1)[0] if f_name.endswith(\"(V)\") else f_name\n s_name = s_name.split(\":\")[-1]\n s_name = s_name.rsplit(\" \", 1)[0] if s_name.endswith(\"(V)\") else s_name\n fp.write(\" %s & %.2f & %s & %.2f \\\\\\\\ \\n\" % \\\n (f_name, f_rate, s_name, s_rate))\n fp.write(\"\\\\hline\\n\")\n fp.write(\"\\\\end{tabular}\\n\")\n fp.close()\n\n fp = open(\"supp_meaning_table.tex\", \"w\")\n fp.write(\"\"\"\\\\begin{tabular}{|l|c||l|c||l|c||l|c|}\n \\\\hline\n Meaning & Category & Meaning & Category & Meaning & Category & Meaning & Category\\\\\\\\ \\\\hline\n\n\"\"\")\n feature_names = [f.split(\":\")[-1] for (r,f) in ranked_means]\n feature_names.sort(key=lambda s: s.lower())\n col1 = feature_names[0:25]\n col2 = feature_names[25:50]\n col3 = feature_names[50:75]\n col4 = feature_names[75:]\n for a,b,c,d in zip(col1,col2,col3,col4):\n x,y,z,w = [get_meaning_category(i) or \"Excluded\" for i in (a,b,c,d)]\n # Lop off (V)s (we needed them above for get_meaning_category to work)\n a,b,c,d = [f.rsplit(\" \", 1)[0] if f.endswith(\"(V)\") else f for f in (a,b,c,d)]\n fp.write(\"%s & %s & %s & %s & %s & %s & %s & %s\\\\\\\\ \\n\" % (a, x, b, y, c, z, d, w))\n fp.write(\"\\\\hline\\n\")\n fp.write(\"\\\\end{tabular}\\n\")\n fp.close()"
]
| [
"0.64098376",
"0.5957766",
"0.58932674",
"0.54565907",
"0.539092",
"0.53548634",
"0.53548634",
"0.5280158",
"0.52681416",
"0.52667576",
"0.5242095",
"0.520481",
"0.5186109",
"0.51242334",
"0.5049847",
"0.5039647",
"0.5037103",
"0.503655",
"0.50297123",
"0.5002275",
"0.49964055",
"0.4986349",
"0.4981937",
"0.49628454",
"0.49576846",
"0.49544477",
"0.49258268",
"0.49157262",
"0.49146897",
"0.49086097"
]
| 0.73222107 | 0 |
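The record above builds a short failure-mode table sorted by fatality rate and filtered by a sensitivity threshold. Below is a minimal, self-contained sketch of that pattern using plain floats in place of pint quantities; FailMode, its field names, and the SHOW_SENS value are illustrative stand-ins for this sketch, not the objects used in the source.

from collections import namedtuple

# Illustrative stand-in for the failure-mode objects referenced above (assumed fields).
FailMode = namedtuple("FailMode", "source_name name N_fan O2_conc tau_min phi_per_hr")

SHOW_SENS = 1e-7  # assumed display threshold, 1/hr

def report_short_table(fail_modes, sens=SHOW_SENS):
    # Sort by fatality rate, highest first, and keep only rows above the threshold.
    fail_modes = sorted(fail_modes, key=lambda f: f.phi_per_hr, reverse=True)
    table = [["Failure mode", "Fans on", "O_2", "Duration, min", "phi_i"], None]
    for f in fail_modes:
        if f.phi_per_hr >= sens:
            table.append([f"{f.source_name} {f.name}", f"{f.N_fan}",
                          f"{f.O2_conc:.0%}", f"{f.tau_min:,.1f}",
                          f"{f.phi_per_hr:.2}"])
    return table

modes = [FailMode("Dewar", "large leak", 2, 0.18, 12.0, 3.1e-6),
         FailMode("Transfer line", "small leak", 4, 0.205, 45.0, 4.0e-9)]
for row in report_short_table(modes):
    print(row)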
Calculate the probability of m out of n units working. Calculation is done using the binomial distribution. | def prob_m_of_n(m, n, T, l):
PFD_one_unit = l*T
m_of_n = binom(n, m) * (PFD_one_unit)**(n-m) * (1-PFD_one_unit)**m
return m_of_n | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate_probability(k: int, m: int, n: int) -> float:\n population = [\"AA\" for _ in range(k)] + [\"Aa\" for _ in range(m)] + [\"aa\" for _ in range(n)]\n pairings = it.combinations(population, 2)\n probabilities = [PROBABILITIES[pairing] for pairing in pairings]\n output = sum(probabilities) / len(probabilities)\n\n return output",
"def BinomialCoefficient(n, m):\n if m == 0:\n return 1\n\n elif m == 1:\n return n\n\n else:\n ma = max(n - m, m)\n mi = min(n - m, m)\n\n enum = functools.reduce(lambda x, y: x * y, range(ma + 1, n + 1), 1)\n\n return enum / Factorial(mi)",
"def binomial(n: int, p: float) -> int:\n return sum(bernoulli_trial(p) for _ in range(n))",
"def binomial(n, p):\n sum_ans = 0\n for k in range(n):\n sum_ans = sum_ans + bernoulli(p)\n return sum_ans",
"def binom_pdf(k, n,p,binom):\n return binom * p**k * (1-p)**(n-k)",
"def chance(n, p):\n total = 0.0\n for k in range(n+1):\n total += comb(n, k, exact=False) * p**k * (1-p) ** (n-k)\n return total",
"def compute_prob_mle(X: np.ndarray, n: int) -> float:\n\n assert n > 1, \"for n = 1 use Bernoulli distribution.\"\n Binomial._check_input_data(X=X)\n Binomial._check_support(X=X, n=n)\n\n prob = X.mean() / n\n return prob",
"def dbinom(self, x, n, p):\n f = math.factorial\n C = Decimal(f(n) / (f(x) * f(n-x)))\n return C * p**x * (1-p)**(n-x)",
"def probability(n, k, p):\n prob = 0\n power = expotentation_by_squaring((1-p), n)\n count_mult = math.log(n, 2)\n p_fraction = p/(1-p)\n count_mult += 1\n for i in range(0, k+1):\n element = newton(n, i)*power\n prob += element\n power *= p_fraction\n count_mult += 2\n return prob, count_mult",
"def tourney_prob(k, N, m):\n\n if N < m:\n print \"The second argument cannot be smaller than the third one.\"\n sys.exit()\n\n if m < 1 or k <= 0:\n return 0.0\n elif m == 1:\n return 1.0 / N\n else:\n return float(N - k) * m / (N * (m - 1)) * tourney_prob(k, N - 1, m - 1)",
"def prob1(n):\n#raise NotImplementedError(\"Problem 1 Incomplete\")\n if n == 0 :\n raise ValueError(\"Sampling 0 points is not defined.\")\n total = 0\n for i in xrange(n) :\n if np.random.normal() > 3 :\n total += 1\n return float(total)/n",
"def base_binom_pro(pro,n0):\n res = stats.binom.pmf(range(n0+1), n0, 1/2.0)\n a = 0\n for i in range(n0+1):\n a = a + res[i]\n if a>=pro: \n return i",
"def probability_of_all_successes(p: float, r: int, n: int) -> float:\n\n if r == 1:\n return pow(p, n)\n elif n == 0:\n return 1\n else:\n result = 0\n for x in range(0, n+1):\n result += pow(p, x) * pow(1-p, n-x) * probability_of_all_successes(p, r-1, n-x)\n return result",
"def prbs(m, n):\n return np.array(np.random.rand(m, n) > 0.5, dtype=np.int) - 0.5",
"def base_binom_num(x,n0):\n res = stats.binom.pmf(range(n0+1), n0, 1/2.0) \n a = 0 \n for i in range(n0+1):\n if i <= x:\n a = a +res[i]\n return a",
"def perform_bernoulli_trials(n, p):\n # Initialize number of successes: n_success\n n_success = 0\n\n\n # Perform trials\n for i in range(n):\n # Choose random number between zero and one: random_number\n random_number = np.random.random()\n\n # If less than p, it's a success so add one to n_success\n if random_number< p:\n n_success += 1\n\n return n_success",
"def perform_bernoulli_trials(n, p):\n # Initialize number of successes: n_success\n n_success = 0\n\n\n # Perform trials\n for i in range(n):\n # Choose random number between zero and one: random_number\n random_number = np.random.random()\n\n\n # If less than p, it's a success so add one to n_success\n if random_number < p:\n n_success += 1\n\n return n_success",
"def binom(n, r):\n return factorial(n) // ((factorial(r) * factorial(n - r)))",
"def prob1(n):\n\n # create a giant draw from a normal distribution\n random_draws = np.random.normal(loc= 0, scale = 1, size = n)\n\n # mask the values\n mask = random_draws > 3\n\n return np.sum(mask)/float(n)",
"def perform_bernoulli_trials(n, p):\n # Initialize number of successes: n_success\n n_success = 0\n # Perform trials\n for i in range(n):\n # Choose random number between zero and one: random_number\n random_number = np.random.random()\n # If less than p, it's a success so add one to n_success\n if random_number < p:\n n_success += 1\n\n return n_success",
"def perform_bernoulli_trials(n, p):\n # Initialize number of successes: n_success\n n_success = 0\n\n # Perform trials\n for i in range(n):\n # Choose random number between zero and one: random_number\n random_number = np.random.random()\n\n # If less than p, it's a success so add one to n_success\n if random_number < p:\n n_success += 1\n\n return n_success",
"def bernoulli_num(n):\n return mp.bernoulli(n)",
"def _calculate_probability(self,k):\n\t\tif abs(k * self.delta_x) > (3 * np.sqrt(self.variance)):\n\t\t\treturn 0.0\n\t\tbinom_coeff = special.binom(self.n,(self.n + k)/2)\n\t\tb_value = binom_coeff * ((self.p) ** ((self.n + k)/2)) * ((1-self.p) ** ((self.n - k)/2))\n\t\treturn b_value",
"def normal_approximation_to_binomial(n, p):\r\n mu = p*n\r\n sigma = math.sqrt(p * (1 - p) * n)\r\n\r\n return mu, sigma",
"def normal_approximation_to_binomial(n, p):\n mu = p * n\n sigma = math.sqrt(p * (1 - p ) * n)\n return mu, sigma",
"def normal_approximation_to_binomial(n, p):\n mu = p * n\n sigma = math.sqrt(p * (1 - p) * n)\n return mu, sigma",
"def normal_approximation_to_binomial(n, p):\n mu = p * n\n sigma = math.sqrt(p * (1 - p) * n)\n return mu, sigma",
"def normal_approximation_to_binomial(n, p):\n mu = p * n\n sigma = math.sqrt(p * (1 - p) * n)\n return mu, sigma",
"def normal_approximation_to_binomial(n, p):\n mu = p * n\n sigma = math.sqrt(p * (1 - p) * n)\n return mu, sigma",
"def bpmf(k, n, p):\n # this does not work for large n\n return comb(n, k) * (p**k) * ((1 - p)**(n - k))"
]
| [
"0.79381037",
"0.7571825",
"0.75367785",
"0.74892473",
"0.718715",
"0.7186842",
"0.71784484",
"0.7104355",
"0.70734644",
"0.695075",
"0.69473517",
"0.691506",
"0.6875324",
"0.6825012",
"0.6772642",
"0.67398417",
"0.673924",
"0.6729206",
"0.6699638",
"0.6692936",
"0.66819865",
"0.668028",
"0.66693807",
"0.6648317",
"0.6593942",
"0.6583839",
"0.6583839",
"0.6583839",
"0.6583839",
"0.6561219"
]
| 0.7670718 | 1 |
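The prob_m_of_n record above assumes binom(n, m) is a binomial coefficient (e.g. scipy.special.binom). As a quick sanity check, here is a hedged restatement using math.comb with one worked number; the 1% probability of failure on demand is an invented illustration, not a value from the source.

from math import comb

def prob_m_of_n(m, n, pfd):
    # Probability that exactly m of n identical units work, where
    # pfd = lambda * T is each unit's probability of failure on demand.
    return comb(n, m) * pfd**(n - m) * (1 - pfd)**m

# Exactly 2 of 3 fans available when each is unavailable 1% of the time:
# C(3, 2) * 0.01**1 * 0.99**2 = 0.029403
print(prob_m_of_n(2, 3, 0.01))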
Calculate the final oxygen concentration for continuous flow. Equivalent to conc_vent(V, R, Q, float('inf')). | def conc_final(V, R, Q):
if Q > 0:
C = 0.21/(Q+R)*Q
elif abs(Q) <= abs(R):
C = 0
elif abs(Q) > abs(R):
C = 0.21*(1-R/abs(Q))
return C | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def conc_vent(V, R, Q, t):\n if Q > 0:\n C = 0.21/(Q+R) * (Q+R*math.e**-((Q+R)/V*t))\n elif abs(Q) <= R:\n C = 0.21*math.e**-(R/V*t)\n elif abs(Q) > R:\n C = 0.21*(1-R/abs(Q)*(1-math.e**-(abs(Q)*t/V)))\n return C",
"def conc_after(V, C_e, Q, t, t_e):\n C = 0.21-(0.21-C_e)*math.e**-(abs(Q)/V*(t-t_e))\n return C",
"def concentration(self, time: float) -> _VectorisedFloat:\n concentration = self.concentration_model.concentration(time)\n for interaction in self.short_range:\n concentration += interaction.short_range_concentration(self.concentration_model, time)\n return concentration",
"def get_convection_vent(self,T_i,el):\n\n rad = radiation.Radiation()\n T_atm = rad.getTemp(el)\n\n Q_vent = self.mdot*self.Cp_air0*(T_i-T_atm) # Convection due to released air\n return Q_vent",
"def concentration(self, time: float) -> _VectorisedFloat:\n return (self._normed_concentration_cached(time) * \n self.normalization_factor())",
"def concentration(self):\n return self._gev_bijector.concentration",
"def compute_centrifugal(self):\r\n # update the coordinates\r\n self.get_coords()\r\n\r\n # compute the centrifugal force\r\n self.centrifugal.assign(project(\r\n -1*self.rho*cross(self.omega, cross(self.omega, self.r)), self.V))",
"def _get_concentration(self, state):\n return self.fc(state.float_features).exp() + self.EPSILON",
"def conductivity(self):\n m = 1.67296736e-02 # Determined from optimisation\n c = 8.54665149e-05 # Determined from optimisation\n return m * self.concentration + c",
"def crestCavity(self):\n return self.optimiseParam(lambda ph: -self.phaseToMomentum(ph), 'Crest cavity', 'phase', 'degrees', tol=1e-4)",
"def get_coulomb_virial(self):\n if self._coulomb_virial is None:\n self._coulomb_virial = self._get_potential(self._system._coulomb)\n return self._coulomb_virial",
"def _normed_concentration(self, time: float) -> _VectorisedFloat:\n # The model always starts at t=0, but we avoid running concentration calculations\n # before the first presence as an optimisation.\n if time <= self._first_presence_time():\n return self.min_background_concentration()/self.normalization_factor()\n \n next_state_change_time = self._next_state_change(time)\n\n RR = self.removal_rate(next_state_change_time)\n # If RR is 0, conc_limit does not play a role but its computation \n # would raise an error -> we set it to zero.\n try:\n conc_limit = self._normed_concentration_limit(next_state_change_time)\n except ZeroDivisionError:\n conc_limit = 0.\n\n t_last_state_change = self.last_state_change(time)\n conc_at_last_state_change = self._normed_concentration_cached(t_last_state_change)\n\n delta_time = time - t_last_state_change\n fac = np.exp(-RR * delta_time)\n\n return conc_limit * (1 - fac) + conc_at_last_state_change * fac",
"def capenergy(C, V):\n energy = 1 / 2 * C * V ** 2\n return energy",
"def vcapcharge(t, Vs, R, C):\n if t < 0:\n raise ValueError(\"Time must be greater than or equal to zero.\")\n if R * C == 0:\n raise ValueError(\"Resistance and Capacitance must be non-zero.\")\n Vc = Vs * (1 - _np.exp(-t / (R * C)))\n return Vc",
"def convex_conj(self):\n return ConstantFunctional(self.domain, -self.constant)",
"def _GetConcentrationCorrection(self): \n # Shorthand for coeff * log(concentration)\n mult_log_c_list = [c.coeff * numpy.log(c.phase.Value())\n for c in self.reactants]\n\n # Compute log(Q) - the log of the reaction quotient\n log_Q = sum(mult_log_c_list)\n \n _r = constants.R\n _t = constants.DEFAULT_TEMP\n return _r * _t * log_Q",
"def convex_conj(self):\n return IndicatorZero(self.domain, -self.constant)",
"def calc_cogen_const(q_heat_Wh, thermal_eff, electrical_eff):\n q_fuel_Wh = q_heat_Wh / thermal_eff\n p_el_Wh = q_fuel_Wh * electrical_eff\n q_anth_Wh = q_fuel_Wh - (q_heat_Wh + p_el_Wh)\n return q_fuel_Wh, p_el_Wh, q_anth_Wh",
"def __calc_concentration(self, diam, data, dmin, dmax):\n\n dp = np.log10(diam*1e-9)\n conc = data # smoothed\n dmin = np.max((np.log10(dmin),dp[0]))\n dmax = np.min((np.log10(dmax),dp[-1]))\n dpi = np.arange(dmin,dmax,0.001)\n conci = np.sum(interp1d(dp,conc,kind='nearest')(dpi)*0.001,axis=1)\n return conci",
"def watercondense(f=0.1, t=1800, rh=0.8):\n p1 = 25e-3 # saturated water partial pressure at air temperature, [bar]\n p2 = 12.3e-3 # saturated water partial pressure underground, [bar]\n c1 = p1 * 1e2 * rh / (R * (tem + 11)) # water mol on the earth surface, [mol/L]\n c2 = p2 * 1e2 / (R * tem) # saturated water mol in the pipe, [mol/L]\n c_con = c1 - c2 # water concentration difference for condensation, [mol/L]\n v_con = c_con * f * t * 18 / 1000 # total volume of condensed water in 1800 s, [L]\n num = int(v_con / V_drop)\n return v_con, num",
"def convex_conj(self):\n return IndicatorNuclearNormUnitBall(\n self.domain,\n conj_exponent(self.outernorm.exponent),\n conj_exponent(self.pwisenorm.exponent))",
"def _normed_concentration(self, concentration_model: ConcentrationModel, time: float) -> _VectorisedFloat: \n start, stop = self.presence.boundaries()[0]\n # Verifies if the given time falls within a short-range interaction\n if start <= time <= stop:\n dilution = self.dilution_factor()\n jet_origin_concentration = self.expiration.jet_origin_concentration()\n # Long-range concentration normalized by the virus viral load\n long_range_normed_concentration = self._long_range_normed_concentration(concentration_model, time)\n \n # The long-range concentration values are then approximated using interpolation:\n # The set of points where we want the interpolated values are the short-range particle diameters (given the current expiration); \n # The set of points with a known value are the long-range particle diameters (given the initial expiration);\n # The set of known values are the long-range concentration values normalized by the viral load.\n long_range_normed_concentration_interpolated=np.interp(np.array(self.expiration.particle.diameter), \n np.array(concentration_model.infected.particle.diameter), long_range_normed_concentration)\n \n # Short-range concentration formula. The long-range concentration is added in the concentration method (ExposureModel).\n # based on continuum model proposed by Jia et al (2022) - https://doi.org/10.1016/j.buildenv.2022.109166\n return ((1/dilution)*(jet_origin_concentration - long_range_normed_concentration_interpolated))\n return 0.",
"def short_range_concentration(self, concentration_model: ConcentrationModel, time: float) -> _VectorisedFloat:\n return (self._normed_concentration(concentration_model, time) * \n concentration_model.virus.viral_load_in_sputum)",
"def prada(self):\n scale_factor = 1.0 / (1.0 + self.snapshot.header.redshift)\n r200c_physical = self.r200c * scale_factor / 1000.0 # units Mpc\n\n v200 = (\n (self.snapshot.const.G * self.m200c)\n / r200c_physical\n * self.snapshot.const.Mpc ** 2\n / 1000.0 ** 2\n ) ** 0.5 # units km/s\n\n def y(x, vmax, v200):\n func = np.log(1 + x) - (x / (1 + x))\n return ((0.216 * x) / func) ** 0.5 - (vmax / v200)\n\n concentration = np.zeros((len(self.vmax)))\n for halo in range(self.N_halos):\n if v200[halo] > self.vmax[halo]:\n concentration[halo] = -9999.0\n else:\n try:\n concentration[halo] = newton(\n y, x0=5.0, args=(self.vmax[halo], v200[halo])\n )\n except:\n concentration[halo] = -9999.0\n\n return concentration",
"def open_circ():\n\n set_mode(mode_cc) # set operation mode to CC\n time.sleep(.250)\n set_CC_current(cc_current=0) # set CC mode current to 0 amps\n time.sleep(.1)\n \n oc_vals = get_input_values() # read open circuits levels\n oc_data_point = data_point(oc_vals) # create data point for open circuit measurement\n voc = oc_data_point[3] # open circuit voltage measurement\n print('Open circuit voltage: ', voc)\n write_data_tofile(oc_data_point) # write data to file\n \n return voc",
"def species_concentration_evolution(self, T, end_t, n_steps=101):\n time_steps = np.linspace(0, end_t, n_steps)\n # solver = ODE_int_solver(T, self.xi, self.ki, self.b_ki, self.vi_p, self.vi_dp)\n solver = ODE_int_solver(T, self)\n sol, _, _ = solver.solve(time_steps)\n return sol",
"def convex_conj(self):\n if self.operator is None:\n tmp = IndicatorZero(space=self.domain, constant=-self.constant)\n if self.vector is None:\n return tmp\n else:\n return tmp.translated(self.vector)\n\n if self.vector is None:\n # Handle trivial case separately\n return QuadraticForm(operator=self.operator.inverse,\n constant=-self.constant)\n else:\n # Compute the needed variables\n opinv = self.operator.inverse\n vector = -opinv.adjoint(self.vector) - opinv(self.vector)\n constant = self.vector.inner(opinv(self.vector)) - self.constant\n\n # Create new quadratic form\n return QuadraticForm(operator=opinv,\n vector=vector,\n constant=constant)",
"def _vce(self):\n sum = 0.0\n for sail in self.sails:\n cl2 = sail.cl(self.awa)**2\n cd2 = sail.cd(self.awa)**2\n sum += sail.area * sail.vce * sail.bk * np.sqrt(cl2+cd2)\n self._area()\n deltaCH = 0 if self.sails[1].up!=True else (1-self.ftj)*0.05*self.sails[1].IG\n Zce = sum/(self.area*np.sqrt(self.cl**2+self.cd**2)) - deltaCH\n return (Zce*(1-0.203*(1-self.flat)-0.451*(1-self.flat)*(1-self.fractionality)))",
"def _normed_concentration_limit(self, time: float) -> _VectorisedFloat:\n V = self.room.volume\n RR = self.removal_rate(time)\n \n return (self.population.people_present(time) / (RR * V) +\n self.min_background_concentration()/self.normalization_factor())",
"def builtin_voltage(\n donor_conc: float, # donor concentration\n acceptor_conc: float, # acceptor concentration\n intrinsic_conc: float, # intrinsic concentration\n) -> float:\n\n if donor_conc <= 0:\n raise ValueError(\"Donor concentration should be positive\")\n elif acceptor_conc <= 0:\n raise ValueError(\"Acceptor concentration should be positive\")\n elif intrinsic_conc <= 0:\n raise ValueError(\"Intrinsic concentration should be positive\")\n elif donor_conc <= intrinsic_conc:\n raise ValueError(\n \"Donor concentration should be greater than intrinsic concentration\"\n )\n elif acceptor_conc <= intrinsic_conc:\n raise ValueError(\n \"Acceptor concentration should be greater than intrinsic concentration\"\n )\n else:\n return (\n Boltzmann\n * T\n * log((donor_conc * acceptor_conc) / intrinsic_conc**2)\n / physical_constants[\"electron volt\"][0]\n )"
]
| [
"0.7371679",
"0.68841326",
"0.62628317",
"0.6067613",
"0.600988",
"0.5925458",
"0.58317536",
"0.57587767",
"0.57530713",
"0.57408696",
"0.55814916",
"0.55788034",
"0.5552848",
"0.5534186",
"0.5490134",
"0.5466125",
"0.5437804",
"0.5426694",
"0.542646",
"0.5424657",
"0.5414025",
"0.5396273",
"0.53424823",
"0.5338725",
"0.53349346",
"0.5327268",
"0.5318109",
"0.53123826",
"0.5306586",
"0.5303254"
]
| 0.6911598 | 1 |
Print the results of the ODH analysis for a volume. If several volumes are given (in case of overlapping volumes) the worst case will be printed. | def print_result(*Volumes):
max_phi = -1/ureg.hr
for volume in Volumes:
if volume.phi > max_phi:
            max_phi = volume.phi
            max_volume = volume
    line_1 = '# Fatality rate for {} is {:.1e} # '.format(max_volume, max_volume.phi)
pad = len(line_1)
line_2 = '# Recommended ODH class {}'.format(max_volume.odh_class()).ljust(pad-1)+'#'
print('#'*pad)
print(line_1)
print(line_2)
print('#'*pad) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print_results(inside_ellipsoid_per_step, inside_ellipsoid_all, conf):\n if conf.verbosity > 0:\n per_step_percentage = np.sum(inside_ellipsoid_per_step, axis=0) / float(\n conf.n_rollouts)\n print(\"\"\"\\n=====Uncertainty propagation===== \"\"\")\n print(\"\"\"Per step percentage of system states inside ellipsoid:\"\"\")\n step_str = \"Step \"\n perc_str = \"Inside Percentage \"\n for i in range(conf.n_safe):\n step_str += \"| {} \".format(i)\n perc_str += \"| {} \".format(per_step_percentage[i])\n print(step_str)\n print(perc_str)\n print((\"\"\" Trajectory safety percentage: {}\"\"\".format(\n float(sum(inside_ellipsoid_all)) / conf.n_rollouts)))",
"def print_statistics(self):\n print 'Ran %s iterations in %0.3f seconds\\n' % (\n self.iterations, self.elapsed_time)\n\n print 'Overall Equity'\n for index in range(len(self.holdem_ranges)):\n range_short_form = '%r' % self.holdem_ranges[index]\n print 'P%s) %-15s %0.3f' % (\n index,\n range_short_form,\n float(self.win_stats.get(index, 0))/self.iterations)\n print '\\n'\n print 'Hand distribution for each player'\n for stats in self.player_stats:\n stats.print_report()",
"def printh(*args, **kwargs):\n if hvd.rank() == 0:\n print(*args, **kwargs)",
"def print_run_perfs(verbose_dico):\n for item_strat, v1 in verbose_dico.items():\n print(item_strat)\n list_of_perfs = []\n list_of_prints = []\n for skill_strat, v2 in v1.items():\n list_of_params = []\n acpl_perfs = []\n acpr_perfs = []\n for param_value, v3 in v2.items():\n list_of_params.append(param_value)\n for period, v4 in v3.items():\n strat_perf = np.mean(verbose_dico[item_strat][skill_strat][param_value][period])\n if period == \"learning\":\n acpl_perfs.append(strat_perf)\n if period == \"retention\":\n acpr_perfs.append(strat_perf)\n # Find best parameters\n best_param_index = np.argmax(acpr_perfs)\n list_of_perfs.append(np.around(acpr_perfs[best_param_index],3))\n list_of_prints.append(\"\\t {0:>26} | Best param : {1:>3} | ACPL = {2:>6} | ACPR = {3:>6}\".format(skill_strat,\n np.around(list_of_params[best_param_index],2),\n np.around(acpl_perfs[best_param_index],3),\n np.around(acpr_perfs[best_param_index],3)))\n for strat_index in np.argsort(list_of_perfs)[::-1]:\n print(list_of_prints[strat_index])",
"def brain_vol_info():\n\n # Parse command line arguments\n parser = argparse.ArgumentParser(description=\"Query brain volume data.\")\n parser.add_argument(\"volume\", help=\"The volume file to load. Should be in mgh, mgz or nifti format.\")\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument('-c', '--crs', nargs='*', help=\"The query voxel, defined as a 0-based index into the volume. For a 3D volume, this would be 3 integers which represent the CRS (column, row, slice) of the voxel, like 128 128 128.\")\n group.add_argument(\"-f\", \"--crs-file\", help=\"A file containing the voxels to query, one per line. A voxel should be given by zero-based indices into each dimension of the volume, e.g., '0 23 188'.\")\n group.add_argument('-a', '--all-values', help=\"Instead of returning the value for a single voxel, return all voxel values which occur in the volume. Forces integer values (by rounding).\", action=\"store_true\")\n group.add_argument('-l', '--all-value-counts', help=\"Instead of returning the value for a single voxel, return the counts for all voxel values which occur in the volume. The order of the counts is guaranteed to be identical to the order of the output when running with '-a'. Forces integer values (by rounding).\", action=\"store_true\")\n parser.add_argument(\"-v\", \"--verbose\", help=\"Increase output verbosity.\", action=\"store_true\")\n parser.add_argument(\"-s\", \"--separator\", help=\"Output separator (between vertex coords / indices).\", default=\" \")\n args = parser.parse_args()\n\n volume_file = args.volume\n verbose = args.verbose\n sep = args.separator\n\n vol_data = nib.load(volume_file).get_data()\n if verbose:\n print(\"---Brain Vol Info---\")\n print(\"Volume has %d dimensions, shape %s and data type %s. It contains %d voxels.\" % (len(vol_data.shape), vol_data.shape, vol_data.dtype, len(np.ravel(vol_data))))\n\n voxel_value_print_format = \"%f\"\n if np.issubdtype(vol_data.dtype, np.integer):\n voxel_value_print_format = \"%d\"\n\n if args.all_values or args.all_value_counts:\n if verbose:\n print(\"NOTE: This mode treats the intensity values in the volume as integers. You should only use it if that is suitable for the input volume.\")\n voxel_value_print_format = \"%d\"\n vol_data = np.rint(vol_data).astype(int) # Force integer values. For floats, you would get as many values of there are voxels, and this does not make sense.\n vol_data_flat = np.ravel(vol_data)\n occuring_values = dict()\n for value in vol_data_flat:\n if value in occuring_values:\n occuring_values[value] = occuring_values[value] + 1\n else:\n occuring_values[value] = 1\n if args.all_values:\n if verbose:\n print(\"Printing all %d different intensity values that occur within the volume.\" % (len(occuring_values)))\n print(sep.join([str(k) for k in sorted(occuring_values.keys())]))\n else:\n if verbose:\n print(\"Printing the counts for the %d different intensity values that occur within the volume. Sum of counts is %d.\" % (len(occuring_values), sum(occuring_values.values())))\n print(sep.join([str(pair[1]) for pair in sorted(occuring_values.items(), key=lambda pair: pair[0])]))\n\n else:\n if args.crs:\n voxel_index = tuple([int(x) for x in args.crs])\n voxel_display_string = \" \".join(args.crs)\n if verbose:\n print(\"Received 1 voxel index (with %d dimensions) from the command line. 
Printing intensity value of the voxel '%s' in the volume.\" % (len(voxel_index), voxel_display_string))\n if len(voxel_index) != len(vol_data.shape):\n warnings.warn(\"Dimension mismatch: Received query voxel with %d dimenions, but the volume has %d.\" % (len(voxel_index), len(vol_data.shape)))\n print(voxel_value_print_format % (vol_data[voxel_index]))\n else:\n voxel_indices = nit.load_voxel_indices(args.crs_file)\n voxel_values = []\n if voxel_indices.shape[1] != len(vol_data.shape):\n warnings.warn(\"Dimension mismatch: Received query voxels with %d dimensions, but the volume has %d.\" % (voxel_indices.shape[1], len(vol_data.shape)))\n if verbose:\n print(\"Received %d voxel indices (with %d dimensions) from file '%s'. Printing their intensity values in the volume.\" % (voxel_indices.shape[0], voxel_indices.shape[1], args.crs_file))\n for voxel_index in voxel_indices:\n voxel_index = tuple(voxel_index.tolist())\n voxel_values.append(vol_data[voxel_index])\n print(sep.join([str(v) for v in voxel_values]))\n\n sys.exit(0)",
"def PrintDetailedE(GaussE, TinkE, sumE, result_format):\n ## Force to lowercase for search\n result_format = [each_form.lower() for each_form in result_format]\n if 'all' in result_format:\n print(\"\\n\")\n print(f\"Gaussian Energy (au): {GaussE/har2ev:.10f} +\")\n print(f\"Tinker Energy (au): {TinkE/har2ev:.10f}\")\n print(\"===========================================\")\n print(f\"QM/MM Energy (au): {sumE/har2ev:.10f}\\n\")\n print(\"\\n\")\n print(f\"Gaussian Energy (kcal/mol): {GaussE/kcal2ev:.10f} +\")\n print(f\"Tinker Energy (kcal/mol): {TinkE/kcal2ev:.10f}\")\n print(\"====================================================\")\n print(f\"QM/MM Energy (kcal/mol): {sumE/kcal2ev:.10f}\\n\")\n print(\"\\n\")\n print(f\"Gaussian Energy (eV): {GaussE:.10f} +\")\n print(f\"Tinker Energy (eV): {TinkE:.10f}\")\n print(\"============================================\")\n print(f\"QM/MM Energy (eV): {sumE:.10f}\\n\")\n else:\n one_printed = False\n print(\"\")\n for form in result_format:\n if form.lower() == 'au':\n print(f\"Gaussian Energy (au): {GaussE/har2ev:.10f} +\")\n print(f\"Tinker Energy (au): {TinkE/har2ev:.10f}\")\n print(\"==========================================\")\n print(f\"QM/MM Energy (au): {sumE/har2ev:.10f}\\n\")\n one_printed = True\n elif form.lower() in ('hartree', 'hartrees', 'hartree(s)'):\n print(f\"Gaussian Energy (Hartrees): {GaussE/har2ev:.10f} +\")\n print(f\"Tinker Energy (Hartrees): {TinkE/har2ev:.10f}\")\n print(\"================================================\")\n print(f\"QM/MM Energy (Hartrees): {sumE/har2ev:.10f}\\n\")\n one_printed = True\n elif form.lower() in ('kcal', 'kcal/mol'):\n print(f\"Gaussian Energy (kcal/mol): {GaussE/kcal2ev:.10f} +\")\n print(f\"Tinker Energy (kcal/mol): {TinkE/kcal2ev:.10f}\")\n print(\"====================================================\")\n print(f\"QM/MM Energy (kcal/mol): {sumE/kcal2ev:.10f}\\n\")\n one_printed = True\n elif form.lower() == 'ev':\n print(f\"Gaussian Energy (eV): {GaussE:.10f} +\")\n print(f\"Tinker Energy (eV): {TinkE:.10f}\")\n print(\"============================================\")\n print(f\"QM/MM Energy (eV): {sumE:.10f}\\n\")\n one_printed = True\n else:\n if one_printed == True:\n print(f\"I can't parse your requested result_format, '{form}'.\")\n print(\"I only accept a list of values, such as:\")\n print(\" 'au', 'hartrees', 'ev', 'kcal', 'all'\")\n else:\n print(f\"I can't parse your requested result_format, '{form}'.\")\n print(\"I only accept a list of values, such as:\")\n print(\" 'au', 'hartrees', 'ev', 'kcal', 'all'\")\n print(\"Printing au instead...\\n\")\n print(f\"Gaussian Energy (au): {GaussE/har2ev:.10f} +\")\n print(f\"Tinker Energy (au): {TinkE/har2ev:.10f}\")\n print(\"============================================\")\n print(f\"QM/MM Energy (au): {sumE/har2ev:.10f}\\n\")",
"def report(self, brief=True, sens=None):\n self.fail_modes.sort(key=lambda x: x.phi, reverse=True)\n sens = sens or SHOW_SENS\n title = f'ODH report for {self}'\n padding = len(title) + 10\n print('#'*padding)\n print(title)\n print('-'*padding)\n if brief:\n print('Printing brief ODH report')\n print(f'Only leaks with Fatality rate > {sens} are shown')\n for f_mode in self.fail_modes:\n if f_mode.phi >= sens or not brief:\n print()\n print(f' Source: {f_mode.source.name}')\n print(f' Failure: {f_mode.name}')\n print(f' Fatality rate: {f_mode.phi.to(1/ureg.hr):.2~}')\n print(f' Building is powered: {not f_mode.outage}')\n print(f' Oxygen concentration: {f_mode.O2_conc:.0%}, '\n f'{f_mode.O2_conc/0.21:.0%} percent of norm')\n print(f' Leak failure rate: {f_mode.leak_fr:.3g~}')\n print(' ODH protection PFD: '\n f'{(f_mode.P_i/f_mode.leak_fr).to(ureg.dimensionless):.2~}')\n print(f' Total failure rate: {f_mode.P_i.to(1/ureg.hr):.2~}')\n print(f' Leak rate: {f_mode.q_leak:.2~}')\n print(f' Event duration: {f_mode.tau:.2~}')\n print(f' Fans working: {f_mode.N_fan}')\n print(f' Fan rate: {f_mode.Q_fan:.2~}')\n print(f' Fatality prob: {f_mode.F_i:.0%}')",
"def main(verbose=True):\n if verbose: \n print(\"\\n---------------\")\n printCommonSNPCounts()\n print(\"---------------\")\n \n print(\"Charles River\")\n print(\"---------------\") \n getCommonSNPIndices(\"C\", save=True)\n print(\"---------------\")\n \n print(\"Harlan River\")\n getCommonSNPIndices(\"H\", save=True)\n print(\"---------------\")\n else:\n getCommonSNPIndices(\"C\", save=True)\n getCommonSNPIndices(\"H\", save=True)",
"def show_summary(self, out = None, debug = False):\n if (out is None) : out = sys.stdout\n results = self.matching_candidates\n if (len(results) > 0):\n self.atom_props.show_properties(identity = \"HOH\", out = out)\n if (self.nuc_phosphate_site):\n print(\" appears to be nucleotide coordination site\", file=out)\n if (self.no_final):\n print(\" Found potential ion%s outside of specified set:\" % \\\n (\"s\" if len(results) > 1 else \"\"), file=out)\n if (self.final_choice is not None):\n # We have one result that we are reasonably certain of\n elem_params, score = results[0]\n if elem_params.element not in mmtbx.ions.HALIDES:\n self.atom_props.show_ion_results(\n identity = str(self.final_choice),\n out = out,\n valence_used = self.valence_used,\n confirmed = True)\n else:\n print(\" Probable anion:\", str(elem_params), file=out)\n print(\"\", file=out)\n elif (len(results) > 1):\n # We have a couple possible identities for the atom\n below_cutoff = [ elem_params for elem_params, score in results\n if score < self.ambiguous_valence_cutoff]\n if len(below_cutoff) == 1:\n elem_params = below_cutoff[0]\n print(\" ambigous results, best valence from %s\" % \\\n str(elem_params), file=out)\n self.atom_props.show_ion_results(\n identity = str(elem_params),\n out = out,\n valence_used = True)\n print(\"\", file=out)\n else:\n ions = [str(i[0]) for i in sorted(results, key = lambda x: x[1])]\n print(\" ambiguous results, could be %s\" % \", \".join(ions), file=out)\n for elem_params, score in results :\n self.atom_props.show_ion_results(identity = str(elem_params),\n out = out)\n print(\"\", file=out)\n else:\n if (self.atom_type != WATER) or (self.nuc_phosphate_site):\n self.atom_props.show_properties(identity = \"HOH\", out = out)\n if (self.nuc_phosphate_site):\n print(\" appears to be nucleotide coordination site\", file=out)\n # try anions now\n if (self.looks_like_halide):\n print(\" Probable cation: %s\" % str(self.final_choice), file=out)\n print(\"\", file=out)\n else:\n # atom is definitely not water, but no reasonable candidates found\n # print out why all the metals we tried failed\n if (debug) and (len(self.filtered_candidates) > 0):\n print(\" insufficient data to identify atom\", file=out)\n possible = True\n for params in self.filtered_candidates:\n if (self.atom_props.has_compatible_ligands(str(params))):\n if possible:\n print(\" possible candidates:\", file=out)\n possible = False\n self.atom_props.show_ion_results(identity = str(params),\n out = out)\n else :\n print(\" incompatible ligands for %s\" % str(params), file=out)\n #print >> out, \" rejected as unsuitable:\"\n #for params in self.rejected_candidates:\n # if (self.atom_props.has_compatible_ligands(str(params))):\n # self.atom_props.show_ion_results(identity = str(params),\n # out = out)\n # else :\n # print >> out, \" incompatible ligands for %s\" % str(params)\n print(\"\", file=out)",
"def check_volumes(\n self, showall: bool = False, display: bool = True, raise_error: bool = False\n ) -> str | None:\n volumes = self.consumed_and_produced_volumes()\n conslines = []\n badlines = []\n for k, (consumed, made) in volumes.items():\n if made.m == 0:\n conslines.append(f\"Consuming {consumed} of untracked {k}.\")\n elif consumed > made:\n badlines.append(f\"Making {made} of {k} but need at least {consumed}.\")\n elif showall:\n conslines.append(f\"Consuming {consumed} of {k}, making {made}.\")\n\n if badlines and raise_error:\n raise VolumeError(\"\\n\".join(badlines))\n\n if display:\n print(\"\\n\".join(badlines))\n print(\"\\n\")\n print(\"\\n\".join(conslines))\n return None\n else:\n return \"\\n\".join(badlines) + \"\\n\" + \"\\n\".join(conslines)",
"def run(pv, time_span, abs_z, with_no_data, output, verbose_off):\n avg, df = fetch_data(pv, time_span, abs_z, not with_no_data,\n not verbose_off)\n _s = f\"Average readings for each PV in the past {time_span} seconds:\"\n print(_s)\n print(\"-\" * len(_s))\n for i, (ipv, iavg) in enumerate(zip(pv, avg)):\n print(f\"[{i+1}] {ipv:<30s} : {iavg:>.6g}\")\n print(\"-\" * len(_s))\n if df is not None:\n if output is None:\n click.secho(\n \"Print out the data to the screen, define --output to write into a CSV file.\",\n fg=\"red\")\n try:\n print(df.to_string())\n sys.stdout.flush()\n except BrokenPipeError:\n devnull = os.open(os.devnull, os.O_WRONLY)\n os.dup2(devnull, sys.stdout.fileno())\n sys.exit(1)\n else:\n click.secho(f\"Write the data into {output}\", fg=\"blue\")\n df.to_csv(output)",
"def print_output_genetic_algorithm(individual: str, violations: int, title: str = None):\n\n # Here is shown the title\n if title is not None:\n print(title)\n try:\n print(violations, '\\t', hex(int(individual, 2)))\n except:\n print(violations, '\\t', individual)",
"def print_intersection_report(self):\n try:\n filename = ''\n\n if self.data_filename:\n filename = '../output/' + self.data_filename + '.results.txt'\n else:\n filename = '../output/random.results.p' + str(self.parts) + '.n' + str(self.nodes) + '.txt'\n\n if self.verbose:\n print 'Printing Report data to ' + filename\n\n with open(filename, 'wt') as f:\n for report_row in self.intersection_report_data:\n f.write(str(report_row[0]) + '|' + str(report_row[1]) + '|' + str(report_row[2]))\n\n except Exception, e:\n print 'Unexpected error:', str(e)\n print 'Problems writing the data output file.'\n exit()",
"def print(self):\n\n if self._delayed_mode:\n self._nev = list(dask.compute(*self._nev))\n nev = self._nev\n print(\"N-1 selection stats:\")\n for i, name in enumerate(self._names):\n print(\n f\"Ignoring {name:<20}: pass = {nev[i+1]:<20}\\\n all = {nev[0]:<20}\\\n -- eff = {nev[i+1]*100/nev[0]:.1f} %\"\n )\n\n if True:\n print(\n f\"All cuts {'':<20}: pass = {nev[-1]:<20}\\\n all = {nev[0]:<20}\\\n -- eff = {nev[-1]*100/nev[0]:.1f} %\"\n )",
"def print_collisions(self):",
"def _showdata(self, prec=4):\n print('nh {0:d} nslices {1:d} nbl {2:d} ncp {3:d} nca {4:d} '.format(\n self.nh, self.nslices, self.nbl, self.ncp, self.nca), end=\"\")\n print(\"observables in np arrays with {:d} rows\".format(self.nslices))\n\n if len(self.observables) == 4:\n print('nca', self.nca)\n else:\n print()\n np.set_printoptions(precision=prec)\n\n print(self.fp.shape, \"fp (degrees, but stored internally in radians):\\n\",\n self.fp*self.degree, \"\\n\")\n print(self.fa.shape, \"fa:\\n\", self.fa, \"\\n\")\n\n print(self.cp.shape, \"cp (degrees, but stored internally in radians):\\n\",\n self.cp*self.degree, \"\\n\")\n if len(self.observables) == 4:\n print(self.ca.shape, \"ca:\\n\", self.ca, \"\\n\")\n # print(self.info4oif_dict)\n\n print(\"hole centers array shape:\", self.ctrs.shape)\n\n print(len(self.bholes), \"baseline hole indices\\n\", self.bholes)\n print(self.bls.shape, \"baselines:\\n\", self.bls)\n\n print(self.tholes.shape, \"triple hole indices:\\n\", self.tholes)\n print(self.tuv.shape, \"triple uv vectors:\\n\", self.tuv)\n\n print(self.qholes.shape, \"quad hole indices:\\n\", self.qholes)\n print(self.quvw.shape, \"quad uvw vectors:\\n\", self.quvw)",
"def report(self):\n print('total 1', len(self.videoids1))\n print('total 2', len(self.videoids2))\n print('total of repeats in_1', len(self.videoids_dict_repeats1))\n print('total of repeats in_2', len(self.videoids_dict_repeats2))\n print('total in_1_missing_in_2', len(self.in_1_missing_in_2))\n print('total in_2_missing_in_1', len(self.in_2_missing_in_1))",
"def volcano_plotter():\n print(\"this is volcano plotter\")\n from math import log\n with open(\"../bob/processed/24h_bobdata_ed2_volcano.csv\", \"w\") as outF:\n outF.write(\"Gene log2FoldChange pvalue\\n\")\n with open(\"../bob/processed/24h_bobdata_ed2.csv\", \"r\") as inpF:\n skipFlag = True\n missCount = 1\n for inpLine in inpF:\n if skipFlag:\n skipFlag = False\n continue\n inpLine = inpLine.split(\"\\\" \\\"\")\n curLine = []\n for inpI in inpLine:\n try:\n curLine.append(float(inpI.strip(\"\\\"\\n \")))\n except ValueError:\n curLine.append(inpI.strip(\"\\\"\\n \")) # by this point, each line in the entry file is processed into a neat list\n if curLine[2] == \"\": # if no gene name is given, just add a placeholder\n curLine[2] = \"Noname\" + str(missCount)\n missCount += 1\n # calculate log2foldChange here:\n try:\n FAvg = (curLine[4] + curLine[5] + curLine[6])/3.0 # KO\n SAvg = (curLine[7] + curLine[8] + curLine[9])/3.0 # WT\n except TypeError:\n print(curLine)\n raise\n logFoldChange = log(SAvg/FAvg,2) # so positive numbers are more abundant in the wt cells, negatives number in the KO, at least for the 24H bobdata file\n outF.write(curLine[2] + \" \" + str(logFoldChange) + \" \" + str(curLine[10]) + \"\\n\") # write out results to file",
"def print_superoctads(contained, not_contained, partial, weight):\n l = l_superoctads(contained, not_contained, partial, weight) \n for x in l: print(x)\n if len(l) == 0: print(\"No octad found\")",
"def show_dbscan():\n\n # simulate normal hourly data\n weekday = ([0.05, 0.95], 0.05) #bath, bed\n weekend = ([0.3, 0.7], 0.1)\n roomperwd, truelabelswd = make_blobs(n_samples=23, centers=weekday[0],\n cluster_std=weekday[1], random_state=0)\n roomperwe, truelabelswe = make_blobs(n_samples=8, centers=weekend[0],\n cluster_std=weekend[1], random_state=0)\n\n # combine modes\n roompers = np.vstack((roomperwd, roomperwe))\n\n # make positive and sum to one to simulate valid distribution\n for i in range(roompers.shape[0]):\n for j in range(roompers.shape[1]):\n if roompers[i, j] < 0:\n roompers[i, j] = 0\n roompersnorm = normalize(roompers, norm='l1')\n\n # simulate anomaly on most recent day where don't leave bedroom\n roompersnorm[-1, :] = np.array([0.8, 0.2])\n\n # detect outliers\n roompersdetector = HourlyRoomPercentageAnomalyDetection(roompersnorm, eps=0.3, min_samples=3)\n labels = roompersdetector.scale_and_proximity_cluster(eps=0.3, min_samples=3)\n\n # plot results\n plt.figure()\n seenflag1 = False; seenflag2 = False; seenflag3 = False;\n for i, label in enumerate(labels):\n if label == 0:\n if seenflag1:\n plt.plot(roompersnorm[i][0], roompersnorm[i][1], 'ro')\n else:\n plt.plot(roompersnorm[i][0], roompersnorm[i][1], 'ro', label='Cluster 1')\n seenflag1 = True\n elif label == 1:\n if seenflag2:\n plt.plot(roompersnorm[i][0], roompersnorm[i][1], 'kx')\n else:\n plt.plot(roompersnorm[i][0], roompersnorm[i][1], 'kx', label='Cluster 2')\n seenflag2 = True\n elif label == -1:\n if seenflag3:\n plt.plot(roompersnorm[i][0], roompersnorm[i][1], 'b^')\n else:\n plt.plot(roompersnorm[i][0], roompersnorm[i][1], 'b^', label='Outlier')\n seenflag3 = True\n plt.legend(loc='lower left')\n plt.axis([0, 1, 0, 1])\n plt.show()",
"def print_output():\n print(\"count: [primary: \"+str(primary_shards)+\", replica: \"+str(secondary_shards)+\"]\")\n print(\"size: [primary: \"+pretty_print_storage(total_size_primary)+\", replica: \"+pretty_print_storage(total_size_secondary)+\"]\")\n print(\"disk-max-node: \"+max_size_node_name)\n print(\"watermark-breached: \"+str(watermark_breached))",
"def plotVolumePDFs(self, topV=3, noSecond=True):\n take = self.bins < topV\n fig = plt.figure()\n plt.plot(self.bins[take], self.PDF[take], label='Preferential Distribution')\n plt.plot(self.bins[take], self.RandomPDF[take], label='Random Distribution')\n plt.plot(self.V1*np.ones(50), np.linspace(0, self.PDF[self.cut1]), '--', label='First Intersection - V = ' + str(round(self.V1, 2)))\n if not noSecond:\n plt.plot(self.V2 * np.ones(50), np.linspace(0, self.PDF[self.cut2]), '--', label='Second Intersection - V = ' + str(round(self.V2, 2)))\n plt.xlim([0, topV])\n plt.title('Voronoi Cell Volume PDF')\n plt.xlabel('Normed Volume [-]')\n plt.ylabel('PDF [-]')\n plt.legend()",
"def PrintSummary(self, dollarsPerKiloWattHour = 0.1149, dollarsPerDTH = 6.53535):\n\t\tprint()\n\t\tprint(\" RESULTS \")\n\t\tprint()\n\t\tprint(\"The Number of times the furnace turns on: \" + str(self.building_hvac.NumberOfTimesHeatingTurnedOn))\n\t\tprint(\"The Number of times the AC turns on: \" + str(self.building_hvac.NumberOfTimesCoolingTurnedOn))\n\t\tprint(\"The Current Temperature: \" + str(self.current_temperature) + \"C\")\n\t\tprint(\"The total Electrical power used: \" + str(self.building_hvac.GetElectricKilowattHours()) + \"KWH\")\n\t\tprint(\"The total Time: \" + str(self.building_hvac.TotalTimeInSeconds))\n\t\tprint(\"The total Time Heating was on: \" + str(self.building_hvac.TotalDurationHeatingOn))\n\t\tprint(\"The total Time Cooling was on: \" + str(self.building_hvac.TotalDurationCoolingOn))\n\t\tprint(\"The Total Gas Energy Used: \" + str(self.building_hvac.GetGasDTH()) + \" DTH\")\n\t\tprint(\"Electrical Cost: $\" + str(self.CalculateElectricEneregyCost()))\n\t\tprint(\"Gas Cost: $\" + str(self.CalculateGasEneregyCost()))",
"def display_subsenses(subsense_dict, order):\n count = 1\n for subsense in subsense_dict:\n type_sub_def = OxfordDict.get_type_of_def(subsense)\n if type_sub_def:\n type_sub_def = \" \" + type_sub_def + \" \"\n _def = subsense['definitions'][0]\n print(\" %s.%s\" % (order, count) +\n bcolors.ITALIC + bcolors.GREEN + \"%1s\" % type_sub_def + bcolors.ENDC +\n \"%s\" % OxfordDict.chunk_str(_def))\n count += 1\n if 'examples' in subsense:\n OxfordDict.display_examples(subsense['examples'])\n print(\"\\r\")",
"def print_results(self):\n pass",
"def handle_output(self, parameters):\n if not self.file_confidence or not parameters:\n raise ValueError(\"List is empty or parameters value is wrong\")\n\n if parameters.descending:\n self.file_confidence.sort(reverse=True, key=lambda elem: elem[0])\n\n if parameters.ascending:\n self.file_confidence.sort(key=lambda elem: elem[0])\n\n for item in self.file_confidence:\n if item[0] >= parameters.confidence:\n if parameters.print_confidence:\n print(f\"{item[0]}% {os.path.basename(item[1])}\")\n else:\n print(os.path.basename(item[1]))",
"def __printResults(files, expected, actual, similarity):\n if (showIndividualResults):\n for i in range(len(files)):\n print \"\\nExpected = %s\\nActual = %s \\nSimilarity = %f\" % (expected[i], actual[i], similarity[i])\n print \"\\nMean Similarity = %f\" % np.mean(similarity)",
"def Run(tests, period=YEAR, verbosity=\"all\"):\n\n # figure out what output he wants\n headings = True\n parms = True\n descr = True\n if verbosity == \"parameters\":\n descr = False\n elif verbosity == \"headings\":\n parms = False\n descr = False\n elif verbosity == \"data only\":\n parms = False\n descr = False\n headings = False\n\n # introspect the tests to find the disk/raid/rados parameters\n disk = None\n raid = None\n rados = None\n site = None\n multi = None\n for t in tests:\n c = t.__class__.__name__\n if disk is None and \"Disk\" in c:\n disk = t\n if raid is None and c.startswith(\"RAID\"):\n raid = t\n if rados is None and c.startswith(\"RADOS\"):\n rados = t\n if site is None and c.startswith(\"Site\"):\n site = t\n if multi is None and c.startswith(\"MultiSite\"):\n multi = t\n\n # find elements that only exist beneath others\n if site is None and multi is not None:\n site = multi.site\n if rados is None and multi is not None:\n rados = multi.rados\n if disk is None and rados is not None:\n disk = rados.disk\n if disk is None and raid is not None:\n disk = raid.disk\n\n if parms and disk is not None:\n print(\"Disk Modeling Parameters\")\n print(\" size: %10s\" % printSize(disk.size))\n print(\" FIT rate: %10d (MTBF = %s)\" %\n (disk.fits, printTime(mttf(disk.fits))))\n print(\" NRE rate: %10.1E\" % (disk.nre))\n\n if parms and raid is not None:\n print(\"RAID parameters\")\n print(\" replace: %16s\" % (printTime(raid.delay)))\n if raid.speed > 0:\n print(\" recovery rate: %7s/s (%s)\" %\n (printSize(raid.speed),\n printTime(raid.rebuild_time())))\n print(\" NRE model: %10s\" % (raid.nre_model))\n print(\" object size: %10s\" % (printSize(raid.objsize)))\n\n if parms and rados is not None:\n print(\"RADOS parameters\")\n print(\" auto mark-out: %14s\" % printTime(rados.delay))\n print(\" recovery rate: %8s/s (%s/drive)\" %\n (printSize(rados.speed),\n printTime(rados.rebuild_time(rados.speed))))\n print(\" osd fullness: %7d%%\" % (rados.full * 100))\n print(\" declustering: %7d PG/OSD\" % (rados.pgs))\n print(\" NRE model: %10s\" % (rados.nre_model))\n print(\" object size: %7s\" % printSize(rados.objsize, unit=1024))\n print(\" stripe length:%7d\" % (rados.stripe))\n\n if parms and site is not None:\n print(\"Site parameters\")\n s = 0 if multi is None else multi.sites\n if site.fits == 0:\n print(\" disasters: IGNORED\")\n else:\n tf = mttf(site.fits)\n print(\" disaster rate: %12s (%d FITS)\" %\n (printTime(tf), site.fits))\n if site.replace == 0:\n print(\" site recovery: NEVER\")\n else:\n print(\" site recovery: %11s\" %\n (printTime(site.replace)))\n\n if multi is not None:\n print(\" recovery rate: %8s/s (%s/PG)\" %\n (printSize(multi.speed),\n printTime(multi.rados.rebuild_time(multi.speed))))\n if multi.latency == 0:\n print(\" replication: synchronous\")\n else:\n print(\" replication: asynchronous (%s delay)\" %\n (printTime(multi.latency)))\n\n # column headings\n heads = (\"storage\", \"durability\",\n \"PL(site)\", \"PL(copies)\", \"PL(NRE)\", \"PL(rep)\", \"loss/PiB\")\n format = getFormat(heads)\n\n # column descriptions\n legends = [\n \"storage unit/configuration being modeled\",\n \"probability of object survival\",\n \"probability of loss due to site failures\",\n \"probability of loss due to drive failures\",\n \"probability of loss due to NREs during recovery\",\n \"probability of loss due to replication failure\",\n \"expected data loss per Petabyte\"\n ]\n\n if descr:\n print(\"\")\n print(\"Column legends\")\n s = printTime(period)\n i = 1\n 
while i <= len(legends):\n l = legends[i - 1]\n if i == 1:\n print(\"\\t%d %s\" % (i, l))\n else:\n print(\"\\t%d %s (per %s)\" % (i, l, s))\n i += 1\n\n if headings:\n printHeadings(heads, format)\n\n # expected data loss after drive failures\n for t in tests:\n if t is None:\n printHeadings(heads, format)\n continue\n\n # calculate the renderable reliabilities and durability\n s = list()\n t.compute(period=period)\n s.append(t.description) # description\n s.append(printDurability(t.dur)) # durability\n s.append(printProbability(t.P_site)) # P(site failure)\n s.append(printProbability(t.P_drive)) # P(drive failure)\n s.append(printProbability(t.P_nre)) # P(NRE on recovery)\n s.append(printProbability(t.P_rep)) # P(replication failure)\n l = (t.P_site * t.L_site) + (t.P_drive * t.L_drive) +\\\n (t.P_nre * t.L_nre) + (t.P_rep * t.L_rep)\n s.append(printFloat(l * PiB / t.rawsize)) # expected data loss/PiB\n print(format % tuple(s))",
"def main():\n\n\t# eesAmplitudes = range(200,321,10)\n\teesAmplitudes = [\"%\"+\"%.2f_0_0\"%(i) for i in np.arange(0,1.01,.05)]\n\t# eesFrequencies = range(10,1001,20)\n\teesFrequencies = np.logspace(1,3,50)\n\t# nrnStructureFile = \"fsSFrFfMnArtMod.txt\"\n\t# nrnStructureFile = \"fsSFrFfMnArtModHuman.txt\"\n\tnrnStructureFile = \"fsMnArtModHuman.txt\"\n\t# name = \"FreqAmpModHuman_0367S\"\n\tname = \"FreqAmpModHuman_ArtmodHuman_10msBurst\"\n\n\tnSim = len(eesFrequencies)*len(eesAmplitudes)\n\tcount=0.\n\tpercLastPrint=0.\n\tprintPeriod = 0.05\n\t# simTime = 250\n\tsimTime = 15\n\tspecies = \"human\"\n\n\tfor eesAmplitude in eesAmplitudes:\n\t\tfor eesFrequency in eesFrequencies:\n\t\t\tfilName = name+\"_amp_\"+str(eesAmplitude)+\"_freq_\"+str(eesFrequency)\n\t\t\tresultFile = gt.find(\"*\"+filName+\".p\",pathToResults)\n\t\t\tif not resultFile:\n\t\t\t\treturnCode = None\n\t\t\t\twhile not returnCode==0:\n\t\t\t\t\tprogram = ['python','scripts/computeAfferentsEfferentsModulation.py',\n\t\t\t\t\t\tstr(eesFrequency),str(eesAmplitude),species,nrnStructureFile,name,\"--simTime\",str(simTime)]\n\t\t\t\t\tprint \" \".join(program)\n\t\t\t\t\tforwardSimulation = subprocess.Popen(program, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\t\t\t\treturnCode = None\n\t\t\t\t\twhile returnCode is None:\n\t\t\t\t\t\tmessage = forwardSimulation.stdout.readline().rstrip(\"\\n\").split()\n\t\t\t\t\t\tif message != None:print \"\\t\\t\"+\" \".join(message)+\"\\t\\t\"\n\t\t\t\t\t\treturnCode = forwardSimulation.poll()\n\t\t\t\t\tif returnCode != 0: print \"\\t\\t\\t\\t Error n: \",forwardSimulation.poll(),\" resetting simulation...\"\n\t\t\tcount+=1\n\t\t\tif count/nSim-percLastPrint>=printPeriod:\n\t\t\t\tpercLastPrint=count/nSim\n\t\t\t\tprint str(round(count/nSim*100))+\"% of simulations performed...\"\n\tplot_stats(eesAmplitudes,eesFrequencies,simTime,name)",
"def print_overlaps(gt_list, det_list):\n\n overlap_list = []\n high = 0\n for i_1, grt in enumerate(gt_list):\n for i_2, det in enumerate(det_list):\n overlap = overlap_between(grt, det)\n print(i_1, i_2, overlap)\n if overlap > high:\n high = overlap\n overlap_list.append(high)\n high = 0\n\n print(overlap_list)"
]
| [
"0.6076633",
"0.58335817",
"0.578717",
"0.5764819",
"0.5708327",
"0.57060343",
"0.5651979",
"0.5650857",
"0.5488244",
"0.5408044",
"0.5405682",
"0.5396798",
"0.53912807",
"0.53626204",
"0.5359393",
"0.52684",
"0.5258393",
"0.52547324",
"0.5238997",
"0.5229664",
"0.5223999",
"0.52226514",
"0.5193584",
"0.5167076",
"0.5161649",
"0.5159788",
"0.5159653",
"0.5158769",
"0.5154062",
"0.5152609"
]
| 0.65382415 | 0 |
Initialize the Abstract base class of a Variational State defined on a Hilbert space. | def __init__(self, hilbert: AbstractHilbert):
self._hilbert = hilbert # type: AbstractHilbert
self._model_state = {} # type: PyTree
self._parameters = {} # type: PyTree | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def initialize_state(self):\n # Initialize everything to zero\n self.stateC = self.initializer((self.nSym, 1))\n self.stateC_prev = self.initializer((self.nSym, 1))\n self.state = self.toNeural(self.stateC)\n self.state_prev = self.toNeural(matrix=self.stateC_prev)\n self.inpC = self.initializer((self.nSym, 1))\n self.inpS = self.toNeural(self.inpC)\n\n # Create full traces\n self.create_full_traces()\n\n # Initialize Lotka Volterra\n self.LV_Matrices()\n\n # Allocate Temperature and Lambda\n self.vars['T'] = 0\n self.vars['lambda'] = 0",
"def __init__(self, variables, cardinality, inhibitor_probability):\n # TODO: Accept values of each state so that it could be\n # put into F to compute the final state values of the output\n self.variables = np.array([])\n self.cardinality = np.array([], dtype=int)\n self.inhibitor_probability = []\n self.add_variables(variables, cardinality, inhibitor_probability)",
"def __init__(self):\n self.state_dim = 12\n self.measurement_dim = 6",
"def __init__(self):\n raise NotImplementedError('cannot create independent state')",
"def __init__(self, initial_state, alphas, beta):\n\n assert isinstance(initial_state, Gaussian)\n assert initial_state.Sigma.shape == (3, 3)\n\n if not isinstance(initial_state, Gaussian):\n raise TypeError('The initial_state must be of type `Gaussian`. (see tools/objects.py)')\n\n if initial_state.mu.ndim < 1:\n raise ValueError('The initial mean must be a 1D numpy ndarray of size 3.')\n elif initial_state.mu.shape == (3, ):\n # This transforms the 1D initial state mean into a 2D vector of size 3x1.\n initial_state.mu = initial_state.mu[np.newaxis].T\n elif initial_state.mu.shape != (3, 1):\n raise ValueError('The initial state mean must be a vector of size 3x1')\n\n self.state_dim = 3 # [x, y, theta]\n self.motion_dim = 3 # [drot1, dtran, drot2]\n self.obs_dim = 1 # [bearing]\n\n self._state = copy(initial_state)\n self._state_bar = copy(initial_state)\n\n # Filter noise parameters.\n self._alphas = alphas\n # Measurement variance.\n self._Q = beta ** 2\n\n # Setup the field map.\n self._field_map = FieldMap()",
"def __init__(self):\n self.py = random.getstate()\n self.np = np.random.get_state()\n self.torch = torch.get_rng_state()",
"def __init__(self, shape, rate, state0, state1, variables, nStates):\n self.shape = shape\n self.rate = rate\n self.state0 = state0\n self.state1 = state1\n self.variables = variables\n self.bivariateGradInd = getBivariateFeatGradientIndex.getBivariateFeatGradientIndex(state0, state1, nStates)\n self.nStates = nStates\n self.variable = self.variables[self.bivariateGradInd]",
"def initialize_state(self):\n raise NotImplementedError()",
"def __init__(self,\n name,\n input_size,\n state_size):\n self._input_size = input_size\n self._state_size = state_size\n super(BiGRU, self).__init__(name)",
"def __init__(self, dim_x, dim_z, dim_u=0):\n\n assert dim_x > 0\n assert dim_z > 0\n assert dim_u >= 0\n\n self.dim_x = dim_x\n self.dim_z = dim_z\n self.dim_u = dim_u\n\n self._x = zeros((dim_x,1)) # state\n self._P_inv = eye(dim_x) # uncertainty covariance\n self._Q = eye(dim_x) # process uncertainty\n self._B = 0 # control transition matrix\n self._F = 0 # state transition matrix\n self._F_inv = 0 # state transition matrix\n self._H = 0 # Measurement function\n self._R_inv = eye(dim_z) # state uncertainty\n\n # gain and residual are computed during the innovation step. We\n # save them so that in case you want to inspect them for various\n # purposes\n self._K = 0 # kalman gain\n self._y = zeros((dim_z, 1))\n self._S = 0 # system uncertainty in measurement space\n\n # identity matrix. Do not alter this.\n self._I = np.eye(dim_x)\n self._no_information = False",
"def __init__(self, x0, mus0, hists):\n # my state variable\n x = np.array([mus0, x0], dtype=np.float64)",
"def __init__(self, init_state):\n self._curr_state = init_state",
"def __init__(self):\n super(SiLU, self).__init__()",
"def __init__(self):\n\n super().__init__()\n\n self._model = None # type: StateSpaceModel\n self._kernel = None # type: Distribution",
"def __init__(self, *args, **kwds):\n if args or kwds:\n super(StateInstantiation, self).__init__(*args, **kwds)\n # message fields cannot be None, assign default values for those that are\n if self.state_path is None:\n self.state_path = ''\n if self.state_class is None:\n self.state_class = ''\n if self.initial_state_name is None:\n self.initial_state_name = ''\n if self.input_keys is None:\n self.input_keys = []\n if self.output_keys is None:\n self.output_keys = []\n if self.cond_outcome is None:\n self.cond_outcome = []\n if self.cond_transition is None:\n self.cond_transition = []\n if self.behavior_class is None:\n self.behavior_class = ''\n if self.parameter_names is None:\n self.parameter_names = []\n if self.parameter_values is None:\n self.parameter_values = []\n if self.position is None:\n self.position = [0.] * 2\n if self.outcomes is None:\n self.outcomes = []\n if self.transitions is None:\n self.transitions = []\n if self.autonomy is None:\n self.autonomy = []\n if self.userdata_keys is None:\n self.userdata_keys = []\n if self.userdata_remapping is None:\n self.userdata_remapping = []\n else:\n self.state_path = ''\n self.state_class = ''\n self.initial_state_name = ''\n self.input_keys = []\n self.output_keys = []\n self.cond_outcome = []\n self.cond_transition = []\n self.behavior_class = ''\n self.parameter_names = []\n self.parameter_values = []\n self.position = [0.] * 2\n self.outcomes = []\n self.transitions = []\n self.autonomy = []\n self.userdata_keys = []\n self.userdata_remapping = []",
"def init_state(self) -> None:\n self.state = np.zeros(self.shape, dtype=int)",
"def initialize(self):\n\n for i, item in enumerate(self.v.items()):\n state, value = item\n if value == None:\n raise ValueError, \"state '%s' has no value\" % state\n self.S[i]=value\n self.storage=Storage()",
"def __init__(self, init_state):\n\n self.PUZZLE_TYPE = len(init_state) - 1\n self.initial_state = init_state\n self.current_state = init_state\n self.goal_state = [i for i in range(0, self.PUZZLE_TYPE + 1)]\n self.explored_states = []",
"def __init__(self, state):\n super().__init__(\"set_matrix_product_state\", len(state[0]), 0, [state])",
"def __init__(self, hilbert):\n super().__init__(DoubledHilbert(hilbert))",
"def __setstate__(self,pdict):\n self.__init__()\n self.upperBoundUsed = pdict.pop('upperBoundUsed' )\n self.lowerBoundUsed = pdict.pop('lowerBoundUsed' )\n self.hasInfiniteBound = pdict.pop('hasInfiniteBound')\n self.upperBound = pdict.pop('upperBound' )\n self.lowerBound = pdict.pop('lowerBound' )\n self.__adjustmentType = pdict.pop('adjustmentType' )\n self.dimensionality = pdict.pop('dimensionality' )\n self.type = pdict.pop('type' )\n self._localSetState(pdict)\n self.initializeDistribution()",
"def __init__( self, state=None ):\n\n raise NotImplementedError(\"__init__\");",
"def __setstate__(self, _state : dict):\n self.__init__(**_state)",
"def __init__(self, inp_subQ, inp_divQ, inp_subRH, inp_divRH, hyam, hybm, **kwargs):\n self.inp_subQ, self.inp_divQ, self.inp_subRH, self.inp_divRH, self.hyam, self.hybm = \\\n np.array(inp_subQ), np.array(inp_divQ), np.array(inp_subRH), np.array(inp_divRH), \\\n np.array(hyam), np.array(hybm)\n # Define variable indices here\n # Input\n self.QBP_idx = slice(0,30)\n self.TBP_idx = slice(90,120)\n self.PS_idx = 300\n self.SHFLX_idx = 302\n self.LHFLX_idx = 303\n\n super().__init__(**kwargs)",
"def __init__(self, dualgan:nn.Module, l_adv:float=1., l_rec:float=1., l_idt:float=0.):\n super().__init__()\n store_attr()",
"def _init_derived(self):\n TensorFieldParal._init_derived(self) \n self._exterior_derivative = None",
"def __init__(self):\n self.tape_tag = None\n self.independentVariableShapeList = []\n self.dependentVariableShapeList = []",
"def __init__(self):\n _hypre.HypreILU_swiginit(self, _hypre.new_HypreILU())",
"def __init__(self, multiplicity, in_features, cardinality, dropout=0.0):\n super(IndependentNormal, self).__init__(multiplicity, in_features, dropout)\n self.gauss = Normal(\n multiplicity=multiplicity, in_features=in_features, dropout=dropout\n )\n self.prod = Product(in_features=in_features, cardinality=cardinality)\n\n self.cardinality = cardinality",
"def __init__(self, inp_sub, inp_div, norm_q, hyai=hyai, hybi=hybi, **kwargs):\n self.inp_sub, self.inp_div, self.norm_q, self.hyai, self.hybi = \\\n np.array(inp_sub), np.array(inp_div), np.array(norm_q), np.array(hyai), np.array(hybi)\n # Define variable indices here\n # Input\n self.PS_idx = 300\n self.LHFLX_idx = 303\n # Output\n self.PHQ_idx = slice(0, 29) # Residual still missing\n self.PHCLDLIQ_idx = slice(29, 59)\n self.PHCLDICE_idx = slice(59, 89)\n self.PRECT_idx = 212\n self.PRECTEND_idx = 213\n\n super().__init__(**kwargs)"
]
| [
"0.66713417",
"0.64572304",
"0.6398625",
"0.6388972",
"0.63356894",
"0.6299398",
"0.6227243",
"0.6222026",
"0.62105525",
"0.62102795",
"0.6195474",
"0.6141361",
"0.6126284",
"0.6114694",
"0.6112479",
"0.61019963",
"0.6083083",
"0.60628355",
"0.6019901",
"0.5988344",
"0.5980008",
"0.5958422",
"0.59386164",
"0.5935761",
"0.5934925",
"0.5926184",
"0.5922216",
"0.59064305",
"0.5899635",
"0.5896487"
]
| 0.67266685 | 0 |
r"""The pytree of the parameters of the model. | def parameters(self) -> PyTree:
return self._parameters | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def variables(self) -> PyTree:\n return flax.core.freeze({\"params\": self.parameters, **self.model_state})",
"def parameterNode(self):\r\n # framework\r\n profbox()\r\n return self.parameterNode",
"def parameterNode(self):\n #framework\n profbox()\n return self.parameterNode",
"def parameters(self):\n return self.model.parameters()",
"def parameters(self):\n return self.pars",
"def node_params(self):\n return self._op_params",
"def parameters(self):\n return {\"P\": self.P,\n \"T\": self.T}",
"def tree(self):\r\n return self._tree",
"def parameters(self):\n return self._params",
"def parameters(self):\n return NeuralNetwork.flatten([module.parameters() for module in self.modules])",
"def prior_params_tree(self):\n id = {name:i for i, name in enumerate(list(self.tree.keys()))}\n n_nodes = len(id)\n dist_mx = np.zeros((n_nodes, n_nodes))\n\n for node1, edges in self.tree.items():\n for node2, dist in edges.dist:\n dist_mx[id[node1], id[node2]] = dist\n dist_mx[id[node2], id[node1]] = dist\n\n # while np.count_nonzero(dist_mx) < (n_nodes ** 2 - n_nodes):\n for _ in range(20):\n for i, j in combinations(range(n_nodes), 2):\n if dist_mx[i,j] > 0:\n continue\n row_i = dist_mx[i]\n row_j = dist_mx[j]\n value = (row_i + row_j) * (row_i > 0) * (row_j > 0)\n dist_mx[i, j] = dist_mx[j, i] = - max(np.unique(value))\n dist_mx = np.abs(dist_mx)\n\n evolve_rate = []\n for node1, node2 in combinations(self.m_cov.keys(), 2):\n mx_cov_dist = np.abs(self.m_cov[node1] - self.m_cov[node2])\n elements = mx_cov_dist[np.triu_indices(len(mx_cov_dist))]\n norm_elements = elements / dist_mx[id[node2], id[node1]]\n evolve_rate += list(norm_elements)\n\n\n\n df = np.mean([p.shape[0] for _, p in self.m_profiles.items()])\n p_theta_alpha = df/2\n # p_theta_alpha = 4\n p_theta_beta = np.percentile(evolve_rate, 75) * (p_theta_alpha - 1)\n # print(p_theta_alpha, p_theta_beta)\n return p_theta_alpha, p_theta_beta",
"def get_params(self) -> torch.Tensor:\n params = []\n for pp in list(self.net.parameters()):\n params.append(pp.view(-1))\n return torch.cat(params)",
"def __call__(self, params: Mapping[str, Any],\n batch: Mapping[str, jnp.ndarray]) -> PyTreeDef:\n ...",
"def parameters(self):\n res = dict()\n res[\"population_size\"] = self.population_size\n res[\"mutation_prob\"] = self.mutation_prob\n res[\"crossover\"] = self.crossover\n res[\"selection\"] = self.selection\n res[\"sigma\"] = self.sigma\n res[\"crossover_method\"] = self.crossover_method\n res[\"selection_method\"] = self.selection_method\n res[\"best_rate\"] = self.best_rate\n res[\"n_parents\"] = self.n_parents\n res[\"model_parameters\"] = self.model.total_parameters()\n res[\"IDCT_from\"] = self.IDCT_from\n res[\"elitism\"] = self.elitism\n return res",
"def tree(self):\n # type: () -> Optional[Module]\n return self._tree",
"def parameters(self):\n # encoded in θ\n return self.theta.columns",
"def _get_raw_params(self, node):",
"def generative_parameters(self):\n params = nn.ParameterList()\n if 'parameters' in dir(self.generative_model):\n params.extend(list(self.generative_model.parameters()))\n params.extend(list(self.latent.generative_parameters()))\n return params",
"def tree(self):\n return self._tree",
"def tree(self):\n return self._tree",
"def tree(self):\n return self._tree",
"def tree(self):\n return self._tree",
"def parameters(self):\n return {\"W\": self.W,\n \"T\": self.T,\n \"P\": self.P}",
"def tree(self):\n return self.to_geom()",
"def parameters(self):\n if self.state is not None:\n return self.state.tensors()\n else:\n return []",
"def get_params(self) -> np.array:\n pass",
"def parameters(self):\n return [i.parameter for i in self.joints.values()]",
"def parameters(self):\n return self.vars",
"def params(self):\n return self._pars",
"def parameters(self):\n return self._params"
]
| [
"0.7385442",
"0.6606818",
"0.62927306",
"0.6261241",
"0.62531644",
"0.62116337",
"0.6189143",
"0.5985493",
"0.59852135",
"0.5982368",
"0.59517086",
"0.59439075",
"0.5943505",
"0.5928683",
"0.5913491",
"0.58814526",
"0.5872295",
"0.5865241",
"0.5845439",
"0.5845439",
"0.5845439",
"0.5845439",
"0.5830421",
"0.58263904",
"0.5809096",
"0.5808963",
"0.5796475",
"0.5786178",
"0.57300353",
"0.5724408"
]
| 0.8160057 | 0 |
r"""The optional pytree with the mutable state of the model. | def model_state(self) -> Optional[PyTree]:
return self._model_state | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tree(self):\r\n return self._tree",
"def __copy__(self) -> 'Tree':\n return non_recursive_tree_copy(self)",
"def tree(self):\n # type: () -> Optional[Module]\n return self._tree",
"def get_tree(self):\n return self.tree or None",
"def test_parent_read_only():\n t = Tree(None)\n with pytest.raises(AttributeError):\n t.parent = None",
"def _node_defaults(self):\n parent = super(QTree, self)._node_defaults()\n parent[\"state\"] = np.zeros([self.size, self.size])\n parent[\"network\"] = self\n return parent",
"def is_mutable(self):\n return self._mutable",
"def __init__(self):\n self.tree = {}",
"def test_update_node_state_readonly(self):\n pass",
"def test_on_copy_not_on_root():\n builder = TreeBuilder()\n builder.create_root(0)\n builder.add_child(5)\n builder.add_child(6, move=True)\n\n _ = builder.build()\n builder.add_child(7)\n\n t = builder.build()\n assert_tree_structure(t, {(): 0, (0, ): 5, (1, ): 6, (1, 0): 7})",
"def __init__(self):\n Tree.__init__(self, \"\")",
"def tree(self):\n return self._tree",
"def tree(self):\n return self._tree",
"def tree(self):\n return self._tree",
"def tree(self):\n return self._tree",
"def test_get_node_state_readonly(self):\n pass",
"def tree(self) -> Node:\n return Node(self.to_string())",
"def test_minimal_tree_creation():\n t = Tree(None)\n\n assert t.data is None\n assert t.parent is None\n assert len(t) == 0",
"def __init__(self):\n self.root = TreeNode(None)",
"def test_data_read_only():\n t = Tree(None)\n with pytest.raises(AttributeError):\n t.data = 0",
"def __init__(self,tree):\n self._tree = tree",
"def test_after_creation_copy():\n builder = TreeBuilder()\n builder.create_root(0)\n builder.add_child(2, move=True)\n builder.add_child(13)\n builder.move_to_parent()\n builder.add_child(7)\n\n t1 = builder.build()\n\n builder.move_to_root()\n builder.set_data(4)\n builder.add_child(3, move=True)\n builder.add_child(15)\n\n t2 = builder.build()\n\n assert t2 is not t1\n assert t2[0] is not t1[0]\n assert t2[0][0] is not t1[0][0]\n assert t2[1] is not t1[1]\n\n assert t2.data == 4\n assert t2[0].data == 2\n assert t2[0][0].data == 13\n assert t2[1].data == 7\n assert t2[2].data == 3\n assert t2[2][0].data == 15\n\n assert len(t2) == 3\n assert len(t2[0]) == 1\n assert len(t2[1]) == 0\n assert len(t2[2]) == 1",
"def build():\n r = TreeNode(1)\n r.left = TreeNode(2)\n r.left.left = TreeNode(4)\n r.left.right = TreeNode(5)\n\n r.right = TreeNode(3)\n\n return r\n return TreeNode(3)",
"def __init__(self, tree):\n self._tree = tree",
"def __init__(self, tree):\n self._tree = tree",
"def test_build_children_state_default_extra(self):\n l_a = SystemFile(\"a\", 150, True)\n l_aa = SystemFile(\"aa\", 50, True)\n l_a.add_child(l_aa)\n l_aaa = SystemFile(\"aaa\", 50, False)\n l_aa.add_child(l_aaa)\n l_ab = SystemFile(\"ab\", 100, False)\n l_a.add_child(l_ab)\n\n self.model_builder.set_local_files([l_a])\n model = self.model_builder.build_model()\n m_a = model.get_file(\"a\")\n self.assertEqual(ModelFile.State.DEFAULT, m_a.state)\n m_a_ch = {m.name: m for m in model.get_file(\"a\").get_children()}\n m_aa = m_a_ch[\"aa\"]\n self.assertEqual(ModelFile.State.DEFAULT, m_aa.state)\n m_aaa = m_aa.get_children()[0]\n self.assertEqual(ModelFile.State.DEFAULT, m_aaa.state)\n m_ab = m_a_ch[\"ab\"]\n self.assertEqual(ModelFile.State.DEFAULT, m_ab.state)",
"def return_tree(self):\n\n return self.tree, self.ParentMap",
"def __getstate__(self) -> Dict[str, Any]:\n s = self.__dict__.copy()\n # Kill the parent ref. It won't pickle well.\n s[\"_parent\"] = None\n return s",
"def __init__(self):\n self.root = TreeNode(\"\")",
"def __init__(self):\n self.root = TreeNode(\"\")"
]
| [
"0.6223878",
"0.6183596",
"0.61077166",
"0.6103018",
"0.5914585",
"0.5802627",
"0.575805",
"0.57547",
"0.57230943",
"0.5715844",
"0.5713787",
"0.56472003",
"0.56472003",
"0.56472003",
"0.56472003",
"0.55974734",
"0.5592061",
"0.5558143",
"0.555776",
"0.55290544",
"0.5511357",
"0.5501622",
"0.5498055",
"0.5471482",
"0.5471482",
"0.54250115",
"0.5422707",
"0.54194725",
"0.5408947",
"0.5408947"
]
| 0.70185894 | 0 |
r"""The PyTree containing the parameters and state of the model, used when evaluating it. | def variables(self) -> PyTree:
return flax.core.freeze({"params": self.parameters, **self.model_state}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parameters(self) -> PyTree:\n return self._parameters",
"def __call__(self, params: Mapping[str, Any],\n batch: Mapping[str, jnp.ndarray]) -> PyTreeDef:\n ...",
"def model_state(self) -> Optional[PyTree]:\n return self._model_state",
"def parameterNode(self):\r\n # framework\r\n profbox()\r\n return self.parameterNode",
"def tree(self):\r\n return self._tree",
"def create_tree():\n\tdtree = {}\n\n\tdtree['stats'] = None\n\tdtree['libs'] = {'fort':\n\t\t\t\t\t\t\t {'integers': None, 'floats': None, 'data': None}\n\t\t\t\t\t ,'cache blocking': None}\n\tdtree['grid'] = {'size' :\n\t\t\t\t\t\t\t {'nxgb': None, 'nygb': None, 'nzgb': None}\n\t\t\t\t\t ,'geom' :\n\t\t\t\t\t\t\t {'Lx' : None, 'Ly' : None, 'Lz' : None\n\t\t\t\t\t\t\t ,'dx' : None, 'dy' : None, 'dz' : None\n\t\t\t\t\t\t\t ,'x' : None, 'y' : None, 'z' : None\n\t\t\t\t\t\t\t ,'xloc': None, 'yloc': None, 'zloc': None}}\t\t\t\t\t\t\t \t\t\t\t\t \t\t \n\tdtree['eqns'] = {'qvec' : \n\t\t\t\t\t\t\t {'nvars': None, 'solved': None, 'stored': None, 'views': None}\n\t\t\t\t\t ,'coeff': None\n\t\t\t\t\t ,'time' : None\n\t\t\t\t\t ,'ndim' : None}\n\n\tdtree['misc'] = {'verbose': None, 'working precision': None}\n\tdtree['mpi'] = {'split': \n\t\t\t\t\t\t\t {'nxpr': None, 'nypr': None, 'nzpr': None}\n\t\t\t\t\t ,'dMpi' : None}\t\t \t\n\tdtree['num'] = {'hlo' : None\n\t\t\t\t\t ,'deriv': \n\t\t\t\t\t\t\t {'order': None, 'stencil': None, 'hlo': None} \n\t\t\t\t\t ,'filtr': \n\t\t\t\t \t {'order': None, 'stencil': None, 'hlo': None,'eps': None}\n\t\t\t\t\t ,'tint' : \n\t\t\t\t\t {'tstep': None, 'cfl': None, 'itn': None}}\n\n\tdtree['bc'] = {'wall': \n\t\t\t\t\t\t\t {'isoT': None, 'zeroQ': None, 'slip': None}}\t\n\n\tdtree['usr'] = None\n\tdtree['ios'] = None\n\t\n\tfrom rhsinfo import dim, stencil, order, coefficients, varname, varsolved, varstored, varbc, wp,hlo_rhs\n\n\tdtree['eqns']['qvec']['solved'] = []\n\tdtree['eqns']['qvec']['stored'] = []\n\tdtree['eqns']['qvec']['bcs'] = {'face':{'i' :[],'j' :[],'k' :[]},\n\t\t\t\t\t\t\t\t\t 'edge':{'ij':[],'jk':[],'ik':[]}}\n\n\tfor v in varsolved:\n\t\tdtree['eqns']['qvec']['solved'].append([v,varname[v]])\t\n\n\tfor v in varstored:\n\t\tdtree['eqns']['qvec']['stored'].append([v,varstored[v]['ind']])\n\n\tfor v in varbc:\n\t\tfor bcloc in ['face','edge']:\n\t\t\tif bcloc in varbc[v]:\n\t\t\t\tloctype = ''.join(sorted(varbc[v][bcloc].replace('1','').replace('max','')))\n\t\t\t\tdtree['eqns']['qvec']['bcs'][bcloc][loctype].append([v,varbc[v]['ind']])\n\n\n\tdtree['eqns']['coeff'] = []\n\tfor v in coefficients:\n\t\tdtree['eqns']['coeff'].append([v,coefficients[v]])\t\n\n\tdtree['eqns']['qvec']['nvars'] = len(varname)#+len(dtree['eqns']['qvec']['stored'])\t\n\tdtree['num']['deriv']['stencil'] = stencil\n\tdtree['num']['deriv']['hlo'] = hlo_rhs #int((stencil-1)/2)\n\tdtree['num']['deriv']['order'] = order\n\t\n\t# if dtree['num']['filtr']['hlo'] != None:\n\t# \tdtree['num']['hlo'] = max(dtree['num']['deriv']['hlo'],dtree['num']['filtr']['hlo'])\n\t# else:\n\t# \tdtree['num']['hlo'] = dtree['num']['deriv']['hlo']\t\n \n\tdtree['num']['hlo'] = hlo_rhs\t\t\t\n\t\n\tdtree['eqns']['ndim'] = dim\n\tdtree['misc']['working precision'] = wp\n\tdtree['misc']['verbose'] = True\n\n\t# dtree['libs']['cache blocking'] = [256,2,6] # good for 11 pts 3D, div. forme of N.S.\n\t\n\tdtree['libs']['cache blocking'] = [2560000,2,6]\n\n\t# recover BCs info:\n\t\n\ttry:\n\t from rhsinfo import bc_info\n\texcept: \n\t\tbc_info = [{},{}]\n\n\tdtree['bc']\t = {'allbc':bc_info[1],'mybc':[]} # OVERWRITE predefined 'bc' key.\n\n\treturn dtree",
"def tree(self):\n # type: () -> Optional[Module]\n return self._tree",
"def parameterNode(self):\n #framework\n profbox()\n return self.parameterNode",
"def allocate(tree):\n\n\twp = tree['misc']['working precision']\t\n\tdim = tree['eqns']['ndim']\n\tnvar = tree['eqns']['qvec']['nvars']\n\tnvarst = len(tree['eqns']['qvec']['stored'])\t\n\thlod = tree['num']['deriv']['hlo']\n\thlof = tree['num']['filtr']['hlo']\n \n\thlo = tree['num']['hlo'] \n\tnx = tree['mpi']['dMpi'].nx \n\tny = tree['mpi']['dMpi'].ny \n\tnz = tree['mpi']['dMpi'].nz \n\n\tvariables = []\n\tif tree['eqns']['qvec']['solved']:\n\t\tfor v in tree['eqns']['qvec']['solved']:\n\t\t\tvariables.append(v[1])\n\n\tif tree['eqns']['qvec']['stored']:\n\t\tfor v in tree['eqns']['qvec']['stored']:\n\t\t\tvariables.append(v[1])\t\n\n\tvariables_face = {}\n\tfor dir in ['i','j','k']:\n\t\tvariables_face[dir] = []\n\t\tfor v in tree['eqns']['qvec']['bcs']['face'][dir]:\n\t\t\t\tvariables_face[dir].append(v[1])\t\t\t\n\n\tvariables_edge = {}\n\tfor dir in ['ij','jk','ik']:\n\t\tvariables_edge[dir] = []\n\t\tfor v in tree['eqns']['qvec']['bcs']['edge'][dir]:\n\t\t\t\tvariables_edge[dir].append(v[1])\t\t\n\t\n\n\tnvar_face = {'i': len(variables_face['i']),\n\t\t\t\t 'j': len(variables_face['j']),\n\t\t\t\t 'k': len(variables_face['k'])}\n\n\tnvar_edge = {'ij': len(variables_edge['ij']),\n\t\t\t\t 'jk': len(variables_edge['jk']),\n\t\t\t\t 'ik': len(variables_edge['ik'])}\t\t\t \n\n\n\n\tcoeff = []\n\tif tree['eqns']['coeff']:\n\t\tfor v in tree['eqns']['coeff']:\n\t\t\tcoeff.append(v[1])\n\n\tsizex = nx + 2*hlo\n\t\n\tif(ny == 1):\n\t\tsizey = 1\n\telse:\t\t\n\t\tsizey = ny + 2*hlo\n\t\n\tif(nz == 1) : \n\t\tsizez = 1\n\telse:\t\n\t\tsizez = nz + 2*hlo\n\t\n\n\tndimpt = sizex*sizey*sizez\n\tndimtot = ndimpt*nvar\n\n\n\t# alloc bcs fields:\n\tndimptbcs = {}\n\tndimptbcs['i'] =sizey*sizez\n\tndimptbcs['j'] =sizex*sizez\n\tndimptbcs['k'] =sizex*sizey\n\tndimptbcs['ij'] =sizez \n\tndimptbcs['jk'] =sizex \n\tndimptbcs['ik'] =sizey \n\n\tsizebcs = {}\n\tsizebcs['i'] =(sizey,sizez)\n\tsizebcs['j'] =(sizex,sizez)\n\tsizebcs['k'] =(sizex,sizey)\n\tsizebcs['ij'] =(sizez) \n\tsizebcs['jk'] =(sizex) \n\tsizebcs['ik'] =(sizey) \n\n\t# faces:\n\tnfacei = max(1,nvar_face['i']*ndimptbcs['i'])\n\tnfacej = max(1,nvar_face['j']*ndimptbcs['j'])\n\tnfacek = max(1,nvar_face['k']*ndimptbcs['k'])\n\t\n\n\t# edges:\n\tnedgeij = max(1,nvar_edge['ij']*ndimptbcs['ij'])\n\tnedgejk = max(1,nvar_edge['jk']*ndimptbcs['jk'])\n\tnedgeik = max(1,nvar_edge['ik']*ndimptbcs['ik'])\t\n\n\t# unpack bc info:\n\ttree = unpack_bcs(tree)\n\t\n\tmybc = tree['bc']['mybc']\n\tnbc = len(mybc)\n\n\t# Integers parameters to be passed to the fortran layer\n\tparam_intDim = 12 + nvar + nvarst + 1 + nbc + 6 + 6\n\tparam_int = np.empty(shape=param_intDim, dtype=np.int32, order='F') \n\tparam_int[0] = hlo\n\tparam_int[1] = nx\n\tparam_int[2] = ny\n\tparam_int[3] = nz\n\tparam_int[4] = nvar\n\tparam_int[5] = nvarst\n\tparam_int[6] = ndimtot\n\tparam_int[7] = 3 # for RK sub steps in Python\n\tparam_int[8] = ndimpt\n\tparam_int[9:9+3] = tree['libs']['cache blocking']\n\tadr = 9+3\n\tparam_int[adr:adr+nvar] = variables[0:0+nvar] # variables location (in q).\n\tadr = adr+nvar\n\tparam_int[adr:adr+nvarst] = variables[nvar:nvar+nvarst] # variables location (in qst).\n\tadr = adr+nvarst\n\tparam_int[adr] = nbc\n\tadr = adr + 1\n\tparam_int[adr:adr+nbc] = mybc\n\tadr = adr + nbc\n\tparam_int[adr:adr+6] = list(nvar_face.values()) + list(nvar_edge.values())\n\tadr = adr + 6\n\tparam_int[adr:adr+6] = [nfacei,nfacej,nfacek,nedgeij,nedgejk,nedgeik]\n\n\tif tree['eqns']['coeff']: \n\t\tncoef = len(tree['eqns']['coeff'])\n\telse:\n\t\tncoef = 0\t\n\n\t# 
Floating point parameters to be passed to the Fortran layer (3 additional floats for the metrics, uniforme grid + 1 for dt +1 for eps filter)\n\tparam_float = np.zeros(shape=ncoef+5, dtype=wp, order='F') \n\n\tparam_float[0] = cst(1.0)/tree['grid']['geom']['dx']\n\tif(dim>=2) : param_float[1] = cst(1.0)/tree['grid']['geom']['dy']\n\tif(dim == 3) : param_float[2] = cst(1.0)/tree['grid']['geom']['dz']\n\n\tparam_float[3] = tree['num']['tint']['tstep']\n\tparam_float[4] = tree['num']['filtr']['eps']\n\n\tfor i,v in enumerate(tree['eqns']['coeff']):\n\t\tparam_float[i+5] = v[1]\n\n\t# Floating point array (contiguous, aligned, actually NOT aligned yet...)\n\t\n\tnfieldbcs = sum([nfacei,nfacej,nfacek,nedgeij,nedgejk,nedgeik])\n\n\tif nvarst != 0 :\n\t\tdata = np.empty(shape=ndimtot*4 + ndimpt*nvarst + nfieldbcs, dtype=wp,order='F') # 4 --> q,q1,q2 + rhs + nvarstored\n\telse:\t\n\t\tdata = np.empty(shape=ndimtot*4 + 1 + nfieldbcs, dtype=wp,order='F') # 4 --> q,q1,q2, rhs, + 1 (address for qst in fortran layer)\n\n\t# Explicit view of data (only references here, no copy) \n\tviews = {}\n\tnvsolved = len(tree['eqns']['qvec']['solved'])\n\t\n\t# WARNING assume contiguous addresses of stored variables in data_float...\n\tif nvarst != 0:\n\t\taddrstored_beg = ndimtot*4 \n\t\taddrstored_end = addrstored_beg + ndimpt*nvarst\n\telse:\n\t\taddrstored_beg = ndimtot*4\t\n\t\taddrstored_end = addrstored_beg + 1\n\n\taddrbcfields_beg = addrstored_end\t\n\taddrbcfields_edge_beg = addrbcfields_beg + sum([nfacei,nfacej,nfacek])\n\n\tif dim == 3:\n\t\tviews['q'] = data[0:ndimpt*nvsolved].view().reshape(sizex,sizey,sizez,nvsolved, order='F')\n\t\tif nvarst !=0:\n\t\t\tviews['qstored'] = data[addrstored_beg:addrstored_end].view().reshape(sizex,sizey,sizez,nvarst, order='F')\n\t\t\n\t\tfor v in tree['eqns']['qvec']['solved']:\n\t\t\tviews[v[0]] = data[(v[1]-1)*(ndimpt):(v[1])*ndimpt].view().reshape(sizex,sizey,sizez, order='F')\n\t\tfor v in tree['eqns']['qvec']['stored']:\n\t\t\tviews[v[0]] = data[(v[1]-1)*(ndimpt)+addrstored_beg:(v[1])*ndimpt+addrstored_beg].view().reshape(sizex,sizey,sizez, order='F')\t\t\t\n\n\t\t# bc faces:\t\n\t\tshift = addrbcfields_beg\n\t\tfor dir in ['i','j','k']:\n\t\t\tfor v in tree['eqns']['qvec']['bcs']['face'][dir]:\t\t\t\t\n\t\t\t\tviews[v[0]] = data[shift:ndimptbcs[dir]+shift].view().reshape(sizebcs[dir], order='F')\t\t\t\n\t\t\t\tshift = shift + ndimptbcs[dir]\t\n\n\t\t# bc edges:\t\n\t\tshift = addrbcfields_edge_beg\n\t\tfor dir in ['ij','jk','ik']:\n\t\t\tfor v in tree['eqns']['qvec']['bcs']['edge'][dir]:\t\n\t\t\t\tviews[v[0]] = data[shift:ndimptbcs[dir]+shift].view().reshape(sizebcs[dir], order='F')\t\t\t\n\t\t\t\tshift = shift + ndimptbcs[dir]\n\telif dim == 2:\n\t\tviews['q'] = data[0:ndimpt*nvsolved].view().reshape(sizex,sizey,nvsolved, order='F')\n\t\tif nvarst !=0:\n\t\t\tviews['qstored'] = data[addrstored_beg:addrstored_end].view().reshape(sizex,sizey,nvarst, order='F')\n\t\t\n\t\tfor v in tree['eqns']['qvec']['solved']:\n\t\t\tviews[v[0]] = data[(v[1]-1)*(ndimpt):(v[1])*ndimpt].view().reshape(sizex,sizey, order='F')\t\n\t\tfor v in tree['eqns']['qvec']['stored']:\n\t\t\tviews[v[0]] = data[(v[1]-1)*(ndimpt)+addrstored_beg:(v[1])*ndimpt+addrstored_beg].view().reshape(sizex,sizey, order='F')\t\n\n\n\t\t# bc faces:\t\n\t\tshift = addrbcfields_beg\t\n\t\tfor dir in ['i','j']:\n\t\t\tfor v in tree['eqns']['qvec']['bcs']['face'][dir]:\t\t\t\t\t\n\t\t\t\tviews[v[0]] = data[shift:ndimptbcs[dir]+shift].view().reshape(sizebcs[dir], order='F')\t\t\t\n\t\t\t\tshift = shift + 
ndimptbcs[dir]\t\n\t\t\t\t\t\t\n\telse:\n\t\tviews['q'] = data[0:ndimpt*nvsolved].view().reshape(sizex,nvsolved, order='F')\n\t\tif nvarst !=0:\n\t\t\tviews['qstored'] = data[addrstored_beg:addrstored_end].view().reshape(sizex,nvarst, order='F')\n\t\t\n\t\tfor v in tree['eqns']['qvec']['solved']:\n\t\t\tviews[v[0]] = data[(v[1]-1)*(ndimpt):(v[1])*ndimpt].view().reshape(sizex, order='F')\t\t\n\t\tfor v in tree['eqns']['qvec']['stored']:\n\t\t\tviews[v[0]] = data[(v[1]-1)*(ndimpt)+addrstored_beg:(v[1])*ndimpt+addrstored_beg].view().reshape(sizex, order='F')\t\t\t\t\t\t\t\t\t\n\t\n\ttree['libs']['fort']['integers'] = param_int\n\ttree['libs']['fort']['floats'] = param_float\n\ttree['libs']['fort']['data'] = data\n\ttree['eqns']['qvec']['views'] = views\n\n\tdnamiF.init(param_int,param_float,data)\t\n\t\n\treturn tree",
"def tree(self):\n return self._tree",
"def tree(self):\n return self._tree",
"def tree(self):\n return self._tree",
"def tree(self):\n return self._tree",
"def tree(self):\n\n tree_parameters = [{'min_samples_leaf': list(range(2, 10, 1)),\n 'criterion': ['mae', 'mse'],\n 'random_state': [1]}]\n tree_grid = GridSearchCV(estimator=DecisionTreeRegressor(),\n param_grid=tree_parameters,\n scoring=self.scorer, cv=5, n_jobs=-1,\n iid=False)\n tree_grid_result = tree_grid.fit(self.X_train, self.y_train)\n best_tree_parameters = tree_grid_result.best_params_\n tree_score = tree_grid_result.best_score_\n print('Best tree params: ' + str(best_tree_parameters))\n print('Tree score: ' + str(tree_score))\n return DecisionTreeRegressor(\n min_samples_leaf=best_tree_parameters['min_samples_leaf'],\n criterion=best_tree_parameters['criterion'],\n random_state=1)",
"def evaluate(self, tree):\n\t\tpass",
"def evaluate_node(self):\n # p, v = np.random.random(225).astype(np.float16), np.random.random()\n socket = zmq.Context().socket(zmq.DEALER)\n socket.setsockopt_string(zmq.IDENTITY, self.player_id)\n socket.connect('ipc://./tmp/oracle_%s' % self.tree.model_name)\n print('start to evaluate', self.tree.model_name)\n while True:\n # print(self.tree.to_evaluate.qsize())\n batch = []\n states = []\n colors = []\n size = self.tree.to_evaluate.qsize()\n if size > config.INFERENCE_BATCHSIZE:\n size = config.INFERENCE_BATCHSIZE\n elif size == 0:\n time.sleep(0.001)\n continue\n for _ in range(size):\n t, black, white = self.tree.to_evaluate.get()\n mine, yours = posswap(t, black, white)\n batch.append((str(mine), str(yours), t % 2))\n states.append((black, white))\n colors.append(t % 2)\n socket.send(msgpack.dumps((batch, self.player_id)))\n result = msgpack.loads(socket.recv())\n assert len(states) == len(result[0])\n assert len(states) == len(result[1])\n for ind, state in enumerate(states):\n with self.lock:\n self.tree.nodes[state].p = result[0][ind]\n if colors[ind] == 0:\n self.tree.nodes[state].v = result[1][ind]\n else:\n self.tree.nodes[state].v = -result[1][ind]\n self.tree.nodes[state].updated = True",
"def prior_params_tree(self):\n id = {name:i for i, name in enumerate(list(self.tree.keys()))}\n n_nodes = len(id)\n dist_mx = np.zeros((n_nodes, n_nodes))\n\n for node1, edges in self.tree.items():\n for node2, dist in edges.dist:\n dist_mx[id[node1], id[node2]] = dist\n dist_mx[id[node2], id[node1]] = dist\n\n # while np.count_nonzero(dist_mx) < (n_nodes ** 2 - n_nodes):\n for _ in range(20):\n for i, j in combinations(range(n_nodes), 2):\n if dist_mx[i,j] > 0:\n continue\n row_i = dist_mx[i]\n row_j = dist_mx[j]\n value = (row_i + row_j) * (row_i > 0) * (row_j > 0)\n dist_mx[i, j] = dist_mx[j, i] = - max(np.unique(value))\n dist_mx = np.abs(dist_mx)\n\n evolve_rate = []\n for node1, node2 in combinations(self.m_cov.keys(), 2):\n mx_cov_dist = np.abs(self.m_cov[node1] - self.m_cov[node2])\n elements = mx_cov_dist[np.triu_indices(len(mx_cov_dist))]\n norm_elements = elements / dist_mx[id[node2], id[node1]]\n evolve_rate += list(norm_elements)\n\n\n\n df = np.mean([p.shape[0] for _, p in self.m_profiles.items()])\n p_theta_alpha = df/2\n # p_theta_alpha = 4\n p_theta_beta = np.percentile(evolve_rate, 75) * (p_theta_alpha - 1)\n # print(p_theta_alpha, p_theta_beta)\n return p_theta_alpha, p_theta_beta",
"def buildDecisionTree(self, data):\n self.data = data\n self.decisionTree = self.buildTree(self.data, self.listAttributes)\n with open(\"decision_tree_model\", \"wb\") as f:\n pickle.dump(self.decisionTree, f, pickle.HIGHEST_PROTOCOL)\n return self.decisionTree",
"def _buildtree(self):\n self.pricetree = np.zeros((self.steps+1,self.steps+1))\n self.pricetree[0][0] = self.p\n for j in range(self.steps):\n for i in range(j+1):\n self.pricetree[j+1][i+1] = self.pricetree[j][i]*self.down\n self.pricetree[j+1][0] = self.pricetree[j][0]*self.up",
"def _build_graph(self,tree):\n list_val = [] # input of computation graph\n list_h = [] # output of computation graph\n def build_cell(node, children_h):\n if (node.has_child()):\n assert len(children_h) > 0\n val, h = self.builder.get_recursive_module(children_h)\n else:\n val, h = self.builder.get_leaf_module()\n return val, h\n\n def recursive_build_graph(node):\n if(node.has_child()):\n h_1 = recursive_build_graph(node.children[0])\n h_2 = recursive_build_graph(node.children[1])\n input_val, h = build_cell(node, [h_1, h_2])\n else:\n input_val, h = build_cell(node, None)\n list_val.append(input_val)\n list_h.append(h)\n return h\n\n recursive_build_graph(tree)\n return list_val, list_h",
"def parse(self):\n\n root = self.expr()\n return root",
"def tree(self) -> Node:\n return Node(self.to_string())",
"def __init__(self, model):\n TreeLikelihoodBase.__init__(self, model)",
"def tree(self):\n return self.to_geom()",
"def return_tree(self):\n\n return self.tree, self.ParentMap",
"def get_original_tree(self, tree):\n if not tree:\n return\n tree = copy.deepcopy(tree)\n PCFG.__revert_step_4(tree.root)\n PCFG.__revert_step_2(tree.root)\n # Get rid of step 1, namely get rid of S_0 -> S\n new_root = tree.root.children[0]\n new_tree = ParseTree(new_root, tree.probability)\n return new_tree",
"def nltk_tree(self):\n return nltk_tree(self)",
"def qtree(self):\n return self._qtree",
"def getstate(self):\r\n return Parameterized.getstate(self) + [self.parts,\r\n self.num_parts,\r\n self.num_params,\r\n self.input_dim,\r\n self.input_slices,\r\n self.param_slices\r\n ]",
"def parameters(self):\n if self.state is not None:\n return self.state.tensors()\n else:\n return []"
]
| [
"0.72823274",
"0.62419695",
"0.61395806",
"0.6070549",
"0.5913993",
"0.5892661",
"0.588385",
"0.584981",
"0.5712577",
"0.5679467",
"0.5679467",
"0.5679467",
"0.5679467",
"0.5656677",
"0.5551771",
"0.5551533",
"0.55390733",
"0.5533959",
"0.5529545",
"0.5401554",
"0.53472054",
"0.5331488",
"0.53291184",
"0.5319923",
"0.531481",
"0.5281888",
"0.52709436",
"0.5252153",
"0.52465814",
"0.5241579"
]
| 0.7401724 | 0 |
r"""Convert this mixed state to a qutip density matrix Qobj. | def to_qobj(self): # -> "qutip.Qobj"
from qutip import Qobj
q_dims = [list(self.hilbert_physical.shape), list(self.hilbert_physical.shape)]
return Qobj(np.asarray(self.to_matrix()), dims=q_dims) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def to_qobj(self): # -> \"qutip.Qobj\"\n from qutip import Qobj\n\n q_dims = [list(self.hilbert.shape), [1 for i in range(self.hilbert.size)]]\n return Qobj(np.asarray(self.to_array()), dims=q_dims)",
"def serialize_Q(Q: np.ndarray):\n ret = QMatrix()\n ret.q_matrix = [QMatrixRow() for i in range(64)]\n for i in range(64):\n row = []\n for j in range(9):\n row.append(Q.q_matrix[i][j])\n ret.q_matrix[i].q_matrix_row = row\n return ret",
"def convert_to_q(self):\n if self.measure == 'Q':\n warnings.warn('Parameters are already converted to Q!')\n else:\n kappa_sp = self.kappa_s\n kappa_yp = self.kappa_y\n self.kappa_s = self.kappa_s - self.lmbd_s * self.eta_s\n self.kappa_y = self.kappa_y - self.lmbd_y * self.eta_y\n self.scale = kappa_sp / self.kappa_s\n self.mean_v *= (kappa_yp / self.kappa_y * self.scale)\n self.lmbd = 0\n self.eta_y *= (self.scale**.5)\n self.measure = 'Q'\n self.update_ajd()",
"def convert_to(self, domain):\n if domain == self.domain:\n return self.copy()\n elif domain == QQ and self.domain == ZZ:\n return self._new(flint.fmpq_mat(self.rep), self.shape, domain)\n elif domain == ZZ and self.domain == QQ:\n # XXX: python-flint has no fmpz_mat.from_fmpq_mat\n return self.to_ddm().convert_to(domain).to_dfm()\n else:\n # It is the callers responsibility to convert to DDM before calling\n # this method if the domain is not supported by DFM.\n raise NotImplementedError(\"Only ZZ and QQ are supported by DFM\")",
"def from_q(self, q: np.ndarray) -> np.ndarray:\n return self.from_quaternion(self, q)",
"def calc_q_values(self, state): \n q_vals = self.q_network.predict(np.swapaxes(state,0,3))\n return q_vals",
"def _q_matrix(self):\n return np.array([\n [self.q[0], -self.q[1], -self.q[2], -self.q[3]],\n [self.q[1], self.q[0], -self.q[3], self.q[2]],\n [self.q[2], self.q[3], self.q[0], -self.q[1]],\n [self.q[3], -self.q[2], self.q[1], self.q[0]]])",
"def calc_q_values(self, state):\n state = state[None, :, :, :]\n return self.q_network.predict_on_batch(state)",
"def get_q(self):\n for state in self.vibresults:\n dFdG = []\n j = 0\n for i in range(3*len(self.indices)):\n if (i+1)%3 == 0:\n # a z-component\n try:\n differential = self.dFdG[state][j]\n except IndexError:\n print('Missing data!')\n continue\n dFdG.append([0,0,differential[-1]])\n j += 1\n else:\n dFdG.append([0, 0, 0])\n dFdG = np.array(dFdG)\n mu_axes = dFdG.T[-1]\n # now dot product with the different modes available\n for index, mode in enumerate(self.modes):\n try:\n q = np.dot(mu_axes, mode)\n except ValueError:\n continue\n self.q.setdefault(state,{})[index] = q",
"def _format(self, state):\n x = state\n if not isinstance(x, torch.Tensor):\n x = torch.tensor(x,\n device=self.device,\n dtype=torch.float32)\n x = x.unsqueeze(0)\n return x",
"def get_qs(self, state):\n state = state.astype(np.float32)\n if len(state.shape) == 3:\n state = state.reshape([1] + list(self.input_shape))\n return self.session.run(self.q_policy,\n feed_dict={self.s_placeholder: state})",
"def quatreal(q):\n a = q[0,0]\n b = q[0,1]\n c = q[0,2]\n d = q[0,3]\n amat = a*np.identity(4)\n bmat = b*np.array([[0,1,0,0],[-1,0,0,0],[0,0,0,-1],[0,0,1,0]])\n cmat = c*np.array([[0,0,1,0],[0,0,0,1],[-1,0,0,0],[0,-1,0,0]])\n dmat = d*np.array([[0,0,0,1],[0,0,-1,0],[0,1,0,0],[-1,0,0,0]])\n return amat+bmat+cmat+dmat",
"def set_qmat(self):\n if self.model == 'ER':\n self.qmat = np.array([\n [-self.alpha, self.alpha],\n [self.alpha, -self.alpha],\n ])\n \n elif self.model == 'ARD':\n self.qmat = np.array([\n [-self.alpha, self.alpha],\n [self.beta, -self.beta]\n ])\n else:\n raise Exception(\"model must be specified as either 'ER' or 'ARD'\")",
"def Q(self):\n self.dualEigenmatrix()",
"def q_values(self, state):\n return self.sess.run(self.graph.target_q_values,\n feed_dict={self.graph.states: [state]}).reshape(-1)",
"def density_matrix(wires) -> \"DensityMatrixMP\":\n wires = Wires(wires)\n return DensityMatrixMP(wires=wires)",
"def to_Matrix(self):\n from sympy.matrices.dense import MutableDenseMatrix\n\n # XXX: If the internal representation of RepMatrix changes then this\n # might need to be changed also.\n if self.domain in (ZZ, QQ, EXRAW):\n if self.rep.fmt == \"sparse\":\n rep = self.copy()\n else:\n rep = self.to_sparse()\n else:\n rep = self.convert_to(EXRAW).to_sparse()\n\n return MutableDenseMatrix._fromrep(rep)",
"def _csv3_q2x(self, Q, deriv = 0, out = None, var = None):\n \n natoms = 3 \n base_shape = Q.shape[1:]\n \n if var is None:\n var = [0, 1, 2] # Calculate derivatives for all Q\n \n nvar = len(var)\n \n # nd = adf.nck(deriv + nvar, min(deriv, nvar)) # The number of derivatives\n nd = dfun.nderiv(deriv, nvar)\n \n # Create adf symbols/constants for each coordinate\n q = [] \n for i in range(self.nQ):\n if i in var: # Derivatives requested for this variable\n q.append(adf.sym(Q[i], var.index(i), deriv, nvar))\n else: # Derivatives not requested, treat as constant\n q.append(adf.const(Q[i], deriv, nvar))\n # q = r1, r2, theta\n \n if out is None:\n out = np.ndarray( (nd, 3*natoms) + base_shape, dtype = Q.dtype)\n out.fill(0) # Initialize out to 0\n \n # Calculate Cartesian coordinates\n \n if self.angle == 'deg':\n q[2] = (np.pi / 180.0) * q[2] \n # q[2] is now in radians\n if self.supplementary:\n q[2] = np.pi - q[2] # theta <-- pi - theta \n \n if self.embedding_mode == 0:\n np.copyto(out[:,2], (-q[0]).d ) # -r1\n np.copyto(out[:,7], (q[1] * adf.sin(q[2])).d ) # r2 * sin(theta)\n np.copyto(out[:,8], (-q[1] * adf.cos(q[2])).d ) # -r2 * cos(theta)\n elif self.embedding_mode == 1:\n np.copyto(out[:,0], (q[0] * adf.cos(q[2]/2)).d ) # r1 * cos(theta/2)\n np.copyto(out[:,2], (q[0] * adf.sin(q[2]/2)).d ) # r1 * sin(theta/2)\n np.copyto(out[:,6], (q[1] * adf.cos(q[2]/2)).d ) # r2 * cos(theta/2)\n np.copyto(out[:,8], (-q[1] * adf.sin(q[2]/2)).d ) # -r2 * sin(theta/2)\n else:\n raise RuntimeError(\"Unexpected embedding_mode\")\n \n return out",
"def simple_q(self: Q) -> Q:\n\n self.t = sp.simplify(self.t)\n self.x = sp.simplify(self.x)\n self.y = sp.simplify(self.y)\n self.z = sp.simplify(self.z)\n return self",
"def to_flat_nz(self):\n return self.to_ddm().to_flat_nz()",
"def __init__(self,quat_as_vector=[0,0,0,1],order=\"xyzw\"):\n\n if order == \"xyzw\":\n self._data = np.array(quat_as_vector).flatten()\n elif order == \"wxyz\":\n flatten_q_vec = np.array(quat_as_vector).flatten()\n self._data = np.zeros(4)\n self._data[3] = flatten_q_vec[0]\n self._data[0:3] = flatten_q_vec[1:4]",
"def quat2mat(self,quat):\n\t quat = np.asarray(quat, dtype=np.float64)\n\t assert quat.shape[-1] == 4, \"Invalid shape quat {}\".format(quat)\n\n\t w, x, y, z = quat[..., 0], quat[..., 1], quat[..., 2], quat[..., 3]\n\t Nq = np.sum(quat * quat, axis=-1)\n\t s = 2.0 / Nq\n\t X, Y, Z = x * s, y * s, z * s\n\t wX, wY, wZ = w * X, w * Y, w * Z\n\t xX, xY, xZ = x * X, x * Y, x * Z\n\t yY, yZ, zZ = y * Y, y * Z, z * Z\n\n\t mat = np.empty(quat.shape[:-1] + (3, 3), dtype=np.float64)\n\t mat[..., 0, 0] = 1.0 - (yY + zZ)\n\t mat[..., 0, 1] = xY - wZ\n\t mat[..., 0, 2] = xZ + wY\n\t mat[..., 1, 0] = xY + wZ\n\t mat[..., 1, 1] = 1.0 - (xX + zZ)\n\t mat[..., 1, 2] = yZ - wX\n\t mat[..., 2, 0] = xZ - wY\n\t mat[..., 2, 1] = yZ + wX\n\t mat[..., 2, 2] = 1.0 - (xX + yY)\n\t return np.where((Nq > _FLOAT_EPS)[..., np.newaxis, np.newaxis], mat, np.eye(3))",
"def to_DCM(self) -> np.ndarray:\n if not all(self.is_versor()):\n raise AttributeError(\"All quaternions must be versors to be represented as Direction Cosine Matrices.\")\n R = np.zeros((self.num_qts, 3, 3))\n R[:, 0, 0] = 1.0 - 2.0*(self.y**2 + self.z**2)\n R[:, 1, 0] = 2.0*(self.x*self.y+self.w*self.z)\n R[:, 2, 0] = 2.0*(self.x*self.z-self.w*self.y)\n R[:, 0, 1] = 2.0*(self.x*self.y-self.w*self.z)\n R[:, 1, 1] = 1.0 - 2.0*(self.x**2 + self.z**2)\n R[:, 2, 1] = 2.0*(self.w*self.x+self.y*self.z)\n R[:, 0, 2] = 2.0*(self.x*self.z+self.w*self.y)\n R[:, 1, 2] = 2.0*(self.y*self.z-self.w*self.x)\n R[:, 2, 2] = 1.0 - 2.0*(self.x**2 + self.y**2)\n return R",
"def quat2dcm(q):\n q0q0 = q[0] * q[0]\n q0q1 = q[0] * q[1]\n q0q2 = q[0] * q[2]\n q0q3 = q[0] * q[3]\n q1q1 = q[1] * q[1]\n q1q2 = q[1] * q[2]\n q1q3 = q[1] * q[3]\n q2q2 = q[2] * q[2]\n q2q3 = q[2] * q[3]\n q3q3 = q[3] * q[3]\n dcm = np.zeros((3, 3))\n dcm[0, 0] = q0q0 + q1q1 - q2q2 - q3q3\n dcm[0, 1] = 2.0*(q1q2 + q0q3)\n dcm[0, 2] = 2.0*(q1q3 - q0q2)\n dcm[1, 0] = 2.0*(q1q2 - q0q3)\n dcm[1, 1] = q0q0 - q1q1 + q2q2 - q3q3\n dcm[1, 2] = 2.0*(q2q3 + q0q1)\n dcm[2, 0] = 2.0*(q1q3 + q0q2)\n dcm[2, 1] = 2.0*(q2q3 - q0q1)\n dcm[2, 2] = q0q0 - q1q1 - q2q2 + q3q3\n return dcm",
"def get_dq_dynmat_q(phonon,q):\n groupvel = phonon._group_velocity\n return groupvel._get_dD(q)",
"def Q_net(self, state):\n\t\tif not self._prediction_made: \n\t\t\tQ = tf.matmul(tf.nn.relu( tf.matmul(state, self.weights_hidden) + self.bias_hidden ), self.weights_out) + self.bias_out \n\t\t\tself._Qval = Q\t\n\t\t\tself._prediction_made = True\n\t\treturn self._Qval",
"def __repr__(self):\n return \"Quaternion({}, {}, {}, {})\".format(repr(self.q[0]), repr(self.q[1]), repr(self.q[2]), repr(self.q[3]))",
"def qobjlist_to_mps(qobjlist):\n mpo = qobjlist_to_mpo(qobjlist)\n tensors = mpo.data\n for t in tensors:\n # Remove dummy input labels\n t.remove_all_dummy_indices(labels=[mpo.physin_label])\n # Change physical label to the standard choice 'phys'\n t.replace_label(mpo.physout_label, 'phys')\n return onedim.MatrixProductState(tensors, left_label='left',\n right_label='right', phys_label='phys')",
"def __float__(self):\n return self.q[0]",
"def __init__(self, p, q):\n self.p = p\n self.q = q\n # biais des unités d’entrée) -> dim (1xp)\n self.a = np.zeros((1, self.p))\n # biais des unités de sortie -> dim (1xq)\n self.b = np.zeros((1, self.q))\n # initialisés aléatoirement suivant une loi normale centrée, de variance égale à 0.01\n self.W = np.random.normal(loc=0, scale=0.1, size=(self.p, self.q))"
]
| [
"0.6459236",
"0.64518213",
"0.62433493",
"0.59719163",
"0.5919937",
"0.5865261",
"0.58466554",
"0.56326973",
"0.5629153",
"0.55800503",
"0.55686086",
"0.5567256",
"0.55051607",
"0.5466473",
"0.54598826",
"0.5440446",
"0.5437382",
"0.5437118",
"0.5430142",
"0.5422909",
"0.5419825",
"0.54105425",
"0.5394798",
"0.5392589",
"0.5389921",
"0.5363855",
"0.5349966",
"0.5349855",
"0.5344102",
"0.5324805"
]
| 0.6822012 | 0 |
Computes the expectation value of the given operator over the variational state. | def expect(vstate: VariationalState, operator: AbstractOperator): | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def expectation(operator, state):\n\n if isinstance(state, scipy.sparse.spmatrix):\n # Handle density matrix.\n if isinstance(operator, scipy.sparse.linalg.LinearOperator):\n raise ValueError('Taking the expectation of a LinearOperator with '\n 'a density matrix is not supported.')\n product = state * operator\n expectation = numpy.sum(product.diagonal())\n\n elif isinstance(state, numpy.ndarray):\n # Handle state vector.\n if len(state.shape) == 1:\n # Row vector\n expectation = numpy.dot(numpy.conjugate(state), operator * state)\n else:\n # Column vector\n expectation = numpy.dot(numpy.conjugate(state.T),\n operator * state)[0, 0]\n\n else:\n # Handle exception.\n raise ValueError(\n 'Input state must be a numpy array or a sparse matrix.')\n\n # Return.\n return expectation",
"def variance(operator, state):\n return (expectation(operator**2, state) - expectation(operator, state)**2)",
"def expectation_computational_basis_state(operator, computational_basis_state):\n if isinstance(operator, QubitOperator):\n raise NotImplementedError('Not yet implemented for QubitOperators.')\n\n if not isinstance(operator, FermionOperator):\n raise TypeError('operator must be a FermionOperator.')\n\n occupied_orbitals = computational_basis_state\n\n if not isinstance(occupied_orbitals, list):\n computational_basis_state_index = (occupied_orbitals.nonzero()[0][0])\n\n occupied_orbitals = [\n digit == '1' for digit in bin(computational_basis_state_index)[2:]\n ][::-1]\n\n expectation_value = operator.terms.get((), 0.0)\n\n for i in range(len(occupied_orbitals)):\n if occupied_orbitals[i]:\n expectation_value += operator.terms.get(((i, 1), (i, 0)), 0.0)\n\n for j in range(i + 1, len(occupied_orbitals)):\n expectation_value -= operator.terms.get(\n ((j, 1), (i, 1), (j, 0), (i, 0)), 0.0)\n\n return expectation_value",
"def expectation_db_operator_with_pw_basis_state(operator,\n plane_wave_occ_orbitals,\n n_spatial_orbitals, grid,\n spinless):\n expectation_value = operator.terms.get((), 0.0)\n\n for single_action, coefficient in operator.terms.items():\n if len(single_action) == 2:\n expectation_value += coefficient * (\n expectation_one_body_db_operator_computational_basis_state(\n single_action, plane_wave_occ_orbitals, grid, spinless) /\n n_spatial_orbitals)\n\n elif len(single_action) == 4:\n expectation_value += coefficient * (\n expectation_two_body_db_operator_computational_basis_state(\n single_action, plane_wave_occ_orbitals, grid, spinless) /\n n_spatial_orbitals**2)\n\n elif len(single_action) == 6:\n expectation_value += coefficient * (\n expectation_three_body_db_operator_computational_basis_state(\n single_action, plane_wave_occ_orbitals, grid, spinless) /\n n_spatial_orbitals**3)\n\n return expectation_value",
"def objective_function(params):\n cirq_circuit = variational_state_evolve(params)\n mean_value = self.expectation(\n all_qubits_in_circuit, cirq_circuit, hamiltonian)\n self._current_expectation = mean_value\n return mean_value",
"def advantage(self, state):\n Q = self.predict(state)\n return Q - np.dot(self.π.pmf(state, Q), Q)",
"def utility(self, state, option: Option) -> float:\n ω = self.option_idx_dict[str(option)]\n β = option.β.pmf(state)\n Q = self.predict(state)\n continuation_value = (1 - β) * Q[ω]\n termination_value = β * np.dot(self.π.pmf(state, Q), Q)\n return continuation_value + termination_value",
"def expval(op, dm):\n return np.tensordot(op, dm, ([0, 1], [0, 1]))",
"def eval_prior(self, state, action):\n\n return np.dot(state, self.a.T) + np.dot(action, self.b.T)",
"def expval(self, observable, shot_range=None, bin_size=None):\n is_state_batched = self._ndim(self.state) == 2\n # intercept Sums\n if isinstance(observable, Sum) and not self.shots:\n return measure(\n ExpectationMP(observable.map_wires(self.wire_map)),\n self._pre_rotated_state,\n is_state_batched,\n )\n\n # intercept other Hamiltonians\n # TODO: Ideally, this logic should not live in the Device, but be moved\n # to a component that can be re-used by devices as needed.\n if observable.name not in (\"Hamiltonian\", \"SparseHamiltonian\"):\n return super().expval(observable, shot_range=shot_range, bin_size=bin_size)\n\n assert self.shots is None, f\"{observable.name} must be used with shots=None\"\n\n self.map_wires(observable.wires)\n backprop_mode = (\n not isinstance(self.state, np.ndarray)\n or any(not isinstance(d, (float, np.ndarray)) for d in observable.data)\n ) and observable.name == \"Hamiltonian\"\n\n if backprop_mode:\n # TODO[dwierichs]: This branch is not adapted to broadcasting yet\n if is_state_batched:\n raise NotImplementedError(\n \"Expectation values of Hamiltonians for interface!=None are \"\n \"not supported together with parameter broadcasting yet\"\n )\n # We must compute the expectation value assuming that the Hamiltonian\n # coefficients *and* the quantum states are tensor objects.\n\n # Compute <psi| H |psi> via sum_i coeff_i * <psi| PauliWord |psi> using a sparse\n # representation of the Pauliword\n res = qml.math.cast(qml.math.convert_like(0.0, observable.data), dtype=complex)\n interface = qml.math.get_interface(self.state)\n\n # Note: it is important that we use the Hamiltonian's data and not the coeffs\n # attribute. This is because the .data attribute may be 'unwrapped' as required by\n # the interfaces, whereas the .coeff attribute will always be the same input dtype\n # that the user provided.\n for op, coeff in zip(observable.ops, observable.data):\n # extract a scipy.sparse.coo_matrix representation of this Pauli word\n coo = qml.operation.Tensor(op).sparse_matrix(wire_order=self.wires, format=\"coo\")\n Hmat = qml.math.cast(qml.math.convert_like(coo.data, self.state), self.C_DTYPE)\n\n product = (\n self._gather(self._conj(self.state), coo.row)\n * Hmat\n * self._gather(self.state, coo.col)\n )\n c = qml.math.convert_like(coeff, product)\n\n if interface == \"tensorflow\":\n c = qml.math.cast(c, \"complex128\")\n\n res = qml.math.convert_like(res, product) + qml.math.sum(c * product)\n\n else:\n # Coefficients and the state are not trainable, we can be more\n # efficient in how we compute the Hamiltonian sparse matrix.\n Hmat = observable.sparse_matrix(wire_order=self.wires)\n\n state = qml.math.toarray(self.state)\n if is_state_batched:\n res = qml.math.array(\n [\n csr_matrix.dot(\n csr_matrix(self._conj(_state)),\n csr_matrix.dot(Hmat, csr_matrix(_state[..., None])),\n ).toarray()[0]\n for _state in state\n ]\n )\n else:\n res = csr_matrix.dot(\n csr_matrix(self._conj(state)),\n csr_matrix.dot(Hmat, csr_matrix(state[..., None])),\n ).toarray()[0]\n\n if observable.name == \"Hamiltonian\":\n res = qml.math.squeeze(res)\n\n return self._real(res)",
"def expect(self, var):\n e = 0.0\n for prob, val in self.rv(var):\n e += prob * float(val)\n return e",
"def fock_state(state, device_wires, params):\r\n # pylint: disable=unused-argument\r\n n = params[0]\r\n N = state.num_modes\r\n\r\n if N == len(device_wires):\r\n # expectation value of the entire system\r\n ex = state.fock_prob(n)\r\n return ex, ex - ex ** 2\r\n\r\n dm = state.reduced_dm(modes=device_wires.tolist())\r\n ex = tf.math.real(dm[tuple([n[i // 2] for i in range(len(n) * 2)])])\r\n\r\n var = ex - ex ** 2\r\n return ex, var",
"def compute_expectation( self, X, O ):\n\n raise NotImplementedError",
"def __call__(self, observation):\n # Validates that the state variable is a scalar with this float() call.\n current_val = float(observation[self.dict_key])\n retval = current_val - self.last_val\n self.last_val = current_val\n return retval",
"def computeQValueFromValues(self, state, action):\n #get all possible actions from current state\n possibleActions = self.mdp.getPossibleActions(state)\n\n #if action is illegal return 0\n if action in possibleActions:\n transitions = self.mdp.getTransitionStatesAndProbs(state, action)\n returnVal = 0\n index = 0\n\n #loop through all trainsitions/probabilities\n while index < len(transitions):\n #This returns s' and T(s, a, s')\n (nextState, prob) = transitions[index]\n\n\n #This returns R(s,a,s')\n rewardVal = self.mdp.getReward(state, action, nextState)\n\n #This is gamma * V(s')\n lastPartOfEquation = self.values[nextState] *self.discount\n\n #This is T(s, a, s')[R(s, a, s') + gamma * V(s'). Bellman equation\n returnVal = returnVal + prob * (rewardVal + lastPartOfEquation)\n index = index + 1\n\n return returnVal\n else:\n return 0",
"def act(self, state, epsilon, env):\n if random.random() > epsilon:\n state = Variable(torch.FloatTensor(state)).unsqueeze(0) # adds extra dim when single input\n state = self.vari_gpu(state)\n _, u_opt = self.forward(state)\n action = (u_opt.cpu().detach().numpy()) # compute the u*[0] \n #print('act:q_value ',q_value)\n #print('act:model action ',action)\n else:\n rand = np.random.rand(int(np.array(env.action_space.shape)))\n high = env.action_space.high\n low = env.action_space.low\n action = low + rand*(high-low)\n #print('act: ',action)\n return action",
"def advantage(self, state, Q: torch.Tensor = None):\n return Q - Q.max()\n # return Q - torch.matmul(self.π.pmf(state, action_values=Q), Q)",
"def get_value(self, state):\n epsilon = self.epsilon\n possible_actions = self.get_legal_actions(state)\n\n #If there are no legal actions, return 0.0\n if len(possible_actions) == 0:\n return 0.0\n\n optimal_action = possible_actions[\n np.argmax([self.get_qvalue(state, action) for action in possible_actions])\n ]\n state_value = 0\n for action in possible_actions:\n if action == optimal_action:\n state_value += (1 - epsilon) * self.get_qvalue(state, action)\n state_value += (epsilon / len(possible_actions)) * self.get_qvalue(state, action)\n return state_value",
"def _evaluate(self, state):\n leading_power_error = self.get_leading_power_error(state)\n if np.isfinite(leading_power_error):\n return -float(leading_power_error)\n else:\n return self._default_value",
"def _R(state, effects, observed_frequencies):\n # this small number ~ 10^-304 is added so that we don't get divide by zero errors\n machine_eps = np.finfo(float).tiny\n # have a zero in the numerator, we can fix this is we look a little more carefully.\n predicted_probs = np.array([np.real(np.trace(state.dot(effect))) for effect in effects])\n update_operator = sum([effect * observed_frequencies[i] / (predicted_probs[i] + machine_eps)\n for i, effect in enumerate(effects)])\n return update_operator",
"def get_state_action_value(self, state, action):\n state_tensor = torch.from_numpy(state).float().to(self.device)\n output = torch.dot(self.weights[action,:],state_tensor)\n return output",
"def get_state_action_value(self, state, action):\n state_tensor = torch.from_numpy(state).float().to(self.device)\n output = torch.dot(self.weights[action,:],state_tensor.view(-1))\n return output",
"def eval_action(self, state):\n means, _ = self.__call__(state)\n action = self.action_scale * means + self.action_bias\n\n return action.detach().cpu().numpy()",
"def get_expectation_values(\n self, circuit: Circuit, operator: SymbolicOperator, **kwargs\n ) -> ExpectationValues:\n measurements = self.run_circuit_and_measure(circuit)\n operator = change_operator_type(operator, IsingOperator)\n expectation_values = measurements.get_expectation_values(operator)\n expectation_values = expectation_values_to_real(expectation_values)\n return expectation_values",
"def power_iteration(operator, steps=20, error_threshold=1e-4,\n momentum=0.0, use_gpu=True,\n init_vec=None):\n vector_size = operator.size # input dimension of operator\n if init_vec is None:\n vec = torch.rand(vector_size)\n else:\n vec = init_vec\n\n if use_gpu:\n vec = vec.cuda()\n\n prev_lambda = 0.\n prev_vec = torch.zeros_like(vec)\n for _ in range(steps):\n new_vec = operator.apply(vec) - momentum * prev_vec\n prev_vec = vec / (torch.norm(vec) + 1e-6)\n\n lambda_estimate = vec.dot(new_vec).item()\n diff = lambda_estimate - prev_lambda\n vec = new_vec.detach() / torch.norm(new_vec)\n error = np.abs(diff / lambda_estimate)\n if error < error_threshold:\n return lambda_estimate, vec\n prev_lambda = lambda_estimate\n\n return lambda_estimate, vec",
"def body(state):\n v = state.v\n # normalize\n v_norm = arr_l2norm(v)\n v = v / v_norm\n # compute the next vector\n v_new = operator.times(v)\n # estimate the eigen value\n new_estimate = jnp.vdot(v, v_new)\n return PowerIterState(v=v_new, old_estimate=state.new_estimate, \n new_estimate=new_estimate, iterations=state.iterations+1)",
"def computeQValueFromValues(self, state, action):\n newState_prob = self.mdp.getTransitionStatesAndProbs(state, action)\n sum = 0\n for i in newState_prob:\n sum += i[1]*(self.mdp.getReward(state, action, i[0])+(self.discount*self.getValue(i[0])))\n return sum \n \n util.raiseNotDefined()",
"def expected_value_of_outcome(n, p):\r\n expected = n * p\r\n print(\"expected value / mean = \", expected)\r\n return expected",
"def computeQValueFromValues(self, state, action):\n #get the Transition function and nextStates\n state_prob_pair=self.mdp.getTransitionStatesAndProbs(state,action)\n #initialize the value to zero\n actual_value=0\n #iterate over probabilities (transition functions) and next states\n for pair in state_prob_pair:\n #compute qvalue\n actual_value+=pair[1]*(self.mdp.getReward(state,action,pair[0])+self.discount*self.values[pair[0]])\n #print \"The Q value is \",actual_value\n return actual_value",
"def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n # OUR CODE HERE\n #get the value of the state\n qVal = self.values[state]\n #iterate through the MDP transition states from the current state\n for transitionState, probability in self.mdp.getTransitionStatesAndProbs(state, action):\n #q value = discount * expected value of reward of state\n qVal += self.discount * probability * self.values[transitionState]\n return qVal\n # END OUR CODE"
]
| [
"0.73366123",
"0.7222232",
"0.7041802",
"0.631812",
"0.604789",
"0.5915146",
"0.589428",
"0.5876983",
"0.5830442",
"0.58052284",
"0.5766971",
"0.57608795",
"0.5747666",
"0.56931096",
"0.56688267",
"0.5637735",
"0.5619489",
"0.5608534",
"0.55620414",
"0.5543287",
"0.5540375",
"0.5532951",
"0.55095404",
"0.55024993",
"0.5469471",
"0.5465056",
"0.54639083",
"0.544496",
"0.5440424",
"0.54289156"
]
| 0.7237491 | 1 |
Establish a connection with the server. Set `self.stream` to the `pyxmpp.jabberd.ComponentStream` when initial connection succeeds. | def connect(self):
if not self.jid or self.jid.node or self.jid.resource:
raise ValueError,"Cannot connect: no or bad JID given"
if not self.secret:
raise ValueError,"Cannot connect: no secret given"
if not self.server:
raise ValueError,"Cannot connect: no server given"
if not self.port:
raise ValueError,"Cannot connect: no port given"
self.lock.acquire()
try:
stream=self.stream
self.stream=None
if stream:
stream.close()
self.__logger.debug("Creating component stream: %r" % (self.stream_class,))
stream=self.stream_class(jid = self.jid,
secret = self.secret,
server = self.server,
port = self.port,
keepalive = self.keepalive,
owner = self)
stream.process_stream_error=self.stream_error
self.stream_created(stream)
stream.state_change=self.__stream_state_change
stream.connect()
self.stream=stream
self.state_changed.notify()
self.state_changed.release()
except:
self.stream=None
self.state_changed.release()
raise | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def connect(self):\n if self.server is not None:\n # TODO might want to give client debug flag option\n self.client = xmpp.Client(server=self.server, port=self.port, debug=[])\n con = self.client.connect(server=(self.server, self.port))\n\n # making helper classes, order is relevant, since roster is used by the others\n self._roster = self._RosterManager(self.client)\n self.iq_handler = self._IQHandler(self._roster, self.client)\n self._pres_manager = self._PresenceManager(self._roster, self.client)\n return con",
"async def connect(self):\n try:\n self._cmd_stream = await self._connect()\n self.inc_counter(\"%s.connected\" % self.objname)\n self.logger.info(\"Connected: %s\", self._extra_info)\n except Exception as e:\n self.logger.error(\"Connect Failed %r\", e)\n self.inc_counter(\"%s.failed\" % self.objname)\n raise e",
"def _connect(self):\n\n # Get the timeout\n m_timeout = OMPv4.TIMEOUT\n if self.__timeout:\n m_timeout = self.__timeout\n\n # Connect to the server\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(m_timeout)\n try:\n sock.connect((self.__host, int(self.__port)))\n except socket.error, e:\n raise ServerError(str(e))\n self.socket = ssl.wrap_socket(sock, ssl_version=ssl.PROTOCOL_TLSv1)\n\n # Authenticate to the server\n self._authenticate(self.__username, self.__password)",
"def start(self):\n self.protocol.makeConnection(self.transport)",
"def start(self):\n self.protocol.makeConnection(self.transport)",
"def connectionInitialized(self):\n log.msg('Connection Initialized')\n self.send(AvailablePresence())\n self.xmlstream.addObserver(\"/iq[@type='result']\", self.handleRequest)\n self.xmlstream.addObserver(\"/message\", self._onMessage)",
"async def connect(self):\n await asyncio.gather(self._exchange_connection.connect_to_server(), self.on_connection())",
"async def __initiate_connection(self):\r\n\r\n chainlink_model = ChainlinkResolver.resolve(self.name)\r\n if chainlink_model is None:\r\n LoggerInterface.error(f'The chainlink {self.name} is not registered yet. Register it first!')\r\n return\r\n\r\n self.socket_client.set_callback(self.callback)\r\n self.socket_client.set_using_chainlink(chainlink_model)\r\n await self.socket_client.connect()",
"def connect(self):\n if not self.is_connected:\n self._init_cec_connection()",
"def initialize_connection(self):\n # TODO how to track state of connection if this fails?\n assert(self.state == self.State.DISCONNECTED)\n\n self.socket.settimeout(self.CONNECTION_TIMEOUT_S)\n self.socket.connect((self.peer_info['ip'], self.peer_info['port']))\n\n # Get to initializing state once connection succeeds\n self.state = self.State.INIT_HANDSHAKE\n\n handshake = PeerHandshake(consts.PEER_ID, self.info_hash)\n self.socket.send(handshake.serialize())",
"def connect(self):\n\t\tself._entity_server_connection.attempt_connection()",
"def _establish_connection(self):\n self.conn = self.listener.accept()",
"def connect(self, register = False, on_success=None, on_fail=None):\n JabberClient.connect(self, register)\n if register:\n s = self.stream\n s.registration_callback = self.process_registration_form\n s.registration_error_callback = on_fail\n s.registration_success_callback = lambda: (self.disconnect(), on_success())",
"def initialize(self):\n if not self.connection.is_closed():\n self.connection.close()\n\n self.connection.connect()",
"def _connect(self):\r\n self.sock = socket.socket()\r\n host = \"pubsub.pubnub.com\"\r\n port = 80\r\n if self.use_ssl:\r\n self.sock = ssl.wrap_socket(self.sock)\r\n port = 443\r\n self.sock.connect((host, port))\r\n self.connected = True",
"async def connect(self):\n await self._perform_connect()\n\n self.logger.debug(\"ewelink Connected\")\n self._publish('client', 'status', \"Connected\")\n self._disconnecting = False\n\n await self._receive_loop()",
"def connect(self):\n self.conn.add_listener(self.handle_connection_change)\n self.conn.start_async()",
"def connect(self):\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.connect((self.host, PORT)) # probably throws errors\n self.connected = True",
"def connect(self):\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n if self.print_send:\n print(' - connecting...')\n self.socket.settimeout(1)\n self.socket.connect(self.host_port)\n if self.print_send:\n print(' - connected')\n except socket.timeout:\n raise Timeout('Timeout connecting to projector')\n except Exception as err:\n raise Error('Connection failed', err)\n self.expect(b'PJ_OK')\n self.send(b'PJREQ')\n self.expect(b'PJACK')",
"def connect(self):\r\n if self.__socket:\r\n return\r\n try:\r\n # This is the server communicator, try and accept connections.\r\n if self.__server_socket is not None:\r\n self.__socket, _ = self.__server_socket.accept()\r\n self.__socket.setblocking(0)\r\n self.__server_socket.close()\r\n # This is the client communicator, try and connect (quickly).\r\n else:\r\n self.__socket = socket.socket()\r\n self.__socket.settimeout(self.CONNECT_TIMEOUT)\r\n self.__socket.connect((self.__ip, self.__port))\r\n self.__socket.setblocking(0)\r\n self.__get_message()\r\n except socket.error:\r\n # Always close the socket if created, then make it none (this\r\n # way it is evident that a connection was not yet established).\r\n if self.__socket:\r\n self.__socket.close()\r\n self.__socket = None\r\n # Try again in a given interval.\r\n self.__root.after(self.WAIT_PERIOD, self.connect)",
"async def connection_made(self):\n logging.info('connecting to %s:%s' % self.address)",
"def connect(self):\n self.conn.connect()",
"async def connect(self):\n\n self.socket = await self._session.ws_connect(str(self._url))\n self._create_task(self.__handle_connection())",
"def _connect(self):\n hostport = self.getHost()\n channelOpenData = forwarding.packOpen_direct_tcpip((self.host, self.port), (hostport.host, hostport.port))\n self.connector.connection.openChannel(self, channelOpenData)",
"def _connect(self):\n #print(\"Connecting...\")\n self._connection = reactor.connectTCP(self.host, self.port, self.factory) #@UndefinedVariable",
"def connectionMade(self):\n self.protocol.makeConnection(BridgeTransport(self.transport))",
"def connectionMade(self):\n protocol.Protocol.connectionMade(self)\n self.port = self.transport.getHost().port\n #Start the inactivity timer the connection is dropped if we receive no data\n self.activateInactivityTimer()\n self.sessionState = SMPPSessionStates.OPEN\n self.log.warning(\"SMPP connection established from %s to port %s\", self.transport.getPeer().host, self.port)",
"async def connect(self) -> None:\n LOGGER.debug(\"Connecting to Home Assistant...\")\n try:\n self._client = await self._http_session.ws_connect(\n self.ws_server_url, heartbeat=55\n )\n version_msg = await self._client.receive_json()\n self._version = version_msg[\"ha_version\"]\n # send authentication\n await self._client.send_json({\"type\": \"auth\", \"access_token\": self._token})\n auth_result = await self._client.receive_json()\n if auth_result.get(\"type\", \"\") != \"auth_ok\":\n raise AuthenticationFailed(\n auth_result.get(\"message\", \"Authentication failed\")\n )\n except (\n client_exceptions.WSServerHandshakeError,\n client_exceptions.ClientError,\n ) as err:\n raise CannotConnect(err) from err\n\n LOGGER.info(\n \"Connected to Home Assistant %s (version %s)\",\n self.ws_server_url.split(\"://\")[0].split(\"/\")[0],\n self.version,\n )\n # start task to handle incoming messages\n self._loop.create_task(self._process_messages())\n # register event listener\n await self.send_command({\"type\": \"subscribe_events\"})\n # request full state once\n await self._request_full_state()",
"async def connect(self) -> None:\n self.client = mqtt.Client()\n self.client.on_message = self.on_message\n self.client.connect(self.host, self.port)\n self.client.loop_start()\n self.client.subscribe(LSST_GENERAL_TOPIC)\n self.connected = True\n self.log.debug(\"Connected.\")",
"def connect(self):\n self.snmp_client = SNMPClient(host=self.host,\n read_community=self.read_community,\n write_community=self.write_community,\n port=self.port,\n version=self.version,\n log=self.log)"
]
| [
"0.69157887",
"0.6877156",
"0.66275495",
"0.66263306",
"0.66263306",
"0.6622735",
"0.6613999",
"0.66042864",
"0.6585165",
"0.6509693",
"0.6508548",
"0.6492015",
"0.64870465",
"0.6427897",
"0.6406653",
"0.64025706",
"0.6399208",
"0.6368029",
"0.6316508",
"0.6271877",
"0.62522554",
"0.6240218",
"0.62007076",
"0.61958385",
"0.61886966",
"0.61867076",
"0.6174836",
"0.61687714",
"0.61283034",
"0.61061716"
]
| 0.7528595 | 0 |
Get the stream of the component in a safe way. | def get_stream(self):
self.lock.acquire()
stream=self.stream
self.lock.release()
return stream | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_stream(self):\n result = self.stream\n self.stream = \"\"\n return result",
"def stream(self):\r\n return streams.Stream(self)",
"def stream(self):\r\n return streams.Stream(self)",
"def stream(self):\n return streams.Stream(self)",
"def stream(self):\n\t\tdata = self._client.get(\"streams\", self.name)['stream']\n\t\tif data is not None:\n\t\t\tdata.pop('channel', None)\n\t\treturn data",
"async def stream_source(self):\n return self._stream_source",
"def get(self, stream):\n\n return self._streams[stream]",
"def stream(self) -> interface.Stream:\n return cast(interface.Stream, self._interfaces[interface.Stream])",
"def _get_stream(\n session: \"Session\", url_tail: str, params: Optional[Dict[str, Any]] = None\n) -> Any:\n response = _get(session, url_tail, params, stream=True)\n response.raw.decode_content = True\n return response.raw",
"def stream(self):\n if self.body_used:\n raise RuntimeError('Cannot use both stream and body')\n self.stream_used = True\n return self._stream",
"def getvalue(self):\n if callable(getattr(self.stream, 'getvalue', None)):\n return self.stream.getvalue()",
"async def get_stream(self) -> dict:\n return await self.channel.get_stream()",
"def test_get_stream(self):\n pass",
"def is_stream(self):\r\n return self.stream",
"async def get_stream(self) -> dict:\n\n data = await self._http.get_streams(channels=[self.name])\n\n try:\n return data[0]\n except IndexError:\n pass",
"def cohere_stream(stream):\n if isinstance(stream, IterIO):\n return stream\n return IterIO(stream)",
"def source(self):\n return self._group.stream",
"def outstream(self):\r\n #noinspection PyUnresolvedReferences\r\n return self._outstream",
"def streaming_buffer(self) -> 'outputs.StreamingbufferResponse':\n return pulumi.get(self, \"streaming_buffer\")",
"async def read_stream(self):\n data = bytearray(await self.reader.readuntil(b\"\\n\"))\n return data[:-1]",
"def file_contents(self, stream):\n return stream.read()",
"def create_stream(self):\n pass",
"def squares_streaming_get():\n \n return 'do some magic!'",
"def source(self):\n return self._client.group.stream",
"def _ReadStream(self, stream_name):\n file_object = self._OpenStream(stream_name)\n if not file_object:\n return b''\n\n try:\n data = file_object.read()\n finally:\n file_object.close()\n\n return data",
"def response_as_stream(self) -> Any:\n raise NotImplementedError # pragma: no cover",
"def __next__(self):\n return next(self.buffered_streamer)",
"async def stream_source(self) -> str:\n if not self._stream_enabled:\n return None\n return self._stream_source",
"def getStream(self,name):\n if (name in self._streams):\n return self._streams[name]\n return None",
"def __iter__(self):\n return self.stream_chunker"
]
| [
"0.7813552",
"0.73093575",
"0.73093575",
"0.72392064",
"0.71380043",
"0.68041235",
"0.6763978",
"0.6759576",
"0.6547358",
"0.65138996",
"0.64420074",
"0.63967717",
"0.63899106",
"0.6356386",
"0.6313429",
"0.62822145",
"0.6140741",
"0.6103475",
"0.6076918",
"0.60742474",
"0.6035672",
"0.60104376",
"0.59889376",
"0.594357",
"0.59363085",
"0.593459",
"0.59068936",
"0.5835694",
"0.5817363",
"0.5763324"
]
| 0.7730196 | 1 |
Register a feature to be announced by Service Discovery. | def register_feature(self, feature_name):
self.disco_info.add_feature(feature_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def register(cls, feature_name, feature):\n if feature_name in cls.feature_registry:\n raise FeatureAlreadyRegistered(feature_name)\n cls.feature_registry[feature_name] = feature",
"def register_service(self) -> None:\n strategy = cast(Strategy, self.context.strategy)\n description = strategy.get_register_service_description()\n self._register(description, \"registering agent's service on the SOEF.\")",
"def add_feature(self, feat: str):\n if feat not in self._features:\n self._features.append(feat)\n else:\n raise IDAlreadyExists",
"def register(self, service_name, service_addr, service_ttl):\n raise NotImplementedError",
"def register_service(service, iface, name):",
"async def do_start_feature(self, feature_name, channel, target, req):\n allowed = self.config[\"allowed_dynamic_features\"]\n if allowed is not None and feature_name not in allowed:\n return self._rpc_failure(\n \"Feature {} is disallowed by configuration\".format(feature_name)\n )\n feature_cls = DynamicFeature.find(feature_name)\n new_feature = feature_cls(self.ws_handler, channel, target, req)\n self.ws_handler.add_feature(new_feature, channel, target)\n self.started_features[_fk(feature_name, channel, target)] = new_feature",
"def register_service(self, service, name):\n assert service._remote_service, \"Services should be decorated correctly.\"\n \n prepare_remote_service(service)\n self._services[name] = service",
"def add_feature(self, feat: Feature) -> None:\n self.data_features.append(feat)",
"def register(self):\n raise NotImplementedError",
"def register(self):\n raise NotImplementedError",
"def add_feature(self, featureName):\n newFeature = {\n \"name\": featureName,\n \"isRange\" : False\n }\n\n self.features.append(newFeature)",
"def register(self):\n raise NotImplementedError()",
"def feature(self, feature):\n\n self._feature = feature",
"def feature(self, feature):\n\n self._feature = feature",
"def register(self, hook_url):\n raise NotImplementedError()",
"def _register(self, noun):\n self.noun = noun",
"def _add_feature(self, feature):\n\n if feature.name in self.feature_name_index:\n logger.info(\"Feature %s already exists at %i, overwriting\" %\n (feature.name, self.feature_name_index[feature.name]))\n self.features[self.feature_name_index[feature.name]] = feature\n else:\n self.features.append(feature)\n self.feature_name_index[feature.name] = len(self.features) - 1\n logger.info(\"Adding %s to model at location %i\" % (\n feature.name, len(self.features)))\n self._add_domain_fault_above(feature)\n self._add_unconformity_above(feature)\n feature.set_model(self)",
"def add_feature(self, feature):\n # type: (Any) -> int\n # A copy of self.feature_names is always made, because it might be\n # \"owned\" by someone else.\n # It's possible to make the copy only at the first call to\n # self.add_feature to improve performance.\n idx = self.n_features\n if isinstance(self.feature_names, (list, np.ndarray)):\n self.feature_names = list(self.feature_names)\n self.feature_names.append(feature)\n elif isinstance(self.feature_names, dict):\n self.feature_names = dict(self.feature_names)\n self.feature_names[idx] = feature\n elif self.feature_names is None:\n self.feature_names = {idx: feature}\n self.n_features += 1\n return idx",
"def register(self):\n raise NotImplementedError(\"Should have implemented this\")",
"def attach_feature(self, feature):\r\n\r\n # Filter out literally identical features\r\n if feature in self._features:\r\n return # the feature is already present\r\n\r\n # Filter out functionally identical features.\r\n # Features may use their on_attach method to raise\r\n # toolbox.AlreadyThere if they detect that some\r\n # installed feature does the same thing already\r\n attach = getattr(feature, 'on_attach', None)\r\n if attach is not None:\r\n try:\r\n attach(self)\r\n except toolbox.AlreadyThere:\r\n return\r\n self.execute_callbacks_times.setdefault(feature, 0)\r\n #it would be nice if we could require a specific class instead of\r\n #a \"workalike\" so we could do actual error checking\r\n #if not isinstance(feature, toolbox.Feature):\r\n # raise TypeError(\"Expected gof.toolbox.Feature instance, got \"+\\\r\n # str(type(feature)))\r\n\r\n # Add the feature\r\n self._features.append(feature)",
"def register_service_agent(cm, sc, conf, rpcmgr):\n\n service_type = lb_const.SERVICE_TYPE\n cm.register_service_agent(service_type, rpcmgr)",
"def register_classification(self) -> None:\n strategy = cast(Strategy, self.context.strategy)\n description = strategy.get_register_classification_description()\n self._register(\n description, \"registering agent's personality classification on the SOEF.\"\n )",
"def register_factory(factory, iface, requires, name):",
"def _register(self, faucet):\n if self.faucet is None:\n self.faucet = faucet",
"def register_to_core(self):\n self.channel.basic_publish(exchange='', routing_key='peripheral_register', body=json.dumps({self.name: api}))",
"def register(self, target, hostname, listener_type, expire=-1):",
"def add_feature(self, feature):\n self.features += [feature]\n for stock in self.stocks:\n feature(self.stock_data[stock])",
"def register_service(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def register(self):\n self.logger.info(\"Registering agent %s\", \"/registry/\" + self._configuration[\"identification\"][\"uuid\"])\n self._coordination.update(\"/registry/\" + self._configuration[\"identification\"][\"uuid\"], self._configuration[\"identification\"])",
"def addFeature(self, strName, lstDomain):\n # create a new variable CSPVariable object\n newFeature = CSPFeature(strName, lstDomain)\n # put the new variable in the graph's list of variables\n self.features.append(newFeature)"
]
| [
"0.6767601",
"0.62767833",
"0.6213465",
"0.6098763",
"0.6075585",
"0.58385265",
"0.57447296",
"0.57391727",
"0.5735931",
"0.5735931",
"0.570295",
"0.5646223",
"0.56006825",
"0.56006825",
"0.5569809",
"0.55604154",
"0.5557282",
"0.55560994",
"0.5542508",
"0.5523626",
"0.55188304",
"0.5518318",
"0.5494324",
"0.54589707",
"0.5454453",
"0.5440195",
"0.5435054",
"0.54267687",
"0.5394075",
"0.53924954"
]
| 0.7496854 | 0 |
Unregister a feature to be announced by Service Discovery. | def unregister_feature(self, feature_name):
self.disco_info.remove_feature(feature_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_feature(self, feature):\r\n try:\r\n self._features.remove(feature)\r\n except Exception:\r\n return\r\n detach = getattr(feature, 'on_detach', None)\r\n if detach is not None:\r\n detach(self)",
"def __delattr__(self, feature):\n setattr(self, feature, None)",
"def remove_feature(self, feat: str):\n if feat in self._features:\n self._features.remove(feat)\n else:\n raise IDDoesNotExist",
"def remove_feature(self, name):\n logging.info('removing feature %s' % name)\n self.fguide.remove(name)\n self.train.pop(name)\n self.test.pop(name)",
"def remove_feature(self, name):\n logging.info('removing feature %s' % name)\n self.fguide.remove(name)\n self.dataset.pop(name)",
"def unregister_service(self, name):\n self._services.remove(name)",
"def unregister(self, service_name, service_addr):\n raise NotImplementedError",
"def delete_feature(self, feature):\r\n cmd = DeleteFeatureCommand(self._delete_feature, self._set_features, self._features, feature)\r\n self.get_invoker().store_and_execute(cmd)",
"async def do_stop_feature(self, feature_name, channel, target):\n feature_key = _fk(feature_name, channel, target)\n feature = self.started_features[feature_key]\n await self.ws_handler.remove_feature(feature, channel, target)\n try:\n del self.started_features[feature_key]\n except KeyError:\n # probably some race condition\n log.debug(\n \"Ignoring missing started feature %s on stop_feature request\",\n feature_key,\n )\n\n if isinstance(feature, TargetFeature):\n # Might as well try a disconnect\n log.debug(\"About to disconnect stopped feature \" + feature_key)\n await feature.disconnect()",
"def unregister(self):\n idaapi.unregister_action(self.get_name())",
"def unregister(url):\n return Client.get_client().unregister(url)",
"def unregister(self):\r\n self._unregister()",
"def stop(self, context):\n # Unregister the service\n self.__registration.unregister()\n self.__registration = None",
"def unregister(self, fd):\n self.poller.unregister(fd)",
"def unregister(self, fd):\n self.poller.unregister(fd)",
"def remove(self, feature_type):\n with self._map_lock.write_lock():\n del self._feature2memory[feature_type]",
"def _unregister_service(self) -> None:\n strategy = cast(Strategy, self.context.strategy)\n description = strategy.get_unregister_service_description()\n oef_search_dialogues = cast(\n OefSearchDialogues, self.context.oef_search_dialogues\n )\n oef_search_msg, _ = oef_search_dialogues.create(\n counterparty=self.context.search_service_address,\n performative=OefSearchMessage.Performative.UNREGISTER_SERVICE,\n service_description=description,\n )\n self.context.outbox.put_message(message=oef_search_msg)\n self.context.logger.info(\"unregistering service from SOEF.\")",
"def unregister(self):\n assert self.state == State.SHUTDOWN\n del self._proto[self.dest_addr]",
"def __delitem__(self, feature):\n self[feature] = None",
"def unregisterRemove(self, function):\n self._sig_remove.unsubscribe(function)",
"def unregister_service(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")",
"def RoutingInterfaceNotificationUnregister(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def unregister(provider):\n _DEFAULT_PROVIDER.remove_provider(provider)",
"def unsubscribe(self):\r\n self._unregister()",
"def delete_feature(self, dataset, fid):\n uri = URITemplate(\n self.baseuri + '/{owner}/{did}/features/{fid}').expand(\n owner=self.username, did=dataset, fid=fid)\n return self.session.delete(uri)",
"def unregister(self, alias):\n delattr(self, alias)",
"def RoutingInterfaceNotificationUnregister(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()",
"def removeFeatureManagerConnection(address=None):\n global __mgr_cache__\n #: :type: FeatureManager\n if hasattr(__mgr_cache__[address], 'shutdown'):\n __mgr_cache__[address].shutdown()\n del __mgr_cache__[address]",
"def unregister(self, name: str, opset: OpsetVersion) -> None:\n if name not in self._registry:\n return\n self._registry[name].remove_custom(opset)",
"def disable_feature(self, feature_name, callback=None):\n logger.info(\"disable_feature {} called\".format(feature_name))\n self.feature_enabled[feature_name] = False\n\n def pipeline_callback(call):\n if call.error:\n # TODO we need error semantics on the client\n exit(1)\n if callback:\n callback()\n\n self._pipeline.run_op(\n pipeline_ops_base.DisableFeature(feature_name=feature_name, callback=pipeline_callback)\n )"
]
| [
"0.7234667",
"0.6661622",
"0.6654812",
"0.65258306",
"0.64295805",
"0.63569415",
"0.62806374",
"0.6280209",
"0.62158936",
"0.6206144",
"0.6193515",
"0.6166244",
"0.6110538",
"0.6105044",
"0.6105044",
"0.605418",
"0.6034885",
"0.6032989",
"0.6032759",
"0.6029561",
"0.6011828",
"0.6005594",
"0.6002829",
"0.594094",
"0.59330636",
"0.58842736",
"0.58319926",
"0.574813",
"0.5746461",
"0.5728032"
]
| 0.8387436 | 0 |
Handle various stream state changes and call right methods of `self`. | def __stream_state_change(self,state,arg):
self.stream_state_changed(state,arg)
if state=="fully connected":
self.connected()
elif state=="authenticated":
self.authenticated()
elif state=="authorized":
self.authorized()
elif state=="disconnected":
self.state_changed.acquire()
try:
if self.stream:
self.stream.close()
self.stream_closed(self.stream)
self.stream=None
self.state_changed.notify()
finally:
self.state_changed.release()
self.disconnected() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stream_state_changed(self,state,arg):\n pass",
"def stream_state_changed(self,state,arg):\n if opts.verbose:\n print \"*** State changed: %s %r ***\" % (state,arg)\n else:\n pass",
"def stream_changed(self, uri):\n pass",
"def stream_call(self):\n pass",
"def stream_status_event(self, event):\r\n pass",
"def run(self, stream):\n pass",
"def handle_input(self):\n difference = self.check_state()\n if not difference:\n return\n self.events = []\n self.handle_new_events(difference)\n self.update_timeval()\n self.events.append(self.sync_marker(self.timeval))\n self.write_to_pipe(self.events)",
"def __updateStreamStatus(self):\n while(True):\n for server,streams in self._streamsByServer.items():\n activeStreams = server.getActiveStreams()\n # Update each streams state\n for stream in streams:\n stream.lock.acquire()\n stream.setStreamState(server,Stream.STATE.DOWN)\n if (stream.name in activeStreams):\n stream.setStreamState(server,Stream.STATE.UP)\n stream.setStreamAddress(server,activeStreams[stream.name])\n stream.lock.release()\n time.sleep(StreamManager.SECS_BETWEEN_STATUS_CHECKS)",
"def stream_created(self,stream):\n pass",
"def update_state(self):\n for listener in self.listeners:\n listener['callback']()",
"def state_cb(self, msg):\n self.prev_state = deepcopy(self.current_state)\n self.current_state = msg\n\n if self.current_state.mode == \"MANUAL\":\n if self.offboard_point_streaming:\n rospy.loginfo(\"Setpoint stream DISABLED\")\n self.stop_streaming_offboard_points()\n\n if self.current_state.mode == \"POSCTL\":\n if not self.offboard_point_streaming:\n rospy.loginfo(\"Setpoint stream ENABLED\")\n self.start_streaming_offboard_points()\n if not self.prev_state.mode == \"POSCTL\":\n # just switched into POSCTL, call hover\n self.hover()\n\n if self.current_state.mode == \"OFFBOARD\":\n if not self.prev_state.mode == \"OFFBOARD\":\n # just switched to OFFBOARD, call move\n rospy.loginfo(\"Entering OFFBOARD Mode\")\n for i in range(0,len(velocities)):\n maneuver_velocity_setpoint=velocities[i]\n maneuver_reference_frame = maneuver_reference_Frame\n maneuver_duration=duration[i]\n self.execute_maneuver( self.maneuver_velocity_setpoint, \n self.maneuver_reference_frame, \n self.maneuver_duration)",
"def __state_cb(self, data):\n self.state = data",
"def test_data_source_soaps_change_stream_post(self):\n pass",
"def stream_closed(self,stream):\n pass",
"def _stateHandler(self, state):\n try:\n if state.getType() == TSMessage.STATUS and self.parent:\n _params = state.getParams()\n if _params.get('main'):\n _descr = _params['main'].split(';')\n\n if _descr[0] == 'starting':\n self.msg_params['prebuf'] = 0\n\n elif _descr[0] == 'prebuf':\n if _descr[1] != self.msg_params.get('prebuf', 0):\n self.msg_params['last_update'] = state.getTime()\n self.msg_params['prebuf'] = _descr[1]\n log.d('_stateHandler: Пытаюсь показать состояние')\n self.parent.showStatus('Пребуферизация {0}'.format(self.msg_params['prebuf']))\n self.parent.player.showStatus('Пребуферизация {0}'.format(self.msg_params['prebuf']))\n\n if time.time() - self.msg_params['last_update'] >= AcePlayer.TIMEOUT_FREEZE:\n log.w('AceEngine is freeze')\n self.autoStop()\n\n elif _descr[0] == 'check':\n log.d('_stateHandler: Проверка {0}'.format(_descr[1]))\n self.parent.showStatus('Проверка {0}'.format(_descr[1]))\n # elif _descr[0] == 'dl':\n # self.parent.showInfoStatus('Total:%s DL:%s UL:%s' % (_descr[1], _descr[3], _descr[5]))\n elif _descr[0] == 'buf':\n # self.parent.showStatus('Буферизация: %s DL: %s UL: %s' % (_descr[1],\n # _descr[5], _descr[7])) @IgnorePep8\n\n if _descr[1] != self.msg_params.get('buf', 0):\n self.msg_params['last_update'] = state.getTime()\n self.msg_params['buf'] = _descr[1]\n # self.parent.player.showStatus('Буферизация {0}'.format(self.msg_params['value']))\n if time.time() - self.msg_params['last_update'] >= AcePlayer.TIMEOUT_FREEZE:\n self.parent.player.showStatus('Пребуферизация {0}'.format(self.msg_params['buf']))\n log.w('AceEngine is freeze')\n self.autoStop()\n # elif _descr[0] == 'dl':\n # if _descr[8] != self.msg_params.get('downloaded', 0):\n # self.msg_params['last_update'] = state.getTime()\n # self.msg_params['downloaded'] = _descr[8]\n # if time.time() - self.msg_params['last_update'] >= 10:\n # log.w('AceEngine is freeze')\n # self.autoStop()\n\n # self.parent.showInfoStatus('Buf:%s DL:%s UL:%s' % (_descr[1], _descr[5], _descr[7]))\n # else:\n # self.parent.showInfoStatus('%s' % _params)\n elif state.getType() in (TSMessage.RESUME, TSMessage.PAUSE, TSMessage.START):\n self.msg_params['value'] = 0\n # self.msg_params['downloaded'] = 0\n\n elif state.getType() == TSMessage.EVENT:\n if state.getParams() == 'getuserdata':\n self._send_command('USERDATA [{{\"gender\": {0}}} {{\"age\": {1}}}]'.format(\n utils.str2int(defines.GENDER) + 1,\n utils.str2int(defines.AGE) + 1))\n elif state.getParams().startswith('showdialog'):\n _parts = state.getParams().split()\n self.parent.showStatus('{0}: {1}'.format(unquote(_parts[2].split('=')[1]),\n unquote(_parts[1].split('=')[1])))\n elif state.getType() == TSMessage.ERROR:\n self.parent.showStatus(state.getParams())\n\n elif state.getType() == TSMessage.STOP:\n self.waiting.abort.set()\n\n elif state.getType() == TSMessage.STATE:\n _params = state.getParams()\n _param = utils.str2int(_params)\n if _param == 0:\n self.waiting.abort.set()\n\n except Exception as e:\n log.e('_stateHandler error: \"{0}\"'.format(uni(e)))\n finally:\n try:\n if self.waiting.msg == state.getType():\n self.waiting.msg = state\n self.waiting.event.set()\n\n except Exception as e:\n log.e('_stateHandler error: \"{0}\"'.format(uni(e)))",
"def _handle_read(self):\n pass",
"def run(self):\n\n if (self.action == 'read'):\n self.read()\n else:\n self.write()",
"def refresh(self):\r\n # todo, use vid_info as property instead of this\r\n # reset properties and rebuild streams\r\n self.setup()",
"def _update_state(self) -> None:\n raise NotImplementedError(\"\")",
"def handle_stream_client(self, event):\n try:\n while True:\n client_req = self.receive_streaming_msg()\n self.choose_action(client_req[ZERO], client_req[ONE:], event)\n except socket.error as e:\n print('stream', e)",
"def handle_read(self):\n pass",
"def starting_stream(self, stream):\n self.cur_stream_observations = 0\n self.stream = stream",
"def start_stream(self):\n pass",
"def handle_recv(self,stream,msgs):\n pass",
"def on(self):\n self._current_stream = self._stdout",
"def StatusChanged(self, state, info):\n pass",
"def consumer(state: SharedState):",
"def _state_message_received(self, msg: ReceiveMessage) -> None:\n try:\n self._state = int(msg.payload)\n self.async_write_ha_state()\n except ValueError:\n pass",
"def on_receive(self):\n self.state = RECEIVED",
"def _handle_message(self, bus, message):\n if message.type == Gst.MessageType.EOS:\n logger.info(\"End-Of-Stream reached.\\n\")\n # file finished playing\n self.pipeline.set_state(Gst.State.NULL)\n #self.playing = False\n # if self.finished_callback:\n # self.finished_callback()\n \n elif message.type == Gst.MessageType.ERROR:\n # error\n self.pipeline.set_state(Gst.State.NULL)\n err, debug_info = message.parse_error()\n logger.error(f\"Error received from element {message.src.get_name()}: {err.message}\\n\")\n logger.error(f\"Debugging information: {debug_info if debug_info else 'none'}\\n\")\n #self.playing = False \n elif message.type == Gst.MessageType.STATE_CHANGED:\n # We are only interested in state-changed messages from the pipeline\n if message.src == self.pipeline:\n old_state, new_state, pending_state = message.parse_state_changed()\n logger.info(f\"Pipeline state changed from {Gst.Element.state_get_name(old_state)} to {Gst.Element.state_get_name(new_state)}:\\n\")"
]
| [
"0.78895384",
"0.68260914",
"0.6409811",
"0.6369162",
"0.6344077",
"0.62601334",
"0.603433",
"0.59010655",
"0.5873999",
"0.5868889",
"0.5808116",
"0.57923627",
"0.57655436",
"0.5761755",
"0.5749359",
"0.573211",
"0.5684717",
"0.56735766",
"0.56671077",
"0.56656724",
"0.56448895",
"0.5642123",
"0.56409454",
"0.5628385",
"0.55811185",
"0.55747855",
"0.5570012",
"0.55574393",
"0.5551966",
"0.5551541"
]
| 0.7470412 | 1 |
Handle a discoinfo query. | def __disco_info(self,iq):
q=iq.get_query()
if q.hasProp("node"):
node=from_utf8(q.prop("node"))
else:
node=None
info=self.disco_get_info(node,iq)
if isinstance(info,DiscoInfo):
resp=iq.make_result_response()
self.__logger.debug("Disco-info query: %s preparing response: %s with reply: %s"
% (iq.serialize(),resp.serialize(),info.xmlnode.serialize()))
resp.set_content(info.xmlnode.copyNode(1))
elif isinstance(info,Stanza):
resp=info
else:
resp=iq.make_error_response("item-not-found")
self.__logger.debug("Disco-info response: %s" % (resp.serialize(),))
self.stream.send(resp) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cli(ctx, query):\n query = query.split('/')\n get_info(query)",
"def get_info(self, info):\r\n pass",
"def process_info(self, info):\n return info",
"def info_handler(userdata, *args):\n\t\tinfo = database.devinfo(userdata[\"cursor\"], args[0])\n\t\t\n\t\tif info is None:\n\t\t\tprint(\"can't find user \" + args[0])\n\t\t\treturn\n\t\t\n\t\tstype, connected, status = info\n\t\t\n\t\tprint(shlex.quote((\"+\" if connected else \"-\") + stype), end=\" \")\n\t\tprint(shlex.quote(status))",
"def handle_info(self, api, command):\n return self.handle_log(api, command, level=logging.INFO)",
"def disco_get_info(self,node,iq):\n to=iq.get_to()\n if to and to!=self.jid:\n return iq.make_error_response(\"recipient-unavailable\")\n if not node and self.disco_info:\n return self.disco_info\n return None",
"def info(self, id):",
"def _process_info_message(self, message):\n # Extract the output resolution from the appropriate message, if\n # it's present.\n contents = message.get('contents', None)\n if message['messageCode'] == 'JOB_RUNNING_RESOLUTION':\n self._resolution = contents['resolutionMs']\n elif message['messageCode'] == 'FETCH_NUM_TIMESERIES':\n self._num_input_timeseries += int(message['numInputTimeSeries'])\n elif message['messageCode'] == 'FIND_MATCHED_NO_TIMESERIES':\n self._find_matched_no_timeseries = True\n elif message['messageCode'] == 'FIND_LIMITED_RESULT_SET':\n self._find_limited_resultset = True\n self._find_matched_size = contents['matchedSize']\n self._find_limit_size = contents['limitSize']\n elif message['messageCode'] == 'GROUPBY_MISSING_PROPERTY':\n self._group_by_missing_property = True\n self._group_by_missing_properties = contents['propertyNames']",
"def get_info(self, response):\n try:\n if re.search('artist/\\d+', response.url) or \\\n re.search('i\\.xiami\\.com/[^/]+$', response.url):\n self.get_artist(response)\n elif re.search('album/\\d+', response.url):\n self.get_albums(response)\n elif re.search('song/\\d+', response.url):\n self.get_songs(response)\n elif 'count/getplaycount' in response.url:\n self.get_count(response)\n else:\n self.get_pages(response)\n except (AttributeError, TypeError):\n return\n request = self.gen_info(response)\n if not request:\n self.save(response.meta['source_id'],\n response.meta['raw_info'],\n response.meta['result'])\n else:\n yield request",
"async def info(self, ctx: commands.Context, *, title: str) -> None:\n async with ctx.typing():\n searches = await self.backend.search(title, 1)\n data = searches[\"results\"][0]\n mangaUUID = self.backend.mangaUUID(data)\n coverUUID = self.backend.coverUUID(data)\n thumbnail = await self.backend.cover(mangaUUID, coverUUID)\n\n embed = utilities.Embeds.standard()\n embed.set_footer(text=\"Powered by the Mangadex API.\", icon_url=utilities.Icons.info)\n embed.set_thumbnail(url=thumbnail)\n\n manga = data[\"data\"][\"attributes\"]\n title = manga[\"title\"][\"en\"] or \"No title available.\"\n description = manga[\"description\"][\"en\"][0:1024] or \"No description available.\"\n embed.title = f\"Mangadex: {title}\"\n embed.description = description\n\n genres = [tag[\"attributes\"][\"name\"][\"en\"] for tag in manga[\"tags\"]]\n genreSubtitle = \"Genres\" if len(genres) > 1 else \"Genre\"\n embed.add_field(name=genreSubtitle, value=\", \".join(genres))\n\n demo = manga[\"publicationDemographic\"]\n demo = tcase.titlecase(demo) if demo is not None else \"Unknown demographic.\"\n embed.add_field(name=\"Demographic\", value=demo)\n await ctx.reply(embed=embed)",
"def infocalypse_info(ui_, repo, **opts):\n # FCP not required. Hmmm... Hack\n opts['fcphost'] = ''\n opts['fcpport'] = 0\n params, stored_cfg = get_config_info(ui_, opts)\n request_uri = opts['uri']\n if request_uri == '':\n request_uri = stored_cfg.get_request_uri(repo.root)\n if not request_uri:\n ui_.warn(\"There is no stored request URI for this repo.\\n\"\n \"Please set one with the --uri option.\\n\")\n return\n\n params['REQUEST_URI'] = request_uri\n execute_info(ui_, repo, params, stored_cfg)",
"def test_ctcpQuery_CLIENTINFO(self):\n self.client.ctcpQuery_CLIENTINFO(self.user, self.channel, \"\")\n self.client.ctcpQuery_CLIENTINFO(self.user, self.channel, \"PING PONG\")\n info = (\n \"ACTION CLIENTINFO DCC ERRMSG FINGER PING SOURCE TIME \" \"USERINFO VERSION\"\n )\n self.assertEqual(\n self.client.methods,\n [\n (\"ctcpMakeReply\", (\"Wolf\", [(\"CLIENTINFO\", info)])),\n (\"ctcpMakeReply\", (\"Wolf\", [(\"CLIENTINFO\", None)])),\n ],\n )",
"def getInfo():",
"def testDynamicInfoJID(self):\n self.stream_start(mode='client',\n plugins=['xep_0030'])\n\n def dynamic_jid(jid, node, ifrom, iq):\n result = self.xmpp['xep_0030'].stanza.DiscoInfo()\n result['node'] = node\n result.add_identity('client', 'console', name='Dynamic Info')\n return result\n\n self.xmpp['xep_0030'].set_node_handler('get_info',\n jid='tester@localhost',\n handler=dynamic_jid)\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\" to=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\">\n <identity category=\"client\"\n type=\"console\"\n name=\"Dynamic Info\" />\n </query>\n </iq>\n \"\"\")",
"def got_info(self, cloud_obj):",
"async def handle_info(\n hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg: Dict\n):\n info_callbacks = hass.data.get(DOMAIN, {}).get(\"info\", {})\n data = {}\n data[\"homeassistant\"] = await hass.helpers.system_info.async_get_system_info()\n\n if info_callbacks:\n for domain, domain_data in zip(\n info_callbacks,\n await asyncio.gather(\n *(\n _info_wrapper(hass, info_callback)\n for info_callback in info_callbacks.values()\n )\n ),\n ):\n data[domain] = domain_data\n\n connection.send_message(websocket_api.result_message(msg[\"id\"], data))",
"def get_info(self) -> Optional[Dict[str, Any]]:",
"def fetch_info(self, client):\n self.log_verbose(\"Sending info command\")\n client.send(\"info\")\n try:\n data = client.read_response()\n except RedisError as e:\n collectd.error(\"redis_info plugin: Error response from %s:%d - %r\" % (self.host, self.port, e))\n return None\n\n self.log_verbose(\"Received data: %s\" % data)\n\n linesep = \"\\r\\n\" if \"\\r\\n\" in data else \"\\n\"\n info_dict = self.parse_info(data.split(linesep))\n\n return info_dict",
"def info(self, *args, **kwargs):",
"async def serverinfo_command(self, ctx):\n owner = str(ctx.guild.owner.mention)\n id = str(ctx.guild.id)\n region = str(ctx.guild.region)\n memberCount = str(ctx.guild.member_count)\n textChannels = len(ctx.guild.text_channels)\n voiceChannels = len(ctx.guild.voice_channels)\n roles = len(ctx.guild.roles)\n guildCreatedate = ctx.guild.created_at.strftime(\"%a, %#d %B %Y, %I:%M %p\")\n\n embed = Embed(\n title=f\"Info of {ctx.guild.name} Server\",\n color=Color.blurple(),\n timestamp=datetime.utcnow(),\n )\n embed.set_footer(text=f\"Requested by {ctx.author.name}\")\n embed.set_thumbnail(url=ctx.guild.icon_url)\n fields = [\n (\"Server ID\", id, True),\n (\"Server Region\", region.capitalize(), True),\n (\"Owner\", owner, True),\n (\"Member Count\", memberCount, True),\n (\"Text Channels\", textChannels, True),\n (\"Voice Channels\", voiceChannels, True),\n (\"Role Count\", roles, True),\n (\"Created on\", guildCreatedate, True),\n ]\n for name, value, inline in fields:\n embed.add_field(name=name, value=value, inline=inline)\n await ctx.send(embed=embed)",
"def info() -> None:",
"def manage_info():",
"def do_info (self, line) :\n\t\tprint\n\t\tprint get_info_string( self.__image )\n\t\tprint",
"def testGetInfoRemote(self):\n self.stream_start(mode='client',\n plugins=['xep_0030'])\n\n events = set()\n\n def handle_disco_info(iq):\n events.add('disco_info')\n\n\n self.xmpp.add_event_handler('disco_info', handle_disco_info)\n\n\n self.xmpp.wrap(self.xmpp['xep_0030'].get_info('user@localhost', 'foo'))\n self.wait_()\n\n self.send(\"\"\"\n <iq type=\"get\" to=\"user@localhost\" id=\"1\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"foo\" />\n </iq>\n \"\"\")\n\n self.recv(\"\"\"\n <iq type=\"result\" to=\"tester@localhost\" id=\"1\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"foo\">\n <identity category=\"client\" type=\"bot\" />\n <feature var=\"urn:xmpp:ping\" />\n </query>\n </iq>\n \"\"\")\n\n self.assertEqual(events, {'disco_info'},\n \"Disco info event was not triggered: %s\" % events)",
"def get_dicom_info_from_description(dicom_object, return_extra=False, sop_class_name=\"UNKNOWN\"):\n try:\n dicom_sop_class_name = dicom_object.SOPClassUID.name\n except AttributeError:\n logger.warning(\"Could not find DICOM SOP Class UID, using %s.\", sop_class_name)\n dicom_sop_class_name = sop_class_name\n\n if \"Image\" in dicom_sop_class_name:\n # Get the modality\n image_modality = dicom_object.Modality\n logger.info(\" Image modality: %s\", image_modality)\n\n if image_modality == \"CT\":\n # There is typically not much extra information\n # At the moment, we do not return anything for CT imaging\n if return_extra:\n try:\n protocol_name = dicom_object.ProtocolName\n\n if protocol_name != \"\":\n return re.sub(r\"[^\\w]\", \"_\", protocol_name).upper()\n except AttributeError:\n logger.warning(\" Could not find ProtocolName\")\n\n return \"\"\n\n elif image_modality == \"MR\":\n # Not much consistency, but we can get the protocol name\n try:\n protocol_name = re.sub(r\"[^\\w]\", \"_\", dicom_object.ProtocolName).upper()\n except AttributeError:\n logger.warning(\" Could not find ProtocolName\")\n protocol_name = \"\"\n\n try:\n sequence_name = re.sub(r\"[^\\w]\", \"_\", dicom_object.SequenceName).upper()\n except AttributeError:\n logger.warning(\" Could not find SequenceName\")\n sequence_name = \"\"\n\n try:\n series_description = re.sub(r\"[^\\w]\", \"_\", dicom_object.SeriesDescription).upper()\n except AttributeError:\n logger.warning(\" Could not find SequenceName\")\n series_description = \"\"\n\n combined_name = \"_\".join([protocol_name, sequence_name, series_description])\n\n while \"__\" in combined_name:\n combined_name = combined_name.replace(\"__\", \"_\")\n\n if protocol_name != \"\" and not return_extra:\n return protocol_name\n\n else:\n return combined_name\n\n elif image_modality == \"PT\":\n # Not much experience with this\n # We can search through the corrections applied\n # Return whether or not attentuation is applied\n\n try:\n corrections = dicom_object.CorrectedImage\n except AttributeError:\n corrections = \"NONE\"\n\n if \"ATTN\" in corrections:\n return \"AC\"\n else:\n return \"NAC\"",
"async def device_info(request):\n textx = await request.get_reply_message()\n codename = request.pattern_match.group(1)\n if codename:\n pass\n elif textx:\n codename = textx.text\n else:\n await edit_or_reply(request, \"`Usage: .device <codename> / <model>`\")\n return\n data = json.loads(\n get(\n \"https://raw.githubusercontent.com/androidtrackers/\"\n \"certified-android-devices/master/by_device.json\"\n ).text\n )\n results = data.get(codename)\n if results:\n reply = f\"**Search results for {codename}**:\\n\\n\"\n for item in results:\n reply += (\n f\"**Brand**: `{item['brand']}`\\n\"\n f\"**Name**: `{item['name']}`\\n\"\n f\"**Model**: `{item['model']}`\\n\\n\"\n )\n else:\n reply = f\"`Couldn't find info about {codename}!`\\n\"\n await edit_or_reply(request, reply)",
"def cln_info(record):\n try:\n significance = re.search(r\"(CLNSIG=)([A-Za-z_]+)\", record[7]).group(2)\n except:\n significance = \"clnSigNA\"\n\n try:\n disease = re.search(r\"(CLNDN=)([A-Za-z0-9_,-]+)\", record[7]).group(2)\n except:\n disease = \"diseaseNA\"\n\n return significance + \"|\" + disease",
"def _get_information(self):\n pass",
"def dish_get_info() -> Dish:\r\n return Dish(input(\"Please enter the dish's name: \"),\r\n float(input(\"Please enter the price of the dish: \")),\r\n int(input(\"Please enter the number of calories of the dish: \")))",
"def _query_info(self, entities: List[str]):\n self.player.respond(entities)"
]
| [
"0.59908015",
"0.5874965",
"0.56763387",
"0.56491685",
"0.5562806",
"0.551351",
"0.5379382",
"0.5369016",
"0.5272622",
"0.52676773",
"0.52373207",
"0.5186972",
"0.5169449",
"0.51482904",
"0.5144639",
"0.5129233",
"0.51241094",
"0.5123439",
"0.509425",
"0.50923896",
"0.508306",
"0.5082522",
"0.50779074",
"0.507158",
"0.50410646",
"0.49976268",
"0.49815798",
"0.49732253",
"0.49608323",
"0.49451295"
]
| 0.70154274 | 0 |
Handle a discoitems query. | def __disco_items(self,iq):
q=iq.get_query()
if q.hasProp("node"):
node=from_utf8(q.prop("node"))
else:
node=None
items=self.disco_get_items(node,iq)
if isinstance(items,DiscoItems):
resp=iq.make_result_response()
self.__logger.debug("Disco-items query: %s preparing response: %s with reply: %s"
% (iq.serialize(),resp.serialize(),items.xmlnode.serialize()))
resp.set_content(items.xmlnode.copyNode(1))
elif isinstance(items,Stanza):
resp=items
else:
resp=iq.make_error_response("item-not-found")
self.__logger.debug("Disco-items response: %s" % (resp.serialize(),))
self.stream.send(resp) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def query_items_handler(query):\n items = getItemsByName(query)\n return jsonify(items=[i.serialize for i in items])",
"def query_items(self, request, params, payload):\n return util.ndb_query_from_values(self.model, params).fetch()",
"def get_items_for_query(self, query_str):\n raise NotImplementedError()",
"async def _fetch_data(self, ctx: commands.Context, query: str):\n params = {\n \"query\": query,\n \"maxResults\": 10,\n \"sort\": \"FavoritedTimes\",\n \"preferAccurateMatches\": \"true\",\n \"nameMatchMode\": \"Words\",\n \"fields\": \"Artists,Lyrics,Names,ThumbUrl\",\n }\n headers = {\n \"User-Agent\": f\"Red-DiscordBot/{red_version} Fixator10-cogs/VocaDB/{self.__version__}\"\n }\n try:\n async with self.session.get(BASE_API_URL, params=params, headers=headers) as resp:\n if resp.status != 200:\n return f\"https://http.cat/{resp.status}\"\n result = await resp.json()\n except asyncio.TimeoutError:\n return \"Request timed out\"\n\n all_items = result.get(\"items\")\n if not all_items:\n return None\n\n filtered_items = [x for x in all_items if x.get(\"lyrics\")]\n if not filtered_items:\n return None\n\n if len(filtered_items) == 1:\n return filtered_items[0]\n\n items = \"\\n\".join(\n f\"**`[{i}]`** {x.get('defaultName')} - {x.get('artistString')}\"\n f\" (published: {self._parse_date(x.get('publishDate'))})\"\n for i, x in enumerate(filtered_items, start=1)\n )\n\n prompt = await ctx.send(\n f\"Found below **{len(filtered_items)}** result(s). Pick one in 60 seconds:\\n\\n{items}\"\n )\n\n def check(msg: discord.Message) -> bool:\n return bool(\n msg.content.isdigit()\n and int(msg.content) in range(len(filtered_items) + 1)\n and msg.author.id == ctx.author.id\n and msg.channel.id == ctx.channel.id\n )\n\n try:\n choice = await self.bot.wait_for(\"message\", timeout=60.0, check=check)\n except asyncio.TimeoutError:\n choice = None\n\n if choice is None or choice.content.strip() == \"0\":\n with contextlib.suppress(discord.NotFound, discord.HTTPException):\n await prompt.edit(content=\"Cancelled.\", delete_after=5.0)\n return None\n\n choice = int(choice.content.strip()) - 1\n with contextlib.suppress(discord.NotFound, discord.HTTPException):\n await prompt.delete()\n return filtered_items[choice]",
"def disco_get_items(self,node,iq):\n to=iq.get_to()\n if to and to!=self.jid:\n return iq.make_error_response(\"recipient-unavailable\")\n if not node and self.disco_items:\n return self.disco_items\n return None",
"async def get_item_data(self, ref, db):\n # If items_url is empty, treat ref as URL\n url = self.items_url.format(ref) or ref\n response = await self.asession.get(url)\n\n if response.status_code == 404:\n return self.log.debug(f\"Item {ref} doesn't exist\")\n\n try:\n # Raise for other response failures\n response.raise_for_status()\n\n # Add item to the db\n self.process_item_data(db, ref, response)\n\n self.log.debug(f'Got item {ref}')\n except Exception:\n e = traceback.format_exc()\n self.log.error(f'{e} (item {ref}, status {response.status_code})')",
"async def get_item(\n request: Request,\n response: Response,\n item_id: int,\n db: SAConnection = Depends(get_postgresql_connection)\n):\n cached_item = await request.app.extra['cache'].get_cache_item(item_id=item_id)\n if cached_item:\n return cached_item\n if db is None:\n response.status_code = 503\n return ResponseModel(result='Service unavailable')\n q = items.select().where(items.c.id == item_id)\n item = await db.fetchrow(query=q)\n if item is not None:\n item = Item(**item)\n await request.app.extra['cache'].set_cache_item(item=item)\n return item\n else:\n response.status_code = 404",
"def process_item_data(self, db, ref, response):\n raise Exception(\"To be implemented\")",
"async def get_items(\n request: Request,\n response: Response,\n db: SAConnection = Depends(get_postgresql_connection)\n):\n if db is None:\n response.status_code = 503\n return ResponseModel(result='Service unavailable')\n q = items.select()\n result = await db.fetch(query=q)\n items_list = [Item(**item) for item in result]\n for item in items_list:\n await request.app.extra['cache'].set_cache_item(item=item)\n return items_list",
"def query_items(self, items_params):\n username, password, api_key, max_items_to_return = SettingsOps.get_settings()\n items_runnable = ItemRunnable(username, password, api_key, items_params)\n items_runnable.item_object.task_complete.connect(self.on_new_items)\n self.init_progress_bar()\n self.search_thread_pool.start(items_runnable)",
"def query(self, query):",
"def process_item(self, item, spider):\n\t\tif isinstance(item, DatabaseItem):\n\t\t\t# run db query in thread pool\n\t\t\tquery = self.dbpool.runInteraction(self._conditional_op, item)\n\t\t\tquery.addErrback(self._database_error, item)\n\n\t\treturn item",
"def handleQuery(self, query) -> None: # noqa\n results = []\n\n try:\n query_str = query.string.strip()\n\n # too small request - don't even send it.\n if len(query_str) < 2:\n keys_monitor.reset()\n return\n\n if len(query_str.split()) > 1:\n # pydictionary or synonyms.com don't seem to support this\n query.add(\n v0.Item(\n id=md_name,\n icon=[icon_path],\n text=\"A term must be only a single word\",\n actions=[],\n )\n )\n return\n\n # determine if we can make the request --------------------------------------------\n keys_monitor.report()\n if keys_monitor.triggered():\n results.extend(get_items_for_word(query, query_str))\n\n if not results:\n query.add(\n 0,\n v0.Item(\n id=md_name,\n icon=[icon_path],\n text=\"No results.\",\n actions=[],\n ),\n )\n\n return\n else:\n query.add(results)\n\n except Exception: # user to report error\n print(traceback.format_exc())\n query.add(\n v0.Item(\n id=md_name,\n icon=[icon_path],\n text=\"Something went wrong! Press [ENTER] to copy error and report it\",\n actions=[\n ClipAction(\n f\"Copy error - report it to {md_url[8:]}\",\n f\"{traceback.format_exc()}\",\n )\n ],\n ),\n )",
"def handleQuery(self,query):\n results = None\n return results",
"def get_item_detail(item_id):\n pass",
"def _handle_query(self, text, loop=False):\n \n # lazy complete\n text = line_add_lazy_return(text)\n text = line_add_lazy_describe(text)\n\n # RUN QUERY\n if not loop:\n res = self.dslobject.query(text)\n return res\n else:\n res = self.dslobject.query_iterative(text)\n return res",
"def get_items_for_catalog(catalog_id):\n pass",
"def _run_query(self, entity, query, items):\n for item in items:\n ok = True\n for field_name, q in query.iteritems():\n field = entity.fields[field_name]\n value = getattr(item, field_name, None)\n if field.is_relation():\n if value is None or not any(q.match(v.name) for v in value):\n ok = False\n break\n elif not q.match(value):\n ok = False\n break\n if ok:\n yield item",
"def testGetItemsRemote(self):\n self.stream_start(mode='client',\n plugins=['xep_0030'])\n\n events = set()\n results = set()\n\n def handle_disco_items(iq):\n events.add('disco_items')\n results.update(iq['disco_items']['items'])\n\n\n self.xmpp.add_event_handler('disco_items', handle_disco_items)\n\n self.xmpp.wrap(self.xmpp['xep_0030'].get_items('user@localhost', 'foo'))\n self.wait_()\n\n self.send(\"\"\"\n <iq type=\"get\" to=\"user@localhost\" id=\"1\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"foo\" />\n </iq>\n \"\"\")\n\n self.recv(\"\"\"\n <iq type=\"result\" to=\"tester@localhost\" id=\"1\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"foo\">\n <item jid=\"user@localhost\" node=\"bar\" name=\"Test\" />\n <item jid=\"user@localhost\" node=\"baz\" name=\"Test 2\" />\n </query>\n </iq>\n \"\"\")\n\n items = {('user@localhost', 'bar', 'Test'),\n ('user@localhost', 'baz', 'Test 2')}\n self.assertEqual(events, {'disco_items'},\n \"Disco items event was not triggered: %s\" % events)\n self.assertEqual(results, items,\n \"Unexpected items: %s\" % results)",
"def by_item(self) -> global___Snippet.PaginatedResponseHandling.ByItem:",
"def by_item(self) -> global___Snippet.PaginatedResponseHandling.ByItem:",
"async def item(self, ctx, raid: Raid):\n\n def check_author(m):\n return m.author == ctx.author\n\n if raid:\n # Raid Found, ask user to start entering items\n await ctx.send(RAID_FOUND.format(raid_id=raid.id,\n raid_event_name=raid.event_name,\n raid_date=raid.date))\n item_log = ''\n while True:\n # Wait for item entry: <Character> <DKP> <Item Name>\n try:\n msg = await ctx.bot.wait_for('message', check=check_author, timeout=60)\n except asyncio.TimeoutError:\n break\n\n response = msg.content.replace(\"<\", \"\").replace(\">\", \"\")\n\n if \"done\" in response.lower():\n break\n\n if \"cancel\" in response.lower():\n return None\n\n parts = response.split()\n if len(parts) < 3:\n await ctx.send(f'The following response `{msg.content}` was not valid. Please try again.')\n continue\n\n character_part = parts[0]\n item_value_part = parts[1]\n item_name_part = parts[2:]\n\n # Validate the character\n character = [c for c in self.characters if c.name.lower() == character_part.lower()]\n if not character:\n await ctx.send(f'The following character `{character_part}` was not valid. Please try again.')\n continue\n character = character[0]\n\n # Validate the item value\n if not item_value_part.isnumeric():\n await ctx.send(f'The following dkp of `{item_value_part}` is not a number. Please try again.')\n continue\n item_value = int(item_value_part)\n\n # TODO validate item_name\n item_name = ' '.join(item_name_part).capitalize()\n\n raid_item = eqdkp.create_raid_item(item_date=raid.date,\n item_name=item_name,\n item_raid_id=raid.id,\n item_value=item_value,\n item_buyers=[character.id])\n if raid_item:\n await ctx.send(\n f\"`{item_name} was successfully charged to {character.name} for {item_value} dkp. \"\n f\"Continue with the next item, or type done.`\")\n item_log += f\"> {item_name.ljust(30)}{character.name.ljust(20)}{str(item_value).rjust(5)} DKP\\n\"\n\n else:\n await ctx.send(f\"`ERROR: {item_name} failed to get entered. Please try again`\")\n\n # Find and edit the raid log in #dkp-entry-log channel\n if len(item_log) > 0:\n async with ctx.typing():\n channel = ctx.bot.dkp_entry_log_channel\n messages = await channel.history(limit=50).flatten()\n messages = [m for m in messages if f\"Raid Entry Log [{raid.id}]\" in m.content]\n if messages:\n message = messages[0]\n items_purchased = f\"\"\"\\n\\n* Items Purchased\\n{item_log}```\"\"\"\n content = message.content[:-3] + items_purchased\n await message.edit(content=content)\n return await ctx.send(f'All done! #{channel.name} has been edited.')\n else:\n return await ctx.send(\n f\"`ERROR: I wasn't able to edit #{channel.name}. Please do so manually.`\")",
"def item(self, id_or_slug):\n if isinstance(id_or_slug, int):\n self.query[\"item\"] = str(id_or_slug)\n elif id_or_slug.isdigit():\n self.query[\"item\"] = id_or_slug\n else:\n if self.query[\"dataset\"] == MOBYGAMES:\n self.query[\"item\"] = f\"slug/{id_or_slug}\"\n if self.query[\"dataset\"] == GAMEFAQS:\n self.query[\"item\"] = id_or_slug.replace(\"/\", \"__\")\n else:\n self.query[\"item\"] = id_or_slug\n if self.get_on_item:\n return self.get()\n else:\n return self",
"def testDynamicItemsJID(self):\n self.stream_start(mode='client',\n plugins=['xep_0030'])\n\n def dynamic_jid(jid, node, ifrom, iq):\n result = self.xmpp['xep_0030'].stanza.DiscoItems()\n result['node'] = node\n result.add_item('tester@localhost', node='foo', name='JID')\n return result\n\n self.xmpp['xep_0030'].set_node_handler('get_items',\n jid='tester@localhost',\n handler=dynamic_jid)\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\" to=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\">\n <query xmlns=\"http://jabber.org/protocol/disco#items\"\n node=\"testing\">\n <item jid=\"tester@localhost\" node=\"foo\" name=\"JID\" />\n </query>\n </iq>\n \"\"\")",
"async def items(self, ctx, search=''):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n inventory = ch.print_inventory(ctx.user_object, search.lower())\n await self.paginate(ctx, inventory)",
"async def root(\n p: str,\n item_id: int = Path(\n ...,\n title=\"The ID of the item to get\",\n ge=1, # constraint greater than or equal to 1\n lt=10 # less than 10\n ),\n size: Optional[float] = Query(None, gt=0., lt=33.),\n q: Optional[int] = Query(None, alias=\"item-query\")\n):\n results = dict(item_id=item_id)\n if q:\n results.update(q=q)\n if size:\n results.update(size=size)\n return results",
"def read_item(\n db: Session = Depends(deps.get_db),\n item: models.Item = Depends(deps.get_owned_item_by_id),\n current_user: schemas.UserInDB = Depends(deps.get_current_active_user),\n) -> Any:\n return item",
"def get_item(self, item_id): # pragma: no cover\n raise NotImplementedError",
"def handle_items(data, user_to_check):\n for item in data:\n handle_item(item, user_to_check)",
"def cmsQuery(self, key, *items):\n params = [key]\n params += items\n \n return self.execute_command(self.CMS_QUERY, *params)"
]
| [
"0.57164264",
"0.5608247",
"0.5520259",
"0.5432534",
"0.53975445",
"0.53627044",
"0.5322989",
"0.5303249",
"0.5280937",
"0.52668864",
"0.5260443",
"0.5259907",
"0.5255112",
"0.5254149",
"0.5249836",
"0.51780385",
"0.514595",
"0.5127345",
"0.5107591",
"0.5099575",
"0.5099575",
"0.50878227",
"0.5078595",
"0.5074405",
"0.5038915",
"0.5029328",
"0.5000838",
"0.49933398",
"0.49891907",
"0.49735895"
]
| 0.66721976 | 0 |
Handle stream creation event. [may be overriden in derived classes] | def stream_created(self,stream):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_stream(self):\n pass",
"def start_stream(self):\n pass",
"def add_stream_to_event(self,stream):\n assert isinstance(stream,Stream)",
"def process_IN_CREATE(self, event):\n self.ProcessFile(event.name)",
"def __init__(self, stream):\n self.stream = stream",
"def __init__(self, stream):\n self.stream = stream",
"def test_register_stream(self):\n pass",
"def create_stream(self, tag, database, table):\n try:\n logger.info(\"About to create a firehose stream \")\n self.firehose_client.create_delivery_stream(DeliveryStreamName=self.get_stream_name(tag),\n ExtendedS3DestinationConfiguration=self.create_s3_dest_config(tag, database, table))\n except ClientError:\n logger.info(\"The firehose stream has already been created!\\n\")",
"def stream_status_event(self, event):\r\n pass",
"def on_connect(self):\n log.info(\"Stream connected\")",
"def __init__(\n self,\n alias: typing.Optional[str] = None,\n stream_type: typing.Optional[str] = None,\n internal_stream_info: typing.Optional[list] = None,\n access_mode: str = AT_MOST_ONCE,\n ) -> None:\n super().__init__()\n\n if __debug__:\n logger.debug(\"Registering new stream...\")\n\n self.alias = alias\n self.stream_type = stream_type\n self.access_mode = access_mode\n\n # Retrieve registration id\n req = RegisterStreamRequest(\n self.alias,\n self.stream_type,\n self.access_mode,\n internal_stream_info,\n )\n DistroStreamClientHandler.request(req)\n\n req.wait_processed()\n error = req.get_error_code()\n if error != 0:\n raise RegistrationException(error, req.get_error_msg())\n self.id = req.get_response_msg() # pylint: disable=invalid-name",
"def __init__(self, stream, time=None, eventType=None, eventCode=None,\r\n eventValue=None):\r\n self.stream = stream\r\n self.time = time\r\n self.eventType = eventType\r\n self.eventCode = eventCode\r\n self.eventValue = eventValue",
"def start(self):\n\t\tself.stream.start_stream()",
"def on_created(self, event):\n print(\"Created\")\n time.sleep(5)\n self.moveFile(event.src_path)",
"def handle_stream(self, stream, address):\n r = Participant(stream, self.db)\n r.wait_for_headers()",
"def stream_call(self):\n pass",
"def request_received(self, event):\n log.debug(\"request received, stream %s\", event.stream_id)",
"def on_created(self, event):\n\n # the absolute path of the event file/folder\n abs_path = event.src_path\n # replace the root path with a '.' to build a relative path to be sent to server\n relative_event_path = abs_path.replace(self.root_path, \".\")\n\n # retrieve event type and the flag for directory/folder\n event_type = event.event_type\n is_directory = event.is_directory\n\n # only propagate changes if there is a connection with the server\n if self.protocol.connected:\n self.protocol.send_event(event_type, is_directory, relative_event_path)\n else:\n logging.warning(\"Connection with server has not been established, 'create' changes will not be propagated.\")",
"def create_stream(self, name_or_addr, mode=MODE.THREED, exinfo=None):\n mode = mode | MODE.CREATESTREAM\n return self.create_sound(name_or_addr, mode, exinfo)",
"def contentsCreationStarted(self, *args, **kwargs): # real signature unknown\n pass",
"def handle_stream(self, stream, address):\n conn = RPCServerConnection(stream)\n self._connections.add(conn)\n conn.start_serving(self)",
"def create_new_event(self):\n pass",
"def process_IN_CREATE(self, event):",
"def handle_stream(self, stream, address):\n logger.info('Incoming connection from %r', address)\n client = Connection(stream, address, server=self)\n self.clients[address] = (client)",
"def run(self, stream):\n pass",
"def __init__(self, stream_id):\n self.stream_id = stream_id\n self._stream = None",
"def process_IN_CREATE(self, event):\n try:\n if self.checks(event):\n if ListenerContainer.is_syncing and not event.pathname[:2] == '.#':\n if event.dir:\n ListenerContainer.add_watch(event.pathname)\n ListenerContainer.client.mkdir(event.pathname)\n else:\n count = 0\n while True: # This may have an unending loop\n try:\n ListenerContainer.client.upload(event.pathname)\n break\n except SocketError or error_reply:\n reset()\n else:\n timer = now()\n if event.dir:\n ListenerContainer.add_watch(event.pathname)\n x = [timer, 'MKDIR', event.pathname]\n else:\n x = [timer, 'UPLOAD', event.pathname]\n ListenerContainer.sync_db.quick_push(x)\n except:\n reset()",
"def __init__(self, stream):\n self.stream = stream\n self.queue = Queue()\n self.start_thread()",
"def init_stream_handler(\n self, \n logger, \n loop, \n netconf_ip, \n netconf_port,\n statistics,\n xml_to_json_translator):\n self._logger = logger\n self._asyncio_loop = loop\n self._encoding = \"xml\"\n self._netconf_ip = netconf_ip\n self._netconf_port = netconf_port\n self._stat = statistics\n self._xml_to_json_translator = xml_to_json_translator",
"def _start_event_stream(self):\r\n\r\n # Register with an event queue, which will be used as event source:\r\n self._event_queue = self._call_factory(\"subscribe\")\r\n if self._event_queue is None:\r\n self.logger.debug(\"SseHTTPRequestHandler(Thread-%s): no queue, \"\r\n \"stopping this thread\",\r\n threading.current_thread().ident)\r\n # As per http://dev.w3.org/html5/eventsource/, a response code\r\n # of 204 tells the browser not to reconnect:\r\n self.send_response(204)\r\n return\r\n self.logger.debug(\"SseHTTPRequestHandler(Thread-%s): registered queue, \"\r\n \"start sending events\", threading.current_thread().ident)\r\n\r\n # Send HTTP headers:\r\n self.send_response(200)\r\n self.send_header(\"Content-type\", \"text/event-stream\")\r\n self.end_headers()\r\n\r\n # Start event serving loop:\r\n self._send_events()"
]
| [
"0.7602492",
"0.7028708",
"0.6454284",
"0.64243484",
"0.63340473",
"0.63340473",
"0.63292897",
"0.62247956",
"0.61535805",
"0.6152194",
"0.61246437",
"0.60779375",
"0.60511464",
"0.6039878",
"0.60376835",
"0.6023824",
"0.6018856",
"0.60125256",
"0.6009551",
"0.59860206",
"0.59710634",
"0.5938939",
"0.591245",
"0.5901415",
"0.5881305",
"0.58506334",
"0.5794779",
"0.5786813",
"0.5780044",
"0.5740262"
]
| 0.83577925 | 0 |
Handle stream closure event. [may be overriden in derived classes] | def stream_closed(self,stream):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stream_status_event(self, event):\r\n pass",
"def request_received(self, event):\n log.debug(\"request received, stream %s\", event.stream_id)",
"def stream_state_changed(self,state,arg):\n pass",
"def stream_call(self):\n pass",
"def process_IN_CLOSE_WRITE(self, event):",
"def handle_stream_client(self, event):\n try:\n while True:\n client_req = self.receive_streaming_msg()\n self.choose_action(client_req[ZERO], client_req[ONE:], event)\n except socket.error as e:\n print('stream', e)",
"def process(self, event):\n pass",
"def stream_created(self,stream):\n pass",
"def process_event(self, event):\r\n pass",
"def event_in_cb(self, msg):\n self.event = msg.data",
"def stream_changed(self, uri):\n pass",
"def __streamHandler__(signum, frame):\n raise Exception(\"end of time\")",
"def run(self, stream):\n pass",
"def data_received(self, event):\n stream_id = event.stream_id\n\n log.debug(\"data received on stream %s: %s...\", stream_id, event.data[:100])\n receive_stream = self.receive_streams.get(stream_id)\n if receive_stream is None:\n try:\n self.conn.reset_stream(stream_id, error_code=ErrorCodes.PROTOCOL_ERROR)\n except StreamClosedError:\n pass\n return\n\n receive_stream.write(event.data)\n self.conn.acknowledge_received_data(event.flow_controlled_length, stream_id)",
"def on_connection_close(self):\n self._stat.http_stream_close += 1\n self._logger.info(\"HTTP EventStream %s closed\", self)\n self.finish()\n asyncio.async(self._nc_handler.close(), loop=self._asyncio_loop)",
"def consume(self, handler) -> None:\n pass # pragma: no cover",
"def doEvent(self, source):\n pass",
"def intercept_stream(self, request_or_iterator, servicer_context,\n server_info, handler):\n raise NotImplementedError()",
"def outReadEvent(self, readBuffer):\r\n pass",
"def onRecv(self, data):\n self.stream += data\n while self.handleStream(): pass",
"def on_event_finished(self, event):",
"def stream_ended(self, event):\n log.debug(\"stream ended, stream %s\", event.stream_id)\n receive_stream = self.receive_streams.pop(event.stream_id, None)\n if receive_stream:\n receive_stream.close()",
"def ReceiveStreamedEvents(self, request, context):\n print 'got ReceiveStreamedEvents request'\n while 1:\n if ShutDown.stop:\n break\n yield self.get_next_event()",
"def process_IN_CLOSE_NOWRITE(self, event):",
"def process_IN_CLOSE_WRITE(s, event):\n s.doReload(event)",
"def event_queue_proc(self,event):\r\n event()",
"def _handle_connection(self, connection, address):\n try:\n stream = IOStream(connection, io_loop=self.io_loop, max_buffer_size=self.max_buffer_size)\n self.handle_stream(stream, address)\n except Exception:\n app_log.error(\"Error in connection callback\", exc_info=True)",
"def process_IN_OPEN(self, event):",
"def stream():\n return flask.Response(event_stream(flask.request.access_route[0]),\n mimetype='text/event-stream')",
"def on_data(self, session, byte_data):\n pass"
]
| [
"0.68242705",
"0.6463266",
"0.6414683",
"0.6398063",
"0.6271539",
"0.62169015",
"0.60852075",
"0.6076142",
"0.6034561",
"0.59915805",
"0.59613615",
"0.5934897",
"0.5905793",
"0.5898639",
"0.58615917",
"0.58546025",
"0.58373463",
"0.58128047",
"0.5790825",
"0.5786417",
"0.5744899",
"0.57443523",
"0.5731825",
"0.5731687",
"0.5720286",
"0.57041705",
"0.5701127",
"0.56976974",
"0.5677232",
"0.5649431"
]
| 0.6526973 | 1 |
Handle a stream error received. [may be overriden in derived classes] | def stream_error(self,err):
self.__logger.debug("Stream error: condition: %s %r"
% (err.get_condition().name,err.serialize())) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def on_request_error(self, status_code):\n log.error(\"Stream encountered HTTP error: %d\", status_code)",
"def on_connection_error(self):\n log.error(\"Stream connection has errored or timed out\")",
"def _handle_error(self, soc):\n err_string = \"socket error\"\n if soc in self._reading:\n err_string += (\" with '%s' read\" % self._reading[soc])\n if soc in self._writing:\n err_string += (\" with '%s' still to write\" % self._writing[soc])\n self._log_error(err_string)\n self._cleanup(soc)",
"def handle_err(self):\n pass",
"def handle_error(self):\n self.cmd_channel.debug(\"DTPHandler.handle_error()\")\n try:\n raise\n # if error is connection related we provide a detailed\n # information about it\n except socket.error, err:\n if err[0] in errno.errorcode:\n error = err[1]\n else:\n error = \"Unknown connection error\"\n # an error could occur in case we fail reading / writing\n # from / to file (e.g. file system gets full)\n except EnvironmentError, err:\n error = _strerror(err)\n except:\n # some other exception occurred; we don't want to provide\n # confidential error messages to user so we return a\n # generic \"unknown error\" response.\n logerror(traceback.format_exc()) \n error = \"Unknown error\"\n self.cmd_channel.respond(\"426 %s; transfer aborted.\" %error)\n self.close()",
"def on_error(self, status):\n print('The stream ended with status error:' + status)\n self.producer.stop()\n return False",
"def on_exception(self, exception):\n log.exception(\"Stream encountered an exception\")",
"def __streamHandler__(signum, frame):\n raise Exception(\"end of time\")",
"def errReceived(self, data):\n log.msg(\"Error output from process: \" + data,\n isError=True)",
"def tcp_error(self, flow: mitmproxy.tcp.TCPFlow):",
"def error(self, handler):\n pass",
"def received_error(self, data: Data, source: tuple, destination: tuple):\n pass",
"def response_received(self, event):\n super().response_received(event)\n\n stream_id = event.stream_id\n response_stream = self.receive_streams.get(stream_id)\n if response_stream is None:\n self.conn.reset_stream(stream_id, error_code=ErrorCodes.PROTOCOL_ERROR)\n return\n\n headers = response_stream.headers\n\n if int(headers.get(\"grpc-status\", 0)) > 0:\n error = GrpcError.from_headers(headers)\n response_stream.close(error)\n del self.receive_streams[stream_id]",
"def error_handler(num, err):\n print(\"Error in input {}\".format(num))\n err = err.decode()\n raise Exception(err)",
"def error_received(self, exc):\n print('Error received:', exc)",
"def errReceived(self, data):\n log.msg('err: %s' % data)",
"def testStreamParseError(self):\n\n def _testStreamError(res):\n self.assertEqual(True, isinstance(res.value, httpb_client.HTTPBNetworkTerminated))\n self.assertEqual(res.value.body_tag.getAttribute('condition', None), 'remote-connection-failed')\n\n def _failStreamError(res):\n self.fail('Expected a remote-connection-failed error')\n\n def _testSessionCreate(res):\n self.sid = res[0]['sid']\n self.server_protocol.triggerInvalidXML()\n return self.send().addCallbacks(_failStreamError, _testStreamError)\n\n return self.proxy.connect(self.get_body_node(connect=True)).addCallback(_testSessionCreate)",
"def error(self, flow: mitmproxy.http.HTTPFlow):",
"def handle_error(self, data, **kwargs):\n logger.log_err(str(data))",
"def on_error(self, status_code, data):\n\t\tprint(\"error_code: \",status_code)",
"def error(self, flow: mitmproxy.http.HTTPFlow):\n pass",
"def error(self, flow: mitmproxy.http.HTTPFlow):\n pass",
"def stream_closed(self,stream):\n pass",
"def analyzeproblem(self,whichstream_): # 3\n if not isinstance(whichstream_,streamtype): raise TypeError(\"Argument whichstream has wrong type\")\n res = self.__obj.analyzeproblem(whichstream_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)",
"def error_handler(e):\n logging.error('error_handler for socketio. An error has occurred: ' + str(e))",
"def error(self, error):\n pass",
"def handle_err(self, err, msg):\n assert \"BAD:\" in msg.value().decode('utf-8')\n assert err is not None\n self.remaining -= 1",
"def on_closed(self, response):\n log.error(\"Stream connection closed by Twitter\")",
"def onError(self, stanza):\n errorNode = stanza.get_error()\n if self.verbose:\n print( \"error type = %s\"%errorNode.get_type() )\n print( \"error message = %s\"%errorNode.get_message() )\n self.disconnect()\n raise RuntimeError",
"def handle_error(self):\n self.cmd_channel.debug(\"PassiveDTP.handle_error()\")\n logerror(traceback.format_exc())\n self.close()"
]
| [
"0.7221695",
"0.69530296",
"0.68626505",
"0.6637434",
"0.6482486",
"0.6361655",
"0.63598186",
"0.6275657",
"0.62751657",
"0.6272131",
"0.62548524",
"0.62074804",
"0.6178833",
"0.6163628",
"0.6155807",
"0.61543494",
"0.61513495",
"0.6131373",
"0.6104614",
"0.60864156",
"0.60470945",
"0.60470945",
"0.6006313",
"0.59621656",
"0.5946389",
"0.59257865",
"0.5914463",
"0.59131604",
"0.5901393",
"0.58016384"
]
| 0.70682997 | 1 |
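
A minimal sketch of how the stream_error hook in the entry above is typically used: a derived client overrides it and reacts to the error condition. The JabberClient base class and its import path are assumptions; the two accessors on err are the ones the default implementation above already uses.

from pyxmpp.jabber.client import JabberClient   # assumed base class / import path

class VerboseClient(JabberClient):
    def stream_error(self, err):
        # React to the parsed stream error; get_condition() and serialize()
        # are the calls used by the default handler in the entry above.
        print("stream error condition: %s" % err.get_condition().name)
        print("raw error element: %s" % err.serialize())
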
Handle a stream state change. [may be overridden in derived classes] | def stream_state_changed(self,state,arg):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __stream_state_change(self,state,arg):\n self.stream_state_changed(state,arg)\n if state==\"fully connected\":\n self.connected()\n elif state==\"authenticated\":\n self.authenticated()\n elif state==\"authorized\":\n self.authorized()\n elif state==\"disconnected\":\n self.state_changed.acquire()\n try:\n if self.stream:\n self.stream.close()\n self.stream_closed(self.stream)\n self.stream=None\n self.state_changed.notify()\n finally:\n self.state_changed.release()\n self.disconnected()",
"def stream_state_changed(self,state,arg):\n if opts.verbose:\n print \"*** State changed: %s %r ***\" % (state,arg)\n else:\n pass",
"def stream_changed(self, uri):\n pass",
"def stream_status_event(self, event):\r\n pass",
"def state_changed(self, oldstate, newstate, event, *args, **kwargs):",
"def state_changed(self, oldstate, newstate, event, *args, **kwargs):",
"def StatusChanged(self, state, info):\n pass",
"def _state_message_received(self, msg: ReceiveMessage) -> None:\n try:\n self._state = int(msg.payload)\n self.async_write_ha_state()\n except ValueError:\n pass",
"def __state_cb(self, data):\n self.state = data",
"def on_state_change(self, new_state):\n self.state = new_state",
"def change_state(self, timestamp, state):\n\t\tself.timestamp = timestamp\n\t\tself.state = state",
"def stream_closed(self,stream):\n pass",
"def state_changed(self, old_state, new_state, target_state):\n pass",
"def printstatechange(self, event):\n print('printstatechange; event: %s, %s->%s' % (event.event, event.src, event.dst))\n try:\n self.mqttclient.publish(\"{}/{}/events\".format(self.config['MQTT']['TOPICBASE'],\n self.config['CLIENTID']),\n event.event)\n self.mqttclient.publish(\"{}/{}/state\".format(self.config['MQTT']['TOPICBASE'],\n self.config['CLIENTID']),\n event.dst)\n except:\n pass",
"def _state_cb(self, msg):\n if self.current_mode == '':\n self.current_mode = msg.mode\n self.state = msg",
"def handle_input(self):\n difference = self.check_state()\n if not difference:\n return\n self.events = []\n self.handle_new_events(difference)\n self.update_timeval()\n self.events.append(self.sync_marker(self.timeval))\n self.write_to_pipe(self.events)",
"def sync_state_changed(self, state):\n return",
"def _update_state(self) -> None:\n raise NotImplementedError(\"\")",
"def __change_state(self, state):\n self.state = state",
"def on_dts_state_change(self, state):\n \n switch = {\n rwdts.State.CONFIG: rwdts.State.INIT,\n rwdts.State.INIT: rwdts.State.REGN_COMPLETE,\n rwdts.State.REGN_COMPLETE: rwdts.State.RUN,\n }\n\n handlers = {\n rwdts.State.INIT: self.init,\n rwdts.State.RUN: self.run,\n }\n\n # Transition application to next state\n handler = handlers.get(state, None)\n if handler is not None:\n yield from handler()\n\n # Transition dts to next state\n next_state = switch.get(state, None)\n self.log.info(\"DTS transition from {} -> {}\".format(state, next_state))\n\n if next_state is not None:\n self._dts.handle.set_state(next_state)",
"def onStateChange(self, oldStateID, newStateID):\n pass",
"def on_state_change(self, state):\n return state",
"def test_data_source_soaps_change_stream_post(self):\n pass",
"def process(self, new_state, **args):\n self._state = new_state",
"def stream_created(self,stream):\n pass",
"def state_changed(self, state_changed):\n\n self._state_changed = state_changed",
"def change_status(self):\n message = self.state_frame[0]\n self.on_status_update(message)\n self.state = STATE_READ_LINE",
"def state_callback(self, state, file_ingested):\n self.state_callback_value = state\n self.file_ingested_value = file_ingested",
"def _stateHandler(self, state):\n try:\n if state.getType() == TSMessage.STATUS and self.parent:\n _params = state.getParams()\n if _params.get('main'):\n _descr = _params['main'].split(';')\n\n if _descr[0] == 'starting':\n self.msg_params['prebuf'] = 0\n\n elif _descr[0] == 'prebuf':\n if _descr[1] != self.msg_params.get('prebuf', 0):\n self.msg_params['last_update'] = state.getTime()\n self.msg_params['prebuf'] = _descr[1]\n log.d('_stateHandler: Пытаюсь показать состояние')\n self.parent.showStatus('Пребуферизация {0}'.format(self.msg_params['prebuf']))\n self.parent.player.showStatus('Пребуферизация {0}'.format(self.msg_params['prebuf']))\n\n if time.time() - self.msg_params['last_update'] >= AcePlayer.TIMEOUT_FREEZE:\n log.w('AceEngine is freeze')\n self.autoStop()\n\n elif _descr[0] == 'check':\n log.d('_stateHandler: Проверка {0}'.format(_descr[1]))\n self.parent.showStatus('Проверка {0}'.format(_descr[1]))\n # elif _descr[0] == 'dl':\n # self.parent.showInfoStatus('Total:%s DL:%s UL:%s' % (_descr[1], _descr[3], _descr[5]))\n elif _descr[0] == 'buf':\n # self.parent.showStatus('Буферизация: %s DL: %s UL: %s' % (_descr[1],\n # _descr[5], _descr[7])) @IgnorePep8\n\n if _descr[1] != self.msg_params.get('buf', 0):\n self.msg_params['last_update'] = state.getTime()\n self.msg_params['buf'] = _descr[1]\n # self.parent.player.showStatus('Буферизация {0}'.format(self.msg_params['value']))\n if time.time() - self.msg_params['last_update'] >= AcePlayer.TIMEOUT_FREEZE:\n self.parent.player.showStatus('Пребуферизация {0}'.format(self.msg_params['buf']))\n log.w('AceEngine is freeze')\n self.autoStop()\n # elif _descr[0] == 'dl':\n # if _descr[8] != self.msg_params.get('downloaded', 0):\n # self.msg_params['last_update'] = state.getTime()\n # self.msg_params['downloaded'] = _descr[8]\n # if time.time() - self.msg_params['last_update'] >= 10:\n # log.w('AceEngine is freeze')\n # self.autoStop()\n\n # self.parent.showInfoStatus('Buf:%s DL:%s UL:%s' % (_descr[1], _descr[5], _descr[7]))\n # else:\n # self.parent.showInfoStatus('%s' % _params)\n elif state.getType() in (TSMessage.RESUME, TSMessage.PAUSE, TSMessage.START):\n self.msg_params['value'] = 0\n # self.msg_params['downloaded'] = 0\n\n elif state.getType() == TSMessage.EVENT:\n if state.getParams() == 'getuserdata':\n self._send_command('USERDATA [{{\"gender\": {0}}} {{\"age\": {1}}}]'.format(\n utils.str2int(defines.GENDER) + 1,\n utils.str2int(defines.AGE) + 1))\n elif state.getParams().startswith('showdialog'):\n _parts = state.getParams().split()\n self.parent.showStatus('{0}: {1}'.format(unquote(_parts[2].split('=')[1]),\n unquote(_parts[1].split('=')[1])))\n elif state.getType() == TSMessage.ERROR:\n self.parent.showStatus(state.getParams())\n\n elif state.getType() == TSMessage.STOP:\n self.waiting.abort.set()\n\n elif state.getType() == TSMessage.STATE:\n _params = state.getParams()\n _param = utils.str2int(_params)\n if _param == 0:\n self.waiting.abort.set()\n\n except Exception as e:\n log.e('_stateHandler error: \"{0}\"'.format(uni(e)))\n finally:\n try:\n if self.waiting.msg == state.getType():\n self.waiting.msg = state\n self.waiting.event.set()\n\n except Exception as e:\n log.e('_stateHandler error: \"{0}\"'.format(uni(e)))",
"def test_data_source_soaps_change_stream_get(self):\n pass"
]
| [
"0.8110733",
"0.79275924",
"0.70709205",
"0.6905514",
"0.6596064",
"0.6596064",
"0.6517958",
"0.62820935",
"0.6261136",
"0.6224113",
"0.6183899",
"0.616299",
"0.6083104",
"0.6066408",
"0.60274255",
"0.6026631",
"0.60072976",
"0.6000606",
"0.59990644",
"0.59986216",
"0.59928685",
"0.59900117",
"0.59695256",
"0.59686756",
"0.59627014",
"0.59530574",
"0.594952",
"0.5930067",
"0.5868627",
"0.5836389"
]
| 0.87195003 | 0 |
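
The first negative above shows the internal dispatcher that feeds this hook with states such as "fully connected", "authenticated", "authorized" and "disconnected". A sketch of a derived client that simply traces them (base class and import path are assumptions):

from pyxmpp.jabber.client import JabberClient   # assumed base class / import path

class TracingClient(JabberClient):
    def stream_state_changed(self, state, arg):
        # state is one of the strings emitted by the dispatcher, arg carries extra data
        print("*** State changed: %s %r ***" % (state, arg))
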
Handle successful authentication event. A good place to register stanza handlers and disco features. [should be overridden in derived classes] | def authenticated(self):
self.__logger.debug("Setting up Disco handlers...")
self.stream.set_iq_get_handler("query","http://jabber.org/protocol/disco#items",
self.__disco_items)
self.stream.set_iq_get_handler("query","http://jabber.org/protocol/disco#info",
self.__disco_info) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def authentication_hook(self):\n pass",
"def auth_complete(self, *args, **kwargs):\n request_data = self.strategy.request_data()\n\n sso_params = request_data.get(\"sso\")\n sso_signature = request_data.get(\"sig\")\n\n param_signature = hmac.new(\n self.setting(\"SECRET\").encode(\"utf8\"), sso_params.encode(\"utf8\"), sha256\n ).hexdigest()\n\n if not hmac.compare_digest(str(sso_signature), str(param_signature)):\n raise AuthException(\"Could not verify discourse login\")\n\n decoded_params = urlsafe_b64decode(sso_params.encode(\"utf8\")).decode(\"ascii\")\n\n # Validate the nonce to ensure the request was not modified\n response = parse_qs(decoded_params)\n nonce_obj = self.get_nonce(response.get(\"nonce\"))\n if nonce_obj:\n self.delete_nonce(nonce_obj)\n else:\n raise AuthTokenError(self, \"Incorrect id_token: nonce\")\n\n kwargs.update({\"sso\": \"\", \"sig\": \"\", \"backend\": self, \"response\": response})\n return self.strategy.authenticate(*args, **kwargs)",
"def authenticate():\n if request.environ['PATH_INFO'] == \"/notification\":\n user = getUser()\n \n if user is None:\n raise HTTPResponse(body=\"Forbidden\", status=403)\n \n try:\n if authz.login(user):\n logging.info('Login success: %s', user.username)\n return\n except IOError:\n raise HTTPResponse(body=\"Error reading user file\", status=400)\n except Exception as e:\n raise HTTPResponse(body=\"Unexpected error\", status=400)\n \n raise HTTPResponse(body=\"Invalid username or password\", status=401)",
"def handle_connect(self):\n LOGGER.debug(\"server connected\")\n self.append_connect = False\n domain = MOLO_CONFIGS.get_config_object().get('domain', '')\n self.send_dict_pack(\n MoloSocketHelper.molo_auth(CLIENT_VERSION,\n MOLO_CLIENT_APP.hass_context,\n __short_version__, domain),)",
"def process(self, data):\n\t\tif data['action'] == '0x40':\n\t\t\tself.authenticate(data)\n\t\telse:\n\t\t\t# Protocol error\n\t\t\tstack['clients'][self.client_ident].put(1,{'type':'0x000','status':'0x001'})",
"def _authenticate_success(self, avatar):\r\n verifyObject(IRobot, avatar)\r\n verifyObject(IMessageReceiver, avatar)\r\n\r\n self._realm.registerWebsocketProtocol(avatar, self)\r\n self._avatar = avatar\r\n self._assembler.start()",
"async def on_login(self, login_response):\n pass",
"def on_phase_auth_message(self, buf):\n msg = self.__nanojsonrpc_unpack(buf)\n method = msg['method']\n if method == 'auth':\n self.__ws_conn.send(self.__nanojsonrpc_pack('auth', [self.secret]))\n elif method == 'ready':\n self.phase_auth = False\n else:\n self.wsapp.keep_running = False\n raise Error.LogicError(f'Unexpected message: ({msg})!',\n ModuleErrorCode,\n FileErrorCode,\n 12)",
"def authd(self, xmlstream):\n\t\tprint \"authenticated\"\n\t\t\n\t\tpresence = domish.Element(('jabber:client', 'presence'))\n\t\tpresence.addElement('status').addContent('Online')\n\t\txmlstream.send(presence)\n\n\t\t#xmlstream.addObserver('/message', self.debug)\n\t\t#xmlstream.addObserver('/presence', self.debug)\n\t\t#xmlstream.addObserver('twisted.words.xish.xmlstream.STREAM_ERROR_EVENT', self.debug)\n\t\tself.xmlstream = xmlstream\n\t\tself.joinRoom(xmlstream)",
"def auth_handler(self, url, method, timeout, headers, data):\n username = self.username\n password = self.password\n return basic_auth_handler(url, method, timeout, headers, data, username,\n password)",
"def auth(self, user):",
"def authenticate(self):\n # self.qobject.remove_authenticate_signal.emit()\n # self.qobject.authenticate_signal.emit( )\n #if self.app.sync_thread.status != const.STATUS_SYNC:\n # self.app.sync_thread.force_sync()\n change_auth_token( )\n self.data_changed()",
"def on_register(self, response):\n print('You have been registered!')\n self.on_auth(response)",
"def auth():\n pass",
"def auth():\n pass",
"def handleAuth(self, opcode, data, client):\n \n # Get the data the client sent.\n clientUser = data.getString()\n clientPass = data.getString()\n \n # Flag to be send back after serverside auth\n flag = None\n userpass = False\n loginTries = 0 # Not thought out now, will return to it later...\n \n # Get the data from DB\n try:\n # Here we can add the player to the PLAYERS{} by using a player\n # ID or something\n details = []\n details = Database.getAccountData(clientUser, clientPass)\n \n except:\n print \"Can't connected to ACCOUNT DATABASE\"\n \n # Will make some other checks later... this is just good for now..\n if details == None:\n flag = 2\n print \"Player: \", clientUser, \" Doesn't exist! or Incorrect!\"\n loginTries += 1\n \n # Check if the password/username match\n elif clientPass == details[2] and clientUser == details[1]:\n print details\n userpass = True\n self.network.base.PLAYERS[details[0]] = Player(self, details[0], details[1])\n print \"Player: \", details[1], \" Logged in, ID: \", details[0]\n flag = 1\n \n else:\n userpass = False\n print \"Player: \", clientUser, \" login incorrect\"\n loginTries += 1\n flag = 2\n \n # Create buffer\n pkg = PyDatagram()\n \n # Add response\n pkg.addUint16(SMSG_AUTH_RESPONSE)\n \n # Add the flag\n pkg.addUint16(flag)\n \n # Send the packet\n self.network.tcpWriter.send(pkg, client)",
"def authenticate(cls, handler):\n raise NotImplementedError(\"Missing implementation for authenticate\")",
"def auth_complete(self, *args, **kwargs):\n if 'denied' in self.data:\n raise AuthCanceled(self)\n else:\n return super(DailymotionAuth, self).auth_complete(*args, **kwargs)",
"def onSuccess(self, stanza):\n if self.verbose:\n print( \"success\" )\n self.disconnect()\n return True",
"def _check_authentication(self) -> NoReturn:\n if not self.heartbeat():\n self.authenticate()",
"def _on_connection_success(self):\n if self.connect_handler:\n self.connect_handler()",
"def _Authenticate(self):\r\n super(HttpRpcServer, self)._Authenticate()\r\n if self.save_cookies:\r\n StatusUpdate(\"Saving authentication cookies to %s\" % self.cookie_file)\r\n self.cookie_jar.save()",
"def _delegate_authentication(username, password):\n payload = json.dumps({\n 'type': 'normal',\n 'username': username,\n 'password': password\n })\n headers = {'Content-Type': 'application/json'}\n login_response = requests.post(API_URL + \"/auth\", data=payload, headers=headers)\n if login_response.status_code != 200:\n return False\n\n try:\n decoded_response = login_response.json()\n except ValueError as error:\n logger.error(f'Cannot decode Taiga auth response: {error}. Response was: {login_response}')\n return False\n return decoded_response",
"def auth_complete(self, *args, **kwargs):\n self.process_error(self.data)\n params = self.auth_complete_params(self.validate_state())\n\n response = requests.post(self.ACCESS_TOKEN_URL, data=params,\n headers=self.auth_headers())\n if response.status_code == 400:\n raise AuthCanceled(self)\n\n response.raise_for_status()\n\n try:\n response = response.json()\n except (ValueError, KeyError):\n raise AuthUnknownError(self)\n\n response.pop('data')\n self.process_error(response)\n return self.do_auth(response['access_token'], response=response,\n *args, **kwargs)",
"def auth_success(self, msg):\n slot_num = ord(msg[1:2])\n print(\"auth success: slot_num={}\".format(slot_num))\n self.player = Player(slot_num, \"AIBOT1\")\n # Send information about the player to the server\n self.add_message(PlayerInfo(self.player))\n self.add_message(PlayerHPInfo(self.player))\n self.add_message(PlayerManaInfo(self.player))\n self.add_message(PlayerBuffState(self.player))\n # Initialize player inventory\n INV_SIZE = 259\n for inv_slot in range(INV_SIZE):\n stack = 0\n prefix = 0\n item_id = 0\n self.add_message(PlayerInventorySlot(self.player, inv_slot, stack, prefix, item_id))\n # Request world data\n self.add_message(RequestWorldData())",
"def auth_active(hass):\n hass.loop.run_until_complete(\n register_auth_provider(hass, {\"type\": \"homeassistant\"})\n )",
"def authenticate(cls, handler):\n return None",
"def fake_auth_complete(self, strategy):\r\n args = ()\r\n kwargs = {\r\n 'request': strategy.request,\r\n 'backend': strategy.backend,\r\n 'user': None,\r\n 'response': self.get_response_data(),\r\n }\r\n return strategy.authenticate(*args, **kwargs)",
"def _Authenticate(self):\n super(HttpRpcServer, self)._Authenticate()\n if self.save_cookies:\n StatusUpdate(\"Saving authentication cookies to %s\" % self.cookie_file)\n self.cookie_jar.save()",
"def signedOn(self):\n log.info(\"Signed on as %s.\", self.nickname)\n if not self.password:\n # We aren't wating for auth, join all the channels\n self.joinChannels()\n else:\n self.msg(\"NickServ\", \"IDENTIFY %s\" % self.password)"
]
| [
"0.68363607",
"0.6044735",
"0.60170233",
"0.60143876",
"0.60114545",
"0.60049105",
"0.59941304",
"0.5974587",
"0.5890883",
"0.5889594",
"0.5859647",
"0.5840435",
"0.58397806",
"0.58061874",
"0.58061874",
"0.5791992",
"0.57758445",
"0.5771166",
"0.57590616",
"0.5739374",
"0.57324904",
"0.572678",
"0.57152003",
"0.57141507",
"0.57064056",
"0.5687911",
"0.56817853",
"0.56725746",
"0.5663755",
"0.5640198"
]
| 0.62774074 | 1 |
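
Since the default authenticated() above registers the two disco handlers, a derived class normally calls the parent first and then adds its own handlers through the same set_iq_get_handler call. MyClient, the jabber:iq:version namespace and the handler body are illustrative assumptions:

from pyxmpp.jabber.client import JabberClient   # assumed base class / import path

class MyClient(JabberClient):
    def authenticated(self):
        JabberClient.authenticated(self)   # keep the disco#info / disco#items handlers
        # register an extra IQ get handler, here for software-version queries
        self.stream.set_iq_get_handler("query", "jabber:iq:version", self.get_version)

    def get_version(self, iq):
        # illustrative handler body: acknowledge the query with an empty result
        resp = iq.make_result_response()
        self.stream.send(resp)
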
Get discoinfo data for a node. [may be overridden in derived classes] | def disco_get_info(self,node,iq):
to=iq.get_to()
if to and to!=self.jid:
return iq.make_error_response("recipient-unavailable")
if not node and self.disco_info:
return self.disco_info
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __disco_info(self,iq):\n q=iq.get_query()\n if q.hasProp(\"node\"):\n node=from_utf8(q.prop(\"node\"))\n else:\n node=None\n info=self.disco_get_info(node,iq)\n if isinstance(info,DiscoInfo):\n resp=iq.make_result_response()\n self.__logger.debug(\"Disco-info query: %s preparing response: %s with reply: %s\"\n % (iq.serialize(),resp.serialize(),info.xmlnode.serialize()))\n resp.set_content(info.xmlnode.copyNode(1))\n elif isinstance(info,Stanza):\n resp=info\n else:\n resp=iq.make_error_response(\"item-not-found\")\n self.__logger.debug(\"Disco-info response: %s\" % (resp.serialize(),))\n self.stream.send(resp)",
"def get_data(node):\n return node['data']",
"def get_info(node):\n info = {\n 'parser_style': '_',\n 'parser_lang': None,\n 'parser_defaults': None,\n 'convert_style': '_',\n 'convert_from': None,\n 'convert_to': 'html',\n 'convert_defaults': None,\n 'adopt': True,\n 'convert': 'true'\n }\n for att in node:\n info[att] = node[att]\n if info['src'][0] != '/':\n base = os.path.dirname(node.owner.uri_)\n if base != '':\n base += '/'\n info['src'] = '%s%s' % (base, info['src'])\n if info['parser_lang'] is None:\n path = pth.realpath(info['src'])\n name = pth.basename(path)\n name = pth.splitext(name)\n info['parser_lang'] = name[1][1:]\n return info",
"def getInfo():",
"def get_info(self):\n pass",
"def get_info(self):\n pass",
"def get_info(self, info):\r\n pass",
"def get_node_info(cls):\r\n\r\n if hasattr(cls, \"__node_info__\") and cls not in _node_info_warnings:\r\n\r\n utils.get_logger().warn(\"depreciated __node_info__ present in %s, rename to node_info\" \\\r\n \" (this warning will be shown only once)\" % str(cls))\r\n _node_info_warnings.add(cls)\r\n\r\n return cls.__node_info__\r\n else:\r\n return cls.node_info",
"def pcp_node_info(self, nid):\n\n\t\tif self.PCPConnectionStatus() != ConnStateType.OK:\n\t\t\tself.pcp_internal_error('invalid PCP connection')\n\t\t\treturn None\n\n\t\tnode_id = str(nid)\n\n\t\tself._PCPWrite('I'.encode(), 1)\n\t\twsize = self.int_to_bytes(len(node_id) + 1 + 4)\n\t\tself._PCPWrite(wsize, 4)\n\t\tself._PCPWrite(node_id.encode() + NULL, len(node_id) + 1)\n\t\tif self.PCPFlush() < 0:\n\t\t\treturn None\n\t\tif self.Pfdebug:\n\t\t\tself.Pfdebug.write(f'DEBUG: send: tos=\"I\", length={self.bytes_to_int(wsize)}\\n')\n\n\t\treturn self._process_pcp_response('I')",
"def get_metadata_for_node(self, node):\n return self.manager.get_metadata(self, node=node)",
"def info(self):\n return self.nfo",
"def get_info(self):\n return None",
"def get_node_details(self, node):\n node_details = self.parser.find_server_by_ip(node.get('ip')) or \\\n self.parser.find_server_by_hostname(node.get('host'))\n\n return node_details",
"def node_data(self):\n return self.node_data_",
"def get_info(self, key: str) -> TaskInfo:\n return self.task_graph.nodes[key][\"info\"]",
"def info(self, node_uuid):\n if node_uuid is None:\n return None\n uri = '{}/{}'.format(self.store.aroot, node_uuid)\n infos = self.store.actual.resolve(uri)\n if infos is None:\n return None\n return json.loads(infos)",
"def get_info(self):\n return \"TODO !\"",
"def getNodeInfo(self, node, state=None, happy_only=False):\n happy_node_info = self.getNodes()[node]\n node_info = {\"happy\": happy_node_info}\n\n # get extension state including weave\n if not happy_only:\n for i in six.iteritems(self.getExtensionState(state)):\n extState = self.getNodes(i[1])\n if extState and node in extState:\n node_info[i[0]] = extState[node]\n\n return node_info",
"def get_info(self) -> str:\n return self.info",
"def getInfo(self):\n return self.info",
"def get_info(self) -> str:\n raise NotImplementedError()",
"def get_coulomb_info(self):\n return",
"def get_info_by_node(conn, node): \n cur = conn.cursor()\n cur.execute(\"SELECT * FROM Info WHERE NodeID=?\", (node))",
"def getInfo(self):\n return self._info",
"def _get_information(self):\n pass",
"def get_metadata_for_node(self, loadbalancer, node):\n return loadbalancer.get_metadata_for_node(node)",
"def get_data(self, node_id, username, moderator):\n raise NotImplementedError()",
"def get_info(hass: HomeAssistant) -> dict[str, Any] | None:\n return hass.data.get(DATA_INFO)",
"def node_info(self) -> dict:\r\n location_str = f\"{self.location[0]},{str(self.location[1])},{str(self.location[2])}\"\r\n return {\"id\": self.key, \"pos\": location_str}",
"def return_info(self):\n\t\treturn self.info"
]
| [
"0.6871241",
"0.62665915",
"0.61718",
"0.60817903",
"0.5979027",
"0.5979027",
"0.597608",
"0.5964263",
"0.59326667",
"0.59011",
"0.5897912",
"0.58444035",
"0.5842514",
"0.58325905",
"0.5814806",
"0.581295",
"0.5807344",
"0.57974905",
"0.5793282",
"0.57798433",
"0.57316035",
"0.57255244",
"0.57017756",
"0.56528187",
"0.563822",
"0.5637207",
"0.5637169",
"0.5627182",
"0.559574",
"0.5584563"
]
| 0.6782496 | 1 |
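
A sketch of overriding disco_get_info to answer for one dedicated node and defer to the default behaviour otherwise; the base class and the "commands" node name are assumptions. The dispatcher quoted in the negatives accepts either a DiscoInfo or a Stanza as the return value:

from pyxmpp.jabber.client import JabberClient   # assumed base class / import path

class MyClient(JabberClient):
    def disco_get_info(self, node, iq):
        if node == "commands":                  # illustrative node name
            return self.disco_info              # any DiscoInfo instance is accepted
        return JabberClient.disco_get_info(self, node, iq)
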
Get discoitems data for a node. [may be overridden in derived classes] | def disco_get_items(self,node,iq):
to=iq.get_to()
if to and to!=self.jid:
return iq.make_error_response("recipient-unavailable")
if not node and self.disco_items:
return self.disco_items
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_data(node):\n return node['data']",
"def getItemData(itemId):\n return Gw2Spidy._request('item', str(itemId))['result']",
"def __disco_items(self,iq):\n q=iq.get_query()\n if q.hasProp(\"node\"):\n node=from_utf8(q.prop(\"node\"))\n else:\n node=None\n items=self.disco_get_items(node,iq)\n if isinstance(items,DiscoItems):\n resp=iq.make_result_response()\n self.__logger.debug(\"Disco-items query: %s preparing response: %s with reply: %s\"\n % (iq.serialize(),resp.serialize(),items.xmlnode.serialize()))\n resp.set_content(items.xmlnode.copyNode(1))\n elif isinstance(items,Stanza):\n resp=items\n else:\n resp=iq.make_error_response(\"item-not-found\")\n self.__logger.debug(\"Disco-items response: %s\" % (resp.serialize(),))\n self.stream.send(resp)",
"def getNodeRRDData(self,node):\n data = self.connect('get','nodes/%s/rrddata' % (node),None)\n return data",
"def get_inventory(self, node):",
"def __getitem__(self, item):\n return self.get_data(stock=item)",
"def get_data_item(self):\n raise exceptions.NotImplemented",
"def get_data(self, node_id, username, moderator):\n raise NotImplementedError()",
"def node_data(self):\n return self.node_data_",
"def disco_get_info(self,node,iq):\n to=iq.get_to()\n if to and to!=self.jid:\n return iq.make_error_response(\"recipient-unavailable\")\n if not node and self.disco_info:\n return self.disco_info\n return None",
"def GetPyData(self, item):\r\n\r\n return item.GetData()",
"def getData(self, product, variables, attributes, variable, *args):\r\n\r\n data = None\r\n return data",
"def __getitem__(self, item) -> Union[MoleculeDatapoint, List[MoleculeDatapoint]]:\n if self.preload:\n return self.data_ram[item]\n else:\n return self.data[item]",
"def getd(self, node):\n\n return self.daq.getDouble(f'/{self.device_id}/{node}')",
"def __getitem__(self, node):\n\n return self.adj_list[node]",
"def __getitem__(self, item) -> Union[MoleculeDatapoint, List[MoleculeDatapoint]]:\n return self.data[item]",
"def data(self, index):\n return self._itemData",
"def get_data():\n return",
"def get_data():\n pass",
"def data_nodes(self):\n data_nodes = []\n for node in self.nodes:\n if 'datanode' == node.get('infos').get('type'):\n data_nodes.append(node)\n return data_nodes",
"def __getitem__(self, item):\r\n\r\n return self.data.__getitem__(item)",
"def getData(self, taskId:int):\n return self.pool.getData(taskId)",
"def data(self) -> Dict[int, NameItem]:\n return self.__data",
"def get_metadata_for_node(self, node):\n return self.manager.get_metadata(self, node=node)",
"def _get_data(self):\n raise NotImplementedError()",
"def GetDicomFromNode(self,node):\n storageNode=node.GetStorageNode()\n if storageNode is not None: # loaded via drag-drop\n filepath=storageNode.GetFullNameFromFileName()\n else: # loaded via DICOM browser\n instanceUIDs=node.GetAttribute('DICOM.instanceUIDs').split()\n filepath=slicer.dicomDatabase.fileForInstance(instanceUIDs[0])\n Dcm_tag=pydicom.dcmread(filepath)\n return Dcm_tag",
"def __disco_info(self,iq):\n q=iq.get_query()\n if q.hasProp(\"node\"):\n node=from_utf8(q.prop(\"node\"))\n else:\n node=None\n info=self.disco_get_info(node,iq)\n if isinstance(info,DiscoInfo):\n resp=iq.make_result_response()\n self.__logger.debug(\"Disco-info query: %s preparing response: %s with reply: %s\"\n % (iq.serialize(),resp.serialize(),info.xmlnode.serialize()))\n resp.set_content(info.xmlnode.copyNode(1))\n elif isinstance(info,Stanza):\n resp=info\n else:\n resp=iq.make_error_response(\"item-not-found\")\n self.__logger.debug(\"Disco-info response: %s\" % (resp.serialize(),))\n self.stream.send(resp)",
"def get_introspection_data(node_id, processed=True):\n try:\n with session_for_read() as session:\n ref = session.query(model.IntrospectionData).filter_by(\n uuid=node_id, processed=processed).one()\n res = ref['data']\n return res\n except orm_errors.NoResultFound:\n msg = _('Introspection data not found for node %(node)s, '\n 'processed=%(processed)s') % {'node': node_id,\n 'processed': processed}\n raise utils.IntrospectionDataNotFound(msg)",
"def get_data(self):\n raise NotImplementedError(\"Not implemented!\")",
"def get_data(self):\n\n raise NotImplementedError('''\n Must Implement get_data. Call help() for details.\n ''')"
]
| [
"0.633038",
"0.6200804",
"0.6013455",
"0.59789824",
"0.59707445",
"0.5966929",
"0.59421146",
"0.59097546",
"0.56797266",
"0.5665139",
"0.56622565",
"0.56536365",
"0.56274885",
"0.55336416",
"0.55131185",
"0.5499119",
"0.54829735",
"0.54567564",
"0.5450693",
"0.5430541",
"0.5402878",
"0.53968096",
"0.5389633",
"0.5368702",
"0.5364259",
"0.53615135",
"0.53488976",
"0.53402996",
"0.5313922",
"0.5307618"
]
| 0.62508786 | 1 |
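
A complementary sketch for disco_get_items: serve the default item list only for node-less queries and answer anything else with an item-not-found error stanza, which the __disco_items dispatcher quoted in the negatives also accepts (base class name is an assumption):

from pyxmpp.jabber.client import JabberClient   # assumed base class / import path

class MyClient(JabberClient):
    def disco_get_items(self, node, iq):
        if node:
            # returning a Stanza (here an error response) is also handled upstream
            return iq.make_error_response("item-not-found")
        return self.disco_items
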
Interpolation of a staircase function using averaging. This function returns nan outside of the input abscissa range. | def interp1d_stair_aver(x, y): #TODO: deal with the case x not sorted
def f(xp):
yp=np.empty(np.size(xp)-1)
xmod=x[~(np.isnan(x)+np.isnan(y))]
ymod=y[~(np.isnan(x)+np.isnan(y))]
yint=np.cumsum(np.concatenate((np.array([0]),ymod[:-1]*(xmod[1:]-xmod[:-1]))))
g=interp1d(xmod,yint, bounds_error=False, fill_value=np.nan)
# yp=np.where((xp[:-1]>min(xmod))*(xp[1:]<max(xmod)),(g(xp[1:])-g(xp[:-1]))/(xp[1:]-xp[:-1]),np.nan) #Maybe this is suboptimal since we compute twice g(xp[i])
yp=np.where((xp[:-1]>min(xmod))*(xp[1:]<max(xmod)),(g(xp[1:])-g(xp[:-1]))/(xp[1:]-xp[:-1]),np.nan) #Maybe this is suboptimal since we compute twice g(xp[i])
return yp
return f | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def interp1d_stair_aver_withnan(x, y): #TODO: deal with the case x not sorted\n def f(xp):\n xmod=x[~(np.isnan(x)+np.isnan(y))]\n ymod=y[~(np.isnan(x)+np.isnan(y))]\n yp=np.empty(np.size(xp)-1)\n yint=np.cumsum(np.concatenate((np.array([0]),ymod[:-1]*(xmod[1:]-xmod[:-1]))))\n g=interp1d(xmod,yint, bounds_error=False, fill_value=np.nan)\n# yp=np.where((xp[:-1]>min(xmod))*(xp[1:]<max(xmod)),(g(xp[1:])-g(xp[:-1]))/(xp[1:]-xp[:-1]),np.nan) #Maybe this is suboptimal since we compute twice g(xp[i])\n yp=np.where((xp[:-1]>min(xmod))*(xp[1:]<max(xmod)),(g(xp[1:])-g(xp[:-1]))/(xp[1:]-xp[:-1]),np.nan) #Maybe this is suboptimal since we compute twice g(xp[i])\n for i in range(np.size(xp)-1):\n if np.isnan(y[np.where((x>=xp[i])*(x<xp[i+1]))]).all():\n yp[i]=np.nan\n return yp\n\n return f",
"def interpolate_to_amplitude(a):\n a_min = a.min()\n a_max = a.max()\n return np.interp(a, (a_min, a_max), (0, 1))",
"def interpolate_and_average(xs, ys, interop_points=None, confidence_interval=False):\n # Get the xs of shortest curve\n max_min_x = max(x.min() for x in xs)\n min_max_x = min(x.max() for x in xs)\n if interop_points is None:\n # Interop points according to curve with \"least resolution\"\n interop_points = min(x.shape[0] for x in xs)\n\n new_x = np.linspace(max_min_x, min_max_x, interop_points)\n new_ys = []\n\n for old_x, old_y in zip(xs, ys):\n new_ys.append(np.interp(new_x, old_x, old_y))\n\n # Average out\n # atleast_2d for case when we only have one reptition\n new_ys = np.atleast_2d(np.array(new_ys))\n new_y = np.mean(new_ys, axis=0)\n std_y = np.std(new_ys, axis=0)\n\n if confidence_interval:\n interval = 1.96 * (std_y / np.sqrt(len(xs)))\n lower_bound = new_y - interval\n upper_bound = new_y + interval\n return new_x, new_y, std_y, lower_bound, upper_bound\n else:\n return new_x, new_y, std_y",
"def create_interpolate_prior_mean_fun(final_time, prior_mean_raw):\n f_outlet = interp1d(np.arange(0, final_time+1), prior_mean_raw, kind='cubic')\n return f_outlet",
"def average_over_interval(raw_rate, weight_function, intervals):\n\n def averaging_function(t):\n return raw_rate(t) * weight_function(t)\n\n results = np.zeros(len(intervals), dtype=np.float)\n\n for interval_idx in range(len(intervals)):\n start = intervals.start[interval_idx]\n finish = intervals.finish[interval_idx]\n results[interval_idx] = quad(averaging_function, start, finish)[0]\n\n return results",
"def interpolation_matrix(m):\n return np.nanmean(m,axis=1)",
"def atmean(a,limits=None,inclusive=(1,1)):\r\n if a.dtype in [N.int_, N.short,N.ubyte]:\r\n a = a.astype(N.float_)\r\n if limits == None:\r\n return mean(a)\r\n assert type(limits) in [ListType,TupleType,N.ndarray], \"Wrong type for limits in atmean\"\r\n if inclusive[0]: lowerfcn = N.greater_equal\r\n else: lowerfcn = N.greater\r\n if inclusive[1]: upperfcn = N.less_equal\r\n else: upperfcn = N.less\r\n if limits[0] > N.maximum.reduce(N.ravel(a)) or limits[1] < N.minimum.reduce(N.ravel(a)):\r\n raise ValueError, \"No array values within given limits (atmean).\"\r\n elif limits[0]==None and limits[1]<>None:\r\n mask = upperfcn(a,limits[1])\r\n elif limits[0]<>None and limits[1]==None:\r\n mask = lowerfcn(a,limits[0])\r\n elif limits[0]<>None and limits[1]<>None:\r\n mask = lowerfcn(a,limits[0])*upperfcn(a,limits[1])\r\n s = float(N.add.reduce(N.ravel(a*mask)))\r\n n = float(N.add.reduce(N.ravel(mask)))\r\n return s/n",
"def mean(vals):",
"def interp_and_smooth(self, n_points = 11, poly_order = 3, \n pixel = 'centre'):\n \n nrows = self.data_array.attrs['nrows']\n ncols = self.data_array.attrs['ncols']\n data_mtx = np.arange(nrows * ncols).reshape(nrows, ncols)\n if pixel == 'centre': \n loc = np.where(data_mtx == nrows * ncols / 2)\n elif isinstance(pixel, int):\n loc = np.where(data_mtx == pixel)\n elif pixel == 'mean': \n loc = None\n else:\n raise TypeError('\"pixel\" kwarg must be either of type str (\"center\" '\n 'or \"mean\") or int')\n pd_time = pd.to_datetime(self.data_array.time.data)\n n_days = np.array((pd_time - pd_time[0]).days)\n if loc: \n data = self.data_array.data[loc[0], loc[1], :].flatten()\n else:\n data = self.data_array.mean(['x', 'y'])\n valid_idx = np.where(~np.isnan(data)) \n f = interpolate.Akima1DInterpolator(n_days[valid_idx], data[valid_idx])\n interp_series = f(n_days)\n smooth_series = signal.savgol_filter(interp_series, n_points, poly_order, \n mode = \"mirror\")\n df = pd.DataFrame({'data': data, 'data_interp': interp_series,\n 'data_smooth': smooth_series}, index = pd_time.date)\n return df",
"def interpolate_eleMean(model):\n # Get mean of columns (data at the same elevation) without taking int account NaNs\n el_mean = nanmean(model,axis=0)\n #print(el_mean) \n # Find indices for NaNs, and replace them by the column mean\n ind_nan = np.where(np.isnan(model))\n model[ind_nan] = np.take(el_mean,ind_nan[1])\n\n return model",
"def compute_avg_func(values, get_date_func, get_value_func, time_from, time_to):\n values_in = [get_value_func(v) for v in values if time_from <= get_date_func(v) <= time_to]\n if len(values_in) > 0:\n a = np.array(values_in)\n avg = a.mean()\n else:\n avg = np.NaN\n return avg",
"def integrand_normalization(weight_function, intervals):\n\n def constant_rate(t):\n return 1.0\n\n return average_over_interval(constant_rate, weight_function, intervals)",
"def sinc_mean_function(x):\n return np.sin(24*x ) / (12*x) + 2",
"def average(self, start, end):\n return self.integrate(start, end) / (end - start)",
"def compute_interpolation_function(self, x, inarray,\n kind,\n bounds_error=True,\n axis=0):\n\n return interp1d(x, inarray, kind=kind,copy=True, axis=axis,\n bounds_error=bounds_error)",
"def avg(x, y):\n return (x + y)/2",
"def mean_approx(self, name='mean_approx'):\n with self._name_and_control_scope(name):\n loc = tf.convert_to_tensor(self.loc)\n scale = tf.convert_to_tensor(self.scale)\n monahan_stefanski_answer = approx_expected_sigmoid(\n loc, scale,\n MONAHAN_MIX_PROB[self.num_probit_terms_approx],\n MONAHAN_INVERSE_SCALE[self.num_probit_terms_approx])\n if self.gauss_hermite_scale_limit is None:\n return monahan_stefanski_answer\n else:\n gauss_hermite_answer = logit_normal_mean_gh(\n loc, scale, self.gauss_hermite_degree)\n return tf.where(scale < self.gauss_hermite_scale_limit,\n gauss_hermite_answer, monahan_stefanski_answer)",
"def average(data):\n return np.average(data)",
"def smooth(x, window,nan=True,old=False,fill='mean'):\n if nan:\n from Sp_parameters import nanmasked\n from scipy import interpolate\n ix = np.arange(len(x))\n xmasked, mask = nanmasked(x)\n if fill == 'mean':\n fv = np.mean(xmasked)\n elif fill == 'median':\n fv = np.median(xmasked)\n elif fill == 'zero':\n fv = 0.0\n else:\n raise ValueError('the fill keyword doesnt match possible values, try mean, median, or zero')\n fx = interpolate.interp1d(ix[mask],xmasked,bounds_error=False,fill_value=fv)\n xinterp = fx(ix)\n if old:\n xout = np.convolve(xinterp, np.ones(window)/window, 'same')\n else:\n s = np.r_[xinterp[window-1:0:-1],xinterp,xinterp[-1:-window:-1]]\n w = np.ones(window,'d')\n xout = np.convolve(w/w.sum(),s,mode='valid')\n istart = window/2\n iend = -window/2+1\n if iend==0:\n iend = len(xout)\n xout = xout[istart:iend]\n else:\n if old:\n xout = np.convolve(x, np.ones(window)/window, 'same')\n else:\n s = np.r_[x[window-1:0:-1],x,x[-1:-window:-1]]\n w = np.ones(window,'d')\n xout = np.convolve(w/w.sum(),s,mode='valid')\n xout = xout[window/2:-window/2+1]\n return xout",
"def func_return_just_mean(x):\n return func(x)[0] if func_returns_confidence_intervals else func(x)",
"def moving_average_filter(val, filtered_val_prev, zeta):\n filtered_val = (1-zeta)*filtered_val_prev + zeta*val\n return filtered_val",
"def profile_interp(var,z_orig,z_interp,method='linear',out_of_bounds='NaN'):\n z_orig = z_orig[~isnan(z_orig)]\n var= var[~isnan(var)]\n #assert(all(diff(z_orig) > 0))\n if len(z_orig) > len(var) or len(var) > len(z_orig): return NaN\n if len(z_orig) <= 2 or len(var) <= 2: return NaN\n \n if out_of_bounds == 'NaN':\n interpolant = interpolate.interp1d(z_orig,var,kind=method,bounds_error=False,fill_value=NaN)\n elif out_of_bounds == 'nearest':\n interpolant = interpolate.interp1d(z_orig,var,kind=method,bounds_error=False,fill_value=(var[0],var[-1]))\n elif out_of_bounds == 'extrap':\n interpolant = interpolate.interp1d(z_orig,var,kind=method,bounds_error=False,fill_value='extrapolate')\n else:\n raise ValueError('Extrapolation method must be NaN, nearest, or cubic.')\n result = interpolant(z_interp)\n\n if result.size == 1: return result.item()\n else: return result",
"def _interpolation(matrix):\n try:\n\tok = ~np.isnan(matrix)\n \txp = ok.ravel().nonzero()[0]\n \tfp = matrix[~np.isnan(matrix)]\n \tx = np.isnan(matrix).ravel().nonzero()[0]\n \tmatrix[np.isnan(matrix)] = np.interp(x, xp, fp)\n \treturn matrix\n except:\n return matrix",
"def zconfint_mean(self, alpha=0.05, alternative=\"two-sided\"):\n\n return _zconfint_generic(self.mean, self.std_mean, alpha, alternative)",
"def mean_kall_interp(filename, xvariable,num_interp = 100, show_plot = True,\n sample_header = 'Sample Description',\n stress_header = 'Stress (Pa)',\n strain_header = 'Strain (%)',\n k_header = 'K prime (Pa)',\n sep = ',', dec = '.'): \n\n # Read data and get all the samples within the data frame\n data = pd.read_csv(filename, sep = sep, decimal = dec)\n all_samples = data[sample_header].unique()\n\n # Define which dependent variable to extract\n if 'stress' in xvariable: xvar = stress_header\n elif 'strain' in xvariable: xvar = strain_header\n\n # Loop to get mean values of minimum and maximum xdata for the samples\n xmin = []; xmax = []\n for isample in all_samples:\n data_sample = data.loc[data[sample_header] == isample]\n xsample = np.array(data_sample[xvar])\n xmin.append(np.min(xsample))\n xmax.append(np.max(xsample))\n\n xmin_avg = np.mean(np.array(xmin))\n xmax_avg = np.mean(np.array(xmax))\n xmax_std = np.std(np.array(xmax))\n\n print('Rupture: ', xmax_avg, '+/-', xmax_std)\n # Build interpolation vector\n xmin_log = np.log10(xmin_avg)\n xmax_log = np.log10(xmax_avg)\n xinterp = np.logspace(xmin_log, xmax_log, num = num_interp)\n\n #Loop to get the interpolated curves for each sample within the file\n k_all = []\n for isample in all_samples:\n data_sample = data.loc[data[sample_header] == isample]\n xsample = data_sample[xvar]\n ksample = data_sample[k_header]\n k_interp = np.interp(xinterp, xsample, ksample)\n k_all.append(k_interp)\n \n k_all = np.array(k_all)\n kmean = np.mean(k_all, axis = 0)\n kstd = np.std(k_all, axis = 0)\n\n # Plot the average curve and standard deviation, if desired\n if show_plot == True:\n plt.fill_between(xinterp, kmean - kstd, kmean + kstd, color = 'lightgray',\n alpha = 0.8)\n plt.plot(xinterp, kmean, c = 'darkgray', marker = 'o', mfc = 'w')\n plt.ylabel('$K\\'$ (Pa)')\n plt.xlabel(xvar)\n plt.loglog()\n\n return [xinterp, kmean, kstd]",
"def lin_trim_mean(a: np.ndarray, start: float = 0.5, end: float = 0.1,\n start_v: float = 0, end_v: float = 0.5) -> float:\n start_w = np.linspace(start_v, 1, start * len(a), endpoint=False)\n end_w = np.linspace(end_v, 1, end * len(a), endpoint=False)[::-1]\n mid_w = np.ones(len(a) - len(start_w) - len(end_w))\n weights = np.concatenate((start_w, mid_w, end_w))\n return ((a * weights).sum() / weights.sum()).item()",
"def avg_pressure(start, end):\n return round((start + end) / 2, 2)",
"def _get_average(self):\n norm = 1.0\n for pos, idx in enumerate(self.idx):\n norm *= (self.high[pos] - self.low[pos])\n return 1.0/norm",
"def interpolate_nans(self):\n\n signal = self.signal\n\n # check for more than one nan in row\n for i in range(len(signal)-1) :\n if np.isnan(signal[i]) and np.isnan(signal[i+1]) :\n raise Exception('There are two nans in a row ask moritz what to do !')\n\n if np.isnan(signal[0]) :\n np.signal[0] = signal[1]\n if np.isnan(signal[-1]) :\n signal[-1] = signal[-2]\n\n for i in range(1,len(signal)-1) :\n if np.isnan(signal[i]):\n signal[i] = (signal[i-1] + signal[i+1])/2",
"def forward_avg(array_in):\n return (array_in[:-1] + array_in[1:]) * 0.5"
]
| [
"0.6612869",
"0.6197294",
"0.6021409",
"0.59408367",
"0.58685285",
"0.5757912",
"0.5747442",
"0.5746117",
"0.5628356",
"0.56112516",
"0.5594228",
"0.5554872",
"0.5553829",
"0.5519848",
"0.5465822",
"0.54347026",
"0.5407415",
"0.5396332",
"0.53776",
"0.5349899",
"0.5349717",
"0.5333575",
"0.53305167",
"0.53065765",
"0.5301334",
"0.52998936",
"0.5294936",
"0.5282933",
"0.52706754",
"0.5262537"
]
| 0.6691568 | 0 |
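
A worked example of the bin-averaging function in the entry above. The function body is copied from that entry (with the commented-out duplicate line dropped); the sample arrays and the expected outputs in the comments are illustrative:

import numpy as np
from scipy.interpolate import interp1d

def interp1d_stair_aver(x, y):
    # same logic as the entry above: average a staircase defined by (x, y) over bins
    def f(xp):
        xmod = x[~(np.isnan(x) + np.isnan(y))]
        ymod = y[~(np.isnan(x) + np.isnan(y))]
        yint = np.cumsum(np.concatenate((np.array([0]), ymod[:-1] * (xmod[1:] - xmod[:-1]))))
        g = interp1d(xmod, yint, bounds_error=False, fill_value=np.nan)
        return np.where((xp[:-1] > min(xmod)) * (xp[1:] < max(xmod)),
                        (g(xp[1:]) - g(xp[:-1])) / (xp[1:] - xp[:-1]), np.nan)
    return f

x = np.array([0., 1., 2., 3., 4.])
y = np.array([1., 3., 5., 7., 9.])           # staircase: y[i] holds on [x[i], x[i+1])
f = interp1d_stair_aver(x, y)
print(f(np.array([0.5, 1.5, 2.5, 3.5])))     # -> [2. 4. 6.], the average over each bin
print(f(np.array([-1., 0.5, 5.])))           # -> [nan nan], bin edges outside (min(x), max(x))
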
Interpolation of a staircase function using averaging. This function returns nan when an interpolation interval contains only nans. | def interp1d_stair_aver_withnan(x, y): #TODO: deal with the case x not sorted
def f(xp):
xmod=x[~(np.isnan(x)+np.isnan(y))]
ymod=y[~(np.isnan(x)+np.isnan(y))]
yp=np.empty(np.size(xp)-1)
yint=np.cumsum(np.concatenate((np.array([0]),ymod[:-1]*(xmod[1:]-xmod[:-1]))))
g=interp1d(xmod,yint, bounds_error=False, fill_value=np.nan)
# yp=np.where((xp[:-1]>min(xmod))*(xp[1:]<max(xmod)),(g(xp[1:])-g(xp[:-1]))/(xp[1:]-xp[:-1]),np.nan) #Maybe this is suboptimal since we compute twice g(xp[i])
yp=np.where((xp[:-1]>min(xmod))*(xp[1:]<max(xmod)),(g(xp[1:])-g(xp[:-1]))/(xp[1:]-xp[:-1]),np.nan) #Maybe this is suboptimal since we compute twice g(xp[i])
for i in range(np.size(xp)-1):
if np.isnan(y[np.where((x>=xp[i])*(x<xp[i+1]))]).all():
yp[i]=np.nan
return yp
return f | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def interp1d_stair_aver(x, y): #TODO: deal with the case x not sorted\n def f(xp):\n yp=np.empty(np.size(xp)-1)\n xmod=x[~(np.isnan(x)+np.isnan(y))]\n ymod=y[~(np.isnan(x)+np.isnan(y))]\n yint=np.cumsum(np.concatenate((np.array([0]),ymod[:-1]*(xmod[1:]-xmod[:-1]))))\n g=interp1d(xmod,yint, bounds_error=False, fill_value=np.nan)\n# yp=np.where((xp[:-1]>min(xmod))*(xp[1:]<max(xmod)),(g(xp[1:])-g(xp[:-1]))/(xp[1:]-xp[:-1]),np.nan) #Maybe this is suboptimal since we compute twice g(xp[i])\n yp=np.where((xp[:-1]>min(xmod))*(xp[1:]<max(xmod)),(g(xp[1:])-g(xp[:-1]))/(xp[1:]-xp[:-1]),np.nan) #Maybe this is suboptimal since we compute twice g(xp[i])\n return yp\n\n return f",
"def interpolation_matrix(m):\n return np.nanmean(m,axis=1)",
"def interpolate_nans(self):\n\n signal = self.signal\n\n # check for more than one nan in row\n for i in range(len(signal)-1) :\n if np.isnan(signal[i]) and np.isnan(signal[i+1]) :\n raise Exception('There are two nans in a row ask moritz what to do !')\n\n if np.isnan(signal[0]) :\n np.signal[0] = signal[1]\n if np.isnan(signal[-1]) :\n signal[-1] = signal[-2]\n\n for i in range(1,len(signal)-1) :\n if np.isnan(signal[i]):\n signal[i] = (signal[i-1] + signal[i+1])/2",
"def interpolate_eleMean(model):\n # Get mean of columns (data at the same elevation) without taking int account NaNs\n el_mean = nanmean(model,axis=0)\n #print(el_mean) \n # Find indices for NaNs, and replace them by the column mean\n ind_nan = np.where(np.isnan(model))\n model[ind_nan] = np.take(el_mean,ind_nan[1])\n\n return model",
"def _interpolation(matrix):\n try:\n\tok = ~np.isnan(matrix)\n \txp = ok.ravel().nonzero()[0]\n \tfp = matrix[~np.isnan(matrix)]\n \tx = np.isnan(matrix).ravel().nonzero()[0]\n \tmatrix[np.isnan(matrix)] = np.interp(x, xp, fp)\n \treturn matrix\n except:\n return matrix",
"def fill_nan(array):\n idx = np.arange(array.shape[0])\n good = np.where(np.isfinite(array))\n interp = interpolate.interp1d(idx[good], array[good], bounds_error=False)\n return np.where(np.isfinite(array), array, interp(idx))",
"def interpolate_and_average(xs, ys, interop_points=None, confidence_interval=False):\n # Get the xs of shortest curve\n max_min_x = max(x.min() for x in xs)\n min_max_x = min(x.max() for x in xs)\n if interop_points is None:\n # Interop points according to curve with \"least resolution\"\n interop_points = min(x.shape[0] for x in xs)\n\n new_x = np.linspace(max_min_x, min_max_x, interop_points)\n new_ys = []\n\n for old_x, old_y in zip(xs, ys):\n new_ys.append(np.interp(new_x, old_x, old_y))\n\n # Average out\n # atleast_2d for case when we only have one reptition\n new_ys = np.atleast_2d(np.array(new_ys))\n new_y = np.mean(new_ys, axis=0)\n std_y = np.std(new_ys, axis=0)\n\n if confidence_interval:\n interval = 1.96 * (std_y / np.sqrt(len(xs)))\n lower_bound = new_y - interval\n upper_bound = new_y + interval\n return new_x, new_y, std_y, lower_bound, upper_bound\n else:\n return new_x, new_y, std_y",
"def interp_and_smooth(self, n_points = 11, poly_order = 3, \n pixel = 'centre'):\n \n nrows = self.data_array.attrs['nrows']\n ncols = self.data_array.attrs['ncols']\n data_mtx = np.arange(nrows * ncols).reshape(nrows, ncols)\n if pixel == 'centre': \n loc = np.where(data_mtx == nrows * ncols / 2)\n elif isinstance(pixel, int):\n loc = np.where(data_mtx == pixel)\n elif pixel == 'mean': \n loc = None\n else:\n raise TypeError('\"pixel\" kwarg must be either of type str (\"center\" '\n 'or \"mean\") or int')\n pd_time = pd.to_datetime(self.data_array.time.data)\n n_days = np.array((pd_time - pd_time[0]).days)\n if loc: \n data = self.data_array.data[loc[0], loc[1], :].flatten()\n else:\n data = self.data_array.mean(['x', 'y'])\n valid_idx = np.where(~np.isnan(data)) \n f = interpolate.Akima1DInterpolator(n_days[valid_idx], data[valid_idx])\n interp_series = f(n_days)\n smooth_series = signal.savgol_filter(interp_series, n_points, poly_order, \n mode = \"mirror\")\n df = pd.DataFrame({'data': data, 'data_interp': interp_series,\n 'data_smooth': smooth_series}, index = pd_time.date)\n return df",
"def fill_nan(A):\n\tinds = np.arange(A.shape[0])\n\tgood = np.where(np.isfinite(A))\n\tA[np.isnan(A)] = np.interp(inds[np.isnan(A)], inds[good], A[good])\n\treturn A",
"def create_interpolate_prior_mean_fun(final_time, prior_mean_raw):\n f_outlet = interp1d(np.arange(0, final_time+1), prior_mean_raw, kind='cubic')\n return f_outlet",
"def profile_interp(var,z_orig,z_interp,method='linear',out_of_bounds='NaN'):\n z_orig = z_orig[~isnan(z_orig)]\n var= var[~isnan(var)]\n #assert(all(diff(z_orig) > 0))\n if len(z_orig) > len(var) or len(var) > len(z_orig): return NaN\n if len(z_orig) <= 2 or len(var) <= 2: return NaN\n \n if out_of_bounds == 'NaN':\n interpolant = interpolate.interp1d(z_orig,var,kind=method,bounds_error=False,fill_value=NaN)\n elif out_of_bounds == 'nearest':\n interpolant = interpolate.interp1d(z_orig,var,kind=method,bounds_error=False,fill_value=(var[0],var[-1]))\n elif out_of_bounds == 'extrap':\n interpolant = interpolate.interp1d(z_orig,var,kind=method,bounds_error=False,fill_value='extrapolate')\n else:\n raise ValueError('Extrapolation method must be NaN, nearest, or cubic.')\n result = interpolant(z_interp)\n\n if result.size == 1: return result.item()\n else: return result",
"def check_and_interpolate_nans(df):\n nan_count = df.isna().sum().sum()\n if nan_count > 0:\n df.interpolate(method='linear', inplace=True)\n return df",
"def average_over_interval(raw_rate, weight_function, intervals):\n\n def averaging_function(t):\n return raw_rate(t) * weight_function(t)\n\n results = np.zeros(len(intervals), dtype=np.float)\n\n for interval_idx in range(len(intervals)):\n start = intervals.start[interval_idx]\n finish = intervals.finish[interval_idx]\n results[interval_idx] = quad(averaging_function, start, finish)[0]\n\n return results",
"def test_interpolation_random_array_and_nan(self):\n\n # Define pixel centers along each direction\n x = numpy.arange(20) * 1.0\n y = numpy.arange(25) * 1.0\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), len(y)))\n\n # Define arbitrary values for each x, y pair\n numpy.random.seed(17)\n A = numpy.random.random((len(x), len(y))) * 10\n\n # Create islands of NaN\n A[5, 13] = numpy.nan\n A[6, 14] = A[6, 18] = numpy.nan\n A[7, 14:18] = numpy.nan\n A[8, 13:18] = numpy.nan\n A[9, 12:19] = numpy.nan\n A[10, 14:17] = numpy.nan\n A[11, 15] = numpy.nan\n\n A[15, 5:6] = numpy.nan\n\n # Creat interpolation points\n xis = numpy.linspace(x[0], x[-1], 39) # Hit all mid points\n etas = numpy.linspace(y[0], y[-1], 73) # Hit thirds\n points = combine_coordinates(xis, etas)\n\n for mode in ['linear', 'constant']:\n vals = interpolate2d(x, y, A, points, mode=mode)\n\n # Calculate reference result with expected NaNs and compare\n i = j = 0\n for k, (xi, eta) in enumerate(points):\n\n # Find indices of nearest higher value in x and y\n i = numpy.searchsorted(x, xi)\n j = numpy.searchsorted(y, eta)\n\n if i > 0 and j > 0:\n\n # Get four neigbours\n A00 = A[i - 1, j - 1]\n A01 = A[i - 1, j]\n A10 = A[i, j - 1]\n A11 = A[i, j]\n\n if numpy.allclose(xi, x[i]):\n alpha = 1.0\n else:\n alpha = 0.5\n\n if numpy.allclose(eta, y[j]):\n beta = 1.0\n else:\n beta = eta - y[j - 1]\n\n if mode == 'linear':\n if numpy.any(numpy.isnan([A00, A01, A10, A11])):\n ref = numpy.nan\n else:\n ref = (A00 * (1 - alpha) * (1 - beta) +\n A01 * (1 - alpha) * beta +\n A10 * alpha * (1 - beta) +\n A11 * alpha * beta)\n elif mode == 'constant':\n assert alpha >= 0.5 # Only case in this test\n\n if beta < 0.5:\n ref = A10\n else:\n ref = A11\n else:\n msg = 'Unknown mode: %s' % mode\n raise Exception(msg)\n\n #print i, j, xi, eta, alpha, beta, vals[k], ref\n assert nanallclose(vals[k], ref, rtol=1e-12, atol=1e-12)",
"def interpolate_to_amplitude(a):\n a_min = a.min()\n a_max = a.max()\n return np.interp(a, (a_min, a_max), (0, 1))",
"def extrapolate_nans(x, y, v):\n if numpy.ma.is_masked(v):\n nans = v.mask\n else:\n nans = numpy.isnan(v)\n notnans = numpy.logical_not(nans)\n v[nans] = scipy.interpolate.griddata((x[notnans], y[notnans]), v[notnans],\n (x[nans], y[nans]),\n method='nearest').ravel()\n return v",
"def check_nan(wseries: pd.Series) -> pd.Series:\n\n if len(wseries[pd.Series([\n (type(val) == str or isnan(val)) for val in wseries\n ], index=wseries.index)]) == 0:\n return wseries # nothing to change\n\n # ensure that all are either float or nan\n def _float_or_nan(ent):\n \"\"\"\n Force values to be either a float or nan first\n \"\"\"\n try:\n return float(ent)\n except ValueError:\n return float('nan')\n\n wseries = pd.Series(\n [_float_or_nan(val) for val in wseries], index=wseries.index,\n name=wseries.name\n )\n\n # continue with interpolation or extrapolation if needed\n inds = where(\n pd.Series([\n (isinstance(val, str) or isnan(val)) for val in wseries\n ], index=wseries.index)\n )[0] # locate the position of the problematic readings\n for ind in inds:\n try:\n wseries[ind] = interpolate_with_s(\n wseries.index[ind], wseries.index[ind-1],\n wseries.index[ind+1],\n wseries[ind-1], wseries[ind+1]\n )\n if isnan(wseries[ind]): # interpolation does not work\n wseries[ind] = interpolate_with_s(\n wseries.index[ind], wseries.index[ind-2],\n wseries.index[ind-1],\n wseries[ind-2], wseries[ind-1]\n )\n except IndexError: # extrapolation\n try:\n wseries[ind] = interpolate_with_s(\n wseries.index[ind], wseries.index[ind-2],\n wseries.index[ind-1],\n wseries[ind-2], wseries[ind-1]\n )\n except IndexError:\n wseries[ind] = interpolate_with_s(\n wseries.index[ind], wseries.index[ind+2],\n wseries.index[ind+1],\n wseries[ind+2], wseries[ind+1]\n )\n return wseries\n\n return wseries",
"def mean(vals):",
"def smooth(x, window,nan=True,old=False,fill='mean'):\n if nan:\n from Sp_parameters import nanmasked\n from scipy import interpolate\n ix = np.arange(len(x))\n xmasked, mask = nanmasked(x)\n if fill == 'mean':\n fv = np.mean(xmasked)\n elif fill == 'median':\n fv = np.median(xmasked)\n elif fill == 'zero':\n fv = 0.0\n else:\n raise ValueError('the fill keyword doesnt match possible values, try mean, median, or zero')\n fx = interpolate.interp1d(ix[mask],xmasked,bounds_error=False,fill_value=fv)\n xinterp = fx(ix)\n if old:\n xout = np.convolve(xinterp, np.ones(window)/window, 'same')\n else:\n s = np.r_[xinterp[window-1:0:-1],xinterp,xinterp[-1:-window:-1]]\n w = np.ones(window,'d')\n xout = np.convolve(w/w.sum(),s,mode='valid')\n istart = window/2\n iend = -window/2+1\n if iend==0:\n iend = len(xout)\n xout = xout[istart:iend]\n else:\n if old:\n xout = np.convolve(x, np.ones(window)/window, 'same')\n else:\n s = np.r_[x[window-1:0:-1],x,x[-1:-window:-1]]\n w = np.ones(window,'d')\n xout = np.convolve(w/w.sum(),s,mode='valid')\n xout = xout[window/2:-window/2+1]\n return xout",
"def interpolate_missing(y):\n if y.isna().any():\n y = y.interpolate(method='linear', limit_direction='both')\n return y",
"def interpolate1d(X):\n ind = np.arange(X.shape[0])\n ind_not_nans = np.where(~np.isnan(X)) #fill last values by last non nan values\n last_non_nan = X[ind_not_nans[0][-1]]\n f = interpolate.interp1d(ind[ind_not_nans], X[ind_not_nans], bounds_error=False, fill_value=last_non_nan)\n X_int = np.where(np.isfinite(X), X, f(ind))\n \n return X_int",
"def interpolate_timeseries(self, x, t, **kw):\n v, t_v = self.timeseries(x, rmnans=True)\n kw.update(dict(bounds_error=False))\n interpolant = sp.interpolate.interp1d(t_v, v, **kw)\n return interpolant(t)",
"def interp1d_lin_aver_withoutnan(x,y): #FIXME: there is a problem in this routine when the x are in decreasing order.\n def f(xp):\n yp=np.empty(np.size(xp)-1)\n for i in range(np.size(xp)-1):\n# print i, xp[i], xp[i+1]\n xmod=x[~(np.isnan(x)+np.isnan(y))]\n ymod=y[~(np.isnan(x)+np.isnan(y))]\n xmod2=xmod[np.where((xmod>xp[i])*(xmod<xp[i+1]))]\n ymod2=ymod[np.where((xmod>xp[i])*(xmod<xp[i+1]))]\n xmod3=np.concatenate((np.array([xp[i]]),xmod2,np.array([xp[i+1]])))\n g=interp1d(xmod,ymod, bounds_error=False, fill_value=np.nan)\n ymod3=np.concatenate((np.array([g(xp[i])]),ymod2,np.array([g(xp[i+1])])))\n# print xmod3\n# print ymod3\n if np.isnan(ymod3).all():\n yp[i]=np.nan\n else:\n xmod4=xmod3[np.where(~(np.isnan(ymod3)+np.isnan(xmod3)))]\n ymod4=ymod3[np.where(~(np.isnan(ymod3)+np.isnan(xmod3)))]\n# if i==9:\n# print xmod4,ymod4\n yp[i]=np.sum((ymod4[1:]+ymod4[:-1])/2*(xmod4[1:]-xmod4[:-1]))\n yp[i]=yp[i]/(xmod4[-1]-xmod4[0])\n# print yp[i]\n return yp\n return f",
"def replace_nan(data):\r\n lst_ind = np.array(['valence_intensity', 'anger_intensity',\r\n 'fear_intensity', 'sadness_intensity', 'joy_intensity'])\r\n for i in lst_ind:\r\n native = data[:][i]\r\n avg = np.nanmean(native)\r\n data[:][i] = np.where(np.isnan(native), avg, native)\r\n return data",
"def interpolate(f):\n @funnel\n def fill_missing(data, return_model=False, **kwargs):\n impute_kwargs = kwargs.pop('impute_kwargs', {})\n\n if impute_kwargs:\n model = impute_kwargs.pop('model', eval(defaults['impute']['model']))\n imputed_data, model = apply_sklearn_model(model, data, return_model=True, **impute_kwargs)\n data = pd.DataFrame(data=imputed_data, index=data.index, columns=data.columns)\n else:\n model = None\n\n if kwargs:\n kwargs = update_dict(defaults['interpolate'], kwargs, from_config=True)\n data = data.interpolate(**kwargs)\n\n if return_model:\n return data, {'model': model, 'args': [], 'kwargs': kwargs}\n else:\n return data\n\n @functools.wraps(f)\n def wrapped(data, *args, **kwargs):\n interp_kwargs = kwargs.pop('interp_kwargs', {})\n return f(fill_missing(data, *args, **interp_kwargs), **kwargs)\n\n return wrapped",
"def pad(input_data):\n # source : https://stackoverflow.com/questions/6518811/interpolate-nan-values-in-a-numpy-array \n data = input_data.copy()\n bad_indexes = np.isnan(data)\n good_indexes = np.logical_not(bad_indexes)\n good_data = data[good_indexes]\n interpolated = np.interp(bad_indexes.nonzero()[0], good_indexes.nonzero()[0], good_data)\n data[bad_indexes] = interpolated\n return data",
"def replaces_nans_ma(series):\n series = series.replace([np.inf, -np.inf], np.nan)\n result = series.fillna(series.rolling(window=len(series), min_periods=0).mean())\n return result",
"def test_linear_interpolation_nan_points(self):\n\n # Define pixel centers along each direction\n x = [1.0, 2.0, 4.0]\n y = [5.0, 9.0]\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), len(y)))\n\n # Define values for each x, y pair as a linear function\n for i in range(len(x)):\n for j in range(len(y)):\n A[i, j] = linear_function(x[i], y[j])\n\n # Then test that interpolated points can contain NaN\n xis = numpy.linspace(x[0], x[-1], 10)\n etas = numpy.linspace(y[0], y[-1], 10)\n xis[6:7] = numpy.nan\n etas[3] = numpy.nan\n points = combine_coordinates(xis, etas)\n\n vals = interpolate2d(x, y, A, points, mode='linear')\n refs = linear_function(points[:, 0], points[:, 1])\n assert nanallclose(vals, refs, rtol=1e-12, atol=1e-12)",
"def mean_kall_interp(filename, xvariable,num_interp = 100, show_plot = True,\n sample_header = 'Sample Description',\n stress_header = 'Stress (Pa)',\n strain_header = 'Strain (%)',\n k_header = 'K prime (Pa)',\n sep = ',', dec = '.'): \n\n # Read data and get all the samples within the data frame\n data = pd.read_csv(filename, sep = sep, decimal = dec)\n all_samples = data[sample_header].unique()\n\n # Define which dependent variable to extract\n if 'stress' in xvariable: xvar = stress_header\n elif 'strain' in xvariable: xvar = strain_header\n\n # Loop to get mean values of minimum and maximum xdata for the samples\n xmin = []; xmax = []\n for isample in all_samples:\n data_sample = data.loc[data[sample_header] == isample]\n xsample = np.array(data_sample[xvar])\n xmin.append(np.min(xsample))\n xmax.append(np.max(xsample))\n\n xmin_avg = np.mean(np.array(xmin))\n xmax_avg = np.mean(np.array(xmax))\n xmax_std = np.std(np.array(xmax))\n\n print('Rupture: ', xmax_avg, '+/-', xmax_std)\n # Build interpolation vector\n xmin_log = np.log10(xmin_avg)\n xmax_log = np.log10(xmax_avg)\n xinterp = np.logspace(xmin_log, xmax_log, num = num_interp)\n\n #Loop to get the interpolated curves for each sample within the file\n k_all = []\n for isample in all_samples:\n data_sample = data.loc[data[sample_header] == isample]\n xsample = data_sample[xvar]\n ksample = data_sample[k_header]\n k_interp = np.interp(xinterp, xsample, ksample)\n k_all.append(k_interp)\n \n k_all = np.array(k_all)\n kmean = np.mean(k_all, axis = 0)\n kstd = np.std(k_all, axis = 0)\n\n # Plot the average curve and standard deviation, if desired\n if show_plot == True:\n plt.fill_between(xinterp, kmean - kstd, kmean + kstd, color = 'lightgray',\n alpha = 0.8)\n plt.plot(xinterp, kmean, c = 'darkgray', marker = 'o', mfc = 'w')\n plt.ylabel('$K\\'$ (Pa)')\n plt.xlabel(xvar)\n plt.loglog()\n\n return [xinterp, kmean, kstd]",
"def scipy_nanmean(x, axis=0):\n x, axis = _chk_asarray(x,axis)\n x = x.copy()\n Norig = x.shape[axis]\n factor = 1.0-np.sum(np.isnan(x),axis)*1.0/Norig\n\n x[np.isnan(x)] = 0\n return np.mean(x,axis)/factor"
]
| [
"0.6942735",
"0.64421344",
"0.6326304",
"0.6222424",
"0.6200586",
"0.6040275",
"0.5972592",
"0.59142005",
"0.58430594",
"0.5767788",
"0.5762497",
"0.5755958",
"0.5682408",
"0.56756353",
"0.5646744",
"0.564491",
"0.5589128",
"0.55759656",
"0.55423373",
"0.55390733",
"0.552185",
"0.55208117",
"0.5464399",
"0.54538554",
"0.545084",
"0.5445618",
"0.5389615",
"0.538909",
"0.5388172",
"0.5369978"
]
| 0.71166354 | 0 |
Initialize the site record attributes. | def __init__(self, site_record_string) -> None:
super().__init__()
self.quadrat = None
self.waypoint = None
self.grid_reference = None
self.photo_up = None
self.photo_down = None
self.wetness = None
self.canopy = None
self.species = []
string_as_file = io.StringIO(site_record_string)
self._initialize_attributes() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _init_attributes(self):\n self.attr = {\n 'name': None,\n 'tags': [],\n 'openHours': None,\n 'type': None,\n 'parent': None,\n 'locationId': None,\n 'bannerAbbreviation': None,\n 'arcGisAbbreviation': None,\n 'geoLocation': None,\n 'geometry': None,\n 'summary': None,\n 'description': None,\n 'descriptionHtml': None,\n 'address': None,\n 'city': None,\n 'state': None,\n 'zip': None,\n 'county': None,\n 'telephone': None,\n 'fax': None,\n 'thumbnails': [],\n 'images': [],\n 'departments': [],\n 'website': None,\n 'sqft': None,\n 'calendar': None,\n 'campus': None,\n 'girCount': None,\n 'girLimit': False,\n 'girLocations': None,\n 'synonyms': [],\n 'bldgId': None,\n 'parkingZoneGroup': None,\n 'propId': None,\n 'adaParkingSpaceCount': None,\n 'motorcycleParkingSpaceCount': None,\n 'evParkingSpaceCount': None,\n 'weeklyMenu': None,\n 'notes': None,\n 'labels': {},\n 'steward': None,\n 'shape': {}\n }",
"def initialize_attributes(self):\n self.host = self.netloc\n self.url = self.geturl()\n\n self.set_scheme_if_non('https')\n \n # The file extensions we are watching for. Either load the extensions\n # from a text file, or create a seperate python file contain a list\n # supported file extensions\n self.listed_file_extensions = [ \n '.jpg', '.bmp', '.png',\n '.mp3', '.mp4', '.flv', '.avi',\n '.zip', '.7z', '.tar', '.tar.gz', '.tar.bz', '.rar',\n '.exe', '.git', '.torrent',\n ] \n # Type Boolean: True or False\n # Urls contain some useful information. Depending on the framework the \n # website is built on, a url can contain information about paths and files.\n # This is a glimpse of the sites computer system. Pretty Useful!\n self.is_file_extension = None # Does this path end as a file?\n #self.file_extension = self.check_for_file_extension()",
"def initialize(self):\n self.ID = uuid.uuid4()\n self.TMDB_ID = 0\n self.title = \"\"\n self.release_date = \"\"\n self.popularity = \"\"\n self.overview = \"\"",
"def __init__(self, api, site_id):\n self.api = api\n self.site_id = site_id\n\n self.data = {}\n self.attributes = {}",
"def initialize(self):\n\n db = dict()\n\n db['meta'] = Meta(None)\n db['race'] = Race(None, None, None, None, None)\n db['track'] = Track(None, None)\n db['classes'] = set([])\n db['teams'] = set([])\n db['drivers'] = set([])\n\n self.db = db",
"def init_attrs(self):\n raise NotImplementedError",
"def __init__(self, request):\n\n self.request = request\n self.db = mdb(request)\n self.site = request.matchdict.get('code')\n self.eid = request.matchdict.get('id')\n claims, site = verify_access(request, site=self.site)\n\n self.eac_path = site['eac']\n self.source_map = site['map']\n self.name = site['name']\n self.url = site['url']\n log.debug(\"Processing site: %s, data path: %s\" % (self.site, self.eac_path))",
"def init_attributes(self):\n # Set default values\n for key, value in self.defaults.items():\n setattr(self, key, value)\n\n # Parse all arguments in kwargs\n for key, value in self.kwargs.items():\n parsed_value = eval_arg(value, key)\n logging.info('Setting ' + str(type(parsed_value)) + ' self.' + str(key) + ' = ' + str(parsed_value))\n setattr(self, key, parsed_value)\n\n # self.today = date_utils.get_datetime_from_timezone(self.date_offset, self.timezone)\n self.today = datetime.datetime.today()",
"def __init__(self):\n self.relation = ''\n self.attributes = []\n self.attribute_types = dict()\n self.attribute_data = dict()\n self.comment = []\n self.data = []\n pass",
"def init_warping_record_fields(self, bc):\n self.init_record_fields(WARPING, bc.warping_record_field_names())",
"def __init__(self, record=None):\n self.record = record",
"def setup(self): \n self.suburbs_dict = dict()\n self.raw_proIds_dict = dict()\n self.propertyIds_dict = dict()\n self.valuations = dict()",
"def __init__(self, site):\n self.site = site\n self._entries = []",
"def __init__(self, identifier, site, type,):\n self.identifier = identifier\n self.site = site\n self.type = type",
"def _init_attributes(self):\n if os.name == \"nt\":\n if \"64\" in platform.architecture()[0]:\n platform_arch = \"x86_64\"\n elif \"32\" in platform.architecture()[0]:\n platform_arch = \"i386\"\n else:\n platform_arch = platform.architecture()\n os_ver = f\"Windows-{platform.win32_ver()[1]}\"\n else:\n platform_arch = platform.machine()\n if platform.system() == \"Darwin\":\n os_ver = f\"macOS-{platform.mac_ver()[0]}\"\n else:\n os_ver = \"-\".join(linux_distribution()[0:2])\n\n license_chunks = LICENSE.split(\" \")\n if license_chunks[0] == \"GPLv2\":\n client_license = \"GPL-2.0\"\n else:\n client_license = \"Commercial\"\n\n default_attributes = {\n # Process id\n \"_pid\": str(os.getpid()),\n # Platform architecture\n \"_platform\": platform_arch,\n # OS version\n \"_os\": os_ver,\n # Hostname of the local machine\n \"_source_host\": socket.gethostname(),\n # Client's name\n \"_client_name\": \"mysql-connector-python\",\n # Client's version\n \"_client_version\": \".\".join([str(x) for x in VERSION[0:3]]),\n # Client's License identifier\n \"_client_license\": client_license,\n }\n self._settings[\"attributes\"].update(default_attributes)\n\n if \"connection-attributes\" in self._settings:\n for attr_name in self._settings[\"connection-attributes\"]:\n attr_value = self._settings[\"connection-attributes\"][attr_name]\n # Validate name type\n if not isinstance(attr_name, str):\n raise InterfaceError(\n f\"Attribute name '{attr_name}' must be a string type\"\n )\n # Validate attribute name limit 32 characters\n if len(attr_name) > 32:\n raise InterfaceError(\n f\"Attribute name '{attr_name}' exceeds 32 characters \"\n \"limit size\"\n )\n # Validate names in connection-attributes cannot start with \"_\"\n if attr_name.startswith(\"_\"):\n raise InterfaceError(\n \"Key names in 'session-connect-attributes' cannot \"\n f\"start with '_', found: {attr_name}\"\n )\n # Validate value type\n if not isinstance(attr_value, str):\n raise InterfaceError(\n f\"Attribute name '{attr_name}' value '{attr_value}' \"\n \" must be a string type\"\n )\n\n # Validate attribute value limit 1024 characters\n if len(attr_value) > 1024:\n raise InterfaceError(\n f\"Attribute name '{attr_name}' value: '{attr_value}' \"\n \"exceeds 1024 characters limit size\"\n )\n\n self._settings[\"attributes\"][attr_name] = attr_value",
"def _initFields(self):\n pass",
"def init_resampler_record_fields(self, resampler):\n self.init_record_fields(RESAMPLER, resampler.resampler_record_field_names())",
"def initialise(self):\n self.set_up()",
"def __init__(self):\n self.constant_fields = {}\n self.post_score_renames = {}\n self.form = None\n self.form_field_regex = None\n self.field_count = None\n\n self.set_generic_fields()\n self.set_specific_fields()\n self.set_post_score_renames()",
"def initialize(self):\r\n if not self.context:\r\n self.context = SQLContext(self.url, self.connection, self.schema)\r\n if self.table is None:\r\n self.table = self.context.table(self.table_name)\r\n if not self.fields:\r\n self.read_fields()\r\n self.field_names = self.fields.names()",
"def __init__(self, site, first_page, last_page):\n\n\t\tself.site_url = site\n\t\tself.first_page = first_page\n\t\tself.last_page = last_page",
"def __init__(self, interactor: _Interactor) -> None:\n self.interactor = interactor\n\n self._sites: Dict[str, Site] = {}\n self._sites_records: Dict[str, Dict[str, Any]] = {}",
"def __init__(self):\n self._init_site_specifications_()\n\n self.my_params = None # parameters for site requests\n self.rates = None # exchange rates from the site\n self.timeout = 1 # url response timeout in seconds\n\n # retrieved rates validity\n self.valid_from_utc = None\n self.valid_to_utc = None\n\n self.in_ccode = None\n self.response_success = False",
"def _initialize_metadata(self):\n\n survey_metadata = metadata.Survey(id=\"0\")\n survey_metadata.stations.append(metadata.Station(id=\"0\"))\n survey_metadata.stations[0].runs.append(metadata.Run(id=\"0\"))\n\n return survey_metadata",
"def __init__(self, recordObj):\n\n for prop in Details.inheritProps:\n setattr(self, prop, getattr(recordObj, prop, None))\n\n self.details = {}\n \"\"\"*dict* Stores the details of this record.\n\n Keyed by the name of the detail table, values are lists of detail\n records in that detail table.\n \"\"\"",
"def memb_init(self):\n self.initialize()",
"def __init__(self):\n self.conf = None\n self.section = None\n self._engine = None\n self._session = None\n self.base_model = declarative_base()",
"def init(self):\n self.latest_domain_info = None",
"def init(self):\n self.latest_domain_info = None",
"def __init__(self, attrs = None):\n\n if attrs != None:\n self.__dict__.update(attrs)"
]
| [
"0.6735955",
"0.65529585",
"0.6512695",
"0.63683957",
"0.63453543",
"0.6218702",
"0.61691666",
"0.6167701",
"0.61475956",
"0.608511",
"0.603915",
"0.6023864",
"0.59777045",
"0.595316",
"0.5944511",
"0.5943978",
"0.5942444",
"0.5922102",
"0.5920835",
"0.5882726",
"0.586566",
"0.586414",
"0.58505785",
"0.5821806",
"0.5811385",
"0.5805813",
"0.57919365",
"0.5791295",
"0.5791295",
"0.57776874"
]
| 0.7114244 | 0 |
Return all subtiles contained within a tile | def all_descendant_tiles(x, y, zoom, max_zoom):
if zoom < max_zoom:
for child_tile in mercantile.children(x, y, zoom):
yield child_tile
for desc_tile in all_descendant_tiles(child_tile.x, child_tile.y,
child_tile.z, max_zoom):
yield desc_tile | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_tiles(self):\n lat1, lat2 = self.bbox.south, self.bbox.north\n lon1, lon2 = self.bbox.west, self.bbox.east\n # convert to geographic bounding box\n minlat, minlon = min(lat1, lat2), min(lon1, lon2)\n maxlat, maxlon = max(lat1, lat2), max(lon1, lon2)\n\n # convert to tile-space bounding box\n _, xmin, ymin = self.mercator(maxlat, minlon, self.zoom)\n _, xmax, ymax = self.mercator(minlat, maxlon, self.zoom)\n\n # generate a list of tiles\n xs, ys = range(xmin, xmax + 1), range(ymin, ymax + 1)\n tile_list = [(self.zoom, x, y) for (y, x) in product(ys, xs)]\n\n return tile_list",
"def subtrees(self):\n return list(iter(self))",
"def get_tiles(self) -> list:\n n_rows = self.mosaic_dimensions[0]\n n_columns = self.mosaic_dimensions[1]\n return [\n self.get_tile(i_row, i_column)\n for i_row in range(n_rows)\n for i_column in range(n_columns)\n ]",
"def subtrees(self):\n yield from subtrees(self)",
"def extract_subgrid(grid, i, j, subgrid_radius):\n\n\tnum_rows = 1 + (2 * subgrid_radius)\n\tnum_columns = num_rows\n\n\tsubgrid = []\n\ti_min = i - subgrid_radius\n\tj_min = j - subgrid_radius\n\tfor i_subgrid in range(num_rows):\n\t\trow = []\n\t\tfor j_subgrid in range(num_columns):\n\t\t\ti_grid = i_min + i_subgrid\n\t\t\tj_grid = j_min + j_subgrid\n\t\t\t\n\t\t\tif (0 <= i_grid < len(grid)) and (0 <= j_grid < len(grid[0])):\n\t\t\t\ttile = grid[i_grid][j_grid]\n\t\t\telse:\n\t\t\t\ttile = MaskedTile.WALL\n\n\t\t\trow.append(tile)\n\n\t\tsubgrid.append(row)\n\n\treturn subgrid",
"def _get_tiles(self, width: Numeric) -> List[Polygon]:\n min_x, min_y, max_x, max_y = self._get_rounded_bounding_box(self.geom, width)\n tiles = []\n\n for i in range(0, int((max_x - min_x) / width)):\n for j in range(0, int((max_y - min_y) / width)):\n tile = box(\n (i * width) + min_x,\n (j * width) + min_y,\n ((i + 1) * width) + min_x,\n ((j + 1) * width) + min_y,\n )\n\n if self.geom.intersects(tile):\n tiles.append(tile)\n\n return tiles",
"def tileslist(self, bbox, zoomlevels, tms_osm=False):\n mercator = GlobalMercator(tms_osm,self.tile_size,zoomlevels)\n return mercator.tileslist(bbox)",
"def generate_tiles(self):\n if self.children:\n for child in self.children:\n child.generate_tiles()\n print \"Generating tile for %s using child tiles\" % self.bbox\n self.generate_tile_from_child_tiles()\n else:\n print \"Generating tile for %s using source data\" % self.bbox\n self.generate_tile_from_source()",
"def findTiles(request, tree, removeHeadLinks=False, ignoreHeadTiles=False):\n \n tiles = []\n baseURL = request.getURL()\n\n # Find tiles in the head of the page\n if not ignoreHeadTiles or removeHeadLinks:\n for tileNode in headTileXPath(tree):\n tileHref = tileNode.get('href', None)\n\n if tileHref is not None:\n tileId = \"__tile_%s\" % uuid.uuid4()\n tileHref = urljoin(baseURL, tileHref)\n \n if removeHeadLinks:\n tileNode.getparent().remove(tileNode)\n tileNode = None\n \n if not ignoreHeadTiles:\n tiles.append((tileId, tileHref, tileNode,))\n\n # Find tiles in the body\n for tileNode in tree.getroot().cssselect(\".tile-placeholder\"):\n tileId = tileNode.get('id', None)\n tileHref = tileNode.get('data-tile-href', None)\n\n if tileHref is not None:\n \n # If we do not have an id, generate one\n if tileId is None:\n tileId = \"__tile_%s\" % uuid.uuid4()\n tileNode.attrib['id'] = tileId\n \n tileHref = urljoin(baseURL, tileHref)\n tiles.append((tileId, tileHref, tileNode,))\n\n return tiles",
"def __init__tiles__(self):\n return [[Tiles(i, j, Tiles.closed) for j in range(self.cols)] for i in range(self.rows)]",
"def open_tiles(self):\n return list(filter(None, self.empty))",
"def iter_tiles(data_sampler, depth, merge=True):\n if merge is True:\n merge = _default_merge\n\n parents = defaultdict(dict)\n\n for node, c, increasing in iter_corners(max(depth, 1),\n bottom_only=merge):\n\n l, b = subsample(c[0], c[1], c[2], c[3], 256, increasing)\n img = data_sampler(l, b)\n\n for pth, img in _trickle_up(img, node, parents, merge, depth):\n yield pth, img",
"def copy_tiles(self):\n \n return self.tiles",
"def get_work_tiles(self):\n work_tiles = []\n for zoom in self.config[\"zoom_levels\"]:\n bbox = self.config[\"zoom_levels\"][zoom][\"process_area\"]\n work_tiles.extend(self.tile_pyramid.tiles_from_geom(bbox, zoom))\n return work_tiles",
"def tileslist(self, bbox, zoomlevels, tms_scheme=False):\n proj = GoogleProjection(self.tile_size, zoomlevels, tms_scheme)\n return proj.tileslist(bbox)",
"def blaze(self):\n visited = set()\n tile_exits = dict((tile, {}) for tile in self.tiles)\n\n def visit(tile):\n # Randomized depth-first search of self.tiles.\n visited.add(tile)\n adj = self.adjacencies(tile, self.tiles)\n self.rand.shuffle(adj)\n for d, t in adj:\n if t not in visited:\n tile_exits[tile][d] = t\n tile_exits[t][self._inverted_dirs[d]] = tile\n visit(t)\n\n visit(next(iter(self.tiles)))\n return tile_exits",
"def get_tiles(self):\n\n tiles = []\n for x in range(self.position[0],\n self.position[0] + CAR_LENGTH if self.is_horizontal else self.position[0] + CAR_WIDTH):\n for y in range(self.position[1],\n self.position[1] + CAR_WIDTH if self.is_horizontal else self.position[1] + CAR_LENGTH):\n tiles.append((x, y))\n\n return tiles",
"def get_traversable_tiles(room, x, y, length, width):\n traversables = []\n # Checking that we are not going out of bounds\n if x > length - 1 or y > width - 1 or x < 0 or y < 0:\n return\n # Checking above\n if not (x - 1 < 0):\n if not room.tiles[x - 1][y].border:\n traversables.append([x - 1, y])\n # Checking left\n if not (y - 1 < 0):\n if not room.tiles[x][y - 1].border:\n traversables.append([x, y - 1])\n # Checking right\n if not (y + 1 > width - 1):\n if not room.tiles[x][y + 1].border:\n traversables.append([x, y + 1])\n # Checking below\n if not (x + 1 > length - 1):\n if not room.tiles[x + 1][y].border:\n traversables.append([x + 1, y])\n\n return traversables",
"def create_subgrid(self)->list:\n return [subgrid.Subgrid(i) for i in range(0, 9)]",
"def calc_tiles(raster, tile_x, tile_y):\n \n #get coordinates of upper left corner\n x_upper_left = raster.transform[2]\n y_upper_left = raster.transform[5]\n #calculate width and height based on tile_x and tile_y\n x,y = x_upper_left + tile_x, y_upper_left - tile_y\n height, width = raster.index(x,y)\n \n #get cols and rows of raster band\n ncols, nrows = raster.meta['width'], raster.meta['height']\n #create offsets for window processing\n subsets = product(range(0, ncols, width), range(0, nrows, height))\n #create bounding_window to fill missing windows\n bounding_window = rio.windows.Window(col_off=0, row_off=0, width=ncols, height=nrows)\n \n #create windows\n for col_off, row_off in subsets:\n #yield windows with the given parameters\n window = rio.windows.Window(col_off=col_off, row_off=row_off, \n width=width, height=height).intersection(bounding_window)\n yield window",
"def slice_to_tiles(self, tile_raw_size=None, show_info=\"\"):\n if not tile_raw_size: tile_raw_size = self.tile_raw_size\n tile_raw_w,tile_raw_h = tile_raw_size\n tile_w,tile_h = round(tile_raw_w),round(tile_raw_h)\n\n if show_info:\n print(f\" ==Slicing {show_info} Tiles==\")\n print(f' Tile raw size: {tile_raw_size[0]} x {tile_raw_size[1]} px\\n')\n\n #process into list of image objects\n tiles = []\n true_x, true_y = (0,0)\n with Image.open(self.path) as img_obj:\n w,h = img_obj.size\n for row in range(0,h-tile_h,tile_h):\n tiles_row = []\n y = round(true_y)\n for col in range(0,w-tile_w,tile_w):\n x = round(true_x)\n im_crop = img_obj.crop((x,y,x+tile_w,y+tile_h))\n tiles_row.append(im_crop)\n true_x += tile_raw_w\n tiles.append(tiles_row)\n true_y += tile_raw_h\n true_x = 0\n\n return tiles",
"def subsections(self):\n return self.children()",
"def get_tiles():\n\t\t\n\tcursor = get_cursor()\n\t\n\tcursor.execute(\"SELECT * FROM fitmeimages ORDER BY shade ASC, id ASC\")\n\treturn cursor.fetchall();",
"def chunks(self) -> List[OctreeChunk]:\n return [tile_data.octree_chunk for tile_data in self._tiles.values()]",
"def find_open_tiles(self, arena, units):\r\n tiles = []\r\n for x, y in [(self.x+1, self.y), (self.x, self.y+1), (self.x-1, self.y), (self.x, self.y-1)]:\r\n if arena[x][y] == '.':\r\n tiles.append((x, y))\r\n return tiles",
"def select_all_active_tiles(self):\n self.ref_tiles = []\n number_grids = int(self.cfg['grids']['number_grids'])\n for grid in range(number_grids):\n for tile in self.gm.get_active_tiles(grid):\n self.ref_tiles.append(str(grid) + '.' + str(tile))",
"def tiles(self, width: int, height: int) -> TileSet:\n y_count = len(self.tiling)\n for y_index, y_tile in enumerate(self.tiling):\n\n x_count = len(y_tile)\n for x_index, tile_strength in enumerate(y_tile):\n\n # Doing multiplication before devision here to make sure rounding is correct\n bounding_box = (\n # from (x1, y1)\n int(width * x_index / x_count),\n int(height * y_index / y_count),\n # to (x2, y2)\n int(width * (x_index + 1) / x_count),\n int(height * (y_index + 1) / y_count),\n )\n\n yield bounding_box, tile_strength",
"def split_into_tiles(self, x: torch.Tensor):\n tiles, self._coords, self._overlap = self._get_tiles_and_coords(x)\n self._num_tiles = tiles.shape[0]\n return tiles",
"def grid_tiles(self, bbox, zoomlevel):\n tiles,tile_bounds = self.tileslist(bbox, [zoomlevel],self.reader.tms_osm)\n grid = {}\n # for (z, x, y) in sorted(tiles,key=operator.itemgetter(0,1,2),reverse=True):\n for (z, x, y) in tiles:\n if not grid.get(y):\n grid[y] = []\n grid[y].append(x)\n sortedgrid = []\n for y in sorted(grid.keys(),reverse=not self.reader.tms_osm):\n sortedgrid.append([(x, y) for x in sorted(grid[y])])\n return sortedgrid,tile_bounds",
"def get_candidate_tiles(self) -> List[Point]:\n\t\tempty_tiles = set()\n\t\tfor x in range(self.size):\n\t\t\tfor y in range(self.size):\n\t\t\t\tif not self.tiles[x][y] == 0:\n\t\t\t\t\tfor d in [[0,1], [1,1], [1,0], [1,-1], [0,-1], [-1,-1], [-1,0], [-1,1]]:\n\t\t\t\t\t\tif x+d[0] >= 0 and y+d[1] >= 0 and x+d[0] < self.size and y+d[1] < self.size and self.tiles[x+d[0]][y+d[1]] == 0:\n\t\t\t\t\t\t\tempty_tiles.add(Point(x+d[0],y+d[1]))\n\t\treturn list(empty_tiles)"
]
| [
"0.676234",
"0.6596666",
"0.64846224",
"0.63919574",
"0.63342875",
"0.63312995",
"0.6318957",
"0.6259967",
"0.6224775",
"0.62130743",
"0.6210267",
"0.61658144",
"0.61581886",
"0.6139943",
"0.6124439",
"0.6120807",
"0.6054848",
"0.601967",
"0.6005847",
"0.5993344",
"0.5978138",
"0.5928255",
"0.59261256",
"0.5924274",
"0.5921477",
"0.5852372",
"0.5845776",
"0.5836749",
"0.5835016",
"0.57641304"
]
| 0.6925616 | 0 |
All tiles that should not be in MBTiles | def redundant_tiles(mbtiles, required_tiles):
xyz_dict= lambda: defaultdict(xyz_dict)
# Mark all tiles that are required
marked_tiles = xyz_dict()
for tile in required_tiles:
marked_tiles[tile.z][tile.x][tile.y] = True
for tile in mbtiles.all_tiles():
required = marked_tiles[tile.z][tile.x][tile.y]
if required != True:
yield tile | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def missing_tiles(mbtiles, required_tiles):\n for tile in required_tiles:\n if not mbtiles.tile_exists(tile.x, tile.y, tile.z):\n yield tile",
"def open_tiles(self):\n return list(filter(None, self.empty))",
"def discard_tile(self):\n raise NotImplemented()",
"def find_tiles(self):\n lat1, lat2 = self.bbox.south, self.bbox.north\n lon1, lon2 = self.bbox.west, self.bbox.east\n # convert to geographic bounding box\n minlat, minlon = min(lat1, lat2), min(lon1, lon2)\n maxlat, maxlon = max(lat1, lat2), max(lon1, lon2)\n\n # convert to tile-space bounding box\n _, xmin, ymin = self.mercator(maxlat, minlon, self.zoom)\n _, xmax, ymax = self.mercator(minlat, maxlon, self.zoom)\n\n # generate a list of tiles\n xs, ys = range(xmin, xmax + 1), range(ymin, ymax + 1)\n tile_list = [(self.zoom, x, y) for (y, x) in product(ys, xs)]\n\n return tile_list",
"def remove_tiles(self, num):\r\n drawntiles = [self.db.tilestring.pop(random.randrange(len(self.db.tilestring))) for _ in xrange(num)]\r\n return drawntiles",
"def test_room_has_tiles(self):\n self.assertEqual(self.room.tile_set.count(), self.room.grid_size ** 2)",
"def test_room_has_tiles(self):\n self.assertGreaterEqual(self.room.tile_set.count(), 2)",
"def test_tiles_to_bounds():\n tiles = [morecantile.Tile(x=150, y=182, z=9), morecantile.Tile(x=151, y=182, z=9)]\n assert len(utils.tiles_to_bounds(tiles)) == 4",
"def test_generate_tiles(self):\n tile_list = utils.generate_tiles()\n self.assertEqual(len(tile_list), utils.TILE_COUNT)",
"def __get_neutral_tiles(self) -> List[List[int]]:\n neutral_tiles = []\n for x in range(self.num_columns):\n for y in range(self.num_rows):\n if self.is_flippable_tile([y, x]):\n neutral_tiles.append([y, x])\n return neutral_tiles",
"def list_tiles_covering_land(self):\n\n land_tiles = Equi7Grid._static_data[self.core.tag][\"coverland\"][\n self.core.tiletype]\n return list(land_tiles)",
"def clear_tiles(self):\n for y in range(Settings.SIZE_Y):\n for x in range(Settings.SIZE_X):\n self.__tile_grid[y][x].configure(\n image=self.__marker_images[MarkerType.NONE])",
"def scns2tilecache_all_avail(self):\n scn_lst = self.get_scnlist_tilecache()\n for scn in scn_lst:\n self.scn2tilecache(scn)",
"def any_empty_tiles(self):\n for i in range(self.TILES_PER_ROW):\n for j in range(self.TILES_PER_ROW):\n if self.main_grid_values[i][j] == 0:\n return True\n\n return False",
"def __init__tiles__(self):\n return [[Tiles(i, j, Tiles.closed) for j in range(self.cols)] for i in range(self.rows)]",
"def tile_set():\n TILES = {\n \"ocean\":\"~\"\n ,\"rock\":\"R\"\n ,\"mountain\":\"M\"\n ,\"player\":\"X\"\n ,\"end\":\"⋆\"\n ,\"npc\":\"I\"\n ,\"cave\":\"C\"\n ,\"dirt\":\"+\"\n ,\"sign\":\"!\"\n }\n\n return TILES",
"def blaze(self):\n visited = set()\n tile_exits = dict((tile, {}) for tile in self.tiles)\n\n def visit(tile):\n # Randomized depth-first search of self.tiles.\n visited.add(tile)\n adj = self.adjacencies(tile, self.tiles)\n self.rand.shuffle(adj)\n for d, t in adj:\n if t not in visited:\n tile_exits[tile][d] = t\n tile_exits[t][self._inverted_dirs[d]] = tile\n visit(t)\n\n visit(next(iter(self.tiles)))\n return tile_exits",
"def select_all_active_tiles(self):\n self.ref_tiles = []\n number_grids = int(self.cfg['grids']['number_grids'])\n for grid in range(number_grids):\n for tile in self.gm.get_active_tiles(grid):\n self.ref_tiles.append(str(grid) + '.' + str(tile))",
"def isTileBlank(tile):\n for b in tile:\n if b: return False\n return True",
"def findTiles(request, tree, removeHeadLinks=False, ignoreHeadTiles=False):\n \n tiles = []\n baseURL = request.getURL()\n\n # Find tiles in the head of the page\n if not ignoreHeadTiles or removeHeadLinks:\n for tileNode in headTileXPath(tree):\n tileHref = tileNode.get('href', None)\n\n if tileHref is not None:\n tileId = \"__tile_%s\" % uuid.uuid4()\n tileHref = urljoin(baseURL, tileHref)\n \n if removeHeadLinks:\n tileNode.getparent().remove(tileNode)\n tileNode = None\n \n if not ignoreHeadTiles:\n tiles.append((tileId, tileHref, tileNode,))\n\n # Find tiles in the body\n for tileNode in tree.getroot().cssselect(\".tile-placeholder\"):\n tileId = tileNode.get('id', None)\n tileHref = tileNode.get('data-tile-href', None)\n\n if tileHref is not None:\n \n # If we do not have an id, generate one\n if tileId is None:\n tileId = \"__tile_%s\" % uuid.uuid4()\n tileNode.attrib['id'] = tileId\n \n tileHref = urljoin(baseURL, tileHref)\n tiles.append((tileId, tileHref, tileNode,))\n\n return tiles",
"def copy_tiles(self):\n \n return self.tiles",
"def check(self):\n return self.tile==\"\"",
"def get_flagged_tile_list ( self ) :\n tile_list = []\n stmt = \"select name from sdb_product where sys003 =\\'T\\'\"\n self.oracle_cursor.arraysize = 100000\n self.oracle_cursor.execute(stmt)\n resultset = self.oracle_cursor.fetchmany()\n if resultset :\n for row in resultset :\n tile_list.append(str(row[0]))\n return tile_list",
"def calculate_min_max_tiles(self):",
"def test_that_tiling_ignores_padding_if_web_friendly_internal_tiles_exist():\n with rasterio.open(COG_WEB_TILED) as src_dst:\n arr, _ = reader.tile(\n src_dst, 147, 182, 9, tilesize=256, padding=0, resampling_method=\"bilinear\"\n )\n arr2, _ = reader.tile(\n src_dst,\n 147,\n 182,\n 9,\n tilesize=256,\n padding=100,\n resampling_method=\"bilinear\",\n )\n assert numpy.array_equal(arr, arr2)",
"def get_empty_tiles(self) -> List[Point]:\n\t\tempty_tiles = []\n\t\tfor x in range(self.size):\n\t\t\tfor y in range(self.size):\n\t\t\t\tif self.tiles[x][y] == 0:\n\t\t\t\t\tempty_tiles.append(Point(x,y))\n\t\treturn empty_tiles",
"def test_tiled_iterator_nogen(self):\n tile_no_gen = TiledIterator(\n twod_image=self.test_file_1, overlap_log_2=0\n )\n tile = next(tile_no_gen)\n\n shape = tile.shape\n\n # defaults\n self.assertTrue(shape[0] == 32)\n self.assertTrue(shape[1] == 65)\n self.assertTrue(shape[2] == 65)\n self.assertTrue(shape[3] == 1)\n\n #\n img0 = self.test_data_1[0:65, 0:65]\n np.array_equal(tile, img0)\n\n # no overlap\n tile = next(tile_no_gen)\n img0 = self.test_data_1[65 : 2 * 65, 65 : 2 * 65]\n np.array_equal(tile, img0)\n\n # --- overlapping --- #\n tile_no_gen = TiledIterator(\n twod_image=self.test_file_1, overlap_log_2=2\n )\n\n tile = next(tile_no_gen)\n\n shape = tile.shape\n\n # defaults\n self.assertTrue(shape[0] == 32)\n self.assertTrue(shape[1] == 65)\n self.assertTrue(shape[2] == 65)\n self.assertTrue(shape[3] == 1)\n\n #\n img0 = self.test_data_1[0:65, 0:65]\n np.array_equal(tile, img0)\n\n # 64/(2**2) = 16\n tile = next(tile_no_gen)\n img0 = self.test_data_1[16 : 16 + 65, 16 : 16 + 65]\n np.array_equal(tile, img0)",
"def enumerate_tiles(self):\n # Iterates through entire game board.\n for row in range(self.rows):\n for col in range(self.cols):\n\n # Doesn't count mines adjacent to mine tiles.\n if self.board[row][col].category == Tiles.mine:\n continue\n mines = 0\n\n # Calculates number of mines surrounding each tile.\n for i in [row-1, row, row+1]:\n for j in [col-1, col, col+1]:\n if (self.valid_tile(i, j) and self.board[i][j].category == Tiles.mine):\n mines += 1\n \n # Sets each game board tile's mine proximity number.\n self.board[row][col] = Tiles(row, col, str(mines))",
"def get_tiles(self) -> list:\n n_rows = self.mosaic_dimensions[0]\n n_columns = self.mosaic_dimensions[1]\n return [\n self.get_tile(i_row, i_column)\n for i_row in range(n_rows)\n for i_column in range(n_columns)\n ]",
"def get_tiles():\n\t\t\n\tcursor = get_cursor()\n\t\n\tcursor.execute(\"SELECT * FROM fitmeimages ORDER BY shade ASC, id ASC\")\n\treturn cursor.fetchall();"
]
| [
"0.71309334",
"0.66754884",
"0.65807",
"0.6471359",
"0.6469424",
"0.64363986",
"0.64131284",
"0.63628995",
"0.63628465",
"0.63358223",
"0.63254714",
"0.63083833",
"0.6305903",
"0.6300953",
"0.6284223",
"0.6275323",
"0.62566996",
"0.6220742",
"0.6201428",
"0.6201267",
"0.6191593",
"0.61846125",
"0.6165361",
"0.61475515",
"0.61323804",
"0.61023384",
"0.60958385",
"0.60906535",
"0.6075726",
"0.60717076"
]
| 0.69980603 | 1 |
All tiles that should be in MBTiles but are missing | def missing_tiles(mbtiles, required_tiles):
for tile in required_tiles:
if not mbtiles.tile_exists(tile.x, tile.y, tile.z):
yield tile | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def redundant_tiles(mbtiles, required_tiles):\n xyz_dict= lambda: defaultdict(xyz_dict)\n\n # Mark all tiles that are required\n marked_tiles = xyz_dict()\n for tile in required_tiles:\n marked_tiles[tile.z][tile.x][tile.y] = True\n\n\n for tile in mbtiles.all_tiles():\n required = marked_tiles[tile.z][tile.x][tile.y]\n if required != True:\n yield tile",
"def test_generate_tiles(self):\n tile_list = utils.generate_tiles()\n self.assertEqual(len(tile_list), utils.TILE_COUNT)",
"def open_tiles(self):\n return list(filter(None, self.empty))",
"def test_room_has_tiles(self):\n self.assertEqual(self.room.tile_set.count(), self.room.grid_size ** 2)",
"def test_room_has_tiles(self):\n self.assertGreaterEqual(self.room.tile_set.count(), 2)",
"def scns2tilecache_all_avail(self):\n scn_lst = self.get_scnlist_tilecache()\n for scn in scn_lst:\n self.scn2tilecache(scn)",
"def isTileBlank(tile):\n for b in tile:\n if b: return False\n return True",
"def any_empty_tiles(self):\n for i in range(self.TILES_PER_ROW):\n for j in range(self.TILES_PER_ROW):\n if self.main_grid_values[i][j] == 0:\n return True\n\n return False",
"def test_generate_tiles_2(self):\n tile_list = utils.generate_tiles()\n self.assertIsInstance(tile_list[0], Tile)",
"def get_empty_tiles(self) -> List[Point]:\n\t\tempty_tiles = []\n\t\tfor x in range(self.size):\n\t\t\tfor y in range(self.size):\n\t\t\t\tif self.tiles[x][y] == 0:\n\t\t\t\t\tempty_tiles.append(Point(x,y))\n\t\treturn empty_tiles",
"def test_that_tiling_ignores_padding_if_web_friendly_internal_tiles_exist():\n with rasterio.open(COG_WEB_TILED) as src_dst:\n arr, _ = reader.tile(\n src_dst, 147, 182, 9, tilesize=256, padding=0, resampling_method=\"bilinear\"\n )\n arr2, _ = reader.tile(\n src_dst,\n 147,\n 182,\n 9,\n tilesize=256,\n padding=100,\n resampling_method=\"bilinear\",\n )\n assert numpy.array_equal(arr, arr2)",
"def clear_tiles(self):\n for y in range(Settings.SIZE_Y):\n for x in range(Settings.SIZE_X):\n self.__tile_grid[y][x].configure(\n image=self.__marker_images[MarkerType.NONE])",
"def remove_tiles(self, num):\r\n drawntiles = [self.db.tilestring.pop(random.randrange(len(self.db.tilestring))) for _ in xrange(num)]\r\n return drawntiles",
"def test_tiles_to_bounds():\n tiles = [morecantile.Tile(x=150, y=182, z=9), morecantile.Tile(x=151, y=182, z=9)]\n assert len(utils.tiles_to_bounds(tiles)) == 4",
"def discard_tile(self):\n raise NotImplemented()",
"def find_tiles(self):\n lat1, lat2 = self.bbox.south, self.bbox.north\n lon1, lon2 = self.bbox.west, self.bbox.east\n # convert to geographic bounding box\n minlat, minlon = min(lat1, lat2), min(lon1, lon2)\n maxlat, maxlon = max(lat1, lat2), max(lon1, lon2)\n\n # convert to tile-space bounding box\n _, xmin, ymin = self.mercator(maxlat, minlon, self.zoom)\n _, xmax, ymax = self.mercator(minlat, maxlon, self.zoom)\n\n # generate a list of tiles\n xs, ys = range(xmin, xmax + 1), range(ymin, ymax + 1)\n tile_list = [(self.zoom, x, y) for (y, x) in product(ys, xs)]\n\n return tile_list",
"def findTiles(request, tree, removeHeadLinks=False, ignoreHeadTiles=False):\n \n tiles = []\n baseURL = request.getURL()\n\n # Find tiles in the head of the page\n if not ignoreHeadTiles or removeHeadLinks:\n for tileNode in headTileXPath(tree):\n tileHref = tileNode.get('href', None)\n\n if tileHref is not None:\n tileId = \"__tile_%s\" % uuid.uuid4()\n tileHref = urljoin(baseURL, tileHref)\n \n if removeHeadLinks:\n tileNode.getparent().remove(tileNode)\n tileNode = None\n \n if not ignoreHeadTiles:\n tiles.append((tileId, tileHref, tileNode,))\n\n # Find tiles in the body\n for tileNode in tree.getroot().cssselect(\".tile-placeholder\"):\n tileId = tileNode.get('id', None)\n tileHref = tileNode.get('data-tile-href', None)\n\n if tileHref is not None:\n \n # If we do not have an id, generate one\n if tileId is None:\n tileId = \"__tile_%s\" % uuid.uuid4()\n tileNode.attrib['id'] = tileId\n \n tileHref = urljoin(baseURL, tileHref)\n tiles.append((tileId, tileHref, tileNode,))\n\n return tiles",
"def check(self):\n return self.tile==\"\"",
"def __init__tiles__(self):\n return [[Tiles(i, j, Tiles.closed) for j in range(self.cols)] for i in range(self.rows)]",
"def tile_set():\n TILES = {\n \"ocean\":\"~\"\n ,\"rock\":\"R\"\n ,\"mountain\":\"M\"\n ,\"player\":\"X\"\n ,\"end\":\"⋆\"\n ,\"npc\":\"I\"\n ,\"cave\":\"C\"\n ,\"dirt\":\"+\"\n ,\"sign\":\"!\"\n }\n\n return TILES",
"def blaze(self):\n visited = set()\n tile_exits = dict((tile, {}) for tile in self.tiles)\n\n def visit(tile):\n # Randomized depth-first search of self.tiles.\n visited.add(tile)\n adj = self.adjacencies(tile, self.tiles)\n self.rand.shuffle(adj)\n for d, t in adj:\n if t not in visited:\n tile_exits[tile][d] = t\n tile_exits[t][self._inverted_dirs[d]] = tile\n visit(t)\n\n visit(next(iter(self.tiles)))\n return tile_exits",
"def readTiles(self):\n TileImage = Image.open(self.Filename).convert(\"RGB\")\n TileIW, TileIH = TileImage.size\n TilesetW, TilesetH = TileIW // self.TileWidth, TileIH // self.TileHeight\n\n for y in range(TilesetH):\n for x in range(TilesetW):\n box = self.TileWidth * x, self.TileHeight * y, self.TileWidth * (x+1), self.TileHeight * (y+1)\n tile = TileImage.crop(box)\n self.List.append(tile)\n\n str = tile.tostring()\n if not str in self.TileDict:\n #print(\"add tile: \", str)\n self.TileDict[str] = len(self.List) - 1\n print(\"tile count: {}, unique count: {}\".format(len(self.List),len(self.TileDict.values())))",
"def __get_neutral_tiles(self) -> List[List[int]]:\n neutral_tiles = []\n for x in range(self.num_columns):\n for y in range(self.num_rows):\n if self.is_flippable_tile([y, x]):\n neutral_tiles.append([y, x])\n return neutral_tiles",
"def render_tiles(self, tiles):\n for row in tiles:\n for tile in row:\n if tile is not None:\n if tile.height < 0:\n color = (0, 100, 0)\n else:\n z = max(0, tile.height)\n color = tuple([z * 255] * 3)\n self.surface.set_at((tile.x, tile.y), color)",
"def _clear_map(self, default=100):\r\n self.tiles = [\r\n [default\r\n for _ in range(self.height)]\r\n for _ in range(self.width)]\r\n\r\n for (x, y, score) in self.goals:\r\n self.tiles[x][y] = score\r\n\r\n for (x,y) in self.walls:\r\n self.tiles[x][y] = np.nan",
"def list_tiles_covering_land(self):\n\n land_tiles = Equi7Grid._static_data[self.core.tag][\"coverland\"][\n self.core.tiletype]\n return list(land_tiles)",
"def __init__(self, tiles):\n self.tiles = tiles",
"def test_tiled_iterator_nogen(self):\n tile_no_gen = TiledIterator(\n twod_image=self.test_file_1, overlap_log_2=0\n )\n tile = next(tile_no_gen)\n\n shape = tile.shape\n\n # defaults\n self.assertTrue(shape[0] == 32)\n self.assertTrue(shape[1] == 65)\n self.assertTrue(shape[2] == 65)\n self.assertTrue(shape[3] == 1)\n\n #\n img0 = self.test_data_1[0:65, 0:65]\n np.array_equal(tile, img0)\n\n # no overlap\n tile = next(tile_no_gen)\n img0 = self.test_data_1[65 : 2 * 65, 65 : 2 * 65]\n np.array_equal(tile, img0)\n\n # --- overlapping --- #\n tile_no_gen = TiledIterator(\n twod_image=self.test_file_1, overlap_log_2=2\n )\n\n tile = next(tile_no_gen)\n\n shape = tile.shape\n\n # defaults\n self.assertTrue(shape[0] == 32)\n self.assertTrue(shape[1] == 65)\n self.assertTrue(shape[2] == 65)\n self.assertTrue(shape[3] == 1)\n\n #\n img0 = self.test_data_1[0:65, 0:65]\n np.array_equal(tile, img0)\n\n # 64/(2**2) = 16\n tile = next(tile_no_gen)\n img0 = self.test_data_1[16 : 16 + 65, 16 : 16 + 65]\n np.array_equal(tile, img0)",
"def get_tiles():\n\t\t\n\tcursor = get_cursor()\n\t\n\tcursor.execute(\"SELECT * FROM fitmeimages ORDER BY shade ASC, id ASC\")\n\treturn cursor.fetchall();",
"def copy_tiles(self):\n \n return self.tiles"
]
| [
"0.7078869",
"0.6778325",
"0.6713871",
"0.6635691",
"0.659477",
"0.65015763",
"0.6449895",
"0.64383185",
"0.6418814",
"0.63844216",
"0.6355032",
"0.6353515",
"0.6344302",
"0.63361865",
"0.6327802",
"0.62926275",
"0.62705094",
"0.626863",
"0.6231602",
"0.6198358",
"0.6195752",
"0.6190737",
"0.6171813",
"0.61491454",
"0.61272085",
"0.6113414",
"0.6113043",
"0.6103531",
"0.609656",
"0.60791856"
]
| 0.77272135 | 0 |
Calculate min, max, mean and stddev for values applying a factor. | def _calculate_stats(values, factor=1):
result = {'min': min(values) * factor,
'max': max(values) * factor,
'sum': sum(values) * factor,
'mean': 0,
'stddev': 0}
if values:
mean = sum(values) / float(len(values))
result['mean'] = factor * mean
result['stddev'] = (
factor * math.sqrt((1.0 / (len(values) - 1))
* sum((x - mean) ** 2 for x in values)))
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_statistical_measures(errors, step_error, max_error, min_error=0.):\n if isinstance(errors[0], Iterable):\n mean_val = []\n std_val = []\n median_val = []\n mad_val = []\n max_val = []\n auc_val = []\n fail_val = []\n for e in errors:\n mean_val.append(np.mean(e))\n std_val.append(np.std(e))\n median_val.append(np.median(e))\n mad_val.append(mad(e))\n max_val.append(np.max(e))\n auc_v, fail_v = area_under_curve_and_failure_rate(\n e, step_error=step_error, max_error=max_error,\n min_error=min_error)\n auc_val.append(auc_v)\n fail_val.append(fail_v)\n else:\n mean_val = np.mean(errors)\n std_val = np.std(errors)\n median_val = np.median(errors)\n mad_val = mad(errors)\n max_val = np.max(errors)\n auc_val, fail_val = area_under_curve_and_failure_rate(\n errors, step_error=step_error, max_error=max_error,\n min_error=min_error)\n return mean_val, std_val, median_val, mad_val, max_val, auc_val, fail_val",
"def value_stats(values):\n stats = describe(values)\n mean = stats.mean\n std = np.sqrt(stats.variance)\n t_stat = t.ppf(1 - 0.025, len(values) - 1)\n dev = t_stat * (std / np.sqrt(len(values)))\n trim_mean_v = trim_mean(values, 0.25)\n upper_val = mean + dev\n lower_val = mean - dev\n\n return mean, trim_mean_v, std, upper_val, lower_val",
"def summaryone(x):\n print 'mean and std are ',np.mean(x), np.std(x)\n print 'max and min are ',np.max(x), np.min(x)\n print 'the range is ',np.max(x)-np.min(x)",
"def show_stats(x):\n print(\"min =\", x.min())\n print(\"max =\", x.max())\n print(\"median =\", np.median(x))\n print(\"average =\", x.mean())\n print(\"std =\", x.std())",
"def meanstd(self):\n\t\tmean = [125.3, 123.0, 113.9] # R,G,B\n\t\tstddev = [63.0, 62.1, 66.7] # R,G,B\n\t\treturn [mean, stddev]",
"def get_mean_stddev(self):\n return self.get_mean(), self.get_std_dev()",
"def mean_stddev(self):\n if len(self.vs) == 0:\n raise StdDevFilterException\n\n mx = self.mean()\n # compute variance\n variance = sum([(x - mx)**2 for x in self.vs])/len(self.vs)\n # return mean value and standard deviation (square root of variance)\n return mx,math.sqrt(variance)",
"def measure(mode, x, y, x0, x1):\n xm = ma.masked_outside(x, x0, x1)\n ym = ma.array(y, mask = ma.getmask(xm))\n if mode == 'mean':\n r1 = ma.mean(ym)\n r2 = ma.std(ym)\n if mode == 'max':\n r1 = ma.max(ym)\n r2 = 0\n if mode == 'min':\n r1 = ma.min(ym)\n r2 = 0\n if mode == 'median':\n r1 = ma.median(ym)\n r2 = 0\n if mode == 'p2p': # peak to peak\n r1 = ma.ptp(ym)\n r2 = 0\n return(r1, r2)",
"def deviation(values, val):\n\tm = mean(values)\n\tdev = abs(val-m)\n\tsd = standard_deviation(values)\n\treturn float(dev)/sd if sd!=0 else 0.0",
"def std(self):\n\n return self._reduce_for_stat_function(F.stddev, only_numeric=True)",
"def normalize_inputs(df, metrics):\n for m in metrics:\n mean = np.mean(df[m])\n stdev = np.std(df[m])\n def std_normalize(x):\n return (x - mean) / stdev\n #df[m] = df[m].map(std_normalize)\n xmin = min(df[m])\n xmax = max(df[m])\n def minmax_normalize(x):\n return (x - xmin) / (xmax - xmin)\n df[m] = df[m].map(minmax_normalize)\n return df",
"def get_stddev(self):\r\n for i in range(1,len(self.data[0])):\r\n self.stddev.append(np.std(self.data[:,i]))",
"def _standardize(self):\n deviation = np.std(self.series)\n self.series = (self.series - np.mean(self.series)) / (deviation if deviation != 0 else 1)",
"def standardize(x, mean=None, std=None): \n \n mean = mean if mean is not None else x.mean(axis=0)\n std = std if std is not None else x.std(axis=0) \n \n return (x - mean) / std, mean, std",
"def fit(self, df):\n self.df_std = df.std(axis=0, skipna=True)\n self.df_mean = df.mean(axis=0, skipna=True)\n return self",
"def sd(vals):",
"def avg_dev(self, tmin=None, tmax=None):\n res = self.ml.residuals(tmin=tmin, tmax=tmax)\n return res.mean()",
"def normalize(values):\n return (values - np.mean(values)) / np.std(values)",
"def std_outliers(res, stats, factor):\n col_name1 = 'mean + std*' + str(factor)\n std1 = (stats['mean'] + (stats['standard deviation']*factor))\n std1.name = col_name1\n\n col_name2 = 'mean - std*' + str(factor)\n std2 = (stats['mean'] - (stats['standard deviation']*factor))\n std2.name = col_name2\n\n std2.loc[std2 < 0] = 0\n\n std = pd.concat([std1, std2], axis=1)\n\n data1 = pd.merge(res.reset_index(), std.reset_index(), on=['Site', 'Measurement'])\n data2 = data1[data1['Value'] > data1[col_name1]]\n data3 = data1[data1['Value'] < data1[col_name2]]\n\n data4 = pd.concat([data2, data3])\n\n return data4",
"def collect_rms(self, rms):\n if self._data:\n self._data['min'] = min(rms, self._data['min'])\n self._data['max'] = max(rms, self._data['max'])\n self._data['avg'] = float(rms + self._data['avg']) / 2\n else:\n self._data['min'] = rms\n self._data['max'] = rms\n self._data['avg'] = rms",
"def MeanAndStandardDeviation(data):\n n = len(data)\n if n == 0:\n return 0.0, 0.0\n mean = float(sum(data)) / n\n variance = sum([(element - mean)**2 for element in data]) / n\n return mean, math.sqrt(variance)",
"def stdev(values):\n mean = avg(values)\n diffs = [(value - mean) ** 2 for value in values]\n return avg(diffs) ** 0.5",
"def mean_std_calc(dataloader):\n mean = 0\n std = 0\n samples = 0\n for data, _, _ in dataloader:\n batch_samples = data.size(0)\n data = data.view(batch_samples, data.size(1), -1)\n mean += data.mean(2).sum(0)\n std += data.std(2).sum(0)\n samples += batch_samples\n\n return (mean / samples),(std / samples)",
"def __init__(self):\n self.mean = 0.0\n self.std = 1.0",
"def _calculate_std(self, lc):\n if self.std is None:\n std = np.mean(lc)**0.5\n elif isinstance(self.std, collections.Iterable):\n std = np.mean(self.std) # Iterable of numbers\n else: # Single float number\n std = self.std\n\n return std",
"def statistics_from_array(x: numpy.ndarray):\n try:\n return x.mean(), x.std(), x.max(), x.min()\n except AttributeError:\n return numpy.nan, numpy.nan, numpy.nan, numpy.nan",
"def std(mean, vals):\n return sqrt(sum([(i-mean)**2 for i in vals])/len(vals))",
"def get_mean_and_std(arr):\r\n return np.round(np.mean(arr), 3), np.round(np.std(arr), 3)",
"def stddev(self, num_list):\n try:\n mean = self.average(num_list)\n\n minus_mean = []\n\n for number in num_list:\n try:\n minus_mean.append((number - mean) ** 2)\n except Exception as e:\n print(\"Error: \", e)\n\n meany_mean = self.average(minus_mean)\n\n meany_mean = meany_mean ** .5\n\n except Exception as e:\n print(\"Error: \", e)\n\n return meany_mean",
"def calc_stat_values(self):"
]
| [
"0.64670855",
"0.62839454",
"0.6065232",
"0.60381913",
"0.6000552",
"0.59957784",
"0.59866536",
"0.5935536",
"0.5907073",
"0.5857234",
"0.58116394",
"0.580348",
"0.5799028",
"0.5774109",
"0.577322",
"0.5765237",
"0.57508856",
"0.5731915",
"0.57177246",
"0.56852514",
"0.5653293",
"0.56511784",
"0.56482935",
"0.56430626",
"0.56282264",
"0.56250876",
"0.5619791",
"0.5611962",
"0.56072855",
"0.5607022"
]
| 0.7550818 | 0 |