Dataset columns:
- query: string (length 9 to 9.05k)
- document: string (length 10 to 222k)
- metadata: dict
- negatives: sequence of 30 strings
- negative_scores: sequence of 30 values
- document_score: string (length 4 to 10)
- document_rank: string (2 classes)
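A minimal sketch of loading and inspecting a split with this schema via the Hugging Face datasets library; the repository id below is a placeholder, not the actual dataset name.

from datasets import load_dataset

# Placeholder repo id; substitute the real dataset repository.
ds = load_dataset("org/code-retrieval-triplets", split="train")

row = ds[0]
print(row["query"])               # natural-language description of the code
print(row["document"][:200])      # positive code document
print(len(row["negatives"]))      # 30 hard negatives per query
print(row["negative_scores"][:3], row["document_score"], row["document_rank"])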
If no start_params are given, use reasonable defaults.
def _get_start_params(self, start_params=None):
    if start_params is None:
        if hasattr(self, 'start_params'):
            start_params = self.start_params
        elif self.exog is not None:
            # fails for shape (K,)?
            start_params = [0] * self.exog.shape[1]
        else:  # pragma: no cover
            raise ValueError("If exog is None, then start_params should "
                             "be specified")
    return start_params
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _apply_params(self):\n config = self.get_startup_config()\n # Pass true to _set_params so we know these are startup values\n self._set_params(config, True)", "def start( *args, **kwargs ):", "def ReviewServiceArgs(cls, start = False):\n return (start,)", "def prep_streaming_params(self, stop: Optional[List[str]] = None) -> Dict[str, Any]:\n params = self._invocation_params\n # if params[\"best_of\"] != 1:\n # raise ValueError(\"OpenAI only supports best_of == 1 for streaming\")\n if stop is not None:\n if \"stop\" in params:\n raise ValueError(\"`stop` found in both the input and default params.\")\n params[\"stop\"] = stop\n params[\"stream\"] = True\n return params", "def __init__(__self__, *,\n start_time: Optional[pulumi.Input[str]] = None):\n if start_time is not None:\n pulumi.set(__self__, \"start_time\", start_time)", "def _use_default_params(self):\n self.params = {\n # Desktop window params\n 'pos': (100, 100),\n 'lock_pos': False,\n # Font params\n 'default_font': 'Sans 9',\n # Lessons colors\n 'lecture_color': '#009566660000',\n 'laboratory_color': '#987600000000',\n 'practice_color': '#188820eda89b',\n 'non_color': '#0000849acdf4',\n 'day_color': '#000000000000',\n # Window style\n 'full_transparent': True,\n 'window_color': '#5ad65ad65ad6',\n 'transparent_percent': 50.0,\n # View schedule settings\n 'view_sch': [True, True, True, True, True]\n }\n self.save_params()", "def defaultRegridParams(self):\n\n casalog.origin(\"ParallelDataHelper\")\n \n if self.__args['mode'] == 'channel' or self.__args['mode'] == 'channel_b':\n self.__args['start'] = str(self.__args['start'])\n self.__args['width'] = str(self.__args['width'])\n \n elif self.__args['mode'] == 'velocity':\n restfreq = self.__args['restfreq']\n if restfreq == \"\" or restfreq.isspace():\n raise ValueError, \"Parameter restfreq must be set when mode='velocity'\"\n \n if self.__args['start'] == 0:\n self.__args['start'] = ''\n \n if self.__args['width'] == 1:\n self.__args['width'] = ''\n \n\n # Check if the parameter has valid velocity units\n if not self.__args['start'] == '':\n if (qa.quantity(self.__args['start'])['unit'].find('m/s') < 0):\n raise TypeError, 'Parameter start does not have valid velocity units'\n \n if not self.__args['width'] == '':\n if (qa.quantity(self.__args['width'])['unit'].find('m/s') < 0):\n raise TypeError, 'Parameter width does not have valid velocity units'\n \n elif self.__args['mode'] == 'frequency':\n if self.__args['start'] == 0:\n self.__args['start'] = ''\n if self.__args['width'] == 1:\n self.__args['width'] = ''\n \n # Check if the parameter has valid frequency units\n if not self.__args['start'] == '':\n if (qa.quantity(self.__args['start'])['unit'].find('Hz') < 0):\n raise TypeError, 'Parameter start does not have valid frequency units'\n \n if not self.__args['width'] == '':\n if (qa.quantity(self.__args['width'])['unit'].find('Hz') < 0):\n raise TypeError, 'Parameter width does not have valid frequency units' \n \n start = self.__args['start']\n width = self.__args['width']\n \n return start, width", "def set_default_parameters(self):\n super().set_default_parameters()", "def start(self, **kwargs):\n pass", "def start(self, **kwargs):\n pass", "def init_params(self):\n blah", "def pre_setup(self, sch_params: Optional[Mapping[str, Any]]) -> Optional[Mapping[str, Any]]:\n return sch_params", "def initialize_params(self, params):\n pass", "def __init__(__self__, *,\n end: pulumi.Input[str],\n start: pulumi.Input[str]):\n pulumi.set(__self__, \"end\", end)\n 
pulumi.set(__self__, \"start\", start)", "def set_params(self, params: Dict):\n\n if params['training_instances'] is not None:\n self.training_instances = params['training_instances']\n if params['n'] is not None:\n self.n = params['n']\n if params['lda'] is not None:\n self.lda = params['lda']\n if params['verbose'] is not None:\n self.verbose = params['verbose']\n\n self.num_features = self.training_instances[0].get_feature_count()\n self.w = None\n self.b = None", "def _init_start(self):\n def start(core, args):\n task = ' '.join(args.task) if args.task else ''\n return core.start(task=task)\n\n usage = 'stl start [task]'\n desc = (\n 'make a log that you are starting to work'\n )\n\n subp = self.subparsers.add_parser(\n 'start', usage=usage, description=desc, help=desc)\n\n subp.add_argument(\n 'task', nargs=argparse.REMAINDER,\n help='the task that you are about to start working on')\n\n subp.set_defaults(func=start)", "def test_startup_params(self):\n self.assertTrue(self.driver._protocol._param_dict.get(\"foo\"), 10)\n self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 15)\n # make sure some value isnt the default value\n self.driver._protocol._param_dict.update(\"bar=20\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 20)\n self.driver._protocol._param_dict.update(\"baz=30\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"baz\"), 30)\n\n # pretend to manually adjust a few things:\n self.driver._protocol._param_dict.update(\"foo=1000\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"foo\"), 1000)\n self.driver._protocol._param_dict.update(\"bar=1500\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 1500)\n self.driver._protocol._param_dict.update(\"baz=2000\")\n self.assertTrue(self.driver._protocol._param_dict.get(\"baz\"), 2000)\n\n # pretend to go through the motions of a startup sequence\n self.driver.set_init_params({'foo': 100, \"bar\": 150, \"baz\": 200})\n\n # Now a virtual method in the protocol that asserts when not implemented\n # behavior proven in derived protocol classes\n # self.driver.apply_startup_params()\n\n # check the values on the other end\n # running_config = self.driver._protocol.get_cached_config()\n\n # confirm that the default values were set back appropriately.\n # self.assertTrue(self.driver._protocol._param_dict.get(\"foo\"), 100)\n # self.assertTrue(self.driver._protocol._param_dict.get(\"bar\"), 150)\n # self.assertTrue(self.driver._protocol._param_dict.get(\"baz\"), 2000)\n # self.assertTrue(self.driver._protocol._param_dict.get(\"bat\"), 40)\n\n ##### Integration tests for startup config in the SBE37 integration suite", "def update_start_values(self, params):\n allparwids = {}\n for comp in self.fit_components.values():\n if comp.usebox is not None and comp.usebox.IsChecked():\n for name, parwids in comp.parwids.items():\n allparwids[name] = parwids\n\n for pname, par in params.items():\n if pname in allparwids:\n allparwids[pname].value.SetValue(par.value)", "def set_params(self):\r\n pass", "def set_params(self, **kwargs):\n if 'nbins' in kwargs:\n self._nbins = kwargs['nbins']\n if self._nbins != 'auto':\n self._nbins = int(self._nbins)\n if 'symmetric' in kwargs:\n self._symmetric = kwargs['symmetric']\n if 'prune' in kwargs:\n prune = kwargs['prune']\n if prune is not None and prune not in ['upper', 'lower', 'both']:\n raise ValueError(\n \"prune must be 'upper', 'lower', 'both', or None\")\n self._prune = prune\n if 'min_n_ticks' in kwargs:\n self._min_n_ticks = max(1, 
kwargs['min_n_ticks'])\n if 'steps' in kwargs:\n steps = kwargs['steps']\n if steps is None:\n self._steps = [1, 1.5, 2, 2.5, 3, 4, 5, 6, 8, 10]\n else:\n self._steps = self._validate_steps(steps)\n self._extended_steps = self._staircase(self._steps)\n if 'integer' in kwargs:\n self._integer = kwargs['integer']", "def set_start_time():\n __start = current_time_milli()", "def start(total_param):\n global start_time\n global total\n\n if type(total_param) is list:\n total_param = len(total_param)\n if type(total_param) is not int:\n sys.exit(\"bad total_param. Should be list or int.\")\n\n start_time = time.time()\n total = total_param", "def _default_params(self) -> Dict[str, Any]:\n normal_params = {\n \"temperature\": self.temperature,\n \"max_tokens\": self.max_tokens,\n \"top_p\": self.top_p,\n \"frequency_penalty\": self.frequency_penalty,\n \"presence_penalty\": self.presence_penalty,\n \"n\": self.n,\n # \"best_of\": self.best_of,\n \"request_timeout\": self.request_timeout,\n \"logit_bias\": self.logit_bias,\n }\n return {**normal_params, **self.model_kwargs}", "def _set_start(self, coordinates):\n self._start = coordinates", "def _SetRunParameters(self, params: Mapping[str, Any]) -> None:\n # Ideally YCSB should be refactored to include a function that just takes\n # commands for a run, but that will be a large refactor.\n FLAGS['ycsb_run_parameters'].unparse()\n FLAGS['ycsb_run_parameters'].parse([f'{k}={v}' for k, v in params.items()])", "def set_params(self, **params):\n if('threshold' in params.keys()):\n self.threshold = params['threshold']\n if('subsample' in params.keys()):\n self.subsample = params['subsample']\n if('estimator' in params.keys()):\n self.estimator = params['estimator']\n if('n_folds' in params.keys()):\n self.n_folds = params['n_folds']\n if('stratify' in params.keys()):\n self.stratify = params['stratify']\n if('random_state' in params.keys()):\n self.random_state = params['random_state']\n if('n_jobs' in params.keys()):\n self.n_jobs = params['n_jobs']", "def __init__(**params):", "def test_call_default_params(self):\r\n\r\n # adapted from test_app.test_cd_hit.test_cdhit_clusters_from_seqs\r\n\r\n exp = {'0': 'R27DLI_4812',\r\n '1': 'U1PLI_7889',\r\n '2': 'W3Cecum_4858',\r\n '3': 'R27DLI_3243',\r\n }\r\n app = GenericRepSetPicker(params={'Algorithm': 'first',\r\n 'ChoiceF': first_id})\r\n obs = app(self.tmp_seq_filepath, self.tmp_otu_filepath)\r\n self.assertEqual(obs, exp)", "def start_workflow(self, **params):\n raise NotImplementedError", "def set_params(self, **kwargs) -> NoReturn:\n pass" ]
[ "0.6189463", "0.596062", "0.5936523", "0.5922948", "0.5908743", "0.5905468", "0.58663934", "0.5828694", "0.5814522", "0.5814522", "0.5756695", "0.5748159", "0.5678698", "0.5660043", "0.5645459", "0.56087685", "0.56059563", "0.5576346", "0.5533934", "0.5510091", "0.55027384", "0.54661244", "0.54416907", "0.5432734", "0.5428722", "0.54077", "0.5406224", "0.539342", "0.5391937", "0.53904986" ]
0.73069286
0
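The objective block above marks each record as a (query, document, negatives) triplet; the parallel negative_scores list appears to hold one retrieval score per negative, while document_score and document_rank score the positive document. A small, library-free sketch of expanding one record into training triplets follows; the field names follow the schema, but the make_triplets helper itself is hypothetical.

def make_triplets(row):
    # One (anchor, positive, negative) triplet per hard negative.
    return [(row["query"], row["document"], neg) for neg in row["negatives"]]

# Usage: triplets = make_triplets(ds[0])  # -> 30 triplets for this record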
Compute a sequence of Wald tests for terms over multiple columns. This computes joined Wald tests for the hypothesis that all coefficients corresponding to a `term` are zero. `Terms` are defined by the underlying formula or by string matching.
def wald_test_terms(self, skip_single=False, extra_constraints=None,
                    combine_terms=None):  # noqa:E501
    result = self
    if extra_constraints is None:
        extra_constraints = []
    if combine_terms is None:
        combine_terms = []
    design_info = getattr(result.model.data, 'design_info', None)
    if design_info is None and extra_constraints is None:
        raise ValueError('no constraints, nothing to do')
    identity = np.eye(len(result.params))
    constraints = []
    combined = defaultdict(list)
    if design_info is not None:
        for term in design_info.terms:
            cols = design_info.slice(term)
            name = term.name()
            constraint_matrix = identity[cols]
            # check if in combined
            for cname in combine_terms:
                if cname in name:
                    combined[cname].append(constraint_matrix)
            k_constraint = constraint_matrix.shape[0]
            if skip_single:
                if k_constraint == 1:
                    continue
            constraints.append((name, constraint_matrix))
        combined_constraints = []
        for cname in combine_terms:
            combined_constraints.append((cname, np.vstack(combined[cname])))
    else:
        # check by exog/params names if there is no formula info
        for col, name in enumerate(result.model.exog_names):
            constraint_matrix = identity[col]
            # check if in combined
            for cname in combine_terms:
                if cname in name:
                    combined[cname].append(constraint_matrix)
            if skip_single:
                continue
            constraints.append((name, constraint_matrix))
        combined_constraints = []
        for cname in combine_terms:
            combined_constraints.append((cname, np.vstack(combined[cname])))
    use_t = result.use_t
    distribution = ['chi2', 'F'][use_t]
    res_wald = []
    index = []
    for pair in constraints + combined_constraints + extra_constraints:
        name, constraint = pair
        wt = result.wald_test(constraint)
        row = [wt.statistic.item(), wt.pvalue, constraint.shape[0]]
        if use_t:
            row.append(wt.df_denom)
        res_wald.append(row)
        index.append(name)
    # distribution neutral names
    col_names = ['statistic', 'pvalue', 'df_constraint']
    if use_t:
        col_names.append('df_denom')
    # TODO: maybe move DataFrame creation to results class
    table = pd.DataFrame(res_wald, index=index, columns=col_names)
    res = WaldTestResults(None, distribution, None, table=table)
    # TODO: remove temp again, added for testing
    res.temp = constraints + combined_constraints + extra_constraints
    return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all(any, shard, shard_term_features, qterms):\n tmp = 1\n for t in qterms:\n if t in shard_term_features[shard]:\n cdf = shard_term_features[shard][t].df\n else:\n cdf = 0\n tmp *= cdf/any\n all = tmp * any\n return all", "def evaluate_terms(terms):\n expr_terms = [x for x in terms]\n\n while expr_terms.count('^') != 0:\n expr_terms = eval_expon(expr_terms)\n\n while MUL_DIV_RE.search(str(expr_terms)) is not None:\n expr_terms = eval_a_op_b(expr_terms, 'md')\n\n while len(expr_terms) != 1:\n expr_terms = eval_a_op_b(expr_terms, 'pm')\n\n return expr_terms[0]", "def test_ccsd_doubles_a_terms(parthole_drudge):\n\n dr = parthole_drudge\n p = dr.names\n\n a, b, c, d = p.V_dumms[:4]\n i, j, k, l = p.O_dumms[:4]\n u = dr.two_body\n t = IndexedBase('t')\n dr.set_dbbar_base(t, 2)\n\n tau = dr.define_einst(\n IndexedBase('tau')[a, b, i, j],\n Rational(1, 2) * t[a, b, i, j] + t[a, i] * t[b, j]\n )\n\n a_i = dr.define_einst(\n IndexedBase('ai')[k, l, i, j], u[i, c, k, l] * t[c, j]\n )\n\n a_ = dr.define(\n IndexedBase('a')[k, l, i, j],\n u[k, l, i, j] +\n a_i[k, l, i, j] - a_i[k, l, j, i]\n + u[k, l, c, d] * tau[c, d, i, j]\n )\n\n tensor = dr.define_einst(\n IndexedBase('r')[a, b, i, j],\n a_[k, l, i, j] * tau[a, b, k, l]\n )\n targets = [tensor]\n\n eval_seq = optimize(\n targets, substs={p.nv: p.no * 10}, strategy=Strategy.ALL | Strategy.SUM\n )\n assert verify_eval_seq(eval_seq, targets)\n # Here we just assert that the final step is a simple product.\n assert len(eval_seq[-1].rhs_terms) == 1", "def all_terms(cls, *terms: str) -> \"IFilterPattern\":\n return jsii.sinvoke(cls, \"allTerms\", [*terms])", "def test_term(self):\n\t\tterm_one = schrodinger.term(0)\n\t\tself.assertEqual(1, term_one(0).numpy())\n\t\tterm_two = schrodinger.term(1)\n\t\tself.assertEqual(0, term_two(0).numpy())", "def collect_like_terms(term_matrix):\n t = [term[:] for term in term_matrix]\n for i, term in enumerate(t, start=1):\n if i < len(t) - 1:\n for j in range(i+1, len(t)):\n if t[i][1:] == t[j][1:]:\n t[i] = [t[i][0] + t[j][0]] + t[i][1:]\n t[j][0] = 0\n # get rid of 0 terms\n t = [u for u in t if u[0] != 0]\n # get rid of extra variables\n if len(t[0]) > 0:\n for i in reversed(range(len(t[0]))):\n # in reverse so deletion doesn't affect index of subsequent variables\n extra = True\n if len(t) > 0:\n for term in t[1:]:\n try:\n if term[i] != 0:\n extra = False\n except IndexError:\n extra = True\n if extra:\n for term in t:\n try:\n del term[i]\n except IndexError:\n pass\n if t == [[]]:\n return [['constant']]\n return t", "def calc_tdf(docs):\r\n terms = set()\r\n for doc in docs:\r\n for term in doc:\r\n terms.add(term)\r\n tdf = {}\r\n for term in terms:\r\n doc_count = 0\r\n for doc in docs:\r\n doc_count += 1 if term in doc else 0\r\n tdf[term] = doc_count\r\n return tdf", "def eval_all_combinations(docs, labels, punct_vals,\n feature_fns, min_freqs):\n# result = []\n\n# for punct_val in punct_vals:\n# tokens = [tokenize(doc, punct_val) for doc in docs]\n# for f in [comb for i in range(len(feature_fns)) for comb in combinations(feature_fns, i+1)]:\n# feature = list(f)\n\n# for min_freq in min_freqs:\n# clf = LogisticRegression()\n# X, vocab = vectorize(tokens, feature, min_freq)\n# accuracy = cross_validation_accuracy(clf, X, labels, 5)\n# result.append(dict(punct= punct_val, features= feature, min_freq= min_freq, accuracy = accuracy))\n\n# return sorted(result, key=lambda x:(-x['accuracy'],-x['min_freq']))\n clf = LogisticRegression()\n result = []\n output = []\n for l in range(1, 
len(feature_fns)+1):\n for c in combinations(feature_fns,l):\n result.append(c)\n\n for p in punct_vals:\n list_tok = [tokenize(d,p) for d in docs]\n for fl in result:\n for mf in min_freqs:\n dict_output = {}\n matrix,vocab = vectorize(list_tok, fl, mf)\n average_value = cross_validation_accuracy(clf, matrix, labels, 5)\n dict_output['features'] = fl\n dict_output['punct'] = p\n dict_output['accuracy'] = average_value\n dict_output['min_freq'] = mf\n output.append(dict_output)\n\n return sorted(output, key=lambda x: (-x['accuracy'], -x['min_freq']))", "def test_multiple(self):\n df = self.df.copy()\n for renorm in [True, False]:\n with self.subTest(renorm=renorm):\n out = nan_weighted_compositional_mean(df.values, renorm=renorm)\n if renorm:\n self.assertTrue(np.allclose(np.sum(out, axis=-1), 1.0))", "def test_null_distribution_wald(self, n_cells: int = 2000, n_genes: int = 100):\n logging.getLogger(\"tensorflow\").setLevel(logging.ERROR)\n logging.getLogger(\"batchglm\").setLevel(logging.WARNING)\n logging.getLogger(\"diffxpy\").setLevel(logging.WARNING)\n\n sim = Simulator(num_observations=n_cells, num_features=n_genes)\n sim.generate_sample_description(num_batches=0, num_conditions=0)\n sim.generate()\n\n random_sample_description = pd.DataFrame({\n \"pseudotime\": np.random.random(size=sim.nobs)\n })\n\n test = de.test.continuous_1d(\n data=sim.X,\n continuous=\"pseudotime\",\n df=3,\n formula_loc=\"~ 1 + pseudotime\",\n formula_scale=\"~ 1\",\n factor_loc_totest=\"pseudotime\",\n test=\"wald\",\n sample_description=random_sample_description,\n quick_scale=True,\n batch_size=None,\n training_strategy=\"DEFAULT\",\n dtype=\"float64\"\n )\n summary = test.summary()\n\n # Compare p-value distribution under null model against uniform distribution.\n pval_h0 = stats.kstest(test.pval, 'uniform').pvalue\n\n logging.getLogger(\"diffxpy\").info('KS-test pvalue for null model match of wald(): %f' % pval_h0)\n assert pval_h0 > 0.05, \"KS-Test failed: pval_h0 is <= 0.05!\"\n\n return True", "def vectorize(self, terms):\n features = {}\n\n if self.parameters[LexiconFeature.PARAM_ENABLED] == 'false':\n return features\n\n tones = []\n if (self.terms_used == 'all'):\n tones = [self.get_tone(term) for term in terms]\n elif (self.used_terms == 'hashtags_only'):\n tones = [self.get_tone(term) for term in terms\n if len(term) > 0 and term[0] == '#']\n\n if (len(tones) == 0):\n tones.append(0)\n\n for function_name in self.functions:\n if (function_name == 'sum'):\n value = (sum(tones))\n elif (function_name == 'max'):\n value = max(tones)\n elif (function_name == 'min'):\n value = min(tones)\n else:\n raise ValueError(\n \"unexpected function: '{}'\".format(function_name))\n\n feature_name = \"{}_{}\".format(self.get_name(), function_name)\n features[feature_name] = utils.normalize(value)\n\n #\n # Calculate sum of cluster scores\n #\n # for cluster in self.bag_of_clusters_features:\n # cluster_tones = [self.get_cluster_tone(\n # cluster, cluster.get_cluster_id(word))\n # for word in terms if cluster.contains_word(word)]\n # if len(cluster_tones) == 0:\n # cluster_tones.append(0)\n\n # feature_name = \"{}_score_sum\".format(cluster.get_name())\n # value = sum(cluster_tones)\n # features[feature_name] = utils.normalize(value)\n\n return features", "def findTerms(self, text, terms, scope=50, includeAll=True):\n\t\tlistOfResults = list()\n\t\tlistOfMatchesMain = list()\n\t\tlistOfMatchesSecondary = list()\n\n\t\tappend = listOfResults.append\n\t\treplace\t= str.replace\n\n\t\tkeywordIndices = 
self.find(text, terms[0])\n\n\t\t# loop through the indices and check for dependencies if terms list has more than 1 term\n\t\tfor indices in keywordIndices:\n\n\t\t\tleading = text[indices[0]-scope:indices[0]]\n\t\t\ttrailing = text[indices[0]:indices[0]+scope]\n\n\t\t\tleading = replace(replace(leading, '\\n', '_'), '\\t', ' ') \n\t\t\ttrailing = replace(replace(trailing, '\\n', '_'), '\\t', ' ') \n\n\t\t\t# if terms list has more than 1 term (i.e., contextual terms), see if present within scope\n\t\t\tif len(terms) > 1:\n\n\t\t\t\t# loop through the contextual terms and check for presence within scope\n\t\t\t\tfor term in terms[1:]:\n\n\t\t\t\t\t# if term in either leading or trailing\n\t\t\t\t\tif (replace(term, '*', '') in leading.lower()) or (replace(term, '*', '') in trailing.lower()):\n\n\t\t\t\t\t\t# if '*' in term, do not add this context\n\t\t\t\t\t\tif '*' in term:\n\t\t\t\t\t\t\tpass\n\n\t\t\t\t\t\t# if '*' not indicated, add this context\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\texcerpt = leading + trailing\n\n\t\t\t\t\t\t\tif excerpt not in listOfResults:\n\t\t\t\t\t\t\t\tif includeAll==True:\n\t\t\t\t\t\t\t\t\tappend(excerpt+'\\t'+text[indices[0]:indices[1]]+'\\t'+term)\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tappend(excerpt)\n\n\t\t\t# if terms list has 1 term, just append the excerpt\n\t\t\telse:\n\n\t\t\t\texcerpt = leading + trailing\n\n\t\t\t\tif excerpt not in listOfResults:\n\t\t\t\t\tif includeAll==True:\n\t\t\t\t\t\tappend(excerpt+'\\t'+text[indices[0]:indices[1]]+'\\t')\n\t\t\t\t\telse:\n\t\t\t\t\t\tappend(excerpt)\n\n\t\treturn listOfResults", "def compare_css_terms(self, x_inputs, x_samples, full_path):\n \n self.load_model_params(full_path)\n \n data_term = self.get_data_term()\n is_term = self.get_is_term()\n \n diff_var = T.log(is_term - data_term)\n \n self.add_p_tilda()\n \n get_css_diff = theano.function(inputs = [self.x, self.x_tilda],\n outputs = [diff_var, self.p_tilda])\n \n diff_val, p_tilda_vals = get_css_diff(x_inputs, x_samples)\n \n return diff_val, p_tilda_vals", "def corrected_ttr(n_terms, n_words):\n if n_words == 0:\n return 0\n return n_terms / math.sqrt(2 * n_words)", "def term_restrictions(data):\n\n term = [\"1st\", \"2nd\", \"3rd\", \"1ST\", \"2ND\", \"3RD\"]\n if data not in term:\n return False\n return True", "def _score_terms(self, terms_list):\n terms_scores = {}\n total_terms = 0\n\n for terms, coeff, split, ivtidx in terms_list:\n if not terms:\n continue\n # Swap ivtidx name for inverted index definition dict\n ivtidx = self._inverted_indexes[ivtidx]\n if not isinstance(terms, (basestring, list, tuple)):\n raise ValueError, \"Invalid type (%s) for ATTR_INVERTED_INDEX attribute. \" \\\n \"Only sequence, unicode or str allowed.\" % str(type(terms))\n\n if isinstance(terms, (list, tuple)):\n parsed = terms\n else:\n if callable(split):\n parsed = split(terms)\n else:\n parsed = split.split(terms)\n\n for term in parsed:\n if not term or (ivtidx['max'] and len(term) > ivtidx['max']) or \\\n (ivtidx['min'] and len(term) < ivtidx['min']):\n continue\n\n term = str_to_unicode(term)\n lower_term = term.lower()\n\n if ivtidx['ignore'] and lower_term in ivtidx['ignore']:\n continue\n if lower_term not in terms_scores:\n terms_scores[lower_term] = [term, coeff]\n else:\n terms_scores[lower_term][1] += coeff\n total_terms += 1\n\n # Score based on term frequency in document. (Add weight for\n # non-dictionary terms? 
Or longer terms?)\n for lower_term, score in terms_scores.items():\n terms_scores[lower_term][1] = math.sqrt(terms_scores[lower_term][1] / total_terms)\n return dict(terms_scores.values())", "def _score_terms(self, terms_list):\n terms_scores = {}\n total_terms = 0\n\n for terms, coeff, split, ivtidx in terms_list:\n if not terms:\n continue\n # Swap ivtidx name for inverted index definition dict\n ivtidx = self._inverted_indexes[ivtidx]\n if not isinstance(terms, (str, list, tuple)):\n raise ValueError(\"Invalid type (%s) for ATTR_INVERTED_INDEX attribute. \" \\\n \"Only sequence, unicode or str allowed.\" % str(type(terms)))\n\n if isinstance(terms, (list, tuple)):\n terms = [tostr(term) for term in terms]\n parsed = terms\n else:\n terms = tostr(terms)\n if callable(split):\n parsed = list(split(terms))\n else:\n parsed = split.split(terms)\n\n for term in parsed:\n if not term or (ivtidx['max'] and len(term) > ivtidx['max']) or \\\n (ivtidx['min'] and len(term) < ivtidx['min']):\n continue\n\n lower_term = term.lower()\n\n if ivtidx['ignore'] and lower_term in ivtidx['ignore']:\n continue\n if lower_term not in terms_scores:\n terms_scores[lower_term] = [term, coeff]\n else:\n terms_scores[lower_term][1] += coeff\n total_terms += 1\n\n # Score based on term frequency in document. (Add weight for\n # non-dictionary terms? Or longer terms?)\n for lower_term, score in terms_scores.items():\n terms_scores[lower_term][1] = math.sqrt(terms_scores[lower_term][1] / total_terms)\n return dict(terms_scores.values())", "def any_term(cls, *terms: str) -> \"IFilterPattern\":\n return jsii.sinvoke(cls, \"anyTerm\", [*terms])", "def add_eqns(df):\n\n def lett(col): return alpha[list(df.columns).index(col)]\n for i in df.index:\n row = str(i + 3)\n if df.loc[i, 'Deleted'] != 'Total':\n df.loc[i, 'M/M_Total'] = '=IF(' + lett('Deleted') + row + '<>\"\",0,' + lett('# Molds') + row + '*' + lett('Price/Mold') + row + '+' + lett('Model Price') + row + ')'\n df.loc[i, 'Unit_Total'] = '=IF(' + lett('Deleted') + row + '<>\"\",0,' + lett('# Units') + row + '*' + lett('Price/Unit') + row + ')'\n df.loc[i, 'Line_Total'] = '=IF(' + lett('Deleted') + row + '<>\"\",0,' + 'SUM(' + lett('M/M_Total') + row + ',' + lett('Unit_Total') + row + '))'\n return df", "def terms(f):\n return dmp_terms(f.rep, f.lev, f.dom)", "def test(dfa, words):\n for word in words:\n try:\n dfa.test(word)\n except AssertionError as e:\n logging.error('ERROR: %s\\n' % e.message)", "def test__empty_terms_in_potential():\n\n assert automol.pot.is_nonempty(POT1)\n assert not automol.pot.is_nonempty(POT4)\n\n ref_filt_pot = {\n (0.00000000,): 0.00, (1.04719755,): 3.58,\n (2.09439510,): 0.01, (2.61799388,): 1.75,\n (3.14159265,): 3.59, (3.66519143,): 1.69,\n (4.18879020,): 0.02, (4.71238898,): 1.72,\n (5.23598776,): 3.60\n }\n\n filt_pot = automol.pot.remove_empty_terms(POT3)\n assert numpy.allclose(\n tuple(filt_pot.keys()), tuple(ref_filt_pot.keys()), atol=1.0e-2)\n assert numpy.allclose(\n tuple(filt_pot.values()), tuple(ref_filt_pot.values()), atol=1.0e-2)", "def test_should_accept_alphanumeric_formulas(self):\n validator = CharCombinationValidator()\n\n for formula in self.correct_formulas:\n self.assertIsNone(validator(formula))", "def all_terms(f):\n return dmp_all_terms(f.rep, f.lev, f.dom)", "def test_fwhm(self):\n for i, func in enumerate(self.fwhm_funcs):\n for j, arr1d in enumerate(self.input_arrays):\n res = func(arr1d)\n assert_allclose(res.fwhm, self.answers[i][j], atol=1e-4)", "def get_relevant_terms(self, to_test_path, 
min_word_count=0):\n matching_include_path_ids = []\n matching_exclude_path_ids = []\n op_pathes_include = []\n op_pathes_exclude = []\n # all terms without a pattern\n no_restricted_terms_ids =Term.objects.filter(is_active=True).filter(operating_path__isnull=True).values_list('id', flat=True)\n relevant_term_ids = [x for x in no_restricted_terms_ids]\n \n # get all exclude pathes\n op_path_exclude_ids = OperatingPath.objects.filter(is_include=False).values_list('id', flat=True)\n # includes\n op_path_include_ids = OperatingPath.objects.filter(is_include=True).values_list('id', flat=True)\n if op_path_include_ids:\n op_pathes_include = OperatingPath.objects.filter(id__in=op_path_include_ids)\n if op_path_exclude_ids:\n op_pathes_exclude = OperatingPath.objects.filter(id__in=op_path_exclude_ids)\n \n # test exclude pathes\n for exclude_path in op_pathes_exclude:\n if exclude_path.is_matching(to_test_path):\n if exclude_path.id not in matching_exclude_path_ids:\n matching_exclude_path_ids.append(exclude_path.id)\n # test include pathes\n for include_path in op_pathes_include:\n if include_path.is_matching(to_test_path):\n if include_path.id not in matching_include_path_ids:\n matching_include_path_ids.append(include_path.id)\n \n # now mix the buckets\n to_include_term_ids = Term.objects.filter(is_active=True).filter(operating_path__id__in=matching_include_path_ids).values_list('id', flat=True)\n #log.warn(\"include term ids %s\"%(to_include_term_ids) )\n to_exclude_term_ids = Term.objects.filter(is_active=True).filter(operating_path__id__in=matching_exclude_path_ids).values_list('id', flat=True)\n # buckets with term ids are filled\n # now get the term obj to the ids\n positive_term_ids = [x for x in relevant_term_ids]\n positive_term_ids += [x for x in to_include_term_ids]\n final_set = []\n for x in positive_term_ids:\n if x not in to_exclude_term_ids:\n if x not in final_set:\n final_set.append(x)\n \n relevant_terms = Term.objects.filter(word_count__gte = min_word_count).filter(id__in = final_set).exclude(is_active = False) \n relevant_terms = relevant_terms.order_by(\"-word_count\") \n \n # need to be ordered by wordcount longest first\n return relevant_terms", "def test_ccd_doubles_terms(parthole_drudge):\n\n dr = parthole_drudge\n p = dr.names\n\n a, b, c, d = p.V_dumms[:4]\n i, j, k, l = p.O_dumms[:4]\n u = dr.two_body\n t = IndexedBase('t')\n dr.set_dbbar_base(t, 2)\n\n r = IndexedBase('r')\n tensor = dr.define_einst(\n r[a, b, i, j],\n + t[a, b, l, j] * t[c, d, i, k] * u[k, l, c, d]\n + t[a, d, i, j] * t[b, c, k, l] * u[k, l, c, d]\n - t[a, b, i, l] * t[c, d, k, j] * u[k, l, c, d]\n - t[a, c, k, l] * t[b, d, i, j] * u[k, l, c, d]\n )\n targets = [tensor]\n\n eval_seq = optimize(targets, substs={p.nv: p.no * 10})\n\n assert verify_eval_seq(eval_seq, targets)", "def _construct_compute_fe_terms(self):\n # setup some symbolic variables for theano to deal with\n xi = T.matrix()\n xo = T.matrix()\n _, hi_zmuv = self._construct_zmuv_samples(xi, 1)\n # construct values to output\n nll = self.nlli[-1]\n kld = self.kld_z.flatten() + self.kld_hi_q2p.flatten()\n # compile theano function for a one-sample free-energy estimate\n fe_term_sample = theano.function(inputs=[ xi, xo ], \\\n outputs=[nll, kld], \\\n givens={self.x_in: xi, \\\n self.x_out: xo, \\\n self.hi_zmuv: hi_zmuv}, \\\n updates=self.scan_updates)\n # construct a wrapper function for multi-sample free-energy estimate\n def fe_term_estimator(XI, XO, sample_count):\n # compute a multi-sample estimate of variational free-energy\n 
nll_sum = np.zeros((XI.shape[0],))\n kld_sum = np.zeros((XI.shape[0],))\n for i in range(sample_count):\n result = fe_term_sample(XI, XO)\n nll_sum += result[0].ravel()\n kld_sum += result[1].ravel()\n mean_nll = nll_sum / float(sample_count)\n mean_kld = kld_sum / float(sample_count)\n return [mean_nll, mean_kld]\n return fe_term_estimator", "def find_terms(naf: KafNafParser, words: Sequence[str]) -> Iterable[Cterm]:\n for t in naf.get_terms():\n if t.get_lemma() in words or get_word(naf, t) in words:\n yield t", "def check_spellings(text):\n\n for word in vocabulary:\n text = correct(word, text, 0.7)\n return text" ]
[ "0.5098105", "0.49435574", "0.49396107", "0.4900988", "0.4857589", "0.4845605", "0.46524802", "0.4648548", "0.4620158", "0.46167716", "0.46052644", "0.45635396", "0.45390478", "0.45198467", "0.45163706", "0.4515403", "0.4504055", "0.45020077", "0.44826326", "0.44797707", "0.44423264", "0.44348654", "0.4431736", "0.44302016", "0.44154418", "0.44074002", "0.44044936", "0.4403324", "0.43892953", "0.43720508" ]
0.61136484
0
Formats dictated text to camel case.
def camel_case_text(text):
    newText = format_camel_case(text)
    Text("%(text)s").execute({"text": newText})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def format_camel(text):\r\n\toutput = \"\"\r\n\tfor i,w in enumerate(text.split(\" \")):\r\n\t\tif i > 0:\r\n\t\t\toutput += w[0].upper() + w[1:]\r\n\t\telse:\r\n\t\t\toutput += w\r\n\treturn output", "def _to_camel_case(text: str) -> str:\n return \"\".join(word.title() for word in text.split(\"_\"))", "def camel_case_to_readable(text: str) -> str:\n if text == 'id':\n return 'ID'\n return ''.join(' ' + char if char.isupper() else char.strip() for char in text).strip().title()", "def _camelify(words):\n newText = ''\n for word in words:\n if newText == '':\n newText = word[:1].lower() + word[1:]\n else:\n newText = '%s%s' % (newText, word.capitalize())\n return newText", "def camel_case(value: str, **kwargs: Any) -> str:\n result = \"\".join(map(str.title, split_words(value)))\n return result[0].lower() + result[1:]", "def snake_to_camel(text: str) -> str:\n\n data = [\n i.capitalize()\n for i in text.split(\"_\")\n ]\n return \"\".join(data)", "def camel_case(text, separator='_'):\n return ''.join(map(str.capitalize, text.split(separator)))", "def underscore_to_camelcase(text):\n text = text.replace('_', ' ').title()\n return text.replace(' ', '')", "def render_camel(var_words):\n return ''.join([word.capitalize() for word in var_words])", "def case(text, casingformat='sentence'):\n\n # If the lowercase version of the casing format is 'uppercase'\n if casingformat.lower() == 'uppercase':\n # Return the uppercase version\n return str(text.upper())\n\n # If the lowercase version of the casing format is 'lowercase'\n elif casingformat.lower() == 'lowercase':\n # Return the lowercase version\n return str(text.lower())\n\n # If the lowercase version of the casing format is 'sentence'\n elif casingformat.lower() == 'sentence':\n # Return the sentence case version\n return str(text[0].upper()) + str(text[1:])\n\n # If the lowercase version of the casing format is 'caterpillar'\n elif casingformat.lower() == 'caterpillar':\n # Return the caterpillar case version\n return str(text.lower().replace(\" \", \"_\"))\n\n # Raise a warning\n raise ValueError(\"Invalid text format specified.\")", "def camel_filter(val):\n titlecase = val.title()\n return re.sub(r\"[\\W^_]\", \"\", titlecase)", "def convert_underscore_to_camel_case(text):\n words = text.split('_')\n words = [word.capitalize() for word in words]\n return ''.join(words)", "def CamelCase(text, separator='_'):\n return ''.join(map(str.capitalize, text.split(separator)))", "def name_camel(self) -> str:\n # We want to use any of the customization applied to name_title\n # so let's just give _name_title with spaces stripped out.\n return self._name_title.replace(' ', '')", "def convert_to_uppercase(text):\n return text.upper()", "def convert_pascal_case_to_readable_format(text):\n return \"\".join(list(map(lambda x : \" \" + x.lower() if x.isupper() else x, list(text))))", "def camelcase(value):\n rest = value.split(\"_\")\n return rest[0] + \"\".join(word.title() for word in rest[1:])", "def uppercase_text(text):\n newText = format_upper_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})", "def pascal_case_text(text):\n newText = format_pascal_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})", "def to_upper(self, text):\n\t\treturn text.upper()", "def UPPER(text):\n return text.upper()", "def pascal_case(value: str, **kwargs: Any) -> str:\n return \"\".join(map(str.title, split_words(value)))", "def __process_word(self, word):\n output = ''\n capitals = self.__capital_positions(word)\n c_index = 0\n\n for c in word:\n 
if c_index in capitals:\n output += c.upper()\n else:\n output += c.lower()\n\n c_index += 1\n\n return output", "def proper_title_case(s):\n nocaps = [\"the\"] # This needs to be extended.", "def capify(text):\n return text[0].upper() + text[1:]", "def camelcase_to_sentencecase(name: str) -> str:\n return stringcase.sentencecase(stringcase.snakecase(name)).lower()", "def camel(s):\n return s[0].upper() + s[1:]", "def to_camelCase(in_str):\n \n if in_str.find(' ') > -1:\n words = in_str.split(' ')\n elif in_str.find('_') > -1:\n words = in_str.split('_')\n else:\n return in_str.lower()\n \n first_word = words[0].lower()\n other_words = ''.join(w.title() for w in words[1:])\n \n return '%s%s' % (first_word, other_words)", "def snake_to_camel(word):\n return ''.join(x.capitalize() or '_' for x in word.split('_'))", "def snake_to_camel(word):\n return ''.join(x.capitalize() or '_' for x in word.split('_'))" ]
[ "0.7785462", "0.7445175", "0.72581625", "0.7184089", "0.71070236", "0.70822126", "0.6964225", "0.69159365", "0.6845309", "0.68369484", "0.683591", "0.67920893", "0.6758114", "0.6714242", "0.66896963", "0.6676171", "0.6648838", "0.6558678", "0.6504255", "0.6491712", "0.6463216", "0.64568657", "0.64397925", "0.6427154", "0.64185476", "0.64045215", "0.6373759", "0.6344882", "0.63438916", "0.63438916" ]
0.77859265
0
Formats n words to the left of the cursor to camel case. Note that word count differs between editors and programming languages. The examples are all from Eclipse/Python.
def camel_case_count(n):
    saveText = _get_clipboard_text()
    cutText = _select_and_cut_text(n)
    if cutText:
        endSpace = cutText.endswith(' ')
        text = _cleanup_text(cutText)
        newText = _camelify(text.split(' '))
        if endSpace:
            newText = newText + ' '
        newText = newText.replace("%", "%%")  # Escape any format chars.
        Text(newText).execute()
    else:  # Failed to get text from clipboard.
        Key('c-v').execute()  # Restore cut out text.
    _set_clipboard_text(saveText)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pascal_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = text.title().replace(' ', '')\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)", "def snake_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText.lower())\n newText = '_'.join(text.split(' '))\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)", "def format_camel(text):\r\n\toutput = \"\"\r\n\tfor i,w in enumerate(text.split(\" \")):\r\n\t\tif i > 0:\r\n\t\t\toutput += w[0].upper() + w[1:]\r\n\t\telse:\r\n\t\t\toutput += w\r\n\treturn output", "def lowercase_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n newText = cutText.lower()\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)", "def render_camel(var_words):\n return ''.join([word.capitalize() for word in var_words])", "def _camelify(words):\n newText = ''\n for word in words:\n if newText == '':\n newText = word[:1].lower() + word[1:]\n else:\n newText = '%s%s' % (newText, word.capitalize())\n return newText", "def uppercase_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n newText = cutText.upper()\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)", "def camel_case_text(text):\n newText = format_camel_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})", "def pascal_case(value: str, **kwargs: Any) -> str:\n return \"\".join(map(str.title, split_words(value)))", "def to_camelcase(s):\n words = re.split(\"[^a-zA-Z0-9]+\", s)\n return \"\".join(\n w.lower() if i is 0 else w.title() for i, w in enumerate(words))", "def camel_case(value: str, **kwargs: Any) -> str:\n result = \"\".join(map(str.title, split_words(value)))\n return result[0].lower() + result[1:]", "def convert_camel_case_to_underscore(text):\n words = []\n last_capital_index = 0\n\n for current_letter_index in xrange(1, len(text)):\n if text[current_letter_index].isupper():\n words.append(text[last_capital_index:current_letter_index])\n last_capital_index = current_letter_index\n elif current_letter_index == len(text) - 1:\n words.append(text[last_capital_index:])\n\n return '_'.join(words).lower()", "def camelcase(value):\n rest = value.split(\"_\")\n return rest[0] + \"\".join(word.title() for word in rest[1:])", "def words_capital_letter(self, path, filemoving, parser):\n root = parser.parsing_xml(path, filemoving)\n root_tag = root.tag[0:(root.tag.find('}') + 1)]\n number_of_words_capital_letter = 0\n for i in root.iter(root_tag+'p'):\n if str(type(i.text)) == \"<class 'str'>\":\n number_of_words_capital_letter = 
number_of_words_capital_letter + sum(list(map(lambda x: x.istitle(), i.text.split())))\n return number_of_words_capital_letter", "def proper_title_case(s):\n nocaps = [\"the\"] # This needs to be extended.", "def to_camelCase(in_str):\n \n if in_str.find(' ') > -1:\n words = in_str.split(' ')\n elif in_str.find('_') > -1:\n words = in_str.split('_')\n else:\n return in_str.lower()\n \n first_word = words[0].lower()\n other_words = ''.join(w.title() for w in words[1:])\n \n return '%s%s' % (first_word, other_words)", "def make_title(words):", "def wrap_by_word(s, n):\n\ta = s.split()\n\tret = ''\n\tfor i in range(0, len(a), n):\n\t\tret += ' '.join(a[i:i+n]) + '\\n'\n\treturn ret", "def camel(s):\n return s[0].upper() + s[1:]", "def _to_camel_case(text: str) -> str:\n return \"\".join(word.title() for word in text.split(\"_\"))", "def camel_to_spaces(s):\n subbed = _underscorer1.sub(r'\\1 \\2', s)\n return _underscorer2.sub(r'\\1 \\2', subbed).lower()", "def generate_words(self,N):\n for i in xrange(N):\n prefix = \" \" * self.chainlen\n name = \"\"\n suffix = \"\"\n while True:\n suffix = self.get_suffix(prefix)\n if suffix == \"\\n\" or len(name) > 9:\n break\n else:\n name = name + suffix\n prefix = prefix[1:] + suffix\n yield name.capitalize()", "def pascal_case(st) -> str:\n if st.find('-') != -1 or st.find('_') != -1:\n st = ''.join(a.capitalize() for a in re.split('-|_', st))\n return st[:1].upper() + st[1: len(st)]", "def counter(name):\n count_name = list(name)\n counter = 0\n for letter in count_name:\n counter += 1\n\n print(f\"There are {counter} letter in the name {name}.\")\n print(f\"\\tAnd btw... {name} backwards is {name[::-1].lower()}.\")", "def convert_underscore_to_camel_case(text):\n words = text.split('_')\n words = [word.capitalize() for word in words]\n return ''.join(words)", "def print_words(words):\n print('Here are the words you entered:')\n for i, word in enumerate(words, start=1):\n print('{}. {}'.format(i, word.title()))", "def titleize(phrase):\n nl = phrase.split(' ')\n o=[]\n for i in nl:\n o.append(i[0].upper()+i[1:].lower())\n print(' '.join(o))", "def capitalize_text(text, words_list, marker=''):\n for word in words_list:\n text = capitalize_term_re(text, word, marker)\n return text", "def underscore_to_camelcase(text):\n text = text.replace('_', ' ').title()\n return text.replace(' ', '')", "def snake_to_camel(word):\n return ''.join(x.capitalize() or '_' for x in word.split('_'))" ]
[ "0.73462373", "0.6970347", "0.6930063", "0.69166636", "0.66168064", "0.6470583", "0.6462862", "0.6323668", "0.6291367", "0.6219177", "0.61823606", "0.61714643", "0.6143847", "0.6097175", "0.60751027", "0.60740274", "0.60687244", "0.6060768", "0.6060067", "0.6053927", "0.5989033", "0.59860134", "0.5924931", "0.59247094", "0.590119", "0.58939", "0.5885327", "0.58656526", "0.5856559", "0.5838679" ]
0.7841125
0
Takes a list of words and returns a string formatted to camel case.
def _camelify(words):
    newText = ''
    for word in words:
        if newText == '':
            newText = word[:1].lower() + word[1:]
        else:
            newText = '%s%s' % (newText, word.capitalize())
    return newText
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def render_camel(var_words):\n return ''.join([word.capitalize() for word in var_words])", "def format_camel(text):\r\n\toutput = \"\"\r\n\tfor i,w in enumerate(text.split(\" \")):\r\n\t\tif i > 0:\r\n\t\t\toutput += w[0].upper() + w[1:]\r\n\t\telse:\r\n\t\t\toutput += w\r\n\treturn output", "def camel_case(value: str, **kwargs: Any) -> str:\n result = \"\".join(map(str.title, split_words(value)))\n return result[0].lower() + result[1:]", "def camelcase(value):\n rest = value.split(\"_\")\n return rest[0] + \"\".join(word.title() for word in rest[1:])", "def snake_to_camel(word):\n return ''.join(x.capitalize() or '_' for x in word.split('_'))", "def snake_to_camel(word):\n return ''.join(x.capitalize() or '_' for x in word.split('_'))", "def _to_camel_case(text: str) -> str:\n return \"\".join(word.title() for word in text.split(\"_\"))", "def correctCasing(words):\n strings = words.split(' ')\n strings = [s[0].upper()+s[1:].lower() for s in strings if s]\n return ' '.join(strings)", "def to_camelcase(s):\n words = re.split(\"[^a-zA-Z0-9]+\", s)\n return \"\".join(\n w.lower() if i is 0 else w.title() for i, w in enumerate(words))", "def camel_case(text, separator='_'):\n return ''.join(map(str.capitalize, text.split(separator)))", "def capitalize_text(text, words_list, marker=''):\n for word in words_list:\n text = capitalize_term_re(text, word, marker)\n return text", "def to_camelCase(in_str):\n \n if in_str.find(' ') > -1:\n words = in_str.split(' ')\n elif in_str.find('_') > -1:\n words = in_str.split('_')\n else:\n return in_str.lower()\n \n first_word = words[0].lower()\n other_words = ''.join(w.title() for w in words[1:])\n \n return '%s%s' % (first_word, other_words)", "def camelcase(string):\r\n return ''.join(word.capitalize() for word in string.split('_'))", "def camel_filter(val):\n titlecase = val.title()\n return re.sub(r\"[\\W^_]\", \"\", titlecase)", "def snake_to_camel(text: str) -> str:\n\n data = [\n i.capitalize()\n for i in text.split(\"_\")\n ]\n return \"\".join(data)", "def convert_underscore_to_camel_case(text):\n words = text.split('_')\n words = [word.capitalize() for word in words]\n return ''.join(words)", "def snake_to_camel(string):\n return \"\".join(word.title() for word in string.split(\"_\"))", "def camel(s):\n return s[0].upper() + s[1:]", "def snake_to_camel(string):\n \n camel_case = []\n\n for word in string.split(\"_\"):\n camel_case.append(word.title())\n\n \"\".join(camel_case)", "def snake_to_camel_case(value):\n words = value.strip(\"_\").split(\"_\")\n return words[0].lower() + \"\".join([word.capitalize() for word in words[1:]])", "def snake_to_camel(name):\n return \"\".join([piece.capitalize() for piece in name.split(\"_\")])", "def underscored2camel_case(v):\n vlist = v.split('_')\n c = []\n for n, el in enumerate(vlist):\n if el:\n if n == 0:\n c.append(el)\n else:\n c.extend([el[0].upper(), el[1:]])\n return ''.join(c)", "def CamelCase(text, separator='_'):\n return ''.join(map(str.capitalize, text.split(separator)))", "def underscore_to_camelcase(word, initial_capital=False):\n words = [x.capitalize() or \"_\" for x in word.split(\"_\")]\n if not initial_capital:\n words[0] = words[0].lower()\n\n return \"\".join(words)", "def space_out_camel_case(camel):\r\n chars = []\r\n\r\n for char in camel:\r\n if len(chars) >= 2 and chars[-1] != ' ':\r\n if char.isupper() and chars[-1].islower():\r\n chars.append(' ')\r\n elif char.islower() and chars[-1].isupper() and chars[-2].isupper():\r\n chars.insert(len(chars) - 1, ' ')\r\n\r\n 
chars.append(char)\r\n\r\n return ''.join(chars)", "def snake_to_camel_case(snake_str: str) -> str:\n\n words = snake_str.strip(\"_\").split(\"_\")\n return words[0] + \"\".join(word[:1].upper() + word[1:] for word in words[1:])", "def to_camel_case(string: str):\n return \"\".join(\n word.title() if idx > 0 else word for idx, word in enumerate(string.split(\"_\"))\n )", "def pascal_case(value: str, **kwargs: Any) -> str:\n return \"\".join(map(str.title, split_words(value)))", "def underscore_to_camelcase(text):\n text = text.replace('_', ' ').title()\n return text.replace(' ', '')", "def titlecase(input_str):\n return \"\".join([x.title() for x in input_str.split('_')])" ]
[ "0.8251883", "0.7825327", "0.7563968", "0.7526195", "0.7506611", "0.7506611", "0.7482835", "0.72373885", "0.7220596", "0.7182877", "0.7105976", "0.7050961", "0.70338947", "0.7015833", "0.70085067", "0.6960694", "0.6957555", "0.6947957", "0.69223166", "0.69102275", "0.6900598", "0.6872665", "0.6864597", "0.68355256", "0.68123627", "0.67573637", "0.67450446", "0.6735121", "0.6732642", "0.67188543" ]
0.8511594
0
Formats dictated text to pascal case.
def pascal_case_text(text):
    newText = format_pascal_case(text)
    Text("%(text)s").execute({"text": newText})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_pascal_case_to_readable_format(text):\n return \"\".join(list(map(lambda x : \" \" + x.lower() if x.isupper() else x, list(text))))", "def pascal_case(value: str, **kwargs: Any) -> str:\n return \"\".join(map(str.title, split_words(value)))", "def mixed_pascal_case(value: str, **kwargs: Any) -> str:\n return capitalize(mixed_case(value))", "def pascal_case(st) -> str:\n if st.find('-') != -1 or st.find('_') != -1:\n st = ''.join(a.capitalize() for a in re.split('-|_', st))\n return st[:1].upper() + st[1: len(st)]", "def pascalcase(string):\n\n return capitalcase(camelcase(string))", "def snake_to_pascal(string):\n return string[0].upper() + re.sub('_([a-z])', lambda match: match.group(1).upper(), string[1:])", "def format_camel(text):\r\n\toutput = \"\"\r\n\tfor i,w in enumerate(text.split(\" \")):\r\n\t\tif i > 0:\r\n\t\t\toutput += w[0].upper() + w[1:]\r\n\t\telse:\r\n\t\t\toutput += w\r\n\treturn output", "def snake_case_text(text):\n newText = format_snake_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})", "def camel_case_text(text):\n newText = format_camel_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})", "def capify(text):\n return text[0].upper() + text[1:]", "def proper_title_case(s):\n nocaps = [\"the\"] # This needs to be extended.", "def LCase(text):\n return text.lower()", "def UCase(text):\n return text.upper()", "def __process_word(self, word):\n output = ''\n capitals = self.__capital_positions(word)\n c_index = 0\n\n for c in word:\n if c_index in capitals:\n output += c.upper()\n else:\n output += c.lower()\n\n c_index += 1\n\n return output", "def preprocess(text):\n return text.lower()", "def _transliterate_text(self, _text):\n return _text.upper()", "def _camel_case_to_snake_case(text: str) -> str:\n return re.sub(r\"(?<!^)(?=[A-Z])\", \"_\", text).lower()", "def camel_case_to_readable(text: str) -> str:\n if text == 'id':\n return 'ID'\n return ''.join(' ' + char if char.isupper() else char.strip() for char in text).strip().title()", "def snake_to_camel(text: str) -> str:\n\n data = [\n i.capitalize()\n for i in text.split(\"_\")\n ]\n return \"\".join(data)", "def _format(text):\n \n if isinstance(text, unicode):\n return text.lower().encode(\"UTF-8\")\n elif isinstance(text, str):\n return text.lower()", "def _to_camel_case(text: str) -> str:\n return \"\".join(word.title() for word in text.split(\"_\"))", "def titleize(phrase):\n nl = phrase.split(' ')\n o=[]\n for i in nl:\n o.append(i[0].upper()+i[1:].lower())\n print(' '.join(o))", "def convert_to_uppercase(text):\n return text.upper()", "def normalize_case(text):\n text = str(text)\n return text.lower()", "def case(text, casingformat='sentence'):\n\n # If the lowercase version of the casing format is 'uppercase'\n if casingformat.lower() == 'uppercase':\n # Return the uppercase version\n return str(text.upper())\n\n # If the lowercase version of the casing format is 'lowercase'\n elif casingformat.lower() == 'lowercase':\n # Return the lowercase version\n return str(text.lower())\n\n # If the lowercase version of the casing format is 'sentence'\n elif casingformat.lower() == 'sentence':\n # Return the sentence case version\n return str(text[0].upper()) + str(text[1:])\n\n # If the lowercase version of the casing format is 'caterpillar'\n elif casingformat.lower() == 'caterpillar':\n # Return the caterpillar case version\n return str(text.lower().replace(\" \", \"_\"))\n\n # Raise a warning\n raise ValueError(\"Invalid text format specified.\")", "def 
to_upper_snakecase(text: str) -> str:\n\n data = text.replace(\" \", \"_\").upper()\n if data[0].isdigit():\n data = \"_\" + data\n return data", "def _camelify(words):\n newText = ''\n for word in words:\n if newText == '':\n newText = word[:1].lower() + word[1:]\n else:\n newText = '%s%s' % (newText, word.capitalize())\n return newText", "def process_text(text, args):\n if args.uppercase:\n text = convert_to_uppercase(text)\n\n if args.spaces:\n text = add_spaces(text)\n\n if not args.uppercase and not args.spaces:\n text = add_spaces(text)\n\n return text", "def UPPER(text):\n return text.upper()", "def CamelCase_to_snake_case(text):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', text)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()" ]
[ "0.8325709", "0.7577789", "0.7054179", "0.6948193", "0.67188567", "0.66129065", "0.64995056", "0.64025515", "0.6349463", "0.633064", "0.6246584", "0.6222095", "0.6209781", "0.6185109", "0.61772996", "0.6169017", "0.6150109", "0.6128363", "0.6115912", "0.6075849", "0.60635227", "0.6053344", "0.6049605", "0.60489094", "0.60287654", "0.5980917", "0.5979099", "0.59762156", "0.5971847", "0.5963734" ]
0.8074431
1
Formats n words to the left of the cursor to pascal case. Note that word count differs between editors and programming languages. The examples are all from Eclipse/Python.
def pascal_case_count(n):
    saveText = _get_clipboard_text()
    cutText = _select_and_cut_text(n)
    if cutText:
        endSpace = cutText.endswith(' ')
        text = _cleanup_text(cutText)
        newText = text.title().replace(' ', '')
        if endSpace:
            newText = newText + ' '
        newText = newText.replace("%", "%%")  # Escape any format chars.
        Text(newText).execute()
    else:  # Failed to get text from clipboard.
        Key('c-v').execute()  # Restore cut out text.
    _set_clipboard_text(saveText)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pascal_case_text(text):\n newText = format_pascal_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})", "def convert_pascal_case_to_readable_format(text):\n return \"\".join(list(map(lambda x : \" \" + x.lower() if x.isupper() else x, list(text))))", "def pascal_case(value: str, **kwargs: Any) -> str:\n return \"\".join(map(str.title, split_words(value)))", "def snake_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText.lower())\n newText = '_'.join(text.split(' '))\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)", "def camel_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = _camelify(text.split(' '))\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)", "def lowercase_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n newText = cutText.lower()\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)", "def pascal_case(st) -> str:\n if st.find('-') != -1 or st.find('_') != -1:\n st = ''.join(a.capitalize() for a in re.split('-|_', st))\n return st[:1].upper() + st[1: len(st)]", "def words_capital_letter(self, path, filemoving, parser):\n root = parser.parsing_xml(path, filemoving)\n root_tag = root.tag[0:(root.tag.find('}') + 1)]\n number_of_words_capital_letter = 0\n for i in root.iter(root_tag+'p'):\n if str(type(i.text)) == \"<class 'str'>\":\n number_of_words_capital_letter = number_of_words_capital_letter + sum(list(map(lambda x: x.istitle(), i.text.split())))\n return number_of_words_capital_letter", "def snake_to_pascal(string):\n return string[0].upper() + re.sub('_([a-z])', lambda match: match.group(1).upper(), string[1:])", "def print_words(words):\n print('Here are the words you entered:')\n for i, word in enumerate(words, start=1):\n print('{}. 
{}'.format(i, word.title()))", "def wrap_by_word(s, n):\n\ta = s.split()\n\tret = ''\n\tfor i in range(0, len(a), n):\n\t\tret += ' '.join(a[i:i+n]) + '\\n'\n\treturn ret", "def format_camel(text):\r\n\toutput = \"\"\r\n\tfor i,w in enumerate(text.split(\" \")):\r\n\t\tif i > 0:\r\n\t\t\toutput += w[0].upper() + w[1:]\r\n\t\telse:\r\n\t\t\toutput += w\r\n\treturn output", "def _set_number_of_words(self, N):\n self.N_words_to_display = N", "def create_word(char_list):", "def get_number_of_words_with_capital_letters_and_lowercase(self):\n filename = f'{self.path}/{self.filename}'\n file = open(filename, 'r', encoding='utf-8')\n data = file.read()\n head, sep, tail = data.partition('<binary')\n head = re.sub('\\\\s\\\\s*', ' ', (re.sub('\\\\W|\\\\d', ' ', re.sub('<.*?>', '', head))))\n word_list = head.split()\n\n # upper_cnt = (sum([sum([c.isupper() for c in a]) for a in word_list]))\n # lower_cnt = (sum([sum([c.islower() for c in a]) for a in word_list]))\n\n upper_cnt = sum([a[0].isupper() for a in word_list])\n lower_cnt = (sum([a.islower() for a in word_list]))\n\n sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 'words_with_capital_letters', upper_cnt)\n sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 'words_in_lowercase', lower_cnt)\n print(datetime.now(), '-', 'words_with_capital_letters for', self.filename, 'calculated =', upper_cnt)\n print(datetime.now(), '-', 'words_in_lowercase for', self.filename, 'calculated =', lower_cnt)\n return None", "def wc(file_):\r\n with open(file_) as f:\r\n file = f.read().strip()\r\n char_nums = len(file)\r\n lines = file.split('\\n')\r\n line_nums = len(lines)\r\n word_nums = 0\r\n for line in lines:\r\n words = line.split()\r\n word_nums += len(words)\r\n return f'{line_nums} {word_nums} {char_nums} {file_}'", "def make_title(words):", "def mixed_pascal_case(value: str, **kwargs: Any) -> str:\n return capitalize(mixed_case(value))", "def uppercase_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n newText = cutText.upper()\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)", "def pascalcase(string):\n\n return capitalcase(camelcase(string))", "def _nth_letter(n):\r\n\treturn string.ascii_lowercase[n % len(string.ascii_lowercase)]", "def get_pad1(n):\n if n < 10:\n return \" \"\n if n < 100:\n return \" \"\n if n < 1000:\n return \" \"\n return \"\"", "def rotate_word(s1, n):\n s2 = ''\n for c in s1:\n i = (ord(c)-97+n) % 26\n ch = chr(i+97)\n s2 = s2 + ch\n return s2", "def english(number):\r\n if number == 0:\r\n return 'zero'\r\n word = ''\r\n for step in itertools.count():\r\n number, rest = divmod(number, 1000)\r\n word = format_num(en3(rest), step) + word\r\n if number == 0:\r\n return word.strip()", "def titleize(phrase):\n words = phrase.split(' ')\n results = ''\n for i in range(len(words)):\n word = ''\n for j in range(len(words[i])):\n if(j == 0):\n word = words[i][j].upper() \n else:\n word += words[i][j].lower()\n results += word\n if(i < len(words) - 1):\n results += ' '\n return results", "def titleize(phrase):\n nl = phrase.split(' ')\n o=[]\n for i in nl:\n o.append(i[0].upper()+i[1:].lower())\n print(' '.join(o))", "def render_camel(var_words):\n return ''.join([word.capitalize() for word in var_words])", "def generate_words(self,N):\n for i in xrange(N):\n prefix = \" \" * self.chainlen\n name = 
\"\"\n suffix = \"\"\n while True:\n suffix = self.get_suffix(prefix)\n if suffix == \"\\n\" or len(name) > 9:\n break\n else:\n name = name + suffix\n prefix = prefix[1:] + suffix\n yield name.capitalize()", "def ladcased(normal):\r\n\r\n ladified = ''\r\n for i, c in enumerate(normal):\r\n ladified += c.lower() if (i % 2 == 0) else c.upper()\r\n\r\n return ladified", "def make(text=input()):\n alp = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'\n box = []\n dic = dict()\n val = 0\n #collect alphabets into list\n for i in alp:\n if i in text:\n dic[i] = text.count(i)\n box.append(i)\n if text.count(i) > val:\n val = text.count(i)\n else:\n val = val\n for i in range(val, 0, -1):\n print(\"%03d \"%i, end=\"\")\n for wow in sorted(dic, key=str.swapcase):\n if dic[wow] >= i:\n print(\"*\", end=\" \")\n else:\n print(\" \", end=\" \")\n print()\n print(\" \", *box, sep=\" \")" ]
[ "0.6748049", "0.668678", "0.66685766", "0.65173775", "0.6507821", "0.64246476", "0.62588936", "0.5990105", "0.5817989", "0.5803157", "0.5715056", "0.5696685", "0.5679296", "0.5647844", "0.56197363", "0.56024784", "0.5598668", "0.5587423", "0.5575231", "0.5562862", "0.5554474", "0.55531186", "0.55442256", "0.55324304", "0.55222905", "0.55084115", "0.5501381", "0.5492033", "0.54842985", "0.548238" ]
0.7799282
0
Formats dictated text to snake case.
def snake_case_text(text):
    newText = format_snake_case(text)
    Text("%(text)s").execute({"text": newText})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def CamelCase_to_snake_case(text):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', text)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()", "def _camel_case_to_snake_case(text: str) -> str:\n return re.sub(r\"(?<!^)(?=[A-Z])\", \"_\", text).lower()", "def snake_case(value: str, **kwargs: Any) -> str:\n return \"_\".join(map(str.lower, split_words(value)))", "def convert_pascal_case_to_readable_format(text):\n return \"\".join(list(map(lambda x : \" \" + x.lower() if x.isupper() else x, list(text))))", "def screaming_snake_case(value: str, **kwargs: Any) -> str:\n return snake_case(value, **kwargs).upper()", "def snakecase_to_sentencecase(name: str) -> str:\n return stringcase.sentencecase(name).lower()", "def proper_title_case(s):\n nocaps = [\"the\"] # This needs to be extended.", "def snake_to_camel(text: str) -> str:\n\n data = [\n i.capitalize()\n for i in text.split(\"_\")\n ]\n return \"\".join(data)", "def _snake_case(display_name):\n str_re = re.compile('[{0}]'.format(re.escape(string.punctuation)))\n str = str_re.sub(' ', display_name)\n str = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', str)\n str = re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', str).lower()\n return re.sub(' +', '_', str)", "def convert_to_snake_case(string: str) -> str:\n\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', string)\n draft = re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()\n return draft.replace('__', '_')", "def mixed_snake_case(value: str, **kwargs: Any) -> str:\n return \"_\".join(split_words(value))", "def pascal_case(value: str, **kwargs: Any) -> str:\n return \"\".join(map(str.title, split_words(value)))", "def LCase(text):\n return text.lower()", "def to_upper_snakecase(text: str) -> str:\n\n data = text.replace(\" \", \"_\").upper()\n if data[0].isdigit():\n data = \"_\" + data\n return data", "def normalize_case(text):\n text = str(text)\n return text.lower()", "def pascal_case_text(text):\n newText = format_pascal_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})", "def camel_case_to_readable(text: str) -> str:\n if text == 'id':\n return 'ID'\n return ''.join(' ' + char if char.isupper() else char.strip() for char in text).strip().title()", "def to_lower(self, text):\n return text.lower()", "def snakecase(self, given_path):\n filename = os.path.basename(given_path)\n filename = first_cap_re.sub(r'\\1_\\2', filename)\n filename = all_cap_re.sub(r'\\1_\\2', filename).lower()\n return given_path.replace(os.path.basename(given_path), filename)", "def convert_to_snake_case(camel_case_string):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', camel_case_string)\n s2 = re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()\n return s2.replace('__', '_')", "def mixed_pascal_case(value: str, **kwargs: Any) -> str:\n return capitalize(mixed_case(value))", "def _camel_to_snake(name):\n s1 = re.sub(\"(.)([A-Z][a-z]+)\", r\"\\1_\\2\", name)\n return re.sub(\"([a-z0-9])([A-Z])\", r\"\\1_\\2\", s1).lower()", "def case(text, casingformat='sentence'):\n\n # If the lowercase version of the casing format is 'uppercase'\n if casingformat.lower() == 'uppercase':\n # Return the uppercase version\n return str(text.upper())\n\n # If the lowercase version of the casing format is 'lowercase'\n elif casingformat.lower() == 'lowercase':\n # Return the lowercase version\n return str(text.lower())\n\n # If the lowercase version of the casing format is 'sentence'\n elif casingformat.lower() == 'sentence':\n # Return the sentence case version\n return str(text[0].upper()) + str(text[1:])\n\n # If the lowercase 
version of the casing format is 'caterpillar'\n elif casingformat.lower() == 'caterpillar':\n # Return the caterpillar case version\n return str(text.lower().replace(\" \", \"_\"))\n\n # Raise a warning\n raise ValueError(\"Invalid text format specified.\")", "def lowercase_text(text):\n newText = format_lower_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})", "def to_snake_case(string):\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', string)).lower()", "def preprocess(text):\n return text.lower()", "def LOWER(text):\n return text.lower()", "def to_snake_case(str):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', str)\n s2 = re.sub('-', '_', s1)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s2).lower()", "def camel_case_text(text):\n newText = format_camel_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})", "def snakecase(string):\n\n string = re.sub(r\"[\\-\\.\\s]\", '_', str(string))\n if not string:\n return string\n return lowercase(string[0]) + re.sub(r\"[A-Z]\", lambda matched: '_' + lowercase(matched.group(0)), string[1:])" ]
[ "0.70693374", "0.7003121", "0.69320387", "0.6745865", "0.67148775", "0.6688143", "0.66141355", "0.65758014", "0.65488815", "0.64430827", "0.6417229", "0.6416612", "0.63867706", "0.63755894", "0.6365438", "0.63618255", "0.6327051", "0.6304511", "0.6293115", "0.62793934", "0.6235145", "0.6223727", "0.6212002", "0.61669123", "0.61632633", "0.616044", "0.61486906", "0.6146765", "0.6143876", "0.6121142" ]
0.7620087
0
Formats n words to the left of the cursor to snake case. Note that word count differs between editors and programming languages. The examples are all from Eclipse/Python.
def snake_case_count(n):
    saveText = _get_clipboard_text()
    cutText = _select_and_cut_text(n)
    if cutText:
        endSpace = cutText.endswith(' ')
        text = _cleanup_text(cutText.lower())
        newText = '_'.join(text.split(' '))
        if endSpace:
            newText = newText + ' '
        newText = newText.replace("%", "%%")  # Escape any format chars.
        Text(newText).execute()
    else:  # Failed to get text from clipboard.
        Key('c-v').execute()  # Restore cut out text.
        _set_clipboard_text(saveText)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pascal_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = text.title().replace(' ', '')\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)", "def lowercase_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n newText = cutText.lower()\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)", "def camel_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = _camelify(text.split(' '))\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)", "def snake_case_text(text):\n newText = format_snake_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})", "def wrap_by_word(s, n):\n\ta = s.split()\n\tret = ''\n\tfor i in range(0, len(a), n):\n\t\tret += ' '.join(a[i:i+n]) + '\\n'\n\treturn ret", "def render_snake(var_words):\n return '_'.join(var_words)", "def uppercase_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n newText = cutText.upper()\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)", "def snake_case(value: str, **kwargs: Any) -> str:\n return \"_\".join(map(str.lower, split_words(value)))", "def _set_number_of_words(self, N):\n self.N_words_to_display = N", "def proper_title_case(s):\n nocaps = [\"the\"] # This needs to be extended.", "def generate_words(self,N):\n for i in xrange(N):\n prefix = \" \" * self.chainlen\n name = \"\"\n suffix = \"\"\n while True:\n suffix = self.get_suffix(prefix)\n if suffix == \"\\n\" or len(name) > 9:\n break\n else:\n name = name + suffix\n prefix = prefix[1:] + suffix\n yield name.capitalize()", "def pascal_case(value: str, **kwargs: Any) -> str:\n return \"\".join(map(str.title, split_words(value)))", "def squash_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = ''.join(text.split(' '))\n if endSpace:\n newText = newText + ' '\n newText = _expand_after_special_chars(newText)\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)", "def make_title(words):", "def mixed_snake_case(value: str, **kwargs: Any) -> str:\n return \"_\".join(split_words(value))", "def expand_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n cutText = _expand_after_special_chars(cutText)\n reg = re.compile(\n 
r'([a-zA-Z0-9_\\\"\\'\\)][=\\+\\-\\*/\\%]|[=\\+\\-\\*/\\%][a-zA-Z0-9_\\\"\\'\\(])')\n hit = reg.search(cutText)\n count = 0\n while hit and count < 10:\n cutText = cutText[:hit.start() + 1] + ' ' + \\\n cutText[hit.end() - 1:]\n hit = reg.search(cutText)\n count += 1\n newText = cutText\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)", "def _camel_to_snake(s):\n return \"_\".join(\n [\n i.lower() for i in _camel_words.split(s)[1::2]\n ]\n )", "def generate_text_owc(model: Dict[str, Set[str]], n: int) -> str:\n # ACCUMULATOR: a list of the randomly-generated words so far\n words_so_far = []\n # We've provided this template as a starting point; you may modify it as necessary.\n words_so_far.append(generate_new_word(model))\n for x in range(0, n-1):\n key = words_so_far[x]\n new_word = generate_next_word(model,key)\n if new_word == \".\":\n words_so_far[x] = words_so_far[x]+'.'\n new_word= generate_new_word(model)\n elif new_word == {}:\n new_word = generate_new_word(model)\n words_so_far.append(new_word)\n\n return str.join(' ', words_so_far)", "def wcount(lines, topn):\n word = ''\n for i in lines:\n if 65<=ord(i) and ord(i)<=90:\n word = word + i \n elif 97<=ord(i) and ord(i)<=122:\n word = word + i\n else:\n word = word + ' ' \n word = word.split()\n #提取不重复的单词\n alreadyknown = []\n for m in word:\n if m not in alreadyknown:\n alreadyknown.append(m)\n #分别数数,排序,建构字典\n empty = []\n final = {}\n final2 = {}\n for j in alreadyknown:\n number = icount(word,j)\n final[j]=number\n final2[str(number)]=j\n empty.append(number)\n empty.sort()\n empty.reverse()\n last_step = empty[:10]\n #通过数字找到对应word\n last_str = ''\n for y in last_step:\n z = final2[str(y)]\n last_str += z + \"\\t\" + str(y) + \"\\n\"\n return last_str", "def test20():\n\tdef highlight_word(sentence, word):\n\t\treturn(\" \".join(x) for x in sentence.split())\n\n\tprint(highlight_word(\"Have a nice day\", \"nice\"))\n\tprint(highlight_word(\"Shhh, don't be so loud!\", \"loud\"))\n\tprint(highlight_word(\"Automating with Python is fun\", \"fun\"))", "def random_text(n):\n start = random.choice(suffix_map.keys())\n for i in range(n):\n suffixes = suffix_map.get(start, None)\n if suffixes == None:\n # if the start isn't in map, we got to the end of the\n # original text, so we have to start again.\n random_text(n-i)\n return\n # choose a random suffix\n word = random.choice(suffixes)\n # Jodesty: *Need to learn how to format text output to fit on terminal screen\n output_words.append(word)\n # Jodesty: *what I have for now\n print word,\n start = shift(start, word)", "def test_downcase_word(self):\n before_b = \"\"\"\\\n XYZZY line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n xyzzy line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"1.4\", \"1.4\"),\n after_sel=(\"1.4\", \"1.4\"),\n command_name=\"downcase-word\",\n )", "def print_words(words):\n print('Here are the words you entered:')\n for i, word in enumerate(words, start=1):\n print('{}. 
{}'.format(i, word.title()))", "def pascal_case_text(text):\n newText = format_pascal_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})", "def _snake_case(display_name):\n str_re = re.compile('[{0}]'.format(re.escape(string.punctuation)))\n str = str_re.sub(' ', display_name)\n str = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', str)\n str = re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', str).lower()\n return re.sub(' +', '_', str)", "def rotate_word(s1, n):\n s2 = ''\n for c in s1:\n i = (ord(c)-97+n) % 26\n ch = chr(i+97)\n s2 = s2 + ch\n return s2", "def CamelCase_to_snake_case(text):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', text)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()", "def nth_word(value: str, n: int) -> str:\n return value.split()[n]", "def make_text(chains, n):\n\n words = []\n\n capital_keys = [key for key in chains.keys() if key[0][0].isupper() and chains[key] != None]\n first_key = choice(capital_keys)\n\n words.extend(list(first_key))\n rand_value = choice(chains[first_key])\n words.append(rand_value)\n\n current_string = \" \".join(words)\n\n i = 1\n while len(current_string) < 140:\n current_string = \" \".join(words)\n new_key = tuple(words[i: i + n])\n if not chains[new_key]:\n break\n else:\n rand_value = choice(chains[new_key])\n words.append(rand_value)\n i += 1\n\n return current_string", "def get_number_of_words_with_capital_letters_and_lowercase(self):\n filename = f'{self.path}/{self.filename}'\n file = open(filename, 'r', encoding='utf-8')\n data = file.read()\n head, sep, tail = data.partition('<binary')\n head = re.sub('\\\\s\\\\s*', ' ', (re.sub('\\\\W|\\\\d', ' ', re.sub('<.*?>', '', head))))\n word_list = head.split()\n\n # upper_cnt = (sum([sum([c.isupper() for c in a]) for a in word_list]))\n # lower_cnt = (sum([sum([c.islower() for c in a]) for a in word_list]))\n\n upper_cnt = sum([a[0].isupper() for a in word_list])\n lower_cnt = (sum([a.islower() for a in word_list]))\n\n sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 'words_with_capital_letters', upper_cnt)\n sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 'words_in_lowercase', lower_cnt)\n print(datetime.now(), '-', 'words_with_capital_letters for', self.filename, 'calculated =', upper_cnt)\n print(datetime.now(), '-', 'words_in_lowercase for', self.filename, 'calculated =', lower_cnt)\n return None" ]
[ "0.7383346", "0.7166221", "0.6926944", "0.6357526", "0.62876", "0.6222895", "0.5993043", "0.5974094", "0.5933266", "0.5884368", "0.5855512", "0.58387643", "0.57972294", "0.57646835", "0.574676", "0.57403195", "0.57358956", "0.5729549", "0.5655932", "0.5653761", "0.56503767", "0.562152", "0.5605645", "0.55945045", "0.55934334", "0.5572235", "0.5572001", "0.5568948", "0.55586344", "0.55516106" ]
0.7747876
0
Formats n words to the left of the cursor to upper case. Note that word count differs between editors and programming languages. The examples are all from Eclipse/Python.
def uppercase_count(n):
    saveText = _get_clipboard_text()
    cutText = _select_and_cut_text(n)
    if cutText:
        newText = cutText.upper()
        newText = newText.replace("%", "%%")  # Escape any format chars.
        Text(newText).execute()
    else:  # Failed to get text from clipboard.
        Key('c-v').execute()  # Restore cut out text.
        _set_clipboard_text(saveText)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pascal_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = text.title().replace(' ', '')\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)", "def camel_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = _camelify(text.split(' '))\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)", "def lowercase_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n newText = cutText.lower()\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)", "def snake_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText.lower())\n newText = '_'.join(text.split(' '))\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)", "def upper(value,n):\n return value.upper()[0:n]", "def wrap_by_word(s, n):\n\ta = s.split()\n\tret = ''\n\tfor i in range(0, len(a), n):\n\t\tret += ' '.join(a[i:i+n]) + '\\n'\n\treturn ret", "def print_upper_words(words):\n for word in words:\n print(word.upper())", "def _set_number_of_words(self, N):\n self.N_words_to_display = N", "def format_camel(text):\r\n\toutput = \"\"\r\n\tfor i,w in enumerate(text.split(\" \")):\r\n\t\tif i > 0:\r\n\t\t\toutput += w[0].upper() + w[1:]\r\n\t\telse:\r\n\t\t\toutput += w\r\n\treturn output", "def print_upper_words(words):\n \n for word in words:\n print(word.upper())", "def capitalize_text(text, words_list, marker=''):\n for word in words_list:\n text = capitalize_term_re(text, word, marker)\n return text", "def titleize(phrase):\n words = phrase.split(' ')\n results = ''\n for i in range(len(words)):\n word = ''\n for j in range(len(words[i])):\n if(j == 0):\n word = words[i][j].upper() \n else:\n word += words[i][j].lower()\n results += word\n if(i < len(words) - 1):\n results += ' '\n return results", "def get_number_of_words_with_capital_letters_and_lowercase(self):\n filename = f'{self.path}/{self.filename}'\n file = open(filename, 'r', encoding='utf-8')\n data = file.read()\n head, sep, tail = data.partition('<binary')\n head = re.sub('\\\\s\\\\s*', ' ', (re.sub('\\\\W|\\\\d', ' ', re.sub('<.*?>', '', head))))\n word_list = head.split()\n\n # upper_cnt = (sum([sum([c.isupper() for c in a]) for a in word_list]))\n # lower_cnt = (sum([sum([c.islower() for c in a]) for a in word_list]))\n\n upper_cnt = sum([a[0].isupper() for a in word_list])\n lower_cnt = (sum([a.islower() for a in word_list]))\n\n sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 
'words_with_capital_letters', upper_cnt)\n sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 'words_in_lowercase', lower_cnt)\n print(datetime.now(), '-', 'words_with_capital_letters for', self.filename, 'calculated =', upper_cnt)\n print(datetime.now(), '-', 'words_in_lowercase for', self.filename, 'calculated =', lower_cnt)\n return None", "def display_top_n_words(total_count__of_words, n): # Considering n=10 here as specified in the requirements\n return sorted(total_count__of_words.items(), key=lambda i: i[1], reverse=True)[:n]", "def print_words(words):\n print('Here are the words you entered:')\n for i, word in enumerate(words, start=1):\n print('{}. {}'.format(i, word.title()))", "def make_title(words):", "def titleize(phrase):\n nl = phrase.split(' ')\n o=[]\n for i in nl:\n o.append(i[0].upper()+i[1:].lower())\n print(' '.join(o))", "def expand_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n cutText = _expand_after_special_chars(cutText)\n reg = re.compile(\n r'([a-zA-Z0-9_\\\"\\'\\)][=\\+\\-\\*/\\%]|[=\\+\\-\\*/\\%][a-zA-Z0-9_\\\"\\'\\(])')\n hit = reg.search(cutText)\n count = 0\n while hit and count < 10:\n cutText = cutText[:hit.start() + 1] + ' ' + \\\n cutText[hit.end() - 1:]\n hit = reg.search(cutText)\n count += 1\n newText = cutText\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)", "def words_capital_letter(self, path, filemoving, parser):\n root = parser.parsing_xml(path, filemoving)\n root_tag = root.tag[0:(root.tag.find('}') + 1)]\n number_of_words_capital_letter = 0\n for i in root.iter(root_tag+'p'):\n if str(type(i.text)) == \"<class 'str'>\":\n number_of_words_capital_letter = number_of_words_capital_letter + sum(list(map(lambda x: x.istitle(), i.text.split())))\n return number_of_words_capital_letter", "def wcount(lines, topn):\n word = ''\n for i in lines:\n if 65<=ord(i) and ord(i)<=90:\n word = word + i \n elif 97<=ord(i) and ord(i)<=122:\n word = word + i\n else:\n word = word + ' ' \n word = word.split()\n #提取不重复的单词\n alreadyknown = []\n for m in word:\n if m not in alreadyknown:\n alreadyknown.append(m)\n #分别数数,排序,建构字典\n empty = []\n final = {}\n final2 = {}\n for j in alreadyknown:\n number = icount(word,j)\n final[j]=number\n final2[str(number)]=j\n empty.append(number)\n empty.sort()\n empty.reverse()\n last_step = empty[:10]\n #通过数字找到对应word\n last_str = ''\n for y in last_step:\n z = final2[str(y)]\n last_str += z + \"\\t\" + str(y) + \"\\n\"\n return last_str", "def test_capitalize_word(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n Line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"3.6\", \"3.6\"),\n after_sel=(\"3.6\", \"3.6\"),\n command_name=\"capitalize-word\",\n )", "def proper_title_case(s):\n nocaps = [\"the\"] # This needs to be extended.", "def render_camel(var_words):\n return ''.join([word.capitalize() for word in var_words])", "def create_word(char_list):", "def __process_word(self, word):\n output = ''\n capitals = self.__capital_positions(word)\n c_index = 0\n\n for c in word:\n if c_index in capitals:\n output += c.upper()\n else:\n output += c.lower()\n\n c_index += 1\n\n 
return output", "def ladcased(normal):\r\n\r\n ladified = ''\r\n for i, c in enumerate(normal):\r\n ladified += c.lower() if (i % 2 == 0) else c.upper()\r\n\r\n return ladified", "def test_upcase_word(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n LINE a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"3.7\", \"3.7\"),\n after_sel=(\"3.7\", \"3.7\"),\n command_name=\"upcase-word\",\n )", "def nth_word(value: str, n: int) -> str:\n return value.split()[n]", "def word_capital(text):\n if text and len(text) > 0:\n return ' '.join([s[0].upper() + s[1:] for s in text.split(' ') if len(s) > 0])\n else:\n return text", "def display_words(word_list,specifier):\n \n if specifier.lower() == 'score':\n print(\"{:>6s} - {:s}\".format(\"Score\", \"Word\"))\n if len(word_list) < 5:\n for tup in word_list:\n print(\"{:>6d} - {:s}\".format(tup[1], tup[0]))\n else:\n \n for tup in word_list[:5]:\n print(\"{:>6d} - {:s}\".format(tup[1], tup[0]))\n \n \n elif specifier.lower() == 'length':\n print(\"{:>6s} - {:s}\".format(\"Length\", \"Word\"))\n if len(word_list) < 5:\n for tup in word_list:\n print(\"{:>6d} - {:s}\".format(tup[2], tup[0]))\n else:\n \n for tup in word_list[:5]:\n print(\"{:>6d} - {:s}\".format(tup[2], tup[0]))" ]
[ "0.7106559", "0.6925886", "0.6874002", "0.66543984", "0.64259547", "0.60723484", "0.6036499", "0.6025138", "0.59689647", "0.5953917", "0.58584684", "0.5850111", "0.58310604", "0.58229566", "0.5809339", "0.57956696", "0.5791785", "0.5782367", "0.57508373", "0.5699419", "0.56948966", "0.5678806", "0.5673155", "0.56342334", "0.5627989", "0.5619573", "0.56096303", "0.5608581", "0.56008816", "0.55885905" ]
0.6954752
1
Formats dictated text to lower case.
def lowercase_text(text):
    newText = format_lower_case(text)
    Text("%(text)s").execute({"text": newText})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_lower(self, text):\n return text.lower()", "def LOWER(text):\n return text.lower()", "def LCase(text):\n return text.lower()", "def lower(text):\n text = text.lower()\n return text", "def toLowerCase(self) -> None:\n self.text = self.text.lower()", "def normalize_case(text):\n text = str(text)\n return text.lower()", "def _lowercase(text: str) -> str:\n return text.lower()", "def _format(text):\n \n if isinstance(text, unicode):\n return text.lower().encode(\"UTF-8\")\n elif isinstance(text, str):\n return text.lower()", "def lowerCase(self,phrase):\n if(\"normalizeText\" in self._classes):\n return self._normalize.lowerCase(phrase)", "def preprocess(text):\n return text.lower()", "def lower(self) -> str:", "def lower(self, value):\n return self.text(value).lower()", "def snake_case_text(text):\n newText = format_snake_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})", "def to_lowercase(text: str) -> str:\n text = text.lower()\n return text", "def _transform_to_lowercase(self, doc: str):\n processed_tweet = doc.lower()\n return processed_tweet", "def lower_case_really():", "def tr_upper_to_lower(text):\n out = []\n for ch in text:\n if ch in tr_upper_to_lower_dict:\n out.append(tr_upper_to_lower_dict[ch])\n else:\n out.append(ch.lower())\n \n return \"\".join(out)", "def preprocess_text(self):\n self.text_received = self.text_received.replace(\" \", \"\").lower()", "def lowercase(raw_text):\n lowercase_text = [text.lower() for text in raw_text]\n return lowercase_text", "def clean_cases(text):\n return text.lower()", "def camel_case_text(text):\n newText = format_camel_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})", "def convert_pascal_case_to_readable_format(text):\n return \"\".join(list(map(lambda x : \" \" + x.lower() if x.isupper() else x, list(text))))", "def pascal_case_text(text):\n newText = format_pascal_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})", "def lower(value): # Only one argument.\n return value.lower()", "def lower(value): # Only one argument.\n return value.lower()", "def lower(value): # Only one argument.\n return value.lower()", "def fix_string_case(text):\n fixed = []\n for i in text:\n if is_case_sensitive(i):\n fixed.append(i)\n else:\n fixed.append(i.lower())\n return ''.join(fixed)", "def lowercase(self, value):\n return value.lower()", "def lowercase_name(name):\n return name.lower()", "def _camel_case_to_snake_case(text: str) -> str:\n return re.sub(r\"(?<!^)(?=[A-Z])\", \"_\", text).lower()" ]
[ "0.7950047", "0.7778725", "0.76062495", "0.757439", "0.7487152", "0.7474553", "0.744533", "0.73600155", "0.7350697", "0.7326691", "0.72125846", "0.7002865", "0.6945686", "0.6917932", "0.68785036", "0.68477106", "0.68410075", "0.670077", "0.6686607", "0.66477966", "0.6613953", "0.6600545", "0.65928", "0.6540414", "0.6540414", "0.6540414", "0.65396714", "0.65377384", "0.6515144", "0.6505206" ]
0.7870158
1
Formats n words to the left of the cursor to lower case. Note that word count differs between editors and programming languages. The examples are all from Eclipse/Python.
def lowercase_count(n):
    saveText = _get_clipboard_text()
    cutText = _select_and_cut_text(n)
    if cutText:
        newText = cutText.lower()
        newText = newText.replace("%", "%%")  # Escape any format chars.
        Text(newText).execute()
    else:  # Failed to get text from clipboard.
        Key('c-v').execute()  # Restore cut out text.
        _set_clipboard_text(saveText)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pascal_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = text.title().replace(' ', '')\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)", "def snake_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText.lower())\n newText = '_'.join(text.split(' '))\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)", "def camel_case_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = _camelify(text.split(' '))\n if endSpace:\n newText = newText + ' '\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)", "def to_lowercase(self):\n new_words = []\n # new_words = \"\"\n for word in self.words:\n new_word = word.lower()\n new_words.append(new_word)\n # new_word += f\"{new_word} \"\n self.words = new_words\n return self", "def lower(self) -> str:", "def words_lower_case(self, path, filemoving, parser):\n root = parser.parsing_xml(path, filemoving)\n root_tag = root.tag[0:(root.tag.find('}') + 1)]\n number_of_words_in_lower_case = 0\n for i in root.iter(root_tag+'p'):\n if str(type(i.text)) == \"<class 'str'>\":\n number_of_words_in_lower_case = number_of_words_in_lower_case + sum(list(map(lambda x: x.islower(), i.text.split())))\n return number_of_words_in_lower_case", "def wrap_by_word(s, n):\n\ta = s.split()\n\tret = ''\n\tfor i in range(0, len(a), n):\n\t\tret += ' '.join(a[i:i+n]) + '\\n'\n\treturn ret", "def lowercase_text(text):\n newText = format_lower_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})", "def _set_number_of_words(self, N):\n self.N_words_to_display = N", "def get_number_of_words_with_capital_letters_and_lowercase(self):\n filename = f'{self.path}/{self.filename}'\n file = open(filename, 'r', encoding='utf-8')\n data = file.read()\n head, sep, tail = data.partition('<binary')\n head = re.sub('\\\\s\\\\s*', ' ', (re.sub('\\\\W|\\\\d', ' ', re.sub('<.*?>', '', head))))\n word_list = head.split()\n\n # upper_cnt = (sum([sum([c.isupper() for c in a]) for a in word_list]))\n # lower_cnt = (sum([sum([c.islower() for c in a]) for a in word_list]))\n\n upper_cnt = sum([a[0].isupper() for a in word_list])\n lower_cnt = (sum([a.islower() for a in word_list]))\n\n sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 'words_with_capital_letters', upper_cnt)\n sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 'words_in_lowercase', lower_cnt)\n print(datetime.now(), '-', 'words_with_capital_letters for', self.filename, 'calculated =', upper_cnt)\n print(datetime.now(), '-', 'words_in_lowercase for', self.filename, 'calculated =', lower_cnt)\n return None", "def lowercase_well_known_word(text):\n lines = []\n lines_append = 
lines.append\n for line in text.splitlines(True):\n words = []\n words_append = words.append\n for word in line.split():\n if word in COMMON_WORDS:\n word = word.lower()\n words_append(word)\n lines_append(' '.join(words))\n return '\\n'.join(lines)", "def make_title(words):", "def proper_title_case(s):\n nocaps = [\"the\"] # This needs to be extended.", "def format_words(words):\n return sorted(words, key=str.lower)", "def pascal_case(value: str, **kwargs: Any) -> str:\n return \"\".join(map(str.title, split_words(value)))", "def ladcased(normal):\r\n\r\n ladified = ''\r\n for i, c in enumerate(normal):\r\n ladified += c.lower() if (i % 2 == 0) else c.upper()\r\n\r\n return ladified", "def print_words(words):\n print('Here are the words you entered:')\n for i, word in enumerate(words, start=1):\n print('{}. {}'.format(i, word.title()))", "def LCase(text):\n return text.lower()", "def titleize(phrase):\n nl = phrase.split(' ')\n o=[]\n for i in nl:\n o.append(i[0].upper()+i[1:].lower())\n print(' '.join(o))", "def titleize(phrase):\n words = phrase.split(' ')\n results = ''\n for i in range(len(words)):\n word = ''\n for j in range(len(words[i])):\n if(j == 0):\n word = words[i][j].upper() \n else:\n word += words[i][j].lower()\n results += word\n if(i < len(words) - 1):\n results += ' '\n return results", "def snake_case_text(text):\n newText = format_snake_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})", "def LOWER(text):\n return text.lower()", "def generate_words(self,N):\n for i in xrange(N):\n prefix = \" \" * self.chainlen\n name = \"\"\n suffix = \"\"\n while True:\n suffix = self.get_suffix(prefix)\n if suffix == \"\\n\" or len(name) > 9:\n break\n else:\n name = name + suffix\n prefix = prefix[1:] + suffix\n yield name.capitalize()", "def test20():\n\tdef highlight_word(sentence, word):\n\t\treturn(\" \".join(x) for x in sentence.split())\n\n\tprint(highlight_word(\"Have a nice day\", \"nice\"))\n\tprint(highlight_word(\"Shhh, don't be so loud!\", \"loud\"))\n\tprint(highlight_word(\"Automating with Python is fun\", \"fun\"))", "def wcount(lines, topn):\n word = ''\n for i in lines:\n if 65<=ord(i) and ord(i)<=90:\n word = word + i \n elif 97<=ord(i) and ord(i)<=122:\n word = word + i\n else:\n word = word + ' ' \n word = word.split()\n #提取不重复的单词\n alreadyknown = []\n for m in word:\n if m not in alreadyknown:\n alreadyknown.append(m)\n #分别数数,排序,建构字典\n empty = []\n final = {}\n final2 = {}\n for j in alreadyknown:\n number = icount(word,j)\n final[j]=number\n final2[str(number)]=j\n empty.append(number)\n empty.sort()\n empty.reverse()\n last_step = empty[:10]\n #通过数字找到对应word\n last_str = ''\n for y in last_step:\n z = final2[str(y)]\n last_str += z + \"\\t\" + str(y) + \"\\n\"\n return last_str", "def _lowercase(text: str) -> str:\n return text.lower()", "def create_word(char_list):", "def pascal_case_text(text):\n newText = format_pascal_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})", "def get_first_k_words(text: str, num_words: int) -> str:\n words = text.split()\n if num_words >= len(text):\n return text\n\n return ' '.join(words[:num_words])", "def uppercase_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n newText = cutText.upper()\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)" ]
[ "0.68059164", "0.67174524", "0.6428881", "0.612785", "0.60862404", "0.60718983", "0.6050795", "0.59884626", "0.59352654", "0.59344274", "0.5890492", "0.58382356", "0.5795074", "0.5784348", "0.5748356", "0.5746969", "0.5746722", "0.57407844", "0.57185817", "0.5685551", "0.56584406", "0.5656949", "0.5629105", "0.56139094", "0.5612925", "0.56016505", "0.5589428", "0.55866903", "0.55855596", "0.5556237" ]
0.73799914
0
Cleans up the text before formatting to camel, pascal or snake case. Removes dashes, underscores, single quotes (apostrophes) and replaces them with a space character. Multiple spaces, tabs or new line characters are collapsed to one space character. Returns the result as a string.
def _cleanup_text(text):
    prefixChars = ""
    suffixChars = ""
    if text.startswith("-"):
        prefixChars += "-"
    if text.startswith("_"):
        prefixChars += "_"
    if text.endswith("-"):
        suffixChars += "-"
    if text.endswith("_"):
        suffixChars += "_"
    text = text.strip()
    text = text.replace('-', ' ')
    text = text.replace('_', ' ')
    text = text.replace("'", ' ')
    text = re.sub('[ \t\r\n]+', ' ', text)  # Any whitespaces to one space.
    text = prefixChars + text + suffixChars
    return text
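A quick way to check the cleanup rules described above is to run the helper on a couple of representative inputs; the expected outputs below are inferred from the replacement rules, not taken from the source (the function itself needs "import re"):

    print(_cleanup_text("hello-world_example"))  # -> "hello world example"
    print(_cleanup_text("don't\tstop  now"))     # -> "don t stop now" (apostrophe replaced by a space, whitespace collapsed)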
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_review(self, text):\n text = text.lower() # lowercase capital letters\n\n if self.remove_stopwords:\n text = self.remove_stopwords_f(text, keep_neg_words=True)\n\n text = re.sub('[^a-zA-Z]+', ' ', text) # select only alphabet characters (letters only)\n # text = re.sub('[^a-zA-Z0-9]+', ' ', text) # select only alphanumeric characters (letters & numbers)\n # text = re.sub(r'\\W+', ' ', text) # Select only alphanumeric characters (including greek & underscore)\n\n text = re.sub(' +', ' ', text) # remove extra spaces\n\n if self.apply_normalization:\n text = self.normalize_text(text)\n\n return text", "def detokenize(self, text):\n text = ' ' + text + ' '\n text = self._dash_fixes.sub(r' \\1-\\2 ', text)\n text = self._dash_fixes2.sub(r' \\1-\\2 ', text)\n text = self._currency_or_init_punct.sub(r' \\1', text)\n text = self._noprespace_punct.sub(r'\\1 ', text)\n text = self._contract.sub(r\" \\1'\\2\", text)\n text = self._contractions.sub(r\"\\1\", text)\n text = self._esses.sub(r\"s \", text)\n text = self.moses_detokenizer.detokenize(text.split())\n text = text.strip()\n # capitalize\n if not text:\n return ''\n return text", "def CleanText(text):\n\n pretty_issue = text.lower().strip()\n\n quoteless_issue = re.sub('\\'', '', pretty_issue)\n no_punctuation_issue = re.sub('[^\\w\\s]|_+', ' ', quoteless_issue)\n one_space_issue = ' '.join(no_punctuation_issue.split())\n\n return one_space_issue", "def preprocess(self, s):\n stripped = re.sub(\"[^\\w\\s]\", \"\", s)\n stripped = re.sub(\"_\", \"\", stripped)\n\n stripped = re.sub(\"\\s+\", \" \", stripped)\n\n stripped = stripped.strip()\n\n return stripped.lower()", "def _clean(self, text):\n if len(self.alph) == 26:\n text = sub('[\\n\\t ' + string.punctuation + ']+?', '', text)\n else:\n text = sub('[\\n\\t]+?', '', text)\n\n text = text.lower()\n text = text.encode('ascii', 'ignore').decode()\n return text", "def desc_cleanser(self, txt):\n # New line issues\n txt = re.sub(r'\\\\n', r' ', txt)\n # Unicode cleanse\n txt = re.sub(r'\\\\u[\\d]{4}', r'', txt)\n # Remaining unicode cleanse\n txt = re.sub(r'\\\\{1,2}\\S+', r' ', txt)\n # Remove remaining non-alphanumeric and spaces\n txt = ''.join([i for i in txt if i.isalnum() or i.isspace() or i in ['.','?','!']])\n # Remove more than a single space\n txt = re.sub(r'\\s+', r' ', txt)\n\n return txt", "def sanitise(text: str):\n # Removes new lines, weird characters and dialogue\n text = \" \" + text + \" \"\n\n lined_text = text.split(\"\\n\")\n text = \"\"\n # Remove dialogue\n for line in lined_text:\n if \":\" in line:\n if line.index(\":\") < 15:\n index = line.index(\":\") + 1\n else:\n index = 0\n else:\n index = 0\n text = text + \"\\n\" + line[index:]\n\n # Lower case everything\n text = text.lower()\n\n text = text.replace(\"'s\", \" is\")\n text = text.replace(\"'ve\", \" have\")\n text = text.replace(\"n't\", \" not\")\n text = text.replace(\"I'm\", \"I am\")\n text = text.replace(\"'re\", \" are\")\n text = text.replace(\"’s\", \" is\")\n text = text.replace(\"’ve\", \" have\")\n text = text.replace(\"n’t\", \" not\")\n text = text.replace(\"I’m\", \"I am\")\n text = text.replace(\"’re\", \" are\")\n\n # Remove weird characters and double spaces\n weird_characters = [\".\", \",\", \"?\", \"!\", \"'\", \"’\", \"\\\"\", \"\\n\", \"\\t\", \"-\", \"/\", \"[\", \"]\", \"(\", \")\", \":\", \"“\", \"”\"]\n for weird_character in weird_characters:\n text = text.replace(weird_character, \" \")\n\n while \" \" in text:\n text = text.replace(\" \", \" \")\n\n return text", "def 
no_caps_and_ponctuation(text):\n return re.sub(r'[^\\w\\s]', '', text).lower()", "def pre_process(text: str) -> str:\n text = text.replace('--', '-')\n space_right = '!?:;,.-()*+-/<=>@^_'\n space_both = '-()*+-/<=>@^_'\n\n for punct in space_right:\n text = text.replace(punct, punct + ' ')\n for punct in space_both:\n text = text.replace(punct, ' ' + punct + ' ')\n\n # remove extra space\n text = re.sub(r' +', ' ', text)\n return text", "def _clean_text(text):\n rrb = re.compile(\"-RRB-\")\n lrb = re.compile(\"-LRB-\")\n new_text = re.sub(rrb, \" \", text)\n new_text = re.sub(lrb, \" \", new_text)\n\n punct = re.compile(r'[_?!.,]')\n new_text = re.sub(punct, \" \", new_text)\n\n new_text = str(new_text).lower()\n return new_text", "def text_cleaning(self, text):\n # remove string formatting '\\n' or '\\t'\n tmp_text = re.sub(r'\\n+', '. ', text)\n tmp_text = re.sub(r'\\t+', '. ', text)\n # remove words with non-ascii characters\n tmp_text = \" \".join([word for word in tmp_text.split() if self.is_ascii(word)])\n # remove email address\n tmp_text = \" \".join([word for word in tmp_text.split() if not word.startswith(\"@\")])\n # remove urls\n tmp_text = re.sub(r'http\\S+', '', tmp_text, flags=re.MULTILINE)\n tmp_text = re.sub(r'www\\S+', '', tmp_text, flags=re.MULTILINE)\n # remove punctuation but . (to split sentences)\n cleaned_text = re.sub('[^A-Za-z.,]+', ' ', tmp_text)\n # lowercase\n cleaned_text = cleaned_text.lower()\n\n return cleaned_text", "def clean_text(text):\n text = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", text)\n text = re.sub(r\"\\'s\", \" \\'s\", text)\n text = re.sub(r\"\\'ve\", \" \\'ve\", text)\n text = re.sub(r\"n\\'t\", \" n\\'t\", text)\n text = re.sub(r\"\\'re\", \" \\'re\", text)\n text = re.sub(r\"\\'d\", \" \\'d\", text)\n text = re.sub(r\"\\'ll\", \" \\'ll\", text)\n text = re.sub(r\",\", \" , \", text)\n text = re.sub(r\"!\", \" ! \", text)\n text = re.sub(r\"\\(\", \" \\( \", text)\n text = re.sub(r\"\\)\", \" \\) \", text)\n text = re.sub(r\"\\?\", \" \\? 
\", text)\n text = re.sub(r\"\\s{2,}\", \" \", text)\n return text.strip().lower()", "def remove_all_caps(text):\n return re.sub(r\"(\\b(?:[A-Z]+[a-z]?[A-Z]*|[A-Z]*[a-z]?[A-Z]+)\\b(?:\\s+(?:[A-Z]+[a-z]?[A-Z]*|[A-Z]*[a-z]?[A-Z]+)\\b)*)\",\n ' ', text)", "def keyify(text):\n text = text.lower()\n text = text.strip()\n\n text = text.replace('.', '')\n text = re.sub('[,-]', ' ', text)\n text = re.sub('\\s{2,}', ' ', text)\n\n return text", "def get_clean_text(messy_text: str) -> str:\n new_text = \"\"\n replace = {\n \"*\": \"\\\"\",\n \"!\": \"?\",\n \"/\": ',',\n \"?\": \"!\"\n }\n remove = \"1234567890&@#$%^()_+|><~\"\n pls_do_upper = False\n for l in messy_text:\n if l in replace:\n new_text += replace[l]\n elif l not in remove:\n if pls_do_upper:\n new_text += l.upper()\n else:\n new_text += l\n return new_text", "def clean(text):\n new = text.replace(\"\\r\", \"\")\n new = new.replace(\"\\t\", \"\")\n new = new.replace(\"\\n\", \"\")\n new = new.replace(\"- \", \"-\")\n new = new.replace(\" \", \" \")\n return new", "def preprocess_input(self, text):\n text = re.sub(r\"([^a-zA-Z0-9 -]+ +[^a-zA-Z0-9 -]*|[^a-zA-Z0-9 -]*\" +\n \" +[^a-zA-Z0-9 -]+)\", ' ', text, flags=re.UNICODE)\n text = re.sub(r\"([^a-zA-Z0-9 -]+$|^[^a-zA-Z0-9 -]+)\", '', text)\n text = re.sub(r\"([a-zA-Z0-9 -]+?)([^a-zA-Z0-9 -])([a-zA-Z0-9 -]+?)\",\n r\"\\1'\\3\", text, flags=re.UNICODE)\n text = re.sub(r\"([\\x00-\\x7F -]+?)([^a-zA-Z0-9 -]+)([\\x00-\\x7F -]+?)\",\n r\"\\1'\\3\", text, flags=re.UNICODE).encode(\"utf-8\")\n return re.sub(r\"([^a-zA-Z0-9 \\-\\'])\", '', text, flags=re.UNICODE)", "def standardize(text):\n # FIXME regex restricts us to only ascii\n # FIXME move regex compilation outside\n p = re.compile('[^a-zA-Z]')\n retval = p.sub('', text)\n retval = retval.lower()\n return retval", "def clean_text(text):\n text = text.lower()\n text = text.replace('\\xa0', ' ')\n text = text.replace('fls.', 'folhas ')\n text = text.replace('fl.', 'folha ')\n text = text.replace('arts.', 'artigos ')\n text = text.replace('art.', 'artigo ')\n text = re_tree_dots.sub('...', text)\n text = re.sub(r'\\.\\.\\.', ' ', text)\n text = re_remove_brackets.sub(' ', text)\n text = re_changehyphen.sub('-', text)\n text = re_remove_html.sub(' ', text)\n text = re_transform_numbers.sub('0', text)\n text = re_transform_url.sub('URL', text)\n text = re_transform_emails.sub('EMAIL', text)\n text = re_quotes_1.sub(r'\\1\"', text)\n text = re_quotes_2.sub(r'\"\\1', text)\n text = re_quotes_3.sub('\"', text)\n text = re.sub('\"', ' ', text)\n text = re_dots.sub('.', text)\n text = re_punctuation.sub(r'\\1', text)\n text = re_hiphen.sub(' - ', text)\n text = re_punkts.sub(r'\\1 \\2 \\3', text)\n text = re_punkts_b.sub(r'\\1 \\2 \\3', text)\n text = re_punkts_c.sub(r'\\1 \\2', text)\n text = re_doublequotes_1.sub('\\\"', text)\n text = re_doublequotes_2.sub('\\'', text)\n text = re_trim.sub(' ', text)\n return text.strip()", "def preprocess_text(text):\n # replace non characers with space and lower case\n temp = re.sub(r\"[/W/D/S.,-]+\", \" \", str(text).lower())\n # merge multiple spaces to a single one\n return re.sub(r\"[ ]+\", \" \", temp)", "def sanitize_txt(x):\n return '_'.join(smart_split(x.lower()))", "def normalize_text(text):\n text = re.sub(r'[ \\t]+', ' ', text)\n text = re.sub(r'\\r', '', text)\n\n # Remove whitespace in the middle of text.\n text = re.sub(r'[ \\t]+\\n', '\\n', text)\n # Remove whitespace at the end of the text.\n text = text.rstrip()\n\n return text", "def _lowercase_despace_depunctuate(some_str=None):\n some_str 
= some_str.replace(\" \", \"\")\n some_str = some_str.replace(\"_\", \"\")\n some_str = some_str.replace(\"-\", \"\")\n some_str = some_str.lower()\n return some_str", "def clean_text(text):\n text = text.lower()\n text = re.sub(r\"i'm\", 'i am', text)\n text = re.sub(r\"he's\", 'he is', text)\n text = re.sub(r\"she's\", 'she is', text)\n text = re.sub(r\"that's\", 'that is', text)\n text = re.sub(r\"what's\", 'what is', text)\n text = re.sub(r\"where's\", 'where is', text)\n text = re.sub(r\"\\'ll\", ' will', text)\n text = re.sub(r\"\\'ve\", ' have', text)\n text = re.sub(r\"\\'re\", ' are', text)\n text = re.sub(r\"\\'d\", ' would', text)\n text = re.sub(r\"won't\", 'will not', text)\n text = re.sub(r\"can't\", 'cannot', text)\n text = re.sub(r\"[-()\\\"#/@;:<>{}+=~|.?,]\", '', text)\n return text", "def convert_pascal_case_to_readable_format(text):\n return \"\".join(list(map(lambda x : \" \" + x.lower() if x.isupper() else x, list(text))))", "def clean_text_from_nonbasic_characters(text):\n text = re.sub(r\"([^\\u0000-\\u007F])\", \" \", text)\n text = replace_newline_with_space(text).strip()\n text = text.replace(\"_\", \"\")\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n return text", "def clean_text_from_nonbasic_characters(text):\n text = re.sub(r\"([^\\u0000-\\u007F])\", \" \", text)\n text = replace_newline_with_space(text).strip()\n text = text.replace(\"_\", \"\")\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n return text", "def clean_text(text):\n\n\n regex = re.compile('[\\.|\\-|\\,|\\?|\\_|\\:|\\\"|\\)|\\(\\)\\/|\\\\|\\>|\\<]')\n text = text.lower() # Turn everything to lower case\n text = regex.sub(' ', text).strip()\n out = re.sub(' +', ' ', text) # Reduce whitespace down to one\n \n return out", "def normalize_space(text):\n return re.sub(r\"\\s+\", \" \", text.strip(), flags=re.UNICODE)", "def clean_text ( self, text ) :\n text = BeautifulSoup ( text , \"lxml\" ).text # HTML decoding\n text = text.lower ( ) # lowercase text\n text = REPLACE_BY_SPACE_RE.sub ( ' ' , text ) # replace REPLACE_BY_SPACE_RE symbols by space in text\n text = BAD_SYMBOLS_RE.sub ( '' , text ) # delete symbols which are in BAD_SYMBOLS_RE from text\n text = ' '.join ( word for word in text.split ( ) if word not in STOPWORDS ) # delete stopwors from text\n return text" ]
[ "0.6987627", "0.69115055", "0.68299866", "0.68170524", "0.6792189", "0.6779221", "0.6683785", "0.6663541", "0.66497254", "0.661544", "0.66122067", "0.6598049", "0.65703285", "0.6562379", "0.6545204", "0.6541442", "0.6536878", "0.6534535", "0.6511388", "0.64796376", "0.6465856", "0.64221936", "0.63998723", "0.639693", "0.6344568", "0.6332369", "0.6332369", "0.6328018", "0.63158756", "0.6308443" ]
0.7590578
0
Returns the text contents of the system clipboard.
def _get_clipboard_text():
    clipboard = Clipboard()
    return clipboard.get_system_text()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTextFromClipboard(self) -> str:\n cb = self.qtApp.clipboard()\n if cb:\n QtWidgets.QApplication.processEvents()\n return cb.text()\n g.trace('no clipboard!')\n return ''", "def read_all_screen(self):\n full_text = \"\"\n for ypos in range(self.model_dimensions[\"rows\"]):\n full_text += self.string_get(ypos + 1, 1, self.model_dimensions[\"columns\"])\n return full_text", "def getText(self):\n if self.app.children:\n return self.app.childActive.source.GetText()\n else:\n return ''", "def get_console_text(self):\n console_text_api = '/consoleText'\n return self._api_request(self.url + console_text_api)", "def _get_pad_content(self):\n self.ensure_one()\n return self.pad_get_content(self.description_pad)", "def text(self):\n text = ''\n for run in self.runs:\n text += run.text\n return text", "def text_output(self):\n print(self.board)\n print()", "def getText():", "def getText():", "def getText():", "def getText():", "def getText():", "def text(self) -> str:\n return self._impl.get_text()", "def get_text(self):\n text_element = self.page.find(id=self.text_location)\n return text_element.get_text()", "def getText(self):\n return _libsbml.TextGlyph_getText(self)", "def get_text(self):\n\n return self.output['text']", "def _get_edit_text(hwnd):\n buf_size = win32gui.SendMessage(hwnd, win32con.WM_GETTEXTLENGTH, 0, 0)\n buf_size += 1 # don't forget that null character boys...\n buffer = win32gui.PyMakeBuffer(buf_size)\n # odd, we're telling them how big the text is that they're giving\n # back to us\n win32gui.SendMessage(hwnd, win32con.WM_GETTEXT, buf_size, buffer)\n # don't need the null character now for Python\n return buffer[:buf_size]", "def get_text(self):\n return self.output.getvalue()", "def text(self):\n return self.content", "def get_visible_text(self):\n return self.browser.find_element_by_xpath(\"//body\").text", "def get_text(self):\n rc = \"\"\n for node in self.node.childNodes:\n if node.nodeType == node.TEXT_NODE:\n rc = rc + node.data\n return rc", "def get_text(self):\n txt = self.lang.tool.image_to_string(\n self.image,\n lang=self.lang,\n builder=pyocr.builders.TextBuilder()\n )\n return txt", "def copy_to_clipboard(self, txt):\r\n cmd = 'echo \"' + txt.strip() + '\"|clip'\r\n return subprocess.check_call(cmd, shell=True)", "def text(self):\n return \"\\n\".join(self.raw_text)", "def get_content(self):\r\n view = self.window.active_view()\r\n selection = \"\"\r\n for region in view.sel():\r\n # If no selection, use the entire file as the selection\r\n if region.empty():\r\n selection = sublime.Region(0, view.size())\r\n else:\r\n selection = region\r\n return view.substr(selection)", "def GetText(self):\r\n \r\n return self._text", "async def getDisplayText(self):\n display_text = await self.director.getItemVariableValue(\n self.item_id, \"DISPLAY_TEXT\"\n )\n return display_text", "def get_text(self):", "def currentText(self, toNative=True):\n return self.text(toNative=toNative)", "def contents(self) -> str:\n _args: list[Arg] = []\n _ctx = self._select(\"contents\", _args)\n return _ctx.execute_sync(str)" ]
[ "0.65942186", "0.6435534", "0.63300586", "0.61199254", "0.6119129", "0.61073905", "0.6069073", "0.5950574", "0.5950574", "0.5950574", "0.5950574", "0.5950574", "0.58960754", "0.58941376", "0.588573", "0.5876206", "0.5874571", "0.5853478", "0.58394194", "0.58292186", "0.58160985", "0.58100575", "0.5805365", "0.57969093", "0.5796902", "0.57928896", "0.5784233", "0.5780899", "0.5770279", "0.573885" ]
0.68701446
0
Selects wordCount number of words to the left of the cursor and cuts them out of the text. Returns the text from the system clip board.
def _select_and_cut_text(wordCount):
    clipboard = Clipboard()
    clipboard.set_system_text('')
    Key('cs-left/3:%s/10, c-x/10' % wordCount).execute()
    return clipboard.get_system_text()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cut_in_words(self,linea):\n length = 0\n res = ''\n limit_screen = 30\n for word in linea.split(' '):\n if length + len(word) <= limit_screen:\n new_word = word + ' '\n length += len(new_word)\n else:\n new_word = '\\n' + word + ' '\n length = len(new_word) - 2 #-2 para no tener en cuenta el \\n\n res += new_word\n return res", "def truncate(text, words=25):\n return ' '.join((text).split()[:words])", "def getMarked(self):\n if not self.selection.isSelection():\n return u\"\"\n sm1, sm2 = self.selection.order(self.selection.selectionMark,\n self.selection.selectionMark2)\n w1 = sm1[0]\n w2 = sm2[0]\n cx1 = sm1[1]\n cx2 = sm2[1]\n if (w1 == w2):\n return w1.string[cx1:cx2]\n # Get the word fragments at the beginning and end of the selection\n snip1 = w1.string[cx1:]\n snip2 = w2.string[:cx2]\n tl1 = w1.tline\n wx1 = tl1.twords.index(w1)\n tl2 = w2.tline\n wx2 = tl2.twords.index(w2)\n # Start the text string with the format of the first line\n text = tl1.para.getFormat() + snip1\n # then get all intervening words\n if (tl1 == tl2): # only 1 line is involved\n # get words from wx1+1 to wx2-1 (incl.)\n for w in tl1.twords[wx1+1:wx2]:\n text += u\" \" + w.string\n ch = u\" \"\n\n else: # deletion block covers >1 line\n # get words from wx1+1 to end of paragraph\n for w in tl1.twords[wx1+1:]:\n text += u\" \" + w.string\n # get all the intervening lines\n while True:\n para = tl1.para\n tl1 = self.rsubject.nextLine(tl1)\n if (tl1.para == para):\n text += u\" \"\n else:\n text += u\"\\n\" + tl1.para.getFormat()\n if (tl1 == tl2): break\n text += tl1.getText()\n\n ch = u\"\"\n # Add the remaining words in tl2 up to w2-1\n for w in tl2.twords[:wx2]:\n text += ch + w.string\n ch = u\" \"\n\n # Add the fragment of the last marked word\n return text + ch + snip2", "def copy_text(self):\n self.window.clipboard_clear()\n if self.tab_control.index(\"current\") == 0:\n try:\n self.text = self.textbox.get(\"sel.first\", \"sel.last\")\n except tk.TclError:\n self.text = self.textbox.get(\"1.0\", tk.END)\n self.window.clipboard_append(self.text)\n elif self.tab_control.index(\"current\") == 1:\n self.window.clipboard_append(self.words)", "def Left(text, number):\n return text[:number]", "def _(event):\n pos = line.document.find_start_of_previous_word(count=event.arg)\n if pos:\n deleted = line.delete_before_cursor(count=-pos)\n line.set_clipboard(ClipboardData(deleted))", "def cut_text(value, length): # Only one argument.\n return value[0:length]", "def get_first_k_words(text: str, num_words: int) -> str:\n words = text.split()\n if num_words >= len(text):\n return text\n\n return ' '.join(words[:num_words])", "def truncate(self):\n return Truncator(self.content).words(\n self.max_words, self.more_string, html=True)", "def cut_text(text):\n for phrase in TERMINALS:\n if phrase in text:\n return text[:text.index(phrase)]\n\n SavedSource(label=LABEL, subject='cut_text', body=text).put()\n return text", "def extract_context(words, mask_index, window):\n total_length = len(words)\n half_window = int(window / 2)\n assert 0 <= mask_index < total_length\n return words[max(0, mask_index - half_window):min(total_length, mask_index + half_window + 1)]", "def show_word(self):\n self.display_word = len(self.chosen_word) * \"_ \"\n Donatello.draw_word(self.display_word)\n return self.display_word", "def characters_left(self):\r\n return self.max_chars - len(self.variable.get())", "def crop_title(self):\n top_line_and_words = self.get_line_and_word_boxes()\n top_line_and_words = [boxes for boxes in 
self.get_line_and_word_boxes() if boxes.content not in self.ocr_noise]\n if top_line_and_words:\n topleft, bottomright = top_line_and_words[0].position\n print(topleft, bottomright)\n if topleft[1] <= self.title_position:\n self.image = self.image.crop((0, bottomright[1], self.width, self.height))\n self.update_img()\n self.update_size()", "def pop_word(self, index: int=None) -> Optional[str]:\n if len(self.words) > 0:\n word = self.words.pop() if index is None else self.words.pop(index)\n if word == ' ':\n self.spaces_width -= self.space_width\n else:\n self.width -= self.get_word_width(word)\n return word", "def sentence(num_words=20, chars=''):\r\n word_list = _Book.get_text().split()\r\n words = ' '.join(_random.choice(word_list) for x in\r\n xrange(num_words))\r\n return (words if not chars else words[:chars])", "def remove_longer_words(text):\n return \" \".join([word for word in str(text).split() if len(word) <= 12])", "def words_before_index(text, idx):\n while text[idx] != ' ':\n idx -= 1\n if idx <= 0:\n return 0\n n_words = len(text[:idx].split(' '))\n return n_words", "def get_word_before(self):\n if self.word_before:\n return self.word_before\n\n lines = self.get_lines()\n row, col = position_from_utf16(lines, self.position)\n line = lines[row]\n start = line[:col]\n\n word_start_match = RE_START_WORD.search(start)\n\n if not word_start_match:\n self.word = \"\"\n self.word_before = \"\"\n else:\n substart = start[: word_start_match.start()].rstrip()\n word_before_match = RE_WORD_BEFORE.findall(substart)[0]\n\n self.word = word_start_match[0]\n self.word_before = word_before_match[0]\n return self.word_before", "def word_wrap(self):\n textArea = self.get_current()\n if self.wrap.get() == 0:\n textArea.config(wrap='none')\n elif self.wrap.get() == 1:\n textArea.config(wrap='word')", "def cut_words(value, arg):\n\treturn value.replace(arg, '')", "def squash_count(n):\n saveText = _get_clipboard_text()\n cutText = _select_and_cut_text(n)\n if cutText:\n endSpace = cutText.endswith(' ')\n text = _cleanup_text(cutText)\n newText = ''.join(text.split(' '))\n if endSpace:\n newText = newText + ' '\n newText = _expand_after_special_chars(newText)\n newText = newText.replace(\"%\", \"%%\") # Escape any format chars.\n Text(newText).execute()\n else: # Failed to get text from clipboard.\n Key('c-v').execute() # Restore cut out text.\n _set_clipboard_text(saveText)", "def getWordUnderCursor():\n\treturn vim.eval('expand(\"<cword>\")')", "def count_selected_chars(self, event=None):\n try:\n textArea = self.get_current()\n chars = textArea.count(\"sel.first\", \"sel.last\")\n line_breaks = textArea.count(\"sel.first\", \"sel.last\", \"lines\")\n if line_breaks:\n if line_breaks[0] == 1:\n self.selected_chars_lbl.config(text=f\"{chars[0]} chars, {line_breaks[0]} line break\")\n elif line_breaks[0] > 1:\n self.selected_chars_lbl.config(text=f\"{chars[0]} chars, {line_breaks[0]} line breaks\")\n else:\n if chars[0] == 1:\n self.selected_chars_lbl.config(text=f\"{chars[0]} char selected\")\n else:\n self.selected_chars_lbl.config(text=f\"{chars[0]} chars selected\")\n except:\n self.selected_chars_lbl.config(text=\"--------------\")", "def displayed_words(self):\n return (len(strip_tags(self.preview).split()) -\n (len(self.more_string.split()) * int(not bool(self.lead))))", "def findBestShift(wordList, text):\n ### TODO\n max_words = 0\n best_shift = 0\n lis = []\n for i in range(0,26):\n lis = applyShift(text, i).split(' ')\n count = 0\n for j in lis:\n if isWord(wordList, j):\n count += 1\n 
if count > max_words:\n max_words = count\n best_shift = i\n \n return best_shift", "def chosen():\n wordList = loadWords()\n w = random.choice(wordList)\n word = w[:-1]\n return word", "def truncate_description(description):\n if len(description) <= 160 :\n return description\n\n cut_desc = \"\"\n character_counter = 0\n for i, letter in enumerate(description) :\n character_counter += 1\n if character_counter > 160 :\n if letter == ' ' :\n return cut_desc+\"...\"\n else :\n return cut_desc.rsplit(' ',1)[0]+\"...\"\n cut_desc += description[i]\n return cut_desc", "def _get_clipboard_text():\n clipboard = Clipboard()\n return clipboard.get_system_text()", "def _get_edit_text(hwnd):\n buf_size = win32gui.SendMessage(hwnd, win32con.WM_GETTEXTLENGTH, 0, 0)\n buf_size += 1 # don't forget that null character boys...\n buffer = win32gui.PyMakeBuffer(buf_size)\n # odd, we're telling them how big the text is that they're giving\n # back to us\n win32gui.SendMessage(hwnd, win32con.WM_GETTEXT, buf_size, buffer)\n # don't need the null character now for Python\n return buffer[:buf_size]" ]
[ "0.5991796", "0.5976473", "0.5937633", "0.59360015", "0.5921066", "0.56286114", "0.5511649", "0.54804397", "0.54544324", "0.53912175", "0.53642124", "0.5343262", "0.5335321", "0.53241056", "0.5289842", "0.52851576", "0.52594143", "0.5233265", "0.52057403", "0.5187018", "0.51681244", "0.5141727", "0.5131182", "0.5123632", "0.5094309", "0.50735956", "0.50644696", "0.50620615", "0.506059", "0.5060172" ]
0.808763
0
Generates a plot of the specified data file and sets the ThumbnailPanel's bitmap accordingly
def plot_thumb(self, data_fname):
    thumbnail = self.controller.plot_thumb(data_fname, self.bitmap_width, self.bitmap_height)
    if thumbnail is not None:
        self.figure_bmp.SetBitmap(thumbnail)
    else:
        self.plot_blank()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generate_plot(ax, power_data, title, min_db, max_db):\n # only generate plots for the transducers that have data\n if power_data.size <= 0:\n return\n\n ax.set_title(title, fontsize=ZPLSCCPlot.font_size_large)\n return imshow(ax, power_data, interpolation='none', aspect='auto', cmap='jet', vmin=min_db, vmax=max_db)", "def test_plot_images(self):\n save_file(self.quart.plot_images)", "def plot(self, job):\n # fill PlotJob with needed data if it doesn't exist\n # Plotter will look for the files it needs relative to the work directory\n # If this fails it will fall back to a baseline location if one was \n # Provided to cmake at the time this file was generated\n if job.dataPath == None :\n job.dataPath = \"Scenarios/\" + job.verificationDirectory + \"/baselines/\"\n \n if job.dataFile == None:\n job.dataFile = job.name + \"Results.zip\"\n \n if job.outputFilename==None:\n job.outputFilename=job.titleOverride+\".jpg\"\n \n if len(job.outputFilename.split(\".\"))==1:\n job.outputFilename+=\".jpg\"\n \n if job.imageWidth==None and job.imageHeight==None:\n job.imageWidth=1600\n job.imageHeight=800\n \n if not os.path.exists(job.dataPath):\n job.dataPath = os.path.join(job.basedir,job.dataPath)\n \n if not os.path.isfile(os.path.join(job.dataPath,job.dataFile)):\n job.dataPath = os.path.join(job.basedir,job.dataPath)\n \n if not job.fontSize:\n job.fontSize=22\n \n if not os.path.exists(os.path.dirname(job.outputDir)):\n os.mkdir(os.path.dirname(job.outputDir))\n \n self.drawgraph(job,os.path.join(job.dataPath,job.dataFile),os.path.join(job.outputDir,job.outputFilename))", "def save_figure(self, data):\n\n\t\tsizes = np.shape(data)\n\t\tfig = plt.figure()\n\t\tfig.set_size_inches(1, 1. * sizes[0]/sizes[1], forward = False)\n\t\tax = plt.Axes(fig, [0., 0., 1., 1.])\n\t\tax.set_axis_off()\n\t\tfig.add_axes(ax)\n\t\tax.imshow(data, \"gray\")\n\n\t\t#plt.show()\n\t\tself.plotfile = os.path.join('static', 'Figure' + '.png')\n\t\tplt.savefig(self.plotfile, dpi = sizes[1])", "def plot_chosen_data(main, dataPath):\n error = \"Error \"+errorPath+\"plot_chosen_data: Must choose data of proper format (tiff, jpeg, etc.)\"\n try:\n if dataPath == '':\n main.msg('thinks it has nothing')\n main.msg(error)\n return\n data = mpimg.imread(dataPath)\n imgObj = Img.Img(data, title = os.path.basename(dataPath), filePath = dataPath)\n main.imgObjList.append(imgObj)\n main.horizontalSlider.setMaximum(len(main.imgObjList)-1)\n main.horizontalSlider.setValue(main.horizontalSlider.maximum())\n func.plot_img_obj(main, imgObj)\n except:\n main.msg(error)", "def make_image(self, frame, filename, **kwds):\n p = plot.plot(frame, **kwds)\n p.save_image(filename)", "def plotfile(self):\r\n filename = self.locatefile()\r\n if filename == \"\":\r\n print \"\\nNo file was chosen, exiting ...\\n\"\r\n return\r\n else:\r\n print \"\\nXYZ Data file:\\n\" + filename\r\n \r\n print \"\\nReading XYZ data file....\"\r\n xyz = XYZImporter(filename)\r\n geodata = xyz.genericdata\r\n print \"FINISHED reading XYZ data file\"\r\n\r\n # Note PNG is only 8 bit, and so PDF has greater colour\r\n # depth \r\n print \"\\nAbout to render plot ...\"\r\n gp = GridPlotterCustom()\r\n gp.shownulls = False\r\n title = \"Plot of XYZ data file: \" + filename\r\n outfname = (filename.replace('.', '_') +\r\n '_PLOT_custom.pdf')\r\n gp.plotgeodata(geodata, title, outfname)\r\n print \"FINISHED rendering plot to:\\n\" + outfname\r\n print \"\\n\\n\"", "def generatePlot(data):\n addendum = \"\"\n destination = 
\"D:\\\\Research\\\\scripts\\\\Results\\\\FullSet1\\\\$FilteredPlots\\\\take 4\\\\\"\n if len(data.detections.smallIncrease) != 0:\n addendum = \"small increases\\\\\"\n if len(data.detections.smallDecrease) != 0:\n addendum = \"small decreases\\\\\"\n if len(data.detections.largeIncrease) != 0:\n addendum = \"large increases\\\\\"\n if len(data.detections.largeDecrease) != 0:\n addendum = \"large decreases\\\\\"\n if addendum == \"\":\n addendum = \"no decreases\\\\\"\n \n plt.figure(1)\n plt.subplot(211)\n #print np.min(data.magdata), np.max(data.magdata)\n axes = plt.gca()\n axes.set_title(\"Year: '{year}, Day: {day}\".format(year=data.calendarDay[:2], day=data.calendarDay[3:] ))\n axes.set_ylim([np.min(data.magdata)-1.2,np.max(data.magdata)+0.25])\n axes.set_ylabel(r'$\\mathbf{B}$ (nT)' )\n\n #plot formatting\n formats = dates.DateFormatter('%H:%M:%S')\n axes.xaxis.set_major_locator(dates.MinuteLocator())\n axes.xaxis.set_major_formatter(formats)\n \n br, = pp.plot(dates.date2num(data.timestamps),[row[0] for row in data.magdata],label='$B_r$')\n bt, = pp.plot(dates.date2num(data.timestamps),[row[1] for row in data.magdata],label='$B_t$')\n bn, = pp.plot(dates.date2num(data.timestamps),[row[2] for row in data.magdata],label='$B_n$')\n b0, = pp.plot(dates.date2num(data.timestamps),[row[3] for row in data.magdata],label='$B_0$')\n print len(data.detections.rotationBoundary)\n if len(data.detections.rotationBoundary) == 1:\n rotation, = pp.plot([dates.date2num(data.detections.rotationBoundary), dates.date2num(data.detections.rotationBoundary)], [np.min(data.magdata)-1,np.max(data.magdata)+0.25], linestyle='--', color = 'm', alpha = 0.4, label='$RB$')\n else:\n for index, value in enumerate(data.detections.rotationBoundary):\n rotation, = pp.plot([dates.date2num(value), dates.date2num(value)], [np.min(data.magdata)-1,np.max(data.magdata)+0.25], linestyle='--', color = 'm', alpha = 0.4, label='$RB$')\n if len(data.detections.rotationBoundary) != 0:\n pp.legend(handles=[br,bt,bn,b0,rotation], loc='lower left', ncol=4, fancybox=True, framealpha=0.5)\n else:\n pp.legend(handles=[br,bt,bn,b0], loc='lower left', ncol=4, fancybox=True, framealpha=0.5)\n\n start, end = axes.get_xlim()\n axes.xaxis.set_ticks(np.arange(start, end, (end-start)/5))\n \n \n\n plt.subplot(212)\n axes2 = plt.gca()\n #plot formatting\n formats = dates.DateFormatter('%H:%M:%S')\n axes2.xaxis.set_major_locator(dates.MinuteLocator())\n axes2.xaxis.set_major_formatter(formats)\n axes2.set_ylabel(r'$\\theta$ (deg)' )\n rotations, = pp.plot(dates.date2num(data.detections.rotationTimeTags),data.detections.rotations)\n #pp.legend(handles=[rotations], loc='lower left', ncol=4, fancybox=True, framealpha=0.5)\n \n\n outplotname = 'Plot ' + str(len(os.listdir(destination+addendum)) + 1)+ ' ' + data.timestamps[0].strftime('%y-%j-%H%M%S') + '.pdf'\n completename = os.path.join(destination+addendum,outplotname)\n plt.savefig(completename, bboxinches='tight')\n plt.clf()\n\n outplotname = 'Plot ' + str(len(os.listdir(destination+'rawdata\\\\'+addendum)) + 1)+ ' ' + data.timestamps[0].strftime('%y-%j-%H%M%S') + ' rawdata.csv'\n completename1 = os.path.join(destination+'rawdata\\\\'+addendum,outplotname)\n generateDataFile(data.rawdata,completename1)\n\n print \"Done generating plot...\"", "def plot(data, title='Figure', legends=None, axis_x=None, axis_y=None, file_path=None, file_name=None,\n figure_size=(16, 9), has_grid=True, limits_axis_y=None, upper_lower_data=None, limits_axis_x=None,\n verbose=True):\n\n plots = []\n colors = 
['steelblue', 'indianred', 'red', 'cyan', 'magenta', 'yellow', 'black', 'gray', 'sienna',\n 'tan', 'plum', 'steelblue', 'lavenderblush', 'pink', 'navajowhite', 'darkorange',\n 'darkslateblue', 'blueviolet', 'slategray', 'indianred', 'olive', 'darksalmon',\n 'blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'gray', 'sienna',\n 'tan', 'plum', 'steelblue', 'lavenderblush', 'pink', 'navajowhite', 'darkorange',\n 'darkslateblue', 'blueviolet', 'slategray', 'indianred', 'olive', 'darksalmon',\n 'blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'gray', 'sienna',\n 'tan', 'plum', 'steelblue', 'lavenderblush', 'pink', 'navajowhite', 'darkorange',\n 'darkslateblue', 'blueviolet', 'slategray', 'indianred', 'olive', 'darksalmon',\n 'blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'gray', 'sienna',\n 'tan', 'plum', 'steelblue', 'lavenderblush', 'pink', 'navajowhite', 'darkorange',\n 'darkslateblue', 'blueviolet', 'slategray', 'indianred', 'olive', 'darksalmon',\n 'blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'gray', 'sienna',\n 'tan', 'plum', 'steelblue', 'lavenderblush', 'pink', 'navajowhite', 'darkorange',\n 'darkslateblue', 'blueviolet', 'slategray', 'indianred', 'olive', 'darksalmon',\n 'blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'gray', 'sienna',\n 'tan', 'plum', 'steelblue', 'lavenderblush', 'pink', 'navajowhite', 'darkorange',\n 'darkslateblue', 'blueviolet', 'slategray', 'indianred', 'olive', 'darksalmon',\n 'blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'gray', 'sienna',\n 'tan', 'plum', 'steelblue', 'lavenderblush', 'pink', 'navajowhite', 'darkorange',\n 'darkslateblue', 'blueviolet', 'slategray', 'indianred', 'olive', 'darksalmon',\n 'blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'gray', 'sienna',\n 'tan', 'plum', 'steelblue', 'lavenderblush', 'pink', 'navajowhite', 'darkorange',\n 'darkslateblue', 'blueviolet', 'slategray', 'indianred', 'olive', 'darksalmon',\n 'blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'gray', 'sienna',\n 'tan', 'plum', 'steelblue', 'lavenderblush', 'pink', 'navajowhite', 'darkorange',\n 'darkslateblue', 'blueviolet', 'slategray', 'indianred', 'olive', 'darksalmon',\n 'blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'gray', 'sienna',\n 'tan', 'plum', 'steelblue', 'lavenderblush', 'pink', 'navajowhite', 'darkorange',\n 'darkslateblue', 'blueviolet', 'slategray', 'indianred', 'olive', 'darksalmon',\n 'blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'gray', 'sienna',\n 'tan', 'plum', 'steelblue', 'lavenderblush', 'pink', 'navajowhite', 'darkorange',\n 'darkslateblue', 'blueviolet', 'slategray', 'indianred', 'olive', 'darksalmon']\n\n plt.rcParams['figure.figsize'] = figure_size\n plt.title(title)\n plt.grid(has_grid)\n\n if not (axis_x is None):\n plt.xlabel(axis_x)\n if not (axis_y is None):\n plt.ylabel(axis_y)\n\n for d in range(len(data)):\n current_fig, = plt.plot(data[d][0], data[d][1], color=colors[d])\n if not (upper_lower_data is None):\n plt.fill_between(data[d][0], np.array(upper_lower_data[d][0], dtype=float),\n np.array(upper_lower_data[d][1], dtype=float),\n where=np.array(upper_lower_data[d][0], dtype=float) > np.array(upper_lower_data[d][1],\n dtype=float), alpha=0.5,\n interpolate=True)\n\n plots.append(current_fig)\n\n if not (legends is None):\n plt.legend(plots, legends)\n\n if not (limits_axis_y is None):\n plt.ylim(limits_axis_y[:2])\n plt.yticks(np.arange(limits_axis_y[0], limits_axis_y[1] + limits_axis_y[2], 
limits_axis_y[2]))\n\n if not (limits_axis_x is None):\n plt.xlim(limits_axis_x[:2])\n plt.xticks(np.arange(limits_axis_x[0], limits_axis_x[1] + limits_axis_x[2], limits_axis_x[2]))\n\n if (file_name is None) or (file_path is None):\n plt.show()\n else:\n full_path = path.join(file_path, file_name)\n if not path.isdir(file_path):\n makedirs(file_path)\n plt.savefig(full_path, format='svg')\n plt.close()\n if verbose:\n print('Figure saved at %s successfully.' % full_path)", "def plot_waveforms(data, name, title, directory_name):\n plt.figure(figsize=(20, 10))\n plt.plot(data)\n plt.title(title)\n plt.savefig('./' + directory_name + '/' + name)\n pass", "def plot_blank(self):\n self.figure_bmp.SetBitmap(self.controller.plot_blank())", "def plot_data(self):", "def plot(self, windowSize='800x600'):\n if not hasattr(self, 'compiled'):\n raise RuntimeError('The object has not compiled yet')\n # create a scrollable window\n _, fm, run = simple_scrollable_window(windowSize)\n count = 0\n img_ref = []\n for key, val in {**self.qubitDict, **self.readoutDict}.items():\n Label(\n fm, text=key + f':{val}', font='Consolas',\n relief='solid', borderwidth=1\n ).grid(row=count, column=0, ipadx=5, ipady=5, sticky='news')\n img_data = self.compiled[val].plot(\n allInOne=False, toByteStream=True, showSizeInfo=False,\n size=[20, 4]\n )\n render = ImageTk.PhotoImage(Image.open(img_data))\n img_ref += [render]\n img = Label(fm, image=render, borderwidth=1, relief='solid')\n img.grid(row=count, column=1, ipadx=5, ipady=5, sticky='news')\n img.image = render\n count += 1\n run()", "def draw_plot(self, plot_name, file_name, num_of_tests):\n plt.axis([0, num_of_tests, 0, 100])\n plt.title(plot_name)\n plt.xlabel(\"Číslo testu\")\n plt.ylabel(\"Přesnost (%)\")\n plt.legend()\n path = os.getcwd()+file_name+\".png\"\n os.makedirs(os.path.dirname(path), exist_ok=True)\n plt.savefig(path)\n plt.clf()", "def save_plot_as_image(self):\r\n plt.savefig(ROOT_DIR + '/presentation/images/' + self.folder + '/' + self.generated_image_name + '.png',\r\n bbox_inches='tight')", "def plot12(self, dataset, ts_string_indices, source_jpg_folder='jpg_images', extension='jpg', rows=3, cols=4,\n outfname='Sample Frames.png', cmap=None, gui_color='green'):\n # Settings ############################################################\n font_label_box = {\n 'color': 'green',\n 'size': 16,\n }\n font_steering = {'family': 'monospace',\n # 'color': 'darkred',\n 'weight': 'normal',\n 'size': 20,\n }\n ROWS = rows\n COLS = cols\n NUM_IMAGES = ROWS * COLS\n\n # Figure ##############################################################\n # figsize = [width, height]\n fig = plt.figure(figsize=PAPER_A3_LAND, facecolor='white')\n fig.suptitle(\"Sample frames, Dataset: {}\".format(dataset.data_folder), fontsize=20)\n\n for i, ts_string_index in enumerate(ts_string_indices):\n rec = dataset.df.loc[ts_string_index]\n\n timestamp_string = rec['datetime'].strftime(\"%D %H:%M:%S.\") + \"{:.2}\".format(\n str(rec['datetime'].microsecond))\n\n if 'steering_pred_signal' in dataset.df.columns:\n this_label = \"{}\\n{:0.2f}/{:0.2f} steering \\n{:0.2f} throttle\".format(timestamp_string,\n rec['steering_signal'],\n rec['steering_pred_signal'],\n rec['throttle_signal'])\n else:\n this_label = \"{}\\n{:0.2f}/ steering \\n{:0.2f} throttle\".format(timestamp_string, rec['steering_signal'],\n rec['throttle_signal'])\n\n ax = fig.add_subplot(ROWS, COLS, i + 1)\n\n # Main Image ##########################################################\n jpg_path = 
os.path.join(dataset.path_dataset, source_jpg_folder, ts_string_index + '.' + extension)\n assert os.path.exists(jpg_path), \"{} does not exist\".format(jpg_path)\n img = mpl.image.imread(jpg_path)\n ax.imshow(img, cmap=cmap)\n # plt.title(str_label)\n\n # Data box ########################################################\n\n # ax.axes.get_xaxis().set_visible(False)\n # ax.axes.get_yaxis().set_visible(False)\n t = ax.text(5, 25, this_label, color=gui_color, alpha=1)\n # t = plt.text(0.5, 0.5, 'text', transform=ax.transAxes, fontsize=30)\n t.set_bbox(dict(facecolor='white', alpha=0.7, edgecolor='none'))\n\n # Steering widget HUD #################################################\n # Steering HUD: Actual steering signal\n steer_actual = ''.join(['|' if v else '-' for v in dataset.linear_bin(rec['steering_signal'])])\n text_steer = ax.text(80, 105, steer_actual, fontdict=font_steering, horizontalalignment='center',\n verticalalignment='center', color=gui_color)\n # Steering HUD: Predicted steering angle\n if 'steering_pred_signal' in dataset.df.columns:\n steer_pred = ''.join(['◈' if v else ' ' for v in dataset.linear_bin(rec['steering_pred_signal'])])\n text_steer_pred = ax.text(80, 95, steer_pred, fontdict=font_steering, horizontalalignment='center',\n verticalalignment='center', color='red')\n\n outpath = os.path.join(dataset.path_dataset, outfname)\n fig.savefig(outpath)\n logging.debug(\"Wrote Sample Frames figure to {}\".format(outpath))", "def plot(self):\n # Get data\n #print(self.file_name)\n fig, ax = plb.subplots(1,1,figsize=(18,20))\n for key,value in self.testTrend.items():\n x = np.arange(len(self.data_array))\n y = np.asarray(value)\n plb.plot(x,y, label=key)\n ax.scatter(x, y)\n for i in range(0, len(value)):\n ax.annotate(str(i), (x[i], y[i]))\n # Title\n plb.title(self.file_name)\n # Legend\n plb.legend(bbox_to_anchor=(.05, 1), loc='best', borderaxespad=0.)\n # x ticks\n plb.xticks(np.arange(min(x), max(x) + 1, 2.0))\n #plb.ylim(-250, 1)\n # Show image\n plb.show()", "def test_plot_save_figure(self):\n pname = os.path.join(\n self.datadir,\n 'monol_testA_E3-50_pds_rebin1.03' + HEN_FILE_EXTENSION)\n hen.plot.main([pname, '--noplot', '--figname',\n os.path.join(self.datadir,\n 'monol_testA_E3-50_pds_rebin1.03.png'),\n '-o', 'dummy.qdp'])", "def create_preview(name):\n file_type = os.path.splitext(name)[1]\n\n fig = Figure()\n axis = fig.add_subplot(1, 1, 1)\n\n dir = os.path.dirname(os.path.realpath(__file__))\n file = open(dir+'/instances/'+name)\n if file_type == '.csv':\n\n for nodeNo,line in enumerate(file): #enumerate used to obtain line numbers and thus node numbers\n coords = line.rsplit()[0].split(\",\")\n\n x = int(coords[0])\n y = int(coords[1])\n axis.scatter(x, y, c = 'b', label = nodeNo)\n axis.set_title(name)\n axis.text(x+5,y+5, str(nodeNo))\n else:\n file.readline()\n file.readline()\n file.readline()\n no_nodes = int(file.readline().strip().split()[1])\n file.readline()\n file.readline()\n file.readline()\n\n for i in range(0, no_nodes):\n\n coords = file.readline().strip().split()[1:]\n x = float(coords[0])\n y = float(coords[1])\n axis.scatter(x, y, c = 'b', label = i)\n axis.set_title(name)\n axis.text(x,y, str(i))\n\n return fig", "def plot_and_save_2d(file_name, path_name, raw_data_file, show=False):\n print '-'*23+'PLOT (2d)'+'-'*24\n \n print 'Loading data...',\n data = load_file(path_name+file_name)\n t = data['t']\n \n pic_path = path_name+'pics/'\n if not os.path.exists(pic_path):\n os.makedirs(pic_path)\n print 'done'\n print 'Creating and saving 
plots...', \n\n # Moment.\n plt.figure(1)\n plt.plot(t, data['dyn']['M'], t, data['static']['M'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('M')\n plt.title('Moment')\n plt.grid()\n plt.savefig('%sM.png' %pic_path)\n\n # Axial force.\n plt.figure(2)\n plt.plot(t, data['dyn']['FY'], t, data['static']['FY'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Fa')\n plt.title('Fa')\n plt.grid()\n plt.savefig('%sFa.png' %pic_path)\n\n # Transverse force.\n plt.figure(3)\n plt.plot(t, data['dyn']['FZ'], t, data['static']['FZ'])\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Ft')\n plt.title('Ft')\n plt.grid()\n plt.savefig('%sFt.png' %pic_path)\n\n # Resultant force.\n plt.figure(4)\n plt.plot(t, np.sqrt(data['dyn']['FY']**2+data['dyn']['FZ']**2),\n t, np.sqrt(data['static']['FY']**2+data['static']['FZ']**2))\n plt.legend([\"Dynamic\", \"Static\"])\n plt.xlabel('t')\n plt.ylabel('Fr')\n plt.title('Fr')\n plt.grid()\n plt.savefig('%sFr.png' %pic_path)\n print 'done'\n\n if show:\n plt.show()", "def save_fig(ax_data, file_name):\n with open(file_name,'wb') as fid:\n pickle.dump(ax_data, fid)", "def plotXY(xName,xDataRaw,yName, yDataRaw):\n scanFileHolder = getScanFileHolderXY(xName,xDataRaw,yName, yDataRaw) \n scanFileHolder.plot(xName, yName)\n return scanFileHolder", "def add_plot(self, img_path, width):\n shutil.copy(img_path, f'{ReportGenerator.TEMP_FOLDER}/{hash(img_path)}.png')\n plot_template = self.templateEnv.get_template(f'{ReportGenerator.COMPONENTS_FOLDER}/plot.html')\n plot_output = plot_template.render(img_path=f'{hash(img_path)}.png', style=f\"'width:{width};'\")\n self.contents.append(plot_output)", "def show_picture(self, data):\n raise NotImplementedError", "def making_plot(sample_points_x_y_nonZero, gauge_volume, y_upper_imit, y_lower_limit,\n sample_height=10, sample_width=5., min_color=None, max_color = None):\n if sample_points_x_y_nonZero.size==0:\n print \"the array does not have a non zero gauge volume\"\n\n\n else:\n\n xS, yS=sample_points_x_y_nonZero\n X,Y= np.meshgrid(xS,yS)\n\n gauge_volume=np.array(gauge_volume)\n\n Z = griddata((xS,yS), gauge_volume, (X,Y), method='nearest')\n\n plt.figure()\n # r=plt.contour( X, Y,Z)\n # plt.clabel(r, inline=1, fontsize=10)\n plt.pcolormesh(X, Y, Z, cmap = plt.get_cmap('rainbow'),vmin=min_color, vmax=max_color )\n plt.xlabel('points along sample width (mm)')\n plt.ylabel('points along sample height (mm)')\n plt.ylim(y_lower_limit,y_upper_imit)\n plt.colorbar()\n plt.axhline(y=-sample_height/2., color='r', linestyle='-')\n plt.axhline(y=sample_height/2., color='r', linestyle='-')\n plt.axvline(x=- sample_width/2., color='r', linestyle='-')\n plt.axvline(x= sample_width/2., color='r', linestyle='-')\n # plt.scatter(xS,yS ,marker = 'o', c = 'b', s = 5, zorder = 10)\n plt.savefig(os.path.join(thisdir, '../figures/{sample}.png'.format(sample='gauge_volume')))\n plt.show()", "def render_plot(filename):\n \n file = f\"{app.config['FILE_UPLOADS']}/{filename}\"\n df = read_dataset(file)\n plot_json = generate_dataset_JSON(df)\n\n min = str(round(df['y'].min(),2))\n max = str(round(df['y'].max(),2))\n mean = str(round(df['y'].mean(),2))\n std = str(round(df['y'].std(),2))\n \n return render_template('render_plot.html', plot_json=plot_json, filename=filename, min=min, max=max, mean=mean, std=std)", "def real_time_plot(files):\n global len_data, first_iter, colors\n\n for i,F in enumerate(files):\n\n # Load data\n data = pylab.loadtxt(F, delimiter=',', skiprows=1, 
usecols=(5,6,7))\n\n # Check if new data\n if (len_data!= len(data[:,0])):\n\n # Plot\n label = ntpath.basename(F)\n label = label[0:-4]\n ax.plot(data[:,0], data[:,1], data[:,2], colors[i], label=label)\n\n pyplot.draw()\n\n # Update globals\n len_data = len(data[:,0])\n\n if (first_iter == True):\n ax.legend()\n first_iter = False", "def show_plot_in_new_figure(data, ylim=(-0.3, 0.3),\n to_save=False, fname=\"extractor_test_results/result.png\"):\n \n plt.figure(figsize = (30,10))\n plt.ylim(ylim)\n plt.plot(list(data), 'b', lw=1)\n plt.grid()\n if show_plots: \n plt.show()\n \n if to_save:\n plt.savefig(fname)", "def heatmap(filename, data):\n\n fig, ax = ppl.subplots(1)\n ppl.pcolormesh(fig, ax, data, vmin=-0.0016, vmax=0.0016)\n fig.savefig(filename + \".png\")", "def render(self, chart):\n chart.create_visualization_files(self.__outputpath)" ]
[ "0.6426788", "0.63435924", "0.6265439", "0.62604624", "0.62195396", "0.62031156", "0.6172043", "0.613385", "0.61045796", "0.60914683", "0.60585433", "0.6013517", "0.5999623", "0.5993755", "0.5926441", "0.5907435", "0.58943826", "0.58767754", "0.5841144", "0.5835282", "0.5827787", "0.5816113", "0.5791753", "0.57793593", "0.5775896", "0.57556903", "0.57362264", "0.5727105", "0.5723166", "0.57099086" ]
0.7840498
0
Method to invoke Disable command on SDP Master.
def do(self):
    this_server = TangoServerHelper.get_instance()
    try:
        sdp_master_ln_fqdn = ""
        property_val = this_server.read_property("SdpMasterFQDN")[0]
        sdp_master_ln_fqdn = sdp_master_ln_fqdn.join(property_val)
        sdp_mln_client_obj = TangoClient(sdp_master_ln_fqdn)
        sdp_mln_client_obj.send_command_async(
            const.CMD_Disable, None, self.disable_cmd_ended_cb
        )
        self.logger.debug(const.STR_DISABLE_CMS_SUCCESS)
        this_server.write_attr(
            "activityMessage", const.STR_DISABLE_CMS_SUCCESS, False
        )
    except DevFailed as dev_failed:
        self.logger.exception(dev_failed)
        log_msg = f"{const.ERR_DISABLE_CMD_FAIL}{dev_failed}"
        tango.Except.re_throw_exception(
            dev_failed,
            const.ERR_INVOKING_CMD,
            log_msg,
            "SdpMasterLeafNode.DisableCommand()",
            tango.ErrSeverity.ERR,
        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Disable(self):\n handler = self.get_command_object(\"Disable\")\n handler()", "def cmd_disable(self, app_name=None):\n rc = self.socket_command_with_project('disable', app_name)\n return rc", "def disable(self, sid):\n return", "def disable(self):\n logging.debug(\"Disabling switch %s\" % self.name)\n self.disabled = True", "def _disable(self):\n self.enabled = False", "def on_disable(self) -> None:\n self._cancel_automation()", "def on_disable(self) -> None:\n self._on_stop_cycle({})", "def disable(self):", "async def disable(self, **kwargs) -> None: # pylint: disable=unused-argument\r\n await self.set_ena(False)", "def disable(self):\n self.error_code = 'DISABLED'\n self.running = False", "async def disable(self, ctx):\n\n server = ctx.message.server\n\n settings = self.bot.dota_ticker_settings.get(server.id)\n\n if settings is not None:\n settings['enabled'] = False\n await self.bot.dota_ticker_settings.put(server.id, settings)\n\n await self.bot.say('The match ticker has been disabled on {0.name}.'.format(server))", "def disable(self) -> None:", "def disable(self):\n try:\n self.bus.open(self.BUS_NUMBER)\n self.write(AntennaDeployerCommand.DISARM_ANTS, 0x00)\n self.bus.close()\n return True\n except:\n return False", "def turn_off(self, **kwargs: Any) -> None:\n if (\n DPCODE_LIGHT in self.tuya_device.status\n and DPCODE_SWITCH not in self.tuya_device.status\n ):\n commands = [{\"code\": DPCODE_LIGHT, \"value\": False}]\n else:\n commands = [{\"code\": DPCODE_SWITCH, \"value\": False}]\n self._send_command(commands)", "def _doDisableRegulation(self):\n self._cmdRegulOff()", "def bdev_nvme_disable_controller(client, name, cntlid):\n\n params = {'name': name}\n\n if cntlid is not None:\n params['cntlid'] = cntlid\n\n return client.call('bdev_nvme_disable_controller', params)", "def disable(self):\n self.enabled = False", "def disable_radio(self):\n self.acquire_response(b'AT*R0')", "async def async_turn_off(self):\n path = \"/ip/firewall/nat\"\n param = \".id\"\n value = None\n for uid in self._ctrl.data[\"nat\"]:\n if (\n self._ctrl.data[\"nat\"][uid][\"name\"]\n == f\"{self._data['protocol']}:{self._data['dst-port']}\"\n ):\n value = self._ctrl.data[\"nat\"][uid][\".id\"]\n\n mod_param = \"disabled\"\n mod_value = True\n self._ctrl.set_value(path, param, value, mod_param, mod_value)\n await self._ctrl.async_update()", "async def disable(self, ctx):\n await self.config.guild(ctx.guild).auto.set(True)\n await ctx.send(_(\"Automatic voicechannel creation disabled.\"))", "def disable(self) -> Awaitable[Dict]:\n return self.client.send(\"Security.disable\", {})", "def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n tunnel_info = {}\n tunnel_info['FLEX_COUNTER_STATUS'] = DISABLE\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"TUNNEL\", tunnel_info)", "def disable(self):\n self._enabled = False", "def disable_relays(self):\n #ensure clock low and data high\n self.e.clear_bit(7)\n self.e.set_bit(5)\n time.sleep(0.01)\n\n #pulse the clock line\n self.e.set_bit(7)\n time.sleep(0.01)\n self.e.clear_bit(7)\n\n #clear the data line\n self.e.clear_bit(5)", "def disable(self):\n pass", "def disable(self):\n return self.enable(False)", "def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = 'disable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"PORT\", port_info)", "def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = DISABLE\n 
configdb.mod_entry(\"FLEX_COUNTER_TABLE\", PORT_BUFFER_DROP, port_info)", "def disable(self):\n if not self.labExperiment:\n super().disable()\n else:\n self.zero()\n self.connection.query('close_dm')\n print(\"'BM1k' is now disbaled\")", "def disable():\n if _status_apf():\n return __apf_cmd(\"-f\")" ]
[ "0.72543144", "0.682667", "0.6537114", "0.6527344", "0.6507283", "0.6505277", "0.64890575", "0.64763004", "0.6459875", "0.6419226", "0.63770306", "0.637017", "0.6363984", "0.63474035", "0.62944055", "0.6292118", "0.6276801", "0.627648", "0.627209", "0.62699884", "0.62642103", "0.62637043", "0.62545663", "0.6235732", "0.62094367", "0.6187441", "0.6183761", "0.6175728", "0.6165915", "0.6165217" ]
0.795008
0
Returns whether the current instance is an edge server in crosssilo FL.
def is_edge_server() -> bool:
    return Config().args.port is not None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_edge_site(self) -> bool:\n return self.config.edge", "def is_connected_to(self, receiver: SkupperSite) -> bool:\n return receiver in self.connected_sites", "def isEdge(self,x,y):\n\t\treturn y in self._dictOut[x]", "def isEdge(self,x,y):\n\t\treturn y in self._dict[x]", "def isEdge(self, x, y):\n return y in self._dictOut[x]", "def isEdge(self,x,y):\n\t\treturn self._matr[x][y]", "def has_edge(self, otherNode):\n\t\t\treturn otherNode in self.edges", "def isEdge(self,x,y):\r\n return self.matr[x][y]", "def has_edge(self, v1, v2):\n\n return v1 in self.get_reachables(v2[0], v2[1])", "def is_edge(self):\n if self._row == 0 or self._row == 9 or self._column == 0 or self._column == 9:\n # check that the edge is not actually a corner square\n if not self.is_corner():\n # If not a corner and in a border row return True\n return True\n\n return False", "def is_peered_with(self, other: SkupperSite) -> bool:\n if not self.cluster.peering:\n return False\n\n for c in self.cluster.peering.connections:\n if (\n isinstance(\n c,\n (\n ClusterPeeringConnectionClusterRequesterV1,\n ClusterPeeringConnectionClusterAccepterV1,\n ),\n )\n ) and c.cluster.name == other.cluster.name:\n return True\n return False", "def isEdge(self, x, y):\n if y in self.parseX() or x in self.parseX():\n return y in self.dictOut[x]\n else :\n print(\"verteces not found\")", "def is_adjacent(self, remote_host_name):\n # Check if a topology is defined, otherwise use fully connected\n if self.topology is None:\n return True\n\n if self.name in self.topology:\n if remote_host_name in self.topology[self.name]:\n return True\n else:\n return False\n else:\n logging.warning(\n \"Node {} is not in the specified topology and is therefore \"\n \"assumed to have no neighbors\".format(self.name)\n )\n return False", "def connected(self):\n return self.izx.connected and self.ezx.connected", "def IsWire(self, *args):\n return _BRepAlgo.BRepAlgo_EdgeConnector_IsWire(self, *args)", "def is_connected(self):\n if self.V < 1:\n raise ValueError(\"empty graph\")\n if self.V < 2:\n return True\n if self.E == 0:\n return False\n cc = self.cc()\n return int(cc.max() == 0)", "def has_neighbor(self):\n if self.cur_neighbor is None:\n return False\n if self.cur_neighbor['app_feat'] is None:\n return False\n return True", "def is_cross_onap_link(self, logical_link):\n for relationship in logical_link[\"relationship-list\"][\"relationship\"]:\n if relationship[\"related-to\"] == \"ext-aai-network\":\n return True\n return False", "def is_router(self):\n # @todo: Rewrite\n return self.address_set.count() > 1", "def is_island(self, sites: Iterable[SkupperSite]) -> bool:\n # Neither incoming nor outgoing connections\n return (\n not self.has_incoming_connections(sites)\n and not self.connected_sites\n and not self.delete\n )", "def has_edges(self):\n\n return len(self._edges) > 0", "def is_bipartite(self):\n return True", "def are_connected(self, node1, node2):\n return bool( self.get_edge(node1, node2) )", "def has_bond_crossing(self):\n return self.count_bond_collisions() > 0", "def is_connected(self):\n connected = False\n self.state = self.mesh.state()\n if self.state in (STATE_CHILD, STATE_ROUTER, STATE_LEADER, STATE_LEADER_SINGLE):\n connected = True\n return connected", "def is_central_server() -> bool:\n return hasattr(Config().algorithm,\n 'cross_silo') and Config().args.port is None", "def is_esi_node():\n\n # Fetch ACME logger and write debug message\n log = logging.getLogger(\"ACME\")\n log.debug(\"Test if hostname matches the pattern 
'esi-sv*'\")\n return socket.gethostname().startswith(\"esi-sv\") and os.path.isdir(\"/cs\")", "def is_bipartite(self):\n return self._.bipartite", "def is_connected(self) -> bool:", "def is_connected(self):\n return self.is_connected" ]
[ "0.7256668", "0.65036786", "0.6496777", "0.6456621", "0.64283705", "0.63906115", "0.6335268", "0.63215107", "0.6301984", "0.6286371", "0.62700874", "0.6203978", "0.61531115", "0.6063415", "0.60367393", "0.60330695", "0.5981147", "0.5979535", "0.5975428", "0.5965382", "0.59630185", "0.59559524", "0.59321404", "0.5926251", "0.5923715", "0.59187716", "0.5913594", "0.59066206", "0.58825046", "0.58815366" ]
0.7117317
1
Returns whether the current instance is a central server in crosssilo FL.
def is_central_server() -> bool:
    return hasattr(Config().algorithm, 'cross_silo') and Config().args.port is None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_remote(self):\n if socket.gethostbyname(socket.gethostname()).startswith('10.7'):\n return False\n else:\n return True", "def is_on(self) -> bool:\n val = bool(self._cluster_handler.cluster.get(self._zcl_attribute))\n return (not val) if self.inverted else val", "def central_server_alive(cls, timeout=1):\n central_server_address, _ = cls.get_central_address()\n\n try:\n requests.get(central_server_address, timeout=timeout, verify=False)\n except (Timeout, ConnectionError):\n return False\n\n return True", "def is_connected(self):\n if self.server: return True\n return False", "def is_local_client(self):\n return self.msg.is_local_client", "def isInCluster(self):\n logger.debug(\"Checking if %s is a part of cluster\" % self)\n role = self.getClusterRole()\n return role is not None and role != \"DISABLED\"", "def is_origin_remote():\n return sync_mode in (SyncMode.RECEIVER, SyncMode.PROXY, SyncMode.DUMP_REMOTE,\n SyncMode.IMPORT_REMOTE, SyncMode.SYNC_REMOTE)", "def _checkTorcsServer(self):\n isRunning = False\n if self.torcsServerProcess is not None:\n if self.torcsServerProcess.poll() is None:\n isRunning = True\n return isRunning", "def has_upstream_server(self) -> bool:\n return True if self.host is not None else False", "def is_client(self) -> bool:\n return self.zone.SharedRoomID and not self.zone.MasterMode", "def local(self):\n return self.hostname == \"localhost\" and self.user is None and self.ssh_args is None", "def isClientMultiplexingInterface(self):\n adaptation = self.getServerAdaptationFunction()\n if adaptation == None:\n return False # no adaptatation underneath\n else:\n clientcount = adaptation.getClientCount() # max. number of clients; None means unlimited\n return (clientcount != 1)", "def is_host(self):\n return self.host", "def is_remote(self):\n return False", "def has(self, server):\n return (server in self.servers)", "def on_internal_cluster(self) -> bool:\n return self.cluster.internal or False", "def is_remote(client):\n if client == Client.ORIGIN:\n return is_origin_remote()\n elif client == Client.TARGET:\n return is_target_remote()\n elif client == Client.LOCAL:\n return False\n else:\n return False", "def am_I_master(self, ipdict):\n hostname = socket.gethostname()\n ip_address = socket.gethostbyname(hostname)\n return ipdict.get(ip_address).is_master", "def isMaster(self):\n logger.debug(\"Checking if %s is Cloudera Master\" % self)\n is_master = self.getClusterRole()\n logger.debug(\"Is %s master: %s\" % (self, is_master))\n return is_master", "def mmo_is_configsrv(self, mmo_connection):\n return True if \"configsvr\" in str(mmo_connection[\"admin\"].command(\"getCmdLineOpts\")[\"parsed\"]) else False", "def is_master(self):\n return MPControl.is_master", "def check_connection_to_db(self):\n try:\n self._client.admin.command('ismaster')\n return True\n except Exception:\n return False", "def on_public_cluster(self) -> bool:\n return not self.on_private_cluster", "def is_connected(self) -> bool:\n return False if self._snitun is None else self._snitun.is_connected", "def is_target_remote():\n return sync_mode in (SyncMode.SENDER, SyncMode.PROXY, SyncMode.DUMP_REMOTE,\n SyncMode.IMPORT_REMOTE, SyncMode.SYNC_REMOTE)", "def is_on(self):\n return self._get_state() == ServerState.ON", "def is_esi_node():\n\n # Fetch ACME logger and write debug message\n log = logging.getLogger(\"ACME\")\n log.debug(\"Test if hostname matches the pattern 'esi-sv*'\")\n return socket.gethostname().startswith(\"esi-sv\") and os.path.isdir(\"/cs\")", "def is_lite_mode(ctx: 
commands.Context) -> bool:\n if is_private(ctx.message.channel):\n for g in ctx.bot.get_user_guilds(ctx.message.author.id):\n if g.id not in config.lite_servers:\n return False\n else:\n return ctx.message.guild in config.lite_servers", "def is_client(self):\n if not hasattr(self, '_is_client'):\n self._is_client = hasattr(self, 'client')\n return self._is_client", "def is_connected(self) -> bool:\n\n return self.send(self.cmd.GET_SYSTEMLINE) == self.cmd.DEFAULT_SYSTEM_LINE" ]
[ "0.66080767", "0.65418833", "0.6524869", "0.6519794", "0.6491021", "0.64296925", "0.639115", "0.63826996", "0.6380103", "0.6369597", "0.63640034", "0.6328281", "0.6286668", "0.62676233", "0.6239149", "0.6202125", "0.61955136", "0.61894524", "0.61597776", "0.6134595", "0.61243284", "0.61220825", "0.61082244", "0.6102957", "0.6099582", "0.6092593", "0.60785675", "0.6070335", "0.6064259", "0.6063599" ]
0.83141166
0
Returns the device to be used for training.
def device() -> str:
    import torch
    if torch.cuda.is_available() and torch.cuda.device_count() > 0:
        if hasattr(Config().trainer, 'parallelized') and Config().trainer.parallelized:
            device = 'cuda'
        else:
            device = 'cuda:' + str(
                random.randint(0, torch.cuda.device_count() - 1))
    else:
        device = 'cpu'
    return device
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def device(self) -> torch.device:\n for param in self.parameters():\n return param.device\n return get_device(\"cpu\")", "def get_device():\n import torch\n\n if torch.cuda.is_available():\n return torch.device('cuda')\n return torch.device('cpu')", "def device():\n return torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')", "def device(self):\n hw = self.hw()\n if hw: return hw.device()", "def device(self):\n return torch.cuda.current_device()", "def device(self):\n return next(self.parameters()).device", "def device(self):\n return next(self.parameters()).device", "def device(self):\n return next(self.parameters()).device", "def device(self):\n return next(self.parameters()).device", "def device(self):\n return next(self.parameters()).device", "def get_default_device():\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n return device", "def device():\n return G.DEVICE", "def device(self) -> torch.device:\n return self._device", "def device(self):\n return self._vars[0].device", "def get_default_device():\n if torch.cuda.is_available():\n return torch.device('cuda')\n else:\n return torch.device('cpu')", "def get_default_device():\n if torch.cuda.is_available():\n return torch.device('cuda')\n else:\n return torch.device('cpu')", "def get_default_device():\n if torch.cuda.is_available():\n return torch.device('cuda')\n else:\n return torch.device('cpu')", "def get_default_device():\n if torch.cuda.is_available():\n return torch.device('cuda')\n else:\n return torch.device('cpu')", "def get_default_device():\n if torch.cuda.is_available():\n return torch.device('cuda:0')\n else:\n return torch.device('cpu')", "def get_device(self):\n raise NotImplementedError()", "def _create_device(self):\n return torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")", "def device(self):\n return self._tensor.device", "def setup_device(self, conf: DictConfig) -> device:\n device = torch.device(conf.runner.device) if torch.cuda.is_available() else torch.device('cpu')\n\n return device", "def get_device(model):\n\tif next(model.parameters()).is_cuda:\n\t\treturn 'cuda:{}'.format(torch.cuda.current_device())\n\telse:\n\t\treturn 'cpu'", "def device(self):\n\n\t\treturn self._device", "def get_device(i=0):\n if torch.cuda.is_available():\n return torch.device(\"cuda:%d\" % i)\n else:\n return torch.device(\"cpu\")", "def get_device(self) -> str:\n pass", "def device(self):\n return self._device", "def get_device(model):\n p = next(model.parameters())\n return p.device", "def get_device(l):\n if not l.device:\n l.device = find_device()\n setup_device(l.device)\n return l.device" ]
[ "0.8276948", "0.8204713", "0.81753296", "0.80565923", "0.79779685", "0.79452527", "0.79452527", "0.79452527", "0.79452527", "0.79452527", "0.78916585", "0.78675044", "0.7835675", "0.77565765", "0.77542937", "0.77542937", "0.77542937", "0.77542937", "0.7713507", "0.7709037", "0.7634034", "0.76193523", "0.7597322", "0.7537676", "0.75102514", "0.7479524", "0.7453658", "0.7396959", "0.73892164", "0.7281287" ]
0.8220512
1
Check if the hardware and OS support data parallelism.
def is_parallel() -> bool:
    import torch
    return hasattr(Config().trainer, 'parallelized') and Config(
    ).trainer.parallelized and torch.cuda.is_available(
    ) and torch.distributed.is_available(
    ) and torch.cuda.device_count() > 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parallel_safe(self):\n return True", "def is_multiprocessing_problematic():\n # Handling numpy linked against accelerate.\n config_info = str([value for key, value in\n np.__config__.__dict__.items()\n if key.endswith(\"_info\")]).lower()\n\n if \"accelerate\" in config_info or \"veclib\" in config_info:\n return True\n elif \"openblas\" in config_info:\n # Most openBLAS can only operate with one thread...\n os.environ[\"OPENBLAS_NUM_THREADS\"] = \"1\"\n else:\n return False", "def parallel_safe(self):\n\n return True", "def is_available(self) -> bool:\n return (\n len(self._gpu_ids) > 1\n and \"TORCHELASTIC_RUN_ID\"\n not in os.environ # If otx is executed by torchrun, then otx multi gpu interface is disabled.\n )", "def can_use_omp_threads(self, omp_threads):\n return self.cores_per_node >= omp_threads", "def DataAvailable(self) -> bool:", "def is_system_ready_for_benchmarking():\n\n # check if scaling_governor is set to 'performance' for all cpu cores\n cpu_governors = glob.glob('/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor')\n if not cpu_governors:\n logger.error('no scaling_governor found. Do you run on a Linux System?')\n return False\n for governor in sorted(cpu_governors):\n with open(governor, 'r') as f:\n line = f.read().splitlines()[0]\n logger.debug('%s is set to \\\"%s\\\"', governor, line)\n if line != 'performance':\n logger.warning('please set all scaling_governor to \\\"performance\\\" (using \"sudo ./ondemand.sh start\")')\n return False\n\n return True", "def get_is_data_available(self):\n return self._data_available", "def is_available():", "def check_multiprocessing():\n\n try:\n import multiprocessing\n except ImportError:\n return False\n return True", "def _workers_available(self) -> bool:\n total_compute_power = sum(self.client.nthreads().values())\n if len(self.futures) < total_compute_power:\n return True\n return False", "def is_xpu_available():\n xpu_count = int(os.getenv(\"FLAGS_selected_xpus\", \"-1\"))\n if xpu_count < 0:\n return False\n\n if _HAS_FLUID:\n from paddle import fluid\n if not fluid.is_compiled_with_xpu():\n logger.warning(\"Found non-empty XPU_VISIBLE_DEVICES. \\\n But PARL found that Paddle was not complied with XPU, which may cause issues. \\\n Thus PARL will not use XPU.\")\n return False\n if _HAS_PADDLE:\n import paddle\n if not paddle.is_compiled_with_xpu():\n logger.warning(\"Found non-empty XPU_VISIBLE_DEVICES. \\\n But PARL found that Paddle was not complied with XPU, which may cause issues. 
\\\n Thus PARL will not use XPU.\")\n return False\n return True", "def check_cpu_constrained():\n return psutil.cpu_percent(1) > 75", "def has_data_flow(self) -> bool:\n return self.data_flow_steps is not None", "def _schedTest(self):\n if not self._hasSlices(): # There are no migratory tasks, so let's check utilization\n return self.util() <= 1.0\n else:\n return self._qpa()", "def available(self) -> bool:\n return (\n super().available\n and self.coordinator.data is not None\n and self.module_id in self.coordinator.data\n and self.data_id in self.coordinator.data[self.module_id]\n )", "def detect_available():\n global _CUDA_AVAILABLE\n if _CUDA_AVAILABLE is not None: return _CUDA_AVAILABLE\n _CUDA_AVAILABLE = shell.run('{} -c \"import torch;print(torch.cuda.is_available())\"'.format(sys.executable)).strip('\\n') == 'True'\n return _CUDA_AVAILABLE", "def is_gpu_available():\n ret = get_gpu_count() > 0\n if _HAS_PADDLE:\n import paddle\n if ret is True and not paddle.is_compiled_with_cuda():\n logger.warning(\"Found non-empty CUDA_VISIBLE_DEVICES. \\\n But PARL found that Paddle was not complied with CUDA, which may cause issues. \\\n Thus PARL will not use GPU.\")\n return False\n if _HAS_FLUID:\n from paddle import fluid\n if ret is True and not fluid.is_compiled_with_cuda():\n logger.warning(\"Found non-empty CUDA_VISIBLE_DEVICES. \\\n But PARL found that Paddle was not complied with CUDA, which may cause issues. \\\n Thus PARL will not use GPU.\")\n return False\n return ret", "def supports_prefetch(self):\n return (hasattr(self.base_dataset, 'supports_prefetch') and\n self.base_dataset.supports_prefetch) or \\\n (hasattr(self.auxiliary_targets, 'supports_prefetch') and self.auxiliary_targets.supports_prefetch)", "def check_cuda():\n if OS_VERSION[0] == \"Linux\":\n check_cuda_linux()\n elif OS_VERSION[0] == \"Windows\":\n check_cuda_windows()", "def check_cpu_usage():\n usage = psutil.cpu_percent(1)\n return usage < 73", "def is_multigpu_child_process():\n return (dist.is_initialized() or \"TORCHELASTIC_RUN_ID\" in os.environ) and os.environ[\"LOCAL_RANK\"] != \"0\"", "def is_available() -> bool:\n # This function never throws and returns 0 if driver is missing or can't\n # be initialized\n return device_count() > 0", "def data_available(self):\n return (self.status & 0x08) != 0", "def check_if_sufficient_memory():\n percent_memory = psutil.virtual_memory().percent\n if percent_memory > 75:\n raise ValueError('Please use a device with more CPU ram or a smaller dataset')", "def is_distributed() -> NotImplementedError:\n raise NotImplementedError()", "def is_distributed() -> int:\n return collective.is_distributed()", "def check_for_data():\n if not (os.path.exists(ep.get_test_data_path()) or os.path.exists(ep.get_dbn_weight_path())):\n return False\n return True", "def can_run_experiment(self, info, device):\n nb_qubit_max = self.backends[device]['nq']\n nb_qubit_needed = info['nq']\n return nb_qubit_needed <= nb_qubit_max, nb_qubit_max, nb_qubit_needed", "def gpu_availability():\n # assume if using tensorflow-gpu, then Nvidia GPU is available\n if is_built_with_cuda():\n return len(tf.config.list_physical_devices(\"GPU\")) > 0\n else:\n return False" ]
[ "0.63122064", "0.6217546", "0.6193051", "0.61780185", "0.61725044", "0.61503655", "0.6127145", "0.6078269", "0.6058447", "0.603029", "0.6027548", "0.59874004", "0.5955343", "0.5927671", "0.58607745", "0.58542687", "0.58361876", "0.58319396", "0.58313346", "0.58165216", "0.58047605", "0.5789315", "0.57813597", "0.576614", "0.57457197", "0.5716463", "0.5712461", "0.5698685", "0.569107", "0.56755656" ]
0.66771305
0
return True if the number_str can be truncated from both left and right and always be prime, e.g. 3797
def is_left_right_truncatable(number_str, prime_str_set):
    l = len(number_str)
    #left truncatable?
    for i in range(l):
        if number_str[i:] not in prime_str_set or number_str[:l-i] not in prime_str_set:
            return False
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_truncatable(number: int):\n\n str_number = str(number)\n index = 0\n\n # Left shift:\n while index < len(str_number):\n if not is_prime(int(str_number[index:])):\n return False\n\n index += 1\n\n # Right shift:\n index = len(str_number)\n while index > 0:\n if not is_prime(int(str_number[:index])):\n return False\n\n index -= 1\n\n return True", "def is_truncatable(nb):\n nb = str(nb)\n if is_prime(int(nb)):\n for i in range(1, len(nb)):\n if not is_prime(int(nb[i:])) or not is_prime(int(nb[:len(nb)-i])):\n return False\n return True\n else:\n return False", "def substring_divisible(number):\n string = str(number)\n for offset in xrange(1, len(string)-2):\n substring = string[offset:offset+3]\n # print '%s / %d' % (substring, PRIMES[offset-1])\n if int(substring) % PRIMES[offset-1]:\n return False\n return True", "def is_prime_by_python(num):\n if num == 2:\n return True\n elif num % 2 == 0 or num <= 1:\n # even or smaller then one\n return False\n else:\n res = True\n partial_num_range = int(num / 4) + 1\n\n for i in range(1, partial_num_range):\n if num % (2 * i + 1) == 0:\n res = False\n break\n return res", "def is_prime(number):\n #for i in range(2, ceil(sqrt(number))):\n for i in range(2, number):\n if number % i == 0:\n return False\n return True", "def is_number_palindrome(number, digits, start):\n number = str((number // 10**start) % 10**digits).zfill(digits)\n return is_palindrome(number)", "def check_number(self):\n digits = self.number\n _sum = 0\n alt = False\n ix = []\n for x in str(digits):\n ix.append(int(x))\n for d in reversed(ix):\n assert 0 <= d <= 9\n if alt:\n d *= 2\n if d > 9:\n d -= 9\n _sum += d\n alt = not alt\n return (_sum % 10) == 0", "def _is_prime(self, num):\n if num == 2:\n return True\n if num < 2 or num % 2 == 0:\n return False\n for n in range(3, int(num ** 0.5) + 2, 2):\n if num % n == 0:\n return False\n return True", "def isprime(number):\n\n if number == 1:\n return False\n for i in range(2, int(number**0.5) + 1):\n if number % i == 0:\n return False\n return True", "def is_prime(number):\n number = int(number)\n\n if number < 2:\n return False\n if number < 4:\n return True\n if number % 2 == 0:\n return False\n for d in range(3, number // 2, 2):\n if number % d == 0:\n return False\n return True", "def isprime(number: int) -> bool:\n for i in range(2, int(number ** 0.5) + 1):\n if number % i == 0:\n return False\n return True", "def is_circular_prime(n):\r\n\r\n # pdb.set_trace()\r\n s = str(n)\r\n for i in xrange(len(s)):\r\n if not is_prime(n):\r\n return False\r\n s = s[1:] + s[0]\r\n n = int(s)\r\n\r\n return True", "def is_prime(number: int):\n\n for index in range(2, (number//2) + 1):\n if number%index == 0:\n return False\n return True", "def check_number(number):\n digits = str(number)\n if len(digits) != 6:\n return False\n\n double = False\n last = '0'\n for digit in digits:\n if digit < last:\n return False\n\n if digit == last:\n double = True\n\n last = digit\n\n return double", "def is_prime(num):\n if is_even(num) and num != 2 or num == 1:\n return False\n\n for dd in range(3, int(mt.sqrt(num)) + 1):\n if num % dd == 0:\n return False\n\n return True", "def istele(number):\n if number[:3] == '140':\n return True\n return False", "def is_prime(num):\n import math\n\n\n if num % 2 == 0 and num > 2:\n return False\n for i in range(3, int(math.sqrt(num))+1, 2):\n if num % i == 0:\n return False\n return True", "def get_prime_digits_for_one(a: int) -> bool:\r\n b = a\r\n c = 0\r\n c1 = 0\r\n while b > 0:\r\n c1 += 1\r\n n = b % 
10\r\n if isprime(n):\r\n c += 1\r\n b = b // 10\r\n if c == c1:\r\n return True\r\n else:\r\n return False", "def is_prime(number: int) -> bool:\n\n if number % 2 == 0 and number > 2:\n return False\n return all(number % i for i in range(3, int(math.sqrt(number)) + 1, 2))", "def is_prime(num):\n if not isinstance(num, int):\n return False\n if num <= 1:\n return False\n if num == 2 or num == 3:\n return True\n if num % 6 in [0, 2, 3, 4]:\n return False\n div_max = int(math.sqrt(num))\n for div in range(5, div_max + 1, 2):\n if num % div == 0:\n return False\n return True", "def is_prime(number):\n\tif number < 0:\n\t\treturn False\n\tif number < 4:\n\t\treturn True\n\t#start with number 2, iterate up until up to half the number is reached\n\tfor x in range(2, int(number/2)+1):\n\t\tif number%x == 0:\n\t\t\treturn False\n\treturn True", "def is_prime_number(number_):\n flag = 0\n for values in range(2, number_//2):\n if number_ % values == 0:\n flag += 1\n if flag == 1:\n return True\n else:\n return False", "def checkPerfectNumber(self, num: int) -> bool:\n if num <= 0:\n return False\n s = 0\n for i in range(1, int(math.sqrt(num) + 1)):\n if i != num:\n res = num % i\n if res == 0:\n s += i\n divisor = num // i\n if divisor != num:\n s += divisor\n if s > num:\n return False\n return s == num", "def is_prime(number):\n\tif number < 4:\n\t\treturn True\n\t#start with number 2, iterate up until up to half the number is reached\n\tfor x in range(2, int(number/2)+1):\n\t\tif number%x == 0:\n\t\t\treturn False\n\treturn True", "def is_prime(n):\n return mr_prime(n)", "def isprime(n=936):\n if n < 3: return False\n for i in range(2, n):\n if n % i == 0:\n return False\n return True", "def is_prime(number):\n if number <= 1:\n return False\n\n max_element = int(math.ceil(math.sqrt(number)))\n # iterate through all elements from 2 through sqrt(n)\n for element in range(2,max_element + 1):\n if number % element == 0:\n return False\n\n return True", "def is_prime(number):\n if number <=3:\n return True\n \n for i in range(2, number):\n if number % i == 0:\n return False\n \n return True", "def is_prime(number):\n if number == 2:\n return True\n\n if number <= 1 or number % 2 == 0:\n return False\n\n # check to see if number has any odd factors\n for x in range(3, int(number ** 0.5) + 1, 2):\n if number % x == 0:\n return False\n return True", "def is_armstrong_number(number: int) -> bool:\n\n str_number = f\"{number}\"\n return sum(pow(int(x), len(str_number)) for x in str_number) == number" ]
[ "0.8172049", "0.72789836", "0.69700235", "0.6308689", "0.61999017", "0.6155211", "0.6115153", "0.6111989", "0.6109884", "0.6073659", "0.6044184", "0.6039048", "0.60375977", "0.60341597", "0.60208774", "0.6020748", "0.601937", "0.60004914", "0.5998233", "0.5982962", "0.5968794", "0.5953393", "0.5939892", "0.5923705", "0.59115493", "0.5902841", "0.58870685", "0.58655393", "0.58621526", "0.5861683" ]
0.7736553
1
Determine fixed modifications in case the reference shift is at zero. Does not need localization.
def determine_fixed_mods_zero(aastat_result, data, params_dict):
    fix_mod_zero_thresh = params_dict['fix_mod_zero_thresh']
    min_fix_mod_pep_count_factor = params_dict['min_fix_mod_pep_count_factor']
    fix_mod_dict = {}
    reference = utils.mass_format(0)
    aa_rel = aastat_result[reference][2]
    utils.internal('aa_rel:\n%s', aa_rel)
    candidates = aa_rel[aa_rel < fix_mod_zero_thresh].index
    logger.debug('Fixed mod candidates: %s', candidates)
    for i in candidates:
        candidate_label = get_fixed_mod_raw(i, data, params_dict)
        if candidate_label != reference:
            # number of peptides with `i` at shift `candidate label` must be higher than ...
            count_cand = data.peptides(candidate_label).str.contains(i).sum()
            # number of peptides with `i` at shift `reference` by a factor of `min_fix_mod_pep_count_factor`
            count_ref = data.peptides(reference).str.contains(i).sum()
            # peptide count at candidate shift over # of peptides at reference
            est_ratio = count_cand / data.ms_stats()[reference][1]
            logger.debug('Peptides with %s: ~%d at %s, ~%d at %s. Estimated pct: %f',
                i, count_ref, reference, count_cand, candidate_label, est_ratio)
            if aastat_result[candidate_label][2][i] > fix_mod_zero_thresh and (
                    est_ratio * 100 > fix_mod_zero_thresh * min_fix_mod_pep_count_factor):
                fix_mod_dict[i] = candidate_label
            else:
                logger.debug('Could not find %s anywhere. Can\'t fix.', i)
        else:
            logger.debug('Reference shift is the best for %s.', i)
    return fix_mod_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def determine_fixed_mods_nonzero(reference, locmod_df, data):\n utils.internal('Localizations for %s: %s', reference, locmod_df.at[reference, 'localization'])\n loc = get_fix_mod_from_l10n(reference, locmod_df)\n label = reference\n data_dict = data.ms_stats().copy()\n while loc is None:\n del data_dict[label]\n label = max(data_dict, key=lambda k: data_dict[k][1])\n loc = get_fix_mod_from_l10n(label, locmod_df)\n logger.debug('No luck. Trying %s. Got %s', label, loc)\n if not data_dict:\n break\n return loc", "def fixed(self):\n return self.f_fixed().m_fixed()", "def change_nochange(reference_dataframe, allow_offset=0):\r\n\r\n def changed(x, default=False, offset=0):\r\n if len(x) == 1:\r\n return default\r\n elif x[0] == (x[1]-offset):\r\n return False\r\n else:\r\n return True\r\n\r\n def valid_matches(df, shift, mask):\r\n return df.RefChg & \\\r\n df.MapChg.shift(periods=shift, fill_value=False) & \\\r\n mask & \\\r\n mask.shift(periods=shift, fill_value=False)\r\n\r\n def get_change_window(series, index, offset):\r\n window = [index - offset, index + offset + 1]\r\n for w, s in zip([[0, 1], [1, 0]], [[0, offset], [offset, 0]]):\r\n slc0 = slice(*window)\r\n slc1 = slice(*[window[i] + s[i] for i in range(len(window))])\r\n while series[slc1].sum() > series[slc0].sum():\r\n window = [window[i] + w[i] for i in range(len(window))]\r\n slc0 = slice(*window)\r\n slc1 = slice(*[window[i] + s[i] for i in range(len(window))])\r\n return slice(*window)\r\n\r\n df = reference_dataframe.copy()\r\n df = df.sort_values(['plotid', 'image_year']).reset_index()\r\n\r\n # Rolling window to find changes in land cover class, plot id, or jumps in year\r\n ref_chg = df.Reference.rolling(2, min_periods=1).apply(\r\n lambda x: changed(x), raw=True).astype(np.bool)\r\n map_chg = df.LC_Primary.rolling(2, min_periods=1).apply(\r\n lambda x: changed(x), raw=True).astype(np.bool)\r\n plt_chg = df.plotid.rolling(2, min_periods=1).apply(\r\n lambda x: changed(x, default=True), raw=True).to_numpy(dtype=np.bool)\r\n year_chg_not_one = df.image_year.rolling(2, min_periods=1).apply(\r\n lambda x: changed(x, offset=1), raw=True).to_numpy(dtype=np.bool)\r\n\r\n # Potentially 'valid' data points for change/no-change are defined as follows:\r\n # a) The 'plotid' did not change (the initial observations cannot be a change)\r\n # b) The change in 'image_year' cannot be more than one (missing years are unknowns)\r\n # c) The current and previous reference class cannot be a 0 (invalid value)\r\n\r\n df.loc[:, 'Valid'] = ~plt_chg & ~year_chg_not_one & ~(df.Reference.values == 0)\r\n df.loc[1:, 'Valid'] = df.Valid.values[1:] & ~(df.Reference.values[:-1] == 0)\r\n\r\n # ---- Initialize new columns ---- #\r\n\r\n df.loc[:, 'RefChg'] = ref_chg & df['Valid'].values # Valid reference changes\r\n df.loc[:, 'MapChg'] = map_chg & df['Valid'].values # Valid map changes, not shifted yet\r\n\r\n df.loc[:, 'MapChgYear'] = df['image_year'] * df['MapChg'] # Year of map change or zero\r\n\r\n # There will be some invalid entries here, but they will be filtered out later\r\n df['RefChgFromTo'] = (df.Reference.astype(np.int16) * 100) + df.Reference\r\n df.loc[1:, 'RefChgFromTo'] = (df.Reference[:-1].astype(np.int16).values * 100) + df.Reference[1:].values\r\n df['MapChgFromTo'] = (df.LC_Primary.astype(np.int16) * 100) + df.LC_Primary\r\n df.loc[1:, 'MapChgFromTo'] = (df.LC_Primary[:-1].astype(np.int16).values * 100) + df.LC_Primary[1:].values\r\n\r\n mutable = df.Valid.copy() # Track which things are OK to change\r\n\r\n # ---- End of 
initialization ---- #\r\n\r\n # Find map changes that can be matched to those in the reference data set in other years, within tolerance\r\n if allow_offset:\r\n print('Adjusting changes...')\r\n change_indices = df[df.MapChg.values].index\r\n for change_index in change_indices:\r\n mask = df.plotid == df.loc[change_index, 'plotid'] # Only consider the same plotid\r\n change_compare = []\r\n window = get_change_window(df.MapChg | df.RefChg, change_index, allow_offset)\r\n for shift in range(-allow_offset, allow_offset + 1):\r\n change_compare.append((valid_matches(df, shift, mutable & mask)[window].sum(), shift))\r\n # Sort by decreasing total matches, then increasing shift amount\r\n change_compare.sort(key=lambda x: (-x[0], abs(x[1])))\r\n for changes in change_compare:\r\n n_changes, offset = changes\r\n if n_changes:\r\n matches = valid_matches(df, offset, mutable & mask)\r\n # Shift will only affect valid matches, or where the valid matches started from, for that window\r\n shift_mask = (matches | matches.shift(periods=-offset, fill_value=False)) & \\\r\n df.index.isin(df[window].index)\r\n # Update MapChg, MapChgYear, MapChgFromTo\r\n df.loc[shift_mask, 'MapChg'] = \\\r\n (df.MapChg & shift_mask).shift(\r\n periods=offset, fill_value=False)[shift_mask].values\r\n df.loc[shift_mask, 'MapChgYear'] = \\\r\n (df.MapChgYear * shift_mask.astype(np.int16)).shift(\r\n periods=offset, fill_value=0)[shift_mask].values\r\n df.loc[shift_mask, 'MapChgFromTo'] = \\\r\n (df.MapChgFromTo * shift_mask.astype(np.int16)).shift(\r\n periods=offset, fill_value=101)[shift_mask].values\r\n # These matches will not be changed again\r\n mutable[matches & df.index.isin(df[window].index)] = False\r\n\r\n # Fixing the change codes after moving stuff around above\r\n print('Adjusting change codes...')\r\n for i in df[df.MapChg.values].index:\r\n need_new_lc = True\r\n new_lc = 0\r\n for j in range(i, max(df.index) + 1):\r\n if plt_chg[j]:\r\n break\r\n # If we've just jumped years, we don't know the LC\r\n if year_chg_not_one[j]:\r\n need_new_lc = True\r\n # If we need LC, take it from LC_Primary if nonzero\r\n if need_new_lc and df.loc[j, 'LC_Primary']:\r\n new_lc = df.loc[j, 'LC_Primary']\r\n need_new_lc = False\r\n # If there's been a change, take the new LC from the change code\r\n if df.loc[j, 'MapChg']:\r\n new_lc = df.loc[j, 'MapChgFromTo'] % 10\r\n need_new_lc = False\r\n # Update non-change locations with LC code if possible.\r\n if (not need_new_lc) and (not df.loc[j, 'MapChg']) and (df.loc[j, 'LC_Primary']):\r\n df.loc[j, 'MapChgFromTo'] = (new_lc * 100) + new_lc\r\n\r\n # Check for leapfrogging. The code does not prevent this.\r\n print('Final checks...')\r\n for plot in np.unique(df[df.MapChg.values].plotid):\r\n masked_arr = df[(df.plotid == plot) & (df.MapChgYear > 0)].MapChgYear.values\r\n if not all(masked_arr[i] <= masked_arr[i + 1] for i in range(len(masked_arr) - 1)):\r\n raise Exception('Warning! 
Leapfrog change year in plot: {}'.format(plot))\r\n\r\n # Switch from True/False values to strings for clarity\r\n chg = {True: 'Chg', False: 'NoChg'}\r\n df['RefChg'] = df.RefChg.apply(lambda x: chg[x])\r\n df['MapChg'] = df.MapChg.apply(lambda x: chg[x])\r\n\r\n # Get rid of the invalid data points, those don't count for change or no-change.\r\n df.drop(df[~df.Valid].index, inplace=True)\r\n\r\n return df", "def pre_modify(self):\n return 0", "def _only_fixed(o, d):\n if d[\"fixed\"]:\n return (\"value\", \"fixed\")\n else:\n return (\"fixed\",)", "def _fixed_indicies(self):\n fixed_inds = self.constraints == 'fixed'\n return fixed_inds", "def mod_mask(self):\n # Check the *_masq values\n self.__log.debug(\"Checking the *_masq arrays\")\n # Retrieve the kid boxes\n masq_names = np.unique([\"{}_masq\".format(item[1]) for item in self.list_detector])\n self.__check_attributes(masq_names, read_missing=False)\n # Check that they are all the same\n warnings.warn(\"Temporary fix to int8\")\n masqs = [getattr(self, masq).astype(np.int8) for masq in masq_names]\n\n if np.any(np.std(masqs, axis=0) != 0):\n self.__log.error(\"*_masq is varying -- Please check : {}\".format(pprint_list(masq_names, \"_masq\")))\n\n # AB private comm) main_flag should be the bitwise_or of all boxes\n # Well not exactly....\n # cast into 8 bit, is more than enough, only 3 bits used anyway...\n masq = np.bitwise_or.reduce(masqs, axis=0).astype(np.int8)\n\n # AB (#CONCERTO_DAQ January 11 13:02)\n # _flag_balayage_en_cours & _flag_blanking_synthe\n # Ainsi on aura la modulation en bit0 et 1 et le flag blanking en bit\n # AB (#CONCERTO_DAQ February 11 11:07)\n # bit 1 & 2 code the modulation as a signed integer -1 0 1 : 11 00 01 ie 3 0 1\n # bit 3 is a blanking bit, which does not exist for KISS, but should not be taken into account for CONCERTO\n\n # Thus as a temporary fix, let's clear the 3rd bit, actually a bad idea...\n # self.__log.warning(\"Temporary fix : clearing the 3rd bit of masq\")\n # masq = masq & ~(1 << 2)\n\n return masq", "def test_correct_backward_order1(self):\r\n coeffs, shifts = finite_diff_coeffs(1, 1, \"backward\")\r\n assert np.allclose(coeffs, [1, -1])\r\n assert np.allclose(shifts, [0, -1])", "def test_correct_forward_order1(self):\r\n coeffs, shifts = finite_diff_coeffs(1, 1, \"forward\")\r\n assert np.allclose(coeffs, [-1, 1])\r\n assert np.allclose(shifts, [0, 1])", "def _set_fixed(o, d):\n if d:\n o.fix()\n else:\n o.unfix()", "def test_explicit_fixed_effects_without_mask(tmp_path):\n shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 16)), 3\n _, fmri_data, design_matrices =\\\n write_fake_fmri_data_and_design(shapes, rk, file_path=tmp_path)\n contrast = np.eye(rk)[1]\n\n # session 1\n multi_session_model = FirstLevelModel().fit(\n fmri_data[0], design_matrices=design_matrices[:1])\n dic1 = multi_session_model.compute_contrast(\n contrast, output_type='all')\n\n # session 2\n multi_session_model.fit(\n fmri_data[1], design_matrices=design_matrices[1:])\n dic2 = multi_session_model.compute_contrast(\n contrast, output_type='all')\n\n # fixed effects model\n multi_session_model.fit(\n fmri_data, design_matrices=design_matrices)\n fixed_fx_dic = multi_session_model.compute_contrast(\n contrast, output_type='all')\n\n contrasts = [dic1['effect_size'], dic2['effect_size']]\n variance = [dic1['effect_variance'], dic2['effect_variance']]\n\n # test without mask variable\n (\n fixed_fx_contrast,\n fixed_fx_variance,\n fixed_fx_stat,\n ) = compute_fixed_effects(contrasts, variance)\n assert_almost_equal(\n 
get_data(fixed_fx_contrast),\n get_data(fixed_fx_dic['effect_size']))\n assert_almost_equal(\n get_data(fixed_fx_variance),\n get_data(fixed_fx_dic['effect_variance']))\n assert_almost_equal(\n get_data(fixed_fx_stat), get_data(fixed_fx_dic['stat']))", "def test_fix_mask(self):\n fixable_mask = mapreader.Map(os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_fixable_mask.map'))\n self.assertFalse(fixable_mask.is_mask)\n fixable_mask.fix_mask()\n self.assertTrue(fixable_mask.is_mask)", "def test_correct_forward_order2(self):\r\n coeffs, shifts = finite_diff_coeffs(1, 2, \"forward\")\r\n assert np.allclose(coeffs, [-1.5, 2, -0.5])\r\n assert np.allclose(shifts, [0, 1, 2])", "def test_correct_center_order2(self):\r\n coeffs, shifts = finite_diff_coeffs(1, 2, \"center\")\r\n assert np.allclose(coeffs, [-0.5, 0.5])\r\n assert np.allclose(shifts, [-1, 1])", "def check_fixedblock(self):\n print('This will read the fixed block then display changes as they')\n print('occur. Typically the most common change is the incrementing')\n print('of the data pointer, which happens whenever readings are saved')\n print('to the station memory. For example, if the logging interval')\n print('is set to 5 minutes, the fixed block should change at least')\n print('every 5 minutes.')\n raw_fixed = self.station.get_raw_fixed_block()\n while True:\n new_fixed = self.station.get_raw_fixed_block(unbuffered=True)\n for ptr in range(len(new_fixed)):\n if new_fixed[ptr] != raw_fixed[ptr]:\n print(datetime.datetime.now().strftime('%H:%M:%S'), end=' ')\n print(' %04x (%d) %02x -> %02x' % (\n ptr, ptr, raw_fixed[ptr], new_fixed[ptr]))\n raw_fixed = new_fixed\n time.sleep(0.5)", "def getTranslation(fracs):\n \n \n \n # Determine whether the shift needs to be from inf to 0 \n # or from -inf to 0\n \n # Along all x fractionals\n if abs(max(fracs[0]))>=abs(min(fracs[0])):\n minX = min([x for x in fracs[0] if x>0])\n else:\n minX = min([x for x in fracs[0] if x<0])\n \n # Along all y fractionals\n if abs(max(fracs[1]))>=abs(min(fracs[1])):\n minY = min([x for x in fracs[1] if x>0])\n else:\n minY = min([x for x in fracs[1] if x<0])\n \n # Along all z fractionals\n # Need to consider all atoms lying in a single\n # plane (e.g. 
graphene), thus the final \"else\"\n # statement\n if abs(max(fracs[2]))>abs(min(fracs[2])):\n minZ = min([x for x in fracs[2] if x>0])\n elif abs(max(fracs[2]))<abs(min(fracs[2])):\n minZ = min([x for x in fracs[2] if x<0])\n else:\n minZ = max(fracs[2])\n\n shift_vector = np.array([minX,minY,minZ])\n \n return(shift_vector)", "def test_shift_ruptures_no_shift(midday):\n shift_mask, shift_amounts = time.shifts_ruptures(\n midday, midday\n )\n assert not shift_mask.any()\n assert_series_equal(\n shift_amounts,\n pd.Series(0, index=midday.index, dtype='int64'),\n check_names=False\n )", "def test_correct_second_derivative_center_order4(self):\r\n coeffs, shifts = finite_diff_coeffs(2, 4, \"center\")\r\n assert np.allclose(coeffs, [-2.5, 4 / 3, 4 / 3, -1 / 12, -1 / 12])\r\n assert np.allclose(shifts, [0, -1, 1, -2, 2])", "def fixed(self):\n for i in range(15):\n self.factors[i].fixed()\n self.transition.fixed()", "def m_fixed(self):\n self.mx_free = self.my_free = self.mz_free = False\n return self", "def mask_fixed(self):\n ns = len(self)-1\n # mask fixed entries\n self.mask[0,0,0] = True\n self.mask[0,0,-1] = True\n self.mask[0,-1,0] = True\n self.mask[-1,0,0] = True\n # mask entries with i+j+k > ns\n for ii in range(len(self)):\n for jj in range(len(self)):\n for kk in range(len(self)):\n if ii+jj+kk > ns:\n self.mask[ii,jj,kk] = True\n \n # mask fA = 0 and fB = 0\n for ii in range(len(self)):\n self.mask[ii,ns-ii,0] = True\n self.mask[ii,0,ns-ii] = True\n\n self.mask[0,:,0] = True\n self.mask[0,0,:] = True\n return self", "def find_shift(ref, img):\n im0 = prepare(ref)\n im1 = prepare(img)\n shift, error, diffphase = register_translation(im0, im1, 100)\n\n return shift", "def test_adjust_offsets_short(self):\n tool = pybedtools.BedTool(\"chr15 91512755 91512836 ENSG00000198901_1_147 0 -\", from_string=True)\n offsets = {\"ENSG00000198901_1_147\" : 10}\n results = adjust_offsets(tool, offsets)", "def test_correct_second_derivative_forward_order1(self):\r\n coeffs, shifts = finite_diff_coeffs(2, 1, \"forward\")\r\n assert np.allclose(coeffs, [1, -2, 1])\r\n assert np.allclose(shifts, [0, 1, 2])", "def test_modified_schwefel(self):\n fun = get_problem('modified_schwefel', self.dimension, -100, 100)\n self.assertAlmostEqual(fun(self.array10), 6.9448853328785844, delta=350)", "def f_fixed(self):\n self.fx_free = self.fy_free = self.fz_free = False\n return self", "def test_findBugfixes(self):\n bugfixes = self.builder._findChanges(\n self.project, self.builder._BUGFIX)\n self.assertEquals(\n bugfixes,\n [(23, 'Broken stuff was fixed.')])", "def on_fees_change(origin_matrix, changes_on_fees):\n new_fees = origin_matrix[FEES_IDX]\n for idx in range(len(origin_matrix[0])):\n if changes_on_fees[idx] != None:\n new_fees[idx] = changes_on_fees[idx]\n return new_fees", "def test_fixups():\n binary: MachO = cast(MachO, cle.Loader(str(TEST_BASE / \"tests\" / \"aarch64\" / \"dyld_ios15.macho\")).main_object)\n expected = {\n 0x100008100: 0x100007A40,\n 0x1000081E0: 0x1000072B0,\n 0x1000081E8: 0x1000072DC,\n 0x1000081F0: 0x1000072E4,\n 0x1000081F8: 0x100007310,\n 0x100008200: 0x100007350,\n 0x100008208: 0x10000735C,\n 0x100008210: 0x10000738C,\n 0x100008218: 0x1000073E8,\n 0x100008238: 0x1000081E0,\n 0x100008248: 0x100007A40,\n 0x1000082A0: 0x100007AFC,\n 0x1000082D8: 0x10000C0E8,\n 0x10000C018: 0x100007B90,\n 0x10000C060: 0x100007B90,\n 0x10000C068: 0x100007998,\n 0x10000C090: 0x100007C2A,\n 0x10000C0D0: 0x10000C000,\n 0x10000C0D8: 0x100007210,\n 0x10000C0E8: 0x10000C0B0,\n 0x10000C108: 0x10000C04A,\n 
0x10000C128: 0x1000079F0,\n }\n\n actual = {r.rebased_addr: r.value for r in binary.relocs if isinstance(r, MachOChainedFixup)}\n assert actual == expected", "def test_fix(self):\n self.check_data.side_effect = lambda: self.fixed_cube\n with patch('esmvalcore.cmor._fixes.fix.Fix.get_fixes',\n return_value=[self.mock_fix]) as mock_get_fixes:\n with patch('esmvalcore.cmor.fix._get_cmor_checker',\n return_value=self.checker):\n cube_returned = fix_data(\n self.cube,\n short_name='short_name',\n project='project',\n dataset='model',\n mip='mip',\n session=sentinel.session,\n )\n self.checker.assert_called_once_with(self.intermediate_cube)\n self.check_data.assert_called_once_with()\n assert cube_returned is not self.cube\n assert cube_returned is not self.intermediate_cube\n assert cube_returned is self.fixed_cube\n mock_get_fixes.assert_called_once_with(\n **self.expected_get_fixes_call\n )" ]
[ "0.6245811", "0.5993922", "0.58981633", "0.54384756", "0.5420952", "0.5370132", "0.5319682", "0.52733696", "0.52633834", "0.5207875", "0.5174516", "0.5168301", "0.5162118", "0.5160096", "0.5155298", "0.5127173", "0.5121005", "0.50911295", "0.49932376", "0.4955329", "0.49097493", "0.49048755", "0.4889362", "0.4889008", "0.4873095", "0.48622525", "0.4823693", "0.4778005", "0.4771348", "0.47635102" ]
0.63415945
0
r"""Compute the Einstein radius for a given isotropic velocity dispersion assuming a singular isothermal sphere (SIS) mass profile
def approximate_theta_E_for_SIS(vel_disp_iso, z_lens, z_src, cosmo):
    lens_cosmo = LensCosmo(z_lens, z_src, cosmo=cosmo)
    theta_E_SIS = lens_cosmo.sis_sigma_v2theta_E(vel_disp_iso)
    return theta_E_SIS
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _calculate_residual_sphere(parameters, x_values, y_values, z_values):\n #extract the parameters\n x_centre, y_centre, z_centre, radius = parameters\n\n #use numpy's sqrt function here, which works by element on arrays\n distance_from_centre = numpy.sqrt((x_values - x_centre)**2 +\n (y_values - y_centre)**2 +\n (z_values - z_centre)**2)\n\n return distance_from_centre - radius", "def sphere_sre(solution):\n a = 0\n bias = 0.2\n x = solution.get_x()\n x1 = x[:10]\n x2 = x[10:]\n value1 = sum([(i-bias)*(i-bias) for i in x1])\n value2 = 1/len(x) * sum([(i-bias)*(i-bias) for i in x2])\n return value1 + value2", "def ISE_loop(mu, s, DIMENSION=2):\n total = 0\n for i in range(len(mu)):\n for j in range(len(mu)):\n dist_sq = np.sum((mu[i]-mu[j])**2)\n total += (i != j)*(1/(s*s*2*np.pi))**(0.5*DIMENSION)*np.exp(-dist_sq/(2*s*s))\n return (2*total/len(mu)/(len(mu)-1))", "def effective_radius(self, n):\n\n er2 = 5.0 * self.sa / n\n er = np.sqrt(er2)\n\n return er", "def estimate_radius(self):\n red = self.T[:,:,0] # empirically, the most reliable channel\n\n eye_radius = red.sum(axis=1).max() / 2\n return eye_radius", "def getSphereRadius(self):\n return 1.5", "def Iq(q, second_moment, adsorbed_amount, density_shell, radius,\n volfraction, sld_shell, sld_solvent):\n with errstate(divide='ignore'):\n aa = ((sld_shell - sld_solvent)/density_shell * adsorbed_amount) / q\n bb = q * second_moment\n #scale by 10^-2 for units conversion to cm^-1\n inten = 6.0e-02 * pi * volfraction * aa**2 * exp(-bb**2) / radius\n return inten", "def get_circumsphere(S):\n\n U = S[1:] - S[0]\n B = numpy.sqrt(numpy.square(U).sum(axis=1))\n U /= B[:, None]\n B /= 2\n C = numpy.dot(numpy.linalg.solve(numpy.inner(U, U), B), U)\n r2 = numpy.square(C).sum()\n C += S[0]\n return C, r2", "def sphere(\n network,\n pore_diameter='pore.diameter'\n):\n return 4/3*_pi*(network[pore_diameter]/2)**3", "def ellipse_ellipticity(S):\n return 1/2 * np.arcsin(S[..., 3]/S[..., 0])", "def asphericity(Rnm_eg):\n num = (Rnm_eg[0] - Rnm_eg[2])**2 + (Rnm_eg[1] - Rnm_eg[2])**2 + (Rnm_eg[0] - Rnm_eg[1])**2\n dem = 2*(Rnm_eg[0] + Rnm_eg[1] + Rnm_eg[2])**2\n Asphere = num/dem\n return Asphere", "def asphericity(Rnm_eg):\n num = (Rnm_eg[0] - Rnm_eg[2])**2 + (Rnm_eg[1] - Rnm_eg[2])**2 + (Rnm_eg[0] - Rnm_eg[1])**2\n dem = 2*(Rnm_eg[0] + Rnm_eg[1] + Rnm_eg[2])**2\n Asphere = num/dem\n return Asphere", "def velocity_dispersion_from(\r\n self, redshift_0: float, redshift_1: float, einstein_radius: float\r\n ) -> float:\r\n const = constants.c.to(\"kpc / s\")\r\n\r\n angular_diameter_distance_to_redshift_0_kpc = (\r\n self.angular_diameter_distance_to_earth_in_kpc_from(redshift=redshift_1)\r\n )\r\n\r\n angular_diameter_distance_to_redshift_1_kpc = (\r\n self.angular_diameter_distance_to_earth_in_kpc_from(redshift=redshift_1)\r\n )\r\n\r\n angular_diameter_distance_between_redshifts_kpc = (\r\n self.angular_diameter_distance_between_redshifts_in_kpc_from(\r\n redshift_0=redshift_0, redshift_1=redshift_1\r\n )\r\n )\r\n\r\n kpc_per_arcsec = self.kpc_per_arcsec_from(redshift=redshift_0)\r\n\r\n einstein_radius_kpc = einstein_radius * kpc_per_arcsec\r\n\r\n velocity_dispersion_kpc = const * np.sqrt(\r\n (einstein_radius_kpc * angular_diameter_distance_to_redshift_1_kpc)\r\n / (\r\n 4\r\n * np.pi\r\n * angular_diameter_distance_to_redshift_0_kpc\r\n * angular_diameter_distance_between_redshifts_kpc\r\n )\r\n )\r\n\r\n return velocity_dispersion_kpc.to(\"km/s\").value", "def approx_sun_position_ECI(MJD):\n import math\n JD = MJD + 2400000.5\n OplusW = 
282.94\n T = (JD - 2451545.0) / 36525\n\n M = math.radians(357.5256 + 35999.049 * T)\n\n long = math.radians(OplusW + math.degrees(M) + 6892 / 3600 * math.sin(M) + 72 / 3600 * math.sin(2*M))\n r_mag = (149.619 - 2.499 * math.cos(M) - 0.021 * math.cos(2*M)) * 10**6\n\n epsilon = math.radians(23.43929111)\n r_vec = (r_mag * math.cos(long), r_mag * math.sin(long) * math.cos(epsilon), r_mag * math.sin(long) * math.sin(epsilon))\n\n return r_vec", "def sphere_cart()\ndef simulator(nparticles, ninteractions, vacradius, vesradius):\n for i in range(nparticles):\n #neutron = neutron_func(i)\n energy = 14E6\n phi = calc_phi()\n theta = calc_theta()\n xneut = 0\n yneut = 0\n zneut = 0\n d = collision_distance(phi, theta, xneut, zneut)\n r = -np.log(random.random(seed))/sigma_t(energy)\n j = 0\n while (j <= ninteractions)\n xneut = sphere_cart(scatter(energy, A)[0:2])", "def radial_distance(x_i, y_i, z_i, x_j, y_j, z_j, box_length):\n delta_x = min(((x_i - x_j) % box_length), ((x_j - x_i) % box_length))\n delta_y = min(((y_i - y_j) % box_length), ((y_j - y_i) % box_length))\n delta_z = min(((z_i - z_j) % box_length), ((z_j - z_i) % box_length))\n return np.sqrt(delta_x ** 2 + delta_y ** 2 + delta_z ** 2)", "def calc_length_distortion_on_ellipsoid(self, lon, lat):\n\n # get the subgrid\n sg, _, _ = self.lonlat2xy(lon, lat)\n\n lon0 = self.subgrids[str(sg)].core.projection.osr_spref.GetProjParm('central_meridian')\n lat0 = self.subgrids[str(sg)].core.projection.osr_spref.GetProjParm('latitude_of_origin')\n\n # get spherical distance and azimuth between projection centre and point of interest\n geod = Geodesic.WGS84\n gi = geod.Inverse(lat0, lon0, lat, lon)\n c1 = gi['s12']\n az1 = gi['azi1']\n\n # apply equation for distortion in direction perpendicular to the radius, k:\n # k = c/geod.a / np.sin(c/geod.a)\n k = c1 / geod.a / np.sin(c1 / geod.a)\n\n return k", "def big_psi(sun_pos, sat_3d_pos):\n return np.arccos(np.dot(sun_pos.T, sat_3d_pos) / (vector_magnitude(sun_pos[0], sun_pos[1], sun_pos[2]) * vector_magnitude(sat_3d_pos[0], sat_3d_pos[1], sat_3d_pos[2])))", "def Keldysh_Parameter(omega,Uion,E):\n\treturn omega*np.sqrt(2.0*Uion)/E", "def find_radius(mass,delta_m,eta,xi,mue,pp_factor):\n\n #range of radii; reason in detail under step 9 of report\n r_low = 0.01*Rsun # MKS\n r_high = 3*Rsun # MKS\n \n radius = brentq(lum_difference, r_low, r_high, xtol=1.0e-4, args = (mass,delta_m,eta,xi,mue,pp_factor))\n return radius", "def calcul_v_sphere(r):\n volume = 4/3 * math.pi * (r ** 3)\n return volume", "def welch_stetson_I(magnitudes, errors):\n num_obs = magnitudes.shape[0]\n\n if num_obs % 2 == 1:\n magnitudes = magnitudes[:-1]\n errors = errors[:-1]\n num_obs -= 1\n\n evens = np.arange(0, num_obs, 2)\n odds = np.arange(1, num_obs, 2)\n\n b = magnitudes[evens]\n v = magnitudes[odds]\n\n b_err = magnitudes[evens]\n v_err = magnitudes[odds]\n\n mean = np.mean(magnitudes)\n\n d = (b - mean) / b_err\n e = (v - mean) / v_err\n stetson_I = np.sqrt(1 / (num_obs * (num_obs - 1))) * np.sum(d * e)\n\n return stetson_I", "def wheels_radius_INV(ds):\n wr = ds[0]\n wl = ds[1]\n V = ds[2]\n Nsample = len(wr)\n H = np.zeros((Nsample,2))\n H[:,0] = wr*0.5\n H[:,1] = wl*0.5 \n X = np.dot(np.linalg.pinv(H),V) #X=rayons estimés\n Rl_est, Rr_est = X[1], X[0]\n return Rr_est, Rl_est", "def boringInterlude (radiusIn):\n\n\n import math\n volIn = (4/3) * math.pi * (radiusIn ** 3)\n vol = volIn/ 1728\n return vol", "def eggleton_roche_radius(self):\n return self.eggleton_roche_over_separation() * self.separation()", "def 
calculate_esi_index(radius, mass, temperature):\n density = Calculator.calculate_average_density(radius, mass)\n escape_velocity = Calculator.calculate_escape_velocity(radius, mass)\n\n factors = [\n (radius, 6.3781e6, 0.57/4),\n (density, 5513, 1.07/4),\n (escape_velocity, 11200, 0.70/4),\n (temperature, 288, 5.58/4)\n ]\n res = [(1 - abs(x - y)/abs(x + y)) ** z for x, y, z in factors]\n return functools.reduce(operator.mul, res)", "def compute_in_radius(self, boids_in_radius):\r\n \r\n avg_velocity = Vector(*np.zeros(2))\r\n center_of_mass = Vector(*np.zeros(2))\r\n avg_vector = Vector(*np.zeros(2))\r\n total = 0\r\n for boid in boids_in_radius:\r\n avg_velocity += boid.velocity # calculating average direction \r\n center_of_mass += boid.position # calculating center of mass\r\n total += 1\r\n distance = np.linalg.norm(boid.position - self.position)\r\n \r\n if self.position != boid.position:\r\n diff = self.position - boid.position\r\n diff /= distance # scaling with the distance in order to avoid closer boids with greater force \r\n avg_vector += diff # calculating repulsive force vector\r\n \r\n return avg_velocity, center_of_mass, avg_vector, total", "def hardSphereRadius(self):\n\n return self.__hardSphereRadius", "def _calculate_anisoplatanism_error(self):\n\n self.sigma_anisoplatanism = np.sqrt((self.science_object_separation/self.isoplanatic_angle)**(5/3))*(self.parameter_wavelength /(2*np.pi))", "def _template_sphere_disc(dim, outer_radius, inner_radius):\n rmax = np.array(outer_radius, ndmin=1)\n rmin = np.array(inner_radius, ndmin=1)\n ind = 2 * rmax - 1\n coord = np.indices((ind * np.ones(dim, dtype=int)))\n coord = coord - (ind - 1)/2\n x = coord[0, :]\n y = coord[1, :]\n if dim == 2:\n img = (x ** 2 + y ** 2) < rmax ** 2\n elif dim == 3:\n z = coord[2, :]\n img = (x ** 2 + y ** 2 + z ** 2) < rmax ** 2\n if rmin[0] != 0:\n if dim == 2:\n img_min = (x ** 2 + y ** 2) > rmin ** 2\n elif dim == 3:\n img_min = (x ** 2 + y ** 2 + z ** 2) > rmin ** 2\n img = img * img_min\n return img" ]
[ "0.59292984", "0.5824991", "0.5817226", "0.58086616", "0.57849306", "0.57736725", "0.5735249", "0.57111436", "0.5704036", "0.56862265", "0.5664737", "0.5664737", "0.56512725", "0.5550405", "0.5496292", "0.5456463", "0.5426158", "0.54168355", "0.53976965", "0.5365244", "0.5362637", "0.5339783", "0.5337699", "0.5287755", "0.52820945", "0.5281513", "0.52810335", "0.52762115", "0.52234447", "0.52072275" ]
0.6008606
0
Evaluate the V-band luminosity L_V expected from the Faber-Jackson (FJ) relation for a given velocity dispersion
def get_luminosity(self, vel_disp):
    log_L_V = self.slope*np.log10(vel_disp) + self.intercept
    return log_L_V
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_vcond(lambdam, taum):\n return 2 * lambdam / taum", "def V_hipass(V, R_S, C, L, R_L, f):\n # current in circuit\n I = V/(R_S + Z_hipass(C, L, R_L, f))\n # voltage across circuit\n V_out = V - I*R_S\n I_L = V_out/Z_high(C, R_L, f) # current through load branch\n V_L = I_L*R_L # voltage across load\n return V_L", "def LJ(r, epsilon, sigma, x, y):\n A=((x/y)**(x/(x-y))/((x/y)-1))\n\n\n V=A*epsilon*((sigma/r)**x-(sigma/r)**y) #-4*Epsilon*((Sigma/Rc)**12-(Sigma/Rc)**6)\n\n return V", "def calc_elv_spectra(self, red, comp, src):\n if ((src in red.data.keys())\n & (src in red.data.keys())):\n # check that the wavelenth grids are identical\n delt_wave = red.data[src].waves - comp.data[src].waves\n if np.sum(np.absolute(delt_wave)) > 0.01*u.micron:\n warnings.warn(\"wavelength grids not equal for %s\" % src,\n UserWarning)\n else:\n # reference band\n red_V = red.data['BAND'].get_band_mag('V')\n comp_V = comp.data['BAND'].get_band_mag('V')\n\n # setup the needed variables\n self.waves[src] = red.data[src].waves\n n_waves = len(self.waves[src])\n self.exts[src] = np.zeros(n_waves)\n self.uncs[src] = np.zeros(n_waves)\n self.npts[src] = np.zeros(n_waves)\n\n # only compute the extinction for good, positive fluxes\n print(comp.data[src].npts)\n print(comp.data[src].fluxes)\n indxs, = np.where((red.data[src].npts > 0)\n & (comp.data[src].npts > 0)\n & (red.data[src].fluxes.value > 0)\n & (comp.data[src].fluxes.value > 0))\n self.exts[src][indxs] = \\\n (-2.5*np.log10(red.data[src].fluxes[indxs]\n / comp.data[src].fluxes[indxs])\n + (comp_V[0] - red_V[0]))\n self.uncs[src][indxs] = np.sqrt(\n np.square(_flux_unc_as_mags(red.data[src].fluxes[indxs],\n red.data[src].uncs[indxs]))\n + np.square(_flux_unc_as_mags(comp.data[src].fluxes[indxs],\n comp.data[src].uncs[indxs]))\n + np.square(red_V[1])\n + np.square(comp_V[1]))\n self.npts[src][indxs] = np.full(len(indxs), 1)", "def Luminosity(self, z, f=1., dnu=1000.):\n ld = self.Luminosity_Distance(z)\n ld2 = ld * ld\n lum = f * self.Jy2CGS * dnu * self.MHz2Hz * 4 * np.pi * ld2\n return lum", "def V_bandpass(V, R_S, C, L, R_L, f):\n # current in circuit\n I = V/(R_S + Z_bandpass(C, L, R_L, f))\n # voltage across circuit\n V_out = V - I*R_S\n return V_out", "def Luminosity(self):\n try:\n L = (self.E*self.Weight).sum()\n N = self.E.count()\n except:\n L = self.E.sum()\n N = self.E.count()\n return L, L/np.sqrt(N)", "def calc_lhv(self):\n hf = {}\n hf['hydrogen'] = 0\n hf['methane'] = -74.85\n hf['ethane'] = -84.68\n hf['propane'] = -103.8\n hf['butane'] = -124.51\n hf['O2'] = 0\n hf['CO2'] = -393.5\n # water (gaseous)\n hf['H2O'] = -241.8\n\n lhv = 0\n\n for f, x in self.fuel.val.items():\n molar_masses[f] = CP.PropsSI('M', f)\n fl = set(list(hf.keys())).intersection(\n set([a.replace(' ', '') for a in CP.get_aliases(f)]))\n if len(fl) == 0:\n continue\n\n if list(fl)[0] in self.fuels():\n structure = fluid_structure(f)\n\n n = {}\n for el in ['C', 'H', 'O']:\n if el in structure:\n n[el] = structure[el]\n else:\n n[el] = 0\n\n lhv += (-(n['H'] / 2 * hf['H2O'] + n['C'] * hf['CO2'] -\n ((n['C'] + n['H'] / 4) * hf['O2'] +\n hf[list(fl)[0]])) / molar_masses[f] * 1000) * x\n\n return lhv", "def velocity(n_core, q, beta_invariant, material_dispersion=None):\n c = scipy.constants.speed_of_light\n if material_dispersion is None:\n A = 2 / c / (2 + q)\n B = q * n_core**2 / c / (2 + q)\n else:\n N1 = n_core + material_dispersion\n y = 2 * n_core / N1\n A = 2 * N1 / n_core * (1 + 0.25 * y) / c / (q + 2)\n B = q * n_core**2 * A - 1 / 4 / c * N1 * n_core * y\n\n 
return A * beta_invariant + B / beta_invariant", "def LotkaVolterra_Dynamics(self):\n LV_c = self.toConceptual(self.state) # (nF, nR)\n LV_c = LV_c.mul((1 - LV_c) + self.LV_inhM.mm(LV_c))\n LV_s = self.toNeural(LV_c)\n\n return LV_c, LV_s", "def compute_Flocal(config):\n \n vlow = config['vlow']\n vhigh = config['vhigh']\n vdef = config['vdef']\n lo_restfreq = config[\"DOPPLERTRACKFREQ\"]\n\n velocity = (vlow + vhigh) * 0.5\n vd = Vdef()\n vd.compute_local_frame_with_vdef(vdef, velocity,\n lo_restfreq, velocity)\n # this better be the same as vlow since i sent in the avg\n cur_vhigh = vd.get_vhigh()\n cur_vlow = vd.get_vlow()\n if cur_vhigh != cur_vlow:\n \"PANIC: How can the avg velocities differ!!!!!\"\n \n return cur_vhigh", "def _fv(self):\n return self.beta * (self.x ** self.c)", "def cie_luv(self):\n K = Fraction(29, 3) ** 3\n e = Fraction(6, 29) ** 3\n XYZ = self.cie_xyz\n yr = XYZ[1] / D65[1]\n L = 116 * yr ** Fraction(1, 3) - 16 if yr > e else K * yr\n u = 13 * L * (U(*XYZ) - U(*D65))\n v = 13 * L * (V(*XYZ) - V(*D65))\n return (L, u, v)", "def width_v_v_v(model: SingleRhNeutrinoModel, genv: Generation):\n mx = model.mx\n u = 0.5 * np.tan(2 * model.theta)\n w = parameters.GF**2 * mx**5 / (768 * np.pi**3) * u**2\n pre = 2 if genv == model.gen else 1.0\n return pre * w", "def __rho2v(self, vm, beta, rhoc, w, rho):\n if rho < 0:\n return float(vm)\n elif rho <= rhoc:\n return float(vm - vm * rho / beta)\n else:\n rhom = rhoc - (vm * rhoc - vm * (rhoc ** 2) / beta) / w\n # print('rho {0}; rhoc {1}'.format(rho, rhoc))\n return float(w * (rho - rhom) / rho)", "def dLJverlet(x,r2,R1,R2):\r\n rc = (2**(1/6))*((R1+R2)/(2))\r\n sig_int = (R1+R2)/(2) #JV: This is the sigma of the interaction (in the system units). We don't need to divide by sigma because we are already working with reduced units\r\n\r\n #JV: Because we are working on reduced units (from the values of the Argon gas)\r\n # we want need to divide our radius by the radius of the Argon gas\r\n\r\n #JV: See LJverlet() for more explanation on the truncation\r\n if((r2**(1/2))>rc):\r\n value = 0\r\n else:\r\n value = ((48.*x)/(r2))*(((((sig_int**2)*1.)/r2)**6) - ((((sig_int**2)*0.5)/r2)**3))\r\n\r\n return value", "def test_lfc_inversion():\n levels = np.array([963., 789., 782.3, 754.8, 728.1, 727., 700.,\n 571., 450., 300., 248.]) * units.mbar\n temperatures = np.array([25.4, 18.4, 17.8, 15.4, 12.9, 12.8,\n 10., -3.9, -16.3, -41.1, -51.5]) * units.celsius\n dewpoints = np.array([20.4, 0.4, -0.5, -4.3, -8., -8.2, -9.,\n -23.9, -33.3, -54.1, -63.5]) * units.celsius\n lfc_pressure, lfc_temp = lfc(levels, temperatures, dewpoints)\n assert_almost_equal(lfc_pressure, 705.8806 * units.mbar, 2)\n assert_almost_equal(lfc_temp, 10.6232 * units.celsius, 2)", "def __q2v_ff(self, vm, beta, q):\n return float((vm * beta - np.sqrt(np.power(vm * beta, 2) - 4 * vm * beta * q)) / (2 * vm))", "def get_voltage(self):\n summary = \" \".join(self.get_summary().split())\n pattern = '\\$.... .. (.*?) .*? .*? .*? .*? . .*? .*? . . . 
.*?'\n voltage = float(re.findall(pattern,summary).pop())\n return voltage", "def do_test_values(self, vel=numpy.array((3e6, 4e4, 1e2)), bf=numpy.array((0, 0, -2)),\n ef=numpy.array((0, 0, 1e6)), charge=4*e_chg):\n res = sim.lorentz(vel, ef, bf, charge)\n exp = charge*(ef + numpy.cross(vel, bf))\n nptest.assert_allclose(res, exp)", "def get_fermi_velocities():\n\n vr = Vasprun('vasprun.xml')\n # eigenvalues = vr.eigenvalues\n bs = vr.get_band_structure()\n bands = bs.bands\n kpoints = bs.kpoints\n efermi = bs.efermi\n h_bar = 6.582e-16 # eV*s\n\n fermi_bands = []\n for spin in bands:\n for i in range(len(bands[spin])):\n if max(bands[spin][i]) > efermi > min(bands[spin][i]):\n fermi_bands.append(bands[spin][i])\n\n fermi_velocities = []\n for band in fermi_bands:\n for i in range(len(band)-1):\n if (band[i] < efermi < band[i+1]) or (band[i] > efermi > band[i+1]):\n dk = np.sqrt((kpoints[i+1].cart_coords[0]\n - kpoints[i].cart_coords[0])**2\n + (kpoints[i+1].cart_coords[1]\n - kpoints[i].cart_coords[1])**2)\n v_f = abs((band[i+1] - band[i]) / (h_bar * dk))\n fermi_velocities.append(v_f)\n\n return fermi_velocities # Values are in Angst./s", "def test_filt_vegamag(self):\n sun = Sun.from_builtin('E490_2014')\n V = get_bandpass('johnson v')\n wave, fluxd = sun.filt(V, unit=JMmag)\n assert np.isclose(fluxd.value, -26.75, atol=0.006)", "def build_rhs():\n\n def div(\n coeff_rho,\n momentum_x,\n momentum_y,\n momentum_z,\n ):\n \"\"\"Computes the divergence of the velocity field.\"\"\"\n # Compute the fourth order derivative of the pressure for the face\n # velocity correction.\n p_corr = (\n states['p']\n if self._params.enable_rhie_chow_correction else states['dp'])\n d4p_dx4 = self._kernel_op.apply_kernel_op_x(p_corr, 'k4d2x')\n d4p_dy4 = self._kernel_op.apply_kernel_op_y(p_corr, 'k4d2y')\n d4p_dz4 = self._kernel_op.apply_kernel_op_z(p_corr, 'k4d2z',\n 'k4d2zsh')\n\n # Compute velocity gradient based on interpolated values on cell faces.\n coeff_x = dt / (4. * coeff_rho * dx**2)\n du = self._kernel_op.apply_kernel_op_x(momentum_x, 'kDx')\n du_dx = [\n du_i / (2. * dx) + coeff_x * d4p_dx4_i\n for du_i, d4p_dx4_i in zip(du, d4p_dx4)\n ]\n\n coeff_y = dt / (4. * coeff_rho * dy**2)\n dv = self._kernel_op.apply_kernel_op_y(momentum_y, 'kDy')\n dv_dy = [\n dv_i / (2. * dy) + coeff_y * d4p_dy4_i\n for dv_i, d4p_dy4_i in zip(dv, d4p_dy4)\n ]\n\n coeff_z = dt / (4. * coeff_rho * dz**2)\n dw = self._kernel_op.apply_kernel_op_z(momentum_z, 'kDz', 'kDzsh')\n dw_dz = [\n dw_i / (2. 
* dz) + coeff_z * d4p_dz4_i\n for dw_i, d4p_dz4_i in zip(dw, d4p_dz4)\n ]\n\n return [\n du_dx_i + dv_dy_i + dw_dz_i\n for du_dx_i, dv_dy_i, dw_dz_i in zip(du_dx, dv_dy, dw_dz)\n ]\n\n def add_factor(\n v,\n factor,\n ):\n return [factor * v_i for v_i in v]\n\n b_terms = {\n _B_TERM_SOURCE_RHO: add_factor(src_rho, inv_dt),\n }\n if isinstance(rho_info, ConstantDensityInfo):\n b_terms.update({\n _B_TERM_DIV:\n add_factor(\n div(rho_info.rho, states['u'], states['v'], states['w']),\n inv_dt * rho_info.rho),\n _B_TERM_DRHO_DT: [\n tf.zeros_like(src_rho_i) for src_rho_i in src_rho\n ],\n })\n\n elif isinstance(rho_info, VariableDensityInfo):\n b_terms.update({\n _B_TERM_DIV:\n add_factor(\n div(1.0, states['rho_u'], states['rho_v'], states['rho_w']),\n inv_dt),\n _B_TERM_DRHO_DT:\n add_factor(rho_info.drho_dt, inv_dt),\n })\n\n else:\n raise ValueError('`rho_info` has to be either `ConstantDensityInfo` or '\n '`VariableDensityInfo`.')\n\n # pylint: disable=g-complex-comprehension\n return [(div_i + drho_dt_i - src_rho_i)\n for div_i, drho_dt_i, src_rho_i in zip(\n b_terms[_B_TERM_DIV],\n b_terms[_B_TERM_DRHO_DT],\n b_terms[_B_TERM_SOURCE_RHO],\n )], b_terms\n # pylint: enable=g-complex-comprehension", "def get_lsf(wave_obs, sigma_v, speclib=\"miles\", zred=0.0, **extras):\n # filter out some places where sdss reports zero dispersion\n good = sigma_v > 0\n wave_obs, sigma_v = wave_obs[good], sigma_v[good]\n wave_rest = wave_obs / (1 + zred)\n\n # Get the library velocity resolution function at the corresponding\n # *rest-frame* wavelength\n if speclib == \"miles\":\n miles_fwhm_aa = 2.54\n sigma_v_lib = lightspeed * miles_fwhm_aa / 2.355 / wave_rest\n # Restrict to regions where MILES is used\n good = (wave_rest > 3525.0) & (wave_rest < 7500)\n elif speclib == \"c3k_a\":\n R_c3k = 3000\n sigma_v_lib = lightspeed / (R_c3k * 2.355)\n # Restrict to regions where C3K is used\n good = (wave_rest > 2750.0) & (wave_rest < 9100.0)\n else:\n sigma_v_lib = sigma_v\n good = slice(None)\n raise ValueError(\"speclib of type {} not supported\".format(speclib))\n\n # Get the quadrature difference\n # (Zero and negative values are skipped by FSPS)\n dsv = np.sqrt(np.clip(sigma_v**2 - sigma_v_lib**2, 0, np.inf))\n\n # return the broadening of the rest-frame library spectra required to match\n # the obserrved frame instrumental lsf\n return wave_rest[good], dsv[good]", "def fLinear(Vc1,Vc2,Vc3,Vk,Vw,Va,Vf,Pc1,Pc2,Pc3,Pk,Pw,Pa,Pf):\n#\n# 1. Normalise volumetric components:\n#\t-----------------------------------\n\tSum=abs(Vc1)+abs(Vc2)+abs(Vc3)+abs(Vk)+abs(Vw)+abs(Va)+abs(Vf)\n\tVc1=abs(Vc1)/Sum\n\tVc2=abs(Vc2)/Sum\n\tVc3=abs(Vc3)/Sum\n\tVk=abs(Vk)/Sum\n\tVw=abs(Vw)/Sum\n\tVa=abs(Va)/Sum\n\tVf=abs(Vf)/Sum\n#\n#\t2. Compute liear response function:\n#\t-----------------------------------\n\tLrf=Vc1*Pc1+Vc2*Pc2+Vc3*Pc3+Vk*Pk+Vw*Pw+Va*Pa+Vf*Pf\n#\n# 3. 
Output result:\n#\t-----------------\n\treturn Lrf", "def w_dispersion(q,v=1):\r\n # parameters for two-fluid hydrodynamic model from [1]\r\n Vol = np.sqrt(3)/2 * 4.63**2; # unit cell volume in graphene\r\n wr1= 4.08 / HARTREE; # Pi-electrons [eV]\r\n n1 = 2/Vol;\r\n wr2= 13.06 / HARTREE; # Sigma-electrons [eV]\r\n n2 = 6/Vol;\r\n \r\n # resonance frequencies\r\n w12 = wr1**2; # we neglect the acoustic velocity s=0\r\n w22 = wr2**2;\r\n\r\n # generalized plasma frequencies\r\n Q12 = 2*np.pi*n1*q * v ; # effective Omega_nu^2\r\n Q22 = 2*np.pi*n2*q * v ;\r\n\r\n # dispersion formula (17) in [1]\r\n A = 0.5*(w12 + Q12 + w22 + Q22);\r\n B = np.sqrt( 0.25*( w12 + Q12 - w22 - Q22 )**2 + Q12 * Q22 );\r\n\r\n return np.asarray([np.sqrt(A-B), np.sqrt(A+B)]);", "def calcLorentzGammaFromVelocity(self,direction):\n if direction not in self.v.order: \n raise CoordinateVector(\"The direction, \"+str(direction)+ \" needs to be one of \" +\",\".join(self.x.order) + \" to calculated the lorentz gamma.\")\n speed_light = constants.physical_constants[\"speed of light in vacuum\"][0]#m/sec by default\n return math.sqrt(1 /(1 - (getattr(self.v,direction)/speed_light)**2))", "def beta_fct(M_p, F_xuv, R_p):\n\n M_EARTH= const.M_earth.cgs.value\n R_EARTH = const.R_earth.cgs.value\n\n if (type(F_xuv) == float) or (type(F_xuv) == np.float64):\n # if F_xuv is single value\n grav_pot = -const.G.cgs.value * (M_p*M_EARTH) / (R_p*R_EARTH)\n log_beta = max(0.0, -0.185 * np.log10(-grav_pot)\n \t\t\t\t\t+ 0.021 * np.log10(F_xuv) + 2.42)\n beta = 10**log_beta\n return beta\n\n elif len(F_xuv) > 1:\n # if F_xuv is a list\n betas = []\n for i in range(len(F_xuv)):\n grav_pot_i = -const.G.cgs.value \\\n * (M_p[i]*M_EARTH) / (R_p[i]*R_EARTH)\n log_beta_i = max(0.0, -0.185 * np.log10(-grav_pot_i)\n \t\t\t\t\t + 0.021 * np.log10(F_xuv[i]) + 2.42)\n beta_i = 10**log_beta_i\n betas.append(beta_i)\n betas = np.array(betas)\n return betas", "def velocity_field(xt,yt,x0,y0,velf,dia,tsr,solidity):\n rad = dia/2.\n rot = tsr*velf/rad\n\n # Calculating EMG distribution parameters\n loc,spr,skw,scl = vorticity(tsr,solidity)\n \n # Translating the turbine position\n x0t = x0 - xt\n y0t = y0 - yt\n \n # Integration of the vorticity profile using Fortran code (vorticity.f90; _vortrun.so)\n vel_vs = dblquad(_vortmodel.integrand,0.,35.*dia,lambda x: -4.*dia,lambda x: 4.*dia, args=(x0t,y0t,dia,loc[0],loc[1],loc[2],spr[0],spr[1],skw[0],skw[1],scl[0],scl[1],scl[2]))\n \n # Calculating velocity deficit\n vel = (vel_vs[0]*(rot))/(2.*pi)\n vel = (vel + velf)/velf # normalization of velocity\n \n return vel", "def V_lopass(V, R_S, C, L, R_L, f):\n # current in circuit\n I = V/(R_S + Z_lopass(C, L, R_L, f))\n # voltage across circuit\n V_out = V - I*R_S\n I_C = V_out/Xcap(C, f)\n I_L = V_out/Z_low(L, R_L, f)\n V_L = I_L*R_L\n return V_L" ]
[ "0.63736004", "0.5998151", "0.5983169", "0.5946282", "0.58367145", "0.5795854", "0.5793172", "0.57542485", "0.571141", "0.57062334", "0.56781685", "0.5638643", "0.5626958", "0.5622531", "0.56085986", "0.5567615", "0.5557632", "0.5544373", "0.55408514", "0.5531132", "0.553042", "0.552939", "0.55273545", "0.5506578", "0.54752326", "0.5445424", "0.5427518", "0.5426323", "0.54248345", "0.54197603" ]
0.64296955
0
Set the parameters of the fit on SDSS DR4. Note: the values of slope and intercept are taken from the r-band orthogonal fit on SDSS DR4; see Table 2 of [1]_. References: .. [1] Hyde, Joseph B., and Mariangela Bernardi. "The luminosity and stellar mass Fundamental Plane of early-type galaxies."
def _define_SDSS_fit_params(self):
    self.a = 1.4335
    self.b = 0.3150
    self.c = -8.8979
    self.intrinsic_scatter = 0.0578
    #self.delta_a = 0.02
    #self.delta_b = 0.01
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _define_SLACS_fit_params(self):\n\t\t# Fit params from R_eff\n\t\tself.a = -0.41\n\t\tself.b = 0.39\n\t\t#self.delta_a = 0.12\n\t\t#self.delta_b = 0.10\n\t\tself.intrinsic_scatter = 0.14\n\t\t# Fit params from vel_disp\n\t\tself.a_v = 0.07\n\t\tself.b_v = -0.12\n\t\tself.int_v = 0.17", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Rdy[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rqcd[1,0.0,10.0]\");\n self.modelBuilder.doSet(\"POI\",\"Rdy,Rqcd\")", "def set_parameters(pars):\n y0=[]\n fun=None \n state_evol=None\n if pars[\"state_law\"]==0:\n state_evol=state_evol_d\n elif pars[\"state_law\"]==1:\n state_evol=state_evol_r\n elif pars[\"state_law\"]==2:\n state_evol=state_evol_p\n elif pars[\"state_law\"]==3:\n state_evol=state_evol_n\n \n if pars[\"model\"]==0:\n y0 = [pars[\"Vpl\"]*0.9,0.1,pars[\"sigma1\"]]\n fun = fun_qds\n damping = pars[\"nu\"]\n \n if pars[\"model\"]==1:\n y0 = [pars[\"Vpl\"]*0.9, 0.1,pars[\"sigma1\"],pars[\"sigma1\"]*pars[\"f0\"]]\n fun = fun_fds\n damping = pars[\"m\"]\n\n if pars[\"model\"]==2:\n y0 = [pars[\"Vpl\"]*0.99,pars[\"Vpl\"], pars[\"Vpl\"],0.1,pars[\"sigma1\"],pars[\"sigma2\"]]\n fun= fun_qdc\n damping = pars[\"nu\"]\n\n if pars[\"model\"]==3:\n y0 = [pars[\"Vpl\"]*1.1,pars[\"Vpl\"], pars[\"Vpl\"],0.0,pars[\"sigma1\"],pars[\"sigma2\"],pars[\"sigma1\"]*pars[\"f0\"]]\n fun = fun_fdc\n damping = pars[\"m\"]\n\n return (np.array(y0), state_evol, fun, damping)", "def setup_fitting_init_pars(inparam, night, band, masterbeam, order):\n\n # Determine whether IGRINS mounting was loose or\n # the night of interest is in question\n if (int(night) < 20180401) or (int(night) > 20190531):\n IPpars = inparam.ips_tightmount_pars[band][masterbeam][order]\n else:\n IPpars = inparam.ips_loosemount_pars[band][masterbeam][order]\n\n # start at bucket loc = 1250 +- 100, width = 250 +- 100,\n # depth = 100 +- 5000 but floor at 0\n centerloc = 1250 if band == 'H' else 1180\n\n # Initialize parameter array for optimization as well as half-range values\n # for each parameter during the various steps of the optimization.\n # Many of the parameters initialized here will be changed throughout the\n # code before optimization and in between optimization steps.\n\n parA0 = np.array([\n 0.0, # 0: The shift of the stellar template (km/s)\n 0.0, # 1: The scale factor for the stellar template\n 0.0, # 2: The shift of the telluric template (km/s)\n 1.0, # 3: The scale factor for the telluric template\n 0.0, # 4: vsini (km/s)\n IPpars[2], # 5: The instrumental resolution (FWHM) in pixels\n 0.0, # 6: Wavelength 0-pt\n 0.0, # 7: Wavelength linear component\n 0.0, # 8: Wavelength quadratic component\n 0.0, # 9: Wavelength cubic component\n 1.0, #10: Continuum zero point\n 0.0, #11: Continuum linear component\n 0.0, #12: Continuum quadratic component\n IPpars[1], #13: Instrumental resolution linear component\n IPpars[0], #14: Instrumental resolution quadratic component\n centerloc, #15: Blaze dip center location\n 330, #16: Blaze dip full width\n 0.05, #17: Blaze dip depth\n 90, #18: Secondary blaze dip full width\n 0.05, #19: Blaze dip depth\n 0.0, #20: Continuum cubic component\n 0.0, #21: Continuum quartic component\n 0.0, #22: Continuum quintic component\n 0.0, #23: Continuum hexic component\n 0.0, #24: secondary par\n 0.0, #25: secondary par\n 0.0, #26: secondary par\n 0.0 #27: secondary par\n ])\n\n return parA0", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Rdy[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rbk[1.,0.0,10.0]\");\n 
self.modelBuilder.doVar(\"Rqcd_emu[1,0.0,10.0]\");\n self.modelBuilder.doSet(\"POI\",\"Rbk,Rdy,Rqcd_emu\")", "def set_parameters(self, full=None, r=None, l=None, d=None, z=None):\n\n original = _deepcopy(self.parameters) # save in case of error\n\n if type(full) is bool:\n self.parameters[\"full\"] = full\n if type(r) in [int, float]:\n self.parameters[\"r\"] = float(r)\n if self._type == 2: # observation well\n if type(d) in [int, float]:\n self.parameters[\"d\"] = float(d)\n if type(l) in [int, float]:\n self.parameters[\"l\"] = float(l)\n else: # piezometer\n if type(z) in [int, float]:\n self.parameters[\"z\"] = float(z)\n\n flag, message = self.validate_parameters()\n if not flag:\n print(message)\n self.parameters.update(original)\n # End Function", "def setParameters(self, sx_sim=None):\n # TODO rething that ..\n #if sx_sim is not None:\n #if ds_model is not None:\n #if di_model is not None:\n self.sx_sim = sx_sim\n p = defaultParams(chord=self._chord, rho=self._rho, sx=self.sx_sim, ds=self.ds_model, di=self.di_model,\n M=self._M33, C=self._C33, K=self._K33)\n p['beta'] = self._beta\n if len(p['Iq'])==0:\n raise Exception('No states are present')\n\n # --- Dynamic inflow / induction\n p['a0'] = self._a0\n p['ap0'] = self._ap0\n p['di_tau1'] = self.di_tau1\n p['di_tau2'] = self.di_tau2\n\n # --- Aerodynamic parameters\n if self._y_AQ>0: \n print('[WARN] y_AQ positive is unconventional')\n p['y_AQ'] = self._y_AQ\n if self._y_AT is None:\n p['y_AT'] = self._y_AQ+self._chord/2 # default is approximatively half a chord behind\n else:\n p['y_AT'] = self._y_AT\n p['x_AQ'] = self._x_AQ\n p['x_AT'] = self._x_AT\n if self._ppol is None:\n raise Exception('Polar parameters need to be set')\n p.update(self._ppol)\n # # p.update({'linModel':False, 'drag':drag})\n\n self.p_sim = p", "def set_parameters(self, par):\n try:\n for l in self.cell.layers:\n r_curve = cmf.VanGenuchtenMualem(\n Ksat=10**par.pKsat, phi=par.porosity, alpha=par.alpha, n=par.n\n )\n r_curve.w0 = r_curve.fit_w0()\n l.soil = r_curve\n self.cell.saturated_depth = 0.5\n self.gw.potential = self.cell.z - 0.5\n except RuntimeError as e:\n sys.stderr.write(\"Set parameters failed with:\\n\" + str(par) + \"\\n\" + str(e))\n raise", "def set_params(self, params_):\n x_start, x_end = params_[\"lim_fit\"]\n self.find_idx_of_fit_limit(x_start, x_end)\n self.is_error_bar_for_fit = params_[\"use_error_bar\"]\n self.fitting_method1 = params_[\"method1\"]\n self.fitting_method2 = params_[\"method2\"]\n self.qty_to_min = params_[\"qty_to_min\"]\n\n for i, key in enumerate(self.params):\n # self.params[key].set(value=params_[\"val\"][i], min=params_[\"min\"][i], max=params_[\"max\"][i], vary=bool(params_[\"hold\"][i]), brute_step=params_[\"brute_step\"][i])\n if self.params[key].user_data is not None:\n if \"dontGenerate\" in self.params[key].user_data:\n continue\n self.params[key].set(value=params_[key][\"value\"], min=params_[key][\"min\"], max=params_[key][\"max\"], vary=params_[key][\"vary\"], brute_step=params_[key][\"b_step\"])", "def _set_leg_params(self):\n self.p = 0.01600\n self.q = 0.00000\n self.r = 0.02000\n self.c = 0.01811\n self.u = 0.00000\n self.v = 0.00000\n self.e = -0.06000\n self.h = -0.02820\n self.s = 0.02200\n self.d1 = 0.0\n self.d2 = 0.0\n self.d3 = 0.0\n self.stability = 0.0", "def set_parameters(self,params):\n K3Supervisor.set_parameters(self,params)\n self.blending.set_parameters(self.parameters)", "def create_design_params(self):\n self.design_params = np.array([self.r1, self.r2, self.d1, self.d2, self.Ixx, 
self.Iyy, self.Izz])", "def psdf_4(**kwargs):\n\n # fqlag parameters #\n n = 2**8\n dt = 1.0\n fql = np.array([.5/(dt*n), 1./dt])\n\n lc, extra = simulate_light_curves(n=n, dt=dt, nsim=100,\n input_psd=['broken_powerlaw', [1e-4, -1, -2, 3e-2]])\n\n model = ['bpl', [-5, -2, -3]]\n inP = extra['input_psd'][1]\n inP = [np.log(inP[0]), inP[2], np.log(inP[3])]\n fit_psdf(fql, model, lc, extra, '4', input_pars=inP)", "def dline_dSFR(**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n GR = glo.global_results(sim_run=p.sim_run,nGal=p.nGal)\n \n marker = 'o'\n if p.sim_run == p.sim_runs[0]: marker = '^'\n\n L_line = getattr(GR,'L_'+p.line+'_sun')#[380:400]#[0:100]\n SFR = getattr(GR,'SFR')#[380:400]#[0:100]\n M_star = getattr(GR,'M_star')#[380:400]#[0:100]\n Zsfr = getattr(GR,'Zsfr')#[380:400]#[0:100]\n R_gas = getattr(GR,'R2_gas')#[380:400]#[0:100]\n M_H2 = getattr(GR,'M_H2_R2_gas')#[380:400]#[0:100]\n\n SFR = SFR[L_line > 0]\n M_star = M_star[L_line > 0]\n Zsfr = Zsfr[L_line > 0]\n R_gas = R_gas[L_line > 0]\n M_H2 = M_H2[L_line > 0]\n L_line = L_line[L_line > 0]\n print('%i data points ' % (len(L_line)))\n\n # Distance from MS\n dlSFR = aux.distance_from_salim18(GR.M_star,GR.SFR)\n\n if p.add:\n ax = p.ax\n else:\n fig,ax = plt.subplots(figsize=(8,6))\n\n # Distance from observed relation\n L_obs,SFR_obs,fit,std = add_line_SFR_obs(p.line,[1e6,1e6],ax,plot=False,select=p.select)\n ldL_line = np.log10(L_line) - fit.predict(np.log10(SFR.reshape(-1, 1))).flatten()\n\n labs = {'_M10':'Mach=10 power-law',\\\n '_arepoPDF_ext':'AREPO parametric PDF with extinction',\\\n '_arepoPDF':'SIGAME v3',\\\n '_arepoPDF_CMZ':'SIGAME v3',\\\n '_arepoPDF_M51':'SIGAME v3'}\n lab = labs[p.table_ext]\n\n\n ax.text(0.05,0.9,p.line,transform=ax.transAxes,fontsize=13)\n ax.set_xlabel('log SFR - log SFR$_{MS,Salim+18}$')\n ax.set_ylabel('log L - log L$_{obs}$(SFR)')\n if not p.xlim: p.xlim = np.array([-3,3])\n if not p.ylim: \n p.ylim = [np.median(ldL_line) - 4,np.median(ldL_line) + 3]\n # if p.line == '[OI]63': p.ylim = [np.median(ldL_line) - 5,np.median(ldL_line) + 4]\n # if 'CO' in p.line: p.ylim = [np.median(ldL_line) - 4,np.median(ldL_line) + 4]\n\n ax.set_xlim(p.xlim)\n ax.set_ylim(p.ylim)\n ax.plot([0,0],ax.get_ylim(),'--k',lw=1)\n ax.plot(ax.get_xlim(),[0,0],'--k',lw=1)\n\n if p.select == 'Sigma_M_H2':\n Sigma_M_H2 = M_H2/(np.pi*R_gas**2)/1e6 # per pc^-2\n m = ax.scatter(dlSFR[np.argsort(Sigma_M_H2)],ldL_line[np.argsort(Sigma_M_H2)],marker=marker,s=14,\\\n c=np.log10(Sigma_M_H2[np.argsort(Sigma_M_H2)]),vmin=-2.5,vmax=2.2,label=lab,alpha=0.5,zorder=10)\n if p.cb:\n cbar = plt.colorbar(m,ax=ax)\n cbar.set_label(label=r'log $\\Sigma_{H2}$ [M$_{\\odot}$/pc$^2$]',size=15)", "def _set_parameters(self, parameters):\n self.parameters = parameters\n self._set_points_and_weights()", "def fit(self, skydip):\n parameter_order = ['tau', 'offset', 'kelvin', 'tsky']\n self.parameters = {}\n self.errors = {}\n self.p_opt = None\n self.p_cov = None\n self.fitted_values = None\n self.data = None\n self.sigma = None\n self.elevation = None\n\n log.debug(\"Initial skydip values:\")\n log.debug(f\" Tsky = {self.initial_guess['tsky']}\")\n log.debug(f\" offset = {self.initial_guess['offset']}\")\n log.debug(f\" kelvin = {self.initial_guess['kelvin']}\")\n log.debug(f\" tau = {self.initial_guess['tau']}\")\n\n if self.el_range is not None:\n from_bin = max(0, skydip.get_bin(self.el_range.min))\n to_bin = min(skydip.data.size, skydip.get_bin(self.el_range.max))\n else:\n from_bin = 0\n to_bin = 
skydip.data.size\n\n self.init_parameters(skydip)\n\n data = skydip.data[from_bin:to_bin]\n weight = skydip.weight[from_bin:to_bin]\n valid = weight > 0\n data = data[valid]\n weight = weight[valid]\n\n if self.uniform_weights:\n sigma = None\n else:\n sigma = 1 / weight\n\n elevation = skydip.get_elevation(\n np.nonzero(valid)[0]).to('radian').value\n\n self.use_points = data.size\n\n p0 = []\n lower_bounds = np.zeros(4, dtype=float)\n upper_bounds = np.zeros(4, dtype=float)\n\n for i, parameter in enumerate(parameter_order):\n value = self.initial_guess[parameter]\n p0.append(value)\n if parameter in self.fit_for:\n lower_bounds[i] = self.bounds[parameter][0]\n upper_bounds[i] = self.bounds[parameter][1]\n else: # An attempt to fix parameters with curve_fit\n eps = abs(value - np.nextafter(value, 1))\n lower_bounds[i] = value - eps\n upper_bounds[i] = value + eps\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', OptimizeWarning)\n p_opt, p_cov = curve_fit(self.value_at, elevation, data,\n p0=p0, sigma=sigma,\n bounds=(lower_bounds, upper_bounds))\n self.p_opt = p_opt\n self.p_cov = p_cov\n self.data = data\n self.elevation = elevation\n self.sigma = sigma\n\n self.has_converged = np.isfinite(p_opt).all()\n if not self.has_converged: # pragma: no cover\n log.warning(\"Skydip fit did not converge!\")\n errors = np.sqrt(np.diag(p_cov))\n\n for i, parameter in enumerate(parameter_order):\n self.parameters[parameter] = p_opt[i]\n self.errors[parameter] = errors[i]\n\n self.fitted_values = self.fit_elevation(elevation)\n fit_weights = None if sigma is None else weight ** 2\n\n t_obs_rms = np.sqrt(np.average((data - self.fitted_values) ** 2,\n weights=fit_weights))\n self.rms = t_obs_rms / self.parameters['kelvin']", "def set_fit_params(self):\n\n self.p0 = np.array([self.A_arr, self.T_a])\n # initial guess at A_arr and T_a\n\n self.popt, self.pcov = curve_fit(\n self.get_eta_fit, self.T_exp, self.eta_exp, p0=self.p0\n )\n\n self.A_arr = self.popt[0]\n self.T_a = self.popt[1]\n\n self.T_array = self.T_model", "def test_linear_fit_fixed_parameter(self):\n init_model = models.Polynomial1D(degree=2, c1=1)\n init_model.c1.fixed = True\n\n x = np.arange(10)\n y = 2 + x + 0.5 * x * x\n\n fitter = LinearLSQFitter()\n fitted_model = fitter(init_model, x, y)\n assert_allclose(fitted_model.parameters, [2.0, 1.0, 0.5], atol=1e-14)", "def initDataParms(self):\n self.xpos = self.pltw.curvelist[self.blkno].xvinfo.vidx\n self.data = self.pltw.blklst[self.blkno] # original data block\n self.idata = None # interpolated data\n (self.nvec, self.npt) = self.data.shape\n self.xmin = (self.data[self.xpos]).min()\n self.xmax = (self.data[self.xpos]).max()\n self.xspan = self.xmax - self.xmin\n if self.parent.test:\n self.dx = self.xspan / (self.npt * 5)", "def vary_fit(xvalues, yvalues, d_sample, r1_func, f_i, thetaS_i, phiS_i, phiS_max):\n params1 = Parameters()\n params1.add('ds', value=d_sample, vary=False)\n params1.add('thetaS', value=thetaS_i, min=0, max=d_sample)\n params1.add('f', value=f_i, min=3, max=300000)\n ## originally max was 1\n params1.add('phiS', value=phiS_i, min=0, max=phiS_max)\n params1.add('w', value=2.0/3.0, vary=False)\n params1.add('a', value=4.0/3.0, vary=False)\n ##originally thetaP, phiP had no minima\n params1.add('thetaP', expr='(ds*(1 + phiS*w*f + a*thetaS)-thetaS)/ \\\n ((1 - a*ds)*(phiS*w*f + a*thetaS)-(a*ds))')\n params1.add('phiP', expr='phiS*thetaP/thetaS')\n params1.add('c', expr='w*phiS*f/(1+w*phiS*f+thetaS*a)')\n params1.add('dp', 
expr='thetaP/(1+a*thetaP)')\n params1.add('dc', expr='thetaS/(1+a*thetaS)')\n minner1 = Minimizer(fcn2min, params1, fcn_args=(xvalues, yvalues, r1_func))\n try:\n fitres1 = minner1.minimize()\n except:\n fitres1 = None\n return fitres1", "def assign_model_parameters(self,xmax,zmax,dh,duration):\n self.model_parameters['xmax']=xmax\n self.model_parameters['zmax']=zmax\n self.model_parameters['dh']=dh\n self.model_parameters['duration']=duration", "def __SetSFParams(self):\n\n # If radial structure functions are in output\n if self.__containsRadial:\n # Defines radial attributes\n self.__nc_RSoft_O.radial_error_tolerance = self.etol_radial\n\n # Defines radial dimensions\n self.__nc_RSoft_O.createDimension('radial_structure_functions',\\\n len(self.mus))\n\n # Defines radial variables\n mus_var_id_O = self.__nc_RSoft_O.createVariable('mus', \\\n 'f4', ('radial_structure_functions'))\n Ls_var_id_O = self.__nc_RSoft_O.createVariable('Ls', \\\n 'f4', ('radial_structure_functions'))\n radial_Xs_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'radial_Xs', 'i4', ('radial_structure_functions'))\n radial_Ys_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'radial_Ys', 'i4', ('radial_structure_functions'))\n\n # Sets radial structure function variables\n mus_var_id_O[:] = self.mus\n Ls_var_id_O[:] = self.Ls\n radial_Xs_var_id_O[:] = self.radial_Xs\n radial_Ys_var_id_O[:] = self.radial_Ys\n\n # If angular structure functions are in output\n if self.__containsAngular:\n # Defines angular attributes\n self.__nc_RSoft_O.angular_error_tolerance = self.etol_angular\n\n # Defines angular dimensions\n self.__nc_RSoft_O.createDimension('angular_structure_functions',\\\n len(self.xis))\n\n # Defines angular variables\n xis_var_id_O = self.__nc_RSoft_O.createVariable('xis', \\\n 'f4', ('angular_structure_functions'))\n zetas_var_id_O = self.__nc_RSoft_O.createVariable('zetas', \\\n 'i4', ('angular_structure_functions'))\n lambdas_var_id_O = self.__nc_RSoft_O.createVariable('lambdas', \\\n 'i4', ('angular_structure_functions'))\n angular_Xs_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'angular_Xs', 'i4', ('angular_structure_functions'))\n angular_Ys_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'angular_Ys', 'i4', ('angular_structure_functions'))\n angular_Zs_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'angular_Zs', 'i4', ('angular_structure_functions')) \n\n # Sets angular structure function variables\n xis_var_id_O[:] = self.xis\n zetas_var_id_O[:] = self.zetas\n lambdas_var_id_O[:] = self.lambdas\n angular_Xs_var_id_O[:] = self.angular_Xs\n angular_Ys_var_id_O[:] = self.angular_Ys\n angular_Zs_var_id_O[:] = self.angular_Zs", "def test_linear_fit_2d_model_set_fixed_parameters(self):\n init_model = models.Polynomial2D(\n degree=2,\n c1_0=[1, 2],\n c0_1=[-0.5, 1],\n n_models=2,\n fixed={\"c1_0\": True, \"c0_1\": True},\n )\n\n x, y = np.mgrid[0:5, 0:5]\n zz = np.array([1 + x - 0.5 * y + 0.1 * x * x, 2 * x + y - 0.2 * y * y])\n\n fitter = LinearLSQFitter()\n fitted_model = fitter(init_model, x, y, zz)\n\n assert_allclose(fitted_model(x, y, model_set_axis=False), zz, atol=1e-14)", "def set_parameters(self, mode, data):\n if mode == 'design' or self.local_design:\n self.new_design = True\n\n for key, dc in self.variables.items():\n if isinstance(dc, dc_cp):\n if ((mode == 'offdesign' and not self.local_design) or\n (mode == 'design' and self.local_offdesign)):\n self.get_attr(key).design = data[key]\n\n else:\n self.get_attr(key).design = np.nan", "def set_parameters(self, L, r):\n self.L = L\n self.r 
= r", "def test_linear_fit_model_set_fixed_parameter(self):\n init_model = models.Polynomial1D(degree=2, c1=[1, -2], n_models=2)\n init_model.c1.fixed = True\n\n x = np.arange(10)\n yy = np.array([2 + x + 0.5 * x * x, -2 * x])\n\n fitter = LinearLSQFitter()\n fitted_model = fitter(init_model, x, yy)\n\n assert_allclose(fitted_model.c0, [2.0, 0.0], atol=1e-14)\n assert_allclose(fitted_model.c1, [1.0, -2.0], atol=1e-14)\n assert_allclose(fitted_model.c2, [0.5, 0.0], atol=1e-14)", "def set_shape_params(self, params):\n self.alpha = params[0]\n self.beta = params[1]\n self.gamma = params[2]\n self.c500 = params[3]\n self.P0 = params[4]", "def SetParams(ss, sheet, setMsg):\n if sheet == \"\":\n ss.Params.ValidateSheets(go.Slice_string([\"Network\", \"Sim\"]))\n ss.SetParamsSet(\"Base\", sheet, setMsg)\n if ss.ParamSet != \"\" and ss.ParamSet != \"Base\":\n sps = ss.ParamSet.split()\n for ps in sps:\n ss.SetParamsSet(ps, sheet, setMsg)\n if ss.Learn == LearnType.Hebbian:\n ss.SetParamsSet(\"Hebbian\", sheet, setMsg)\n elif ss.Learn == LearnType.ErrorDriven:\n ss.SetParamsSet(\"ErrorDriven\", sheet, setMsg)", "def _set_params(self,x):\r\n assert x.size == self.num_params\r\n self.variance = x[0]\r\n self.lengthscale = x[1:]", "def set_sgd_params(self, lr_1=0.01, lr_2=0.01, \\\n mom_1=0.9, mom_2=0.999):\n zero_ary = np.zeros((1,))\n # set learning rates\n new_lr_1 = zero_ary + lr_1\n self.lr_1.set_value(to_fX(new_lr_1))\n new_lr_2 = zero_ary + lr_2\n self.lr_2.set_value(to_fX(new_lr_2))\n # set momentums\n new_mom_1 = zero_ary + mom_1\n self.mom_1.set_value(to_fX(new_mom_1))\n new_mom_2 = zero_ary + mom_2\n self.mom_2.set_value(to_fX(new_mom_2))\n return" ]
[ "0.6524372", "0.5828944", "0.5815241", "0.5792441", "0.5774732", "0.5649416", "0.560434", "0.5591655", "0.55806255", "0.5562633", "0.5548635", "0.5524629", "0.5523357", "0.5509879", "0.5508611", "0.5453511", "0.544221", "0.54013795", "0.54003555", "0.53875685", "0.5369344", "0.5365295", "0.53618485", "0.53513116", "0.53484184", "0.53309566", "0.5307402", "0.5292875", "0.5277187", "0.52509415" ]
0.6969803
0
Set the parameters fit on the Sloan Lens Arcs Survey (SLACS) sample of 73 ETGs Note See Table 4 of [1]_ for the fit values, taken from the empirical correlation derived from the SLACS lens galaxy sample. References
def _define_SLACS_fit_params(self): # Fit params from R_eff self.a = -0.41 self.b = 0.39 #self.delta_a = 0.12 #self.delta_b = 0.10 self.intrinsic_scatter = 0.14 # Fit params from vel_disp self.a_v = 0.07 self.b_v = -0.12 self.int_v = 0.17
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _define_SDSS_fit_params(self):\n\t\tself.a = 1.4335\n\t\tself.b = 0.3150 \n\t\tself.c = -8.8979\n\t\tself.intrinsic_scatter = 0.0578\n\t\t#self.delta_a = 0.02\n\t\t#self.delta_b = 0.01", "def set_fit_params(self):\n\n self.p0 = np.array([self.A_arr, self.T_a])\n # initial guess at A_arr and T_a\n\n self.popt, self.pcov = curve_fit(\n self.get_eta_fit, self.T_exp, self.eta_exp, p0=self.p0\n )\n\n self.A_arr = self.popt[0]\n self.T_a = self.popt[1]\n\n self.T_array = self.T_model", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.7,0.7]\");\n self.modelBuilder.doVar(\"A0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"Afb,A0\")\n\n # ss templates\n self.modelBuilder.doVar(\"R_ee_os_fakes[0.6,0.0,1.0]\");\n self.modelBuilder.doVar(\"ee16_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee17_fakes_norm[1.0, 0.01, 10.]\");\n self.modelBuilder.doVar(\"ee18_fakes_norm[1.0, 0.01, 10.]\");\n #Remember, cant use spaces in these formulas!\n #self.modelBuilder.options.verbose = 10\n self.modelBuilder.factory_('expr::R_ee16_qcd_os(\"@0*@1\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_os(\"@0*@1\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_os(\"@0*@1\",ee18_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee16_qcd_ss(\"@0*(1.0-@1)\",ee16_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee17_qcd_ss(\"@0*(1.0-@1)\",ee17_fakes_norm,R_ee_os_fakes)')\n self.modelBuilder.factory_('expr::R_ee18_qcd_ss(\"@0*(1.0-@1)\",ee18_fakes_norm,R_ee_os_fakes)')\n \n self.modelBuilder.factory_('expr::Alph(\"2.0*@0/(2.0-@0)\",A0)')\n self.modelBuilder.factory_('expr::Norm(\"3.0/4.0/(2.0+@0)\",Alph)')\n self.modelBuilder.factory_('expr::RAlph(\"@0*@1\",Alph,Norm)')\n self.modelBuilder.factory_('expr::Rpl(\"(@0+@1)\",Norm,Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(@0-@1)\",Norm,Afb)')", "def fit(self, stimulus, ref, targets):\n self.pars, fval = fitSSL(stimulus, ref.samples, targets, ref.shape[1], ref.C)\n self.rsq = self._compute_coef_determination(stimulus, ref.samples, targets, ref.C)", "def fit_LuEd(self, wl, Ls, Lu, Ed, params, weights, verbose=True):\n\n\t\t\tdef min_funct(params):\n\t\t\t\tp = params.valuesdict() \n\t\t\t\n\t\t\t\tRrs_modelled, Rrs_refl, Lu_Ed_modelled = self.model(beta = p['beta'], alpha = p['alpha'], am = p['am'], rh = p['rh'], pressure = p['pressure'], C_chl = p['C_chl'], C_sm = p['C_sm'], C_mie = p['C_mie'], n_mie = p['n_mie'], C_y = p['C_y'], S_y = p['S_y'], T_w = p['T_w'], theta_sun = p['theta_sun'], theta_view = p['theta_view'], n_w = p['n_w'], rho_s = p['rho_s'], rho_dd = p['rho_dd'], rho_ds = p['rho_ds'], delta = p['delta'], wl = wl, a_w = self.spectra['a_w'].values, daw_dT = self.spectra['daw_dT'].values, astar_ph = self.spectra['astar_ph'].values, astar_y = self.spectra['astar_y'].values, Ls_Ed = Ls/Ed)\n\n\t\t\t\tRrs_obs = Lu/Ed - Rrs_refl\n\n\t\t\t\t# Least squares\n\t\t\t\tresid = np.sum((Lu_Ed_modelled - Lu/Ed)**2 * weights)\n\n\t\t\t\treturn resid, Rrs_modelled, Rrs_refl, Lu_Ed_modelled, Rrs_obs\n\n\t\t\tstart_time = time.time()\n\n\t\t\treg = lm.minimize(lambda x: min_funct(x)[0], params=params, method='lbfgsb', options={'disp': verbose, 'gtol': 1e-16, 'eps': 1e-07, 'maxiter': 15000, 'ftol': 1e-16, 'maxls': 20, 'maxcor': 20}) \n\n\t\t\tprint(\"--- %s seconds ---\" % (time.time() - start_time))\n\n\t\t\tresid, Rrs_modelled, Rrs_refl, Lu_Ed_modelled, Rrs_obs = min_funct(reg.params)\n\t\t\treg.params.add('resid', resid, 
False, 0.0, 100, None)\n\n\t\t\treturn reg, Rrs_modelled, Rrs_refl, Lu_Ed_modelled, Rrs_obs", "def setCSEParameters(csi:str, ri:str, rn:str) -> None:\n\t\t\tCSE.cseCsi = csi\n\t\t\tConfiguration.set('cse.csi', csi)\n\t\t\tCSE.cseRi = ri\n\t\t\tConfiguration.set('cse.ri', ri)\n\t\t\tCSE.cseRn = rn\n\t\t\tConfiguration.set('cse.rn', rn)", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Rdy[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rbk[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rqcd_emu[1,0.0,10.0]\");\n self.modelBuilder.doSet(\"POI\",\"Rbk,Rdy,Rqcd_emu\")", "def setup_lls_fit_analy(spec_fil, zlls, lls_windows, NHI_mnx, nNHI=100, spec_keys=None):\n # Init\n if spec_keys is None:\n spec_keys = dict(sig='ERROR', flux='FLUX', wave='WAVE')\n # Load up spectrum (Table and xspec)\n spec = Table.read(spec_fil)\n # Deal with NANs\n sig = spec[spec_keys['sig']].data.flatten()\n sig[np.isnan(sig)] = 0.\n xspec = XSpectrum1D.from_tuple((np.array(spec[spec_keys['wave']].data.flatten()),\n np.array(spec[spec_keys['flux']].data.flatten()),\n sig), masking='none')\n\n # Analysis pixels\n pixels = []\n for window in lls_windows:\n gdwv = np.where((xspec.wavelength >= window[0]*u.AA) &\n (xspec.wavelength <= window[1]*u.AA))[0]\n pixels.append(gdwv)\n gdwv = np.concatenate(pixels)\n\n # NHI\n NHI = np.linspace(NHI_mnx[0], NHI_mnx[1], num=nNHI)\n wv_rest = xspec.wavelength[gdwv] / (zlls+1)\n energy = wv_rest.to(u.eV, equivalencies=u.spectral())\n # Get photo_cross and calculate tau\n tau0 = (10.**NHI[0] / u.cm**2) * ltaa.photo_cross(1, 1, energy)\n\n # Return\n return spec, xspec, gdwv, NHI, tau0", "def cvstem(self):\n if (self.iEC == \"est\") and (len(sig(self.Cfun).parameters) == 1):\n fun1 = self.Cfun\n self.Cfun = lambda x,p: fun1(x)\n if (self.iEC == \"est\") and (len(sig(self.Gw).parameters) == 1):\n fun2 = self.Gw\n self.Gw = lambda x,p: fun2(x)\n if self.iEC == \"est\":\n self.c_over = self.matrix_2bound(self.Cfun)\n self.g_over = self.matrix_2bound(self.Gw)\n if (len(sig(self.Bw).parameters) == 1):\n fun3 = self.Bw\n self.Bw = lambda x,p: fun3(x)\n self.b_over = self.matrix_2bound(self.Bw)\n self.linesearch()\n alp = self.alp_opt\n Nx = self.Nx\n Nsplit = 1\n Np = int(Nx/Nsplit)\n Nr = np.remainder(Nx,Nsplit)\n xpmin = np.hstack((self.xlims[0,:],self.plims[0,:]))\n xpmax = np.hstack((self.xlims[1,:],self.plims[1,:]))\n Nxp = self.n+self.n_p\n xps = np.random.uniform(xpmin,xpmax,size=(Nx,Nxp))\n xs_opt,ps_opt,_ = np.hsplit(xps,np.array([self.n,Nxp]))\n Ws_opt = []\n chi_opt = 0\n nu_opt = 0\n print(\"========================================================\")\n print(\"====== SAMPLING OF CONTRACTION METRICS BY CV-STEM ======\")\n print(\"========================================================\")\n for p in range(Np):\n if np.remainder(p,int(Np/10)) == 0:\n print(\"# sampled metrics: \",p*Nsplit,\"...\")\n xs_p = xs_opt[Nsplit*p:Nsplit*(p+1),:]\n ps_p = ps_opt[Nsplit*p:Nsplit*(p+1),:]\n self.cvstem0(xs_p,ps_p,alp)\n Ws_opt += self.Ws\n if self.nu >= nu_opt:\n nu_opt = self.nu\n if self.chi >= chi_opt:\n chi_opt = self.chi\n if Nr != 0:\n print(\"# samples metrics: \",Nx,\"...\")\n xs_p = xs_opt[Nsplit*(p+1):Nx,:]\n ps_p = ps_opt[Nsplit*(p+1):Nx,:]\n self.cvstem0(xs_p,ps_p,alp)\n Ws_opt += self.Ws\n if self.nu >= nu_opt:\n nu_opt = self.nu\n if self.chi >= chi_opt:\n chi_opt = self.chi\n self.xs_opt = xs_opt\n self.ps_opt = ps_opt\n self.Ws_opt = Ws_opt\n self.chi_opt = chi_opt\n self.nu_opt = nu_opt\n if self.iEC == \"est\":\n self.Jcv_opt = 
(self.d1_over*self.b_over*np.sqrt(chi_opt)\\\n +self.d2_over*self.c_over*self.g_over*nu_opt)/alp\n print(\"Optimal steady-state estimation error =\",\\\n \"{:.2f}\".format(self.Jcv_opt))\n elif self.iEC == \"con\":\n self.Jcv_opt = self.d1_over*self.b_over*np.sqrt(chi_opt)/alp\n print(\"Optimal steady-state tracking error =\",\\\n \"{:.2f}\".format(self.Jcv_opt))\n else:\n raise ValueError('Invalid iEC: iEC = \"est\" or \"con\"')\n self.M2cholM()\n path = \"models/optvals/\"+self.fname\n if os.path.exists(path) == False:\n try:\n os.makedirs(path)\n except: \n raise OSError(\"Creation of directory %s failed\" %path)\n else:\n print (\"Successfully created directory %s \" %path)\n else:\n print (\"Directory %s already exists\" %path)\n np.save(path+\"/alp_opt.npy\",alp)\n np.save(path+\"/chi_opt.npy\",self.chi_opt)\n np.save(path+\"/nu_opt.npy\",self.nu_opt)\n np.save(path+\"/Jcv_opt.npy\",self.Jcv_opt)\n print(\"========================================================\")\n print(\"==== SAMPLING OF CONTRACTION METRICS BY CV-STEM END ====\")\n print(\"========================================================\\n\\n\")\n pass", "def _set_leg_params(self):\n self.p = 0.01600\n self.q = 0.00000\n self.r = 0.02000\n self.c = 0.01811\n self.u = 0.00000\n self.v = 0.00000\n self.e = -0.06000\n self.h = -0.02820\n self.s = 0.02200\n self.d1 = 0.0\n self.d2 = 0.0\n self.d3 = 0.0\n self.stability = 0.0", "def _fit_point_lens(self):\n\n def chi2_fun(theta, event, parameters_to_fit):\n \"\"\"\n for a given event set attributes from parameters_to_fit\n (list of str) to values from theta list\n \"\"\"\n for (key, val) in enumerate(parameters_to_fit):\n setattr(event.model.parameters, val, theta[key])\n chi2 = event.get_chi2()\n if chi2 < chi2_fun.best_chi2:\n chi2_fun.best_chi2 = chi2\n return chi2\n chi2_fun.best_chi2 = 1.e10\n\n def jacobian(theta, event, parameters_to_fit):\n \"\"\"\n Calculate chi^2 gradient (also called Jacobian).\n \"\"\"\n for (key, val) in enumerate(parameters_to_fit):\n setattr(event.model.parameters, val, theta[key])\n return event.chi2_gradient(parameters_to_fit)\n\n if self._event_PSPL is None:\n self._set_event_PSPL()\n\n parameters_to_fit = [\"t_0\", \"u_0\", \"t_E\"]\n initial_guess = [self._parameters[p] for p in parameters_to_fit]\n\n failed = False\n try:\n result = op.minimize(\n chi2_fun, x0=initial_guess,\n args=(self._event_PSPL, parameters_to_fit),\n method='Newton-CG', jac=jacobian, tol=3.e-4)\n except:\n failed = True\n\n if failed:\n try:\n result = op.minimize(\n chi2_fun, x0=initial_guess,\n args=(self._event_PSPL, parameters_to_fit),\n method='Newton-CG', jac=jacobian, tol=3.e-4)\n except:\n pass\n# XXX what if fit failed (i.e., .success is False)?\n\n self._LSST_PSPL_chi2 = chi2_fun.best_chi2", "def initialize(self, es):\n self.disregard_length_setting = True if es.opts['CSA_disregard_length'] else False\n if es.opts['CSA_clip_length_value'] is not None:\n try:\n if len(es.opts['CSA_clip_length_value']) == 0:\n es.opts['CSA_clip_length_value'] = [-np.Inf, np.Inf]\n elif len(es.opts['CSA_clip_length_value']) == 1:\n es.opts['CSA_clip_length_value'] = [-np.Inf, es.opts['CSA_clip_length_value'][0]]\n elif len(es.opts['CSA_clip_length_value']) == 2:\n es.opts['CSA_clip_length_value'] = np.sort(es.opts['CSA_clip_length_value'])\n else:\n raise ValueError('option CSA_clip_length_value should be a number of len(.) in [1,2]')\n except TypeError: # len(...) 
failed\n es.opts['CSA_clip_length_value'] = [-np.Inf, es.opts['CSA_clip_length_value']]\n es.opts['CSA_clip_length_value'] = list(np.sort(es.opts['CSA_clip_length_value']))\n if es.opts['CSA_clip_length_value'][0] > 0 or es.opts['CSA_clip_length_value'][1] < 0:\n raise ValueError('option CSA_clip_length_value must be a single positive or a negative and a positive number')\n ## meta_parameters.cs_exponent == 1.0\n b = 1.0\n ## meta_parameters.cs_multiplier == 1.0\n self.cs = 1.0 * (es.sp.weights.mueff + 2)**b / (es.N**b + (es.sp.weights.mueff + 3)**b)\n\n self.damps = es.opts['CSA_dampfac'] * (0.5 +\n 0.5 * min([1, (es.sp.lam_mirr / (0.159 * es.sp.popsize) - 1)**2])**1 +\n 2 * max([0, ((es.sp.weights.mueff - 1) / (es.N + 1))**es.opts['CSA_damp_mueff_exponent'] - 1]) +\n self.cs\n )\n self.max_delta_log_sigma = 1 # in symmetric use (strict lower bound is -cs/damps anyway)\n\n if self.disregard_length_setting:\n es.opts['CSA_clip_length_value'] = [0, 0]\n ## meta_parameters.cs_exponent == 1.0\n b = 1.0 * 0.5\n ## meta_parameters.cs_multiplier == 1.0\n self.cs = 1.0 * (es.sp.weights.mueff + 1)**b / (es.N**b + 2 * es.sp.weights.mueff**b)\n self.damps = es.opts['CSA_dampfac'] * 1 # * (1.1 - 1/(es.N+1)**0.5)\n if es.opts['verbose'] > 1:\n print('CMAAdaptSigmaCSA Parameters: ')\n for k, v in self.__dict__.items():\n print(' ', k, ':', v)\n self.ps = np.zeros(es.N)\n self._ps_updated_iteration = -1\n self.is_initialized = True", "def set_parameters(pars):\n y0=[]\n fun=None \n state_evol=None\n if pars[\"state_law\"]==0:\n state_evol=state_evol_d\n elif pars[\"state_law\"]==1:\n state_evol=state_evol_r\n elif pars[\"state_law\"]==2:\n state_evol=state_evol_p\n elif pars[\"state_law\"]==3:\n state_evol=state_evol_n\n \n if pars[\"model\"]==0:\n y0 = [pars[\"Vpl\"]*0.9,0.1,pars[\"sigma1\"]]\n fun = fun_qds\n damping = pars[\"nu\"]\n \n if pars[\"model\"]==1:\n y0 = [pars[\"Vpl\"]*0.9, 0.1,pars[\"sigma1\"],pars[\"sigma1\"]*pars[\"f0\"]]\n fun = fun_fds\n damping = pars[\"m\"]\n\n if pars[\"model\"]==2:\n y0 = [pars[\"Vpl\"]*0.99,pars[\"Vpl\"], pars[\"Vpl\"],0.1,pars[\"sigma1\"],pars[\"sigma2\"]]\n fun= fun_qdc\n damping = pars[\"nu\"]\n\n if pars[\"model\"]==3:\n y0 = [pars[\"Vpl\"]*1.1,pars[\"Vpl\"], pars[\"Vpl\"],0.0,pars[\"sigma1\"],pars[\"sigma2\"],pars[\"sigma1\"]*pars[\"f0\"]]\n fun = fun_fdc\n damping = pars[\"m\"]\n\n return (np.array(y0), state_evol, fun, damping)", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Rdy[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rqcd[1,0.0,10.0]\");\n self.modelBuilder.doSet(\"POI\",\"Rdy,Rqcd\")", "def BestFit(self,initialParameterValues = None, method = None, fixedParams=None):\n\n if fixedParams:\n if not isinstance(fixedParams, list):\n fixedParams=[fixedParams]\n #Check now if the name is correct\n l_index=[]\n for index, par in enumerate(fixedParams):\n pName, pValue = par\n if pName not in self.theory.parameterNameList0:\n print \"%s is not a valid name. 
Ignored\" %pName\n l_index.append(index)\n if l_index:\n for i in l_index:\n fixedParams.pop(i)\n\n self.theory.SetFixedParams(fixedParams)\n\n if initialParameterValues is None:\n initialParameterValues = self.theory.initialParameterValues\n #d = numpy.ones(len(initialParamaeterValues))\n start_time = time.time()\n if method is None or method == 'lm':\n out = scipy.optimize.minpack.leastsq(self.Residual,initialParameterValues,full_output=1, ftol=1.e-16)\n elif method == 'boldAccel':\n initialParameterValues=numpy.array(initialParameterValues)\n out = BoldAccel.leastsq(self.Residual,None,initialParameterValues,gtol=1e-8,xtol=1.49e-8,ftol=1e-16,full_output=1,ibold=0,verbose=True)\n elif method == 'bold':\n initialParameterValues = numpy.array(initialParameterValues)\n out = Bold.leastsq(self.Residual,None,initialParameterValues,gtol=1e-8,xtol=1.49e-8,ftol=1e-16,full_output=1,ibold=0,verbose=True)\n #out = minpack.leastsq(self.Residual,self.AnalyJac,initialParameterValues,gtol=1e-8,xtol=1.49e-8,ftol=1e-16,full_output=1,Cgoal=4.e04)\n elif method == 'lm_accel':\n initialParameterValues=numpy.array(initialParameterValues)\n out = numrec.leastsq(self.Residual,self.AnalyJac,initialParameterValues,full_output=1,verbose=True, flags=[],maxfev=500)\n else:\n print \"fitting method is not included\"\n out = None\n end_time = time.time()\n print \"fitting took (mins)\", (end_time-start_time)/60.\n print \"number of function evals:\", f_counter\n \n if fixedParams:\n outputParameterValues = self.MergeFixedAndVariableParams(fixedParams,out[0])\n self.theory.SetFixedParams()\n else:\n outputParameterValues = out[0]\n\n\n return outputParameterValues, out", "def makeFit(self):\n if not self.fitModel.params:\n return\n cs = self.spectrum\n self.worker.make_model_curve(cs, allData=csi.allLoadedItems)\n\n dfparams = cs.fitParams\n lcfRes = dfparams['lcf_result']\n self.fitR.setText('R={0:.5g}'.format(lcfRes['R']))\n self.updateFitResults()\n self.fitReady.emit()", "def BestFit(self,initialParameterValues=None, method=None , fixedParams=None):\n\n if fixedParams:\n if not isinstance(fixedParams, list):\n fixedParams=[fixedParams]\n #Check now if the name is correct\n l_index=[]\n for index, par in enumerate(fixedParams):\n pName, pValue = par\n if pName not in self.theory.parameterNameList0:\n print \"%s is not a valid name. 
Ignored\" %pName\n l_index.append(index)\n if l_index:\n for i in l_index:\n fixedParams.pop(i)\n self.SetFixedParams(fixedParams)\n\n if initialParameterValues is None:\n initialParameterValues = self.theory.initialParameterValues\n #d = numpy.ones(len(initialParameterValues))\n start_time = time.time()\n if method is None or method == 'lm':\n out = minpack.leastsq(self.Residual,initialParameterValues,full_output=1, ftol=1.e-16)\n elif method == 'boldAccel':\n initialParameterValues=numpy.array(initialParameterValues)\n out = BoldAccel.leastsq(self.Residual,None,initialParameterValues,gtol=1e-8,xtol=1.49e-8,ftol=1e-16,full_output=1,ibold=0,verbose=True)\n elif method == 'bold':\n initialParameterValues = numpy.array(initialParameterValues)\n out = Bold.leastsq(self.Residual,None,initialParameterValues,gtol=1e-8,xtol=1.49e-8,ftol=1e-16,full_output=1,ibold=0,verbose=True)\n #out = minpack.leastsq(self.Residual,self.AnalyJac,initialParameterValues,gtol=1e-8,xtol=1.49e-8,ftol=1e-16,full_output=1,Cgoal=4.e04)\n elif method == 'lm_accel':\n initialParameterValues=numpy.array(initialParameterValues)\n out = numrec.leastsq(self.Residual,self.AnalyJac,initialParameterValues,full_output=1,verbose=True, flags=[],maxfev=500)\n else:\n print \"fitting method is not included\"\n end_time = time.time()\n print \"fitting took time (mins): \", (end_time-start_time)/60.\n print \"number of function_calls:\", f_counter\n \n if fixedParams:\n outputParameterValues = self.MergeFixedAndVariableParams(fixedParams,out[0])\n else:\n outputParameterValues = out[0]\n\n return outputParameterValues, out", "def _set_model_parameters(self, verbose=False):\n from scipy.special import gamma\n\n z0 = self.z0\n\n # set parameters that are constants\n p_v, d_v, cs0, sigma, vout0 = (1, 2, 6.7, 0.1, 25.0)\n p_vB, d_vB, Mach0, p_M, d_M = (4, 2, 0.5, 1, 3)\n\n # calculate amplitudes that make the pdf integrate to 1\n A_v = np.log(10)*p_v/gamma(d_v/p_v)\n A_cs = np.log(10)/np.sqrt(2*np.pi)/sigma\n A_vB = np.log(10)*p_vB/gamma(d_vB/p_vB)\n A_M = np.log(10)*p_M/gamma(d_M/p_M)\n\n # store them in dictionaries\n self.cool_params = dict(A_v=A_v, p_v=p_v, d_v=d_v,\n A_cs=A_cs, cs0=cs0, sigma=sigma, vout0=vout0)\n self.hot_params = dict(A_vB=A_vB, p_vB=p_vB, d_vB=d_vB,\n A_M=A_M, Mach0=Mach0,p_M=p_M,d_M=d_M)\n # SN related parameters that set the reference values for loading factors\n self.params = dict(Esn=1.e51*au.erg, mstar=95.5*au.M_sun, vcool=200*au.km/au.s,\n Mej=10.*au.M_sun, ZSN=0.2, ZISM0=0.02)\n self.params['vej'] = np.sqrt(2.0*self.params['Esn']/self.params['Mej']).to('km/s')\n self.ref_params = dict(Mref=self.params['mstar'],\n pref=self.params['Esn']/(2*self.params['vcool']),\n Eref=self.params['Esn'],\n Zref=self.params['Mej']*self.params['ZSN'])\n\n # coefficients used in conversion from mass to other PDFs\n self.vp = (self.ref_params['pref']/self.params['mstar']).to('km/s').value\n self.vE = np.sqrt(self.ref_params['Eref']/self.params['mstar']).to('km/s').value\n self.Ze = (self.ref_params['Zref']/self.params['mstar']).cgs.value\n\n # parameters for scaling relations from Paper~I\n a = np.array(fit_alpha[z0])\n b = np.array(fit_beta[z0])\n\n self.scaling_params = dict(a=a, b=b)\n if z0 == '2H':\n self.cool_params['vout0'] = 45\n self.cool_params['cs0'] = 7.5\n elif z0 == '500':\n self.cool_params['vout0'] = 45\n self.cool_params['cs0'] = 8.5\n elif z0 == '1000':\n self.cool_params['vout0'] = 60\n self.cool_params['cs0'] = 10.0\n self.scaling_params['A'] = np.round(10.**(np.array(self.scaling_params['a'])),2)\n 
self.scaling_params['p'] = 1.+np.array(self.scaling_params['b'])\n self.enum=dict(M_cool=0, M_int=1, M_hot=2, M_total=3,\n p_cool=4, p_int=5, p_hot=6, p_total=7,\n E_cool=8, E_int=9, E_hot=10, E_total=11,\n Z_cool=12, Z_int=13, Z_hot=14, Z_total=15)\n\n # print parameters\n if verbose:\n self.show_parameters()", "def fit(self, samples, values, nopt=None, corr_model_params=None):\n from scipy.linalg import cholesky\n\n if self.verbose:\n print('UQpy: Running Kriging.fit')\n\n def log_likelihood(p0, cm, s, f, y):\n # Return the log-likelihood function and it's gradient. Gradient is calculate using Central Difference\n m = s.shape[0]\n n = s.shape[1]\n r__, dr_ = cm(x=s, s=s, params=p0, dt=True)\n try:\n cc = cholesky(r__ + 2 ** (-52) * np.eye(m), lower=True)\n except np.linalg.LinAlgError:\n return np.inf, np.zeros(n)\n\n # Product of diagonal terms is negligible sometimes, even when cc exists.\n if np.prod(np.diagonal(cc)) == 0:\n return np.inf, np.zeros(n)\n\n cc_inv = np.linalg.inv(cc)\n r_inv = np.matmul(cc_inv.T, cc_inv)\n f__ = cc_inv.dot(f)\n y__ = cc_inv.dot(y)\n\n q__, g__ = np.linalg.qr(f__) # Eq: 3.11, DACE\n\n # Check if F is a full rank matrix\n if np.linalg.matrix_rank(g__) != min(np.size(f__, 0), np.size(f__, 1)):\n raise NotImplementedError(\"Chosen regression functions are not sufficiently linearly independent\")\n\n # Design parameters\n beta_ = np.linalg.solve(g__, np.matmul(np.transpose(q__), y__))\n\n # Computing the process variance (Eq: 3.13, DACE)\n sigma_ = np.zeros(y.shape[1])\n\n ll = 0\n for out_dim in range(y.shape[1]):\n sigma_[out_dim] = (1 / m) * (np.linalg.norm(y__[:, out_dim] - np.matmul(f__, beta_[:, out_dim])) ** 2)\n # Objective function:= log(det(sigma**2 * R)) + constant\n ll = ll + (np.log(np.linalg.det(sigma_[out_dim] * r__)) + m * (np.log(2 * np.pi) + 1)) / 2\n\n # Gradient of loglikelihood\n # Reference: C. E. Rasmussen & C. K. I. Williams, Gaussian Processes for Machine Learning, the MIT Press,\n # 2006, ISBN 026218253X. 
(Page 114, Eq.(5.9))\n residual = y - np.matmul(f, beta_)\n gamma = np.matmul(r_inv, residual)\n grad_mle = np.zeros(n)\n for in_dim in range(n):\n r_inv_derivative = np.matmul(r_inv, np.matmul(dr_[:, :, in_dim], r_inv))\n tmp = np.matmul(residual.T, np.matmul(r_inv_derivative, residual))\n for out_dim in range(y.shape[1]):\n alpha = gamma / sigma_[out_dim]\n tmp1 = np.matmul(alpha, alpha.T) - r_inv / sigma_[out_dim]\n cov_der = sigma_[out_dim] * dr_[:, :, in_dim] + tmp * r__ / m\n grad_mle[in_dim] = grad_mle[in_dim] - 0.5 * np.trace(np.matmul(tmp1, cov_der))\n\n return ll, grad_mle\n\n if nopt is not None:\n self.nopt = nopt\n if corr_model_params is not None:\n self.corr_model_params = corr_model_params\n self.samples = np.array(samples)\n\n # Number of samples and dimensions of samples and values\n nsamples, input_dim = self.samples.shape\n output_dim = int(np.size(values) / nsamples)\n\n self.values = np.array(values).reshape(nsamples, output_dim)\n\n # Normalizing the data\n if self.normalize:\n self.sample_mean, self.sample_std = np.mean(self.samples, 0), np.std(self.samples, 0)\n self.value_mean, self.value_std = np.mean(self.values, 0), np.std(self.values, 0)\n s_ = (self.samples - self.sample_mean) / self.sample_std\n y_ = (self.values - self.value_mean) / self.value_std\n else:\n s_ = self.samples\n y_ = self.values\n\n self.F, jf_ = self.reg_model(s_)\n\n # Maximum Likelihood Estimation : Solving optimization problem to calculate hyperparameters\n if self.op:\n starting_point = self.corr_model_params\n minimizer, fun_value = np.zeros([self.nopt, input_dim]), np.zeros([self.nopt, 1])\n for i__ in range(self.nopt):\n p_ = self.optimizer(log_likelihood, starting_point, args=(self.corr_model, s_, self.F, y_),\n **self.kwargs_optimizer)\n minimizer[i__, :] = p_[0]\n fun_value[i__, 0] = p_[1]\n # Generating new starting points using log-uniform distribution\n if i__ != self.nopt - 1:\n starting_point = stats.reciprocal.rvs([j[0] for j in self.bounds], [j[1] for j in self.bounds], 1,\n random_state=self.random_state)\n if min(fun_value) == np.inf:\n raise NotImplementedError(\"Maximum likelihood estimator failed: Choose different starting point or \"\n \"increase nopt\")\n t = np.argmin(fun_value)\n self.corr_model_params = minimizer[t, :]\n\n # Updated Correlation matrix corresponding to MLE estimates of hyperparameters\n self.R = self.corr_model(x=s_, s=s_, params=self.corr_model_params)\n # Compute the regression coefficient (solving this linear equation: F * beta = Y)\n c = np.linalg.cholesky(self.R) # Eq: 3.8, DACE\n c_inv = np.linalg.inv(c)\n f_dash = np.linalg.solve(c, self.F)\n y_dash = np.linalg.solve(c, y_)\n q_, g_ = np.linalg.qr(f_dash) # Eq: 3.11, DACE\n # Check if F is a full rank matrix\n if np.linalg.matrix_rank(g_) != min(np.size(self.F, 0), np.size(self.F, 1)):\n raise NotImplementedError(\"Chosen regression functions are not sufficiently linearly independent\")\n # Design parameters (beta: regression coefficient)\n self.beta = np.linalg.solve(g_, np.matmul(np.transpose(q_), y_dash))\n\n # Design parameter (R * gamma = Y - F * beta = residual)\n self.gamma = np.linalg.solve(c.T, (y_dash - np.matmul(f_dash, self.beta)))\n\n # Computing the process variance (Eq: 3.13, DACE)\n self.err_var = np.zeros(output_dim)\n for i in range(output_dim):\n self.err_var[i] = (1 / nsamples) * (np.linalg.norm(y_dash[:, i] - np.matmul(f_dash, self.beta[:, i])) ** 2)\n\n self.F_dash, self.C_inv, self.G = f_dash, c_inv, g_\n\n if self.verbose:\n print('UQpy: Kriging fit complete.')", "def 
test_linear_fit_model_set_fixed_parameter(self):\n init_model = models.Polynomial1D(degree=2, c1=[1, -2], n_models=2)\n init_model.c1.fixed = True\n\n x = np.arange(10)\n yy = np.array([2 + x + 0.5 * x * x, -2 * x])\n\n fitter = LinearLSQFitter()\n fitted_model = fitter(init_model, x, yy)\n\n assert_allclose(fitted_model.c0, [2.0, 0.0], atol=1e-14)\n assert_allclose(fitted_model.c1, [1.0, -2.0], atol=1e-14)\n assert_allclose(fitted_model.c2, [0.5, 0.0], atol=1e-14)", "def __SetSFParams(self):\n\n # If radial structure functions are in output\n if self.__containsRadial:\n # Defines radial attributes\n self.__nc_RSoft_O.radial_error_tolerance = self.etol_radial\n\n # Defines radial dimensions\n self.__nc_RSoft_O.createDimension('radial_structure_functions',\\\n len(self.mus))\n\n # Defines radial variables\n mus_var_id_O = self.__nc_RSoft_O.createVariable('mus', \\\n 'f4', ('radial_structure_functions'))\n Ls_var_id_O = self.__nc_RSoft_O.createVariable('Ls', \\\n 'f4', ('radial_structure_functions'))\n radial_Xs_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'radial_Xs', 'i4', ('radial_structure_functions'))\n radial_Ys_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'radial_Ys', 'i4', ('radial_structure_functions'))\n\n # Sets radial structure function variables\n mus_var_id_O[:] = self.mus\n Ls_var_id_O[:] = self.Ls\n radial_Xs_var_id_O[:] = self.radial_Xs\n radial_Ys_var_id_O[:] = self.radial_Ys\n\n # If angular structure functions are in output\n if self.__containsAngular:\n # Defines angular attributes\n self.__nc_RSoft_O.angular_error_tolerance = self.etol_angular\n\n # Defines angular dimensions\n self.__nc_RSoft_O.createDimension('angular_structure_functions',\\\n len(self.xis))\n\n # Defines angular variables\n xis_var_id_O = self.__nc_RSoft_O.createVariable('xis', \\\n 'f4', ('angular_structure_functions'))\n zetas_var_id_O = self.__nc_RSoft_O.createVariable('zetas', \\\n 'i4', ('angular_structure_functions'))\n lambdas_var_id_O = self.__nc_RSoft_O.createVariable('lambdas', \\\n 'i4', ('angular_structure_functions'))\n angular_Xs_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'angular_Xs', 'i4', ('angular_structure_functions'))\n angular_Ys_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'angular_Ys', 'i4', ('angular_structure_functions'))\n angular_Zs_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'angular_Zs', 'i4', ('angular_structure_functions')) \n\n # Sets angular structure function variables\n xis_var_id_O[:] = self.xis\n zetas_var_id_O[:] = self.zetas\n lambdas_var_id_O[:] = self.lambdas\n angular_Xs_var_id_O[:] = self.angular_Xs\n angular_Ys_var_id_O[:] = self.angular_Ys\n angular_Zs_var_id_O[:] = self.angular_Zs", "def calibrate(self, master):\n if master.polyorder == 'linear':\n self.fitfunction = \"A0 + A1 * D\"\n self.fit_fkt = self.calc_lin\n elif master.polyorder == 'quadratic':\n self.fit_fkt = self.calc_quad\n self.fitfunction = \"A0 + A1 * D + A2 * D**2\"\n elif master.polyorder == \"cubic\":\n self.fitfunction = \"A0 + A1 * D + A2 * D**2 + A3 * D**3\"\n self.fit_fkt = self.calc_cubic\n else:\n print(\"Polynomgrad nicht definiert\")\n \n self.mw = np.asarray(self.mw)\n if master.sensortype == \"Druck\":\n self.best, self.covar = curve_fit(self.fit_fkt, self.mw, master.Referencedata.caldat)\n else:\n print(\"Sensortyp noch nicht Hinterlegt\")", "def fit(az, alt, data, lmax, label=None, degrees=True, realOnly=True):\n \n terms = mathutil.sphfit(az, alt, data, lmax=lmax, degrees=degrees, realOnly=realOnly)\n fit = mathutil.sphval(terms, az, alt, 
degrees=degrees, realOnly=realOnly)\n diff = data - fit\n \n if label is not None:\n print \" \"+str(label)\n print \" Peak Differences:\", data.max(), fit.max()\n print \" Model Differences:\", diff.min(), diff.mean(), diff.max()\n print \" Model RMS:\", (diff**2).sum()\n \n return terms", "def fit(self, resonance_file, experiment, out_paths):\n # Set up temporary file names #\n inp = temp_file_gen('Sammy_fit','inp')\n par = temp_file_gen('Sammy_fit','par')\n cov = temp_file_gen('Sammy_fit','cov')\n ndf = temp_file_gen('Sammy_fit','ndf')\n parout = temp_file_gen('Sammy_fit','out.par')\n covout = temp_file_gen('Sammy_fit','out.cov')\n #\n # Construct SAMMY input using resonance_file and information about the #\n # 'experiment' #\n self.endf2inp_par_ndf(resonance_file, [inp, par, ndf], \n experiment[1], flag_all = True)\n #\n # Change from MLBW formalism if this was in original file. #\n # Reich-Moore will be used instead, which is recommended. #\n self.modify_inp(inp, keyremove = ['mlbw formalism is wanted'])\n #\n # Fit to total cross section data without prior #\n message = self.g_least_squares(inp, par, experiment['total'],\n parout, covout)\n shutil.move(parout, par)\n shutil.move(covout, cov)\n #\n # Check if convergence was reached. Otherwise, something is bad. #\n if message[:len('Did not converge')] == 'Did not converge':\n raise RuntimeError(message)\n #\n # Perform a Beyesian update using capture data\n self.bayesian([inp, par, cov], experiment['capture'], [parout, covout])\n #\n # Construct ENDF formatted files from output #\n self.inp_par_ndf_cov2endfs([inp, parout, ndf, covout], out_paths)\n #\n # Include ENDF file paths in ResonanceFile instance to return\n resonance_file_out = ResonanceFile(out_paths[0], resonance_file.nuclide)\n resonance_file_out.cov = ResonanceCovFile(out_paths[1])\n #\n # Clean up\n if self.cleanup:\n for p in [inp, par, cov, ndf, parout, covout]: os.remove(p)\n #\n return resonance_file_out", "def fit_altscan_position(self,data,scan_maps):\n fname = data.filename.split('/')[-1]\n\n # We do Jupiter in the Az/El frame but celestial in sky frame\n if not 0 in self.feedlist:\n return \n self.model.set_fixed(**{})\n\n def limfunc(P):\n A,x0,sigx,y0,sigy,phi,B = P\n if (sigx < 0) | (sigy < 0):\n return True\n if (phi < -np.pi/2.) 
| (phi >= np.pi/2.):\n return True\n return False\n\n self.alt_scan_parameters = self.model.get_param_names()\n self.alt_scan_fits ={'CW':{'Values':np.zeros((self.model.nparams)),\n 'Errors':np.zeros((self.model.nparams)),\n 'Chi2': np.zeros((2))},\n 'CCW':{'Values':np.zeros((self.model.nparams)),\n 'Errors':np.zeros((self.model.nparams)),\n 'Chi2': np.zeros(2)}}\n for key in ['CW','CCW']:\n m,c,x,y,P0 = self.prepare_maps(scan_maps[key]['map'],scan_maps[key]['cov'],scan_maps[key]['xygrid'])\n\n freq = 30\n P0_priors = self.get_fwhm_prior(freq,1)\n # Perform the least-sqaures fit\n try:\n result, error,samples,min_chi2,ddof = self.model(P0, (x,y), m, c,\n P0_priors=P0_priors,return_array=True)\n self.alt_scan_fits[key]['Values'][:] = result\n self.alt_scan_fits[key]['Errors'][:] = error\n self.alt_scan_fits[key]['Chi2'][:] = min_chi2,ddof\n\n except ValueError as e:\n try:\n self.logger(f'{fname}:emcee:{e}',error=e)\n except TypeError:\n self.logger(f'{fname}:emcee:{e}')", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"mAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"mA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"eAfb,mAfb\")\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def config_specific_par(self, scifile, inp_par=None):\n # Start with instrument wide\n par = super().config_specific_par(scifile, inp_par=inp_par)\n\n if self.get_meta_value(scifile, 'idname') == 'OsirisMOS':\n par['reduce']['findobj']['find_trim_edge'] = [1,1]\n par['calibrations']['slitedges']['sync_predict'] = 'pca'\n par['calibrations']['slitedges']['det_buffer'] = 1\n elif self.get_meta_value(scifile, 'idname') == 'OsirisLongSlitSpectroscopy':\n # Do not tweak the slit edges for longslit\n par['calibrations']['flatfield']['tweak_slits'] = False\n\n # Wavelength calibration and setup-dependent parameters\n if self.get_meta_value(scifile, 'dispname') == 'R300B':\n par['calibrations']['wavelengths']['lamps'] = ['XeI,HgI,NeI']\n par['calibrations']['wavelengths']['reid_arxiv'] = 'gtc_osiris_R300B.fits'\n par['reduce']['findobj']['find_min_max'] = [750, 2051]\n par['calibrations']['slitedges']['det_min_spec_length'] = 0.25\n par['calibrations']['slitedges']['fit_min_spec_length'] = 0.25\n par['calibrations']['slitedges']['smash_range'] = [0.38, 0.62]\n par['calibrations']['flatfield']['slit_illum_finecorr'] = False\n par['reduce']['cube']['wave_min'] = 3600.0\n par['reduce']['cube']['wave_max'] = 7200.0\n elif self.get_meta_value(scifile, 'dispname') == 'R300R':\n par['calibrations']['wavelengths']['lamps'] = ['XeI,HgI,NeI']\n par['calibrations']['wavelengths']['reid_arxiv'] = 'gtc_osiris_R300R.fits'\n par['reduce']['findobj']['find_min_max'] = [750, 2051]\n par['calibrations']['slitedges']['det_min_spec_length'] = 0.25\n 
par['calibrations']['slitedges']['fit_min_spec_length'] = 0.25\n par['calibrations']['slitedges']['smash_range'] = [0.38, 0.62]\n par['calibrations']['flatfield']['slit_illum_finecorr'] = False\n par['reduce']['cube']['wave_min'] = 4800.0\n par['reduce']['cube']['wave_max'] = 10000.0\n elif self.get_meta_value(scifile, 'dispname') == 'R500B':\n par['calibrations']['wavelengths']['lamps'] = ['HgI,NeI']\n par['calibrations']['wavelengths']['reid_arxiv'] = 'gtc_osiris_R500B.fits'\n par['reduce']['findobj']['find_min_max'] = [500, 2051]\n par['reduce']['cube']['wave_min'] = 3600.0\n par['reduce']['cube']['wave_max'] = 7200.0\n elif self.get_meta_value(scifile, 'dispname') == 'R500R':\n par['calibrations']['wavelengths']['lamps'] = ['XeI,HgI,NeI']\n par['calibrations']['wavelengths']['reid_arxiv'] = 'gtc_osiris_R500R.fits'\n par['reduce']['findobj']['find_min_max'] = [450, 2051]\n par['reduce']['cube']['wave_min'] = 4800.0\n par['reduce']['cube']['wave_max'] = 10000.0\n elif self.get_meta_value(scifile, 'dispname') == 'R1000B':\n par['calibrations']['wavelengths']['lamps'] = ['ArI,HgI,NeI']\n par['calibrations']['wavelengths']['reid_arxiv'] = 'gtc_osiris_R1000B.fits'\n elif self.get_meta_value(scifile, 'dispname') == 'R1000R':\n par['calibrations']['wavelengths']['lamps'] = ['XeI,HgI,NeI']\n par['calibrations']['wavelengths']['reid_arxiv'] = 'gtc_osiris_R1000R.fits'\n elif self.get_meta_value(scifile, 'dispname') == 'R2000B':\n par['calibrations']['wavelengths']['fwhm'] = 15.0\n par['calibrations']['wavelengths']['lamps'] = ['XeI,HgI,NeI']\n par['calibrations']['wavelengths']['reid_arxiv'] = 'gtc_osiris_R2000B.fits'\n elif self.get_meta_value(scifile, 'dispname') == 'R2500U':\n par['calibrations']['wavelengths']['lamps'] = ['XeI,HgI']\n par['calibrations']['wavelengths']['reid_arxiv'] = 'gtc_osiris_R2500U.fits'\n elif self.get_meta_value(scifile, 'dispname') == 'R2500V':\n par['calibrations']['wavelengths']['lamps'] = ['HgI','NeI','XeI']\n par['calibrations']['wavelengths']['reid_arxiv'] = 'gtc_osiris_R2500V.fits'\n elif self.get_meta_value(scifile, 'dispname') == 'R2500R':\n par['calibrations']['wavelengths']['lamps'] = ['ArI,HgI,NeI']\n par['calibrations']['wavelengths']['reid_arxiv'] = 'gtc_osiris_R2500R.fits'\n elif self.get_meta_value(scifile, 'dispname') == 'R2500I':\n par['calibrations']['wavelengths']['lamps'] = ['ArI,XeI,NeI']\n par['calibrations']['wavelengths']['reid_arxiv'] = 'gtc_osiris_R2500I.fits'\n par['sensfunc']['algorithm'] = 'IR'\n par['sensfunc']['IR']['telgridfile'] = \"TelFit_MaunaKea_3100_26100_R20000.fits\"\n else:\n msgs.warn('gtc_osiris.py: template arc missing for this grism! Trying holy-grail...')\n par['calibrations']['wavelengths']['method'] = 'holy-grail'\n\n # Return\n return par", "def SetRSoftSF(self, etol_rad = None, mus = None, Ls = None, \\\n radial_Xs = None, radial_Ys = None, etol_ang = None, \\\n xis = None, lambdas = None, zetas = None, angular_Xs = None, \\\n angular_Ys = None, angular_Zs = None):\n # Initializes global cutoff radius\n Rc_global = 0\n\n # Checks if any radial inputs used. If so, if any parameters are \n # not None then throws an error assuming the user is confused. 
\n # Checks all inputs are valid.\n if any(v is None for v in [etol_rad, mus, Ls, radial_Xs, radial_Ys]):\n if any(v is not None for v in (etol_rad, mus, Ls, radial_Xs, \\\n radial_Ys)):\n print('ERROR: If radial structure functions are used, must ')\n print(' supply etol_rad, mus, Ls, radial_Xs, radial_Ys ')\n print(' to SetRSoftSF')\n sys.exit(-1)\n else:\n \n # Marks that it contains radial structure functions\n self.__containsRadial = True \n\n # Initializes radial structure function variables\n if etol_rad > 0 and etol_rad < 1: \n self.etol_radial = etol_rad\n else:\n print('ERROR: 0 < etol_rad < 1 used in SetRSoftSF')\n sys.exit(-1)\n if any(len(mus) != len(arr) for arr in (Ls, radial_Xs, \\\n radial_Ys)):\n print('ERROR: Length of mus, radial_Xs, and radial_Ys in ')\n print(' SetRSoftSF must be equal')\n sys.exit(-1)\n self.mus = mus\n self.Ls = Ls \n if np.all(np.mod(radial_Xs,1)==0):\n self.radial_Xs = radial_Xs.astype(int)\n else:\n print('ERROR: radial_Xs used in SetRSoftSF must be integers')\n sys.exit(-1)\n if np.all(np.mod(radial_Ys,1)==0):\n self.radial_Ys = radial_Ys.astype(int)\n else:\n print('ERROR: radial_Ys used in SetRSoftSF must be integers')\n sys.exit(-1)\n\n # Outputs radial cut-off radii\n print('Calculating radial cutoff...')\n Rc_max = 0.0\n for SF in range(len(mus)):\n mu = mus[SF]\n L = Ls[SF]\n X = radial_Xs[SF]\n Y = radial_Ys[SF]\n Rc = mu+L*sqrt(log(1/etol_rad))\n print(' mu='+str(mu)+', L='+str(L)+', X='+str(X)+', Y='+\\\n str(Y)+' --> Rc='+str(Rc))\n if Rc > Rc_max:\n Rc_max = Rc \n print('Rc_radial='+str(Rc_max))\n print(' ')\n print('--------------------------------------------------------')\n if Rc_max > Rc_global:\n Rc_global = Rc_max\n\n # Checks if any angular inputs used. If so, if any parameters are \n # not None then throws an error assuming the user is confused. 
\n # Checks all inputs are valid.\n if any(v is None for v in [etol_ang, xis, lambdas, angular_Xs, \n angular_Ys, angular_Zs]):\n if any(v is not None for v in (etol_ang, xis, lambdas, zetas, \\\n angular_Xs, angular_Ys, angular_Zs)):\n print('ERROR: If angular structure functions are used, must ')\n print(' supply etol_ang, xis, lambdas, zetas, angular_Xs,')\n print(' angular_Ys, angular_Zs')\n print(' to SetRSoftSF')\n sys.exit(-1)\n else:\n\n # Marks that contains angular structure functions\n self.__containsAngular = True \n\n # Initializes angular structure function variables\n if etol_ang > 0 and etol_ang < 1: \n self.etol_angular = etol_ang\n else:\n print('ERROR: 0 < etol_ang < 1 used in SetRSoftSF')\n sys.exit(-1)\n if any(len(xis) != len(arr) for arr in (lambdas, zetas, \\\n angular_Xs, angular_Ys, angular_Zs)):\n print('ERROR: Length of xis, zetas, angular_Xs, angular_Ys, ')\n print(' and angular_Zs in SetRSoftSF must be equal')\n sys.exit(-1)\n self.xis = xis\n if np.all(np.abs(lambdas)==1):\n self.lambdas = lambdas\n else:\n print('ERROR: lambdas used in SetRSoftSF must be +/-1')\n sys.exit(-1)\n if np.all(np.mod(zetas,1)==0):\n self.zetas = zetas.astype(int)\n else:\n print('ERROR: angular_Xs used in SetRSoftSF must be integers')\n sys.exit(-1)\n if np.all(np.mod(angular_Xs,1)==0):\n self.angular_Xs = angular_Xs.astype(int)\n else:\n print('ERROR: angular_Xs used in SetRSoftSF must be integers')\n sys.exit(-1)\n if np.all(np.mod(angular_Ys,1)==0):\n self.angular_Ys = angular_Ys.astype(int)\n else:\n print('ERROR: angular_Ys used in SetRSoftSF must be integers')\n sys.exit(-1)\n if np.all(np.mod(angular_Zs,1)==0):\n self.angular_Zs = angular_Zs.astype(int)\n else:\n print('ERROR: angular_Zs used in SetRSoftSF must be integers')\n sys.exit(-1)\n\n # Outputs radial cut-off radii\n print('Calculating angular cutoff...')\n Rc_max = 0.0\n for SF in range(len(xis)):\n xi = xis[SF]\n l = lambdas[SF]\n zeta = zetas[SF]\n X = angular_Xs[SF]\n Y = angular_Ys[SF]\n Z = angular_Zs[SF]\n if l==1:\n Rc = xi*sqrt(2.0*log(1.0/etol_ang)/3.0)\n else:\n Rc = xi*sqrt(log(1.0/etol_ang)/2.0)\n print(' xi='+str(xi)+', lambda='+str(l)+', zeta='+str(zeta)+\\\n ', X='+str(X)+', Y='+str(Y)+', Z='+str(Z)+' --> Rc='+str(Rc))\n if Rc > Rc_max:\n Rc_max = Rc \n print('Rc_angular='+str(Rc_max))\n print(' ')\n print('--------------------------------------------------------')\n if Rc_max > Rc_global:\n Rc_global = Rc_max\n\n # Sets structure functions into netCDF file\n self.__SetSFParams()\n\n print('Rc='+str(Rc_global))", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"dAfb[0.,-0.75,0.75]\");\n self.modelBuilder.doVar(\"dA0[0.0, -1.0, 1.0]\");\n #self.modelBuilder.doSet(\"POI\",\"dAfb,dA0\")\n self.modelBuilder.doSet(\"POI\",\"dAfb\")\n self.modelBuilder.factory_('expr::mAfb(\"@0+@1\",eAfb,dAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0+@1)\",eA0,dA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n 
self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def SetParams(ss, sheet, setMsg):\n if sheet == \"\":\n ss.Params.ValidateSheets(go.Slice_string([\"Network\", \"Sim\"]))\n ss.SetParamsSet(\"Base\", sheet, setMsg)\n if ss.ParamSet != \"\" and ss.ParamSet != \"Base\":\n sps = ss.ParamSet.split()\n for ps in sps:\n ss.SetParamsSet(ps, sheet, setMsg)\n if ss.Learn == LearnType.Hebbian:\n ss.SetParamsSet(\"Hebbian\", sheet, setMsg)\n elif ss.Learn == LearnType.ErrorDriven:\n ss.SetParamsSet(\"ErrorDriven\", sheet, setMsg)" ]
[ "0.66335446", "0.6191415", "0.59800696", "0.58964324", "0.5816966", "0.5668345", "0.56514496", "0.5646242", "0.5645791", "0.561993", "0.56091666", "0.55902165", "0.55819917", "0.55689085", "0.5561756", "0.5520965", "0.5512555", "0.5506377", "0.550578", "0.549302", "0.54860383", "0.5479055", "0.5478156", "0.5470998", "0.5431236", "0.53811806", "0.537957", "0.5372441", "0.535648", "0.5354056" ]
0.750283
0
Sample (one minus) the axis ratio of the lens galaxy from the Rayleigh distribution with scale that depends on velocity dispersion
def get_axis_ratio(self, vel_disp): scale = self.a*vel_disp + self.b q = 0.0 while q < self.lower: q = 1.0 - np.random.rayleigh(scale, size=None) return q
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getRatio(probe_num, position_vector, shot_range, dir, day ='050119r'):\n ratio_x = 0\n ratio_y = 0\n ratio_z = 0\n # helm_B = [0,0,0]\n divideby = 0\n for shot in range(shot_range[0], shot_range[1]+1):\n print( 'On shot ', day+str(shot), ' for probe ',probe_num)\n x,y,z, currmax,helmB_new = probe_calib(day+str(shot), probe_num, position_vector,dir)\n ratio_x = ratio_x + x\n ratio_y = ratio_y + y\n ratio_z = ratio_z + z\n # helm_B = [helm_B[i] + helmB_new[i] for i in len(helmB)]\n divideby = divideby + 1 #averaging over the number of shots\n ratio_Bx = ratio_x/divideby\n ratio_By = ratio_y/divideby\n ratio_Bz = ratio_z/divideby\n # helmB = [helm_B]/divideby\n # print ratio_Bx, ratio_By, ratio_Bz, helmB\n # print(\"ratio_Bx %f, ratio_By %f, ratio_Bz %f, helmB%s\"%(ratio_Bx, ratio_By, ratio_Bz, helmB))\n Bx_sqr =ratio_x**2\n By_sqr =ratio_y**2\n Bz_sqr =ratio_z**2\n B = Bx_sqr + By_sqr+ Bz_sqr\n norm_factor = np.sqrt(B)\n ratio_Bx, ratio_By, ratio_Bz = [ratio_Bx, ratio_By, ratio_Bz]/norm_factor\n\n return (ratio_Bx, ratio_By, ratio_Bz, norm_factor)", "def get_scale():\r\n\r\n \r\n return 0.5", "def level_mags(slide):\n return [highest_mag(slide)/downsample for downsample in slide.level_downsamples]", "def scale(self):\n return self.distribution.scale", "def rvs(self):\n return float(self.interp(random.rand()))", "def get_mag_for_size(slide, size):\n max_size = slide.dimensions\n max_mag = highest_mag(slide)\n downsample = np.average([max_dim/size_dim for max_dim, size_dim in zip(max_size, size)])\n return max_mag/downsample", "def density(self):\r\n return self.count_ones() / float(self.xspan * self.yspan)", "def getScale(self):\n return self.factor**self.turnOn", "def GetScale(self):\n ...", "def v_multiplier(self):\n return (4./3)*np.pi*(self.bins[:, 1]/2)**3", "def rho_spaxel_scale(spaxel_scale=4.0, wavelength=1.0):\n\n scale_rad = spaxel_scale / MILIARCSECS_IN_A_RAD\n rho = scale_rad * ELT_DIAM / (wavelength * 1e-6)\n return rho", "def naturalAspectRatio(self):\n return math.sin(self.view_angle_h) / math.sin(self.view_angle_v)", "def sample_radii(size=1):\n interp_func = InterpolatedUnivariateSpline(m_grid, np.log(r_grid), k=1)\n return np.exp(interp_func(np.random.uniform(0, 1, size=size))) * u.kpc", "def rvs(self, size: int) -> np.ndarray:\n return np.random.randn(size, self.ndim) * self.scales + self.means", "def rscale(mag=10.0):\n if mag > 11.5:\n return 0.5\n elif mag > 11.0:\n return 1.0\n elif mag > 10.5:\n return 1.5\n elif mag > 10.0:\n return 1.5\n elif mag > 9.5:\n return 2.0\n elif mag > 9.0:\n return 2.5\n elif mag > 8.5:\n return 3.0\n else:\n return 3.5", "def scale(self):\n return self._gev_bijector.scale", "def calculateRatio(levelDims):\n highestReso = np.asarray(levelDims[0])\n lowestReso = np.asarray(levelDims[-1])\n Xratio, Yratio = highestReso/lowestReso\n return (Xratio, Yratio)", "def sphvol(r):\n return (4./3.)*np.pi*(r**3.)", "def adv_ratio(self): # XXX\r\n bw = StatsRouter.global_bw_mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/bw", "def scale(self):", "def sphere_volume(r):\n\treturn 4/3. 
* math.pi * r ** 3", "def scale(self):\n return self.scale_factor / CONSTANTS.AU", "def freq_optimization(self):\n index = identify_scale(self.vz, True)\n # In case the patient is limping\n if index > 35:\n index = index / 2\n print(f\"Scale used is {index}\")", "def adapt_length_scale(self):\n Ne = max(1,self.Ne)\n Nc = max(1,self.Nc)\n ratio = Ne/(Ne+Nc)\n self.mu *= 2*ratio", "def volume_unit_ball(d_dimensions: int, norm=2) -> float:\n\n # get ball\n if norm == 0:\n b = float(\"inf\")\n elif norm == 1:\n b = 1.0\n elif norm == 2:\n b = 2.0\n else:\n raise ValueError(f\"Unrecognized norm: {norm}\")\n\n return (np.pi ** (0.5 * d_dimensions)) ** d_dimensions / gamma(b / d_dimensions + 1)", "def random_vector_in_unit_ball():\n x = np.random.normal(loc=0.0, scale=1.0, size=(numSamples, self.dim))\n z = np.random.exponential(scale=1.0, size=(numSamples,))\n d = (np.sum(np.square(x), axis=1) + z) ** 0.5\n d = d[:, np.newaxis]\n return x / d", "def s_multiplier(self):\n return 4 * np.pi * (self.bins[:, 1]/2)**2", "def ret_vol_ratio(self) -> float:\n return self.geo_ret / self.vol", "def get_mpg():\n return uniform(20.0, 50.0)", "def ratio_4_doc(shot, dir, num_probes = 16):\n # data = [[0] *3 for i in range(num_probes)]\n # magdata = hdr.getMagData(shot)\n probe_locs = get_probeLocs_calib_setup(shot)\n data=hdr.getquikData(shot)\n time,eastcurrent,westcurrent = loadcurrent(shot)#using eastcurrent\n ratios = [[0]*3 for i in range(num_probes)]\n for probe in range(num_probes):\n ratio =1\n inverted = False\n # fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True)\n B=sp.signal.detrend(cumtrapz(data.unCalibData[dir,probe,:], data.time))\n plot_time = data.time[:-1]\n if(np.max(B[2000:6000]) < abs(np.average(B[2000:6000]))):\n # print(\"\\ninverted!\")\n inverted = True\n # B = B* -1\n # ratio = -1\n\n r = probe_locs[probe]\n max_current = polyPeak_noPlot(time,eastcurrent)\n # if(np.max(eastcurrent) < -1*(np.min(eastcurrent))):\n # max_current = -1*np.min(eastcurrent)\n helmB = helmholtz2(r,max_current)\n\n # THis is intentional! I am only using shots where the cmponent is lined\n # up with the z-direction of the helmholz field\n # helmB[2] = helmB[2]*-1\n max_theoretical = np.max(helmB[2])\n max_measured = polyPeak_noPlot(plot_time, B)\n\n\n ratio = ratio * max_theoretical/max_measured\n if ratio > 30000 or ratio < -30000:\n ratio = 0\n\n\n ratios[probe][dir] = ratio\n # print(\"\\tRatio is: %f\" %(ratio))\n # if(inverted and ratio <0):\n # print(\"Inverted and ratio reflects that\")\n # elif(not inverted and ratio <0):\n if probe ==1:\n print(\"\\n Ratio: %5f \\n\\t max_measured: %3f, \\n\\t max_theoretical: %5f\"%(ratio,max_measured,max_theoretical ) )\n\n # Compute the median of the non-zero elements\n # m = np.median(foo[foo > 0])\n # Assign the median to the zero elements\n # foo[foo == 0] = m\n return ratios" ]
[ "0.5984484", "0.5951197", "0.5821933", "0.5732663", "0.57284033", "0.567641", "0.56427747", "0.56382126", "0.5614669", "0.56115395", "0.5601904", "0.5573603", "0.5534662", "0.55199206", "0.54970366", "0.54887855", "0.5470825", "0.5469452", "0.5453029", "0.54443336", "0.54377395", "0.54361856", "0.5432325", "0.54116726", "0.5410369", "0.54055005", "0.54039025", "0.54000986", "0.53984237", "0.5394167" ]
0.7055274
0
Sample the AGN luminosity from the redshift-binned luminosity function
def sample_agn_luminosity(self, z): # Assign redshift bin is_less_than_right_edge = (z < self.z_bins) alpha = self.alphas[is_less_than_right_edge][0] beta = self.betas[is_less_than_right_edge][0] M_star = self.M_stars[is_less_than_right_edge][0] # Evaluate function pmf = self.get_double_power_law(alpha, beta, M_star) # Sample luminosity sampled_M = np.random.choice(self.M_grid, None, replace=True, p=pmf) return sampled_M
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_luminosity(red, green, blue):\r\n return (0.299 * red) + (0.587 * green) + (0.114 * blue)", "def luminance(self):\n \n return (self.r + self.g + self.b) // 3", "def compute_radiocore_luminosity(MBH, L_AGN):\n\tL_X = bolcorr_hardX(L_AGN)\n\tm = log10(MBH / u.Msun)\n\t# Merloni, Heinz & Di Matteo (2003)\n\tlogLR = 0.6 * log10(L_X/(u.erg/u.s)) + 0.78 * m + 7.33\n\treturn 10**logLR * u.erg/u.s", "def sRGBLuminance(x):\n lin=linearFromsRGB3(x)\n return lin[0]*0.2126+lin[1]*0.7152+lin[2]*0.0722", "def average_luminosity(self, delta=1e-10):\n cumsum = 0.0\n for pix in self.pixels:\n cumsum += math.log10(delta + pix.luminosity())\n\n return math.pow(10, cumsum / len(self.pixels))", "def luminance(rgb):\n \n (r, g, b) = rgb\n return (r + g + b) // 3", "def get_luminosity(self):\n\n if self.no_dist is False and self.no_flux is False:\n\n dist = self.distance\n snu = self.snu_at_1GHz\n lum = lumin(dist, snu)\n\n self.lum = lum\n else:\n self.lum = -1 # use -1 to indicate unknown luminosity\n return self.lum", "def get_luminosity(self):\n\n h, l, s = colorsys.rgb_to_hls(self.r, self.g, self.b)\n return l", "def pixelLuminance (r, g, b):\n assert (type(r) == int and type(g) == int and type(b) == int)\n assert (0<=r<=255 and 0<=g<=255 and 0<=b<=255)\n return roundHalfUp((.2126*r)+(.7152*g)+(.0722*b))", "def Luminosity(self):\n try:\n L = (self.E*self.Weight).sum()\n N = self.E.count()\n except:\n L = self.E.sum()\n N = self.E.count()\n return L, L/np.sqrt(N)", "def loadLuminosityFunction(self):\n\n tab = np.genfromtxt(self.fname[0], skip_header=self.skip_header)\n if not self.evolve:\n self.luminosity_function = np.zeros((tab.shape[0], self.nbands, self.nzbins))\n\n else:\n self.luminosity_function = np.zeros((tab.shape[0], self.nbands, 1))\n\n if self.ecol is not None:\n self.ye = np.zeros(self.luminosity_function.shape)\n imult = 1\n else:\n self.ye = None\n imult = 2\n\n self.magmean = tab[:,self.xcol]\n\n if self.nzbins==1:\n for i in range(self.nzbins):\n for j in range(self.nbands):\n self.luminosity_function[:,j,i] = tab[:,self.ycol]\n if self.ecol is not None:\n self.ye[:,j,i] = tab[:,self.ecol]\n else:\n if not self.evolve:\n assert((tab.shape[1]-1)==self.nzbins)\n for i in range(self.nzbins):\n for j in range(self.nbands):\n self.luminosity_function[:,j,i] = tab[:,i*imult+self.ycol]\n if self.ecol is not None:\n self.ye[:,j,i] = tab[:,i*imult+self.ecol]\n else:\n for j in range(self.nbands):\n self.luminosity_function[:,j,0] = tab[:,self.ycol]\n if self.ecol is not None:\n self.ye[:,j,0] = tab[:,self.ecol]\n\n self.xmean = self.magmean\n self.y = self.luminosity_function", "def GetLuminance(self):\n return _itkRGBAPixelPython.itkRGBAPixelUS_GetLuminance(self)", "def luminance(self, color):\n return 0.2426 * color[2] + 0.7152 * color[1] + 0.0722 * color[0]", "def luminosity(r,T,autoDebug=True):\n\t#-----------BEGIN ERROR CHECKING----------\n\tif autoDebug:\n\t\tsam.type_check(r, sam.TYPES_math, \"r\")\n\t\tsam.type_check(T, sam.TYPES_math, \"T\")\n\t\tsam.value_check(r,.0,\">\",\"r\")\n\t\tsam.value_check(T,.0,\">\",\"T\")\n\t#-----------END ERROR CHECKING----------\n\n\tL = 4 * sam.CONSTANT_pi * r**2 * sam.CONSTANT_SB* T**4\n\treturn L", "def sRGBGrayscale(x):\n rellum=sRGBLuminance(x)\n return [rellum,rellum,rellum]", "def GetLuminance(self):\n return _itkRGBAPixelPython.itkRGBAPixelUC_GetLuminance(self)", "def calculate_lux(r: int, g: int, b: int) -> float:\n # This only uses RGB ... 
how can we integrate clear or calculate lux\n # based exclusively on clear since this might be more reliable?\n illuminance = (-0.32466 * r) + (1.57837 * g) + (-0.73191 * b)\n\n return illuminance", "def get_uthreshold(img):\n import noiselevel\n # sigma=Table.read('noiselevel.csv',format='csv')['sigma'][0]\n sigma = noiselevel.getnoiselevel(img,ranges=(-30,30),toplot=False)\n \n thres = sigma*np.sqrt(2*np.log(img.size))\n return thres, sigma", "def _value_as_luminance(self):\n return round(float(self._value), 1)", "def herbel_luminosities(redshift, alpha, a_m, b_m, size=None,\n x_min=0.00305,\n x_max=1100.0, resolution=100):\n\n if size is None and np.shape(redshift):\n size = np.shape(redshift)\n\n luminosity_star = _calculate_luminosity_star(redshift, a_m, b_m)\n\n x_sample = schechter(alpha, x_min, x_max, resolution=resolution, size=size)\n\n return luminosity_star * x_sample", "def luminance(self) -> float:\n use_option = 1\n\n if use_option == 1:\n # 1st option\n msb = 0\n msb_2nd = 1\n while msb != msb_2nd:\n msb = self.read_byte_data(Reg.luminance_msb)\n lsb = self.read_byte_data(Reg.luminance_lsb)\n msb_2nd = self.read_byte_data(Reg.luminance_msb)\n\n elif use_option == 2:\n # 2nd option, which does not work on rpi OSError: [Errno 95] Operation not supported\n wr_msb = i2c_msg.write(self.device_addr, [Reg.luminance_msb])\n rd_msb = i2c_msg.read(self.device_addr, 1)\n wr_lsb = i2c_msg.write(self.device_addr, [Reg.luminance_lsb])\n rd_lsb = i2c_msg.read(self.device_addr, 1)\n self.i2c_rdwr(wr_msb, rd_msb, wr_lsb, rd_lsb)\n msb = ord(rd_msb.data)\n lsb = ord(rd_lsb.data)\n\n # Convert the data to lux\n exponent = (msb & 0xF0) >> 4\n mantissa = ((msb & 0x0F) << 4) | (lsb & 0x0F)\n return 2.0 ** exponent * mantissa * 0.045", "def test_str_luminous_intensity(self):\n xknx = XKNX()\n sensor = Sensor(\n xknx,\n \"TestSensor\",\n group_address_state=\"1/2/3\",\n value_type=\"luminous_intensity\",\n )\n sensor.sensor_value.payload = DPTArray(\n (\n 0x46,\n 0xB,\n 0xBE,\n 0x7E,\n )\n )\n\n self.assertEqual(sensor.resolve_state(), 8943.623046875)\n self.assertEqual(sensor.unit_of_measurement(), \"cd\")\n self.assertEqual(sensor.ha_device_class(), \"illuminance\")", "def luminosity_function(abs_mag, redshift):\n\n # L/L_*(z) = 10**(0.4 * (M_*(z) - M))\n L_L_star = 10 ** (0.4 * (m_star(redshift) - abs_mag))\n\n # Phi*(z) = 10**(log(Phi*(z))\n phi_star = 10 ** log_phi_star(redshift) * (cosmo.h / u.Mpc) ** 3\n\n # QLF slopes\n alpha1 = -3.35 # alpha in Table 2\n alpha2 = -0.37 # beta in Table 2\n\n Phi = 0.4 * np.log(10) * L_L_star * phi_star * (L_L_star ** -alpha1 + L_L_star ** -alpha2) ** -1\n\n return Phi", "def Luminosity(self, z, f=1., dnu=1000.):\n ld = self.Luminosity_Distance(z)\n ld2 = ld * ld\n lum = f * self.Jy2CGS * dnu * self.MHz2Hz * 4 * np.pi * ld2\n return lum", "def random_noise_levels():\n log_min_shot_noise = math.log(0.0001)\n log_max_shot_noise = math.log(0.012)\n log_shot_noise = random.uniform(log_min_shot_noise, log_max_shot_noise)\n shot_noise = math.exp(log_shot_noise)\n\n line = lambda x: 2.18 * x + 1.20\n log_read_noise = line(log_shot_noise) + random.gauss(mu=0.0, sigma=0.26)\n read_noise = math.exp(log_read_noise)\n return shot_noise, read_noise", "def compute_noise_levels(tr, cfg):\n from obspy.signal.trigger import classic_sta_lta\n tr_snr = tr.copy()\n tr_snr.filter(\"bandpass\", freqmin=cfg.sig_noise.SNR_FREQ[0],\n freqmax=cfg.sig_noise.SNR_FREQ[1])\n wa = int(cfg.sig_noise.SNR_WIN[1]*tr.stats.sampling_rate)\n wb = 
int(cfg.sig_noise.SNR_WIN[0]*tr.stats.sampling_rate)\n # Prevent failing due to en(data) < nlta error\n if len(tr_snr.data) < wa or len(tr_snr.data) < wb:\n noise_level = 100.0\n return noise_level\n snr = classic_sta_lta(tr_snr.data, wa, wb)\n snr_smooth = do_smooth(snr, cfg.sig_noise.SNR_SMOOTH_WIN,\n tr.stats.sampling_rate)\n thresh_snr = np.nanmax(snr_smooth) * 0.4\n A = (snr_smooth - thresh_snr)\n A = A[np.where(A > 0)]\n if len(snr_smooth[wb:-wa]) == 0: # In case zerodivision error\n noise_level = 9999.9\n return noise_level\n noise_level = (len(A) / len(snr_smooth[wb:-wa])) * 100\n return noise_level", "def intensityPSF_BlRd(N=1000):\n col_seq = [( 59/255., 76/255., 192/255.), ( 68/255., 90/255., 204/255.),\n ( 77/255., 104/255., 215/255.), ( 87/255., 117/255., 225/255.),\n ( 98/255., 130/255., 234/255.), (108/255., 142/255., 241/255.),\n (119/255., 154/255., 247/255.), (130/255., 165/255., 251/255.),\n (141/255., 176/255., 254/255.), (152/255., 185/255., 255/255.),\n (163/255., 194/255., 255/255.), (174/255., 201/255., 253/255.),\n (184/255., 208/255., 249/255.), (194/255., 213/255., 244/255.),\n (204/255., 217/255., 238/255.), (213/255., 219/255., 230/255.),\n (221/255., 221/255., 221/255.), (229/255., 216/255., 209/255.),\n (236/255., 211/255., 197/255.), (241/255., 204/255., 185/255.),\n (245/255., 196/255., 173/255.), (247/255., 187/255., 160/255.),\n (247/255., 177/255., 148/255.), (247/255., 166/255., 135/255.),\n (244/255., 154/255., 123/255.), (241/255., 141/255., 111/255.),\n (236/255., 127/255., 99/255.)]\n\n cdict = {'red': ((0.00000000, col_seq[0][0], col_seq[0][0]),\n (0.00769231, col_seq[1][0], col_seq[1][0]),\n (0.01538462, col_seq[2][0], col_seq[2][0]),\n (0.02307692, col_seq[3][0], col_seq[3][0]),\n (0.03076923, col_seq[4][0], col_seq[4][0]),\n (0.03846154, col_seq[5][0], col_seq[5][0]),\n (0.04615385, col_seq[6][0], col_seq[6][0]),\n (0.05384615, col_seq[7][0], col_seq[7][0]),\n (0.06153846, col_seq[8][0], col_seq[8][0]),\n (0.06923077, col_seq[9][0], col_seq[9][0]),\n (0.07692308, col_seq[10][0], col_seq[10][0]),\n (0.08461538, col_seq[11][0], col_seq[11][0]),\n (0.09230769, col_seq[12][0], col_seq[12][0]),\n (0.10000000, col_seq[13][0], col_seq[13][0]),\n (0.10769231, col_seq[14][0], col_seq[14][0]),\n (0.18205128, col_seq[15][0], col_seq[15][0]),\n (0.25641026, col_seq[16][0], col_seq[16][0]),\n (0.33076923, col_seq[17][0], col_seq[17][0]),\n (0.40512821, col_seq[18][0], col_seq[18][0]),\n (0.47948718, col_seq[19][0], col_seq[19][0]),\n (0.55384615, col_seq[20][0], col_seq[20][0]),\n (0.62820513, col_seq[21][0], col_seq[21][0]),\n (0.70256410, col_seq[22][0], col_seq[22][0]),\n (0.77692308, col_seq[23][0], col_seq[23][0]),\n (0.85128205, col_seq[24][0], col_seq[24][0]),\n (0.92564103, col_seq[25][0], col_seq[25][0]),\n (1.00000000, col_seq[26][0], col_seq[26][0])),\n 'green': ((0.00000000, col_seq[0][1], col_seq[0][1]),\n (0.00769231, col_seq[1][1], col_seq[1][1]),\n (0.01538462, col_seq[2][1], col_seq[2][1]),\n (0.02307692, col_seq[3][1], col_seq[3][1]),\n (0.03076923, col_seq[4][1], col_seq[4][1]),\n (0.03846154, col_seq[5][1], col_seq[5][1]),\n (0.04615385, col_seq[6][1], col_seq[6][1]),\n (0.05384615, col_seq[7][1], col_seq[7][1]),\n (0.06153846, col_seq[8][1], col_seq[8][1]),\n (0.06923077, col_seq[9][1], col_seq[9][1]),\n (0.07692308, col_seq[10][1], col_seq[10][1]),\n (0.08461538, col_seq[11][1], col_seq[11][1]),\n (0.09230769, col_seq[12][1], col_seq[12][1]),\n (0.10000000, col_seq[13][1], col_seq[13][1]),\n (0.10769231, col_seq[14][1], 
col_seq[14][1]),\n (0.18205128, col_seq[15][1], col_seq[15][1]),\n (0.25641026, col_seq[16][1], col_seq[16][1]),\n (0.33076923, col_seq[17][1], col_seq[17][1]),\n (0.40512821, col_seq[18][1], col_seq[18][1]),\n (0.47948718, col_seq[19][1], col_seq[19][1]),\n (0.55384615, col_seq[20][1], col_seq[20][1]),\n (0.62820513, col_seq[21][1], col_seq[21][1]),\n (0.70256410, col_seq[22][1], col_seq[22][1]),\n (0.77692308, col_seq[23][1], col_seq[23][1]),\n (0.85128205, col_seq[24][1], col_seq[24][1]),\n (0.92564103, col_seq[25][1], col_seq[25][1]),\n (1.00000000, col_seq[26][1], col_seq[26][1])),\n 'blue': ((0.00000000, col_seq[0][2], col_seq[0][2]),\n (0.00769231, col_seq[1][2], col_seq[1][2]),\n (0.01538462, col_seq[2][2], col_seq[2][2]),\n (0.02307692, col_seq[3][2], col_seq[3][2]),\n (0.03076923, col_seq[4][2], col_seq[4][2]),\n (0.03846154, col_seq[5][2], col_seq[5][2]),\n (0.04615385, col_seq[6][2], col_seq[6][2]),\n (0.05384615, col_seq[7][2], col_seq[7][2]),\n (0.06153846, col_seq[8][2], col_seq[8][2]),\n (0.06923077, col_seq[9][2], col_seq[9][2]),\n (0.07692308, col_seq[10][2], col_seq[10][2]),\n (0.08461538, col_seq[11][2], col_seq[11][2]),\n (0.09230769, col_seq[12][2], col_seq[12][2]),\n (0.10000000, col_seq[13][2], col_seq[13][2]),\n (0.10769231, col_seq[14][2], col_seq[14][2]),\n (0.18205128, col_seq[15][2], col_seq[15][2]),\n (0.25641026, col_seq[16][2], col_seq[16][2]),\n (0.33076923, col_seq[17][2], col_seq[17][2]),\n (0.40512821, col_seq[18][2], col_seq[18][2]),\n (0.47948718, col_seq[19][2], col_seq[19][2]),\n (0.55384615, col_seq[20][2], col_seq[20][2]),\n (0.62820513, col_seq[21][2], col_seq[21][2]),\n (0.70256410, col_seq[22][2], col_seq[22][2]),\n (0.77692308, col_seq[23][2], col_seq[23][2]),\n (0.85128205, col_seq[24][2], col_seq[24][2]),\n (0.92564103, col_seq[25][2], col_seq[25][2]),\n (1.00000000, col_seq[26][2], col_seq[26][2]))}\n\n psfblrd = _mplb.colors.LinearSegmentedColormap('psfblrd', cdict, N)\n return psfblrd", "def enhance(img, window=30):\n hp = highPassFilter(img, window=window)\n tmp = grayscale(img) + laplacian(img)\n return tmp", "def nyt(image):\n\n ### Get all pixels into a list\n pixels = []\n \n for x in range(image.width):\n print(\"getting the pixel at x = \", x)\n \n for y in range(image.height):\n pixel = image.getpixel((x, y))\n pixels.append(pixel)\n \n print(pixels[:10])\n \n ### We have the list of pixels, and we need to sort it by luminance.\n \n ### Create a new list that has a pixel's luminance stored with the pixel\n ### Each element of this list will look like (luminance, (r, g, b))\n ### Then, when we sort this list, it will sort on luminance, and when tied,\n ### will break the ties based on r (and then on g)\n \n pixels_with_luminance = []\n for pixel in pixels:\n lum = luminance(pixel)\n pwl = (lum, pixel)\n pixels_with_luminance.append(pwl)\n \n print(\"pixels with luminance:\", pixels_with_luminance[:10])\n \n ### Now we can sort the pixels based on luminance:\n pixels_with_luminance.sort()\n print(\"pixels with luminance after sorting:\", pixels_with_luminance[:10])", "def intensity(self) -> int:" ]
[ "0.6417541", "0.63884014", "0.6198883", "0.5988071", "0.58689255", "0.5791698", "0.5777783", "0.57607454", "0.5741125", "0.5704976", "0.5666426", "0.56387365", "0.56344163", "0.5579416", "0.5577653", "0.55769515", "0.5561606", "0.5552182", "0.55497104", "0.554356", "0.5517453", "0.54907095", "0.54766655", "0.54759985", "0.5470947", "0.54024357", "0.5391043", "0.5383013", "0.5366394", "0.5356897" ]
0.7363408
0
expects 2 arrays of shape (3, N) rigid transform algorithm from
def rigid_transform_3d(xs,ys): assert xs.shape == ys.shape assert xs.shape[0] == 3, 'The points must be of dimmensionality 3' # find centroids and H x_centroid = np.mean(xs, axis=1)[:, np.newaxis] y_centroid = np.mean(ys, axis=1)[:, np.newaxis] H = (xs - x_centroid)@(ys - y_centroid).T # find rotation U, S, Vt = np.linalg.svd(H) rotation = [email protected] # handling reflection if np.linalg.det(rotation) < 0: Vt[2, :] *= -1 rotation = np.dot(Vt.T, U.T) # find translation translation = y_centroid - rotation@x_centroid return translation, rotation
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compose_transform(T1, T2):\n aux_vec = np.array([0, 0, 1]).reshape(1, 3)\n\n T1 = np.concatenate((T1, aux_vec), axis=0)\n T2 = np.concatenate((T2, aux_vec), axis=0)\n\n T1_inv = np.linalg.inv(T1)\n T = T1_inv@T2\n\n return T[0:2]", "def transform(tvec1, rvec1, tvec2, rvec2):\n op = localToGlobal(np.squeeze(tvec2), np.squeeze(rvec2))\n tvec3 = []\n for tvec in tvec1:\n #tvec = tvec.squeeze()\n tvec3.append(np.matmul(op, tvec))\n tvec3 = np.array(tvec3)\n return tvec3", "def solve_rigid_transform(X, Y, debug=True):\n assert X.shape[0] == Y.shape[0] >= 3\n assert X.shape[1] == Y.shape[1] == 3\n A = X.T # (3,N)\n B = Y.T # (3,N)\n\n # Look for Inge Soderkvist's solution online if confused.\n meanA = np.mean(A, axis=1, keepdims=True)\n meanB = np.mean(B, axis=1, keepdims=True)\n A = A - meanA\n B = B - meanB\n covariance = B.dot(A.T)\n U, sigma, VH = np.linalg.svd(covariance) # VH = V.T, i.e. numpy transposes it for us.\n\n V = VH.T\n D = np.eye(3)\n D[2,2] = np.linalg.det( U.dot(V.T) )\n R = U.dot(D).dot(V.T)\n t = meanB - R.dot(meanA)\n RB_matrix = np.concatenate((R, t), axis=1)\n\n #################\n # SANITY CHECKS #\n #################\n\n print(\"\\nBegin debug prints for rigid transformation from A to B:\")\n print(\"meanA:\\n{}\\nmeanB:\\n{}\".format(meanA, meanB))\n print(\"Rotation R:\\n{}\\nand R^TR (should be identity):\\n{}\".format(R, (R.T).dot(R)))\n print(\"translation t:\\n{}\".format(t))\n print(\"RB_matrix:\\n{}\".format(RB_matrix))\n\n # Get residual to inspect quality of solution. Use homogeneous coordinates for A.\n # Also, recall that we're dealing with (3,N) matrices, not (N,3).\n # In addition, we don't want to zero-mean for real applications.\n A = X.T # (3,N)\n B = Y.T # (3,N)\n\n ones_vec = np.ones((1, A.shape[1]))\n A_h = np.concatenate((A, ones_vec), axis=0)\n B_pred = RB_matrix.dot(A_h)\n assert B_pred.shape == B.shape\n\n # Careful! 
Use raw_errors for the RF, but it will depend on pred-targ or targ-pred.\n raw_errors = B_pred - B # Use pred-targ, of shape (3,N)\n l2_per_example = np.sum((B-B_pred)*(B-B_pred), axis=0)\n frobenius_loss = np.mean(l2_per_example)\n\n if debug:\n print(\"\\nInput, A.T:\\n{}\".format(A.T))\n print(\"Target, B.T:\\n{}\".format(B.T))\n print(\"Predicted points:\\n{}\".format(B_pred.T))\n print(\"Raw errors, B-B_pred:\\n{}\".format((B-B_pred).T))\n print(\"Mean abs error per dim: {}\".format( (np.mean(np.abs(B-B_pred), axis=1))) )\n print(\"Residual (L2) for each:\\n{}\".format(l2_per_example.T))\n print(\"loss on data: {}\".format(frobenius_loss))\n print(\"End of debug prints for rigid transformation.\\n\")\n\n assert RB_matrix.shape == (3,4)\n return RB_matrix", "def rigid_transform(xyz, transform):\n xyz_h = np.hstack([xyz, np.ones((len(xyz), 1), dtype=np.float32)])\n xyz_t_h = np.dot(transform, xyz_h.T).T\n return xyz_t_h[:, :3]", "def estimate_rigid_transform(points1, points2, translation_only=False):\n centroid1 = points1.mean(axis=0)\n centroid2 = points2.mean(axis=0)\n\n if translation_only:\n rotation = np.eye(2)\n translation = centroid2 - centroid1\n\n else:\n centered_points1 = points1 - centroid1\n centered_points2 = points2 - centroid2\n\n sigma = centered_points2.T @ centered_points1\n U, _, Vt = np.linalg.svd(sigma)\n\n rotation = U @ Vt\n translation = -rotation @ centroid1 + centroid2\n\n H = np.eye(3)\n H[:2,:2] = rotation\n H[:2, 2] = translation\n return H", "def AffineTransform( from_pts, to_pts ):\n \n # check that there are match points\n if len(from_pts) != len(to_pts) or len(to_pts)<1:\n print \"from_pts and to_pts must be of same size.\"\n return False\n\n # check the dimensions\n dim = len(from_pts[0]) # num of dimensions\n if len(from_pts) < dim:\n print \"Too few points => under-determined system.\"\n return False\n elif len(from_pts) > dim + 1:\n print \"Too many points => over-determined system.\"\n return False\n\n \n #segregate the x and y coordinages\n from_pts_x, from_pts_y = zip(*from_pts)\n to_pts_x, to_pts_y = zip(*to_pts)\n \n #create the Matricies for processing\n I = np.matrix([from_pts_x, from_pts_y, [1,1,1]])\n P = np.matrix([to_pts_x, to_pts_y])\n \n #Calculate the 2D affine transform matrix (A)\n A = P * linalg.pinv(I) \n\n # Make a result object\n class Transformation:\n \"\"\"Result object that represents the transformation\n from affine fitter.\"\"\"\n\n def To_Str(self):\n res = \"\"\n for j in range(dim):\n str1 = \"x%d' = \" % j\n for i in range(dim):\n str1 +=\"x%d * %f + \" % (i, A[i][j+dim+1])\n str1 += \"%f\" % A[dim][j+dim+1]\n res += str1 + \"\\n\"\n return res\n\n def Transform(self, pt_x, pt_y):\n pt_vector = np.matrix([[pt_x], [pt_y], [1]])\n transformed_pt = A * pt_vector\n return map(itemgetter(0), transformed_pt.tolist())\n return Transformation()", "def two_bs2x4_transform_opt(t1, r1, t2, r2, input_state):\n size = len(input_state)\n out = np.zeros((size,) * 4, dtype=complex)\n\n def coef(k1, k2, k3, k4):\n return t1 ** k2 * (1j * r1) ** k1 * t2 ** k4 * (1j * r2) ** k3 / (factorial(k1) * factorial(k2) * factorial(k3) * factorial(k4))\n\n # index 'i' = (m,n,k,l)\n for i in np.ndindex(size, size, size, size):\n if i[2] <= i[0] and i[3] <= i[1] and i[0] + i[1] < size:\n out[i[2], i[0] - i[2], i[3], i[1] - i[3]] = coef(i[2], i[0] - i[2], i[3], i[1] - i[3]) * input_state[i[0], i[1]] * factorial(i[0]) * factorial(i[1])\n\n return out", "def fit_transform(self, x: Array2D) -> Array2D:", "def transform(self, x: Array2D) -> Array2D:", 
"def apply_transformation_np(source, transformation):\n source_homog = np.ones((source.shape[0], 4))\n source_homog[:, :-1] = source\n # source_homog = np.hstack(\n # (source, np.ones(source.shape[0], 1))\n # )\n\n source_transformed = np.matmul(transformation, source_homog.T).T[:, :-1]\n return source_transformed", "def estimate_rigid_transform(points1, points2, translation_only=False):\n centroid1 = points1.mean(axis=0)\n centroid2 = points2.mean(axis=0)\n\n if translation_only:\n rotation = np.eye(2)\n translation = centroid2 - centroid1\n\n else:\n centered_points1 = points1 - centroid1\n centered_points2 = points2 - centroid2\n\n sigma = centered_points2.T @ centered_points1\n U, _, Vt = np.linalg.svd(sigma)\n\n rotation = U @ Vt\n translation = -rotation @ centroid1 + centroid2\n\n H = np.eye(3)\n H[:2, :2] = rotation\n H[:2, 2] = translation\n return H", "def estimate_rigid_transform(points1, points2, translation_only=False):\n centroid1 = points1.mean(axis=0)\n centroid2 = points2.mean(axis=0)\n\n if translation_only:\n rotation = np.eye(2)\n translation = centroid2 - centroid1\n\n else:\n centered_points1 = points1 - centroid1\n centered_points2 = points2 - centroid2\n\n sigma = centered_points2.T @ centered_points1\n U, _, Vt = np.linalg.svd(sigma)\n\n rotation = U @ Vt\n translation = -rotation @ centroid1 + centroid2\n\n H = np.eye(3)\n H[:2, :2] = rotation\n H[:2, 2] = translation\n return H", "def transform(self,X):\n X=np.array(X)\n if(X.ndim==1):\n return self.transform_1d(X) \n elif(X.ndim==2):\n X_tran=self.transform_1d(X[0])\n for i in range(1,X.shape[0]):\n X_tran=np.vstack((X_tran,self.transform_1d(X[i])))\n return X_tran \n else:\n print(\"Warning: The input array is not Transformed since its greater than 2 dimension\")\n print(\"Its dimension is:{} required is 2\".format(X.ndim))\n return X", "def compose_transform3(phi1,theta1,psi1,sx1,sy1,sz1,scale1,phi2,theta2,psi2,sx2,sy2,sz2,scale2):\n\n\tR1 = Transform({\"type\":\"spider\",\"phi\":float(phi1),\"theta\":float(theta1),\"psi\":float(psi1),\"tx\":float(sx1),\"ty\":float(sy1),\"tz\":float(sz1),\"mirror\":0,\"scale\":float(scale1)})\n\tR2 = Transform({\"type\":\"spider\",\"phi\":float(phi2),\"theta\":float(theta2),\"psi\":float(psi2),\"tx\":float(sx2),\"ty\":float(sy2),\"tz\":float(sz2),\"mirror\":0,\"scale\":float(scale2)})\n\tRcomp=R2*R1\n\td = Rcomp.get_params(\"spider\")\n\treturn d[\"phi\"],d[\"theta\"],d[\"psi\"],d[\"tx\"],d[\"ty\"],d[\"tz\"],d[\"scale\"]", "def compose_transforms(*transforms):\n from functools import reduce\n\n for transform in transforms:\n vg.shape.check(locals(), \"transform\", (4, 4))\n\n if len(transforms) == 0:\n return np.eye(4)\n\n return reduce(np.dot, reversed(transforms))", "def two_bs2x4_transform(t1, r1, t2, r2, input_state):\n size = len(input_state)\n output_state = np.zeros((size,) * 4, dtype=complex)\n for m in range(size):\n for n in range(size):\n\n for k in range(m + 1):\n for l in range(n + 1):\n # channels indexes\n ind1 = k\n ind2 = m - k\n ind3 = l\n ind4 = n - l\n coeff = input_state[m, n] * t1**(m - k) * (1j*r1)**k * t2**(n - l) * (1j*r2)**l * factorial(m) * factorial(n) / (factorial(k) * factorial(m - k) * factorial(l) * factorial(n - l))\n output_state[ind1, ind2, ind3, ind4] = output_state[ind1, ind2, ind3, ind4] + coeff\n\n return output_state", "def transformation_matrix(self, s1, s2, s3, t1, t2, t3):\n\n s1 = np.array(s1)\n s2 = np.array(s2)\n s3 = np.array(s3)\n t1 = np.array(t1)\n t2 = np.array(t2)\n t3 = np.array(t3)\n\n Q = np.array(\n [\n [t2[0] - t1[0], 
t2[1] - t1[1], t2[2] - t1[2]],\n [t3[0] - t1[0], t3[1] - t1[1], t3[2] - t1[2]],\n ]\n )\n\n P = np.array([[s2[0] - s1[0], s2[1] - s1[1]], [s3[0] - s1[0], s3[1] - s1[1]]])\n\n try:\n # Invert the P matrix\n Pinv = inv(P)\n\n # Build the dot product\n T = np.dot(Pinv, Q)\n\n # Offset\n V0 = np.subtract(t2, np.transpose(s2[0:2]).dot(T))\n except Exception as e:\n self.log.error(\"An error occured during the transformation.\", exc_info=True)\n return -1, -1\n\n return T, V0", "def pose_pair_construct(p1,n1,p2,n2):\n v1 = p2-p1; v1 /= np.linalg.norm(v1)\n R1 = tf_construct(n1,v1)\n return RigidTransform.from_Rt(R1, p1)", "def get_perspective_transform(points_src: Tensor, points_dst: Tensor) -> Tensor:\n KORNIA_CHECK_SHAPE(points_src, [\"B\", \"4\", \"2\"])\n KORNIA_CHECK_SHAPE(points_dst, [\"B\", \"4\", \"2\"])\n KORNIA_CHECK(points_src.shape == points_dst.shape, \"Source data shape must match Destination data shape.\")\n KORNIA_CHECK(points_src.dtype == points_dst.dtype, \"Source data type must match Destination data type.\")\n\n # we build matrix A by using only 4 point correspondence. The linear\n # system is solved with the least square method, so here\n # we could even pass more correspondence\n\n # create the lhs tensor with shape # Bx8x8\n B: int = points_src.shape[0] # batch_size\n\n A = torch.empty(B, 8, 8, device=points_src.device, dtype=points_src.dtype)\n\n # we need to perform in batch\n _zeros = zeros(B, device=points_src.device, dtype=points_src.dtype)\n _ones = torch.ones(B, device=points_src.device, dtype=points_src.dtype)\n\n for i in range(4):\n x1, y1 = points_src[..., i, 0], points_src[..., i, 1] # Bx4\n x2, y2 = points_dst[..., i, 0], points_dst[..., i, 1] # Bx4\n\n A[:, 2 * i] = stack([x1, y1, _ones, _zeros, _zeros, _zeros, -x1 * x2, -y1 * x2], -1)\n A[:, 2 * i + 1] = stack([_zeros, _zeros, _zeros, x1, y1, _ones, -x1 * y2, -y1 * y2], -1)\n\n # the rhs tensor\n b = points_dst.view(-1, 8, 1)\n\n # solve the system Ax = b\n X: Tensor = _torch_solve_cast(A, b)\n\n # create variable to return the Bx3x3 transform\n M = torch.empty(B, 9, device=points_src.device, dtype=points_src.dtype)\n M[..., :8] = X[..., 0] # Bx8\n M[..., -1].fill_(1)\n\n return M.view(-1, 3, 3) # Bx3x3", "def estimate_affine_matrix_3d_to_2d(X, x):\n assert x.shape[0] == X.shape[0]\n assert x.shape[0] >= 4\n X = X.T # (3, n)\n x = x.T # (2, n)\n n = x.shape[1]\n\n ###---- 1. normalization\n ## 2d points\n mean = np.mean(x, 1) # (2, )\n x = x - np.tile(mean[:, np.newaxis], [1, n]) # (2, n)\n average_norm = np.mean(np.sqrt(np.sum(x ** 2, 0)))\n scale = np.sqrt(2) / average_norm\n x = scale * x\n\n # T = [[scale, 0, -mean * scale], \n # [ 0, scale, -mean * scale], \n # [ 0, 0, 1 ]]\n T = np.zeros((3, 3), dtype=np.float32)\n T[0, 0] = T[1, 1] = scale\n T[:2, 2] = -mean * scale\n T[2, 2] = 1\n\n ## 3d points\n X_homo = np.vstack((X, np.ones((1, n)))) # (4, n)\n mean = np.mean(X, 1) # (3, )\n X = X - np.tile(mean[:, np.newaxis], [1, n]) # (3, n)\n m = X_homo[: 3, :] - X\n average_norm = np.mean(np.sqrt(np.sum(X ** 2, 0)))\n scale = np.sqrt(3) / average_norm\n X = scale * X\n\n U = np.zeros((4, 4), dtype=np.float32)\n U[0, 0] = U[1, 1] = U[2, 2] = scale\n U[: 3, 3] = -mean * scale\n U[3, 3] = 1\n\n ###---- 2. 
equations\n A = np.zeros((n * 2, 8), dtype=np.float32)\n X_homo = np.vstack((X, np.ones((1, n)))).T\n A[: n, : 4] = X_homo\n A[n: , 4: ] = X_homo\n b = np.reshape(x, [-1, 1]) # (2n, 1)\n\n ###---- 3.solution\n p_8 = np.linalg.pinv(A).dot(b) # (8, 2n) x (2n, 1) -> (8, 1)\n p = np.zeros((3, 4), dtype=np.float32)\n p[0, :] = p_8[:4, 0]\n p[1, :] = p_8[4:, 0]\n p[-1, -1] = 1\n\n ###---- 4. denormalization\n P_Affine = np.linalg.inv(T).dot(p.dot(U))\n return P_Affine", "def test_direct_shape():\n\n n = 21\n x = np.ones((n, n))\n\n recon = abel.direct.direct_transform(x, direction='forward')\n assert recon.shape == (n, n) \n\n recon = abel.direct.direct_transform(x, direction='inverse')\n assert recon.shape == (n, n)", "def transform_pc3d(pcl_c3d, Ts, seq_n, K_cur, batch_n):\n\n ## need to transform: flat.uvb, flat.feature['xyz'], flat.feature['normal']\n ## no need to transform grid features\n \n assert batch_n % seq_n == 0 # mode==0\n n_group = batch_n // seq_n\n\n ## get relative pose\n T, R, t, target_id = relative_T(Ts, seq_n, batch_n)\n\n ## get accumulative length\n nb = pcl_c3d.flat.nb\n acc_b = []\n acc = 0\n acc_b.append( acc )\n for ib in range(batch_n):\n acc = acc + nb[ib]\n acc_b.append( acc )\n\n ## process flat features\n flat_xyz = pcl_c3d.flat.feature['xyz'] # 1*C*NB\n flat_normal = pcl_c3d.flat.feature['normal']\n trans_normal_list = []\n trans_xyz_list = []\n uvb_list = []\n new_nb = []\n for ib in range(batch_n):\n ## xyz\n trans_xyz = torch.matmul(R[ib], flat_xyz[:, :, acc_b[ib]:acc_b[ib+1]]) + t[ib]\n mask_positive = trans_xyz[0, 2, :] > 0\n trans_xyz = trans_xyz[:, :, mask_positive]\n trans_xyz_list.append(trans_xyz)\n new_nb.append(trans_xyz.shape[2])\n\n ## normal\n trans_normal = torch.matmul(R[ib], flat_normal[:, :, acc_b[ib]:acc_b[ib+1]])\n trans_normal = trans_normal[:, :, mask_positive]\n trans_normal_list.append(trans_normal)\n\n ## project to uv, add b\n uvb = torch.matmul(K_cur[ib], trans_xyz)\n uvb[:, :2] = uvb[:, :2] / uvb[:, [2]] #- 1 , commented because in dataset_read.py there is a K_mat2py() function converting K from matlab to python coordinate\n uvb[:, 2, :] = target_id[ib]\n uvb_list.append(uvb)\n\n ## construct the new object\n tr_pcl_c3d = PCL_C3D_Flat()\n tr_pcl_c3d.feature['xyz'] = torch.cat(trans_xyz_list, dim=2)\n tr_pcl_c3d.feature['normal'] = torch.cat(trans_normal_list, dim=2)\n tr_pcl_c3d.uvb = torch.cat(uvb_list, dim=2)\n tr_pcl_c3d.nb = new_nb\n\n for feat_key in pcl_c3d.flat.feature:\n if feat_key not in ['xyz', 'normal']:\n tr_pcl_c3d.feature[feat_key] = pcl_c3d.flat.feature[feat_key]\n\n return tr_pcl_c3d", "def create_transforms(ntiles, solution):\n rtransforms = []\n for i in range(ntiles):\n rtransforms.append(renderapi.transform.AffineModel(\n B0=solution[0][i],\n B1=solution[1][i]))\n return rtransforms", "def fun(params,n_cameras,n_points,camera_indices,point_indices,points_3d , points_2d):\n camera_params = params[:n_cameras * 6].reshape((n_cameras, 6))\n # points_3d = points_3d.T\n # points_3d = params[n_cameras * 7:].reshape((n_points, 3))\n # print(point_indices)\n points_proj = project(points_3d[point_indices], camera_params[camera_indices])\n return (points_proj - points_2d).ravel()", "def compute_T_matrix(coordinates, p, reference=[[1,0,0],[0,1,0],[0,0,1]], origin=[0,0,0]):\n e_b_x = coordinates[0]\n e_b_y = coordinates[1]\n e_b_z = coordinates[2]\n \n e_a_x = reference[0] \n e_a_y = reference[1]\n e_a_z = reference[2]\n \n # Compute the rotation matrix\n x_b_a = [np.dot(e_b_x, e_a_x), np.dot(e_b_x, e_a_y), np.dot(e_b_x, 
e_a_z)]\n y_b_a = [np.dot(e_b_y, e_a_x), np.dot(e_b_y, e_a_y), np.dot(e_b_y, e_a_z)]\n z_b_a = [np.dot(e_b_z, e_a_x), np.dot(e_b_z, e_a_y), np.dot(e_b_z, e_a_z)]\n \n R_b_a = [[x_b_a[0], y_b_a[0], z_b_a[0]],[x_b_a[1], y_b_a[1], z_b_a[1]],x_b_a[2], y_b_a[2], z_b_a[2]]\n \n # Compute the displacement \n displacement = [p[0]-origin[0], p[1]-origin[1], p[2]-origin[2]]\n \n # Make it into a transform matrix\n T_b_a = [[x_b_a[0], y_b_a[0], z_b_a[0], displacement[0]],\n [x_b_a[1], y_b_a[1], z_b_a[1], displacement[1]],\n [x_b_a[2], y_b_a[2], z_b_a[2], displacement[2]],\n [0, 0, 0, 1]]\n \n T_a_b = np.linalg.inv(T_b_a).tolist()\n \n return T_b_a, T_a_b", "def compute_T_matrix(coordinates, p, reference=[[1,0,0],[0,1,0],[0,0,1]], origin=[0,0,0]):\n e_b_x = coordinates[0]\n e_b_y = coordinates[1]\n e_b_z = coordinates[2]\n \n e_a_x = reference[0] \n e_a_y = reference[1]\n e_a_z = reference[2]\n \n # Compute the rotation matrix\n x_b_a = [np.dot(e_b_x, e_a_x), np.dot(e_b_x, e_a_y), np.dot(e_b_x, e_a_z)]\n y_b_a = [np.dot(e_b_y, e_a_x), np.dot(e_b_y, e_a_y), np.dot(e_b_y, e_a_z)]\n z_b_a = [np.dot(e_b_z, e_a_x), np.dot(e_b_z, e_a_y), np.dot(e_b_z, e_a_z)]\n \n R_b_a = [[x_b_a[0], y_b_a[0], z_b_a[0]],[x_b_a[1], y_b_a[1], z_b_a[1]],x_b_a[2], y_b_a[2], z_b_a[2]]\n \n # Compute the displacement \n displacement = [p[0]-origin[0], p[1]-origin[1], p[2]-origin[2]]\n \n # Make it into a transform matrix\n T_b_a = [[x_b_a[0], y_b_a[0], z_b_a[0], displacement[0]],\n [x_b_a[1], y_b_a[1], z_b_a[1], displacement[1]],\n [x_b_a[2], y_b_a[2], z_b_a[2], displacement[2]],\n [0, 0, 0, 1]]\n \n T_a_b = np.linalg.inv(T_b_a).tolist()\n \n return T_b_a, T_a_b", "def task_three():\n # Formula to calculate:\n # q2 = (z2 / z1) * (R + T * nt / d) * q1\n # where R - rotation\n # T - translation\n # nt - normal vertex of common plane of the 3d points\n # d - shift of the common plane\n # and (R + T * nt / d) required homography transform\n # defined up to constant\n # But in our case T == 0\n tetta = 30 * np.pi / 180\n H = np.array([[1, 0, 0],\n [0, np.cos(tetta), -np.sin(tetta)],\n [0, np.sin(tetta), np.cos(tetta)],\n ])\n print(\"Homography transformation:\\n\", H)", "def apply_transformation(self, points):\n assert (points.shape[0] == 3)\n n = points.shape[1]\n points_ = np.vstack((points, np.ones((1, n))))\n points_trans_ = np.matmul(self.pose_mat, points_)\n points_transformed = np.true_divide(points_trans_[:3, :], points_trans_[[-1], :])\n return points_transformed", "def transform(self, src):\n T, feature_dim = src.shape[0], self.Y_static_dim*3\n\n if feature_dim == self.Y_static_dim:\n return super(GMM_M, self).transform(src)\n\n # A suboptimum mixture sequence (eq.37)\n optimum_mix = self.px.predict(src)\n\n # Compute E eq.(40)\n E = np.empty((T, feature_dim))\n for t in range(T):\n m = optimum_mix[t] # estimated mixture index at time t\n xx = np.linalg.solve(self.covarXX[m], src[t] - self.src_means[m])\n #print(xx.shape,self.tgt_means[m].shape,self.covarYX[m].shape)\n # Eq. (22)\n E[t] = self.tgt_means[m] +np.dot(self.covarYX[m], xx)\n\n # Compute D eq.(23)\n # Approximated variances with diagonals so that we can do MLPG\n # efficiently in dimention-wise manner\n #print(E.shape)\n D = np.empty((T, feature_dim))\n #print(D.shape)\n for t in range(T):\n m = optimum_mix[t]\n # Eq. 
(23), with approximating covariances as diagonals\n #D[t] = np.diag(self.covarYY[m]) - np.diag(self.covarYX[m]) / \\\n # np.diag(self.covarXX[m]) * np.diag(self.covarXY[m])\n\n # Exact Inference\n dd = self.covarYY[m] - np.linalg.multi_dot([self.covarYX[m], np.linalg.pinv(self.covarXX[m]), self.covarXY[m]])\n #print(dd.shape)\n D[t] = np.diag(dd)\n\n # Once we have mean and variance over frames, then we can do MLPG\n return E, D, self.windows#mlpg(E, D, self.windows)", "def task_two_test():\n # First test\n # Create points list for task two\n points = np.random.rand(2, 4)\n # Translate and rotate it somehow\n tetta = np.random.uniform(low=0, high=2 * np.pi, size=(1,))[0]\n R = np.array([[np.cos(tetta), -np.sin(tetta)],\n [np.sin(tetta), np.cos(tetta)]])\n T = np.random.uniform(low=0, high=3, size=(2, 1))\n H = np.append(R, T, axis=1)\n points_translated = np.dot(H, np.append(points, np.ones((1, 4)), axis=0))\n print(\"Points 2d translation + rotation:\\n\", H)\n points_list = np.array(list(zip(points.T, points_translated.T)))\n task_two(points_list)\n # Second test\n H = np.random.rand(3, 3)\n points_translated = np.dot(H, np.append(points, np.ones((1, 4)), axis=0))\n # Normalize it\n points = np.random.rand(3, 4)\n tetta = np.random.uniform(low=0, high=2 * np.pi, size=(1,))[0]\n R = np.array([[np.cos(tetta), -np.sin(tetta), 0],\n [np.sin(tetta), np.cos(tetta), 0],\n [0, 0, 1]])\n T = np.random.uniform(low=0, high=3, size=(3, 1))\n H = np.append(R, T, axis=1)\n print(\"Points 3d translation + rotation:\\n\", H)\n points_translated = np.dot(H, np.append(points, np.ones((1, 4)), axis=0))\n # Convert to p2\n norm = lambda x: [x[0] / x[2], x[1] / x[2]]\n points = np.array([norm(x) for x in points.T]).T\n points_translated = np.array([norm(x) for x in points_translated.T]).T\n points_list = np.array(list(zip(points.T, points_translated.T)))\n task_two(points_list)" ]
[ "0.64119685", "0.6391706", "0.637345", "0.63456833", "0.6247986", "0.61719114", "0.6155749", "0.6147075", "0.60972595", "0.60801315", "0.607085", "0.607085", "0.6059854", "0.6050465", "0.6016514", "0.59902495", "0.59778076", "0.59692", "0.59610087", "0.5923086", "0.5920455", "0.5881435", "0.5854746", "0.5853403", "0.5842791", "0.5842791", "0.58371675", "0.58249855", "0.57725376", "0.5754339" ]
0.6508242
0
Synchronize this instance's data with that of its parent
def _syncDataWithParent(self): parent = self.parent() if parent is None: data, range_ = None, None else: data = parent.getData(copy=False) range_ = parent.getDataRange() self._updateData(data, range_)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n self._data = None\n else:\n self._data = parent.getData(copy=False)\n self._updateScenePrimitive()", "def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n data, range_ = None, None\n else:\n mode = self.getComplexMode()\n data = parent.getData(mode=mode, copy=False)\n range_ = parent.getDataRange(mode=mode)\n self._updateData(data, range_)", "def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n self._data = None\n else:\n self._data = parent.getData(\n mode=parent.getComplexMode(), copy=False)\n\n if parent is None or self.getComplexMode() == self.ComplexMode.NONE:\n self._setColormappedData(None, copy=False)\n else:\n self._setColormappedData(\n parent.getData(mode=self.getComplexMode(), copy=False),\n copy=False)\n\n self._updateScenePrimitive()", "def sync(self):\n pass", "def sync(self):\n pass", "def sync(self):\n return", "def sync(self, other):\n pass # TODO", "def do_sync(self):\n raise NotImplementedError() # pragma: no cover", "def sync_local(self, other):\n pass # TODO", "def doSync (self) :\r\n \r\n self.factory.getSyncFor(self)", "def sync(self, **kwargs):\n pass", "def update_original_data(self):\n pass", "def _parentChanged(self, event):\n if event == ItemChangedType.DATA:\n self._syncDataWithParent()", "def _parentChanged(self, event):\n if event == ItemChangedType.DATA:\n self._syncDataWithParent()", "def sync(self):\n return self._sync", "def update(self, parent):\r\n pass", "def _post_sync(self):", "def _reload(self):\n if self._ancestorModelSourceCreated:\n self._parent._reload()\n else:\n # beware this breaks parent/child links such as current selection / hierarchical groups\n dictSave = self.serialize()\n tmpRegion = self._createBlankCopy()\n tmpRegion.deserialize(dictSave)\n self._assign(tmpRegion)\n self._informRegionChange(True)", "def lock(self):\n raise NotImplementedError", "def sync() -> None:", "def sync(self):\n if not self.is_readonly():\n deser = self._deserialize()\n orig = getattr(self.model, self.name)\n if (orig != deser):\n if isinstance(orig, list):\n # first remove the original triples, instead of doing sophisticated\n # set manipulations\n setattr(self.model, self.name, [])\n setattr(self.model, self.name, deser)", "def SyncRoot(self) -> object:", "def sync(self):\r\n\r\n # Ensure to rerun only once to avoid infinite loops\r\n # caused by a constantly changing state value at each run.\r\n #\r\n # Example: state.value += 1\r\n if self._state[\"is_rerun\"]:\r\n self._state[\"is_rerun\"] = False\r\n\r\n elif self._state[\"hash\"] is not None:\r\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(self._state[\"data\"], None):\r\n self._state[\"is_rerun\"] = True\r\n self._state[\"session\"].request_rerun()\r\n\r\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)", "def sync(self, sync):\n self._sync = sync", "def sync(self):\n\n # Ensure to rerun only once to avoid infinite loops caused by a constantly changing state value at each run.\n # Example: state.value += 1\n\n if self._state[\"is_rerun\"]:\n self._state[\"is_rerun\"] = False\n\n elif self._state[\"hash\"] is not None:\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(\n self._state[\"data\"], None\n ):\n self._state[\"is_rerun\"] = True\n self._state[\"session\"].request_rerun()\n\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)", "def _pre_sync(self):", "def 
sync(self):\r\n\r\n # Ensure to rerun only once to avoid infinite loops\r\n # caused by a constantly changing state value at each run.\r\n #\r\n # Example: state.value += 1\r\n if self._state[\"is_rerun\"]:\r\n self._state[\"is_rerun\"] = False\r\n \r\n elif self._state[\"hash\"] is not None:\r\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(self._state[\"data\"], None):\r\n self._state[\"is_rerun\"] = True\r\n self._state[\"session\"].request_rerun()\r\n\r\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)", "def sync(self):\n\n # Ensure to rerun only once to avoid infinite loops\n # caused by a constantly changing state value at each run.\n #\n # Example: state.value += 1\n if self._state[\"is_rerun\"]:\n self._state[\"is_rerun\"] = False\n\n elif self._state[\"hash\"] is not None:\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(self._state[\"data\"], None):\n self._state[\"is_rerun\"] = True\n self._state[\"session\"].request_rerun()\n\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(\n self._state[\"data\"], None)", "def sync(self):\n\n if self._inchild:\n os.read(self._pr_child, len(self.RELEASE_MSG))\n else:\n os.read(self._pr_parent, len(self.RELEASE_MSG))", "def sync(self):\n\n # Ensure to rerun only once to avoid infinite loops\n # caused by a constantly changing state value at each run.\n #\n # Example: state.value += 1\n if self._state[\"is_rerun\"]:\n self._state[\"is_rerun\"] = False\n\n elif self._state[\"hash\"] is not None:\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(\n self._state[\"data\"], None\n ):\n self._state[\"is_rerun\"] = True\n self._state[\"session\"].request_rerun()\n\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)" ]
[ "0.8105605", "0.7977319", "0.7733652", "0.71153784", "0.71153784", "0.70563495", "0.7043718", "0.674526", "0.65729344", "0.6530828", "0.64562", "0.6451494", "0.6362502", "0.6362502", "0.6325555", "0.63112843", "0.6255493", "0.6245364", "0.6242624", "0.619789", "0.61851496", "0.61773336", "0.61612016", "0.61592674", "0.615458", "0.61517084", "0.6139571", "0.61280423", "0.6117423", "0.6102235" ]
0.80830806
1
Return whether values <= colormap min are displayed or not.
def getDisplayValuesBelowMin(self): return self._getPlane().colormap.displayValuesBelowMin
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setDisplayValuesBelowMin(self, display):\n display = bool(display)\n if display != self.getDisplayValuesBelowMin():\n self._getPlane().colormap.displayValuesBelowMin = display\n self._updated(ItemChangedType.ALPHA)", "def visible(self):\n return -PipePair.WIDTH < self.x < WIN_WIDTH", "def is_lower_limit(self):\n is_lower = self.get_raw_status() & self.STATUS_LLIM\n return bool(is_lower)", "def isHittingLow(self):\n return not self.limLow.get()", "def in_pixel_range(self, pixmin: int, pixmax: int) -> bool:\n \n if any(i < pixmin or i > pixmax or np.isnan(i) for i in self.datapos):\n return False\n\n return True", "def _single_value_min(data, threshold):\r\n amin = np.min(data)\r\n amax = np.max(data)\r\n limit = amin + (amax - amin) * threshold\r\n return data < limit", "def is_lower(self):\n M = self.rep\n for i in range(self.rows):\n for j in range(i + 1, self.cols):\n if M[i, j]:\n return False\n return True", "def _get_display_range(image): # pragma: no cover\n ip = _get_image_properties(image)\n immin, immax = np.min(image), np.max(image)\n if ip.signed:\n magnitude = max(abs(immin), abs(immax))\n lo, hi = -magnitude, magnitude\n cmap = _diverging_colormap\n elif any(ip):\n _raise_warnings(ip)\n lo, hi = immin, immax\n cmap = _nonstandard_colormap\n else:\n lo = 0\n imtype = image.dtype.type\n hi = dtype_range[imtype][1]\n cmap = _default_colormap\n return lo, hi, cmap", "def is_visible(self, x, y) :\n\t\tres_x = (x > self.x_min) and (x < self.x_max)\n\t\t# print 'res_x : {0}, x : {1}, x_min : {2}, x_max:{3}'.format(res_x, x, self.x_min, self.x_max)\n\t\tres_y = (y > self.y_min) #and (y < self.y_max)\n\t\treturn res_x and res_y", "def is_visible(self):\n return self.rect.x < self.screen_rect.width", "def highlight_min_max(s, min_color=\"#5fba7d\", max_color=\"#e67575\"):\n is_max = s == s.max()\n is_min = s == s.min()\n max_mapping = [f'background-color: {max_color}' if v else '' for v in is_max]\n min_mapping = [f'background-color: {min_color}' if v else '' for v in is_min]\n return [min_mapping[i] if min_mapping[i] != '' else max_mapping[i] for i in range(len(min_mapping))]", "def YellowFilter(c):\n if (c[0] > c[2]) and (c[1] > c[2]) and (c[0] == c[1]): return True\n else: return False", "def is_modern(self):\n G = self.poset().hasse_diagram()\n for x in G:\n nx = list(G.neighbors_in(x))\n nx.append(x)\n if min(nx) < x and max(nx) > x:\n return False\n return True", "def f_has_range(self):\n return len(self._explored_range) > 0", "def set_colormap_full_range(self):\n if(self.plot.image is None):\n return\n \n cmin = self.settingsWidget.ui.colormap_min\n cmax = self.settingsWidget.ui.colormap_max\n data_min = numpy.min(self.plot.image)\n data_max = numpy.max(self.plot.image)\n cmin.setText(str(data_min))\n cmax.setText(str(data_max))\n self.set_colormap_range()", "def test_change_min_max(self):\n\n datarange = self.colormap.range\n\n # Perform a dummy mapping.\n a = ArrayDataSource(array([0.0, 0.5, 1.0]))\n datarange.add(a)\n b = self.colormap.map_screen(a.get_data())\n datarange.remove(a)\n\n # Update the min_value.\n datarange.low = -1.0\n\n # Test that the map still works.\n a = ArrayDataSource(array([-1.0, 0.0, 1.0]))\n datarange.add(a)\n b = self.colormap.map_screen(a.get_data())\n datarange.remove(a)\n expected = array([0.0, 0.5, 1.0])\n\n close = allclose(ravel(b[:,:1]), expected, atol=0.02)\n self.assert_(close,\n \"Changing min value broke map. Expected %s. 
Got %s\" % (expected, b[:,:1]))\n\n # Update the max_value.\n datarange.high = 0.0\n # Test that the map still works.\n a = ArrayDataSource(array([-1.0, -0.5, 0.0]))\n datarange.add(a)\n b = self.colormap.map_screen(a.get_data())\n datarange.remove(a)\n expected = array([0.0, 0.5, 1.0])\n\n close = allclose(ravel(b[:,:1]), expected, atol=0.02)\n self.assert_(close,\n \"Changing min value broke map. Expected %s. Got %s\" % (expected, b[:,:1]))\n\n\n return", "def is_scale_enabled(self) -> bool:\r\n ...", "def isScalar(self) -> bool:\n\n indices = list(range(self.layout.gaDims))\n indices.remove(self.layout.gradeList.index(0))\n\n for i in indices:\n if abs(self.value[i]) < _eps:\n continue\n else:\n return False\n\n return True", "def is_visible(self):\n return self.real > 0", "def isLow(self):\n\t\treturn self.resolution == 'LOW'", "def _is_obstacle_in_front(self):\n range_front = []\n range_front[:20] = self.lidar_data[-20:]\n range_front[20:] = self.lidar_data[:20]\n range_front = list(filter(lambda num: num != 0, range_front))\n min_front = min(range_front)\n if min_front < 0.4 and min_front != 0.0:\n\t\t\treturn True\n else:\n\t\t\treturn False", "def filter(self, intensities):\n return np.array(intensities) > self.min_ms1_intensity", "def GreenFilter(c):\n if (c[1] > c[0]) and (c[1] > c[2]) and (c[0] == c[2]): return True\n else: return False", "def _is_visible(self, point):\n return point[0] > 0 and point[0] < 1 and point[1] > 0 and point[1] < 1", "def vmin(self):\n return self._vmin", "def in_display(self, point):\n x, y = point\n if x < 0 or x > self.width or \\\n y < 0 or y > self.height:\n return False\n return True", "def hasLow(self):\n\t\treturn self.toLow().exists", "def color_vals(val, threshl=[0.15, 0.30, 0.50]):\n colormap = ['red', 'black', 'blue', 'green']\n color = colormap[-1]\n for i, thresh in enumerate(threshl):\n if val < thresh:\n color = colormap[i]\n break\n return 'color: %s' % color", "def is_present(self, c, tiny=1.0E-30):\n v = [tiny if x <= tiny else x for x in c]\n present = bool(len(np.where(np.array(v) > tiny)[0]))\n return present, v", "def _get_colorbar_limits(self):\n if self.boundaries is not None:\n C = self.boundaries\n if self.extend in [\"min\", \"both\"]:\n C = C[1:]\n\n if self.extend in [\"max\", \"both\"]:\n C = C[:-1]\n return min(C), max(C)\n else:\n return self.get_clim()" ]
[ "0.6101894", "0.606636", "0.5966878", "0.59301066", "0.5888018", "0.58112633", "0.58078593", "0.5752245", "0.5747372", "0.57274777", "0.57218593", "0.5695929", "0.55961335", "0.5590847", "0.5554003", "0.5551317", "0.55380684", "0.5532524", "0.55230325", "0.5503854", "0.55005634", "0.54687977", "0.54406214", "0.5432614", "0.54093766", "0.54002166", "0.53953683", "0.53911763", "0.5387343", "0.5375715" ]
0.7410294
0
Set whether to display values <= colormap min.
def setDisplayValuesBelowMin(self, display): display = bool(display) if display != self.getDisplayValuesBelowMin(): self._getPlane().colormap.displayValuesBelowMin = display self._updated(ItemChangedType.ALPHA)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_colormap_full_range(self):\n if(self.plot.image is None):\n return\n \n cmin = self.settingsWidget.ui.colormap_min\n cmax = self.settingsWidget.ui.colormap_max\n data_min = numpy.min(self.plot.image)\n data_max = numpy.max(self.plot.image)\n cmin.setText(str(data_min))\n cmax.setText(str(data_max))\n self.set_colormap_range()", "def getDisplayValuesBelowMin(self):\n return self._getPlane().colormap.displayValuesBelowMin", "def _setBound(self, value):\n if self._colormap is not None:\n if self._index == 0:\n min_ = value\n max_ = self._colormap.getVMax()\n else: # self._index == 1\n min_ = self._colormap.getVMin()\n max_ = value\n\n if max_ is not None and min_ is not None and min_ > max_:\n min_, max_ = max_, min_\n self._colormap.setVRange(min_, max_)", "def test_change_min_max(self):\n\n datarange = self.colormap.range\n\n # Perform a dummy mapping.\n a = ArrayDataSource(array([0.0, 0.5, 1.0]))\n datarange.add(a)\n b = self.colormap.map_screen(a.get_data())\n datarange.remove(a)\n\n # Update the min_value.\n datarange.low = -1.0\n\n # Test that the map still works.\n a = ArrayDataSource(array([-1.0, 0.0, 1.0]))\n datarange.add(a)\n b = self.colormap.map_screen(a.get_data())\n datarange.remove(a)\n expected = array([0.0, 0.5, 1.0])\n\n close = allclose(ravel(b[:,:1]), expected, atol=0.02)\n self.assert_(close,\n \"Changing min value broke map. Expected %s. Got %s\" % (expected, b[:,:1]))\n\n # Update the max_value.\n datarange.high = 0.0\n # Test that the map still works.\n a = ArrayDataSource(array([-1.0, -0.5, 0.0]))\n datarange.add(a)\n b = self.colormap.map_screen(a.get_data())\n datarange.remove(a)\n expected = array([0.0, 0.5, 1.0])\n\n close = allclose(ravel(b[:,:1]), expected, atol=0.02)\n self.assert_(close,\n \"Changing min value broke map. Expected %s. 
Got %s\" % (expected, b[:,:1]))\n\n\n return", "def set_colormap_range(self):\n cmin = self.settingsWidget.ui.colormap_min\n cmax = self.settingsWidget.ui.colormap_max\n region = self.plot.getHistogramWidget().region\n\n if(self.sender() == region):\n cmin.setText(str(region.getRegion()[0]))\n cmax.setText(str(region.getRegion()[1]))\n return\n\n # Sometimes the values in the lineEdits are\n # not proper floats so we get ValueErrors\n try:\n # If necessary swap min and max\n if(float(cmin.text()) > float(cmax.text())):\n _tmp = cmin.text()\n cmin.setText(cmax.text())\n cmax.setText(_tmp)\n\n region = [float(cmin.text()), float(cmax.text())]\n self.plot.getHistogramWidget().region.setRegion(region)\n except ValueError:\n return", "def rescale(self):\n low = self.datasource.data[\"values\"].min()\n high = self.datasource.data[\"values\"].max()\n\n # force color to be at lower end of the colormap if\n # data is all equal\n if low == high:\n high += 1\n\n self.set_limits_minmax(low, high)", "def setMinValue(self, min_value):\r\n\t\tself.MinValue = min_value\r\n\t\tself.OutputValueIncrement = (self.MaxValue - self.MinValue)/(self.RampDuration/self.Ts)", "def setMinValue(self, min_value):\r\n\t\tself.MinValue = min_value\r\n\t\tself.OutputValueIncrement = (self.MaxValue - self.MinValue)/(self.RampDuration/self.Ts)", "def normalize_cmap(self):\n vmax, vmin = np.max(self.values), np.min(self.values)\n self.midpoint = 1 - vmax/(vmax + abs(vmin))\n if self.midpoint > 0.5:\n self.start, self.stop = 0, 0.5 + (1-self.midpoint)\n else:\n self.start, self.stop = 0.5 - self.midpoint, 1", "def set_limits_minmax(self, zmin, zmax):\n self._color_mapper.update(low=zmin, high=zmax)\n self.update()", "def set_view_min(self, view_min):\n try:\n view_min = float(view_min)\n self._view_min = view_min\n self.update_rgba()\n if self._cross_pos:\n self.update_orth_rgba()\n except ValueError:\n print \"view_min must be a number.\"", "def set_low_high_value(self):\n # do not apply scaler norm on not scalable data\n self.range_dict.clear()\n\n for data_name in self.dict_to_plot.keys():\n if self.quantitative_normalization:\n # Quantitative normalization\n data_arr, _ = self.img_model_adv.param_quant_analysis.apply_quantitative_normalization(\n data_in=self.dict_to_plot[data_name],\n scaler_dict=self.scaler_norm_dict,\n scaler_name_default=self.get_selected_scaler_name(),\n data_name=data_name,\n ref_name=self.quantitative_ref_eline,\n name_not_scalable=self.name_not_scalable,\n )\n else:\n # Normalize by the selected scaler in a regular way\n data_arr = normalize_data_by_scaler(\n data_in=self.dict_to_plot[data_name],\n scaler=self.scaler_data,\n data_name=data_name,\n name_not_scalable=self.name_not_scalable,\n )\n\n lowv, highv = np.min(data_arr), np.max(data_arr)\n # Create some 'artificially' small range in case the array is constant\n if lowv == highv:\n lowv -= 0.005\n highv += 0.005\n self.range_dict[data_name] = {\"low\": lowv, \"low_default\": lowv, \"high\": highv, \"high_default\": highv}", "def set_min(self, min):\n self.set_val((min, self.val[1]))", "def vmin(self):\n return self._vmin", "def __set_range_to_show(self) -> None:\n cantus_firmus_positions = [\n line_element.scale_element.position_in_semitones\n for line_element in self.cantus_firmus\n ]\n cantus_firmus_lower_bound = min(cantus_firmus_positions)\n cantus_firmus_upper_bound = max(cantus_firmus_positions)\n\n counterpoint_lower_bound = self.lowest_element.position_in_semitones\n counterpoint_upper_bound = 
self.highest_element.position_in_semitones\n\n self.lowest_row_to_show = min(\n cantus_firmus_lower_bound,\n counterpoint_lower_bound\n )\n self.highest_row_to_show = max(\n cantus_firmus_upper_bound,\n counterpoint_upper_bound\n )", "def set_minVal(self, val):\n self.minVal = val", "def _adjust_scale(self, value):\n if self._min_val <= value <= self._max_val:\n self._scale_var.set(value)\n self.update_label_text()", "def set_limits_minmax(self, zmin, zmax):\n self.pixels.set_clim(zmin, zmax)\n self.autoscale = False", "def _changeDisplayRange(self):\n try:\n newrange = float(str(self._wmin.text())), float(str(self._wmax.text()))\n except ValueError:\n return\n self._rc.setDisplayRange(*newrange)", "def test_min_vs_max(self, fig_test, fig_ref):\n ax = fig_test.add_subplot(projection=\"ternary\")\n ax.set_ternary_min(0.1, 0.2, 0.3)\n\n ax = fig_ref.add_subplot(projection=\"ternary\")\n ax.set_ternary_max(0.5, 0.6, 0.7)", "def view_limits(self, dmin, dmax):\n base = self._select_base(dmin, dmax)\n if mpl.rcParams['axes.autolimit_mode'] == 'round_numbers':\n vmin = base.le(dmin)\n vmax = base.ge(dmax)\n if vmin == vmax:\n vmin -= 1\n vmax += 1\n else:\n vmin = dmin\n vmax = dmax\n\n return mtransforms.nonsingular(vmin, vmax)", "def set_limits_minmax(self, zmin, zmax):\n self.camera.set_clim(zmin, zmax)\n self.autoscale = False", "def setMinValue(self, min_value):\r\n\t\tself.MinValue = min_value", "def setMinValue(self, min_value):\r\n\t\tself.MinValue = min_value", "def _clamp_rgb_coordinate(self, coord):\r\n\r\n if not self.is_upscaled:\r\n return min(max(coord, 0.0), 1.0)\r\n else:\r\n return min(max(coord, 1), 255)", "def setColorBarRange(start=1,end=254):\n dislin.colran(start,end)", "def clamp(self):\n self.threshold.data.clamp_(self.min_threshold)", "def config_pbc_min(self):\n self._config_min()\n self.title = \"PBC Minimization\"\n self.cntrl[\"cut\"] = 8.0\n self.cntrl[\"igb\"] = 0", "def min_value(self, min_value):\n\n self._min_value = min_value", "def min_value(self, min_value):\n\n self._min_value = min_value" ]
[ "0.6897544", "0.68503755", "0.6472414", "0.64165556", "0.6364885", "0.6256008", "0.6017445", "0.6017445", "0.5980322", "0.5958103", "0.59348243", "0.58880955", "0.58671427", "0.5842141", "0.580647", "0.57340556", "0.57123756", "0.56508297", "0.56315273", "0.5560471", "0.5549892", "0.5542802", "0.55403185", "0.55403185", "0.54861516", "0.54762757", "0.54597443", "0.54445773", "0.54432094", "0.54432094" ]
0.77897596
0
Synchronize this instance's data with that of its parent
def _syncDataWithParent(self):
    parent = self.parent()
    if parent is None:
        self._data = None
    else:
        self._data = parent.getData(copy=False)
    self._updateScenePrimitive()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n data, range_ = None, None\n else:\n data = parent.getData(copy=False)\n range_ = parent.getDataRange()\n self._updateData(data, range_)", "def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n data, range_ = None, None\n else:\n mode = self.getComplexMode()\n data = parent.getData(mode=mode, copy=False)\n range_ = parent.getDataRange(mode=mode)\n self._updateData(data, range_)", "def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n self._data = None\n else:\n self._data = parent.getData(\n mode=parent.getComplexMode(), copy=False)\n\n if parent is None or self.getComplexMode() == self.ComplexMode.NONE:\n self._setColormappedData(None, copy=False)\n else:\n self._setColormappedData(\n parent.getData(mode=self.getComplexMode(), copy=False),\n copy=False)\n\n self._updateScenePrimitive()", "def sync(self):\n pass", "def sync(self):\n pass", "def sync(self):\n return", "def sync(self, other):\n pass # TODO", "def do_sync(self):\n raise NotImplementedError() # pragma: no cover", "def sync_local(self, other):\n pass # TODO", "def doSync (self) :\r\n \r\n self.factory.getSyncFor(self)", "def sync(self, **kwargs):\n pass", "def update_original_data(self):\n pass", "def _parentChanged(self, event):\n if event == ItemChangedType.DATA:\n self._syncDataWithParent()", "def _parentChanged(self, event):\n if event == ItemChangedType.DATA:\n self._syncDataWithParent()", "def sync(self):\n return self._sync", "def update(self, parent):\r\n pass", "def _post_sync(self):", "def _reload(self):\n if self._ancestorModelSourceCreated:\n self._parent._reload()\n else:\n # beware this breaks parent/child links such as current selection / hierarchical groups\n dictSave = self.serialize()\n tmpRegion = self._createBlankCopy()\n tmpRegion.deserialize(dictSave)\n self._assign(tmpRegion)\n self._informRegionChange(True)", "def lock(self):\n raise NotImplementedError", "def sync() -> None:", "def sync(self):\n if not self.is_readonly():\n deser = self._deserialize()\n orig = getattr(self.model, self.name)\n if (orig != deser):\n if isinstance(orig, list):\n # first remove the original triples, instead of doing sophisticated\n # set manipulations\n setattr(self.model, self.name, [])\n setattr(self.model, self.name, deser)", "def SyncRoot(self) -> object:", "def sync(self):\r\n\r\n # Ensure to rerun only once to avoid infinite loops\r\n # caused by a constantly changing state value at each run.\r\n #\r\n # Example: state.value += 1\r\n if self._state[\"is_rerun\"]:\r\n self._state[\"is_rerun\"] = False\r\n\r\n elif self._state[\"hash\"] is not None:\r\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(self._state[\"data\"], None):\r\n self._state[\"is_rerun\"] = True\r\n self._state[\"session\"].request_rerun()\r\n\r\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)", "def sync(self, sync):\n self._sync = sync", "def sync(self):\n\n # Ensure to rerun only once to avoid infinite loops caused by a constantly changing state value at each run.\n # Example: state.value += 1\n\n if self._state[\"is_rerun\"]:\n self._state[\"is_rerun\"] = False\n\n elif self._state[\"hash\"] is not None:\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(\n self._state[\"data\"], None\n ):\n self._state[\"is_rerun\"] = True\n self._state[\"session\"].request_rerun()\n\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], 
None)", "def _pre_sync(self):", "def sync(self):\r\n\r\n # Ensure to rerun only once to avoid infinite loops\r\n # caused by a constantly changing state value at each run.\r\n #\r\n # Example: state.value += 1\r\n if self._state[\"is_rerun\"]:\r\n self._state[\"is_rerun\"] = False\r\n \r\n elif self._state[\"hash\"] is not None:\r\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(self._state[\"data\"], None):\r\n self._state[\"is_rerun\"] = True\r\n self._state[\"session\"].request_rerun()\r\n\r\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)", "def sync(self):\n\n # Ensure to rerun only once to avoid infinite loops\n # caused by a constantly changing state value at each run.\n #\n # Example: state.value += 1\n if self._state[\"is_rerun\"]:\n self._state[\"is_rerun\"] = False\n\n elif self._state[\"hash\"] is not None:\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(self._state[\"data\"], None):\n self._state[\"is_rerun\"] = True\n self._state[\"session\"].request_rerun()\n\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(\n self._state[\"data\"], None)", "def sync(self):\n\n if self._inchild:\n os.read(self._pr_child, len(self.RELEASE_MSG))\n else:\n os.read(self._pr_parent, len(self.RELEASE_MSG))", "def sync(self):\n\n # Ensure to rerun only once to avoid infinite loops\n # caused by a constantly changing state value at each run.\n #\n # Example: state.value += 1\n if self._state[\"is_rerun\"]:\n self._state[\"is_rerun\"] = False\n\n elif self._state[\"hash\"] is not None:\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(\n self._state[\"data\"], None\n ):\n self._state[\"is_rerun\"] = True\n self._state[\"session\"].request_rerun()\n\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)" ]
[ "0.80830806", "0.7977319", "0.7733652", "0.71153784", "0.71153784", "0.70563495", "0.7043718", "0.674526", "0.65729344", "0.6530828", "0.64562", "0.6451494", "0.6362502", "0.6362502", "0.6325555", "0.63112843", "0.6255493", "0.6245364", "0.6242624", "0.619789", "0.61851496", "0.61773336", "0.61612016", "0.61592674", "0.615458", "0.61517084", "0.6139571", "0.61280423", "0.6117423", "0.6102235" ]
0.8105605
0
Return the level of this isosurface (float)
def getLevel(self): return self._level
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def level(self):\n return self.game_data['player stats']['Level']", "def level(self):\n return self.init_v[2]", "def getLevel(self):\n return _libsbml.SBasePlugin_getLevel(self)", "def level(self):\n return self._level", "def level(self):\n return self._level", "def level(self):\n return self._level", "def level(self):\n return self._level", "def level(self):\n return self.__level", "def level(self):\n return self.__level", "def getLevel(self):\n return _libsbml.SBase_getLevel(self)", "def _get_isis_level(self):\n return self.__isis_level", "def get_level(self, channel=None):\n return int(self.getSensorData(\"FILLING_LEVEL\", channel))", "def getLevel(self):\n return self.level", "def volume_level(self):\n return self._volumeLevel/100", "def level(self) -> int:\n return self._level", "def level(self) -> int:\n return self._level", "def get_level(self) -> int:\n return self.rstate.level()", "def volume_level(self):\n return self._group.volume / 100", "def volume_level(self):\n return int(self._volume) / MAX_VOL", "def _do_get_level(self):\n logging.info(__name__ + ' : Read level of channel 1')\n result = self._execute('R1')\n return float(result.replace(\"R\", \"\")) / 10", "def volume_level(self):\n return self._table.speed", "def get_level(cls, curve_value):\n return curve_value & (2 ** cls.level_bits - 1)", "def volume_level(self) -> float:\n return int(self._state.get(\"playback_volume\", 0)) / 100", "def level(self) -> int:\n return self.__state.level()", "def getLevel(self, *args):\n return _libsbml.FbcExtension_getLevel(self, *args)", "def volume_level(self):\n if 'mixer volume' in self._status:\n return int(self._status['mixer volume']) / 100.0", "def volume_level(self) -> str | None:\n return int(self.zone.Volume) / 100.0", "def volume_level(self):\n return self._volume_level", "def volume_level(self):\n return self._client.volume / 100", "def volume_level(self):\n return self._volume" ]
[ "0.692961", "0.68555295", "0.68304414", "0.67786837", "0.67786837", "0.67786837", "0.67786837", "0.67447656", "0.67447656", "0.673165", "0.6729187", "0.67290646", "0.67083573", "0.6660453", "0.66239667", "0.66239667", "0.65987796", "0.6588329", "0.65245295", "0.65243083", "0.65113395", "0.6481602", "0.64800906", "0.64771783", "0.64745146", "0.6445088", "0.64376295", "0.6420487", "0.64171034", "0.63877404" ]
0.68635625
1
Set the value at which to build the isosurface. Setting this value resets the autolevel function
def setLevel(self, level):
    self._autoLevelFunction = None
    level = float(level)
    if level != self._level:
        self._level = level
        self._updateScenePrimitive()
        self._updated(Item3DChangedType.ISO_LEVEL)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SetLevelSetValue(self, _arg: 'double const') -> \"void\":\n return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF3_SetLevelSetValue(self, _arg)", "def SetLevelSetValue(self, _arg: 'double const') -> \"void\":\n return _itkReinitializeLevelSetImageFilterPython.itkReinitializeLevelSetImageFilterIF2_SetLevelSetValue(self, _arg)", "def isoslider(surface_dic, surface_value_dic, min_value=0):\r\n return \\\r\nf\"\"\"\r\n\\n\\nclass IsoLevel(tk.Variable):\r\n def __init__(self, master, name, level):\r\n tk.Variable.__init__(self, master, value=level)\r\n self.name = name\r\n self.trace('w', self.callback)\r\n\r\n def callback(self, *args):\r\n cmd.isolevel(self.name, self.get())\r\n\r\n def increment(self, event=None, delta=0.1):\r\n self.set(round(float(self.get()) + delta, 2))\r\n\r\n def decrement(self, event=None):\r\n self.increment(None, -0.1)\r\n\r\n\r\nsurface_list = {surface_dic}\r\nsurface_max_list = {surface_value_dic}\r\n\r\ntop = tk.Toplevel(plugins.get_tk_root())\r\n\r\nmaster = tk.Frame(top, padx=10, pady=10)\r\nmaster.pack(fill=\"both\", expand=1)\r\n\r\nfor child in list(master.children.values()):\r\n child.destroy()\r\n\r\n\r\nrow_counter = 0\r\nfor identifier, component_dic in surface_list.items():\r\n # add calculation identifier\r\n tk.Label(master, text=identifier).grid(row=row_counter, column=0, sticky=\"w\")\r\n row_counter += 1\r\n \r\n for component_id, surfaces in component_dic.items():\r\n # add collection label, e.g. superstar or hotspot etc.\r\n tk.Label(master, text=component_id).grid(row=row_counter, column=1, sticky='w')\r\n row_counter += 1\r\n \r\n for i, surface in enumerate(surfaces):\r\n # add grid type label\r\n probe = surface.split(\"_\")[-2]\r\n tk.Label(master, text=probe).grid(row=row_counter, column=2, sticky=\"w\")\r\n \r\n # slider code \r\n v = IsoLevel(master, surface, 5)\r\n e = tk.Scale(master, orient=tk.HORIZONTAL, from_={min_value}, to=surface_max_list[identifier][component_id],\r\n resolution=0.1, showvalue=0, variable=v)\r\n e.grid(row=row_counter, column=3, sticky=\"ew\")\r\n\r\n e = tk.Entry(master, textvariable=v, width=4)\r\n e.grid(row=row_counter, column=4, sticky=\"e\")\r\n master.columnconfigure(3, weight=1)\r\n row_counter += 1\r\n\\n\\n\r\n\"\"\"", "def setvalue(self,num,name,val):\n self.M.reconfigure(num,{name:float(val)})", "def init_game_setting(self):\n ##################\n # YOUR CODE HERE #\n ##################\n self.state = np.zeros((1, 80, 80))\n self.clear_action()", "def set_setpoint(self, value):\n value = value * self.conf['PSICONV']\n log.debug(\"Set pressure regulator %d to %f\", self.id_, value)\n self.synth.cbox.set_dac(self.id_, value)", "def setUseGizmos(value=True):\n global cc\n cc = not value", "def set_initial(self, value):\n # TODO: Make an Initial Stock Adjust here\n pass", "def update_electronic_settings(self, key, value):\n\n if key in self._electronic_settings:\n self._electronic_settings[key] = value\n else:\n print(\"key does not exist!! 
keys include: {prec_level, algo, encut , nelm,nelmin, ediff, sigma, lasph, lreal, addgrid, bmaxmix, bmix}\")", "def _set_power(self, value: str):\n if value == STATE_ON:\n self.state[1] = self.state[1][:2] + '1' + self.state[1][3:]\n\n if value == STATE_OFF:\n self.state[1] = self.state[1][:2] + '0' + self.state[1][3:]", "def setFlatImage(self, value=1.0):\n self.fimage = None\n self.image = numpy.zeros((self.ny, self.nx), 'float') + value\n return", "def changeRingSetting(self):\n #Input code to accommodate function of Ring setting", "def valuechange():\n\n tempmin.setMaximum(tempmax.value())\n tempmax.setMinimum(tempmin.value())\n hummin.setMaximum(hummax.value())\n hummax.setMinimum(hummin.value())\n\n self.variables.default_values_dict[\"settings\"][\n \"current_tempmin\"\n ] = tempmin.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_tempmax\"\n ] = tempmax.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_hummin\"\n ] = hummin.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_hummax\"\n ] = hummax.value()\n\n max = build_command(\n self.variables.devices_dict[\"temphum_controller\"],\n (\"set_hummax\", hummax.value()),\n )\n min = build_command(\n self.variables.devices_dict[\"temphum_controller\"],\n (\"set_hummin\", hummin.value()),\n )\n\n self.variables.vcw.write(\n self.variables.devices_dict[\"temphum_controller\"], max\n )\n self.variables.vcw.write(\n self.variables.devices_dict[\"temphum_controller\"], min\n )", "def Init(self):\r\n print(\"Initiating...\")\r\n if (self.Get_FullScale_Value() == self.FullScaleEnum[0]):\r\n self.gain = 0.00875\r\n elif (self.Get_FullScale_Value() == self.FullScaleEnum[1]):\r\n self.gain = 0.0175\r\n elif (self.Get_FullScale_Value() == self.FullScaleEnum[2]):\r\n self.gain = 0.07\r\n print(\"Gain set to:{0}\".format(self.gain))", "def _isosurfaceItemChanged(self, event):\n if event == Item3DChangedType.ISO_LEVEL:\n self._updateIsosurfaces()", "def trigger_level(self, value):\n self.lib.SetTriggerLevel(ct.c_float(value))", "def async_set_level(self, value: int) -> None:\n value = max(0, min(255, value))\n self._position = int(value * 100 / 255)\n self.async_write_ha_state()", "def setSolenoidCurrent(self, current):\n self.calc_level = min(self.calc_level, CALC_B_MAP - 1)\n self.solenoid.setSolenoidCurrent(current) # to reset solenoid calc", "def set_z(self, value):\n\n # set the zero register if value is zero\n self.p &= ~(const.FLAG_ZERO)\n self.p |= const.FLAG_ZERO if value == 0b0 else 0b0", "def set_volume(self, value):\n utils.set_volume(self.config[\"alsa\"][\"card\"], value) # Sets the actual volume level\n\n if value == 0:\n mode = \"muted\"\n elif value <= 25:\n mode = \"low\"\n elif value <= 75:\n mode = \"medium\"\n else:\n mode = \"high\"\n \n icon = utils.get_volume_icon(mode)\n self.settings_window.volume_label.setPixmap(icon)", "def setAutoLevelFunction(self, autoLevel):\n assert callable(autoLevel)\n self._autoLevelFunction = autoLevel\n self._updateScenePrimitive()", "def ft_sensor_set_zero(self):\r\n return self._arm.ft_sensor_set_zero()", "def set_state(self, value):\n _LOGGER.debug(\"%s: Set state to %d\", self.entity_id, value)\n self._flag_state = True\n\n params = {ATTR_ENTITY_ID: self.entity_id}\n if value == 0:\n if self.char_current_state.value != value:\n self.char_current_state.set_value(3)\n self.call_service(DOMAIN, SERVICE_OPEN_COVER, params)\n elif value == 1:\n if self.char_current_state.value != value:\n self.char_current_state.set_value(2)\n 
self.call_service(DOMAIN, SERVICE_CLOSE_COVER, params)", "def apply_settings(camera):\r\n camera.clear_mode = 0\r\n camera.exp_mode = \"Internal Trigger\"\r\n camera.readout_port = 0\r\n camera.speed_table_index = 0\r\n camera.gain = 1", "def update_magnetic_settings(self, key, value):\n\n if self._magnetic_settings:\n if key in self._magnetic_settings:\n self._magnetic_settings[key] = value\n else:\n print(\"key does not exist!! keys include: {ispin, magmom, nupdown, saxis, lsorbit,noncollinear}\")\n else:\n print(\"magnetic settings not present!\")", "def setImage(self, image=None, autoLevels=None, **kargs):\n profile = debug.Profiler()\n\n gotNewData = False\n if image is None:\n if self.image is None:\n return\n else:\n old_xp = self._xp\n cp = getCupy()\n self._xp = cp.get_array_module(image) if cp else numpy\n gotNewData = True\n processingSubstrateChanged = old_xp != self._xp\n if processingSubstrateChanged:\n self._processingBuffer = None\n shapeChanged = (processingSubstrateChanged or self.image is None or image.shape != self.image.shape)\n image = image.view()\n if self.image is None or image.dtype != self.image.dtype:\n self._effectiveLut = None\n self.image = image\n self._imageHasNans = None\n if self.image.shape[0] > 2**15-1 or self.image.shape[1] > 2**15-1:\n if 'autoDownsample' not in kargs:\n kargs['autoDownsample'] = True\n if shapeChanged:\n self.prepareGeometryChange()\n self.informViewBoundsChanged()\n\n profile()\n\n if autoLevels is None:\n if 'levels' in kargs:\n autoLevels = False\n else:\n autoLevels = True\n if autoLevels:\n level_samples = kargs.pop('levelSamples', 2**16) \n mn, mx = self.quickMinMax( targetSize=level_samples )\n # mn and mx can still be NaN if the data is all-NaN\n if mn == mx or self._xp.isnan(mn) or self._xp.isnan(mx):\n mn = 0\n mx = 255\n kargs['levels'] = [mn,mx]\n\n profile()\n\n self.setOpts(update=False, **kargs)\n\n profile()\n\n self._renderRequired = True\n self.update()\n\n profile()\n\n if gotNewData:\n self.sigImageChanged.emit()\n if self._defferedLevels is not None:\n levels = self._defferedLevels\n self._defferedLevels = None\n self.setLevels((levels))", "def settemp(t=-10):\n print camera.SetTemperature(t)\n camera.status.update()", "def __init__(self):\n super(SteklovBoundary, self).__init__()\n self.value = SteklovBoundary.value\n SteklovBoundary.value -= 1\n self.update(param=\"1\")", "def set_value(self, on_level):\n if on_level in FanSpeedRange.OFF:\n fan_speed = FanSpeed.OFF\n elif on_level in FanSpeedRange.LOW:\n fan_speed = FanSpeed.LOW\n elif on_level in FanSpeedRange.MEDIUM:\n fan_speed = FanSpeed.MEDIUM\n else:\n fan_speed = FanSpeed.HIGH\n self.value = fan_speed", "def initialize_dynamic_settings(self):\r\n self.alien_speed_factor = 0.1\r\n self.alien_bullet_speed_factor = 0.7" ]
[ "0.59382164", "0.58852255", "0.5742166", "0.5726637", "0.57214034", "0.571938", "0.5709026", "0.56838727", "0.56748176", "0.5579223", "0.55515665", "0.55400157", "0.55338275", "0.5522072", "0.54870933", "0.5481656", "0.5459882", "0.5447825", "0.54363453", "0.5433698", "0.5425798", "0.53781307", "0.53757036", "0.5367045", "0.5360007", "0.5359763", "0.5359451", "0.5346631", "0.5334891", "0.53202844" ]
0.6042807
0
Return the function computing the isolevel (callable or None)
def getAutoLevelFunction(self): return self._autoLevelFunction
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _func(self):\n return self._get_flint_func(self.domain)", "def _get_isis_level(self):\n return self.__isis_level", "def poly_level(f):\n if poly_univariate_p(f):\n return 1\n else:\n return 1 + poly_level(poly_LC(f))", "def getFunction(self) -> ghidra.program.model.listing.Function:\n ...", "def _computeIsosurface(self):\n data = self.getData(copy=False)\n\n if data is None:\n if self.isAutoLevel():\n self._level = float('nan')\n\n else:\n if self.isAutoLevel():\n st = time.time()\n try:\n level = float(self.getAutoLevelFunction()(data))\n\n except Exception:\n module_ = self.getAutoLevelFunction().__module__\n name = self.getAutoLevelFunction().__name__\n _logger.error(\n \"Error while executing iso level function %s.%s\",\n module_,\n name,\n exc_info=True)\n level = float('nan')\n\n else:\n _logger.info(\n 'Computed iso-level in %f s.', time.time() - st)\n\n if level != self._level:\n self._level = level\n self._updated(Item3DChangedType.ISO_LEVEL)\n\n if numpy.isfinite(self._level):\n st = time.time()\n vertices, normals, indices = MarchingCubes(\n data,\n isolevel=self._level)\n _logger.info('Computed iso-surface in %f s.', time.time() - st)\n\n if len(vertices) != 0:\n return vertices, normals, indices\n\n return None, None, None", "def get_function(self):\n return Gumtree.gumtree.getFunction()", "def func ( self ) :\n return self.__func", "def define_scalar_functions(self):\n\n # Exit if functions have already been defined.\n # A function decorator might work better here...\n if hasattr(self, 'pressure'):\n return None\n\n if self.config['material']['incompressible']:\n self.pressure = dlf.Function(self.scalarSpace, name='p')\n\n if self.config['formulation']['time']['unsteady']:\n self.pressure0 = dlf.Function(self.scalarSpace, name='p0')\n else:\n self.pressure0 = 0\n\n self.test_scalar = dlf.TestFunction(self.scalarSpace)\n self.trial_scalar = dlf.TrialFunction(self.scalarSpace)\n else:\n self.pressure = 0\n self.pressure0 = 0\n self.test_scalar = None\n self.trial_scalar = None\n\n # Apply initial conditions if provided\n initial_condition = self.config['formulation']['initial_condition']\n if initial_condition['pressure'] is not None:\n init_pressure = initial_condition['pressure']\n self.apply_initial_conditions(init_pressure,\n self.pressure,\n self.pressure0)\n\n return None", "def _get_impl(self, name: str) -> Optional[Callable]:\n if name in dir(operator):\n impl = getattr(operator, name)\n elif name in dir(builtins):\n impl = getattr(builtins, name)\n elif name in self['numeric/right']:\n impl = reverse_args(self._get_impl(name.lstrip('r')))\n else:\n impl = None\n return impl", "def sem_function(self, parser, node, children):\n print (\"Function name\")\n print(children[1])\n \n if len(children) == 1:\n print(children[0]) \n return children[0]\n \n sign = -1 if children[0] == '-' else 1\n \n return sign * children[-1]", "def get_function(self):\n return self.element.get_basis_functions()[self.n]", "def my_function():\n\n\treturn None", "def function(self):\n return self.generator.module.neumannz", "def get_func(op):\n if op == \"-e\":\n return func\n elif op == \"-d\":\n return unfunc", "def function(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"function\")", "def func ( self ) :\n return self.__func", "def get_unc_f(self):\n return self.uncf", "def get_function(self):\n raise NotImplementedError()", "def lin_o_func(self):\n return self.hx", "def function(self):\n return self.generator.module.neumanny", "def function(self) -> 
Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"function\")", "def function(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"function\")", "def _call(self, x):\n return x.ufuncs.sign()", "def get_function(self):\n return subs(self.f.get_function(), self.sub_pre, self.sub_post)", "def calc_Uiso(self):\n if self.temp_factor is None:\n return None\n return numpy.identity(3, float) * (self.temp_factor * Constants.B2U)", "def _isNullFunc():\n try:\n return vd.sheet.isNullFunc()\n except AttributeError:\n import visidata\n\n return visidata.isNullFunc()", "def getFunction(self, name: unicode) -> ghidra.program.model.listing.Function:\n ...", "def _call(self, x):\n if functional.prior is None:\n return np.exp(x)\n else:\n return functional.prior * np.exp(x)", "def function(self):\n return self.generator.module.neumannx", "def functional(self):\n return self.__functional" ]
[ "0.6047651", "0.5671476", "0.5463008", "0.54624134", "0.5348979", "0.53289545", "0.5268753", "0.5257093", "0.52481455", "0.5203101", "0.5194863", "0.51774395", "0.51627976", "0.5157307", "0.51524407", "0.5147835", "0.513994", "0.51247656", "0.5106995", "0.5046533", "0.50287354", "0.50287354", "0.50190294", "0.50177026", "0.5011827", "0.5011127", "0.49978614", "0.49810368", "0.49693674", "0.49539286" ]
0.5753576
1
Return the color of this isosurface (QColor)
def getColor(self): return qt.QColor.fromRgbF(*self._color)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getColor(self):\n return self.color", "def getColor(self):\r\n return self.color", "def get_color(self):\r\n return self._color", "def getColor(self):\n return self.__color", "def getColor(self):\n return self.__color", "def getColor(self):\n return self.__color", "def get_color(self):\n return self.color", "def get_color(self):\n\n return self.color", "def get_color(self):\n return self._color", "def get_color(self):\n return self._color", "def get_color(self):\r\n return self.__color", "def get_color(self):\n\n return self._color", "def color(self):\n return self.__color", "def color(self):\n return self._color", "def color(self):\n return self._color", "def color(self):\n return self._rgba", "def color(self):\n return self._rgba", "def color(self):\n return rgba(self.value_of_css_property('color'))", "def color(self):\n return self.container['color']", "def getColor(self):\n return self._l[2]", "def color(self):\n return self['color']", "def color(self):\n return self.COLOR", "def get_color(self):\n return COLOR_DICT[self.element]", "def getColor(self):\n return self.side_color", "def get_color(self) -> str:\n return self.color", "def get_color(self) -> str:\r\n return self.color", "def get_colour(self):\n return self.colour", "def conseguir_color(self):\n return self.pluma.conseguir_color()", "def get_color(self):\n return self._io.last_state['color']['front-center']", "def color(self):\n return 0x2f3136" ]
[ "0.70161194", "0.6997709", "0.69830304", "0.6970522", "0.6970522", "0.6970522", "0.69664246", "0.6958327", "0.69552636", "0.69552636", "0.69544834", "0.6918692", "0.6895953", "0.68916535", "0.68916535", "0.68693244", "0.68693244", "0.68580174", "0.680577", "0.67948073", "0.6764362", "0.6753648", "0.6735035", "0.6709825", "0.66705096", "0.6656925", "0.66178995", "0.66061646", "0.64928824", "0.64739454" ]
0.77090704
0
Compute isosurface for current state.
def _computeIsosurface(self):
    data = self.getData(copy=False)

    if data is None:
        if self.isAutoLevel():
            self._level = float('nan')

    else:
        if self.isAutoLevel():
            st = time.time()
            try:
                level = float(self.getAutoLevelFunction()(data))

            except Exception:
                module_ = self.getAutoLevelFunction().__module__
                name = self.getAutoLevelFunction().__name__
                _logger.error(
                    "Error while executing iso level function %s.%s",
                    module_,
                    name,
                    exc_info=True)
                level = float('nan')

            else:
                _logger.info(
                    'Computed iso-level in %f s.', time.time() - st)

            if level != self._level:
                self._level = level
                self._updated(Item3DChangedType.ISO_LEVEL)

        if numpy.isfinite(self._level):
            st = time.time()
            vertices, normals, indices = MarchingCubes(
                data,
                isolevel=self._level)
            _logger.info('Computed iso-surface in %f s.', time.time() - st)

            if len(vertices) != 0:
                return vertices, normals, indices

    return None, None, None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isosurface(self):\n return self._isosurface()", "def get_fsurface(self, path):\n raise NotImplementedError", "def _isosurfaceItemChanged(self, event):\n if event == Item3DChangedType.ISO_LEVEL:\n self._updateIsosurfaces()", "def isosurface(grd_name, isosurface_name, level, color):\r\n # pymol_out = PyMOLCommands.load(fname, grd_name)\r\n pymol_out = f'\\ncmd.isosurface(name=\"{isosurface_name}\", map=\"{grd_name}\", level=\"{level}\")\\n'\r\n pymol_out += f'\\ncmd.color(\"{color}\", \"{isosurface_name}\")'\r\n return pymol_out", "def _updateIsosurfaces(self):\n # Sorting using minus, this supposes data 'object' to be max values\n sortedIso = sorted(self.getIsosurfaces(),\n key=lambda isosurface: - isosurface.getLevel())\n self._isogroup.children = [iso._getScenePrimitive() for iso in sortedIso]", "def _get_surfaces(idf):\n surfaces = idf.getsurfaces() + idf.getshadingsurfaces() + idf.getsubsurfaces()\n return surfaces", "def removeIsosurface(self, isosurface):\n if isosurface not in self.getIsosurfaces():\n _logger.warning(\n \"Try to remove isosurface that is not in the list: %s\",\n str(isosurface))\n else:\n isosurface.sigItemChanged.disconnect(self._isosurfaceItemChanged)\n self._isosurfaces.remove(isosurface)\n self._updateIsosurfaces()\n self.sigIsosurfaceRemoved.emit(isosurface)", "def closed_v(self):\n sa = ShapeAnalysis_Surface(self.surface())\n return sa.IsVClosed()", "def invert_simple(forward, meas, geom):\n\n surface = forward.surface\n RT = forward.RT\n instrument = forward.instrument\n\n vswir_present = False\n if any(forward.surface.wl < 2600):\n vswir_present = True \n\n tir_present = False\n if any(forward.surface.wl > 2600):\n tir_present = True \n\n # First step is to get the atmosphere. We start from the initial state\n # and estimate atmospheric terms using traditional heuristics.\n x = forward.init.copy()\n x_surface, x_RT, x_instrument = forward.unpack(x)\n\n if vswir_present:\n x[forward.idx_RT] = heuristic_atmosphere(RT, instrument, \n x_RT, x_instrument, meas, geom)\n\n # Now, with atmosphere fixed, we can invert the radiance algebraically\n # via Lambertian approximations to get reflectance\n x_surface, x_RT, x_instrument = forward.unpack(x)\n rfl_est, Ls_est, coeffs = invert_algebraic(surface, RT,\n instrument, x_surface, x_RT,\n x_instrument, meas, geom)\n\n # Condition thermal part on the VSWIR portion. Only works for\n # Multicomponent surfaces. Finds the cluster nearest the VSWIR heuristic\n # inversion and uses it for the TIR suface initialization.\n if tir_present:\n tir_idx = np.where(forward.surface.wl > 3000)[0]\n\n if vswir_present:\n x_surface_temp = x_surface.copy()\n x_surface_temp[:len(rfl_est)] = rfl_est\n mu = forward.surface.xa(x_surface_temp, geom)\n rfl_est[tir_idx] = mu[tir_idx]\n else:\n rfl_est = 0.03 * np.ones(len(forward.surface.wl))\n\n # Now we have an estimated reflectance. Fit the surface parameters.\n x_surface[forward.idx_surface] = forward.surface.fit_params(rfl_est, geom)\n\n # Find temperature of emissive surfaces\n if tir_present:\n\n # Estimate the total radiance at sensor, leaving out surface emission\n # Radiate transfer calculations could take place at high spectral resolution\n # so we upsample the surface reflectance\n rfl_hi = forward.upsample(forward.surface.wl, rfl_est)\n rhoatm, sphalb, transm, solar_irr, coszen, transup = coeffs\n\n L_atm = RT.get_L_atm(x_RT, geom)\n L_down_transmitted = RT.get_L_down_transmitted(x_RT, geom)\n L_total_without_surface_emission = \\\n L_atm + L_down_transmitted * rfl_hi / (1. 
- sphalb * rfl_hi)\n\n # These tend to have high transmission factors; the emissivity of most\n # materials is nearly 1 for these bands, so they are good for\n # initializing the surface temperature.\n clearest_wavelengths = [10125., 10390.00, 10690.00]\n\n # This is fragile if other instruments have different wavelength\n # spacing or range\n clearest_indices = [np.argmin(np.absolute(RT.wl - w))\n for w in clearest_wavelengths]\n\n # Error function for nonlinear temperature fit\n def err(z):\n T = z\n emissivity = forward.surface.emissivity_for_surface_T_init\n Ls_est, d = emissive_radiance(emissivity, T,\n forward.surface.wl[clearest_indices])\n resid = transup[clearest_indices] * Ls_est + \\\n L_total_without_surface_emission[clearest_indices] - \\\n meas[clearest_indices]\n return sum(resid**2)\n\n # Fit temperature, set bounds, and set the initial values\n idx_T = forward.surface.surf_temp_ind\n Tinit = np.array([forward.surface.init[idx_T]])\n Tbest = minimize(err, Tinit).x\n T = max(forward.surface.bounds[idx_T][0]+eps,\n min(Tbest, forward.surface.bounds[idx_T][1]-eps))\n x_surface[idx_T] = Tbest\n forward.surface.init[idx_T] = T\n\n # Update the full state vector\n x[forward.idx_surface] = x_surface\n\n # We record these initial values in the geometry object - the only\n # \"stateful\" part of the retrieval\n geom.x_surf_init = x[forward.idx_surface]\n geom.x_RT_init = x[forward.idx_RT]\n\n return x", "def _compute_grid_state(self, for_id):\n own = np.zeros_like(self._map, float)\n own_pos = self._id2pos[for_id]\n own[own_pos] = 1\n\n thieves = (self._map == THIEF ).astype(float)\n guardians = (self._map == GUARDIAN).astype(float)\n\n own_team = self.id2team[for_id]\n if own_team == THIEF:\n teammates = thieves\n opponents = guardians\n else:\n teammates = guardians\n opponents = thieves\n\n treasure_channel = (self._map == TREASURE).astype(float)\n\n # Channels first\n return np.stack([own, teammates, opponents, self._walls_channel, treasure_channel])", "def addIsosurface(self, level, color):\n isosurface = self._Isosurface(parent=self)\n isosurface.setColor(color)\n if callable(level):\n isosurface.setAutoLevelFunction(level)\n else:\n isosurface.setLevel(level)\n isosurface.sigItemChanged.connect(self._isosurfaceItemChanged)\n\n self._isosurfaces.append(isosurface)\n\n self._updateIsosurfaces()\n\n self.sigIsosurfaceAdded.emit(isosurface)\n return isosurface", "def getItems(self):\n return self.getCutPlanes() + self.getIsosurfaces()", "def drfl_dsurface(self, x_surface, geom):\n\n return np.zeros((self.n_wl, 1))", "def dLs_dsurface(self, x_surface, geom):\n\n return np.zeros((self.n_wl, 1))", "def update(self) -> pygame.Surface:\n return self.surface", "def _get_state(self, obs_env):\n state = []\n obs_env = obs_env.reshape(self.n_agent, 2)\n for i in range(self.n_agent):\n local_obs = obs_env[i]\n if self.agent.startswith('ia2c'):\n imgs = [local_obs]\n\n if not self.agent == 'ia2c_fp': # ia2c\n for j in np.where(self.neighbor_mask[i] == 1)[0]:\n imgs.append(obs_env[j])\n imgs = np.array(imgs, dtype=np.float32)\n fps = np.array([], dtype=np.float32)\n\n else: # ia2c_fp\n fps = []\n for j in np.where(self.neighbor_mask[i] == 1)[0]:\n imgs.append(obs_env[j])\n fps.append(self.fp[j])\n imgs = np.array(imgs, dtype=np.float32)\n fps = np.concatenate(fps).astype(np.float32)\n\n agent_obs = [imgs, fps]\n\n else: # ma2c\n agent_obs = local_obs.astype(np.float32)\n\n state.append(agent_obs)\n\n return state\n # return [[obs_env, np.array([], dtype=np.float32)] for _ in 
range(self.n_agent)]", "def project(self):\n # update positions compared to observer\n pos = self.pos.copy()\n\n # center coordinates around obs coords\n pos[:, 0] -= np.sin(self.theta) * self.V * self.time_elapsed\n pos[:, 2] -= np.cos(self.theta) * self.V * self.time_elapsed\n\n # wrap in a novel box around obs coords\n for i in range(3):\n pos[:, i] = self.bounds[2*i] + np.mod(pos[:, i], self.bounds[2*i + 1]-self.bounds[2*i])\n\n d = (pos**2).sum(axis=1)**.5\n # ind_visible = (pos[:, 2] > 0) * (self.d_min<d) * (d<self.d_max)\n ind_visible = (pos[:, 2] > self.d_min) * (d < self.d_max)\n N_visible = int(np.sum(ind_visible))\n\n # self.state = [X, Y, size]\n self.state = np.ones((N_visible, 7))\n for i in range(2):\n self.state[:, i] = self.mag * pos[ind_visible, i] / pos[ind_visible, 2]\n print(i, self.state[:, i].min(), self.state[:, i].max())\n self.state[:, 2] = self.size / d[ind_visible]\n\n # colors do not change\n self.state[:, 3:] = pos[ind_visible, 3:]\n\n # TODO: larger transparency at larger distance => too fancy :-)\n # self.state[:, 2] = self.size / d[ind_visible]\n\n # for i in range(3):\n # self.state[:, i] *= (self.bounds[2*i+1] - self.bounds[2*i])\n # self.state[:, i] -= self.bounds[2*i]", "def getState(game):\n pixels = pygame.surfarray.array3d(game.screen)[:]\n pixels = np.array([pixels], dtype=float)\n\n # Here we will preprocess the pixel data\n bitsize = game.screen.get_bitsize() / 4\n pixels *= 1 / 2**bitsize # Normalize to [0..1]\n\n return pixels", "def plot_fft_isosurfaces(description: str, omega: np.ndarray, \n ut: np.ndarray, filename: str) -> None:\n\n print(f'Plotting fft isosurfaces: {description}...')\n\n (omega_x_grid, omega_y_grid, omega_z_grid) = np.meshgrid(omega, omega, \n omega, indexing='ij')\n n = len(omega)\n\n num_slices = ut.shape[0]\n # We only want to plot the first, middle, and last time slices.\n slices = [0, num_slices//2, num_slices-1]\n\n titles = [f'{description}: slice {slice}' for slice in slices]\n\n num_rows = 1\n num_cols = len(slices)\n fig = make_subplots(\n rows=num_rows, \n cols=num_cols,\n specs=[\n [{'is_3d': True}]*num_cols,\n ]*num_rows,\n subplot_titles=titles,\n )\n for s in range(len(slices)):\n ut_slice = np.reshape(ut[slices[s],:], (n, n, n))\n fig.add_trace(\n go.Isosurface(\n x=omega_x_grid.flatten(), \n y=omega_y_grid.flatten(), \n z=omega_z_grid.flatten(), \n value=normalize(ut_slice).flatten(),\n opacity=0.5,\n isomin=0.6,\n isomax=0.9,\n surface_count=3,\n colorscale=\"Viridis\",\n ),\n row=1,\n col=s+1\n )\n fig.update_layout(\n scene_xaxis_title_text=\"omega_x\",\n scene_yaxis_title_text=\"omega_y\",\n scene_zaxis_title_text=\"omega_z\",\n scene2_xaxis_title_text=\"omega_x\",\n scene2_yaxis_title_text=\"omega_y\",\n scene2_zaxis_title_text=\"omega_z\",\n scene3_xaxis_title_text=\"omega_x\",\n scene3_yaxis_title_text=\"omega_y\",\n scene3_zaxis_title_text=\"omega_z\",\n )\n pio.write_html(fig, filename)", "def active_surfaces(self):\n return [surface for surface in self._surfaces if surface.active]", "def _dsurface_dbsf(self):\n # bare soil contribution\n I_bs = (self.I0 * self._mu_0\n * self.SRF.brdf(self.t_0, self.t_ex,\n self.p_0, self.p_ex,\n param_dict=self.param_dict))\n\n\n Isurf = (np.exp(-(self.V.tau / self._mu_0) -\n (self.V.tau / self._mu_ex))\n ) * I_bs * np.ones_like(self.t_0)\n\n return self.SRF.NormBRDF * (I_bs - Isurf)", "def full_output_state(self):\n state = self.circuit.global_input_state\n for layer in range(self.circuit.element_layers):\n #TODO: a way to update the state one layer at a time\n 
#instead of one element at a time might be slightly faster\n for element in self.circuit.elements[layer]:\n state = self.evolve_element(state, element)\n return state", "def _get_surfaces(idf):\n surface_types = [\n 'BUILDINGSURFACE:DETAILED',\n 'FENESTRATIONSURFACE:DETAILED',\n ]\n surfaces = []\n for surface_type in surface_types:\n surfaces.extend(idf.idfobjects[surface_type])\n\n return surfaces", "def surface(self):\n return self._surface", "def Surface(self, *args):\n return _Adaptor3d.Adaptor3d_HSurface_Surface(self, *args)", "def pure_energy_state(self, i):\n return unvectorize(\n ketbra(self.s, i, i)\n for i in vectorize(i)\n )", "def assemble_hyper_surface(self):\n def get_row_no(k0):\n hdf = pd.HDFStore(self.ewlibpath, 'r')\n hdf0 = hdf.get(k0)\n idx = np.where((np.abs(hdf0.th_wavelength-self.wavelength)<=0.025)\n & (np.abs(hdf0.th_EP - self.ep)<=0.02)\n & (hdf0.element == self.element))[0]\n if idx.size!=0:\n idx = idx[0]\n else:\n idx = -1\n hdf.close()\n return idx\n\n if self.interpolated:\n raise NotImplementedError(\"Interpolated model doesn't have such method!\")\n\n row_no = get_row_no(self._keys[0])\n if row_no == -1:\n warnings.warn(\"Data for interpolation is not enough!\")\n self._hyper_surface = None\n return self._hyper_surface\n else:\n f = h5py.File(self.ewlibpath, 'r')\n if self.cal == \"nlte\":\n ews = [np.array(f[k+\"/table\"])[row_no][1][3] for k in self._keys]\n else:\n ews = [np.array(f[k+\"/table\"])[row_no][1][2] for k in self._keys]\n f.close()\n\n\n datapoints = np.concatenate((np.array(self._atmos_pars), np.transpose([ews])), axis=1)\n datapoints = datapoints[~np.isnan(datapoints).any(axis=1)]\n\n if datapoints.shape[0] <= 3:\n warnings.warn(\"Data for interpolation is not enough!\")\n self._hyper_surface = None\n del datapoints\n return self._hyper_surface\n else:\n self._hyper_surface = datapoints\n print(\"Grid is prepared!\")\n del datapoints\n return self._hyper_surface", "def image(self, state):\n valid_time = _to_datetime(state.valid_time)\n\n # 15 minute/1 hour slice of data?\n window = dt.timedelta(minutes=60) # 1 hour window\n paths = self.locator.find_period(valid_time, window)\n frame = self.loader.load(paths)\n frame = self.select_date(frame, valid_time, window)\n\n # Filter intra-cloud/cloud-ground rows\n if \"intra-cloud\" in state.variable.lower():\n frame = frame[frame[\"flash_type\"] == \"IC\"]\n elif \"cloud-ground\" in state.variable.lower():\n frame = frame[frame[\"flash_type\"] == \"CG\"]\n\n # EarthNetworks validity box (not needed if tiling algorithm)\n longitude_range = (26, 40)\n latitude_range = (-12, 4)\n x_range, y_range = geo.web_mercator(longitude_range, latitude_range)\n\n x, y = geo.web_mercator(frame[\"longitude\"], frame[\"latitude\"])\n frame[\"x\"] = x\n frame[\"y\"] = y\n pixels = 256\n canvas = datashader.Canvas(\n plot_width=pixels,\n plot_height=pixels,\n x_range=x_range,\n y_range=y_range,\n )\n\n if \"density\" in state.variable.lower():\n # N flashes per pixel\n agg = canvas.points(frame, \"x\", \"y\", datashader.count())\n else:\n frame[\"since_flash\"] = self.since_flash(frame[\"date\"], valid_time)\n agg = canvas.points(frame, \"x\", \"y\", datashader.max(\"since_flash\"))\n\n # Note: DataArray objects are not JSON serializable, .values is the\n # same data cast as a numpy array\n x = agg.x.values.min()\n y = agg.y.values.min()\n dw = agg.x.values.max() - x\n dh = agg.y.values.max() - y\n image = np.ma.masked_array(\n agg.values.astype(np.float), mask=np.isnan(agg.values)\n )\n if \"density\" in 
state.variable.lower():\n image[image == 0] = np.ma.masked # Remove pixels with no data\n\n # Update color_mapper\n color_mapper = self.color_mappers[\"image\"]\n if \"density\" in state.variable.lower():\n color_mapper.palette = bokeh.palettes.all_palettes[\"Spectral\"][8]\n color_mapper.low = 0\n color_mapper.high = agg.values.max()\n else:\n color_mapper.palette = bokeh.palettes.all_palettes[\"RdGy\"][8]\n color_mapper.low = 0\n color_mapper.high = 60 * 60 # 1 hour\n\n # Update tooltips\n for hover_tool in self.hover_tools[\"image\"]:\n hover_tool.tooltips = self.tooltips(state.variable)\n hover_tool.formatters = self.formatters(state.variable)\n\n if \"density\" in state.variable.lower():\n units = \"events\"\n else:\n units = \"seconds\"\n\n data = {\n \"x\": [x],\n \"y\": [y],\n \"dw\": [dw],\n \"dh\": [dh],\n \"image\": [image],\n }\n meta_data = {\n \"variable\": [state.variable],\n \"date\": [valid_time],\n \"units\": [units],\n \"window\": [window.total_seconds()],\n }\n data.update(meta_data)\n self.sources[\"image\"].data = data", "def get_observation_(self):\n obs = np.zeros(self.STATE_SIZE, dtype=np.float)\n obs[:,:,0] = np.array(self.input_img_.data).reshape((self.STATE_SIZE[0:2]))\n\n if self.debug_:\n self.debugger_.show_input_occ_grid(self.input_img_)\n self.debugger_.show_input_image(obs[:,:,0])\n return obs", "def inpaint(self):\n\n self._validate_inputs()\n self._initialize_attributes()\n\n start_time = time.time()\n keep_going = True\n while keep_going:\n self._find_front()\n print(self.front.shape)\n #imwrite('front.jpg',self.front)\n if self.plot_progress:\n self._plot_image()\n\n self._update_priority()\n\n target_pixel = self._find_highest_priority_pixel()\n find_start_time = time.time()\n source_patch = self._find_source_patch(target_pixel)\n #print('Time to find best: %f seconds'\n #% (time.time()-find_start_time))\n\n self._update_image(target_pixel, source_patch)\n\n keep_going = not self._finished()\n\n print('Took %f seconds to complete' % (time.time() - start_time))\n return self.working_image" ]
[ "0.7749475", "0.64308286", "0.6121801", "0.6089994", "0.5459395", "0.5374221", "0.49754205", "0.49631813", "0.494308", "0.49310347", "0.49100575", "0.48892054", "0.4882994", "0.48674196", "0.4839715", "0.47886744", "0.4778599", "0.476231", "0.47320402", "0.47290966", "0.47155273", "0.4709471", "0.46993586", "0.4696804", "0.46892542", "0.46792185", "0.46694776", "0.4662081", "0.4648043", "0.46425027" ]
0.71063083
1
Compute range info (min, min positive, max) from data
def _computeRangeFromData(data):
    if data is None:
        return None

    dataRange = min_max(data, min_positive=True, finite=True)
    if dataRange.minimum is None:  # Only non-finite data
        return None

    if dataRange is not None:
        min_positive = dataRange.min_positive
        if min_positive is None:
            min_positive = float('nan')
        return dataRange.minimum, min_positive, dataRange.maximum
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_range(cls, data: tuple or list) -> float:\n cls._data_validation(data)\n max_ = cls.get_max(data)\n min_ = cls.get_min(data)\n return float(max_ - min_)", "def data_range(x):\n return max(x)-min(x)", "def m_to_range(self, data):\n return (data - self._min_range_m) / self._total_range", "def _full_value_range(self):\n min_value, max_value = self._raw_data.data_range\n return max_value - min_value", "def calcrange(a4lim,data):\r\n a4range=N.intersect1d(N.where(data>a4lim[0])[0],N.where(data<a4lim[1])[0])\r\n return a4range", "def get_range_info(self):\n with open(self.range_path, 'r') as _file:\n for line in _file.readlines():\n list0 = line.strip().split('-')\n range_dict = {\n 'min': int(list0[0], 16),\n 'max': int(list0[1], 16),\n 'max_offset': int(list0[1], 16) - int(list0[0], 16),\n }\n self.ranges.append(range_dict)", "def data_range(xs: List[float]) -> float:\n return max(xs) - min(xs)", "def summarize_ranges(self, ranges):\n if len(ranges) == 0: return []\n min_ = 'min'\n max_ = 'max'\n for r in ranges:\n if r[0][0] == \"min\":\n r[0][0] = min_\n else:\n min_ = r[0][0]\n if r[-1][1] == \"max\":\n r[-1][1] = max_\n else:\n max_ = r[-1][1]\n return ranges[-1]", "def getColorRange(self):\n vmax=self.data_matrix.max()\n vmin=self.data_matrix.min()\n\n if vmax * vmin < 0: # ie number range spans +ve and -ve\n vmax = max([vmax, abs(vmin)])\n vmin = -1*vmax\n\n return vmax,vmin", "def findRanges(data_grouped):\n ranges = []\n for i in data_grouped.columns:\n theRange = (data_grouped[i].min(), data_grouped[i].max())\n ranges.append(theRange)\n return ranges", "def range_to_m(self, data):\n return data * self._total_range + self._min_range_m", "def range(self):\n lows, highs = [], []\n for graph in self._graphs.values():\n low, high = graph.range()\n lows.append(low)\n highs.append(high)\n return (min(lows), max(highs))", "def get_range(self) -> tuple[int, int]:\n return self.range_from, self.range_to", "def _parse_vrange(self, data):\n vmin = self.config.get('vmin', np.nanmin(data))\n vmax = self.config.get('vmax', np.nanmax(data))\n vrange = self.config.get('vrange', None)\n\n # Parse vmin, vmax\n if isinstance(vmin, str):\n vmin = np.nanquantile(data, q=float(vmin))\n if isinstance(vmax, str):\n vmax = np.nanquantile(data, q=float(vmax))\n\n # Parse vrange\n if vrange is True:\n vrange = max(abs(np.nanmin(data)), abs(np.nanmax(data)))\n elif isinstance(vrange, str):\n vrange = abs(np.nanquantile(data, q=(float(vrange), 1-float(vrange)))).max()\n\n if vrange is not None:\n if isinstance(vrange, (list, tuple, np.ndarray)):\n vmin, vmax = vrange\n else:\n vmin, vmax = -vrange, vrange\n return vmin, vmax", "def in_range(data, minval=-np.inf, maxval=np.inf):\n return (minval <= data) & (data <= maxval)", "def ex_range(data):\n a, b, step = _cleanse_range_args(data)\n return list(range(a, b+sign(step), step))", "def range(df):\r\n\r\n\tdf_range_dict = dict()\r\n\r\n\tfor i, col in enumerate(df.columns):\r\n\t\tdf_range_dict[col] = [df[col].max(), df[col].min(), df[col].max() - df[col].min()]\r\n\r\n\tdf_range = pd.DataFrame(df_range_dict, index=['Max Value', 'Min Value', 'Range (Max - Min)'])\r\n\tpd.set_option('precision', 2) # set output display precision in 2 decimal places\r\n\r\n\treturn df_range", "def calcrange(data, log=False):\n xmin, xmax = None, None\n for x in data:\n if not log or x > 0.:\n if xmin is None or x < xmin: xmin = x\n if xmax is None or x > xmax: xmax = x\n\n if xmin is None and xmax is None:\n if log:\n return 0.1, 1.\n else:\n return 0., 1.\n else:\n return 
xmin, xmax", "def range(self):\n\n return time_stat(self, stat=\"range\")", "def get_range(df, col):\n return df[col].min(), df[col].max()", "def _calc_range(self) -> np.ndarray:\n if self._is_ct25k():\n range_resolution = 30\n n_gates = 256\n else:\n n_gates = int(self.metadata[\"number_of_gates\"])\n range_resolution = int(self.metadata[\"range_resolution\"])\n return np.arange(n_gates) * range_resolution + range_resolution / 2", "def scale_range(data, minTo, maxTo):\n minFrom = np.min(data)\n maxFrom = np.max(data)\n \n scaled_data = []\n \n for point in data:\n new_point = minTo + (maxTo - minTo) * ((point - minFrom)/(maxFrom - minFrom))\n scaled_data.append(new_point)\n \n return scaled_data", "def map_to_range(val, old_min, old_max, new_min, new_max):\n return new_max - (val - old_min) * (new_max - new_min) / (old_max - old_min)", "def normalizeToRange(data,max=255,min=0):\n if min: return (max-min)*normalize(data)+min\n else: return max*normalize2(data) # speeds up operation", "def find_min_max(data):\n v = [i[1] for i in data]\n extremes = [min(v), max(v)]\n logging.info('Calculated extremes: %s', extremes)\n return extremes", "def get_range(lst):\n return float(max(lst)) - float(min(lst))", "def _get_range(self):\n return tuple((0, m, 1) for m in self.level_shapes[0])", "def range(series):\n return min(series), max(series)", "def range(self) -> ty.Tuple[float, float]:\r\n ...", "def _get_min_max_value(min, max, value=None, step=None):\n # Either min and max need to be given, or value needs to be given\n if value is None:\n if min is None or max is None:\n raise ValueError('unable to infer range, value from: ({0}, {1}, {2})'.format(min, max, value))\n diff = max - min\n value = min + (diff / 2)\n # Ensure that value has the same type as diff\n if not isinstance(value, type(diff)):\n value = min + (diff // 2)\n else: # value is not None\n if not isinstance(value, Real):\n raise TypeError('expected a real number, got: %r' % value)\n # Infer min/max from value\n if value == 0:\n # This gives (0, 1) of the correct type\n vrange = (value, value + 1)\n elif value > 0:\n vrange = (-value, 3*value)\n else:\n vrange = (3*value, -value)\n if min is None:\n min = vrange[0]\n if max is None:\n max = vrange[1]\n if step is not None:\n # ensure value is on a step\n tick = int((value - min) / step)\n value = min + tick * step\n if not min <= value <= max:\n raise ValueError('value must be between min and max (min={0}, value={1}, max={2})'.format(min, value, max))\n return min, max, value" ]
[ "0.74131215", "0.7375797", "0.7371305", "0.73698366", "0.7079913", "0.69400847", "0.6933809", "0.68565273", "0.6837749", "0.6828716", "0.68116766", "0.677769", "0.6730659", "0.66858673", "0.6631129", "0.661481", "0.66129404", "0.6605451", "0.65986764", "0.65552515", "0.6548753", "0.6524711", "0.65024316", "0.64580476", "0.6457069", "0.6452427", "0.64333034", "0.6412027", "0.6397173", "0.638291" ]
0.80179286
0
Add an isosurface to this item.
def addIsosurface(self, level, color):
    isosurface = self._Isosurface(parent=self)
    isosurface.setColor(color)
    if callable(level):
        isosurface.setAutoLevelFunction(level)
    else:
        isosurface.setLevel(level)
    isosurface.sigItemChanged.connect(self._isosurfaceItemChanged)

    self._isosurfaces.append(isosurface)

    self._updateIsosurfaces()

    self.sigIsosurfaceAdded.emit(isosurface)
    return isosurface
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_surface(self,s):\n self.surfaces.append(s)\n s.system=self.surfaces", "def _addClicked(self):\n volume = self.volume()\n if volume is not None:\n dataRange = volume.getDataRange()\n if dataRange is None:\n dataRange = 0., 1.\n\n volume.addIsosurface(\n numpy.mean((dataRange[0], dataRange[-1])),\n '#0000FF')", "def isosurface(grd_name, isosurface_name, level, color):\r\n # pymol_out = PyMOLCommands.load(fname, grd_name)\r\n pymol_out = f'\\ncmd.isosurface(name=\"{isosurface_name}\", map=\"{grd_name}\", level=\"{level}\")\\n'\r\n pymol_out += f'\\ncmd.color(\"{color}\", \"{isosurface_name}\")'\r\n return pymol_out", "def isosurface(self):\n return self._isosurface()", "def removeIsosurface(self, isosurface):\n if isosurface not in self.getIsosurfaces():\n _logger.warning(\n \"Try to remove isosurface that is not in the list: %s\",\n str(isosurface))\n else:\n isosurface.sigItemChanged.disconnect(self._isosurfaceItemChanged)\n self._isosurfaces.remove(isosurface)\n self._updateIsosurfaces()\n self.sigIsosurfaceRemoved.emit(isosurface)", "def _isosurfaceItemChanged(self, event):\n if event == Item3DChangedType.ISO_LEVEL:\n self._updateIsosurfaces()", "def add_surf(self, surf: Surface, pos: Tuple[int, int]):\n self.manual_surfaces.append((pos, surf))", "def get_fsurface(self, path):\n raise NotImplementedError", "def add_stock(self, symbol, quantity, unit_price):\n # TODO write SQL statement to grab unit_price\n stock_price_total = quantity * unit_price # TODO write SQL statement\n # TODO deduct stock quantity from market ??\n self.portfolios.append((symbol, quantity, unit_price))\n self.value += stock_price_total", "def add(self, data, **kwargs):\n if not isinstance(data, (FDNetwork)):\n return super(Viewer, self).add(data, **kwargs)\n\n if kwargs.get(\"as_wireframe\"):\n del kwargs[\"as_wireframe\"]\n return super(Viewer, self).add(data, **kwargs)\n\n artist = Artist(data, viewer=self, context=\"Viewer\", **kwargs)\n self.artists.append(artist)\n artist.draw()\n artist.add()", "def addWireframe(self, name, wireframe):\n\n self.wireframes[name] = wireframe", "def addLayer(self, layer):\n self.layers.append(layer)", "def _updateIsosurfaces(self):\n # Sorting using minus, this supposes data 'object' to be max values\n sortedIso = sorted(self.getIsosurfaces(),\n key=lambda isosurface: - isosurface.getLevel())\n self._isogroup.children = [iso._getScenePrimitive() for iso in sortedIso]", "def addStockType(self, item):\n # TODO\n # hint: Add an item to this.stocklist\n # No. 
6\n self.stocklist.append(item)", "def add_portfolio(self, portfolio):\n self.portfolios.append(portfolio)", "def add_portfolio(self, portfolio):\n self.portfolios.append(portfolio)", "def addStock(self,Stock):\n self.DoS[Stock.get_Symbol()]=Stock", "def add_layer(self, layer):\n self.__layers.append(layer)", "def add_station(self, station):\n self.__stations.append(station)", "def add_feature(self, feature):\n self.features += [feature]\n for stock in self.stocks:\n feature(self.stock_data[stock])", "def add(self, output_svg: Drawing) -> None:\n pass", "def add_area_element(self, obj, typ_sofi, layer):\n\n qd = AreaElement(obj)\n\n pts = rs.SurfacePoints(obj)\n\n qd.n1 = self.nodes.add(Node(None, pts[0]))\n qd.n2 = self.nodes.add(Node(None, pts[1]))\n qd.n3 = self.nodes.add(Node(None, pts[3]))\n qd.n4 = self.nodes.add(Node(None, pts[2]))\n\n qd.layer = layer\n\n self.area_elements.add(qd)", "def add_layer(self, *args):\n\n nm = None\n\n #check to see if we're sending an already formed layer to add - used for data file\n if len(args) == 1 & isinstance(args[0], QgsVectorLayer):\n print('Importing {} as a vector'.format(args[0]))\n self.project.addMapLayer(args[0])\n nm = args[0].name()\n\n elif len(args) > 1:\n print('Importing {} as a vector'.format(args[0]))\n print(args)\n self.project.addMapLayer(QgsVectorLayer(*args))\n nm = args[1]\n\n if nm:\n self.get_layer(nm)\n\n else:\n print()\n print('***Bad map layer for {}***'.format(str(args)))\n print()", "def add_layer(self, layer):\n\n self._layers.append(layer)", "def add_layer(self, layer):\n idx = len(self.dict_topo)\n idx += 1\n self.dict_topo[idx] = layer", "def add_vertex(self, item: Any, kind: str) -> None:\n if item not in self._vertices:\n self._vertices[item] = _Vertex(item, kind)", "def add_to_inventory(self, item_to_add_to_inventory):\n raise NotImplementedError(\"Subclasses define what adding to the inventory entails\")", "def AddLayer(self, *args):\n return _XCAFDoc.XCAFDoc_LayerTool_AddLayer(self, *args)", "def _removeClicked(self):\n isosurface = self.isosurface()\n if isosurface is not None:\n volume = isosurface.parent()\n if volume is not None:\n volume.removeIsosurface(isosurface)", "def add_stock(self, symbol):\n verbose_message(\"Adding \" + symbol + \"...\")\n if symbol not in self.stocks:\n self.stocks += [symbol]\n\n data = StockData()\n\n data.name = StockDataCollection.get_stock_name(symbol)\n data.symbol = symbol\n data.market = StockDataCollection.get_market_data(symbol,\n str(self.start_date)[:USEFUL_TIMESTAMP_CHARS],\n str(self.end_date)[:USEFUL_TIMESTAMP_CHARS])\n\n # create a list of dates in the YYYY-MM-DD format\n data.str_dates = [str(i)[:USEFUL_TIMESTAMP_CHARS] for i in list(data.market.index)]\n data.dates = data.market.index\n\n for i in data.dates:\n if i not in self.dates:\n self.dates += [i]\n self.dates.sort()\n self.str_dates = [str(i)[:USEFUL_TIMESTAMP_CHARS] for i in list(self.dates)]\n\n for collection_function in self.features:\n collection_function(data)\n\n data.position = []\n for _ in data.dates:\n data.position += [0]\n if type(self.cash) is not pd.DataFrame:\n self.cash += [self.starting_capital]\n\n data.position = pd.DataFrame({\"Position\": data.position}).set_index(data.dates)\n if type(self.cash) is not pd.DataFrame:\n self.cash = pd.DataFrame({\"cash\": self.cash}).set_index(data.dates)\n debug_message(data)\n self.shuffled_data_reset()\n self.stock_data[symbol] = data" ]
[ "0.6740248", "0.62558913", "0.60210866", "0.5904212", "0.5893358", "0.577374", "0.53729206", "0.52408725", "0.519066", "0.5183285", "0.5161759", "0.5116448", "0.5109245", "0.5073585", "0.5032729", "0.5032729", "0.5009403", "0.4923389", "0.49232364", "0.4879343", "0.48575088", "0.48441488", "0.48291886", "0.47914204", "0.47569522", "0.47477052", "0.467216", "0.46614528", "0.46555722", "0.4654333" ]
0.72267526
0
Remove an isosurface from this item.
def removeIsosurface(self, isosurface): if isosurface not in self.getIsosurfaces(): _logger.warning( "Try to remove isosurface that is not in the list: %s", str(isosurface)) else: isosurface.sigItemChanged.disconnect(self._isosurfaceItemChanged) self._isosurfaces.remove(isosurface) self._updateIsosurfaces() self.sigIsosurfaceRemoved.emit(isosurface)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _removeClicked(self):\n isosurface = self.isosurface()\n if isosurface is not None:\n volume = isosurface.parent()\n if volume is not None:\n volume.removeIsosurface(isosurface)", "def isosurface(self):\n return self._isosurface()", "def remove(self):\r\n game_ref.remove(self)", "def remove_layer(self, layer_pos):\n self.stack.pop(layer_pos)\n return", "def remove_stock(self, stock):\n if stock in self.stocks:\n self.stocks.remove(stock)\n if stock in self.stock_data.keys():\n del self.stock_data[stock]", "def remove_stock(self, symbol, quantity):\n for p_symbol, p_quantity, p_unit_price in self.portfolios:\n if p_symbol == symbol:\n logging.debug(\"Found %s, %s, %s\" %\n (p_symbol, p_quantity, p_unit_price))\n # First delete completely\n self.portfolios.remove((p_symbol,\n p_quantity,\n p_unit_price))\n # Check if some quantity of stocks should remain\n if quantity < p_quantity:\n # Keep remainder\n self.portfolios.append((p_symbol,\n p_quantity-quantity,\n p_unit_price))\n # Reduce value of portfolio by value of stocks removed\n total_price = quantity * p_unit_price\n self.value -= total_price", "def remove_curve(self, name):\n self._curve_reg.__delitem__(name)", "def removeItem(self, item):\n # remove this item from our list\n if item in self.sceneItems:\n self.sceneItems.remove(item)\n\n # remove it from the scene\n self.scene.removeItem(item)\n\n # update the viewport\n self.viewport().update()", "def removeProjection(self, iremove):\n # check that dims of Projections and Markers are the same\n nprojProjs = len(self._ProjectionList._list)\n nprojMarker = len(self.Markers[0].xProj)\n if (nprojProjs != nprojMarker):\n \"Houston: we have a problem!\"\n \"Numbers of projections in Markers and Projections do not match\"\n kk = -1\n for proj in self._ProjectionList._list:\n kk = kk + 1\n ii = proj._index\n if (ii == iremove):\n break\n self._ProjectionList._list.pop(kk)\n self._projIndices.remove(iremove)\n for Marker in self.Markers:\n Marker.xProj.pop(kk)\n Marker.yProj.pop(kk)\n Marker._projIndices.remove(iremove)\n if self.verbose:\n print((\"Projection \" + str(iremove) + \" removed from TiltSeries\"))", "def RemoveLayer(self, *args):\n return _XCAFDoc.XCAFDoc_LayerTool_RemoveLayer(self, *args)", "def delete(self):\n if self.shape is not None:\n self.shape.delete()\n if self in shared.obstacles:\n shared.obstacles.remove(self)", "def remove_feature(self, name):\n logging.info('removing feature %s' % name)\n self.fguide.remove(name)\n self.dataset.pop(name)", "def _clearLayer(self, layer=0):\n for i in self._existingLayerItems(layer):\n self._plt.removeItem(i)", "def remove_object(self, name):\n if name in self._objects:\n del self._objects[name]\n else:\n raise ValueError('Object {} not in scene!'.format(name))\n self.close_renderer()", "def removeShip(self, shipID):\n myShip = self.ships[shipID]\n # remove captain first\n myCaptain = myShip.myCaptain\n self.removeCaptain(myCaptain.id)\n # remove ship\n del self.ships[shipID]", "def remove_from_hand(self):\n pass", "def remove(self) -> None:\n self.map.remove_brush(self)", "def removeScene(self):\n del self.scene, self.imgPixmapItem", "def remove_stock(self, item_id : int):\n removal_flag = False\n for item in self._item:\n if item.id == item_id:\n self._item.remove(item)\n removal_flag = True\n break\n\n if removal_flag == True:\n break\n\n if removal_flag == False:\n raise ItemNotFound(item_id)", "def deleteLayer(self, id):\n\n # just in case we got None\n if id is None:\n return\n\n # see if what we are about to remove might 
be visible\n layer = self.layer_mapping[id]\n visible = layer.visible\n\n del layer\n self.layer_z_order.remove(id)\n\n # if layer was visible, refresh display\n if visible:\n self.Refresh()", "def unload(self):\n if self.material_background:\n self.parent.removeItem(self.material_background)\n self.material_background = None\n if self.mod_background:\n self.parent.removeItem(self.mod_background)\n self.mod_background = None\n if self.material_foreground:\n self.parent.removeItem(self.material_foreground)\n self.material_foreground = None\n if self.mod_foreground:\n self.parent.removeItem(self.mod_foreground)\n self.mod_foreground = None\n if self.liquid:\n self.parent.removeItem(self.liquid)\n self.liquid = None", "def removeIrisToOcc(self):\n\t\tshas = self._getShapes()\n\t\tfor sha in shas:\n\t\t\tif sha.a.iris_Occ.exists:\n\t\t\t\tsha.a.iris_Occ.delete()", "def removeItem(self, item):\n if item.type not in self.__inventory__:\n return\n for i in range(0, len(self.__inventory__[item.type])):\n if self.__inventory__[item.type][i].id == item.id:\n self.__inventory__[item.type].pop(i)\n return", "def remove_layer(self, layer_key_name):\n del(self.config.layers[layer_key_name])", "def remove_station(self, station):\n self.__stations.remove(station)", "def track_del(self,posicion):\n self.tracks.pop(posicion)", "def remove_layer(self, layer_pos):\n\n # If not within feasible bounds, return\n if layer_pos <= 1 or layer_pos > self.number_hidden_layers:\n return\n\n # We set the number of input and output dimensions for the layer to be\n # added and for the ones in the architecture that will be connected to it\n\n # We delete the layer in pos layer_pos\n self.dims = np.delete(self.dims, layer_pos)\n self.init_functions = np.delete(self.init_functions, layer_pos)\n self.act_functions = np.delete(self.act_functions, layer_pos)\n self.batch_norm = np.delete(self.batch_norm, layer_pos)\n self.dropout = np.delete(self.dropout, layer_pos)\n self.dropout_probs = np.delete(self.dropout_probs, layer_pos)\n\n # Finally the number of hidden layers is updated\n self.number_hidden_layers = self.number_hidden_layers - 1", "def remove(self, i):\n assert self.apply_remove_point_rules((self._ys[i], self._xs[i])), 'Removal rules are not satisfied'\n\n if len(self.get_raw_xs()) > 5:\n if self.is_settable:\n self._remove_xs(i)\n self._remove_ys(i)\n self.is_changed = True\n else:\n raise ValueError('graph '+str(self.name)+' is not is_settable')\n elif not self.is_raw_data:\n raise ValueError('Must be at least 5 points for interpolation.')", "def removeFluxSurfaces(self):\n if self._fluxOverlayHandles is not None:\n for h in self._fluxOverlayHandles:\n h.remove()\n\n self._fluxOverlayHandles = []\n self.overlayFluxSurfaces = False", "def remove_item(self, idx_of_item):\n del self.items[idx_of_item]" ]
[ "0.7447585", "0.58254415", "0.5618442", "0.557807", "0.5543547", "0.54468757", "0.5415708", "0.54150975", "0.53558755", "0.533654", "0.53338057", "0.5329659", "0.53128797", "0.5312818", "0.5301393", "0.52970207", "0.52678525", "0.5265467", "0.52498937", "0.5239983", "0.5233106", "0.52129185", "0.5190236", "0.51683474", "0.5150279", "0.51365364", "0.51352006", "0.5130295", "0.5126869", "0.5109566" ]
0.8135996
0
Handle update of isosurfaces upon level changed
def _isosurfaceItemChanged(self, event): if event == Item3DChangedType.ISO_LEVEL: self._updateIsosurfaces()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _updated(self, event=None):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n\n elif event in (ItemChangedType.COLORMAP,\n Item3DChangedType.INTERPOLATION):\n self._updateScenePrimitive()\n super(ComplexIsosurface, self)._updated(event)", "def _levelChanged(self, event):\n if event == items.Item3DChangedType.ISO_LEVEL:\n model = self.model()\n if model is not None:\n index = self.index(column=1)\n model.dataChanged.emit(index, index)", "def _updateIsosurfaces(self):\n # Sorting using minus, this supposes data 'object' to be max values\n sortedIso = sorted(self.getIsosurfaces(),\n key=lambda isosurface: - isosurface.getLevel())\n self._isogroup.children = [iso._getScenePrimitive() for iso in sortedIso]", "def _computeIsosurface(self):\n data = self.getData(copy=False)\n\n if data is None:\n if self.isAutoLevel():\n self._level = float('nan')\n\n else:\n if self.isAutoLevel():\n st = time.time()\n try:\n level = float(self.getAutoLevelFunction()(data))\n\n except Exception:\n module_ = self.getAutoLevelFunction().__module__\n name = self.getAutoLevelFunction().__name__\n _logger.error(\n \"Error while executing iso level function %s.%s\",\n module_,\n name,\n exc_info=True)\n level = float('nan')\n\n else:\n _logger.info(\n 'Computed iso-level in %f s.', time.time() - st)\n\n if level != self._level:\n self._level = level\n self._updated(Item3DChangedType.ISO_LEVEL)\n\n if numpy.isfinite(self._level):\n st = time.time()\n vertices, normals, indices = MarchingCubes(\n data,\n isolevel=self._level)\n _logger.info('Computed iso-surface in %f s.', time.time() - st)\n\n if len(vertices) != 0:\n return vertices, normals, indices\n\n return None, None, None", "def update_gl_state(self, *args, **kwargs):\n for v in self._subvisuals:\n v.update_gl_state(*args, **kwargs)", "def update(self):\n self.getPower()\n if self._state != STATE_OFF:\n self.getVolume()\n self.getCurrentChannel()", "def _update_level_data(self):\n\t\t# taxes, inhabitants\n\t\tself.tax_base = self.session.db.get_settler_tax_income(self.level)\n\t\tself.inhabitants_max = self.session.db.get_settler_inhabitants_max(self.level)\n\t\tif self.inhabitants > self.inhabitants_max: # crop settlers at level down\n\t\t\tself.inhabitants = self.inhabitants_max\n\n\t\t# consumption:\n\t\t# Settler productions are specified to be disabled by default in the db, so we can enable\n\t\t# them here per level.\n\t\tcurrent_lines = self.get_production_lines()\n\t\tfor (prod_line,) in self.session.db.get_settler_production_lines(self.level):\n\t\t\tif not self.has_production_line(prod_line):\n\t\t\t\tself.add_production_by_id(prod_line)\n\t\t\t# cross out the new lines from the current lines, so only the old ones remain\n\t\t\tif prod_line in current_lines:\n\t\t\t\tcurrent_lines.remove(prod_line)\n\t\tfor line in current_lines[:]: # iterate over copy for safe removal\n\t\t\t# all lines, that were added here but are not used due to the current level\n\t\t\tself.remove_production_by_id(line)\n\t\t# update instance graphics\n\t\tself.update_action_set_level(self.level)", "def UpdateLayers(self):\n pass", "def addIsosurface(self, level, color):\n isosurface = self._Isosurface(parent=self)\n isosurface.setColor(color)\n if callable(level):\n isosurface.setAutoLevelFunction(level)\n else:\n isosurface.setLevel(level)\n isosurface.sigItemChanged.connect(self._isosurfaceItemChanged)\n\n self._isosurfaces.append(isosurface)\n\n self._updateIsosurfaces()\n\n self.sigIsosurfaceAdded.emit(isosurface)\n return isosurface", "def 
update_focal_axes(self):\n #self.update_sigma()\n self.updateGL()", "def isoslider(surface_dic, surface_value_dic, min_value=0):\r\n return \\\r\nf\"\"\"\r\n\\n\\nclass IsoLevel(tk.Variable):\r\n def __init__(self, master, name, level):\r\n tk.Variable.__init__(self, master, value=level)\r\n self.name = name\r\n self.trace('w', self.callback)\r\n\r\n def callback(self, *args):\r\n cmd.isolevel(self.name, self.get())\r\n\r\n def increment(self, event=None, delta=0.1):\r\n self.set(round(float(self.get()) + delta, 2))\r\n\r\n def decrement(self, event=None):\r\n self.increment(None, -0.1)\r\n\r\n\r\nsurface_list = {surface_dic}\r\nsurface_max_list = {surface_value_dic}\r\n\r\ntop = tk.Toplevel(plugins.get_tk_root())\r\n\r\nmaster = tk.Frame(top, padx=10, pady=10)\r\nmaster.pack(fill=\"both\", expand=1)\r\n\r\nfor child in list(master.children.values()):\r\n child.destroy()\r\n\r\n\r\nrow_counter = 0\r\nfor identifier, component_dic in surface_list.items():\r\n # add calculation identifier\r\n tk.Label(master, text=identifier).grid(row=row_counter, column=0, sticky=\"w\")\r\n row_counter += 1\r\n \r\n for component_id, surfaces in component_dic.items():\r\n # add collection label, e.g. superstar or hotspot etc.\r\n tk.Label(master, text=component_id).grid(row=row_counter, column=1, sticky='w')\r\n row_counter += 1\r\n \r\n for i, surface in enumerate(surfaces):\r\n # add grid type label\r\n probe = surface.split(\"_\")[-2]\r\n tk.Label(master, text=probe).grid(row=row_counter, column=2, sticky=\"w\")\r\n \r\n # slider code \r\n v = IsoLevel(master, surface, 5)\r\n e = tk.Scale(master, orient=tk.HORIZONTAL, from_={min_value}, to=surface_max_list[identifier][component_id],\r\n resolution=0.1, showvalue=0, variable=v)\r\n e.grid(row=row_counter, column=3, sticky=\"ew\")\r\n\r\n e = tk.Entry(master, textvariable=v, width=4)\r\n e.grid(row=row_counter, column=4, sticky=\"e\")\r\n master.columnconfigure(3, weight=1)\r\n row_counter += 1\r\n\\n\\n\r\n\"\"\"", "def update(self):\n #self._light.update()\n #self._state = 'on' #self._light.is_on()\n #self._brightness = 80 #self._light.brightness\n _LOGGER.info(\"update() is called\")", "def fDataChanged(self):\n\n self._layerManager.getAimsFeatures()", "def _parentChanged(self, event):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n super(ComplexIsosurface, self)._parentChanged(event)", "def update_surfs(self, surf_path, surf_type, offset=None):\n try:\n self.surf[surf_type]\n except KeyError:\n pass\n # Here should be a dialog for confirm, whether adding data or not\n else:\n self._add_surface(surf_path, surf_type, offset)", "def isosurface(grd_name, isosurface_name, level, color):\r\n # pymol_out = PyMOLCommands.load(fname, grd_name)\r\n pymol_out = f'\\ncmd.isosurface(name=\"{isosurface_name}\", map=\"{grd_name}\", level=\"{level}\")\\n'\r\n pymol_out += f'\\ncmd.color(\"{color}\", \"{isosurface_name}\")'\r\n return pymol_out", "def update(self, surface, keys, current_time, dt, scale):\n self.anykey.update(current_time)\n self.draw(surface)", "def update_focal_axes(self):\n self.update_sigma()\n self.updateGL()", "def update(self,z_t):\n # YOUR CODE HERE\n pass", "def update(self):\n # Find only unmasked data :\n xyz, sData, sColor, _ = self._select_unmasked()\n # xyz, sData, sColor = self.xyz, self.sData, self.sColor\n\n # Render as cloud points :\n if xyz.size:\n self.mesh.visible = True\n self.mesh.set_data(xyz, edge_color=self.edgecolor, size=sData,\n face_color=sColor, scaling=self.scaling,\n edge_width=self.edgewidth, 
symbol=self.symbol)\n # self.mesh.transform = self.transform\n self.mesh.update()\n else:\n self.mesh.visible = False", "def update_fov(self) -> None:\n self.game_map.visible[:] = compute_fov(\n self.game_map.tiles[\"transparent\"],\n (self.player.x, self.player.y),\n radius=8,\n )\n # If a tile is \"visible\" it should be added to \"explored\".\n self.game_map.explored |= self.game_map.visible", "def _updated(self, event=None):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n super(ComplexCutPlane, self)._updated(event)", "def update(self):\n if self.name == \"Settings\":\n args = [\"NAME:Settings\"]\n else:\n args = [\"NAME:\" + self.name, \"Enable:=\", self.Enable]\n if self.UserSpecifiedSettings:\n args += self.manualsettings\n else:\n args += self.autosettings\n if self.name == \"Settings\":\n self.meshmodule.EditGlobalMeshRegion(args)\n else:\n self.meshmodule.EditMeshRegion(self.name, args)\n return True", "def update_flags(self):\n # view mode, filled vs wirefrom\n if self.view['wireframe']:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)\n else:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)\n\n # set fullscreen or windowed\n self.set_fullscreen(fullscreen=self.view['fullscreen'])\n\n # backface culling on or off\n if self.view['cull']:\n gl.glEnable(gl.GL_CULL_FACE)\n else:\n gl.glDisable(gl.GL_CULL_FACE)\n\n # case where we WANT an axis and NO vertexlist\n # is stored internally\n if self.view['axis'] and self._axis is None:\n from .. import creation\n # create an axis marker sized relative to the scene\n axis = creation.axis(origin_size=self.scene.scale / 100)\n # create ordered args for a vertex list\n args = rendering.mesh_to_vertexlist(axis)\n # store the axis as a reference\n self._axis = self.batch.add_indexed(*args)\n\n # case where we DON'T want an axis but a vertexlist\n # IS stored internally\n elif not self.view['axis'] and self._axis is not None:\n # remove the axis from the rendering batch\n self._axis.delete()\n # set the reference to None\n self._axis = None", "def level_upgrade(self, lvl):\n\t\tpass", "def _UpdateEnergy(self):\n self.mol.GetEnergy('nokinetic')", "def _update_objects(self):\n\t\tself.clouds.update()\n\t\tif self.is_play:\n\t\t\tself.floor.update()\n\t\t\tself.bolan.update()\n\t\t\tself.obstacles.update()\n\t\t\tself.scoreboard.update()", "def draw_level(self):\r\n self.level_surface.blit(self.map_image, self.viewport, self.viewport)\r\n self.level_surface.blit(self.title_box, self.title_rect)", "def update_surface(frame):\n \n #fig.suptitle(time[frame])\n im.set_array(surf[frame])\n im.set_extent([np.nanmin(xx[frame]), np.nanmax(xx[frame]), np.nanmin(yy[frame]), np.nanmax(yy[frame])])\n \n line.set_data([(times[:-1] + utc_to_east).plot_date[frame]]*2, ylim)", "def update(self, *args):\n\n\t\t# Update Bullets\n\t\tif self.power == 'bulletup' and self.level >= 2:\n\t\t\tself.angle_bullets(self.level)\n\t\t\n\t\t# Update Lazer\n\t\tif self.power == 'lazerup' and self.level > 0:\n\n\t\t\tself.index += 1\n\t\t\tif self.index % 12:\n\t\t\t\tself.step += 1\n\t\t\t\n\t\t\tself.y -= self.speed\n\n\n\t\t\tself.rect.y = self.y\n\t\t\tself.rect.x = self.x\n\n\t\t\t# print(\"SLOPE??? 
\", self.slope)\n\t\t\tself.sheet.blitme(self.screen, self.step % self.sheet.totalCells, \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.rect.x, self.rect.y)\n\n\t\t# Update Bombs\n\t\tif self.power == 'bombup' and self.level > 0:\n\t\t\tself.bomb_vector()\n\n\t\t# Update Default\n\t\telse:\n\t\t\tself.y -= self.speed\n\t\t\tself.rect.y = self.y\n\n\t\tpygame.display.flip()" ]
[ "0.6973583", "0.6578195", "0.6466973", "0.62059677", "0.6089591", "0.5909208", "0.5899945", "0.5825903", "0.57568187", "0.5703271", "0.5698263", "0.5687134", "0.56855094", "0.5576002", "0.55106765", "0.547519", "0.54643226", "0.54612565", "0.5458047", "0.5452608", "0.54472446", "0.54448307", "0.5433167", "0.5410528", "0.539943", "0.5392605", "0.5375215", "0.53721255", "0.5331784", "0.53085095" ]
0.81053317
0
Handle updates of isosurfaces level and add/remove
def _updateIsosurfaces(self): # Sorting using minus, this supposes data 'object' to be max values sortedIso = sorted(self.getIsosurfaces(), key=lambda isosurface: - isosurface.getLevel()) self._isogroup.children = [iso._getScenePrimitive() for iso in sortedIso]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _isosurfaceItemChanged(self, event):\n if event == Item3DChangedType.ISO_LEVEL:\n self._updateIsosurfaces()", "def _updated(self, event=None):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n\n elif event in (ItemChangedType.COLORMAP,\n Item3DChangedType.INTERPOLATION):\n self._updateScenePrimitive()\n super(ComplexIsosurface, self)._updated(event)", "def _update_level_data(self):\n\t\t# taxes, inhabitants\n\t\tself.tax_base = self.session.db.get_settler_tax_income(self.level)\n\t\tself.inhabitants_max = self.session.db.get_settler_inhabitants_max(self.level)\n\t\tif self.inhabitants > self.inhabitants_max: # crop settlers at level down\n\t\t\tself.inhabitants = self.inhabitants_max\n\n\t\t# consumption:\n\t\t# Settler productions are specified to be disabled by default in the db, so we can enable\n\t\t# them here per level.\n\t\tcurrent_lines = self.get_production_lines()\n\t\tfor (prod_line,) in self.session.db.get_settler_production_lines(self.level):\n\t\t\tif not self.has_production_line(prod_line):\n\t\t\t\tself.add_production_by_id(prod_line)\n\t\t\t# cross out the new lines from the current lines, so only the old ones remain\n\t\t\tif prod_line in current_lines:\n\t\t\t\tcurrent_lines.remove(prod_line)\n\t\tfor line in current_lines[:]: # iterate over copy for safe removal\n\t\t\t# all lines, that were added here but are not used due to the current level\n\t\t\tself.remove_production_by_id(line)\n\t\t# update instance graphics\n\t\tself.update_action_set_level(self.level)", "def addIsosurface(self, level, color):\n isosurface = self._Isosurface(parent=self)\n isosurface.setColor(color)\n if callable(level):\n isosurface.setAutoLevelFunction(level)\n else:\n isosurface.setLevel(level)\n isosurface.sigItemChanged.connect(self._isosurfaceItemChanged)\n\n self._isosurfaces.append(isosurface)\n\n self._updateIsosurfaces()\n\n self.sigIsosurfaceAdded.emit(isosurface)\n return isosurface", "def _computeIsosurface(self):\n data = self.getData(copy=False)\n\n if data is None:\n if self.isAutoLevel():\n self._level = float('nan')\n\n else:\n if self.isAutoLevel():\n st = time.time()\n try:\n level = float(self.getAutoLevelFunction()(data))\n\n except Exception:\n module_ = self.getAutoLevelFunction().__module__\n name = self.getAutoLevelFunction().__name__\n _logger.error(\n \"Error while executing iso level function %s.%s\",\n module_,\n name,\n exc_info=True)\n level = float('nan')\n\n else:\n _logger.info(\n 'Computed iso-level in %f s.', time.time() - st)\n\n if level != self._level:\n self._level = level\n self._updated(Item3DChangedType.ISO_LEVEL)\n\n if numpy.isfinite(self._level):\n st = time.time()\n vertices, normals, indices = MarchingCubes(\n data,\n isolevel=self._level)\n _logger.info('Computed iso-surface in %f s.', time.time() - st)\n\n if len(vertices) != 0:\n return vertices, normals, indices\n\n return None, None, None", "def _levelChanged(self, event):\n if event == items.Item3DChangedType.ISO_LEVEL:\n model = self.model()\n if model is not None:\n index = self.index(column=1)\n model.dataChanged.emit(index, index)", "def update_surfs(self, surf_path, surf_type, offset=None):\n try:\n self.surf[surf_type]\n except KeyError:\n pass\n # Here should be a dialog for confirm, whether adding data or not\n else:\n self._add_surface(surf_path, surf_type, offset)", "def update_gl_state(self, *args, **kwargs):\n for v in self._subvisuals:\n v.update_gl_state(*args, **kwargs)", "def UpdateLayers(self):\n pass", "def update_flags(self):\n # view mode, 
filled vs wirefrom\n if self.view['wireframe']:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)\n else:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)\n\n # set fullscreen or windowed\n self.set_fullscreen(fullscreen=self.view['fullscreen'])\n\n # backface culling on or off\n if self.view['cull']:\n gl.glEnable(gl.GL_CULL_FACE)\n else:\n gl.glDisable(gl.GL_CULL_FACE)\n\n # case where we WANT an axis and NO vertexlist\n # is stored internally\n if self.view['axis'] and self._axis is None:\n from .. import creation\n # create an axis marker sized relative to the scene\n axis = creation.axis(origin_size=self.scene.scale / 100)\n # create ordered args for a vertex list\n args = rendering.mesh_to_vertexlist(axis)\n # store the axis as a reference\n self._axis = self.batch.add_indexed(*args)\n\n # case where we DON'T want an axis but a vertexlist\n # IS stored internally\n elif not self.view['axis'] and self._axis is not None:\n # remove the axis from the rendering batch\n self._axis.delete()\n # set the reference to None\n self._axis = None", "def updateGeometryInfo(self,*args):\r\n self.wf.dataGridView.Rows.Clear()\r\n sceneRoot = Application.ActiveSceneRoot\r\n children = sceneRoot.FindChildren2( \"\", constants.siPolyMeshType, constants.siMeshFamily, True )\r\n for child in children:\r\n vTrans = child.Kinematics.Local.GetTransform2(None).Translation\r\n self.wf.AddRow( child.FullName, vTrans.X, vTrans.Y, vTrans.Z )", "def isoslider(surface_dic, surface_value_dic, min_value=0):\r\n return \\\r\nf\"\"\"\r\n\\n\\nclass IsoLevel(tk.Variable):\r\n def __init__(self, master, name, level):\r\n tk.Variable.__init__(self, master, value=level)\r\n self.name = name\r\n self.trace('w', self.callback)\r\n\r\n def callback(self, *args):\r\n cmd.isolevel(self.name, self.get())\r\n\r\n def increment(self, event=None, delta=0.1):\r\n self.set(round(float(self.get()) + delta, 2))\r\n\r\n def decrement(self, event=None):\r\n self.increment(None, -0.1)\r\n\r\n\r\nsurface_list = {surface_dic}\r\nsurface_max_list = {surface_value_dic}\r\n\r\ntop = tk.Toplevel(plugins.get_tk_root())\r\n\r\nmaster = tk.Frame(top, padx=10, pady=10)\r\nmaster.pack(fill=\"both\", expand=1)\r\n\r\nfor child in list(master.children.values()):\r\n child.destroy()\r\n\r\n\r\nrow_counter = 0\r\nfor identifier, component_dic in surface_list.items():\r\n # add calculation identifier\r\n tk.Label(master, text=identifier).grid(row=row_counter, column=0, sticky=\"w\")\r\n row_counter += 1\r\n \r\n for component_id, surfaces in component_dic.items():\r\n # add collection label, e.g. 
superstar or hotspot etc.\r\n tk.Label(master, text=component_id).grid(row=row_counter, column=1, sticky='w')\r\n row_counter += 1\r\n \r\n for i, surface in enumerate(surfaces):\r\n # add grid type label\r\n probe = surface.split(\"_\")[-2]\r\n tk.Label(master, text=probe).grid(row=row_counter, column=2, sticky=\"w\")\r\n \r\n # slider code \r\n v = IsoLevel(master, surface, 5)\r\n e = tk.Scale(master, orient=tk.HORIZONTAL, from_={min_value}, to=surface_max_list[identifier][component_id],\r\n resolution=0.1, showvalue=0, variable=v)\r\n e.grid(row=row_counter, column=3, sticky=\"ew\")\r\n\r\n e = tk.Entry(master, textvariable=v, width=4)\r\n e.grid(row=row_counter, column=4, sticky=\"e\")\r\n master.columnconfigure(3, weight=1)\r\n row_counter += 1\r\n\\n\\n\r\n\"\"\"", "def isosurface(grd_name, isosurface_name, level, color):\r\n # pymol_out = PyMOLCommands.load(fname, grd_name)\r\n pymol_out = f'\\ncmd.isosurface(name=\"{isosurface_name}\", map=\"{grd_name}\", level=\"{level}\")\\n'\r\n pymol_out += f'\\ncmd.color(\"{color}\", \"{isosurface_name}\")'\r\n return pymol_out", "def level_upgrade(self, lvl):\n\t\tpass", "def fDataChanged(self):\n\n self._layerManager.getAimsFeatures()", "def refreshStock(self, level : int = -1):\n self.shipsStock.clear()\n self.weaponsStock.clear()\n self.modulesStock.clear()\n self.turretsStock.clear()\n # self.currentTechLevel = random.randint(bbConfig.minTechLevel, bbConfig.maxTechLevel)\n if level == -1:\n self.currentTechLevel = bbConfig.pickRandomShopTL()\n else:\n if level not in range(bbConfig.minTechLevel, bbConfig.maxTechLevel + 1):\n raise ValueError(\"Attempted to refresh a shop at tech level \" + str(level) + \". must be within the range \" + str(bbConfig.minTechLevel) + \" to \" + str(bbConfig.maxTechLevel))\n self.currentTechLevel = level\n \n for i in range(self.maxShips):\n itemTL = bbConfig.pickRandomItemTL(self.currentTechLevel)\n if len(bbData.shipKeysByTL[itemTL - 1]) != 0:\n self.shipsStock.addItem(bbShip.bbShip.fromDict(bbData.builtInShipData[random.choice(bbData.shipKeysByTL[itemTL - 1])]))\n\n for i in range(self.maxModules):\n itemTL = bbConfig.pickRandomItemTL(self.currentTechLevel)\n if len(bbData.moduleObjsByTL[itemTL - 1]) != 0:\n self.modulesStock.addItem(random.choice(bbData.moduleObjsByTL[itemTL - 1]))\n\n for i in range(self.maxWeapons):\n itemTL = bbConfig.pickRandomItemTL(self.currentTechLevel)\n if len(bbData.weaponObjsByTL[itemTL - 1]) != 0:\n self.weaponsStock.addItem(random.choice(bbData.weaponObjsByTL[itemTL - 1]))\n\n # if random.randint(1, 100) <= bbConfig.turretSpawnProbability:\n for i in range(self.maxTurrets):\n itemTL = bbConfig.pickRandomItemTL(self.currentTechLevel)\n if len(bbData.turretObjsByTL[itemTL - 1]) != 0:\n self.turretsStock.addItem(random.choice(bbData.turretObjsByTL[itemTL - 1]))", "def update(self):\n self.platform_list.update()\n self.exit_sprite.update()\n self.bagGroup.update()\n self.enemy_list.update()", "def update(self):\n self.getPower()\n if self._state != STATE_OFF:\n self.getVolume()\n self.getCurrentChannel()", "def update(self):\n self.platform_list.update()\n #self.enemy_list.update()\n self.enemy_list.update()\n self.bullet_list.update()\n self.active_sprite_list.update()", "def setLevels(self, levels, update=True):\n if self._xp is None:\n self.levels = levels\n self._defferedLevels = levels\n return\n if levels is not None:\n levels = self._xp.asarray(levels)\n self.levels = levels\n self._effectiveLut = None\n if update:\n self.updateImage()", "def add_surface(self,s):\n 
self.surfaces.append(s)\n s.system=self.surfaces", "def _addClicked(self):\n volume = self.volume()\n if volume is not None:\n dataRange = volume.getDataRange()\n if dataRange is None:\n dataRange = 0., 1.\n\n volume.addIsosurface(\n numpy.mean((dataRange[0], dataRange[-1])),\n '#0000FF')", "def level_fix(planet, used_planets, planet_levels, left_control_keys, \n planet_control, level, planet_level_dict):\n planet_value = planet_control[planet]\n if len(planet_levels) < (level+1):\n planet_levels.append([])\n if (planet_value == 'Vul') or (planet_value == 'Ear'):\n if level == 0:\n used_planets.append(planet_value) # Add to used planet list\n planet_levels[level].append(planet_value) #Add planet to the level \n planet_level_dict[planet_value] = level \n else:\n used_planets.append(planet) # Add planet to the used planet list\n planet_levels[level].append(planet) # Add planet to the level\n planet_level_dict[planet] = level \n if planet in left_control_keys:\n left_control_keys.remove(planet) #Delete from left planet list\n else:\n used_planets.append(planet)\n planet_levels[level].append(planet) # Add planet to the level\n planet_level_dict[planet] = level\n if planet in left_control_keys:\n left_control_keys.remove(planet) # Delete from left planet list", "def process(self):\n\n\n index = self.dlg.ui.layerCombo.currentIndex() \n if index < 0: \n # it may occur if there's no layer in the combo/legend \n pass\n else: \n layer = self.dlg.ui.layerCombo.itemData(index) \n # layer = QgsVectorLayer(self.fileName, \"layer_name\", \"ogr\")\n \n\n nFeat = layer.featureCount()\n layer.startEditing()\n\n \n\n # Should really put these in a function\n\n index = layer.fieldNameIndex(\"_lts\")\n if index == -1: # field doesn't exist\n caps = layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes( [ QgsField(\"_lts\", \\\n QVariant.Int) ] )\n layer.updateFields()\n index = layer.fieldNameIndex(\"_num_lane\")\n if index == -1: # field doesn't exist\n caps = layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes( [ QgsField(\"_num_lane\", \\\n QVariant.Int) ] )\n layer.updateFields()\n\n index = layer.fieldNameIndex(\"_protected\")\n if index == -1: # field doesn't exist\n caps = layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes( [ QgsField(\"_protected\", \\\n QVariant.Int) ] )\n layer.updateFields()\n index = layer.fieldNameIndex(\"_bike_lane\")\n if index == -1: # field doesn't exist\n caps = layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes( [ QgsField(\"_bike_lane\", \\\n QVariant.Int) ] )\n layer.updateFields()\n index = layer.fieldNameIndex(\"CROSSINGME\")\n if index == -1: # field doesn't exist\n caps = layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes( [ QgsField(\"CROSSINGME\", \\\n QVariant.Int) ] )\n layer.updateFields()\n index = layer.fieldNameIndex(\"_lts11\")\n if index == -1: # field doesn't exist\n caps = layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes( [ QgsField(\"_lts11\", \\\n QVariant.Int) ] )\n layer.updateFields()\n index = layer.fieldNameIndex(\"_lts12\")\n if index == -1: # field doesn't exist\n caps = 
layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes( [ QgsField(\"_lts12\", \\\n QVariant.Int) ] )\n layer.updateFields()\n index = layer.fieldNameIndex(\"_lts13\")\n if index == -1: # field doesn't exist\n caps = layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes( [ QgsField(\"_lts13\", \\\n QVariant.Int) ] )\n layer.updateFields()\n index = layer.fieldNameIndex(\"_lts_woX\")\n if index == -1: # field doesn't exist\n caps = layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes( [ QgsField(\"_lts_woX\", \\\n QVariant.Int) ] )\n layer.updateFields()\n index = layer.fieldNameIndex(\"LTS\")\n if index == -1: # field doesn't exist\n caps = layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes( [ QgsField(\"LTS\", \\\n QVariant.Int) ] )\n layer.updateFields()\n\n\n\n i=1\n featid_lts ={}\n for feature in layer.getFeatures():\n street = street_link_object()\n street.path_width = feature['PATHWIDTH']\n street.park_width = feature['PARKWIDTH']\n street.num_lane = feature['NUMLANE']\n street.f_code = feature['ROADCLASS']\n street.foc_width = feature['FOC_WIDTH']\n # street.median = feature['MEDIAN']\n street.speed_limit = feature['SPD_LIM']\n # street.pocket_lane = feature['RTLANE']\n street.illegial_parking = feature['ILLPARKING']\n street.center_line = feature['CL']\n street.net_type = feature['NET_TYPE']\n street.right_turn_speed=feature['RTSPEED']\n street.pocket_lane_shift = feature['RTLANSHIFT']\n street.right_turn_lane_length = feature['RTPOCKLENG']\n street.one_way = feature['ONEWAY']\n street.raw_cross_stress = feature['_rawCrossS']\n street.cross_treat = feature['CrossTreat']\n\n street.calculate_crossing_me(street.num_lane) # has to always be before computing lts\n street.compute_LTS()\n if street.LTS != None :\n i+=1\n j=ceil(i/(nFeat/100))\n self.dlg.ui.progress_bar.setValue(j)\n feature[\"_lts_woX\"] = street.LTS\n feature[\"_lts\"] = street.LTS\n feature[\"_lts11\"] = street.lts11\n feature[\"_lts12\"] = street.lts12\n feature[\"_lts13\"] = street.lts13\n feature[\"_num_lane\"] = street.num_lane\n feature[\"_bike_lane\"] = street.bike_lane\n feature[\"_protected\"] = street.protected\n feature[\"CROSSINGME\"] = street.crossing_me\n layer.updateFeature(feature)\n # layer.updateFields()\n # QMessageBox.information(self.dlg, (\"WAIT\"), (\"Please wait!\"))\n layer.commitChanges()\n # layer.commitChanges()\n QMessageBox.information(self.dlg, (\"Successful\"), (\"LTS has been computed!\")) \n\n self.dlg.close()", "def _update_objects(self):\n\t\tself.clouds.update()\n\t\tif self.is_play:\n\t\t\tself.floor.update()\n\t\t\tself.bolan.update()\n\t\t\tself.obstacles.update()\n\t\t\tself.scoreboard.update()", "def update(self):\n self.platform_list.update()\n self.enemy_list.update()", "def on_update(self, delta_time: float) -> None:\n #inventory of items \"picked up\"\n hit_list = arcade.check_for_collision_with_list(self.player_sprite, self.levels[self.current_level].item_list)\n for item in hit_list:\n item.remove_from_sprite_lists()\n self.inventory += 1\n\n #update player sprite \"outfit\" is sword item is picked up\n self.player_list.update()\n self.player_list.update_animation(self.inventory)\n\n #update physics engine for player sprite and walls\n self.physics_engine.update()\n\n #go to 
next level\n #level 2 blocked if coin item is not picked up\n if self.player_sprite.center_y > settings.HEIGHT and self.current_level == 0 and self.inventory >= 1: \n self.current_level = 1\n self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.levels[self.current_level].wall_list)\n self.player_sprite.center_y = 0\n elif self.player_sprite.center_y > settings.HEIGHT and self.current_level == 0 and self.inventory == 0: \n self.player_sprite.center_y = settings.HEIGHT\n\n #level 3 blocked if sword item is not picked up\n elif self.player_sprite.center_y > settings.HEIGHT and self.current_level == 1 and self.inventory >= 2:\n self.current_level = 2\n self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.levels[self.current_level].wall_list)\n self.player_sprite.center_y = 0\n elif self.player_sprite.center_y > settings.HEIGHT and self.current_level == 1 and self.inventory == 1:\n self.player_sprite.center_y = settings.HEIGHT\n\n #go up to empty level after winning game\n elif self.player_sprite.center_y > settings.HEIGHT and self.current_level == 2:\n self.current_level = 3\n\n #go down levels\n elif self.player_sprite.center_y < 0 and self.current_level == 1:\n self.current_level = 0\n self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.levels[self.current_level].wall_list)\n self.player_sprite.center_y = settings.HEIGHT\n elif self.player_sprite.center_y < 0 and self.current_level == 2:\n self.current_level = 1\n self.physics_engine = arcade.PhysicsEngineSimple(self.player_sprite, self.levels[self.current_level].wall_list)\n self.player_sprite.center_y = settings.HEIGHT", "def draw_level(self):\r\n self.level_surface.blit(self.map_image, self.viewport, self.viewport)\r\n self.level_surface.blit(self.title_box, self.title_rect)", "def updateWeaponStatus(self):\n if self.myGalaxy.shipSelected == self:\n for position in self.positions:\n myQuad = self.quads[position] \n for id in funcs.sortStringList(myQuad.weapons.keys()):\n myWeapon = myQuad.weapons[id]\n self.updateMyGUIValue('%sweapon%sStatus' % (position,id), myWeapon.operational)\n self.updateMyGUIValue('%sweapon%sLock' % (position,id), myWeapon.currentLock)\n self.updateMyGUIValue('%sweapon%sPower' % (position,id), myWeapon.currentPower)\n if myWeapon.myWeaponData.ammo == 1 or myWeapon.droneID != '':\n self.updateMyGUIValue('%sweapon%sAmmo' % (position,id), myWeapon.availAmmo)", "def update():" ]
[ "0.7572625", "0.6483485", "0.62844723", "0.6087187", "0.60834914", "0.5838356", "0.58052474", "0.57790345", "0.57654625", "0.56409895", "0.5590038", "0.5531311", "0.552662", "0.55226326", "0.54919386", "0.5402099", "0.5401069", "0.53722477", "0.5337533", "0.5290507", "0.5276824", "0.5265918", "0.5265438", "0.52547926", "0.5244208", "0.5228894", "0.51997095", "0.5189326", "0.5178624", "0.5173066" ]
0.6803121
1
Handle update of the cut plane (and take care of mode change)
def _updated(self, event=None): if event == ItemChangedType.COMPLEX_MODE: self._syncDataWithParent() super(ComplexCutPlane, self)._updated(event)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plane_update(self):\n self.plane.update()", "def _update(self):\n self.cv.update()", "def onUpdateFactors(self, evt):\n\t\tif self.blockFactorUpdate:\n\t\t\tprint \"Blocking factor update\"\n\t\t\treturn\n\t\tx, y, z = self.dataUnits[0].dataSource.getOriginalDimensions()\n\t\tfx = 1\n\t\tfy = 1\n\t\tfz = 1\n\t\ttry:\n\t\t\tfx = float(self.factorX.GetValue())\n\t\t\tfy = float(self.factorY.GetValue())\n\t\t\tfz = float(self.factorZ.GetValue())\n\t\texcept:\n\t\t\tpass\n\t\tx *= fx\n\t\ty *= fy\n\t\tz *= fz\n\t\tself.blockDimUpdate = 1\n\t\tself.newDimX.SetValue(\"%d\" % x)\n\t\tself.newDimY.SetValue(\"%d\" % y)\n\t\tself.newDimZ.SetValue(\"%d\" % z)\n\t\tself.currSize = (x, y, z)\n\t\tself.blockDimUpdate = 0", "def update(self, data):\n if self.mode == 'image':\n data = self.preprocess(data)\n self.main_object.set_data(data)\n\n vmin, vmax = self._parse_vrange(data)\n self.main_object.set_clim([vmin, vmax])\n\n if self.mode == 'histogram':\n raise NotImplementedError(\"Updating layer data is not in supported in 'histogram' mode. \")\n\n if self.mode == 'curve':\n x_data, y_data = self.preprocess(data)\n self.main_object.set_data(x_data, y_data)\n self.update_lims()\n\n if self.mode == 'loss':\n raise NotImplementedError(\"Updating layer data is not in supported in 'loss' mode. \")", "def update():\n # TODO: Park the car 30 cm away from the closest orange cone.\n # Use both color and depth information to handle cones of multiple sizes.\n # You may wish to copy some of your code from lab2b.py\n global speed\n global angle\n global curState\n # Search for contours in the current color image\n update_contour()\n\n imgX = rc.camera.get_width()\n\n depth_image = rc.camera.get_depth_image()\n depth_image_adjust = (depth_image - 0.01) % 9999\n depth_image_adjust_blur = cv.GaussianBlur(depth_image_adjust, (11,11), 0)\n\n contour_x = contour_center[1]\n contour_y = contour_center[0]\n\n if contour_center is not None:\n angle = rc_utils.remap_range(contour_center[1],0,imgX,-1,1)\n\n contour_distance = depth_image_adjust_blur[contour_y][contour_x]\n\n print(contour_distance)\n # TODO: Park the car 30 cm away from the closest orange cone\n if curState == State.search:\n rc.drive.set_speed_angle(0.5, 1)\n \n if contour_center is not None:\n curState = State.approach\n\n elif curState == State.approach:\n # rc.drive.set_speed_angle(0.5, angle)\n\n if contour_distance > 50:\n rc.drive.set_speed_angle(0.3,angle)\n elif contour_distance > 32:\n rc.drive.set_speed_angle(0.1,angle)\n elif contour_distance == 32:\n rc.drive.set_speed_angle(-0.1,angle)\n elif contour_distance < 32:\n curState = State.stop\n print(\"stop\")\n\n elif curState == State.stop:\n rc.drive.set_speed_angle(0,0)\n\n pass", "def _update(self):\n self._execute_lane_changes()\n self._execute_forward_movement()", "def update(self):\n self.setVector(0.15, 0.0)", "def update(self):\n self.active = False\n self.top.update(self.rgb,self.cmyk,self.hsv)\n self.bot.update(self.rgb,self.cmyk,self.hsv)\n self.active = True", "def update(self):\n #self._light.update()\n #self._state = 'on' #self._light.is_on()\n #self._brightness = 80 #self._light.brightness\n _LOGGER.info(\"update() is called\")", "def __init__(self):\n super(SteklovBoundary, self).__init__()\n self.value = SteklovBoundary.value\n SteklovBoundary.value -= 1\n self.update(param=\"1\")", "def update(self):\n\n obstVals = self.robot.getDepth(self.startCol, self.startRow,\n self.sampleWidth, self.sampleHeight)\n\n masked_obstVals = numpy.ma.masked_array(obstVals, obstVals == 
0)\n\n if numpy.ma.count(masked_obstVals) == 0:\n meanDistance = 500\n else:\n meanDistance = numpy.mean(masked_obstVals)\n if meanDistance < 500:\n meanDistance = 500\n\n if meanDistance < 1200: # Changing this value will change how sensitive robot is to walls\n self.setVector(self.speedMult / meanDistance, 180 - self.angle)\n else:\n self.setVector(0.0, 0.0)", "def update(self): # called to update this piece's position\r\n \r\n if self.name == \"white\" and self.y == 25: self.crowned()\r\n elif self.name == \"black\" and self.y >= 350: self.crowned()\r\n self.draw()", "def update_H(self):", "def update(i):\n epoch = i//features.instance_count\n w = pl.weights_tracker[i]\n a = pl.accuracy_tracker[epoch]\n divider.set_data([xmin,xmax],[(-xmin * w[0] - w[2]) / w[1], (-xmax * w[0] - w[2]) / w[1]])\n epoch_tracker.set_text(\"{} {}\".format(epoch + 1, a))\n\n # Keep a shadow of the hyperplane at the end of each epoch\n if i % features.instance_count == 0:\n plot_hyperplane(w,xmin,xmax,iter = i, alpha = .3, color='black',linestyle='dashed')\n\n return divider", "def update_focal_axes(self):\n #self.update_sigma()\n self.updateGL()", "def _updated(self, event=None):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n\n elif event in (ItemChangedType.COLORMAP,\n Item3DChangedType.INTERPOLATION):\n self._updateScenePrimitive()\n super(ComplexIsosurface, self)._updated(event)", "def update(self):\n if self.state['enabled']:\n if not self.state['blue'] and not self.state['return']:\n self.update_normal()\n elif self.state['blue']:\n self.update_blue()\n elif self.state['return']:\n self.update_return()\n self.last_position = (self.rect.centerx, self.rect.centery)", "def viewUpdate(self):\n # Update Capture\n imgtk = self.model.capture\n self.updateImage(self.view.lmain, imgtk)\n # Update Stitch \n imgtk = self.model.stitch\n self.updateImage(self.view.rmain, imgtk)\n self.view.dist.set(self.model.dist)", "def update(self):\n self.getPower()\n if self._state != STATE_OFF:\n self.getVolume()\n self.getCurrentChannel()", "def update(self,**kwargs):\n self._update_from_kwargs(**kwargs)\n #--- calc estimated chops from chop length\n self._calc_estimated_chops_from_timepoints()\n #--- adjust chops for stimulusresponse\n self._adjust_chops_for_stimulus_response()\n #--- update annotations\n self._update_annotations()\n #--- get info\n if self.verbose:\n self.GetInfo()\n #--- show plot\n if self.show:\n self.show_chops()", "def update(self):\n events = pygame.event.get()\n self.plane_update()\n self.bullet_update(events)\n self.background_update()\n self.enemy_update(events)", "def switch_cut_cor(self):\n if self.cut_cor == 41:\n self.cut_cor = 42\n elif self.cut_cor == 42:\n self.cut_cor = 41", "def redraw(self):\r\n self.c.update()", "def run(self):\n\n self._check_hardware_control()\n\n if self._is_stabilizing:\n #If we are locking the power, then need to update teh feedback loop and change the output label\n self._update_feedback()\n self._update_output_voltage_label()\n\n #We always need to update the plots as well and power label\n\n self._update_plots()\n self._update_power_label()\n\n self.gui.force_update()", "def mode_changed_callback(self, entity, attribute, old, new, kwargs):\n\n entity_dict = kwargs['entity_dict']\n self.log('{} mode changed to {}.'.format(entity_dict['friendly'], new))\n\n if new == 'Maximum':\n self.turn_on(entity_dict['light'],\n brightness_pct=entity_dict['max_brightness'])\n self.log('Setting {} to {}% brightness.'.format(\n entity_dict['friendly'], 
entity_dict['max_brightness']))\n elif new == 'Minimum':\n self.turn_on(entity_dict['light'],\n brightness_pct=entity_dict['min_brightness'])\n self.log('Setting {} to {}% brightness.'.format(\n entity_dict['friendly'], entity_dict['min_brightness']))\n elif new == 'Automatic':\n self.set_value(entity_dict['setpoint'], value=0)\n self.auto_brightness_callback(\n dict(entity_dict=entity_dict))", "def update(self):\n # Find only unmasked data :\n xyz, sData, sColor, _ = self._select_unmasked()\n # xyz, sData, sColor = self.xyz, self.sData, self.sColor\n\n # Render as cloud points :\n if xyz.size:\n self.mesh.visible = True\n self.mesh.set_data(xyz, edge_color=self.edgecolor, size=sData,\n face_color=sColor, scaling=self.scaling,\n edge_width=self.edgewidth, symbol=self.symbol)\n # self.mesh.transform = self.transform\n self.mesh.update()\n else:\n self.mesh.visible = False", "def _do_updates(self):\n is_right = self._puzzle.is_guess_right()\n if is_right:\n self._puzzle.reveal_puzzle()\n else:\n self._jumper.cut_line()", "def update(self, *args):\n ## If prediction is enabled then predict() handles rebounds.\n use_prediction = list(args).pop(0)\n if not use_prediction:\n self._rebound(0.0)\n ## Speed step needs to be adjusted by the value of interpolation\n ## at the time the ball collided with an edge (predictive_rebound_*).\n self.x += self.dx * self.speed/TICKS_PER_SECOND * (1-self.predictive_rebound_x)\n self.y += self.dy * self.speed/TICKS_PER_SECOND * (1-self.predictive_rebound_y)\n self.predictive_rebound_x,self.predictive_rebound_y = 0.0,0.0\n self.rect.center = round(self.x),round(self.y)", "def update(self):\n changes = {}\n for coord in INDICES: # the need for two for loops is necessary\n if self.chart[coord] == ALIVE and (\n self.number_of_neighbors(coord) < 2 or self.number_of_neighbors(coord) > 3):\n changes[coord] = KILL\n elif self.number_of_neighbors(coord) == 3:\n changes[coord] = REVIVE\n for coord in changes.keys(): # because the evolution is discrete\n if changes[coord] == KILL:\n self.kill(coord)\n elif changes[coord] == REVIVE:\n self.givebirth(coord)", "def update(self):\n\t\t# If being controlled by COM\n\t\tif self.controled_by_com :\n\t\t\t# Substract 1 from the update counter\n\t\t\tself.update_counter -= 1\n\t\t\t# If the update counter reaches zero\n\t\t\tif self.update_counter == 0. :\n\t\t\t\t# then ask for an action \n\t\t\t\tif self.intermediate_phase is False :\n\t\t\t\t\tself.action_required = True \n\t\t\t\t\t\t\n\t\t\t\t# if during a change\n\t\t\t\t# then make the change\n\t\t\t\tif self.intermediate_phase is True : \n\t\t\t\t\tself.action_required = False\n\t\t\t\t\tself._color_changer() #Make the change in the Simulator\n\t\telse :\n\t\t\tpass" ]
[ "0.6893299", "0.60525465", "0.58858526", "0.5834753", "0.5762848", "0.5702077", "0.5648497", "0.5648258", "0.56118804", "0.56109047", "0.55797815", "0.55794543", "0.55765516", "0.5570159", "0.5562381", "0.55067044", "0.550493", "0.54722816", "0.54584134", "0.5452447", "0.5450791", "0.54296863", "0.54067004", "0.5405082", "0.54015386", "0.53991103", "0.5395922", "0.5394981", "0.5363178", "0.533246" ]
0.6976842
0
Handle data change in the parent this isosurface belongs to
def _parentChanged(self, event): if event == ItemChangedType.COMPLEX_MODE: self._syncDataWithParent() super(ComplexIsosurface, self)._parentChanged(event)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _updated(self, event=None):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n\n elif event in (ItemChangedType.COLORMAP,\n Item3DChangedType.INTERPOLATION):\n self._updateScenePrimitive()\n super(ComplexIsosurface, self)._updated(event)", "def _isosurfaceItemChanged(self, event):\n if event == Item3DChangedType.ISO_LEVEL:\n self._updateIsosurfaces()", "def _updated(self, event=None):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n super(ComplexCutPlane, self)._updated(event)", "def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n self._data = None\n else:\n self._data = parent.getData(copy=False)\n self._updateScenePrimitive()", "def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n self._data = None\n else:\n self._data = parent.getData(\n mode=parent.getComplexMode(), copy=False)\n\n if parent is None or self.getComplexMode() == self.ComplexMode.NONE:\n self._setColormappedData(None, copy=False)\n else:\n self._setColormappedData(\n parent.getData(mode=self.getComplexMode(), copy=False),\n copy=False)\n\n self._updateScenePrimitive()", "def _parentChanged(self, event):\n if event == ItemChangedType.DATA:\n self._syncDataWithParent()", "def _parentChanged(self, event):\n if event == ItemChangedType.DATA:\n self._syncDataWithParent()", "def update_data(self):\n # Just set data_changed, the component should do the rest.\n self.data_changed = True", "def data_changed(self):\n return", "def XPLMDataChanged_f(inRefcon):", "def data_changed(self):\n self.data_changed_signal.emit(self)", "def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n data, range_ = None, None\n else:\n mode = self.getComplexMode()\n data = parent.getData(mode=mode, copy=False)\n range_ = parent.getDataRange(mode=mode)\n self._updateData(data, range_)", "def _parent_changed(self):\n raise NotImplementedError(\"shouldnt happen, Parentable objects need to be able to change their parent\")", "def MyDataChangedCallback(self, inRefcon):\r\n pass", "def get_data(self, data):\n data = super().get_data(data)\n self.pid.update_layer1(data[self.pid_cols])\n return data", "def fDataChanged(self):\n\n self._layerManager.getAimsFeatures()", "def update_original_data(self):\n pass", "def _syncDataWithParent(self):\n parent = self.parent()\n if parent is None:\n data, range_ = None, None\n else:\n data = parent.getData(copy=False)\n range_ = parent.getDataRange()\n self._updateData(data, range_)", "def on_data_vars_change(self, change):\n if change['type'] == 'change' and change['name'] == 'value':\n self.left_ds = getattr(self.ts.data, change['new'])\n if self.mask is None:\n self.right_ds = self.left_ds.copy(deep=True)\n else:\n self.right_ds = self.left_ds * self.mask\n\n self.left_imshow.set_data(self.left_ds.data[0])\n self.right_imshow.set_data(self.right_ds.data[0])", "def update_data():\n pass", "def update(self, datain):\r\n self.arraydata = datain\r\n self.layoutChanged.emit()", "def update(self, parent):\r\n pass", "def update(self, data):\n if self.mode == 'image':\n data = self.preprocess(data)\n self.main_object.set_data(data)\n\n vmin, vmax = self._parse_vrange(data)\n self.main_object.set_clim([vmin, vmax])\n\n if self.mode == 'histogram':\n raise NotImplementedError(\"Updating layer data is not in supported in 'histogram' mode. 
\")\n\n if self.mode == 'curve':\n x_data, y_data = self.preprocess(data)\n self.main_object.set_data(x_data, y_data)\n self.update_lims()\n\n if self.mode == 'loss':\n raise NotImplementedError(\"Updating layer data is not in supported in 'loss' mode. \")", "def dataGridView_CellValueChanged(self, sender, eventArgs):\r\n name = self.wf.dataGridView.Rows[eventArgs.RowIndex].Cells[0].Value\r\n newVal = self.wf.dataGridView.Rows[eventArgs.RowIndex].Cells[eventArgs.ColumnIndex].Value\r\n child = Application.ActiveSceneRoot.FindChild2( name, constants.siPolyMeshType, constants.siMeshFamily, True )\r\n if child:\r\n transform = child.Kinematics.Local.GetTransform2(None)\r\n translation = transform.Translation\r\n if eventArgs.ColumnIndex == 1:\r\n transform.Translation = XSIMath.CreateVector3( newVal, translation.Y, translation.Z )\r\n child.Kinematics.Local.PutTransform2(None,transform)\r\n elif eventArgs.ColumnIndex == 2:\r\n transform.Translation = XSIMath.CreateVector3( translation.X, newVal, translation.Z )\r\n child.Kinematics.Local.PutTransform2(None,transform)\r\n elif eventArgs.ColumnIndex == 3:\r\n transform.Translation = XSIMath.CreateVector3( translation.X, translation.Y, newVal )\r\n child.Kinematics.Local.PutTransform2(None,transform)\r\n else:\r\n print \"DataGridView_CellValueChanged: \" + child + \" not found!\"", "def _levelChanged(self, event):\n if event == items.Item3DChangedType.ISO_LEVEL:\n model = self.model()\n if model is not None:\n index = self.index(column=1)\n model.dataChanged.emit(index, index)", "def _notify_parent_change(self):\n pass", "def on_parent_changed(self):\n pass", "def updateGeometryInfo(self,*args):\r\n self.wf.dataGridView.Rows.Clear()\r\n sceneRoot = Application.ActiveSceneRoot\r\n children = sceneRoot.FindChildren2( \"\", constants.siPolyMeshType, constants.siMeshFamily, True )\r\n for child in children:\r\n vTrans = child.Kinematics.Local.GetTransform2(None).Translation\r\n self.wf.AddRow( child.FullName, vTrans.X, vTrans.Y, vTrans.Z )", "def _notify_parent_change(self):\n for p in self.parameters:\n p._parent_changed(self)", "def _load_data(self, event):\n if self.parent is not None:\n wx.PostEvent(self.parent, NewLoadDataEvent())" ]
[ "0.7281809", "0.6798219", "0.663362", "0.6617056", "0.6495038", "0.63887423", "0.63887423", "0.63203007", "0.6309291", "0.62671393", "0.6239306", "0.6219209", "0.6177352", "0.6040155", "0.59989554", "0.5994137", "0.59767103", "0.5967239", "0.5934066", "0.5895432", "0.58403164", "0.5837722", "0.58095384", "0.5787082", "0.5745996", "0.57370967", "0.57274175", "0.57120687", "0.5686296", "0.5668755" ]
0.78250813
0
Handle update of the isosurface (and take care of mode change)
def _updated(self, event=None):
    if event == ItemChangedType.COMPLEX_MODE:
        self._syncDataWithParent()

    elif event in (ItemChangedType.COLORMAP,
                   Item3DChangedType.INTERPOLATION):
        self._updateScenePrimitive()
    super(ComplexIsosurface, self)._updated(event)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _isosurfaceItemChanged(self, event):\n if event == Item3DChangedType.ISO_LEVEL:\n self._updateIsosurfaces()", "def update_flags(self):\n # view mode, filled vs wirefrom\n if self.view['wireframe']:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)\n else:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)\n\n # set fullscreen or windowed\n self.set_fullscreen(fullscreen=self.view['fullscreen'])\n\n # backface culling on or off\n if self.view['cull']:\n gl.glEnable(gl.GL_CULL_FACE)\n else:\n gl.glDisable(gl.GL_CULL_FACE)\n\n # case where we WANT an axis and NO vertexlist\n # is stored internally\n if self.view['axis'] and self._axis is None:\n from .. import creation\n # create an axis marker sized relative to the scene\n axis = creation.axis(origin_size=self.scene.scale / 100)\n # create ordered args for a vertex list\n args = rendering.mesh_to_vertexlist(axis)\n # store the axis as a reference\n self._axis = self.batch.add_indexed(*args)\n\n # case where we DON'T want an axis but a vertexlist\n # IS stored internally\n elif not self.view['axis'] and self._axis is not None:\n # remove the axis from the rendering batch\n self._axis.delete()\n # set the reference to None\n self._axis = None", "def _draw(self):\r\n \r\n if self.active:\r\n self.surface = self.activeSurface # Set active surface to be displayed.\r\n else:\r\n self.surface = self.passiveSurface # Set passive surface to be displayed.\r", "def update(self, data):\n if self.mode == 'image':\n data = self.preprocess(data)\n self.main_object.set_data(data)\n\n vmin, vmax = self._parse_vrange(data)\n self.main_object.set_clim([vmin, vmax])\n\n if self.mode == 'histogram':\n raise NotImplementedError(\"Updating layer data is not in supported in 'histogram' mode. \")\n\n if self.mode == 'curve':\n x_data, y_data = self.preprocess(data)\n self.main_object.set_data(x_data, y_data)\n self.update_lims()\n\n if self.mode == 'loss':\n raise NotImplementedError(\"Updating layer data is not in supported in 'loss' mode. 
\")", "def update_focal_axes(self):\n #self.update_sigma()\n self.updateGL()", "def _updated(self, event=None):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n super(ComplexCutPlane, self)._updated(event)", "def update():", "def update():", "def plane_update(self):\n self.plane.update()", "def fDataChanged(self):\n\n self._layerManager.getAimsFeatures()", "def update(self, surface, keys, current_time, dt, scale):\n self.anykey.update(current_time)\n self.draw(surface)", "def update_surfs(self, surf_path, surf_type, offset=None):\n try:\n self.surf[surf_type]\n except KeyError:\n pass\n # Here should be a dialog for confirm, whether adding data or not\n else:\n self._add_surface(surf_path, surf_type, offset)", "def _update_plot(self):\n\n self.T_ex[:-1] = self.T_ex[1:]\n self.T_ex[-1] = self.ensemble.T_ex\n self.plot_T_ex[0].set_ydata(self.T_ex)\n self.T_kin[:-1] = self.T_kin[1:]\n self.T_kin[-1] = self.ensemble.T_kin\n self.plot_T_kin[0].set_ydata(self.T_kin)\n self.canvas.draw()\n\n renderer = self.canvas.get_renderer()\n raw_data = renderer.tostring_rgb()\n surf = pygame.image.fromstring(raw_data,\n (self.plot_width, self.disp_height),\n \"RGB\")\n self.game_display.blit(surf, (self.disp_width, 0))", "def update_figure(self):\n\n self.draw()", "def update(self):\n # Find only unmasked data :\n xyz, sData, sColor, _ = self._select_unmasked()\n # xyz, sData, sColor = self.xyz, self.sData, self.sColor\n\n # Render as cloud points :\n if xyz.size:\n self.mesh.visible = True\n self.mesh.set_data(xyz, edge_color=self.edgecolor, size=sData,\n face_color=sColor, scaling=self.scaling,\n edge_width=self.edgewidth, symbol=self.symbol)\n # self.mesh.transform = self.transform\n self.mesh.update()\n else:\n self.mesh.visible = False", "def update_surface(frame):\n \n #fig.suptitle(time[frame])\n im.set_array(surf[frame])\n im.set_extent([np.nanmin(xx[frame]), np.nanmax(xx[frame]), np.nanmin(yy[frame]), np.nanmax(yy[frame])])\n \n line.set_data([(times[:-1] + utc_to_east).plot_date[frame]]*2, ylim)", "def update_focal_axes(self):\n self.update_sigma()\n self.updateGL()", "def _update(self):\n self.cv.update()", "def update_visuals(self):\n\n result, data = self.dev.grab_pipe()\n if not result:\n log.critical(\"Problem grabbing pipe\")\n\n if self.live_updates == True:\n self.update_graph(data)\n self.curve_render += 1\n self.update_image(data)\n self.check_image(self.curve_render)\n\n self.update_fps()\n self.data_timer.start(0)", "def update( ):\r\n pass", "def update_visualization(self) -> None:\n pass", "def __call__(self, info, *fargs):\n frame = info[0] # Frame number\n update = info[1] # Update value\n grid_data = info[2] # Data to draw our grids\n mask = info[3] # Mask of data\n self._setup['update'].set_text(f'Update {update}')\n for ndx,data in enumerate(grid_data):\n self._setup['plots'][ndx].set_array(check_mask(data,mask[ndx]))\n for pp in self._setup['post_plot']:\n pp.blit_update(frame, update, ax_ndx=ndx)\n if self._setup._pbar:\n self._setup._pbar.update(frame)\n if frame == self._setup._num_frames - 1:\n self._setup._pbar.finish()\n return self._setup.get_drawables()", "def update(self):\n #self._light.update()\n #self._state = 'on' #self._light.is_on()\n #self._brightness = 80 #self._light.brightness\n _LOGGER.info(\"update() is called\")", "def update(self):\n self.getPower()\n if self._state != STATE_OFF:\n self.getVolume()\n self.getCurrentChannel()", "def update(self):\n\n self.pta_time[0] = 1 + Globals.clock.get_frame_time() * 
self.options.time_scale\n\n Globals.base.graphicsEngine.dispatch_compute(\n (self.options.size // 16, self.options.size // 16, 1),\n self.attr_update,\n Globals.base.win.get_gsg())\n\n self.fftX.execute()\n self.fftY.execute()\n self.fftZ.execute()\n\n # Execute the shader which combines the 3 displacement maps into\n # 1 displacement texture and 1 normal texture. We could use dFdx in\n # the fragment shader, however that gives no accurate results as\n # dFdx returns the same value for a 2x2 pixel block\n Globals.base.graphicsEngine.dispatch_compute(\n (self.options.size // 16, self.options.size // 16, 1),\n self.attr_combine,\n Globals.base.win.get_gsg())", "def _updateIsosurfaces(self):\n # Sorting using minus, this supposes data 'object' to be max values\n sortedIso = sorted(self.getIsosurfaces(),\n key=lambda isosurface: - isosurface.getLevel())\n self._isogroup.children = [iso._getScenePrimitive() for iso in sortedIso]", "def update(self) -> pygame.Surface:\n return self.surface", "def update(self):\n if (not self._run) or (not self.IA.is_loaded()):\n return\n self.IA.BG_MAP.update(speed=self.speed)\n self.IA.O_ATUAL.update()\n self._desintegrator.update()", "def update_plot():\n pass", "def update_visualizer(self):\n if self.visualizer:\n if self.frame_count == 2:\n self.visualizer.add_geometry(self.vis_points)\n self.visualizer.update_geometry(self.vis_points)\n self.visualizer.poll_events()\n self.visualizer.update_renderer()\n time.sleep(0.001)\n self.frame_count += 1" ]
[ "0.72574514", "0.6297322", "0.62271756", "0.6138882", "0.6094875", "0.6022466", "0.6002216", "0.6002216", "0.5986893", "0.5943191", "0.59352255", "0.59275275", "0.59257054", "0.5873895", "0.58552265", "0.5850752", "0.5833824", "0.57953125", "0.5777924", "0.5773839", "0.5770586", "0.57686067", "0.57650787", "0.57644564", "0.5761846", "0.57578784", "0.5749106", "0.5747303", "0.57137436", "0.5701846" ]
0.750728
0
Return 3D dataset. This method does not cache data converted to a specific mode, it computes it for each request.
def getData(self, copy=True, mode=None):
    if mode is None:
        return super(ComplexField3D, self).getData(copy=copy)
    else:
        return self._convertComplexData(self._data, mode)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def D3(self, *args):\n return _Adaptor3d.Adaptor3d_Surface_D3(self, *args)", "def get_3d_train(self, jnts=14):\n\n to_select, to_sort = dataset_indices(self.dataset_name, jnts)\n\n return self._data_train['3d'][:, to_select, :][:, to_sort, :]", "def get_dataset(self, cid, type=\"train\"):\n dataset = torch.load(\n os.path.join(self.path, type, \"data{}.pkl\".format(cid)))\n return dataset", "def dataset(self):\n return self.predictor_data_manager.dataset(\n self.data_name, self.trait_name, data=self._data, trait=self.trait,\n categorical_trait=self.categorical_trait)", "def cube_data(self):\n cube_data = copy.deepcopy(self.data)\n cube_data.shape = [self.nints * self.ngroups, self.rows, self.columns]\n return cube_data", "def get_dataset(self):\n if self.mode == \"test\":\n return OnlineQueryDataset(self.mode, self.df, self.tokenizer)\n else:\n return OnlineQueryDataset(self.mode, self.df_reindex, self.tokenizer)", "def dataset(self) -> np.ndarray:\n if self._cache_dataset_list:\n # Concatenates the `self._dataset` and the datasets in\n # `self._cache_dataset_list`.\n if self._dataset.size > 0:\n dataset_list = [self._dataset] + self._cache_dataset_list\n else:\n dataset_list = self._cache_dataset_list\n\n self._dataset = np.vstack(dataset_list)\n self._cache_dataset_list = []\n return self._dataset", "def get_dataset(self, therm_frac=0., make_plots=False) -> (xr.Dataset):\n data_vars = {}\n for key, val in self.data.items():\n arr = np.array(val)\n steps = np.arange(len(arr))\n if therm_frac > 0:\n arr, steps = therm_arr(arr, therm_frac=therm_frac)\n if len(arr.shape) == 1:\n data_vars[key] = xr.DataArray(arr, dims=['draw'],\n coords=[steps])\n elif len(arr.shape) == 3:\n arr = arr.T\n num_chains, num_lf, _ = arr.shape\n dims = ['chain', 'leapfrog', 'draw']\n coords = [np.arange(num_chains), np.arange(num_lf), steps]\n data_vars[key] = xr.DataArray(arr, dims=dims, coords=coords)\n else:\n chains = np.arange(arr.shape[1])\n data_vars[key] = xr.DataArray(arr.T, dims=['chain', 'draw'],\n coords=[chains, steps])\n\n return xr.Dataset(data_vars)", "def get_dataset(self):\n return", "def convert_1d_to_3d(data_X, data_Y):\n\n data_X = data_X.tocsr()\n \n data_dim_x = [] # slices along x-axis (has shape of (total_trials * dim_x, dim_z, dim_y))\n data_dim_x_label = [] # contains (total_trials * dim_x) labels\n data_dim_y = [] # slices along y-axis (has shape of (total_trials * dim_y, dim_z, dim_x))\n data_dim_y_label = [] # contains (total_trials * dim_y) labels\n data_dim_z = [] # slices along z-axis (has shape of (total_trials * dim_z, dim_y, dim_x))\n data_dim_z_label = [] # contains (total_trials * dim_z) labels\n\n for num_trial in range(data_X.shape[0]):\n label = data_Y[num_trial]\n data_1d = data_X[num_trial]\n data_3d = np.squeeze(np.asarray(data_1d.todense())).reshape((dim_z, dim_y, dim_x))\n for x in range(dim_x):\n x_slice = data_3d[:,:,x]\n # append only if the slice is not empty \n if x_slice.sum() != 0:\n data_dim_x.append(data_3d[:, :, x])\n data_dim_x_label.append(label)\n for y in range(dim_y):\n y_slice = data_3d[:, y, :]\n if y_slice.sum() != 0:\n data_dim_y.append(data_3d[:, y, :])\n data_dim_y_label.append(label)\n for z in range(dim_z):\n z_slice = data_3d[:, :, z]\n if z_slice.sum() != 0:\n data_dim_z.append(data_3d[z, :, :])\n data_dim_z_label.append(label)\n\n return np.array(data_dim_x), np.array(data_dim_x_label), \\\n np.array(data_dim_y), np.array(data_dim_y_label), \\\n np.array(data_dim_z), np.array(data_dim_z_label)", "def _get_dataset(self):\n if self.mode == 
'train':\n return (\n tf.data.Dataset.from_tensor_slices(\n tensors=(tf.constant(value=self.file_paths),\n tf.reshape(tensor=tf.constant(self.labels), shape=[-1]))\n )\n .shuffle(buffer_size=self.num_samples, reshuffle_each_iteration=True)\n .map(map_func=self.import_waveforms_fn_train, num_parallel_calls=self.num_parallel_calls)\n .repeat()\n .batch(batch_size=self.batch_size)\n .prefetch(buffer_size=self.prefetch_buffer)\n )\n else:\n return (\n tf.data.Dataset.from_tensor_slices(\n tensors=(tf.constant(value=self.file_paths),\n tf.reshape(tensor=tf.constant(self.labels), shape=[-1]))\n )\n .map(map_func=self.import_waveforms_fn_val, num_parallel_calls=self.num_parallel_calls)\n .repeat()\n .batch(batch_size=self.batch_size)\n .prefetch(buffer_size=self.prefetch_buffer)\n )", "def __get_dataset(self):\n # Disable RasterIO logging, just show ERRORS\n log = rio_logging.getLogger()\n log.setLevel(rio_logging.ERROR)\n\n try:\n # Get dataset\n tmp_ds = xr.open_rasterio(self.fname)\n tmp_ds = None ; del tmp_ds\n except rio.errors.RasterioIOError as e:\n raise e\n\n chunks = get_chunk_size(self.fname)\n data_array = xr.open_rasterio(self.fname, chunks=chunks)\n\n data_array = data_array.rename(\n {'x': 'longitude',\n 'y': 'latitude',\n 'band': 'time'})\n\n # Check if file is a VRT\n name, extension = os.path.splitext(self.fname)\n if extension.lower() == '.vrt':\n times = get_times(self.fname)\n else:\n times = get_times_from_file_band(self.fname)\n data_array['time'] = times\n\n # Check that _FillValue is not NaN\n if data_array.nodatavals[0] is np.NaN:\n # Use _FillValue from band metadata\n _fill_value = get_fill_value_band_metadata(self.fname)\n\n data_array.attrs['nodatavals'] = \\\n tuple(np.full((len(data_array.nodatavals))\n ,_fill_value))\n\n # Create new dataset\n self.dataset_name = self.__get_dataset_name()\n dataset = data_array.to_dataset(name=self.dataset_name)\n\n # Back to default logging settings\n logging.basicConfig(level=logging.INFO)\n\n # Set self.data\n self.data = dataset", "def get_dataset(cfg,\n augmentor,\n mode='train',\n rank=None,\n dataset_class=VolumeDataset,\n dataset_options={},\n dir_name_init: Optional[list] = None,\n img_name_init: Optional[list] = None):\n assert mode in ['train', 'val', 'test']\n\n sample_label_size = cfg.MODEL.OUTPUT_SIZE\n topt, wopt = ['0'], [['0']]\n if mode == 'train':\n sample_volume_size = augmentor.sample_size if augmentor is not None else cfg.MODEL.INPUT_SIZE\n sample_label_size = sample_volume_size\n sample_stride = (1, 1, 1)\n topt, wopt = cfg.MODEL.TARGET_OPT, cfg.MODEL.WEIGHT_OPT\n iter_num = cfg.SOLVER.ITERATION_TOTAL * cfg.SOLVER.SAMPLES_PER_BATCH\n if cfg.SOLVER.SWA.ENABLED:\n iter_num += cfg.SOLVER.SWA.BN_UPDATE_ITER\n\n elif mode == 'val':\n sample_volume_size = cfg.MODEL.INPUT_SIZE\n sample_label_size = sample_volume_size\n sample_stride = [x//2 for x in sample_volume_size]\n topt, wopt = cfg.MODEL.TARGET_OPT, cfg.MODEL.WEIGHT_OPT\n iter_num = -1\n\n elif mode == 'test':\n sample_volume_size = cfg.MODEL.INPUT_SIZE\n sample_stride = cfg.INFERENCE.STRIDE\n iter_num = -1\n\n shared_kwargs = {\n \"sample_volume_size\": sample_volume_size,\n \"sample_label_size\": sample_label_size,\n \"sample_stride\": sample_stride,\n \"augmentor\": augmentor,\n \"target_opt\": topt,\n \"weight_opt\": wopt,\n \"mode\": mode,\n \"do_2d\": cfg.DATASET.DO_2D,\n \"reject_size_thres\": cfg.DATASET.REJECT_SAMPLING.SIZE_THRES,\n \"reject_diversity\": cfg.DATASET.REJECT_SAMPLING.DIVERSITY,\n \"reject_p\": cfg.DATASET.REJECT_SAMPLING.P,\n 
\"data_mean\": cfg.DATASET.MEAN,\n \"data_std\": cfg.DATASET.STD,\n \"data_match_act\": cfg.DATASET.MATCH_ACT,\n \"erosion_rates\": cfg.MODEL.LABEL_EROSION,\n \"dilation_rates\": cfg.MODEL.LABEL_DILATION,\n \"do_relabel\": cfg.DATASET.REDUCE_LABEL,\n \"valid_ratio\": cfg.DATASET.VALID_RATIO,\n }\n\n if cfg.DATASET.DO_CHUNK_TITLE == 1: # build TileDataset\n def _make_json_path(path, name):\n if isinstance(name, str):\n return [os.path.join(path, name)]\n\n assert isinstance(name, (list, tuple))\n json_list = [os.path.join(path, name[i]) for i in range(len(name))]\n return json_list\n\n input_path = cfg.DATASET.INPUT_PATH\n volume_json = _make_json_path(input_path, cfg.DATASET.IMAGE_NAME)\n\n label_json, valid_mask_json = None, None\n if mode == 'train':\n if cfg.DATASET.LABEL_NAME is not None:\n label_json = _make_json_path(input_path, cfg.DATASET.LABEL_NAME)\n if cfg.DATASET.VALID_MASK_NAME is not None:\n valid_mask_json = _make_json_path(input_path, cfg.DATASET.VALID_MASK_NAME)\n\n dataset = TileDataset(chunk_num=cfg.DATASET.DATA_CHUNK_NUM,\n chunk_ind=cfg.DATASET.DATA_CHUNK_IND,\n chunk_ind_split=cfg.DATASET.CHUNK_IND_SPLIT,\n chunk_iter=cfg.DATASET.DATA_CHUNK_ITER,\n chunk_stride=cfg.DATASET.DATA_CHUNK_STRIDE,\n volume_json=volume_json,\n label_json=label_json,\n valid_mask_json=valid_mask_json,\n pad_size=cfg.DATASET.PAD_SIZE,\n data_scale=cfg.DATASET.DATA_SCALE,\n coord_range=cfg.DATASET.DATA_COORD_RANGE,\n **shared_kwargs)\n\n else: # build VolumeDataset or VolumeDatasetMultiSeg\n volume, label, valid_mask = _get_input(\n cfg, mode, rank, dir_name_init, img_name_init, min_size=sample_volume_size)\n\n if cfg.MODEL.TARGET_OPT_MULTISEG_SPLIT is not None:\n shared_kwargs['multiseg_split'] = cfg.MODEL.TARGET_OPT_MULTISEG_SPLIT\n dataset = dataset_class(volume=volume, label=label, valid_mask=valid_mask,\n iter_num=iter_num, **shared_kwargs, **dataset_options)\n\n return dataset", "def readpil3d(self):\r\n\r\n # Read the data in as an array.\r\n res = np.loadtxt(self.name, delimiter=' ')\r\n\r\n # Split into useful chunks\r\n self.pos = res[:, 0:3] # Grid point locations\r\n self.Pn = res[:, 3:4] # Normal pressure [Pa]\r\n self.flux = res[:, -1] # Flux\r", "def dataset(self):\n with self._lock:\n if self._dataset is None:\n if isinstance(self._orig_dataset, DaskLazyIndexer):\n self._orig_dataset = self._orig_dataset.dataset\n dataset = dask_getitem(self._orig_dataset, self.keep)\n for transform in self.transforms:\n dataset = transform(dataset)\n self._dataset = dataset\n self._orig_dataset = None\n return self._dataset", "def get_dataset(params, run_mode=\"train\"):\n tokenizer = get_tokenizer(params)\n # Use run_mode to decide input_folder, MR cols, MR max lens.\n msg_col, rsp_col = params.msg_col, params.rsp_col\n max_msg_len, max_rsp_len = params.max_msg_len, params.max_rsp_len\n if run_mode == \"train\":\n input_folder = params.train_input_dir\n elif run_mode == \"valid\":\n input_folder = params.valid_input_dir\n elif run_mode == \"gmr\":\n input_folder = params.gmr_input_dir\n if params.truncate is False:\n max_msg_len, max_rsp_len = np.inf, np.inf\n elif run_mode == \"rsp_set\":\n # TODO: What's the purpose of this mode?\n input_folder = params.rsp_input_dir\n msg_col, rsp_col = 0, params.rsp_text_col\n # TODO: These values should be global parameters instead of being hard coded like this\n # TODO: Why not just set these values to np.inf like above?\n if params.truncate is False:\n max_msg_len, max_rsp_len = 1000, 1000\n elif run_mode == \"eval\":\n input_folder = 
params.eval_input_dir\n elif run_mode == \"export\":\n # TODO: We should remove this mode from this function since it does nothing anyways\n return None, tokenizer\n else:\n raise ValueError(\"SystemLog: Invalid run mode %s.\" % run_mode)\n\n # We consider each file to be in a separate pytorch dataset. We then use ConcatDataset to combine individual datasets\n datasets = []\n total_file_processed = 0\n # This sorting of file is done to make sure that we get the same file order each time\n for file_idx, filename in enumerate(sorted(os.listdir(input_folder))):\n filepath = os.path.join(input_folder, filename)\n datasets.append(MRDataset(filepath, tokenizer, msg_col=msg_col,\n rsp_col=rsp_col, max_msg_len=max_msg_len,\n max_rsp_len=max_rsp_len, run_mode=run_mode, architecture=params.architecture, truncate=params.truncate))\n total_file_processed += 1\n if file_idx % 1000 == 0:\n print(\"SystemLog: %d files processed \" % file_idx)\n print(\"SystemLog: %d files processed in total.\" % total_file_processed)\n mr_dataset = ConcatDataset(datasets)\n\n return mr_dataset, tokenizer", "def load_dataset(\n self,\n ):\n with xr.open_dataset(self._filepath) as fdata:\n out = fdata.assign_coords({\n 'nCells': np.arange(fdata.dims['nCells']),\n })\n if self.time is not None:\n out = out.assign_coords({\n 'Time': self.time,\n })\n if 'nVertLevels' in fdata.dims:\n out = out.assign_coords({\n 'nVertLevels': np.arange(fdata.dims['nVertLevels']),\n })\n if 'nVertLevelsP1' in fdata.dims:\n out = out.assign_coords({\n 'nVertLevelsP1': np.arange(fdata.dims['nVertLevelsP1']),\n })\n if 'nEdges' in fdata.dims:\n out = out.assign_coords({\n 'nEdges': np.arange(fdata.dims['nEdges']),\n })\n if 'nVertices' in fdata.dims:\n out = out.assign_coords({\n 'nVertices': np.arange(fdata.dims['nVertices']),\n })\n if 'nVertLevelsLES' in fdata.dims:\n out = out.assign_coords({\n 'nVertLevelsLES': np.arange(fdata.dims['nVertLevelsLES']),\n })\n return out", "def get_dataset(args):\n\n if args['experiment']['dataset'] == Dataset.mindsets:\n xs, ys, cs = make_mindsets(mindset_sizes=args['dataset']['mindset_sizes'],\n nb_questions=args['dataset']['nb_questions'],\n nb_useless=args['dataset']['nb_useless'],\n noise=args['dataset']['noise'],\n seed=args['experiment']['seed'])\n\n return Data(xs=xs, ys=ys, cs=cs)\n\n if args['experiment']['dataset'] == Dataset.questionnaire_likert:\n xs, ys, cs = make_likert_questionnaire(nb_samples=args['dataset']['nb_samples'],\n nb_features=args['dataset']['nb_features'],\n nb_mindsets=args['dataset']['nb_mindsets'],\n centers=args['dataset']['centers'],\n range_answers=args['dataset']['range_answers'],\n seed=args['experiment']['seed'])\n\n return Data(xs=xs, ys=ys, cs=cs)\n\n if args['experiment']['dataset'] == Dataset.retinal:\n xs, ys = load_RETINAL(root_path=args['root_dir'],\n nb_bins=args['dataset']['nb_bins'],\n max_idx=args['dataset']['max_idx'])\n\n return Data(xs=xs, ys=ys)\n\n if args['experiment']['dataset'] == Dataset.moons:\n xs, ys = make_moons(n_samples=args['dataset']['n_samples'],\n noise=args['dataset']['noise'],\n random_state=args['experiment']['seed'])\n\n return Data(xs=xs, ys=ys)\n\n if args['experiment']['dataset'] == Dataset.breast_cancer_wisconsin:\n xs, ys = load_CANCER(args['dataset']['nb_bins'])\n\n return Data(xs=xs, ys=ys)\n\n if args['experiment']['dataset'] == Dataset.SBM:\n A, ys, G = load_SBM(block_sizes=args['dataset']['block_sizes'],\n p_in=args['dataset']['p'],\n p_out=args['dataset']['q'],\n seed=args['experiment']['seed'])\n\n return Data(ys=ys, A=A, 
G=G)\n\n if args['experiment']['dataset'] == Dataset.gaussian_mixture:\n xs, ys = make_blobs(n_samples=args['dataset']['blob_sizes'],\n centers=args['dataset']['blob_centers'],\n n_features=args['dataset']['blob_centers'],\n cluster_std=args['dataset']['blob_variances'],\n random_state=args['experiment']['seed'])\n\n return Data(xs=xs, ys=ys)\n\n if args['experiment']['dataset'] == Dataset.LFR:\n A, ys, G = load_LFR(nb_nodes=args['dataset']['nb_nodes'],\n tau1=args['dataset']['tau1'],\n tau2=args['dataset']['tau2'],\n mu=args['dataset']['mu'],\n average_degree=args['dataset']['average_degree'],\n min_community=args['dataset']['min_community'],\n seed=args['experiment']['seed'])\n\n return Data(ys=ys, A=A, G=G)\n\n if args['experiment']['dataset'] == Dataset.wave:\n df = pd.read_csv('datasets/waveform.csv')\n xs = df[df.columns[:-1]].to_numpy()\n ys = df[df.columns[-1]].to_numpy()\n\n return Data(xs=xs, ys=ys)\n\n raise ValueError('Wrong name for a dataset')", "def generate_training_data_3D():\n c11 = np.random.uniform(0.05, 1.50, 20)\n c12 = np.random.uniform(-1.50, 1.50, 20)\n c13 = np.random.uniform(-2.50, -0.05, 20)\n c21 = np.random.uniform(-1.50, -0.05, 20)\n c22 = np.random.uniform(-1.50, 1.50, 20)\n c23 = np.random.uniform(0.05, 2.50, 20)\n c1 = np.array([[i, j, k] for i, j, k in zip(c11, c12, c13)])\n c2 = np.array([[i, j, k] for i, j, k in zip(c21, c22, c23)])\n\n points = plt.figure()\n ax = points.add_subplot(111, projection='3d')\n ax.scatter(c1[:, 0], c1[:, 1], c1[:, 2], c='r', marker='^')\n ax.scatter(c2[:, 0], c2[:, 1], c2[:, 2], c='b', marker='*')\n plt.show()\n plt.close()\n\n return c1, c2", "def get_dataset(self):\n return datasets.get_dataset(self.dataset_id)", "def getDataSet(self, i, raw = 0):\n\t\tdata = self.getTimepoint(i)\n\t\tif self.isRGB and self.numberOfComponents == 4:\n\t\t\textract = vtk.vtkImageExtractComponents()\n\t\t\textract.SetComponents(0, 1, 2)\n\t\t\textract.SetInput(data)\n\t\t\tdata = extract.GetOutput()\n\n\t\tif self.flipVertically:\n\t\t\tflip = vtk.vtkImageFlip()\n\t\t\tflip.SetFilteredAxis(1)\n\t\t\tflip.SetInput(data)\n\t\t\tdata = flip.GetOutput()\n\t\tif self.flipHorizontally:\n\t\t\tflip = vtk.vtkImageFlip()\n\t\t\tflip.SetFilteredAxis(0)\n\t\t\tflip.SetInput(data)\n\t\t\tdata = flip.GetOutput()\n\t\t\t\n\t\treturn data", "def get_dataset(name):\n if name == 'cityscapes':\n return Cityscapes", "def data(dataname = None, package = None, cache = False):\n\t#if dataname == None and data == None:\n\t# from rpy2.robjects import r\n\t# print(r.data())\n\treturn sm.datasets.get_rdataset(dataname = dataname, package = package, cache = cache).data", "def make_dataset(self,\n path,\n mode,\n height=None,\n width=None):\n # Split up the possibly comma seperated directories.\n if ',' in path:\n l = path.split(',')\n d = '/'.join(l[0].split('/')[:-1])\n l[0] = l[0].split('/')[-1]\n paths = [os.path.join(d, x) for x in l]\n else:\n paths = [path]\n\n # Generate list of filenames.\n # pylint:disable=g-complex-comprehension\n files = [os.path.join(d, f) for d in paths for f in tf.io.gfile.listdir(d)]\n num_files = len(files)\n ds = tf.data.Dataset.from_tensor_slices(files)\n if mode == 'multiframe':\n # Create a nested dataset.\n ds = ds.map(tf.data.TFRecordDataset)\n # pylint:disable=g-long-lambda\n ds = ds.interleave(\n lambda x: x.map(\n lambda y: self.parse_train(y, height, width),\n num_parallel_calls=tf.data.experimental.AUTOTUNE),\n cycle_length=min(10, num_files),\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n # Prefetch a number of 
batches because reading new ones can take much\n # longer when they are from new files.\n ds = ds.prefetch(10)\n\n return ds", "def Pdata3(fidName=\"T\", N=0):\n\t\tData = Helper.Pdata(fidName, 3)\n\t\tNumzhuiti = Data.shape[0]\n\t\t# Data3 = []\n\t\tfor i in range(0, Numzhuiti):\n\t\t\tPa = Data[i, 0]\n\t\t\tPl = Data[i, 1]\n\t\t\tPr = Data[i, 2]\n\t\t\tPlo = Pa[1] - Pl[1]\n\t\t\tPro = Pa[1] - Pr[1]\n\t\t\tPal = Pa + [0, Plo * N, 0]\n\t\t\tPar = Pa + [0, Pro * N, 0]\n\n\t\t\tif i == 0:\n\t\t\t\tData3 = np.array([Pal])\n\t\t\t\tData3 = np.append(Data3, Pl)\n\t\t\t\tData3 = np.append(Data3, Par)\n\t\t\t\tData3 = np.append(Data3, Pr)\n\t\t\telse:\n\t\t\t\tData3 = np.append(Data3, Pal)\n\t\t\t\tData3 = np.append(Data3, Pl)\n\t\t\t\tData3 = np.append(Data3, Par)\n\t\t\t\tData3 = np.append(Data3, Pr)\n\t\tData3 = np.array(Data3.reshape(Data.shape[0] * 2, 2, 3))\n\t\treturn Data3", "def _create_dataset(self, *data):\n # Make sure data is a tuple of dense tensors\n data = [self._to_torch(x, dtype=torch.FloatTensor) for x in data]\n return TensorDataset(*data)", "def get_dataset(self):\n return self._X, self._y", "def get_dataset(self, data_path, n_workers=4, dataset_args={}):\n self.logging.info('loading dataset...')\n dataset = pd.read_csv(data_path)\n\n self.logging.info('preprocessing data...')\n\n results = [None] * n_workers\n with Pool(processes=n_workers) as pool:\n for i in range(n_workers):\n batch_start = (len(dataset) // n_workers) * i\n if i == n_workers - 1:\n batch_end = len(dataset)\n else:\n batch_end = (len(dataset) // n_workers) * (i + 1)\n\n batch = dataset[batch_start: batch_end]\n results[i] = pool.apply_async(self.preprocess_samples, [batch])\n\n # When debugging, you'd better not use multi-thread.\n # results[i] = self.preprocess_dataset(batch, preprocess_args)\n\n pool.close()\n pool.join()\n\n processed = []\n for result in results:\n processed += result.get()\n\n #padding = self.embedding.to_index('[PAD]')\n return DialogDataset(processed, **dataset_args)", "def get_data(self):\n if self.config['model'] == 'vggnet':\n if self.is_training:\n return self.data.shuffle(self.shuffle).batch(self.batch_size)\n elif self.is_testing:\n return self.data.batch(self.batch_size)\n elif not self.is_testing and not self.is_training:\n return self.data.batch(self.batch_size)\n else:\n raise NotImplementedError('In dataset.py: default input not specified for this model!')", "def _dataset(filename, filter, img_count=1000000):\n try:\n # Attempt to load the dataset.\n with np.load(filename) as data:\n X = data['arr_0']\n y = data['arr_1']\n except:\n # The dataset does not exist, so we regenerate.\n\n # Set up a sample of random images:\n sample_size = (img_count, 3, 3, 3) # 3x3 windows, each containing 3 channels\n images = np.random.random(sample_size)\n\n # The correct label for each \"image\" is the color at its center\n y = images[:, 1, 1, :]\n\n # Now we apply the filter to each of our images and store the filtered image\n print(\"Generating dataset:\")\n\n X = np.zeros(images.shape)\n\n for i in range(images.shape[0]):\n thisImg = images[i]\n filtered = filter.apply(thisImg)\n X[i] = filtered\n\n if (i + 1) % (img_count / 100) == 0:\n print(\"%s: %d%% done\" % (filename, 100 * (i + 1) / img_count))\n\n print(\"Dataset generation complete.\")\n\n np.savez(filename, X, y)\n\n return X[:img_count], y[:img_count]" ]
[ "0.60187584", "0.59878784", "0.58777374", "0.58265644", "0.58206046", "0.5816656", "0.5816164", "0.57970816", "0.57919204", "0.5772738", "0.57650393", "0.57250017", "0.5724938", "0.5707195", "0.5686548", "0.5647479", "0.5564669", "0.55568475", "0.5537837", "0.5516145", "0.55019325", "0.5496016", "0.5495369", "0.5489609", "0.54872686", "0.5481195", "0.54801494", "0.54741186", "0.54517627", "0.54507184" ]
0.6331602
0
Population prior, i.e. $Categorical(\pi)$.
def prior_z(self) -> distributions.Distribution:
    return distributions.Categorical(self.pi)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_prior_name(self):\n dim = Dimension(\"yolo\", \"reciprocal\", 1e-10, 1)\n assert dim.prior_name == \"reciprocal\"\n\n dim = Dimension(\"yolo\", \"norm\", 0.9)\n assert dim.prior_name == \"norm\"\n\n dim = Real(\"yolo\", \"uniform\", 1, 2)\n assert dim.prior_name == \"uniform\"\n\n dim = Integer(\"yolo1\", \"uniform\", -3, 6)\n assert dim.prior_name == \"int_uniform\"\n\n dim = Integer(\"yolo1\", \"norm\", -3, 6)\n assert dim.prior_name == \"int_norm\"\n\n categories = {\"asdfa\": 0.1, 2: 0.2, 3: 0.3, \"lalala\": 0.4}\n dim = Categorical(\"yolo\", categories)\n assert dim.prior_name == \"choices\"", "def prior(self, c, labeled):\n return log(len(labeled[c])/self.N_features)", "def get_prior(self):\n assert self._prior in self._priors, 'Unsupported prior! Check the _priors attribute for a list of priors.'\n if self._prior == 'Gaussian':\n prior = 0.5 * torch.sum(self.parameters ** 2)/self.prior_var\n elif self._prior == 'Cauchy':\n dimconst = (self.parameters.shape[0] + 1)/2.\n prior = dimconst*torch.log(self.prior_var + torch.sum(self.parameters ** 2))\n elif self._prior == 'Sparse':\n n = self.dataset.shape[1]\n gauss_prior = 0.5 * torch.sum(torch.exp(self.parameters[-1] * torch.exp(self.parameters[n:2*n]) * self.parameters[:n] ** 2))\n gamma_density = torch.distributions.Gamma(1.5,0.5)\n# gamma_prior = -gamma_density.log_prob(torch.exp(self.parameters[n:])).sum()\n# lambda_density = torch.distributions.Gamma(1.5,0.5)\n lambda_prior = -gamma_density.log_prob(torch.exp(self.parameters[n:])).sum()\n prior = gauss_prior + lambda_prior\n return prior", "def lnprior(self):\n \n return", "def prior_sample(self):\n pass", "def prior(mu):\n p = np.ones(len(mu))/(mu.max()-mu.min())\n return p", "def P_prior(self):\n return dot(self.U_prior, dot(diag(self.D_prior), self.U_prior.T))", "def prior(store):\n mu = zeros(store['beta'].shape[0])\n Prec = diag(0.005 * ones(store['beta'].shape[0]))\n return -0.5 * dot(store['beta'].transpose(), dot(Prec, store['beta']))", "def test_get_prior_string_dict(self):\n categories = {\"asdfa\": 0.1, 2: 0.2, 3: 0.3, \"lalala\": 0.4}\n dim = Categorical(\n \"yolo\", categories, shape=2, default_value=[\"asdfa\", \"lalala\"]\n )\n assert dim.get_prior_string() == (\n \"choices({'asdfa': 0.10, 2: 0.20, 3: 0.30, 'lalala': 0.40}, \"\n \"shape=2, default_value=['asdfa', 'lalala'])\"\n )", "def analysis(self) -> \"PriorFactor\":\n return self", "def _compute_mix_prior(self):\n if np.all(self.mix_prior == 1):\n return 0\n return np.dot(np.log(self.mix_weight).T, (self.mix_prior - 1))", "def priorProb(self, state):\n actions = []\n for i in range(0, 10):\n actions.append(((i, i+1), random.uniform(0, 1))) \n \n return actions", "def buildConditionalPriorTerm(self):\r\n\r\n # shape is (batch size,)\r\n self.conditional_prior = - T.mean(T.sum(T.exp(self.log_pzgxw)*(self.log_qxgy.dimshuffle(0,'x',1,'x') - self.log_pxgzw), axis=3), axis=[1,2])", "def __init__(self, prior: Prior):\n # TODO: Consider analytical solution rather than implementing optimisation\n super().__init__(prior.factor, x=prior, name=namer(self.__class__.__name__))\n self.prior = prior\n self.label = f\"PriorFactor({prior.label})\"", "def bias_prior(self):", "def prior(cube, ndim, nparams):\n # construct prior from recovery file\n counter = 0\n if params2 is None:\n return\n for key in params2.keys():\n nparams_tmp = int(params2[key]['nparams'])\n for ii in range(nparams_tmp):\n # sp = [name, prior type, x1, x2]\n sp =\\\n params2[key]['param'+str(ii+1)].split(',')\n if sp[1][0] == 'U' and 
sp[2][:5]=='param' and sp[3][:5]=='param':\n subtract1 = int(key[-1]) - int(sp[2][-1])\n subtract2 = int(key[-1]) - int(sp[3][-1])\n cube[counter] = GeneralPrior(cube[counter], 'U',\n cube[counter-subtract1], cube[counter-subtract2])\n elif sp[1][0] == 'U' and sp[2][:5]=='param':\n subtract = int(key[-1]) - int(sp[2][-1])\n cube[counter] = GeneralPrior(cube[counter], 'U',\n cube[counter-subtract], float(sp[3]))\n elif sp[1][0] == 'U' and sp[3][:5]=='param':\n subtract = int(key[-1]) - int(sp[2][-1])\n cube[counter] = GeneralPrior(cube[counter], 'U',\n float(sp[2]), cube[counter - subtract])\n else:\n cube[counter] = GeneralPrior(cube[counter], sp[1], float(sp[2]),\n float(sp[3]))\n counter += 1", "def set_prior_priorunc_synthetic(self):\n\n lai_coeff_absunc = None\n statevec_absunc = None\n\n #-- \n if self.prior_inifile!=None:\n lai_coeff_absunc, statevec_absunc = self._setprior_from_inifile()\n elif self.use_generic_prior:\n self._setprior_generic_agriculture()\n statevec_absunc = self.generic_prior_unc\n else:\n #-- overall number of time-points in schedule\n npts = self.get_npts()\n\n #-- default prior file\n prior_file = os.path.join(ipt_dir_path, 'mni_stat_jules_2017.csv')\n\n #-- get signature simulator default state\n msg = \"START reading state variables from file ***{}***...\".format(prior_file)\n FileLogger.info(msg)\n state_inst = sv.get_state_csv(fname=prior_file, fmt='%Y-%m-%d %H:%M:%S' )\n msg = \"...reading DONE\"\n FileLogger.info(msg)\n\n #-- LAI,Canopy-Height,Soil-Moisture\n self.prstate = np.empty((3,npts), dtype=np.float64)\n\n for i,date_utc in enumerate(self.schedule_dct['date_utc']):\n idx, timedelt = sv.find_nearest_date_idx(state_inst.date_utc, date_utc)\n # print \"MVMV::nearest={} idx={} timedelt={}\".format(\n # state_inst.date_utc[idx], idx, timedelt)\n #-- LAI\n self.prstate[0,i] = state_inst.lai[idx]\n #-- canopy-height\n self.prstate[1,i] = state_inst.can_height[idx]\n #-- SM\n self.prstate[2,i] = state_inst.soil_moisture[idx]\n\n #-- set uncertainty values\n self._set_priorunc(statevec_absunc=statevec_absunc, lai_coeff_absunc=lai_coeff_absunc)", "def set_prior(self,field):\n self.observation_thresholds = [i/self.observations for i in range(0,self.observations)]\n self.observation_samples = 1\n # TODO: For use after integrating image processing with MCESP for Game-Delayed Reinforcements\n # self.norm = field.max()", "def _setprior_generic_agriculture(self):\n\n #-- number of time-points\n npts = self.get_npts()\n\n #-- LAI,Canopy-Height,Soil-Moisture\n self.prstate = np.empty((3,npts), dtype=np.float64)\n #-- LAI\n self.prstate[0,:] = self.generic_prior[0]\n #-- canopy-height\n self.prstate[1,:] = self.generic_prior[1]\n #-- soil moisture (volumetric)\n self.prstate[2,:] = self.generic_prior[2]", "def _call(self, x):\n if functional.prior is None:\n return np.exp(x)\n else:\n return functional.prior * np.exp(x)", "def prior_sample_parameter(self, parameter):\n pass", "def test_get_prior_string_list(self):\n categories = list(range(10))\n categories[0] = \"asdfa\"\n categories[2] = \"lalala\"\n dim = Categorical(\n \"yolo\", categories, shape=2, default_value=[\"asdfa\", \"lalala\"]\n )\n assert dim.get_prior_string() == (\n \"choices(['asdfa', 1, 'lalala', 3, 4, 5, 6, 7, 8, 9], \"\n \"shape=2, default_value=['asdfa', 'lalala'])\"\n )", "def prior_model(self) -> Collection:\n return Collection(self.prior)", "def prior(self):\n return self.__prior", "def prior(self):\n return self.__prior", "def prior(self):\n return self.__prior", "def prior(self):\n return 
self.__prior", "def log_prior(self):\n raise NotImplementedError(\"the log_prior property should \"\n \"be defined in the Estimator sub-class\")", "def prior_vars(self):\n priors = []\n for i in self.active_ssms(0):\n ssm = self.ssms[i]\n prior = ssm.prior_vars()\n\n if self.ssm_starts[i] < 0:\n P = np.diag(prior)\n P2 = P.copy()\n for k in range(-self.ssm_starts[i]):\n ssm.transition_covariance(P2, k+1, P)\n ssm.transition_noise_diag(k+1, prior)\n np.fill_diagonal(P, np.diag(P) + prior)\n P2 = P\n\n # since the interface only supports independent\n # priors, return a diagonal approximation of the true\n # prior\n prior = np.diag(P)\n priors.append(prior)\n return np.concatenate(priors)", "def set_prior_priorunc_general(self):\n\n #-- some configurations apply absolute uncertainties\n lai_coeff_absunc = None\n statevec_absunc = None\n is_generic_prior = False\n\n #--\n if self.prior_states_file!=None:\n states_file = self.prior_states_file\n basename = os.path.basename(states_file)\n if os.path.splitext(basename)[1]=='.nc':\n msg = \"Prior state information will be read from ***{}***\".format(states_file)\n FileLogger.info(msg)\n self._setprior_jules(states_file)\n msg = \"...reading prior DONE\"\n FileLogger.info(msg)\n elif os.path.splitext(basename)[1]=='.csv':\n msg = \"Prior state information will be read from ***{}***\".format(states_file)\n FileLogger.info(msg)\n self._setprior_csv(states_file)\n msg = \"...reading prior DONE\"\n FileLogger.info(msg)\n else:\n msg = \"Unrecognised format of states file ***{}***. Cannot continue!\".format(\n states_file)\n FileLogger.fatal(msg)\n raise RuntimeError(msg)\n return\n elif self.prior_inifile!=None:\n lai_coeff_absunc, statevec_absunc = self._setprior_from_inifile()\n else:\n self._setprior_generic_agriculture()\n is_generic_prior = True\n statevec_absunc = self.generic_prior_unc\n\n #-- set uncertainty values\n self._set_priorunc( lai_coeff_absunc=lai_coeff_absunc,\n statevec_absunc=statevec_absunc,\n is_generic_prior=is_generic_prior )" ]
[ "0.6308094", "0.6022625", "0.5991592", "0.5984886", "0.5965109", "0.5834765", "0.58280075", "0.57996655", "0.57636964", "0.57478815", "0.57455534", "0.56727767", "0.56464076", "0.559691", "0.5589314", "0.55594623", "0.5548906", "0.55311286", "0.55226177", "0.5502985", "0.55029565", "0.54959536", "0.5439871", "0.5436019", "0.5436019", "0.5436019", "0.5436019", "0.539356", "0.53821176", "0.5377098" ]
0.6626557
0
Test vertex_areas. Vertex area is the area of all of the triangles who are in contact
def test_vertex_areas(self, faces, point):
    number_of_contact_faces = gs.array([3, 5, 5, 5, 5, 5, 3, 5])
    triangle_area = 0.5 * 2 * 2
    expected = 2 * (number_of_contact_faces * triangle_area) / 3

    space = self.Space(faces)
    result = space.vertex_areas(point)
    assert result.shape == (8,)
    assert expected.shape == (8,)
    assert gs.allclose(result, expected), result

    point = gs.array([point, point])
    expected = gs.array([expected, expected])
    result = space.vertex_areas(point)
    assert point.shape == (2, 8, 3)
    assert result.shape == (2, 8), result.shape
    assert gs.allclose(result, expected), result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_triangle_area():\n v1 = (0,0); v2 = (1,0); v3 = (0,2)\n verticies = [v1,v2,v3]\n expected = 1\n computed = get_triangle_area(verticies)\n tol = 1E-14\n success = abs(expected-computed) < tol\n msg = 'computed area={} != {} (expected)'.format(computed,expected)\n assert success,msg", "def vertex_areas(\n points: np.ndarray,\n triangles: np.ndarray,\n tri_areas: Optional[np.ndarray] = None,\n) -> np.ndarray:\n if tri_areas is None:\n tri_areas = triangle_areas(points, triangles)\n v_areas = np.zeros(len(points), dtype=float)\n for a, t in zip(tri_areas / 3, triangles):\n v_areas[t[0]] += a\n v_areas[t[1]] += a\n v_areas[t[2]] += a\n return v_areas", "def vertex_areas(self, point):\n batch_shape = point.shape[:-2]\n n_vertices = point.shape[-2]\n n_faces = self.faces.shape[0]\n area = self._triangle_areas(point)\n id_vertices = gs.broadcast_to(\n gs.flatten(self.faces), batch_shape + (math.prod(self.faces.shape),)\n )\n incident_areas = gs.zeros(batch_shape + (n_vertices,))\n val = gs.reshape(\n gs.broadcast_to(gs.expand_dims(area, axis=-2), batch_shape + (3, n_faces)),\n batch_shape + (-1,),\n )\n incident_areas = gs.scatter_add(\n incident_areas, dim=len(batch_shape), index=id_vertices, src=val\n )\n vertex_areas = 2 * incident_areas / 3.0\n return vertex_areas", "def test_regular_polygon_area(self):\n self.assertEqual(10, regular_polygon_area(\n self.values['perimeter'], self.values['apothem']))", "def test_absolute_areas(self):\n\n assert len(self.test_shape.areas) == 4\n assert len(set([round(i) for i in self.test_shape.areas])) == 3\n assert self.test_shape.areas.count(pytest.approx(60 * math.pi * 2 * 1000)) == 2\n assert self.test_shape.areas.count(pytest.approx(50 * math.pi * 2 * 970)) == 1\n assert self.test_shape.areas.count(pytest.approx(50 * math.pi * 2 * 1030)) == 1", "def compute_triangle_area(vertices):\n v01 = vertices[0] - vertices[1]\n v02 = vertices[0] - vertices[2]\n cross_prod = np.cross(v01, v02)\n area = 0.5 * np.linalg.norm(cross_prod)\n return area", "def _triangle_areas(self, point):\n vertex_0, vertex_1, vertex_2 = self._vertices(point)\n len_edge_12 = gs.linalg.norm((vertex_1 - vertex_2), axis=-1)\n len_edge_02 = gs.linalg.norm((vertex_0 - vertex_2), axis=-1)\n len_edge_01 = gs.linalg.norm((vertex_0 - vertex_1), axis=-1)\n half_perimeter = 0.5 * (len_edge_12 + len_edge_02 + len_edge_01)\n return gs.sqrt(\n (\n half_perimeter\n * (half_perimeter - len_edge_12)\n * (half_perimeter - len_edge_02)\n * (half_perimeter - len_edge_01)\n ).clip(min=1e-6)\n )", "def test_absolute_shape_areas(self):\n\n assert self.test_shape.area == pytest.approx((math.pi * (10**2) * 2) + (math.pi * (2 * 10) * 30))\n assert len(self.test_shape.areas) == 3\n assert self.test_shape.areas.count(pytest.approx(math.pi * (10**2))) == 2\n assert self.test_shape.areas.count(pytest.approx(math.pi * (2 * 10) * 30)) == 1", "def test_triangle(self):\n result = shape_area.triangle_area(10,5)\n self.assertEqual(result,25)", "def test_triangle_area(self):\n self.assertEqual(6, triangle_area(\n self.values['base'], self.values['height']))", "def test_polyarea(self):\n\n xcoords, ycoords = [0, 1, 1, 0, 0], [0, 0, 1, 1, 0]\n xycoords = np.stack((xcoords, ycoords), axis=1)\n\n # Area calculation from separately provided x, y coordinates\n self.assertEqual(po.polyarea(x=xcoords, y=ycoords), 1.)\n # Area calculation from combined x, y coordinates\n self.assertEqual(po.polyarea(coords=xycoords), 1.)", "def test_triangle_positive_area(self):\n t = Triangle(Point(0, 3.1415), Point(2.7, 3), Point(3 
** 0.5, 6.023))\n self.assertEqual(t.area(1), 4.0,\n \"Test of Triangle(Point(0, 3.1415), Point(2.7, 3), Point(3 ** 0.5, 6.023)).area(1),\\\n returned value != 4.0.\")\n self.assertEqual(t.area(), 4.013,\n \"Test of Triangle(Point(0, 3.1415), Point(2.7, 3), Point(3 ** 0.5, 6.023)).area(1) failed,\\\n returned value != 4.013.\")\n self.assertEqual(t.area(6), 4.012568,\n \"Test of Triangle(Point(0, 3.1415), Point(2.7, 3), Point(3 ** 0.5, 6.023)).area(6) failed,\\\n returned value != 4.012568.\")", "def test_inside_triangle(self):\n\n # defining triangle vertices\n v1x, v1y = 0, 0\n v2x, v2y = 1, 1\n v3x, v3y = 1, 0\n\n # test vertices are inside\n self.assertTrue(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, v1x, v1y))\n self.assertTrue(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, v2x, v2y))\n self.assertTrue(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, v3x, v3y))\n\n # check line segments are inside\n self.assertTrue(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, 0.5, 0))\n self.assertTrue(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, 1, 0.5))\n self.assertTrue(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, 0.5, 0.5))\n\n # check an interior point\n self.assertTrue(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, 0.5, 0.1))\n\n # check an exterior point\n self.assertFalse(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, -0.5, -0.5))\n self.assertFalse(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, 0.5, -0.01))\n self.assertFalse(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, 1.01, 0.5))\n self.assertFalse(inside_triangle(v1x, v1y, v2x, v2y, v3x, v3y, 0.49999, 0.5001))", "def select_area(minArea): \n # Switch in edit mode \n bpy.ops.object.mode_set(mode='EDIT')\n \n # Deselect everything\n bpy.ops.mesh.select_all(action=\"DESELECT\")\n \n # Load mesh\n me = bpy.context.edit_object.data\n bm = bmesh.from_edit_mesh(me)\n # Ensure internal data needed for int subscription is initialized\n bm.faces.ensure_lookup_table()\n\n # Array containing the different areas\n loops = []\n faces = bm.faces\n\n # Loop for detect multiple areas\n while faces:\n faces[0].select_set(True) # Select 1st face\n bpy.ops.mesh.select_linked() # Select all linked faces makes a full loop\n loops.append([f.index for f in faces if f.select])\n bpy.ops.mesh.hide(unselected=False) # Hide the detected loop\n faces = [f for f in bm.faces if not f.hide] # Update faces\n\n # Unhide all faces\n bpy.ops.mesh.reveal()\n print(\"Mesh has {} parts\".format(len(loops)))\n\n print(\"\\nThe face lists are:\")\n for loop in loops:\n print(loop)\n \n # Switch in edit mode \n bpy.ops.object.mode_set(mode='EDIT')\n # Deselect everything\n bpy.ops.mesh.select_all(action=\"DESELECT\")\n # Switch in object mode\n bpy.ops.object.mode_set(mode='OBJECT')\n\n # Loop to select areas are higher than the area min\n area = 0 \n for rows in range(len(loops)):\n area = 0\n for columns in loops[rows]:\n # Calculate the area\n area = area + bpy.context.active_object.data.polygons[columns].area\n print(rows)\n print(area)\n print(minArea)\n # Compare the area with the area min\n if area > minArea:\n for columns in loops[rows]:\n # Select all the faces of the area\n bpy.context.active_object.data.polygons[columns].select = True\n\n # Switch in edit mode \n bpy.ops.object.mode_set(mode='EDIT')", "def test_polygon_area(self):\n\n # Create closed simple polygon (counter clock wise)\n P = numpy.array([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]])\n A = calculate_polygon_area(P)\n msg = 'Calculated area was %f, expected 1.0 deg^2' % A\n assert numpy.allclose(A, 1), msg\n\n 
# Create closed simple polygon (clock wise)\n P = numpy.array([[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]])\n A = calculate_polygon_area(P)\n msg = 'Calculated area was %f, expected 1.0 deg^2' % A\n assert numpy.allclose(A, 1), msg\n\n A = calculate_polygon_area(P, signed=True)\n msg = 'Calculated signed area was %f, expected -1.0 deg^2' % A\n assert numpy.allclose(A, -1), msg\n\n # Not starting at zero\n # Create closed simple polygon (counter clock wise)\n P = numpy.array([[168, -2], [169, -2], [169, -1],\n [168, -1], [168, -2]])\n A = calculate_polygon_area(P)\n\n msg = 'Calculated area was %f, expected 1.0 deg^2' % A\n assert numpy.allclose(A, 1), msg\n\n # Realistic polygon\n filename = '%s/%s' % (TESTDATA, 'test_polygon.shp')\n layer = read_layer(filename)\n geometry = layer.get_geometry()\n\n P = geometry[0]\n A = calculate_polygon_area(P)\n\n # Verify against area reported by qgis (only three decimals)\n qgis_area = 0.003\n assert numpy.allclose(A, qgis_area, atol=1.0e-3)\n\n # Verify against area reported by ESRI ARC (very good correspondence)\n esri_area = 2.63924787273461e-3\n assert numpy.allclose(A, esri_area, rtol=0, atol=1.0e-10)", "def inside(self, areas):\n\n poly_orig = geometry.Polygon(self.area_poly)\n poly_origb = affinity.scale(poly_orig, xfact=1.1, yfact=1.1)\n idf = shapely.vectorized.contains(\n poly_origb, areas['RA'], areas['Dec'])\n\n return areas[idf]", "def test(x_0, y_0, x_1, y_1, x_2, y_2):\n \n print(\"A triangle with vertices (\" + str(x_0) + \",\" + str(y_0) + \"),\")\n print(\"(\" + str(x_1) + \",\" + str(y_1) + \"), and\")\n print(\"(\" + str(x_2) + \",\" + str(y_2) + \") has an area of\")\n print(str(triangle_area(x_0, y_0, x_1, y_1, x_2, y_2)) + \".\")", "def calculate_areas(polygon):\n project = ft.partial(pj.transform,\n pj.Proj(init='epsg:4326'),\n pj.Proj('+proj=eck4 +lat_0=' + str(polygon.centroid.y) + ' +lon_0=' + str(polygon.centroid.x)))\n field_projected = transform(project, polygon)\n # convert from square meters to acres\n return uom.Uom(field_projected.area, uom.SquareMeter)", "def isInside(x1, y1, x2, y2, x3, y3, x, y):\n # Calculate area of triangle ABC\n A = area (x1, y1, x2, y2, x3, y3)\n \n # Calculate area of triangle PBC\n A1 = area (x, y, x2, y2, x3, y3)\n \n # Calculate area of triangle PAC\n A2 = area (x1, y1, x, y, x3, y3)\n \n # Calculate area of triangle PAB\n A3 = area (x1, y1, x2, y2, x, y)\n \n # Check if sum of A1, A2 and A3\n # is same as A\n if(A == A1 + A2 + A3):\n return True\n else:\n return False", "def get_face_areas(self, idx=-1):\n if idx >= len(self.faces):\n raise IndexError\n if idx >= 0:\n v1, v2, v3 = self.faces[idx]\n v1, v2, v3 = self.vertices[v1], self.vertices[v2], self.vertices[v3]\n a = np.linalg.norm(v1 - v2)\n b = np.linalg.norm(v1 - v3)\n c = np.linalg.norm(v2 - v3)\n s = (a + b + c) / 2\n area = np.sqrt(s * (s - a) * (s - b) * (s - c))\n return area\n else:\n v1, v2, v3 = self.faces[:, 0], self.faces[:, 1], self.faces[:, 2]\n v1, v2, v3 = self.vertices[v1], self.vertices[v2], self.vertices[v3]\n a = np.linalg.norm(v1 - v2, axis=1)\n b = np.linalg.norm(v1 - v3, axis=1)\n c = np.linalg.norm(v2 - v3, axis=1)\n s = (a + b + c) / 2\n area = np.sqrt(s * (s - a) * (s - b) * (s - c))\n return area", "def find_area(self):\n min_lat_point = self.latitude_min\n max_lat_point = self.latitude_max\n min_lon_point = self.longitude_min\n max_lon_point = self.longitude_max\n self.rename_latitude()\n self.rename_longitude()\n all_lat_bounds = self.cube.coord('latitude').bounds\n all_lon_bounds = 
self.cube.coord('longitude').bounds\n # print(all_lat_bounds)\n # print(all_lon_bounds)\n for i, lat in enumerate(all_lat_bounds):\n for j, lon in enumerate(all_lon_bounds):\n lat_bounds = lat # 2D array of the lower and upper lat bounds\n lon_bounds = lon # 2D array of the lower and upper lon bounds\n if lat_bounds[0] <= min_lat_point < lat_bounds[1]:\n if lon_bounds[0] <= min_lon_point < lon_bounds[1]:\n nlat_min = i\n nlon_min = j\n else:\n pass\n else:\n pass\n\n for k, lat in enumerate(all_lat_bounds):\n for l, lon in enumerate(all_lon_bounds):\n lat_bounds = lat # 2D array of the lower and upper lat bounds\n lon_bounds = lon # 2D array of the lower and upper lon bounds\n if lat_bounds[0] <= max_lat_point < lat_bounds[1]:\n if lon_bounds[0] <= max_lon_point < lon_bounds[1]:\n nlat_max = k\n nlon_max = l\n else:\n pass\n else:\n pass\n\n area_subset = self.cube[:, nlat_min:nlat_max+1, nlon_min:nlon_max+1]\n # print(area_subset.coord('latitude').points)\n # print(area_subset.coord('longitude').points)\n area_mean = area_subset.collapsed(['latitude', 'longitude'],\n iris.analysis.MEAN)\n\n return area_mean", "def test_area():\n\n pt0 = [0, 0]\n pt1 = [5, 5]\n pt2 = [5, 0]\n\n truth = 12.5\n\n assert isclose(truth, area([pt0, pt1, pt2]))", "def compute_mesh_area(mesh):\n vertices = mesh.vertices\n faces = mesh.faces\n areas = [compute_triangle_area(vertices[face]) for face in faces]\n mesh_surface_area = sum(areas)\n return mesh_surface_area", "def planar_intersection_polygon(area_corners, segment_corners):\n # First test each \n lons = np.array([])\n lats = np.array([])\n for segment_corner in segment_corners:\n if planar_point_inside(segment_corner,area_corners):\n currlon = segment_corner.lon\n # MLS use wrap_longitudes?\n if currlon < 0:\n currlon += 2*math.pi\n lons = np.concatenate((lons,[currlon]))\n lats = np.concatenate((lats,[segment_corner.lat]))\n log.info('Adding intersection from segment '+str(segment_corner))\n for area_corner in area_corners:\n if planar_point_inside(area_corner,segment_corners):\n currlon = area_corner.lon\n # MLS use wrap_longitudes?\n if currlon < 0:\n currlon += 2*math.pi\n lons = np.concatenate((lons,[currlon]))\n lats = np.concatenate((lats,[area_corner.lat]))\n log.info('Adding intersection from area '+str(area_corner))\n\n area_line1 = Line(area_corners[0], area_corners[1])\n area_line2 = Line(area_corners[1], area_corners[2])\n area_line3 = Line(area_corners[2], area_corners[3])\n area_line4 = Line(area_corners[3], area_corners[0])\n\n segment_line1 = Line(segment_corners[0], segment_corners[1])\n segment_line2 = Line(segment_corners[1], segment_corners[2])\n segment_line3 = Line(segment_corners[2], segment_corners[3])\n segment_line4 = Line(segment_corners[3], segment_corners[0])\n\n for i in (area_line1, area_line2, area_line3, area_line4):\n for j in (segment_line1, segment_line2, segment_line3, segment_line4):\n intersect = i.intersection(j)\n if intersect:\n log.info('Adding actual intersection '+str(intersect))\n currlon = intersect.lon\n # MLS use wrap_longitudes?\n if intersect.lon < 0:\n currlon += 2*math.pi\n lons = np.concatenate((lons,[currlon]))\n lats = np.concatenate((lats,[intersect.lat]))\n\n minlon = math.degrees(lons.min())\n maxlon = math.degrees(lons.max())\n minlat = math.degrees(lats.min())\n maxlat = math.degrees(lats.max())\n # Coordinate MUST be between -180 and 180\n # MLS use wrap_longitudes?\n if minlon > 180:\n minlon -= 180\n if maxlon > 180:\n maxlon -= 180\n from pyresample.spherical_geometry import Coordinate\n 
return [Coordinate(minlon,maxlat),\n Coordinate(maxlon,maxlat),\n Coordinate(maxlon,minlat),\n Coordinate(minlon,minlat)]", "def test_multi_area(self):\n pass", "def averageInsideVertices(mesh):\r\n cmds.select(mesh)\r\n cmds.polySelectConstraint(m=3, t=0x0001, w=2)\r\n cmds.polySelectConstraint(dis=True)\r\n cmds.polyAverageVertex(i = 10, ch = 0)", "def polygon_area(ppath): # pragma: no cover\n v_ = ppath.vertices\n if len(v_) < 3:\n return 0.0\n x_ = v_[:, 1] - v_[:, 1].mean()\n y_ = v_[:, 0] - v_[:, 0].mean()\n correction = x_[-1] * y_[0] - y_[-1] * x_[0]\n main_area = np.dot(x_[:-1], y_[1:]) - np.dot(y_[:-1], x_[1:])\n return 0.5 * np.abs(main_area + correction)", "def triangle_areas(points: np.ndarray, triangles: np.ndarray) -> np.ndarray:\n xy = points[triangles]\n # s1 = xy[:, 2, :] - xy[:, 1, :]\n # s2 = xy[:, 0, :] - xy[:, 2, :]\n # s3 = xy[:, 1, :] - xy[:, 0, :]\n # which can be simplified to\n # s = xy[:, [2, 0, 1]] - xy[:, [1, 2, 0]] # 3D\n s = xy[:, [2, 0]] - xy[:, [1, 2]] # 2D\n a = np.linalg.det(s)\n return a * 0.5", "def refinement_func_area(tri_points, area):\r\n max_area = 0.005\r\n return bool(area > max_area)", "def calc_surface_area(faces, verts):\n # Calculate the surface area of a mesh from it's triangle faces.\n # faces: List of all the faces on the surface. Each face indexes three\n # points from verts which make up the triangle face.\n # verts: List of all the vertices on the surface.\n area = 0\n for face in faces:\n # Extract x's and y's from the face's vertices.\n xs = [verts[face[0]][0], verts[face[1]][0], verts[face[2]][0]]\n ys = [verts[face[0]][1], verts[face[1]][1], verts[face[2]][1]]\n # Compute area of face from triangle points.\n base = max(xs) - min(xs)\n height = max(ys) - min(ys)\n area += 0.5 * (base + height)\n return area" ]
[ "0.7278366", "0.7158183", "0.68438345", "0.67501736", "0.66490245", "0.6619761", "0.658673", "0.6489864", "0.64847827", "0.63019234", "0.6286598", "0.6198661", "0.618283", "0.61759", "0.61708784", "0.61697274", "0.61374867", "0.6110089", "0.606067", "0.59845203", "0.5979044", "0.59579587", "0.59507424", "0.59365904", "0.58778286", "0.58488214", "0.58386195", "0.5832783", "0.58326", "0.5831316" ]
0.8256436
0
Test normals. We test this on a space whose initializing point is a cube, and we test the function on a cube with sides of length 2 centered at the origin. The cube is meshed with 12 triangles (2 triangles per face). Recall that the magnitude of each normal vector is equal to the area of the face it is normal to.
def test_normals(self, faces, point): space = self.Space(faces=faces) cube_normals = gs.array( [ [0.0, 0.0, 2.0], [0.0, 0.0, 2.0], [0.0, 2.0, 0.0], [0.0, 2.0, 0.0], [2.0, 0.0, 0.0], [2.0, 0.0, 0.0], [0.0, -2.0, 0.0], [0.0, -2.0, 0.0], [-2.0, 0.0, 0.0], [-2.0, 0.0, 0.0], [0.0, 0.0, -2.0], [0.0, 0.0, -2.0], ] ) expected = cube_normals result = space.normals(point) are_close = [ (gs.allclose(res, exp) or gs.allclose(res, -exp)) for res, exp in zip(result, expected) ] assert gs.all(are_close) point = gs.array([point, point]) result = space.normals(point) are_close_0 = [ (gs.allclose(res, exp) or gs.allclose(res, -exp)) for res, exp in zip(result[0], expected) ] are_close_1 = [ (gs.allclose(res, exp) or gs.allclose(res, -exp)) for res, exp in zip(result[1], expected) ] assert gs.all(gs.array([are_close_0, are_close_1]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_surface_normal(self):\n vertices = np.array([[0, 1, 0], [0, 0, 0], [1, 0, 0]])\n expected = np.array([0, 0, 1])\n np.testing.assert_almost_equal(surface_normal(vertices), expected)\n\n # Test against multiple triangles\n vertices = np.r_[vertices[np.newaxis, :, :], [[[0, 0, 0], [0, 2, 0], [2, 0, 0]]]]\n expected = np.array([[0, 0, 1], [0, 0, -1]])\n np.testing.assert_almost_equal(surface_normal(vertices), expected)\n\n # Some real data\n vertices = np.array([[2.435, -1.82, -0.53], [2.635, -2., -0.58], [2.535, -1.7, -0.58]])\n expected = np.array([0.33424239, 0.11141413, 0.93587869])\n np.testing.assert_almost_equal(surface_normal(vertices), expected)\n\n # Test input validation\n self.assertRaises(ValueError, surface_normal, np.array([[1, 2, 3, 4]]))", "def face_normals(xyz, triangles):\n\n\tabc_xyz = face_attr(xyz, triangles)\n\n\tbc_xyz = abc_xyz[:,:,1:3] - abc_xyz[:,:,0:1]\n\tfn = tf.linalg.cross(bc_xyz[:,:,0], bc_xyz[:,:,1])\n\tfn = tf.math.l2_normalize(fn, -1)\n\treturn fn", "def FaceNormals(self):\n\n self.__do_memebers_exist__()\n\n points = np.copy(self.points)\n if points.shape[1] < 3:\n dum = np.zeros((points.shape[0],3))\n dum[:,:points.shape[1]] = points\n points = dum\n\n if self.element_type == \"tet\" or self.element_type == \"hex\":\n faces = self.faces\n elif self.element_type == \"tri\" or self.element_type == \"quad\":\n faces = self.elements\n else:\n raise ValueError(\"Cannot compute face normals on {}\".format(self.element_type))\n\n\n face_coords = self.points[faces[:,:3],:]\n\n p1p0 = face_coords[:,1,:] - face_coords[:,0,:]\n p2p0 = face_coords[:,2,:] - face_coords[:,0,:]\n\n normals = np.cross(p1p0,p2p0)\n norm_normals = np.linalg.norm(normals,axis=1)\n normals[:,0] /= norm_normals\n normals[:,1] /= norm_normals\n normals[:,2] /= norm_normals\n\n # CHECK IF THE NORMAL IS OUTWARD - FOR LINES DIRECTIONALITY DOES NOT MATTER\n if self.element_type == \"tet\" or self.element_type == \"hex\":\n self.GetElementsWithBoundaryFaces()\n meds = self.Medians()\n face_element_meds = meds[self.boundary_face_to_element[:,0],:]\n p1pm = face_coords[:,1,:] - face_element_meds\n # IF THE DOT PROUCT OF NORMALS AND EDGE-MED NODE VECTOR IS NEGATIVE THEN FLIP\n _check = np.einsum(\"ij,ij->i\",normals,p1pm)\n normals[np.less(_check,0.)] = -normals[np.less(_check,0.)]\n\n return normals", "def test_normal_unit_length(self):\n neighborhood, pc = create_point_cloud_in_plane_and_neighborhood()\n normals = np.array(EigenValueVectorizeFeatureExtractor().extract(pc, neighborhood, None, None, None)[3:6])\n lengths = np.sum(normals * normals, axis=0)\n np.testing.assert_almost_equal(np.ones_like(lengths), lengths)", "def compute_face_normals(vertices_zyx, faces, normalize=False):\n # numpy is faster than numba for face normals.\n # Always use numpy.\n return compute_face_normals_numpy(vertices_zyx, faces, normalize)", "def test_normal_always_up(self):\n z_of_normals = []\n for i in range(100):\n neighborhood, pc = create_point_cloud_in_plane_and_neighborhood()\n z_of_normals += list(EigenValueVectorizeFeatureExtractor().extract(pc, neighborhood, None, None, None)[5])\n np.testing.assert_array_less(np.zeros_like(z_of_normals), z_of_normals)", "def unit_normals(p,q,r): \n vx1 = p[0] - r[0] # x1 - x3. \n vy1 = p[1] - r[1] # y1 - y3. \n vz1 = p[2] - r[2] # z1 - z3. \n\n vx2 = q[0] - r[0] # x2 - x3. \n vy2 = q[1] - r[1] # y2 - y3. \n vz2 = q[2] - r[2] # z2 - z3. 
\n\n vnx = vy1*vz2 - vz1*vy2 \n vny = vz1*vx2 - vx1*vz2 \n vnz = vx1*vy2 - vy1*vx2 \n\n len_vn = math.sqrt(vnx*vnx + vny*vny + vnz*vnz) \n vnx = vnx/len_vn \n vny = vny/len_vn \n vnz = vnz/len_vn \n\n return vnx, vny, vnz", "def Normals(self, show_plot=False):\n\n ndim = self.InferSpatialDimension()\n if self.element_type == \"tet\" or self.element_type == \"hex\":\n self.GetBoundaryFaces()\n self.GetBoundaryEdges()\n elif self.element_type == \"tri\" or self.element_type == \"quad\":\n self.GetBoundaryEdges()\n\n if self.element_type == \"tet\" or self.element_type == \"hex\":\n normals = self.FaceNormals()\n elif self.element_type == \"tri\" or self.element_type == \"quad\" or self.element_type == \"line\":\n if self.points.shape[1] == 3:\n normals = self.FaceNormals()\n else:\n if self.element_type == \"tri\" or self.element_type == \"quad\":\n edges = self.edges\n elif self.element_type == \"line\":\n edges = self.elements\n\n edge_coords = self.points[edges[:,:2],:]\n p1p0 = edge_coords[:,1,:] - edge_coords[:,0,:]\n\n normals = np.zeros_like(p1p0)\n normals[:,0] = -p1p0[:,1]\n normals[:,1] = p1p0[:,0]\n norm_normals = np.linalg.norm(normals,axis=1)\n normals[:,0] /= norm_normals\n normals[:,1] /= norm_normals\n\n # CHECK IF THE NORMAL IS OUTWARD - FOR LINES DIRECTIONALITY DOES NOT MATTER\n if self.element_type == \"tri\" or self.element_type == \"quad\":\n self.GetElementsWithBoundaryEdges()\n meds = self.Medians()\n edge_element_meds = meds[self.boundary_edge_to_element[:,0],:]\n p1pm = edge_coords[:,1,:] - edge_element_meds\n # IF THE DOT PROUCT OF NORMALS AND EDGE-MED NODE VECTOR IS NEGATIVE THEN FLIP\n _check = np.einsum(\"ij,ij->i\",normals,p1pm)\n normals[np.less(_check,0.)] = -normals[np.less(_check,0.)]\n\n\n if show_plot:\n\n if ndim == 2:\n mid_edge_coords = 0.5*(edge_coords[:,1,:] + edge_coords[:,0,:])\n\n import matplotlib.pyplot as plt\n figure = plt.figure()\n\n self.SimplePlot(figure=figure, show_plot=False)\n\n q = plt.quiver(mid_edge_coords[:,0], mid_edge_coords[:,1],\n normals[:,0], normals[:,1],\n color='Teal', headlength=5, width=0.004)\n\n plt.axis('equal')\n plt.axis('off')\n plt.tight_layout()\n plt.show()\n\n\n elif ndim == 3:\n faces = self.faces\n if self.element_type == \"tri\" or self.element_type == \"quad\":\n faces = self.elements\n mid_face_coords = np.sum(self.points[faces,:3],axis=1)/faces.shape[1]\n\n import os\n os.environ['ETS_TOOLKIT'] = 'qt4'\n from mayavi import mlab\n\n figure = mlab.figure(bgcolor=(1,1,1),fgcolor=(1,1,1),size=(1000,800))\n\n self.SimplePlot(figure=figure, show_plot=False)\n\n mlab.quiver3d(mid_face_coords[:,0], mid_face_coords[:,1], mid_face_coords[:,2],\n normals[:,0], normals[:,1], normals[:,2],\n color=(0.,128./255,128./255),line_width=5)\n mlab.show()\n\n return normals", "def normalVect(self, n=2):\n L = len(self.vertices)\n normals = []\n while len(normals) < n:\n j = randrange(L)\n v0 = vector(self.vertices[j].coords())\n v1 = vector(self.vertices[int(j + L / 3) % L].coords())\n v2 = vector(self.vertices[int(j + 2 * L / 3) % L].coords())\n try:\n normals.append(((v1 - v0) * (v2 - v0)).normalize())\n except ValueError:\n pass\n return (1 / len(normals)) * sum(normals, vector(0, 0, 0))", "def generate_normals(v1, v2, v3, normalize_result=True):\n # make vectors relative to v2\n # we assume opengl counter-clockwise ordering\n a = v1 - v2\n b = v3 - v2\n n = cross(b, a)\n if normalize_result:\n n = normalize(n)\n return n", "def face_normals(self) -> np.ndarray:\n if self._face_normals is None:\n 
self.compute_face_normals()\n assert self._face_normals is not None\n return self._face_normals", "def parse_normals(lines):\n print \" * Parsing normals\"\n return _parse_vn(lines, \"vn %.6f %.6f %.6f\")", "def compareNormals():\n computeNormals = False\n if computeNormals:\n r1,r2,r3 = read('r1'),read('r2'),read('r3')\n r = [r1,r2,r3]\n x2 = [like(r1),like(r1),like(r1)]\n x3 = [like(r1),like(r1),like(r1)]\n v = [like(r1),like(r1),like(r1)]\n FlattenerUtil.getFrame(r,None,x2,x3)\n FlattenerUtil.cross(x3,x2,v)\n FlattenerUtil.normalize(v,v)\n write('v1',v[0])\n write('v2',v[1])\n write('v3',v[2])\n v1,v2,v3 = read('v1'),read('v2'),read('v3')\n u1,u2,u3 = read('u1'),read('u2'),read('u3')\n display(sub(v1,u1),cmap=rwb,cmin=-0.2,cmax=0.2,name='v1-u1')\n display(sub(v2,u2),cmap=rwb,cmin=-0.2,cmax=0.2,name='v2-u2')\n display(sub(v3,u3),cmap=rwb,cmin=-0.2,cmax=0.2,name='v3-u3')", "def vert_normals(xyz, triangles):\n\n\tB, N, _ = _shape(xyz)\n\tM = _shape(triangles)[-2]\n\ttriangles = _i64(triangles)\n\t\n\tfn = face_normals(xyz, triangles)\n\tbfn = tf.reshape(tf.tile(fn, [1,1,3]), [B*M*3, 3])\n\tbt = tf.reshape(\n\t\ttriangles[tf.newaxis,:,:] + _i64(tf.range(B)[:,tf.newaxis,tf.newaxis] * N),\n\t\t[B*M*3])\n\tvn = tf.reshape(tf.math.unsorted_segment_sum(bfn, bt, B*N), [B,N,3])\n\tvn = tf.math.l2_normalize(vn, -1)\n\treturn vn", "def calculateMeshNormal(mesh_face_vertices):\n mesh_normal = []\n for mesh in mesh_face_vertices:\n v1x = mesh[1, 0] - mesh[0, 0]\n v1y = mesh[1, 1] - mesh[0, 1]\n v1z = mesh[1, 2] - mesh[0, 2]\n v2x = mesh[2, 0] - mesh[1, 0]\n v2y = mesh[2, 1] - mesh[1, 1]\n v2z = mesh[2, 2] - mesh[1, 2]\n \n normal = np.array([v1y * v2z - v1z * v2y, v1z * v2x - v1x * v2z, v1x * v2y - v1y * v2x])\n normal = normal / np.max((np.linalg.norm(normal), 1e-5))\n normal = (normal + 1) * 127.5\n mesh_normal.append(normal)\n return np.array(mesh_normal)", "def test_cube(self):\n\n # No isosurface\n cube_zero = numpy.zeros((2, 2, 2), dtype=numpy.float32)\n\n result = marchingcubes.MarchingCubes(cube_zero, 1.)\n self.assertEqual(result.shape, cube_zero.shape)\n self.assertEqual(result.isolevel, 1.)\n self.assertEqual(result.invert_normals, True)\n\n vertices, normals, indices = result\n self.assertEqual(len(vertices), 0)\n self.assertEqual(len(normals), 0)\n self.assertEqual(len(indices), 0)\n\n # Cube array dimensions: shape = (dim 0, dim 1, dim2)\n #\n # dim 0 (Z)\n # ^\n # |\n # 4 +------+ 5\n # /| /|\n # / | / |\n # 6 +------+ 7|\n # | | | |\n # |0 +---|--+ 1 -> dim 2 (X)\n # | / | /\n # |/ |/\n # 2 +------+ 3\n # /\n # dim 1 (Y)\n\n # isosurface perpendicular to dim 0 (Z)\n cube = numpy.array(\n (((0., 0.), (0., 0.)),\n ((1., 1.), (1., 1.))), dtype=numpy.float32)\n level = 0.5\n vertices, normals, indices = marchingcubes.MarchingCubes(\n cube, level, invert_normals=False)\n self.assertAllClose(vertices[:, 0], level)\n self.assertAllClose(normals, (1., 0., 0.))\n self.assertEqual(len(indices), 2)\n\n # isosurface perpendicular to dim 1 (Y)\n cube = numpy.array(\n (((0., 0.), (1., 1.)),\n ((0., 0.), (1., 1.))), dtype=numpy.float32)\n level = 0.2\n vertices, normals, indices = marchingcubes.MarchingCubes(cube, level)\n self.assertAllClose(vertices[:, 1], level)\n self.assertAllClose(normals, (0., -1., 0.))\n self.assertEqual(len(indices), 2)\n\n # isosurface perpendicular to dim 2 (X)\n cube = numpy.array(\n (((0., 1.), (0., 1.)),\n ((0., 1.), (0., 1.))), dtype=numpy.float32)\n level = 0.9\n vertices, normals, indices = marchingcubes.MarchingCubes(\n cube, level, invert_normals=False)\n 
self.assertAllClose(vertices[:, 2], level)\n self.assertAllClose(normals, (0., 0., 1.))\n self.assertEqual(len(indices), 2)\n\n # isosurface normal in dim1, dim 0 (Y, Z) plane\n cube = numpy.array(\n (((0., 0.), (0., 0.)),\n ((0., 0.), (1., 1.))), dtype=numpy.float32)\n level = 0.5\n vertices, normals, indices = marchingcubes.MarchingCubes(cube, level)\n self.assertAllClose(normals[:, 2], 0.)\n self.assertEqual(len(indices), 2)", "def testNorm(self):\n assert(Vector(0, 3, 4).norm() == 5)\n assert(Vector(3, 4).norm() == 5)\n assert Vector(0, 3, 0, 0, 4, 0, size=10).norm() == 5", "def compute_normals(ring):\n # output lists\n normals = []\n points = []\n # create normals half way each segment\n ct = len(ring)\n for i in xrange(ct - 1):\n cur, nxt = ring[i], ring[i+1]\n n = segment_normal(nxt, cur)\n center = mul(add(cur, nxt), 0.5)\n normals.append(n)\n points.append(center)\n # create normals on every point, using normals for every segment\n ct = len(normals)\n for i in xrange(ct):\n cur, nxt = normals[i], normals[(i+1) % ct]\n n = unit(add(cur, nxt))\n pt = ring[i+1]\n normals.append(n)\n points.append(pt)\n return points, normals", "def get_face_normals(self, idx=-1, norm=False):\n if idx >= len(self.faces):\n raise IndexError\n if idx >= 0:\n v1, v2, v3 = self.faces[idx]\n v1, v2, v3 = self.vertices[v1], self.vertices[v2], self.vertices[v3]\n e1 = v2 - v1\n e2 = v3 - v1\n cross = np.cross(e1, e2)\n return cross / np.linalg.norm(cross) if norm else cross\n else:\n f = self.faces\n v = self.vertices\n a = v[f[:, 0], :]\n b = v[f[:, 1], :]\n c = v[f[:, 2], :]\n fn = np.cross(b - a, c - a)\n return fn / np.linalg.norm(fn) if norm else fn", "def get_normals(self):\n c, s = np.cos(self.eangles), np.sin(self.eangles)\n r = np.array([[c, -s], [s, c]])\n us = np.array([[1, 0], [0, 1], [-1, 0], [0, -1]])\n nsyms = 4 if self.halfexts[0] == self.halfexts[1] else 2\n return [(np.dot(r, u), nsyms) for u in us]", "def unit_normals(self):\n return np.stack(self.centers_cartesian(), axis=-1)", "def normal_vector(self, facet):\n assert len(facet) == 3\n pos = self.cluster.get_positions()\n v1 = pos[facet[1], :] - pos[facet[0], :]\n v2 = pos[facet[2], :] - pos[facet[0], :]\n n = np.cross(v1, v2)\n length = np.sqrt(np.sum(n**2))\n return n / length", "def getNormals(vertA, vertB, vertC):\n xA = vertA[0]\n xB = vertB[0]\n xC = vertC[0]\n yA = vertA[1]\n yB = vertB[1]\n yC = vertC[1]\n zA = vertA[2]\n zB = vertB[2]\n zC = vertC[2]\n ABx = xB - xA\n ABy = yB - yA\n ABz = zB - zA\n BCx = xC - xB\n BCy = yC - yB\n BCz = zC - zB\n Nx = ABy * BCz - ABz * BCy\n Ny = ABz * BCx - ABx * BCz\n Nz = ABx * BCy - ABy * BCx\n VecMag = math.sqrt(Nx ** 2 + Ny ** 2 + Nz ** 2)\n Ni = Nx / VecMag\n Nj = Ny / VecMag\n Nk = Nz / VecMag\n return [Ni, Nj, Nk]", "def normals(t, v):\n n = numpy.zeros((len(t), 3))\n for i in range(0, len(t)):\n p = vertices(t[i], v)\n n[i] = triangle.normal(p)\n return n", "def _update_surface_normals(self):\n\n # This is the case if there are too few points to\n # compute normals so there can be values to remove\n\n #can be important for parallel\n self.swarm.shadow_particles_fetch()\n\n if self.empty:\n self.director.data[...] 
= 0.0\n else:\n\n particle_coords = self.swarm.particleCoordinates.data\n\n Nx = np.empty(self.swarm.particleLocalCount)\n Ny = np.empty(self.swarm.particleLocalCount)\n Nz = np.empty(self.swarm.particleLocalCount)\n\n for i, xyz in enumerate(particle_coords):\n r, neighbours = self.kdtree.query(particle_coords[i], k=4)\n\n # this point is neighbour[0] and neighbour points are neighbours[(1,2,3)]\n XYZ1 = self.kdtree.data[neighbours[1]]\n XYZ2 = self.kdtree.data[neighbours[2]]\n XYZ3 = self.kdtree.data[neighbours[3]]\n\n dXYZ1 = XYZ2 - XYZ1\n dXYZ2 = XYZ3 - XYZ1\n\n # Cross product of those 2 vectors can be use as the local normal (perhaps)\n\n Nx[i], Ny[i], Nz[i] = np.cross(dXYZ1, dXYZ2)\n #if i == 0:\n # print(Nx, Ny, Nz)\n # print(xyz[0], xyz[1],xyz[2])\n # print((self.insidePt[0] - xyz[0]) * Nx[i] )\n\n if (self.insidePt):\n sign = np.sign( (self.insidePt[0] - xyz[0]) * Nx[i] +\n (self.insidePt[1] - xyz[1]) * Ny[i] +\n (self.insidePt[2] - xyz[2]) * Nz[i] )\n Nx[i] *= sign\n Ny[i] *= sign\n Nz[i] *= sign\n\n\n for i in range(0, self.swarm.particleLocalCount):\n scale = 1.0 / np.sqrt(Nx[i]**2 + Ny[i]**2 + Nz[i]**2)\n Nx[i] *= scale\n Ny[i] *= scale\n Nz[i] *= scale\n\n\n self.director.data[:,0] = Nx[:]\n self.director.data[:,1] = Ny[:]\n self.director.data[:,2] = Nz[:]\n\n print(\"Surf Norms\")\n\n return", "def get_normal_vector_of_plane(p1, p2, p3):\n v12 = np.array(p1) - np.array(p2)\n v13 = np.array(p1) - np.array(p3)\n nvec = np.cross(v12, v13)\n ## print 'norm: '+str(np.linalg.norm(nvec))\n return nvec / np.linalg.norm(nvec)", "def vertex_normals(self) -> np.ndarray:\n\n if self._vertex_normals is None:\n self.compute_vertex_normals()\n assert self._vertex_normals is not None\n return self._vertex_normals", "def calculate_plane_normal(patches):\n normals = []\n for patch in patches:\n normal = get_normal(patch)\n normals.append(normal)\n # Taken naive mean of normals\n # TODO outlier removal\n normals = np.mean(np.array(normals), axis=0)\n return normals", "def normal_polygon(points, unitized=True):\n p = len(points)\n assert p > 2, \"At least three points required\"\n nx = 0\n ny = 0\n nz = 0\n o = centroid_points(points)\n a = subtract_vectors(points[-1], o)\n for i in range(p):\n b = subtract_vectors(points[i], o)\n n = cross_vectors(a, b)\n a = b\n nx += n[0]\n ny += n[1]\n nz += n[2]\n if not unitized:\n return nx, ny, nz\n l = length_vector([nx, ny, nz])\n return nx / l, ny / l, nz / l", "def normals(self, point):\n vertex_0, vertex_1, vertex_2 = self._vertices(point)\n normals_at_point = 0.5 * gs.cross(vertex_1 - vertex_0, vertex_2 - vertex_0)\n return normals_at_point" ]
[ "0.7417501", "0.7163587", "0.71501", "0.7141003", "0.6889132", "0.6833686", "0.6778833", "0.66802526", "0.6537442", "0.65106905", "0.64550817", "0.64479667", "0.6442539", "0.6430552", "0.6334144", "0.6329478", "0.6270688", "0.62549996", "0.6243213", "0.6220221", "0.61403275", "0.6138672", "0.613805", "0.61320025", "0.60644686", "0.6056448", "0.60092866", "0.59946376", "0.5993378", "0.59783924" ]
0.7896348
0
Test surface metric matrices.
def test_surface_metric_matrices(self, faces, point): space = self.Space(faces=faces) result = space.surface_metric_matrices(point=point) assert result.shape == ( space.n_faces, 2, 2, ), result.shape point = gs.array([point, point]) result = space.surface_metric_matrices(point=point) assert result.shape == (2, space.n_faces, 2, 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def surface_metric_matrices(self, point):\n one_forms = self.surface_one_forms(point)\n\n return self._surface_metric_matrices_from_one_forms(one_forms)", "def test_symmetry_surface_average_2(self):\n\n def test(grid, basis, true_avg=1):\n transform = Transform(grid, basis)\n\n # random data with specified average on each surface\n coeffs = np.random.rand(basis.num_modes)\n coeffs[np.where((basis.modes[:, 1:] == [0, 0]).all(axis=1))[0]] = 0\n coeffs[np.where((basis.modes == [0, 0, 0]).all(axis=1))[0]] = true_avg\n\n # compute average for each surface in grid\n values = transform.transform(coeffs)\n numerical_avg = surface_averages(grid, values, expand_out=False)\n if isinstance(grid, ConcentricGrid):\n # values closest to axis are never accurate enough\n numerical_avg = numerical_avg[1:]\n np.testing.assert_allclose(\n numerical_avg,\n true_avg,\n err_msg=str(type(grid)) + \" \" + str(grid.sym),\n )\n\n M = 10\n M_grid = 23\n test(\n QuadratureGrid(L=M_grid, M=M_grid, N=0),\n FourierZernikeBasis(L=M, M=M, N=0),\n )\n test(\n LinearGrid(L=M_grid, M=M_grid, N=0, sym=True),\n FourierZernikeBasis(L=M, M=M, N=0, sym=\"cos\"),\n )\n test(\n ConcentricGrid(L=M_grid, M=M_grid, N=0),\n FourierZernikeBasis(L=M, M=M, N=0),\n )\n test(\n ConcentricGrid(L=M_grid, M=M_grid, N=0, sym=True),\n FourierZernikeBasis(L=M, M=M, N=0, sym=\"cos\"),\n )", "def test_compare_outputs_surface_form(self):\n # load models\n options = [\n {\"surface form\": cap} for cap in [\"false\", \"differential\", \"algebraic\"]\n ]\n model_combos = [\n ([pybamm.lead_acid.LOQS(opt) for opt in options]),\n ([pybamm.lead_acid.Full(opt) for opt in options]),\n ]\n\n for models in model_combos:\n # load parameter values (same for all models)\n param = models[0].default_parameter_values\n param.update({\"Current function [A]\": 1})\n for model in models:\n param.process_model(model)\n\n # set mesh\n var_pts = {\"x_n\": 5, \"x_s\": 5, \"x_p\": 5}\n\n # discretise models\n discs = {}\n for model in models:\n geometry = model.default_geometry\n param.process_geometry(geometry)\n mesh = pybamm.Mesh(geometry, model.default_submesh_types, var_pts)\n disc = pybamm.Discretisation(mesh, model.default_spatial_methods)\n disc.process_model(model)\n discs[model] = disc\n\n # solve model\n solutions = []\n t_eval = np.linspace(0, 3600 * 20, 100)\n for model in models:\n solution = pybamm.CasadiSolver().solve(model, t_eval)\n solutions.append(solution)\n\n # compare outputs\n comparison = StandardOutputComparison(solutions)\n comparison.test_all(skip_first_timestep=True)", "def test_comp_surface(self, test_dict):\n test_obj = test_dict[\"test_obj\"]\n result = test_obj.slot.comp_surface()\n\n a = result\n b = test_dict[\"S_exp\"]\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)\n\n b = comp_surface(test_obj.slot)\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)", "def test_symmetry_surface_average_1(self):\n\n def test(grid):\n r = grid.nodes[:, 0]\n t = grid.nodes[:, 1]\n z = grid.nodes[:, 2] * grid.NFP\n true_surface_avg = 5\n function_of_rho = 1 / (r + 0.35)\n f = (\n true_surface_avg\n + np.cos(t)\n - 0.5 * np.cos(z)\n + 3 * np.cos(t) * np.cos(z) ** 2\n - 2 * np.sin(z) * np.sin(t)\n ) * function_of_rho\n np.testing.assert_allclose(\n surface_averages(grid, f),\n true_surface_avg * function_of_rho,\n rtol=1e-15,\n err_msg=type(grid),\n )\n\n # these tests should be run on relatively low resolution grids,\n # 
or at least low enough so that the asymmetric spacing test fails\n L = [3, 3, 5, 3]\n M = [3, 6, 5, 7]\n N = [2, 2, 2, 2]\n NFP = [5, 3, 5, 3]\n sym = np.asarray([True, True, False, False])\n # to test code not tested on grids made with M=.\n even_number = 4\n n_theta = even_number - sym\n\n # asymmetric spacing\n with pytest.raises(AssertionError):\n theta = 2 * np.pi * np.asarray([t**2 for t in np.linspace(0, 1, max(M))])\n test(LinearGrid(L=max(L), theta=theta, N=max(N), sym=False))\n\n for i in range(len(L)):\n test(LinearGrid(L=L[i], M=M[i], N=N[i], NFP=NFP[i], sym=sym[i]))\n test(LinearGrid(L=L[i], theta=n_theta[i], N=N[i], NFP=NFP[i], sym=sym[i]))\n test(\n LinearGrid(\n L=L[i],\n theta=np.linspace(0, 2 * np.pi, n_theta[i]),\n N=N[i],\n NFP=NFP[i],\n sym=sym[i],\n )\n )\n test(\n LinearGrid(\n L=L[i],\n theta=np.linspace(0, 2 * np.pi, n_theta[i] + 1),\n N=N[i],\n NFP=NFP[i],\n sym=sym[i],\n )\n )\n test(QuadratureGrid(L=L[i], M=M[i], N=N[i], NFP=NFP[i]))\n test(ConcentricGrid(L=L[i], M=M[i], N=N[i], NFP=NFP[i], sym=sym[i]))\n # nonuniform spacing when sym is False, but spacing is still symmetric\n test(\n LinearGrid(\n L=L[i],\n theta=np.linspace(0, np.pi, n_theta[i]),\n N=N[i],\n NFP=NFP[i],\n sym=sym[i],\n )\n )\n test(\n LinearGrid(\n L=L[i],\n theta=np.linspace(0, np.pi, n_theta[i] + 1),\n N=N[i],\n NFP=NFP[i],\n sym=sym[i],\n )\n )", "def _surface_metric_matrices_from_one_forms(one_forms):\n ndim = one_forms.ndim\n transpose_axes = tuple(range(ndim - 2)) + tuple(reversed(range(ndim - 2, ndim)))\n transposed_one_forms = gs.transpose(one_forms, axes=transpose_axes)\n return gs.matmul(one_forms, transposed_one_forms)", "def test_1_2_2D_cube_splits(self):\n check = [(0, 0), (1, 1), (1, 0), (0, 1), (0.5, 0.5), (0.0, 0.5),\n (0.5, 0.0),\n (0.25, 0.25), (1.0, 0.5), (0.5, 1.0), (0.75, 0.75),\n (0.75, 0.25),\n (0.25, 0.75), (0.5, 0.25), (0.25, 0.5), (0.375, 0.375),\n (0.0, 0.25),\n (0.25, 0.0), (0.125, 0.125), (0.125, 0.375), (0.375, 0.125),\n (0.5, 0.75), (0.75, 0.5), (0.625, 0.625), (1.0, 0.75),\n (0.75, 1.0),\n (0.875, 0.875), (0.875, 0.625), (0.625, 0.875), (0.625, 0.375),\n (1.0, 0.25), (0.75, 0.0), (0.875, 0.125), (0.875, 0.375),\n (0.625, 0.125), (0.375, 0.625), (0.0, 0.75), (0.25, 1.0),\n (0.125, 0.875), (0.125, 0.625), (0.375, 0.875)]\n\n nn_checks = {(0, 0): [(0.25, 0.0), (0.0, 0.25), (0.125, 0.125)],\n (0.625, 0.375): [(0.5, 0.5), (0.75, 0.25), (0.75, 0.5),\n (0.5, 0.25)],\n (0, 1): [(0.25, 1.0), (0.125, 0.875),(0.0, 0.75)],\n (0.625, 0.125): [(0.5, 0.0), (0.75, 0.25), (0.75, 0.0),\n (0.5, 0.25)]}\n\n\n init_triangulation(2, 2, check, nn_checks)", "def test_surface_one_forms(self, faces, point):\n space = self.Space(faces=faces)\n\n result = space.surface_one_forms(point=point)\n assert result.shape == (space.n_faces, 2, 3), result.shape\n\n first_vec = result[:, 0, :]\n second_vec = result[:, 1, :]\n inner_prods = gs.einsum(\"ni,ni->n\", first_vec, second_vec)\n result = [prod in [0.0, 4.0] for prod in inner_prods]\n assert gs.all(result)\n\n singleton_point = gs.expand_dims(point, axis=0)\n result = space.surface_one_forms(point=singleton_point)\n assert result.shape == (1, space.n_faces, 2, 3)\n\n point = gs.array([point, point])\n result = space.surface_one_forms(point=point)\n assert result.shape == (2, space.n_faces, 2, 3)\n\n first_vec = result[:, :, 0, :]\n second_vec = result[:, :, 1, :]\n inner_prods = gs.einsum(\"mni,mni->mn\", first_vec, second_vec)\n result = []\n for inner_prod in inner_prods:\n result.append([prod in [0.0, 4.0] for prod in inner_prod])\n assert 
gs.all(result)", "def metric_test(self):\n k = 10\n latent_factor = 10\n n_users = 10\n n_items = 12\n\n interactions, user_features, item_features = util.generate_dummy_data_with_indicator (num_users=n_users, num_items=n_items, interaction_density=.5)\n print (\"interactiosn shape={}\".format( np.shape(interactions) ))\n print (\"user features shape={}\".format( np.shape(user_features.toarray()) ))\n print (\"item features shape={}\".format( np.shape(item_features.toarray()) ))\n\n model = TensorRec(n_components=latent_factor)\n\n model.fit(interactions, user_features, item_features, epochs=19)\n\n ranks = model.predict_rank(user_features=user_features, item_features=item_features)\n\n print (\"Ranks shape={}\".format(np.shape(ranks)))\n\n self.assertTrue(np.shape(interactions) == np.shape(ranks))\n\n tr_recall_result = eval.recall_at_k(predicted_ranks=ranks, test_interactions=interactions, k=k, preserve_rows=False)\n # print (tr_recall_result.mean())\n\n tr_precision_result = eval.precision_at_k(predicted_ranks=ranks, test_interactions=interactions, k=k, preserve_rows=False)\n # print(tr_precision_result.mean())\n\n # we need csr for interactions data\n interactions_ = interactions.tocsr()\n recall_result = metrics.recall_at_k(ranks, interactions_, k=k, preserve_rows=False)\n # print(recall_result.mean())\n\n precision_result = metrics.precision_at_k(ranks, interactions_, k=k, preserve_rows=False)\n # print (precision_result.mean())\n\n self.assertTrue (tr_recall_result.mean() == recall_result.mean())\n self.assertTrue (tr_precision_result.mean() == precision_result.mean())", "def test_surface_feature(self):\n\n # Fully valid image\n sf1 = SurfaceFeature(1, 1, 2, 2, 'dummy_wkt_string', 0.5, 'dummy_id')\n sf1.determine_quadkey()\n\n self.assertEqual(sf1.quadkey, '3000000')", "def test_2_2_3D_cube_splits(self):\n check = [(0, 0, 0), (1, 1, 1), (1, 0, 0), (1, 1, 0), (1, 0, 1),\n (0, 1, 0),\n (0, 1, 1), (0, 0, 1), (0.5, 0.5, 0.5), (0.0, 0.5, 0.5),\n (0.0, 0.0, 0.5), (0.0, 0.5, 0.0), (0.5, 0.0, 0.5),\n (0.5, 0.0, 0.0),\n (0.5, 0.5, 0.0), (0.25, 0.25, 0.25), (1.0, 0.5, 0.5),\n (1.0, 1.0, 0.5),\n (1.0, 0.5, 1.0), (0.5, 1.0, 0.5), (0.5, 1.0, 1.0),\n (0.5, 0.5, 1.0),\n (0.75, 0.75, 0.75), (1.0, 0.0, 0.5), (1.0, 0.5, 0.0),\n (0.75, 0.25, 0.25), (0.5, 1.0, 0.0), (0.75, 0.75, 0.25),\n (0.5, 0.0, 1.0), (0.75, 0.25, 0.75), (0.0, 1.0, 0.5),\n (0.25, 0.75, 0.25), (0.0, 0.5, 1.0), (0.25, 0.75, 0.75),\n (0.25, 0.25, 0.75), (0.5, 0.25, 0.25), (0.5, 0.5, 0.25),\n (0.5, 0.25, 0.5), (0.25, 0.5, 0.25), (0.25, 0.5, 0.5),\n (0.25, 0.25, 0.5), (0.375, 0.375, 0.375), (0.0, 0.25, 0.25),\n (0.0, 0.0, 0.25), (0.0, 0.25, 0.0), (0.25, 0.0, 0.25),\n (0.25, 0.0, 0.0), (0.25, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.5, 0.25), (0.0, 0.25, 0.5), (0.125, 0.375, 0.375),\n (0.25, 0.0, 0.5), (0.125, 0.125, 0.375), (0.25, 0.5, 0.0),\n (0.125, 0.375, 0.125), (0.5, 0.0, 0.25), (0.375, 0.125, 0.375),\n (0.5, 0.25, 0.0), (0.375, 0.125, 0.125), (0.375, 0.375, 0.125),\n (0.5, 0.75, 0.75), (0.5, 0.5, 0.75), (0.5, 0.75, 0.5),\n (0.75, 0.5, 0.75), (0.75, 0.5, 0.5), (0.75, 0.75, 0.5),\n (0.625, 0.625, 0.625), (1.0, 0.75, 0.75), (1.0, 1.0, 0.75),\n (1.0, 0.75, 1.0), (0.75, 1.0, 0.75), (0.75, 1.0, 1.0),\n (0.75, 0.75, 1.0), (0.875, 0.875, 0.875), (1.0, 0.5, 0.75),\n (1.0, 0.75, 0.5), (0.875, 0.625, 0.625), (0.75, 1.0, 0.5),\n (0.875, 0.875, 0.625), (0.75, 0.5, 1.0), (0.875, 0.625, 0.875),\n (0.5, 1.0, 0.75), (0.625, 0.875, 0.625), (0.5, 0.75, 1.0),\n (0.625, 0.875, 0.875), (0.625, 0.625, 0.875),\n (0.75, 0.5, 0.25),\n (0.75, 0.25, 0.5), 
(0.625, 0.375, 0.375), (1.0, 0.25, 0.25),\n (1.0, 0.0, 0.25), (1.0, 0.25, 0.0), (0.75, 0.0, 0.25),\n (0.75, 0.0, 0.0), (0.75, 0.25, 0.0), (0.875, 0.125, 0.125),\n (1.0, 0.5, 0.25), (1.0, 0.25, 0.5), (0.875, 0.375, 0.375),\n (0.75, 0.0, 0.5), (0.875, 0.125, 0.375), (0.75, 0.5, 0.0),\n (0.875, 0.375, 0.125), (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.625, 0.375, 0.125), (0.5, 0.75, 0.25),\n (0.625, 0.625, 0.375),\n (1.0, 0.75, 0.25), (1.0, 1.0, 0.25), (1.0, 0.75, 0.0),\n (0.75, 1.0, 0.25), (0.75, 1.0, 0.0), (0.75, 0.75, 0.0),\n (0.875, 0.875, 0.125), (0.875, 0.625, 0.375),\n (0.875, 0.875, 0.375),\n (0.875, 0.625, 0.125), (0.5, 1.0, 0.25), (0.625, 0.875, 0.375),\n (0.5, 0.75, 0.0), (0.625, 0.875, 0.125), (0.625, 0.625, 0.125),\n (0.5, 0.25, 0.75), (0.625, 0.375, 0.625), (1.0, 0.25, 0.75),\n (1.0, 0.0, 0.75), (1.0, 0.25, 1.0), (0.75, 0.0, 0.75),\n (0.75, 0.0, 1.0), (0.75, 0.25, 1.0), (0.875, 0.125, 0.875),\n (0.875, 0.375, 0.625), (0.875, 0.125, 0.625),\n (0.875, 0.375, 0.875),\n (0.5, 0.0, 0.75), (0.625, 0.125, 0.625), (0.5, 0.25, 1.0),\n (0.625, 0.125, 0.875), (0.625, 0.375, 0.875),\n (0.25, 0.75, 0.5),\n (0.375, 0.625, 0.375), (0.0, 0.75, 0.25), (0.0, 1.0, 0.25),\n (0.0, 0.75, 0.0), (0.25, 1.0, 0.25), (0.25, 1.0, 0.0),\n (0.25, 0.75, 0.0), (0.125, 0.875, 0.125), (0.0, 0.75, 0.5),\n (0.125, 0.625, 0.375), (0.25, 1.0, 0.5), (0.125, 0.875, 0.375),\n (0.125, 0.625, 0.125), (0.375, 0.875, 0.375),\n (0.375, 0.875, 0.125),\n (0.375, 0.625, 0.125), (0.25, 0.5, 0.75),\n (0.375, 0.625, 0.625),\n (0.0, 0.75, 0.75), (0.0, 1.0, 0.75), (0.0, 0.75, 1.0),\n (0.25, 1.0, 0.75), (0.25, 1.0, 1.0), (0.25, 0.75, 1.0),\n (0.125, 0.875, 0.875), (0.0, 0.5, 0.75), (0.125, 0.625, 0.625),\n (0.125, 0.875, 0.625), (0.25, 0.5, 1.0), (0.125, 0.625, 0.875),\n (0.375, 0.875, 0.625), (0.375, 0.875, 0.875),\n (0.375, 0.625, 0.875),\n (0.375, 0.375, 0.625), (0.0, 0.25, 0.75), (0.0, 0.0, 0.75),\n (0.0, 0.25, 1.0), (0.25, 0.0, 0.75), (0.25, 0.0, 1.0),\n (0.25, 0.25, 1.0), (0.125, 0.125, 0.875),\n (0.125, 0.375, 0.625),\n (0.125, 0.125, 0.625), (0.125, 0.375, 0.875),\n (0.375, 0.125, 0.625),\n (0.375, 0.125, 0.875), (0.375, 0.375, 0.875)]\n\n nn_checks = {(0.5, 0.25, 0.25): [(0.375, 0.375, 0.125), (0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25),\n (0.625, 0.375, 0.375),\n (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.5, 0.5, 0.25), (0.25, 0.25, 0.25),\n (0.375, 0.375, 0.375),\n (0.5, 0.25, 0.5), (0.5, 0.5, 0.5),\n (0.5, 0.0, 0.25),\n (0.375, 0.125, 0.375), (0.5, 0.0, 0.5),\n (0.5, 0.25, 0.0),\n (0.375, 0.125, 0.125), (0.5, 0.0, 0.0),\n (0.625, 0.375, 0.125)],\n (0.625, 0.625, 0.875): [(0.75, 0.5, 1.0),\n (0.75, 0.75, 1.0),\n (0.5, 0.75, 1.0), (0.5, 0.5, 1.0),\n (0.5, 0.5, 0.75),\n (0.5, 0.75, 0.75),\n (0.75, 0.5, 0.75),\n (0.75, 0.75, 0.75)],\n (0, 0, 0): [(0.0, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.0, 0.25), (0.25, 0.0, 0.0),\n (0.0, 0.25, 0.25), (0.25, 0.25, 0.0),\n (0.25, 0.0, 0.25)]}\n\n init_triangulation(3, 2, check, nn_checks)", "def test_fwhm(self):\n for i, func in enumerate(self.fwhm_funcs):\n for j, arr1d in enumerate(self.input_arrays):\n res = func(arr1d)\n assert_allclose(res.fwhm, self.answers[i][j], atol=1e-4)", "def test_check_matrix_threshold():\n R = np.array([\n [-9.15361835e-01, 4.01808328e-01, 2.57475872e-02],\n [5.15480570e-02, 1.80374088e-01, -9.82246499e-01],\n [-3.99318925e-01, -8.97783496e-01, -1.85819250e-01]])\n pr.assert_rotation_matrix(R)\n pr.check_matrix(R)", "def test_matrix(self, tol):\n\n res_static = qml.QFT.compute_matrix(2)\n res_dynamic = qml.QFT(wires=[0, 1]).matrix()\n 
res_reordered = qml.QFT(wires=[0, 1]).matrix([1, 0])\n\n expected = np.array(\n [\n [0.5 + 0.0j, 0.5 + 0.0j, 0.5 + 0.0j, 0.5 + 0.0j],\n [0.5 + 0.0j, 0.0 + 0.5j, -0.5 + 0.0j, -0.0 - 0.5j],\n [0.5 + 0.0j, -0.5 + 0.0j, 0.5 - 0.0j, -0.5 + 0.0j],\n [0.5 + 0.0j, -0.0 - 0.5j, -0.5 + 0.0j, 0.0 + 0.5j],\n ]\n )\n\n assert np.allclose(res_static, expected, atol=tol, rtol=0)\n assert np.allclose(res_dynamic, expected, atol=tol, rtol=0)\n\n expected_permuted = [\n [0.5 + 0.0j, 0.5 + 0.0j, 0.5 + 0.0j, 0.5 + 0.0j],\n [0.5 + 0.0j, 0.5 - 0.0j, -0.5 + 0.0j, -0.5 + 0.0j],\n [0.5 + 0.0j, -0.5 + 0.0j, 0.0 + 0.5j, -0.0 - 0.5j],\n [0.5 + 0.0j, -0.5 + 0.0j, -0.0 - 0.5j, 0.0 + 0.5j],\n ]\n assert np.allclose(res_reordered, expected_permuted, atol=tol, rtol=0)", "def UnitCubeTest(P):\n above = 0\n below = 0\n for (a,b,c) in [(0,0,0), (0,0,1), (0,1,0), (0,1,1), (1,0,0), (1,0,1), (1,1,0), (1,1,1)]:\n s = P.test(a, b, c)\n if s > 0:\n above = 1\n elif s < 0:\n below = 1\n return above - below", "def test_window_funcs():\n # get a PSpecData\n uvd = UVData()\n uvd.read_miriad(\n os.path.join(DATA_PATH, 'zen.even.xx.LST.1.28828.uvOCRSA'),\n use_future_array_shapes=True\n )\n beam = pspecbeam.PSpecBeamUV(os.path.join(DATA_PATH, \"HERA_NF_dipole_power.beamfits\"))\n ds = pspecdata.PSpecData(dsets=[copy.deepcopy(uvd)], beam=beam)\n ds.set_spw((0, 20))\n ds.set_taper('bh')\n bl = (37, 38)\n key = (0, bl, 'xx')\n d = uvd.get_data(bl)\n C = np.cov(d[:, :20].T).real\n iC = np.linalg.pinv(C)\n # iterate over various R and M matrices and ensure\n # normalization and dtype is consistent\n for data_weight in ['identity', 'iC']:\n ds.set_weighting(data_weight)\n for norm in ['H^-1', 'I', 'V^-1/2']:\n for exact_norm in [True, False]:\n if exact_norm and norm != 'I':\n # exact_norm only supported for norm == 'I'\n continue\n ds.clear_cache()\n if data_weight == 'iC':\n # fill R with iC\n ds._R[(0, (37, 38, 'xx'), 'iC', 'bh')] = iC\n # compute G and H\n Gv = ds.get_G(key, key, exact_norm=exact_norm, pol='xx')\n Hv = ds.get_H(key, key, exact_norm=exact_norm, pol='xx')\n Mv, Wv = ds.get_MW(Gv, Hv, mode=norm, exact_norm=exact_norm,\n band_covar=C)\n # assert row-sum is normalized to 1\n assert np.isclose(Wv.sum(axis=1).real, 1).all()\n # assert this is a real matrix, even though imag is populated\n assert np.isclose(Wv.imag, 0, atol=1e-6).all()", "def test_surface_normal(self):\n vertices = np.array([[0, 1, 0], [0, 0, 0], [1, 0, 0]])\n expected = np.array([0, 0, 1])\n np.testing.assert_almost_equal(surface_normal(vertices), expected)\n\n # Test against multiple triangles\n vertices = np.r_[vertices[np.newaxis, :, :], [[[0, 0, 0], [0, 2, 0], [2, 0, 0]]]]\n expected = np.array([[0, 0, 1], [0, 0, -1]])\n np.testing.assert_almost_equal(surface_normal(vertices), expected)\n\n # Some real data\n vertices = np.array([[2.435, -1.82, -0.53], [2.635, -2., -0.58], [2.535, -1.7, -0.58]])\n expected = np.array([0.33424239, 0.11141413, 0.93587869])\n np.testing.assert_almost_equal(surface_normal(vertices), expected)\n\n # Test input validation\n self.assertRaises(ValueError, surface_normal, np.array([[1, 2, 3, 4]]))", "def test_model(model, dataObj, index):\n\t(s,m,l), img = dataObj.__getitem__(index)\n\timg = img.float().unsqueeze(0)\n\t\n\tif next(model.parameters()).is_cuda:\n\t\toutput = model(img.cuda()) \n\telse:\n\t\toutput = model(img)\n\n\ts_pred,m_pred,l_pred = output[0].squeeze(0).cpu(), output[1].squeeze(0).cpu(), output[2].squeeze(0).cpu()\n\ts_pred = s_pred.detach().numpy()\n\tm_pred = m_pred.detach().numpy()\n\tl_pred = 
l_pred.detach().numpy()\n\n\timg = img.float().squeeze(0)\n\timg = img.permute(1,2,0)\n\n\tfor j in range(22):\n\t\tvisualize(img, s[j], m[j], l[j], s_pred[j], m_pred[j], l_pred[j])\n\t\tk = np.array(s[j])", "def test_matrix_stats1(self):\r\n headers_list = [['a', 'c', 'b'], ['a', 'c', 'b']]\r\n d1 = numpy.array([[0, .2, .9],\r\n [.2, 0, .8],\r\n [.9, .8, 0]], 'float')\r\n d2 = numpy.array([[0, .3, 1.1],\r\n [.3, 0, .8],\r\n [1.1, .8, 0]], 'float')\r\n distmats_list = [d1, d2]\r\n\r\n exp_mean = numpy.array([[0, .25, 1.0],\r\n [.25, 0, .8],\r\n [1.0, .8, 0]], 'float')\r\n exp_median = numpy.array([[0, .25, 1.0],\r\n [.25, 0, .8],\r\n [1.0, .8, 0]], 'float')\r\n exp_std = numpy.array([[0, .05, .1],\r\n [.05, 0, 0],\r\n [.1, 0, 0]], 'float')\r\n results = matrix_stats(headers_list, distmats_list)\r\n assert_almost_equal(results[1:], [exp_mean, exp_median, exp_std])\r\n self.assertEqual(results[0], ['a', 'c', 'b'])", "def test_surf():\n def f(x, y):\n sin, cos = numpy.sin, numpy.cos\n return sin(x + y) + sin(2 * x - y) + cos(3 * x + 4 * y)\n\n x, y = numpy.mgrid[-7.:7.05:0.1, -5.:5.05:0.05]\n s = surf(x, y, f)\n mlab.show()\n #cs = contour_surf(x, y, f, contour_z=0)\n return", "def test(self):\n bs = verif.metric.Bs()\n bsrel = verif.metric.BsRel()\n bsres = verif.metric.BsRes()\n bsunc = verif.metric.BsUnc()\n bss = verif.metric.Bss()\n obs = [[0],\n [0],\n [0],\n [1],\n [0, 0, 1, 1, 1],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]\n fcst = [[0],\n [1],\n [0.3],\n [0.1],\n [0.21, 0.21, 0.21, 0.91, 0.91],\n [0.06, 0.61, 0.45, 0.87, 0.13, 0.61, 0.79, 0.61, 0.06, 0.06, 0.79, 0.61, 0.13, 0.13, 0.79, 0.21, 0.06, 0.55, 0.37, 0.37]]\n ans = {bs: [0, 1, 0.09, 0.81, 0.1457, 0.34928],\n bsrel: [0, 1, 0.09, 0.81, 0.01236667, 0.2076133],\n bsres: [0, 0, 0, 0, 0.1066667, 0.1083333],\n bsunc: [0, 0, 0, 0, 0.24, 0.25],\n bss: [np.nan, np.nan, np.nan, np.nan, 0.3929167, -0.39712]}\n for i in range(len(obs)):\n o = np.array(obs[i])\n f = np.array(fcst[i])\n for key in ans:\n print(key, i)\n calculated = key.compute_from_obs_fcst(o, f)\n expected = ans[key][i]\n if np.isnan(expected):\n self.assertTrue(np.isnan(expected), np.isnan(calculated))\n else:\n self.assertAlmostEqual(expected, calculated, places=5)", "def test_3_2_4D_cube_splits(self):\n check = [(0, 0, 0, 0), (1, 1, 1, 1), (1, 0, 0, 0), (1, 1, 0, 0),\n (1, 1, 1, 0),\n (1, 1, 0, 1), (1, 0, 1, 0), (1, 0, 1, 1), (1, 0, 0, 1),\n (0, 1, 0, 0),\n (0, 1, 1, 0), (0, 1, 1, 1), (0, 1, 0, 1), (0, 0, 1, 0),\n (0, 0, 1, 1),\n (0, 0, 0, 1), (0.5, 0.5, 0.5, 0.5), (0.0, 0.5, 0.5, 0.5),\n (0.0, 0.0, 0.5, 0.5), (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.0, 0.5, 0.0),\n (0.0, 0.5, 0.0, 0.5), (0.0, 0.5, 0.0, 0.0),\n (0.0, 0.5, 0.5, 0.0),\n (0.5, 0.0, 0.5, 0.5), (0.5, 0.0, 0.0, 0.5),\n (0.5, 0.0, 0.0, 0.0),\n (0.5, 0.0, 0.5, 0.0), (0.5, 0.5, 0.0, 0.5),\n (0.5, 0.5, 0.0, 0.0),\n (0.5, 0.5, 0.5, 0.0), (0.25, 0.25, 0.25, 0.25),\n (1.0, 0.5, 0.5, 0.5),\n (1.0, 1.0, 0.5, 0.5), (1.0, 1.0, 1.0, 0.5),\n (1.0, 1.0, 0.5, 1.0),\n (1.0, 0.5, 1.0, 0.5), (1.0, 0.5, 1.0, 1.0),\n (1.0, 0.5, 0.5, 1.0),\n (0.5, 1.0, 0.5, 0.5), (0.5, 1.0, 1.0, 0.5),\n (0.5, 1.0, 1.0, 1.0),\n (0.5, 1.0, 0.5, 1.0), (0.5, 0.5, 1.0, 0.5),\n (0.5, 0.5, 1.0, 1.0),\n (0.5, 0.5, 0.5, 1.0), (0.75, 0.75, 0.75, 0.75),\n (1.0, 0.0, 0.5, 0.5),\n (1.0, 0.0, 0.0, 0.5), (1.0, 0.0, 0.5, 0.0),\n (1.0, 0.5, 0.0, 0.5),\n (1.0, 0.5, 0.0, 0.0), (1.0, 0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25, 0.25),\n (1.0, 1.0, 0.0, 0.5), (1.0, 1.0, 0.5, 0.0),\n (0.5, 1.0, 0.0, 0.5),\n (0.5, 1.0, 0.0, 0.0), (0.5, 1.0, 0.5, 0.0),\n (0.75, 
0.75, 0.25, 0.25),\n (1.0, 0.5, 1.0, 0.0), (0.5, 1.0, 1.0, 0.0),\n (0.5, 0.5, 1.0, 0.0),\n (0.75, 0.75, 0.75, 0.25), (1.0, 0.5, 0.0, 1.0),\n (0.5, 1.0, 0.0, 1.0),\n (0.5, 0.5, 0.0, 1.0), (0.75, 0.75, 0.25, 0.75),\n (1.0, 0.0, 1.0, 0.5),\n (0.5, 0.0, 1.0, 0.5), (0.5, 0.0, 1.0, 0.0),\n (0.75, 0.25, 0.75, 0.25),\n (1.0, 0.0, 0.5, 1.0), (0.5, 0.0, 1.0, 1.0),\n (0.5, 0.0, 0.5, 1.0),\n (0.75, 0.25, 0.75, 0.75), (0.5, 0.0, 0.0, 1.0),\n (0.75, 0.25, 0.25, 0.75), (0.0, 1.0, 0.5, 0.5),\n (0.0, 1.0, 0.0, 0.5),\n (0.0, 1.0, 0.5, 0.0), (0.25, 0.75, 0.25, 0.25),\n (0.0, 1.0, 1.0, 0.5),\n (0.0, 0.5, 1.0, 0.5), (0.0, 0.5, 1.0, 0.0),\n (0.25, 0.75, 0.75, 0.25),\n (0.0, 1.0, 0.5, 1.0), (0.0, 0.5, 1.0, 1.0),\n (0.0, 0.5, 0.5, 1.0),\n (0.25, 0.75, 0.75, 0.75), (0.0, 0.5, 0.0, 1.0),\n (0.25, 0.75, 0.25, 0.75), (0.0, 0.0, 1.0, 0.5),\n (0.25, 0.25, 0.75, 0.25), (0.0, 0.0, 0.5, 1.0),\n (0.25, 0.25, 0.75, 0.75), (0.25, 0.25, 0.25, 0.75)]\n\n nn_checks = {(0, 0, 0, 0): [(0.0, 0.5, 0.5, 0.0), (0.5, 0.0, 0.5, 0.5),\n (0.5, 0.5, 0.0, 0.5),\n (0.25, 0.25, 0.25, 0.25),\n (0.5, 0.0, 0.0, 0.5), (0.5, 0.0, 0.5, 0.0),\n (0.5, 0.5, 0.0, 0.0), (0.0, 0.0, 0.5, 0.0),\n (0.0, 0.5, 0.0, 0.0), (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.5, 0.5, 0.5), (0.0, 0.0, 0.5, 0.5),\n (0.0, 0.5, 0.0, 0.5), (0.5, 0.0, 0.0, 0.0),\n (0.5, 0.5, 0.5, 0.0)],\n (1.0, 1.0, 0.5, 0.5): [(1.0, 1.0, 0.5, 1.0), (1, 1, 0, 1),\n (1.0, 1.0, 1.0, 0.5),\n (1.0, 0.5, 0.5, 0.5), (1, 1, 1, 0),\n (1.0, 1.0, 0.5, 0.0),\n (1.0, 1.0, 0.0, 0.5), (1, 1, 0, 0),\n (1, 1, 1, 1), (0.5, 1.0, 0.5, 0.5),\n (0.5, 0.5, 0.5, 0.5),\n (0.75, 0.75, 0.75, 0.75),\n (0.75, 0.75, 0.25, 0.25),\n (0.75, 0.75, 0.75, 0.25),\n (0.75, 0.75, 0.25, 0.75)],\n (0.25, 0.25, 0.25, 0.75): [(0.0, 0.0, 0.5, 1.0),\n (0.0, 0.5, 0.0, 1.0),\n (0.5, 0.0, 0.0, 1.0),\n (0.5, 0.5, 0.5, 1.0),\n (0, 0, 0, 1),\n (0.0, 0.5, 0.5, 1.0),\n (0.5, 0.0, 0.5, 1.0),\n (0.5, 0.5, 0.0, 1.0),\n (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.0, 0.5, 0.5),\n (0.0, 0.5, 0.5, 0.5),\n (0.0, 0.5, 0.0, 0.5),\n (0.5, 0.0, 0.5, 0.5),\n (0.5, 0.5, 0.0, 0.5),\n (0.5, 0.5, 0.5, 0.5),\n (0.5, 0.0, 0.0, 0.5)]}\n\n init_triangulation(4, 1, check, nn_checks)", "def test_basic_property_of_random_matrix():\n for name, random_matrix in all_random_matrix.items():\n print(name)\n\n check_input_size_random_matrix(random_matrix)\n check_size_generated(random_matrix)\n if name != \"random_subsample_normalized\":\n check_zero_mean_and_unit_norm(random_matrix)\n check_approximate_isometry(random_matrix)", "def test_basic(self):\n plugin = NonLinearWeights(0.85)\n result = plugin.process(self.cube, self.coord_name)\n self.assertIsInstance(result, iris.cube.Cube)", "def test_number_of_surface_objects(self):\n for O in self.mod.objts.itervalues():\n no_of_surfaces = 0\n for C in O.conts.itervalues():\n if C.surf != 0:\n no_of_surfaces += 1\n self.assertEqual(O.surfsize, no_of_surfaces)", "def test_basic(self):\n data = get()\n metrics = [verif.metric.Within(),\n verif.metric.A(), # Hit\n verif.metric.B(), # FA\n verif.metric.C(), # Miss\n verif.metric.D(), # Correct rejection\n verif.metric.Hit(),\n verif.metric.Threat(),\n verif.metric.Conditional(),\n verif.metric.XConditional(func=np.median),\n ]\n intervals = [verif.interval.Interval(-np.inf, 0, True, True), # [-inf, 0]\n verif.interval.Interval(-np.inf, 1, True, True),\n verif.interval.Interval(-np.inf, 2, True, True),\n ]\n obs = [0, 1.5, 2]\n fcst = [3.1, 1.1, -2.1]\n N = len(obs)*1.0\n\n # Each line is one metric (one number for each threshold)\n expected = [[0/N, 100/N, 100/N], # Within\n [0/N, 0/N, 2/N], # Hit\n 
[1/N, 1/N, 0/N], # FA\n [1/N, 1/N, 1/N], # Miss\n [1/N, 1/N, 0/N], # Correct rejection\n [0, 0, 2.0/3], # Hit rate\n [0, 0, 2.0/3], # Threat score\n [3.1, 3.1, 0.7], # Average fcst given obs in interval\n [0, 0, 1.5], # Average obs given obs in interval\n ]\n\n for m in range(len(metrics)):\n metric = metrics[m]\n for i in range(len(intervals)):\n value = metric.compute_from_obs_fcst(np.array(obs), np.array(fcst), intervals[i])\n ex = expected[m][i] * 1.0\n if np.isnan(value):\n self.assertTrue(np.isnan(ex))\n else:\n self.assertAlmostEqual(ex, value)", "def test_3(self):\n for _ in range(10):\n\n # Draw random requests for testing purposes.\n num_draws_emax = np.random.randint(2, 1000)\n dim = np.random.randint(1, 6)\n\n matrix = np.random.uniform(size=dim ** 2).reshape(dim, dim)\n cov = np.dot(matrix, matrix.T)\n\n # PDF of normal distribution\n args = np.random.normal(size=3)\n args[-1] **= 2\n\n f90 = fort_debug.wrapper_normal_pdf(*args)\n py = norm.pdf(*args)\n\n assert_almost_equal(py, f90)\n\n # Singular Value Decomposition\n py = scipy.linalg.svd(matrix)\n f90 = fort_debug.wrapper_svd(matrix, dim)\n\n for i in range(3):\n assert_allclose(py[i], f90[i])\n\n # Pseudo-Inverse\n py = np.linalg.pinv(matrix)\n f90 = fort_debug.wrapper_pinv(matrix, dim)\n\n assert_allclose(py, f90)\n\n # Inverse\n py = np.linalg.inv(cov)\n f90 = fort_debug.wrapper_inverse(cov, dim)\n assert_allclose(py, f90)\n\n # Determinant\n py = np.linalg.det(cov)\n f90 = fort_debug.wrapper_determinant(cov)\n\n assert_allclose(py, f90)\n\n # Trace\n py = np.trace(cov)\n f90 = fort_debug.wrapper_trace(cov)\n\n assert_allclose(py, f90)\n\n # Random normal deviates. This only tests the interface, requires\n # visual inspection in IPYTHON notebook as well.\n fort_debug.wrapper_standard_normal(num_draws_emax)\n\n # Clipping values below and above bounds.\n num_values = np.random.randint(1, 10000)\n lower_bound = np.random.randn()\n upper_bound = lower_bound + np.random.ranf()\n values = np.random.normal(size=num_values)\n\n f90 = fort_debug.wrapper_clip_value(\n values, lower_bound, upper_bound, num_values\n )\n py = np.clip(values, lower_bound, upper_bound)\n\n assert_almost_equal(py, f90)\n\n # Spectral condition number\n py = _spectral_condition_number(cov)\n fort = fort_debug.wrapper_spectral_condition_number(cov)\n assert_almost_equal(py, fort)", "def par_test_12(self):\n\n for i in range(4):\n self.XYZ_par_factor.setMaxDepth(i)\n self.XYW_par_factor.setMaxDepth(i)\n\n res = self.XYZ_factor.mult(self.XYW_factor)\n par_res = self.XYZ_par_factor.mult(self.XYW_par_factor)\n assert res.rand_vars == par_res.rand_vars and res.values == par_res.values", "def test_voxel(self):\n for m in [g.get_mesh('featuretype.STL'),\n g.trimesh.primitives.Box(),\n g.trimesh.primitives.Sphere()]:\n for pitch in [.1, .1 - g.tol.merge]:\n surface = m.voxelized(pitch=pitch)\n\n # make sure the voxelized pitch is similar to passed\n assert g.np.allclose(surface.pitch, pitch)\n\n for fill_method in ('base', 'orthographic'):\n solid = surface.copy().fill(method=fill_method)\n\n assert len(surface.encoding.dense.shape) == 3\n assert surface.shape == surface.encoding.dense.shape\n assert surface.volume > 0.0\n\n assert isinstance(surface.filled_count, int)\n assert surface.filled_count > 0\n\n box_surface = surface.as_boxes()\n box_solid = solid.as_boxes()\n\n assert isinstance(box_surface, g.trimesh.Trimesh)\n assert abs(box_solid.volume - solid.volume) < g.tol.merge\n\n assert g.trimesh.util.is_shape(\n surface.sparse_indices, (-1, 3))\n assert 
len(\n solid.sparse_indices) >= len(\n surface.sparse_indices)\n assert solid.sparse_indices.shape == solid.points.shape\n outside = m.bounds[1] + m.scale\n for vox in surface, solid:\n assert vox.sparse_indices.shape == vox.points.shape\n assert g.np.all(vox.is_filled(vox.points))\n assert not vox.is_filled(outside)\n\n try:\n cubes = surface.marching_cubes\n assert cubes.area > 0.0\n except ImportError:\n g.log.info('no skimage, skipping marching cubes test')\n\n g.log.info('Mesh volume was %f, voxelized volume was %f',\n m.volume,\n surface.volume)", "def test_2_2_3D_rec_splits(self):\n check = [(-3.0, -2.0, 0.0), (4.0, 10.0, 1.0), (4.0, -2.0, 0.0),\n (4.0, 10.0, 0.0), (4.0, -2.0, 1.0), (-3.0, 10.0, 0.0),\n (-3.0, 10.0, 1.0), (-3.0, -2.0, 1.0), (0.5, 4.0, 0.5),\n (-3.0, 4.0, 0.5), (-3.0, -2.0, 0.5), (-3.0, 4.0, 0.0),\n (0.5, -2.0, 0.5), (0.5, -2.0, 0.0), (0.5, 4.0, 0.0),\n (-1.25, 1.0, 0.25), (4.0, 4.0, 0.5), (4.0, 10.0, 0.5),\n (4.0, 4.0, 1.0), (0.5, 10.0, 0.5), (0.5, 10.0, 1.0),\n (0.5, 4.0, 1.0), (2.25, 7.0, 0.75), (4.0, -2.0, 0.5),\n (4.0, 4.0, 0.0), (2.25, 1.0, 0.25), (0.5, 10.0, 0.0),\n (2.25, 7.0, 0.25), (0.5, -2.0, 1.0), (2.25, 1.0, 0.75),\n (-3.0, 10.0, 0.5), (-1.25, 7.0, 0.25), (-3.0, 4.0, 1.0),\n (-1.25, 7.0, 0.75), (-1.25, 1.0, 0.75), (0.5, 1.0, 0.25),\n (0.5, 4.0, 0.25), (0.5, 1.0, 0.5), (-1.25, 4.0, 0.25),\n (-1.25, 4.0, 0.5), (-1.25, 1.0, 0.5), (-0.375, 2.5, 0.375),\n (-3.0, 1.0, 0.25), (-3.0, -2.0, 0.25), (-3.0, 1.0, 0.0),\n (-1.25, -2.0, 0.25), (-1.25, -2.0, 0.0), (-1.25, 1.0, 0.0),\n (-2.125, -0.5, 0.125), (-3.0, 4.0, 0.25), (-3.0, 1.0, 0.5),\n (-2.125, 2.5, 0.375), (-1.25, -2.0, 0.5),\n (-2.125, -0.5, 0.375), (-1.25, 4.0, 0.0), (-2.125, 2.5, 0.125),\n (0.5, -2.0, 0.25), (-0.375, -0.5, 0.375), (0.5, 1.0, 0.0),\n (-0.375, -0.5, 0.125), (-0.375, 2.5, 0.125), (0.5, 7.0, 0.75),\n (0.5, 4.0, 0.75), (0.5, 7.0, 0.5), (2.25, 4.0, 0.75),\n (2.25, 4.0, 0.5), (2.25, 7.0, 0.5), (1.375, 5.5, 0.625),\n (4.0, 7.0, 0.75), (4.0, 10.0, 0.75), (4.0, 7.0, 1.0),\n (2.25, 10.0, 0.75), (2.25, 10.0, 1.0), (2.25, 7.0, 1.0),\n (3.125, 8.5, 0.875), (4.0, 4.0, 0.75), (4.0, 7.0, 0.5),\n (3.125, 5.5, 0.625), (2.25, 10.0, 0.5), (3.125, 8.5, 0.625),\n (2.25, 4.0, 1.0), (3.125, 5.5, 0.875), (0.5, 10.0, 0.75),\n (1.375, 8.5, 0.625), (0.5, 7.0, 1.0), (1.375, 8.5, 0.875),\n (1.375, 5.5, 0.875), (2.25, 4.0, 0.25), (2.25, 1.0, 0.5),\n (1.375, 2.5, 0.375), (4.0, 1.0, 0.25), (4.0, -2.0, 0.25),\n (4.0, 1.0, 0.0), (2.25, -2.0, 0.25), (2.25, -2.0, 0.0),\n (2.25, 1.0, 0.0), (3.125, -0.5, 0.125), (4.0, 4.0, 0.25),\n (4.0, 1.0, 0.5), (3.125, 2.5, 0.375), (2.25, -2.0, 0.5),\n (3.125, -0.5, 0.375), (2.25, 4.0, 0.0), (3.125, 2.5, 0.125),\n (1.375, -0.5, 0.375), (1.375, -0.5, 0.125),\n (1.375, 2.5, 0.125), (0.5, 7.0, 0.25), (1.375, 5.5, 0.375),\n (4.0, 7.0, 0.25), (4.0, 10.0, 0.25), (4.0, 7.0, 0.0),\n (2.25, 10.0, 0.25), (2.25, 10.0, 0.0), (2.25, 7.0, 0.0),\n (3.125, 8.5, 0.125), (3.125, 5.5, 0.375), (3.125, 8.5, 0.375),\n (3.125, 5.5, 0.125), (0.5, 10.0, 0.25), (1.375, 8.5, 0.375),\n (0.5, 7.0, 0.0), (1.375, 8.5, 0.125), (1.375, 5.5, 0.125),\n (0.5, 1.0, 0.75), (1.375, 2.5, 0.625), (4.0, 1.0, 0.75),\n (4.0, -2.0, 0.75), (4.0, 1.0, 1.0), (2.25, -2.0, 0.75),\n (2.25, -2.0, 1.0), (2.25, 1.0, 1.0), (3.125, -0.5, 0.875),\n (3.125, 2.5, 0.625), (3.125, -0.5, 0.625), (3.125, 2.5, 0.875),\n (0.5, -2.0, 0.75), (1.375, -0.5, 0.625), (0.5, 1.0, 1.0),\n (1.375, -0.5, 0.875), (1.375, 2.5, 0.875), (-1.25, 7.0, 0.5),\n (-0.375, 5.5, 0.375), (-3.0, 7.0, 0.25), (-3.0, 10.0, 0.25),\n (-3.0, 7.0, 0.0), (-1.25, 10.0, 0.25), (-1.25, 
10.0, 0.0),\n (-1.25, 7.0, 0.0), (-2.125, 8.5, 0.125), (-3.0, 7.0, 0.5),\n (-2.125, 5.5, 0.375), (-1.25, 10.0, 0.5), (-2.125, 8.5, 0.375),\n (-2.125, 5.5, 0.125), (-0.375, 8.5, 0.375),\n (-0.375, 8.5, 0.125), (-0.375, 5.5, 0.125), (-1.25, 4.0, 0.75),\n (-0.375, 5.5, 0.625), (-3.0, 7.0, 0.75), (-3.0, 10.0, 0.75),\n (-3.0, 7.0, 1.0), (-1.25, 10.0, 0.75), (-1.25, 10.0, 1.0),\n (-1.25, 7.0, 1.0), (-2.125, 8.5, 0.875), (-3.0, 4.0, 0.75),\n (-2.125, 5.5, 0.625), (-2.125, 8.5, 0.625), (-1.25, 4.0, 1.0),\n (-2.125, 5.5, 0.875), (-0.375, 8.5, 0.625),\n (-0.375, 8.5, 0.875), (-0.375, 5.5, 0.875),\n (-0.375, 2.5, 0.625), (-3.0, 1.0, 0.75), (-3.0, -2.0, 0.75),\n (-3.0, 1.0, 1.0), (-1.25, -2.0, 0.75), (-1.25, -2.0, 1.0),\n (-1.25, 1.0, 1.0), (-2.125, -0.5, 0.875), (-2.125, 2.5, 0.625),\n (-2.125, -0.5, 0.625), (-2.125, 2.5, 0.875),\n (-0.375, -0.5, 0.625), (-0.375, -0.5, 0.875),\n (-0.375, 2.5, 0.875)]\n nn_checks = {(2.25, 7.0, 0.75): [(4.0, 7.0, 0.75), (2.25, 7.0, 1.0),\n (4.0, 7.0, 0.5), (4.0, 7.0, 1.0),\n (4.0, 4.0, 0.75), (1.375, 5.5, 0.875),\n (2.25, 4.0, 1.0), (2.25, 4.0, 0.5),\n (2.25, 4.0, 0.75), (3.125, 8.5, 0.875),\n (3.125, 8.5, 0.625), (4.0, 10.0, 0.75),\n (2.25, 10.0, 1.0), (2.25, 10.0, 0.75),\n (2.25, 10.0, 0.5), (1.375, 8.5, 0.625),\n (1.375, 8.5, 0.875), (0.5, 7.0, 0.75),\n (0.5, 7.0, 0.5), (3.125, 5.5, 0.625),\n (3.125, 5.5, 0.875), (0.5, 10.0, 0.75),\n (0.5, 7.0, 1.0), (0.5, 4.0, 0.75),\n (2.25, 7.0, 0.5), (1.375, 5.5, 0.625)],\n (4.0, -2.0, 0.5): [(4.0, -2.0, 0.75), (4.0, -2.0, 0.25),\n (2.25, 1.0, 0.5), (2.25, -2.0, 0.75),\n (2.25, -2.0, 0.5), (2.25, -2.0, 0.25),\n (4.0, 1.0, 0.25), (4.0, 1.0, 0.75),\n (4.0, 1.0, 0.5), (3.125, -0.5, 0.375),\n (3.125, -0.5, 0.625)],\n (-2.125, -0.5, 0.875): [(-1.25, 1.0, 1.0),\n (-1.25, 1.0, 0.75),\n (-1.25, -2.0, 0.75),\n (-1.25, -2.0, 1.0),\n (-3.0, -2.0, 0.75),\n (-3.0, 1.0, 1.0), (-3, -2, 1),\n (-3.0, 1.0, 0.75)]}\n\n init_triangulation(3, 2, check, nn_checks, bounds=[(-3, 4), (-2, 10), (0, 1)])" ]
[ "0.6212602", "0.60263664", "0.6011479", "0.59304386", "0.5858633", "0.5743929", "0.5703311", "0.5686523", "0.5677409", "0.56035334", "0.55897456", "0.5561343", "0.5554519", "0.55176467", "0.5503147", "0.5501849", "0.5489476", "0.5488244", "0.5445561", "0.5432928", "0.54146785", "0.5406405", "0.5403345", "0.5401758", "0.539838", "0.53982306", "0.53784627", "0.53756195", "0.53690004", "0.53382355" ]
0.743953
0
Check that the energy of a path of surfaces is positive at each timestep.
def test_path_energy_per_time_is_positive( self, space, a0, a1, b1, c1, d1, a2, path, atol ): n_times = len(path) space.equip_with_metric(self.Metric, a0=a0, a1=a1, b1=b1, c1=c1, d1=d1, a2=a2) energy = space.metric.path_energy_per_time(path) self.assertAllEqual(energy.shape, (n_times - 1, 1)) result = gs.all(energy > -1 * atol) self.assertTrue(result) expected_shape = (2, n_times - 1, 1) path = gs.array([path, path]) energy = space.metric.path_energy_per_time(path) self.assertAllEqual(energy.shape, expected_shape) result = gs.all(energy > -1 * atol) self.assertTrue(result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_path_energy_is_positive(self, space, a0, a1, b1, c1, d1, a2, path, atol):\n space.equip_with_metric(self.Metric, a0=a0, a1=a1, b1=b1, c1=c1, d1=d1, a2=a2)\n\n energy = space.metric.path_energy(path)\n self.assertAllEqual(energy.shape, ())\n result = gs.all(energy > -1 * atol)\n self.assertTrue(result)\n\n path = gs.array([path, path])\n energy = space.metric.path_energy(path)\n self.assertAllEqual(energy.shape, (2,))\n result = gs.all(energy > -1 * atol)\n self.assertTrue(result)", "def is_positive(self, example_path):\n candidate_planets = self.meta_data_frame[self.meta_data_frame['disposition'] == 'PC']\n return example_path in candidate_planets['lightcurve_path'].values", "def isstationary(self):\n if np.all(np.abs(self.arroots) > 1.0):\n return True\n else:\n return False", "def assert_no_error(self): \r\n Nx = self['Nx']\r\n Nt = self.m.Nt\r\n L, T = self.problem['L T'.split()]\r\n L = L/2 # only half the domain used (symmetry)\r\n x = np.linspace(0, L, Nx+1) # Mesh points in space \r\n t = np.linspace(0, T, Nt+1) # Mesh points in time\r\n \r\n for n in range(len(t)):\r\n u_e = self.problem.u_exact(x, t[n])\r\n diff = np.abs(self.f.u[n,:] - u_e).max()\r\n print 'diff:', diff\r\n tol = 1E-13\r\n assert diff < tol", "def test_t(self):\n assert np.isclose(self.stepper.t, self.final_t)", "def test_volume_surface_empty(self):\n for k in (0, -1, 1, 1.75, 0.325, 1/7, -1.75, -0.325, -1/7):\n s = space(fake_curvature=k) \n for name in ('sphere_s1', 'sphere_v2', 'sphere_s2', 'sphere_v3'):\n self.assertTrue(getattr(s, name)(0) == 0)", "def test_negative_electrode_potential_profile(self):\n np.testing.assert_array_almost_equal(self.phi_s_n(self.t, x=0), 0, decimal=5)", "def check_convergency(self):\n if self.vars['ema_trace'][self.vars['step']] <= self.settings[\"emaSpeedTol\"]:\n return True\n else:\n return False", "def verify_shocksine(controller):\n import numpy as np\n import os\n\n test_solution = controller.solution.state.get_q_global()\n\n if test_solution is not None:\n thisdir = os.path.dirname(__file__)\n expected_density = np.loadtxt(os.path.join(thisdir,'shocksine_regression_density.txt'))\n test_density = test_solution[0,:]\n test_err = np.linalg.norm(expected_density-test_density)\n return check_diff(0, test_err, abstol=1.e-4)", "def is_edge_phase(x, x_last):\n _x = x/(2*np.pi)\n _x = round(_x - round(_x), 5)\n _x_last = x_last/(2*np.pi)\n _x_last = round(_x_last - round(_x_last), 5)\n if _x == 0.0 or (_x_last < 0.0 and _x > 0.0):\n return True\n else:\n return False", "def min_energy_storage_rule(_m, g, y, s, t):\r\n\r\n return - m.q[g, y, s, t] <= 0", "def run_one_step(self, dt):\n self.tldiffusion(dt)\n\n # Test code stability for timestep dt\n # Raise unstability error if local slope is reversed by erosion\n # and deposition during a timestep dt\n elev_dif = self.elev - self.elev[self.receiver]\n s = elev_dif[np.where(self.grid.at_node[\"flow__sink_flag\"] == 0)]\n if np.any(s < -1) is True:\n raise ValueError(\n \"The component is unstable\" \" for such a large timestep \" \"on this grid\"\n )\n else:\n pass", "def is_equidistant(self) -> bool:\n if len(self.time) < 3:\n return True\n return len(self.time.to_series().diff().dropna().unique()) == 1", "def sign_of_path(path):\n vectors = [(a[0] - b[0], a[1] - b[1]) for b, a in pairwise(path)]\n sign_exp = 0\n for idx, vector in enumerate(vectors):\n if vector == (0, 1):\n sign_exp += len([v for v in vectors[idx + 1:] if v == (1, 0)])\n return (-1) ** (sign_exp)", "def test_t0(self):\n sol = Mader(p_cj=3.0e11, 
d_cj=8.0e5, gamma=3.0, u_piston=0.0)\n # r must contain 2 elements, otherwise the density and pressure are nan\n r = np.array([0.7, 0.8])\n t = 0.0\n solrt = sol(r, t)\n for quant in ['velocity', 'pressure', 'sound_speed', 'density', 'xdet']:\n assert np.all(np.isnan(solrt[quant]))", "def test_steps(self, model):\r\n model.fs.unit.initialize()\r\n\r\n # Add disturbances\r\n for t in model.fs.time:\r\n if 300 <= t < 600:\r\n model.fs.unit.shell_inlet.temperature[t].fix(288.15 - 10)\r\n elif 600 <= t < 900:\r\n model.fs.unit.shell_inlet.temperature[t].fix(288.15)\r\n elif 900 <= t < 1200:\r\n model.fs.unit.shell_inlet.temperature[t].fix(288.15 + 10)\r\n elif t >= 1200:\r\n model.fs.unit.shell_inlet.temperature[t].fix(288.15)\r\n\r\n # Transient solution\r\n solver.solve(model)\r\n\r\n times = [0, 300, 600, 900, 1200, 1500]\r\n sco2_exp = [305.2, 304.9, 305.1, 306.5, 305.7, 305.2]\r\n air_exp = [370.4, 373.1, 370.3, 365.9, 370.7, 370.4]\r\n wall_exp = [339.4, 338.7, 339.1, 340.7, 339.9, 339.4]\r\n\r\n self.check_temperatures(model, times, sco2_exp, air_exp, wall_exp)", "def validate_edges(attack_surface_graph, admissible_path, starting_points):\n for i in range(len(admissible_path)-1):\n for edge in attack_surface_graph.edges(data=True):\n if edge[0] == admissible_path[i] and edge[1] == admissible_path[i+1]:\n descriptors = edge[2]\n if find_violation(descriptors) == [] and edge[0] not in starting_points:\n return False\n return True", "def test_check_conformer_energy(self):\n v_list = [-272.2779012225, -272.2774933703, -272.2768397635, -272.2778432059, -272.278645477, -272.2789602654,\n -272.2788749196, -272.278496709, -272.2779350675, -272.2777008843, -272.2777167286, -272.2780937643,\n -272.2784838846, -272.2788050464, -272.2787865352, -272.2785091607, -272.2779977452, -272.2777957743,\n -272.2779134906, -272.2781827547, -272.278443339, -272.2788244214, -272.2787748749]\n v_list = np.array(v_list, np.float64)\n v_diff = (v_list[0] - np.min(v_list)) * constants.E_h * constants.Na / 1000\n self.assertAlmostEqual(v_diff / 2.7805169838282797, 1, 5)", "def test_comp_surface_wind(self, test_dict):\n test_obj = test_dict[\"test_obj\"]\n result = test_obj.slot.comp_surface_wind()\n\n a = result\n b = test_dict[\"SW_exp\"]\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)", "def __isZeroEverywhere(self, array):\n epsilon = numpy.finfo( type(array[0]) ).eps\n boolList = numpy.less_equal(numpy.abs(array), epsilon)\n\n for b in boolList:\n if not b:\n return False\n return True", "def isgood(self):\n\t\tanswer = True\n\t\t\n\t\tif self.mes_flux <= 0.0:\n\t\t\tanswer = False\n\n\t\treturn answer", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()" ]
[ "0.728073", "0.589016", "0.58340615", "0.56767166", "0.5647796", "0.56435287", "0.56361413", "0.5609778", "0.5564585", "0.55382043", "0.5497689", "0.5426183", "0.5391233", "0.5343787", "0.5343612", "0.5332643", "0.5260981", "0.5248749", "0.5233796", "0.52206767", "0.5216022", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.52111405" ]
0.719298
1
Check that energy of a path of surfaces is positive at each timestep.
def test_path_energy_is_positive(self, space, a0, a1, b1, c1, d1, a2, path, atol): space.equip_with_metric(self.Metric, a0=a0, a1=a1, b1=b1, c1=c1, d1=d1, a2=a2) energy = space.metric.path_energy(path) self.assertAllEqual(energy.shape, ()) result = gs.all(energy > -1 * atol) self.assertTrue(result) path = gs.array([path, path]) energy = space.metric.path_energy(path) self.assertAllEqual(energy.shape, (2,)) result = gs.all(energy > -1 * atol) self.assertTrue(result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_path_energy_per_time_is_positive(\n self, space, a0, a1, b1, c1, d1, a2, path, atol\n ):\n n_times = len(path)\n space.equip_with_metric(self.Metric, a0=a0, a1=a1, b1=b1, c1=c1, d1=d1, a2=a2)\n\n energy = space.metric.path_energy_per_time(path)\n\n self.assertAllEqual(energy.shape, (n_times - 1, 1))\n result = gs.all(energy > -1 * atol)\n self.assertTrue(result)\n\n expected_shape = (2, n_times - 1, 1)\n path = gs.array([path, path])\n energy = space.metric.path_energy_per_time(path)\n self.assertAllEqual(energy.shape, expected_shape)\n result = gs.all(energy > -1 * atol)\n self.assertTrue(result)", "def is_positive(self, example_path):\n candidate_planets = self.meta_data_frame[self.meta_data_frame['disposition'] == 'PC']\n return example_path in candidate_planets['lightcurve_path'].values", "def isstationary(self):\n if np.all(np.abs(self.arroots) > 1.0):\n return True\n else:\n return False", "def assert_no_error(self): \r\n Nx = self['Nx']\r\n Nt = self.m.Nt\r\n L, T = self.problem['L T'.split()]\r\n L = L/2 # only half the domain used (symmetry)\r\n x = np.linspace(0, L, Nx+1) # Mesh points in space \r\n t = np.linspace(0, T, Nt+1) # Mesh points in time\r\n \r\n for n in range(len(t)):\r\n u_e = self.problem.u_exact(x, t[n])\r\n diff = np.abs(self.f.u[n,:] - u_e).max()\r\n print 'diff:', diff\r\n tol = 1E-13\r\n assert diff < tol", "def test_t(self):\n assert np.isclose(self.stepper.t, self.final_t)", "def test_volume_surface_empty(self):\n for k in (0, -1, 1, 1.75, 0.325, 1/7, -1.75, -0.325, -1/7):\n s = space(fake_curvature=k) \n for name in ('sphere_s1', 'sphere_v2', 'sphere_s2', 'sphere_v3'):\n self.assertTrue(getattr(s, name)(0) == 0)", "def test_negative_electrode_potential_profile(self):\n np.testing.assert_array_almost_equal(self.phi_s_n(self.t, x=0), 0, decimal=5)", "def check_convergency(self):\n if self.vars['ema_trace'][self.vars['step']] <= self.settings[\"emaSpeedTol\"]:\n return True\n else:\n return False", "def verify_shocksine(controller):\n import numpy as np\n import os\n\n test_solution = controller.solution.state.get_q_global()\n\n if test_solution is not None:\n thisdir = os.path.dirname(__file__)\n expected_density = np.loadtxt(os.path.join(thisdir,'shocksine_regression_density.txt'))\n test_density = test_solution[0,:]\n test_err = np.linalg.norm(expected_density-test_density)\n return check_diff(0, test_err, abstol=1.e-4)", "def is_edge_phase(x, x_last):\n _x = x/(2*np.pi)\n _x = round(_x - round(_x), 5)\n _x_last = x_last/(2*np.pi)\n _x_last = round(_x_last - round(_x_last), 5)\n if _x == 0.0 or (_x_last < 0.0 and _x > 0.0):\n return True\n else:\n return False", "def min_energy_storage_rule(_m, g, y, s, t):\r\n\r\n return - m.q[g, y, s, t] <= 0", "def run_one_step(self, dt):\n self.tldiffusion(dt)\n\n # Test code stability for timestep dt\n # Raise unstability error if local slope is reversed by erosion\n # and deposition during a timestep dt\n elev_dif = self.elev - self.elev[self.receiver]\n s = elev_dif[np.where(self.grid.at_node[\"flow__sink_flag\"] == 0)]\n if np.any(s < -1) is True:\n raise ValueError(\n \"The component is unstable\" \" for such a large timestep \" \"on this grid\"\n )\n else:\n pass", "def is_equidistant(self) -> bool:\n if len(self.time) < 3:\n return True\n return len(self.time.to_series().diff().dropna().unique()) == 1", "def sign_of_path(path):\n vectors = [(a[0] - b[0], a[1] - b[1]) for b, a in pairwise(path)]\n sign_exp = 0\n for idx, vector in enumerate(vectors):\n if vector == (0, 1):\n sign_exp += len([v for v in 
vectors[idx + 1:] if v == (1, 0)])\n return (-1) ** (sign_exp)", "def test_t0(self):\n sol = Mader(p_cj=3.0e11, d_cj=8.0e5, gamma=3.0, u_piston=0.0)\n # r must contain 2 elements, otherwise the density and pressure are nan\n r = np.array([0.7, 0.8])\n t = 0.0\n solrt = sol(r, t)\n for quant in ['velocity', 'pressure', 'sound_speed', 'density', 'xdet']:\n assert np.all(np.isnan(solrt[quant]))", "def test_steps(self, model):\r\n model.fs.unit.initialize()\r\n\r\n # Add disturbances\r\n for t in model.fs.time:\r\n if 300 <= t < 600:\r\n model.fs.unit.shell_inlet.temperature[t].fix(288.15 - 10)\r\n elif 600 <= t < 900:\r\n model.fs.unit.shell_inlet.temperature[t].fix(288.15)\r\n elif 900 <= t < 1200:\r\n model.fs.unit.shell_inlet.temperature[t].fix(288.15 + 10)\r\n elif t >= 1200:\r\n model.fs.unit.shell_inlet.temperature[t].fix(288.15)\r\n\r\n # Transient solution\r\n solver.solve(model)\r\n\r\n times = [0, 300, 600, 900, 1200, 1500]\r\n sco2_exp = [305.2, 304.9, 305.1, 306.5, 305.7, 305.2]\r\n air_exp = [370.4, 373.1, 370.3, 365.9, 370.7, 370.4]\r\n wall_exp = [339.4, 338.7, 339.1, 340.7, 339.9, 339.4]\r\n\r\n self.check_temperatures(model, times, sco2_exp, air_exp, wall_exp)", "def validate_edges(attack_surface_graph, admissible_path, starting_points):\n for i in range(len(admissible_path)-1):\n for edge in attack_surface_graph.edges(data=True):\n if edge[0] == admissible_path[i] and edge[1] == admissible_path[i+1]:\n descriptors = edge[2]\n if find_violation(descriptors) == [] and edge[0] not in starting_points:\n return False\n return True", "def test_check_conformer_energy(self):\n v_list = [-272.2779012225, -272.2774933703, -272.2768397635, -272.2778432059, -272.278645477, -272.2789602654,\n -272.2788749196, -272.278496709, -272.2779350675, -272.2777008843, -272.2777167286, -272.2780937643,\n -272.2784838846, -272.2788050464, -272.2787865352, -272.2785091607, -272.2779977452, -272.2777957743,\n -272.2779134906, -272.2781827547, -272.278443339, -272.2788244214, -272.2787748749]\n v_list = np.array(v_list, np.float64)\n v_diff = (v_list[0] - np.min(v_list)) * constants.E_h * constants.Na / 1000\n self.assertAlmostEqual(v_diff / 2.7805169838282797, 1, 5)", "def test_comp_surface_wind(self, test_dict):\n test_obj = test_dict[\"test_obj\"]\n result = test_obj.slot.comp_surface_wind()\n\n a = result\n b = test_dict[\"SW_exp\"]\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)", "def __isZeroEverywhere(self, array):\n epsilon = numpy.finfo( type(array[0]) ).eps\n boolList = numpy.less_equal(numpy.abs(array), epsilon)\n\n for b in boolList:\n if not b:\n return False\n return True", "def isgood(self):\n\t\tanswer = True\n\t\t\n\t\tif self.mes_flux <= 0.0:\n\t\t\tanswer = False\n\n\t\treturn answer", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()" ]
[ "0.719298", "0.589016", "0.58340615", "0.56767166", "0.5647796", "0.56435287", "0.56361413", "0.5609778", "0.5564585", "0.55382043", "0.5497689", "0.5426183", "0.5391233", "0.5343787", "0.5343612", "0.5332643", "0.5260981", "0.5248749", "0.5233796", "0.52206767", "0.5216022", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.52111405", "0.52111405" ]
0.728073
0
goes through each neuron, each neuron has a chance of mutating equal to the learning rate of the network. There is a 20% chance of a physical mutation.
def mutate(self): #First, mutate masses for neuronNum in range(self.neuronCounter - 1): if self.learningRate > random.random(): self.neurons[neuronNum].mutate() else: continue #Now determine physical mutations if random.random() < 0.2: try: physMutation = random.choice(['a','l','c']) if physMutation == 'a': self.addNeuron(random.choice([0,1,2])) elif physMutation == 'l': begin = random.randint(1,self.neuronCounter - 1) end = random.randint(1, self.neuronCounter - 1) self.link(begin, end) else: begin = random.randint(1,self.neuronCounter - 1) end = random.choice(self.neurons[begin].outDic.keys()) self.cut(begin, end) except: return self return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mutate_nonstructural(self):\n # TODO consider clamping weights and biases?\n for link in self.gene_links:\n # Disable/Enable links\n if event(link_toggle_prob): # Chance of toggling link\n link.enabled = True if link.enabled is False else False\n if link.enabled is False and event(link_enable_prob): # Chance of enabling a disabled link\n link.enabled = True\n # Mutate weights\n if event(weight_mutate_rate):\n if event(weight_replace_rate): # replace with random weight\n link.weight = random.uniform(weight_init_min, weight_init_max)\n else: # adjust weight\n link.weight += random.uniform(-uniform_weight_scale, uniform_weight_scale)\n for node in self.gene_nodes:\n # Mutate bias\n if event(bias_mutate_rate):\n if event(bias_replace_rate): # replace with random bias\n node.bias = random.uniform(bias_init_min, bias_init_max)\n else: # adjust bias\n node.bias += random.uniform(-uniform_weight_scale, uniform_weight_scale)\n # Mutate activation func\n if node.can_modify:\n if event(change_act_prob):\n node.act_func = self.act_set.get_random_activation_func()\n # reinit freq amp and vshift when act func changes\n if node.act_func.__name__[0] == \"g\":\n node.freq = random.uniform(-gauss_freq_range, gauss_freq_range)\n node.amp = random.uniform(-func_amp_range, func_amp_range)\n node.vshift = random.uniform(-gauss_vshift_range, gauss_vshift_range)\n elif node.act_func.__name__[0] == \"s\":\n node.freq = random.uniform(-sin_freq_range, sin_freq_range)\n node.amp = random.uniform(-func_amp_range, func_amp_range)\n node.vshift = random.uniform(-sin_vshift_range, sin_vshift_range)\n # Adjust freq amp and vshift of activation function\n if event(func_adjust_prob):\n if node.act_func.__name__[0] == \"g\":\n node.freq += random.uniform(-guass_freq_adjust, guass_freq_adjust)\n elif node.act_func.__name__[0] == \"s\":\n node.freq += random.uniform(-sin_freq_adjust, sin_freq_adjust)\n if event(func_adjust_prob):\n if node.act_func.__name__[0] == \"g\" or node.act_func.__name__[0] == \"s\":\n node.amp += random.uniform(-func_amp_adjust, func_amp_adjust)\n if event(func_adjust_prob):\n if node.act_func.__name__[0] == \"g\" or node.act_func.__name__[0] == \"s\":\n node.vshift += random.uniform(-func_vshift_adjust, func_vshift_adjust)\n # Mutate substrate width/height rectangles\n if event(width_mutate_prob):\n if event(0.5):\n self.substrate_width += 1\n elif self.substrate_width > 1:\n self.substrate_width -= 1\n if event(height_mutate_prob):\n if event(0.5):\n self.substrate_height += 1\n elif self.substrate_height > 1:\n self.substrate_height -= 1\n \"\"\" ES-HyperNeat - no longer used\n # Mutate QuadTree variance\n if event(var_mutate_prob):\n self.var_thresh += np.random.normal(scale=gauss_var_scale)\n self.var_thresh = self.var_thresh if self.var_thresh > 0 else 0\n # Mutate QuadTree band thresh\n if event(band_mutate_prob):\n self.band_thresh += np.random.normal(scale=gauss_band_scale)\n self.band_thresh = self.band_thresh if self.band_thresh > 0 else 0\n \"\"\"", "def mutate(self, chance, amount):\r\n for layer in self.layers:\r\n for row in range(layer.output_size):\r\n for col in range(layer.input_size+1):\r\n if np.random.rand() < chance:\r\n new_val = layer.weights[row, col] + np.random.uniform(-amount, amount)\r\n new_val = min(max(-1, new_val), 1)\r\n layer.weights[row, col] = new_val", "def mutate(self, probability, rate):\n for i in range(self.number_of_transitions):\n shape = np.shape(self.weights[i])\n size = self.weights[i].size\n weights = self.weights[i].flatten()\n for j in 
range(len(weights)):\n if np.random.uniform(0, 1) < probability:\n weights[j] = weights[j] + rate * np.random.normal(0, 1 / np.sqrt(shape[0]))\n self.weights[i] = weights.reshape(shape)\n for j in range(len(self.biases[i])):\n if np.random.uniform(0, 1) < probability:\n self.biases[i][j] = self.biases[i][j] + rate * np.random.normal(0, 1)", "def mutation(child_weights):\n for index, _ in enumerate(child_weights):\n # Add a chance for random mutation\n has_mutation = random.uniform(0, 1)\n if has_mutation <= .1:\n child_weights[index] *= random.randint(0, 5)", "def mutate(self, perturbing_probability):\n for con in self.connections.values():\n if random() < perturbing_probability:\n con.weight *= random_gaussian()\n else:\n con.weight = random(-1, 1)", "def mutate(self):\n\n if len(self.genes) < 250:\n for g in self.genes:\n\n if MUTATION_CHANCE < random.random(): # random.random() gives float in [0,1)\n g.mutate()\n\n else:\n k = int(MUTATION_CHANCE*len(self.genes))\n for g in random.sample(self.genes,int(k)): #int(k)\n g.mutate()\n\n #To add random gene\n if ADD_GENE_CHANCE < random.random():\n self.genes.append(Gene(self.size)) #Call to Gene to add to genes list\n\n #To randomly remove genes\n\n if REM_GENE_CHANCE < random.random() and len(self.genes)>0:\n self.genes.remove(random.choice(self.genes))", "def _mutate(self, individuals):\n for cur in individuals:\n if random.random() < self.mutation_probability:\n self.op.mutate(cur['individual'])\n cur['fitness'] = None", "def mutate1(self, probability):\n for i in range(self.number_of_transitions):\n shape = np.shape(self.weights[i])\n size = self.weights[i].size\n weights = self.weights[i].flatten()\n for j in range(len(weights)):\n if np.random.uniform(0, 1) < probability:\n weights[j] = np.random.normal(0, 1 / np.sqrt(shape[0]))\n self.weights[i] = weights.reshape(shape)\n for j in range(len(self.biases[i])):\n if np.random.uniform(0, 1) < probability:\n self.biases[i][j] = np.random.normal(0, 1)", "def mutatePopulation(population, rate, strength, assetList):\n if rate < 0 or rate > 1:\n print('Mutation rate has to lie in [0,1].')\n return\n\n if strength <= 0 or strength > 1:\n print('Mutation strength has to lie in (0,1].')\n return\n\n mutatedPopulation = []\n\n for individual in population:\n if random.random() < rate:\n mutatedPopulation.append(mutateIndividual(individual, strength, assetList))\n else:\n mutatedPopulation.append(individual)\n\n return mutatedPopulation", "def mutation(self):\n\n for r in range(self.pop_num*3, 5): # Mutation.\n for w in range(0,self.length): \n if random.random()<0.2: \n self.par_and_sons[r].A[w] = self.par_and_sons[r].A[w] + np.random.randint(-20, 20) # Offset + -20 pixels.", "def __mutate(self, chromosomes, mutation_probability):\n\n for chromosome in chromosomes:\n for i in range(self.chromosome_size):\n if random.randint(1, 100) <= mutation_probability:\n logging.getLogger().debug(\n \"---> Mutation in Chromosome \" + str(\n chromosome.chromosome_id) + \"in gene \" + str(i)\n + \" <---\")\n chromosome.genes[i] = random.choice(self.gene_pool)", "def mutate(self):\n \n # Mutate each weight\n self.w1 = self.w1 + np.random.normal(0, 1, 8).reshape((2,4))\n self.b1 = self.b1 + np.random.normal(0, 1, 2).reshape((2,1))\n self.w2 = self.w2 + np.random.normal(0, 1, 4).reshape((2,2))\n self.b2 = self.b2 + np.random.normal(0, 1, 2).reshape((2,1))\n self.w3 = self.w3 + np.random.normal(0, 1, 2).reshape((1,2))\n self.b3 = self.b3 + np.random.normal(0, 1, 1)\n \n # Return thyself\n return self", "def 
modify(nets, probs, ranks, desc, hypers, seed=0, seed2=0):\n\n name = str(seed)\n\n np.random.seed(seed2)\n tf.random.set_random_seed(seed2)\n random.seed(seed2)\n\n if not rnd: # If randomness is not applied\n print(ranks.sum(axis=1))\n if (ranks.sum(axis=1) == 0).any(): # If there are any network in the bottom three in importance in all objectives\n probs = (ranks.sum(axis=1) == 0) * probs # Only accept a network as modifiable if they rank between 3 least important networks in all three objectives\n probs = probs / np.sum(probs) # Update probabilities once the networks more important than bottom three have been taken away\n trainables, res, mutation, comp, reaching_outs = reducing_mutations(nets, probs, desc)\n else:\n trainables, res, mutation, comp, reaching_outs = increasing_mutations(nets, probs, desc)\n else: # Random application\n comp = np.random.choice(nets)\n _, in_conns, out_conns, _ = desc.get_net_context(comp)\n conns = in_conns + out_conns # Checka si esto da error\n reaching_outs = list(set([x for x in desc.reachable[comp] if \"o\" in x])) # Outputs affected by the mutation\n mutations = [con for con in conns if is_deletable(desc, con)]\n\n mutations += [\"add_con\", \"divide_con\", \"reinit\"]\n\n if is_bypassable(desc, comp):\n mutations += [\"bypass\"]\n\n mutation = np.random.choice(mutations)\n res, trainables = mutate(mutation, desc, comp, conns)\n print(mutation)\n model = MNM(desc, hypers[\"btch_sz\"], data_inputs[\"Train\"], data_outputs[\"Train\"], loss_func_weights={\"o0\": hypers[\"wo0\"], \"o1\": hypers[\"wo1\"], \"o2\": hypers[\"wo2\"]}, name=name, load=None, init=False, random_seed=seed2, lr=0.001)\n\n model.initialize(load=True, load_path=\"\", variables=trainables)\n\n model.convergence_train(hypers[\"btch_sz\"], iter_lim//100, conv_param, proportion, iter_lim//20, display_step=-1)\n\n results = evaluate_model(model)\n\n del model\n\n if rnd == 1:\n n = \"resultsrandom\"\n else:\n n = \"results\"\n\n np.save(n + str(seed) + \"_\" + str(seed2) + \".npy\", np.concatenate((results, [res, mutation, comp], reaching_outs)))", "def explore(self):\n for k, v in self._hyperparameters.items():\n mutation = random.choice([0.8, 1.2])\n self._hyperparameters[k] = mutation * v", "def __init__(self, neurons, random=True, silent=False):\n\n self.neurons = neurons\n self.silent = silent\n\n # Set weights\n lastneuron = 0\n self.weights = []\n\n for neuron in self.neurons:\n if lastneuron != 0:\n x = np.random.rand(neuron, lastneuron) * 2.0 - 1.0\n if not random:\n for y in range(len(x)):\n for z in range(len(x[y])):\n x[y][z] = 0.0\n self.weights.append(x)\n lastneuron = neuron", "def reducing_mutations(nets, probs, desc):\n\n if (np.isnan(probs)).any(): # If probabilites could not be computed or mutation has to be randomly applied, apply random probabilities\n print(\"NaN prob\")\n probs = np.array([1/probs.shape[0]]*probs.shape[0])\n if rnd == 1:\n probs = np.array([1 / probs.shape[0]] * probs.shape[0])\n\n comp = np.random.choice(nets, p=probs) # Choose network to which area the mutation is going to be applied\n\n reaching_outs = list(set([x for x in desc.reachable[comp] if \"o\" in x])) # Outputs affected by the mutation\n _, in_conns, out_conns, _ = desc.get_net_context(comp)\n conns = in_conns + out_conns # Checka si esto da error\n mutations = [con for con in conns if is_deletable(desc, con)] # Add deletable connections to the mutation pool\n mutations += [\"reinit\"]\n\n if is_bypassable(desc, comp):\n mutations += [\"bypass\"]\n mutation = 
np.random.choice(mutations) # Choose mutation\n\n res, trainables = mutate(mutation, desc, comp, conns)\n\n return trainables, res, mutation, comp, reaching_outs", "def run(self, i):\n j = 0\n sum = 0\n probability = 0\n count = self.neuron_count\n\n sum = 0\n for j in range(0, count):\n sum += self.get_weight(i, j) * (1 if (self.current_state[j] > 0) else 0)\n\n sum -= self.threshold[i]\n probability = 1 / (1 + math.exp(-sum / self.temperature))\n if np.random.rand() <= probability:\n self.current_state[i] = 1.0\n else:\n self.current_state[i] = 0.0", "def _mutate(self, offspring):\n weight_idx = random.choice(range(len(offspring)))\n mutation_modifier = 1 + random.uniform(-self.mutation_delta, self.mutation_delta)\n offspring[weight_idx] *= mutation_modifier\n return self._normalize_weights(offspring)", "def multiplication_test():\r\n\r\n def fitness_function(neural_net):\r\n \"\"\"Calculate the fitness of a neural_net.\"\"\"\r\n fitness = 25\r\n for i in range(1, 6):\r\n for j in range(1, 6):\r\n answer = np.exp(neural_net.calculate([np.log(i), np.log(j)])[0])\r\n result = i*j\r\n fitness -= abs(answer - result)\r\n\r\n return fitness\r\n\r\n gen_size = 50\r\n net_size = (2, 1)\r\n genetic_algorithm = GeneticAlgorithm(gen_size, net_size, mutation_rate=0.3, mutation_chance=0.5)\r\n\r\n highest_so_far = 0\r\n while True:\r\n # Testing creatures\r\n for neural_net in genetic_algorithm.population:\r\n neural_net.fitness = fitness_function(neural_net)\r\n\r\n # Sorting creatures\r\n genetic_algorithm.calculate_stats()\r\n\r\n print(\"Gen\", genetic_algorithm.current_generation, \":\")\r\n print(\"Max fitness\", genetic_algorithm.stats.max_fitness)\r\n print(\"Mean fitness\", genetic_algorithm.stats.mean_fitness)\r\n highest_so_far = max(genetic_algorithm.stats.max_fitness, highest_so_far)\r\n print(\"Highest so far\", highest_so_far)\r\n\r\n\r\n # Starting next generation\r\n if genetic_algorithm.stats.max_fitness < 24.9 and genetic_algorithm.current_generation < 1000:\r\n genetic_algorithm.next_generation()\r\n else:\r\n break\r\n\r\n\r\n quit()\r\n\r\n\r\n for net in genetic_algorithm.sorted_population:\r\n print(net.fitness)\r\n best_neural_net = genetic_algorithm.sorted_population[0]\r\n print(\"Weights:\")\r\n print(best_neural_net.layers[0].weights[0])\r\n while True:\r\n print()\r\n in_a = input(\"Give net first number: \")\r\n in_b = input(\"Give net second number: \")\r\n answer = best_neural_net.calculate([np.log(float(in_a)), np.log(float(in_b))])[0]\r\n print(\"Net's answer:\", np.exp(answer))", "def mutate(weights,gen):\n mutated_weights = []\n for weight in weights:\n new_weight = np.random.normal(loc=weight, scale=0.5/(gen+1))\n if new_weight >= -1 and new_weight <= 1:\n mutated_weights.append(new_weight)\n elif new_weight < -1:\n mutated_weights.append(-1)\n else:\n mutated_weights.append(1)\n return np.array(mutated_weights)", "def mutate(self):\n #inlined 'flip_coin' for speed\n if prng.random() < self.mutation_rate:\n self._value = self.mutator.evaluate(self)\n return 1\n return 0", "def _mutate(self, noise_generator, sigma):\n\n mutation_indexes = torch.distributions.categorical.Categorical(\n torch.tensor([self.mutation_prob, 1 - self.mutation_prob])).sample([self.population_size]) > 0.5\n\n noise = noise_generator.sample([self.population_size, len(self.population[0])]).squeeze(-1)\n self.population[mutation_indexes] += noise[mutation_indexes] * sigma", "def make_neural_net_challenging():\n i0 = Input('i0', -1.0) # this input is immutable\n i1 = Input('i1', 0.0)\n i2 = 
Input('i2', 0.0)\n seed_random()\n wt1 = random_weight()\n wt2 = random_weight()\n wt3 = random_weight()\n wt4 = random_weight()\n wt5 = random_weight()\n wt6 = random_weight()\n wt7 = random_weight()\n wt8 = random_weight()\n wt9 = random_weight()\n wt10 = random_weight()\n\t\n w1A = Weight('w1A', wt1)\n w2A = Weight('w2A', wt2)\n w1B = Weight('w1B', wt3)\n w2B = Weight('w2B', wt4)\n wA = Weight('wA', -1)\n wB = Weight('wB', -1)\n wAC = Weight('wAC', wt5)\n wBC = Weight('wBC', wt6)\n wC = Weight('wC', -1)\n wAD = Weight('wAD', wt7)\n wBD = Weight('wBD', wt8)\n wD = Weight('wD', -1)\n wCE = Weight('wCE', wt9)\n wDE = Weight('wDE', wt10)\n wE = Weight('wE', -1)\n\n # Inputs must be in the same order as their associated weights\n A = Neuron('A', [i1,i2,i0], [w1A,w2A,wA])\n B = Neuron('B', [i1,i2,i0], [w1B,w2B,wB])\n C = Neuron('C', [A,B,i0], [wAC,wBC,wC])\n D = Neuron('D', [A,B,i0], [wAD,wBD,wD])\n E = Neuron('D', [C,D,i0], [wCE,wDE,wE])\n P = PerformanceElem(E, 0.0)\n\n net = Network(P,[A, B, C, D, E])\n return net", "def anneal():\n best_sol = list(range(SIZE))\n best_sum = get_sum(best_sol)\n shuffle(best_sol)\n\n temp = 10000000\n cool_rate = 0.0003\n\n counter = 0\n while temp > 1:\n new_sol = best_sol.copy()\n i, j = randint(0, SIZE - 1), randint(0, SIZE - 1)\n new_sol[i], new_sol[j] = new_sol[j], new_sol[i]\n new_energy = get_sum(new_sol)\n cur_energy = best_sum\n if calculate_probability(cur_energy, new_energy, temp) > random():\n best_sol = new_sol.copy()\n best_sum = new_energy\n temp *= 1 - cool_rate\n counter += 1\n\n print(counter)\n\n print(best_sol)\n print(best_sum)\n return best_sol, best_sum", "def update(self):\n\n self._eps_count += 1\n if self._replay.size >= self._min_replay_size:\n for _ in range(self._learning_updates):\n samples_indices, minibatch = self._replay.sample(self._batch_size)\n tf_minibatch = [tf.constant(mat, dtype=tf_type) for mat, tf_type in zip(minibatch, [tf.float32, tf.int32, tf.float32, tf.float32, tf.float32])]\n self._learn(*tf_minibatch)\n\n self._learn_iter_counter += 1\n if (self._target_update_period > 1) and (self._learn_iter_counter % self._target_update_period == 0):\n self._update_target_nets()", "def run():\n trials = 100\n\n multipliers = [0.25, 0.3, 0.35, 0.5, 0.75, 1, 1.25, 1.45, 1.5, 1.55, 1.6] # Coefficients for learning rate\n\n mean_penalty = []\n median_penalty = []\n std_penalty = []\n\n mean_trial_time = []\n median_trial_time = []\n std_trial_time = []\n\n mean_success_rate = []\n median_success_rate = []\n std_success_rate = []\n\n for m in multipliers:\n all_penalties = [] # All penalties from trail sets\n all_average_trial_time = []\n all_success_rates = []\n\n for i in range(0, 20):\n # print \"Trial set:\", i\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n agent = e.create_agent(LearnerAgent) # create agent\n agent.mult = m\n e.set_primary_agent(agent, enforce_deadline=True) # specify agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0, display=False) # create simulator (uses pygame when display=True, if available)\n\n sim.run(n_trials=trials) # run for a specified number of trials\n\n all_penalties.append(agent.all_trails_penalties)\n all_average_trial_time.append(agent.time/float(trials))\n all_success_rates.append(float(trials-agent.aborted_trials)/trials)\n\n mean_penalty.append(np.mean(all_penalties))\n median_penalty.append(np.median(all_penalties))\n std_penalty.append(np.std(all_penalties))\n\n 
mean_trial_time.append(np.mean(all_average_trial_time))\n median_trial_time.append(np.median(all_average_trial_time))\n std_trial_time.append(np.std(all_average_trial_time))\n\n mean_success_rate.append(np.mean(all_success_rates))\n median_success_rate.append(np.median(all_success_rates))\n std_success_rate.append(np.std(all_success_rates))\n\n for i in range(0, len(multipliers)):\n print \"\"\n print \"Multiplier:\", multipliers[i]\n print \"\"\n print \"Mean penalty per {} trials:\".format(trials), mean_penalty[i]\n print \"Median penalty per {} trials:\".format(trials), median_penalty[i]\n print \"Std.Dev. penalty per {} trials:\".format(trials), std_penalty[i]\n\n print \"\"\n print \"Mean trial time:\", mean_trial_time[i]\n print \"Median trial time:\", median_trial_time[i]\n print \"Std.Dev. trial time:\", std_trial_time[i]\n\n print \"\"\n print \"Mean success rate per {} trials:\".format(trials), mean_success_rate[i]\n print \"Median success rate per {} trials:\".format(trials), median_success_rate[i]\n print \"Std.Dev. success rate per {} trials:\".format(trials), std_success_rate[i]", "def mutate(net, seed, noise_std, copy_net=True):\r\n # copy current net\r\n mutated_net = copy.deepcopy(net) if copy_net else net\r\n # set seed for mutation\r\n np.random.seed(seed)\r\n for param in mutated_net.parameters():\r\n noise = torch.tensor(np.random.normal(size=param.shape).astype(np.float32))\r\n param.data += noise * noise_std\r\n \r\n return mutated_net", "def evolve(self, elitism='on', save='off', probability=0.05, rate=0.05):\n if self.state == 'dead':\n\n self.member_fitness = [self.members[i].fitness for i in range(self.size)]\n\n self.fittest_brain = self.members[self.member_fitness.index(max(self.member_fitness))]\n\n if save == 'on':\n self.fittest_brain.save_as('fittest_brain')\n\n self.total_population_fitness = sum(self.member_fitness)\n\n print('Total population fitness is %s' % (self.total_population_fitness))\n\n self.mating_pool = [[self.members[i]] * round(self.member_fitness[i] * 1000 / self.total_population_fitness) for i in range(self.size)]\n\n self.mating_pool = [brain for sublist in self.mating_pool for brain in sublist]\n\n self.children = []\n\n if elitism == 'on':\n\n self.children.append(self.fittest_brain)\n\n for i in range(self.size - 1):\n parent1 = random.choice(self.mating_pool)\n parent2 = random.choice(self.mating_pool)\n child = crossover(parent1, parent2)\n child.mutate(probability, rate)\n self.children.append(child)\n else:\n for i in range(self.size):\n parent1 = random.choice(self.mating_pool)\n parent2 = random.choice(self.mating_pool)\n child = crossover(parent1, parent2)\n child.mutate(probability, rate)\n self.children.append(child)\n\n self.members = self.children\n\n self.members[0].state = 'alive'\n\n self.state = 'alive'\n self.generation += 1\n\n else:\n print('Cannot evolve: some members are still alive')", "def mutate(lr_schedule):\n for i in range(2): # mutate two to increase variance\n # Choose a random key.\n idx = randint(0, len(lr_schedule)-1)\n # Mutate one of the params. 
Will be within (1/10, 10)*Old param 68% of the time\n lr_schedule[idx] = lr_schedule[idx]*math.pow(10, random.normalvariate(0, 1.5)) \n return lr_schedule", "def mut_individual(individual, pexist):\n \n network = individual.network\n for i in network.index.values:\n age = network.loc[i,'age']\n if random.random() < AGEDEP(age, pexist):\n if network.loc[i,'in'] == 1:\n network.loc[i, :] = 0\n network.loc[:, i] = 0\n \n if network.loc[i,'in'] == 0:\n network.loc[i,'in'] = 1\n network.loc[i,'age'] = 1\n for j in network.columns.values[2:]:\n if random.random() < 0.1 and i != j:\n network.loc[i,j] = 1\n network.loc[j,i] = network.at[i,j]\n \n relevant = network.loc[network['in']==1]\n for _ in range(10):\n i = random.choice(relevant.index.values)\n j = random.choice(relevant.columns.values[2:])\n network.loc[i,j] = abs(network.at[i,j]-1)\n network.loc[j,i] = network.at[i,j]\n \n if network.loc[i][1:].sum() == 0:\n network.loc[i,'in'] = 0 \n network.loc[i,'age'] = 0\n \n individual.network = network\n individual.age = 1\n return individual," ]
[ "0.7139885", "0.6918553", "0.6862937", "0.67987317", "0.6480224", "0.6446079", "0.64306605", "0.63981", "0.6360692", "0.6357444", "0.62559646", "0.62238425", "0.61995184", "0.61872697", "0.6183759", "0.6120707", "0.6096974", "0.6002629", "0.60015005", "0.599701", "0.59915847", "0.5989076", "0.59830415", "0.5968423", "0.59647167", "0.59471655", "0.5940044", "0.592021", "0.5907812", "0.58886296" ]
0.78212637
0
Check if a CLOUD device has already been added.
def is_cloud_device_already_added(self): for entry in self._async_current_entries(): if entry.unique_id is not None and entry.unique_id == f"{DOMAIN}Cloud": return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def device_exists(device):\n return os.path.exists('/sys/class/net/%s' % device)", "def addDevice(self, device):\n if device.name in self.devices:\n log.error(\"'%s' already part of '%s'\", device.name, self.name)\n else:\n self.devices[device.name] = device\n return self", "def addDevice(self, device):\n if device.name in self.devices:\n log.error(\"'%s' already part of '%s'\", device.name, self.name)\n else:\n self.devices[device.name] = device\n return self", "def addDevice(self, device):\n if device.name in self.devices:\n log.error(\"'%s' already part of '%s'\", device.name, self.name)\n else:\n self.devices[device.name] = device\n return self", "def exists_device_node(self, device_node: Path) -> bool:\n try:\n self.get_by_path(device_node)\n except HardwareNotFound:\n return False\n return True", "def isExistingSameDevice(config_db, deviceName, table):\n settings = config_db.get_table(table)\n for key,values in settings.items():\n if \"remote_device\" in values and deviceName == values[\"remote_device\"]:\n return True\n\n return False", "def is_existing(self):\n return self.backend.is_existing", "def exists(self):\n return True", "def exists(self):\n return True", "def check_existed_did(self):\n for wallet in self.wallet_state_manager.wallets.values():\n if (\n wallet.type() == WalletType.DECENTRALIZED_ID\n and self.did_info.origin_coin.name() == wallet.did_info.origin_coin.name()\n ):\n self.log.warning(f\"DID {self.did_info.origin_coin} already existed, ignore the wallet creation.\")\n raise ValueError(\"Wallet already exists\")", "def addUIdevice (self, deviceString):\n if deviceString.strip() not in self.UIdevices:\n self.UIdevices += [deviceString]\n else:\n print ( \"%s already in UI the device list\" % deviceString)", "def system_valid(self):\n return self.udev.devices_exist", "def _do_check(self):\n try:\n #breakpoint()\n ApplicationsItem.objects.exists()\n #print (\"Checking\")\n return True\n\n except Exception:\n client.captureException()\n return False", "def exists(self):\n\n if self:\n pass", "async def _exists(self, key):\n return await self.client.append(key, b'')", "def exists(self):\r\n try:\r\n self.refresh()\r\n except:\r\n return False\r\n return True", "def _check(self):\n\t\tif not self._raven:\n\t\t\traise NoDeviceFoundException", "def is_connected(cls, device_config):\n if \"console_port_name\" in device_config[\"persistent\"]:\n address = device_config[\"persistent\"][\"console_port_name\"]\n else:\n address = device_config[\"persistent\"][\"hub_port_name\"]\n return os.path.exists(address)", "def isExist(data):\n return True/False", "def insert_and_check(self, item) -> bool:\n with Monitor.acquire(self):\n if item in self:\n return False\n self.add(item)\n return True", "def item_exists(self, call_number):\n return call_number in self.item_list.keys()", "def has_upnp_devices(self) -> bool:\n return self._has_upnp_devices", "def devices_exist(self):\n return all(r.sys_path_exists for r in self.rules)", "def check_device(self, class_id, vendor_id, product_id):\n if len(self.class_id) > 0 and class_id != self.class_id:\n return False\n\n if len(self.vendor_id) > 0 and vendor_id != self.vendor_id:\n return False\n\n if len(self.devices) > 0 and product_id not in self.devices:\n return False\n\n return True", "def is_create_vendor_present(self):\n return self.is_element_present(self.create_vendor_locator)", "def hasAddOrDelete(self):\n return self.__hasAddOrDelete", "def deviceConnected(self, deviceName):\n if not deviceName:\n return False\n\n for driver in 
self.drivers:\n if not self.scanValid(driver=driver, deviceName=deviceName):\n continue\n\n self.drivers[driver]['uiDropDown'].setStyleSheet(self.BACK_GREEN)\n self.deviceStat[driver] = True\n # self.app.message.emit(f'{driver} connected', 0)\n return True", "def has_client(self):\n \n return len(self._clients) > 0", "def has_client(self):\n \n return len(self._clients) > 0", "def exist(self):" ]
[ "0.6239254", "0.609345", "0.609345", "0.609345", "0.6073092", "0.6065576", "0.6047093", "0.5987568", "0.5987568", "0.5947811", "0.58741647", "0.58736914", "0.58404255", "0.58278227", "0.5810434", "0.5785578", "0.5772156", "0.57598025", "0.5751442", "0.5747727", "0.57093596", "0.56728476", "0.5671144", "0.56695276", "0.56648475", "0.5637624", "0.5636435", "0.5633333", "0.5633333", "0.5628807" ]
0.8269784
0
Load the IMDB reviews dataset. Code adapted from the code for
def load_imdb_dataset(): (x_train, y_train), (x_test, y_test) = imdb.load_data( path="./datasets", num_words=_IMDB_CONFIG["max_features"]) num_train = _IMDB_CONFIG["num_train"] x_train, x_val = x_train[:num_train], x_train[num_train:] y_train, y_val = y_train[:num_train], y_train[num_train:] def preprocess(x, y, max_length): x = sequence.pad_sequences(x, maxlen=max_length) y = onp.array(y) x = onp.array(x) return x, y max_length = _IMDB_CONFIG["max_len"] x_train, y_train = preprocess(x_train, y_train, max_length=max_length) x_val, y_val = preprocess(x_val, y_val, max_length=max_length) x_test, y_test = preprocess(x_test, y_test, max_length=max_length) data_info = {"num_classes": 2} return (x_train, y_train), (x_test, y_test), (x_val, y_val), data_info
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_data(path='./data/train'):\n print(\"Loading IMDB Data...\")\n data = []\n\n dir = os.path.dirname(__file__)\n file_list = glob.glob(os.path.join(dir, path + '/pos/*'))\n file_list.extend(glob.glob(os.path.join(dir, path + '/neg/*')))\n print(\"Parsing %s files\" % len(file_list))\n for i, f in enumerate(file_list):\n with open(f, \"r\", encoding=\"utf8\") as openf:\n s = openf.read()\n data.append(imp.preprocess(s)) # NOTE: Preprocessing code called here on all reviews\n return data", "def load_movies_reviews():\n data = pd.read_csv(CSV_PATH + MOVIES_REVIEWS_CSV_NAME).T.to_dict()\n for i in range(len(data)):\n movie_id = Movies.query.filter(Movies.title == data[i]['Title'].strip()).first().id\n review = data[i]['Reviews'].strip()\n rating = float(data[i]['Rating'])*100000\n review_exist = Reviews.query.filter(Reviews.review == review).first()\n if not review_exist:\n db.session.add(Reviews(movie_id=movie_id, review=review, rating=int(rating)))\n db.session.commit()\n db.session.close()\n db.session.close()", "def load_train_test_imdb_data(data_dir):\r\n\r\n print(\"... IMDB loading \\t\\n\")\r\n data = {}\r\n for split in [\"train\", \"test\"]:\r\n data[split] = []\r\n for sentiment in [\"neg\", \"pos\"]:\r\n score = 1 if sentiment == \"pos\" else 0\r\n\r\n path = os.path.join(data_dir, split, sentiment)\r\n file_names = os.listdir(path)\r\n for f_name in file_names:\r\n with open(os.path.join(path, f_name), encoding=\"latin-1\") as f:\r\n review = f.read()\r\n data[split].append([review, score])\r\n\r\n np.random.shuffle(data[\"train\"]) \r\n\r\n return data[\"train\"], data[\"test\"]", "def prepare_review_data():\n with open(REVIEW_FILE, 'r') as fread:\n reviews = fread.read()\n with open(LABEL_FILE, 'r') as fread:\n labels = fread.read()\n return reviews, labels", "def load_reviews(id_reviews=(), load_polarities=False, load_sentences=False, load_words=False, load_deptrees=False):\n from loacore.conf import DB_TIMEOUT\n reviews = []\n conn = sql.connect(DB_PATH, timeout=DB_TIMEOUT)\n c = conn.cursor()\n if len(id_reviews) > 0:\n for id_review in id_reviews:\n c.execute(\"SELECT ID_Review, Review.ID_File, File_Index, Review \"\n \"FROM Review WHERE ID_Review = \" + str(id_review) + \" ORDER BY File_Index\")\n result = c.fetchone()\n if result is not None:\n reviews.append(Review(result[0], result[1], result[2], result[3]))\n else:\n c.execute(\"SELECT ID_Review, Review.ID_File, File_Index, Review FROM Review\")\n results = c.fetchall()\n for result in results:\n reviews.append(Review(result[0], result[1], result[2], result[3]))\n\n conn.close()\n\n if load_polarities:\n # Load Polarities\n import loacore.load.polarity_load as polarity_load\n polarity_load.load_polarities_in_reviews(reviews)\n\n if load_sentences:\n # Load Sentences\n import loacore.load.sentence_load as sentence_load\n sentence_load.load_sentences_in_reviews(reviews, load_words=load_words, load_deptrees=load_deptrees)\n\n return reviews", "def Preprocess_IMDB(path=\"datasets/raw/aclImdb/\"):\n output_path = \"datasets/preprocessed/IMDB_Data\"\n\n neg = glob.glob(os.path.join(path, 'test', 'neg', '*'))\n neg += glob.glob(os.path.join(path, 'train', 'neg', '*'))\n neg_data = [io.open(fname, 'r', encoding='utf-8').readlines() for fname in neg]\n neg_data = [sentence[0] for sentence in neg_data]\n\n\n pos = glob.glob(os.path.join(path, 'test', 'pos', '*'))\n pos += glob.glob(os.path.join(path, 'train', 'pos', '*'))\n pos_data = [io.open(fname, 'r', encoding='utf-8').readlines() for fname in pos]\n pos_data = 
[sentence[0] for sentence in pos_data]\n\n labels = compute_labels(pos_data, neg_data)\n text, labels = shuffle_data(pos_data + neg_data, labels)\n\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n\n # split data in 70%/20%/10% train/test/dev split\n train_len = ((len(text) / 10) * 7) + (len(text) % 10)\n test_len = (len(text) / 10) * 2\n dev_len = len(text) / 10\n\n trX = text[0:train_len]\n teX = text[train_len:train_len + test_len]\n vaX = text[train_len + test_len: train_len + test_len + dev_len]\n\n trY = labels[0:train_len]\n teY = labels[train_len:train_len + test_len]\n vaY = labels[train_len + test_len: train_len + test_len + dev_len]\n\n dat1 = pd.DataFrame({'label': trY})\n dat2 = pd.DataFrame({'sentence': trX})\n df = dat1.join(dat2)\n df.to_csv(os.path.join(output_path, \"train_binary_sent.csv\"), encoding='utf-8', index=False)\n\n\n dat1 = pd.DataFrame({'label': teY})\n dat2 = pd.DataFrame({'sentence': teX})\n df = dat1.join(dat2)\n df.to_csv(os.path.join(output_path, \"test_binary_sent.csv\"), encoding='utf-8', index=False)\n\n dat1 = pd.DataFrame({'label': vaY})\n dat2 = pd.DataFrame({'sentence': vaX})\n df = dat1.join(dat2)\n df.to_csv(os.path.join(output_path, \"dev_binary_sent.csv\"), encoding='utf-8', index=False)", "def _load_data(self):\n self.mapper = Mapper()\n self.mapper.generate_vocabulary(self.review_summary_file)\n self.X_fwd, self.X_bwd, self.Y = self.mapper.get_tensor(reverseflag=True)\n # Store all the mapper values in a dict for later recovery\n self.mapper_dict = dict()\n self.mapper_dict['seq_length'] = self.mapper.get_seq_length()\n self.mapper_dict['vocab_size'] = self.mapper.get_vocabulary_size()\n self.mapper_dict['rev_map'] = self.mapper.get_reverse_map()\n # Split into test and train data\n self._split_train_tst()", "def load_ratings():\n filepath = \"./seed_data/u.data\"\n ratings = open(filepath)\n\n for rating in ratings:\n rating = rating.rstrip().split()\n\n db_rating = Rating(movie_id=rating[1], user_id=rating[0],\n score=rating[2])\n db.session.add(db_rating)\n\n db.session.commit()", "def load_imdb(path, subset=\"all\", shuffle=True, rnd=2356, vct=CountVectorizer(), fix_k=None, min_size=None, raw=False):\n #analizer = vct.build_tokenizer()\n # C:\\Users\\mramire8\\Documents\\Research\\Oracle confidence and Interruption\\dataset\\aclImdb\\raw-data\n\n data = bunch.Bunch()\n\n if subset in ('train', 'test'):\n data[subset] = load_files(\"{0}/{1}\".format(IMDB_HOME, subset), encoding=\"latin-1\", load_content=True,\n random_state=rnd)\n elif subset == \"all\":\n data[\"train\"] = load_files(\"{0}/{1}\".format(IMDB_HOME, \"train\"), encoding=\"latin-1\", load_content=True,\n random_state=rnd)\n data[\"test\"] = load_files(\"{0}/{1}\".format(IMDB_HOME, \"test\"), encoding=\"latin-1\", load_content=True,\n random_state=rnd)\n else:\n raise ValueError(\n \"subset can only be 'train', 'test' or 'all', got '%s'\" % subset)\n if not raw:\n data = process_data(data, fix_k, min_size, vct)\n\n return data", "def load_data(glove_dict):\n print(\"loading data\")\n filename = check_file('reviews.tar.gz',14839260)\n extract_data(filename)\n dir= os.path.dirname(__file__)\n\n files= glob.glob(os.path.join(dir,\n 'data2/pos/*'))\n files.extend(glob.glob(os.path.join(dir,\n 'data2/neg/*')))\n\n data = np.empty([total_reviews, review_word_limit])\n\n file_idx = 0;\n for f in files:\n with open(f, 'r') as openf:\n s = openf.read()\n s = clean_line(s)\n words = s.split(\" \")\n # for word in words:\n word_count = 0\n while word_count < 
review_word_limit:\n if words:\n word = words.pop(0)\n if(word in string.punctuation or any(char.isdigit() for char in word)):\n continue\n data[file_idx][word_count] = glove_dict.get(word, 0)\n else:\n data[file_idx][word_count] = 0\n word_count += 1\n file_idx += 1\n print(\"file\", file_idx, \"done\")\n print(data[:5])\n # np.save(\"data\", data)\n return data", "def _get_review_data(path, num_samples, train_test_ratio=0.8):\n _download_dataset()\n print(\"Load Data at {}\".format(path))\n reviews, sentiments = [], []\n with open(path, \"r\", encoding=\"utf-8\") as f:\n reader = csv.DictReader(f)\n for line in reader:\n reviews.append(line[\"review\"])\n sentiments.append(int(line[\"sentiment\"]))\n\n # Data shuffle\n random.seed(42)\n zipped = list(zip(reviews, sentiments))\n random.shuffle(zipped)\n reviews, sentiments = zip(*(zipped[:num_samples]))\n reviews, sentiments = np.asarray(reviews), np.asarray(sentiments)\n\n # Train/test split\n num_data, num_train = len(sentiments), int(len(sentiments) * train_test_ratio)\n return (reviews[:num_train], sentiments[:num_train]), (reviews[num_train:], sentiments[num_train:])", "def __init__(self, dir):\n self.metadata_path = dir+\"reviews_metadata.csv\"\n self.word_to_docs_path = dir + \"words_to_file.bin\"\n self.doc_to_words_path = dir + \"file_to_words.bin\"\n self.vocabulary_path = dir+\"vocabulary.dat\"\n self.prod_index = 1\n self.helpfulness_index = 2\n self.score_index = 3\n self.review_id_index = 0\n\n try:\n with open(self.vocabulary_path, \"r\") as voc:\n jsondata = voc.read()\n data = json.loads(jsondata)\n self.vocabulary = data[\"words\"]\n self.word_indexes = data[\"indexes\"]\n except Exception:\n print(\"Cant load vocabulary from: \" + self.vocabulary_path)\n traceback.print_exc()\n exit(1)", "def extract_imdb_reviews(review_file):\n\n print(f'Decoding {review_file} ...')\n with open(review_file, encoding='utf-8') as f:\n raw = f.read()\n\n print('Extracting review text and labels ...')\n trash = {'<sssss>', '-rrb-', '-lrb-'}\n lines = raw.split('\\n')[:-1]\n reviews = []\n for line in lines:\n chunks = line.split('\\t\\t')\n label = chunks[2]\n review = ' '.join(w for w in chunks[3].split() if w not in trash)\n reviews.append((review, label))\n\n return reviews", "def load_data(reviews_path):\n df1 = pd.read_csv(reviews_path)\n #substituting 0 for negative reviews labeled '__label__1' and 1 for positive reviews labeled '__label__2'\n df1 = df1.replace('__label__1', 0)\n df1= df1.replace('__label__2', 1)\n \n return df1", "def load_ratings():\n res = {}\n with open(RATINGS_PATH, newline='', encoding=RATINGS_ENCRYPTION) as csvfile:\n spamreader = csv.reader(csvfile)\n for i, row in enumerate(spamreader):\n if i:\n title = row[3]\n res[title] = imdbData(row)\n return res", "def loadData():\n\tprint \"Loading POS vectorized reviews\"\n\twith open(DATA_PATH, \"rb\") as data_file:\n\t\tdata = cPickle.load(data_file)\n\treturn data", "def get_data():\n dataset = []\n y_labels = []\n # Extract categories\n for cat in movie_reviews.categories():\n # for files in each cateogry \n for fileid in movie_reviews.fileids(cat):\n # Get the words in that category\n words = list(movie_reviews.words(fileid))\n dataset.append((words,cat))\n y_labels.append(cat)\n return dataset,y_labels", "def load_ratings():\n\n print \"Ratings\"\n\n # Delete all rows in table, so if we need to run this a second time,\n # we won't be trying to add duplicate users\n Rating.query.delete()\n\n # Read u.data file and insert data\n for row in 
open(\"seed_data/u.data\"):\n row = row.rstrip()\n user_id, movie_id, score, timestamp = row.split(\"\\t\")\n\n user_id = int(user_id)\n movie_id = int(movie_id)\n score = int(score)\n\n #from rating class take the movie_id and make it equal to the movie_id \n #from the for loop above. We are calling it to make an instance of the rating\n #class\n rating = Rating(movie_id=movie_id, user_id=user_id, score=score)\n \n #We need to add to the session or it won't ever be stored\n db.session.add(rating)\n\n #Once we're done, we should commit our work\n db.session.commit()", "def load_movielens1m(path):\n if not os.path.isfile(path):\n data_dir = os.path.dirname(path)\n if not os.path.exists(os.path.dirname(path)):\n os.makedirs(data_dir)\n download_dataset(\n 'http://files.grouplens.org/datasets/movielens/ml-1m.zip', path)\n\n zp = zipfile.ZipFile(path, 'r')\n content = zp.read('ml-1m/ratings.dat')\n data_list = content.split('\\n')\n\n output1 = open('train', 'w')\n output2 = open('test', 'w')\n num_users = 0\n num_movies = 0\n corpus = []\n for item in data_list:\n term = item.split('::')\n if len(term) < 3:\n continue\n user_id = int(term[0]) - 1\n movie_id = int(term[1]) - 1\n rating = int(term[2])\n corpus.append((user_id, movie_id, rating))\n num_users = max(num_users, user_id + 1)\n num_movies = max(num_movies, movie_id + 1)\n\n corpus_data = np.array(corpus)\n np.random.shuffle(corpus_data)\n np.random.shuffle(corpus_data)\n N = np.shape(corpus_data)[0]\n Ndv = N // 20 * 17\n Ndv2 = N // 10 * 9\n train = corpus_data[:Ndv, :]\n valid = corpus_data[Ndv:Ndv2, :]\n test = corpus_data[Ndv2:, :]\n\n for i in range(np.shape(train)[0]):\n output1.write('%d\\t%d\\t%d\\n' % (train[i, 0], train[i, 1], train[i, 2]))\n output1.close()\n for i in range(np.shape(test)[0]):\n output2.write('%d\\t%d\\t%d\\n' % (test[i, 0], test[i, 1], test[i, 2]))\n output2.close() \n\n return num_movies, num_users, train, valid, test", "def load_movies():\n filepath = \"./seed_data/u.item\"\n movies = open(filepath)\n\n for movie in movies:\n movie = movie.rstrip().split('|')\n title = movie[1][:-7]\n title = title.decode(\"latin-1\")\n if movie[2]:\n date = datetime.strptime(movie[2], '%d-%b-%Y')\n else:\n date = None\n db_movie = Movie(\n movie_id = movie[0], title = title, \n released_at = date, imdb_url = movie[4])\n db.session.add(db_movie)\n\n db.session.commit()", "def load_data2(reviews_path):\n df2 = pd.read_csv(reviews_path)\n # substituting 0 (negative) for all reviews rated 0 to 3 and 1 (positive) for all reviews rated 4-5\n # renaming columns to 'label' containing ratings and 'text' containing reviews to match df1\n df2['label'] = np.where(df2['review_rating'] < 4, 0, 1)\n df2['text'] = df2['review_text']\n df2 = df2 [['text', 'label']]\n return df2", "def load_movies():\n print \"Movies\"\n\n # Delete all rows in table, so if we need to run this a second time,\n # we won't be trying to add duplicate users\n Movie.query.delete()\n\n # Read u.item file and insert data\n for row in open(\"seed_data/u.item\"):\n row =row.rstrip()\n\n movie_id, title_long, released_string, imdb_url = row.split(\"|\")[:4]\n #we modified the datetime format changed released_string into \n #new format by using datetim.strptime to convert it. \n print row\n if released_string: \n release_at = datetime.strptime(released_string, \"%d-%b-%Y\")\n else: \n release_at = None \n\n #here we stripped the title of the (xxxx) year and parenthesis\n #using the slice method. 
\n title = title_long[:-7]\n\n print movie_id, title_long, released_string, imdb_url\n\n #assign the return values from our for loop to a new variable\n movie = Movie(movie_id=movie_id, title=title, released_at=release_at,\n imdb_url=imdb_url)\n \n # We need to add to the session or it won't ever be stored\n db.session.add(movie)\n\n #Once we're done, we should commit our work\n db.session.commit()", "def load_reveiws_dataset(filename):\n review_DataFrame = pd.read_json(filename, lines=True)\n return review_DataFrame", "def prepare_imdb_data(data, labels, should_shuffle=True):\n\n #Combine positive and negative reviews and labels\n data_train = data['train']['pos'] + data['train']['neg']\n data_test = data['test']['pos'] + data['test']['neg']\n labels_train = labels['train']['pos'] + labels['train']['neg']\n labels_test = labels['test']['pos'] + labels['test']['neg']\n\n #Shuffle reviews and corresponding labels within training and test sets\n if should_shuffle:\n data_train, labels_train = shuffle(data_train, labels_train)\n data_test, labels_test = shuffle(data_test, labels_test)\n\n # Return a unified training data, test data, training labels, test labets\n return data_train, data_test, labels_train, labels_test", "def _loadData(self, data):\n Movie._loadData(self, data)\n PlexHistory._loadData(self, data)", "def Classify_Data(self):\n\n lem = lemmatization()\n\n # Get Mongo Client\n client = MongoClient()\n db = client['allMovies']\n collection = db['Movies']\n\n # Path to folder containing the training model files\n path = self.path\n\n # Get the list of doc ids trained\n trained_docs = []\n\n # Mongo queries to retrieve Horror, Romance and Crime movies\n qr1 = self.collection.find({\"content.genres.name\": \"Horror\"})\n qr2 = self.collection.find({\"content.genres.name\": \"Romance\"})\n qr3 = self.collection.find({\"content.genres.name\": \"Crime\"})\n qr4 = self.collection.find({\"content.genres.name\": \"Comedy\"})\n print(\"111\")\n print(qr3)\n\n myfile = open('doc_ids.pkl', 'rb')\n trained_docs = pickle.load(myfile)\n # Get 100 Horror, Romance and Crime movies each, which are not in the trained data set\n\n horr = []\n i = 0\n for rec in qr1:\n if rec['_id'] not in trained_docs:\n i = i + 1\n horr.append(rec)\n\n if i >= 333:\n break\n rom = []\n i = 0\n for rec in qr2:\n if rec['_id'] not in trained_docs:\n i = i + 1\n rom.append(rec)\n\n if i >= 333:\n break\n\n crime = []\n i = 0\n for rec in qr3:\n if rec['_id'] not in trained_docs:\n i = i + 1\n crime.append(rec)\n\n if i >= 334:\n break\n comedy = []\n i = 0\n for rec in qr4:\n if rec['_id'] not in trained_docs:\n i = i + 1\n comedy.append(rec)\n\n if i >= 334:\n break\n\n # Combine the query results\n query_results = []\n for rec in horr:\n query_results.append(rec)\n for rec in rom:\n query_results.append(rec)\n for rec in crime:\n query_results.append(rec)\n print(query_results)\n # Data to be classified\n test_data = []\n\n # Genres of records to be classified\n categories = []\n a = 0\n for movie in query_results:\n test_data.append(movie['content']['overview'])\n for genre in movie['content']['genres']:\n a = a + 1\n if ((genre['name'] == 'Horror') or (genre['name'] == 'Romance') or (genre['name'] == 'Crime') or (\n genre['name'] == 'Comedy') and a <= 80):\n categories.append(genre['name'])\n\n # Lists of training models and vectorizers\n models = [\"SVM\", \"LOGISTIC REGRESSION\", \"GAUSSIAN NB\",\n \"MULTINOMIAL NB\", \"BERNOULLI NB\", \"RANDOM FOREST\", \"BAGGING\", \"GRADIENT\",\n \"Voting\", \"Voting With 
Weights\"]\n\n vectorizers = [\"COUNT VECTORIZER\", \"TFIDF VECTORIZER\"]\n\n # Load dictionary containing terms appearing in genres\n dictionary = joblib.load(path + \"_Genre_Dictionary\")\n\n vec_1 = feature_extraction.text.CountVectorizer(vocabulary=dictionary)\n vec_2 = feature_extraction.text.TfidfVectorizer(vocabulary=dictionary)\n vec_list = [vec_1, vec_2]\n\n # List to store the classification stats for each model\n stats = []\n # Generate results\n for i in range(0, len(models)):\n for j in range(0, len(vectorizers)):\n time0 = time.process_time()\n model = joblib.load(path + models[i] + \"_\" + vectorizers[j].replace('-', '') + \".pkl\")\n vec = vec_list[j]\n Y = vec.fit_transform(test_data).toarray()\n print(\"y\", Y)\n predicted_genres = model.predict(Y)\n\n k = 0\n horror = 0\n romance = 0\n crime = 0\n\n # Keeps track of correct predictions\n y_correct = []\n\n # Keeps track of incorrect predictions\n y_predicted = []\n for pred in predicted_genres:\n if (categories[k] == \"Horror\"):\n if (pred == \"Horror\"):\n horror += 1\n y_predicted.append(0)\n elif (pred == \"Romance\"):\n y_predicted.append(1)\n else:\n y_predicted.append(2)\n y_correct.append(0)\n elif (categories[k] == \"Romance\"):\n if (pred == \"Romance\"):\n romance += 1\n y_predicted.append(1)\n elif (pred == \"Horror\"):\n y_predicted.append(0)\n else:\n y_predicted.append(2)\n y_correct.append(1)\n elif (categories[k] == \"Crime\"):\n if (pred == \"Crime\"):\n crime += 1\n y_predicted.append(2)\n elif (pred == \"Horror\"):\n y_predicted.append(0)\n else:\n y_predicted.append(1)\n y_correct.append(2)\n k = k + 1\n\n # Print results\n score = precision_recall_fscore_support(y_correct, y_predicted, average='weighted')\n # print(\"Number of records classified per second = %d\" % (round((1000/(time.process_time()-time0)),3)))\n print(\"________SCORES__________\")\n print(\"MODEL : \" + models[i])\n print(\"VECTORIZER : \" + vectorizers[j])\n print(\"Horror : %d/333\" % (horror))\n print(\"Romance : %d/333\" % (romance))\n print(\"Crime : %d/334\" % (crime))\n print(\"Precision : %.5f\" % (score[0]))\n print(\"Recall : %.5f\" % (score[1]))\n print(\"F(1) Score : %.5f\" % ((score[1] * score[0] / (score[1] + score[0])) * 2))\n print(\"F(W) Score : %.5f\" % (score[2]))\n print(\"Accuracy : %.5f\" % accuracy_score(y_correct, y_predicted))\n # print(confusion_matrix(y_correct, y_predicted))\n\n dic = {}\n dic['model'] = models[i].title()\n dic['vectorizer'] = vectorizers[j][:-11]\n dic['horror'] = str(horror) + '/' + '333'\n dic['romance'] = str(romance) + '/' + '333'\n dic['crime'] = str(crime) + '/' + '334'\n dic['precision'] = round(score[0], 3)\n dic['Recall'] = round(score[1], 3)\n dic['F(1) Score'] = round(((score[1] * score[0] / (score[1] + score[0])) * 2), 3)\n dic['F(W) Score'] = round(score[2], 3)\n dic['accuracy'] = round(accuracy_score(y_correct, y_predicted), 3)\n stats.append(dic)\n # Store stats in file\n joblib.dump(stats, path + \"classification_results.txt\")\n\n print(\"Done\")\n return stats", "def load_data(max_len: int, vocab_size: int) -> Tuple[NumpyDataset, NumpyDataset]:\n (x_train, y_train), (x_eval, y_eval) = tf.keras.datasets.imdb.load_data(maxlen=max_len, num_words=vocab_size)\n # pad the sequences to max length\n x_train = np.array([pad(x, max_len, 0) for x in x_train])\n x_eval = np.array([pad(x, max_len, 0) for x in x_eval])\n\n train_data = NumpyDataset({\"x\": x_train, \"y\": y_train})\n eval_data = NumpyDataset({\"x\": x_eval, \"y\": y_eval})\n return train_data, eval_data", "def 
get_data(args, load_extracted=True):\n path = args.data_path1\n tokenizer_en = tokener()\n table = str.maketrans(\"\", \"\", '\"#$%&\\'()*+-/:;<=>@[\\\\]^_`{|}~')\n if load_extracted:\n df = load_pickle(\"df_unencoded.pkl\")\n else:\n logger.info(\"Extracting CNN stories...\")\n df = pd.DataFrame(index=[i for i in range(len(os.listdir(path)))], columns=[\"body\", \"highlights\"])\n for idx, file in tqdm(enumerate(os.listdir(path)), total=len(os.listdir(path))):\n with open(os.path.join(path, file), encoding=\"utf8\") as csv_file:\n csv_reader = csv.reader(csv_file)\n text = \"\"\n for row in csv_reader:\n text += \"\".join(t for t in row)\n highlights = re.search(\"@highlight(.*)\", text).group(1)\n highlights = highlights.replace(\"@highlight\", \". \")\n body = text[:re.search(\"@highlight\", text).span(0)[0]]\n df.iloc[idx][\"body\"] = body\n df.iloc[idx][\"highlights\"] = highlights\n \n if len(args.data_path2) > 2:\n path = args.data_path2\n logger.info(\"Extracting dailymail stories...\")\n df1 = pd.DataFrame(index=[i for i in range(len(os.listdir(path)))], columns=[\"body\", \"highlights\"])\n for idx, file in tqdm(enumerate(os.listdir(path)), total=len(os.listdir(path))):\n with open(os.path.join(path, file), encoding=\"utf8\") as csv_file:\n csv_reader = csv.reader(csv_file)\n text = \"\"\n for row in csv_reader:\n text += \"\".join(t for t in row)\n highlights = re.search(\"@highlight(.*)\", text).group(1)\n highlights = highlights.replace(\"@highlight\", \". \")\n body = text[:re.search(\"@highlight\", text).span(0)[0]]\n df1.iloc[idx][\"body\"] = body\n df1.iloc[idx][\"highlights\"] = highlights\n df = pd.concat([df, df1], ignore_index=True)\n del df1\n \n save_as_pickle(\"df_unencoded.pkl\", df)\n logger.info(\"Dataset length: %d\" % len(df)) \n \n if (args.level == \"word\") or (args.level == \"char\"):\n logger.info(\"Tokenizing and cleaning extracted text...\")\n df.loc[:, \"body\"] = df.apply(lambda x: clean_and_tokenize_text(x[\"body\"], table, tokenizer_en), axis=1)\n df.loc[:, \"highlights\"] = df.apply(lambda x: clean_and_tokenize_text(x[\"highlights\"], table, tokenizer_en), \\\n axis=1)\n df.loc[:, \"body_length\"] = df.apply(lambda x: len(x['body']), axis=1)\n df.loc[:, \"highlights_length\"] = df.apply(lambda x: len(x['highlights']), axis=1)\n df = df[(df[\"body_length\"] > 0) & (df[\"highlights_length\"] > 0)]\n \n logger.info(\"Limiting to max features length, building vocab and converting to id tokens...\")\n df = df[df[\"body_length\"] <= args.max_features_length]\n v = vocab(level=args.level)\n v.build_vocab(df[\"body\"])\n v.build_vocab(df[\"highlights\"])\n df.loc[:, \"body\"] = df.apply(lambda x: v.convert_w2idx(x[\"body\"]), axis=1)\n df.loc[:, \"highlights\"] = df.apply(lambda x: v.convert_w2idx(x[\"highlights\"]), axis=1)\n df.loc[:, \"highlights\"] = df.apply(lambda x: pad_sos_eos(x[\"highlights\"], 0, 2), axis=1)\n save_as_pickle(\"df_encoded.pkl\", df)\n save_as_pickle(\"vocab.pkl\", v)\n \n elif args.level == \"bpe\":\n encoder = Encoder(vocab_size=args.bpe_vocab_size, pct_bpe=args.bpe_word_ratio, word_tokenizer=tokenizer_en.tokenize)\n df.loc[:, \"body\"] = df.apply(lambda x: clean_and_tokenize_text(x[\"body\"], table, tokenizer_en, clean_only=True), axis=1)\n df.loc[:, \"highlights\"] = df.apply(lambda x: clean_and_tokenize_text(x[\"highlights\"], table, tokenizer_en, clean_only=True), \\\n axis=1)\n logger.info(\"Training bpe, this might take a while...\")\n text_list = list(df[\"body\"])\n text_list.extend(list(df[\"highlights\"]))\n 
encoder.fit(text_list); del text_list\n \n logger.info(\"Tokenizing to ids and limiting to max features length...\")\n df.loc[:, \"body\"] = df.apply(lambda x: next(encoder.transform([x[\"body\"]])), axis=1)\n df.loc[:, \"highlights\"] = df.apply(lambda x: next(encoder.transform([x[\"highlights\"]])), axis=1)\n df.loc[:, \"body_length\"] = df.apply(lambda x: len(x['body']), axis=1)\n df.loc[:, \"highlights_length\"] = df.apply(lambda x: len(x['highlights']), axis=1)\n df = df[(df[\"body_length\"] > 0) & (df[\"highlights_length\"] > 0)]\n df = df[df[\"body_length\"] <= args.max_features_length]\n \n '''\n logger.info(\"Converting tokens to ids...\")\n df.loc[:, \"body\"] = df.apply(lambda x: next(encoder.transform(list(\" \".join(t for t in x[\"body\"])))),\\\n axis=1)\n df.loc[:, \"highlights\"] = df.apply(lambda x: next(encoder.transform(list(\" \".join(t for t in x[\"highlights\"])))),\\\n axis=1)\n '''\n df.loc[:, \"highlights\"] = df.apply(lambda x: pad_sos_eos(x[\"highlights\"], encoder.word_vocab[\"__sos\"], encoder.word_vocab[\"__eos\"]),\\\n axis=1)\n \n save_as_pickle(\"df_encoded.pkl\", df)\n encoder.save(\"./data/vocab.pkl\")\n return df", "def load_data():\n\n # Load data from categories\n comp = fetch_20newsgroups(subset='all', categories=['comp.graphics', 'comp.sys.mac.hardware', 'comp.windows.x'], \\\n shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))\n science = fetch_20newsgroups(subset='all', categories=['sci.crypt', 'sci.electronics', 'sci.space'], \\\n shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))\n politics = fetch_20newsgroups(subset='all', categories=['talk.politics.guns', 'talk.politics.mideast'], \\\n shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))\n religion = fetch_20newsgroups(subset='all', categories=['alt.atheism', 'soc.religion.christian'], \\\n shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))\n recreation = fetch_20newsgroups(subset='all', categories=['rec.autos', 'rec.sport.baseball', 'rec.sport.hockey'], \\\n shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes'))\n\n # Print total number of documents\n data_len = [len(comp.data), len(science.data), len(politics.data), len(recreation.data), len(religion.data)]\n\n # Subsample classes to create a balanced dataset\n sub_k = min(data_len)\n comp.data, comp.target = [list(t) for t in zip(*random.sample(list(zip(comp.data, comp.target)), sub_k))]\n science.data, science.target = [list(t) for t in zip(*random.sample(list(zip(science.data, science.target)), sub_k))]\n politics.data, politics.target = [list(t) for t in zip(*random.sample(list(zip(politics.data, politics.target)), sub_k))]\n religion.data, religion.target = [list(t) for t in zip(*random.sample(list(zip(religion.data, religion.target)), sub_k))]\n recreation.data, recreation.target = [list(t) for t in zip(*random.sample(list(zip(recreation.data, recreation.target)), sub_k))]\n\n # Subcategories labels\n subcat_comp = np.array(comp.target)\n subcat_scien = np.array(science.target) + len(comp.target_names)\n subcat_polit = np.array(politics.target) + len(comp.target_names) + len(science.target_names)\n subcat_rel = np.array(religion.target) + len(comp.target_names) + len(science.target_names) + len(politics.target_names)\n subcat_rec = np.array(recreation.target) + len(comp.target_names) + len(science.target_names) + len(politics.target_names) + len(religion.target_names)\n\n # Assign labels to train data based on categories\n y_comp = 
np.ones(len(comp.data))\n y_scien = 2*np.ones(len(science.data))\n y_polit = 3*np.ones(len(politics.data))\n y_rel = 4*np.ones(len(religion.data))\n y_rec = 5*np.ones(len(recreation.data))\n labels = np.concatenate((y_comp,y_scien,y_polit,y_rel,y_rec), axis=None)\n\n # Computers\n train_comp, test_comp, y_train_comp, y_test_comp, subcat_comp_train, subcat_comp_test = train_test_split(comp.data, y_comp, subcat_comp, test_size=0.2, random_state=42)\n train_comp, val_comp, y_train_comp, y_val_comp, subcat_comp_train, subcat_comp_val = train_test_split(train_comp, y_train_comp, subcat_comp_train, test_size=0.25, random_state=42)\n\n # Sciences\n train_scien, test_scien, y_train_scien, y_test_scien, subcat_scien_train, subcat_scien_test = train_test_split(science.data, y_scien, subcat_scien, test_size=0.2, random_state=42)\n train_scien, val_scien, y_train_scien, y_val_scien, subcat_scien_train, subcat_scien_val = train_test_split(train_scien, y_train_scien, subcat_scien_train, test_size=0.25, random_state=42)\n\n # Politics\n train_polit, test_polit, y_train_polit, y_test_polit, subcat_polit_train, subcat_polit_test = train_test_split(politics.data, y_polit, subcat_polit, test_size=0.2, random_state=42)\n train_polit, val_polit, y_train_polit, y_val_polit, subcat_polit_train, subcat_polit_val = train_test_split(train_polit, y_train_polit, subcat_polit_train, test_size=0.25, random_state=42)\n\n # Religion\n train_rel, test_rel, y_train_rel, y_test_rel, subcat_rel_train, subcat_rel_test = train_test_split(religion.data, y_rel, subcat_rel, test_size=0.2, random_state=42)\n train_rel, val_rel, y_train_rel, y_val_rel, subcat_rel_train, subcat_rel_val = train_test_split(train_rel, y_train_rel, subcat_rel_train, test_size=0.25, random_state=42)\n\n # Recreation\n train_rec, test_rec, y_train_rec, y_test_rec, subcat_rec_train, subcat_rec_test = train_test_split(recreation.data, y_rec, subcat_rec, test_size=0.2, random_state=42)\n train_rec, val_rec, y_train_rec, y_val_rec, subcat_rec_train, subcat_rec_val = train_test_split(train_rec, y_train_rec, subcat_rec_train, test_size=0.25, random_state=42)\n\n # Corpus from all categories in train set\n newsgroups_train = train_comp + train_scien + train_polit + train_rel + train_rec\n #print(f\"Total number of documents in all categories in the train set is {len(newsgroups_train)}.\")\n train_labels = np.concatenate((y_train_comp,y_train_scien,y_train_polit,y_train_rel,y_train_rec), axis=None)\n #print(train_labels.shape)\n train_subcat = np.concatenate((subcat_comp_train,subcat_scien_train,subcat_polit_train,subcat_rel_train,subcat_rec_train), axis=None)\n #print(train_subcat.shape)\n\n # Corpus from all categories in test set\n newsgroups_test = test_comp + test_scien + test_polit + test_rel + test_rec\n test_labels = np.concatenate((y_test_comp,y_test_scien,y_test_polit,y_test_rel,y_test_rec), axis=None)\n test_subcat = np.concatenate((subcat_comp_test,subcat_scien_test,subcat_polit_test,subcat_rel_test,subcat_rec_test), axis=None)\n\n # Corpus from all categories in validation set\n newsgroups_val = val_comp + val_scien + val_polit + val_rel + val_rec\n val_labels = np.concatenate((y_val_comp,y_val_scien,y_val_polit,y_val_rel,y_val_rec), axis=None)\n val_subcat = np.concatenate((subcat_comp_val,subcat_scien_val,subcat_polit_val,subcat_rel_val,subcat_rec_val), axis=None)\n\n # Data Split\n total = len(test_labels) + len(val_labels) + len(train_labels)\n\n return newsgroups_train, train_labels, newsgroups_test, test_labels, newsgroups_val, val_labels, 
train_subcat, test_subcat, val_subcat", "def read_data(self):\n # This matrix has the following shape: num_movies x num_users\n # The values stored in each row i and column j is the rating for\n # movie i by user j\n self.titles, self.ratings = ratings()\n reader = csv.reader(open('data/sentiment.txt', 'rb'))\n self.sentiment = dict(reader)\n\n self.titlesOnly = []\n\n for entry in self.titles:\n titleOnly = entry[0].split(' (')[0]\n self.titlesOnly.append(titleOnly.lower())\n self.sentiment.update({self.p.stem(k): v for k, v in self.sentiment.items()})" ]
[ "0.75287616", "0.66760135", "0.65806144", "0.6317917", "0.6313057", "0.6215495", "0.61960655", "0.61669785", "0.6146301", "0.6112559", "0.60396117", "0.5993801", "0.59847677", "0.59646434", "0.59450567", "0.5916073", "0.59076846", "0.5888437", "0.5850578", "0.5838023", "0.5837334", "0.58049804", "0.5794639", "0.5735462", "0.5723742", "0.5720875", "0.5719168", "0.57129264", "0.57035667", "0.56937844" ]
0.6858757
1
Parse name and seed for uci regression data. E.g. yacht_2 is the yacht dataset with seed 2.
def _parse_uci_regression_dataset(name_str):
  pattern_string = "(?P<name>[a-z]+)_(?P<seed>[0-9]+)"
  pattern = re.compile(pattern_string)
  matched = pattern.match(name_str)
  if matched:
    name = matched.group("name")
    seed = matched.group("seed")
    return name, seed
  return None, None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_uci_regression_dataset(name,\n split_seed,\n train_fraction=0.9,\n data_dir=\"uci_datasets\"):\n path = os.path.join(data_dir,\n _UCI_REGRESSION_FILENAMES[UCIRegressionDatasets(name)])\n data_arr = onp.load(path)\n x, y = data_arr[\"x\"], data_arr[\"y\"]\n\n indices = jax.random.permutation(jax.random.PRNGKey(split_seed), len(x))\n indices = onp.asarray(indices)\n x, y = x[indices], y[indices]\n\n n_train = int(train_fraction * len(x))\n x_train, y_train = x[:n_train], y[:n_train]\n x_test, y_test = x[n_train:], y[n_train:]\n\n def normalize_with_stats(arr, arr_mean=None, arr_std=None):\n return (arr - arr_mean) / arr_std\n\n def normalize(arr):\n eps = 1e-6\n arr_mean = arr.mean(axis=0, keepdims=True)\n arr_std = arr.std(axis=0, keepdims=True) + eps\n return normalize_with_stats(arr, arr_mean, arr_std), arr_mean, arr_std\n\n x_train, x_mean, x_std = normalize(x_train)\n y_train, y_mean, y_std = normalize(y_train)\n x_test = normalize_with_stats(x_test, x_mean, x_std)\n y_test = normalize_with_stats(y_test, y_mean, y_std)\n\n data_info = {\"y_scale\": float(y_std)}\n\n return (x_train, y_train), (x_test, y_test), data_info", "def load_data(y_name='Species'):\n train_path = tf.keras.utils.get_file(args.TRAIN_URL.split('/')[-1], args.TRAIN_URL)\n test_path = tf.keras.utils.get_file(args.TEST_URL.split('/')[-1], args.TEST_URL)\n\n train = pd.read_csv(train_path, names=args.CSV_COLUMN_NAMES, header=0)\n train_x, train_y = train, train.pop(y_name)\n\n test = pd.read_csv(test_path, names=args.CSV_COLUMN_NAMES, header=0)\n test_x, test_y = test, test.pop(y_name)\n\n return (train_x, train_y), (test_x, test_y)", "def dataset(name):\n t = \"unknown\"\n if name ==\"boston\":\n # regression (506x13feat)\n from sklearn.datasets import load_boston\n X, y = load_boston(return_X_y=True)\n t = \"R\"\n #X,y = shap.datasets.boston()\n #return X,y\n elif name == \"iris\":\n # classification (150x4featx3classes)\n from sklearn.datasets import load_iris\n data = load_iris()\n X = data.data\n y = data.target\n t = \"C\"\n elif name == \"diabetes\":\n # regression (442x10feat)\n from sklearn.datasets import load_diabetes\n X, y = load_diabetes(return_X_y=True)\n t = \"R\"\n elif name == \"digits\":\n # classification (1797x64featx10classes)\n from sklearn.datasets import load_digits\n X, y = load_digits(return_X_y=True)\n t = \"C\"\n elif name == \"wine\":\n # classification (178x13featuresx3classes)\n from sklearn.datasets import load_wine\n X, y = load_wine(return_X_y=True)\n t = \"C\"\n elif name == \"breast_cancer\":\n # classification (569x30featx2classes)\n from sklearn.datasets import load_breast_cancer\n X, y = load_breast_cancer(return_X_y=True)\n t = \"C\"\n elif name ==\"nhanesi\":\n X,y = shap.datasets.nhanesi()\n t = \"R\"\n elif name == \"segments\":\n X,y = make_led()\n t = \"C\"\n elif name == \"segments_sampled\":\n X,y = make_led_sample()\n t = \"C\"\n elif name == \"friedman1\":\n from sklearn.datasets import make_friedman1\n X,y= make_friedman1(n_samples=500, random_state=0)\n print('Done')\n X = pd.DataFrame(X, columns=list(range(X.shape[1])))\n t = 'R'\n elif name == \"friedman2\":\n from sklearn.datasets import make_friedman2\n X,y= make_friedman2(random_state=0)\n t = 'R'\n elif name == 'linear':\n X, y, t = draw_linear_function()\n elif name == \"linear2\":\n importlib.reload(lreg)\n X,y,t = lreg.lf_dataset(nsamples=5000, with_vimp=False)\n elif name == 'friendman3':\n X, y, t = friedman_modified()\n else:\n raise ValueError(\"dataset `{}` not implemented\".format(name))\n 
return X,y,t", "def dataset_preparation():\r\n with open('../data/patterns_num.txt', 'r') as f:\r\n data = f.readlines()\r\n X, Y = [], []\r\n for line in data:\r\n x, y = line.split('\\t')\r\n if len(x) > 5 and x not in X: # better results are achieved excluding short query patterns\r\n X.append(x.replace(\"X\", \"\").replace(\"Y\", \"\").lower())\r\n Y.append(int(y.replace('\\n', '')))\r\n test_size = 0.2\r\n # print('Test size:', test_size, '\\nWrong classifications:\\n')\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=test_size, random_state=42, stratify=Y)\r\n return X_train, y_train, X_test, y_test", "def test_load_UCR_UEA_dataset():\n X, y = load_UCR_UEA_dataset(name=\"UnitTest\")\n assert isinstance(X, pd.DataFrame) and isinstance(y, np.ndarray)\n assert X.shape == (42, 1) and y.shape == (42,)", "def load_data():\n df = pd.read_csv(\"../../Data/breast_cancer_data/data.csv\")\n\n cols = df.columns\n X = df[cols[2:-1]].to_numpy()\n y = df[cols[1]].to_numpy()\n y = (y=='M').astype(np.int) * 2 - 1\n\n train_X = X[:-150]\n train_y = y[:-150]\n\n test_X = X[-150:]\n test_y = y[-150:]\n\n return train_X, train_y, test_X, test_y", "def data(dataset=\"bio_eventrelated_100hz\"):\n # TODO: one could further improve this function with like\n # selectors 'ecg=True, eda=True, restingstate=True' that would\n # find the most appropriate dataset\n\n dataset = dataset.lower()\n\n # TODO: change this path back to \"master\"\n path = \"https://raw.githubusercontent.com/neuropsychology/NeuroKit/dev/data/\"\n\n # Signals as vectors =======================\n if dataset in [\"eeg\", \"eeg_150hz\", \"eeg.txt\"]:\n return pd.read_csv(path + \"eeg.txt\").values[:, 0]\n\n if dataset in [\"rsp\", \"rsp_1000hz\", \"rsp_1000hz.txt\"]:\n return pd.read_csv(path + \"rsp_1000hz.txt\", header=None).values[:, 0]\n\n if dataset in [\"ecg\", \"ecg_1000hz\", \"ecg_1000hz.csv\"]:\n return pd.read_csv(path + \"ecg_1000hz.csv\")[\"ECG\"].values\n\n if dataset in [\"ecg_3000hz\", \"ecg_3000hz.csv\"]:\n return pd.read_csv(path + \"ecg_1000hz.csv\")[\"ECG\"].values\n\n if dataset in [\"eog\", \"veog\", \"eog_100hz\", \"eog_100hz.csv\"]:\n return pd.read_csv(path + \"eog_100hz.csv\")[\"vEOG\"].values\n\n # Dataframes ===============================\n if dataset == \"iris\":\n info = sklearn_datasets.load_iris()\n data = pd.DataFrame(\n info.data, columns=[\"Sepal.Length\", \"Sepal.Width\", \"Petal.Length\", \"Petal.Width\"]\n )\n data[\"Species\"] = info.target_names[info.target]\n return data\n\n if dataset in [\"eogs\", \"eogs_200hz\", \"eog_200hz\", \"eog_200hz.csv\"]:\n return pd.read_csv(path + \"eog_200hz.csv\")\n\n # Add extension\n if dataset in [\"bio_resting_8min_200hz\"]:\n dataset += \".json\"\n\n # Specific case for json file\n if dataset.endswith(\".json\"):\n if \"https\" not in dataset:\n data = pd.read_json(path + dataset, orient=\"index\")\n else:\n data = pd.read_json(dataset, orient=\"index\")\n df = {}\n for participant, row in data.iterrows():\n for _, data_string in row.items():\n data_list = json.loads(data_string)\n data_pd = pd.DataFrame(data_list)\n df[participant] = data_pd\n\n return df\n\n # TODO: Add more EEG (fif and edf datasets)\n if dataset in [\"eeg_1min_200hz\"]:\n\n return pickle.load(\n urllib.request.urlopen(\n \"https://github.com/neuropsychology/NeuroKit/blob/dev/data/eeg_1min_200hz.pickle?raw=true\"\n )\n )\n\n # General case\n file, ext = os.path.splitext(dataset) # pylint: disable=unused-variable\n if ext == \"\":\n df = pd.read_csv(path + dataset + 
\".csv\")\n else:\n if \"https\" not in dataset:\n df = pd.read_csv(path + dataset)\n else:\n df = pd.read_csv(dataset)\n return df", "def parse_input(giant_string):\r\n X_train_part, Y_train_part, X_test_part = giant_string.split(\"XXX\")\r\n\r\n X_train_row_strings = X_train_part.split(\"S\")\r\n X_train_rows = [[float(x) for x in row.split(\",\")] for row in X_train_row_strings]\r\n X_train = np.array(X_train_rows)\r\n\r\n Y_train = concatenated_string_to_array(Y_train_part)\r\n\r\n X_test_row_strings = X_test_part.split(\"S\")\r\n X_test_rows = [[float(x) for x in row.split(\",\")] for row in X_test_row_strings]\r\n X_test = np.array(X_test_rows)\r\n\r\n return X_train, Y_train, X_test", "def load_data():\n data = pd.read_csv(\"https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data\", header=None)\n\n # utiliza somente as duas primeiras classes\n data = data[:100]\n # transforma as classes em 0 e 1\n data[4] = np.where(data.iloc[:, -1] == 'Iris-setosa', 0, 1)\n data = np.asmatrix(data, dtype='float64')\n return data", "def __parse_sample_name(self):\n pattern = '(.*)(P53)(XR|NT)(\\d+)([A-Z]?|Ctr)?.*'\n vals = re.findall(pattern, self.sample_name.replace('_', ''))[0]\n self.cell_type = vals[0]\n self.treatment_type = vals[2]\n self.treatment_time = vals[3]\n if vals[3]:\n self.treatment_repeat = vals[4]", "def parse_input(giant_string):\n X_train_part, Y_train_part, X_test_part = giant_string.split(\"XXX\")\n\n X_train_row_strings = X_train_part.split(\"S\")\n X_train_rows = [[float(x) for x in row.split(\",\")] for row in X_train_row_strings]\n X_train = np.array(X_train_rows)\n\n Y_train = concatenated_string_to_array(Y_train_part)\n\n X_test_row_strings = X_test_part.split(\"S\")\n X_test_rows = [[float(x) for x in row.split(\",\")] for row in X_test_row_strings]\n X_test = np.array(X_test_rows)\n\n return X_train, Y_train, X_test", "def load_data(): \n\tdf = pandas.read_csv('data/iris.data', header=None)\n\ty = df.iloc[0:df.shape[0], 4].values\n\ty = np.where(y == 'Iris-setosa', 0, y)\n\ty = np.where(y == 'Iris-versicolor', 1, y)\n\ty = np.where(y == 'Iris-virginica', 2, y)\n\tx = df.iloc[0:df.shape[0], 0:4].values\n\tx = tuple(x)\n\ty = tuple(y)\n\ttraining_inputs = x[0:40] + x[50:90] + x[100:140]\n\ttraining_results = y[0:40] + y[50:90] + y[100:140]\n\ttraining_data = (training_inputs, training_results)\n\ttest_inputs = x[40:50] + x[90:100] + x[140:150]\n\ttest_results = y[40:50] + y[90:1000] + y[140:150]\n\ttest_data = (test_inputs, test_results)\n\treturn (training_data, test_data)", "def build_data(seed):\n rs = np.random.RandomState(seed)\n\n def y(x):\n \"\"\" y(x) = 1 + 0.3 * x_1 - 0.6 * x_2^2 - 0.2 * x_3^3 + 0.5 x_4^4 \"\"\"\n x1, x2, x3, x4 = x[:, 0], x[:, 1], x[:, 2], x[:, 3]\n return 1 + 0.3 * x1 - 0.6 * x2 ** 2 - 0.2 * x3 ** 3 + 0.5 * x4 ** 4\n\n xtrain = rs.rand(10000, 4)\n xtest = rs.rand(1000, 4)\n ytrain = y(xtrain) + rs.rand(10000) / 10\n ytest = y(xtest) + rs.rand(1000) / 10\n return xtrain, xtest, ytrain, ytest", "def esm1_t6_43M_UR50S():\n return load_model_and_alphabet_hub(\"esm1_t6_43M_UR50S\")", "def _load_vowel_test():\n vowel_data = np.loadtxt(_VOWEL_TEST_PATH, delimiter=',', skiprows=1)\n X = vowel_data[:, -10:]\n y = vowel_data[:, 1].astype(int)\n return (X, y)", "def load():\n filepath = dirname(abspath(__file__))\n##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####\n data = recfromtxt(open(filepath + '/spector.csv',\"rb\"), delimiter=\" \",\n names=True, dtype=float, usecols=(1,2,3,4))\n names = list(data.dtype.names)\n endog = 
array(data[names[3]], dtype=float)\n endog_name = names[3]\n exog = column_stack(data[i] for i in names[:3]).astype(float)\n exog_name = names[:3]\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name = endog_name, exog_name=exog_name)\n return dataset", "def esm1_t34_670M_UR100():\n return load_model_and_alphabet_hub(\"esm1_t34_670M_UR100\")", "def feature_extraction(_data):\n # Find the digits in the given string Example - data='18-20' digits = '1820'\n digits = str(''.join(c for c in _data if c.isdigit()))\n # calculate the length of the string\n len_digits = len(digits)\n # splitting digits in to values example - digits = '1820' ages = [18, 20]\n ages = [int(digits[i:i + 2]) for i in range(0, len_digits, 2)]\n # checking for special character in the given data\n special_character = '.+-<>?'\n spl_char = ''.join([c for c in list(special_character) if c in _data])\n # handling decimal age data\n if len_digits == 3:\n spl_char = '.'\n age = \"\".join([str(ages[0]), '.', str(ages[1])])\n # normalizing\n age = int(float(age) - 0.5)\n ages = [age]\n # Finding the maximum, minimum, average age values\n max_age = 0\n min_age = 0\n mean_age = 0\n if len(ages):\n max_age = max(ages)\n min_age = min(ages)\n if len(ages) == 2:\n mean_age = int((max_age + min_age) / 2)\n else:\n mean_age = max_age\n # specially added for 18 years cases\n only_18 = 0\n is_y = 0\n if ages == [18]:\n only_18 = 1\n if 'y' in _data or 'Y' in _data:\n is_y = 1\n under_18 = 0\n if 1 < max_age < 18:\n under_18 = 1\n above_65 = 0\n if mean_age >= 65:\n above_65 = 1\n # verifying whether digit is found in the given string or not.\n # Example - data='18-20' digits_found=True data='????' digits_found=False\n digits_found = 1\n if len_digits == 1:\n digits_found = 1\n max_age, min_age, mean_age, only_18, is_y, above_65, under_18 = 0, 0, 0, 0, 0, 0, 0\n elif len_digits == 0:\n digits_found, max_age, min_age, mean_age, only_18, is_y, above_65, under_18 = -1, -1, -1, -1, -1, -1, -1, -1\n \n feature = {\n 'ages': tuple(ages),\n 'len(ages)': len(ages),\n 'spl_chr': spl_char,\n 'is_digit': digits_found,\n 'max_age': max_age,\n 'mean_age': mean_age,\n 'only_18': only_18,\n 'is_y': is_y,\n 'above_65': above_65,\n 'under_18': under_18\n }\n\n return feature", "def import_data(seed: object = 42) -> object:\n\n # Read input data\n df = pd.read_csv(\"x_train_gr_smpl.csv\").astype(int)\n\n # label data-frame rows based on sample data\n for x in range(10):\n index = ~pd.read_csv(\"y_train_smpl_%s.csv\" % x, squeeze=True).astype(bool) # reversed flags (~)\n df.loc[index, 'label'] = str(x)\n\n input_data_ordered = df.iloc[:, 0:2304].to_numpy()\n output_data_ordered = df.iloc[:, 2304].to_numpy()\n\n # Randomise instance order (forcing the same result each time)\n np.random.seed(seed)\n permutation = np.random.permutation(df.shape[0])\n\n # Create base input and output arrays\n input_data = input_data_ordered[permutation]\n output_data = output_data_ordered[permutation]\n\n return input_data, output_data, df, input_data_ordered, output_data_ordered", "def load_data():\n data = pd.read_csv('datasets/housing.csv')\n prices = data['MEDV']\n features = data.drop(['MEDV'], axis=1) # remove it from data as we need to predict it\n print(data.head()) # prints top columns 5 for ex\n return [features, prices]", "def reading_data(fname,goal):\n \n #Reading of the EEG data\n data = pd.read_csv(fname)\n events_fname = fname.replace('_data','_events')\n labels= pd.read_csv(events_fname)\n\n if goal==\"training\":\n 
data=data.drop(['id' ], axis=1)#remove id\n labels=labels.drop(['id' ], axis=1)#remove id\n elif goal==\"testing\":\n labels=labels.drop(['id' ], axis=1)\n else:\n raise SystemExit(\"The goal variable is unknown for the function\")\n\n return data, labels", "def parse_IAU_name(name):\n # First see if there is a source type acronym\n if diag:\n print \"parse_IAU_name: received\",name\n parts = name.split()\n if len(parts) == 1:\n designation = parts[0]\n elif len(parts) == 2:\n acronym, designation = parts\n else:\n raise(\"Invalid format: \"+name)\n # Now process the designation\n flag = designation[0].upper()\n if flag == \"G\":\n # Galactic coordinates\n longitude,latitude,sign = split_on_sign(name[1:])\n X = parse_decimal_angle(longitude)\n Y = parse_decimal_angle(latitude)\n elif flag == \"J\":\n # Julian epoch celestial coordinates\n ra,dec,sign = split_on_sign(name[1:])\n X = parse_sexagesimal_angle(ra)\n Y = parse_sexagesimal_angle(dec)\n elif flag == \"B\":\n # Besselian epoch celestial coordinates\n ra,dec,sign = split_on_sign(name[1:])\n X = parse_sexagesimal_angle(ra)\n Y = parse_sexagesimal_angle(dec)\n elif designation[0].isdigit():\n # This should be Besselian but who knows?\n # If it is Besselian there should be at least four digits in RA\n # otherwise it could be galactic\n x,y,sign = split_on_sign(name)\n if len(x) > 3:\n X = parse_sexagesimal_angle(x)\n Y = parse_sexagesimal_angle(y)\n flag = \"B\"\n else:\n X = parse_decimal_angle(x)\n Y = parse_decimal_angle(y)\n flag = \"G\"\n else:\n return \"?\",None,None\n if sign == \"-\":\n Y = -Y\n return flag,X,Y", "def _process_input_seed(self):\n\n Tcmb = 2.72548 * u.K # 0.00057 K\n Tfir = 70 * u.K\n ufir = 0.2 * u.eV / u.cm ** 3\n Tnir = 5000 * u.K\n unir = 0.2 * u.eV / u.cm ** 3\n\n # Allow for seed_photon_fields definitions of the type 'CMB-NIR-FIR' or 'CMB'\n if type(self.seed_photon_fields) != list:\n self.seed_photon_fields = self.seed_photon_fields.split('-')\n\n self.seeduf = {}\n self.seedT = {}\n self.seedisotropic = {}\n self.seedtheta = {}\n for idx, inseed in enumerate(self.seed_photon_fields):\n if isinstance(inseed, six.string_types):\n if inseed == 'CMB':\n self.seedT[inseed] = Tcmb\n self.seeduf[inseed] = 1.0\n self.seedisotropic[inseed] = True\n elif inseed == 'FIR':\n self.seedT[inseed] = Tfir\n self.seeduf[inseed] = (ufir / (ar * Tfir ** 4)).decompose()\n self.seedisotropic[inseed] = True\n elif inseed == 'NIR':\n self.seedT[inseed] = Tnir\n self.seeduf[inseed] = (unir / (ar * Tnir ** 4)).decompose()\n self.seedisotropic[inseed] = True\n else:\n log.warning('Will not use seed {0} because it is not '\n 'CMB, FIR or NIR'.format(inseed))\n raise TypeError\n elif type(inseed) == list and (len(inseed) == 3 or len(inseed) == 4):\n isotropic = len(inseed) == 3\n\n if isotropic:\n name, T, uu = inseed\n self.seedisotropic[name] = True\n else:\n name, T, uu, theta = inseed\n self.seedisotropic[name] = False\n self.seedtheta[name] = validate_scalar('{0}-theta'.format(name),\n theta, physical_type='angle')\n\n validate_scalar('{0}-T'.format(name), T, domain='positive',\n physical_type='temperature')\n self.seed_photon_fields[idx] = name\n self.seedT[name] = T\n if uu == 0:\n self.seeduf[name] = 1.0\n else:\n # pressure has same physical type as energy density\n validate_scalar('{0}-u'.format(name), uu,\n domain='positive', physical_type='pressure')\n self.seeduf[name] = (uu / (ar * T ** 4)).decompose()\n else:\n log.warning(\n 'Unable to process seed photon field: {0}'.format(inseed))\n raise TypeError", "def 
mk_test(input_data):\r\n\r\n\ttrend, h, p, z, Tau, s, var_s, slope, intercept = mk.original_test(input_data)\r\n\r\n\treturn trend, h, p, z, Tau, s, var_s, slope, intercept", "def prepare_data_train(fname):\n # Read data\n data = pd.read_csv(fname)\n # events file\n events_fname = fname.replace('_data','_events')\n # read event file\n labels= pd.read_csv(events_fname)\n clean=data.drop(['id' ], axis=1)#remove id\n labels=labels.drop(['id' ], axis=1)#remove id\n return clean,labels", "def prepare_data_train(fname):\n # Read data\n data = pd.read_csv(fname)\n # events file\n events_fname = fname.replace('_data','_events')\n # read event file\n labels= pd.read_csv(events_fname)\n clean=data.drop(['id' ], axis=1)#remove id\n labels=labels.drop(['id' ], axis=1)#remove id\n return clean,labels", "def prepare_data_train(fname):\n # Read data\n data = pd.read_csv(fname)\n # events file\n events_fname = fname.replace('_data','_events')\n # read event file\n labels= pd.read_csv(events_fname)\n clean=data.drop(['id' ], axis=1)#remove id\n labels=labels.drop(['id' ], axis=1)#remove id\n return clean,labels", "def load_demo():\n\tprint(\"\"\"\n\tBreast Cancer Wisconsin dataset. It contains a total of 569 samples of tumor and malignant cells. \n\tData labeled 1 corresponds to malignant cells, while data labeled 0 corresponds to benign cells. \n\tThe 30 characteristics contain real values obtained from images of cell nuclei. For more information:\n\n\t\t\thttp://archive.ics.uci.edu/ml/datasets/breast+cancer+wisconsin+(diagnostic)\n\n\n\tThe returned value is a dictionary where 'x_data' are the predictor variables, 'y_data' the class \n\tlabels and 'features' the name of the characteristics.\n\t\"\"\")\n\tpath = '/'.join(os.path.abspath(pywinEA.__file__).split('/')[:-1])\n\t\n\tdata = pd.read_csv(path+'/dataset/data/BreastCancerWisconsin.csv', index_col=0)\n\tx_data = data.iloc[:, 1:].values\n\ty_data = data.iloc[:, 0].values\n\tfeatures = data.columns[1:].values\n\n\t# Transform labels\n\ty_data[np.where(y_data == 'M')] = 1\n\ty_data[np.where(y_data == 'B')] = 0\n\ty_data = y_data.astype(int)\n\n\treturn {'x_data': x_data, 'y_data': y_data, 'features': features}", "def parse_modelname(string,labellist,ensemblesfolder):\n ## We need to account for two different prefixes now. \n split_ens_temp = ensemble_template.split(\"{f}\")\n template_prefix = split_ens_temp[0]\n\n template_seedind = split_ens_temp[1].split(\"{s}\")[0]\n if string.startswith(template_prefix): ## TODO or other prefix\n frames,seedext = string.split(template_prefix)[-1].split(template_seedind)\n seed=seedext.split(\"results.json\")[0]\n return {\"name\":string,\n \"frames\":int(frames),\n \"seed\":int(seed),\n \"template\":ensemble_template,\n \"outliers\":determine_outliers(labellist,int(seed),int(frames)),\n }", "def fixture_microbial_sample_name():\n return \"microbial_name_test\"" ]
[ "0.6082497", "0.5352037", "0.5263292", "0.5021193", "0.49601898", "0.48798177", "0.4859456", "0.48388806", "0.48280886", "0.48230565", "0.48205665", "0.4811698", "0.4808677", "0.4778711", "0.477406", "0.4759555", "0.47592923", "0.47398236", "0.47390524", "0.4733845", "0.47287226", "0.46976715", "0.468585", "0.46762756", "0.46732804", "0.46732804", "0.46732804", "0.46675482", "0.46493044", "0.46306705" ]
0.730633
0
Reshapes batch to have first axes size equal n_split.
def batch_split_axis(batch, n_split):
  x, y = batch
  n = x.shape[0]
  n_new = n / n_split
  assert n_new == int(n_new), (
      "First axis cannot be split: batch dimension was {} when "
      "n_split was {}.".format(x.shape[0], n_split))
  n_new = int(n_new)
  return tuple(arr.reshape([n_split, n_new, *arr.shape[1:]]) for arr in (x, y))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _reshape_output_batch(self, number, output):\n #tt = cutotime('reshape')\n #tt.start()\n output = output.reshape(self.output_shapes[number]) # batch, h, w, 3, (5 + 80)\n #tt.stop()\n return output", "def split_last_dimension(x, n):\n x_shape = shape_list(x)\n m = x_shape[-1]\n if isinstance(m, int) and isinstance(n, int):\n assert m % n == 0\n return tf.reshape(x, x_shape[:-1] + [n, m // n])", "def split_last_dimension(x, n):\r\n old_shape = x.get_shape().dims\r\n last = old_shape[-1]\r\n new_shape = old_shape[:-1] + [n] + [last // n if last else None]\r\n ret = tf.reshape(x, tf.concat([tf.shape(x)[:-1], [n, -1]], 0))\r\n ret.set_shape(new_shape)\r\n return tf.transpose(ret,[0,2,1,3])", "def split_last_dimension(x, n):\n old_shape = x.get_shape().dims\n last = old_shape[-1]\n new_shape = old_shape[:-1] + [n] + [last // n if last else None]\n ret = tf.reshape(x, tf.concat([tf.shape(x)[:-1], [n, -1]], 0))\n ret.set_shape(new_shape)\n return tf.transpose(ret, [0, 2, 1, 3])", "def reshape_to_batch(array):\n if len(array.shape) == 2:\n array = numpy.expand_dims(array, axis=2)\n array = numpy.expand_dims(array, axis=0)\n return array", "def split_last_dim(self, x, n):\n old_shape = list(x.size())\n last = old_shape[-1]\n new_shape = old_shape[:-1] + [n] + [last // n if last else None]\n ret = x.view(new_shape)\n return ret.permute(0, 2, 1, 3)", "def split_last_dim(self, x, n):\n old_shape = list(x.size())\n last = old_shape[-1]\n new_shape = old_shape[:-1] + [n] + [last // n if last else None]\n ret = x.view(new_shape)\n return ret.permute(0, 2, 1, 3)", "def split_last_dim(self, x, n):\n old_shape = list(x.size())\n last = old_shape[-1]\n new_shape = old_shape[:-1] + [n] + [last // n if last else None]\n ret = x.view(new_shape)\n return ret.permute(0, 2, 1, 3)", "def _reshape_batch(inputs, size, batch_size):\n batch_inputs = []\n for length_id in range(size):\n batch_inputs.append(np.array([inputs[batch_id][length_id]\n for batch_id in range(batch_size)], dtype=np.int32))\n return batch_inputs", "def _reshape_batch(inputs, size, batch_size):\n batch_inputs = []\n for length_id in range(size):\n batch_inputs.append(np.array([inputs[batch_id][length_id]\n for batch_id in range(batch_size)], dtype=np.int32))\n return batch_inputs", "def _reshape_batch(inputs, size, batch_size):\n batch_inputs = []\n for length_id in range(size):\n batch_inputs.append(np.array([inputs[batch_id][length_id]\n for batch_id in range(batch_size)], dtype=np.int32))\n return batch_inputs", "def _reshape(self, arr: np.ndarray) -> np.ndarray:\n return arr.reshape(self.TileHeight.value, self.TileWidth.value, self.bands,)", "def split(array, nrows, ncols):\r\n r, h = array.shape\r\n return (array.reshape(h//nrows, nrows, -1, ncols)\r\n .swapaxes(1, 2)\r\n .reshape(-1, nrows, ncols))", "def _reshape_channels(x):\n assert x.dim() == 4\n batch_size, nc, h, w = x.size()\n x_t = x.view(batch_size, nc, -1).transpose(1, 2).contiguous()\n x_t = x_t.view(batch_size, h, w, nc)\n return x_t", "def reshape(x, shape):\n return Reshape(shape)(x)", "def split(x, axis, split_size):\n assert axis < x.ndim, 'Dimension out of range!'\n\n if isinstance(split_size, int):\n _split_size = [x.shape[axis] // split_size] * split_size\n\n elif isinstance(split_size, (list, tuple)):\n _split_size = split_size\n else:\n raise TypeError\n\n if x.ndim == 0:\n\n return [x for _ in range(len(_split_size))]\n\n return T.split(x, splits_size=_split_size, n_splits=len(_split_size), axis=axis)", "def batch_split(self) -> np.array:\n pass", "def 
batchify(data, batch_size):\n n_batch = data.shape[0] // batch_size\n data = data[:n_batch * batch_size]\n data = data.reshape((batch_size, n_batch)).T\n return data", "def reshape_d(sequence, batch_size, num_steps):\n batch_length = batch_size * num_steps\n num_batches = sequence // batch_size\n if num_batches * batch_length > (len(sequence) - 1):\n num_batches -= 1\n # Round up batch\n X = sequence[: num_batches * batch_length]\n y = sequence[1: num_batches * batch_length + 1]\n X_splits = np.split(X, batch_size)\n y_splits = np.split(y, batch_size)\n # Stack batches\n X = np.stack(X_splits)\n y = np.stack(y_splits)\n return X, y", "def _reshape_feature(self, X, size):\n new_shape = (X.shape[0],) + size + (X.shape[-1],)\n return X.reshape(new_shape)", "def reshape(arr):\r\n reshape_arr = np.empty((3,240,320),dtype='float32')\r\n reshape_arr[0,:,:] = arr[:,:,0]\r\n reshape_arr[1,:,:] = arr[:,:,1]\r\n reshape_arr[2,:,:] = arr[:,:,2]\r\n return reshape_arr", "def windows_partition(x, window_size):\n\n B, H, W, C = x.shape\n x = x.reshape([B, H//window_size, window_size, W//window_size, window_size, C])\n x = x.transpose([0, 1, 3, 2, 4, 5])\n x = x.reshape([-1, window_size, window_size, C]) #(num_windows*B, window_size, window_size, C)\n return x", "def squeeze_batch_dims(inp, op, inner_rank, name=None):\n with ops.name_scope(name, \"squeeze_batch_dims\", [inp]):\n inp = ops.convert_to_tensor(inp, name=\"input\")\n shape = inp.shape\n\n inner_shape = shape[-inner_rank:]\n if not inner_shape.is_fully_defined():\n inner_shape = array_ops.shape(inp)[-inner_rank:]\n\n batch_shape = shape[:-inner_rank]\n if not batch_shape.is_fully_defined():\n batch_shape = array_ops.shape(inp)[:-inner_rank]\n\n if isinstance(inner_shape, tensor_shape.TensorShape):\n inp_reshaped = array_ops.reshape(inp, [-1] + inner_shape.as_list())\n else:\n inp_reshaped = array_ops.reshape(\n inp, array_ops.concat(([-1], inner_shape), axis=-1))\n\n out_reshaped = op(inp_reshaped)\n\n out_inner_shape = out_reshaped.shape[-inner_rank:]\n if not out_inner_shape.is_fully_defined():\n out_inner_shape = array_ops.shape(out_reshaped)[-inner_rank:]\n\n out = array_ops.reshape(\n out_reshaped, array_ops.concat((batch_shape, out_inner_shape), axis=-1))\n\n out.set_shape(inp.shape[:-inner_rank] + out.shape[-inner_rank:])\n return out", "def reshape(data):\n return K.reshape(x=data, shape=(K.shape(data)[0], 1, reshape_size))", "def split_into_n_states(inp, n):\n *start, m = shapes_list(inp)\n out = tf.reshape(inp, start + [n, m // n])\n return out", "def split_into_n_states(inp, n):\n *start, m = shapes_list(inp)\n out = tf.reshape(inp, start + [n, m // n])\n return out", "def reshape(x, shape):\n if x.shape == shape:\n return chainer.as_variable(x)\n y, = Reshape(shape).apply((x,))\n return y", "def reshape(self, *shape):\n return F.Reshape.apply(self, shape)", "def split_heads(self, x, batch_size):\n x = tf.reshape(x, (batch_size, -1, self.h, self.depth))\n return tf.transpose(x, perm=[0, 2, 1, 3])", "def blockshaped(arr, nrows, ncols):\r\n\t h, w = arr.shape\r\n\t return (arr.reshape(h//nrows, nrows, -1, ncols)\r\n\t .swapaxes(1,2)\r\n\t .reshape(-1, nrows, ncols))" ]
[ "0.69617146", "0.68236685", "0.66688055", "0.6664411", "0.65866363", "0.65084076", "0.65084076", "0.65084076", "0.6464381", "0.6412519", "0.6412519", "0.6395728", "0.6334293", "0.630652", "0.6277924", "0.62474674", "0.61763036", "0.61714095", "0.6166665", "0.61203986", "0.6088742", "0.6058957", "0.6057854", "0.6024649", "0.6013037", "0.6013037", "0.59323484", "0.5861982", "0.5827883", "0.5818059" ]
0.72161376
0
Iterate through the spike waveforms belonging in the current trace view.
def _iter_spike_waveforms(
        interval=None, traces_interval=None, model=None, supervisor=None,
        n_samples_waveforms=None, get_best_channels=None, show_all_spikes=False):
    m = model
    p = supervisor
    sr = m.sample_rate
    a, b = m.spike_times.searchsorted(interval)
    s0, s1 = int(round(interval[0] * sr)), int(round(interval[1] * sr))
    ns = n_samples_waveforms
    k = ns // 2
    for show_selected in (False, True):
        for i in range(a, b):
            t = m.spike_times[i]
            c = m.spike_clusters[i]
            is_selected = c in p.selected
            # Show non selected spikes first, then selected spikes so that they appear on top.
            if is_selected is not show_selected:
                continue
            # Skip non-selected spikes if requested.
            if (not show_all_spikes and c not in supervisor.selected):
                continue
            # cg = p.cluster_meta.get('group', c)
            channel_ids, channel_amps = get_best_channels(c)
            s = int(round(t * sr)) - s0
            # Skip partial spikes.
            if s - k < 0 or s + k >= (s1 - s0):  # pragma: no cover
                continue
            # Extract the waveform.
            wave = Bunch(
                data=traces_interval[s - k:s + ns - k, channel_ids],
                channel_ids=channel_ids,
                start_time=(s + s0 - k) / sr,
                spike_id=i,
                spike_time=t,
                spike_cluster=c,
                channel_amps=channel_amps,  # for each of the channel_ids, the relative amp
                select_index=p.selected.index(c) if c in p.selected else None,
            )
            assert wave.data.shape == (ns, len(channel_ids))
            yield wave
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_waveforms(self, key, _):\n if key == self.controls.Arrays.WAVEFORMS:\n self.trace_lines[0].set_ydata(self.pv_monitor.arrays[key][0])\n self.trace_lines[1].set_ydata(self.pv_monitor.arrays[key][1])\n self.draw()", "def waveforms(self):\n return list(self._waveforms)", "def getSpikes(self, compatible_output=False, gather=True):\n global controller\n timer = None\n if conf.config.getboolean(\"Reports\", \"outputTimesForSections\"):\n timer = Timer()\n timer.start_timing()\n spikes = self.vertex.getSpikes(controller, controller.dao.run_time, compatible_output)\n\n if conf.config.getboolean(\"Reports\", \"outputTimesForSections\"):\n timer.take_sample()\n return spikes", "def iter_spectra(self):\n for record in self.session.query(SpectrumLibraryIndexRecord).order_by(\n SpectrumLibraryIndexRecord.number).yield_per(10000):\n yield record", "def AllSpikeTimes(self):\n blah = []\n for neur in self.neurons:\n blah.append(np.array(neur.spikes))\n\n return blah", "def set_spike_data(self):\n self.spike_record = {l: self.network.monitors['{:}_spikes'.format(l)].get('s') for l in self.network.layers}", "def get_ser_spktimes(self):\n\n spktimes = []\n for sweep_no in range(self.get_no_sweeps()):\n spktimes_singlesweep = []\n for cell_no in range(self.get_no_ser_neurons()):\n spktimes_singlesweep.append(\n np.where(self.ser_spktrains[sweep_no, cell_no, :] > 0.5)[0]\n * self.get_dt()\n )\n spktimes.append(spktimes_singlesweep)\n return spktimes", "def _get_all_spectra(self):\n pass", "def __iter__(self):\n\n batch_sp = []\n batch_noise = []\n batch_mix = []\n batch_count = 0\n\n while True:\n\n # Randomizing wav lists\n random.shuffle(self._lst_spk_files)\n random.shuffle(self._lst_noise_files)\n\n for spk_file, noise_file in zip(self._lst_spk_files, self._lst_noise_files):\n\n # Read wav files\n sig_spk = self.__read_wav_file(spk_file)\n sig_noise = self.__read_wav_file(noise_file)\n\n # Align signal\n min_length = min(sig_spk.shape[0], sig_noise.shape[0])\n\n if min_length < self._fftsize:\n raise Exception(\"ERROR: Too short signals in dataset\")\n\n sig_spk = sig_spk[:min_length]\n sig_noise = sig_noise[:min_length]\n\n # Generate need SNR\n need_snr = random.uniform(self._min_snr, self._max_snr)\n\n # Calc scaled signals\n sig_spk, sig_noise = self.__mix_with_snr(sig_spk, sig_noise, need_snr)\n\n # Calc STFT\n stft_spk = stft(sig_spk, fftsize=self._fftsize, overlap=self._overlap)\n stft_noise = stft(sig_noise, fftsize=self._fftsize, overlap=self._overlap)\n stft_mix = stft_spk + stft_noise\n\n # Skip small segments\n frames, bin = stft_mix.shape\n if frames <= self._context_size:\n continue\n\n # Collect batch\n i = 0\n while i + self._context_size < frames:\n\n batch_sp.append(stft_spk[i:i + self._context_size, :])\n batch_noise.append(stft_noise[i:i + self._context_size, :])\n batch_mix.append(stft_mix[i:i + self._context_size, :])\n\n i += self._context_size // 2\n batch_count += 1\n\n if batch_count == self._batch_size:\n sp = np.array(batch_sp).reshape((self._batch_size,\n self._context_size, -1))\n noise = np.array(batch_noise).reshape((self._batch_size,\n self._context_size, -1))\n mix = np.array(batch_mix).reshape((self._batch_size,\n self._context_size, -1))\n yield sp, noise, mix\n\n batch_sp = []\n batch_noise = []\n batch_mix = []\n batch_count = 0", "def iterate_tsne(self):\n send_stop_event = False\n if self.analysis is None:\n return\n if not self._stop_iter:\n self.timer_count = self.timer_count + 1\n for j in range(self.iters_per_frame):\n self.analysis.do_iteration()\n 
self._iter_count = self.timer_count * self.iters_per_frame + j\n self.status.showMessage(f\"Iteration: {self._iter_count}\")\n\n if self.timer_count == self.num_frames - 1:\n self._stop_iter = True\n send_stop_event = True\n self.timer_count = 0\n self.status.showMessage(\"Iteration: Completed\")\n\n # Update point positions\n self.embedding_viewer.update_plot(self.analysis.embedding)\n else:\n if self.timer_count % 10 == 0:\n self.embedding_viewer.force_refresh()\n\n if send_stop_event:\n self.embedding_viewer.force_refresh()\n time.sleep(0.1)\n self.analysis_stopped(self.analysis,\n self.embedding_viewer.get_figure_as_buffer())", "def train(self):\n \n for demo_traj in self._demo_trajs:\n\n interpolate = interp1d(self._phase._z, demo_traj, kind='cubic')\n\n #strech the trajectory to fit 0 to 1\n stretched_demo = interpolate(self._phase._z)[None,:]\n\n #compute the weights of the trajectory using the basis function\n w_demo_traj = np.dot(np.linalg.inv(np.dot(self._Phi, self._Phi.T) + 1e-12*np.eye(self._n_bfs) ), np.dot(self._Phi, stretched_demo.T)).T # weights for each trajectory\n \n #append the weights to the list\n self._W.append(w_demo_traj.copy())\n\n self._W = np.asarray(self._W).squeeze()\n \n # mean of weights\n self._mean_W = np.mean(self._W, axis=0)\n \n # covariance of weights\n # w1 = np.array(map(lambda x: x - self._mean_W.T, self._W))\n # self._sigma_W = np.dot(w1.T, w1)/self._W.shape[0]\n\n self._sigma_W = np.cov(self._W.T)", "def extract_spikes(spike_data, spt_dict, sp_win,\n resample=1, contacts='all'):\n sp_data = spike_data['data']\n n_contacts = spike_data['n_contacts']\n\n if contacts == \"all\":\n contacts = np.arange(n_contacts)\n elif isinstance(contacts, int):\n contacts = np.array([contacts])\n else:\n contacts = np.asarray(contacts)\n\n FS = spike_data['FS']\n spt = spt_dict['data']\n idx = np.arange(len(spt))\n inner_idx = filter_spt(spike_data, spt_dict, sp_win)\n outer_idx = idx[~np.in1d(idx, inner_idx)]\n\n indices = (spt / 1000.0 * FS).astype(np.int32)\n win = (np.asarray(sp_win) / 1000.0 * FS).astype(np.int32)\n time = np.arange(win[1] - win[0]) * 1000.0 / FS + sp_win[0]\n n_contacts, n_pts = sp_data.shape\n\n # auxiliary function to find a valid spike window within data range\n minmax = lambda x: np.max([np.min([n_pts, x]), 0])\n spWave = np.zeros((len(time), len(spt), len(contacts)),\n dtype=np.float32)\n\n for i in inner_idx:\n sp = indices[i]\n spWave[:, i, :] = np.atleast_2d(sp_data[contacts,\n sp + win[0]:sp + win[1]]).T\n\n for i in outer_idx:\n sp = indices[i]\n l, r = map(minmax, sp + win)\n if l != r:\n spWave[(l - sp) - win[0]:(r - sp) - win[0], i, :] = \\\n sp_data[contacts, l:r].T\n\n wavedict = {\"data\": spWave, \"time\": time, \"FS\": FS}\n\n if len(idx) != len(inner_idx):\n is_valid = np.zeros(len(spt), dtype=np.bool)\n is_valid[inner_idx] = True\n wavedict['is_valid'] = is_valid\n\n if resample != 1:\n warn(\"resample argument is deprecated.\"\n \"Please update your code to use function\"\n \"resample_spikes\", DeprecationWarning)\n wavedict = resample_spikes(wavedict, FS * resample)\n return wavedict", "def get_spike_trains(self, current=None):\n\n # For compability with sciunit as many spike trains are generated as there exists ground truth observations\n spike_trains = []\n if current:\n self.set_external_current(current)\n self.simulate(T_max=TMAX)\n voltage_trial = self.v\n vm_trial = AnalogSignal(voltage_trial, self.dt)\n spike_train = vm_trial.threshold_detection(0)\n spike_trains = [spike_train for _ in range(0,3)]\n return 
spike_trains", "def extract_waveforms(signal, fs, spikes_idx, pre, post):\n cutouts = []\n pre_idx = int(pre * fs)\n post_idx = int(post * fs)\n for index in spikes_idx:\n if index-pre_idx >= 0 and index+post_idx <= signal.shape[0]:\n cutout = signal[(index-pre_idx):(index+post_idx)]\n cutouts.append(cutout)\n return np.stack(cutouts)", "def get_list_of_tracers_for_wsp(self):\n sacc_file = self.io.get_sacc_file()\n tracers = sacc_file.get_tracer_combinations()\n\n fnames = []\n tracers_out = []\n for i, trs1 in enumerate(tracers):\n s1, s2 = self.get_tracer_comb_spin(trs1)\n mn1, mn2 = [self.mask_names[tri] for tri in trs1]\n\n for trs2 in tracers[i:]:\n s3, s4 = self.get_tracer_comb_spin(trs2)\n mn3, mn4 = [self.mask_names[tri] for tri in trs2]\n\n fname1 = f\"w{s1}{s2}__{mn1}__{mn2}.fits\"\n fname2 = f\"w{s3}{s4}__{mn3}__{mn4}.fits\"\n\n if (fname1 in fnames) or (fname2 in fnames):\n continue\n\n fnames.append(fname1)\n fnames.append(fname2)\n\n tracers_out.append((trs1, trs2))\n\n return tracers_out", "def show_pipline_infor(self):\r\n self.normalOutputWritten('--------Pipeline general info--------\\n')\r\n for eachround in range(int(len(self.RoundQueueDict)/2-1)):\r\n\r\n #--------------------------------------------------------------\r\n # show waveform settings\r\n waveformPackage = self.RoundQueueDict['RoundPackage_'+str(eachround+1)][0]\r\n camOperationPackage = self.RoundQueueDict['RoundPackage_'+str(eachround+1)][1]\r\n waveform_sequence = 1\r\n \r\n for eachwaveform in waveformPackage:\r\n\r\n try:\r\n if len(waveformPackage[eachwaveform][3]) != 0:\r\n self.normalOutputWritten('Round {}, sequence {}, recording channels:{}.\\n'.format(eachround+1, waveform_sequence, waveformPackage[eachwaveform][3]))\r\n print('Round {}, recording channels:{}.'.format(eachround+1, waveformPackage[eachwaveform][3]))#[1]['Sepcification']\r\n# else:\r\n# self.normalOutputWritten('Round {} No recording channel.\\n'.format(eachround+1))\r\n except:\r\n self.normalOutputWritten('No recording channel.\\n')\r\n print('No recording channel.')\r\n try:\r\n self.normalOutputWritten('Round {}, Analog signals:{}.\\n'.format(eachround+1, waveformPackage[eachwaveform][1]['Sepcification']))\r\n print('Round {}, Analog signals:{}.'.format(eachround+1, waveformPackage[eachwaveform][1]['Sepcification']))#\r\n except:\r\n self.normalOutputWritten('No Analog signals.\\n')\r\n print('No Analog signals.')\r\n try:\r\n if len(waveformPackage[eachwaveform][2]['Sepcification']) != 0:\r\n self.normalOutputWritten('Round {}, Digital signals:{}.\\n'.format(eachround+1, waveformPackage[eachwaveform][2]['Sepcification']))\r\n self.normalOutputWritten('Lasting time:{} s.\\n'.format(len(waveformPackage[eachwaveform][2]['Waveform'][0])/waveformPackage[eachwaveform][0]))\r\n \r\n print('Lasting time:{} s.\\n'.format(len(waveformPackage[eachwaveform][2]['Waveform'][0])/waveformPackage[eachwaveform][0]))\r\n print('Round {}, Digital signals:{}.'.format(eachround+1, waveformPackage[eachwaveform][2]['Sepcification']))#\r\n# else:\r\n# self.normalOutputWritten('Round {} No Digital signals.\\n'.format(eachround+1))\r\n except:\r\n self.normalOutputWritten('No Digital signals.\\n')\r\n print('No Digital signals.')\r\n waveform_sequence += 1\r\n self.normalOutputWritten('\\n')\r\n \r\n for eachcamoperation in camOperationPackage:\r\n #--------------------------------------------------------------\r\n # Show camera operations\r\n \r\n try:\r\n if len(camOperationPackage[eachcamoperation]) != 0:\r\n self.normalOutputWritten('Round {}, cam 
Buffer_number:{}.\\n'.format(eachround+1, camOperationPackage[eachcamoperation]['Buffer_number']))\r\n print('Round {}, cam Buffer_number:{}.\\n'.format(eachround+1, camOperationPackage[eachcamoperation]['Buffer_number']))#\r\n# else:\r\n# self.normalOutputWritten('Round {} No Digital signals.\\n'.format(eachround+1))\r\n except:\r\n self.normalOutputWritten('No camera operations.\\n')\r\n print('No camera operations.') \r\n \r\n self.normalOutputWritten('-----------end of round-----------\\n')\r\n self.normalOutputWritten('----------------------------------------\\n')", "def iter_recorded(self):\n return iter(self._recorded)", "def isolate_strokes(self):\n if self.onset_times is False:\n self.find_onsets()\n # Defining the frame to contain the strokes\n frame_sz = int(self.stroke_length*self.sampling_rate)\n self.strokes = np.array(\n [self.audio[i:i+frame_sz] for i in self.onset_samples])", "def wrapPlotsOverEdges(self):\n if not self.__selectedCurves:\n return\n wrapcurve = self.__selectedCurves[-1]\n path = self.curve_path_dict[wrapcurve]\n times = []\n xdata = numpy.array(wrapcurve.data().xData())\n ydata = numpy.array(wrapcurve.data().yData())\n # It is a spike train, x values are spike times, wrap around those\n if 'spikes' in path:\n times = xdata\n # It is a stimulus: take the leadin edges\n elif 'stim' in path:\n times = xdata[numpy.r_[False, numpy.diff(ydata) < 0].nonzero()[0]]\n else:\n ydata = analyzer.smooth(ydata)\n mid = numpy.mean(ydata)\n ydata = ydata[ydata > mid] # Threshold at midpoint\n times = xdata[numpy.r_[True, ydata[1:] > ydata[:-1]] & numpy.r_[ydata[:-1] > ydata[1:], True]]\n # start from the first edge, ignoring everything before it\n # and put end of simulation as the upper bound\n for curve in self.itemList():\n ydata = numpy.array(curve.data().yData())\n xdata = numpy.array(curve.data().xData()) \n path = self.curve_path_dict[curve]\n path_curve_list = self.path_curve_dict[path]\n path_curve_list.pop(path_curve_list.index(curve))\n self.curve_path_dict.pop(curve)\n curve.detach()\n start = 0\n end = len(xdata)\n for ii in range(-1, - len(times) - 1, -1):\n points = numpy.nonzero(xdata >= times[ii])[0]\n if len(points) == 0:\n continue\n start = points[0]\n xx = numpy.array(xdata[start:end] - times[ii])\n xdata[start:end] = -1.0\n new_curve = Qwt.QwtPlotCurve('%s #%d' % (curve.title().text(), len(times) + ii, ))\n new_curve.setData(xx, ydata[start:end])\n new_curve.setStyle(curve.style())\n new_curve.setPen(QtGui.QPen(curve.pen()))\n new_curve.setSymbol(Qwt.QwtSymbol(curve.symbol()))\n new_curve.attach(self)\n self.curve_path_dict[new_curve] = path\n self.path_curve_dict[path].append(new_curve)\n end = start \n self.replot()", "def get_gaba_spktimes(self):\n\n spktimes = []\n for sweep_no in range(self.get_no_sweeps()):\n spktimes_singlesweep = []\n for cell_no in range(self.get_no_gaba_neurons()):\n spktimes_singlesweep.append(\n np.where(self.gaba_spktrains[sweep_no, cell_no, :] > 0.5)[\n 0\n ]\n * self.get_dt()\n )\n spktimes.append(spktimes_singlesweep)\n return spktimes", "def all_spike_ind(t, V):\n spikes, _ = find_peaks(V, [1, 1000])\n\n return spikes", "def spectate(self):\n pass", "def waves(repeats = 1):\r\n for i in range(repeats):\r\n alex.up()\r\n alex.color(hueGen(i, .5*i/repeats, .5))\r\n alex.goto(-315,315 - i)\r\n alex.seth(45) # set heading\r\n x = alex.xcor()\r\n y = alex.ycor()\r\n f = i + 1\r\n for j in range(630):\r\n x = alex.xcor()\r\n alex.goto(x + 1, y + 25*sin(8*j/f + i/25)) # plot sines\r\n alex.down()\r\n x = alex.xcor()", "def 
pump_curves(self):\n for key in self._pump_curves:\n yield key, self._data[key]", "def wave_get_cbs():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVSC, 0, 0))", "def spew(self):\n for frame in self.frames:\n print frame.func, frame", "def drawBolts(self,view):\r\n for bolt in self.getBolts():\r\n bolt.draw(view)", "def __iter__(self):\n for element in self.focals:\n yield element", "def find_spectra(self):\r\n\r\n #### Begin functionality here\r\n\r\n return()", "def printSpikes(self, filename, gather=True):\n spikes = self.getSpikes(compatible_output=True)\n if spikes != None:\n first_id = 0\n num_neurons = self.vertex.atoms\n dimensions = self.vertex.atoms\n last_id = self.vertex.atoms - 1\n utility_calls.check_directory_exists(filename)\n spikeFile = open(filename, \"w\")\n spikeFile.write(\"# first_id = %d\\n\" % first_id)\n spikeFile.write(\"# n = %d\\n\" % num_neurons)\n spikeFile.write(\"# dimensions = [%d]\\n\" % dimensions)\n spikeFile.write(\"# last_id = %d\\n\" % last_id)\n for (neuronId, time) in spikes:\n spikeFile.write(\"%d\\t%d\\n\" % (time, neuronId))\n spikeFile.close()" ]
[ "0.60995203", "0.60207593", "0.58172476", "0.56029576", "0.5505114", "0.5358715", "0.5346913", "0.53436625", "0.5329249", "0.5252956", "0.52186084", "0.5192006", "0.5182989", "0.5122417", "0.5108391", "0.50970674", "0.5088726", "0.5059595", "0.5016616", "0.5011529", "0.50094086", "0.5001465", "0.49915072", "0.4984667", "0.49736238", "0.49505764", "0.4940775", "0.49297512", "0.49246913", "0.49090725" ]
0.71562934
0
Switch between top and bottom origin for the channels.
def switch_origin(self): self.origin = 'bottom' if self.origin == 'top' else 'top'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reshape(self, bottom, top):\r\n pass", "def reshape(self, bottom, top):\n\t\tpass", "def reshape(self, bottom, top):\n pass", "def reshape(self, bottom, top):\n pass", "def reshape(self, bottom, top):\n pass", "def reshape(self, bottom, top):\n pass", "def reshape(self, bottom, top):\n pass", "def reshape(self, bottom, top):\n pass", "def reshape(self, bottom, top):\n pass", "def reshape(self, bottom, top):\n pass", "def reshape(self, bottom, top):\n pass", "def reshape(self, bottom, top):\n pass", "def reshape(self, bottom, top):\n pass", "def reshape(self,bottom,top):\n pass", "def update(self):\n self.active = False\n self.top.update(self.rgb,self.cmyk,self.hsv)\n self.bot.update(self.rgb,self.cmyk,self.hsv)\n self.active = True", "def update_ballpos(self,pos):\n if self.options.visualize_switch_xy:\n self.col.set_offsets(pos[:,::-1]) # reverse x-y direction\n else:\n self.col.set_offsets(pos)", "def fl_flip_yorigin():\n _fl_flip_yorigin = library.cfuncproto(\n library.load_so_libforms(), \"fl_flip_yorigin\",\\\n None, [],\\\n \"\"\"void fl_flip_yorigin()\"\"\")\n _fl_flip_yorigin()", "def backToMiddlePos():\n\tprogMode(True) # Active le couple de servos\n\taxDriver.goToPosition(axDriver.BROADCASTID, 0x1FF) # Renvoie a la position 0x1FF", "def __init__(self):\r\n #set up pannel in centre of screen, just above the bottom of the screen.\r\n super(Pannel, self).__init__(image = Pannel.pannel,\r\n x = games.screen.width/2,\r\n y = games.screen.height -11)", "def set_position( self ):\n\t\tscreen_rect = self.get_preview_window_screen_rect( )\n\n\t\twhile screen_rect.Intersects( self.GetScreenRect( ) ):\n\t\t\tpos = self.GetPosition( )\n\t\t\tself.SetPosition( ( pos[ 0 ] - 2, pos[ 1 ] + 2 ) )", "def set_zlim(self, bottom=None, top=None):\n if isinstance(self._frame, root.TH1F):\n warnings.warn(\"Attempting to set z-axis limits for 2D axes\")\n return\n\n if top is None and np.iterable(bottom):\n bottom, top = bottom\n\n if bottom is None or top is None:\n old_bottom, old_top = self.get_zlim()\n if bottom is None:\n bottom = old_bottom\n if top is None:\n top = old_top\n\n if bottom == top:\n warnings.warn(\n \"Attempting to set identical bottom == top == {} z-axis limits\".format(\n bottom\n ),\n stacklevel=2,\n )\n\n if bottom > top:\n raise ValueError(\"Axis limits must be in increasing order\")\n\n if top <= 0 and self._logz:\n warnings.warn(\n \"Attempting to set non-positive top zlim on a log-scaled axis.\\n\"\n \"Invalid limit will be ignored.\",\n stacklevel=2,\n )\n top = self.get_zlim()[1]\n\n elif bottom <= 0 and self._logy:\n warnings.warn(\n \"Attempting to set non-positive bottom zlim on a log-scaled axis.\\n\"\n \"Invalid limit will be ignored.\",\n stacklevel=2,\n )\n bottom = self.get_zlim()[0]\n\n self._frame.SetMinimum(bottom)\n self._frame.SetMaximum(top)\n\n self._pad.Modified() # Draw the updated axes\n\n return (bottom, top)", "def __window_forward(self):\n pass", "def move_north(self):\n self.vertical = (self.vertical * 2)[1:5]\n self.horizontal[1] = self.vertical[0]\n self.horizontal[3] = self.vertical[2]", "def head_towards(self):\n dest = self.target_destination - self.location\n if dest.length() != 0:\n dest.scale_to_length(self.speed)\n dest.normalize()\n self.rect.left += dest.x\n self.rect.top += dest.y", "def move_east(self):\n self.horizontal = (self.horizontal * 2)[3:7]\n self.vertical[0] = self.horizontal[1]\n self.vertical[2] = self.horizontal[3]", "def switchPlayer(self):\n\n \n tmp = self.current\n self.current = self.other\n self.other = tmp\n\n 
self.topSelector.toggleActive()\n self.bottomSelector.toggleActive()", "def __init__(self, bottom, top, current):\n self.bottom = bottom\n self.top = top\n self.current = current", "def backward(self, top, propagate_down, bottom):\r\n pass", "def backward(self, top, propagate_down, bottom):\n\t\tpass", "def mirror_y(board):\n new_board = board[:]\n new_board.reverse()\n return new_board" ]
[ "0.5853456", "0.58374345", "0.57300776", "0.57300776", "0.57300776", "0.57300776", "0.57300776", "0.57300776", "0.57300776", "0.57300776", "0.57300776", "0.57300776", "0.57300776", "0.56316376", "0.56309074", "0.5490777", "0.5398423", "0.5386707", "0.53046316", "0.5291994", "0.5280545", "0.5252932", "0.5172233", "0.5165766", "0.51486814", "0.5148352", "0.512192", "0.50995785", "0.5093663", "0.5070834" ]
0.7755325
0
Half of the duration of the current interval.
def half_duration(self): if self._interval is not None: a, b = self._interval return (b - a) * .5 else: return self.interval_duration * .5
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def half_step_time(self):\n\n return self.full_step_time() * self.half_to_full_step_time_ratio", "def _get_half_time(self):\n return self.__half_time", "def widen(self):\n t, h = self.time, self.half_duration\n h *= self.scaling_coeff_x\n self.set_interval((t - h, t + h))", "def narrow(self):\n t, h = self.time, self.half_duration\n h /= self.scaling_coeff_x\n self.set_interval((t - h, t + h))", "def getDurationReciprocal(self):\n return 1/self.duration", "def duration(self):\r\n return self.t2 - self.t1", "def duration(self):\n return float('{0:.2f}'.format(self.end_time - self.start_time))", "def get_interval(self):\n return self.interval * 1000", "def duration(self):\n return self._end - self._begin", "def duration(self):\r\n return self.stop - self.start", "def time(self):\n return sum(self._interval) * .5", "def full_step_time(self):\n\n total_step_time = self.duration()\n return total_step_time / (2 * self.half_to_full_step_time_ratio + (self.num_steps() - 2))", "def break_time(self):\n\t\ts = timedelta()\n\t\tfor i in xrange(1, len(self.toggles)-1, 2):\n\t\t\ts += self.toggles[i+1] - self.toggles[i]\n\n\t\t# If not working need to add the last period of time\n\t\tif not self.status():\n\t\t\ts += datetime.now() - self.toggles[-1]\n\t\treturn s", "def middle_value(self):\n duration = self.__end.get_midpoint() - self.__begin.get_midpoint()\n return float(self.__begin.get_midpoint()) + float(duration) / 2.", "def period(self) -> int:", "def get_duration(self):\n return float(self.time.iloc[-1] - self.time.iloc[0])", "def duration(self):\n self._current_duration = time.perf_counter() - self._duration_start\n return round(self._current_duration, 4)", "def _period( self ):\r\n\treturn 2 * pi * sqrt( self.orbital_elements[0]**3 / self.mu_central_body )\r\n\t# http://en.wikipedia.org/wiki/Orbital_period#Calculation\r", "def duration(self):\n return self.end - self.start", "def duration(self) -> float:\n return self.delta_t * len(self)", "def duration(self) -> float:\n return self.delta_t * len(self)", "def update_period(self):\n return 0.1", "def duration(self) -> float:\n return float(len(self.__samples))/float(self.__rate)", "def duration(self) -> float:\n return self._stop - self._start if self._stop is not None else None", "def duration(self) -> float:\n return self.endTime()-self.startTime()", "def duration(self):\n return self.end_abs - self.start", "def period(self):\n return float(self._period) / 1000", "def half_frame(self) -> None:\n pass", "def half_frame(self) -> None:\n pass", "def get_duration(self):\n return (self.stop_day - self.start_day) * (24 * 60) \\\n + (self.stop_hour - self.start_hour) * 60" ]
[ "0.72750044", "0.70751405", "0.6361613", "0.6327403", "0.63129026", "0.6184249", "0.61641526", "0.61528426", "0.6123823", "0.6084359", "0.608264", "0.6048802", "0.6041668", "0.603156", "0.60129833", "0.60048217", "0.59908354", "0.5974596", "0.59592074", "0.5935029", "0.5935029", "0.5930361", "0.5929488", "0.58833724", "0.58503926", "0.5845789", "0.58188003", "0.5786204", "0.5786204", "0.57613546" ]
0.8561854
0
Go to a specific time (in seconds).
def go_to(self, time): half_dur = self.half_duration self.set_interval((time - half_dur, time + half_dur))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def jump(self, seconds: float) -> None:\n if seconds < 0:\n raise ValueError(\"time can't go backwards\")\n self._virtual_base += seconds", "def set_time(self, sec):\n self.set_timed(round(sec * 10.0))", "def pass_time(self, t):\n cont = time.time() + t\n while time.time() < cont:\n time.sleep(0)", "def sleep(self, seconds=60):\n\t\ttime.sleep(seconds)", "def advance_time_seconds(seconds):\r\n advance_time_delta(datetime.timedelta(0, seconds))", "def sleep(seconds):\r\n time.sleep(seconds)", "def sleep(self, seconds):\n time.sleep(seconds)", "def go_then_wait(self, position, seconds):\n self.go(position)\n self.wait(seconds)", "def wait_up_to_second(second, time_template=None):\r\n current_second = datetime.datetime.now().second\r\n target_second = int(second)\r\n\r\n if current_second > target_second:\r\n sleep_time = 60 - (current_second - target_second)\r\n else:\r\n sleep_time = target_second - current_second\r\n\r\n if sleep_time:\r\n print('Waiting {} second(s)'.format(sleep_time))\r\n time.sleep(sleep_time)\r\n\r\n if time_template:\r\n return Utils.get_current_time(time_template)", "def sleep(seconds):\n time.sleep(seconds)", "def sleep(seconds):\n time.sleep(seconds)", "def setTimeOut(self, sec):\n if (sec is not None) and (sec > 0):\n to = sec\n else:\n to = None\n self._simulator_.update(timeout=to)\n\n return", "def set_sleep_timer(self, option, time):\n params = [\n ('option', option),\n ('sleeptime', int(time)),\n ]\n\n self.get(COMMAND_UIC, 'SetSleepTimer', params)", "def set_sleep_time(self, time):\n self.sleep_time = time", "def seek(self, time):\n command = 'seek ' + str(time)\n self.run_command(command)", "def pause(seconds):\n time.sleep(seconds);", "def pause(seconds: float) -> None:\n time.sleep(cast(float, seconds))", "def set_timeout(self, seconds):\n self._timeout = seconds", "def timer_change(self):\n if self.time < 999:\n self.time += 1\n self.time_lcd.display(self.time)\n else:\n self.timer.stop()", "def sleep_for(timeToSleep):\r\n time.sleep(timeToSleep)", "def sleep(seconds):\n # After load and initializing the PvAPI Python's built-in 'sleep' function\n # stops working (returns too early). 
The is a replacement.\n from time import sleep,time\n t = t0 = time()\n while t < t0+seconds: sleep(t0+seconds - t); t = time()", "def update_time(self, *args):\n s = int(time.time() - self.start_time)\n self.time_label.text = str(datetime.timedelta(seconds=s))", "def start_timer(self, secs):\r\n self.secs = secs\r\n self.countdownTimer.start(1000)", "def set_time(self, value: float):\n if value < 0:\n value = 0\n\n self.player.seek(value)", "def process_next_second(self):\n self.check_day_advance()\n rd = self.active_row\n if not rd:\n # Paused when we still have the 'after' method active.\n # Now that it is not active so we do nothing.\n return\n secs = int((datetime.now() - self.start_time).total_seconds())\n time = self.seconds_to_hms(secs)\n rd.time = time\n rd.label.config(text=time)\n rd.frame.after(1000, self.process_next_second)", "def sleep(seconds):\n\n # Check seconds to ensure it is a valid type.\n if type(seconds) not in [long, float, int]:\n raise RepyArgumentError(\"Invalid type \" + str(type(seconds)))\n\n # Using getruntime() in lieu of time.time() because we want elapsed time \n # regardless of the oddities of NTP\n start = nonportable.getruntime()\n sleeptime = seconds\n\n # Return no earlier than the finish time\n finish = start + seconds\n\n while sleeptime > 0.0:\n time.sleep(sleeptime)\n\n # If sleeptime > 0.0 then I woke up early...\n sleeptime = finish - nonportable.getruntime()", "def set_timer_time(self, time: int) -> None:\n current_mode = self.get_mode()\n # Defining the time for the Timer program only has an effect\n # when first the Timer program is selected.\n if current_mode != 'Timer':\n self.set_mode('Timer')\n self.logger.info(f\"Switching program from '{current_mode}' to \"\n \"'Timer'.\")\n\n return self.send(self.cmd.SET_TIMER_TIME, time)", "def sleep(min_seconds=1, max_seconds=10):\n time.sleep(randint(min_seconds, max_seconds))", "def _change_time(self):\r\n msg = \"Notice! if you don't write hours the time\\nwill be calculated as seconds.\\nEnter new time:\"\r\n new_time = simpledialog.askstring(title=\"Change recording time\", prompt=msg)\r\n\r\n # new_time has to be a digit bigger than 0\r\n while not new_time:\r\n msg = \"Time must have a value. For example: 1 hours/ 1.5 hours/ 25 seconds\"\r\n messagebox.showerror(title=\"ERROR\", message=msg)\r\n new_time = simpledialog.askstring(title=\"Change recording time\", prompt=\"Enter new time:\")\r\n if new_time:\r\n self.time.set(\"time: \" + new_time + ''.join(' ' for _ in range(42 - len(new_time))))", "def sleep(secs=1.0):\n time.sleep(secs)" ]
[ "0.6700288", "0.66491824", "0.6632575", "0.6577623", "0.65731984", "0.629269", "0.62756586", "0.62237495", "0.6199625", "0.6169536", "0.6169536", "0.6143821", "0.6137382", "0.6088691", "0.60664135", "0.602092", "0.6020566", "0.60139376", "0.6013062", "0.593964", "0.5881879", "0.58608747", "0.58516955", "0.5849226", "0.5840093", "0.58319664", "0.58282197", "0.5811083", "0.5795957", "0.5794308" ]
0.6876498
0
Shift the interval by a given delay (in seconds).
def shift(self, delay): self.go_to(self.time + delay)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shift(self, delay):\n self.__begin.shift(delay)\n self.__end.shift(delay)", "def delay(interval):\n time.sleep(interval / 1000.0)", "def delay(seconds):\n\n # Perform the delay\n time.sleep(seconds)", "def delay():\r\n time.sleep(2)", "async def sleep(cls, delay: float) -> None:", "def _delay(self):\n if not self.next_scheduled:\n self.next_scheduled = self.clock_func() + self.interval\n return\n while True:\n current = self.clock_func()\n if current >= self.next_scheduled:\n extratime = current - self.next_scheduled\n self.next_scheduled = current + self.interval - extratime\n return\n delay_amt = self.next_scheduled - current\n #Call for 0, because that might be meaningful to sleep_func.\n if self.allow_negative_sleep or delay_amt >= 0: \n self.sleep_func(self.next_scheduled - current)", "def delay(ms: int, /) -> None:", "def sleep(interval):\n time.sleep(interval) # pragma: no cover", "def set_delay(delay):\r\n inst.write(\"PULS:DEL %f\" %(delay))", "def delay(self, delay=None):\n if delay is None:\n return self._delayvalue\n self._delayvalue = int(delay)", "def delay(self, dt, keep_length=True):\n x = delay(self.fs, self.in_time, dt, keep_length=keep_length)\n return self.from_time(self.fs, x)", "def delay(self, distance, seconds):\n delay = distance/seconds\n return delay", "def SetStepDelay(self,delay=200): \n self.Bus.Transaction(chr(self.Address)+chr(0x43)+chr(delay))", "def delay(dt):\n return dt.total_seconds()", "def __delay(msecs):\n time.sleep(msecs / 1000)", "def set_delay_ns(delay):\r\n inst.write(\"PULS:DEL %f NS\" %(delay))", "def _delay(self, n=None):", "def _delay(self, delay=None):\n return self.screen.delay(delay)", "def delay(self, seconds):\n\n if self.call is None:\n return\n self.call.delay(seconds)", "def _stop_after(delay):\n timer = CFRunLoopTimerCreate(\n None, # allocator\n CFAbsoluteTimeGetCurrent() + delay, # fireDate\n 0, # interval\n 0, # flags\n 0, # order\n _c_stop_callback,\n None,\n )\n CFRunLoopAddTimer(\n CFRunLoopGetMain(),\n timer,\n kCFRunLoopCommonModes,\n )", "def fake_delay(self, ha_delay):\n hass_now = dt_util.utcnow()\n shifted_time = hass_now + timedelta(seconds=ha_delay)\n self.hass.bus.fire(ha.EVENT_TIME_CHANGED, {ha.ATTR_NOW: shifted_time})", "def setDelay(self, channel, delay, unitCode=0):\n resp = self.XAPCommand('DELAY', channel, delay, unitCode=unitCode)\n return float(resp)", "def delay(self, interval=None):\n if self._start:\n raise OperationFailError(\"Task is already running.\")\n\n if interval is None:\n self._delay = None\n else:\n if isinstance(interval, timedelta):\n self._start_at = None # Use delay instead of start time.\n self._delay = interval\n elif isinstance(interval, int):\n self._start_at = None # Use delay instead of start time.\n self._delay = timedelta(seconds=interval)\n else:\n time_pattern = r'^([0-1]?\\d|[2][0-3]):[0-5]?\\d:[0-5]?\\d$'\n if re.match(time_pattern, interval):\n self._start_at = None # Use delay instead of start time.\n tsp = interval.split(\":\")\n self._delay = timedelta(hours=int(tsp[0]),\n minutes=int(tsp[1]),\n seconds=int(tsp[2]))\n else:\n raise TimeFormatError\n\n return self", "def run_after_delay(delay_ms: float, callback: Callable[[], None]):\n heapq.heappush(\n _sorted_scheduled_events,\n _ScheduledEvent(\n time=pygame.time.get_ticks() + delay_ms, callback=callback\n ),\n )", "def _delay(self, delay):\n self.cv.after(delay)", "def moving_delay(self, duration):\n start_time = monotonic()\n while (monotonic() - start_time)*1e3 < duration:\n if self.check_movement() == False:\n if 
self.move_state != MOV_ROTATE: # rotate is only valid movement\n print(\"Stopping in moving_delay()\")\n self.move_brake()", "def id_sleep(x, delay=0):\n sleep(delay)\n return x", "def sleep(seconds):\n\n return Sleep(seconds)", "def delay(dt):\n return dt.days * 86400 + dt.seconds + 1e-6 * dt.microseconds", "def udelay(us: int, /) -> None:" ]
[ "0.7768829", "0.7139381", "0.6876155", "0.6494857", "0.649145", "0.64053226", "0.6397227", "0.6366633", "0.63602847", "0.6356303", "0.6325486", "0.6317313", "0.6315866", "0.62965715", "0.62463284", "0.6218299", "0.61278665", "0.6077595", "0.60770804", "0.60277456", "0.60083115", "0.599665", "0.5984027", "0.5964296", "0.5938278", "0.5935404", "0.5934932", "0.59299296", "0.5928673", "0.5907131" ]
0.82353926
0
Go to end of the recording.
def go_to_end(self): self.go_to(self.duration)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def end(self) -> None:", "def end(self):\n pass", "def end(self):\n pass", "def end(self):\n pass", "def handle_record_end():\n LOG.info(\"End Recording...\")\n context = {'client_name': 'mycroft_listener',\n 'source': 'audio',\n 'destination': [\"skills\"]}\n bus.emit(Message('recognizer_loop:record_end', context=context))", "def end(self):\n ...", "def stop(self):\n self.recording = False", "def end(self) -> None:\n return", "def end(self):\r\n # print(self.send_command('battery?'))\r\n if not self.is_dummy:\r\n self.send_command('land')\r\n if self.background_frame_read is not None:\r\n self.background_frame_read.stop()\r\n # It appears that the VideoCapture destructor releases the capture, hence when \r\n # attempting to release it manually, a segmentation error occurs.\r\n # if self.cap is not None:\r\n # self.cap.release()\r", "def end(self):\n # Stop driving\n self.robot.drivetrain.arcade_drive(0.0, 0.0)", "def end(self):\n self.my_print(\"\\t[DONE]\", msg_types.INFO)\n self.in_progress = False", "def goto_end(self):\n\n self.__do_action(self.motor.moveto_edge(MotorDriver.RIGHT))", "def end(self):\n self.kill_flag.value = True\n while (not self.pseye.thread_complete.value) or (not self.saver.saving_complete.value):\n pass", "def _end(self):\n\n self.logger.msg1(\"Done\")", "def end(self):\n self._log.debug('%s: doing ..', __class__.__name__)\n self._log.debug('%s: done.', __class__.__name__)", "def end(self):\n self._log.debug('doing ..')\n super().end()\n\n self._servo.end()\n self._mtr.end()\n self._log.debug('done')", "def finish(self):\n pass", "def finish(self):\n pass", "def stop_recording(self):\n\n\t\tself.eyetribe.stop_recording()\n\t\tself.recording = False", "def recording_stop(self):\n self._post('recording/stop')", "def finish():\n pass", "def end(self):\n self.f.close()\n print(\"Macro recorded, filename \" + self.name)", "def end(self) -> None:\n unicurses.endwin()", "def stopit(self):\n\n self.stop.stop()\n self.stream.close()\n self.p.terminate()\n self.p = None\n\n print(\"Recording terminated!\")", "def end(self):\n self._log.debug('doing ..')\n super().end()\n\n self._log.debug('done')", "def end(self):\n if self.flowComponent:\n self.flowComponent.end()\n pass", "def finishTurn(self):\n print \"go\"\n sys.stdout.flush()", "def stop_recording(self):\n self.flag_event.clear()\n self.statusBar().showMessage('Recording Stopped')\n print('boo ya')", "def _on_key_release(self, key):\n if key is self.TRIGGER_KEY:\n print(\"End Recording\")\n self.do_record = False", "def endGame(self):\n pass" ]
[ "0.71201277", "0.709415", "0.709415", "0.709415", "0.7093299", "0.7023222", "0.6937872", "0.6920976", "0.6730083", "0.6668539", "0.6595177", "0.652316", "0.6494475", "0.6478494", "0.6469474", "0.64187795", "0.6406178", "0.6406178", "0.63864815", "0.63863313", "0.636873", "0.63622594", "0.63096756", "0.62970585", "0.6283105", "0.627556", "0.6257113", "0.62548035", "0.62500185", "0.621994" ]
0.7161482
0
Jump to the next spike from the first selected cluster.
def go_to_next_spike(self, ): self._jump_to_spike(+1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def go_to_previous_spike(self, ):\n self._jump_to_spike(-1)", "def _jump_to_spike(self, delta=+1):\n spike_times = self.get_spike_times()\n if spike_times is not None and len(spike_times):\n ind = np.searchsorted(spike_times, self.time)\n n = len(spike_times)\n self.go_to(spike_times[(ind + delta) % n])", "def next(self):\n self.jumpahead(1)", "def goto_node(self):\n p = self.get_position()\n if p and p != self.c.p:\n self.c.selectPosition(p)", "def goto(self, index):\n raise NotImplementedError", "def next(self):\n while not self.is_stable():\n self.step()", "def jump(self):\n global jumpSize\n print \"jumping...\"\n # create a range that includes all the available feature indices\n featureIndices = range(0, len(self.features))\n # remove indices until there are only jumpSize left\n while len(featureIndices) > jumpSize:\n # choose a random index\n index = random.randint(0, len(featureIndices)-1)\n # remove that item from the list of indices\n del featureIndices[index]\n for featureIndex in featureIndices:\n # get a pointer to that feature\n feature = self.features[featureIndex]\n # pick a random number based on the size of the feature's domain\n domainIncrement = random.randint(0, len(feature.domain) - 1)\n # get the index within the domain of the current feature value\n domainIndex = feature.domain.index(feature.value)\n # go to a different value in the domain\n newDomainIndex = (domainIndex + domainIncrement) % len(feature.domain)\n # assign the value from the domain\n feature.value = feature.domain[newDomainIndex]", "def _next(self, _):\n self.notebook.SetSelection(self.idx+1)", "def choose_next_player(self):\n player_index = self.players.index(self.current_player)\n if self.direction_clock_wise:\n if player_index >= len(self.players) - 1:\n self.current_player = self.players[0]\n else:\n self.current_player = self.players[player_index + 1]\n else:\n if player_index <= 0:\n self.current_player = self.players[len(self.players) - 1]\n else:\n self.current_player = self.players[player_index - 1]", "def go_to_next_state(self):\n pass", "def jumpp(self):\r\n\r\n if not self.current_jump is None:\r\n self.current_jump = self.current_jump.next", "def go_to_start(self):\n self.go_to(0)", "def select_next_cup(self):\n idx = self.current_cup_idx()\n idx += 1\n if idx >= len(self.cups):\n idx = 0\n self.current_cup = self.cups[idx]", "def next(self):\n next_index = self.current_target_index + 1\n self.current_target_index = next_index % self.targets_amount\n updated_pos = self.positions[self.current_target_index]\n self.current_target = updated_pos\n return updated_pos", "def enter_loop(self):\n if (self.tape.current_cell()==0):\n # Jump past the end.\n self.instruction_pointer = (self.jump_map[self.instruction_pointer])\n else:\n pass", "def getNextTarget(self):\r\n\r\n\t\tif self.pathToGoal == []:\r\n#\t\t\tprint \"\\tPath empty, finding a new one.\"\r\n\t\t\tself.decideOnGoal()\r\n\t\t\tself.calculateNewPath()\r\n\t\r\n\t\tself.currentTarget = self.pathToGoal.pop(0)", "def next(self):\n old_candidate = self._candidate\n new_candidate = self._genome_factory.build([old_candidate])\n new_candidate.run()\n if new_candidate.fitness > old_candidate.fitness:\n self._candidate = new_candidate\n\n self._converged = self._convergence_criterion.converged(old_candidate, new_candidate)", "def goto_next_level(self, *args):\n self.manager.current = self.manager.next()\n self.reset()", "def next_point(self, start_pos, goal_pos):\r\n\t\tself.shift = 0\r\n\t\tself.start_pos = start_pos\r\n\t\tself.goal_pos = 
goal_pos", "def goto(self, speed=1):\n\n self.safe_goto(speed, 0)", "def jump(distance):\r\n t.penup()\r\n t.forward(200)\r\n t.pendown()\r\n return None", "def next( self ):\n next(self)", "def FindNext():\r\n return _hiew.HiewGate_FindNext()", "def refresh(self):\n self.goto(self.starting_position)", "def test_restart(self):\n\n selector = PCovCUR(n_to_select=1)\n selector.fit(self.X, self.y)\n\n for i in range(len(self.idx) - 2):\n selector.n_to_select += 1\n selector.fit(self.X, warm_start=True)\n self.assertEqual(selector.selected_idx_[i], self.idx[i])", "def next_step(self):\n self.proceed()\n self.execute_current()", "def next_step(self):\n logging.debug(u\"Moving to next step\")\n\n if not self.steps or len(self.steps) < 1:\n logging.debug(u\"- no steps have ben set\")\n return None\n\n index = self.get('_index')\n\n if index is None:\n index = 0\n elif index < len(self.steps)-1:\n index += 1\n else:\n logging.debug(u\"- all steps have ben consumed\")\n return None\n\n current = self.current_step\n if current:\n current.stop()\n\n logging.debug(u\"- triggering step #{}\".format(index+1))\n self.set('_index', index)\n step = self.steps[index]\n step.trigger(bot=self.bot)\n return step", "def pick_next_station(self, station):\n self.best_score = 0\n\n stations = self.grid.stations\n # all connections of the last added added station \n lookahead_1 = self.grid.get_station(self.best_connection[1]).connections\n\n for la1 in lookahead_1.values():\n next_station = la1[0].name\n # if adding the connection exceeds the tracks max time length \n if self.track.add_station(self.grid, next_station) is False:\n break\n\n lookahead_2 = self.grid.get_station(la1[0].name).connections\n\n # keeps adding stations untill the time limit is reached\n for la2 in lookahead_2:\n la2 = stations.get(la2)\n if self.track.add_station(self.grid, la2.name) is False:\n break\n \n quality = self.grid.get_quality()\n \n self.track.remove_last_station()\n\n # if quality improves, add first station to the track\n if quality > self.best_score:\n self.best_score = quality \n self.best_connection = [la2.name, la1[0].name]\n \n self.track.remove_last_station()", "def get_next_position(self):", "def cmd_k(self):\n node = self.start\n while node is not None:\n if node == self.cursor:\n if node.prev is not None:\n self.cursor = node.prev\n break\n node = node.next\n self.get_text()" ]
[ "0.68979293", "0.6640994", "0.6507172", "0.5977406", "0.5866238", "0.5749817", "0.57480246", "0.568603", "0.5647011", "0.5641555", "0.56349427", "0.55913675", "0.5588329", "0.5521469", "0.54015243", "0.5398123", "0.5389734", "0.53794944", "0.5359989", "0.53176546", "0.528841", "0.5274591", "0.522305", "0.5221612", "0.52207553", "0.52070814", "0.5191429", "0.5189402", "0.5177124", "0.51536715" ]
0.7934081
0
Jump to the previous spike from the first selected cluster.
def go_to_previous_spike(self, ): self._jump_to_spike(-1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def go_to_next_spike(self, ):\n self._jump_to_spike(+1)", "def _jump_to_spike(self, delta=+1):\n spike_times = self.get_spike_times()\n if spike_times is not None and len(spike_times):\n ind = np.searchsorted(spike_times, self.time)\n n = len(spike_times)\n self.go_to(spike_times[(ind + delta) % n])", "def jump_to_previous(self):\n self.nvim.command('silent! wincmd p')", "def _prev(self, _):\n self.notebook.SetSelection(self.idx-1)", "def on_reset(self):\n\n current = self.current_step\n if current:\n current.stop()\n\n logging.debug(u\"- seeking back before first step\")\n self.set('_index', None)", "def goto_node(self):\n p = self.get_position()\n if p and p != self.c.p:\n self.c.selectPosition(p)", "def go_to_start(self):\n self.go_to(0)", "def previous(self, event):\n self.result = -1", "def previousRange(self):\r\n if (self.selectedmap > 0):\r\n self.pickMap(self.selectedmap-1)", "def jumpp(self):\r\n\r\n if not self.current_jump is None:\r\n self.current_jump = self.current_jump.next", "def next(self):\n self.jumpahead(1)", "def go_to_next_state(self):\n pass", "def refresh(self):\n self.goto(self.starting_position)", "def goto(self, index):\n raise NotImplementedError", "def jump(self):\n global jumpSize\n print \"jumping...\"\n # create a range that includes all the available feature indices\n featureIndices = range(0, len(self.features))\n # remove indices until there are only jumpSize left\n while len(featureIndices) > jumpSize:\n # choose a random index\n index = random.randint(0, len(featureIndices)-1)\n # remove that item from the list of indices\n del featureIndices[index]\n for featureIndex in featureIndices:\n # get a pointer to that feature\n feature = self.features[featureIndex]\n # pick a random number based on the size of the feature's domain\n domainIncrement = random.randint(0, len(feature.domain) - 1)\n # get the index within the domain of the current feature value\n domainIndex = feature.domain.index(feature.value)\n # go to a different value in the domain\n newDomainIndex = (domainIndex + domainIncrement) % len(feature.domain)\n # assign the value from the domain\n feature.value = feature.domain[newDomainIndex]", "def previous(self):\n self._select_interface(self._rc_previous, self._http_previous)", "def focus_prev(self):\n self.focus_item(forward=False)", "def restart(self):\n self.idx = 0", "def previous(self):\n\n pass", "def jumped_on(self):\r\n pass", "def MoveToPreviousSlide(self, event):\n pass", "def move_to_position1(self):", "def previous_line():\r\n set_point(point().previous_line())", "def select_next_cup(self):\n idx = self.current_cup_idx()\n idx += 1\n if idx >= len(self.cups):\n idx = 0\n self.current_cup = self.cups[idx]", "def backtrack(self):\n last_intersection = self.intersection.pop()\n retrace = Shortest_path().shortestPath(self.graph, self.current, last_intersection)\n print retrace\n print \"Moving back...\"\n self.current = retrace.pop(0)\n if self.current in self.intersection:\n self.intersection.remove(self.current)\n while retrace:\n position = retrace.pop(0)\n self.move_to_position(position)\n if position in self.intersection:\n self.intersection.remove(position)", "def cmd_k(self):\n node = self.start\n while node is not None:\n if node == self.cursor:\n if node.prev is not None:\n self.cursor = node.prev\n break\n node = node.next\n self.get_text()", "def select_new_current_cup(self):\n\n # \"The crab selects a new current cup: the cup which is immediately clockwise of the current cup.\"\n\n current_position = 
self.cups.index(self.current)\n if current_position < len(self.cups) - 1: # Current cup is not on the end of the list.\n self.current = self.cups[current_position + 1]\n else:\n self.current = self.cups[0]", "def choose_next_player(self):\n player_index = self.players.index(self.current_player)\n if self.direction_clock_wise:\n if player_index >= len(self.players) - 1:\n self.current_player = self.players[0]\n else:\n self.current_player = self.players[player_index + 1]\n else:\n if player_index <= 0:\n self.current_player = self.players[len(self.players) - 1]\n else:\n self.current_player = self.players[player_index - 1]", "def enter_loop(self):\n if (self.tape.current_cell()==0):\n # Jump past the end.\n self.instruction_pointer = (self.jump_map[self.instruction_pointer])\n else:\n pass", "def go_home(self):\n self.set_all_positions([0]*self.nleaflets)" ]
[ "0.714149", "0.63978726", "0.63840806", "0.61748135", "0.60427266", "0.5830913", "0.5813735", "0.5735725", "0.57223296", "0.57025665", "0.57012117", "0.55288374", "0.5477547", "0.547428", "0.5469675", "0.5461348", "0.5453362", "0.5451666", "0.5414452", "0.534706", "0.5315017", "0.53017795", "0.5295359", "0.5289185", "0.52636623", "0.52520895", "0.52414554", "0.523569", "0.5233294", "0.52109677" ]
0.79967767
0
Toggle between showing all spikes or selected spikes.
def toggle_highlighted_spikes(self, checked): self.show_all_spikes = checked self.set_interval()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ToggleOff(self):\n for s in self.sensors:\n self.gSetpt[s.GetID()].Disable()\n\n self.top_sizer.Layout()\n print(\"Toggle off\")", "def hidden_singles(self):\n self.change = True\n while self.change:\n self.hidden_round()", "def toggle_surface_mode(self):\n for poly in self.poly_list:\n poly.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, True)\n if poly in self.hole_list:\n poly.setBrush(QBrush(QColor(255, 255, 255)))\n else:\n poly.setBrush(QBrush(QColor(0, 0, 0, 50)))\n\n # Disable the selection of edges and hide the marker if there is one\n for edge in self.edge_list:\n edge.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, False)\n\n if edge.childItems()[0].childItems():\n text = edge.childItems()[0].childItems()[0]\n text.setVisible(False)\n\n # Hide markers on points\n for point in self.point_marker_list:\n if point.childItems():\n point.childItems()[0].setVisible(False)", "def toggle(self):", "def toggle(self) -> None:", "def toggle(self) -> None:", "def __toggleAll(self):\n aw = self.activeWindow()\n if aw:\n aw.foldAll()", "def toggle(self) -> None:\n ...", "def toggleSelection(self):\n for item in self.getItemsToModify():\n checked = item.checkState() == Qt.Checked\n item.setCheckState(Qt.Unchecked if checked else Qt.Checked)", "def toggleSelection(self):\n for item in self.getItemsToModify():\n checked = item.checkState() == Qt.Checked\n item.setCheckState(Qt.Unchecked if checked else Qt.Checked)", "def toggle(self):\n self.checked = not self.checked\n if self.command:\n self.command(self.name)", "def stopAll(self, event=None):\n self.paused = False if self.paused else True\n self.gotData = False\n label = \"Resume all Sensors\" if (~self.paused) else \"Pause all Sensors\"\n self.displayPanel1.paused = False if self.displayPanel1.paused else True\n pass", "def hideallstate(self):\n if self.hideallcheck.isChecked() == True:\n self.field.setOwnRobotsVisibility(False, self.index)\n self.field.setPathVisibility(False, self.index)\n self.field.setBallVisibility(False, self.index)\n self.field.setTeammateVisibility(False, self.index)\n #self.field.setPathVisibility(False, self.index)\n self.field.setOpponentVisibility(False, self.index)\n self.field.setUndefVisibility(False, self.index)\n self.ballcheck.setChecked(False)\n self.teammatecheck.setChecked(False)\n self.opponentcheck.setChecked(False)\n self.undefcheck.setChecked(False)\n self.targetcheck.setChecked(False)\n else:\n self.field.setOwnRobotsVisibility(True, self.index)\n self.field.setPathVisibility(True, self.index)\n self.field.setBallVisibility(True, self.index)\n self.field.setTeammateVisibility(True, self.index)\n #self.field.setPathVisibility(True, self.index)\n self.field.setOpponentVisibility(True, self.index)\n self.field.setUndefVisibility(True, self.index)\n self.ballcheck.setChecked(True)\n self.teammatecheck.setChecked(True)\n self.opponentcheck.setChecked(True)\n self.undefcheck.setChecked(True)\n self.targetcheck.setChecked(True)", "def toggle_pivot():\n for piv_switcher in get_one_switcher():\n piv_switcher.toggle()", "def toggle(self):\n self._interrupt_flash()\n GPIO.output(self.pin, GPIO.LOW if self.on else GPIO.HIGH)\n self.on = not self.on", "def toggle(self):\n if self.is_enabled:\n self.disable()\n else:\n self.enable()", "def toggle(self, **kwargs):\n self.on = False if self.on else True", "def set_highlighted_spikes(self, spikes=[]):\n \n if len(spikes) == 0:\n # do update only if there were previously selected spikes\n do_update = len(self.highlighted_spikes) > 0\n 
self.highlight_mask[:] = 0\n else:\n do_update = True\n self.highlight_mask[:] = 0\n if len(spikes) > 0:\n ind = self.find_indices_from_spikes(spikes)\n self.highlight_mask[ind] = 1\n \n if do_update:\n self.paint_manager.set_data(\n highlight=self.highlight_mask,\n visual='waveforms')\n \n self.highlighted_spikes = spikes", "def toggle_refresh(self, event):\n self._continue = not self._continue\n if self._continue:\n self.canvas.itemconfig(\"toggle-text\", text=\"Stop\")\n self.refresh(self._refresh_rate)\n else:\n self.canvas.itemconfig(\"toggle-text\", text=\"Start\")", "def toggle_valve():\n new_status = not tank_valve_open\n print(\"- Toggling valve status to '{}'.\".format(\"Open\" if new_status\n else \"Closed\"))\n set_valve_open(new_status)", "def toggle_fore_mod(self, checked):\n for tile in self.tiles:\n tile.toggle_fore_mod(checked)", "def on_gas_filled_toggled(self, checked):\n # TODO: not implemented yet\n if checked:\n self.gas_set = 1\n self.VI_gas_set.setEnabled(True)\n else:\n self.gas_set = 0\n self.VI_gas_set.setEnabled(False)", "def change(self):\r\n\r\n # If checkboxes are available, check status and set boat speed reference line visibility accordingly.\r\n if self.cb:\r\n if self.cb_bt.checkState() == QtCore.Qt.Checked:\r\n for item in self.bt:\r\n item.set_visible(True)\r\n else:\r\n for item in self.bt:\r\n item.set_visible(False)\r\n # GGA\r\n if self.cb_gga.checkState() == QtCore.Qt.Checked:\r\n for item in self.gga:\r\n item.set_visible(True)\r\n # self.gga[0].set_visible(True)\r\n elif self.gga is not None:\r\n for item in self.gga:\r\n item.set_visible(False)\r\n # self.gga[0].set_visible(False)\r\n # VTG\r\n if self.cb_vtg.checkState() == QtCore.Qt.Checked:\r\n for item in self.vtg:\r\n item.set_visible(True)\r\n # self.vtg[0].set_visible(True)\r\n elif self.vtg is not None:\r\n for item in self.vtg:\r\n item.set_visible(False)\r\n # self.vtg[0].set_visible(False)\r\n\r\n # Draw canvas\r\n self.canvas.draw()", "def toggleShowOnlySelection(self):\r\n\t\tself.showOnlySelection = not self.showOnlySelection", "def select_toggle(self):\n self.selection_toggle(*self.get_children())", "def all_off(self):\n self.fill_off()\n self.update()\n self.fill_off()\n self.update()", "def highlight_spikes(self, spikes):\n spikes = np.intersect1d(self.data_manager.waveform_indices_array, \n spikes)\n if len(spikes) > 0:\n spikes_rel = np.digitize(spikes, \n self.data_manager.waveform_indices_array) - 1\n self.highlighting = True\n self.set_highlighted_spikes(spikes_rel)\n else:\n self.cancel_highlight()", "def toggle(self):\n self._state.is_on = not self._state.is_on\n self.send_command(Command.TOGGLE, [])", "def _button_sweep_toggled(self, *a):\r\n _debug('GUISignalGenerator: _button_sweep_toggled()', a)\r\n \r\n # Only run the sweep if we have enabled the button\r\n if self.button_sweep.is_checked():\r\n \r\n # Run the \"before sweep\" setup function for the user to overwrite \r\n # (default is just a pause)\r\n self.before_sweep()\r\n \r\n # Set list mode\r\n self.combo_mode.set_index(1)\r\n self.api.set_mode('List') #Set the mode to list !!\r\n # Update the RF button\r\n self.button_rf.set_checked(self.api.get_output(), block_events=True)\r\n \r\n \r\n # Get list length from the generator\r\n ps = self.api.get_list_powers()\r\n fs = self.api.get_list_frequencies()\r\n \r\n # Make sure they match!\r\n if not len(ps) == len(fs): \r\n print(\"ERROR: Lengths of power and frequency lists do not match!\")\r\n return\r\n \r\n \r\n # Update the user\r\n 
self.label_list_status.set_text(str(len(fs)) + ' points in list memory')\r\n \r\n # Loop for the number of iterations\r\n self.number_iteration.set_value(0)\r\n while self.number_iteration.get_value() < self.settings['Sweep/Iterations'] \\\r\n or self.settings['Sweep/Iterations'] <= 0:\r\n \r\n # Break out if canceled\r\n if not self.button_sweep.is_checked(): break\r\n \r\n # Loop\r\n for n in range(self.settings['Sweep/n1'], min(self.settings['Sweep/n2'], len(fs))):\r\n \r\n # Break out if canceled\r\n if not self.button_sweep.is_checked(): break\r\n \r\n # Set the list index, which updates the machine\r\n self.api.set_list_index(n)\r\n #I'm adding these lines to debug the fact that Api doesn't change the frequency of its output. \r\n _debug(self.api.get_list_index(), self.api.get_frequency(), self.api.get_power())\r\n #print(self.api.get_list_frequencies())\r\n \r\n self.number_list_index.set_value(n, block_events=True)\r\n self.number_frequency .set_value(fs[n], block_events=True)\r\n self.number_dbm .set_value(ps[n], block_events=True)\r\n self.window.process_events()\r\n \r\n # This is where you could insert some interesting code.\r\n self.after_sweep_set_list_index()\r\n \r\n # Increase the iteration count\r\n self.number_iteration.increment()\r\n \r\n # Run user code\r\n self.after_single_sweep()\r\n \r\n # Run user code\r\n self.after_all_sweeps()\r\n \r\n # All done with the loop. Disable the sweep button!\r\n # We put this after the user functions so they can tell if\r\n # someone manually quit out of the loop.\r\n self.button_sweep.set_checked(False, block_events=True)", "def toggle_select(self):\r\n if not len(self.items):\r\n return\r\n item = self.items[self.item_sel]\r\n if item in self.selected:\r\n self.selected.remove(item)\r\n else:\r\n self.selected.append(item)\r\n self.do_paint()" ]
[ "0.6208926", "0.6050148", "0.5986689", "0.5931181", "0.5903119", "0.5903119", "0.5695317", "0.56854576", "0.56528676", "0.56528676", "0.56507105", "0.5585108", "0.5578206", "0.557582", "0.5550592", "0.55118", "0.5508104", "0.5504786", "0.5502983", "0.55022866", "0.54935396", "0.54821044", "0.54805285", "0.5471335", "0.54580235", "0.5436714", "0.5390373", "0.5367722", "0.535418", "0.53518206" ]
0.69948447
0
Toggle automatic scaling of the traces.
def toggle_auto_scale(self, checked): logger.debug("Set auto scale to %s.", checked) self.auto_scale = checked
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _onToggleScale(self, event):\r\n if self.get_yscale() == 'log':\r\n self.set_yscale('linear')\r\n else:\r\n self.set_yscale('log')\r\n self.subplot.figure.canvas.draw_idle()", "def clickAutoscale(self, event):\n self.axes.autoscale_view()", "def scaling_enabled(self):\n return False", "def on_scale (self):\n\t\tif self.has_started:\n\t\t\tself.init_buffers()\n\t\t\tself.redraw_foreground()\n\t\t\tself.redraw_background()\n\n\t\tif self.expand2 == _('Use a scrollbar'):\n\t\t\tself.width = int((self.icon_size * 2 * self.rows + ((self.border_size+self.shadow_size)*2)+15 ) + 24/self.scale)\n\t\t\tself.update_scrollbar()", "def reset_scale(self) -> None:\n self._scale.set(self._start_val)", "def is_scale_enabled(self) -> bool:\r\n ...", "def reset_limits(self):\n self.autoscale = True\n self.pixels.autoscale()", "def scale(self, scale):\n\n self._scale = scale", "def scale(self, scale):\n\n self._scale = scale", "def setScaleMode(self, mode):\n if mode != self.__scale_mode and mode in (self.ScaleModeGlobal, self.ScaleModeLocal):\n self.__scale_mode = mode\n self.__scaled_datasets = None\n self.__axisDomains = None\n self.dataChanged.emit()", "def scale(self):", "def _scale_setter(self, value: float) -> None:\n self.uaxis.scale = value\n self.vaxis.scale = value", "def linux_zoomer_minus(self, event):\n self.canvas.scale(\"all\", event.x, event.y, 0.9, 0.9)\n self.canvas.configure(scrollregion=self.canvas.bbox(\"all\"))", "def setscaling(self, scaling):\n\n self.__scaling = scaling", "def reset_limits(self):\n self.autoscale = True\n self.camera.autoscale()", "def update_axis_scale(self, scale, axis='left'):\n self.plt.getAxis(axis).setScale(scale=scale)", "def change_scaling(self, scales=None, offsets=None) -> None:\n self.points.change_scaling(scales, offsets)\n\n self.header.scales = scales\n self.header.offsets = offsets", "def scale_invert(self):", "def setPlotScaling(x,y):\n dislin.trfscl(x,y)", "def setDoRescale(self, value):\n return self._set(doRescale=value)", "def toggle_maximized(self):\n if self.isMaximized():\n self.showNormal()\n else:\n self.showMaximized()", "def rescale(self):\n # forecast on real data, don't need this anymore\n pass", "def enableZoomOut(self):\n self.zoomOutID = self.canvas.mpl_connect('button_press_event', self.onZoomOut)\n self.master.config(cursor = \"cross\")", "def scaleBoard(self, scale):\n self.scaling = scale\n self.my_font.config(size=25 * self.scaling)\n self.reset_button.config(width=40 * self.scaling, height=40 * self.scaling, borderwidth=2 * self.scaling)\n self.board.updateBoardUI(self.scaling)", "def scale(self, scale):\n \n scale_matrix = wf.scaleMatrix(scale, self.width/2, self.height/2, 0)\n self.transform(scale_matrix)", "def autoScale(self):\n\t\tif self.autoscaleToggle:\n\t\t\tif not self.fullscreenToggle:\n\t\t\t\tmaxSize = (self.get_screen().get_width() - 100, self.get_screen().get_height() - 100)\n\t\t\telse:\n\t\t\t\tmaxSize = (self.get_screen().get_width(), self.get_screen().get_height())\n\t\t\timgSize = [self.currentPixbuf.get_width(), self.currentPixbuf.get_height()]\n\n\t\t\tif imgSize[0] > maxSize[0] or imgSize[1] > maxSize[1]:\n\t\t\t\tscaleFactor = 1.0 * maxSize[0] / imgSize[0]\n\t\t\t\tif imgSize[1] * scaleFactor > maxSize[1]:\n\t\t\t\t\tscaleFactor = 1.0 * maxSize[1] / imgSize[1]\n\t\t\t\tself.scaleFactor = scaleFactor\n\t\t\t\timgSize[0] = int(imgSize[0] * scaleFactor)\n\t\t\t\timgSize[1] = int(imgSize[1] * scaleFactor)", "def windows_zoomer(self, event):\n if event.delta > 0:\n self.canvas.scale(\"all\", event.x, 
event.y, 1.1, 1.1)\n elif event.delta < 0:\n self.canvas.scale(\"all\", event.x, event.y, 0.9, 0.9)\n self.canvas.configure(scrollregion=self.canvas.bbox(\"all\"))", "def toggle(self):\n if not self.hidden and not self.vimiv.commandline.entry.is_visible():\n self.bar.hide()\n else:\n self.bar.show()\n self.hidden = not self.hidden\n # Resize the image if necessary\n if not self.vimiv.image.user_zoomed and self.vimiv.paths and \\\n not self.vimiv.thumbnail.toggled:\n self.vimiv.image.zoom_to(0)", "def autorange(self):\n self._checkfigure()\n self.axes.autoscale_view(True)", "def showscale(self):\n return self['showscale']" ]
[ "0.7329849", "0.7177367", "0.65722066", "0.61756915", "0.61486137", "0.6119862", "0.6056649", "0.6035681", "0.6035681", "0.6006283", "0.5996011", "0.5986425", "0.597488", "0.5941395", "0.5927027", "0.5925209", "0.5909223", "0.5884189", "0.5856268", "0.5849367", "0.5846804", "0.58342034", "0.5830538", "0.582898", "0.57837635", "0.57739615", "0.576577", "0.5754124", "0.573523", "0.5723398" ]
0.72248846
1
Select a cluster by clicking on a spike.
def on_mouse_click(self, e): if 'Control' in e.modifiers: # Get mouse position in NDC. box_id, _ = self.canvas.stacked.box_map(e.pos) channel_id = np.nonzero(self.channel_y_ranks == box_id)[0] # Find the spike and cluster closest to the mouse. db = self.data_bounds # Get the information about the displayed spikes. wt = [(t, s, c, ch) for t, s, c, ch in self._waveform_times if channel_id in ch] if not wt: return # Get the time coordinate of the mouse position. mouse_pos = self.canvas.panzoom.window_to_ndc(e.pos) mouse_time = Range(NDC, db).apply(mouse_pos)[0][0] # Get the closest spike id. times, spike_ids, spike_clusters, channel_ids = zip(*wt) i = np.argmin(np.abs(np.array(times) - mouse_time)) # Raise the select_spike event. spike_id = spike_ids[i] cluster_id = spike_clusters[i] emit('select_spike', self, channel_id=channel_id, spike_id=spike_id, cluster_id=cluster_id) if 'Shift' in e.modifiers: # Get mouse position in NDC. box_id, _ = self.canvas.stacked.box_map(e.pos) channel_id = int(np.nonzero(self.channel_y_ranks == box_id)[0][0]) emit('select_channel', self, channel_id=channel_id, button=e.button)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def click(self, event):\n x, y = self.canvas.invert([event.x, event.y])\n i, j = int(floor(x)), int(floor(y))\n patch = self.get_cell(i, j)\n if patch and patch.state == \"green\":\n cluster = self.get_cluster(patch)\n self.show_cluster(cluster)", "def selectPointsUnderCursor(self):\n spw = self.spw\n sw = spw.windows['Sort']\n #if clear:\n # sw.uslist.clearSelection()\n # sw.nlist.clearSelection()\n x, y = self.cursorPosGL()\n sids = self.pick(x, y, pb=10, multiple=True)\n if sids is None:\n return\n #t0 = time.time()\n spw.SelectSpikes(sids, on=self.selecting)\n #print('SelectSpikes took %.3f sec' % (time.time()-t0))\n if self.selecting == True:\n sat = 0.2 # desaturate\n else: # self.selecting == False\n sat = 1 # resaturate\n self.color(sids, sat=sat)\n self.updateGL()", "def selectPointsUnderCursor(self):\n #spw = self.spw\n #sw = spw.windows['Sort']\n #if clear:\n # sw.uslist.clearSelection()\n # sw.nlist.clearSelection()\n x, y = self.cursorPosGL()\n sids = self.pick(x, y, pb=10, multiple=True)\n if sids == None:\n return\n #t0 = time.time()\n #if not sw.panel.maxed_out:\n # spw.SelectSpikes(sids, on=self.selecting)\n #else:\n # # for speed, while the mouse is held down and the sort panel is maxed out,\n # # don't call SelectSpikes, only call it once when the mouse is released\n self.collected_sids.append(sids)\n #print('SelectSpikes took %.3f sec' % (time.time()-t0))\n if self.selecting == True:\n sat = 0.2 # desaturate\n else: # self.selecting == False\n sat = 1 # resaturate\n self.color(sids, sat=sat)\n self.updateGL()", "def tool_selection_click_ok_btn(driver, class_name, index):\r\n\r\n proximity_button = driver.find_elements_by_class_name(class_name)\r\n proximity_button[index].click()\r\n time.sleep(2)", "def find_cluster(self, id):\n raise NotImplementedError", "def selectVertex(self, addToSelection: bool) -> None:\n ...", "def mk_station_selector(on_select,\n stations=None,\n dst_map=None,\n **kw):\n import ipyleaflet as L\n\n if stations is None:\n stations = get_stations()\n\n stations = [st for st in stations if st.pos is not None]\n pos2st = {st.pos: st for st in stations}\n\n def on_click(event='', type='', coordinates=None):\n pos = tuple(coordinates)\n st = pos2st.get(pos)\n if st is None:\n # should probably log warning here\n print(\"Can't map click to station\")\n return\n\n on_select(st)\n\n markers = [L.Marker(location=st.pos,\n draggable=False,\n title=st.name)\n for st in stations]\n\n cluster = L.MarkerCluster(markers=markers)\n\n if dst_map is None:\n dst_map = L.Map(**kw)\n\n dst_map.add_layer(cluster)\n cluster.on_click(on_click)\n\n return dst_map, cluster", "def test_selecting_nodes_clicking_them_discovered(self):\n with Nodes()as n:\n for node in n.nodes_discovered:\n node.parent.click()\n self.assertTrue(\n node.checkbox.find_element_by_tag_name('input').\n is_selected(),\n 'Discovered node is selected')", "def get_cluster(self,cluster_name,project_id=''):\n print( f'>>>>>>{self.project_id}')\n if project_id == '':\n project_id = self.project_id\n return self.get('{}/groups/{}/clusters/{}'.format(ApiVersion.A1.value,project_id,cluster_name))", "def select(self):\r\n pass", "def find_cluster(self, id: str) -> dto.Cluster:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )", "def select_cell(self, event):\n # Get row and symbols.\n row = event.GetRow()\n symbol1 = self.grid_correlations.GetCellValue(row, self.COLUMN_SYMBOL1)\n symbol2 = self.grid_correlations.GetCellValue(row, 
self.COLUMN_SYMBOL2)\n self.__selected_correlation = [symbol1, symbol2]\n\n self.show_graph(symbol1, symbol2)", "def cluster(self,\n clustering=None,\n algorithm='klustakwik',\n spike_ids=None,\n **kwargs):\n if clustering is None:\n clustering = 'main'\n\n kk2_dir = op.join(self.settings.exp_settings_dir, 'klustakwik2')\n _ensure_dir_exists(kk2_dir)\n\n # Take KK2's default parameters.\n from klustakwik2.default_parameters import default_parameters\n params = default_parameters.copy()\n # Update the PRM ones, by filtering them.\n params.update({k: v for k, v in self.model.metadata.items()\n if k in default_parameters})\n # Update the ones passed to the function.\n params.update(kwargs)\n\n # Original spike_clusters array.\n if self.model.spike_clusters is None:\n n_spikes = (len(spike_ids) if spike_ids is not None\n else self.model.n_spikes)\n spike_clusters_orig = np.zeros(n_spikes, dtype=np.int32)\n else:\n spike_clusters_orig = self.model.spike_clusters.copy()\n\n # HACK: there needs to be one clustering.\n if 'empty' not in self.model.clusterings:\n self.model.add_clustering('empty', spike_clusters_orig)\n\n # Instantiate the KlustaKwik instance.\n kk = KlustaKwik(**params)\n\n # Save the current clustering in the Kwik file.\n @kk.connect\n def on_iter(sc):\n # Update the original spike clusters.\n spike_clusters = spike_clusters_orig.copy()\n spike_clusters[spike_ids] = sc\n # Save to a text file.\n path = op.join(kk2_dir, 'spike_clusters.txt')\n # Backup.\n if op.exists(path):\n shutil.copy(path, path + '~')\n np.savetxt(path, spike_clusters, fmt='%d')\n\n info(\"Running {}...\".format(algorithm))\n # Run KK.\n sc = kk.cluster(model=self.model, spike_ids=spike_ids)\n info(\"The automatic clustering process has finished.\")\n\n # Save the results in the Kwik file.\n spike_clusters = spike_clusters_orig.copy()\n spike_clusters[spike_ids] = sc\n\n # Add a new clustering and switch to it.\n if clustering in self.model.clusterings:\n self.change_clustering('empty')\n self.model.delete_clustering(clustering)\n self.model.add_clustering(clustering, spike_clusters)\n\n # Copy the main clustering to original (only if this is the very\n # first run of the clustering algorithm).\n if clustering == 'main':\n self.model.copy_clustering('main', 'original')\n self.change_clustering(clustering)\n\n # Set the new clustering metadata.\n params = kk.params\n params['version'] = kk.version\n metadata = {'{}_{}'.format(algorithm, name): value\n for name, value in params.items()}\n self.model.clustering_metadata.update(metadata)\n self.save()\n info(\"The clustering has been saved in the \"\n \"`{}` clustering in the `.kwik` file.\".format(clustering))\n self.model.delete_clustering('empty')\n return sc", "def select(self):\n pass", "def select(self):\n pass", "def on_cell_clicked(self, modelIndex):\n self.catalogue_map.select([self.catalogue_model.event_at(modelIndex)])", "def assignSpikes(clusts, df, show=False, force=True):\n if 'clust_inds' in df.columns and force is False:\n print('Data frame already contains clust_inds')\n return\n \n def assignTms(clusts, tms):\n # Assign a delta_tms to cluster1 or cluster2\n assns = [abs(np.mean(clusts[c])-tms) for c in range(len(clusts))]\n return assns.index(min(assns))\n \n # Assign each spike time to a cluster\n clust_tms = [ [] for c in clusts]\n for t in range(len(df.times)-1):\n t_clust = assignTms(clusts, df.times.values[t+1]-df.times.values[t])\n clust_tms[t_clust].append(df.times.values[t])\n \n # Group spikes from same spike type together\n type_tms = 
[]\n for c in range(len(clust_tms)):\n for t in clust_tms[c]:\n type_tms.append([t, c]) # [spk tms, clust index]\n \n # Group these together \n clust_id = []\n for i in range(df.shape[0]):\n if df.iloc[i].times in [k[0] for k in type_tms]:\n clust_id.append(type_tms[[k[0] for k in type_tms].index(df.iloc[i].times)][1])\n else: # Not matching spike found -- happens w/ isolated spikes\n clust_id.append(np.nan)\n \n df['clust_inds'] = clust_id\n print([clust_id.count(j) for j in list(set(clust_id))], list(set(clust_id)))\n if show: # Show the cluter spikes\n for c in range(max(clust_id)+1): # Plot cluster spikes individually\n temp_spikes = df[df['clust_inds']==c]['times']\n plt.plot(temp_spikes, [c+1 for i in temp_spikes], '|', \n color=['blue', 'red'][c])\n plt.ylim([0,3])\n plt.show()\n \n return df", "def change_clustering(self, clustering):\n self._clustering = clustering\n self.model.clustering = clustering\n info(\"Switched to `{}` clustering.\".format(clustering))\n self.emit('open')", "def pressSCV(self):\n\t\t\t\n\t\tm_name = self.ui.findChild(QWidget, \"m_name\")\n\t\tm_name.setText(\"Sensorinen neurografia\")\n\t\t\n\t\t\n\t\tprint \"SCV button pressed\"\n\t\t# Make a database query and draw a graph and distribution\n\t\t\n\t\t# set every checkbox back to the initial state\n\t\t\n\t\tif self.patient_chosen:\n\t\t\t# Make a database query which fetches the patient's SCV data.\n\t\t\tprint \"showing patient SCV data\"\n\t\t\n\t\tself.current_measurement = \"SCV\"\t\n\t\treturn", "def select_cluster(self, clusters):\n min_sim = float(\"inf\")\n min_cluster = None\n \n for cluster in clusters:\n sim = 0.0\n for index, value in cluster.centroid.items():\n sim += value * value\n \n if sim < min_sim:\n min_sim = sim\n min_cluster = cluster\n \n return min_cluster", "def get_one_cluster_by_name(ctx, cluster_name, project_name):\n project = ctx.obj.groups.byName[project_name].get().data\n cluster = ctx.obj.groups[project.id].clusters[cluster_name].get()\n pprint(cluster.data)", "def createNewcluster(self):\n self.segsChanged = True\n\n # There should be at least one segment selected to proceed\n proceed = False\n for ix in range(len(self.picbuttons)):\n if self.picbuttons[ix].mark == 'yellow':\n proceed = True\n break\n\n if proceed:\n # User to enter new cluster name\n #newLabel, ok = QInputDialog.getText(self, 'Cluster name', 'Enter unique Cluster Name\\t\\t\\t')\n #if not ok:\n #self.completeChanged.emit()\n #return\n names = [self.tboxes[ID].text() for ID in range(self.nclasses)]\n nextNumber = 0\n newLabel = 'Cluster_'+str(nextNumber)\n names.append(newLabel)\n while len(names) != len(set(names)):\n del(names[-1])\n nextNumber += 1\n newLabel = 'Cluster_'+str(nextNumber)\n names.append(newLabel)\n\n # create new cluster ID, label\n newID = len(self.clusters)\n self.clusters[newID] = newLabel\n self.nclasses += 1\n print('after adding new cluster: ', self.clusters)\n\n for ix in range(len(self.picbuttons)):\n if self.picbuttons[ix].mark == 'yellow':\n self.segments[ix][-1] = newID\n self.picbuttons[ix].mark = 'green'\n\n # Delete clusters with no members left and update self.clusters before adding the new cluster\n todelete = []\n for ID, label in self.clusters.items():\n empty = True\n for seg in self.segments:\n if seg[-1] == ID:\n empty = False\n break\n if empty:\n todelete.append(ID)\n\n # Generate new class labels\n if len(todelete) > 0:\n keys = [i for i in range(self.nclasses) if i not in todelete] # the old keys those didn't delete\n # print('old keys left: ', keys)\n nclasses 
= self.nclasses - len(todelete)\n max_label = nclasses - 1\n labels = []\n c = self.nclasses - 1\n while c > -1:\n if c in keys:\n labels.append((c, max_label))\n max_label -= 1\n c -= 1\n\n # print('[old, new] labels')\n labels = dict(labels)\n print(labels)\n\n # update clusters dictionary {ID: cluster_name}\n clusters = {}\n for i in keys:\n clusters.update({labels[i]: self.clusters[i]})\n\n print('before: ', self.clusters)\n self.clusters = clusters\n self.nclasses = nclasses\n print('after: ', self.clusters)\n\n # update the segments\n for seg in self.segments:\n seg[-1] = labels[seg[-1]]\n # redraw the buttons\n self.clearButtons()\n self.updateButtons()\n #self.cmbUpdateSeg.addItem(newLabel)\n self.completeChanged.emit()\n else:\n msg = SupportClasses_GUI.MessagePopup(\"t\", \"Select\", \"Select calls to make the new cluster\")\n msg.exec_()\n self.completeChanged.emit()\n return", "def select_sweepstakes(self):\n pass", "def select(self, target):", "def show_cluster(name: str) -> Cluster:\n environment = EnvironmentProvider().environment\n return environment.clusters[name]", "def ksel(self, k: int) -> Status:\n result = self._read_inline(f\"ksel({k})\")\n return Status(result)", "def poll_cluster(self, server, obj, name):\n\n return self._poll_group('cluster', server, obj, name)", "def select_desired_index_from_the_list(self, index_name):\n select_index = \"(//*[name()='svg'][@class='css-8mmkcg'])\"\n select_index_sitem = self.locator_finder_by_xpath(select_index)\n select_index_sitem.click()\n time.sleep(1)\n\n element = self.locator_finder_by_xpath(f\"//*[contains(text(), '{index_name}')]\")\n actions = ActionChains(self.driver)\n # Move the mouse pointer to the element containing the text\n actions.move_to_element(element)\n # Perform a click action\n actions.click().perform()", "def select_critical_for_failover_group_select_2(driver):\n driver.find_element_by_xpath('//mat-checkbox[@ix-auto=\"checkbox__Critical\"]').click()\n driver.find_element_by_xpath('//mat-select[@ix-auto=\"select__Failover Group\"]').click()\n wait_on_element(driver, 0.5, 5, '//mat-option[@ix-auto=\"option__Failover Group_2\"]')\n driver.find_element_by_xpath('//mat-option[@ix-auto=\"option__Failover Group_2\"]').click()", "def find_kubernetes_cluster(self, id: str) -> dto.KubernetesCluster:\n raise errors.UnsupportedOperationError(\n \"Operation not supported for provider '{}'\".format(self.provider_name)\n )" ]
[ "0.64221257", "0.58902055", "0.56319547", "0.55411613", "0.5464253", "0.5434376", "0.5432982", "0.5413814", "0.54018533", "0.5387682", "0.5354226", "0.5342046", "0.5314084", "0.5284342", "0.5284342", "0.5258224", "0.52542984", "0.5254069", "0.5253677", "0.5247113", "0.5224413", "0.5219483", "0.52075285", "0.520349", "0.52015686", "0.5191905", "0.5186228", "0.5178217", "0.51689595", "0.5162985" ]
0.6448263
0
Overloading the addition operator for particles types
def __add__(self, other): if isinstance(other, type(self)): # always create new particles, since otherwise c = a + b changes a as well! p = particles(self) p.pos[:] = self.pos + other.pos p.vel[:] = self.vel + other.vel p.m = self.m p.q = self.q return p else: raise DataError("Type error: cannot add %s to %s" % (type(other), type(self)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __add__(self, other: PointOrIterableOrScalar) -> PointType:\n return self.__op(other, operator.add)", "def __iadd__(self, other: PointOrIterableOrScalar) -> PointType:\n return self.__iop(other, operator.add)", "def __add__(self, other):\r\n if isinstance(other, vec4):\r\n return vec4(self.x+other.x, self.y+other.y, self.z+other.z, self.w+other.w)\r\n else:\r\n raise TypeError, \"unsupported operand type for +\"", "def __iadd__(self, other):\r\n if isinstance(other, vec4):\r\n self.x+=other.x\r\n self.y+=other.y\r\n self.z+=other.z\r\n self.w+=other.w\r\n return self\r\n else:\r\n raise TypeError, \"unsupported operand type for +=\"", "def __add__(self, rhs: Union[float]):\n if isinstance(rhs, Pt):\n return Pt(self.x + rhs.x, self.y + rhs.y)\n else:\n return Pt(self.x + rhs, self.y + rhs)", "def ADD (self, n1, n2):", "def __add__(self, p):\n return Point(self.x + p.x, self.y + p.y)", "def add(self,particle):\n\n if not self.check_def(['E','px','py','pz']): \n sys.exit('Particle error: Quadri impulsion not define')\n if not particle.check_def(['E','px','py','pz']): \n sys.exit('Particle error: Quadri impulsion not define')\n \n neut=part_quadvec(self.E+particle.E,self.px+particle.px,self.py+particle.py,self.pz+particle.pz)\n neut.cal_mass()\n return neut", "def __add__(self,other):\n return Vector(self.x+other.x,self.y+other.y,self.z+other.z)", "def __add__(self,other):\n return Vector(self.x + other.x, self.y+other.y)\n pass", "def __iadd__(self,other):\n return Vector(self.x + other.x, self.y + other.y)\n pass", "def __add__(self,other):\n if isinstance(other, point):\n return self.add_points(other)\n else:\n return self.add_points_tuple(other)", "def __add__(self, other):\n raise NotImplementedError", "def __add__(self, other):\n raise NotImplementedError", "def __add__(self, other):\n return (self.x + other.x, self.y + other.y)", "def __add__(self, other):\n return Vector(self.x + other.x, self.y + other.y)", "def __iadd__(self, other):\n\n if isinstance(other, float):\n self.iadd_scalar(other)\n else:\n self.iadd(other)", "def __add__(self, other):\n cls = self.__class__\n return cls(self.x+other.x, self.y+other.y, self.z+other.z)", "def __add__(self, other):\n return add_mps(self, other)", "def __add__(self, other):\n if type(other) == int:\n other = float(other)\n\n if type(other) == float:\n other = Tensor(other)\n\n return F.Add.apply(self, other)", "def __add__(self, p: np.ndarray):\n return Quaternion(self.to_array() + p)", "def __add__(self, other):\n pass", "def __add__(self, other):\n pass", "def add(self, a, b):\n return a + b", "def vars_add ( self , var1 , var2 , name = '' , title = '' ) :\n \n f1 = isinstance ( var1 , num_types )\n f2 = isinstance ( var2 , num_types )\n\n if f1 and f2 :\n res = float ( var1 ) + float ( var2 )\n return ROOT.RooRealConstant.value ( res ) \n elif f1 :\n ## shortcut \n if 0 == var1 : return var2 ## SHORTCUT\n #\n var1 = ROOT.RooRealConstant.value ( var1 ) \n return self.vars_add ( var1 , var2 , name , title )\n elif f2 :\n ## shortcut \n if 0 == var2 : return var1 ## SHORTCUT\n #\n var2 = ROOT.RooRealConstant.value ( var2 ) \n return self.vars_add ( var1 , var2 , name , title )\n \n self.aux_keep.append ( var1 )\n self.aux_keep.append ( var2 )\n\n result = Ostap.MoreRooFit.Addition ( var1 , var2 )\n self.aux_keep.append ( result )\n \n return result", "def __add__(self, value):\r\n if isinstance(value, (int, dec.Decimal)):\r\n return self.__class__(self._real + value, self._imag)\r\n elif isinstance(value, self.__class__):\r\n return 
self.__class__(self._real + value._real, self._imag + value._imag)\r\n raise TypeError(\r\n 'unsupported operand type(s) for +: {!r} and {!r}'.format(\r\n self.__class__.__name__, value.__class__.__name__\r\n )\r\n )", "def __add__( self, other ) :\n\n try :\n other = float( other )\n c_ls = self.copy( )\n for l, c_l in enumerate( c_ls ) : c_ls.coefficients[l] += other\n except :\n self.checkSameSeriesType( other )\n c_l1, c_l2 = self, other\n if( len( self ) < len( other ) ) : c_l1, c_l2 = other, self\n c_ls = c_l1.copy( )\n for l, c_l in enumerate( c_l2 ) : c_ls.coefficients[l] += c_l\n return( c_ls )", "def __add__(self, other):\n return asarray(add(self, other))", "def __iadd__(self, other):\n self.x += other.x\n self.y += other.y\n return self", "def __add__(self, other):\n if len( self) != len(other):\n raise ValueError('Dimensions must match.')\n result = Vector(len(self))\n for i in range(len(self)):\n result[i] = self[i] + other[i]\n return result" ]
[ "0.7108404", "0.70331186", "0.7007142", "0.69473356", "0.6846601", "0.6845393", "0.68245333", "0.6748368", "0.671954", "0.66836524", "0.6639666", "0.6628716", "0.6572368", "0.6572368", "0.65575284", "0.65301085", "0.65287226", "0.6522699", "0.64960563", "0.64930576", "0.649", "0.64833504", "0.64833504", "0.647256", "0.64678484", "0.6467019", "0.64559925", "0.6436312", "0.64347565", "0.6434374" ]
0.7784703
0
Overloading the subtraction operator for particles types
def __sub__(self, other): if isinstance(other, type(self)): # always create new particles, since otherwise c = a - b changes a as well! p = particles(self) p.pos[:] = self.pos - other.pos p.vel[:] = self.vel - other.vel p.m = self.m p.q = self.q return p else: raise DataError("Type error: cannot subtract %s from %s" % (type(other), type(self)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __sub__(self, other):\r\n if isinstance(other, vec4):\r\n return vec4(self.x-other.x, self.y-other.y, self.z-other.z, self.w-other.w)\r\n else:\r\n raise TypeError, \"unsupported operand type for -\"", "def __sub__(self, other):\n\t\tif isinstance(other, int) or isinstance(other, float):\n\t\t\t# Maintain state of self and create new trace variable new_var\n\t\t\tnew_var = Var(self.val, self.der)\n\t\t\treturn new_var.__add__(-other)\n\t\treturn (-other).__add__(self)", "def __isub__(self, other):\r\n if isinstance(other, vec4):\r\n self.x-=other.x\r\n self.y-=other.y\r\n self.z-=other.z\r\n self.w-=other.w\r\n return self\r\n else:\r\n raise TypeError, \"unsupported operand type for -=\"", "def __sub__(self, other: PointOrIterableOrScalar) -> PointType:\n return self.__op(other, operator.sub)", "def __sub__(self,other):\n return Vector(self.x - other.x, self.y-other.y)\n pass", "def vars_subtract ( self , var1 , var2 , name = '' , title = '' ) :\n\n f1 = isinstance ( var1 , num_types )\n f2 = isinstance ( var2 , num_types )\n\n if f1 and f2 :\n ##\n res = float ( var1 ) - float ( var2 )\n return ROOT.RooRealConstant.value ( res ) \n elif f1 :\n ## \n var1 = ROOT.RooRealConstant.value ( var1 ) \n return self.vars_subtract ( var1 , var2 , name , title )\n elif f2 :\n ## shortcut \n if 0 == var2 : return var1 ## SHORTCUT\n #\n var2 = ROOT.RooRealConstant.value ( var2 ) \n return self.vars_subtract ( var1 , var2 , name , title )\n\n self.aux_keep.append ( var1 )\n self.aux_keep.append ( var2 )\n\n result = Ostap.MoreRooFit.Subtraction ( var1 , var2 )\n self.aux_keep.append ( result )\n \n return result", "def __sub__( self, other ) :\n\n try :\n other = float( other )\n c_ls = self.copy( )\n for l, c_l in enumerate( c_ls ) : c_ls.coefficients[l] -= other\n except :\n self.checkSameSeriesType( other )\n c_l1, c_l2 = self.coefficients, other.coefficients\n if( len( self ) < len( other ) ) : c_l1, c_l2 = c_l2, c_l1\n c_ls = c_l1.copy( )\n for l, c_l in enumerate( c_l2 ) : c_ls.coefficients[l] += c_l\n return( c_ls )", "def __sub__(self, other):\n\n if isinstance(other, type(self)):\n # always create new fields, since otherwise c = a - b changes a as well!\n p = fields(self)\n p.elec[:] = self.elec - other.elec\n p.magn[:] = self.magn - other.magn\n return p\n else:\n raise DataError(\"Type error: cannot subtract %s from %s\" % (type(other), type(self)))", "def __sub__(self, other):\n return self.subtract(other)", "def __sub__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Volt(self.volts - other, self.volt_unit, self.freq, self.freq_unit)\n if self.volt_unit != other.volt_unit:\n raise ArithmeticError(f\"The objects' volt units {self.volt_unit} and {other.volt_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n volt_sum = self.volts - other.volts\n return Volt(volt_sum, self.volt_unit, self.freq, self.freq_unit)", "def __sub__(self, argument):\n try:\n argument = type(self)(argument)\n except Exception:\n return NotImplemented\n return type(self)(float(self) - float(argument))", "def __sub__(self, other):\n return Vector([c1 - c2 for (c1, c2) in zip(self.components, other.components)])", "def __sub__(self, other):\n return self.__add__(other.__neg__())", "def __sub__(self, 
other):\n return self + other.__neg__()", "def __sub__(self, other):\n return self + other.__neg__()", "def __sub__(self,that):\n #return self.__opExpand1(that, np.subtract)\n return self.__opExpand2(that,np.subtract)", "def __isub__(self, other: PointOrIterableOrScalar) -> PointType:\n return self.__iop(other, operator.sub)", "def minus(self, a, b):\n return a - b", "def __sub__(self, other):\n tmp = VectorHeat1D(self.size)\n tmp.set_values(self.get_values() - other.get_values())\n return tmp", "def __sub__(self, other):\n return (self.x - other.x, self.y - other.y)", "def __sub__(self: _TT, other: _TT) -> _TT:\n if type(self) != type(other):\n raise TypeError(\"Types do not match\")\n return type(self)(str(self.value - other.value),\"\")", "def __sub__(self, other):\n return Point([c1 - c2 for (c1, c2) in zip(self, other)])", "def __sub__(self,other):\n self._obj['u'] -= other._obj['u']\n self._obj['v'] -= other._obj['v']\n return self._obj", "def __sub__(self, other):\n if isinstance(other, Vec2Array):\n if len(self) == len(other):\n return self.from_points(\n a - b for a, b in zip(self, other))\n else:\n raise ValueError(\n \"cannot subtract arrays with different lengths\")\n else:\n try:\n b = Vec2(*other)\n except Exception:\n return NotImplemented\n return self.from_points(a - b for a in self)", "def __sub__(self, other):\n return self.__add__(other * -1)", "def __sub__(self, other):\n return self.__add__(other * -1)", "def __sub__(self, other):\n if isinstance(other, Vector):\n a = self._ar - other._ar\n else:\n a = self._ar - numpy.array(other)\n return Vector(a)", "def __sub__(self, other):\n try:\n ox, oy = other\n except Exception:\n return NotImplemented\n return tuple.__new__(Vec2, (self[0] - ox, self[1] - oy))", "def __neg__(self):\n return UnaryMinus(self)", "def __sub__(self, p: np.ndarray):\n return Quaternion(self.to_array() - p)" ]
[ "0.7190107", "0.69218826", "0.6909827", "0.6842003", "0.6795763", "0.6745326", "0.6738838", "0.666937", "0.6658452", "0.6623842", "0.66205114", "0.6617514", "0.66050494", "0.65911543", "0.65911543", "0.658858", "0.65842706", "0.6576843", "0.65648913", "0.654615", "0.654368", "0.6481214", "0.64724463", "0.64706707", "0.64655066", "0.64655066", "0.64350855", "0.6430663", "0.6426268", "0.641007" ]
0.7674791
0
Overloading the addition operator for fields types
def __add__(self, other): if isinstance(other, type(self)): # always create new fields, since otherwise c = a - b changes a as well! p = fields(self) p.elec[:] = self.elec + other.elec p.magn[:] = self.magn + other.magn return p else: raise DataError("Type error: cannot add %s to %s" % (type(other), type(self)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __add__(self, other: Any) -> ColumnOperators:\n return self.operate(add, other)", "def __add__(self, other):\n try:\n total = {self.var: 1, other.var: 1}\n return AutoDiffReverse(self.val + other.val, None, der=total)\n except AttributeError:\n return AutoDiffReverse(self.val + other, None, {self.var: 1})", "def __add__(self, value):\r\n if isinstance(value, (int, dec.Decimal)):\r\n return self.__class__(self._real + value, self._imag)\r\n elif isinstance(value, self.__class__):\r\n return self.__class__(self._real + value._real, self._imag + value._imag)\r\n raise TypeError(\r\n 'unsupported operand type(s) for +: {!r} and {!r}'.format(\r\n self.__class__.__name__, value.__class__.__name__\r\n )\r\n )", "def __add__(self: _TT, other: _TT) -> _TT:\n if type(self) != type(other):\n raise TypeError(\"Types do not match\")\n return type(self)(str(self.value + other.value),\"\")", "def __radd__(self, value):\r\n if isinstance(value, (int, dec.Decimal)):\r\n return self.__class__(value + self._real, self._imag)\r\n elif isinstance(value, self.__class__):\r\n return self.__class__(value._real + self._real, value._imag + self._imag)\r\n raise TypeError(\r\n 'unsupported operand type(s) for +: {!r} and {!r}'.format(\r\n value.__class__.__name__, self.__class__.__name__\r\n )\r\n )", "def __iadd__(self, other):\n\n return self + other", "def __iadd__(self, other):\n\n if isinstance(other, float):\n self.iadd_scalar(other)\n else:\n self.iadd(other)", "def __add__(self, other):\r\n return self.add(other)", "def __add__(self, other):\n return self.__class__(\n {\n name:\n self.__getattribute__(name) + other.__getattribute__(name)\n for name in self._fields\n }\n )", "def __radd__(self, other):\n return self + other", "def __radd__(self, other):\n return self + other", "def __iadd__(self, other):\r\n if isinstance(other, vec4):\r\n self.x+=other.x\r\n self.y+=other.y\r\n self.z+=other.z\r\n self.w+=other.w\r\n return self\r\n else:\r\n raise TypeError, \"unsupported operand type for +=\"", "def __add__(self, other):\n pass", "def __add__(self, other):\n pass", "def __add__(self, other):\n return self.add(other)", "def __add__(self, other):\n if isinstance(other, float) or isinstance(other, int):\n return Complex(self._reNum + other, self._imNum)\n if isinstance(other, complex):\n return Complex(self._reNum + other.real, self._imNum + other.imag)\n return Complex(self._reNum + other._reNum, self._imNum + other._imNum)", "def __add__(self, other: Any) -> TypeValue:\n if isinstance(other, np.ndarray):\n return other + float(self)\n\n return self._like_self_from_float(\n float(self) + self._other_same_units(other)\n )", "def __add__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during addition to {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Plus(self, other)", "def __add__(self, other):\n if isinstance(other, EncryptedNumber):\n return self._add_encrypted(other)\n else:\n return self._add_scalar(other)", "def __radd__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(add, other)", "def __radd__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during addition to {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Plus(other, self)", "def plus(self, a, b):\n return a + b", "def __add__(self, other):\n raise NotImplementedError", "def __add__(self, other):\n raise NotImplementedError", "def __add__(self, 
other):\n if type(other) == int:\n other = float(other)\n\n if type(other) == float:\n other = Tensor(other)\n\n return F.Add.apply(self, other)", "def __add__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Amp(self.amps + other, self.amp_unit, self.freq, self.freq_unit)\n if self.amp_unit != other.amp_unit:\n raise ArithmeticError(f\"The objects' amp units {self.amp_unit} and {other.amp_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n amp_sum = self.amps + other.amps\n return Amp(amp_sum, self.amp_unit, self.freq, self.freq_unit)", "def add(self, a, b):\n return a + b", "def __iadd__(self, other):\n return (hasattr(other, '__iter__') and self.applyMaterFunc or self.applyScalarFunc)(other, '__add__')", "def _add(self, other):\n raise NotImplementedError(\n \"{} does not support addition\".format(type(self)))", "def __add__(self,other):\n self._obj['u'] += other._obj['u']\n self._obj['v'] += other._obj['v']\n return self._obj" ]
[ "0.70417506", "0.69787914", "0.688292", "0.6869126", "0.6797251", "0.67966515", "0.67823577", "0.6694425", "0.66731805", "0.6661316", "0.6661316", "0.66475755", "0.6635631", "0.6635631", "0.6613988", "0.66052085", "0.65744835", "0.6570402", "0.6558253", "0.65581644", "0.65564525", "0.65500927", "0.6512995", "0.6512995", "0.6503215", "0.6497609", "0.64697087", "0.64539814", "0.6446988", "0.6446401" ]
0.7395196
0
Overloading the subtraction operator for fields types
def __sub__(self, other): if isinstance(other, type(self)): # always create new fields, since otherwise c = a - b changes a as well! p = fields(self) p.elec[:] = self.elec - other.elec p.magn[:] = self.magn - other.magn return p else: raise DataError("Type error: cannot subtract %s from %s" % (type(other), type(self)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __sub__(self, other):\n try:\n total = {self.var: 1, other.var: -1}\n return AutoDiffReverse(self.val - other.val, None, der=total)\n except AttributeError:\n return AutoDiffReverse(self.val - other, None, {self.var: 1})", "def __rsub__(self, other):\n try:\n total = {self.var: 1, other.var: -1}\n return AutoDiffReverse(self.val - other.val, None, der=total)\n except AttributeError:\n return AutoDiffReverse(self.val - other, None, {self.var: -1})", "def __sub__(self: _TT, other: _TT) -> _TT:\n if type(self) != type(other):\n raise TypeError(\"Types do not match\")\n return type(self)(str(self.value - other.value),\"\")", "def minus(self, a, b):\n return a - b", "def __rsub__(self, other):\n\t\treturn (-self).__add__(float(other))", "def __rmul__(self, other):\n\n if isinstance(other, float):\n # always create new fields, since otherwise c = a - b changes a as well!\n p = fields(self)\n p.elec[:] = other * self.elec\n p.magn[:] = other * self.magn\n return p\n else:\n raise DataError(\"Type error: cannot multiply %s with %s\" % (type(other), type(self)))", "def __sub__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Volt(self.volts - other, self.volt_unit, self.freq, self.freq_unit)\n if self.volt_unit != other.volt_unit:\n raise ArithmeticError(f\"The objects' volt units {self.volt_unit} and {other.volt_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n volt_sum = self.volts - other.volts\n return Volt(volt_sum, self.volt_unit, self.freq, self.freq_unit)", "def __sub__(self, other):\n return self.__add__(other.__neg__())", "def __sub__(self, other):\n return self.subtract(other)", "def subtract(self, other, label=None, atol=1.0E-12):\n # check the two solutions share the same grid\n assert numpy.allclose(self.x, other.x, atol=atol)\n assert numpy.allclose(self.y, other.y, atol=atol)\n assert self.values.shape == other.values.shape\n if not label:\n label = self.label + '-subtracted'\n return Field(label=label,\n time_step=self.time_step,\n x=self.x, y=self.y,\n values=self.values - other.values)", "def __sub__(self, other):\r\n if isinstance(other, vec4):\r\n return vec4(self.x-other.x, self.y-other.y, self.z-other.z, self.w-other.w)\r\n else:\r\n raise TypeError, \"unsupported operand type for -\"", "def __isub__(self, other):\r\n if isinstance(other, vec4):\r\n self.x-=other.x\r\n self.y-=other.y\r\n self.z-=other.z\r\n self.w-=other.w\r\n return self\r\n else:\r\n raise TypeError, \"unsupported operand type for -=\"", "def __sub__(self, other):\n\t\tif isinstance(other, int) or isinstance(other, float):\n\t\t\t# Maintain state of self and create new trace variable new_var\n\t\t\tnew_var = Var(self.val, self.der)\n\t\t\treturn new_var.__add__(-other)\n\t\treturn (-other).__add__(self)", "def __sub__(self, value):\n out = self.copy()\n out.addMath(Query.Math.Subtract, value)\n return out", "def __sub__(self, other):\n if hasattr(other, '_d'):\n return (self.micros() - other.micros()) / 86400000000.0\n else:\n return self.__add__(-(other))", "def __sub__(self, other):\n return self + other.__neg__()", "def __sub__(self, other):\n return self + other.__neg__()", "def subtract(self, other):\n return self.add(other.neg())", "def __sub__(self,other):\n self._obj['u'] -= 
other._obj['u']\n self._obj['v'] -= other._obj['v']\n return self._obj", "def __sub__(self, other):\n return self.__add__(other * -1)", "def __sub__(self, other):\n return self.__add__(other * -1)", "def __neg__(self):\n return UnaryMinus(self)", "def __sub__(self, other):\n if isinstance(other, Factorization):\n other = other.value()\n return self.value() - other", "def __sub__(self, other: Any) -> ColumnOperators:\n return self.operate(sub, other)", "def __sub__(self, argument):\n try:\n argument = type(self)(argument)\n except Exception:\n return NotImplemented\n return type(self)(float(self) - float(argument))", "def __sub__( self, other ) :\n\n try :\n other = float( other )\n c_ls = self.copy( )\n for l, c_l in enumerate( c_ls ) : c_ls.coefficients[l] -= other\n except :\n self.checkSameSeriesType( other )\n c_l1, c_l2 = self.coefficients, other.coefficients\n if( len( self ) < len( other ) ) : c_l1, c_l2 = c_l2, c_l1\n c_ls = c_l1.copy( )\n for l, c_l in enumerate( c_l2 ) : c_ls.coefficients[l] += c_l\n return( c_ls )", "def __sub__(self, other):\n if isinstance(other, int) or isinstance(other, float):\n return Amp(self.amps - other, self.amp_unit, self.freq, self.freq_unit)\n if self.amp_unit != other.amp_unit:\n raise ArithmeticError(f\"The objects' amp units {self.amp_unit} and {other.amp_unit} are not the same.\")\n if self.freq != other.frequency:\n raise ArithmeticError(f\"The objects' frequency {self.freq} and {other.frequency} are not the same.\")\n if self.freq_unit != other.freq_unit:\n raise ArithmeticError(f\"The objects' frequency units {self.freq_unit} and {other.freq_unit} \"\n f\"are not the same.\")\n amp_sum = self.amps - other.amps\n return Amp(amp_sum, self.amp_unit, self.freq, self.freq_unit)", "def subtract(*args):\n #convert args to floats so we can do the maths\n values = list(args)\n for x in range(len(values)):\n values[x] = float(values[x])\n \n difference = str(ft.reduce(oper.sub,values))\n\n return difference", "def vars_subtract ( self , var1 , var2 , name = '' , title = '' ) :\n\n f1 = isinstance ( var1 , num_types )\n f2 = isinstance ( var2 , num_types )\n\n if f1 and f2 :\n ##\n res = float ( var1 ) - float ( var2 )\n return ROOT.RooRealConstant.value ( res ) \n elif f1 :\n ## \n var1 = ROOT.RooRealConstant.value ( var1 ) \n return self.vars_subtract ( var1 , var2 , name , title )\n elif f2 :\n ## shortcut \n if 0 == var2 : return var1 ## SHORTCUT\n #\n var2 = ROOT.RooRealConstant.value ( var2 ) \n return self.vars_subtract ( var1 , var2 , name , title )\n\n self.aux_keep.append ( var1 )\n self.aux_keep.append ( var2 )\n\n result = Ostap.MoreRooFit.Subtraction ( var1 , var2 )\n self.aux_keep.append ( result )\n \n return result", "def __sub__(self,that):\n #return self.__opExpand1(that, np.subtract)\n return self.__opExpand2(that,np.subtract)" ]
[ "0.68123215", "0.65827966", "0.658205", "0.65601283", "0.6536032", "0.6514197", "0.6397643", "0.6376854", "0.6373891", "0.63729715", "0.6359867", "0.6323568", "0.63116324", "0.6266902", "0.62532926", "0.624705", "0.624705", "0.6241416", "0.62308973", "0.6229982", "0.6229982", "0.62059516", "0.62044346", "0.6200343", "0.61568284", "0.6128242", "0.6122729", "0.61196035", "0.6110786", "0.60993844" ]
0.7506139
0
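Aside (illustrative, not a dataset row): the four rows above each pair a one-line docstring with a pySDC-style operator overload that builds a fresh particles/fields object so that c = a + b never mutates a. A minimal, self-contained sketch of that pattern follows; the simplified constructor and the NumPy-array attributes are assumptions made for illustration and do not match the library's real signatures.

    import numpy as np

    class particles:
        # Toy stand-in for the particles data type referenced above; the real class
        # copies pos/vel/m/q from another particles object rather than taking arrays.
        def __init__(self, pos, vel, m=1.0, q=1.0):
            self.pos = np.asarray(pos, dtype=float)
            self.vel = np.asarray(vel, dtype=float)
            self.m = m
            self.q = q

        def __add__(self, other):
            if not isinstance(other, particles):
                raise TypeError("cannot add %s to %s" % (type(other), type(self)))
            # return a fresh object so that c = a + b leaves a and b untouched
            return particles(self.pos + other.pos, self.vel + other.vel, self.m, self.q)

        def __sub__(self, other):
            if not isinstance(other, particles):
                raise TypeError("cannot subtract %s from %s" % (type(other), type(self)))
            return particles(self.pos - other.pos, self.vel - other.vel, self.m, self.q)

    a = particles([0.0, 1.0], [1.0, 0.0])
    b = particles([2.0, 3.0], [0.5, 0.5])
    c = a + b            # new object; a.pos is unchanged
    d = c - b            # recovers a's values
    print(c.pos, a.pos, d.pos)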
The names of the roles performed by the model. This is required by QtQuick
def roleNames(self): return self._roles
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def roles(self):\n return self._roles", "def roles(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"roles\")", "def roles(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"roles\")", "def object_role_names(self):\n return [object_role.name for object_role in self.object_roles]", "def roles(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"roles\")", "def getRoles(self):", "def roles(self):\r\n return self._roles_str.split(\",\")", "def getRoles(self):\n return [self.getRole(), {\"roleName\":\"policajti\", \"roleTitle\":\"Svestky\"}]", "def get_roles(self):\n\t\tif not self.roles:\n\t\t\tself.roles = get_roles(self.name)\n\t\treturn self.roles", "def roles(self) -> List[str]:\n\n role_list = []\n for spec in self.specs.values():\n role = spec.role()\n if role not in role_list:\n role_list.append(role)\n return role_list", "def rolenames(self):\n try:\n return self.roles.split(',')\n except Exception:\n return []", "def roles(self) -> Optional[Sequence['outputs.AssessmentRole']]:\n return pulumi.get(self, \"roles\")", "def roles(self):\n params = {\n \"f\" : \"json\"\n }\n uURL = self._url + \"/roles\"\n return self._con.get(path=uURL, params=params)", "def listRoleInfo(self):\n return self._roles.values()", "def list(self):\n return self.client.find_all_roles()", "def role(self) -> str:\n\n assert self.data is not None\n return self.data[\"role\"][\"name\"]", "def present_roles(self):\n print(\"User\" + str(self.unique_id) + \": roles=\")\n for group in self._roles:\n print(\"\\tGroup\" + str(group) + \" -> [\"\n + self.get_role_from_type(group, roles_influence) + \", \"\n + self.get_role_from_type(group, roles_neighbors) + \", \"\n + self.get_role_from_type(group, roles_activities) + \", \"\n + self.get_role_from_type(group, roles_attitude) + \"]\")\n print('')", "def get_roles(role):", "def roles(self):\n # TODO: The admin interface only allows a subset of the roles\n # listed in model.py since it uses the OPDS representation of\n # the data, and some of the roles map to the same MARC code.\n CODES = Contributor.MARC_ROLE_CODES\n marc_to_role = dict()\n for role in [\n Contributor.ACTOR_ROLE,\n Contributor.ADAPTER_ROLE,\n Contributor.AFTERWORD_ROLE,\n Contributor.ARTIST_ROLE,\n Contributor.ASSOCIATED_ROLE,\n Contributor.AUTHOR_ROLE,\n Contributor.COMPILER_ROLE,\n Contributor.COMPOSER_ROLE,\n Contributor.CONTRIBUTOR_ROLE,\n Contributor.COPYRIGHT_HOLDER_ROLE,\n Contributor.DESIGNER_ROLE,\n Contributor.DIRECTOR_ROLE,\n Contributor.EDITOR_ROLE,\n Contributor.ENGINEER_ROLE,\n Contributor.FOREWORD_ROLE,\n Contributor.ILLUSTRATOR_ROLE,\n Contributor.INTRODUCTION_ROLE,\n Contributor.LYRICIST_ROLE,\n Contributor.MUSICIAN_ROLE,\n Contributor.NARRATOR_ROLE,\n Contributor.PERFORMER_ROLE,\n Contributor.PHOTOGRAPHER_ROLE,\n Contributor.PRODUCER_ROLE,\n Contributor.TRANSCRIBER_ROLE,\n Contributor.TRANSLATOR_ROLE,\n ]:\n marc_to_role[CODES[role]] = role\n return marc_to_role", "def get_roles():\r\n global _roles\r\n return _roles", "def get_roles_list(self):\n try:\n roles = self.db_handler.get_roles_list()\n self.logger.write_to_log('roles got', 'model')\n return roles\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')", "def role(self):\r\n roles = {\r\n 'student': u'Student',\r\n 'staff': u'Administrator',\r\n 'instructor': 
u'Instructor',\r\n }\r\n return roles.get(self.system.get_user_role(), u'Student')", "def role_strings(self):\n return [s[RoleInfo.STRING] for s in [v for item in self.role_strings_info.values() for v in item] if s[RoleInfo.STRING]]", "def list_roles(self, hints):\n raise exception.NotImplemented() # pragma: no cover", "def get_granted_roles(self):", "def list(self, **kwargs):\n params = {}\n url = '/openstack/roles?%(params)s' % {\n 'params': parse.urlencode(params, True)\n }\n return self._list(url, 'roles')", "def role(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"role\")", "async def roles(self, ctx):\n\n pass", "def roles(self):\n # type: (...) -> Set[Role]\n return self._roles", "def editor_role_values(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"editor_role_values\")" ]
[ "0.7476474", "0.73596126", "0.73596126", "0.73139435", "0.7310977", "0.7254139", "0.7241115", "0.7161311", "0.7130106", "0.71001154", "0.7057589", "0.7041748", "0.7031347", "0.69231516", "0.69207555", "0.68998986", "0.6880031", "0.6854992", "0.6836892", "0.6824553", "0.67943746", "0.67850405", "0.67571616", "0.6625915", "0.6618259", "0.6588489", "0.6525221", "0.65175635", "0.65133345", "0.6500767" ]
0.7644932
0
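Aside (illustrative, not a dataset row): the row above simply returns a stored role dictionary from roleNames(), which is the hook QtQuick uses to map the property names written in QML delegates onto integer model roles. A minimal sketch of that contract, assuming PySide6 is available (PyQt5 is analogous); the RunListModel class and the "name" role are invented for this example.

    from PySide6.QtCore import QAbstractListModel, QModelIndex, Qt

    class RunListModel(QAbstractListModel):
        NameRole = Qt.UserRole + 1          # custom role id, arbitrary for this sketch

        def __init__(self, items=None, parent=None):
            super().__init__(parent)
            self._items = list(items or [])

        def rowCount(self, parent=QModelIndex()):
            return len(self._items)

        def data(self, index, role=Qt.DisplayRole):
            if not index.isValid() or not (0 <= index.row() < len(self._items)):
                return None
            if role == self.NameRole:
                return self._items[index.row()]
            return None

        def roleNames(self):
            # QtQuick calls this to resolve the "name" identifier used in QML delegates
            return {self.NameRole: b"name"}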
The outline of the command used to perform a horizontal run
def horizontalCommand(self): return self._horizontal_command
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hr() -> None:\n width, _ = click.get_terminal_size()\n click.echo('-' * width)", "def _display_command(self):\n idx = self.current_idx # Local copy to avoid race condition updates\n output = self.outputs[idx]\n if output is None:\n self.screen.addstr('Waiting for command to run...')\n return\n\n # Set row limits\n top_line = self.top_lines[idx]\n top_line = 0 if len(output) < self.max_line else min(max(top_line, 0), len(output)-self.max_line)\n bottom_line = min(top_line+self.max_line, len(output)) # Last page may not be full\n self.top_lines[idx] = top_line\n\n # Crop output to fit screen height & width\n output = [line[:self.max_col-1] for line in output[top_line:bottom_line]]\n self.screen.addstr(b'\\n'.join(output))", "def cmd(self):", "def explainerdashboard_cli(ctx):", "def display_hline():\n for i in range(12):\n print(\"-\", end=\"\")\n print()", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():" ]
[ "0.6144864", "0.60430384", "0.60299814", "0.59511566", "0.5867638", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455", "0.5822455" ]
0.64010274
0
The outline of the command used to perform a vertical run
def verticalCommand(self): return self._vertical_command
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cmd(self):", "def vertical_line(t, n):\n lt(t)\n fd(t,n)\n rt(t)" ]
[ "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.59563714", "0.5884879", "0.5875417" ]
0.6988323
0
The current number of runs
def count(self): return len(self._runs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_run_idx(self):\n return self.num_runs", "def number_of_launches(self):\n return self._number_of_launches", "def number_of_iterations(self) -> int:\n pass", "def num_runs(self):\n return len(self._h5[RUNS])", "def run(self) -> int:\n self._times_called += 1\n return self._times_called", "def number_of_iterations(self) -> int:\n return self._stats[\"iter_count\"]", "def counter(self) -> int:", "def counter(self) -> int:", "def run_number(self):\n return self._runNumber", "def num_trials(self):", "def num_launches(self):\n return len(self.launches)", "def number_of_iterations(self):\n return self._solution.nit", "def count(self) -> int:\n return self.end_measure_num - self.start_measure_num + 1", "def count(self):\n return int()", "def count(self):\n return int()", "def number_of_iterations(self) -> int:\n return self._solution.info.iter", "def counter(self) -> int:\n return self._counter", "def iterations_in_epoch(self):\n if self._cur_epoch_itr is not None:\n return self._cur_epoch_itr.count\n elif self._next_epoch_itr is not None:\n return self._next_epoch_itr.count\n return 0", "def tally(self):\n return self.count", "def number_of_iterations(self):\n return self._solution[\"iterations\"]", "def numRunningTotal(self):\n activeRuns = sum(run is not None for run in self.__running + self.__clientRunning)\n return activeRuns", "def __len__(self):\n return self.nb_iterations", "def execution_count(self):\n if not self._execution_count:\n self.fill_heatmap()\n return self._execution_count", "def fget(self):\n if not hasattr(self, \"_n\"):\n self._n = 0\n self._n += 1\n return self._n", "def get_number_of_evaluation(self):\n return self.n_eval", "def count():", "def num_run_cycles(self, run_idx):\n return self.num_traj_frames(run_idx, 0)", "def num_considered(self):\n return self._current", "def get_count(self):\r\n return self.count", "def IterationCount(self):\r\n\t\treturn self._get_attribute('iterationCount')" ]
[ "0.76630276", "0.75599295", "0.74130595", "0.7332917", "0.73038995", "0.72296184", "0.7181953", "0.7181953", "0.71678597", "0.7135644", "0.6975584", "0.6965288", "0.6940288", "0.6895412", "0.6895412", "0.68646985", "0.68560135", "0.68351436", "0.6822677", "0.68223685", "0.68212193", "0.6794721", "0.67787343", "0.6778261", "0.677366", "0.67671865", "0.675277", "0.67328835", "0.6730876", "0.6723504" ]
0.77989334
0
Function to perform a 5-year moving window filter for a single land cover value (such as Forest, coded as 1) for all years in an image. Calls the function mask5. The image bands do not need to be in order, but the bandNames argument must be in chronological order. The temporal filter inspects the central positions of five consecutive years, and if the extremities of the consecutive years are identical but the centre positions are not, then the central pixels are reclassified to match their temporal neighbour class. This function can be applied to whichever land cover values the user decides, such as all of the land cover values or a select few.
def applyWindow5years(imagem, value, bandNames): img_out = imagem.select(bandNames[0]) for i in np.arange(1, len(bandNames)-3): img_out = img_out.addBands(mask5(imagem, value,bandNames[(i-1):(i+4)])) img_out = img_out.addBands(imagem.select(bandNames[-3])) img_out = img_out.addBands(imagem.select(bandNames[-2])) img_out = img_out.addBands(imagem.select(bandNames[-1])) return img_out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mask5(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[3]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[4]).eq(value))\n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value) \n change_img1 = imagem.select(bandNames[2]).mask(mask.eq(1)).where(mask.eq(1), value) \n change_img2 = imagem.select(bandNames[3]).mask(mask.eq(1)).where(mask.eq(1), value) \n img_out = imagem.select(bandNames[1]).blend(change_img).blend(change_img1).blend(change_img2)\n return img_out", "def applyWindow4years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-2):\n img_out = img_out.addBands(mask4(imagem, value,bandNames[(i-1):(i+3)]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def applyWindow3years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-1):\n img_out = img_out.addBands(mask3(imagem, value,bandNames[(i-1):(i+2)]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def prepare_ERA5_moisture_flux(era5_path=era5_path):\n import xarray as xr\n from aux_gps import save_ncfile\n from aux_gps import anomalize_xr\n import numpy as np\n from aux_gps import convert_wind_direction\n from dask.diagnostics import ProgressBar\n ds = xr.open_dataset(\n era5_path / 'ERA5_UVQ_4xdaily_israel_1996-2019.nc', chunks={'level': 5})\n # ds = ds.resample(time='D', keep_attrs=True).mean(keep_attrs=True)\n # ds.attrs['action'] = 'resampled to 1D from 12:00UTC data points'\n mf = (ds['q'] * ds['u']).to_dataset(name='qu')\n mf.attrs = ds.attrs\n mf['qu'].attrs['units'] = ds['u'].attrs['units']\n mf['qu'].attrs['long_name'] = 'U component of moisture flux'\n mf['qu'].attrs['standard_name'] = 'eastward moisture flux'\n mf['qv'] = ds['q'] * ds['v']\n mf['qv'].attrs['units'] = ds['v'].attrs['units']\n mf['qv'].attrs['long_name'] = 'V component moisture flux'\n mf['qv'].attrs['standard_name'] = 'northward moisture flux'\n mf['qf'], mf['qfdir'] = convert_wind_direction(u=mf['qu'], v=mf['qv'])\n mf['qf'].attrs['units'] = ds['v'].attrs['units']\n mf['qf'].attrs['long_name'] = 'moisture flux magnitude'\n # mf['qfdir'] = 270 - np.rad2deg(np.arctan2(mf['qv'], mf['qu']))\n mf['qfdir'].attrs['units'] = 'deg'\n mf['qfdir'].attrs['long_name'] = 'moisture flux direction (meteorological)'\n mf = mf.sortby('latitude')\n mf = mf.sortby('level', ascending=False)\n comp = dict(zlib=True, complevel=9)\n encoding_mf = {var: comp for var in mf}\n mf_delayed = mf.to_netcdf(era5_path / 'ERA5_MF_4xdaily_israel_1996-2019.nc',\n 'w', encoding=encoding_mf, compute=False)\n mf_anoms = anomalize_xr(mf, freq='MS', time_dim='time')\n mf_anoms_mean = mf_anoms.mean('latitude').mean('longitude')\n encoding_mf_anoms = {var: comp for var in mf_anoms}\n mf_anoms_delayed = mf_anoms_mean.to_netcdf(era5_path / 'ERA5_MF_anomalies_4xdaily_israel_mean_1996-2019.nc',\n 'w', encoding=encoding_mf_anoms, compute=False)\n with ProgressBar():\n results = mf_delayed.compute()\n with ProgressBar():\n results1 = mf_anoms_delayed.compute()\n # save_ncfile(mf, era5_path, 'ERA5_MF_4xdaily_israel_1996-2019.nc')\n # mf_anoms = anomalize_xr(mf, freq='MS', time_dim='time')\n # mf_anoms_mean = mf_anoms.mean('latitude').mean('longitude')\n # 
save_ncfile(mf_anoms_mean, era5_path,\n # 'ERA5_MF_anomalies_4xdaily_israel_mean_1996-2019.nc')\n return", "def applyForwardNoDataFilter(image, bandNames):\n #Get a list of band names from year(1) through the last year\n bandNamesEE = ee.List(bandNames[1:])\n \n #Define forwards filter\n #In first iteration, bandName=bandNames[1] and previousImage is image.select(bandNames[0]), or the classifications for the first year\n #currentImage = image.select(bandNames[1]), the image for the second year\n #previousImage = image.select(bandNames[0]), the first year\n #Find where the second year has missing data, replace those values with the values of the first year\n #Append previousImage to currentImage, so now currentImage is a two band image, with the first band being the second year with the gap fill\n #and the second band is the first years classification\n #The iteration continues, now with followingImage.select[0] being the second year with the gap fill applied, and bandName is the third year\n def forwardNoDataFilter(bandName, previousImage):\n currentImage = image.select(ee.String(bandName))\n previousImage = ee.Image(previousImage)\n currentImage = currentImage.unmask(previousImage.select([0]))\n return currentImage.addBands(previousImage)\n \n #Iterate through all the years, starting with the first year's classification\n filtered = bandNamesEE.iterate(forwardNoDataFilter,ee.Image(image.select(bandNames[0])))\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)", "def mask4(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[3]).eq(value)) \n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value)\n change_img1 = imagem.select(bandNames[2]).mask(mask.eq(1)).where(mask.eq(1), value) \n img_out = imagem.select(bandNames[1]).blend(change_img).blend(change_img1)\n return img_out", "def stack_bands(yr, roi):\n\n water_year_start = '{}-10-01'.format(yr - 1)\n\n winter_s, winter_e = '{}-01-01'.format(yr), '{}-03-01'.format(yr),\n spring_s, spring_e = '{}-03-01'.format(yr), '{}-05-01'.format(yr),\n late_spring_s, late_spring_e = '{}-05-01'.format(yr), '{}-07-01'.format(yr)\n summer_s, summer_e = '{}-07-01'.format(yr), '{}-09-01'.format(yr)\n fall_s, fall_e = '{}-09-01'.format(yr), '{}-12-31'.format(yr)\n\n periods = [('cy', winter_s, fall_e),\n ('1', spring_s, spring_e),\n ('2', late_spring_s, late_spring_e),\n ('3', summer_s, summer_e),\n ('4', fall_s, fall_e)]\n\n first = True\n for name, start, end in periods:\n bands = landsat_composites(yr, start, end, roi, name)\n if first:\n input_bands = bands\n proj = bands.select('B2_cy').projection().getInfo()\n first = False\n else:\n input_bands = input_bands.addBands(bands)\n\n for s, e, n in [(spring_s, spring_e, 'espr'),\n (late_spring_s, late_spring_e, 'lspr'),\n (summer_s, summer_e, 'smr'),\n (fall_s, fall_e, 'fl'),\n (water_year_start, spring_e, 'wy_espr'),\n (water_year_start, late_spring_e, 'wy_espr'),\n (water_year_start, summer_e, 'wy_smr'),\n (water_year_start, fall_e, 'wy')]:\n gridmet = ee.ImageCollection(\"IDAHO_EPSCOR/GRIDMET\").filterBounds(\n roi).filterDate(s, e).select('pr', 'eto', 'tmmn', 'tmmx')\n temp_reducer = ee.Reducer.mean()\n t_names = ['tmax'.format(n), 'tmin'.format(n)]\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], 
scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_{}'.format(n), 'pet_total_{}'.format(n)).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n wd_estimate = precip_sum.select('precip_total_{}'.format(n)).subtract(precip_sum.select(\n 'pet_total_{}'.format(n))).rename('wd_est_{}'.format(n))\n input_bands = input_bands.addBands([temp_perc, precip_sum, wd_estimate])\n\n temp_reducer = ee.Reducer.percentile([10, 50, 90])\n t_names = ['tmmn_p10_cy', 'tmmn_p50_cy', 'tmmn_p90_cy', 'tmmx_p10_cy', 'tmmx_p50_cy', 'tmmx_p90_cy']\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_cy', 'pet_total_cy').resample('bilinear').reproject(crs=proj['crs'], scale=30)\n wd_estimate = precip_sum.select('precip_total_cy').subtract(precip_sum.select(\n 'pet_total_cy')).rename('wd_est_cy')\n\n coords = ee.Image.pixelLonLat().rename(['Lon_GCS', 'LAT_GCS']).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n ned = ee.Image('USGS/NED')\n terrain = ee.Terrain.products(ned).select('elevation', 'slope', 'aspect').reduceResolution(\n ee.Reducer.mean()).reproject(crs=proj['crs'], scale=30)\n\n world_climate = get_world_climate(proj=proj)\n elev = terrain.select('elevation')\n tpi_1250 = elev.subtract(elev.focal_mean(1250, 'circle', 'meters')).add(0.5).rename('tpi_1250')\n tpi_250 = elev.subtract(elev.focal_mean(250, 'circle', 'meters')).add(0.5).rename('tpi_250')\n tpi_150 = elev.subtract(elev.focal_mean(150, 'circle', 'meters')).add(0.5).rename('tpi_150')\n static_input_bands = coords.addBands([temp_perc, wd_estimate, terrain, tpi_1250, tpi_250, tpi_150, world_climate])\n\n nlcd = ee.Image('USGS/NLCD/NLCD2011').select('landcover').reproject(crs=proj['crs'], scale=30).rename('nlcd')\n\n cdl_cult = ee.Image('USDA/NASS/CDL/2017').select('cultivated'). 
\\\n remap([1, 2], [0, 1]).reproject(crs=proj['crs'], scale=30).rename('cdlclt')\n\n cdl_crop = ee.Image('USDA/NASS/CDL/2017').select('cropland').reproject(crs=proj['crs'],\n scale=30).rename('cdlcrp')\n\n gsw = ee.Image('JRC/GSW1_0/GlobalSurfaceWater')\n occ_pos = gsw.select('occurrence').gt(0)\n water = occ_pos.unmask(0).rename('gsw')\n\n static_input_bands = static_input_bands.addBands([nlcd, cdl_cult, cdl_crop, water])\n\n input_bands = input_bands.addBands(static_input_bands).clip(roi)\n\n # standardize names to match EE javascript output\n standard_names = []\n temp_ct = 1\n prec_ct = 1\n names = input_bands.bandNames().getInfo()\n for name in names:\n if 'tavg' in name and 'tavg' in standard_names:\n standard_names.append('tavg_{}'.format(temp_ct))\n temp_ct += 1\n elif 'prec' in name and 'prec' in standard_names:\n standard_names.append('prec_{}'.format(prec_ct))\n prec_ct += 1\n else:\n standard_names.append(name)\n\n input_bands = input_bands.rename(standard_names)\n return input_bands", "def applyBackwardNoDataFilter(image, bandNames):\n #Get a list of band names to iterate over, from year(-2) through year(0)\n bandNamesEE = ee.List(bandNames[:-1]).reverse()\n \n #Define backwards filter\n #In first iteration, bandName=bandNames[-2] and followingImage is image.select(bandNames[-1]), or the classifications for the final year\n #currentImage = image.select(bandNames[-2]), the second to last year\n #followingImage = image.select(bandNames[-1]), the final year\n #Find where the second to last year has missing data, replace those values with the values of the following year\n #Append followingImage to currentImage, so now currentImage is a two band image, with the first band being the second to last year with the gap fill\n #and the second band is the final years classification\n #The iteration continues, now with followingImage.select[0] being the second to last year with the gap fill applied, and bandName is the third to last year\n def backwardNoDataFilter(bandName, followingImage):\n currentImage = image.select(ee.String(bandName))\n followingImage = ee.Image(followingImage)\n currentImage = currentImage.unmask(followingImage.select([0]))\n return currentImage.addBands(followingImage)\n \n #Apply backwards filter, starting with the final year and iterating through to year(0) \n filtered = bandNamesEE.iterate(backwardNoDataFilter,ee.Image(image.select(bandNames[-1])))\n #Re-order bands to be in chronological order\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)", "def fmask(bandname=\"fmask\"):\n\n def fmask(image):\n imgFmask = image.select(bandname)\n shadow = imgFmask.eq(3)\n snow = imgFmask.eq(4)\n cloud = imgFmask.eq(5)\n\n mask = shadow.Or(snow).Or(cloud)\n\n imgMask = image.updateMask(mask.Not())\n return imgMask\n return fmask", "def applyFrequencyFilter(image, yearBandNames, classDictionary, filterParams): \n #Grab land cover classes as a list of strings\n lc_classes = classDictionary.keys().getInfo()\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Get the frequency of each class through the years by reducing the image collection to an image using the sum reducer\n class_frequency = binary_class_images.reduce(ee.Reducer.sum().unweighted()).rename(lc_classes)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Define an image to add bands with 
frequency filter applied\n out_img = ee.Image()\n \n #Loop through years\n for yearBand in yearBandNames:\n #Select the target year from the image\n yearImage = image.select(yearBand)\n \n #Loop through land cover classes in filterParams\n for lc_class in lc_classes:\n #Get the minimum occurance allowed in that land cover class\n min_occurance = filterParams.get(lc_class)\n \n #Find if the land cover class had less than the number of min_occurances in each pixel\n change_class = class_frequency.select(lc_class).lt(ee.Image.constant(min_occurance))\n \n #If change_class==1, then replace that pixel with the mode of all the years in that pixel\n #This filter is only applied to pixels of this land cover class\n #First mask yearImage to pixels of this land cover class, then get the union of pixels where change_class==1,\n #if both conditions are true, then the pixel is replaced with the mode\n yearImage = yearImage.where(yearImage.eq(ee.Number(classDictionary.get(lc_class))).bitwiseAnd(change_class),mode_image)\n #Rename yearImage to bandName\n yearImage = yearImage.rename(yearBand)\n #Append to output image\n out_img = out_img.addBands(yearImage)\n \n return out_img", "def applyMask3last(imagem, value, bandNames):\n mask = imagem.select(bandNames[-3]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[-2]).eq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[-1]).neq(value))\n change_img = imagem.select(bandNames[-1]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[0:-1])\n img_out = img_out.addBands(imagem.select(bandNames[-1]).blend(change_img))\n return img_out", "def applyIncidenceFilter(image, bandNames, classDictionary, numChangesCutoff = 8, connectedPixelCutoff=6):\n #Calculate the number of times a pixel changes throughout the time series and determine if it is over the numChangesCutoff\n num_changes = calculateNumberOfChanges(image, bandNames)\n too_many_changes = num_changes.gt(numChangesCutoff)\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Calculate the number of connected pixels for each land cover class and year, reduce to a single band image representing the number\n #of connected pixels of the same land cover class as the central pixel, and determine if it is over the connectedPixelCutoff\n connected_pixel_count = ee.ImageCollection(binary_class_images.map(lambda x: x.mask(x).connectedPixelCount(100,False).reduce(ee.Reducer.sum()).lt(connectedPixelCutoff)))\n \n #Get a bitwiseAnd determination if the number of connected pixels <= connectedPixelCutoff and the number of changes > numChangesCutoff \n incidence_filter = ee.ImageCollection(connected_pixel_count.map(lambda x: x.bitwiseAnd(too_many_changes))).toBands().rename(bandNames)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Replace pixels of image where incidence_filter is True with mode_image\n incidence_filtered = image.where(incidence_filter, mode_image)\n \n return incidence_filtered", "def mask3(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).eq(value))\n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[1]).blend(change_img)\n return img_out", "def calculate_daily_climatology(\n 
pctile,\n windowHalfWidth,\n lenClimYear,\n smoothPercentile,\n smoothPercentileWidth,\n thresh_climYear, # empty array\n seas_climYear, # empty array\n clim, # empty dict\n feb29,\n doyClim,\n clim_start,\n clim_end,\n tempClim,\n temp,\n):\n # Loop over all day-of-year values, and calculate threshold and seasonal climatology across years\n for d in range(1, lenClimYear + 1):\n # Special case for Feb 29\n if d == feb29:\n continue\n # find all indices for each day of the year +/- windowHalfWidth and from them calculate the threshold\n tt0 = np.where(doyClim[clim_start : clim_end + 1] == d)[\n 0\n ] # the index for that day each year\n # If this doy value does not exist (i.e. in 360-day calendars) then skip it\n if len(tt0) == 0:\n continue\n tt = np.array([])\n for w in range(-windowHalfWidth, windowHalfWidth + 1): # -5 : 5 default\n tt = np.append(\n tt, clim_start + tt0 + w\n ) # append the daily values 5days before and 5days after\n tt = tt[tt >= 0] # Reject indices \"before\" the first element\n tt = tt[tt < TClim] # Reject indices \"after\" the last element\n thresh_climYear[d - 1] = np.percentile(nonans(tempClim[tt.astype(int)]), pctile)\n seas_climYear[d - 1] = np.mean(nonans(tempClim[tt.astype(int)]))\n\n # Special case for Feb 29 (LEAP YEAR)\n thresh_climYear[feb29 - 1] = (\n 0.5 * thresh_climYear[feb29 - 2] + 0.5 * thresh_climYear[feb29]\n )\n seas_climYear[feb29 - 1] = (\n 0.5 * seas_climYear[feb29 - 2] + 0.5 * seas_climYear[feb29]\n )\n\n if smoothPercentile:\n thresh_climYear, seas_climYear = smooth_climatologies(\n thresh_climYear, seas_climYear, smoothPercentileWidth\n )\n\n # Generate threshold for full time series\n clim[\"thresh\"] = thresh_climYear[doy.astype(int) - 1]\n clim[\"seas\"] = seas_climYear[doy.astype(int) - 1]\n # Save vector indicating which points in temp are missing values\n clim[\"missing\"] = np.isnan(temp)\n\n return clim", "def band_filter(self, bands) -> 'ImageCollection':\n\n process_id = 'filter_bands'\n args = {\n 'imagery': self.graph,\n 'bands': bands\n }\n return self.graph_add_process(process_id, args)", "def system_5(in_dir, out_dir, threshold, num_frames=150, num_prev_frames=10, blur=(3,3), as_numeric=True, stretched=True):\n filenames = _prepare_filenames(in_dir, num_frames=150)\n initial_background_model = np.array([cv2.imread(f) for f in filenames[0:num_prev_frames]])\n seed_img = mode(initial_background_model)\n previous_frames = deque(initial_background_model, maxlen=num_prev_frames)\n\n for i, f in tqdm(enumerate(filenames[num_prev_frames:])):\n img = lm(cv2.imread(f))", "def calbands( band = 0, tmo = 30 ) :\n optimizeThresholds(band,tmo)\n flattenPhases(band,tmo)\n calibrateSpectra(band=band,tmo=tmo)", "def applyMask3first(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).neq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).eq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).eq(value))\n change_img = imagem.select(bandNames[0]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[0]).blend(change_img)\n img_out = img_out.addBands(imagem.select(bandNames[1:]))\n return img_out", "def computeCloudMasking(image_name, numberOfTrees=NUMBER_TREES, threshold=CUTTOF):\n\n # Import training data as GEE object\n # Build randomForest model at each run\n fc_training = ee.FeatureCollection(\n 'ft:1XzZPz8HZMARKQ9OPTWvfuRkPaGIASzkRYMfhKT8H')\n\n # Use these methods for prediction.\n methods_name = ee.List(['percentile1', 'percentile5', 'tree2', 'tree3'])\n\n # Random Forest model\n randomForest = 
ee.Classifier.randomForest(numberOfTrees=numberOfTrees)\n randomForest = randomForest.train(fc_training, 'cloud', methods_name)\n\n # Image + region of interest\n image = ee.Image(image_name)\n roi = getGeometryImage(image)\n\n # UK BORDER <=> mask sea\n land_geometry = ee.FeatureCollection(parameters.land_geometry)\n # image = image.clip(land_geometry)\n\n # Apply the different methods\n # tree1 = getMaskTree1(image, roi)\n tree2 = getMaskTree2(image, roi)\n tree3 = getMaskTree3(image, roi)\n percentile1, percentile5 = CloudClusterScore(image, roi)\n\n # Add each result as a band of the final image\n final_image = tree3.addBands([tree2, percentile1, percentile5]) \\\n .clip(land_geometry)\n\n # Apply the random Forest classification\n masked_image = final_image.classify(randomForest) \\\n .gt(threshold)\n\n # Add meta data: geometry + date\n masked_image = masked_image.set(\"system:footprint\", image.get('system:footprint'))\n masked_image = masked_image.set(\"system:time_start\", image.get('system:time_start'))\n masked_image = masked_image.set(\"system:time_end\", image.get('system:time_end'))\n\n return masked_image", "def _simulate_flux(self, times, five_sigma_mag, band):\n magnification = self._calculate_magnification(times)\n \n source_flux = self.source_flux[band]\n blending_flux = self.blending_flux[band]\n model_flux = source_flux * magnification + blending_flux\n model_mag = MM.Utils.get_mag_from_flux(model_flux)\n sigma_mag = self._LSST_uncertainties(model_mag, five_sigma_mag, band)\n temp = MM.Utils.get_flux_and_err_from_mag(model_mag, sigma_mag)\n sigma_flux = temp[1]\n \n simulated = model_flux + np.random.normal(scale=sigma_flux)\n simulated[simulated < 0.] = 0.\n \n if self._model.n_lenses == 2:\n diff = (model_flux - simulated) / sigma_flux\n self._binary_chi2_sum += np.sum(diff**2)\n\n return (simulated, sigma_flux)", "def plot_land_cover(data, year=None, measurement=None, out_width=15, cols=4,):\n # get measurement name\n measurement = get_layer_name(measurement, data)\n\n # get colour map, normalisation\n try:\n cmap, norm = lc_colourmap(measurement)\n except AssertionError:\n\n raise KeyError('Could not automatically determine colour scheme from'\n f'DataArray name {measurement}. 
Please specify which '\n 'DEA Landcover measurement is being plotted by providing'\n 'the name using the \"measurement\" variable For example'\n '(measurement = \"full_classification\")')\n\n height, width = data.geobox.shape\n scale = out_width / width\n\n if year:\n #plotting protocall if 'year' variable is passed\n year_string = f\"{year}-01-01\"\n data = data.sel(time=year_string, method=\"nearest\")\n \n fig, ax = plt.subplots()\n fig.set_size_inches(width * scale, height * scale)\n make_colorbar(fig, ax, measurement)\n im = ax.imshow(data, cmap=cmap, norm=norm, interpolation=\"nearest\")\n\n \n elif len(data.time) == 1:\n #plotting protocall if only one timestep is passed and not a year variable\n fig, ax = plt.subplots()\n fig.set_size_inches(width * scale, height * scale)\n make_colorbar(fig, ax, measurement)\n im = ax.imshow(data.isel(time=0), cmap=cmap, norm=norm, interpolation=\"nearest\")\n else:\n #plotting protocall if multible time steps are passed to plot\n if cols > len(data.time):\n cols = len(data.time)\n rows = int((len(data.time) + cols-1)/cols)\n\n fig, ax = plt.subplots(nrows=rows, ncols=cols)\n fig.set_size_inches(\n width * scale, (height * scale / cols) * (len(data.time) / cols))\n\n make_colorbar(fig, ax.flat[0], measurement)\n\n for a, b in enumerate(ax.flat):\n if a < data.shape[0]:\n im = b.imshow(data[a], cmap=cmap, norm=norm,\n interpolation=\"nearest\")\n\n return im", "def five_years_avg_dividend(self, five_years_avg_dividend: float):\n\n self._five_years_avg_dividend = five_years_avg_dividend", "def testMask5D(self):\n mask = np.ones((3, 3, 3, 5, 1), dtype=np.float32)\n inputs = tf.constant(1.0, shape=(5, 5, 5, 5, 5))\n conv1 = snt.Conv3D(\n output_channels=1,\n kernel_shape=3,\n mask=mask,\n padding=snt.VALID,\n use_bias=False,\n initializers=create_constant_initializers(1.0, 0.0, use_bias=False))\n out = conv1(inputs)\n expected_out = 135 * np.ones((5, 3, 3, 3, 1), dtype=np.float32)\n with self.test_session():\n tf.variables_initializer([conv1.w]).run()\n self.assertAllClose(out.eval(), expected_out)", "def main_bf_MISR(h5f, output_folder, SPATIAL_RESOLUTION=0.5, VZA_MAX=18, CAMERA='AN'):\n\n # =============================================================================\n # 1. Initialization\n # calculate constant parameters\n # initialize output arrays and output hdf5 file\n # check the number of CERES granules \n # =============================================================================\n\n print(\"-------MISR----->\", h5f)\n print(\"-------FID------<>\", h5f.fid)\n print(\"---->\", type(h5f))\n if type(h5f.fid) is str:\n output_nc_name = h5f.fid.split('/')[-1].replace('TERRA_BF_L1B', 'CLIMARBLE')\n else:\n output_nc_name = h5f.fid.name. \\\n decode(\"utf-8\").split('/')[-1]. \\\n replace('TERRA_BF_L1B', 'CLIMARBLE')\n\n output_nc_name = output_nc_name.replace('.h5', '.nc')\n\n # \n NUM_POINTS = 1 / SPATIAL_RESOLUTION\n NUM_LATS = int(180 / SPATIAL_RESOLUTION)\n NUM_LONS = int(360 / SPATIAL_RESOLUTION)\n\n LAT_EDGES = np.arange(-90.0, 90.0001, SPATIAL_RESOLUTION)\n LON_EDGES = np.arange(-180.0, 180.0001, SPATIAL_RESOLUTION)\n\n # \n orbit_radiance_sum = np.zeros((NUM_LATS, NUM_LONS, 4))\n orbit_radiance_num = np.zeros((NUM_LATS, NUM_LONS, 4))\n orbit_nc_out = os.path.join(output_folder, output_nc_name)\n\n\n # =============================================================================\n # 2. 
Main processing\n # Loop through each CERES granule and sort radiances into the corresponding lat/lon bins\n # When encounters an asceding granule, script will move to the next granule\n # =============================================================================\n\n # USE MODIS granules to match first and last time of the descending node\n MISR_blocks = get_descending(h5f, 'MISR.{}'.format(CAMERA))\n if MISR_blocks[0] == 0:\n print(\">> IOError( no available MODIS granule in orbit {} )\".format(bf_file))\n return\n\n # LOAD lat/lon here\n lat = h5f['MISR/Geolocation/GeoLatitude'][:]\n lon = h5f['MISR/Geolocation/GeoLongitude'][:]\n\n # LOAD radiance here\n MISR_bands = ['Blue', 'Green', 'Red', 'NIR']\n rads_all = []\n for iband in MISR_bands:\n rads_all.append(h5f['MISR/{}/Data_Fields/{}_Radiance'.format(CAMERA, iband)][:])\n\n # SPECIFY data dimension to interpolate SZA/VZA\n rad_shape = (128, 512)\n \n\n # LOOP through MISR blocks (starts from 0)\n for iblk in MISR_blocks:\n\n # INTERPOLATE sza and vza (this part can be replaced by a more accurate function)\n raw_sza = h5f['MISR/Solar_Geometry/SolarZenith'][iblk]\n raw_vza = h5f['MISR/{}/Sensor_Geometry/{}Zenith'.format(CAMERA, ''.join(c.lower() if i==1 else c for i,c in enumerate(CAMERA)))][iblk]\n np.place(raw_sza, raw_sza<0, np.nan)\n np.place(raw_vza, raw_vza<0, np.nan)\n blk_sza = resize(raw_sza, rad_shape)\n blk_vza = resize(raw_vza, rad_shape)\n\n\n # SELECT lat/lon\n idx_geometry = np.where((blk_sza<89.0) & (blk_vza<VZA_MAX))\n select_lat = lat[iblk][idx_geometry]\n select_lon = lon[iblk][idx_geometry]\n\n\n # SELECT spectral radiances here\n # Aggregate 275-m res data to 1.1-km when necessary\n # Separate band by band to allow one (or more) band(s) failure\n for iband, band_name in enumerate(MISR_bands, start=0):\n blk_rad = rads_all[iband][iblk]\n # blk_rad = h5f['MISR/{}/Data_Fields/{}_Radiance'.format(CAMERA, band_name)][iblk]\n\n if blk_rad.shape == (512, 2048): \n # 275-m res band\n np.place(blk_rad, blk_rad<0, np.nan)\n fnl_blk_rad = np.nanmean(np.reshape(blk_rad, (blk_rad.shape[0]//4, 4, blk_rad.shape[1]//4,4)), axis=(1,3))\n else:\n fnl_blk_rad = blk_rad\n\n\n select_rad = np.nan_to_num(fnl_blk_rad[idx_geometry])\n fnl_idx = np.where((select_rad>0)&(select_rad<1000))[0]\n\n fnl_lat = select_lat[fnl_idx] * -1\n fnl_lon = select_lon[fnl_idx]\n fnl_rad = select_rad[fnl_idx]\n\n try:\n rad_sum, binedges, bin_numbers = binned_statistic_dd((fnl_lat, fnl_lon), fnl_rad, bins=[LAT_EDGES, LON_EDGES], statistic='sum')\n rad_cnt, binedges, bin_numbers = binned_statistic_dd((fnl_lat, fnl_lon), fnl_rad, bins=[LAT_EDGES, LON_EDGES], statistic='count')\n\n orbit_radiance_sum[:, :, iband] += rad_sum\n orbit_radiance_num[:, :, iband] += rad_cnt\n except ValueError:\n continue\n\n # =============================================================================\n # 3. 
Save results\n # =============================================================================\n orbit_radiance_num = np.array(orbit_radiance_num, dtype='int16')\n\n coords_lats = np.linspace(90-SPATIAL_RESOLUTION/2, -90+SPATIAL_RESOLUTION/2, NUM_LATS)\n coords_lons = np.linspace(-180+SPATIAL_RESOLUTION/2, 180-SPATIAL_RESOLUTION/2, NUM_LONS)\n\n xr_rad_sum = xr.DataArray(orbit_radiance_sum, coords=[('latitude', coords_lats), ('longitude', coords_lons), ('misr_channel', range(4))])\n xr_rad_num = xr.DataArray(orbit_radiance_num, coords=[('latitude', coords_lats), ('longitude', coords_lons), ('misr_channel', range(4))])\n xr_rad_sum.encoding['_FillValue'] = 0\n xr_rad_num.encoding['_FillValue'] = 0\n xr_rad_sum.name = 'MISR spec rad sum'\n xr_rad_num.name = 'MISR spec rad num'\n xr_rad_sum.to_netcdf(orbit_nc_out, 'a')\n xr_rad_num.to_netcdf(orbit_nc_out, 'a')\n return orbit_nc_out", "def ls5_sr_corr(img):\n return img.select(['B1'], ['BLUE']).float().multiply(0.91996).add(37).int16()\\\n .addBands(img.select(['B2'], ['GREEN']).float().multiply(0.92764).add(84).int16())\\\n .addBands(img.select(['B3'], ['RED']).float().multiply(0.8881).add(98).int16())\\\n .addBands(img.select(['B4'], ['NIR']).float().multiply(0.95057).add(38).int16())\\\n .addBands(img.select(['B5'], ['SWIR1']).float().multiply(0.96525).add(29).int16())\\\n .addBands(img.select(['B7'], ['SWIR2']).float().multiply(0.99601).add(20).int16())\\\n .addBands(img.select(['pixel_qa'], ['PIXEL_QA']).int16())\\\n .addBands(img.select(['radsat_qa'], ['RADSAT_QA']).int16())\\\n .copyProperties(img)\\\n .copyProperties(img, ['system:time_start', 'system:time_end', 'system:index', 'system:footprint'])", "def at_rSNR(h5):\n ses = h5['SES'][:]['ses'].copy()\n ses.sort()\n h5.attrs['clipSNR'] = np.mean(ses[:-3]) / h5.attrs['noise'] *np.sqrt(ses.size)\n x = np.median(ses) \n h5.attrs['medSNR'] = np.median(ses) / h5.attrs['noise'] *np.sqrt(ses.size)", "def applyGapFilter(image, bandNames):\n filtered = applyForwardNoDataFilter(image, bandNames)\n filtered = applyBackwardNoDataFilter(filtered, bandNames)\n return filtered", "def dwt(image_array, quantization_Array):\n # Create the high pass and low pass filters\n # both filters are non-causal\n # symmetric\n # [-2, -1, 0, 1, 2]\n LPF = [-0.125, 0.25, 0.75, 0.25, -0.125]\n LPF_center = 2\n\n # [ -2,-1, 0]\n HPF = [-0.5, 1, -0.5]\n HPF_center = 2\n\n nrow, ncol = image_array.shape\n\n # create an array that will contain the 4 different subbands of the image\n LL = np.zeros((nrow, ncol))\n LH = np.zeros((nrow, ncol))\n HL = np.zeros((nrow, ncol))\n HH = np.zeros((nrow, ncol))\n filtered_image = [LL, LH, HL, HH]\n\n # filtering the rows using a low pass and high pass filters\n LowPass_rows = np.zeros((nrow, ncol))\n HighPass_rows = np.zeros((nrow, ncol))\n for i in range(0, nrow):\n LowPass_rows[i, :] = lfilter(LPF, image_array[i, :], LPF_center)\n HighPass_rows[i, :] = lfilter(HPF, image_array[i, :], HPF_center)\n\n # down sample rows.\n # which means we will have half the number of columns\n for i in range(0, len(filtered_image)):\n filtered_image[i] = filtered_image[i][:, ::2]\n\n # apply filters accross columns\n for i in range(0, ncol):\n LL[:, i] = lfilter(LPF, LowPass_rows[:, i], LPF_center)\n LH[:, i] = lfilter(HPF, LowPass_rows[:, i], HPF_center)\n HL[:, i] = lfilter(LPF, HighPass_rows[:, i], LPF_center)\n HH[:, i] = lfilter(HPF, HighPass_rows[:, i], HPF_center)\n\n # down sample columns and quantize\n for i in range(0, len(filtered_image)):\n filtered_image[i] = filtered_image[i][::2, 
:]\n filtered_image[i] = np.round(\n filtered_image[i]/quantization_Array[i]).astype(int)\n\n return filtered_image", "def determine_exposure_time(cn, bandlims, wantSNR = 10.0, wantetime = 5.0, ref_lam = 0.550,\n plot_snr_curves = False, plot_spectrum = False,\n title = \"\"):\n\n # Specify Kat's fiducial S/N\n iref = np.argmin(np.fabs(cn.lam - ref_lam))\n\n if bandlims is not None:\n\n # Specify band via wavelength\n icont = np.array([np.argmin(np.fabs(cn.lam - bandlims[0])), np.argmin(np.fabs(cn.lam - bandlims[1]))])\n iband = np.arange(icont[0]+1, icont[1])\n ibottom = np.argmin(np.fabs(cn.Cratio - np.min(cn.Cratio[iband])))\n\n # Calculate the continuum planet photon counts and contrast ratio\n ccont = cg.observe.interp_cont_over_band(cn.lam, cn.cp, icont, iband)\n ccrat = cg.observe.interp_cont_over_band(cn.lam, cn.Cratio, icont, iband)\n\n # Calculate varies SNRs as a function of exposure time\n Nt = 1000\n times = np.linspace(1.0, 100.0, Nt)\n band_snrs = np.zeros(len(times))\n bot_snrs = np.zeros(len(times))\n cont_snrs = np.zeros(len(times))\n fid_snrs = np.zeros(len(times))\n for i, time in enumerate(times):\n cn.make_fake_data(texp = times[i])\n fid_snrs[i] = cn.SNRt[iref]\n if bandlims is not None:\n band_snrs[i] = cg.observe.SNR_band(cn.cp, ccont, cn.cb, iband, itime=times[i])\n bot_snrs[i] = cn.SNRt[ibottom]\n cont_snrs[i] = np.mean(cn.SNRt[icont])\n\n # Fit for time to desired snr value\n etime_fid = find_time_from_snr(times, fid_snrs, wantSNR) #times[np.argmin(np.fabs(fid_snrs - wantSNR))]\n if bandlims is not None:\n etime_band = find_time_from_snr(times, band_snrs, wantSNR) #times[np.argmin(np.fabs(band_snrs - wantSNR))]\n etime_bot = find_time_from_snr(times, bot_snrs, wantSNR) #times[np.argmin(np.fabs(bot_snrs - wantSNR))]\n etime_cont = find_time_from_snr(times, cont_snrs, wantSNR) #times[np.argmin(np.fabs(cont_snrs - wantSNR))]\n\n # Check for incomplete bands which can cause anomalously low exposure times\n if bandlims is None:\n etime_band = np.nan\n etime_bot = np.nan\n etime_cont = np.nan\n else:\n if (False in np.isfinite(cn.Cobs[iband])):\n etime_band = np.nan\n\n # Make plot of SNR vs exposure time\n if plot_snr_curves:\n\n fig, ax = plt.subplots(figsize = (8,6))\n ax.set_xlabel(\"Exposure Time [hrs]\")\n ax.set_ylabel(\"S/N\")\n if bandlims is not None:\n ax.plot(times, band_snrs, label = \"detect band rel. to cont.\")\n ax.plot(times, bot_snrs, label = \"bottom of band\")\n ax.plot(times, cont_snrs, label = \"avg. 
continuum\")\n ax.plot(times, fid_snrs, label = \"at %.2f $\\mu$m\" %cn.lam[iref])\n if bandlims is not None:\n ax.scatter(etime_band, wantSNR, c=\"C0\")\n ax.scatter(etime_bot, wantSNR, c=\"C1\")\n ax.scatter(etime_cont, wantSNR, c=\"C2\")\n ax.scatter(etime_fid, wantSNR, c=\"C3\")\n ax.axhline(wantSNR, ls = \"--\", c = \"grey\")\n if bandlims is not None:\n ax.axvline(etime_band, ls = \"--\", c = \"C0\")\n ax.axvline(etime_bot, ls = \"--\", c = \"C1\")\n ax.axvline(etime_cont, ls = \"--\", c = \"C2\")\n ax.axvline(etime_fid, ls = \"--\", c = \"C3\")\n ylims = ax.get_ylim()\n if bandlims is not None:\n ax.text(etime_band, ylims[1]-.5*ylims[1], \"%.2f\" %etime_band, ha = \"center\", va = \"top\", fontsize = 12, bbox=dict(facecolor='w', alpha=1.0, ec = \"w\"), color = \"C0\")\n ax.text(etime_bot, ylims[1]-.1*ylims[1], \"%.2f\" %etime_bot, ha = \"center\", va = \"top\", fontsize = 12, bbox=dict(facecolor='w', alpha=1.0, ec = \"w\"), color = \"C1\")\n ax.text(etime_cont, ylims[1]-.15*ylims[1], \"%.2f\" %etime_cont, ha = \"center\", va = \"top\", fontsize = 12, bbox=dict(facecolor='w', alpha=1.0, ec = \"w\"), color = \"C2\")\n ax.text(etime_fid, ylims[1]-.20*ylims[1], \"%.2f\" %etime_fid, ha = \"center\", va = \"top\", fontsize = 12, bbox=dict(facecolor='w', alpha=1.0, ec = \"w\"), color = \"C3\")\n ax.legend(framealpha = 0.75, fontsize = 14)\n\n if plot_spectrum:\n\n # Construct noised spectrum plot\n if bandlims is not None:\n cn.make_fake_data(texp = etime_band)\n else:\n cn.make_fake_data(texp = etime_fid)\n\n fig, ax = plt.subplots(figsize = (8,6))\n ax.plot(cn.lam, cn.Cratio, ls = \"steps-mid\", color = \"grey\")\n ax.errorbar(cn.lam, cn.Cobs, yerr=cn.Csig, fmt = \"o\", ms = 2.0, alpha = 0.7, color = \"k\")\n ax.set_xlabel(\"Wavelength [$\\mu$m]\")\n ax.set_ylabel(\"Fp/Fs\")\n ax.set_title(title)\n\n if bandlims is not None:\n # Identify specific points in band\n for i in icont:\n ax.scatter(cn.lam[i], cn.Cratio[i], s = 20.0, c = \"C8\", marker = \"o\", zorder = 100)\n for i in iband:\n ax.scatter(cn.lam[i], cn.Cratio[i], s = 20.0, c = \"C1\", marker = \"o\", zorder = 100)\n ax.scatter(cn.lam[ibottom], cn.Cratio[ibottom], s = 20.0, c = \"C8\", marker = \"o\", zorder = 100)\n # Identify specific continuum points in band\n for i, ic in enumerate(iband):\n ax.scatter(cn.lam[ic], ccrat[i], s = 20.0, c = \"C9\", marker = \"o\", zorder = 100)\n\n # Return exposure times\n return etime_band, etime_bot, etime_cont, etime_fid", "def test_matched_filter5():\n x_size = 80\n y_size = 90\n\n objects = numpy.zeros((1, 5))\n\n # Make filter with unit sum.\n objects[0,:] = [x_size/2, y_size/2, 1.0, 1.0, 1.0]\n psf = dg.drawGaussians((x_size, y_size), objects)\n psf = psf/numpy.sum(psf)\n flt = matchedFilterC.MatchedFilter(psf)\n\n # Make test image.\n image = numpy.zeros((x_size, y_size))\n image[int(x_size/2), int(y_size/2)] = float(100)\n\n mf_conv = flt.convolve(image)\n\n t1 = numpy.fft.fft2(recenterPSF.recenterPSF(psf))\n t2 = numpy.fft.fft2(image)\n np_conv = numpy.real(numpy.fft.ifft2(t1*t2))\n\n assert(numpy.allclose(mf_conv, np_conv))\n\n flt.cleanup()" ]
[ "0.66861373", "0.65514684", "0.6478541", "0.5613955", "0.54316807", "0.5428405", "0.5313811", "0.520505", "0.51899797", "0.50904", "0.50412196", "0.5036899", "0.49864736", "0.47982645", "0.4767842", "0.47645608", "0.4758746", "0.47383195", "0.46717232", "0.46011153", "0.4570777", "0.45688832", "0.45655566", "0.45477608", "0.45297286", "0.44968966", "0.44782507", "0.44734576", "0.4399", "0.4386791" ]
0.80881137
0
Function to perform a 4-year moving window filter for a single land cover value (such as Forest as 1) for all years in an image. Calls the function mask4. The image bands do not need to be in order, but the bandNames argument must be in chronological order. The temporal filter inspects the central positions of four consecutive years, and if the extremities of the consecutive years are identical but the centre positions are not, then the central pixels are reclassified to match their temporal neighbour class. This function can be applied to whichever land cover values the user decides, such as all of the land cover values or a select few.
def applyWindow4years(imagem, value, bandNames): img_out = imagem.select(bandNames[0]) for i in np.arange(1, len(bandNames)-2): img_out = img_out.addBands(mask4(imagem, value,bandNames[(i-1):(i+3)])) img_out = img_out.addBands(imagem.select(bandNames[-2])) img_out = img_out.addBands(imagem.select(bandNames[-1])) return img_out
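A minimal usage sketch for the function above, assuming the Earth Engine Python API is initialised, numpy is imported as np, and mask4 and applyWindow4years are defined as in the document. The asset ID and band names are hypothetical placeholders, not values from the original record.

import ee
import numpy as np

ee.Initialize()

# Hypothetical classification stack: one band per year, pixel values are land cover codes (e.g. 1 = Forest).
bandNames = ['classification_2000', 'classification_2001', 'classification_2002',
             'classification_2003', 'classification_2004']
classifications = ee.Image('users/example/land_cover_stack').select(bandNames)

# Reclassify single-gap Forest pixels (value 1) using the 4-year moving window filter.
forest_filtered = applyWindow4years(classifications, 1, bandNames)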
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyWindow5years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-3):\n img_out = img_out.addBands(mask5(imagem, value,bandNames[(i-1):(i+4)]))\n img_out = img_out.addBands(imagem.select(bandNames[-3]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def applyWindow3years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-1):\n img_out = img_out.addBands(mask3(imagem, value,bandNames[(i-1):(i+2)]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def mask4(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[3]).eq(value)) \n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value)\n change_img1 = imagem.select(bandNames[2]).mask(mask.eq(1)).where(mask.eq(1), value) \n img_out = imagem.select(bandNames[1]).blend(change_img).blend(change_img1)\n return img_out", "def applyForwardNoDataFilter(image, bandNames):\n #Get a list of band names from year(1) through the last year\n bandNamesEE = ee.List(bandNames[1:])\n \n #Define forwards filter\n #In first iteration, bandName=bandNames[1] and previousImage is image.select(bandNames[0]), or the classifications for the first year\n #currentImage = image.select(bandNames[1]), the image for the second year\n #previousImage = image.select(bandNames[0]), the first year\n #Find where the second year has missing data, replace those values with the values of the first year\n #Append previousImage to currentImage, so now currentImage is a two band image, with the first band being the second year with the gap fill\n #and the second band is the first years classification\n #The iteration continues, now with followingImage.select[0] being the second year with the gap fill applied, and bandName is the third year\n def forwardNoDataFilter(bandName, previousImage):\n currentImage = image.select(ee.String(bandName))\n previousImage = ee.Image(previousImage)\n currentImage = currentImage.unmask(previousImage.select([0]))\n return currentImage.addBands(previousImage)\n \n #Iterate through all the years, starting with the first year's classification\n filtered = bandNamesEE.iterate(forwardNoDataFilter,ee.Image(image.select(bandNames[0])))\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)", "def applyBackwardNoDataFilter(image, bandNames):\n #Get a list of band names to iterate over, from year(-2) through year(0)\n bandNamesEE = ee.List(bandNames[:-1]).reverse()\n \n #Define backwards filter\n #In first iteration, bandName=bandNames[-2] and followingImage is image.select(bandNames[-1]), or the classifications for the final year\n #currentImage = image.select(bandNames[-2]), the second to last year\n #followingImage = image.select(bandNames[-1]), the final year\n #Find where the second to last year has missing data, replace those values with the values of the following year\n #Append followingImage to currentImage, so now currentImage is a two band image, with the first band being the second to last year with the gap fill\n #and the second band is the final years classification\n #The iteration continues, now with followingImage.select[0] being the second to last year with the gap fill applied, and bandName is the 
third to last year\n def backwardNoDataFilter(bandName, followingImage):\n currentImage = image.select(ee.String(bandName))\n followingImage = ee.Image(followingImage)\n currentImage = currentImage.unmask(followingImage.select([0]))\n return currentImage.addBands(followingImage)\n \n #Apply backwards filter, starting with the final year and iterating through to year(0) \n filtered = bandNamesEE.iterate(backwardNoDataFilter,ee.Image(image.select(bandNames[-1])))\n #Re-order bands to be in chronological order\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)", "def applyFrequencyFilter(image, yearBandNames, classDictionary, filterParams): \n #Grab land cover classes as a list of strings\n lc_classes = classDictionary.keys().getInfo()\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Get the frequency of each class through the years by reducing the image collection to an image using the sum reducer\n class_frequency = binary_class_images.reduce(ee.Reducer.sum().unweighted()).rename(lc_classes)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Define an image to add bands with frequency filter applied\n out_img = ee.Image()\n \n #Loop through years\n for yearBand in yearBandNames:\n #Select the target year from the image\n yearImage = image.select(yearBand)\n \n #Loop through land cover classes in filterParams\n for lc_class in lc_classes:\n #Get the minimum occurance allowed in that land cover class\n min_occurance = filterParams.get(lc_class)\n \n #Find if the land cover class had less than the number of min_occurances in each pixel\n change_class = class_frequency.select(lc_class).lt(ee.Image.constant(min_occurance))\n \n #If change_class==1, then replace that pixel with the mode of all the years in that pixel\n #This filter is only applied to pixels of this land cover class\n #First mask yearImage to pixels of this land cover class, then get the union of pixels where change_class==1,\n #if both conditions are true, then the pixel is replaced with the mode\n yearImage = yearImage.where(yearImage.eq(ee.Number(classDictionary.get(lc_class))).bitwiseAnd(change_class),mode_image)\n #Rename yearImage to bandName\n yearImage = yearImage.rename(yearBand)\n #Append to output image\n out_img = out_img.addBands(yearImage)\n \n return out_img", "def mask5(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[3]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[4]).eq(value))\n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value) \n change_img1 = imagem.select(bandNames[2]).mask(mask.eq(1)).where(mask.eq(1), value) \n change_img2 = imagem.select(bandNames[3]).mask(mask.eq(1)).where(mask.eq(1), value) \n img_out = imagem.select(bandNames[1]).blend(change_img).blend(change_img1).blend(change_img2)\n return img_out", "def fmask(bandname=\"fmask\"):\n\n def fmask(image):\n imgFmask = image.select(bandname)\n shadow = imgFmask.eq(3)\n snow = imgFmask.eq(4)\n cloud = imgFmask.eq(5)\n\n mask = shadow.Or(snow).Or(cloud)\n\n imgMask = image.updateMask(mask.Not())\n return imgMask\n return fmask", "def stack_bands(yr, roi):\n\n water_year_start = '{}-10-01'.format(yr - 1)\n\n 
winter_s, winter_e = '{}-01-01'.format(yr), '{}-03-01'.format(yr),\n spring_s, spring_e = '{}-03-01'.format(yr), '{}-05-01'.format(yr),\n late_spring_s, late_spring_e = '{}-05-01'.format(yr), '{}-07-01'.format(yr)\n summer_s, summer_e = '{}-07-01'.format(yr), '{}-09-01'.format(yr)\n fall_s, fall_e = '{}-09-01'.format(yr), '{}-12-31'.format(yr)\n\n periods = [('cy', winter_s, fall_e),\n ('1', spring_s, spring_e),\n ('2', late_spring_s, late_spring_e),\n ('3', summer_s, summer_e),\n ('4', fall_s, fall_e)]\n\n first = True\n for name, start, end in periods:\n bands = landsat_composites(yr, start, end, roi, name)\n if first:\n input_bands = bands\n proj = bands.select('B2_cy').projection().getInfo()\n first = False\n else:\n input_bands = input_bands.addBands(bands)\n\n for s, e, n in [(spring_s, spring_e, 'espr'),\n (late_spring_s, late_spring_e, 'lspr'),\n (summer_s, summer_e, 'smr'),\n (fall_s, fall_e, 'fl'),\n (water_year_start, spring_e, 'wy_espr'),\n (water_year_start, late_spring_e, 'wy_espr'),\n (water_year_start, summer_e, 'wy_smr'),\n (water_year_start, fall_e, 'wy')]:\n gridmet = ee.ImageCollection(\"IDAHO_EPSCOR/GRIDMET\").filterBounds(\n roi).filterDate(s, e).select('pr', 'eto', 'tmmn', 'tmmx')\n temp_reducer = ee.Reducer.mean()\n t_names = ['tmax'.format(n), 'tmin'.format(n)]\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_{}'.format(n), 'pet_total_{}'.format(n)).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n wd_estimate = precip_sum.select('precip_total_{}'.format(n)).subtract(precip_sum.select(\n 'pet_total_{}'.format(n))).rename('wd_est_{}'.format(n))\n input_bands = input_bands.addBands([temp_perc, precip_sum, wd_estimate])\n\n temp_reducer = ee.Reducer.percentile([10, 50, 90])\n t_names = ['tmmn_p10_cy', 'tmmn_p50_cy', 'tmmn_p90_cy', 'tmmx_p10_cy', 'tmmx_p50_cy', 'tmmx_p90_cy']\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_cy', 'pet_total_cy').resample('bilinear').reproject(crs=proj['crs'], scale=30)\n wd_estimate = precip_sum.select('precip_total_cy').subtract(precip_sum.select(\n 'pet_total_cy')).rename('wd_est_cy')\n\n coords = ee.Image.pixelLonLat().rename(['Lon_GCS', 'LAT_GCS']).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n ned = ee.Image('USGS/NED')\n terrain = ee.Terrain.products(ned).select('elevation', 'slope', 'aspect').reduceResolution(\n ee.Reducer.mean()).reproject(crs=proj['crs'], scale=30)\n\n world_climate = get_world_climate(proj=proj)\n elev = terrain.select('elevation')\n tpi_1250 = elev.subtract(elev.focal_mean(1250, 'circle', 'meters')).add(0.5).rename('tpi_1250')\n tpi_250 = elev.subtract(elev.focal_mean(250, 'circle', 'meters')).add(0.5).rename('tpi_250')\n tpi_150 = elev.subtract(elev.focal_mean(150, 'circle', 'meters')).add(0.5).rename('tpi_150')\n static_input_bands = coords.addBands([temp_perc, wd_estimate, terrain, tpi_1250, tpi_250, tpi_150, world_climate])\n\n nlcd = ee.Image('USGS/NLCD/NLCD2011').select('landcover').reproject(crs=proj['crs'], scale=30).rename('nlcd')\n\n cdl_cult = ee.Image('USDA/NASS/CDL/2017').select('cultivated'). 
\\\n remap([1, 2], [0, 1]).reproject(crs=proj['crs'], scale=30).rename('cdlclt')\n\n cdl_crop = ee.Image('USDA/NASS/CDL/2017').select('cropland').reproject(crs=proj['crs'],\n scale=30).rename('cdlcrp')\n\n gsw = ee.Image('JRC/GSW1_0/GlobalSurfaceWater')\n occ_pos = gsw.select('occurrence').gt(0)\n water = occ_pos.unmask(0).rename('gsw')\n\n static_input_bands = static_input_bands.addBands([nlcd, cdl_cult, cdl_crop, water])\n\n input_bands = input_bands.addBands(static_input_bands).clip(roi)\n\n # standardize names to match EE javascript output\n standard_names = []\n temp_ct = 1\n prec_ct = 1\n names = input_bands.bandNames().getInfo()\n for name in names:\n if 'tavg' in name and 'tavg' in standard_names:\n standard_names.append('tavg_{}'.format(temp_ct))\n temp_ct += 1\n elif 'prec' in name and 'prec' in standard_names:\n standard_names.append('prec_{}'.format(prec_ct))\n prec_ct += 1\n else:\n standard_names.append(name)\n\n input_bands = input_bands.rename(standard_names)\n return input_bands", "def applyMask3last(imagem, value, bandNames):\n mask = imagem.select(bandNames[-3]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[-2]).eq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[-1]).neq(value))\n change_img = imagem.select(bandNames[-1]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[0:-1])\n img_out = img_out.addBands(imagem.select(bandNames[-1]).blend(change_img))\n return img_out", "def request_band_extract(file_prefix, points_layer, region, years, filter_bounds=False):\n roi = ee.FeatureCollection(region)\n plots = ee.FeatureCollection(points_layer)\n for yr in years:\n stack = stack_bands(yr, roi)\n\n if filter_bounds:\n plots = plots.filterBounds(roi)\n\n filtered = plots.filter(ee.Filter.eq('YEAR', yr))\n\n plot_sample_regions = stack.sampleRegions(\n collection=filtered,\n properties=['POINT_TYPE', 'YEAR'],\n scale=30,\n tileScale=16)\n\n task = ee.batch.Export.table.toCloudStorage(\n plot_sample_regions,\n description='{}_{}'.format(file_prefix, yr),\n bucket='wudr',\n fileNamePrefix='{}_{}'.format(file_prefix, yr),\n fileFormat='CSV')\n\n task.start()\n print(yr)\n exit()", "def mask3(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).eq(value))\n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[1]).blend(change_img)\n return img_out", "def applyIncidenceFilter(image, bandNames, classDictionary, numChangesCutoff = 8, connectedPixelCutoff=6):\n #Calculate the number of times a pixel changes throughout the time series and determine if it is over the numChangesCutoff\n num_changes = calculateNumberOfChanges(image, bandNames)\n too_many_changes = num_changes.gt(numChangesCutoff)\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Calculate the number of connected pixels for each land cover class and year, reduce to a single band image representing the number\n #of connected pixels of the same land cover class as the central pixel, and determine if it is over the connectedPixelCutoff\n connected_pixel_count = ee.ImageCollection(binary_class_images.map(lambda x: x.mask(x).connectedPixelCount(100,False).reduce(ee.Reducer.sum()).lt(connectedPixelCutoff)))\n \n #Get a bitwiseAnd determination if the number of connected pixels <= 
connectedPixelCutoff and the number of changes > numChangesCutoff \n incidence_filter = ee.ImageCollection(connected_pixel_count.map(lambda x: x.bitwiseAnd(too_many_changes))).toBands().rename(bandNames)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Replace pixels of image where incidence_filter is True with mode_image\n incidence_filtered = image.where(incidence_filter, mode_image)\n \n return incidence_filtered", "def make_lightcurve(centroids, bands, band_idx, box_size, aperture_radius):\n band_names = np.sort(list(bands.keys()))\n num_stars= range(len(centroids))\n for star_idx in num_stars:\n xcenters, ycenters = [],[]\n aperture_sums = []\n background = []\n fwhms = []\n obs_time = []\n obs_mjd = []\n ##extract lightcurve (enumerate all frames) in a given band\n for i in tqdm(bands[band_names[band_idx]]):\n #import pdb; pdb.set_trace()\n hdr = fits.open(i)[0].header\n img = fits.open(i)[0].data\n #get dates from fits header\n date=dt.strptime(hdr['DATE-OBS'], '%Y-%m-%d')\n time=dt.strptime(hdr['EXP-STRT'], '%H:%M:%S.%f')\n newdate = time.replace(year=date.year, month=date.month, day=date.day)\n obs_time.append(newdate)\n obs_mjd.append(hdr['MJD-STRT'])\n\n #crop\n #import pdb; pdb.set_trace()\n image_crop = get_crop(img, centroids[star_idx], box_size)\n\n ###aperture photometry###\n #compute centroid\n centroid = get_centroid(image_crop)\n\n xcenters.append(centroid[0])\n ycenters.append(centroid[1])\n\n #compute backgound\n bkg_mean=get_bkg(image_crop, centroid, r_in=20., r_out=30.)\n\n #measure fwhm\n fwhm=get_fwhm(image_crop)\n\n #without aperture photometry\n\n aperture_sum = get_phot(image_crop, centroid, r=aperture_radius)\n\n #minus background wihtin annulus\n #aperture_sum = get_phot2(image_crop,bkg_mean,centroid,r=aperture_radius)\n\n aperture_sums.append(aperture_sum)\n background.append(bkg_mean)\n\n # if fwhm < 10*np.median(fwhms):\n # fwhms.append(fwhm)\n # else:\n # fwhms.append(np.nan)\n fwhms.append(fwhm)\n\n #output as dataframe of given band and star\n\n dfs.append(pd.DataFrame(\n {'{0}_{1}_x'.format(band_names[band_idx], str(star_idx)) : xcenters,\n '{0}_{1}_y'.format(band_names[band_idx], str(star_idx)) : ycenters,\n '{0}_{1}_flux_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : aperture_sums,\n '{0}_{1}_bkg_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : background,\n '{0}_{1}_fwhm_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : fwhms},\n #'airmass' : airmass\n index = obs_time))\n return dfs, band_idx, band_names", "def sky_groups():\n cam = \"sky\"\n for light, lens, ndc, good, window in [(True, True, False, True, True),\n (True, True, False, True, False),\n (True, True, False, False, False),\n (True, False, False, True, False),\n (True, False, False, False, False),\n (False, True, False, True, True),\n (False, True, False, False, True)]:\n filenames = flatfiles(cam)\n filenames = get_light_sky(filenames, light)\n filenames = get_lens(filenames, lens)\n filenames = get_ndc(filenames, ndc)\n filenames = get_good(filenames, good)\n filenames = get_window_sky(filenames, window)\n images = valid_images(filenames)\n process_images(images, cam, (light, lens, ndc, good, window))", "def applyGapFilter(image, bandNames):\n filtered = applyForwardNoDataFilter(image, bandNames)\n filtered = applyBackwardNoDataFilter(filtered, bandNames)\n return filtered", "def calculate_daily_climatology(\n pctile,\n windowHalfWidth,\n 
lenClimYear,\n smoothPercentile,\n smoothPercentileWidth,\n thresh_climYear, # empty array\n seas_climYear, # empty array\n clim, # empty dict\n feb29,\n doyClim,\n clim_start,\n clim_end,\n tempClim,\n temp,\n):\n # Loop over all day-of-year values, and calculate threshold and seasonal climatology across years\n for d in range(1, lenClimYear + 1):\n # Special case for Feb 29\n if d == feb29:\n continue\n # find all indices for each day of the year +/- windowHalfWidth and from them calculate the threshold\n tt0 = np.where(doyClim[clim_start : clim_end + 1] == d)[\n 0\n ] # the index for that day each year\n # If this doy value does not exist (i.e. in 360-day calendars) then skip it\n if len(tt0) == 0:\n continue\n tt = np.array([])\n for w in range(-windowHalfWidth, windowHalfWidth + 1): # -5 : 5 default\n tt = np.append(\n tt, clim_start + tt0 + w\n ) # append the daily values 5days before and 5days after\n tt = tt[tt >= 0] # Reject indices \"before\" the first element\n tt = tt[tt < TClim] # Reject indices \"after\" the last element\n thresh_climYear[d - 1] = np.percentile(nonans(tempClim[tt.astype(int)]), pctile)\n seas_climYear[d - 1] = np.mean(nonans(tempClim[tt.astype(int)]))\n\n # Special case for Feb 29 (LEAP YEAR)\n thresh_climYear[feb29 - 1] = (\n 0.5 * thresh_climYear[feb29 - 2] + 0.5 * thresh_climYear[feb29]\n )\n seas_climYear[feb29 - 1] = (\n 0.5 * seas_climYear[feb29 - 2] + 0.5 * seas_climYear[feb29]\n )\n\n if smoothPercentile:\n thresh_climYear, seas_climYear = smooth_climatologies(\n thresh_climYear, seas_climYear, smoothPercentileWidth\n )\n\n # Generate threshold for full time series\n clim[\"thresh\"] = thresh_climYear[doy.astype(int) - 1]\n clim[\"seas\"] = seas_climYear[doy.astype(int) - 1]\n # Save vector indicating which points in temp are missing values\n clim[\"missing\"] = np.isnan(temp)\n\n return clim", "def get_time_filtered_correlations(a_lt3,a_lt4,adwin_filt_bool,**kw):\r\n verbose = kw.pop('verbose',False)\r\n ### prepare RO results and sort them according to sweep point\r\n for a in [a_lt3,a_lt4]:\r\n a.pts = a.g.attrs['sweep_length']\r\n a.ssros = a.agrp['ssro_results'].value\r\n a.readouts = a.g.attrs['nr_of_ROsequences']\r\n # a.sorted_results = a_ssros.reshape((-1,a.pts,a.readouts))\r\n\r\n\r\n ### correlate the ROs with each other by making a boolean filter:\r\n ### variables here are described in terms of spin states!\r\n m00 = (a_lt3.ssros == 1)*(a_lt4.ssros == 1)\r\n m10 = (a_lt3.ssros == 1)*(a_lt4.ssros == 0)\r\n m01 = (a_lt3.ssros == 0)*(a_lt4.ssros == 1)\r\n m11 = (a_lt3.ssros == 0)*(a_lt4.ssros == 0)\r\n \r\n ### now define unique identifiers for each Ro correlation and recast the correlations into a single array.\r\n ### As identifieres I choose 1 = index 0 in the output list, i.e. 11; 2 = index 1 in the output list ... 
and so forth\r\n RO_correlators = np.array(len(a_lt3.ssros)*[1])*m11 \\\r\n + np.array(len(a_lt3.ssros)*[2])*m10 \\\r\n + np.array(len(a_lt3.ssros)*[3])*m01 \\\r\n + np.array(len(a_lt3.ssros)*[4])*m00 \r\n ### PH - added to make sure that has a full set of repetitions\r\n RO_correlators = RO_correlators[:(a.g.attrs['sweep_length']*(len(RO_correlators)/a.g.attrs['sweep_length']))]\r\n adwin_filt_bool = adwin_filt_bool[:(a.g.attrs['sweep_length']*(len(RO_correlators)/a.g.attrs['sweep_length']))]\r\n\r\n \r\n ### now sort the correlators and the adwin fltr according to the sweep pts\r\n sorted_RO_correlators = RO_correlators.reshape((-1,a_lt3.pts,a_lt3.readouts))\r\n sorted_adwin_fltr = adwin_filt_bool.reshape((-1,a_lt3.pts,a_lt3.readouts))\r\n\r\n ### from now on: no numpy magic anymore. from here it is brutforce 'for-looping'\r\n ### (all conceived arrays will have different lengths due to temporal filtering. this break most np methods)\r\n ### although vstack and hstack would probably work...\r\n \r\n return_list = range(a_lt3.pts) ## all of these pts will be substituted with the correlator occurence\r\n for i in range(a_lt3.pts): \r\n correlators_at_sweep_pt = [0,0,0,0]\r\n for j in [1,2,3,4]: ### loop over the correlator identifiers\r\n correlators_at_sweep_pt[j-1] = np.sum(np.logical_and(sorted_adwin_fltr[:,i,:],sorted_RO_correlators[:,i,:]==j)) ## exclude adwin filter and do a logical and with the correlator identifier. Then sum over the number of occurences\r\n\r\n\r\n return_list[i] = correlators_at_sweep_pt\r\n\r\n return return_list", "def countmap(band,skypos,tranges,skyrange,width=False,height=False,\n\t\t\t verbose=0,tscale=1000.,memlight=False,hdu=False,retries=20):\n\timsz = gxt.deg2pix(skypos,skyrange)\n\tcount = np.zeros(imsz)\n\tfor trange in tranges:\n\t\t# If memlight is requested, break the integration into\n\t\t# smaller chunks.\n\t\tstep = memlight if memlight else trange[1]-trange[0]\n\t\tfor i in np.arange(trange[0],trange[1],step):\n\t\t\tt0,t1=i,i+step\n\t\t\tif verbose:\n\t\t\t\tprint_inline('Coadding '+str(t0)+' to '+str(t1))\n\t\t\tevents = gQuery.getArray(gQuery.rect(band,skypos[0],skypos[1],t0,t1,\n\t\t\t\t\t\t\t\t\t\t\t\t skyrange[0],skyrange[1]),\n\t\t\t\t\t\t\t\t\t verbose=verbose,retries=retries)\n\n\t\t\t# Check that there is actually data here.\n\t\t\tif not events:\n\t\t\t\tif verbose>1:\n\t\t\t\t\tprint \"No data in \"+str([t0,t1])\n\t\t\t\tcontinue\n\n\t\t\ttimes = np.array(events,dtype='float64')[:,0 ]/tscale\n\t\t\tcoo =\tnp.array(events,dtype='float64')[:,1:]\n\n\t\t\t# If there's no data, return a blank image.\n\t\t\tif len(coo)==0:\n\t\t\t\tif verbose:\n\t\t\t\t\tprint 'No data in this frame: '+str([t0,t1])\n\t\t\t\tcontinue\n\n\t\t\t# Define World Coordinate System (WCS)\n\t\t\twcs = define_wcs(skypos,skyrange,width=False,height=False)\n\n\t\t\t# Map the sky coordinates onto the focal plane\n\t\t\tfoc = wcs.sip_pix2foc(wcs.wcs_world2pix(coo,1),1)\n\n\t\t\t# Bin the events into actual image pixels\n\t\t\tH,xedges,yedges=np.histogram2d(foc[:,1]-0.5,foc[:,0]-0.5,\n\t\t\t\t\t\t\t\tbins=imsz,range=([ [0,imsz[0]],[0,imsz[1]] ]))\n\t\t\tcount += H\n\n\treturn count", "def applyMask3first(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).neq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).eq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).eq(value))\n change_img = imagem.select(bandNames[0]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[0]).blend(change_img)\n img_out = 
img_out.addBands(imagem.select(bandNames[1:]))\n return img_out", "def cut4(image):\r\n i, j = image.shape\r\n a1 = image[:i // 2, :j // 2]\r\n a2 = image[i // 2:, :j // 2]\r\n a3 = image[:i // 2, j // 2:]\r\n a4 = image[i // 2:, j // 2:]\r\n return a1, a2, a3, a4", "def mask(mode: str = 'illuminated', band: str = '78') -> np.ndarray:\n if band in ('7', '8'):\n res = np.full((256, 500), False)\n else:\n res = np.full((256, 1000), False)\n\n res[coords(mode, band)] = True\n\n return res", "def plot_land_cover(data, year=None, measurement=None, out_width=15, cols=4,):\n # get measurement name\n measurement = get_layer_name(measurement, data)\n\n # get colour map, normalisation\n try:\n cmap, norm = lc_colourmap(measurement)\n except AssertionError:\n\n raise KeyError('Could not automatically determine colour scheme from'\n f'DataArray name {measurement}. Please specify which '\n 'DEA Landcover measurement is being plotted by providing'\n 'the name using the \"measurement\" variable For example'\n '(measurement = \"full_classification\")')\n\n height, width = data.geobox.shape\n scale = out_width / width\n\n if year:\n #plotting protocall if 'year' variable is passed\n year_string = f\"{year}-01-01\"\n data = data.sel(time=year_string, method=\"nearest\")\n \n fig, ax = plt.subplots()\n fig.set_size_inches(width * scale, height * scale)\n make_colorbar(fig, ax, measurement)\n im = ax.imshow(data, cmap=cmap, norm=norm, interpolation=\"nearest\")\n\n \n elif len(data.time) == 1:\n #plotting protocall if only one timestep is passed and not a year variable\n fig, ax = plt.subplots()\n fig.set_size_inches(width * scale, height * scale)\n make_colorbar(fig, ax, measurement)\n im = ax.imshow(data.isel(time=0), cmap=cmap, norm=norm, interpolation=\"nearest\")\n else:\n #plotting protocall if multible time steps are passed to plot\n if cols > len(data.time):\n cols = len(data.time)\n rows = int((len(data.time) + cols-1)/cols)\n\n fig, ax = plt.subplots(nrows=rows, ncols=cols)\n fig.set_size_inches(\n width * scale, (height * scale / cols) * (len(data.time) / cols))\n\n make_colorbar(fig, ax.flat[0], measurement)\n\n for a, b in enumerate(ax.flat):\n if a < data.shape[0]:\n im = b.imshow(data[a], cmap=cmap, norm=norm,\n interpolation=\"nearest\")\n\n return im", "def winter_gif(self):\n # Create the directory.\n os.mkdir('./medal_figures_winter')\n start = self.start_year\n end = self.end_year\n duration = self.duration\n # Specify the years.\n years = [i for i in self.years_winter if (i >= start) and (i <= end)]\n # Setup the colormap.\n cmap = sns.cubehelix_palette(n_colors=6, start=2.5, rot=0.1, hue=2, dark=0.3, light=1, as_cmap=True)\n # Important variable and keywords to initialize cartopy.\n shapename = 'admin_0_countries'\n countries_shp = shpreader.natural_earth(resolution='110m', category='cultural', name=shapename)\n filenames = []\n # Loop in the specific years.\n for i in years:\n fig = plt.figure(figsize=(10, 8))\n ax = fig.add_subplot(1, 1, 1, projection=ccrs.Mercator())\n ax.set_extent([-169.95, 169.95, -65, 80], crs=ccrs.PlateCarree())\n ax.add_feature(cfeature.BORDERS)\n ax.coastlines(resolution='110m')\n # Add some titles for specific years.\n if i == 1924:\n fig.suptitle('The First Winter Olympics.', y=0.9, fontsize=14, fontweight='bold')\n if i == 1994:\n fig.suptitle('The International Olympic Committee voted to separate the Summer and Winter Games.',\n y=0.9, fontsize=12, fontweight='bold')\n if i == 2018:\n fig.suptitle('Suspension of the Russian Olympic Committee due to Olympic 
Doping Controversy.',\n y=0.9, fontsize=12, fontweight='bold')\n iso_lib = list(self.conv['ISO'])\n if i != 2018:\n city = self.df_winter.loc[self.df_winter['Year'] == i]['City'].iloc[0]\n ax.title.set_text('Total Number of Medals of Winter Olympics Year: %d City: %s' % (i, city))\n df_tmp = self.df_winter.loc[self.df_winter['Year'] == i]\n d = dict(df_tmp.groupby(df_tmp['Country']).size())\n else:\n ax.title.set_text('Total Number of Medals of Winter Olympics Year: %d City: %s' % (i, 'Pyeongchang'))\n m = []\n for j in self.df_2018_winter['NOC'].tolist():\n n = j[j.find('(')+1:j.find(')')]\n m.append(n)\n k = self.df_2018_winter['Total'].tolist()\n d = dict(zip(m, k))\n d.pop('30 NOCs', None)\n max_medal = float(max(d.values()))\n for country in shpreader.Reader(countries_shp).records():\n iso = country.attributes['ADM0_A3']\n medal_num = 0\n if iso in iso_lib:\n ioc = self.conv.loc[self.conv['ISO'] == iso,'IOC'].iloc[0]\n if not pd.isna(ioc):\n if ioc in d.keys():\n medal_num = d[ioc]\n if all([iso == 'RUS', i>=1956, i<=1988]):\n medal_num = d['URS']\n if all([iso=='DEU', i>=1968, i<=1988]):\n medal_num = d['FRG'] + d['GDR']\n if all([iso=='DEU', i>=1956, i<=1964]):\n medal_num = d['EUA']\n if i==1952 and iso=='DEU':\n medal_num = d['FRG']\n if i==1992 and iso=='RUS':\n medal_num = d['EUN']\n if i==2018 and iso=='RUS':\n medal_num = d['OAR']\n ax.add_geometries(country.geometry, ccrs.PlateCarree(),\n facecolor=cmap(medal_num / max_medal, 1))\n sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(0, max_medal))\n sm._A = []\n plt.colorbar(sm, ax=ax, orientation=\"horizontal\", fraction=0.046, pad=0.04)\n fname = './medal_figures_winter/year_%d.png' % i\n filenames.append(fname)\n plt.savefig(fname=fname, format='png')\n plt.close(fig)\n images = []\n # Create the gif.\n for filename in filenames:\n images.append(imageio.imread(filename))\n imageio.mimsave('./medal_figures_winter/movie.gif', images, duration=duration)\n return", "def _build_multiband_mask(data, tractor, filt2pixscale, fill_value=0.0,\n threshmask=0.01, r50mask=0.05, maxshift=10,\n relmaxshift=0.1,\n sigmamask=3.0, neighborfactor=1.0, verbose=False):\n import numpy.ma as ma\n from copy import copy\n from skimage.transform import resize\n from legacyhalos.mge import find_galaxy\n from legacyhalos.misc import srcs2image, ellipse_mask\n\n import matplotlib.pyplot as plt\n from astropy.visualization import simple_norm\n\n bands, refband = data['bands'], data['refband']\n #residual_mask = data['residual_mask']\n\n #nbox = 5\n #box = np.arange(nbox)-nbox // 2\n #box = np.meshgrid(np.arange(nbox), np.arange(nbox))[0]-nbox//2\n\n xobj, yobj = np.ogrid[0:data['refband_height'], 0:data['refband_width']]\n\n # If the row-index of the central galaxy is not provided, use the source\n # nearest to the center of the field.\n if 'galaxy_indx' in data.keys():\n galaxy_indx = np.atleast_1d(data['galaxy_indx'])\n else:\n galaxy_indx = np.array([np.argmin((tractor.bx - data['refband_height']/2)**2 +\n (tractor.by - data['refband_width']/2)**2)])\n data['galaxy_indx'] = np.atleast_1d(galaxy_indx)\n data['galaxy_id'] = ''\n\n #print('Import hack!')\n #norm = simple_norm(img, 'log', min_percent=0.05, clip=True)\n #import matplotlib.pyplot as plt ; from astropy.visualization import simple_norm\n\n ## Get the PSF sources.\n #psfindx = np.where(tractor.type == 'PSF')[0]\n #if len(psfindx) > 0:\n # psfsrcs = tractor.copy()\n # psfsrcs.cut(psfindx)\n #else:\n # psfsrcs = None\n\n def tractor2mge(indx, factor=1.0):\n # Convert a Tractor catalog 
entry to an MGE object.\n class MGEgalaxy(object):\n pass\n\n default_majoraxis = tractor.diam_init[indx] * 60 / 2 / filt2pixscale[refband] # [pixels]\n default_pa = tractor.pa_init[indx]\n default_ba = tractor.ba_init[indx]\n #default_theta = (270 - default_pa) % 180\n #default_eps = 1 - tractor.ba_init[indx]\n\n #if tractor.sga_id[indx] > -1:\n if tractor.type[indx] == 'PSF' or tractor.shape_r[indx] < 2:\n pa = tractor.pa_init[indx]\n ba = tractor.ba_init[indx]\n # take away the extra factor of 2 we put in in read_sample()\n r50 = tractor.diam_init[indx] * 60 / 2 / 2\n if r50 < 5:\n r50 = 5.0 # minimum size, arcsec\n majoraxis = factor * r50 / filt2pixscale[refband] # [pixels]\n #majoraxis = factor * tractor.diam_init[indx] * 60 / 2 / 2 / filt2pixscale[refband] # [pixels]\n else:\n ee = np.hypot(tractor.shape_e1[indx], tractor.shape_e2[indx])\n ba = (1 - ee) / (1 + ee)\n pa = 180 - (-np.rad2deg(np.arctan2(tractor.shape_e2[indx], tractor.shape_e1[indx]) / 2))\n pa = pa % 180\n\n # can be zero (or very small) if fit as a PSF or REX\n if tractor.shape_r[indx] > 1:\n majoraxis = factor * tractor.shape_r[indx] / filt2pixscale[refband] # [pixels]\n else:\n majoraxis = factor * tractor.diam_init[indx] * 60 / 2 / 2 / filt2pixscale[refband] # [pixels]\n\n mgegalaxy = MGEgalaxy()\n \n mgegalaxy.xmed = tractor.by[indx]\n mgegalaxy.ymed = tractor.bx[indx]\n mgegalaxy.xpeak = tractor.by[indx]\n mgegalaxy.ypeak = tractor.bx[indx]\n\n # never use the Tractor geometry (only the centroid)\n # https://portal.nersc.gov/project/cosmo/temp/ioannis/virgofilaments-html/215/NGC5584/NGC5584.html\n if True:\n mgegalaxy.eps = 1-ba\n mgegalaxy.pa = pa\n mgegalaxy.theta = (270 - pa) % 180\n mgegalaxy.majoraxis = majoraxis\n else:\n mgegalaxy.eps = 1 - default_ba\n mgegalaxy.pa = default_pa\n mgegalaxy.theta = (270 - default_pa) % 180\n mgegalaxy.majoraxis = default_majoraxis\n\n # always restore all pixels within the nominal / initial size of the galaxy\n #objmask = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True\n # default_majoraxis,\n # default_majoraxis * (1-default_eps), \n # np.radians(default_theta-90), xobj, yobj)\n #objmask = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True\n # default_majoraxis, default_majoraxis, 0.0, xobj, yobj)\n\n objmask = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True\n mgegalaxy.majoraxis,\n mgegalaxy.majoraxis * (1-mgegalaxy.eps), \n np.radians(mgegalaxy.theta-90), xobj, yobj)\n\n # central 10% pixels can override the starmask\n objmask_center = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True\n 0.1*mgegalaxy.majoraxis,\n 0.1*mgegalaxy.majoraxis * (1-mgegalaxy.eps), \n np.radians(mgegalaxy.theta-90), xobj, yobj)\n\n return mgegalaxy, objmask, objmask_center\n\n # Now, loop through each 'galaxy_indx' from bright to faint.\n data['mge'] = []\n for ii, central in enumerate(galaxy_indx):\n print('Determing the geometry for galaxy {}/{}.'.format(\n ii+1, len(galaxy_indx)))\n\n # [1] Determine the non-parametric geometry of the galaxy of interest\n # in the reference band. First, subtract all models except the galaxy\n # and galaxies \"near\" it. 
Also restore the original pixels of the\n # central in case there was a poor deblend.\n largeshift = False\n mge, centralmask, centralmask2 = tractor2mge(central, factor=1.0)\n #plt.clf() ; plt.imshow(centralmask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-mask.png') ; pdb.set_trace()\n\n iclose = np.where([centralmask[np.int(by), np.int(bx)]\n for by, bx in zip(tractor.by, tractor.bx)])[0]\n \n srcs = tractor.copy()\n srcs.cut(np.delete(np.arange(len(tractor)), iclose))\n model = srcs2image(srcs, data['{}_wcs'.format(refband.lower())],\n band=refband.lower(),\n pixelized_psf=data['{}_psf'.format(refband.lower())])\n\n img = data[refband].data - model\n img[centralmask] = data[refband].data[centralmask]\n\n mask = np.logical_or(ma.getmask(data[refband]), data['residual_mask'])\n #mask = np.logical_or(data[refband].mask, data['residual_mask'])\n\n # restore the central pixels but not the masked stellar pixels\n centralmask[np.logical_and(data['starmask'], np.logical_not(centralmask2))] = False\n mask[centralmask] = False\n\n img = ma.masked_array(img, mask)\n ma.set_fill_value(img, fill_value)\n #if ii == 1:\n # pdb.set_trace()\n\n mgegalaxy = find_galaxy(img, nblob=1, binning=1, quiet=False)#, plot=True) ; plt.savefig('cosmo-www/tmp/junk-mge.png')\n #plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('junk-mask.png')\n ##plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n #pdb.set_trace()\n\n # Did the galaxy position move? If so, revert back to the Tractor geometry.\n if np.abs(mgegalaxy.xmed-mge.xmed) > maxshift or np.abs(mgegalaxy.ymed-mge.ymed) > maxshift:\n print('Large centroid shift (x,y)=({:.3f},{:.3f})-->({:.3f},{:.3f})'.format(\n mgegalaxy.xmed, mgegalaxy.ymed, mge.xmed, mge.ymed))\n print(' Reverting to the default geometry and the Tractor centroid.')\n largeshift = True\n mgegalaxy = copy(mge)\n\n radec_med = data['{}_wcs'.format(refband.lower())].pixelToPosition(\n mgegalaxy.ymed+1, mgegalaxy.xmed+1).vals\n radec_peak = data['{}_wcs'.format(refband.lower())].pixelToPosition(\n mgegalaxy.ypeak+1, mgegalaxy.xpeak+1).vals\n mge = {\n 'largeshift': largeshift,\n 'ra': tractor.ra[central], 'dec': tractor.dec[central],\n 'bx': tractor.bx[central], 'by': tractor.by[central],\n #'mw_transmission_g': tractor.mw_transmission_g[central],\n #'mw_transmission_r': tractor.mw_transmission_r[central],\n #'mw_transmission_z': tractor.mw_transmission_z[central],\n 'ra_moment': radec_med[0], 'dec_moment': radec_med[1],\n #'ra_peak': radec_med[0], 'dec_peak': radec_med[1]\n }\n for key in ('eps', 'majoraxis', 'pa', 'theta', 'xmed', 'ymed', 'xpeak', 'ypeak'):\n mge[key] = np.float32(getattr(mgegalaxy, key))\n if key == 'pa': # put into range [0-180]\n mge[key] = mge[key] % np.float32(180)\n data['mge'].append(mge)\n\n #if False:\n # #plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n # plt.clf() ; mgegalaxy = find_galaxy(img, nblob=1, binning=1, quiet=True, plot=True)\n # plt.savefig('/mnt/legacyhalos-data/debug.png')\n\n # [2] Create the satellite mask in all the bandpasses. 
Use srcs here,\n # which has had the satellites nearest to the central galaxy trimmed\n # out.\n print('Building the satellite mask.')\n satmask = np.zeros(data[refband].shape, bool)\n for filt in bands:\n # do not let GALEX and WISE contribute to the satellite mask\n if data[filt].shape != satmask.shape:\n continue\n \n cenflux = getattr(tractor, 'flux_{}'.format(filt.lower()))[central]\n satflux = getattr(srcs, 'flux_{}'.format(filt.lower()))\n if cenflux <= 0.0:\n #raise ValueError('Central galaxy flux is negative!')\n print('Central galaxy flux is negative! Proceed with caution...')\n #pdb.set_trace()\n \n satindx = np.where(np.logical_or(\n (srcs.type != 'PSF') * (srcs.shape_r > r50mask) *\n (satflux > 0.0) * ((satflux / cenflux) > threshmask),\n srcs.ref_cat == 'R1'))[0]\n #satindx = np.where(srcs.ref_cat == 'R1')[0]\n #if np.isin(central, satindx):\n # satindx = satindx[np.logical_not(np.isin(satindx, central))]\n if len(satindx) == 0:\n #raise ValueError('All satellites have been dropped!')\n #print('Warning! All satellites have been dropped from band {}!'.format(filt))\n print('Note: no satellites to mask in band {}.'.format(filt))\n else:\n satsrcs = srcs.copy()\n #satsrcs = tractor.copy()\n satsrcs.cut(satindx)\n satimg = srcs2image(satsrcs, data['{}_wcs'.format(filt.lower())],\n band=filt.lower(),\n pixelized_psf=data['{}_psf'.format(filt.lower())])\n thissatmask = satimg > sigmamask*data['{}_sigma'.format(filt.lower())]\n #if filt == 'FUV':\n # plt.clf() ; plt.imshow(thissatmask, origin='lower') ; plt.savefig('junk-{}.png'.format(filt.lower()))\n # #plt.clf() ; plt.imshow(data[filt], origin='lower') ; plt.savefig('junk-{}.png'.format(filt.lower()))\n # pdb.set_trace()\n if satmask.shape != satimg.shape:\n thissatmask = resize(thissatmask*1.0, satmask.shape, mode='reflect') > 0\n\n satmask = np.logical_or(satmask, thissatmask)\n #if True:\n # import matplotlib.pyplot as plt\n # plt.clf() ; plt.imshow(np.log10(satimg), origin='lower') ; plt.savefig('debug.png')\n # plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('debug.png')\n ## #plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n # pdb.set_trace()\n\n #print(filt, np.sum(satmask), np.sum(thissatmask))\n\n #plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-satmask.png')\n \n # [3] Build the final image (in each filter) for ellipse-fitting. First,\n # subtract out the PSF sources. Then update the mask (but ignore the\n # residual mask). 
Finally convert to surface brightness.\n #for filt in ['W1']:\n for filt in bands:\n thismask = ma.getmask(data[filt])\n if satmask.shape != thismask.shape:\n _satmask = (resize(satmask*1.0, thismask.shape, mode='reflect') > 0) == 1.0\n _centralmask = (resize(centralmask*1.0, thismask.shape, mode='reflect') > 0) == 1.0\n mask = np.logical_or(thismask, _satmask)\n mask[_centralmask] = False\n else:\n mask = np.logical_or(thismask, satmask)\n mask[centralmask] = False\n #if filt == 'r':\n # #plt.imshow(_satmask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-satmask-{}.png'.format(filt))\n # plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-mask-{}.png'.format(filt))\n # plt.clf() ; plt.imshow(img, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-img-{}.png'.format(filt))\n # pdb.set_trace()\n\n varkey = '{}_var'.format(filt.lower())\n imagekey = '{}_masked'.format(filt.lower())\n psfimgkey = '{}_psfimg'.format(filt.lower())\n thispixscale = filt2pixscale[filt]\n if imagekey not in data.keys():\n data[imagekey], data[varkey], data[psfimgkey] = [], [], []\n\n img = ma.getdata(data[filt]).copy()\n \n # Get the PSF sources.\n psfindx = np.where((tractor.type == 'PSF') * (getattr(tractor, 'flux_{}'.format(filt.lower())) / cenflux > threshmask))[0]\n if len(psfindx) > 0 and filt.upper() != 'W3' and filt.upper() != 'W4': \n #if len(psfindx) > 0 and filt.upper() != 'NUV' and filt.upper() != 'FUV' and filt.upper() != 'W3' and filt.upper() != 'W4':\n psfsrcs = tractor.copy()\n psfsrcs.cut(psfindx)\n else:\n psfsrcs = None\n \n if psfsrcs:\n psfimg = srcs2image(psfsrcs, data['{}_wcs'.format(filt.lower())],\n band=filt.lower(),\n pixelized_psf=data['{}_psf'.format(filt.lower())])\n if False:\n #import fitsio ; fitsio.write('junk-psf-{}.fits'.format(filt.lower()), data['{}_psf'.format(filt.lower())].img, clobber=True)\n fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)\n im = ax1.imshow(np.log10(img), origin='lower') ; fig.colorbar(im, ax=ax1)\n im = ax2.imshow(np.log10(psfimg), origin='lower') ; fig.colorbar(im, ax=ax2)\n im = ax3.imshow(np.log10(data['{}_psf'.format(filt.lower())].img), origin='lower') ; fig.colorbar(im, ax=ax3)\n im = ax4.imshow(img-psfimg, origin='lower') ; fig.colorbar(im, ax=ax4)\n plt.savefig('desi-users/ioannis/tmp/qa-psf-{}.png'.format(filt.lower()))\n if filt == 'r':# or filt == 'r':\n pdb.set_trace()\n img -= psfimg\n else:\n psfimg = np.zeros((2, 2), 'f4')\n\n data[psfimgkey].append(psfimg)\n \n img = ma.masked_array((img / thispixscale**2).astype('f4'), mask) # [nanomaggies/arcsec**2]\n var = data['{}_var_'.format(filt.lower())] / thispixscale**4 # [nanomaggies**2/arcsec**4]\n\n # Fill with zeros, for fun--\n ma.set_fill_value(img, fill_value)\n #if ii == 0 and filt == 'r': #filt == 'W1' or \n # plt.clf() ; plt.imshow(img, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-img-{}.png'.format(filt.lower()))\n # plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-mask-{}.png'.format(filt.lower()))\n ##### plt.clf() ; plt.imshow(thismask, origin='lower') ; plt.savefig('junk-thismask-{}.png'.format(filt.lower()))\n # pdb.set_trace()\n \n data[imagekey].append(img)\n data[varkey].append(var)\n\n #test = data['r_masked'][0]\n #plt.clf() ; plt.imshow(np.log(test.clip(test[mgegalaxy.xpeak, mgegalaxy.ypeak]/1e4)), origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n #pdb.set_trace()\n\n # Cleanup?\n for filt in bands:\n del data[filt]\n del 
data['{}_var_'.format(filt.lower())]\n\n return data", "def write_images(band,skypos,tranges,skyrange,write_cnt=False,write_int=False,write_rr=False,framesz=0,width=False,height=False,verbose=0,tscale=1000.,memlight=False,coadd=False,response=False,calpath='../cal/',clobber=False,retries=20):\n\t# No files were requested, so don't bother doing anything.\n\tif not (write_cnt or write_int or write_rr):\n\t\treturn\n\tcount,rr,intensity=create_images(band,skypos,tranges,skyrange,framesz=framesz,width=width,height=height,verbose=verbose,tscale=tscale,memlight=memlight,coadd=coadd,response=response,calpath=calpath,retries=retries)\n\n\t# Add a conditional so that this is only created for multi-frame images\n\ttbl = movie_tbl(band,tranges,framesz=framesz,verbose=verbose,retries=retries)\n\n\tif write_cnt:\n\t\thdu = pyfits.PrimaryHDU(count)\n\t\thdu = fits_header(band,skypos,tranges,skyrange,width=width,height=height,verbose=verbose,tscale=tscale,hdu=hdu,retries=retries)\n\t\thdulist = pyfits.HDUList([hdu,tbl])\n\t\tif verbose:\n\t\t\tprint 'Writing count image to '+str(write_cnt)\n\t\thdulist.writeto(write_cnt,clobber=clobber)\n\tif write_rr:\n\t\thdu = pyfits.PrimaryHDU(rr)\n\t\thdu = fits_header(band,skypos,tranges,skyrange,width=width,height=height,verbose=verbose,tscale=tscale,hdu=hdu,retries=retries)\n\t\thdulist = pyfits.HDUList([hdu,tbl])\n\t\tif verbose:\n\t\t\tprint 'Writing response image to '+str(write_rr)\n hdulist.writeto(write_rr,clobber=clobber)\n\tif write_int:\n\t\thdu = pyfits.PrimaryHDU(intensity)\n\t\thdu = fits_header(band,skypos,tranges,skyrange,width=width,height=height,verbose=verbose,tscale=tscale,hdu=hdu,retries=retries)\n\t\thdulist = pyfits.HDUList([hdu,tbl])\n\t\tif verbose:\n\t\t\tprint 'Writing intensity image to '+str(write_int)\n\t\thdulist.writeto(write_int,clobber=clobber)\n\n\treturn", "def movie(band,skypos,tranges,skyrange,framesz=0,width=False,height=False,\n\t\t verbose=0,tscale=1000.,memlight=False,coadd=False,\n\t\t response=False,calpath='../cal/',hdu=False,retries=20):\n\t# Not defining stepsz effectively creates a count map.\n\tmv = []\n\trr = []\n\tif coadd:\n\t\tif verbose>2:\n\t\t\tprint 'Coadding across '+str(tranges)\n\t\tmv.append(countmap(band,skypos,tranges,skyrange,width=width,\n\t\t\t\t height=height,verbose=verbose,tscale=tscale,memlight=memlight,\n\t\t\t\t hdu=hdu,retries=retries))\n\t\trr.append(rrhr(band,skypos,tranges,skyrange,response=response,width=width,height=height,stepsz=1.,verbose=verbose,calpath=calpath,tscale=tscale,hdu=hdu,retries=retries)) if response else rr.append(np.ones(np.shape(mv)[1:]))\n\telse:\n\t\tfor trange in tranges:\n\t\t\tstepsz = framesz if framesz else trange[1]-trange[0]\n\t\t\tsteps = np.ceil((trange[1]-trange[0])/stepsz)\n\t\t\tfor i,t0 in enumerate(np.arange(trange[0],trange[1],stepsz)):\n\t\t\t\tif verbose>1:\n\t\t\t\t\tprint_inline('Movie frame '+str(i+1)+' of '+str(int(steps)))\n\t\t\t\tt1 = trange[1] if i==steps else t0+stepsz\n\t\t\t\tmv.append(countmap(band,skypos,[[t0,t1]],skyrange,width=width,height=height,verbose=verbose,tscale=tscale,memlight=memlight,hdu=hdu,retries=retries))\n\t# FIXME: This should not create an rr unless it's requested...\n\t\t\t\trr.append(rrhr(band,skypos,[[t0,t1]],skyrange,response=response,width=width,height=height,stepsz=1.,verbose=verbose,calpath=calpath,tscale=tscale,retries=retries)) if response else rr.append(np.ones(np.shape(mv)[1:]))\n\n\treturn np.array(mv),np.array(rr)", "def _bands_competed_last_year():\n lLastYear = datetime.datetime.now().year - 1\n cursor = 
connection.cursor()\n cursor.execute(\"SELECT count(distinct(r.band_id)) FROM contests_contestevent e, contests_contestresult r WHERE r.contest_event_id = e.id AND extract(year from e.date_of_event) = %(year)s GROUP BY extract(year from e.date_of_event) ORDER BY extract(year from e.date_of_event) desc\", {'year' : lLastYear})\n rows = cursor.fetchall()\n lReturn = 0\n if rows and rows[0]:\n lReturn = rows[0][0]\n cursor.close()\n return lReturn", "def _filter_images(data, hmin):\n #Laziest way to get a circle mask\n fp = CircularAperture((0,0), r=hmin).to_mask().data>.1\n fp = fp.astype(bool)\n\n # Apply maximum filter, flux filter\n filt_image = maximum_filter(data, footprint=fp,\n mode='constant', cval=0)\n origins = product([0,-1], [0,-1])\n max_4sum = np.amax([_conv_origin(data, o) for o in origins], axis=0)\n return(filt_image, max_4sum)", "def masked(months=range(1, 13), years=[2009], folder=\"data/\", layer=\"BHR_VIS\"):\n data = []\n file_template = 'NETCDF:\"{:s}\":{:s}' # Template for the Netcdf path\n # the actual filename\n fname_template = '{:s}/GlobAlbedo.merge.albedo.05.{:d}{:02d}.nc'\n for year in years:\n for month in months:\n fname = fname_template.format(folder, year, month)\n netcdf_fname = file_template.format(fname, layer)\n g = gdal.Open(netcdf_fname)\n if g is None:\n raise IOError(\"Problem with reading file {}\".format(fname))\n the_data = g.ReadAsArray()\n masked_data = np.ma.array(the_data,mask=np.isnan(the_data))\n data.append(masked_data)\n output_data = np.ma.array(data)\n return output_data" ]
[ "0.6658748", "0.64081764", "0.6048875", "0.5828636", "0.53740543", "0.5318547", "0.5314894", "0.5258221", "0.5224046", "0.49241713", "0.48995483", "0.48989847", "0.48840585", "0.47406876", "0.47206843", "0.4713083", "0.46992207", "0.46542522", "0.4640126", "0.4630526", "0.46242535", "0.4550905", "0.45305178", "0.44995046", "0.44573098", "0.4433689", "0.44312298", "0.43917808", "0.43894905", "0.43639097" ]
0.76945615
0
Function to perform a 3 year moving window filter for a single land cover value (such as Forest as 1) for all years in an image. Calls the function mask3. The image bands do not need to be in order, but the bandNames argument must be in chronological order. The temporal filter inspects the central position of three consecutive years, and if the extremities of the consecutive years are identical but the centre position is not, then the central pixels are reclassified to match its temporal neighbour class. This function can be applied to whichever land cover values the user decides, such as all of the land cover values or a select few.
def applyWindow3years(imagem, value, bandNames): img_out = imagem.select(bandNames[0]) for i in np.arange(1, len(bandNames)-1): img_out = img_out.addBands(mask3(imagem, value,bandNames[(i-1):(i+2)])) img_out = img_out.addBands(imagem.select(bandNames[-1])) return img_out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyWindow4years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-2):\n img_out = img_out.addBands(mask4(imagem, value,bandNames[(i-1):(i+3)]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def applyWindow5years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-3):\n img_out = img_out.addBands(mask5(imagem, value,bandNames[(i-1):(i+4)]))\n img_out = img_out.addBands(imagem.select(bandNames[-3]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def applyMask3last(imagem, value, bandNames):\n mask = imagem.select(bandNames[-3]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[-2]).eq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[-1]).neq(value))\n change_img = imagem.select(bandNames[-1]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[0:-1])\n img_out = img_out.addBands(imagem.select(bandNames[-1]).blend(change_img))\n return img_out", "def applyMask3first(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).neq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).eq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).eq(value))\n change_img = imagem.select(bandNames[0]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[0]).blend(change_img)\n img_out = img_out.addBands(imagem.select(bandNames[1:]))\n return img_out", "def mask3(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).eq(value))\n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[1]).blend(change_img)\n return img_out", "def applyForwardNoDataFilter(image, bandNames):\n #Get a list of band names from year(1) through the last year\n bandNamesEE = ee.List(bandNames[1:])\n \n #Define forwards filter\n #In first iteration, bandName=bandNames[1] and previousImage is image.select(bandNames[0]), or the classifications for the first year\n #currentImage = image.select(bandNames[1]), the image for the second year\n #previousImage = image.select(bandNames[0]), the first year\n #Find where the second year has missing data, replace those values with the values of the first year\n #Append previousImage to currentImage, so now currentImage is a two band image, with the first band being the second year with the gap fill\n #and the second band is the first years classification\n #The iteration continues, now with followingImage.select[0] being the second year with the gap fill applied, and bandName is the third year\n def forwardNoDataFilter(bandName, previousImage):\n currentImage = image.select(ee.String(bandName))\n previousImage = ee.Image(previousImage)\n currentImage = currentImage.unmask(previousImage.select([0]))\n return currentImage.addBands(previousImage)\n \n #Iterate through all the years, starting with the first year's classification\n filtered = bandNamesEE.iterate(forwardNoDataFilter,ee.Image(image.select(bandNames[0])))\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)", "def applyBackwardNoDataFilter(image, bandNames):\n #Get a list of band names to iterate over, from year(-2) through year(0)\n bandNamesEE = ee.List(bandNames[:-1]).reverse()\n \n 
#Define backwards filter\n #In first iteration, bandName=bandNames[-2] and followingImage is image.select(bandNames[-1]), or the classifications for the final year\n #currentImage = image.select(bandNames[-2]), the second to last year\n #followingImage = image.select(bandNames[-1]), the final year\n #Find where the second to last year has missing data, replace those values with the values of the following year\n #Append followingImage to currentImage, so now currentImage is a two band image, with the first band being the second to last year with the gap fill\n #and the second band is the final years classification\n #The iteration continues, now with followingImage.select[0] being the second to last year with the gap fill applied, and bandName is the third to last year\n def backwardNoDataFilter(bandName, followingImage):\n currentImage = image.select(ee.String(bandName))\n followingImage = ee.Image(followingImage)\n currentImage = currentImage.unmask(followingImage.select([0]))\n return currentImage.addBands(followingImage)\n \n #Apply backwards filter, starting with the final year and iterating through to year(0) \n filtered = bandNamesEE.iterate(backwardNoDataFilter,ee.Image(image.select(bandNames[-1])))\n #Re-order bands to be in chronological order\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)", "def msatna_blocks_3lag_year(year: int) -> pd.Series:\n return msatna_blocks_3lag_panel()[year]", "def applyFrequencyFilter(image, yearBandNames, classDictionary, filterParams): \n #Grab land cover classes as a list of strings\n lc_classes = classDictionary.keys().getInfo()\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Get the frequency of each class through the years by reducing the image collection to an image using the sum reducer\n class_frequency = binary_class_images.reduce(ee.Reducer.sum().unweighted()).rename(lc_classes)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Define an image to add bands with frequency filter applied\n out_img = ee.Image()\n \n #Loop through years\n for yearBand in yearBandNames:\n #Select the target year from the image\n yearImage = image.select(yearBand)\n \n #Loop through land cover classes in filterParams\n for lc_class in lc_classes:\n #Get the minimum occurance allowed in that land cover class\n min_occurance = filterParams.get(lc_class)\n \n #Find if the land cover class had less than the number of min_occurances in each pixel\n change_class = class_frequency.select(lc_class).lt(ee.Image.constant(min_occurance))\n \n #If change_class==1, then replace that pixel with the mode of all the years in that pixel\n #This filter is only applied to pixels of this land cover class\n #First mask yearImage to pixels of this land cover class, then get the union of pixels where change_class==1,\n #if both conditions are true, then the pixel is replaced with the mode\n yearImage = yearImage.where(yearImage.eq(ee.Number(classDictionary.get(lc_class))).bitwiseAnd(change_class),mode_image)\n #Rename yearImage to bandName\n yearImage = yearImage.rename(yearBand)\n #Append to output image\n out_img = out_img.addBands(yearImage)\n \n return out_img", "def stack_bands(yr, roi):\n\n water_year_start = '{}-10-01'.format(yr - 1)\n\n winter_s, winter_e = '{}-01-01'.format(yr), '{}-03-01'.format(yr),\n spring_s, spring_e = '{}-03-01'.format(yr), 
'{}-05-01'.format(yr),\n late_spring_s, late_spring_e = '{}-05-01'.format(yr), '{}-07-01'.format(yr)\n summer_s, summer_e = '{}-07-01'.format(yr), '{}-09-01'.format(yr)\n fall_s, fall_e = '{}-09-01'.format(yr), '{}-12-31'.format(yr)\n\n periods = [('cy', winter_s, fall_e),\n ('1', spring_s, spring_e),\n ('2', late_spring_s, late_spring_e),\n ('3', summer_s, summer_e),\n ('4', fall_s, fall_e)]\n\n first = True\n for name, start, end in periods:\n bands = landsat_composites(yr, start, end, roi, name)\n if first:\n input_bands = bands\n proj = bands.select('B2_cy').projection().getInfo()\n first = False\n else:\n input_bands = input_bands.addBands(bands)\n\n for s, e, n in [(spring_s, spring_e, 'espr'),\n (late_spring_s, late_spring_e, 'lspr'),\n (summer_s, summer_e, 'smr'),\n (fall_s, fall_e, 'fl'),\n (water_year_start, spring_e, 'wy_espr'),\n (water_year_start, late_spring_e, 'wy_espr'),\n (water_year_start, summer_e, 'wy_smr'),\n (water_year_start, fall_e, 'wy')]:\n gridmet = ee.ImageCollection(\"IDAHO_EPSCOR/GRIDMET\").filterBounds(\n roi).filterDate(s, e).select('pr', 'eto', 'tmmn', 'tmmx')\n temp_reducer = ee.Reducer.mean()\n t_names = ['tmax'.format(n), 'tmin'.format(n)]\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_{}'.format(n), 'pet_total_{}'.format(n)).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n wd_estimate = precip_sum.select('precip_total_{}'.format(n)).subtract(precip_sum.select(\n 'pet_total_{}'.format(n))).rename('wd_est_{}'.format(n))\n input_bands = input_bands.addBands([temp_perc, precip_sum, wd_estimate])\n\n temp_reducer = ee.Reducer.percentile([10, 50, 90])\n t_names = ['tmmn_p10_cy', 'tmmn_p50_cy', 'tmmn_p90_cy', 'tmmx_p10_cy', 'tmmx_p50_cy', 'tmmx_p90_cy']\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_cy', 'pet_total_cy').resample('bilinear').reproject(crs=proj['crs'], scale=30)\n wd_estimate = precip_sum.select('precip_total_cy').subtract(precip_sum.select(\n 'pet_total_cy')).rename('wd_est_cy')\n\n coords = ee.Image.pixelLonLat().rename(['Lon_GCS', 'LAT_GCS']).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n ned = ee.Image('USGS/NED')\n terrain = ee.Terrain.products(ned).select('elevation', 'slope', 'aspect').reduceResolution(\n ee.Reducer.mean()).reproject(crs=proj['crs'], scale=30)\n\n world_climate = get_world_climate(proj=proj)\n elev = terrain.select('elevation')\n tpi_1250 = elev.subtract(elev.focal_mean(1250, 'circle', 'meters')).add(0.5).rename('tpi_1250')\n tpi_250 = elev.subtract(elev.focal_mean(250, 'circle', 'meters')).add(0.5).rename('tpi_250')\n tpi_150 = elev.subtract(elev.focal_mean(150, 'circle', 'meters')).add(0.5).rename('tpi_150')\n static_input_bands = coords.addBands([temp_perc, wd_estimate, terrain, tpi_1250, tpi_250, tpi_150, world_climate])\n\n nlcd = ee.Image('USGS/NLCD/NLCD2011').select('landcover').reproject(crs=proj['crs'], scale=30).rename('nlcd')\n\n cdl_cult = ee.Image('USDA/NASS/CDL/2017').select('cultivated'). 
\\\n remap([1, 2], [0, 1]).reproject(crs=proj['crs'], scale=30).rename('cdlclt')\n\n cdl_crop = ee.Image('USDA/NASS/CDL/2017').select('cropland').reproject(crs=proj['crs'],\n scale=30).rename('cdlcrp')\n\n gsw = ee.Image('JRC/GSW1_0/GlobalSurfaceWater')\n occ_pos = gsw.select('occurrence').gt(0)\n water = occ_pos.unmask(0).rename('gsw')\n\n static_input_bands = static_input_bands.addBands([nlcd, cdl_cult, cdl_crop, water])\n\n input_bands = input_bands.addBands(static_input_bands).clip(roi)\n\n # standardize names to match EE javascript output\n standard_names = []\n temp_ct = 1\n prec_ct = 1\n names = input_bands.bandNames().getInfo()\n for name in names:\n if 'tavg' in name and 'tavg' in standard_names:\n standard_names.append('tavg_{}'.format(temp_ct))\n temp_ct += 1\n elif 'prec' in name and 'prec' in standard_names:\n standard_names.append('prec_{}'.format(prec_ct))\n prec_ct += 1\n else:\n standard_names.append(name)\n\n input_bands = input_bands.rename(standard_names)\n return input_bands", "def fmask(bandname=\"fmask\"):\n\n def fmask(image):\n imgFmask = image.select(bandname)\n shadow = imgFmask.eq(3)\n snow = imgFmask.eq(4)\n cloud = imgFmask.eq(5)\n\n mask = shadow.Or(snow).Or(cloud)\n\n imgMask = image.updateMask(mask.Not())\n return imgMask\n return fmask", "def _mask3d(self, n, i, window):\n\n n = np.array(n)\n i = np.array(i)\n\n w2 = (window - 1) // 2\n\n x1, y1, z1 = np.clip(i - w2, 0 * n, n)\n x2, y2, z2 = np.clip(i + w2 + 1, 0 * n, n)\n\n mask = np.zeros(n, dtype=np.bool)\n mask[x1:x2, y1:y2, z1:z2] = True\n\n return mask", "def masked_f3kdb(clip: vs.VideoNode,\n rad: int = 16,\n thr: Union[int, List[int]] = 24,\n grain: Union[int, List[int]] = [12, 0],\n mask_args: Dict[str, Any] = {}\n ) -> vs.VideoNode:\n from debandshit import dumb3kdb\n\n deb_mask_args: Dict[str, Any] = dict(brz=(1000, 2750))\n deb_mask_args |= mask_args\n\n bits, clip = _get_bits(clip)\n\n deband_mask = detail_mask(clip, **deb_mask_args)\n\n deband = dumb3kdb(clip, radius=rad, threshold=thr, grain=grain, seed=69420)\n deband_masked = core.std.MaskedMerge(deband, clip, deband_mask)\n deband_masked = deband_masked if bits == 16 else depth(deband_masked, bits)\n return deband_masked", "def mask4(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[3]).eq(value)) \n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value)\n change_img1 = imagem.select(bandNames[2]).mask(mask.eq(1)).where(mask.eq(1), value) \n img_out = imagem.select(bandNames[1]).blend(change_img).blend(change_img1)\n return img_out", "def test_3dtproject_temporal_filter_wf(self):\n \n self.wf = build_3dtproject_temporal_filter(\n bpHigh= .9, bpLow= 0.005, tr=2,\n import_file=self.sample_raw_image,\n export_file=self.export_path,\n base_dir=self.test_path, crashdump_dir=self.test_path,\n mask_file=self.sample_raw_image_mask\n )", "def load_copernicus_ammonia(layers, time_slice, lat_slice, lon_slice, verbose=False):\n xr_layers = []\n\n if 'agl' in layers:\n xr_layers.append(xr.load_dataset(\n './data/copernicus/ammonia/CAMS-GLOB-ANT_Glb_0.1x0.1_anthro_nh3_v4.2_monthly_agl.nc').agl.sel(\n time=time_slice, lat=lat_slice, lon=lon_slice))\n\n if 'ags' in layers:\n xr_layers.append(xr.load_dataset(\n './data/copernicus/ammonia/CAMS-GLOB-ANT_Glb_0.1x0.1_anthro_nh3_v4.2_monthly_ags.nc').ags.sel(\n time=time_slice, lat=lat_slice, 
lon=lon_slice))\n\n nh3 = sum(xr_layers)\n nh3.name = 'nh3'\n\n if verbose:\n\n shape = gpd.read_file('./shp/lombardia/lombardia.shp').to_crs(epsg=4326)\n\n ncols = len(xr_layers) + 1\n fig, axs = plt.subplots(ncols=ncols, figsize=(8 * ncols, 5))\n\n for i in range(len(xr_layers)):\n shape.plot(ax=axs[i], color='black', alpha=0.5)\n xr_layers[i].mean(dim='time').plot(ax=axs[i], alpha=0.5)\n\n shape.plot(ax=axs[len(xr_layers)], color='black', alpha=0.5)\n nh3.mean(dim='time').plot(ax=axs[len(xr_layers)], alpha=0.5)\n\n plt.show()\n\n return nh3", "def masked_f3kdb(clip: vs.VideoNode,\n rad: int = 16,\n thr: Union[int, List[int]] = 24,\n grain: Union[int, List[int]] = [12, 0],\n mask_args: Dict[str, Any] = {},\n show_mask: bool = False) -> vs.VideoNode:\n from debandshit import dumb3kdb\n\n deb_mask_args: Dict[str, Any] = dict(detail_brz=1500, lines_brz=1000)\n deb_mask_args |= mask_args\n\n bits, clip = _get_bits(clip)\n\n deband_mask = detail_mask(clip, **deb_mask_args)\n\n deband = dumb3kdb(clip, radius=rad, threshold=thr, grain=grain)\n deband_masked = core.std.MaskedMerge(deband, clip, deband_mask)\n deband_masked = deband_masked if bits == 16 else depth(deband_masked, bits)\n return deband_masked", "def applyIncidenceFilter(image, bandNames, classDictionary, numChangesCutoff = 8, connectedPixelCutoff=6):\n #Calculate the number of times a pixel changes throughout the time series and determine if it is over the numChangesCutoff\n num_changes = calculateNumberOfChanges(image, bandNames)\n too_many_changes = num_changes.gt(numChangesCutoff)\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Calculate the number of connected pixels for each land cover class and year, reduce to a single band image representing the number\n #of connected pixels of the same land cover class as the central pixel, and determine if it is over the connectedPixelCutoff\n connected_pixel_count = ee.ImageCollection(binary_class_images.map(lambda x: x.mask(x).connectedPixelCount(100,False).reduce(ee.Reducer.sum()).lt(connectedPixelCutoff)))\n \n #Get a bitwiseAnd determination if the number of connected pixels <= connectedPixelCutoff and the number of changes > numChangesCutoff \n incidence_filter = ee.ImageCollection(connected_pixel_count.map(lambda x: x.bitwiseAnd(too_many_changes))).toBands().rename(bandNames)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Replace pixels of image where incidence_filter is True with mode_image\n incidence_filtered = image.where(incidence_filter, mode_image)\n \n return incidence_filtered", "def sky_groups():\n cam = \"sky\"\n for light, lens, ndc, good, window in [(True, True, False, True, True),\n (True, True, False, True, False),\n (True, True, False, False, False),\n (True, False, False, True, False),\n (True, False, False, False, False),\n (False, True, False, True, True),\n (False, True, False, False, True)]:\n filenames = flatfiles(cam)\n filenames = get_light_sky(filenames, light)\n filenames = get_lens(filenames, lens)\n filenames = get_ndc(filenames, ndc)\n filenames = get_good(filenames, good)\n filenames = get_window_sky(filenames, window)\n images = valid_images(filenames)\n process_images(images, cam, (light, lens, ndc, good, window))", "def countmap(band,skypos,tranges,skyrange,width=False,height=False,\n\t\t\t 
verbose=0,tscale=1000.,memlight=False,hdu=False,retries=20):\n\timsz = gxt.deg2pix(skypos,skyrange)\n\tcount = np.zeros(imsz)\n\tfor trange in tranges:\n\t\t# If memlight is requested, break the integration into\n\t\t# smaller chunks.\n\t\tstep = memlight if memlight else trange[1]-trange[0]\n\t\tfor i in np.arange(trange[0],trange[1],step):\n\t\t\tt0,t1=i,i+step\n\t\t\tif verbose:\n\t\t\t\tprint_inline('Coadding '+str(t0)+' to '+str(t1))\n\t\t\tevents = gQuery.getArray(gQuery.rect(band,skypos[0],skypos[1],t0,t1,\n\t\t\t\t\t\t\t\t\t\t\t\t skyrange[0],skyrange[1]),\n\t\t\t\t\t\t\t\t\t verbose=verbose,retries=retries)\n\n\t\t\t# Check that there is actually data here.\n\t\t\tif not events:\n\t\t\t\tif verbose>1:\n\t\t\t\t\tprint \"No data in \"+str([t0,t1])\n\t\t\t\tcontinue\n\n\t\t\ttimes = np.array(events,dtype='float64')[:,0 ]/tscale\n\t\t\tcoo =\tnp.array(events,dtype='float64')[:,1:]\n\n\t\t\t# If there's no data, return a blank image.\n\t\t\tif len(coo)==0:\n\t\t\t\tif verbose:\n\t\t\t\t\tprint 'No data in this frame: '+str([t0,t1])\n\t\t\t\tcontinue\n\n\t\t\t# Define World Coordinate System (WCS)\n\t\t\twcs = define_wcs(skypos,skyrange,width=False,height=False)\n\n\t\t\t# Map the sky coordinates onto the focal plane\n\t\t\tfoc = wcs.sip_pix2foc(wcs.wcs_world2pix(coo,1),1)\n\n\t\t\t# Bin the events into actual image pixels\n\t\t\tH,xedges,yedges=np.histogram2d(foc[:,1]-0.5,foc[:,0]-0.5,\n\t\t\t\t\t\t\t\tbins=imsz,range=([ [0,imsz[0]],[0,imsz[1]] ]))\n\t\t\tcount += H\n\n\treturn count", "def maskClouds(self,img):\n\t\t\n\t\tscore = ee.Image(1.0);\n\t\t# Clouds are reasonably bright in the blue band.\n\t\tblue_rescale = img.select('blue').subtract(ee.Number(0.1)).divide(ee.Number(0.3).subtract(ee.Number(0.1)))\n\t\tscore = score.min(blue_rescale);\n\n\t\t# Clouds are reasonably bright in all visible bands.\n\t\tvisible = img.select('red').add(img.select('green')).add(img.select('blue'))\n\t\tvisible_rescale = visible.subtract(ee.Number(0.2)).divide(ee.Number(0.8).subtract(ee.Number(0.2)))\n\t\tscore = score.min(visible_rescale);\n\n\t\t# Clouds are reasonably bright in all infrared bands.\n\t\tinfrared = img.select('nir').add(img.select('swir1')).add(img.select('swir2'))\n\t\tinfrared_rescale = infrared.subtract(ee.Number(0.3)).divide(ee.Number(0.8).subtract(ee.Number(0.3)))\n\t\tscore = score.min(infrared_rescale);\n\n\t\t# Clouds are reasonably cool in temperature.\n\t\ttemp_rescale = img.select('thermal').subtract(ee.Number(300)).divide(ee.Number(290).subtract(ee.Number(300)))\n\t\tscore = score.min(temp_rescale);\n\n\t\t# However, clouds are not snow.\n\t\tndsi = img.normalizedDifference(['green', 'swir1']);\n\t\tndsi_rescale = ndsi.subtract(ee.Number(0.8)).divide(ee.Number(0.6).subtract(ee.Number(0.8)))\n\t\tscore = score.min(ndsi_rescale).multiply(100).byte();\n\t\tmask = score.lt(self.env.cloudThreshold).rename(['cloudMask']);\n\t\timg = img.updateMask(mask);\n \n\t\treturn img;", "def mask5(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[3]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[4]).eq(value))\n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value) \n change_img1 = imagem.select(bandNames[2]).mask(mask.eq(1)).where(mask.eq(1), value) \n change_img2 = imagem.select(bandNames[3]).mask(mask.eq(1)).where(mask.eq(1), value) \n img_out = 
imagem.select(bandNames[1]).blend(change_img).blend(change_img1).blend(change_img2)\n return img_out", "def octave_bands(fc=1000, third=False, start=0.0, n=8):\n\n div = 1\n if third:\n div = 3\n\n # Octave Bands\n fcentre = fc * (\n 2.0 ** (np.arange(start * div, (start + n) * div - (div - 1)) / div)\n )\n fd = 2 ** (0.5 / div)\n bands = np.array([[f / fd, f * fd] for f in fcentre])\n\n return bands, fcentre", "def constant_2015():\n\n #Load the CMIP6 historical\n cubes = iris.load(data_dir+'SO2DMS-em-anthro_input4MIPs_emissions_CMIP_CEDS-v2016-07-26-gr_200001-201412_n48.nc')\n #Get low and high level emissions just in the last year (2014)\n cubes = iris.cube.CubeList([cubes[2],cubes[1]])\n final_cubes = iris.cube.CubeList()\n for cube in cubes:\n final_cube = cube[-12:]\n final_cubes.append(final_cube)\n \n #Set the year-on-year proportional reductions to be nothing\n yoy_rates = calc_perc_reducts()\n yoy_rates = np.array(yoy_rates)\n yoy_rates = np.ones_like(yoy_rates)\n\n #Create coordinates for new nc file between 2014 and 2100\n lat_coord = cubes[0].coord('latitude')\n lon_coord = cubes[0].coord('longitude')\n time_coord = DimCoord(np.arange(95055.,95055.+(2100-2014+1)*360.,30.),standard_name=u'time', units=cf_units.Unit('days since 1750-1-1 00:00:00', calendar='360_day'), long_name=u'time', var_name='time')\n\n #Create the cube date\n cube_data_surf = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n cube_data_high = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n #Set first year equal to 2014 in CMIP6 historical\n cube_data_surf[:12,...] = final_cubes[0].data\n cube_data_high[:12,...] = final_cubes[1].data\n #Apply equal emissions in all other years too\n for i in range(12,cube_data_surf.shape[0]):\n cube_data_surf[i,...] = cube_data_surf[(i-12),...] * yoy_rates[0,i]\n cube_data_high[i,...] = cube_data_high[(i-12),...] 
* yoy_rates[1,i]\n #Make the output cubes\n fut_cube_surf = iris.cube.Cube(cube_data_surf,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[0].standard_name, long_name=final_cubes[0].long_name, var_name=final_cubes[0].var_name, units=final_cubes[0].units, attributes=final_cubes[0].attributes)\n fut_cube_high = iris.cube.Cube(cube_data_high,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[1].standard_name, long_name=final_cubes[1].long_name, var_name=final_cubes[1].var_name, units=final_cubes[1].units, attributes=final_cubes[1].attributes)\n\n fut_cube_high.var_name = 'field569_1'\n fut_cube_high.units='kg/m2/s'\n fut_cube_high.long_name ='HIGH LEVEL SO2 EMISSIONS KG/M2/S'\n fut_cube_surf.var_name = 'field569'\n fut_cube_surf.units='kg/m2/s'\n fut_cube_surf.long_name ='SULPHUR DIOXIDE EMISSIONS'\n\n #Load the DMS cube from standard RCP2.6\n dms_cube = iris.load(data_dir+'DMSSO2NH3_18502100_RCP26_monthly.nc')[0]\n iris.coord_categorisation.add_year(dms_cube,'time',name='year')\n dms_cube = dms_cube.extract(iris.Constraint(year = lambda y: y>=2014))\n\n dms_cube.var_name = 'field570'\n dms_cube.attributes.pop('name')\n dms_cube.coord('time').var_name = 'time'\n dms_cube.coord('time').long_name = 'time'\n\n fut_cube_high = fut_cube_high[:-2]\n fut_cube_surf = fut_cube_surf[:-2]\n\n fut_dms = iris.cube.Cube(dms_cube.data[:,0,::-1,:],dim_coords_and_dims=[(fut_cube_surf.coord('time'),0),(fut_cube_surf.coord('latitude'),1),(fut_cube_surf.coord('longitude'), 2)],standard_name=dms_cube.standard_name, long_name=dms_cube.long_name, var_name=dms_cube.var_name, units=dms_cube.units, attributes=dms_cube.attributes)\n\n #Save the final cubes as netcdf (cutting them to be the same length)\n iris.save(iris.cube.CubeList([fut_dms,fut_cube_high,fut_cube_surf]),data_dir+ \"SO2DMS_const2014.nc\")\n os.system('ncatted -O -a calendar,time,m,c,\"360_day\" '+data_dir+ \"SO2DMS_const2014.nc\")\n\n return", "def calculate_daily_climatology(\n pctile,\n windowHalfWidth,\n lenClimYear,\n smoothPercentile,\n smoothPercentileWidth,\n thresh_climYear, # empty array\n seas_climYear, # empty array\n clim, # empty dict\n feb29,\n doyClim,\n clim_start,\n clim_end,\n tempClim,\n temp,\n):\n # Loop over all day-of-year values, and calculate threshold and seasonal climatology across years\n for d in range(1, lenClimYear + 1):\n # Special case for Feb 29\n if d == feb29:\n continue\n # find all indices for each day of the year +/- windowHalfWidth and from them calculate the threshold\n tt0 = np.where(doyClim[clim_start : clim_end + 1] == d)[\n 0\n ] # the index for that day each year\n # If this doy value does not exist (i.e. 
in 360-day calendars) then skip it\n if len(tt0) == 0:\n continue\n tt = np.array([])\n for w in range(-windowHalfWidth, windowHalfWidth + 1): # -5 : 5 default\n tt = np.append(\n tt, clim_start + tt0 + w\n ) # append the daily values 5days before and 5days after\n tt = tt[tt >= 0] # Reject indices \"before\" the first element\n tt = tt[tt < TClim] # Reject indices \"after\" the last element\n thresh_climYear[d - 1] = np.percentile(nonans(tempClim[tt.astype(int)]), pctile)\n seas_climYear[d - 1] = np.mean(nonans(tempClim[tt.astype(int)]))\n\n # Special case for Feb 29 (LEAP YEAR)\n thresh_climYear[feb29 - 1] = (\n 0.5 * thresh_climYear[feb29 - 2] + 0.5 * thresh_climYear[feb29]\n )\n seas_climYear[feb29 - 1] = (\n 0.5 * seas_climYear[feb29 - 2] + 0.5 * seas_climYear[feb29]\n )\n\n if smoothPercentile:\n thresh_climYear, seas_climYear = smooth_climatologies(\n thresh_climYear, seas_climYear, smoothPercentileWidth\n )\n\n # Generate threshold for full time series\n clim[\"thresh\"] = thresh_climYear[doy.astype(int) - 1]\n clim[\"seas\"] = seas_climYear[doy.astype(int) - 1]\n # Save vector indicating which points in temp are missing values\n clim[\"missing\"] = np.isnan(temp)\n\n return clim", "def apply_land_ocean_mask(data_cube, mask_cube, include_only):\n\n target_shape = data_cube.shape\n target_ndim = len(target_shape)\n\n if include_only == 'land':\n mask_array = numpy.where(mask_cube.data > 0.1, False, True)\n elif include_only == 'ocean':\n mask_array = numpy.where(mask_cube.data < 0.1, False, True)\n\n mask = broadcast_array(mask_array, [target_ndim - 2, target_ndim - 1], target_shape)\n assert mask.shape == target_shape \n\n data_cube.data = numpy.ma.asarray(data_cube.data)\n data_cube.data.mask = mask\n\n return data_cube", "def test_05_01_mask_of3D(self):\n x=cpi.Image()\n x.image = np.ones((10,10,3))\n self.assertTrue(x.mask.ndim==2)", "def cygx3IndFlux(self):\n # --------------------------------------------------------------------------------------------- #\n # Read data\n fitsNnam = os.path.join(self.workpath, 'LCresults.fits')\n lcTab = Table.read(fitsNnam)\n detect = lcTab['ts'] >= self.tsmin\n lcTab = lcTab[detect] \n\n ind08 = (lcTab['mjd'] > 54700) & (lcTab['mjd'] < 54900) \n flux08 = lcTab['flux'][ind08]\n fluxerr08 = lcTab['fluxerr'][ind08]\n index08 = lcTab['index'][ind08]\n indexerr08 = lcTab['indexerr'][ind08]\n\n ind09 = (lcTab['mjd'] > 54900) & (lcTab['mjd'] < 55100) \n flux09 = lcTab['flux'][ind09]\n fluxerr09 = lcTab['fluxerr'][ind09]\n index09 = lcTab['index'][ind09]\n indexerr09 = lcTab['indexerr'][ind09]\n\n scale = 10**int(np.floor(np.log10( np.mean( np.concatenate( (flux08, flux09), axis=0) ) ))) \n\n # --------------------------------------------------------------------------------------------- #\n # Plot\n indplt = FermiPlot(savepath='', xsize=8.5, ysize=6)\n indplt.figname = os.path.join(self.workpath, 'IndvsFlux.pdf')\n indplt.xlabel = r'Flux ($10^{%d}$ ph\\,cm$^{-2}$\\,s$^{-1}$)'%(int(np.log10(scale)))\n indplt.ylabel = r'Index'\n indplt.mksize = 2\n indplt.color = self.lblue\n indplt.label = r'2008'\n indplt.plot(x=flux08/scale, xerr=fluxerr08/scale, y=index08, yerr=indexerr08)\n indplt.color = self.loran\n indplt.label = r'2009'\n indplt.plot(x=flux09/scale, xerr=fluxerr09/scale, y=index09, yerr=indexerr09)\n indplt.save()\n\n print(\"\\t=== Figure '{}' created ===\".format(indplt.figname)) \n return", "def read_dr3_spectrum(path, common_dispersion=None, bounds_error=False):\n\n header_keys = (\"helio_rv\", \"z\", \"z_err\")\n\n with fits.open(path) as 
image:\n # data array indices:\n # flux, inverse variance, wavelength, andmask, ormask.\n flux, ivar, dispersion, and_mask, or_mask = image[0].data\n\n # Create a meta dictionary that contains things we will probably care \n # about later on, and the path so that we can trace provenance of other\n # things as needed.\n meta = dict(path=path)\n for header_key in header_keys:\n meta[header_key] = image[0].header[header_key.upper()]\n\n # Use the OR mask to set the inverse variances to zero for any pixels with\n # indications of being bad. For example, the bit mask meanings are:\n # 1 : BADCCD : bad pixel on CCD\n # 2 : BADPROFILE : bad profile in extraction\n # 3 : NOSKY : no sky information at this wavelength\n # 4 : BRIGHTSKY : sky level too high\n # 5 : BADCENTER : fibre trace out of the CCD\n # 6 : NODATA : no good data.\n\n # From http://dr3.lamost.org/doc/data-production-description\n\n # These are all bad things. And the LAMOST pipeline people are more familiar\n # with the data than we are. So let's believe them.\n\n rest_dispersion = dispersion * (1 - meta[\"z\"])\n ivar[or_mask > 0] = 0.0\n\n if common_dispersion is not None:\n flux = (interpolate.interp1d(rest_dispersion, flux,\n bounds_error=bounds_error, fill_value=1))(common_dispersion)\n ivar = (interpolate.interp1d(rest_dispersion, ivar,\n bounds_error=bounds_error, fill_value=0))(common_dispersion)\n\n rest_dispersion = common_dispersion\n ivar[ivar < 0] = 0\n\n assert np.all(ivar >= 0), \"negative inverse variances\"\n assert np.all(np.isfinite(flux)), \"non-finite fluxes\"\n\n return (rest_dispersion, flux, ivar, meta)", "def test_3d_time():\n dic,data = ng.pipe.read(\"common_data/3d_pipe/data/test%03d.fid\")\n sdic,sdata = ng.pipe.read(\"common_data/3d_pipe/data/test001.fid\")\n assert data.shape == (128, 88, 1250)\n assert data.dtype == 'complex64'\n assert round(data[0,1,2].real,2) == -7.98\n assert round(data[0,1,2].imag,2) == 33.82\n assert round(data[10,22,5].real,2) == 15.71\n assert round(data[10,22,5].imag,2) == 15.1\n\n # and the first slice\n assert sdata.shape == (88, 1250)\n assert sdata.dtype == 'complex64'\n assert round(sdata[1,2].real,2) == -7.98\n assert round(sdata[1,2].imag,2) == 33.82\n assert round(sdata[22,5].real,2) == 22.65\n assert round(sdata[22,5].imag,2) == 13.65\n\n # slice/data matching\n assert_array_equal(data[0],sdata)\n\n write_readback_3D(dic,data)" ]
[ "0.6262926", "0.61455876", "0.6122352", "0.59712267", "0.5810346", "0.56390077", "0.5292091", "0.5260056", "0.52369714", "0.5159779", "0.5131087", "0.50615054", "0.49572933", "0.48659185", "0.4854936", "0.4852849", "0.48406282", "0.48388356", "0.47845525", "0.47620434", "0.47155675", "0.47076923", "0.4703111", "0.46666485", "0.46103007", "0.4597425", "0.45727402", "0.4557659", "0.4546504", "0.45439723" ]
0.7711574
0
Function to perform a forward moving gap fill for all years in an image. The image bands do not need to be in order, but the bandNames argument must be in chronological order. The forward gap fill is applied iteratively from the first year of bandNames through the final year, where if the current image has missing data, it is filled with the following year's values.
def applyForwardNoDataFilter(image, bandNames): #Get a list of band names from year(1) through the last year bandNamesEE = ee.List(bandNames[1:]) #Define forwards filter #In first iteration, bandName=bandNames[1] and previousImage is image.select(bandNames[0]), or the classifications for the first year #currentImage = image.select(bandNames[1]), the image for the second year #previousImage = image.select(bandNames[0]), the first year #Find where the second year has missing data, replace those values with the values of the first year #Append previousImage to currentImage, so now currentImage is a two band image, with the first band being the second year with the gap fill #and the second band is the first years classification #The iteration continues, now with followingImage.select[0] being the second year with the gap fill applied, and bandName is the third year def forwardNoDataFilter(bandName, previousImage): currentImage = image.select(ee.String(bandName)) previousImage = ee.Image(previousImage) currentImage = currentImage.unmask(previousImage.select([0])) return currentImage.addBands(previousImage) #Iterate through all the years, starting with the first year's classification filtered = bandNamesEE.iterate(forwardNoDataFilter,ee.Image(image.select(bandNames[0]))) filtered = ee.Image(filtered) return filtered.select(bandNames)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyBackwardNoDataFilter(image, bandNames):\n #Get a list of band names to iterate over, from year(-2) through year(0)\n bandNamesEE = ee.List(bandNames[:-1]).reverse()\n \n #Define backwards filter\n #In first iteration, bandName=bandNames[-2] and followingImage is image.select(bandNames[-1]), or the classifications for the final year\n #currentImage = image.select(bandNames[-2]), the second to last year\n #followingImage = image.select(bandNames[-1]), the final year\n #Find where the second to last year has missing data, replace those values with the values of the following year\n #Append followingImage to currentImage, so now currentImage is a two band image, with the first band being the second to last year with the gap fill\n #and the second band is the final years classification\n #The iteration continues, now with followingImage.select[0] being the second to last year with the gap fill applied, and bandName is the third to last year\n def backwardNoDataFilter(bandName, followingImage):\n currentImage = image.select(ee.String(bandName))\n followingImage = ee.Image(followingImage)\n currentImage = currentImage.unmask(followingImage.select([0]))\n return currentImage.addBands(followingImage)\n \n #Apply backwards filter, starting with the final year and iterating through to year(0) \n filtered = bandNamesEE.iterate(backwardNoDataFilter,ee.Image(image.select(bandNames[-1])))\n #Re-order bands to be in chronological order\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)", "def applyWindow4years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-2):\n img_out = img_out.addBands(mask4(imagem, value,bandNames[(i-1):(i+3)]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def applyGapFilter(image, bandNames):\n filtered = applyForwardNoDataFilter(image, bandNames)\n filtered = applyBackwardNoDataFilter(filtered, bandNames)\n return filtered", "def fill_price_gaps(\n from_date=dt.datetime(1970,1,1),\n to_date=dt.datetime.today().replace(hour=0, minute=0, second=0, microsecond=0)\n ):\n #Create a collection of years\n years = []\n cur_year = from_date.year\n while cur_year <= to_date.year:\n years.append(cur_year)\n cur_year += 1\n #Loop each year\n all_year_dates = pd.DataFrame([])\n for year in tqdm(years, total=len(years), desc=\"Loop through years to find dates\"):\n #establish bounding dates\n year_from_date = None if year != from_date.year else from_date\n year_to_date = None if year != to_date.year else to_date\n #Get filtered year dates\n year_dates = create_filtered_year_dates(year, from_date=year_from_date, to_date=year_to_date, )\n #Add to the full list\n all_year_dates = pd.concat([all_year_dates, year_dates])\n #Order the dates (just in case)\n all_year_dates = all_year_dates.sort_values([\"date\"]) \\\n .reset_index(drop=True)\n #Fetch all the tickers\n tickers = sqlaq_to_df(ticker.fetch())\n #Loop through tickers\n errors = []\n run_time = ProcessTime()\n for _,r in tqdm(tickers[[\"id\",\"ticker\"]].iterrows(), total=tickers.shape[0], desc=\"Filling in gaps\"):\n logger.info(f\"Filling gaps in {r.id} -> {r.ticker}\")\n try:\n #Fetch all prices\n dp = sqlaq_to_df(daily_price.fetch(ticker_ids=[r.id]))\n dp[\"date\"] = dp.date.astype(\"datetime64[ns]\")\n #Identify missing dates\n missing_dates = pd.merge(all_year_dates, dp[[\"date\",\"id\"]], on=[\"date\"], how=\"left\")\n #Identify the start date and remove all 
missing date before that\n start_date = missing_dates[~missing_dates.id.isnull()].date.min()\n missing_dates = missing_dates[missing_dates.date > start_date]\n #Remove all other items which have dates\n missing_dates = missing_dates[missing_dates.id.isnull()]\n #Order remaining dates\n missing_dates = missing_dates.sort_values(\"date\")\n #Create groupings no larger than max_days (in config)\n st_d = None\n date_groups = []\n missing_dates = missing_dates.date.to_list()\n if len(missing_dates):\n for i,d in enumerate(missing_dates):\n if not st_d:\n st_d = d\n else:\n #Append when group gets too big\n if (d - st_d).days > WEB_SCRAPE_MAX_DAYS:\n date_groups.append([st_d, missing_dates[i-1]])\n #Update the start date\n st_d = d\n #Append the last item\n date_groups.append([st_d, d])\n #Scrape the missing prices\n logger.info('Number of webscrapes to perform -> {}'.format(len(date_groups)))\n #For each time frame perform a scrape\n try: #Try loop so as not to miss all following date groups\n for i,dates in enumerate(date_groups):\n logger.info(f\"Running dates {i} -> {dt.datetime.strptime(str(dates[0])[:10], '%Y-%m-%d')} - {dt.datetime.strptime(str(dates[1])[:10], '%Y-%m-%d')}\")\n process_daily_prices(\n r.ticker,\n r.id,\n st_date=dates[0],\n en_date=dates[1],\n \n )\n except Exception as e:\n logger.error(e)\n errors.append({'ticker_id':r.id, 'ticker':r.ticker, \"error\":e, \"st_date\":dates[0], \"en_dates\":dates[1]})\n #Run an update on th weekly prices\n process_weekly_prices(\n r.id,\n \n )\n except Exception as e:\n logger.error(e)\n errors.append({'ticker_id':r.id, 'ticker':r.ticker, \"error\":e})\n #Lap\n logger.info(run_time.lap())\n logger.info(run_time.show_latest_lap_time(show_time=True))\n logger.info(f\"GAP FILL RUN TIME - {run_time.end()}\")\n\n logger.info(f'\\nGAP FILL ERROR COUNT -> {len(errors)}')\n if len(errors) > 0:\n logger.info('GAP FILL ERRORS ->')\n for e in errors:\n logger.error(e)", "def stack_bands(yr, roi):\n\n water_year_start = '{}-10-01'.format(yr - 1)\n\n winter_s, winter_e = '{}-01-01'.format(yr), '{}-03-01'.format(yr),\n spring_s, spring_e = '{}-03-01'.format(yr), '{}-05-01'.format(yr),\n late_spring_s, late_spring_e = '{}-05-01'.format(yr), '{}-07-01'.format(yr)\n summer_s, summer_e = '{}-07-01'.format(yr), '{}-09-01'.format(yr)\n fall_s, fall_e = '{}-09-01'.format(yr), '{}-12-31'.format(yr)\n\n periods = [('cy', winter_s, fall_e),\n ('1', spring_s, spring_e),\n ('2', late_spring_s, late_spring_e),\n ('3', summer_s, summer_e),\n ('4', fall_s, fall_e)]\n\n first = True\n for name, start, end in periods:\n bands = landsat_composites(yr, start, end, roi, name)\n if first:\n input_bands = bands\n proj = bands.select('B2_cy').projection().getInfo()\n first = False\n else:\n input_bands = input_bands.addBands(bands)\n\n for s, e, n in [(spring_s, spring_e, 'espr'),\n (late_spring_s, late_spring_e, 'lspr'),\n (summer_s, summer_e, 'smr'),\n (fall_s, fall_e, 'fl'),\n (water_year_start, spring_e, 'wy_espr'),\n (water_year_start, late_spring_e, 'wy_espr'),\n (water_year_start, summer_e, 'wy_smr'),\n (water_year_start, fall_e, 'wy')]:\n gridmet = ee.ImageCollection(\"IDAHO_EPSCOR/GRIDMET\").filterBounds(\n roi).filterDate(s, e).select('pr', 'eto', 'tmmn', 'tmmx')\n temp_reducer = ee.Reducer.mean()\n t_names = ['tmax'.format(n), 'tmin'.format(n)]\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 
'eto').reduce(precip_reducer).rename(\n 'precip_total_{}'.format(n), 'pet_total_{}'.format(n)).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n wd_estimate = precip_sum.select('precip_total_{}'.format(n)).subtract(precip_sum.select(\n 'pet_total_{}'.format(n))).rename('wd_est_{}'.format(n))\n input_bands = input_bands.addBands([temp_perc, precip_sum, wd_estimate])\n\n temp_reducer = ee.Reducer.percentile([10, 50, 90])\n t_names = ['tmmn_p10_cy', 'tmmn_p50_cy', 'tmmn_p90_cy', 'tmmx_p10_cy', 'tmmx_p50_cy', 'tmmx_p90_cy']\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_cy', 'pet_total_cy').resample('bilinear').reproject(crs=proj['crs'], scale=30)\n wd_estimate = precip_sum.select('precip_total_cy').subtract(precip_sum.select(\n 'pet_total_cy')).rename('wd_est_cy')\n\n coords = ee.Image.pixelLonLat().rename(['Lon_GCS', 'LAT_GCS']).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n ned = ee.Image('USGS/NED')\n terrain = ee.Terrain.products(ned).select('elevation', 'slope', 'aspect').reduceResolution(\n ee.Reducer.mean()).reproject(crs=proj['crs'], scale=30)\n\n world_climate = get_world_climate(proj=proj)\n elev = terrain.select('elevation')\n tpi_1250 = elev.subtract(elev.focal_mean(1250, 'circle', 'meters')).add(0.5).rename('tpi_1250')\n tpi_250 = elev.subtract(elev.focal_mean(250, 'circle', 'meters')).add(0.5).rename('tpi_250')\n tpi_150 = elev.subtract(elev.focal_mean(150, 'circle', 'meters')).add(0.5).rename('tpi_150')\n static_input_bands = coords.addBands([temp_perc, wd_estimate, terrain, tpi_1250, tpi_250, tpi_150, world_climate])\n\n nlcd = ee.Image('USGS/NLCD/NLCD2011').select('landcover').reproject(crs=proj['crs'], scale=30).rename('nlcd')\n\n cdl_cult = ee.Image('USDA/NASS/CDL/2017').select('cultivated'). 
\\\n remap([1, 2], [0, 1]).reproject(crs=proj['crs'], scale=30).rename('cdlclt')\n\n cdl_crop = ee.Image('USDA/NASS/CDL/2017').select('cropland').reproject(crs=proj['crs'],\n scale=30).rename('cdlcrp')\n\n gsw = ee.Image('JRC/GSW1_0/GlobalSurfaceWater')\n occ_pos = gsw.select('occurrence').gt(0)\n water = occ_pos.unmask(0).rename('gsw')\n\n static_input_bands = static_input_bands.addBands([nlcd, cdl_cult, cdl_crop, water])\n\n input_bands = input_bands.addBands(static_input_bands).clip(roi)\n\n # standardize names to match EE javascript output\n standard_names = []\n temp_ct = 1\n prec_ct = 1\n names = input_bands.bandNames().getInfo()\n for name in names:\n if 'tavg' in name and 'tavg' in standard_names:\n standard_names.append('tavg_{}'.format(temp_ct))\n temp_ct += 1\n elif 'prec' in name and 'prec' in standard_names:\n standard_names.append('prec_{}'.format(prec_ct))\n prec_ct += 1\n else:\n standard_names.append(name)\n\n input_bands = input_bands.rename(standard_names)\n return input_bands", "def fill_year(timeseries, value=0):\n # Obtain firts and last date from timeseries\n first_date = timeseries.index.min()\n last_date = timeseries.index.max()\n\n one_year_date = last_date - timedelta(days=365)\n\n ## Obtain the sunday beofre the date of one year ago\n starting_date = one_year_date - timedelta(days=one_year_date.weekday()+1)\n\n assert starting_date.weekday_name == 'Sunday'\n\n\n # Fill dates with mising zero\n date_range_series = create_timeseries(starting_date,\n first_date-timedelta(days=1),\n value)\n\n # Fill the original timeseries\n filled_timeseries = pd.concat([date_range_series, timeseries])\n\n return filled_timeseries", "def fillna(df, col: str, forward: bool):\n na_prev = len(df)\n report = f'fillna(\"{col}\") ' + ('forward' if forward else 'backward') + ' NA count:'\n while True:\n na = df[col].isna().sum()\n report += f' {na}'\n if na == na_prev or na == 0: break\n na_prev = na\n # df must to be sorted by (ABI, YEAR)\n df.loc[df[col].isna(), col] = df.groupby('ABI')[col].shift(1 if forward else -1)", "def applyWindow3years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-1):\n img_out = img_out.addBands(mask3(imagem, value,bandNames[(i-1):(i+2)]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def interpolate_dataframes(ff):\n assert isinstance(ff, dict)\n year_min = ff['CA'][0].index[0]\n year_max = ff['CA'][0].index[-1]\n years = list(range(year_min, year_max + 1))\n for state in ff.keys():\n for cf in ff[state]:\n for year in years:\n if year not in cf.index:\n cf.loc[year] = cf.loc[year-1:year+1, :].sum(axis=0)\n cf.loc[year] = (cf.loc[year] / 2).astype(np.int64)\n cf.sort_index(inplace=True)\n return(ff)", "def applyWindow5years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-3):\n img_out = img_out.addBands(mask5(imagem, value,bandNames[(i-1):(i+4)]))\n img_out = img_out.addBands(imagem.select(bandNames[-3]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def complete_zeros(df_dm,year):\n df_dm.insert(1,year,0)\n return df_dm", "def fill_forward(df):\n df = df.fillna(method='ffill')\n df = df.fillna(method='bfill').fillna(0)\n return df", "def gap_years_aggregated(mongo_client):\n db = mongo_client[\"nobel\"]\n\n original_categories = sorted(set(db.prizes.distinct(\"category\", {\"year\": \"1901\"})))\n\n pipeline = [\n 
{\"$match\": {\"category\": {\"$in\": original_categories}}},\n {\"$project\": {\"category\": 1, \"year\": 1}},\n\n # Collect the set of category values for each prize year.\n {\"$group\": {\"_id\": \"$year\", \"categories\": {\"$addToSet\": \"$category\"}}},\n\n # Project categories *not* awarded (i.e., that are missing this year).\n {\"$project\": {\"missing\": {\"$setDifference\": [original_categories, \"$categories\"]}}},\n\n # Only include years with at least one missing category\n {\"$match\": {\"missing.0\": {\"$exists\": True}}},\n\n # Sort in reverse chronological order. Note that \"_id\" is a distinct year at this stage.\n {\"$sort\": OrderedDict([(\"_id\", -1)])},\n ]\n\n for doc in db.prizes.aggregate(pipeline):\n print(\"{year}: {missing}\".format(year=doc[\"_id\"], missing=\", \".join(sorted(doc[\"missing\"]))))", "def get_yearly_data(name, startyr=None, endyr=None, interpolated=False):\n varinfo = get_varinfo(name)\n \n if varinfo[\"type\"] == \"yearly\":\n data = get_data(varinfo[\"id\"], startyr=startyr, endyr=endyr)\n giddict = dict()\n sorteddata = sorted(data[\"cells\"], key=lambda vd: vd[\"gid\"])\n for gid,valuedicts in itertools.groupby(sorteddata, key=lambda vd: vd[\"gid\"]):\n yrdict = dict([(valuedict[\"year\"],valuedict[\"value\"])\n for valuedict in valuedicts\n ])\n info = {\"data\": yrdict}\n giddict[gid] = info\n\n if interpolated:\n def pairwise(iterable):\n a, b = itertools.tee(iterable)\n next(b, None)\n return zip(a, b)\n \n def lerp(factor, fromval, toval):\n valrange = toval - fromval\n return fromval + valrange * factor\n \n for gid,info in giddict.items():\n yrdict = info[\"data\"]\n if len(yrdict) > 1:\n for (fromyr,fromval),(toyr,toval) in pairwise(sorted(yrdict.items(),key=lambda i: i[0])):\n curyr = fromyr + 1\n interpneeded = fromval != toval\n \n while curyr != toyr:\n if interpneeded:\n factor = (curyr - fromyr) / float(toyr - fromyr)\n yrdict[curyr] = lerp(factor, fromval, toval)\n else:\n yrdict[curyr] = fromval\n curyr += 1\n\n return giddict\n\n else:\n raise Exception(\"Could not find a yearly variable with that name\")", "def foldcurve(_band, _period):\n # Set epoch to first date observed\n _epoch = _band[0][0]\n # Iterate through array, update date to phase\n for i in range(0, _band.shape[0]):\n _band[i, 0] = ((_band[i, 0] - _epoch) / _period) % 1\n # Return folded array\n return _band", "def FS2Years(inputFolderPath = './FormattedFilesWithoutMissingToNextYear', outputFolderPath = './FormattedFilesWithoutMissingToNextYear'):\n\tfileList = []\n\tfor root, dirs, files in os.walk(inputFolderPath): \n\t for afile in files:\n\t \tfileList.append(afile)\n\n\ttargetList = [2704,2707,2713,2716,2718,808,811,1954]\n\t# targetList = [1994,1997,2003,2006,2008,807,810,1953]\n\tyearList = [(1998,2015),(2005,2015),(2005,2015),(2005,2015),(2005,2015),(1960,2014),(1961,2014),(2002,2012)]\n\n\n\tfor i in range(len(targetList)):\n\t\t# i = 0\n\t\trows = []\n\t\tfor year in range(yearList[i][0],yearList[i][1]+1):\n\t\t\t# print str(year) + '-' + str(targetList[i]) \n\t\t\tregex = re.compile(\"(\"+ str(year) +\").*\")\n\t\t\tfiles = [m.group(0) for l in fileList for m in [regex.search(l)] if m and len(l) == 28]\n\t\t\t\n\n\t\t\t# load the CSV file as a numpy matrix\n\t\t\twith open(inputFolderPath+'/'+files[0],'rb') as f:\n\t\t\t reader = csv.reader(f)\n\t\t\t header = next(reader)\n\t\t\t num_cols = len(header)\n\t\t\t # print header\n\t\t\t print i\n\t\t\t target_idx = [idx for idx, item in enumerate(header) if 
item.startswith(str(targetList[i]).zfill(4)+'N')]\n\t\t\t regex = re.compile(\"....N:.*\")\n\t\t\t nextYearIDs = [idx for idx, item in enumerate(header) if regex.search(item)]\n\t\t\t nextYearCount = len(nextYearIDs)\n\t\t\t if len(target_idx) > 0:\n\t\t\t \ttarget = target_idx[0]-1\n\t\t\t \tprint ('OK',year, targetList[i], inputFolderPath+'/'+files[0])\n\t\t\t else:\n\t\t\t \tprint (year, targetList[i], inputFolderPath+'/'+files[0])\n\t\t\t \tbreak\n\t\t\t f.close()\n\t\t\tdataset = np.genfromtxt(inputFolderPath+'/'+files[0], delimiter=\",\", skip_header=1, autostrip=True, missing_values=np.nan, usecols=tuple(range(1,num_cols)))\n\t\t\t# print (dataset.shape)\n\t\t\t# X = np.concatenate((dataset[:,0:target],dataset[:,target+1:dataset.shape[1]]),axis=1)\n\t\t\tX = dataset[:,nextYearCount:dataset.shape[1]]\n\t\t\t# X = np.concatenate((dataset[:,0:2],dataset[:,3:dataset.shape[1]),axis=1)\n\t\t\ty = dataset[:,target]\n\t\t\t\n\t\t\timp = Imputer(missing_values='NaN', strategy='median', axis=0)\n\t\t\timputedX = imp.fit_transform(X,y)\n\t\t\timputedX = np.array([imputedX[j] for j in range(imputedX.shape[0]) if not np.isnan(y[j])])\n\t\t\tdeleteMissingY = np.array([x1 for x1 in y if not np.isnan(x1)])\n\n\t\t\tk = 40\n\t\t\tselection = SelectKBest(f_regression, k=k)\n\t\t\timputedX_new = selection.fit_transform(imputedX, deleteMissingY)\n\t\t\t\n\t\t\tselectedFeatures = [[item, selection.scores_[idx], selection.pvalues_[idx]] for idx, item in enumerate(header[nextYearCount+1:]) if selection.get_support()[idx]]\n\t\t\tselectedFeatures.sort(key=lambda x: x[1], reverse=True)\n\t\t\t\n\t\t\trows.append([year, 'score', 'p-value'])\n\t\t\trows.extend(selectedFeatures)\n\t\t\trows.append(['', '', ''])\n\t\t\tprint 'Hey'\n\n\t\tfilename = outputFolderPath+'/'+('FeatureSelectionIndicator%d - k%d - %s.csv' % (targetList[i], k, 'f_regression'))\n\t\twith open(filename,'wb') as w:\n\t\t\ta = csv.writer(w, delimiter = ',')\n\t\t\ta.writerows(rows)\n\t\tw.close()", "def resample(self, dataframes, freq='5s'):\n\n for df in dataframes:\n yield df.resample(freq, fill_method='bfill')", "def increment_year(self):", "def fill_missing_date_range():\n pickle_dir ='/misc/yoda/www/plots/user/sheep'\n #pickle_dir = '/Users/ken/Downloads/sheep'\n drange = get_missing_date_range(pickle_dir)\n if drange:\n print 'fill date range', drange\n pickle_date_range(drange[0], drange[1])", "def imdb_crawl_by_years(years, verbose):\n for year in years:\n imdb_crawl_by_year(year, verbose)", "def fill_in_data(color,frames,fs=25):\n color = color\n colormat = color.as_matrix()\n frameDiff = np.diff(colormat.T[2])\n locations = np.where(frameDiff!=1)[0]\n\n #Calculate number of frames skipped\n #sample = []\n #sample = colormat.T\n sample = sample[:2].T\n #frames = range(100,len(colormat.T[2])+100)\n #frames = np.linspace(frames[0],frames[-1],frames[-1]-frames[0]+1)\n #frames = frames[:len(frames)-1]\n \n #if locations is empty, try looking for a row of nans\n if np.all(locations):\n for i in range(len(sample)):\n if np.all(sample[i] == 0):\n sample[i]=[np.nan, np.nan]\n missing = list(np.where(np.isnan(sample.T[0])))\n\n else:\n numfill = []\n missing = []\n for i in locations:\n numfill.append(frames[i+1]-frames[i])#-1)\n #pdb.set_trace()\n missing.append(np.linspace(i+1,i+1+numfill[-1],numfill[-1]))\n\n missing = np.concatenate(missing)\n\n missing = missing[:len(missing)-1]\n missing = missing.astype(int)\n\n pdb.set_trace()\n\n for j in reversed(missing):\n sample = np.insert(sample,j,(np.nan,np.nan),axis = 0)\n #frames = 
np.insert(frames,j,j,axis=0)\n\n color_x,color_y,x_filt=KFilt(sample,fs)\n color_mat = np.column_stack((color_x[:,0],color_y[:,0],color_x[:,1],color_y[:,1]))\n return color_mat,frames,x_filt", "def fill_between(initial,final):\n return np.arange(initial + 1, final)", "def _resampler(df_year, year):\n # Aggregates data using mean for each time interval and gets a\n # sample count for each new data point.\n df_15 = df_year.resample('15T').apply(['mean', 'count'])\n df_30 = df_year.resample('30T').apply(['mean', 'count'])\n df_1h = df_year.resample('1H').apply(['mean', 'count'])\n df_1d = df_year.resample('D').apply(['mean', 'count'])\n\n # Removes top level title that is not needed.\n df_15.columns = df_15.columns.droplevel(0)\n df_30.columns = df_30.columns.droplevel(0)\n df_1h.columns = df_1h.columns.droplevel(0)\n df_1d.columns = df_1d.columns.droplevel(0)\n\n # Creating new date range to include all time intervals within the year.\n idx_15 = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:45:00', freq='15T')\n idx_30 = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:30:00', freq='30T')\n idx_1h = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:00:00', freq='1H')\n idx_1d = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:00:00', freq='D')\n\n # Reindexing so data that starts in, for example August, will now\n # have the months prior to August filled with nans.\n df_15_reindex = df_15.reindex(idx_15, fill_value=np.nan)\n df_15_reindex[['count']] = df_15_reindex[['count']].fillna(0).astype(int)\n # Adding all columns to match example excel.\n df_15_reindex = df_15_reindex.rename(columns={'mean': 'H(ft)'})\n df_15_reindex = df_15_reindex.rename(columns={'count': 'SampleCount'})\n\n # Adding meters column.\n df_15_reindex['H(m)'] = df_15_reindex['H(ft)'] / 3.28\n # Rounds meters column so significant digits match\n # original height column.\n df_15_reindex['H(m)'] = df_15_reindex['H(m)'].round(2)\n df_15_reindex['H(ft)'] = df_15_reindex['H(ft)'].round(2)\n df_15_reindex['DateTime2'] = df_15_reindex.index\n df_15_reindex['Date'] = df_15_reindex.index\n df_15_reindex['Date2'] = df_15_reindex.index\n df_15_reindex['Date_Python_generated'] = df_15_reindex['Date'].dt.date\n df_15_reindex['Time1'] = df_15_reindex['Date'].dt.time\n df_15_reindex['Time2'] = df_15_reindex['Date'].dt.time\n df_15_reindex['H(m)_final'] = df_15_reindex['H(m)']\n df_15_reindex = df_15_reindex.reset_index(drop=True)\n # Adding original datetime and height data to dataframe. 
To do this\n # pd.concat is used because the column lengths are different.\n df_15_reindex = pd.concat([\n df_15_reindex, df_year.reset_index(drop=True)], axis=1)\n df_15_reindex['dateTime'] = pd.to_datetime(df_15_reindex['dateTime'])\n # Reordering columns to match example excel.\n df_15_reindex = df_15_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n # Filling nans with empty cells in columns similar to example excel.\n df_15_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_15_reindex[['dateTime', 'X_00065_00000', 'H(m)_final']].fillna('')\n\n # Similar to 15 minute interval code but 30 minutes interval.\n df_30_reindex = df_30.reindex(idx_30, fill_value=np.nan)\n df_30_reindex[['count']] = df_30_reindex[['count']].fillna(0).astype(int)\n df_30_reindex = df_30_reindex.rename(columns={'mean': 'H(ft)'})\n df_30_reindex = df_30_reindex.rename(columns={'count': 'SampleCount'})\n df_30_reindex['H(m)'] = df_30_reindex['H(ft)'] / 3.28\n df_30_reindex['H(m)'] = df_30_reindex['H(m)'].round(2)\n df_30_reindex['H(ft)'] = df_30_reindex['H(ft)'].round(2)\n df_30_reindex['DateTime2'] = df_30_reindex.index\n df_30_reindex['Date'] = df_30_reindex.index\n df_30_reindex['Date2'] = df_30_reindex.index\n df_30_reindex['Date_Python_generated'] = df_30_reindex['Date'].dt.date\n df_30_reindex['Time1'] = df_30_reindex['Date'].dt.time\n df_30_reindex['Time2'] = df_30_reindex['Date'].dt.time\n df_30_reindex['H(m)_final'] = df_30_reindex['H(m)']\n df_30_reindex = df_30_reindex.reset_index(drop=True)\n df_30_reindex = pd.concat([\n df_30_reindex, df_year.reset_index(drop=True)], axis=1)\n df_30_reindex['dateTime'] = pd.to_datetime(df_30_reindex['dateTime'])\n df_30_reindex = df_30_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n df_30_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_30_reindex[['dateTime', 'X_00065_00000', 'H(m)_final']].fillna('')\n\n # Similar to 15 minute interval code but hourly interval.\n df_1h_reindex = df_1h.reindex(idx_1h, fill_value=np.nan)\n df_1h_reindex[['count']] = df_1h_reindex[['count']].fillna(0).astype(int)\n df_1h_reindex = df_1h_reindex.rename(columns={'mean': 'H(ft)'})\n df_1h_reindex = df_1h_reindex.rename(columns={'count': 'SampleCount'})\n df_1h_reindex['H(m)'] = df_1h_reindex['H(ft)'] / 3.28\n df_1h_reindex['H(m)'] = df_1h_reindex['H(m)'].round(2)\n df_1h_reindex['H(ft)'] = df_1h_reindex['H(ft)'].round(2)\n df_1h_reindex['DateTime2'] = df_1h_reindex.index\n df_1h_reindex['Date'] = df_1h_reindex.index\n df_1h_reindex['Date2'] = df_1h_reindex.index\n df_1h_reindex['Date_Python_generated'] = df_1h_reindex['Date'].dt.date\n df_1h_reindex['Time1'] = df_1h_reindex['Date'].dt.time\n df_1h_reindex['Time2'] = df_1h_reindex['Date'].dt.time\n df_1h_reindex['H(m)_final'] = df_1h_reindex['H(m)']\n df_1h_reindex = df_1h_reindex.reset_index(drop=True)\n df_1h_reindex = pd.concat([\n df_1h_reindex, df_year.reset_index(drop=True)], axis=1)\n df_1h_reindex['dateTime'] = pd.to_datetime(df_1h_reindex['dateTime'])\n df_1h_reindex = df_1h_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n df_1h_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_1h_reindex[['dateTime', 'X_00065_00000', 
'H(m)_final']].fillna('')\n\n # Similar to 15 minute interval code but daily interval.\n df_1d_reindex = df_1d.reindex(idx_1d, fill_value=np.nan)\n df_1d_reindex[['count']] = df_1d_reindex[['count']].fillna(0).astype(int)\n df_1d_reindex = df_1d_reindex.rename(columns={'mean': 'H(ft)'})\n df_1d_reindex = df_1d_reindex.rename(columns={'count': 'SampleCount'})\n df_1d_reindex['H(m)'] = df_1d_reindex['H(ft)'] / 3.28\n df_1d_reindex['H(m)'] = df_1d_reindex['H(m)'].round(2)\n df_1d_reindex['H(ft)'] = df_1d_reindex['H(ft)'].round(2)\n df_1d_reindex['DateTime2'] = df_1d_reindex.index\n df_1d_reindex['Date'] = df_1d_reindex.index\n df_1d_reindex['Date2'] = df_1d_reindex.index\n df_1d_reindex['Date_Python_generated'] = df_1d_reindex['Date'].dt.date\n df_1d_reindex['Time1'] = df_1d_reindex['Date'].dt.time\n df_1d_reindex['Time2'] = df_1d_reindex['Date'].dt.time\n df_1d_reindex['H(m)_final'] = df_1d_reindex['H(m)']\n df_1d_reindex = df_1d_reindex.reset_index(drop=True)\n df_1d_reindex = pd.concat([\n df_1d_reindex, df_year.reset_index(drop=True)], axis=1)\n df_1d_reindex['dateTime'] = pd.to_datetime(df_1d_reindex['dateTime'])\n df_1d_reindex = df_1d_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n df_1d_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_1d_reindex[['dateTime', 'X_00065_00000', 'H(m)_final']].fillna('')\n return df_15_reindex, df_30_reindex, df_1h_reindex, df_1d_reindex", "def multiple_years(our_data, start, end):\n count = start\n album_list = []\n while count <= end:\n album_list.append(find_by_year(our_data,count))\n count += 1", "def main(years=(2000, 2019)):\n year_list = range(years[0], years[1] + 1)\n dfs = []\n for year in year_list:\n dfs.append(get_df(year))\n print(f\"Done: {len(dfs)} dataframes written\")", "def modis_lai_fill_gap(in_path, doy_start, doy_end):\n date_start = datetime.datetime.strptime(doy_start, \"%Y%m%d\").date()\n date_end = datetime.datetime.strptime(doy_end, \"%Y%m%d\").date()\n\n for nc_file in os.listdir(in_path):\n if nc_file.endswith('.nc'):\n nc_date = datetime.datetime.strptime(nc_file[:-3], \"%Y%m%d\").date()\n if date_start <= nc_date <= date_end:\n print(nc_file, \"----\",)\n doy = int(datetime.datetime.strptime(nc_file[:-3], '%Y%m%d').strftime('%Y%j'))\n for new_doy in [doy + x for x in range(1, 4)]:\n shutil.copy2(os.path.join(in_path, nc_file), os.path.join(in_path, '{}.nc'.format(\n datetime.datetime.strptime(str(new_doy), '%Y%j').strftime('%Y%m%d'))))\n print('{}.nc'.format(\n datetime.datetime.strptime(str(new_doy), '%Y%j').strftime('%Y%m%d')),)\n print('\\n')", "def fill_gaps(self):\n\n for source in self.sources.keys():\n if source in self.staticsources:\n continue\n src = self.sources[source]\n print '[INFO] Scanning ' + source + ' for gaps'\n src.fill_gaps()", "def initialize_layers(self, years):\n min_year = min(years)\n max_year = max(years)\n ordered_years = list(range(min_year, max_year + 1))\n self.layers = [Layer(y) for y in ordered_years]", "def linear_interpolate(df, offset, final_year=\"2050\", harmonize_year=\"2015\"):\n df = df.copy()\n x1, x2 = harmonize_year, final_year\n y1, y2 = offset + df[x1], df[x2]\n m = (y2 - y1) / (float(x2) - float(x1))\n b = y1 - m * float(x1)\n\n cols = [x for x in utils.numcols(df) if int(x) < int(final_year)]\n for c in cols:\n df[c] = m * float(c) + b\n return df", "def _enumerate_years(self, preprocessed_data, disjoint):\n pass" ]
[ "0.65311575", "0.5876845", "0.5747447", "0.5284138", "0.52226", "0.5096014", "0.5058866", "0.5047209", "0.49857956", "0.49474898", "0.48613372", "0.48247787", "0.47784477", "0.46002764", "0.45790556", "0.45642906", "0.4548984", "0.4521293", "0.45085937", "0.44796604", "0.44686285", "0.44508076", "0.44366032", "0.44195914", "0.4402968", "0.43964994", "0.4350866", "0.43493602", "0.43422398", "0.43408775" ]
0.70489925
0
Function to perform a backward moving gap fill for all years in an image. The image bands do not need to be in order, but the bandNames argument must be in chronological order. The backward gap fill is applied iteratively from the last year of bandNames through the first year, where if the current image has missing data, it is filled with the following year's values.
def applyBackwardNoDataFilter(image, bandNames):
    #Get a list of band names to iterate over, from year(-2) through year(0)
    bandNamesEE = ee.List(bandNames[:-1]).reverse()
    
    #Define backwards filter
    #In first iteration, bandName=bandNames[-2] and followingImage is image.select(bandNames[-1]), or the classifications for the final year
    #currentImage = image.select(bandNames[-2]), the second to last year
    #followingImage = image.select(bandNames[-1]), the final year
    #Find where the second to last year has missing data, replace those values with the values of the following year
    #Append followingImage to currentImage, so now currentImage is a two band image, with the first band being the second to last year with the gap fill
    #and the second band is the final years classification
    #The iteration continues, now with followingImage.select[0] being the second to last year with the gap fill applied, and bandName is the third to last year
    def backwardNoDataFilter(bandName, followingImage):
        currentImage = image.select(ee.String(bandName))
        followingImage = ee.Image(followingImage)
        currentImage = currentImage.unmask(followingImage.select([0]))
        return currentImage.addBands(followingImage)
    
    #Apply backwards filter, starting with the final year and iterating through to year(0)
    filtered = bandNamesEE.iterate(backwardNoDataFilter,ee.Image(image.select(bandNames[-1])))
    #Re-order bands to be in chronological order
    filtered = ee.Image(filtered)
    return filtered.select(bandNames)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyForwardNoDataFilter(image, bandNames):\n #Get a list of band names from year(1) through the last year\n bandNamesEE = ee.List(bandNames[1:])\n \n #Define forwards filter\n #In first iteration, bandName=bandNames[1] and previousImage is image.select(bandNames[0]), or the classifications for the first year\n #currentImage = image.select(bandNames[1]), the image for the second year\n #previousImage = image.select(bandNames[0]), the first year\n #Find where the second year has missing data, replace those values with the values of the first year\n #Append previousImage to currentImage, so now currentImage is a two band image, with the first band being the second year with the gap fill\n #and the second band is the first years classification\n #The iteration continues, now with followingImage.select[0] being the second year with the gap fill applied, and bandName is the third year\n def forwardNoDataFilter(bandName, previousImage):\n currentImage = image.select(ee.String(bandName))\n previousImage = ee.Image(previousImage)\n currentImage = currentImage.unmask(previousImage.select([0]))\n return currentImage.addBands(previousImage)\n \n #Iterate through all the years, starting with the first year's classification\n filtered = bandNamesEE.iterate(forwardNoDataFilter,ee.Image(image.select(bandNames[0])))\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)", "def applyGapFilter(image, bandNames):\n filtered = applyForwardNoDataFilter(image, bandNames)\n filtered = applyBackwardNoDataFilter(filtered, bandNames)\n return filtered", "def applyWindow4years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-2):\n img_out = img_out.addBands(mask4(imagem, value,bandNames[(i-1):(i+3)]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def stack_bands(yr, roi):\n\n water_year_start = '{}-10-01'.format(yr - 1)\n\n winter_s, winter_e = '{}-01-01'.format(yr), '{}-03-01'.format(yr),\n spring_s, spring_e = '{}-03-01'.format(yr), '{}-05-01'.format(yr),\n late_spring_s, late_spring_e = '{}-05-01'.format(yr), '{}-07-01'.format(yr)\n summer_s, summer_e = '{}-07-01'.format(yr), '{}-09-01'.format(yr)\n fall_s, fall_e = '{}-09-01'.format(yr), '{}-12-31'.format(yr)\n\n periods = [('cy', winter_s, fall_e),\n ('1', spring_s, spring_e),\n ('2', late_spring_s, late_spring_e),\n ('3', summer_s, summer_e),\n ('4', fall_s, fall_e)]\n\n first = True\n for name, start, end in periods:\n bands = landsat_composites(yr, start, end, roi, name)\n if first:\n input_bands = bands\n proj = bands.select('B2_cy').projection().getInfo()\n first = False\n else:\n input_bands = input_bands.addBands(bands)\n\n for s, e, n in [(spring_s, spring_e, 'espr'),\n (late_spring_s, late_spring_e, 'lspr'),\n (summer_s, summer_e, 'smr'),\n (fall_s, fall_e, 'fl'),\n (water_year_start, spring_e, 'wy_espr'),\n (water_year_start, late_spring_e, 'wy_espr'),\n (water_year_start, summer_e, 'wy_smr'),\n (water_year_start, fall_e, 'wy')]:\n gridmet = ee.ImageCollection(\"IDAHO_EPSCOR/GRIDMET\").filterBounds(\n roi).filterDate(s, e).select('pr', 'eto', 'tmmn', 'tmmx')\n temp_reducer = ee.Reducer.mean()\n t_names = ['tmax'.format(n), 'tmin'.format(n)]\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 
'eto').reduce(precip_reducer).rename(\n 'precip_total_{}'.format(n), 'pet_total_{}'.format(n)).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n wd_estimate = precip_sum.select('precip_total_{}'.format(n)).subtract(precip_sum.select(\n 'pet_total_{}'.format(n))).rename('wd_est_{}'.format(n))\n input_bands = input_bands.addBands([temp_perc, precip_sum, wd_estimate])\n\n temp_reducer = ee.Reducer.percentile([10, 50, 90])\n t_names = ['tmmn_p10_cy', 'tmmn_p50_cy', 'tmmn_p90_cy', 'tmmx_p10_cy', 'tmmx_p50_cy', 'tmmx_p90_cy']\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_cy', 'pet_total_cy').resample('bilinear').reproject(crs=proj['crs'], scale=30)\n wd_estimate = precip_sum.select('precip_total_cy').subtract(precip_sum.select(\n 'pet_total_cy')).rename('wd_est_cy')\n\n coords = ee.Image.pixelLonLat().rename(['Lon_GCS', 'LAT_GCS']).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n ned = ee.Image('USGS/NED')\n terrain = ee.Terrain.products(ned).select('elevation', 'slope', 'aspect').reduceResolution(\n ee.Reducer.mean()).reproject(crs=proj['crs'], scale=30)\n\n world_climate = get_world_climate(proj=proj)\n elev = terrain.select('elevation')\n tpi_1250 = elev.subtract(elev.focal_mean(1250, 'circle', 'meters')).add(0.5).rename('tpi_1250')\n tpi_250 = elev.subtract(elev.focal_mean(250, 'circle', 'meters')).add(0.5).rename('tpi_250')\n tpi_150 = elev.subtract(elev.focal_mean(150, 'circle', 'meters')).add(0.5).rename('tpi_150')\n static_input_bands = coords.addBands([temp_perc, wd_estimate, terrain, tpi_1250, tpi_250, tpi_150, world_climate])\n\n nlcd = ee.Image('USGS/NLCD/NLCD2011').select('landcover').reproject(crs=proj['crs'], scale=30).rename('nlcd')\n\n cdl_cult = ee.Image('USDA/NASS/CDL/2017').select('cultivated'). 
\\\n remap([1, 2], [0, 1]).reproject(crs=proj['crs'], scale=30).rename('cdlclt')\n\n cdl_crop = ee.Image('USDA/NASS/CDL/2017').select('cropland').reproject(crs=proj['crs'],\n scale=30).rename('cdlcrp')\n\n gsw = ee.Image('JRC/GSW1_0/GlobalSurfaceWater')\n occ_pos = gsw.select('occurrence').gt(0)\n water = occ_pos.unmask(0).rename('gsw')\n\n static_input_bands = static_input_bands.addBands([nlcd, cdl_cult, cdl_crop, water])\n\n input_bands = input_bands.addBands(static_input_bands).clip(roi)\n\n # standardize names to match EE javascript output\n standard_names = []\n temp_ct = 1\n prec_ct = 1\n names = input_bands.bandNames().getInfo()\n for name in names:\n if 'tavg' in name and 'tavg' in standard_names:\n standard_names.append('tavg_{}'.format(temp_ct))\n temp_ct += 1\n elif 'prec' in name and 'prec' in standard_names:\n standard_names.append('prec_{}'.format(prec_ct))\n prec_ct += 1\n else:\n standard_names.append(name)\n\n input_bands = input_bands.rename(standard_names)\n return input_bands", "def applyWindow3years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-1):\n img_out = img_out.addBands(mask3(imagem, value,bandNames[(i-1):(i+2)]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def applyWindow5years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-3):\n img_out = img_out.addBands(mask5(imagem, value,bandNames[(i-1):(i+4)]))\n img_out = img_out.addBands(imagem.select(bandNames[-3]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def get_yearly_data(name, startyr=None, endyr=None, interpolated=False):\n varinfo = get_varinfo(name)\n \n if varinfo[\"type\"] == \"yearly\":\n data = get_data(varinfo[\"id\"], startyr=startyr, endyr=endyr)\n giddict = dict()\n sorteddata = sorted(data[\"cells\"], key=lambda vd: vd[\"gid\"])\n for gid,valuedicts in itertools.groupby(sorteddata, key=lambda vd: vd[\"gid\"]):\n yrdict = dict([(valuedict[\"year\"],valuedict[\"value\"])\n for valuedict in valuedicts\n ])\n info = {\"data\": yrdict}\n giddict[gid] = info\n\n if interpolated:\n def pairwise(iterable):\n a, b = itertools.tee(iterable)\n next(b, None)\n return zip(a, b)\n \n def lerp(factor, fromval, toval):\n valrange = toval - fromval\n return fromval + valrange * factor\n \n for gid,info in giddict.items():\n yrdict = info[\"data\"]\n if len(yrdict) > 1:\n for (fromyr,fromval),(toyr,toval) in pairwise(sorted(yrdict.items(),key=lambda i: i[0])):\n curyr = fromyr + 1\n interpneeded = fromval != toval\n \n while curyr != toyr:\n if interpneeded:\n factor = (curyr - fromyr) / float(toyr - fromyr)\n yrdict[curyr] = lerp(factor, fromval, toval)\n else:\n yrdict[curyr] = fromval\n curyr += 1\n\n return giddict\n\n else:\n raise Exception(\"Could not find a yearly variable with that name\")", "def gap_years_aggregated(mongo_client):\n db = mongo_client[\"nobel\"]\n\n original_categories = sorted(set(db.prizes.distinct(\"category\", {\"year\": \"1901\"})))\n\n pipeline = [\n {\"$match\": {\"category\": {\"$in\": original_categories}}},\n {\"$project\": {\"category\": 1, \"year\": 1}},\n\n # Collect the set of category values for each prize year.\n {\"$group\": {\"_id\": \"$year\", \"categories\": {\"$addToSet\": \"$category\"}}},\n\n # Project categories *not* awarded (i.e., that are missing this year).\n {\"$project\": {\"missing\": {\"$setDifference\": 
[original_categories, \"$categories\"]}}},\n\n # Only include years with at least one missing category\n {\"$match\": {\"missing.0\": {\"$exists\": True}}},\n\n # Sort in reverse chronological order. Note that \"_id\" is a distinct year at this stage.\n {\"$sort\": OrderedDict([(\"_id\", -1)])},\n ]\n\n for doc in db.prizes.aggregate(pipeline):\n print(\"{year}: {missing}\".format(year=doc[\"_id\"], missing=\", \".join(sorted(doc[\"missing\"]))))", "def fill_price_gaps(\n from_date=dt.datetime(1970,1,1),\n to_date=dt.datetime.today().replace(hour=0, minute=0, second=0, microsecond=0)\n ):\n #Create a collection of years\n years = []\n cur_year = from_date.year\n while cur_year <= to_date.year:\n years.append(cur_year)\n cur_year += 1\n #Loop each year\n all_year_dates = pd.DataFrame([])\n for year in tqdm(years, total=len(years), desc=\"Loop through years to find dates\"):\n #establish bounding dates\n year_from_date = None if year != from_date.year else from_date\n year_to_date = None if year != to_date.year else to_date\n #Get filtered year dates\n year_dates = create_filtered_year_dates(year, from_date=year_from_date, to_date=year_to_date, )\n #Add to the full list\n all_year_dates = pd.concat([all_year_dates, year_dates])\n #Order the dates (just in case)\n all_year_dates = all_year_dates.sort_values([\"date\"]) \\\n .reset_index(drop=True)\n #Fetch all the tickers\n tickers = sqlaq_to_df(ticker.fetch())\n #Loop through tickers\n errors = []\n run_time = ProcessTime()\n for _,r in tqdm(tickers[[\"id\",\"ticker\"]].iterrows(), total=tickers.shape[0], desc=\"Filling in gaps\"):\n logger.info(f\"Filling gaps in {r.id} -> {r.ticker}\")\n try:\n #Fetch all prices\n dp = sqlaq_to_df(daily_price.fetch(ticker_ids=[r.id]))\n dp[\"date\"] = dp.date.astype(\"datetime64[ns]\")\n #Identify missing dates\n missing_dates = pd.merge(all_year_dates, dp[[\"date\",\"id\"]], on=[\"date\"], how=\"left\")\n #Identify the start date and remove all missing date before that\n start_date = missing_dates[~missing_dates.id.isnull()].date.min()\n missing_dates = missing_dates[missing_dates.date > start_date]\n #Remove all other items which have dates\n missing_dates = missing_dates[missing_dates.id.isnull()]\n #Order remaining dates\n missing_dates = missing_dates.sort_values(\"date\")\n #Create groupings no larger than max_days (in config)\n st_d = None\n date_groups = []\n missing_dates = missing_dates.date.to_list()\n if len(missing_dates):\n for i,d in enumerate(missing_dates):\n if not st_d:\n st_d = d\n else:\n #Append when group gets too big\n if (d - st_d).days > WEB_SCRAPE_MAX_DAYS:\n date_groups.append([st_d, missing_dates[i-1]])\n #Update the start date\n st_d = d\n #Append the last item\n date_groups.append([st_d, d])\n #Scrape the missing prices\n logger.info('Number of webscrapes to perform -> {}'.format(len(date_groups)))\n #For each time frame perform a scrape\n try: #Try loop so as not to miss all following date groups\n for i,dates in enumerate(date_groups):\n logger.info(f\"Running dates {i} -> {dt.datetime.strptime(str(dates[0])[:10], '%Y-%m-%d')} - {dt.datetime.strptime(str(dates[1])[:10], '%Y-%m-%d')}\")\n process_daily_prices(\n r.ticker,\n r.id,\n st_date=dates[0],\n en_date=dates[1],\n \n )\n except Exception as e:\n logger.error(e)\n errors.append({'ticker_id':r.id, 'ticker':r.ticker, \"error\":e, \"st_date\":dates[0], \"en_dates\":dates[1]})\n #Run an update on th weekly prices\n process_weekly_prices(\n r.id,\n \n )\n except Exception as e:\n logger.error(e)\n 
errors.append({'ticker_id':r.id, 'ticker':r.ticker, \"error\":e})\n #Lap\n logger.info(run_time.lap())\n logger.info(run_time.show_latest_lap_time(show_time=True))\n logger.info(f\"GAP FILL RUN TIME - {run_time.end()}\")\n\n logger.info(f'\\nGAP FILL ERROR COUNT -> {len(errors)}')\n if len(errors) > 0:\n logger.info('GAP FILL ERRORS ->')\n for e in errors:\n logger.error(e)", "def complete_zeros(df_dm,year):\n df_dm.insert(1,year,0)\n return df_dm", "def accumulate_to_year_end(da, year_ends, mask=None, shift=5, accumulate=12, time_name='time'):\n da = da.shift({time_name: shift}) \\\n .rolling({time_name: accumulate}).sum()\n da = da.where(da[time_name].dt.month == year_ends) \\\n .resample({time_name: '1YS'}).sum(skipna=True)\n if mask is None:\n return da\n else:\n return da.where(mask == 1)", "def fill_year(timeseries, value=0):\n # Obtain firts and last date from timeseries\n first_date = timeseries.index.min()\n last_date = timeseries.index.max()\n\n one_year_date = last_date - timedelta(days=365)\n\n ## Obtain the sunday beofre the date of one year ago\n starting_date = one_year_date - timedelta(days=one_year_date.weekday()+1)\n\n assert starting_date.weekday_name == 'Sunday'\n\n\n # Fill dates with mising zero\n date_range_series = create_timeseries(starting_date,\n first_date-timedelta(days=1),\n value)\n\n # Fill the original timeseries\n filled_timeseries = pd.concat([date_range_series, timeseries])\n\n return filled_timeseries", "def calculate_iron_hemoglobin_time_lag_effective_fraction(df, years):\n final = pd.DataFrame()\n data = df.reset_index()\n for i in list(range(0, len(years))):\n current = (data.loc[data.year == years[i]]\n .set_index([c for c in data.columns if 'draw' not in c and c != 'year'])\n .drop(columns='year'))\n if i == 0:\n for draw in list(range(0, 1000)):\n current[f'draw_{draw}'] = 1\n else:\n prior = (data.loc[data.year == years[i - 1]]\n .set_index([c for c in data.columns if 'draw' not in c and c != 'year'])\n .drop(columns='year'))\n current = 1 - ((current - prior) * 0.75 / current)\n current['year'] = years[i]\n final = pd.concat([final, current])\n final = final.reset_index().set_index([c for c in data.columns if 'draw' not in c]).sort_index()\n return final", "def fake_date_fill(df, back_method: str = 'slice'):\n df_index = df.index.to_series().copy()\n df2 = df.sort_index(ascending=False).copy()\n df2 = df2.apply(lambda x: pd.Series(x.dropna().values))\n df2 = df2.sort_index(ascending=False)\n df2.index = df_index.tail(len(df2.index))\n df2 = df2.dropna(how='all', axis=0)\n if df2.empty:\n df2 = df.fillna(0)\n\n if back_method == 'bfill':\n df2 = fill_forward(df2)\n return df\n elif back_method == 'slice':\n thresh = int(df.shape[1] * 0.5)\n thresh = thresh if thresh > 1 else 1\n df3 = df2.dropna(thresh=thresh, axis=0)\n if df3.empty or df3.shape[0] < 8:\n df3 = fill_forward(df2)\n else:\n df3 = fill_forward(df3)\n return df3\n elif back_method == 'keepna':\n return df2\n else:\n print('back_method not recognized in fake_date_fill')\n return df2", "def interpolate_dataframes(ff):\n assert isinstance(ff, dict)\n year_min = ff['CA'][0].index[0]\n year_max = ff['CA'][0].index[-1]\n years = list(range(year_min, year_max + 1))\n for state in ff.keys():\n for cf in ff[state]:\n for year in years:\n if year not in cf.index:\n cf.loc[year] = cf.loc[year-1:year+1, :].sum(axis=0)\n cf.loc[year] = (cf.loc[year] / 2).astype(np.int64)\n cf.sort_index(inplace=True)\n return(ff)", "def get_backward(prev_dfs, right_most_path, hist, pm_backward, dfs_codes, 
db):\n\tlast_edge = hist.edges[right_most_path[0]]\n\tg = db[prev_dfs.id]\n\tlast_node = g.nodes[last_edge.to]\n\n\tfor idx,rmp in reversed(list(enumerate(right_most_path[1:]))):\n\t\tedge = hist.edges[rmp]\n\t\tfor e in last_node.edges:\n\t\t\tif e.id in hist.has_edges:\n\t\t\t\tcontinue\n\t\t\tif e.to not in hist.has_node:\n\t\t\t\tcontinue\n\t\t\tfrom_node = g.nodes[edge.fromn]\n\t\t\tto_node = g.nodes[edge.to]\n\t\t\tif e.to == edge.fromn and (e.label > edge.label or (e.label == edge.label and last_node.label >= to_node.label)):\n\t\t\t\tfrom_id = dfs_codes[right_most_path[0]].to\n\t\t\t\tto_id = dfs_codes[rmp].fromn\n\t\t\t\tdfsc = dfs_code(from_id, to_id, last_node.label, e.label, from_node.label)\n\t\t\t\tpdfs = pre_dfs(g.id, e, prev_dfs)\n\t\t\t\tif dfsc in pm_backward:\n\t\t\t\t\tpm_backward[dfsc].append(pdfs)\n\t\t\t\telse:\n\t\t\t\t\tpm_backward[dfsc] = [pdfs,]\n\t\n\treturn pm_backward", "def backfill(arr, arr1):\n \n arr = np.where(arr < 0.01, np.NaN, arr)\n # FIXME:\n # RuntimeWarning: invalid value encountered in less\n # arr = np.where(arr < 0.01, np.NaN, arr)\n\n x = np.isnan(arr1)\n arr1[x] = arr[x]\n return arr1", "def foldcurve(_band, _period):\n # Set epoch to first date observed\n _epoch = _band[0][0]\n # Iterate through array, update date to phase\n for i in range(0, _band.shape[0]):\n _band[i, 0] = ((_band[i, 0] - _epoch) / _period) % 1\n # Return folded array\n return _band", "def fill_forward(df):\n df = df.fillna(method='ffill')\n df = df.fillna(method='bfill').fillna(0)\n return df", "def _bands_competed_last_year():\n lLastYear = datetime.datetime.now().year - 1\n cursor = connection.cursor()\n cursor.execute(\"SELECT count(distinct(r.band_id)) FROM contests_contestevent e, contests_contestresult r WHERE r.contest_event_id = e.id AND extract(year from e.date_of_event) = %(year)s GROUP BY extract(year from e.date_of_event) ORDER BY extract(year from e.date_of_event) desc\", {'year' : lLastYear})\n rows = cursor.fetchall()\n lReturn = 0\n if rows and rows[0]:\n lReturn = rows[0][0]\n cursor.close()\n return lReturn", "def fillna(df, col: str, forward: bool):\n na_prev = len(df)\n report = f'fillna(\"{col}\") ' + ('forward' if forward else 'backward') + ' NA count:'\n while True:\n na = df[col].isna().sum()\n report += f' {na}'\n if na == na_prev or na == 0: break\n na_prev = na\n # df must to be sorted by (ABI, YEAR)\n df.loc[df[col].isna(), col] = df.groupby('ABI')[col].shift(1 if forward else -1)", "def msatna_blocks_3lag_year(year: int) -> pd.Series:\n return msatna_blocks_3lag_panel()[year]", "def reduce_dataset(X, year):\n\n drop_list = [i for i in range(config.DB_YEAR_MIN, config.DB_YEAR_MAX + 1)]\n drop_list.remove(year - 1)\n red_X = X.drop(drop_list, axis=0)\n return red_X", "def get_previous_yr(df, df2, years):\n # Get n+_ year\n df[\"season_n-{}_tmp\".format(years)] = df[\"season\"] - years\n df_merged = pd.merge(df, df2, how=\"left\", left_on=[\"player\", \"player_id\", \"season_n-{}_tmp\".format(years)],\n right_on=[\"player\", \"player_id\", \"season\"],\n suffixes=['', \"_n-{}\".format(years)])\n\n df_merged = df_merged.drop([\"season_n-{}_tmp\".format(years)], axis=1)\n\n return df_merged", "def modify_bands(\n xraster: xr.core.dataarray.DataArray, input_bands: List[str],\n output_bands: List[str], drop_bands: List[str] = []):\n # Do not modify if image has the same number of output bands\n if xraster['band'].shape[0] == len(output_bands):\n return xraster\n\n # Drop any bands from input that should not be on output\n for ind_id in 
list(set(input_bands) - set(output_bands)):\n drop_bands.append(input_bands.index(ind_id)+1)\n return xraster.drop(dim=\"band\", labels=drop_bands, drop=True)", "def reduce_dataset(years, values,flux_floor=0,max_tm_error=0,min_reduction_steps=200):\n non_zero_ind, min_retained_zero_years = remove_begin_end_zero_flux(years,values,flux_floor,min_reduction_steps)\n\n years_mod = years[non_zero_ind]\n values_mod = values[non_zero_ind]\n\n if years_mod.size <3:\n years_mod = years\n values_mod = values\n values_mod = 0\n else:\n #makes ure you have not removed more than 1% of the mass when removing 0 or flux floor rates\n o_mass = TimeSeries(years,values,None,None).integrate().values[-1]\n r_mass = TimeSeries(years_mod, values_mod, None, None).integrate().values[-1]\n if abs((o_mass-r_mass)/o_mass)*100 > 1:\n years_mod = years\n values_mod = values\n timeseries = TimeSeries(years_mod, values_mod, None, None)\n mass = timeseries.integrate()\n\n #normalize Values\n maxval = np.max(values_mod)\n values_mod = values_mod/maxval\n o_timeseries = TimeSeries(years,values/maxval,None,None)\n o_mass = o_timeseries.integrate()\n timeseries = TimeSeries(years_mod, values_mod, None, None)\n mass = timeseries.integrate()\n\n mx = np.argmax(timeseries.values)\n points = [0, mx, len(timeseries)]\n x = timeseries.times\n\n ythresh = 100*np.mean(timeseries.values)\n out_error = 1\n out_error_last = out_error\n OUT_ERROR_THRESHOLD = 1e-2\n\n UPPER_N = 200\n LOWER_N = 50\n last_result = None\n MAX_ITERATIONS = 80\n\n solve_type = SMOOTH\n simple_peaks = False\n last_result,ix = reduct_iter(timeseries,flux_floor,ythresh,out_error,out_error_last,OUT_ERROR_THRESHOLD,UPPER_N,LOWER_N,last_result,MAX_ITERATIONS)\n last_result = retain_min_years(last_result.reduced_flux,o_timeseries,o_mass,min_retained_zero_years)\n #if there are less points than the min_reduction_steps then use the remaining\n #points to rebalance the segments with the largest mass errors.\n play_points = min_reduction_steps - last_result.num_reduced_points\n bef = last_result.reduced_flux.times.size\n if play_points > 0:\n last_result = red_flux.rebalance_extra_points(last_result,play_points)\n\n rr = last_result\n\n #find peaks for data rebalance and reporting\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=3,rel_height=1)\n if peaks.size == 0 :\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=2,rel_height=1)\n if peaks.size == 0:\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=1,rel_height=1)\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=3,rel_height=1)\n if pneg.size == 0:\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=2,rel_height=1)\n if pneg.size == 0:\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=1,rel_height=1)\n\n peaks = rr.reduced_flux.times[peaks]\n pneg = rr.reduced_flux.times[pneg]\n\n peaks = np.isin(o_timeseries.times,peaks)\n pneg = np.isin(o_timeseries.times,pneg)\n peaks = np.where(peaks)\n pneg = np.where(pneg)\n\n peaks = peaks[0]\n pneg = pneg[0]\n iter = 0\n while iter < 100 and (abs(last_result.total_mass_error*maxval) > max_tm_error or abs(last_result.total_mass_error/last_result.mass.values[-1])*100 > .001) :\n rr = red_flux.rebalance_valleys(rr,peaks,pneg)\n #keep the lowest total_mass_error\n if abs(rr.total_mass_error) < abs(last_result.total_mass_error):\n last_result = rr\n else:\n break\n iter += 1\n\n out_times = last_result.reduced_flux.times\n out_values = last_result.reduced_flux.values\n #return the reduced data, undo normalize of the values (*maxval)\n 
return out_times, out_values*maxval,-(last_result.total_mass_error * maxval),peaks.size,iter", "def calculateNumberOfChanges(image, bandNames):\n #Get a collection of images where each image has 2 bands: classifications for year(i) and classifications for year(i+1)\n lc_one_change_col = npv.getYearStackIC(image,bandNames, band_indices=[0,1])\n #Get a collection of images where each image represents whether there was change from year(i) to year(i+1) and convert to an image\n lc_one_change_col = lc_one_change_col.map(npv.LC_OneChange)\n lc_one_change_image = lc_one_change_col.toBands()\n #Calculate the number of changes by applying the sum reducer\n lc_sum_changes = lc_one_change_image.reduce(ee.Reducer.sum().unweighted())\n return lc_sum_changes", "def fill_missing_date_range():\n pickle_dir ='/misc/yoda/www/plots/user/sheep'\n #pickle_dir = '/Users/ken/Downloads/sheep'\n drange = get_missing_date_range(pickle_dir)\n if drange:\n print 'fill date range', drange\n pickle_date_range(drange[0], drange[1])", "def winter_bar_chart(self):\n # Create the top n countries dataframe from 1994 to 2016\n df_winter = self.df_winter[self.df_winter['Year'] >= 1994]\n m = list(df_winter['Country'].value_counts()[:self.n_top].index)\n df_top = df_winter[df_winter['Country'].isin(m)].groupby(['Country', 'Medal']).size()\n new_index = pd.MultiIndex.from_product([m, ['Gold', 'Silver', 'Bronze']], names=df_top.index.names)\n df_top = df_top.reindex(new_index)\n unstacked_df_top = df_top.unstack().reindex(m, columns=['Gold', 'Silver', 'Bronze'])\n # Create the dataframe in 2018\n k = []\n for j in self.df_2018_winter['NOC'].tolist():\n n = j[j.find('(') + 1:j.find(')')]\n k.append((n, j))\n k = dict(k)\n winter_2018 = pd.DataFrame()\n for i in m:\n if i != 'RUS':\n df_tmp = self.df_2018_winter[self.df_2018_winter['NOC'] == k[i]]\n else:\n df_tmp = self.df_2018_winter[self.df_2018_winter['NOC'] == k['OAR']]\n winter_2018 = pd.concat([winter_2018, df_tmp])\n winter_2018['Country'] = m\n new_winter_2018 = winter_2018.set_index(['Country'])[['Gold', 'Silver', 'Bronze']]\n # Add two dataframes and plot.\n unstacked_df_top.add(new_winter_2018).reindex(m[::-1], columns=['Bronze', 'Silver', 'Gold']).plot(kind='barh')\n plt.title('Medal Result of Winter Olympics since 1994')\n fname = './medal_figures_winter/winter_bar_chart.png'\n plt.savefig(fname=fname, format='png')\n return", "def austral_year_daily(x, y):\n if isinstance(x, xr.DataArray):\n x = x.values\n \n jfmamj = x < 182.\n jasond = x >= 182.\n \n x_jasond = []\n y_jasond = []\n if any(jasond):\n x_jasond = x[jasond] - 181\n y_jasond = y[jasond]\n\n x_jfmamj = []\n y_jfmamj = []\n if any(jfmamj):\n x_jfmamj = x[jfmamj] + 184\n y_jfmamj = y[jfmamj]\n\n xout = np.concatenate([xi for xi in [x_jasond, x_jfmamj] if len(xi)])\n yout = np.concatenate([yi for yi in [y_jasond, y_jfmamj] if len(yi)])\n \n return xout, yout" ]
[ "0.7032449", "0.5949247", "0.5812707", "0.5253641", "0.51127684", "0.49725264", "0.47500893", "0.46962532", "0.46487218", "0.45113215", "0.44795865", "0.44701588", "0.4448179", "0.4420199", "0.4388033", "0.43850613", "0.43616962", "0.4360365", "0.43425742", "0.43269235", "0.4294934", "0.4291195", "0.42749843", "0.4262502", "0.42407838", "0.42378584", "0.42289925", "0.42165184", "0.42092714", "0.41809776" ]
0.7573101
0
Function to apply forward gap filling and backward gap filling to an image. The image bands do not need to be in order, but the bandNames argument must be in chronological order. This function calls applyForwardNoDataFilter then applyBackwardNoDataFilter.
def applyGapFilter(image, bandNames):
    filtered = applyForwardNoDataFilter(image, bandNames)
    filtered = applyBackwardNoDataFilter(filtered, bandNames)
    return filtered
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyForwardNoDataFilter(image, bandNames):\n #Get a list of band names from year(1) through the last year\n bandNamesEE = ee.List(bandNames[1:])\n \n #Define forwards filter\n #In first iteration, bandName=bandNames[1] and previousImage is image.select(bandNames[0]), or the classifications for the first year\n #currentImage = image.select(bandNames[1]), the image for the second year\n #previousImage = image.select(bandNames[0]), the first year\n #Find where the second year has missing data, replace those values with the values of the first year\n #Append previousImage to currentImage, so now currentImage is a two band image, with the first band being the second year with the gap fill\n #and the second band is the first years classification\n #The iteration continues, now with followingImage.select[0] being the second year with the gap fill applied, and bandName is the third year\n def forwardNoDataFilter(bandName, previousImage):\n currentImage = image.select(ee.String(bandName))\n previousImage = ee.Image(previousImage)\n currentImage = currentImage.unmask(previousImage.select([0]))\n return currentImage.addBands(previousImage)\n \n #Iterate through all the years, starting with the first year's classification\n filtered = bandNamesEE.iterate(forwardNoDataFilter,ee.Image(image.select(bandNames[0])))\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)", "def applyBackwardNoDataFilter(image, bandNames):\n #Get a list of band names to iterate over, from year(-2) through year(0)\n bandNamesEE = ee.List(bandNames[:-1]).reverse()\n \n #Define backwards filter\n #In first iteration, bandName=bandNames[-2] and followingImage is image.select(bandNames[-1]), or the classifications for the final year\n #currentImage = image.select(bandNames[-2]), the second to last year\n #followingImage = image.select(bandNames[-1]), the final year\n #Find where the second to last year has missing data, replace those values with the values of the following year\n #Append followingImage to currentImage, so now currentImage is a two band image, with the first band being the second to last year with the gap fill\n #and the second band is the final years classification\n #The iteration continues, now with followingImage.select[0] being the second to last year with the gap fill applied, and bandName is the third to last year\n def backwardNoDataFilter(bandName, followingImage):\n currentImage = image.select(ee.String(bandName))\n followingImage = ee.Image(followingImage)\n currentImage = currentImage.unmask(followingImage.select([0]))\n return currentImage.addBands(followingImage)\n \n #Apply backwards filter, starting with the final year and iterating through to year(0) \n filtered = bandNamesEE.iterate(backwardNoDataFilter,ee.Image(image.select(bandNames[-1])))\n #Re-order bands to be in chronological order\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)", "def fill_in_data(color,frames,fs=25):\n color = color\n colormat = color.as_matrix()\n frameDiff = np.diff(colormat.T[2])\n locations = np.where(frameDiff!=1)[0]\n\n #Calculate number of frames skipped\n #sample = []\n #sample = colormat.T\n sample = sample[:2].T\n #frames = range(100,len(colormat.T[2])+100)\n #frames = np.linspace(frames[0],frames[-1],frames[-1]-frames[0]+1)\n #frames = frames[:len(frames)-1]\n \n #if locations is empty, try looking for a row of nans\n if np.all(locations):\n for i in range(len(sample)):\n if np.all(sample[i] == 0):\n sample[i]=[np.nan, np.nan]\n missing = 
list(np.where(np.isnan(sample.T[0])))\n\n else:\n numfill = []\n missing = []\n for i in locations:\n numfill.append(frames[i+1]-frames[i])#-1)\n #pdb.set_trace()\n missing.append(np.linspace(i+1,i+1+numfill[-1],numfill[-1]))\n\n missing = np.concatenate(missing)\n\n missing = missing[:len(missing)-1]\n missing = missing.astype(int)\n\n pdb.set_trace()\n\n for j in reversed(missing):\n sample = np.insert(sample,j,(np.nan,np.nan),axis = 0)\n #frames = np.insert(frames,j,j,axis=0)\n\n color_x,color_y,x_filt=KFilt(sample,fs)\n color_mat = np.column_stack((color_x[:,0],color_y[:,0],color_x[:,1],color_y[:,1]))\n return color_mat,frames,x_filt", "def mask4(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[3]).eq(value)) \n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value)\n change_img1 = imagem.select(bandNames[2]).mask(mask.eq(1)).where(mask.eq(1), value) \n img_out = imagem.select(bandNames[1]).blend(change_img).blend(change_img1)\n return img_out", "def applyMask3last(imagem, value, bandNames):\n mask = imagem.select(bandNames[-3]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[-2]).eq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[-1]).neq(value))\n change_img = imagem.select(bandNames[-1]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[0:-1])\n img_out = img_out.addBands(imagem.select(bandNames[-1]).blend(change_img))\n return img_out", "def stack_bands(yr, roi):\n\n water_year_start = '{}-10-01'.format(yr - 1)\n\n winter_s, winter_e = '{}-01-01'.format(yr), '{}-03-01'.format(yr),\n spring_s, spring_e = '{}-03-01'.format(yr), '{}-05-01'.format(yr),\n late_spring_s, late_spring_e = '{}-05-01'.format(yr), '{}-07-01'.format(yr)\n summer_s, summer_e = '{}-07-01'.format(yr), '{}-09-01'.format(yr)\n fall_s, fall_e = '{}-09-01'.format(yr), '{}-12-31'.format(yr)\n\n periods = [('cy', winter_s, fall_e),\n ('1', spring_s, spring_e),\n ('2', late_spring_s, late_spring_e),\n ('3', summer_s, summer_e),\n ('4', fall_s, fall_e)]\n\n first = True\n for name, start, end in periods:\n bands = landsat_composites(yr, start, end, roi, name)\n if first:\n input_bands = bands\n proj = bands.select('B2_cy').projection().getInfo()\n first = False\n else:\n input_bands = input_bands.addBands(bands)\n\n for s, e, n in [(spring_s, spring_e, 'espr'),\n (late_spring_s, late_spring_e, 'lspr'),\n (summer_s, summer_e, 'smr'),\n (fall_s, fall_e, 'fl'),\n (water_year_start, spring_e, 'wy_espr'),\n (water_year_start, late_spring_e, 'wy_espr'),\n (water_year_start, summer_e, 'wy_smr'),\n (water_year_start, fall_e, 'wy')]:\n gridmet = ee.ImageCollection(\"IDAHO_EPSCOR/GRIDMET\").filterBounds(\n roi).filterDate(s, e).select('pr', 'eto', 'tmmn', 'tmmx')\n temp_reducer = ee.Reducer.mean()\n t_names = ['tmax'.format(n), 'tmin'.format(n)]\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_{}'.format(n), 'pet_total_{}'.format(n)).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n wd_estimate = precip_sum.select('precip_total_{}'.format(n)).subtract(precip_sum.select(\n 'pet_total_{}'.format(n))).rename('wd_est_{}'.format(n))\n input_bands = 
input_bands.addBands([temp_perc, precip_sum, wd_estimate])\n\n temp_reducer = ee.Reducer.percentile([10, 50, 90])\n t_names = ['tmmn_p10_cy', 'tmmn_p50_cy', 'tmmn_p90_cy', 'tmmx_p10_cy', 'tmmx_p50_cy', 'tmmx_p90_cy']\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_cy', 'pet_total_cy').resample('bilinear').reproject(crs=proj['crs'], scale=30)\n wd_estimate = precip_sum.select('precip_total_cy').subtract(precip_sum.select(\n 'pet_total_cy')).rename('wd_est_cy')\n\n coords = ee.Image.pixelLonLat().rename(['Lon_GCS', 'LAT_GCS']).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n ned = ee.Image('USGS/NED')\n terrain = ee.Terrain.products(ned).select('elevation', 'slope', 'aspect').reduceResolution(\n ee.Reducer.mean()).reproject(crs=proj['crs'], scale=30)\n\n world_climate = get_world_climate(proj=proj)\n elev = terrain.select('elevation')\n tpi_1250 = elev.subtract(elev.focal_mean(1250, 'circle', 'meters')).add(0.5).rename('tpi_1250')\n tpi_250 = elev.subtract(elev.focal_mean(250, 'circle', 'meters')).add(0.5).rename('tpi_250')\n tpi_150 = elev.subtract(elev.focal_mean(150, 'circle', 'meters')).add(0.5).rename('tpi_150')\n static_input_bands = coords.addBands([temp_perc, wd_estimate, terrain, tpi_1250, tpi_250, tpi_150, world_climate])\n\n nlcd = ee.Image('USGS/NLCD/NLCD2011').select('landcover').reproject(crs=proj['crs'], scale=30).rename('nlcd')\n\n cdl_cult = ee.Image('USDA/NASS/CDL/2017').select('cultivated'). \\\n remap([1, 2], [0, 1]).reproject(crs=proj['crs'], scale=30).rename('cdlclt')\n\n cdl_crop = ee.Image('USDA/NASS/CDL/2017').select('cropland').reproject(crs=proj['crs'],\n scale=30).rename('cdlcrp')\n\n gsw = ee.Image('JRC/GSW1_0/GlobalSurfaceWater')\n occ_pos = gsw.select('occurrence').gt(0)\n water = occ_pos.unmask(0).rename('gsw')\n\n static_input_bands = static_input_bands.addBands([nlcd, cdl_cult, cdl_crop, water])\n\n input_bands = input_bands.addBands(static_input_bands).clip(roi)\n\n # standardize names to match EE javascript output\n standard_names = []\n temp_ct = 1\n prec_ct = 1\n names = input_bands.bandNames().getInfo()\n for name in names:\n if 'tavg' in name and 'tavg' in standard_names:\n standard_names.append('tavg_{}'.format(temp_ct))\n temp_ct += 1\n elif 'prec' in name and 'prec' in standard_names:\n standard_names.append('prec_{}'.format(prec_ct))\n prec_ct += 1\n else:\n standard_names.append(name)\n\n input_bands = input_bands.rename(standard_names)\n return input_bands", "def bandpasscorrect(data):\n ret=[x for x in data]\n n=len(ret)\n ret[0]=1.083*ret[0]-0.083*ret[1]\n ret[n-1]=1.083*ret[n-1]-0.083*ret[n-2]\n for k in range(1,n-1):\n ret[k]=1.166*ret[k]-0.083*ret[k-1]-0.083*ret[k+1]\n return ret", "def band_filter(self, bands) -> 'ImageCollection':\n\n process_id = 'filter_bands'\n args = {\n 'imagery': self.graph,\n 'bands': bands\n }\n return self.graph_add_process(process_id, args)", "def applyMask3first(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).neq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).eq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).eq(value))\n change_img = imagem.select(bandNames[0]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[0]).blend(change_img)\n img_out = img_out.addBands(imagem.select(bandNames[1:]))\n return img_out", "def 
applyWindow4years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-2):\n img_out = img_out.addBands(mask4(imagem, value,bandNames[(i-1):(i+3)]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def fmask(bandname=\"fmask\"):\n\n def fmask(image):\n imgFmask = image.select(bandname)\n shadow = imgFmask.eq(3)\n snow = imgFmask.eq(4)\n cloud = imgFmask.eq(5)\n\n mask = shadow.Or(snow).Or(cloud)\n\n imgMask = image.updateMask(mask.Not())\n return imgMask\n return fmask", "def band(self, name, bands, new_name=None, label=None, text_key=None):\n if not self._is_numeric(name):\n msg = \"Can only band numeric typed data! {} is {}.\"\n msg = msg.format(name, self._get_type(name))\n raise TypeError(msg)\n if not text_key: text_key = self.text_key\n if not new_name: new_name = '{}_banded'.format(name)\n if not label: label = self.text(name, False, text_key)\n franges = []\n for idx, band in enumerate(bands, start=1):\n lab = None\n if isinstance(band, dict):\n lab = list(band.keys())[0]\n band = list(band.values())[0]\n if isinstance(band, tuple):\n if band[0] < 0:\n raise ValueError('Cannot band with lower bound < 0.')\n elif band[1] < 0:\n raise ValueError('Cannot band with upper bound < 0.')\n r = '{}-{}'.format(band[0], band[1])\n franges.append([idx, lab or r, {name: frange(r)}])\n else:\n r = str(band)\n franges.append([idx, lab or r, {name: [band]}])\n\n self.derive(new_name, 'single', label, franges,\n text_key=text_key)\n\n return None", "def dwt(image_array, quantization_Array):\n # Create the high pass and low pass filters\n # both filters are non-causal\n # symmetric\n # [-2, -1, 0, 1, 2]\n LPF = [-0.125, 0.25, 0.75, 0.25, -0.125]\n LPF_center = 2\n\n # [ -2,-1, 0]\n HPF = [-0.5, 1, -0.5]\n HPF_center = 2\n\n nrow, ncol = image_array.shape\n\n # create an array that will contain the 4 different subbands of the image\n LL = np.zeros((nrow, ncol))\n LH = np.zeros((nrow, ncol))\n HL = np.zeros((nrow, ncol))\n HH = np.zeros((nrow, ncol))\n filtered_image = [LL, LH, HL, HH]\n\n # filtering the rows using a low pass and high pass filters\n LowPass_rows = np.zeros((nrow, ncol))\n HighPass_rows = np.zeros((nrow, ncol))\n for i in range(0, nrow):\n LowPass_rows[i, :] = lfilter(LPF, image_array[i, :], LPF_center)\n HighPass_rows[i, :] = lfilter(HPF, image_array[i, :], HPF_center)\n\n # down sample rows.\n # which means we will have half the number of columns\n for i in range(0, len(filtered_image)):\n filtered_image[i] = filtered_image[i][:, ::2]\n\n # apply filters accross columns\n for i in range(0, ncol):\n LL[:, i] = lfilter(LPF, LowPass_rows[:, i], LPF_center)\n LH[:, i] = lfilter(HPF, LowPass_rows[:, i], HPF_center)\n HL[:, i] = lfilter(LPF, HighPass_rows[:, i], LPF_center)\n HH[:, i] = lfilter(HPF, HighPass_rows[:, i], HPF_center)\n\n # down sample columns and quantize\n for i in range(0, len(filtered_image)):\n filtered_image[i] = filtered_image[i][::2, :]\n filtered_image[i] = np.round(\n filtered_image[i]/quantization_Array[i]).astype(int)\n\n return filtered_image", "def mask_gradient(self, override=False):\n self.MaskPrefix = 'g' + self.MaskPrefix #append prefix 'g' for gradient\n print('applying gradient filter to remove edge effects and isolated unwrapping errors')\n # If a signal mask exists, use it to prevent np.gradient() from scrapping important data\n indSignal = np.zeros(self.Set.Size)\n if override:\n #manually created boolean array, 
1=pixel containing known signal\n indSignal = np.load(override)\n\n for ig in self.Set:\n igram = self.load_ma(ig)\n Fx, Fy = np.gradient(phase) #locate pixels adjacent to NaNs\n Fx[indSignal] = 1\n Fy[indSignal] = 1\n igram[np.isnan(Fx)] = ma.masked\n igram[np.isnan(Fx)] = ma.masked\n mskFile = self.MaskPrefix + 'Mask_' + ig.Name[:-4]\n np.save(os.path.join(self.ProcDir, mskFile), igram.mask)\n print(mskFile)\n print('Done')", "def get_bands(self, data_array_norm, baseline_array_norm, f):\n\n fmax = 50\n fidx = f < fmax\n fnum = f[fidx].size\n\n band_tot = np.zeros((fnum, fnum, data_array_norm.shape[0], data_array_norm.shape[2], data_array_norm.shape[3]))\n band_tot_bl = np.zeros((fnum, fnum, baseline_array_norm.shape[0], baseline_array_norm.shape[2], baseline_array_norm.shape[3]))\n for i in range(fnum):\n for j in range(fnum):\n if j > i:\n idx = (f >= f[i]) & (f < f[j])\n band_tot[i, j, :, :] = np.sum(data_array_norm[:, idx, :, :], axis=1) / (f[j] - f[i])\n band_tot_bl[i, j, :, :] = np.sum(baseline_array_norm[:, idx, :, :], axis=1) / (f[j] - f[i])\n\n\n band_tot_bl1 = np.mean(band_tot_bl, axis=3) # average across time bins\n band_tot_bl2 = np.repeat(band_tot_bl1[:, :, :, None, :], band_tot_bl.shape[3], axis=3) # repeat same value across time\n return band_tot, band_tot_bl2, f[fidx]", "def preprocess_images(input_image, soften=None, fill_holes=None):\n ratio = get_scaling_ratio(input_image)\n if soften == None:\n soften = max(soften_amt_deafult * ratio, 1)\n if fill_holes == None:\n fill_holes = round(fill_holes_deafult * ratio)\n fill_holes = max(fill_holes, 1)\n\n # ensure that all points which are transparent have RGB values of 255 (will become white when\n # converted to non-transparent grayscale.)\n input_image = img_as_float32(input_image)\n if len(input_image.shape) == 3 and input_image.shape[2] == 4:\n input_image = rgba2rgb(input_image)\n gray_img = img_as_ubyte(rgb2gray(input_image))\n\n # get the otsu threshold after running a flood fill on the corners, so that those huge clumps of\n # dark pixels don't mess up the statistics too much (we only care about text!)\n thresh = threshold_otsu(\n fill_corners(gray_img, fill_value=255, thresh=5, tol=1, fill_below_thresh=True)\n )\n\n # n.b. here we are setting black pixels from the original image to have a value of 1 (effectively inverting\n # what you would get from a normal binarization, because the math gets easier this way)\n img_bin = img_as_ubyte(gray_img < thresh)\n \n # need to add clipping because of a weird case where the range of the\n # blurred imagewill be from -1 to 1.0000000004\n blurred = np.clip(gaussian(gray_img, soften), -1, 1)\n img_blur_bin = img_as_ubyte(img_as_ubyte(blurred) < thresh)\n\n # now, fill corners of binarized images with black (value 0)\n img_bin = fill_corners(\n img_bin, fill_value=0, thresh=1, tol=1, fill_below_thresh=False\n )\n img_blur_bin = fill_corners(\n img_blur_bin, fill_value=0, thresh=1, tol=1, fill_below_thresh=False\n )\n\n # run smoothing on the blurred-binarized image so we get blobs of text in neat lines\n kernel = np.ones((fill_holes, fill_holes), np.uint8)\n img_cleaned = binary_opening(binary_closing(img_blur_bin, kernel), kernel)\n\n # find rotation angle of cleaned, smoothed image. 
use that to correct the rotation of the unsmoothed image\n angle = find_rotation_angle(img_cleaned)\n img_cleaned_rot = rotate(img_cleaned, angle, order=0, mode=\"edge\") > 0\n img_bin_rot = rotate(img_bin, angle, order=0, mode=\"edge\") > 0\n\n return img_bin_rot, img_cleaned_rot, angle", "def bqa_fmask_func(qa):\n # Extracting cloud masks from BQA using np.right_shift() and np.bitwise_and()\n # Cloud (med & high confidence), then snow, then shadow, then fill\n # Low confidence clouds tend to be the FMask buffer\n fill_mask = np.bitwise_and(np.right_shift(qa, 0), 1) >= 1\n cloud_mask = np.bitwise_and(np.right_shift(qa, 4), 1) >= 1 # cloud bit\n cloud_mask &= np.bitwise_and(np.right_shift(qa, 5), 3) >= 2 # cloud conf.\n cloud_mask |= np.bitwise_and(np.right_shift(qa, 11), 3) >= 3 # cirrus\n shadow_mask = np.bitwise_and(np.right_shift(qa, 7), 3) >= 3\n snow_mask = np.bitwise_and(np.right_shift(qa, 9), 3) >= 3\n\n fmask = (fill_mask != True).astype(np.uint8)\n fmask[shadow_mask] = 2\n fmask[snow_mask] = 3\n fmask[cloud_mask] = 4\n\n return fmask", "def iterate_grey_level(prev_mask, new_g_disc, converter, \n num_grey_levels=256, upward=True):\n gl_delta = 1./num_grey_levels\n grey_level = new_g_disc/(num_grey_levels - 1)\n \n # Create desired spectrum.\n desired = desired_PSD_nd(\n new_g_disc*gl_delta, prev_mask.shape[0], prev_mask.ndim)\n desired_radial = converter.radially_average(desired)\n \n # Find error:\n corrected_sig = correct_signal(prev_mask, desired_radial, converter)\n error = np.abs(corrected_sig - prev_mask)\n \n # Make corrections:\n num_replacements = int(np.multiply.reduce(prev_mask.shape)*gl_delta)\n \n ## Identify worst zeros. This is different than BIPPSMA, because we \n ## have to check each replacement's neighbourhood to avoid clusters.\n replace_value = 0 if upward else 1\n replace_to = 1 - replace_value\n \n void = prev_mask == replace_value\n void_error = np.where(void, error, 0)\n void_error_order = np.argsort(-void_error, None)# descending.\n \n ## Replace:\n new_sig = prev_mask.copy()\n error_coords = np.unravel_index(void_error_order[:void.sum()], prev_mask.shape)\n \n # We need to make sure replacements don't cluster, by observing the local\n # means. We do that for the entire array - in NumPy. 
It's cheaper than\n # doing it individually per point in pure Python.\n half_window = 4\n window_size = (2*half_window + 1)\n window = np.full((window_size,)*prev_mask.ndim, 1/window_size**prev_mask.ndim)\n local_mean = ndi.convolve(prev_mask, window, mode='wrap')\n \n for coords in zip(*error_coords):\n if upward:\n crowded = local_mean[coords] > grey_level\n else:\n crowded = local_mean[coords] < grey_level\n \n if crowded:\n continue\n \n assert(new_sig[coords] == replace_value)\n new_sig[coords] = replace_to\n num_replacements -= 1\n if num_replacements == 0:\n break\n \n # Profit:\n return new_sig", "def mask5(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[3]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[4]).eq(value))\n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value) \n change_img1 = imagem.select(bandNames[2]).mask(mask.eq(1)).where(mask.eq(1), value) \n change_img2 = imagem.select(bandNames[3]).mask(mask.eq(1)).where(mask.eq(1), value) \n img_out = imagem.select(bandNames[1]).blend(change_img).blend(change_img1).blend(change_img2)\n return img_out", "def processframe(pilimage):\n # TODO: Idea on of overfilling\n # [[0,0,0],\n # [1,1,1],\n # [0,0,0]]\n # Keep this as template. aka pattern. use scipy measure and that s pattern to match all connecting\n # this gets all the fills. the rest is thrown into the pile of sets.\n # we assume index 0 as discarded (Can't really do much with black images.)\n numpyarrayfrompil = numpy.array(pilimage)\n # First we pass to regionprops\n props = createfillers(numpyarrayfrompil)\n # pass all the data we need now to the mapprops2color\n # returns a string which can be cerealised.\n return mapprops2color(props, numpyarrayfrompil, pilimage)", "def butter_bandpass_filter(data, lowcut, highcut, fs, order=5, axis=0): \n omega = 0.5 * fs\n low = lowcut / omega\n high = highcut / omega\n b, a = signal.butter(order, [low, high], btype='band')\n y = signal.lfilter(b, a, data, axis=0)\n return y", "def find_bandgap(bandsdata, number_electrons=None, fermi_energy=None):\n\n def nint(num):\n \"\"\"\n Stable rounding function\n \"\"\"\n if num > 0:\n return int(num + 0.5)\n else:\n return int(num - 0.5)\n\n if fermi_energy and number_electrons:\n raise EitherNumberOfElectronsOrFermiEnergyError()\n\n assert bandsdata.units == \"eV\"\n stored_bands = bandsdata.get_bands()\n\n if len(stored_bands.shape) == 3:\n # I write the algorithm for the generic case of having both the\n # spin up and spin down array\n\n # put all spins on one band per kpoint\n bands = np.concatenate(list(stored_bands), axis=1)\n else:\n bands = stored_bands\n\n # analysis on occupations:\n if fermi_energy is None:\n num_kpoints = len(bands)\n\n if number_electrons is None:\n try:\n _, stored_occupations = bandsdata.get_bands(also_occupations=True)\n except KeyError as exc:\n raise FermiEnergyOrOccupationsNotPresentError() from exc\n\n # put the occupations in the same order of bands, also in case of multiple bands\n if len(stored_occupations.shape) == 3:\n # I write the algorithm for the generic case of having both the\n # spin up and spin down array\n\n # put all spins on one band per kpoint\n occupations = np.concatenate(list(stored_occupations), axis=1)\n else:\n occupations = stored_occupations\n\n # now sort the bands by energy\n # Note: I am sort of assuming that I 
have an electronic ground state\n\n # sort the bands by energy, and reorder the occupations accordingly\n # since after joining the two spins, I might have unsorted stuff\n bands, occupations = (\n np.array(y)\n for y in zip(\n *[\n zip(*j)\n for j in [\n sorted(\n zip(i[0].tolist(), i[1].tolist()),\n key=lambda x: x[0],\n )\n for i in zip(bands, occupations)\n ]\n ]\n )\n )\n number_electrons = int(\n round(sum([sum(i) for i in occupations]) / num_kpoints)\n )\n\n homo_indexes = [\n np.where(np.array([nint(_) for _ in x]) > 0)[0][-1]\n for x in occupations\n ]\n if (\n len(set(homo_indexes)) > 1\n ): # there must be intersections of valence and conduction bands\n return False, None, None, None\n else:\n homo = [_[0][_[1]] for _ in zip(bands, homo_indexes)]\n try:\n lumo = [_[0][_[1] + 1] for _ in zip(bands, homo_indexes)]\n except IndexError as exc:\n raise NeedMoreBandsError() from exc\n\n else:\n bands = np.sort(bands)\n number_electrons = int(number_electrons)\n\n # find the zero-temperature occupation per band (1 for spin-polarized\n # calculation, 2 otherwise)\n number_electrons_per_band = 4 - len(stored_bands.shape) # 1 or 2\n # gather the energies of the homo band, for every kpoint\n homo = [\n i[number_electrons / number_electrons_per_band - 1] for i in bands\n ] # take the nth level\n try:\n # gather the energies of the lumo band, for every kpoint\n lumo = [\n i[number_electrons / number_electrons_per_band] for i in bands\n ] # take the n+1th level\n except IndexError as exc:\n raise NeedMoreBandsError() from exc\n\n if number_electrons % 2 == 1 and len(stored_bands.shape) == 2:\n # if #electrons is odd and we have a non spin polarized calculation\n # it must be a metal and I don't need further checks\n return False, None, None, None\n\n # if the nth band crosses the (n+1)th, it is an insulator\n gap = min(lumo) - max(homo)\n if gap == 0.0:\n return False, 0.0, None, None\n elif gap < 0.0:\n return False, gap, None, None\n else:\n return True, gap, max(homo), min(lumo)\n\n # analysis on the fermi energy\n else:\n # reorganize the bands, rather than per kpoint, per energy level\n\n # I need the bands sorted by energy\n bands.sort()\n\n levels = bands.transpose()\n max_mins = [(max(i), min(i)) for i in levels]\n\n if fermi_energy > bands.max():\n raise FermiEnergyAndBandsEnergiesError(where=\"above\")\n if fermi_energy < bands.min():\n raise FermiEnergyAndBandsEnergiesError(where=\"below\")\n\n # one band is crossed by the fermi energy\n if any(i[1] < fermi_energy and fermi_energy < i[0] for i in max_mins):\n return False, 0.0, None, None\n\n # case of semimetals, fermi energy at the crossing of two bands\n # this will only work if the dirac point is computed!\n elif any(i[0] == fermi_energy for i in max_mins) and any(\n i[1] == fermi_energy for i in max_mins\n ):\n return False, 0.0, None, None\n # insulating case\n else:\n # Take the max of the band maxima below the fermi energy.\n homo = max([i[0] for i in max_mins if i[0] < fermi_energy])\n # Take the min of the band minima above the fermi energy.x\n lumo = min([i[1] for i in max_mins if i[1] > fermi_energy])\n\n gap = lumo - homo\n if gap <= 0.0:\n raise WrongCodeError()\n return True, gap, homo, lumo", "def fill_forward(df):\n df = df.fillna(method='ffill')\n df = df.fillna(method='bfill').fillna(0)\n return df", "def apply_bandpass_filter_timeseries(self, folder_name, indices, start_stop_freq, stop_stop_freq):\n (x_index, y_index) = indices\n photo_list = self.get_photo_list(folder_name)\n\n ts = 
self.get_pixel_timeseries(folder_name, (x_index, y_index))\n self.plot_fft_pixel_timeseries(folder_name, ts, str(x_index) + '_' + str(y_index) + 'pre_butterworth')\n n = len(ts)\n frequency = self.get_sampling_frequency(folder_name)\n d = 1.0 / frequency # 'sample spacing'\n fig, ax = plt.subplots()\n sample_freqs = np.fft.rfftfreq(n, d)\n fourier = np.fft.rfft(ts)\n print(sample_freqs)\n nyquist = frequency / 2.0\n\n start_stop_band = start_stop_freq / nyquist\n stop_stop_band = stop_stop_freq / nyquist\n\n print(start_stop_band)\n print(stop_stop_band)\n\n sos = sgnl.butter(2, Wn=[start_stop_band, stop_stop_band], btype='bandstop', output='sos')\n filtered = sgnl.sosfilt(sos, ts)\n self.plot_fft_pixel_timeseries(folder_name, filtered, str(x_index) + '_' + str(y_index) + 'post_butterworth')\n fig, ax = plt.subplots()\n indices = self.get_indices_from_filenames(folder_name)\n index_dates = dates.date2num(indices)\n ax.plot_date(index_dates, ts, xdate=True, linestyle='solid', marker='None',\n label=str(x_index) + ' , ' + str(y_index))\n ax.plot_date(index_dates, filtered, xdate=True, linestyle='solid', marker='None',\n label=str(x_index) + ' , ' + str(y_index) + ' filtered')\n\n ax.legend()\n ax.grid(b=True, which='major', color='#666666', linestyle='-')\n\n # Show the minor grid lines with very faint and almost transparent grey lines\n ax.minorticks_on()\n ax.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.2)\n fig.set_figwidth(40)\n fig.savefig(self.parent_folder + 'analysis/timeseries_filtered_' + str(x_index) + '_' + str(y_index) + '.png')\n fig.savefig(self.parent_folder + 'analysis/timeseries_filtered_' + str(x_index) + '_' + str(y_index) + '.svg')\n fig.clf()", "def calbands( band = 0, tmo = 30 ) :\n optimizeThresholds(band,tmo)\n flattenPhases(band,tmo)\n calibrateSpectra(band=band,tmo=tmo)", "def mask3(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).eq(value))\n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[1]).blend(change_img)\n return img_out", "def _build_multiband_mask(data, tractor, filt2pixscale, fill_value=0.0,\n threshmask=0.01, r50mask=0.05, maxshift=10,\n relmaxshift=0.1,\n sigmamask=3.0, neighborfactor=1.0, verbose=False):\n import numpy.ma as ma\n from copy import copy\n from skimage.transform import resize\n from legacyhalos.mge import find_galaxy\n from legacyhalos.misc import srcs2image, ellipse_mask\n\n import matplotlib.pyplot as plt\n from astropy.visualization import simple_norm\n\n bands, refband = data['bands'], data['refband']\n #residual_mask = data['residual_mask']\n\n #nbox = 5\n #box = np.arange(nbox)-nbox // 2\n #box = np.meshgrid(np.arange(nbox), np.arange(nbox))[0]-nbox//2\n\n xobj, yobj = np.ogrid[0:data['refband_height'], 0:data['refband_width']]\n\n # If the row-index of the central galaxy is not provided, use the source\n # nearest to the center of the field.\n if 'galaxy_indx' in data.keys():\n galaxy_indx = np.atleast_1d(data['galaxy_indx'])\n else:\n galaxy_indx = np.array([np.argmin((tractor.bx - data['refband_height']/2)**2 +\n (tractor.by - data['refband_width']/2)**2)])\n data['galaxy_indx'] = np.atleast_1d(galaxy_indx)\n data['galaxy_id'] = ''\n\n #print('Import hack!')\n #norm = simple_norm(img, 'log', min_percent=0.05, clip=True)\n #import matplotlib.pyplot as plt ; from astropy.visualization import simple_norm\n\n ## Get 
the PSF sources.\n #psfindx = np.where(tractor.type == 'PSF')[0]\n #if len(psfindx) > 0:\n # psfsrcs = tractor.copy()\n # psfsrcs.cut(psfindx)\n #else:\n # psfsrcs = None\n\n def tractor2mge(indx, factor=1.0):\n # Convert a Tractor catalog entry to an MGE object.\n class MGEgalaxy(object):\n pass\n\n default_majoraxis = tractor.diam_init[indx] * 60 / 2 / filt2pixscale[refband] # [pixels]\n default_pa = tractor.pa_init[indx]\n default_ba = tractor.ba_init[indx]\n #default_theta = (270 - default_pa) % 180\n #default_eps = 1 - tractor.ba_init[indx]\n\n #if tractor.sga_id[indx] > -1:\n if tractor.type[indx] == 'PSF' or tractor.shape_r[indx] < 2:\n pa = tractor.pa_init[indx]\n ba = tractor.ba_init[indx]\n # take away the extra factor of 2 we put in in read_sample()\n r50 = tractor.diam_init[indx] * 60 / 2 / 2\n if r50 < 5:\n r50 = 5.0 # minimum size, arcsec\n majoraxis = factor * r50 / filt2pixscale[refband] # [pixels]\n #majoraxis = factor * tractor.diam_init[indx] * 60 / 2 / 2 / filt2pixscale[refband] # [pixels]\n else:\n ee = np.hypot(tractor.shape_e1[indx], tractor.shape_e2[indx])\n ba = (1 - ee) / (1 + ee)\n pa = 180 - (-np.rad2deg(np.arctan2(tractor.shape_e2[indx], tractor.shape_e1[indx]) / 2))\n pa = pa % 180\n\n # can be zero (or very small) if fit as a PSF or REX\n if tractor.shape_r[indx] > 1:\n majoraxis = factor * tractor.shape_r[indx] / filt2pixscale[refband] # [pixels]\n else:\n majoraxis = factor * tractor.diam_init[indx] * 60 / 2 / 2 / filt2pixscale[refband] # [pixels]\n\n mgegalaxy = MGEgalaxy()\n \n mgegalaxy.xmed = tractor.by[indx]\n mgegalaxy.ymed = tractor.bx[indx]\n mgegalaxy.xpeak = tractor.by[indx]\n mgegalaxy.ypeak = tractor.bx[indx]\n\n # never use the Tractor geometry (only the centroid)\n # https://portal.nersc.gov/project/cosmo/temp/ioannis/virgofilaments-html/215/NGC5584/NGC5584.html\n if True:\n mgegalaxy.eps = 1-ba\n mgegalaxy.pa = pa\n mgegalaxy.theta = (270 - pa) % 180\n mgegalaxy.majoraxis = majoraxis\n else:\n mgegalaxy.eps = 1 - default_ba\n mgegalaxy.pa = default_pa\n mgegalaxy.theta = (270 - default_pa) % 180\n mgegalaxy.majoraxis = default_majoraxis\n\n # always restore all pixels within the nominal / initial size of the galaxy\n #objmask = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True\n # default_majoraxis,\n # default_majoraxis * (1-default_eps), \n # np.radians(default_theta-90), xobj, yobj)\n #objmask = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True\n # default_majoraxis, default_majoraxis, 0.0, xobj, yobj)\n\n objmask = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True\n mgegalaxy.majoraxis,\n mgegalaxy.majoraxis * (1-mgegalaxy.eps), \n np.radians(mgegalaxy.theta-90), xobj, yobj)\n\n # central 10% pixels can override the starmask\n objmask_center = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True\n 0.1*mgegalaxy.majoraxis,\n 0.1*mgegalaxy.majoraxis * (1-mgegalaxy.eps), \n np.radians(mgegalaxy.theta-90), xobj, yobj)\n\n return mgegalaxy, objmask, objmask_center\n\n # Now, loop through each 'galaxy_indx' from bright to faint.\n data['mge'] = []\n for ii, central in enumerate(galaxy_indx):\n print('Determing the geometry for galaxy {}/{}.'.format(\n ii+1, len(galaxy_indx)))\n\n # [1] Determine the non-parametric geometry of the galaxy of interest\n # in the reference band. First, subtract all models except the galaxy\n # and galaxies \"near\" it. 
Also restore the original pixels of the\n # central in case there was a poor deblend.\n largeshift = False\n mge, centralmask, centralmask2 = tractor2mge(central, factor=1.0)\n #plt.clf() ; plt.imshow(centralmask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-mask.png') ; pdb.set_trace()\n\n iclose = np.where([centralmask[np.int(by), np.int(bx)]\n for by, bx in zip(tractor.by, tractor.bx)])[0]\n \n srcs = tractor.copy()\n srcs.cut(np.delete(np.arange(len(tractor)), iclose))\n model = srcs2image(srcs, data['{}_wcs'.format(refband.lower())],\n band=refband.lower(),\n pixelized_psf=data['{}_psf'.format(refband.lower())])\n\n img = data[refband].data - model\n img[centralmask] = data[refband].data[centralmask]\n\n mask = np.logical_or(ma.getmask(data[refband]), data['residual_mask'])\n #mask = np.logical_or(data[refband].mask, data['residual_mask'])\n\n # restore the central pixels but not the masked stellar pixels\n centralmask[np.logical_and(data['starmask'], np.logical_not(centralmask2))] = False\n mask[centralmask] = False\n\n img = ma.masked_array(img, mask)\n ma.set_fill_value(img, fill_value)\n #if ii == 1:\n # pdb.set_trace()\n\n mgegalaxy = find_galaxy(img, nblob=1, binning=1, quiet=False)#, plot=True) ; plt.savefig('cosmo-www/tmp/junk-mge.png')\n #plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('junk-mask.png')\n ##plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n #pdb.set_trace()\n\n # Did the galaxy position move? If so, revert back to the Tractor geometry.\n if np.abs(mgegalaxy.xmed-mge.xmed) > maxshift or np.abs(mgegalaxy.ymed-mge.ymed) > maxshift:\n print('Large centroid shift (x,y)=({:.3f},{:.3f})-->({:.3f},{:.3f})'.format(\n mgegalaxy.xmed, mgegalaxy.ymed, mge.xmed, mge.ymed))\n print(' Reverting to the default geometry and the Tractor centroid.')\n largeshift = True\n mgegalaxy = copy(mge)\n\n radec_med = data['{}_wcs'.format(refband.lower())].pixelToPosition(\n mgegalaxy.ymed+1, mgegalaxy.xmed+1).vals\n radec_peak = data['{}_wcs'.format(refband.lower())].pixelToPosition(\n mgegalaxy.ypeak+1, mgegalaxy.xpeak+1).vals\n mge = {\n 'largeshift': largeshift,\n 'ra': tractor.ra[central], 'dec': tractor.dec[central],\n 'bx': tractor.bx[central], 'by': tractor.by[central],\n #'mw_transmission_g': tractor.mw_transmission_g[central],\n #'mw_transmission_r': tractor.mw_transmission_r[central],\n #'mw_transmission_z': tractor.mw_transmission_z[central],\n 'ra_moment': radec_med[0], 'dec_moment': radec_med[1],\n #'ra_peak': radec_med[0], 'dec_peak': radec_med[1]\n }\n for key in ('eps', 'majoraxis', 'pa', 'theta', 'xmed', 'ymed', 'xpeak', 'ypeak'):\n mge[key] = np.float32(getattr(mgegalaxy, key))\n if key == 'pa': # put into range [0-180]\n mge[key] = mge[key] % np.float32(180)\n data['mge'].append(mge)\n\n #if False:\n # #plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n # plt.clf() ; mgegalaxy = find_galaxy(img, nblob=1, binning=1, quiet=True, plot=True)\n # plt.savefig('/mnt/legacyhalos-data/debug.png')\n\n # [2] Create the satellite mask in all the bandpasses. 
Use srcs here,\n # which has had the satellites nearest to the central galaxy trimmed\n # out.\n print('Building the satellite mask.')\n satmask = np.zeros(data[refband].shape, bool)\n for filt in bands:\n # do not let GALEX and WISE contribute to the satellite mask\n if data[filt].shape != satmask.shape:\n continue\n \n cenflux = getattr(tractor, 'flux_{}'.format(filt.lower()))[central]\n satflux = getattr(srcs, 'flux_{}'.format(filt.lower()))\n if cenflux <= 0.0:\n #raise ValueError('Central galaxy flux is negative!')\n print('Central galaxy flux is negative! Proceed with caution...')\n #pdb.set_trace()\n \n satindx = np.where(np.logical_or(\n (srcs.type != 'PSF') * (srcs.shape_r > r50mask) *\n (satflux > 0.0) * ((satflux / cenflux) > threshmask),\n srcs.ref_cat == 'R1'))[0]\n #satindx = np.where(srcs.ref_cat == 'R1')[0]\n #if np.isin(central, satindx):\n # satindx = satindx[np.logical_not(np.isin(satindx, central))]\n if len(satindx) == 0:\n #raise ValueError('All satellites have been dropped!')\n #print('Warning! All satellites have been dropped from band {}!'.format(filt))\n print('Note: no satellites to mask in band {}.'.format(filt))\n else:\n satsrcs = srcs.copy()\n #satsrcs = tractor.copy()\n satsrcs.cut(satindx)\n satimg = srcs2image(satsrcs, data['{}_wcs'.format(filt.lower())],\n band=filt.lower(),\n pixelized_psf=data['{}_psf'.format(filt.lower())])\n thissatmask = satimg > sigmamask*data['{}_sigma'.format(filt.lower())]\n #if filt == 'FUV':\n # plt.clf() ; plt.imshow(thissatmask, origin='lower') ; plt.savefig('junk-{}.png'.format(filt.lower()))\n # #plt.clf() ; plt.imshow(data[filt], origin='lower') ; plt.savefig('junk-{}.png'.format(filt.lower()))\n # pdb.set_trace()\n if satmask.shape != satimg.shape:\n thissatmask = resize(thissatmask*1.0, satmask.shape, mode='reflect') > 0\n\n satmask = np.logical_or(satmask, thissatmask)\n #if True:\n # import matplotlib.pyplot as plt\n # plt.clf() ; plt.imshow(np.log10(satimg), origin='lower') ; plt.savefig('debug.png')\n # plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('debug.png')\n ## #plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n # pdb.set_trace()\n\n #print(filt, np.sum(satmask), np.sum(thissatmask))\n\n #plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-satmask.png')\n \n # [3] Build the final image (in each filter) for ellipse-fitting. First,\n # subtract out the PSF sources. Then update the mask (but ignore the\n # residual mask). 
Finally convert to surface brightness.\n #for filt in ['W1']:\n for filt in bands:\n thismask = ma.getmask(data[filt])\n if satmask.shape != thismask.shape:\n _satmask = (resize(satmask*1.0, thismask.shape, mode='reflect') > 0) == 1.0\n _centralmask = (resize(centralmask*1.0, thismask.shape, mode='reflect') > 0) == 1.0\n mask = np.logical_or(thismask, _satmask)\n mask[_centralmask] = False\n else:\n mask = np.logical_or(thismask, satmask)\n mask[centralmask] = False\n #if filt == 'r':\n # #plt.imshow(_satmask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-satmask-{}.png'.format(filt))\n # plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-mask-{}.png'.format(filt))\n # plt.clf() ; plt.imshow(img, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-img-{}.png'.format(filt))\n # pdb.set_trace()\n\n varkey = '{}_var'.format(filt.lower())\n imagekey = '{}_masked'.format(filt.lower())\n psfimgkey = '{}_psfimg'.format(filt.lower())\n thispixscale = filt2pixscale[filt]\n if imagekey not in data.keys():\n data[imagekey], data[varkey], data[psfimgkey] = [], [], []\n\n img = ma.getdata(data[filt]).copy()\n \n # Get the PSF sources.\n psfindx = np.where((tractor.type == 'PSF') * (getattr(tractor, 'flux_{}'.format(filt.lower())) / cenflux > threshmask))[0]\n if len(psfindx) > 0 and filt.upper() != 'W3' and filt.upper() != 'W4': \n #if len(psfindx) > 0 and filt.upper() != 'NUV' and filt.upper() != 'FUV' and filt.upper() != 'W3' and filt.upper() != 'W4':\n psfsrcs = tractor.copy()\n psfsrcs.cut(psfindx)\n else:\n psfsrcs = None\n \n if psfsrcs:\n psfimg = srcs2image(psfsrcs, data['{}_wcs'.format(filt.lower())],\n band=filt.lower(),\n pixelized_psf=data['{}_psf'.format(filt.lower())])\n if False:\n #import fitsio ; fitsio.write('junk-psf-{}.fits'.format(filt.lower()), data['{}_psf'.format(filt.lower())].img, clobber=True)\n fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)\n im = ax1.imshow(np.log10(img), origin='lower') ; fig.colorbar(im, ax=ax1)\n im = ax2.imshow(np.log10(psfimg), origin='lower') ; fig.colorbar(im, ax=ax2)\n im = ax3.imshow(np.log10(data['{}_psf'.format(filt.lower())].img), origin='lower') ; fig.colorbar(im, ax=ax3)\n im = ax4.imshow(img-psfimg, origin='lower') ; fig.colorbar(im, ax=ax4)\n plt.savefig('desi-users/ioannis/tmp/qa-psf-{}.png'.format(filt.lower()))\n if filt == 'r':# or filt == 'r':\n pdb.set_trace()\n img -= psfimg\n else:\n psfimg = np.zeros((2, 2), 'f4')\n\n data[psfimgkey].append(psfimg)\n \n img = ma.masked_array((img / thispixscale**2).astype('f4'), mask) # [nanomaggies/arcsec**2]\n var = data['{}_var_'.format(filt.lower())] / thispixscale**4 # [nanomaggies**2/arcsec**4]\n\n # Fill with zeros, for fun--\n ma.set_fill_value(img, fill_value)\n #if ii == 0 and filt == 'r': #filt == 'W1' or \n # plt.clf() ; plt.imshow(img, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-img-{}.png'.format(filt.lower()))\n # plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-mask-{}.png'.format(filt.lower()))\n ##### plt.clf() ; plt.imshow(thismask, origin='lower') ; plt.savefig('junk-thismask-{}.png'.format(filt.lower()))\n # pdb.set_trace()\n \n data[imagekey].append(img)\n data[varkey].append(var)\n\n #test = data['r_masked'][0]\n #plt.clf() ; plt.imshow(np.log(test.clip(test[mgegalaxy.xpeak, mgegalaxy.ypeak]/1e4)), origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n #pdb.set_trace()\n\n # Cleanup?\n for filt in bands:\n del data[filt]\n del 
data['{}_var_'.format(filt.lower())]\n\n return data", "def DrawBands(self, count):\n value = self.little[0]\n mobile_average = float(sum([float(self.little[i])\n for i in range(len(self.little))])) / float(self.period)\n standard_derivation = sqrt(sum([pow(self.little[i] - mobile_average, 2)\n for i in range(len(self.little))]) / self.period)\n upper_band = mobile_average + (standard_derivation * self.sd_coef)\n lower_band = mobile_average - (standard_derivation * self.sd_coef)\n self.upper.insert(0, upper_band)\n self.lower.insert(0, lower_band)\n if len(self.upper) >= self.period:\n self.upper.pop()\n if len(self.lower) >= self.period:\n self.lower.pop()\n if count >= self.period:\n for i in range(len(self.little) - 1):\n self.canvas.create_line((i * self.incr / 1.725) + self.incr * 4,\n self.height - self.incr * 4 + (self.little[i] - 1) * 5000 - 200,\n (i * self.incr / 1.725) + self.incr * 4 + self.incr / 1.725,\n self.height - self.incr * 4 + (self.little[i + 1] - 1) * 5000 - 200,\n fill = \"#FFFF00\", width = 2)\n for i in range(len(self.upper) - 1):\n self.canvas.create_line((i * self.incr / 1.635) + self.incr * 4,\n self.height - self.incr * 4 + (self.upper[i] - 1) * 5000 - 200,\n (i * self.incr / 1.635) + self.incr * 4 + self.incr / 1.635,\n self.height - self.incr * 4 + (self.upper[i + 1] - 1) * 5000 - 200,\n fill = \"#FF6600\", width = 3)\n self.canvas.create_line((i * self.incr / 1.635) + self.incr * 4,\n self.height - self.incr * 4 + (self.lower[i] - 1) * 5000 - 200,\n (i * self.incr / 1.635) + self.incr * 4 + self.incr / 1.635,\n self.height - self.incr * 4 + (self.lower[i + 1] - 1) * 5000 - 200,\n fill = \"#FF0000\", width = 3)", "def ledaps(image):\n cmask = image.select('QA')\n\n valid_data_mask = tools.compute_bits(cmask, 1, 1, 'valid_data')\n cloud_mask = tools.compute_bits(cmask, 2, 2, 'cloud')\n snow_mask = tools.compute_bits(cmask, 4, 4, 'snow')\n\n good_pix = cloud_mask.eq(0).And(valid_data_mask.eq(0)).And(snow_mask.eq(0))\n result = image.updateMask(good_pix)\n\n return result", "def applyWindow5years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-3):\n img_out = img_out.addBands(mask5(imagem, value,bandNames[(i-1):(i+4)]))\n img_out = img_out.addBands(imagem.select(bandNames[-3]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out" ]
[ "0.7110317", "0.704121", "0.5153824", "0.51472473", "0.50976783", "0.5092685", "0.50910985", "0.5038838", "0.4983372", "0.4950319", "0.48741138", "0.48643586", "0.4801015", "0.47917843", "0.47886074", "0.47822264", "0.4780743", "0.47651857", "0.47144574", "0.4703689", "0.4698323", "0.4694441", "0.46926516", "0.46833235", "0.46519268", "0.4648738", "0.464113", "0.4614552", "0.4609973", "0.46032184" ]
0.745405
0
Function to apply an incidence filter. The incidence filter finds all pixels that changed more than numChangesCutoff times and are connected to fewer than connectedPixelCutoff pixels, then replaces those pixels with the MODE value of that pixel position in the stack of years.
def applyIncidenceFilter(image, bandNames, classDictionary, numChangesCutoff = 8, connectedPixelCutoff=6): #Calculate the number of times a pixel changes throughout the time series and determine if it is over the numChangesCutoff num_changes = calculateNumberOfChanges(image, bandNames) too_many_changes = num_changes.gt(numChangesCutoff) #Get binary images of the land cover classifications for the current year binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary) #Calculate the number of connected pixels for each land cover class and year, reduce to a single band image representing the number #of connected pixels of the same land cover class as the central pixel, and determine if it is over the connectedPixelCutoff connected_pixel_count = ee.ImageCollection(binary_class_images.map(lambda x: x.mask(x).connectedPixelCount(100,False).reduce(ee.Reducer.sum()).lt(connectedPixelCutoff))) #Get a bitwiseAnd determination if the number of connected pixels <= connectedPixelCutoff and the number of changes > numChangesCutoff incidence_filter = ee.ImageCollection(connected_pixel_count.map(lambda x: x.bitwiseAnd(too_many_changes))).toBands().rename(bandNames) #Get an image that represents the mode of the land cover classes in each pixel mode_image = image.reduce(ee.Reducer.mode()) #Replace pixels of image where incidence_filter is True with mode_image incidence_filtered = image.where(incidence_filter, mode_image) return incidence_filtered
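A minimal usage sketch for the document above (the asset path, band names, and class codes are hypothetical; it assumes the Earth Engine Python API is initialized and that calculateNumberOfChanges and the npv helper module used inside applyIncidenceFilter are available in the same module):

import ee
ee.Initialize()

# Hypothetical multi-band land cover stack: one classification band per year.
stack = ee.Image('users/example/landcover_stack')
bandNames = ['classification_%d' % y for y in range(2001, 2021)]
classDictionary = ee.Dictionary({'forest': 1, 'pasture': 2, 'agriculture': 3})

# Pixels that changed more than 8 times and sit in connected patches of fewer
# than 6 same-class pixels are replaced by the per-pixel mode across years.
filtered = applyIncidenceFilter(stack, bandNames, classDictionary,
                                numChangesCutoff=8, connectedPixelCutoff=6)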
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyFrequencyFilter(image, yearBandNames, classDictionary, filterParams): \n #Grab land cover classes as a list of strings\n lc_classes = classDictionary.keys().getInfo()\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Get the frequency of each class through the years by reducing the image collection to an image using the sum reducer\n class_frequency = binary_class_images.reduce(ee.Reducer.sum().unweighted()).rename(lc_classes)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Define an image to add bands with frequency filter applied\n out_img = ee.Image()\n \n #Loop through years\n for yearBand in yearBandNames:\n #Select the target year from the image\n yearImage = image.select(yearBand)\n \n #Loop through land cover classes in filterParams\n for lc_class in lc_classes:\n #Get the minimum occurance allowed in that land cover class\n min_occurance = filterParams.get(lc_class)\n \n #Find if the land cover class had less than the number of min_occurances in each pixel\n change_class = class_frequency.select(lc_class).lt(ee.Image.constant(min_occurance))\n \n #If change_class==1, then replace that pixel with the mode of all the years in that pixel\n #This filter is only applied to pixels of this land cover class\n #First mask yearImage to pixels of this land cover class, then get the union of pixels where change_class==1,\n #if both conditions are true, then the pixel is replaced with the mode\n yearImage = yearImage.where(yearImage.eq(ee.Number(classDictionary.get(lc_class))).bitwiseAnd(change_class),mode_image)\n #Rename yearImage to bandName\n yearImage = yearImage.rename(yearBand)\n #Append to output image\n out_img = out_img.addBands(yearImage)\n \n return out_img", "def mask_incoherent(self):\n self.MaskPrefix = 'i' + self.MaskPrefix\n print('Masking pixel values where .msk value is less than {0}...'.format(threshold))\n for ig in self.Set:\n igram = self.load_ma(ig)\n mskFile = ig.Path[:-3] + 'msk'\n coherence = roipy.tools.load_half(ig, 2, mskFile)\n incoherent = ma.masked_less(coherence, self.Cothresh)\n igram[incoherent.mask] = ma.masked\n mskFile = self.MaskPrefix + 'Mask_' + ig.Name[:-4]\n np.save(os.path.join(self.ProcDir, mskFile), igram.mask)\n print(mskFile)\n\n print('Done')", "def apply_filter(self, filter_arg):\n filtered_entries = self.visual.apply_filter(filter_arg, self.get_current_entries())\n # idxs = self.selector.select_by_objects(filtered_entries, yield_ones_index=True)\n self.visual.print_entries_enum(filtered_entries, None)\n # self.list(idxs)", "def offset_ion_cut(df): \n\n offset_threshold = quality_parameters['offset_ion_threshold']\n \n truth_array = pd.Series(data=True, index=df.index)\n for suffix in ion_channel_labels:\n offset_column_name = 'offset_ion{}'.format(suffix)\n offset = abs( df[offset_column_name] )\n truth_array = truth_array & (offset < offset_threshold)\n \n df['offset_ion_cut'] = truth_array\n \n return None", "def apply(self,src,dst):\n cv2.filter2D(src,-1,self._kernel,dst) #The second argument specifies the per-channel depth of the destination image\n #(such as cv2.CV_8U for 8 bits per channel). 
A negative value (as used here) means\n #that the destination image has the same depth as the source image.", "def filter_segmap(segimage, id_keep, output, blur_kernel=\"\", threshold=0.1):\n seg = pyfits.getdata(segimage)\n mask = np.zeros(seg.shape, 'int')\n # Loop through all IDs... is there a better way??\n for x in id_keep:\n mask = np.where(seg==x, 1, mask)\n seg_masked = np.where(mask==1, 1, 0)\n if os.path.exists(output):\n os.system('rm %s' % output)\n # Now convolve with a blurring kernel if desired\n if len(blur_kernel):\n mask = blur_mask(mask, blur_kernel, threshold=threshold)\n # k = pyfits.getdata(blur_kernel)\n # mask = hconvolve.hconvolve(mask, )\n pyfits.append(output, data=seg_masked, header=pyfits.getheader(segimage))\n return mask", "def filter_Nofinding_imgs(ori_ann_file, filter_info_file, out_file,\n score_thr=0.08, key_name='class'):\n ori_ann_infos = mmcv.load(ori_ann_file)\n df = pd.read_csv(filter_info_file)\n\n ori_image_infos = {os.path.splitext(info['file_name'])[0]: info\n for info in ori_ann_infos['images']}\n print('before filter, there are {} images.'.format(len(ori_image_infos)))\n new_images = []\n for idx, row in df.iterrows():\n image_name = row['image_id']\n cls = row[key_name]\n if cls >= score_thr:\n new_images.append(ori_image_infos[image_name])\n print('after filter, there are {} images.'.format(len(new_images)))\n print('saving new test annotations into file')\n ori_ann_infos['images'] = new_images\n mmcv.dump(ori_ann_infos, out_file)\n print('all done!')", "def sepfirnd(input,filters,axes,output=None,mode='reflect',cval=0.0,origin=0):\n if output is None:\n output = np.empty_like(input)\n tmp = output\n if np.isscalar(filters[0]):\n filters = [np.asarray(filters)]\n if np.isscalar(axes):\n axes = [axes]\n if len(axes) > 1:\n tmp = np.empty_like(output)\n if len(filters) == 1:\n filters = [filters[0]]*len(axes)\n if len(axes) & 1 == 1: #pre-swap so that last write goes to output\n output,tmp = tmp,output \n for filt,ax in zip(filters,axes):\n output,tmp = tmp,output #swap buffers\n convolve1d(input,filt,ax,output,mode,cval,origin)\n input = output\n return output", "def filteringEngine(original, debug=False):\n\n processedImage1 = filterNotInRange(original, LABmin_healthy, LABmax_healthy, cv2.COLOR_BGR2LAB)\n processedImage2 = filterNotInRange(original, LABmin_terrain, LABmax_terrain, cv2.COLOR_BGR2LAB)\n # Image containing many FPs\n processedImage3 = filterNotInRange(original, HSVmin_yellow, HSVmax_yellow, cv2.COLOR_BGR2HSV)\n\n sum1 = cv2.add(processedImage1, processedImage2)\n sub1 = differentialNode(original, sum1)\n\n processedImage = filterNotInRange(sub1, LABmin, LABmax, cv2.COLOR_BGR2LAB)\n # sum2 = cv2.add(processedImage, processedImage3)\n\n kernel = np.ones((6, 6), np.uint8)\n temp = closing(processedImage, kernel)\n\n kernel = np.ones((3, 3), np.uint8)\n out = opening(temp, kernel)\n\n if debug:\n cv2.imshow('processedImage1', processedImage1)\n cv2.imshow('processedImage2', processedImage2)\n cv2.imshow('processedImage3', processedImage3)\n cv2.imshow('sum1', sum1)\n cv2.imshow('sub1', sub1)\n cv2.imshow('processedImage', processedImage)\n cv2.imshow('sum2', sum2)\n cv2.imshow('out', out)\n\n return out", "def equalize_exposure(image, iterations=1, kernel_size=None, min_object_size=500, dark_objects=True, stretch=False):\n\n # Housekeeping\n img = img_as_float(image.copy())\n\n if stretch is True:\n img = img/img.max()\n\n if dark_objects is False:\n img = 1-img # invert\n\n img_in = img.copy() # for use later\n\n if kernel_size is 
None:\n kernel_size = np.int(max(image.shape[0], image.shape[1])/10)\n\n # mean filter kernel\n kernel = morphology.disk(int(kernel_size/2))\n\n # identify objects to ignore\n if kernel_size % 2 is 0:\n block_size = kernel_size + 1\n else:\n block_size = kernel_size\n\n #objects = ~filters.threshold_adaptive(img, block_size, offset = 0.01*img.max()) # deprecated function\n objects = img > filters.threshold_local(img, block_size, offset = 0.01*img.max())\n objects = morphology.remove_small_objects(objects, min_size = min_object_size)\n\n # Correct Exposure x times\n i = 0\n while i < iterations:\n # Global mean\n img_mean = np.ma.masked_array(img, mask=objects).mean()\n\n # global means\n local_means = filters.rank.mean(img, selem=kernel, mask=~objects)\n local_means = filters.gaussian(local_means, kernel_size)\n\n # Correct Image\n img += (img_mean - local_means)\n img[img>1] = 1 # for compatibilty with img_as_float\n img[img<0] = 0 # for compatibilty with img_as_float\n i += 1\n\n out = img_as_float(img)\n\n return(out)", "def convolve_im(im: np.array,\n kernel: np.array,\n verbose=True):\n ### START YOUR CODE HERE ### (You can change anything inside this block)\n \"\"\"\n\tcompared to the 4a solution this just adds padding to the filter if its smaller than the image\n\tthis is done by using the second parameter in fft.fft2 \n\t\n\tfirst it applies fourier transforms on the kernel and the image\n\tthen it sets the image to be the pointwise multiplication of the transforms\n\n the image is inverse fourier transformed and filtered for real values\n the domain image is shifted and taken the absolute value of\n the fourier transform of the image and kernel are also shifted and set to be the absolute value\n\tlastly everything is displayed in the subplots\n \"\"\"\n conv_result = im \n \n if verbose:\n fftKernel=np.fft.fft2(kernel,im.shape)\n fftImage=np.fft.fft2(conv_result)\n\t\t\n\t\t\n\t\t\n conv_result=np.multiply(fftImage,fftKernel)\n fftImageTransformed=conv_result\n\t\t\n \n conv_result=np.fft.ifft2(conv_result)\n \n conv_result=np.real(conv_result)\n\n fftImageTransformed=np.fft.fftshift(fftImageTransformed)\n fftImage=np.fft.fftshift(fftImage)\n fftKernel=np.fft.fftshift(fftKernel)\n\n fftImageTransformed=np.absolute(fftImageTransformed)\n fftImage=np.absolute(fftImage)\n fftKernel=np.absolute(fftKernel)\n\t\t\n\t\t\n # Use plt.subplot to place two or more images beside eachother\n plt.figure(figsize=(20, 4))\n # plt.subplot(num_rows, num_cols, position (1-indexed))\n plt.subplot(1, 5, 1)\n plt.imshow(im, cmap=\"gray\")\n plt.subplot(1, 5, 2)\n plt.imshow(fftImage, cmap=\"gray\")\n plt.subplot(1, 5, 3)\n plt.imshow(fftKernel, cmap=\"gray\")\n plt.subplot(1, 5, 4)\n plt.imshow(fftImageTransformed, cmap=\"gray\")\n plt.subplot(1, 5, 5)\n plt.imshow(conv_result, cmap=\"gray\")\n ### END YOUR CODE HERE ###\n return conv_result", "def apply_filter(self, image):\n pass", "def CC_2Dfilter(\n h5path_labels,\n map_propnames,\n criteria,\n h5path_int='',\n slicedim=0,\n usempi=False,\n outputfile='',\n protective=False,\n ):\n\n (min_area,\n max_area,\n max_intensity_mb,\n max_eccentricity,\n min_solidity,\n min_euler_number,\n min_extent) = criteria\n\n # prepare mpi\n mpi_info = utils.get_mpi_info(usempi)\n\n # TODO: check output path\n\n # open data for reading\n h5file_mm, ds_mm, _, _ = utils.h5_load(h5path_labels, comm=mpi_info['comm'])\n if h5path_int:\n h5file_mb, ds_mb, _, _ = utils.h5_load(h5path_int, comm=mpi_info['comm'])\n else:\n ds_mb = None\n # mask used as intensity image in 
mean_intensity criterium\n\n # get the maximum labelvalue in the input\n root = h5path_labels.split('.h5')[0]\n maxlabel = get_maxlabel(root, ds_mm)\n\n # prepare mpi\n n_slices = ds_mm.shape[slicedim]\n series = np.array(range(0, n_slices), dtype=int)\n if mpi_info['enabled']:\n series = utils.scatter_series(mpi_info, series)[0]\n if mpi_info['rank'] == 0:\n fws_reduced = np.zeros((maxlabel + 1, len(map_propnames)),\n dtype='float')\n else:\n fws_reduced = None\n\n fws = np.zeros((maxlabel + 1, len(map_propnames)),\n dtype='float')\n\n mapall = criteria.count(None) == len(criteria)\n\n # pick labels observing the constraints\n go2D = ((max_eccentricity is not None) or\n (min_solidity is not None) or\n (min_euler_number is not None) or\n mapall)\n if go2D:\n\n for i in series:\n slcMM = utils.get_slice(ds_mm, i, slicedim)\n if h5path_int:\n slcMB = utils.get_slice(ds_mb, i, slicedim) # , 'bool'\n else:\n slcMB = None\n fws = check_constraints(slcMM, fws, map_propnames,\n criteria, slcMB, mapall)\n if mpi_info['enabled']:\n mpi_info['comm'].Reduce(fws, fws_reduced, op=MPI.MAX, root=0)\n else:\n fws_reduced = fws\n\n else:\n\n if mpi_info['rank'] == 0:\n fws = check_constraints(ds_mm, fws, map_propnames,\n criteria, ds_mb, mapall)\n fws_reduced = fws\n\n # write the forward maps to a numpy vector\n if mpi_info['rank'] == 0:\n slc = int(n_slices/2)\n slcMM = ds_mm[slc, :, :]\n slcMB = ds_mb[slc, :, :] if h5path_int else None\n datatypes = get_prop_datatypes(slcMM, map_propnames, slcMB)\n for i, propname in enumerate(map_propnames):\n root = outputfile.split('.h5')[0]\n nppath = '{}_{}.npy'.format(root, propname)\n outarray = np.array(fws_reduced[:, i], dtype=datatypes[i])\n np.save(nppath, outarray)\n\n # close and return\n h5file_mm.close()\n if h5path_int:\n h5file_mb.close()\n\n if mpi_info['rank'] == 0:\n return outarray", "def _occlude_image(im, cR, cC, size_patch, stride):\n im[cR:cR + stride, cC:cC + stride, :] = 127.5\n occ_map = np.ones((im_target_size, im_target_size))\n occ_map[cR:cR + stride, cC:cC + stride] = 0\n return im, occ_map", "def plot_filtered_spots(\n adata, \n kernel_matrix, \n contrib_thresh,\n row_key='row',\n col_key='col',\n ax=None,\n figure=None,\n dsize=37,\n ticks=True,\n fig_path=None,\n fig_format='pdf',\n fig_dpi=150\n ):\n if ax is None:\n width = 5\n figure, ax = plt.subplots(\n 1,\n 1,\n figsize=(width,5)\n )\n\n # Filter spots with too little contribution\n # from neighbors\n contrib = np.sum(kernel_matrix, axis=1)\n keep_inds = [\n i\n for i, c in enumerate(contrib)\n if c >= contrib_thresh\n ]\n print('Kept {}/{} spots.'.format(len(keep_inds), len(adata.obs)))\n\n cat = []\n keep_inds = set(keep_inds)\n for ind in range(adata.obs.shape[0]):\n if ind in keep_inds:\n cat.append('Kept')\n else:\n cat.append('Filtered')\n cat_palette = ['#595959', '#d9d9d9']\n plot_slide(\n adata.obs,\n cat,\n cmap='categorical',\n colorbar=False,\n vmin=None,\n vmax=None,\n title='Filtered Spots',\n ax=ax,\n figure=figure,\n ticks=ticks,\n dsize=dsize,\n row_key=row_key,\n col_key=col_key,\n cat_palette=cat_palette\n )\n\n if fig_path:\n plt.tight_layout()\n figure.savefig(\n fig_path,\n format=fig_format,\n dpi=fig_dpi\n )\n plt.show()", "def FoldChangeFilterToControl(X, data_headers, FCto, cutoff=0.4):\n XX = LinearFoldChange(X.copy(), data_headers, FCto)\n Xidx = np.any(XX[data_headers].values <= 1 - cutoff, axis=1) | np.any(XX[data_headers].values >= 1 + cutoff, axis=1)\n return X.iloc[Xidx, :]", "def segment_and_find_positions(self):\n initial_image = self.data\n 
xdim = self.data.shape[0]\n\n ydim = self.data.shape[1]\n downsized_image = transform.resize(\n initial_image,\n (xdim / DOWNSCALING_FACTOR, ydim / DOWNSCALING_FACTOR),\n mode=\"constant\",\n )\n rescaled_image = exposure.rescale_intensity(downsized_image)\n print(\"Starting Canny filtering\")\n g_edges = skimage.feature.canny(\n rescaled_image,\n sigma=self.canny_sigma,\n low_threshold=self.canny_low_threshold,\n )\n print(\"Starting dilation\")\n dilation = morphology.dilation(g_edges, morphology.disk(3))\n print(\"Starting erosion\")\n eroded = morphology.erosion(dilation, morphology.disk(4))\n dilation = morphology.dilation(\n eroded, morphology.diamond(4)\n ) # Dont change to disk\n print(\"Starting to remove small holes\")\n filled = morphology.remove_small_holes(\n dilation, area_threshold=self.remove_small_holes_area_threshold\n )\n print(\"Starting erosion\")\n eroded = morphology.erosion(filled, morphology.diamond(3))\n print(\"Applying filters\")\n filtered_image = eroded\n if self.colony_filters_dict is not None:\n for filter_name in self.colony_filters_dict.keys():\n filtered_image = segmentation_filters.apply_filter(\n filter_name, filtered_image, self.colony_filters_dict[filter_name]\n )\n\n colony_edges = morphology.dilation(feature.canny(filtered_image, 0.01))\n print(\"Starting outlining\")\n outline = downsized_image.copy()\n outline[colony_edges] = 65535\n distance = ndimage.distance_transform_edt(filtered_image)\n smoothed_well = ndimage.gaussian_filter(downsized_image, 0.35)\n outline.copy()\n objs, num_objs = ndimage.label(filtered_image)\n print(\"Applying filters for points\")\n if self.mode == \"A\":\n # point selection: Smoothest point in the center region\n for obj in range(1, num_objs + 1):\n print(\"On object {} of {}\".format(obj, num_objs))\n mask = objs == obj\n dist_mask = distance * mask\n # for each colony,\n # find the maximum distance from the two fold distance map.\n # The edge is at 0% and the center of the colony is at 100%\n d_max = dist_mask.max()\n # Getting the points which is at least 40% away from the edge\n top_percent = dist_mask > (d_max * 0.40)\n colony_mask = smoothed_well * top_percent\n colony_edges = feature.canny(colony_mask, 0.1)\n # applying the second distance transform\n # to find the smoothest point in the correct region\n inner_edges = ndimage.distance_transform_edt(\n ~colony_edges * top_percent\n )\n smooth_point = numpy.where(inner_edges == inner_edges.max())\n smooth_point = (smooth_point[0][0], smooth_point[1][0])\n smooth_point_corrected = (\n smooth_point[0] * DOWNSCALING_FACTOR,\n smooth_point[1] * DOWNSCALING_FACTOR,\n )\n self._point_locations.append(smooth_point_corrected)\n elif self.mode == \"C\":\n for obj in range(1, num_objs + 1):\n print(\"On object {} of {}\".format(obj, num_objs))\n mask = objs == obj\n dist_mask = distance * mask\n # point selection: edge, ridge & center respectively\n self.get_mode_c_points(dist_mask, 0, 0.03)\n self.get_mode_c_points(dist_mask, 0.15, 0.20)\n self.get_mode_c_points(dist_mask, 0.90, 0.99)", "def cs4243_filter(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n filtered_image = np.zeros((Hi, Wi))\n\n ###Your code here####\n # pad image to handle border pixels\n pad_height = (int)((Hk - 1)/2)\n pad_width = (int)((Wk - 1)/2)\n image_pad = pad_zeros(image, pad_height, pad_width)\n\n # Flip the kernel horizontal and vertical\n kernel = cs4243_rotate180(kernel)\n \n # compute effective output size, assume stride=1\n out_height = 1 + Hi - Hk + 2*pad_height\n out_width = 
1 + Wi - Wk + 2*pad_width\n \n # get initial nodes of receptive fields\n recep_fields_h = [i for i in range(out_height)]\n recep_fields_w = [i for i in range(out_width)]\n \n for i in recep_fields_h:\n for j in recep_fields_w: \n # get receptive area\n recep_area = image_pad[i:i+Hk, j:j+Wk] \n\n # multiply recep_area with kernel\n conv_sum = 0.0\n for y in range(Hk):\n for x in range(Wk): \n conv_sum += kernel[y][x] * recep_area[y][x]\n filtered_image[i, j] = conv_sum\n ###\n\n return filtered_image", "def filter_sinc_channel(img, mask_circle_diameter=40.0):\n dft_image = cv2.dft(np.float32(img), flags=cv2.DFT_COMPLEX_OUTPUT)\n dft_shift = np.fft.fftshift(dft_image)\n mask = np.zeros((img.shape[0], img.shape[1], 2), dtype=np.uint8)\n circle_center = (int(img.shape[0] / 2), int(img.shape[1] / 2))\n points_x, points_y = np.ogrid[:img.shape[0], :img.shape[1]]\n mask_area = (points_x - circle_center[0]) ** 2 + (points_y - circle_center[1]) ** 2 <= \\\n (mask_circle_diameter / 2) ** 2\n mask[mask_area] = 1\n filtered_dft = dft_shift * mask\n idft_image = np.fft.ifftshift(filtered_dft)\n img_filtered = cv2.idft(idft_image)\n img_filtered = cv2.magnitude(img_filtered[:, :, 0], img_filtered[:, :, 1])\n return img_filtered", "def filterInRange(frame, min, max, colorMode):\n\n tempFrame = cv2.cvtColor(frame, colorMode)\n\n mask = cv2.inRange(tempFrame, min, max)\n mask = cv2.bitwise_not(mask)\n\n filtered_frame = cv2.bitwise_and(frame, frame, mask=mask)\n\n return filtered_frame", "def flattenFrames(stack, onh_info):\n \n maxHeight=0\n frameList=[]\n\n if onh_info!=-1:\n y_min = onh_info.bbox[0]\n #need to subtract one because index?\n y_max = onh_info.bbox[2]\n \n #hull starts at (0,0), add the y and x min to translate to correct indices.\n hull_onh = np.array(np.where(onh_info.convex_image)) + np.array([[y_min], [onh_info.bbox[1]]])\n elif onh_info==-1:\n #should prevent shiftDetectorONH from running since i will always be greater than -1\n #hull_onh has been left undefined.\n y_min, y_max = -1,-1\n \n for i, frame in enumerate(stack):\n #medFrame = ndimage.filters.median_filter(frame,size=(1,60)) #Takes 3.5 minutes\n medFrame = ndimage.filters.uniform_filter1d(frame, 60) #Takes 1.0 minutes and has same output as med filter\n if i>=y_min and i<y_max:\n #get the index of x pixels that are part of the onh for each frame\n #these are indices of indices\n x_onh_ind = np.array(np.where(hull_onh[0]==i)) \n x_onh = hull_onh.T[x_onh_ind][0].T[1]\n #this should be sorted so that its the x_min and max for each frame\n x_onh_bounds = (x_onh[0], x_onh[-1])\n shifts = shiftDetectorONH(medFrame, onh_info, x_onh_bounds)\n else:\n shifts = shiftDetector(medFrame)\n newFrame = adjustFrame(frame, shifts)\n frameList.append(newFrame)\n if newFrame.shape[0] > maxHeight:\n maxHeight = newFrame.shape[0]\n \n #Show percentage of loop completed.\n print('\\rFinding and correcting horizontal shifts: {:.2f}% done'.format((100.0*((i+1)/len(stack)))), end='', flush=True)\n print('\\n')\n \n flattenedStack = padFrames(frameList, maxHeight)\n\n return flattenedStack", "def adaptiveContrast(image, mask, target_path, name, kernel_sizes, save=False):\n\n transforms = []\n for kernel_size in kernel_sizes:\n image_adapteq = exposure.equalize_adapthist(image, kernel_size=kernel_size, clip_limit=0.03)\n transforms.append(image_adapteq)\n \n # Display results\n fig = plt.figure(figsize=(19, 16))\n axes = np.zeros((2, 5), dtype=np.object)\n axes[0, 0] = fig.add_subplot(2, 5, 1)\n for i in range(1, 5):\n axes[0, i] = fig.add_subplot(2, 
5, 1+i, sharex=axes[0,0], sharey=axes[0,0])\n for i in range(0, 5):\n axes[1, i] = fig.add_subplot(2, 5, 6+i)\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(transforms[0], mask, mask_cmap, img_cmap,\n axes[:, 0])\n ax_image.set_title('%d' %kernel_sizes[0])\n \n y_min, y_max = ax_hist.get_ylim()\n ax_hist.set_ylabel('Number of pixels')\n ax_hist.set_yticks(np.linspace(0, y_max, 5))\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(transforms[1], mask, mask_cmap, img_cmap,\n axes[:, 1])\n ax_image.set_title('%d' %kernel_sizes[1])\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(transforms[2], mask, mask_cmap, img_cmap,\n axes[:, 2])\n ax_image.set_title('%d' %kernel_sizes[2])\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(transforms[3],mask, mask_cmap, img_cmap,\n axes[:, 3])\n ax_image.set_title('%d' %kernel_sizes[3])\n \n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(transforms[4],mask, mask_cmap, img_cmap,\n axes[:, 4])\n ax_image.set_title('%d' %kernel_sizes[4])\n \n ax_cdf.set_ylabel('Fraction of total intensity')\n ax_cdf.set_yticks(np.linspace(0, 1, 5))\n \n # prevent overlap of y-axis labels\n fig.tight_layout()\n if save:\n plt.savefig(os.path.join(target_path, name))\n else:\n plt.show()\n plt.close()\n\n return image_adapteq", "def filter_img(inarr, data_resolution):\n outt = inarr.copy()\n print('outmin', np.nanmin(outt), np.nanmax(outt))\n\n t_thresh_size = -40\n t_thresh_cut = -50\n\n outt[outt >= t_thresh_size] = 0\n outt[np.isnan(outt)] = 0\n\n labels, numL = label(outt)\n\n u, inv = np.unique(labels, return_inverse=True)\n n = np.bincount(inv)\n\n pix_nb = 700/data_resolution**2\n\n badinds = u[(n < pix_nb)]\n # all blobs with more than 1000 pixels = 25,000km2 (meteosat regridded 5km), 200pix = 5000km2, 8pix = 200km2\n # scale 30km, radius 15km ca. 
700km2 circular area equals 28 pix\n\n for bi in badinds:\n inds = np.where(labels == bi)\n outt[inds] = 0\n\n outt[outt >= t_thresh_cut] = 150\n\n grad = np.gradient(outt)\n outt[outt == 150] = np.nan\n\n nogood = np.isnan(outt) # filters edge maxima later, no maxima in -40 edge area by definition!\n\n # tdiff = np.nanmax(outt) - np.nanmin(outt) # define background temperature for image\n # if tdiff > 28: # temp difference of 28 degrees\n # xmin = 15\n # else:\n # xmin = 10\n\n xmin = 10\n outt[nogood] = t_thresh_cut - xmin\n nok = np.where(abs(grad[0]) > 80)\n d = 2\n i = nok[0]\n j = nok[1]\n # edge smoothing for wavelet application\n for ii, jj in zip(i, j):\n kern = outt[ii - d:ii + d + 1, jj - d:jj + d + 1]\n outt[ii - d:ii + d + 1, jj - d:jj + d + 1] = ndimage.gaussian_filter(kern, 3, mode='nearest')\n\n return outt, nogood, t_thresh_size, t_thresh_cut, pix_nb", "def cs4243_filter_faster(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n filtered_image = np.zeros((Hi, Wi))\n\n ###Your code here####\n \n # pad image to handle border pixels\n pad_height = (int)((Hk - 1)/2)\n pad_width = (int)((Wk - 1)/2)\n image_pad = pad_zeros(image, pad_height, pad_width)\n \n # compute effective output size, assume stride=1\n out_height = 1 + Hi - Hk + 2*pad_height\n out_width = 1 + Wi - Wk + 2*pad_width\n \n # get initial nodes of receptive fields\n recep_fields_h = [i for i in range(out_height)]\n recep_fields_w = [i for i in range(out_width)]\n \n # extract receptive area into matrix of shape (Hi*Wi, Hk*Wk)\n recep_areas = []\n for i in recep_fields_h:\n for j in recep_fields_w:\n recep_areas.append(image_pad[i: i+Hk, j: j+Wk].reshape(-1))\n out = np.stack(recep_areas)\n \n # Flip the kernel horizontal and vertical\n kernel = cs4243_rotate180(kernel).reshape(Hk*Wk, 1)\n \n # dot product kernel and receptive areas\n filtered_image = np.dot(out, kernel).reshape(Hi, Wi)\n \n ###\n\n return filtered_image", "def enhanceContrast(image, mask, target_path, name, save=False):\n \n\n \n # Contrast stretching\n p2, p98 = np.percentile(image, (2, 98))\n image_rescale = exposure.rescale_intensity(image, in_range=(p2, p98))\n \n # Equalization\n image_eq = exposure.equalize_hist(image)\n \n # Adaptive Equalization\n image_adapteq = exposure.equalize_adapthist(image, clip_limit=0.03)\n \n # Display results\n fig = plt.figure(figsize=(19, 13))\n axes = np.zeros((2, 4), dtype=np.object)\n axes[0, 0] = fig.add_subplot(2, 4, 1)\n for i in range(1, 4):\n axes[0, i] = fig.add_subplot(2, 4, 1+i, sharex=axes[0,0], sharey=axes[0,0])\n for i in range(0, 4):\n axes[1, i] = fig.add_subplot(2, 4, 5+i)\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(image, mask, mask_cmap, img_cmap,\n axes[:, 0])\n ax_image.set_title('Low contrast image')\n \n y_min, y_max = ax_hist.get_ylim()\n ax_hist.set_ylabel('Number of pixels')\n ax_hist.set_yticks(np.linspace(0, y_max, 5))\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(image_rescale, mask, mask_cmap, img_cmap,\n axes[:, 1])\n ax_image.set_title('Contrast stretching')\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(image_eq, mask, mask_cmap, img_cmap,\n axes[:, 2])\n ax_image.set_title('Histogram equalization')\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(image_adapteq,mask, mask_cmap, img_cmap,\n axes[:, 3])\n ax_image.set_title('Adaptive equalization')\n \n ax_cdf.set_ylabel('Fraction of total intensity')\n ax_cdf.set_yticks(np.linspace(0, 1, 5))\n \n # prevent overlap of y-axis labels\n fig.tight_layout()\n if save:\n 
plt.savefig(os.path.join(target_path, name))\n else:\n plt.show()\n plt.close()\n return image_adapteq", "def imgFiltering(inputPath, outputPath):\n\t# open the target image\n\tpollenImg = IJ.openImage(inputPath)\n\t\n\t# Create duplicator\n\tduplicator = Duplicator()\n\t\n\t# Duplicate the image with channel 1\n\tpollenImgCopy = duplicator.run(pollenImg, 1, 1, 1, 1, 1, 1);\n\t\n\t# set auto threshold\n\t# IJ.setAutoThreshold(pollenImgCopy, \"Default dark\");\n\t\n\t# set threshold\n\tIJ.setThreshold(pollenImgCopy, 17000, 65520)\n\t\n\t# Call the Thresholder to convert the image to a mask\n\tIJ.run(pollenImgCopy, \"Convert to Mask\", \"\")\n\t\n\t# create result table\n\trt = ResultsTable()\n\t\n\t# create particle analyzer\n\tpAnalyzer = ParticleAnalyzer(ParticleAnalyzer.SHOW_NONE, Measurements.ALL_STATS, rt, 20.0, 1000.0, 0.5 ,1.0)\n\t\n\t# Analyze the particle\n\tpAnalyzer.analyze(pollenImgCopy)\n\t\n\t# Save results as csv\n\trt.saveAs(outputPath)", "def continuous_hann_sinc_filter(\n fs: int, fc: float, L: int, dtype: torch.dtype, device: torch.device\n) -> Tensor:\n assert L % 2 == 1\n assert fc < fs / 2\n hsupp = torch.linspace(-(L-1)/2, (L-1)/2, L, dtype=dtype, device=device)\n hideal = (2 * fc / fs) * torch.sinc(2 * fc * hsupp / fs)\n hann = torch.hann_window(L, dtype=dtype, device=device)\n return hideal * hann", "def calculate_force_change(data, axis=\"x\", forceChannel=\"force\", distanceChannel=\"surfaceSep\", window=15):\n axis = axis.upper()\n\n #check if the regions have been assigned\n if \"unfolding\" not in data.columns.values.tolist():\n raise ValueError(\"The unfolding events have not yet been identified. See function identify_unfolding_events\")\n\n #Label the different isolated events using scipy.ndimage\n data[\"eventID\"], eventsNumber = ndimage.label(data[\"unfolding\"])\n\n #Start the counting in 0\n data[\"eventID\"] -= 1\n #Show how many events were identified\n print(eventsNumber, \"events identified\")\n\n def averaged_values(column, startT, endT, window=5):\n start = column.index.get_loc(startT)\n end = column.index.get_loc(endT)\n averagedBefore = column.iloc[start-window: start - 3].mean()\n averagedAfter = column.iloc[end + 3: end + window].mean()\n diffAverage = averagedAfter - averagedBefore\n return averagedBefore, averagedAfter, diffAverage\n\n startForce = []\n forceChange = []\n\n pullingCycle = []\n #Take the first and last times point of each unfolding event, discarding the first point because it is the\n # unclassified regions\n times = {\"startTimes\": data.groupby(\"eventID\").time.first()[1:], \"endTimes\": data.groupby(\"eventID\").time.last()[1:]}\n newWindow = deepcopy(window)\n for startTime, endTime in zip(times[\"startTimes\"], times[\"endTimes\"]):\n if data.index.get_loc(startTime) < newWindow:\n window = data.index.get_loc(startTime) - 1\n else:\n window = newWindow\n forceBefore, forceAfter, forceDifference = averaged_values(data[forceChannel+axis], startTime, endTime, window)\n startForce.append(forceBefore)\n forceChange.append(forceDifference)\n pullingCycle.append(data.loc[startTime, \"pullingCycle\"])\n\n unfoldingData = pd.DataFrame({\"startTime\": times[\"startTimes\"], \"endTime\": times[\"endTimes\"],\n \"force\": startForce, \"forceChange\": forceChange, \"pullingCycle\": pullingCycle})\n\n return unfoldingData", "def convolve_im(im: np.array,\n kernel: np.array,\n verbose=True):\n\t\n ### START YOUR CODE HERE ### (You can change anything inside this block) \n\t\n H,W = np.shape(im)\n h,w = np.shape(kernel)\n t_b = 
(H-h)//2\n l_r = (W-w)//2\n kernel_padded = np.pad(kernel, ((t_b, t_b+1),(l_r, l_r+1)), 'constant')\n kernel_padded = np.pad(kernel, ((0, 2*t_b),(0, 2*l_r)), 'constant')\n fft_kernel = np.fft.fft2(kernel_padded, s=None, axes=(-2, -1), norm=None)\n \n \n im_fft = np.fft.fft2(im, s=None, axes=(-2, -1), norm=None) \n im_filt = im_fft*fft_kernel \n conv_result = np.fft.ifft2(im_filt, s=None, axes=(-2, -1), norm=None).real \n\n if verbose:\n # Use plt.subplot to place two or more images beside eachother\n plt.figure(figsize=(12, 4))\n # plt.subplot(num_rows, num_cols, position (1-indexed))\n plt.subplot(1, 2, 1)\n plt.imshow(im, cmap=\"gray\")\n plt.subplot(1, 2, 2) \n plt.imshow(conv_result, cmap=\"gray\")\n\n ### END YOUR CODE HERE ###\n return conv_result", "def enhance_contrast(img):\n for y in range(frame_height):\n for x in range(frame_width):\n if img[y, x, 1] > 100:\n # range of blues to limit of puppet motion 255/(frame_width - 150)\n img[y][x][0] = x*0.4\n if img[y, x, 1] <= 100:\n img[y][x][2] = img[y][x][2]*0.5\n cv2.imwrite(\"contrasted.png\", img)" ]
[ "0.52584434", "0.501605", "0.50095785", "0.5009003", "0.5002469", "0.49740124", "0.49180493", "0.49099478", "0.48357704", "0.47827873", "0.47746482", "0.47566548", "0.4741615", "0.4737234", "0.46963915", "0.46842596", "0.4639385", "0.46338367", "0.46156648", "0.46046755", "0.45878288", "0.45706195", "0.4564185", "0.4555902", "0.4550769", "0.4549048", "0.45445293", "0.4537495", "0.45249462", "0.45064425" ]
0.7578108
0
Function to apply a frequency filter. This filter takes into consideration the occurrence frequency throughout the entire time series. Thus, all class occurrences with less than a given percentage of temporal persistence (e.g. 3 years or fewer out of 33) are replaced with the MODE value of that given pixel position in the stack of years.
def applyFrequencyFilter(image, yearBandNames, classDictionary, filterParams): #Grab land cover classes as a list of strings lc_classes = classDictionary.keys().getInfo() #Get binary images of the land cover classifications for the current year binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary) #Get the frequency of each class through the years by reducing the image collection to an image using the sum reducer class_frequency = binary_class_images.reduce(ee.Reducer.sum().unweighted()).rename(lc_classes) #Get an image that represents the mode of the land cover classes in each pixel mode_image = image.reduce(ee.Reducer.mode()) #Define an image to add bands with frequency filter applied out_img = ee.Image() #Loop through years for yearBand in yearBandNames: #Select the target year from the image yearImage = image.select(yearBand) #Loop through land cover classes in filterParams for lc_class in lc_classes: #Get the minimum occurance allowed in that land cover class min_occurance = filterParams.get(lc_class) #Find if the land cover class had less than the number of min_occurances in each pixel change_class = class_frequency.select(lc_class).lt(ee.Image.constant(min_occurance)) #If change_class==1, then replace that pixel with the mode of all the years in that pixel #This filter is only applied to pixels of this land cover class #First mask yearImage to pixels of this land cover class, then get the union of pixels where change_class==1, #if both conditions are true, then the pixel is replaced with the mode yearImage = yearImage.where(yearImage.eq(ee.Number(classDictionary.get(lc_class))).bitwiseAnd(change_class),mode_image) #Rename yearImage to bandName yearImage = yearImage.rename(yearBand) #Append to output image out_img = out_img.addBands(yearImage) return out_img
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def frequency_filter(fc, L, srf, KIND=2):\n\n if hasattr(KIND, \"__len__\"):\n PASS = KIND\n KIND = 2\n else:\n PASS = [2,3]\n KIND = [KIND]\n\n # fourier transform of lateral inhibitory function \n\n # tonotopic axis\n if issubclass(type(fc), str):\n fc = float(fc)\n R1 = np.arange(L).astype(np.float)/L*srf/2/np.abs(fc)\n\n if KIND == 1:\n # Gabor function\n C1 = 1./2/0.3/0.3\n H = np.exp(-C1*(R1-1)**2) + np.exp(-C1*(R1+1)**2)\n else:\n # Gaussian Function\n R1 = R1 ** 2\n H = R1 * np.exp(1-R1)\n\n # passband\n if PASS[0] == 1:\n #lowpass\n maxi = np.argmax(H)\n sumH = H.sum()\n H[0:maxi] = 1\n H = H / (H.sum() or 1) * sumH\n elif PASS[0] == PASS[1]:\n # highpass\n maxi = np.argmax(H)\n sumH = H.sum()\n H[maxi+1:L] = 1\n H = H / (H.sum() or 1) * sumH\n\n return H", "def temporal_ideal_filter(tensor,low,high,fps,axis=0): \n fft=fftpack.fft(tensor,axis=axis)\n frequencies = fftpack.fftfreq(tensor.shape[0], d=1.0 / fps)\n bound_low = (np.abs(frequencies - low)).argmin()\n bound_high = (np.abs(frequencies - high)).argmin()\n if (bound_low==bound_high) and (bound_high<len(fft)-1):\n bound_high+=1\n fft[:bound_low] = 0\n fft[bound_high:-bound_high] = 0\n fft[-bound_low:] = 0\n iff=fftpack.ifft(fft, axis=axis)\n \n return np.abs(iff)", "def generate_filter(length, fs, f_low=None, f_high=None, mode='box', is_plot=False):\n\n freqs = np.fft.fftfreq(int(length), d=(1. / float(fs)))\n\n filter_array = np.ones(length)\n\n if f_low is None and f_high is None:\n print('no filtering required!')\n elif f_low is None and f_high is not None:\n print('low-pass fileter')\n if f_high <= 0:\n raise(ValueError, 'Higher cutoff frquency should be positive!')\n filter_array[freqs >= f_high] = 0.\n filter_array[freqs <= -f_high] = 0.\n elif f_low is not None and f_high is None:\n print('high-pass fileter')\n if f_low < 0:\n raise (ValueError, 'Lower cutoff frquency should be non-negative!')\n filter_array[np.logical_and((freqs >= -f_low), (freqs <= f_low))] = 0.\n else:\n print('band-pass filter')\n if f_high <= 0:\n raise (ValueError, 'Higher cutoff frquency should be positive!')\n if f_low < 0:\n raise (ValueError, 'Lower cutoff frquency should be non-negative!')\n filter_array[freqs >= f_high] = 0.\n filter_array[freqs <= -f_high] = 0.\n filter_array[np.logical_and((freqs >= -f_low), (freqs <= f_low))] = 0.\n\n if mode == '1/f':\n filter_array[1:] = filter_array[1:] / abs(freqs[1:])\n filter_array[0] = 0\n filter_array = bas.array_nor(filter_array)\n elif mode == 'box':\n filter_array[0] = 0\n else:\n raise(NameError, 'Variable \"mode\" should be either \"1/f\" or \"box\"!')\n\n if is_plot:\n plot_array = zip(freqs, filter_array)\n plot_array.sort(key=lambda x: x[0])\n plot_array = zip(*plot_array)\n\n _ = plt.figure(figsize=(10, 3))\n plt.plot(plot_array[0], plot_array[1])\n plt.xlabel('frequency (Hz)')\n plt.ylim([-0.1, 1.1])\n plt.show()\n\n return freqs, filter_array", "def fir_filter(sig, sampling_freq, critical_freq, kernel_window = 'hamming', taps = 101, kind = 'band', **kwargs):\n\n kernel = make_fir_filter(sampling_freq, critical_freq, kernel_window, taps, kind, **kwargs) \n\n return np.roll(scipy.signal.lfilter(kernel, [1], sig), -taps/2+1)", "def get_frequency(frame):\n frame = clip_centre(frame)\n frame = auto_correlate(frame)\n threshold: int = SAMPLE_RATE // 500\n lag = frame[threshold:].argmax()\n frequency = SAMPLE_RATE / lag\n return frequency", "def detect(self, frame, cur_count, player):\n if cur_count % self.freq is 0:\n frame = cv.GaussianBlur(frame, (3, 3), 1)\n self.process(frame, 
cur_count, player)", "def mce_filter(freq, f_raw, params):\n\tz = np.exp(-2j*np.pi*freq/f_raw)\n\tb11, b12, b21, b22 = np.array(params[:4])*0.5**14\n\tH = (1+z)**4 / (1-b11*z+b12*z**2) / (1-b21*z+b22*z**2)\n\tH /= 2**4 / (1-b11+b12) / (1-b21+b22)\n\treturn H", "def tconst_filter(freq, tau):\n\treturn 1/(2*np.pi*1j*freq*tau+1)", "def apply_freq_filter(self, min_freq):\n self._apply_filter(lambda ng, freq: freq < min_freq)", "def continuous_hann_sinc_filter(\n fs: int, fc: float, L: int, dtype: torch.dtype, device: torch.device\n) -> Tensor:\n assert L % 2 == 1\n assert fc < fs / 2\n hsupp = torch.linspace(-(L-1)/2, (L-1)/2, L, dtype=dtype, device=device)\n hideal = (2 * fc / fs) * torch.sinc(2 * fc * hsupp / fs)\n hann = torch.hann_window(L, dtype=dtype, device=device)\n return hideal * hann", "def _prevalent_freq(self, data, framerate):\n if not(np.std(data) == 0):\n data = (data-np.mean(data))/np.std(data)\n transform = np.fft.rfft(data)\n freqs = np.fft.rfftfreq(len(data), 1.0/framerate) \n freqs = 60*freqs\n band_pass = np.where((freqs < 40) | (freqs > 240) )[0]\n transform[band_pass] = 0\n transform = np.abs(transform)**2\n sos = scipy.signal.butter(3, 0.2, output='sos')\n transform = scipy.signal.sosfilt(sos, transform)\n powers = np.argsort(-1*transform)\n hr, power = self._respiration_rejection([freqs[powers[0]], freqs[powers[1]]],[transform[powers[0]], transform[powers[1]]])\n return hr, power", "def freq_filt(orig_img: np.ndarray, transfer_func: np.ndarray) -> np.ndarray:\n # pad and center the input image\n M, N = orig_img.shape[:2]\n padded_img = np.pad(\n orig_img,\n (\n (int(np.floor(M / 2)), int(np.ceil(M / 2))),\n (int(np.floor(N / 2)), int(np.ceil(N / 2))),\n (0, 0),\n ),\n constant_values=0,\n )\n\n # take fft of image\n f_img = np.fft.fftshift(np.fft.fft2(padded_img.astype(np.float32)))\n\n # get product of image and transfer func\n f_filtered = np.empty_like(f_img)\n for channel_idx in range(f_img.shape[-1]):\n f_filtered[:, :, channel_idx] = f_img[:, :, channel_idx] * transfer_func\n\n # get image using ifft\n filtered_img = np.real(np.fft.ifft2(np.fft.fftshift(f_filtered)))\n\n # slice to remove padding\n filtered_img = filtered_img[\n int(M / 2) : int(3 * M / 2), int(N / 2) : int(3 * N / 2), :\n ]\n\n # scale and return filtered image\n return (\n 255\n * (filtered_img - np.min(filtered_img))\n / (np.max(filtered_img) - np.min(filtered_img))\n ).astype(np.uint8)", "def temporal_bandpass_filter(video_to_filter, low, high, fps):\n fft = fftpack.fft(video_to_filter, axis=0)\n frequencies = fftpack.fftfreq(video_to_filter.shape[0], d=1.0 / fps)\n bound_low = (np.abs(frequencies - low)).argmin()\n bound_high = (np.abs(frequencies - high)).argmin()\n fft[:bound_low] = 0\n fft[bound_high:-bound_high] = 0\n fft[-bound_low:] = 0\n iff = fftpack.ifft(fft, axis=0)\n return iff", "def filters(array, sample_frequency):\n strain = TimeSeries(array, sample_rate=int(sample_frequency))\n white_data = strain.whiten(fftlength=4, fduration=4)\n bp_data = white_data.bandpass(50, 250)\n return bp_data.value", "def filter_(self,fltr:torch.tensor):\n self.container = self.container[:,fltr]\n self.count_hist = self.count_hist[fltr]", "def temporal_filter(fc, L, srt, PASS = [2,3]):\n if issubclass(type(fc), str):\n fc = float(fc)\n t = np.arange(L).astype(np.float32)/srt\n k = t*fc\n h = np.sin(2*np.pi*k) * k**2 * np.exp(-3.5*k) * fc\n\n h = h-np.mean(h)\n H0 = np.fft.fft(h, n=2*L)\n A = np.angle(H0[0:L])\n H = np.abs(H0[0:L])\n maxi = np.argmax(H)\n H = H / (H[maxi] or 1)\n\n # passband\n if 
PASS[0] == 1:\n #low pass\n H[0:maxi] = 1\n elif PASS[0] == PASS[1]:\n #high pass\n H[maxi+1:L] = 1\n\n H = H * np.exp(1j*A)\n return H", "def analysis_fourier_map(self, target=1, mode=0):\r\n\r\n \r\n\r\n print('Starting fourier analysis:')\r\n\r\n self.print_image_info()\r\n\r\n # get the average image and the average of the whole image over time\r\n\r\n avgimg = np.mean(self.imageData, axis=0) # get mean image for reference later: average across all time\r\n\r\n self.meanimagevalue = np.mean(np.mean(avgimg, axis=1), axis=0)\r\n\r\n self.stdimg = np.std(self.imageData, axis= 0) # and standard deviation\r\n\r\n\r\n\r\n width = int(self.period*self.framerate*2)\r\n\r\n print( \" Detrending:\")\r\n\r\n print( ' Median filter width: ', width)\r\n\r\n # footprint = np.ones((width, 1, 1))\r\n\r\n # self.imageData = self.imageData - scipy.ndimage.median_filter(self.imageData, footprint=footprint)\r\n\r\n print( \" Done detrending\")\r\n\r\n\r\n\r\n self.n_times = self.timebase\r\n\r\n\r\n\r\n # calculate FFT and get amplitude and phase\r\n\r\n self.DF = np.fft.fft(self.imageData, axis = 0)\r\n self.freqs = np.fft.fftfreq(self.DF.shape[0], d=1./self.framerate)\r\n\r\n # self.freqs = np.fft.fftfreq(self.DF.shape[0], d=1./self.framerate)\r\n\r\n print (' df shape: ', self.DF.shape)\r\n\r\n print (' 1/framerate: ', 1./self.framerate)\r\n\r\n self.freq_point = np.argmin(np.abs(self.freqs - 1./self.period))\r\n print ('period:', self.period)\r\n print ('frequency: ', 1./self.period)\r\n print ('freq_point: ', self.freq_point)\r\n print ('frequency value: ',self.freqs[self.freq_point])\r\n steps = np.arange(1,6,dtype=np.float)\r\n steps = (steps)+1.\r\n self.assigned_freqs=2.*np.pi*1./1.6*steps\r\n print ('assigned freqs', self.assigned_freqs)\r\n\r\n #j = j + 2 # just looking at FFT leakage...`\r\n\r\n print (' closest index/freq, period: ', self.freq_point, self.freqs[self.freq_point], 1./self.period)\r\n\r\n self.print_image_info()\r\n\r\n ampimg = np.absolute(self.DF[self.freq_point,:,:])\r\n\r\n phaseimg = np.angle(self.DF[self.freq_point,:,:])\r\n\r\n \r\n # ampimg = np.absolute(self.DF[self.freq_point,:,:])\r\n\r\n\r\n # phaseimg = np.angle(self.DF[self.freq_point,:,:])\r\n\r\n if target == 1:\r\n\r\n f = open('img_phase1.dat', 'w')\r\n\r\n pickle.dump(phaseimg, f)\r\n\r\n f.close()\r\n\r\n f = open('img_amplitude1.dat', 'w')\r\n\r\n pickle.dump(ampimg, f)\r\n\r\n f.close()\r\n\r\n self.amplitudeImage1 = ampimg\r\n\r\n self.phaseImage1 = phaseimg\r\n\r\n if target == 2:\r\n\r\n f = open('img_phase2.dat', 'w')\r\n\r\n pickle.dump(phaseimg, f)\r\n\r\n f.close()\r\n\r\n f = open('img_amplitude2.dat', 'w')\r\n\r\n pickle.dump(ampimg, f)\r\n\r\n f.close()\r\n\r\n self.amplitudeImage2 = ampimg\r\n\r\n self.phaseImage2 = phaseimg\r\n\r\n print (\" FFT calculated, data saved.\\n\")\r\n\r\n # save most recent calculation to disk\r", "def _adapt_freq(\n ds: xr.Dataset,\n *,\n dim: Sequence[str],\n thresh: float = 0,\n) -> xr.Dataset:\n # Compute the probability of finding a value <= thresh\n # This is the \"dry-day frequency\" in the precipitation case\n P0_sim = ecdf(ds.sim, thresh, dim=dim)\n P0_ref = ecdf(ds.ref, thresh, dim=dim)\n\n # The proportion of values <= thresh in sim that need to be corrected, compared to ref\n dP0 = (P0_sim - P0_ref) / P0_sim\n\n if dP0.isnull().all():\n # All NaN slice.\n pth = dP0.copy()\n sim_ad = ds.sim.copy()\n else:\n # Compute : ecdf_ref^-1( ecdf_sim( thresh ) )\n # The value in ref with the same rank as the first non-zero value in sim.\n # pth is meaningless when 
freq. adaptation is not needed\n pth = nbu.vecquantiles(ds.ref, P0_sim, dim).where(dP0 > 0)\n\n # Probabilities and quantiles computed within all dims, but correction along the first one only.\n if \"window\" in dim:\n # P0_sim was computed using the window, but only the original time series is corrected.\n # Grouper.apply does this step, but if done here it makes the code faster.\n sim = ds.sim.isel(window=(ds.sim.window.size - 1) // 2)\n else:\n sim = ds.sim\n dim = dim[0]\n\n # Get the percentile rank of each value in sim.\n rank = sim.rank(dim, pct=True)\n\n # Frequency-adapted sim\n sim_ad = sim.where(\n dP0 < 0, # dP0 < 0 means no-adaptation.\n sim.where(\n (rank < P0_ref) | (rank > P0_sim), # Preserve current values\n # Generate random numbers ~ U[T0, Pth]\n (pth.broadcast_like(sim) - thresh)\n * np.random.random_sample(size=sim.shape)\n + thresh,\n ),\n )\n\n # Tell group_apply that these will need reshaping (regrouping)\n # This is needed since if any variable comes out a `groupby` with the original group axis,\n # the whole output is broadcasted back to the original dims.\n pth.attrs[\"_group_apply_reshape\"] = True\n dP0.attrs[\"_group_apply_reshape\"] = True\n return xr.Dataset(data_vars={\"pth\": pth, \"dP0\": dP0, \"sim_ad\": sim_ad})", "def filter_ms2fits(stack, fit_data, channel=1, peakiness=4.5):\n \n fit_data = fit_data.copy()\n for t in range(0, len(fit_data)):\n frame_data = fit_data[t]\n frame_med = np.median(stack[channel, t])\n xy_width_means = np.mean(frame_data[:,5:7], axis=1)\n peak_heights = frame_data[:,3]\n spot_peakiness = np.log(peak_heights / xy_width_means)\n frame_data_filtered = frame_data[(peak_heights > frame_med) & (spot_peakiness > peakiness),:]\n fit_data[t] = frame_data_filtered\n return fit_data", "def apply_filter(data, filter_bank, sfreq): \n if data.ndim == 1:\n filtered = np.zeros((1, filter_bank.shape[0], sfreq))\n for filt in range(filter_bank.shape[0]):\n filtered[0, filt, :] = np.convolve(filter_bank[filt,:], data)[int(sfreq-sfreq/2):int(sfreq+sfreq/2)]\n elif data.ndim == 2:\n filtered = np.zeros((data.shape[0], filter_bank.shape[0], sfreq))\n for chan in range(data.shape[0]):\n for filt in range(filter_bank.shape[0]):\n filtered[chan, filt, :] = np.convolve(filter_bank[filt, :], \\\n data[chan,:])[int(sfreq-sfreq/2):int(sfreq+sfreq/2)] # mode=\"full\"\n return filtered", "def bandpass_cnt(data, low_cut_hz, high_cut_hz, fs, filt_order=3, axis=0):\n if (low_cut_hz == 0 or low_cut_hz is None) and (\n high_cut_hz == None or high_cut_hz == fs / 2.0):\n log.info(\"Not doing any bandpass, since low 0 or None and \"\n \"high None or nyquist frequency\")\n return data.copy()\n if low_cut_hz == 0 or low_cut_hz == None:\n log.info(\"Using lowpass filter since low cut hz is 0 or None\")\n return lowpass_cnt(data, high_cut_hz, fs, filt_order=filt_order, axis=axis)\n if high_cut_hz == None or high_cut_hz == (fs / 2.0):\n log.info(\n \"Using highpass filter since high cut hz is None or nyquist freq\")\n return highpass_cnt(data, low_cut_hz, fs, filt_order=filt_order, axis=axis)\n\n nyq_freq = 0.5 * fs\n low = low_cut_hz / nyq_freq\n high = high_cut_hz / nyq_freq\n b, a = scipy.signal.butter(filt_order, [low, high], btype='bandpass')\n assert filter_is_stable(a), \"Filter should be stable...\"\n data_bandpassed = scipy.signal.lfilter(b, a, data, axis=axis)\n return data_bandpassed", "def _apply_filter(self, fn=lambda ngram, freq: False):\n tmp_ngram = FreqDist()\n for ngram, freq in self.ngram_fd.items():\n if not fn(ngram, freq):\n tmp_ngram[ngram] = 
freq\n self.ngram_fd = tmp_ngram", "def freq(self, frequency: Optional[int]):", "def filter_freq(self, low_freq=None, high_freq=None, axes=None, win_fcn='boxcar'):\n axes = self._get_axes_numbers(axes)\n fdomain = self.fft(axes=axes)\n low_freq = self._cook_args(low_freq, axes)\n high_freq = self._cook_args(high_freq, axes)\n\n if low_freq is None:\n low_freq = [0]*len(axes)\n if high_freq is None:\n high_freq = [self.ts[ax]/2. for ax in axes]\n\n fupper, flower = fdomain.copy(), fdomain.copy()\n for ax in axes:\n fupper = fupper.select(lambda x: x >= 0, axis=ax)\n flower = flower.select(lambda x: x < 0, axis=ax)\n\n fupper = fupper.window(index1=low_freq, index2=high_freq, axes=axes, win_fcn=win_fcn)\n flower = flower.window(index1=-np.array(high_freq), index2=-np.array(low_freq),\n axes=axes, win_fcn=win_fcn)\n fdomain.update(fupper)\n fdomain.update(flower)\n vals = fftshift(fdomain.values, axes=axes)\n ift = ifft2(vals, axes=axes, shape=np.array(self.shape)[axes])\n return Signal2D(np.real(ift), index=self.index, columns=self.columns)", "def apply_filter(image: np.ndarray) -> np.ndarray:\n # choose filters to apply\n return clahe(image)", "def rcfilter_C(freq, R):\n R = _normalizevalue(R)\n C = 1/(R*freq*2*math.pi)\n return _Cap(C)", "def filter(f,fcutoff=10.,w=10.0,dt=.001):\r\n\r\n tshift=float(w)/2.\r\n \r\n fpad=padzeros(f)\r\n Fpad=np.fft.fft(fpad)\r\n fc=fcutoff\r\n \r\n t=np.arange(start=-tshift,stop=tshift,step=dt)\r\n filt=np.zeros(len(fpad))\r\n fs=2*fc*np.sinc(2*t*fc)\r\n norm=sum(fs)\r\n filt[0:len(t)]=fs/norm\r\n Filt=np.fft.fft(filt)\r\n \r\n Filtfunc=Fpad*Filt\r\n filtfunc=np.fft.ifft(Filtfunc)\r\n filtfunc=filtfunc[len(t)/2:len(f)+len(t)/2]\r\n \r\n return filtfunc", "def filtered_fourier(self):\r\n\r\n freqs = tsu.get_freqs(self.sampling_rate, self.data.shape[-1])\r\n\r\n if self.ub is None:\r\n self.ub = freqs[-1]\r\n\r\n power = fftpack.fft(self.data)\r\n idx_0 = np.hstack([np.where(freqs < self.lb)[0],\r\n np.where(freqs > self.ub)[0]])\r\n\r\n #Make sure that you keep the DC component:\r\n keep_dc = np.copy(power[..., 0])\r\n power[..., idx_0] = 0\r\n power[..., -1 * idx_0] = 0 # Take care of the negative frequencies\r\n power[..., 0] = keep_dc # And put the DC back in when you're done:\r\n\r\n data_out = fftpack.ifft(power)\r\n\r\n data_out = np.real(data_out) # In order to make sure that you are not\r\n # left with float-precision residual\r\n # complex parts\r\n\r\n return ts.TimeSeries(data=data_out,\r\n sampling_rate=self.sampling_rate,\r\n time_unit=self.time_unit)", "def filters(self, low_freq=1/7, high_freq=128, notch_freq=50):\n self.raw.filter(l_freq=low_freq, h_freq=high_freq)\n self.raw.notch_filter(range(notch_freq, high_freq, notch_freq), filter_length='auto',\n phase='zero', fir_design='firwin')", "def source_freq(self) -> int:" ]
[ "0.60981953", "0.56942517", "0.5577544", "0.5543279", "0.55281085", "0.5500619", "0.5476223", "0.5473143", "0.54327935", "0.5337489", "0.5303522", "0.5286453", "0.52189946", "0.521232", "0.5156549", "0.51182044", "0.51102227", "0.50913894", "0.5088179", "0.5085512", "0.50194836", "0.5018264", "0.4991428", "0.49619165", "0.49598998", "0.49554652", "0.494272", "0.4938644", "0.49384308", "0.49173498" ]
0.69544667
0
Function to apply a probability filter to land cover probabilities in each image of imageCollection. The user defines which classes will be filtered and how to filter them in the params list. The params list is a list of dictionaries, one for each class the user wants to filter.
def applyProbabilityCutoffs(imageCollection, params): #Define function to map across imageCollection def probabilityFilter(image): #Get the classifications from the class with the highest probability classifications = npv.probabilityToClassification(image) #Loop through parameters for param in params: #Load parameter values class_name = param.get('class_name') class_value = param.get('class_value') filter_name = param.get('filter') threshold = param.get('threshold') if filter_name=='gt': #Find where the class_name is greater than threshold prob_mask = image.select(class_name).gt(ee.Image.constant(threshold)) #Replace those pixels with the class value classifications = classifications.where(prob_mask,class_value) elif filter_name=='gte': #Find where the class_name is greater than or equal to threshold prob_mask = image.select(class_name).gte(ee.Image.constant(threshold)) #Replace those pixels with the class value classifications = classifications.where(prob_mask,class_value) elif filter_name == 'lte': #Find where the class_name is less than or equal to threshold prob_mask = image.select(class_name).lte(ee.Image.constant(threshold)) #Find where classifications are equal to class value class_mask = classifications.eq(class_value) #We only want to replace pixels where the class probability<=threshold AND classification==class_value reclass_mask = prob_mask.bitwiseAnd(class_mask) #Define square kernel of surrounding pixels kernel = ee.Kernel.square(1) #Convert to a multiband image, one band for each neighbor neighs = classifications.neighborhoodToBands(kernel) #Reduce to find the majority class in neighborhood majority = neighs.reduce(ee.Reducer.mode()) #Replace pixels where the class probability<=threshold AND classification==class_value with the neighborhood majority class classifications = classifications.where(reclass_mask,majority) else: #Find where the class_name is less than or equal to threshold prob_mask = image.select(class_name).lt(ee.Image.constant(threshold)) #Find where classifications are equal to class value class_mask = classifications.eq(class_value) #We only want to replace pixels where the class probability<=threshold AND classification==class_value reclass_mask = prob_mask.bitwiseAnd(class_mask) #Define square kernel of surrounding pixels kernel = ee.Kernel.square(1) #Convert to a multiband image, one band for each neighbor neighs = classifications.neighborhoodToBands(kernel) #Reduce to find the majority class in neighborhood majority = neighs.reduce(ee.Reducer.mode()) #Replace pixels where the class probability<=threshold AND classification==class_value with the neighborhood majority class classifications = classifications.where(reclass_mask,majority) return ee.Image(classifications) return ee.ImageCollection(imageCollection.map(probabilityFilter))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyFrequencyFilter(image, yearBandNames, classDictionary, filterParams): \n #Grab land cover classes as a list of strings\n lc_classes = classDictionary.keys().getInfo()\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Get the frequency of each class through the years by reducing the image collection to an image using the sum reducer\n class_frequency = binary_class_images.reduce(ee.Reducer.sum().unweighted()).rename(lc_classes)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Define an image to add bands with frequency filter applied\n out_img = ee.Image()\n \n #Loop through years\n for yearBand in yearBandNames:\n #Select the target year from the image\n yearImage = image.select(yearBand)\n \n #Loop through land cover classes in filterParams\n for lc_class in lc_classes:\n #Get the minimum occurance allowed in that land cover class\n min_occurance = filterParams.get(lc_class)\n \n #Find if the land cover class had less than the number of min_occurances in each pixel\n change_class = class_frequency.select(lc_class).lt(ee.Image.constant(min_occurance))\n \n #If change_class==1, then replace that pixel with the mode of all the years in that pixel\n #This filter is only applied to pixels of this land cover class\n #First mask yearImage to pixels of this land cover class, then get the union of pixels where change_class==1,\n #if both conditions are true, then the pixel is replaced with the mode\n yearImage = yearImage.where(yearImage.eq(ee.Number(classDictionary.get(lc_class))).bitwiseAnd(change_class),mode_image)\n #Rename yearImage to bandName\n yearImage = yearImage.rename(yearBand)\n #Append to output image\n out_img = out_img.addBands(yearImage)\n \n return out_img", "def filter_data(self):\n if(self.filter_classes == []):\n return\n \n filtered_idx = []\n for id in range(len(self.image_ids)):\n anns = self.load_annotations(id)\n found = False\n for ann in anns:\n if ann['label'] in self.filter_classes:\n found = True\n break\n if found:\n filtered_idx.append(id)\n \n self.filtered_ids = [self.image_ids[id] for id in filtered_idx]\n # self.image_ids = self.filtered_ids\n print(\"Number of filtered instances:\", len(self.filtered_ids))", "def filtro_probs(prediccion,p_min):\n clases = []\n for probabilidad in prediccion:\n if probabilidad[1]>=p_min:\n clases.append(probabilidad)\n else:\n clases.append(\"-\")\n return clases", "def filters(im, filter_list=[\"MedianFilter\"]):\n out = im\n for filter_name in filter_list:\n out = out.filter(getattr(ImageFilter, filter_name))\n return out", "def filter_classes(class_ints, class_list, class_filt):\n class_names = [class_list[int(c)] for c in class_ints]\n filter = [name in class_filt for name in class_names]\n return np.array(filter)", "def compute(self,filter_name):\n self.result = []\n for img in self.imgs:\n r = filters_dict[filter_name](img)\n if \"threshold\" in filter_name:\n r = img>r\n r = 1.0*r.copy()\n self.result.append(r)", "def filterp(th,ProbClass1):\n y=np.zeros(ProbClass1.shape[0])\n for i,v in enumerate(ProbClass1):\n if ProbClass1[i]>th:\n y[i]=1\n return y", "def apply_filter(self, image):\n pass", "def filter_detections(detections, arg_to_class, conf_thresh=0.5):\n num_classes = detections.shape[0]\n filtered_detections = []\n for class_arg in range(1, num_classes):\n class_detections = 
detections[class_arg, :]\n confidence_mask = np.squeeze(class_detections[:, -1] >= conf_thresh)\n confident_class_detections = class_detections[confidence_mask]\n if len(confident_class_detections) == 0:\n continue\n class_name = arg_to_class[class_arg]\n for confident_class_detection in confident_class_detections:\n coordinates = confident_class_detection[:4]\n score = confident_class_detection[4]\n detection = Box2D(coordinates, score, class_name)\n filtered_detections.append(detection)\n return filtered_detections", "def filterp(th, ProbClass1):\n y = np.zeros(ProbClass1.shape[0])\n for i, v in enumerate(ProbClass1):\n if ProbClass1[i] > th:\n y[i] = 1\n return y", "def filters(im, detail=False, sharpen=False, **kwargs):\n filters = []\n if detail:\n filters.append(('detail', True))\n if sharpen:\n filters.append(('sharpen', True))\n return im", "def compute_classifications(depc, gid_list, config=None):\n logger.info('[ibs] Process Image Classifications')\n logger.info('config = {!r}'.format(config))\n # Get controller\n ibs = depc.controller\n depc = ibs.depc_image\n if config['classifier_algo'] in ['cnn']:\n config_ = {\n 'draw_annots': False,\n 'thumbsize': (192, 192),\n }\n thumbnail_list = depc.get_property('thumbnails', gid_list, 'img', config=config_)\n result_list = ibs.generate_thumbnail_class_list(thumbnail_list, **config)\n elif config['classifier_algo'] in ['svm']:\n from wbia.algo.detect.svm import classify\n\n config_ = {'algo': 'resnet'}\n vector_list = depc.get_property('features', gid_list, 'vector', config=config_)\n classifier_weight_filepath = config['classifier_weight_filepath']\n result_list = classify(vector_list, weight_filepath=classifier_weight_filepath)\n elif config['classifier_algo'] in ['densenet']:\n from wbia.algo.detect import densenet\n\n config_ = {\n 'draw_annots': False,\n 'thumbsize': (densenet.INPUT_SIZE, densenet.INPUT_SIZE),\n }\n thumbpath_list = ibs.depc_image.get(\n 'thumbnails', gid_list, 'img', config=config_, read_extern=False, ensure=True\n )\n result_list = densenet.test(thumbpath_list, ibs=ibs, gid_list=gid_list, **config)\n elif config['classifier_algo'] in ['tile_aggregation', 'tile_aggregation_quick']:\n classifier_weight_filepath = config['classifier_weight_filepath']\n classifier_weight_filepath = classifier_weight_filepath.strip().split(';')\n\n assert len(classifier_weight_filepath) == 2\n classifier_algo_, model_tag_ = classifier_weight_filepath\n\n include_grid2 = config['classifier_algo'] in ['tile_aggregation']\n tid_list = ibs.scout_get_valid_tile_rowids(\n gid_list=gid_list, include_grid2=include_grid2\n )\n ancestor_gid_list = ibs.get_tile_ancestor_gids(tid_list)\n confidence_list = ibs.scout_wic_test(\n tid_list, classifier_algo=classifier_algo_, model_tag=model_tag_\n )\n\n gid_dict = {}\n for ancestor_gid, tid, confidence in zip(\n ancestor_gid_list, tid_list, confidence_list\n ):\n if ancestor_gid not in gid_dict:\n gid_dict[ancestor_gid] = []\n gid_dict[ancestor_gid].append(confidence)\n\n result_list = []\n for gid in tqdm.tqdm(gid_list):\n gid_confidence_list = gid_dict.get(gid, None)\n assert gid_confidence_list is not None\n best_score = np.max(gid_confidence_list)\n\n if best_score >= 0.5:\n best_key = 'positive'\n else:\n best_key = 'negative'\n best_score = 1.0 - best_score\n\n result = (\n best_score,\n best_key,\n )\n result_list.append(result)\n elif config['classifier_algo'] in ['densenet+neighbors']:\n raise NotImplementedError\n # ut.embed()\n # classifier_weight_filepath = 
config['classifier_weight_filepath']\n\n # all_bbox_list = ibs.get_image_bboxes(gid_list)\n # wic_confidence_list = ibs.scout_wic_test(gid_list, classifier_algo='densenet',\n # model_tag=classifier_weight_filepath)\n #\n # ancestor_gid_list = list(set(ibs.get_tile_ancestor_gids(gid_list)))\n # all_tile_list = list(set(ibs.scout_get_valid_tile_rowids(gid_list=ancestor_gid_list)))\n # all_bbox_list = ibs.get_image_bboxes(all_tile_list)\n # all_confidence_list = ibs.scout_wic_test(all_tile_list, classifier_algo='densenet',\n # model_tag=classifier_weight_filepath)\n #\n # TODO: USE THRESHOLDED AVERAGE, NOT MAX\n # result_list = []\n # for gid, wic_confidence in zip(gid_list, wic_confidence_list):\n # best_score = wic_confidence\n # for aid in aid_list:\n # wic_confidence_ = aid_conf_dict.get(aid, None)\n # assert wic_confidence_ is not None\n # best_score = max(best_score, wic_confidence_)\n #\n # if wic_confidence < 0.5:\n # best_key = 'negative'\n # best_score = 1.0 - best_score\n # else:\n # best_key = 'positive'\n # if best_score > wic_confidence:\n # recovered += 1\n # result = (best_score, best_key, )\n # result_list.append(result)\n elif config['classifier_algo'] in ['scout_detectnet']:\n import json\n\n json_filepath = join(ibs.dbdir, config['classifier_weight_filepath'])\n assert exists(json_filepath)\n with open(json_filepath, 'r') as json_file:\n values = json.load(json_file)\n annotations = values.get('annotations', {})\n\n gpath_list = ibs.get_image_paths(gid_list)\n gname_list = [split(gpath)[1] for gpath in gpath_list]\n\n result_list = []\n for gname in gname_list:\n annotation = annotations.get(gname, None)\n assert annotation is not None\n\n best_score = 1.0\n if len(annotation) == 0:\n best_key = 'negative'\n else:\n best_key = 'positive'\n result = (\n best_score,\n best_key,\n )\n result_list.append(result)\n elif config['classifier_algo'] in ['scout_detectnet_csv', 'scout_faster_rcnn_csv']:\n uuid_str_list = list(map(str, ibs.get_image_uuids(gid_list)))\n\n manifest_filepath = join(ibs.dbdir, 'WIC_manifest_output.csv')\n csv_filepath = join(ibs.dbdir, config['classifier_weight_filepath'])\n\n assert exists(manifest_filepath)\n assert exists(csv_filepath)\n\n manifest_dict = {}\n with open(manifest_filepath, 'r') as manifest_file:\n manifest_file.readline() # Discard column header row\n manifest_line_list = manifest_file.readlines()\n for manifest_line in manifest_line_list:\n manifest = manifest_line.strip().split(',')\n assert len(manifest) == 2\n manifest_filename, manifest_uuid = manifest\n manifest_dict[manifest_filename] = manifest_uuid\n\n csv_dict = {}\n with open(csv_filepath, 'r') as csv_file:\n csv_file.readline() # Discard column header row\n csv_line_list = csv_file.readlines()\n for csv_line in csv_line_list:\n csv = csv_line.strip().split(',')\n assert len(csv) == 2\n csv_filename, csv_score = csv\n csv_uuid = manifest_dict.get(csv_filename, None)\n assert (\n csv_uuid is not None\n ), 'Test image {!r} is not in the manifest'.format(\n csv,\n )\n csv_dict[csv_uuid] = csv_score\n\n result_list = []\n for uuid_str in uuid_str_list:\n best_score = csv_dict.get(uuid_str, None)\n assert best_score is not None\n\n if config['classifier_algo'] in ['scout_detectnet_csv']:\n assert best_score in ['yes', 'no']\n best_key = 'positive' if best_score == 'yes' else 'negative'\n best_score = 1.0\n elif config['classifier_algo'] in ['scout_faster_rcnn_csv']:\n best_score = float(best_score)\n if best_score >= 0.5:\n best_key = 'positive'\n else:\n best_key = 'negative'\n 
best_score = 1.0 - best_score\n else:\n raise ValueError\n\n result = (\n best_score,\n best_key,\n )\n result_list.append(result)\n elif config['classifier_algo'] in [\n 'lightnet',\n 'densenet+lightnet',\n 'densenet+lightnet!',\n ]:\n min_area = 10\n\n classifier_weight_filepath = config['classifier_weight_filepath']\n classifier_weight_filepath = classifier_weight_filepath.strip().split(',')\n\n if config['classifier_algo'] in ['lightnet']:\n assert len(classifier_weight_filepath) == 2\n weight_filepath, nms_thresh = classifier_weight_filepath\n wic_thresh = 0.0\n nms_thresh = float(nms_thresh)\n wic_confidence_list = [np.inf] * len(gid_list)\n wic_filter = False\n elif config['classifier_algo'] in ['densenet+lightnet', 'densenet+lightnet!']:\n assert len(classifier_weight_filepath) == 4\n (\n wic_model_tag,\n wic_thresh,\n weight_filepath,\n nms_thresh,\n ) = classifier_weight_filepath\n wic_thresh = float(wic_thresh)\n nms_thresh = float(nms_thresh)\n wic_confidence_list = ibs.scout_wic_test(\n gid_list, classifier_algo='densenet', model_tag=wic_model_tag\n )\n wic_filter = config['classifier_algo'] in ['densenet+lightnet']\n else:\n raise ValueError\n\n flag_list = [\n wic_confidence >= wic_thresh for wic_confidence in wic_confidence_list\n ]\n if wic_filter:\n gid_list_ = ut.compress(gid_list, flag_list)\n else:\n gid_list_ = gid_list[:]\n config = {\n 'grid': False,\n 'algo': 'lightnet',\n 'config_filepath': weight_filepath,\n 'weight_filepath': weight_filepath,\n 'nms': True,\n 'nms_thresh': nms_thresh,\n 'sensitivity': 0.0,\n }\n prediction_list = depc.get_property(\n 'localizations', gid_list_, None, config=config\n )\n prediction_dict = dict(zip(gid_list_, prediction_list))\n\n result_list = []\n for gid, wic_confidence, flag in zip(gid_list, wic_confidence_list, flag_list):\n if not flag:\n best_key = 'negative'\n best_score = 1.0 - wic_confidence\n else:\n prediction = prediction_dict.get(gid, None)\n assert prediction is not None\n\n best_score = 0.0\n if prediction is not None:\n score, bboxes, thetas, confs, classes = prediction\n for bbox, conf in zip(bboxes, confs):\n xtl, ytl, w, h = bbox\n area = w * h\n if area >= min_area:\n best_score = max(best_score, conf)\n\n if best_score >= 0.5:\n best_key = 'positive'\n else:\n best_key = 'negative'\n best_score = 1.0 - best_score\n result = (\n best_score,\n best_key,\n )\n result_list.append(result)\n else:\n raise ValueError(\n 'specified classifier algo is not supported in config = {!r}'.format(config)\n )\n\n # yield detections\n for result in result_list:\n yield result", "def class_imgs(list_img):\n numberimg = len(list_img)\n resize(net, numberimg, cursize)\n i = 0\n for img in list_img:\n image = caffe.io.load_image(img)\n transformed_image = transformer.preprocess('data', image)\n net.blobs['data'].data[i] = transformed_image\n i = i + 1\n\n output = net.forward()\n\n results = []\n for n in range(0, numberimg):\n themax = output['prob'][n].argmax()\n results.append({'filename':list_img[n], 'class': themax, 'prob': output['prob'][n].tolist()})\n\n return results", "def run(self, images):\n\n # Apply filtering\n if len(self.preprocessing) > 0: \n print('Applying', len(self.preprocessing), 'filter(s) to input images')\n for filter in self.preprocessing:\n for i in range(len(images)):\n images[i] = filter(images[i])\n\n # Apply feature extraction\n if len(self.features) > 0:\n print('Extracting', len(self.features), 'feature(s) from input images')\n scaler = MinMaxScaler(feature_range=(0, 1))\n for i in 
range(len(images)):\n features = []\n for feature in self.features:\n features.append(feature(images[i]))\n images[i] = np.hstack(features)\n images = scaler.fit_transform(images)\n else:\n # Flatten images (not necessary when using feature extraction)\n train_data = np.array(train_data).reshape((len(train_data), -1))\n\n # Run predictions\n print('Predicting presence of parasites in', len(images), 'images\\n')\n return self.classifier.predict(images)", "def filter(self, filters):", "def selection_profiles_by_chance(true, compare):\n n_neurons, M = true.shape\n probabilities = np.zeros(n_neurons)\n\n for neuron in range(n_neurons):\n n = np.count_nonzero(true[neuron])\n N = np.count_nonzero(compare[neuron])\n rv = hypergeom(M=M, n=n, N=N)\n\n overlap = np.count_nonzero(true[neuron] * compare[neuron])\n probabilities[neuron] = 1 - rv.cdf(x=overlap)\n\n return probabilities", "def filter_prediction(boxes, probs, cls_idx): \n if cfg.TOP_N_DETECTION < len(probs) and cfg.TOP_N_DETECTION > 0:\n order = probs.argsort()[:-cfg.TOP_N_DETECTION-1:-1]\n probs = probs[order]\n boxes = boxes[order]\n cls_idx = cls_idx[order]\n else:\n filtered_idx = np.nonzero(probs > cfg.PROB_THRESHOLD)[0]\n probs = probs[filtered_idx]\n boxes = boxes[filtered_idx]\n cls_idx = cls_idx[filtered_idx]\n\n final_boxes = []\n final_probs = []\n final_cls_idx = []\n\n for c in range(cfg.NUM_CLASSES):\n idx_per_class = [i for i in range(len(probs)) if cls_idx[i] == c]\n keep = nms(boxes[idx_per_class], probs[idx_per_class], cfg.NMS_THRESHOLD)\n for i in range(len(keep)):\n if keep[i]:\n final_boxes.append(boxes[idx_per_class[i]])\n final_probs.append(probs[idx_per_class[i]])\n final_cls_idx.append(c)\n return final_boxes, final_probs, final_cls_idx", "def classify_images():\n\n # Load the desired image\n img_path = 'dataset/colorize_images/n02085782_919.jpg'\n img = image.load_img(img_path, target_size=(299, 299))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n\n model = InceptionV3(weights=\"imagenet\")\n preds = model.predict(x)\n # decode the results into a list of tuples (class, description, probability)\n # (one such list for each sample in the batch)\n print('Predicted:', decode_predictions(preds, top=3)[0])", "def classify_image_proba(image, model, image_box=None):\n images_list = []\n image = image.resize((IMAGE_WIDTH, IMAGE_HEIGHT), box=image_box)\n # box argument clips image to (x1, y1, x2, y2)\n image = np.array(image)\n images_list.append(image)\n \n return [np.amax(model.predict(np.array(images_list)))]", "def selection_profiles_by_chance(fits_path, dataset1, dataset2):\n fits = h5py.File(fits_path, 'r')\n true = np.median(fits[dataset1]['coupling_coefs'][:], axis=0)\n compare = np.median(fits[dataset2]['coupling_coefs'][:], axis=0)\n\n n_neurons, M = true.shape\n probabilities = np.zeros(n_neurons)\n\n for neuron in range(n_neurons):\n n = np.count_nonzero(true[neuron])\n N = np.count_nonzero(compare[neuron])\n rv = hypergeom(M=M, n=n, N=N)\n\n overlap = np.count_nonzero(true[neuron] * compare[neuron])\n probabilities[neuron] = 1 - rv.cdf(x=overlap)\n\n return probabilities", "def OF1_CalculateThresholdValues(param_list, classNum):\n thresholdValues = [(-1., -1.) 
for _ in range(classNum-1)] # np.arange(classNum - 1)\n #numRow = sp.math.factorial(classNum-1)\n #numCol = classNum-1\n #thresholdValues = np.arange(numCol*numRow).reshape(numRow, numCol)\n indexOrder = np.argsort(param_list[classNum:classNum * 2])\n\n P = [param_list[indexOrder[i]] for i in range(classNum)]\n my = np.sort(param_list[classNum:classNum * 2])\n sigma = [param_list[classNum * 2 + indexOrder[i]] for i in range(classNum)]\n\n for i in range(classNum - 1):\n a = sigma[i] ** 2 - sigma[i + 1] ** 2\n b = 2 * ( my[i] * ( sigma[i + 1] ** 2 ) - my[i + 1] * ( sigma[i] ** 2 ) )\n c = ( sigma[i] * my[i + 1] ) ** 2 - ( sigma[i + 1] * my[i] ) ** 2 + 2 * ( ( sigma[i] * sigma[i + 1] ) ** 2 ) * math.log(( ( sigma[i + 1] * P[i] ) / ( sigma[i] * P[i + 1] ) ))\n\n p = np.poly1d([a, b, c], False, \"T\")\n p_roots = np.roots(p)\n\n if p_roots.size == 1:\n thresholdValues[i] = (np.real(p_roots[0]), -1)\n else:\n r1 = np.real(p_roots[0])\n r2 = np.real(p_roots[1])\n if (r1 == r2) or (r2 < 0.) or (r2 > 255.):\n thresholdValues[i] = (r1, -1)\n elif (r1 < 0) or (r1 > 255):\n thresholdValues[i] = (r2, -1)\n else:\n thresholdValues[i] = (r1, r2)\n #r1 = np.amin(p_roots)\n #r2 = np.amax(p_roots)\n #if i > 0:\n #if r1 >= thresholdValues[i-1]:\n #thresholdValues[i] = r1\n #else:\n #thresholdValues[i] = r2\n #else:\n #if (r1 >= my[i]) and (r1 < my[i+1]):\n #thresholdValues[i] = r1\n #else:\n #thresholdValues[i] = r2\n\n return thresholdValues", "def apply_filter(image: np.ndarray) -> np.ndarray:\n # choose filters to apply\n return clahe(image)", "def classify(priors, likelihoods, testData, classes):\r\n results = []\r\n for document in testData:\r\n bestClass = None\r\n bestProb = None\r\n currentProb = 0.0\r\n for cls in classes:\r\n prior = priors[cls]\r\n currentProb = log(prior)\r\n lhoods = likelihoods[cls]\r\n for (word, count) in document:\r\n if word in lhoods:\r\n currentProb += log(lhoods[word])\r\n else:\r\n currentProb += log(lhoods[None])\r\n if currentProb > bestProb or bestClass == None:\r\n bestProb = currentProb\r\n bestClass = cls\r\n results.append(bestClass)\r\n return results", "def classify_image_probavec(image, model, image_box=None):\n images_list = []\n image = image.resize((IMAGE_WIDTH, IMAGE_HEIGHT), box=image_box)\n # box argument clips image to (x1, y1, x2, y2)\n image = np.array(image)\n images_list.append(image)\n \n return model.predict(np.array(images_list))", "def inference(self, img, probe_roi=None, threshold=0.75):\n device = self.cls_score.weight.device\n processed_img, scale = img_preprocessing(img)\n # [C, H, W] -> [N, C, H, W]\n processed_img = torch.from_numpy(processed_img).unsqueeze(0).to(device)\n # img_info: (height, width, scale)\n img_info = torch.Tensor([processed_img.shape[2], processed_img.shape[3], scale]).to(device)\n if probe_roi is not None:\n probe_roi = torch.from_numpy(probe_roi).float().view(1, 4)\n probe_roi *= scale\n # Add an extra 0, which means the probe_roi is from the first image in the batch\n probe_roi = torch.cat((torch.zeros(1, 1), probe_roi.float()), dim=1).to(device)\n\n with torch.no_grad():\n proposals, probs, proposal_deltas, features, _, _, _, _, _ = self.forward(\n processed_img, img_info, None, probe_roi\n )\n\n if probe_roi is not None:\n return features\n\n # Unscale proposals back to raw image space\n proposals = proposals[:, 1:5] / scale\n # Unnormalize proposal deltas\n num_classes = proposal_deltas.shape[1] // 4\n stds = torch.Tensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).repeat(num_classes).to(device)\n means = 
torch.Tensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).repeat(num_classes).to(device)\n proposal_deltas = proposal_deltas * stds + means\n # Apply proposal regression deltas\n boxes = bbox_transform_inv(proposals, proposal_deltas)\n boxes = clip_boxes(boxes, img.shape)\n\n # Remove those boxes with scores below the threshold\n j = 1 # Only consider foreground class\n keep = torch.nonzero(probs[:, j] > threshold, as_tuple=False)[:, 0]\n boxes = boxes[keep, j * 4 : (j + 1) * 4]\n probs = probs[keep, j]\n features = features[keep]\n\n # Remove redundant boxes with NMS\n detections = torch.cat((boxes, probs.unsqueeze(1)), dim=1)\n keep = nms(boxes, probs, cfg.TEST.NMS)\n detections = detections[keep]\n features = features[keep]\n\n return detections, features", "def adjusted_classes(pred_prob, threshold):\n return [1 if y >= threshold else 0 for y in pred_prob]", "def detect_objects(image, threshold, classes_incl=None):\n set_input_tensor(image)\n interpreter.invoke()\n\n # Get all output details\n boxes = get_output_tensor(0)\n classes = get_output_tensor(1)\n scores = get_output_tensor(2)\n count = int(get_output_tensor(3))\n\n results = []\n for i in range(count):\n if scores[i] >= threshold:\n result = {\n 'bounding_box': boxes[i],\n 'class_id': int(classes[i]),\n 'score': scores[i]\n }\n if not classes_incl:\n results.append(result)\n elif classes[i] in classes_incl:\n results.append(result)\n return results", "def on_image(image):\n objects = [obj for obj in coco.classify(image) if obj.confidence > config.OBJECT_CONFIDENCE_THRESHOLD]\n queue.put((image, objects))", "def apply_2_class_filterV4(pred_csv, out_csv, filter_info_file, thr=0.08):\n df_pred = pd.read_csv(pred_csv)\n df_filter = pd.read_csv(filter_info_file)\n pred_strs = df_pred['PredictionString'].tolist()\n img_ids = df_pred['image_id'].tolist()\n\n num_normal = 0\n for idx in tqdm(range(len(pred_strs))):\n im_id = img_ids[idx]\n cls_score = df_filter[df_filter['image_id'] == im_id]['target'].tolist()[0]\n if cls_score < thr: # No finding\n pred_strs[idx] = '14 1 0 0 1 1'\n num_normal += 1\n print('number of No finding images: ', num_normal)\n\n df_save = pd.DataFrame()\n df_save['image_id'] = img_ids\n df_save['PredictionString'] = pred_strs\n df_save.to_csv(out_csv, index=False)\n print('all done!')", "def detect_objects(interpreter, image):\n set_input_tensor(interpreter, image)\n interpreter.invoke()\n\n # Get all output details\n boxes = get_output_tensor(interpreter, 0)\n classes = get_output_tensor(interpreter, 1)\n scores = get_output_tensor(interpreter, 2)\n count = int(get_output_tensor(interpreter, 3))\n\n results = []\n for i in range(count):\n # Only check for people that meet the threshold\n if classes[i] == 0.0 and scores[i] >= THRESHOLD:\n result = {\n \"bounding_box\": boxes[i],\n \"class_id\": classes[i],\n \"score\": scores[i],\n }\n results.append(result)\n return results" ]
[ "0.6045485", "0.5797399", "0.5739651", "0.57369685", "0.5731208", "0.56458217", "0.55604017", "0.55141634", "0.5474806", "0.5470038", "0.5451431", "0.5425758", "0.53987706", "0.53475237", "0.5286839", "0.52788055", "0.52607375", "0.5203152", "0.5157591", "0.5144034", "0.5125432", "0.51221365", "0.511948", "0.51067436", "0.5079235", "0.5066152", "0.5060088", "0.5052882", "0.50246495", "0.49993414" ]
0.79373574
0
Returns the number of features in the processed data. Returns int Feature size.
def get_num_features(self): return len(self[0]['x'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def features_size(self) -> int:\n return len(self.data[0].features) if len(self.data) > 0 and self.data[0].features is not None else None", "def getNrFeatures(self):\n return self.featureNames.size", "def num_features(self):\n if self.x is None:\n return 0\n return 1 if self.x.dim() == 1 else self.x.size(1)", "def get_num_features(self, ndim: int) -> int:\n nb_features = 0\n for feature_group in self.features_group_list:\n nb_features += feature_group.num_features(ndim)\n return nb_features", "def get_n_features(self):\n # +1 due to dummy bit\n return self.model.n_latent_features + 1", "def nr_features(self):\n if self.is_predict_only:\n return clib.xlinear_get_int_attr(self.model_chain, \"nr_features\")\n else:\n return self.model_chain[0].nr_features", "def features_size(self) -> int:\n return None", "def n_features(self):\n return self.components.shape[-1]", "def num_features(self) -> Dict[NodeType, int]:\n return self.num_node_features", "def num_feature(self):\n if self.handle is None:\n raise AttributeError('Model not loaded yet')\n out = ctypes.c_size_t()\n _check_call(_LIB.TreeliteQueryNumFeature(self.handle, ctypes.byref(out)))\n return out.value", "def num_node_features(self):\n return self[0].num_node_features", "def num_node_features(self) -> int:\n data, _, _ = self[0]\n if hasattr(data, 'num_node_features'):\n return data.num_node_features\n raise AttributeError(f\"'{data.__class__.__name__}' object has no \"\n f\"attribute 'num_node_features'\")", "def feature_len(self):\n return len(self.coord)", "def _n_features_out(self):\n return self.components_.shape[0]", "def num_flat_features(self, x):\n size = x.size()[1:] # all dimensions except the batch dimension\n num_features = 1\n for s in size:\n num_features *= s\n return num_features", "def num_flat_features(self, x):\n\n size = x.size()[1:] # All dimensions except batch dimension\n num_features = 1\n for s in size:\n num_features *= s\n\n return num_features", "def __len__(self):\n return len(self.features)", "def size(self):\r\n return len(self._train_datas)", "def feature_size(self):\n return self.fingerprint_length", "def n_good_features_(self):\n return np.sum(self.important_features_)", "def size(self):\n return _libsbml.ListOfSpeciesFeatures_size(self)", "def feature_dim(self):\n raise NotImplementedError", "def get_train_data_size(self):\n return len(self.pipeline.data['train'])", "def num_flat_features(x):\n\n size = x.size()[1:] # All dimensions except batch dimension\n num_features = 1\n for s in size:\n num_features *= s\n\n return num_features", "def num_edge_features(self) -> int:\n data, _, _ = self[0]\n if hasattr(data, 'num_edge_features'):\n return data.num_edge_features\n raise AttributeError(f\"'{data.__class__.__name__}' object has no \"\n f\"attribute 'num_edge_features'\")", "def num_edge_features(self):\n return self[0].num_edge_features", "def num_flat_features(self, x):\n return int(np.prod(x.size()[1:]))", "def dim(self):\n if self._classifier is None:\n with self:\n return self._classifier.features_dim\n\n return self._classifier.features_dim", "def count(self):\r\n return self.data_array.size", "def count_unique_features(self):\n return N_UNIQUE_FEATS" ]
[ "0.8408814", "0.83368707", "0.8241575", "0.8142289", "0.80863315", "0.80641556", "0.8048011", "0.79128486", "0.7869567", "0.7836357", "0.78265285", "0.78225327", "0.77721834", "0.7672442", "0.75243306", "0.74740946", "0.7469065", "0.7446943", "0.74352556", "0.7432964", "0.7424912", "0.73855555", "0.73197794", "0.7315663", "0.7306298", "0.7294315", "0.7281277", "0.72633004", "0.7188402", "0.7157994" ]
0.8641283
0
Returns the index corresponding to the given class label.
def lookup_class_idx(self,label): return self.class_labels[label]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_class_index(self, label):\n assert label in CLASSES\n return CLASSES.index(label)", "def get_class_index(label):\n if isinstance(label,str) is False:\n basic.outputlogMessage('input label must be a string')\n assert(False)\n length = len(class_label)\n for i in range(0,length):\n if label.lower()==class_label[i]:\n return i\n #if not found\n basic.outputlogMessage('class label: %s not found in the class list'%label)\n assert(False)\n return False", "def label_index(self, label: Text) -> int:\n count = 0\n for l in self.le.classes_:\n if(l == label):\n return count\n count += 1", "def labelIndex(self, label):\n for idx, taskDef in enumerate(self):\n if taskDef.label == label:\n return idx\n return -1", "def get_index(self, label):\n if label in self.labels:\n return self.labels.index(label)\n else:\n self.labels.append(label)\n return self.labels.index(label)", "def get_class_label(index):\n if isinstance(index,str):\n index = int(index)\n # print(type(index))\n if index < len(class_label):\n return class_label[index]\n basic.outputlogMessage('class index: %d not found in the class list' % index)\n assert (False)\n return False", "def label_index(self):\n return self._label_index", "def label_index(self):\n return self._label_index", "def fromLabel(name):\n return Data.labels.index(name)", "def encode_label(self, label: str) -> int:\n return self.class_map[label]", "def _extract_class(labels: List[int], class_index: int):\n class_ids = [i for i, label in enumerate(labels) if label == class_index]\n return class_ids", "def get_label_2_index(self, label):\n return self._labels_2_index.get(label, 0) # return unknown index when not found", "def indices_of_label(self, label_name):\n return self.indices_of('label', label_name)", "def get_index_of_class(self, y, to_torch=False):\n\n # init labels\n y_idx = torch.empty(y.shape, dtype=torch.long)\n\n for i, yi in enumerate(y):\n\n # get index\n idx = np.where(np.array(self.classes) == yi)[0]\n\n # transfer to torch\n if to_torch:\n y_idx[i] = torch.from_numpy(idx)\n\n return y_idx", "def __get_label_idx__(idx: int) -> int:\n\n label_idx = idx // 100\n label_idx = int(label_idx) if label_idx >= 0 else 0\n\n return label_idx", "def label_from_index(self, index):\n assert self.labels is not None, \"Labels not processed\"\n return self.labels[index]", "def get_index(observable_nodes, label):\n for k in observable_nodes:\n if label in observable_nodes[k]:\n return observable_nodes[k][label]['category']", "def column_index(self, column_label):\n return self.column_labels.index(column_label)", "def label_from_index(self, index):\n assert self.labels is not None, \"Labels not processed\"\n #return self.labels[index, :, :]\n return self.labels[index]", "def get_ind(labels, k):\n return (np.array(labels) == k).astype('float64')", "def get_imagenet_label(index):\n global _CLASS_INDEX\n if _CLASS_INDEX is None:\n with open(os.path.join(os.path.dirname(__file__), '../resources/imagenet_class_index.json')) as f:\n _CLASS_INDEX = json.load(f)\n return _CLASS_INDEX[str(index)][1]", "def class_name_to_id(self, class_name: str):\n\n return self.class_to_idx[str(class_name)]", "def get_label_with_index(labels, index):\n return labels[np.where(labels[:, 0] == index)]", "def label_from_index(self, index):\n raise NotImplementedError", "def fromIndex(index):\n return Data.labels[index]", "def get_train_index():\n data_size = (NUM_CLASS - 1) * NUM_DATA_PER_CLASS\n return np.array([i for i in range(0, data_size)])", "def map_id_to_idx(self, class_ids):\n 
class_idx = torch.zeros(class_ids.shape, dtype=int)\n for k, v in self.id2idx.items():\n class_idx[class_ids == k] = v\n\n class_idx = class_idx.to(device)\n return class_idx", "def find_label(self, *args):\n return _ida_hexrays.cfunc_t_find_label(self, *args)", "def get_instance_idx(self, idx):\n obj_idx = 0\n while idx >= 0:\n idx -= self.num_per_instance_observations[obj_idx]\n obj_idx += 1\n return obj_idx - 1, int(idx + self.num_per_instance_observations[obj_idx - 1])", "def get_instance_idx(self, idx):\n obj_idx = 0\n while idx >= 0:\n idx -= self.num_per_instance_observations[obj_idx]\n obj_idx += 1\n return obj_idx - 1, int(idx + self.num_per_instance_observations[obj_idx - 1])" ]
[ "0.8939161", "0.8781351", "0.8290658", "0.79299194", "0.75193024", "0.7453748", "0.7260702", "0.7260702", "0.7200521", "0.7144214", "0.6997018", "0.6986183", "0.6928649", "0.68628794", "0.67985356", "0.6562468", "0.64826816", "0.6404319", "0.6269193", "0.6264292", "0.62591416", "0.6208201", "0.61772144", "0.61468256", "0.61286426", "0.6051882", "0.6021557", "0.6006552", "0.6004112", "0.6004112" ]
0.9071037
0
Applies a function mapping to each element in the feature data.
def apply_fn(self,fn): self.check_Data() for split,data_ in self.processed_data.items(): x = data_['x'] x = np.array([fn(xi) for xi in x]) data_['x'] = x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def map(self, function=lambda value: value):\n for j, value in enumerate(self):\n self[j] = function(value)", "def map(self, function=lambda item: item):\n for i, row in enumerate(self):\n for j, item in enumerate(row):\n row[j] = function(item)", "def map(self, function):\n pass", "def map(self, function):\n return FunctionalWrapper(map(function, self.data))", "def map(self, map_function, *map_arguments) -> None:\n\n elements = []\n self.__get_sorted_elements(self.__root, elements)\n\n for element in elements:\n map_function(element, *map_arguments)", "def list_map(data, function):\n return list(map(function, data))", "def convert(self, function=pointwise_mi):\n self.normalise()\n feat_prob = Counter()\n for feat_set in self.itervalues():\n for feat in feat_set:\n feat_prob[feat] += feat_set[feat]\n \n for feat_set in self.itervalues():\n code_prob = sum(feat_set.values())\n for feat in feat_set:\n feat_set[feat] = function(code_prob, feat_prob[feat], feat_set[feat])\n return self", "def apply(self, fn, column_label):\n return [fn(v) for v in self[column_label]]", "def Map(dataset, map_func, input_columns=None):\n return dataset.map(map_func)", "def map(self, func):\n return _(map(func, self._))", "def map(self, func):\n return List(map(func, self))", "def applyToEach(L,f):\n for i in range(len(L)):\n L[i] = f(L[i])", "def map(function, iterable):\n\n return [function(x) for x in iterable]", "def applymap(self, func, *args, **kwargs):\n return DataFrameDefault.register(pandas.DataFrame.applymap)(\n self, func, *args, **kwargs\n )", "def map(self, fn, *iterables, **kwargs):\n fn = self._prepare_fn(fn)\n return self._self.map(fn, *iterables, **kwargs)", "def map_my(self, func: Callable[[Union[float, int]], int]) -> None:\n def list_func(lst: List[valueType]) -> List[valueType]:\n \"\"\"\n To apply the function/operation defined by users to every item in the list.\n :param lst: A list object like [element1, [element2, element3], element4].\n :return: A list that store the result of items after user-defined operation.\n \"\"\"\n tmp = [] # type: List[valueType]\n for e in lst:\n if isinstance(e, (list, set, tuple)):\n tmp.append(list_func(list(e)))\n else:\n if isinstance(e, (float, int)):\n tmp.append(func(e))\n else:\n raise Exception\n return tmp\n\n for head_node in self.hashTable:\n for node in head_node.singlyLinkedList:\n node.values = list_func(node.values)", "def map(iterable, function):\n for x in iterable:\n yield function(x)", "def applyMapping(self):\n pass", "def mapf( f, C ):\n return (f(x) for x in C)", "def _map_fn(self):\n raise NotImplementedError", "def map_functions(x, functions):\n res = []\n for func in functions:\n res.append(map(func,x))\n return res", "def simple_map(f, l):\n # Again, my first take is a list comprehension.\n return [ f(item) for item in l ]", "def map(self, func, *sequences):\n return self.mapper().map(func, *sequences)", "def map(self, func: Callable[[T], V]) -> 'List[V]':\n return [func(v) for v in self.array]", "def self_map(self, func: Callable[[dd.Series], Any], **kwargs: Any) -> List[Any]:\n return [func(df, **kwargs) for df in self.data]", "def mapfn(k, v):\n for row in v:\n # completar\n pass", "def foreach(function):\n return partial(map, function)", "def _maplist_vm(vm, f, xs):\n def f_(*args):\n return vm.call(f, args)\n return list(map(f_, xs))", "def map():", "def map_(func, some_list):\n \n result = []\n \n for arg in some_list:\n result.append(func(arg))\n \n return result" ]
[ "0.73664135", "0.7220546", "0.70397", "0.70174754", "0.685345", "0.684726", "0.68348914", "0.6801942", "0.6742367", "0.6730765", "0.6695242", "0.6670184", "0.6574957", "0.6559686", "0.6507356", "0.6469651", "0.6460323", "0.6443097", "0.64101386", "0.64018846", "0.63930345", "0.62695825", "0.6259695", "0.6241182", "0.62201834", "0.62168753", "0.6201429", "0.61587334", "0.6142086", "0.6111248" ]
0.7359208
1
Generates a new MLP using the nn.Sequential class. Returns
def generate(self): components = [] components.append(nn.Linear(self.n_features,self.hidden_sizes[0])) self._activation(components,self.activation) self._dropout(components,self.dropout) for i in range(1,len(self.hidden_sizes)): components.append(nn.Linear(self.hidden_sizes[i-1],self.hidden_sizes[i])) self._activation(components,self.activation) self._dropout(components,self.dropout) components.append(nn.Linear(self.hidden_sizes[-1],self.n_classes)) mlp = nn.Sequential(*components) num_params = sum(p.numel() for p in mlp.parameters() if p.requires_grad) print("Created MLP with "+str(num_params)+" learnable params") return mlp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_mlp_model():\n return snt.Sequential([\n snt.nets.MLP([LATENT_SIZE] * NUM_LAYERS, activate_final=True),\n snt.LayerNorm()\n ])", "def mlp_model(self):\n\n model = Sequential()\n model.add(Dense(self.dense1, input_shape=(784,)))\n model.add(Activation(self.activation))\n model.add(Dropout(self.drop1))\n\n model.add(Dense(self.dense2))\n model.add(Activation(self.activation))\n model.add(Dropout(self.drop2))\n\n model.add(Dense(10))\n model.add(Activation('softmax'))\n\n return model", "def mlp(self):\n # Model.\n model = Sequential()\n model.add(Flatten(input_shape=self.input_shape))\n model.add(Dense(1024))\n model.add(Dropout(0.6))\n model.add(Dense(512))\n model.add(Dropout(0.6))\n model.add(Dense(self.nb_classes, activation='softmax'))\n\n return model", "def MLP_model(self):\n print(\"Building model..\")\n self.model = Sequential()\n\n # first hidden layer (0)\n self.model.add(Dense(self.h_nodes0, input_dim=self.input_size, use_bias=True))\n self.model.add(Activation(self.activation0))\n self.model.add(Dropout(self.dropout0))\n\n # second hidden layer (1)\n if self.h_nodes1 != None:\n self.model.add(Dense(self.h_nodes1, use_bias=True))\n self.model.add(Activation(self.activation1))\n self.model.add(Dropout(self.dropout1))\n\n # third hidden layer (2)\n if self.h_nodes2 != None:\n self.model.add(Dense(self.h_nodes2, use_bias=True))\n self.model.add(Activation(self.activation2))\n self.model.add(Dropout(self.dropout2))\n\n #output layer\n self.model.add(Dense(self.output_size))\n self.model.add(Activation(self.activation_out))\n\n #compile model\n self.model.compile(loss=self.loss, optimizer=self.optimizer, metrics=[R_squared])\n\n return self.model", "def mlp_model():\n\tmodel = Sequential()\n\tmodel.add(Dense(256, activation='relu', input_shape=(X_train_scaled.shape[1], )))\n\tmodel.add(Dropout(0.4))\n\tmodel.add(Dense(256, activation='relu'))\n\tmodel.add(Dropout(0.4))\n\tmodel.add(Dense(FLAGS.nb_classes, activation='softmax'))\n\tmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n\tmodel.summary()\n\treturn model", "def mlp_model2():\n model = Sequential()\n model.add(Dense(256, activation='relu', input_shape=(X_train_scaled.shape[1], )))\n model.add(Dropout(0.2))\n model.add(Dense(128, activation='relu'))\n model.add(Dropout(0.2))\n model.add(Dense(64, activation='relu'))\n model.add(Dropout(0.2))\n model.add(Dense(32, activation='relu'))\n model.add(Dropout(0.2))\n model.add(Dense(FLAGS.nb_classes, activation='softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n model.summary()\n return model", "def modelMLP():\n \n layerSizes = [(3), (10), (10,10,10), (20,50,20)]\n random_state = 20 # Do not change this random_state\n max_iter = 2000 # fixed max_iter\n\n objs_MLP = []\n\n # Create a list of objects for the classifier for each of the above \"layerSizes\"\n for size in layerSizes:\n mlp = MLPClassifier(hidden_layer_sizes=size, max_iter=max_iter, random_state=random_state)\n objs_MLP.append(mlp)\n\n return objs_MLP", "def mlp_model(layers, units, dropout_rate, input_shape, num_classes):\n # https://developers.google.com/machine-learning/guides/text-classification/step-4\n\n op_units, op_activation = _get_last_layer_units_and_activation(num_classes)\n model = models.Sequential()\n model.add(Dropout(rate=dropout_rate, input_shape=input_shape))\n\n for _ in range(layers-1):\n model.add(Dense(units=units, activation='relu'))\n model.add(Dropout(rate=dropout_rate))\n\n 
model.add(Dense(units=op_units, activation=op_activation))\n return model", "def create_model(num_vars, num_categs, hidden_dims, actfn=None):\n num_outputs = max(1, num_categs)\n num_inputs = num_vars\n actfn = get_activation_function(actfn)\n\n mask = InputMask(None)\n if num_categs > 0:\n pre_layers = EmbedLayer(num_vars=num_vars,\n num_categs=num_categs,\n hidden_dim=hidden_dims[0],\n input_mask=mask,\n sparse_embeds=(num_vars >= 50))\n num_inputs = pre_layers.hidden_dim\n pre_layers = [pre_layers, actfn()]\n else:\n pre_layers = mask\n\n mlps = MultivarMLP(input_dims=num_inputs,\n hidden_dims=hidden_dims,\n output_dims=num_outputs,\n extra_dims=[num_vars],\n actfn=actfn,\n pre_layers=pre_layers)\n return mlps", "def build_mlp(input_data, output_data, n_neurons=[512, 256, 128]):\n input_layer = keras.layers.Input([input_data.shape[-1]], name='input-layer')\n for i, n_unit in enumerate(n_neurons):\n if i == 0:\n x = keras.layers.Dense(units=n_unit, activation='relu', name='hidden-layer'+str(i+1))(input_layer)\n else:\n x = keras.layers.Dense(units=n_unit, activation='relu', name='hidden-layer'+str(i+1))(x)\n \n output_layer = keras.layers.Dense(units=output_data.shape[-1],activation='softmax' , name='output-layer')(x)\n model = keras.models.Model(inputs=input_layer, outputs=output_layer)\n return model", "def mlp(\n\t# input_shape: Tuple[int, ...],\n\t# output_shape: Tuple[int, ...],\n\t# layer_size: int = 128,\n\t# dropout_amount: float = 0.2,\n\t# num_layers: int = 3, \n\tnet_config: Dict\n)->Model:\n\tactivation_fn = net_config[\"hyperparams\"][\"activation_fn\"]\n\tinput_s = net_config[\"shapes\"][\"input_shape\"]\n\toutput_s = net_config[\"shapes\"][\"output_shape\"]\n\n\tinputs = keras.Input(shape=(input_s,))\n\tdense = layers.Dense(64, activation=\"relu\")\n\tx = dense(inputs)\n\tlayer1 = layers.Dense(64, activation=activation_fn)(x)\n\tlayer2 = layers.Dense(64, activation=activation_fn)(layer1)\n\toutputs = layers.Dense(output_s)(layer2)\n\tmodel = keras.Model(inputs=inputs, outputs=outputs, name=\"house_pred\")\n\t\n\treturn model", "def MLP(input_dim, num_classes, hidden_layer_sizes = (100,), activation='relu', solver='sgd', alpha = 1e-4, momentum = 0.2, learning_rate_init=1e-4):\n # create model\n model = Sequential()\n \n # first layer\n model.add(Dense(hidden_layer_sizes[0], \n input_dim=input_dim, \n activation=activation, \n bias_regularizer=regularizers.l2(alpha),\n activity_regularizer=regularizers.l2(alpha)))\n model.add(Dropout(0.2))\n \n # unroll ffn\n # 1 since we already created the first layer\n for i in range(1, len(hidden_layer_sizes)):\n model.add(Dense(hidden_layer_sizes[i], \n activation=activation, \n bias_regularizer=regularizers.l2(alpha),\n activity_regularizer=regularizers.l2(alpha)))\n \n # last layer\n model.add(Dense(num_classes, activation='softmax'))\n \n # Optimiser and compile model \n if solver=='sgd':\n optimizer = SGD(lr=learning_rate_init)\n elif solver=='adam':\n optimizer = Adam(lr=learning_rate_init)\n else :\n optimizer = solver\n model.compile(loss='sparse_categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n return model", "def train(self):\n\n\t\tinput_size = len(self.inputs[0])\n\t\toutput_size = len(set(self.labels))\n\t\thidden_size_1 = 15\n\t\thidden_size_2 = 15\n\n\t\t# One hot encode the labels\n\t\tencoder = LabelEncoder()\n\t\tencoder.fit(self.labels)\n\t\tenc_labels = encoder.transform(self.labels)\n\t\tenc_labels = np_utils.to_categorical(enc_labels)\n\n\t\t# Create the MLP\n\t\tmodel = 
Sequential()\n\t\tmodel.add(Dense(hidden_size_1, activation='relu', input_dim=input_size))\n\t\tmodel.add(Dense(hidden_size_2, activation='relu'))\n\t\tmodel.add(Dense(output_size, activation='softmax'))\n\n\t\t# Compile model with optimizer and loss function\n\t\tmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n\n\t\t# Train the model\n\t\tmodel.fit(self.inputs, enc_labels, steps_per_epoch=1000, epochs=20, verbose=2)\n\n\t\tself.model = model", "def get_mlp_model(input_dim, hidden_layer_one=50, hidden_layer_two=25,\n dropout=0.2, learn_rate=0.01):\n \n # initialize a sequential model and add layer to flatten the\n # input data\n model = Sequential()\n #model.add(Flatten())\n \n model.add(Dense(hidden_layer_one, activation=\"relu\",\n input_dim=input_dim))\n model.add(Dropout(dropout))\n model.add(Dense(hidden_layer_two, activation=\"relu\"))\n model.add(Dropout(dropout))\n model.add(Dense(1, activation='linear'))\n # compile the model\n model.compile(\n optimizer=Adam(learning_rate=learn_rate),\n loss=\"mean_squared_error\",\n metrics=[\"mse\", \"mae\"])\n # return compiled model\n return model", "def make_mlp(dim_list, activation, batch_norm=False, dropout=0.0):\n layers = []\n\n for dim_in, dim_out in zip(dim_list[:-1], dim_list[1:]):\n layers.append(nn.Linear(dim_in, dim_out))\n if batch_norm:\n layers.append(nn.BatchNorm1d(dim_out))\n if activation == 'relu':\n layers.append(nn.ReLU())\n elif activation == 'tanh':\n layers.append(nn.Tanh())\n elif activation == 'leakyrelu':\n layers.append(nn.LeakyReLU())\n elif activation == 'sigmoid':\n layers.append(nn.Sigmoid())\n if dropout > 0:\n layers.append(nn.Dropout(p=dropout))\n\n return nn.Sequential(*layers)", "def _build_model(self):\n\n with tf.variable_scope(self.name):\n # adds placeholders, data_normalization and data_noise if desired. 
Also adds a placeholder for dropout probability\n self.layer_in_x, self.layer_in_y = self._build_input_layers()\n\n # create core multi-layer perceptron\n mlp_output_dim = 2 * self.ndim_y * self.n_centers + self.n_centers\n core_network = MLP(\n name=\"core_network\",\n input_layer=self.layer_in_x,\n output_dim=mlp_output_dim,\n hidden_sizes=self.hidden_sizes,\n hidden_nonlinearity=self.hidden_nonlinearity,\n output_nonlinearity=None,\n weight_normalization=self.weight_normalization,\n dropout_ph=self.dropout_ph if self.dropout else None\n )\n\n core_output_layer = core_network.output_layer\n\n # slice output of MLP into three equally sized parts for loc, scale and mixture weights\n slice_layer_locs = L.SliceLayer(core_output_layer, indices=slice(0, self.ndim_y * self.n_centers), axis=-1)\n slice_layer_scales = L.SliceLayer(core_output_layer, indices=slice(self.ndim_y * self.n_centers, 2 * self.ndim_y * self.n_centers), axis=-1)\n slice_layer_weights = L.SliceLayer(core_output_layer, indices=slice(2 * self.ndim_y * self.n_centers, mlp_output_dim), axis=-1)\n\n # locations mixture components\n self.reshape_layer_locs = L.ReshapeLayer(slice_layer_locs, (-1, self.n_centers, self.ndim_y))\n self.locs = L.get_output(self.reshape_layer_locs)\n\n # scales of the mixture components\n reshape_layer_scales = L.ReshapeLayer(slice_layer_scales, (-1, self.n_centers, self.ndim_y))\n self.softplus_layer_scales = L.NonlinearityLayer(reshape_layer_scales, nonlinearity=tf.nn.softplus)\n self.scales = L.get_output(self.softplus_layer_scales)\n\n # weights of the mixture components\n self.logits = L.get_output(slice_layer_weights)\n self.softmax_layer_weights = L.NonlinearityLayer(slice_layer_weights, nonlinearity=tf.nn.softmax)\n self.weights = L.get_output(self.softmax_layer_weights)\n\n # # put mixture components together\n self.y_input = L.get_output(self.layer_in_y)\n self.cat = cat = Categorical(logits=self.logits)\n self.components = components = [MultivariateNormalDiag(loc=loc, scale_diag=scale) for loc, scale\n in zip(tf.unstack(self.locs, axis=1), tf.unstack( self.scales, axis=1))]\n self.mixture = mixture = Mixture(cat=cat, components=components, value=tf.zeros_like(self.y_input))\n\n # regularization\n self._add_softmax_entropy_regularization()\n self._add_l1_l2_regularization(core_network)\n\n # tensor to store samples\n self.samples = mixture.sample() #TODO either use it or remove it\n\n # tensor to compute probabilities\n if self.data_normalization:\n self.pdf_ = mixture.prob(self.y_input) / tf.reduce_prod(self.std_y_sym)\n self.log_pdf_ = mixture.log_prob(self.y_input) - tf.reduce_sum(tf.log(self.std_y_sym))\n else:\n self.pdf_ = mixture.prob(self.y_input)\n self.log_pdf_ = mixture.log_prob(self.y_input)\n\n # symbolic tensors for getting the unnormalized mixture components\n if self.data_normalization:\n self.scales_unnormalized = self.scales * self.std_y_sym\n self.locs_unnormalized = self.locs * self.std_y_sym + self.mean_y_sym\n else:\n self.scales_unnormalized = self.scales\n self.locs_unnormalized = self.locs\n\n # initialize LayersPowered --> provides functions for serializing tf models\n LayersPowered.__init__(self, [self.softmax_layer_weights, self.softplus_layer_scales, self.reshape_layer_locs,\n self.layer_in_y])", "def build_mlp(\n input_size: int,\n output_size: int,\n n_layers: int,\n size: int,\n activation='tanh',\n output_activation='identity',\n):\n if isinstance(activation, str):\n activation = _str_to_activation[activation]\n if isinstance(output_activation, str):\n 
output_activation = _str_to_activation[output_activation]\n layers = []\n in_size = input_size\n for _ in range(n_layers):\n layers.append(nn.Linear(in_size, size))\n layers.append(activation)\n in_size = size\n layers.append(nn.Linear(in_size, output_size))\n layers.append(output_activation)\n return nn.Sequential(*layers)", "def model_creator(config):\n return nn.Linear(1, 1)", "def mlp(\n sizes: List[int],\n inner_activation: nn.Module,\n last_activation: nn.Module = None,\n):\n layers = []\n for j in range(len(sizes) - 1):\n act = (\n inner_activation\n if j < len(sizes) - 2 or not last_activation\n else last_activation\n )\n layers += [nn.Linear(sizes[j], sizes[j + 1]), act]\n\n return nn.Sequential(*layers)", "def mlp_policy_net(x, output_sizes):\n net = hk.Sequential([hk.nets.MLP(output_sizes), jnp.tanh])\n return net(x)", "def create_neural_network():\n model = Sequential()\n model.add(LSTM(32, input_shape=(4, 45))) # 4 time-steps and 45 features\n model.add(Dense(64))\n model.add(Activation('tanh'))\n model.add(Dense(units=45)) # 45 is the number of class\n model.add(Activation('softmax')) # Output the density of probability\n\n model.compile(optimizer=adam(lr=0.001, decay=1e-6),\n loss=\"categorical_crossentropy\",\n metrics=['accuracy'])\n\n model.summary()\n print(\"Creation of the Neural Network is finished.\")\n return model", "def mlp_model(train, layers=(100,), window_size=5):\n # generate a window\n window = mlp_window_selector(train, window_size)\n # interpolate new data\n train_x = mlp_input_mapper(train[0], window)\n train_y = mlp_input_mapper(train[1], window)\n # generate model\n model = MLPRegressor(hidden_layer_sizes=tuple(layers))\n # fit model with new rounded data\n model.fit(train_x, train_y)\n # return model and window\n return (model, window)", "def generator(noise_dim=NOISE_DIM):\n model = nn.Sequential(\n nn.Linear(noise_dim, 1024),\n nn.ReLU(inplace=True),\n nn.Linear(1024, 1024),\n nn.ReLU(inplace=True),\n nn.Linear(1024, 784),#784\n nn.Tanh(),\n )\n return model", "def create_nn(self):\n\n\t\tmodel = Sequential()\n\t\tmodel.add(Dense(32, input_dim=self.state_size, activation='relu'))\n\t\tmodel.add(Dense(32, activation='relu'))\n\t\tmodel.add(Dense(64, activation='relu'))\n\t\tmodel.add(Dense(self.action_size, activation='linear'))\n\t\tmodel.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))\n\t\treturn model", "def build_network(num_actions: int) -> hk.Transformed:\n\n def q(obs):\n network = hk.Sequential(\n [hk.Flatten(),\n nets.MLP([FLAGS.hidden_units, num_actions])])\n return network(obs)\n\n return hk.without_apply_rng(hk.transform(q, apply_rng=True))", "def neural_network(xtrain, ytrain, xtest, ytest,labels_mapping, scaled = False):\n if not scaled :\n scaler = StandardScaler()\n xtrain = scaler.fit_transform(xtrain)\n xtest = scaler.transform(xtest)\n\n nn = MLPClassifier() #hidden_layer_sizes=30, alpha=0.0001, early_stopping=True\n nn = __train_and_test(nn, xtrain, ytrain, xtest, ytest,labels_mapping)\n return nn", "def build(obs_space: Box, action_space: Box, spec: Spec) -> MLPModel:\n model = MLPModel(obs_space, action_space, spec.network)\n model.initialize_parameters(spec.initializer)\n if spec.residual:\n model = ResidualStochasticModel(model)\n return model", "def __init__(self, hidden_layer_sizes, activation='relu', reg=0.001, k_fold=5, random_state=0):\n print(\"Initialize model Multi-layer Perceptron\")\n self.hidden_layer_sizes = hidden_layer_sizes\n self.activation = activation\n self.reg = reg\n self.k_fold = k_fold\n 
self.random_state = random_state\n self.model = sklearn.neural_network.MLPClassifier(self.hidden_layer_sizes,\n activation=self.activation,\n alpha=self.reg, max_iter=1000, \n random_state=self.random_state)", "def build_model(self) -> nn.Module:\n pass", "def build_mlp(\n n_in: int,\n n_out: int,\n n_hidden: Optional[Union[int, Sequence[int]]] = None,\n n_layers: int = 2,\n activation: Callable = F.silu,\n last_bias: bool = True,\n last_zero_init: bool = False,\n) -> nn.Module:\n # get list of number of nodes in input, hidden & output layers\n if n_hidden is None:\n c_neurons = n_in\n n_neurons = []\n for i in range(n_layers):\n n_neurons.append(c_neurons)\n c_neurons = max(n_out, c_neurons // 2)\n n_neurons.append(n_out)\n else:\n # get list of number of nodes hidden layers\n if type(n_hidden) is int:\n n_hidden = [n_hidden] * (n_layers - 1)\n else:\n n_hidden = list(n_hidden)\n n_neurons = [n_in] + n_hidden + [n_out]\n\n # assign a Dense layer (with activation function) to each hidden layer\n layers = [\n snn.Dense(n_neurons[i], n_neurons[i + 1], activation=activation)\n for i in range(n_layers - 1)\n ]\n # assign a Dense layer (without activation function) to the output layer\n\n if last_zero_init:\n layers.append(\n snn.Dense(\n n_neurons[-2],\n n_neurons[-1],\n activation=None,\n weight_init=torch.nn.init.zeros_,\n bias=last_bias,\n )\n )\n else:\n layers.append(\n snn.Dense(n_neurons[-2], n_neurons[-1], activation=None, bias=last_bias)\n )\n # put all layers together to make the network\n out_net = nn.Sequential(*layers)\n return out_net" ]
[ "0.7659937", "0.75626165", "0.74614346", "0.7456196", "0.7027464", "0.70230585", "0.6835712", "0.67514044", "0.67060864", "0.6647528", "0.6643573", "0.659611", "0.6572788", "0.6491478", "0.6484547", "0.6470002", "0.64629227", "0.64441985", "0.64162695", "0.6321424", "0.6284879", "0.6251099", "0.6249069", "0.62486655", "0.6218163", "0.6205383", "0.6184046", "0.61528987", "0.6148236", "0.6146496" ]
0.8253452
0
Creates a new activation function and adds it to the list of components.
def _activation(self,components,activation): if activation == "ReLU": components.append(nn.ReLU()) elif activation == "Sigmoid": components.append(nn.Sigmoid()) else: raise Exception("Invalid activation fn: "+activation)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def construct_activation_function(self):\n # Add the activation function\n if not self.activation_function is None:\n # Check if it is a string\n if isinstance(self.activation_function, str):\n activation_function = get_activation_function_by_name(\n self.activation_function\n )()\n else:\n assert isinstance(self.activation_function, ActivationFunction)\n activation_function = self.activation_function\n # Plot the function above the rest of the layer\n self.activation_function = activation_function\n self.add(self.activation_function)", "def get_activation_function(actfn):\n if actfn is None or actfn == 'leakyrelu':\n def create_actfn(): return nn.LeakyReLU(0.1, inplace=True)\n elif actfn == 'gelu':\n def create_actfn(): return nn.GELU()\n elif actfn == 'relu':\n def create_actfn(): return nn.ReLU()\n elif actfn == 'swish' or actfn == 'silu':\n def create_actfn(): return nn.SiLU()\n else:\n raise Exception('Unknown activation function ' + str(actfn))\n return create_actfn", "def encoder_activation_func(num_layer):\n ec_funct = []\n for i in range(num_layer):\n ec_funct.append('relu')\n ec_funct.append('softmax')\n\n return ec_funct", "def linear_activation_calculation(A, W, b, activation_function):\n\n # Your code here\n return activation_function(linear_forward_calculation(A, W, b))\n # raise NotImplementedError", "def initialiseActivationFunctions(self):\n\n\t\t###uniform for output units\n\t\tif self._outputActivationFunctions == None or self._outputActivationDerivatives == None:\t\n\t\n\t\t\tself._outputActivationFunctions = []\n\t\t\tself._outputActivationDerivatives = []\n\n\t\t\tactFunc = lambda x : x\n\t\t\tdActFunc = lambda x : 1.0\n\t\n\t\t\tfor i in range(self.nOutputs):\n\t\t\t\t\n\t\t\t\tself._outputActivationFunctions.append(actFunc)\n\t\t\t\tself._outputActivationDerivatives.append(dActFunc)\n\n\t\t\tself._outputActivationFunctions = np.array(self._outputActivationFunctions)\n\t\t\tself._outputActivationDerivatives = np.array(self._outputActivationDerivatives)\n\t\t\t\n\n\t\tif self._hiddenActivationFunctions == None or self._hiddenActivationDerivatives == None:\n\n\t\t\tself._hiddenActivationFunctions = []\n\t\t\tself._hiddenActivationDerivatives = []\n\n\t\t\tfor i in range(self.nHiddenLayers):\n\n\t\t\t\tfTemp = []\n\t\t\t\tdTemp = []\n\t\t\t\t\n\t\t\t\t#Make the default sigmoid the one suggested in LeCun et al 1998\n\t\t\t\ttwist = 0.01\n\t\t\t\ta = 1.7159\n\t\t\t\tc = 2.0/3.0\n\n\t\t\t\tactFunc = lambda x : a*np.tanh(c*x) + twist*x\n\t\t\t\tdActFunc = lambda x : twist + a*c*(1.0 - (np.tanh(c*x)**2.0))\n\n#\t\t\t\tactFunc = lambda x : np.tanh(x)\n#\t\t\t\tdActFunc = lambda x : 1.0 - np.tanh(x)**2.0\n\n\t\t\t\t#plus all of the bias\n\t\t\t\tfor j in range(self.nUnitsPerLayer+1):\n\t\t\t\t\t\n\t\t\t\t\tfTemp.append(actFunc)\n\t\t\t\t\tdTemp.append(dActFunc)\n\t\t\t\t\n\t\t\t\tself._hiddenActivationFunctions.append(fTemp)\n\t\t\t\tself._hiddenActivationDerivatives.append(dTemp)\n\t\t\t\n\t\t\tself._hiddenActivationFunctions = np.array(self._hiddenActivationFunctions)\n\t\t\tself._hiddenActivationDerivatives = np.array(self._hiddenActivationDerivatives)", "def activation_factory(name):\n if name == 'relu':\n return nn.ReLU(inplace=True)\n if name == 'leaky_relu':\n return nn.LeakyReLU(0.2, inplace=True)\n if name == 'elu':\n return nn.ELU(inplace=True)\n if name == 'sigmoid':\n return nn.Sigmoid()\n if name == 'tanh':\n return nn.Tanh()\n if name is None or name == \"identity\":\n return nn.Identity()\n\n raise ValueError(f'Activation function `{name}` not yet 
implemented')", "def get_activation_function(func_name):\n return {\n 'linear': lambda x: x,\n 'relu': lambda x: x * (x > 0),\n 'elu': lambda x: x * (x >= 0) + (T.exp(x) - 1) * (x < 0),\n 'softmax': T.nnet.softmax,\n 'tanh': T.tanh,\n 'log_softmax': log_softmax,\n 'sigmoid': T.nnet.sigmoid\n }[func_name]", "def __init__(self, layers=[2, 2, 1], activation_function=\"bentidentity\"):\n self.layers = layers\n self.activation_function = th.activation_functions[activation_function]\n self.activation_derivative = th.activation_derivatives[\n activation_function]\n self.weights = self._generate_weights()", "def register_activations(model: onnx_pb.ModelProto, activation_names: List):\n for act_name in activation_names:\n _ = add_hook_to_get_activation(model, act_name)", "def forward_activationfunction(self, x):\n if self.forward_activation == 'tanh':\n return torch.tanh(x)\n elif self.forward_activation == 'relu':\n return F.relu(x)\n elif self.forward_activation == 'linear':\n return x\n elif self.forward_activation == 'leakyrelu':\n return F.leaky_relu(x, 0.2)\n elif self.forward_activation == 'sigmoid':\n return torch.sigmoid(x)\n else:\n raise ValueError('The provided forward activation {} is not '\n 'supported'.format(self.forward_activation))", "def activation_function(self, x: np.array) -> np.array:\r\n\t\treturn self._activation_function(x)", "def add_activation(self, op, input_name, name=None, attr={}):\n attr['alpha'] = 1.0\n attr['beta'] = 1.0\n if 'op' == 'Selu':\n attr['alpha'] = 1.6732632423543772848170429916717\n attr['beta'] = 1.0507009873554804934193349852946\n\n return self._build_op(op, [input_name], name=name, attr=attr)", "def add_function(self, function):\n self.functions.append(function)", "def add_function(self, function):\n self.functions.append(function)", "def activation_func(activation, inplace=False):\n return nn.ModuleDict([\n ['relu', nn.ReLU(inplace=inplace)],\n ['leaky_relu', nn.LeakyReLU(negative_slope=0.2, inplace=inplace)],\n ['selu', nn.SELU(inplace=inplace)],\n ['none', nn.Identity()]\n ])[activation]", "def activate(self, input_layer, funcname=None):\n if isinstance(funcname, tuple):\n funcname = funcname[0]\n params = funcname[1:]\n if funcname is None:\n funcname = self.activation_func\n if funcname == 'LINEAR':\n return input_layer\n activation_map = {\n 'RELU': tf.nn.relu,\n 'RELU6': tf.nn.relu6,\n 'ELU': tf.nn.elu,\n 'SIGMOID': tf.nn.sigmoid,\n 'TANH': tf.nn.tanh,\n 'LRELU': lambda x, name: tf.maximum(params[0]*x, x, name=name)\n }\n return activation_map[funcname](input_layer, name=funcname.lower())", "def addFunction(self, func):\n self.__functions.append(func)", "def activation_func(activation:str):\n return nn.ModuleDict([\n ['relu', nn.ReLU(inplace=True)],\n ['leaky_relu', nn.LeakyReLU(negative_slope=0.01, inplace=True)],\n ['selu', nn.SELU(inplace=True)],\n ['none', nn.Identity()]\n ])[activation]", "def activation(activation_fun=None):\n activation_fun = (activation_fun or cfg.MODEL.ACTIVATION_FUN).lower()\n if activation_fun == \"relu\":\n return nn.ReLU(inplace=cfg.MODEL.ACTIVATION_INPLACE)\n elif activation_fun == \"silu\" or activation_fun == \"swish\":\n try:\n return torch.nn.SiLU()\n except AttributeError:\n return SiLU()\n elif activation_fun == \"gelu\":\n return torch.nn.GELU()\n else:\n raise AssertionError(\"Unknown MODEL.ACTIVATION_FUN: \" + activation_fun)", "def linear_activation_forward(A_prev, W, b, activation):\n pass", "def generate_activation(act_par):\n\n if type(act_par) == list:\n if len(act_par) == 2:\n atype, par = 
act_par\n if atype == 'elu':\n return ELU(alpha=par)\n elif atype == 'leaky':\n return LeakyReLU(alpha=par)\n elif atype == 'prelu':\n return PReLU()\n else:\n raise NameError(\"No such Activation layer\")\n elif len(act_par) == 1:\n if act_par[0] == 'snake':\n return Activation(snake)\n elif act_par[0] == 'snakeh2':\n return Activation(snakeh2)\n elif act_par[0] == 'snake2':\n return Activation(snake2)\n elif act_par[0] == 'xsin':\n return Activation(xsin)\n elif act_par[0] == 'swish':\n return Activation(swish)\n else:\n return Activation(act_par[0])\n else:\n raise NameError(\"No such Activation layer\")\n elif type(act_par) == str:\n return Activation(act_par)\n else:\n raise NameError(\"Wrong parameters for activation layer\")", "def uf_activate(self, output_reg):\n if len(self.inputs) is 2:\n self.two_activation(output_reg)\n elif len(self.inputs) is 3:\n self.three_activation(output_reg)\n else:\n self.large_activation(output_reg)", "def convert_activation(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n act_type = attrs[\"act_type\"]\n\n # Creating a dictionary here, but if this titlecase pattern\n # mxnet_name.title()\n act_types = {\n \"tanh\": \"Tanh\",\n \"relu\": \"Relu\",\n \"sigmoid\": \"Sigmoid\",\n \"softrelu\": \"Softplus\",\n \"softsign\": \"Softsign\"\n }\n\n act_name = act_types.get(act_type)\n if act_name:\n node = onnx.helper.make_node(\n act_name,\n input_nodes,\n [name],\n name=name\n )\n else:\n raise AttributeError(\n \"Activation %s not implemented or recognized in the converter\" % act_type\n )\n\n return [node]", "def forward_activate(self, a_prev, w, b, func_type):\n\n\t\tz = np.dot(w, a_prev) + b\n\t\tif 'sigmod' == func_type.lower(): \n\t\t\ta = 1 / (1 + np.exp(-z))\n\t\telif 'relu' == func_type.lower():\n\t\t\ta = np.where(z >= 0, z, 0)\n\t\telif 'leaky relu' == func_type.lower():\n\t\t\ta = np.where(z >= 0, z, 0.01 * z)\n\t\telif 'tanh' == func_type.lower():\n\t\t\ta = (np.exp(z) - np.exp(-z)) / (np.exp(z) + np.exp(-z))\n\n\t\tcache = (a_prev, w, b, z)\n\t\treturn a, cache", "def activate(self, inputvaluelist: List[float]):\n if len(inputvaluelist) != len(self.inputWeight):\n raise Exception(f\"The length input is {len(inputvaluelist)} and is not equal\"\n f\" to length of weights({len(self.inputWeight)})\")\n self.inputvaluelist = inputvaluelist\n inputlist = list(zip(inputvaluelist, self.inputWeight))\n\n input_sum = 0\n for inp in inputlist:\n input_sum += inp[0] * inp[1]\n input_sum += self.bias\n\n self.output = sigmoid(input_sum)\n\n return self.output", "def activation_function(self, X):\n return self.net_input(X)", "def activation_function(self, X):\n return self.net_input(X)", "def new_layer(self, nodes, inputs, alpha=0.1):\n weights = [[random.uniform(-0.1, 0.1) for _ in range(inputs)] for i in range(nodes)]\n alphas = [alpha for _ in range(nodes)]\n self._layers.append(Layer(weights, alphas))", "def activation_function(X):\n\tz = np.sum(w*x+b)\n\treturn z", "def pre_activation(features, weights, bias):\n # this is a dot product between features and weights, added to bias after.\n return np.dot(features, weights) + bias" ]
[ "0.7639841", "0.6542784", "0.61630833", "0.6001237", "0.6001181", "0.59439", "0.591768", "0.5908562", "0.58893776", "0.58730316", "0.5850897", "0.5848719", "0.58140385", "0.58140385", "0.5795392", "0.5792487", "0.5785407", "0.57467926", "0.57267064", "0.5675934", "0.5668909", "0.56354433", "0.56326073", "0.56173855", "0.55790275", "0.55623114", "0.55623114", "0.55437165", "0.54910827", "0.54903257" ]
0.7331862
1
Adds a dropout object to the list of components
def _dropout(self,components,dropout=None): if dropout is not None: components.append(nn.Dropout(dropout))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add(self, component) -> None:\n pass", "def add_component(self, componentInstance):\n\n #print \"Componet being added to %s entity.\"%(self._sName)\n #print componentInstance\n \n self._dComponents[componentInstance.get_name()] = componentInstance\n\n #These if statements will save a pointer of the same variable as in dComponents if True.\n\n if componentInstance.get_updateable():\n self._lUpdatables.append(componentInstance)\n\n if componentInstance.is_view_drawable():\n self._lViewDrawables.append(componentInstance)\n\n elif componentInstance.is_screen_drawable():\n self._lScreenDrawables.append(componentInstance)", "def add_component(self, lib_component):\n comp_name = lib_component.name\n try:\n comp = self.__component_list[comp_name]\n except KeyError:\n self.__component_list[comp_name] = lib_component", "def addDropzone( self, dropzone ):\n self._dropzones.append(dropzone)", "def add_dut(self):\n pass", "def changeDropout(self,dropout):\n self.dropout = dropout", "def add(self, *components):\n for component in components:\n if component.container is not None:\n component.container.remove(component)\n component.container = self\n self._components.extend(components)", "def add(self, comp):\n\t\tif comp:\n\t\t\tif isinstance(comp, Component):\n\t\t\t\tcomp.container = self\n\t\t\telse:\n\t\t\t\tfor item in comp:\n\t\t\t\t\tself.add(item)", "def buttonAdd_Clicked( self, event ):\n\t\tid = DM.FixedIndex(self._combos[self._treasureIndex].GetSelection())\n\t\tif id is not None and id >= DM.FixedIndex(0):\n\t\t\tqty = self.spinCtrlQuantity.GetValue()\n\t\t\tprob = self.spinCtrlProbability.GetValue()\n\t\t\ttreasure = (id, prob, qty)\n\t\t\tself.Treasure[self._treasureIndex].append(treasure)\n\t\t\tself.refreshTreasureList()", "def _addOutlet(self, outlet, other): \n self._outlets.append(outlet)\n if self._type == 2 and other._type == 1:\n self._reservoirs.append(other)", "def append(self, obj: Any) -> None:\n from ..pane import panel\n new_objects = list(self)\n new_objects.append(panel(obj))\n self.objects = new_objects", "def add(self, name, obj):\n obj = super(Assembly, self).add(name, obj)\n if is_instance(obj, Component):\n self._depgraph.add(obj.name)\n return obj", "def DoAdd(self,event):\r\n newItem = self.data.add()\r\n if newItem and newItem not in self.items:\r\n self.items = self.data.getItemList()\r\n index = self.items.index(newItem)\r\n self.list.InsertItems([newItem],index)", "def add_gearbox(self):\n next_gearbox_name = ''.join(('gearbox_', str(self.num_gearboxes)))\n self.gearboxes.append(gearbox.Gearbox(self.cables_per_gearbox,\n name=next_gearbox_name, \n level=self.num_gearboxes))\n print \"Added gearbox\", self.num_gearboxes\n self.num_gearboxes += 1\n self.gearbox_added = True", "def put_in(self, item):\n try:\n self.bag_of_holding.append(item)\n print(\"You have added {} to your inventory.\".format(item))\n except:\n print('Error in Inventory method: put_in')", "def _add_lamp_outlet(self, model):\r\n\r\n # Create a new CameraItem and set the model\r\n item = LampOutletItem()\r\n item.setModel(model)\r\n\r\n # Create a new CameraInfoWidget and set the model\r\n widget = LampOutletInfoWidget()\r\n widget.setModel(model)\r\n\r\n item.double_clicked.connect(widget.show)\r\n item.deleteSocketAction.connect(model.prepare_for_deletion)\r\n\r\n self.scene().addItem(item)\r\n proxy = self.scene().addWidget(widget)\r\n widget.setProxy(proxy)", "def add(self, item):", "def add(self, widget: Component) -> None:\n self._add(widget)", "def add_inventory(cd_instance, 
lst_Inventory):\r\n \r\n lst_Inventory.append(cd_instance) \r\n return lst_Inventory", "def added(self, comp):\n\t\tpass", "def add_plant(self, desc, obj_list):\n self.plants.append((desc, obj_list))\n if len(self.plants) == 1:\n self.set_default_brush()", "def on_item_dropped(self, url):\n print 'Weld.on_item_dropped:', url\n #make sure all struct are present\n if not(self.project and self.project.level):\n print >> sys.stderr, 'it\\'s too early to drop stuff: '\\\n 'create a project and a level first !'\n return\n\n #retrieve data if it comes from weld\n if url in self.resMan:\n props = self.resMan.file_props(url)\n if props is None:\n print >> sys.stderr, curr_f(), ': url(\\'%s\\') in self.resMan '\\\n 'but can\\'t retrieve props.' % (url)\n return\n props = self.project.level.resMan.add_resource(self.resMan.base_path,\n props)\n url = props['url']\n if props == {} or url not in self.project.level.resMan:\n print >> sys.stderr, curr_f(), 'could not retrieve file and/or '\\\n 'dependencies for props:', pp(props)\n return\n\n #instanciate it\n if url in self.project.level.resMan:\n props = self.project.level.resMan.file_props(url)\n dtp = self.project.level.qsteelwidget.dropTargetPosition(Config.instance().drop_target_vec)\n props['position'] = dtp\n props['rotation'] = self.project.level.qsteelwidget.dropTargetRotation()\n if props['resource_type'] == 'meshes':\n props['meshName'] = props['name']\n self.project.level.instanciate(props)\n s = 'dropped agent \\'%s\\' with id %i' % (props['name'], props['agentId'])\n print s\n Ui.instance().show_status(s)\n else:\n Ui.instance().show_status('can only drop meshes so far')", "def append(self, pane: Any) -> None:\n new_object, new_name = self._to_object_and_name(pane)\n new_objects = list(self)\n new_objects.append(new_object)\n self._names.append(new_name)\n self.objects = new_objects", "def addObject(self,object):\n object.screen = self.screen\n object.parent = self\n self.addList.append(object)", "def add_handout(self, asset_name):\r\n self._handouts.append(asset_name)", "def add_joint_to_list(list_widget, combo_box, add_btn, del_btn, forward):\n\n global ftm_list # Forward transition matrices list\n global btm_list # Backward transition matrices list\n global robot_obj\n\n # Getting the current item\n ind = combo_box.currentIndex()\n\n # Finding the associated joint\n i_joint = 0\n for _, _, node in robot_obj.tree:\n type_, nb = node.name.split('_')\n nb = int(nb)\n\n if type_ == 'joint':\n if forward:\n if 'joint_' + str(nb) in ftm_list:\n i_joint += 1\n continue\n else:\n if 'joint_' + str(nb) in btm_list:\n i_joint += 1\n continue\n if ind == nb:\n text = robot_obj.joints[nb].name\n list_widget.addItem(text)\n\n # Disabling the item in the combo box\n combo_box.model().item(i_joint).setEnabled(False)\n\n # If all the joints are added\n if list_widget.count() == combo_box.count():\n add_btn.setEnabled(False)\n del_btn.setEnabled(True)\n\n if forward:\n ftm_list.append(\"joint_\" + str(nb))\n else:\n btm_list.append(\"joint_\" + str(nb))\n\n i_joint += 1", "def add_to_bag(self, item):\n self._bag.append(item)", "def add_output(self):\r\n if self.slots[self.length-1].item is not Item.E:\r\n self.outputs.append(self.slots[self.length-1].item)", "def add_fleet(self, index, *args, **kw):\n\n fleetid = self.fleets.append(ListNode(\"{0!s}\".format(kw.get(\"name\", \"Fleet {0:d}\".format(index))), [\n ListNode(\"Nodes\"),\n ListNode(\"Behaviours\", data=kw.get(\n \"behaviours\", self.defaults[2].get_data()))\n ])\n )\n for i in 
range(kw.get(\"nodes\", 1)):\n self.add_node(fleetid)", "def add_component(self, new: components.Component) -> None:\n for existing in self.components:\n if isinstance(existing, type(new)):\n raise Exception(type(new))\n self.components.append(new)" ]
[ "0.63641316", "0.56994003", "0.5660805", "0.5521632", "0.5519748", "0.5501247", "0.54634714", "0.5435919", "0.53915066", "0.5373497", "0.53394043", "0.53063345", "0.5302059", "0.52375495", "0.52294815", "0.5180793", "0.51650107", "0.51634353", "0.515684", "0.51533777", "0.5146691", "0.5139247", "0.51310295", "0.51231766", "0.5123073", "0.50958693", "0.5091321", "0.5090395", "0.50851196", "0.50776845" ]
0.673964
0
Splits a DataFrame into 3 distinct DataFrames based on the given percentages and returns a dict of the data.
def split_data(text_df,splits=None,rand_perm=True): if splits is None: splits = {'train':0.6,'val':0.1,'test':0.3} if np.round(np.sum(list(splits.values())),4) != 1: raise Exception("Split percentages do not sum to 1") size = len(text_df) if rand_perm: perm_idx = np.random.permutation(size) else: perm_idx = np.arange(size) text_df = text_df.iloc[perm_idx,:] all_data = dict() keys = list(splits.keys()) pct = list(splits.values()) count = np.round(np.array(pct) * size).astype(np.int32) split_idx = np.cumsum(count)[:-1] data_list = np.split(text_df,split_idx,axis=0) all_data = {keys[i]:data for i,data in enumerate(data_list)} return all_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_data(df_data, clusters):\n\n if clusters is None:\n\n return {0: df_data}\n\n return {\n k: df_data.loc[clusters.index[clusters == k]]\n for k in clusters.unique()\n }", "def split_train_dev_set(df, percent=0.2):\n train = []\n dev = []\n for k, g in df.groupby(\"sender\")[\"mid\", \"recipients\"]:\n n_msg = g.shape[0]\n n_dev = int(n_msg * percent)\n g = g.sort_values(\"date\")\n g_train = g[:-n_dev]\n g_dev = g[-n_dev:]\n train.append(g_train)\n dev.append(g_dev)\n # concat all dataframe\n df_train = pd.concat(train, axis=0).sort_index()\n df_dev = pd.concat(dev, axis=0).sort_index()\n return df_train, df_dev", "def split_dataset(df, predict_window):\n\n #split dataset into train and test datasets\n #train 80 percent of rows\n dataset_train = np.array(df[:int(df.shape[0]*0.8)])\n\n #test dataset is 20 percent of rows\n #50 - that's where historical data and prediction overlap\n dataset_test = np.array(df[int(df.shape[0]*0.8)- predict_window:])\n\n return dataset_train, dataset_test", "def split_percentiles_pediatrics(df):\n df.rename(columns={\"ageyears\": \"age\", \"sex\": \"Sex\"}, inplace=True)\n cols = [\"Sex\", \"agedays\", \"age\"]\n\n ht_cols = cols.copy()\n ht_cols.extend([col for col in df.columns if \"s_ht_p\" in col])\n df_ht = df[ht_cols]\n df_ht.columns = [c.replace(\"s_ht_p\", \"P\") for c in df_ht]\n\n wt_cols = cols.copy()\n wt_cols.extend([col for col in df.columns if \"s_wt_p\" in col])\n df_wt = df[wt_cols]\n df_wt.columns = [c.replace(\"s_wt_p\", \"P\") for c in df_wt]\n\n bmi_cols = cols.copy()\n bmi_cols.extend([col for col in df.columns if \"s_bmi_p\" in col])\n df_bmi = df[bmi_cols]\n df_bmi.columns = [c.replace(\"s_bmi_p\", \"P\") for c in df_bmi]\n\n return (df_ht, df_wt, df_bmi)", "def split(df, group):\n\n data = namedtuple(\"data\", [\"filename\", \"object\"]) #initiate \"data\" tyoe\n gb = df.groupby(group) #group df by group attribute\n return [\n data(filename, gb.get_group(x))\n for filename, x in zip(gb.groups.keys(), gb.groups)\n ]", "def split_data(df: pd.DataFrame, ratio: float, purging: bool = True, n_bars: int = 10) -> Tuple[pd.DataFrame, pd.DataFrame]:\n split_idx = int(df.shape[0] * ratio)\n df1 = df[:split_idx]\n df2 = df[split_idx:]\n if purging:\n purge_idx = round((n_bars-1) * ratio)\n df1 = df1[:-purge_idx]\n df2 = df2[(n_bars - 1 - purge_idx):]\n\n return df1, df2", "def grouping_cols(df, cat_percentage = 0.05, checking_itr = 10):", "def split_by_percentage(data, percentage) -> tuple:\n try:\n percentage = int(round(percentage*len(data)))\n return(data[percentage:], data[:percentage])\n except Exception as error:\n print(f\"Error: split_by_percentage([...], {percentage}) -> {error}\")", "def split_dataset(dataset, train_percentage, feature_headers, target_header):\n\n # Split dataset into train and test dataset\n train_x, test_x, train_y, test_y = train_test_split(dataset[feature_headers], dataset[target_header],\n train_size=train_percentage)\n return train_x, test_x, train_y, test_y", "def split_dataset(dataset, train_percentage, feature_headers, target_header):\n\n # Split dataset into train and test dataset\n train_x, test_x, train_y, test_y = train_test_split(dataset[feature_headers], dataset[target_header],\n train_size=train_percentage)\n return train_x, test_x, train_y, test_y", "def split_train_test(df, percentage_train=50):\n return (\n df.loc[(df.index.values % 100) < percentage_train].reset_index().copy(),\n df.loc[~((df.index.values % 100) < percentage_train)].reset_index().copy(),\n )", "def split_dataset(dataset, 
train_percentage, feature_headers, target_header):\n \n # Split dataset into train and test dataset\n train_x, test_x, train_y, test_y = train_test_split(dataset[feature_headers], dataset[target_header],\n train_size=train_percentage)\n return train_x, test_x, train_y, test_y", "def split_data(dataset, ratio = 0.9):\n cutoff_row = int(dataset.shape[0] * ratio)\n return (dataset[:cutoff_row], dataset[cutoff_row:])", "def split_dataset(dataset, train_percentage, feature_headers, target_header):\r\n\r\n # Split dataset into train and test dataset\r\n train_x, test_x, train_y, test_y = train_test_split(dataset[feature_headers], dataset[target_header],train_size=train_percentage)\r\n return train_x, test_x, train_y, test_y", "def subset_df(df: pd.DataFrame) -> dict:\n prct10 = int(round(len(df) * 10 / 100, 0))\n dict_nb = {}\n deb = 0\n fin = prct10\n dict_nb[\"df1\"] = df.iloc[deb:fin, :]\n deb = fin\n dixieme = 10 * prct10\n reste = (len(df) - dixieme)\n fin_reste = len(df) + 1\n for i in range(2, 11):\n fin = (i * prct10 + 1)\n dict_nb[\"df\" + str(i)] = df.iloc[deb:fin, :]\n if reste > 0:\n dict_nb[\"reste\"] = df.iloc[fin: fin_reste, :]\n deb = fin\n\n return dict_nb", "def split_dataset(data_df: pd.DataFrame) -> dict:\n\n # Splits the dataset\n print(data_df.shape)\n X, y = data_df.drop(\"SalePrice\", axis=1), data_df[\"SalePrice\"]\n splits = train_test_split(X, y, test_size=0.3)\n\n # Transform to dict\n labels = [\"X_train\", \"X_test\", \"y_train\", \"y_test\"]\n splits_dict = dict(zip(labels, splits))\n\n return splits_dict", "def data():\n df = gen_sliced_df()\n df = df[[\"x\", \"z_categ\", \"y\", \"residual\"]]\n new_df = df.iloc[[1, 100, 150, 200, 250, 300, 305, 400, 405, 500, 550, 609]].copy()\n return {\"df\": df, \"new_df\": new_df}", "def split_data(train_percentage, *data):\n train = [entry[0:int(train_percentage * len(entry))] for entry in data]\n val = [entry[int(train_percentage * len(entry)):] for entry in data]\n return train, val", "def split_train_test_by_percentage(dataset, train_percentage=0.8):\n train_length = int(len(dataset) * train_percentage)\n return torch.utils.data.random_split(dataset, (train_length, len(dataset) - train_length))", "def split_dataset(dataset, train_percentage, feature_headers, target_header):\r\n\r\n train_x, test_x, train_y, test_y = train_test_split(dataset[feature_headers], dataset[target_header],\r\n train_size=train_percentage, random_state=42)\r\n return train_x, test_x, train_y, test_y", "def split_dataset(dataset, train_percentage, valid_percentage):\n\n # Split dataset into train and test dataset\n train_x, test_x, train_y, test_y = train_test_split(dataset[:, :-1], dataset[:, -1],\n train_size=train_percentage + valid_percentage,\n test_size=1-(train_percentage + valid_percentage))\n\n valid_x = train_x[int(np.ceil(train_percentage * len(dataset))):]\n valid_y = train_y[int(np.ceil(train_percentage * len(dataset))):]\n\n return train_x, valid_x, test_x, train_y, valid_y, test_y", "def _split_by_filename(\n df: pd.DataFrame):\n data = namedtuple('data', ['filename', 'object'])\n gb = df.groupby('filename')\n return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)]", "def split_data(data, labels, proportion):\n size = data.shape[0]\n np.random.seed(42)\n s = np.random.permutation(size)\n split_idx = int(proportion * size)\n return (data[s[:split_idx]], data[s[split_idx:]], labels[s[:split_idx]], labels[s[split_idx:]])", "def split_dataset(dataset: torch.utils.data.Dataset, split_perc: float = 
0.20):\n assert (split_perc >= 0.0) and (split_perc <= 1.0), (\n f\"FATAL ERROR: invalid split_perc value {split_perc}.\" f\"Expecting float >= 0.0 and <= 1.0\"\n )\n\n if split_perc > 0.0:\n num_recs = len(dataset)\n train_count = int((1.0 - split_perc) * num_recs)\n test_count = num_recs - train_count\n train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_count, test_count])\n return train_dataset, test_dataset\n else:\n return dataset, None", "def splitting_df(dataframe):\n dataframe = dataframe.dropna()\n index = 100\n train_set = dataframe.iloc[:index]\n test_set = dataframe.iloc[index:]\n return train_set, test_set, dataframe", "def split_train_test_data(total_data_df, frac):\n test_data_df = total_data_df.sample(frac=frac, random_state=1)\n train_data_df = total_data_df.loc[total_data_df.index.difference(test_data_df.index)]\n return train_data_df, test_data_df", "def train_test_split(ratio, classes, files):\n train_dict = {}\n test_dict = {}\n for cl in classes:\n train_cnt = int(ratio * len(files[cl]))\n train_dict[cl] = files[cl][:train_cnt]\n test_dict[cl] = files[cl][train_cnt:]\n return train_dict, test_dict", "def split_data(df, train_prop):\n # Create random Tensors to hold inputs and outputs, and wrap them in Variables\n train_df = df.sample(frac=train_prop)\n test_df = df.loc[~df.index.isin(train_df.index)]\n return train_df, test_df", "def get_contests_per_style(data_frame: pd.DataFrame, ballot_styles: list) -> dict:\n contests_per_style = collections.OrderedDict()\n for ballot_style in ballot_styles:\n df = data_frame[data_frame['Ballot Style'] == ballot_style].copy()\n df.drop('Ballot Style', axis=1, inplace=True)\n contests_per_style[ballot_style] = list(df.dropna(axis='columns', how='all'))\n return contests_per_style", "def split_dataset(df_playlists, df_interactions):\n df_train_pl, cat_pids = generate_train(df_playlists)\n df_test_pl, df_test_itr, df_eval_itr, df_train_itr = generate_test(cat_pids, df_playlists, df_interactions)\n\n return df_train_pl, df_train_itr, df_test_pl, df_test_itr, df_eval_itr" ]
[ "0.59946746", "0.5814314", "0.5597616", "0.55113435", "0.5504025", "0.54890805", "0.5455985", "0.54370016", "0.54187405", "0.54187405", "0.54108846", "0.54088694", "0.5390827", "0.5368966", "0.53635633", "0.53455263", "0.53044295", "0.5286926", "0.52688", "0.5268422", "0.5253747", "0.52520144", "0.5217308", "0.5201055", "0.5160175", "0.5145082", "0.512048", "0.50514823", "0.5047043", "0.5029761" ]
0.58939797
1
Reads an English > French text file and filters the lines based on the given filter_fn. If filter_fn is None, the default filter will be used.
def filter_nmt_file(filename,filter_fn=None): if filter_fn is None: filter_fn = lambda en : en.lower().startswith('i am') or \ en.lower().startswith('he is') or \ en.lower().startswith('she is') or \ en.lower().startswith('they are') or \ en.lower().startswith('you are') or \ en.lower().startswith('we are') filtered_lines = [] with open(filename) as file: lines = file.readlines() for line in lines: text = line.split('\t') en = text[0] fra = text[1] if filter_fn(en): filtered_lines.append(en.lower() + '\t' + fra.lower()) return filtered_lines
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_filter(self, fname, interp=True, lamb=None, *args, **kwargs):\n try:\n fil = UnitFilter.from_ascii(fname, *args, **kwargs)\n except Exception:\n content = self.content\n r = [k for k in content if fname in k]\n\n if len(r) <= 0: # try all lower for filenames (ascii convention)\n r = [k for k in content if fname.lower() in k]\n\n if len(r) > 1:\n print(\"auto correction found multiple choices\")\n print(r)\n raise ValueError('Refine name to one of {0}'.format(r))\n elif len(r) <= 0:\n raise ValueError('Cannot find filter {0}'.format(fname))\n else:\n fil = UnitFilter.from_ascii(r[0], *args, **kwargs)\n if (interp is True) and (lamb is not None):\n return fil.reinterp(lamb)\n else:\n return fil", "def generate_filter(filter_text):\n if ':' in filter_text:\n file_path_filter, _, contract_filter = filter_text.partition(':')\n else:\n file_path_filter = contract_filter = filter_text\n\n return functools.partial(check_if_matches_filter, file_path_filter, contract_filter)", "def test_filter_sff_file(self):\r\n\r\n try:\r\n fh = open(self.tiny_test)\r\n except IOError:\r\n self.fail(\r\n \"Could not open test file %s. Skipping test\" %\r\n self.tiny_test)\r\n\r\n # With no filters all flowgram should be in out file\r\n flowgrams, header = lazy_parse_sff_handle(fh)\r\n filter_list = []\r\n fd, out_file_name = mkstemp(\r\n prefix=\"test_filter_sff_file\",\r\n suffix=\".sff.txt\")\r\n close(fd)\r\n out_fh = open(out_file_name, \"w\")\r\n l = filter_sff_file(flowgrams, header, filter_list, out_fh)\r\n remove(out_file_name)\r\n fh.close()\r\n self.assertEqual(l, 114)\r\n\r\n # With good filters some should survive\r\n fh = open(self.tiny_test)\r\n flowgrams, header = lazy_parse_sff_handle(fh)\r\n filter_list = [lambda f:within_length(f, 100, 300)]\r\n fd, out_file_name = mkstemp(\r\n prefix=\"test_filter_sff_file\",\r\n suffix=\".sff.txt\")\r\n close(fd)\r\n out_fh = open(out_file_name, \"w\")\r\n l = filter_sff_file(flowgrams, header, filter_list, out_fh)\r\n remove(out_file_name)\r\n fh.close()\r\n self.assertEqual(l, 112)\r\n\r\n # With strong filters nothing should be in\r\n fh = open(self.tiny_test)\r\n flowgrams, header = lazy_parse_sff_handle(fh)\r\n filter_list = [lambda f:within_length(f, 0, 0)]\r\n fd, out_file_name = mkstemp(\r\n prefix=\"test_filter_sff_file\",\r\n suffix=\".sff.txt\")\r\n close(fd)\r\n out_fh = open(out_file_name, \"w\")\r\n l = filter_sff_file(flowgrams, header, filter_list, out_fh)\r\n remove(out_file_name)\r\n self.assertEqual(l, 0)", "def load_filter_file(self, file_path): \n self._pop_all_self()\n self.filter_list = []\n self.file_path = file_path \n \n with codecs.open(self.file_path, 'r', encoding='cp1252') as fid: \n for k, line in enumerate(fid):\n line = line.lstrip('\\n\\r ')\n if line.startswith('#'):\n continue \n split_line = [item.strip() for item in line.split('\\t')]\n if k==0:\n # Header\n header = split_line\n else:\n line_dict = dict(zip(header, split_line))\n self[line_dict['variable']] = SingleFilter(line_dict, self.parameter)\n\n # Save attributes\n for item in self.keys():\n setattr(self, item, self[item])\n \n self.header = sorted(header)\n \n if self.filter_type == 'data':\n self.year_list = [y for y in range(self['YEAR_INTERVAL'].value[0], \n self['YEAR_INTERVAL'].value[1]+1)]", "def pipeline(file):\n # special processing is performed to avoid sentence boundaries after abbrevs\n doc = nlp(text_processing.preprocess_text_ents(file))\n grid = get_grid(doc)\n distrib = get_distrib(grid, doc)\n return get_feats(distrib)", "def 
fileFiltRecGen(filePath, filt, delim = \",\"):\n\twith open(filePath, \"r\") as fp:\n\t\tfor line in fp:\t\n\t\t\tline = line[:-1]\n\t\t\tif delim is not None:\n\t\t\t\tline = line.split(delim)\n\t\t\tif filt(line):\n\t\t\t\tyield line", "def ascii_to_filter(filename, filter_name=None, detector=None, temperature=None, \n filter_type=None, wcol=0, tcol=None, **kwargs):\n strg = \"Reading a MiriFilter model from an ASCII file \"\n strg += \"is not longer supported.\"\n raise NotImplementedError(strg)", "def LoadSourceFilter(coverable_file_name):\n \n with open(coverable_file_name, \"r\") as cov_file:\n file_list = [line.strip() for line in cov_file.readlines()]\n return SourceFilter(file_list)", "def filter(ctx: click.Context):\n vcf: Reader = vcfpy.Reader.from_path(ctx.obj[\"vcf_file\"])\n filter_settings: Dict[str, Dict] = SV_FILTER_SETTINGS[\"tiddit_tumor_normal\"]\n\n # Update VCF header\n vcf.header.add_info_line(\n vcfpy.OrderedDict(\n [\n (\"ID\", \"AF_T_MAX\"),\n (\"Number\", \".\"),\n (\"Type\", \"Float\"),\n (\n \"Description\",\n \"Max AF in tumor, for rows with merged overlapping variants\",\n ),\n ]\n )\n )\n\n vcf.header.add_info_line(\n vcfpy.OrderedDict(\n [\n (\"ID\", \"AF_N_MAX\"),\n (\"Number\", \".\"),\n (\"Type\", \"Float\"),\n (\n \"Description\",\n \"Max AF in normal, for rows with merged overlapping variants\",\n ),\n ]\n )\n )\n\n vcf.header.add_filter_line(\n vcfpy.OrderedDict(\n [\n (\"ID\", \"normal_variant\"),\n (\"Description\", \"AF_T_MAX == 0 and ctg_t == False\"),\n ]\n )\n )\n\n vcf.header.add_filter_line(\n vcfpy.OrderedDict(\n [\n (\"ID\", f\"{filter_settings['max_normal_allele_frequency']['filter']}\"),\n (\n \"Description\",\n f\"AF_N_MAX > {filter_settings['max_normal_allele_frequency']['value']}\",\n ),\n ]\n )\n )\n\n vcf.header.add_filter_line(\n vcfpy.OrderedDict(\n [\n (\"ID\", f\"{filter_settings['max_tin_fraction']['filter']}\"),\n (\n \"Description\",\n f\"(AF_N_MAX / AF_T_MAX) > {filter_settings['max_tin_fraction']['value']}\",\n ),\n ]\n )\n )\n\n vcf.header.add_filter_line(\n vcfpy.OrderedDict(\n [\n (\"ID\", \"in_normal\"),\n (\"Description\", \"ctg_n == True and AF_N_MAX == 0 and AF_T_MAX <= 0.25\"),\n ]\n )\n )\n\n writer = vcfpy.Writer.from_path(\"/dev/stdout\", vcf.header)\n\n # Set soft filters for variants based on presence in the normal sample\n for variant in vcf:\n variant_info: dict = variant.INFO\n\n # Collect evidence of variant in tumor and normal sample\n evidence_dict: dict = get_tumor_normal_evidence(variant_info)\n allele_frequency_tumor: float = evidence_dict[\"tumor_max_af\"]\n allele_frequency_normal: float = evidence_dict[\"normal_max_af\"]\n tumor_has_contig: bool = evidence_dict[\"tumor_has_contig\"]\n normal_has_contig: bool = evidence_dict[\"normal_has_contig\"]\n\n # Add AF_MAX to info field\n variant.INFO[\"AF_T_MAX\"] = [round(allele_frequency_tumor, 4)]\n variant.INFO[\"AF_N_MAX\"] = [round(allele_frequency_normal, 4)]\n\n # Set filter statuses\n if allele_frequency_tumor == 0 and not tumor_has_contig:\n variant.add_filter(\"normal_variant\")\n writer.write_record(variant)\n continue\n\n # Regardless of CTG, set filter if AF_T / AF_N > max_tin_fraction\n normal_tumor_af_ratio = (\n float(allele_frequency_normal / allele_frequency_tumor)\n if allele_frequency_tumor > 0\n else 0\n )\n if normal_tumor_af_ratio > filter_settings[\"max_tin_fraction\"][\"value\"]:\n variant.add_filter(\"high_normal_af_fraction\")\n\n # Set filter if AF_N > 0.25\n if (\n allele_frequency_normal\n > 
filter_settings[\"max_normal_allele_frequency\"][\"value\"]\n ):\n variant.add_filter(\"high_normal_af\")\n\n # Set filter if CTG_N = True, AF_N is 0 and AF_T is below 0.25\n if (\n normal_has_contig\n and allele_frequency_normal == 0\n and allele_frequency_tumor <= 0.25\n ):\n variant.add_filter(\"in_normal\")\n\n writer.write_record(variant)", "def filter(self, fn):\r\n\t\treturn FilterProjectedList(self, [fn])", "def _load_filter(self, fname, interp=True, lamb=None):\n ftab = self.hdf\n if hasattr(fname, 'decode'):\n fnode = ftab.get_node('/filters/' + fname.decode('utf8'))\n else:\n fnode = ftab.get_node('/filters/' + fname)\n flamb = fnode[:]['WAVELENGTH']\n transmit = fnode[:]['THROUGHPUT']\n dtype = 'photon'\n unit = None\n\n attrs = fnode.attrs\n if 'DETECTOR' in attrs:\n dtype = attrs['DETECTOR']\n if 'WAVELENGTH_UNIT' in attrs:\n unit = attrs['WAVELENGTH_UNIT']\n\n fil = UnitFilter(flamb, transmit, name=fnode.name,\n dtype=dtype, unit=unit)\n\n if interp & (lamb is not None):\n fil = fil.reinterp(lamb)\n return fil", "def apply_word_filter(self, fn):\n self._apply_filter(lambda ng, f: any(fn(w) for w in ng))", "def language_filter_func(self, model, iter, data):\n if self.current_filter_language is None or self.current_filter_language == \"None\":\n return True\n else:\n return model[iter][2] == self.current_filter_language", "def ft_filter(fnct, tab):\n res = []\n for i in tab:\n if fnct:\n if fnct(i):\n res.append(i)\n else:\n if i:\n res.append(i)\n return res", "def readAndRewrite(self):\n try:\n with open(self.dataFile, 'r') as source:\n self.initDictionnary()\n lineCount = 0\n filteredData = [] # array of filtered flights\n for line in source:\n lineCount += 1\n line = line.strip()\n if line != \"\" and line[0] != \"#\":\n f = Flight(line, self.vocabulary)\n if self.filter:\n f.rewrite(self.summaryDict)\n f.filter(filteredData, self.listOfTerms, self.threshold)\n print(\"End : Displaying general summary\")\n self.displaySummary(self.summaryDict, lineCount)\n print(\"-------------- End of general summary ---------------\")\n if len(filteredData) != 0:\n print(\"Beginning summary on filtered data (\" + str(len(filteredData)) + \" entries)\")\n for data in filteredData:\n data.rewrite(self.summaryFilteredDict)\n print(\"End of summary for filtered data\")\n self.displaySummary(self.summaryFilteredDict, len(filteredData))\n print(\"Finding correlations\")\n self.findLinkedTerms()\n print(\"Printing correlations with \" + str(self.listOfTerms) + \" and threshold : \" + str(self.threshold))\n #for key in self.correlationDict.keys():\n #print(str(key) + \" : \" + str(self.correlationDict[key]))\n self.findAtypicalTerms()\n print(\"Printing atypical terms with \" + str(self.listOfTerms) + \" and threshold : \" + str(self.threshold))\n #for term in self.atypicalTermsDict.keys():\n #print(str(term) + \" : \" + str(self.atypicalTermsDict[term]))\n display = Display(self.vocabulary)\n display.displayPieChartSummary(self.summaryDict, \"General Summary for 2008 flights in the USA\")\n display.displayPieChartSummary(self.summaryFilteredDict, \"General Summary for 2008 flights with \"+str(self.listOfTerms)+\" and threshold : \" + str(self.threshold))\n display.displayBubbleChart(self.correlationDict,\"Linked terms in 2008 flights with \" + str(self.listOfTerms) + \" and threshold = \" + str(self.threshold))\n display.displayBubbleChart(self.atypicalTermsDict,\"Atypical terms in 2008 flights with \" + str(self.listOfTerms) + \" and threshold = \" + str(self.threshold))\n else:\n 
print(\"Filter returned no entry\")\n except:\n raise Exception(\"Error while loading the dataFile %s\" % self.dataFile)", "def filter(self, ffun):\n # BEGIN\n lst = []\n for item in WordSet(self.text).words():\n # if len(item) == len(ffun):\n # lst.append(item)\n if ffun(item) == True:\n lst.append(item)\n return lst\n\n # END", "def filter(ctx, fil, filter_host, filter_port):\n if not fil:\n raise ValueError(\"Must specify at least one filtering operaion (of the form '<filter>=<value>'\")\n client = aceclient.FilterClient(host=filter_host, port=filter_port)\n filters = {}\n for f in fil:\n filters.update(parse_tag(f))\n client.update(**filters)", "def filter_it(self, _filter):\n with open(self.path) as _file:\n for line in _file:\n tokens = self._tokenize(line)\n if tokens:\n _ip = tokens.group('ip')\n if _filter.match(_ip):\n yield line", "def get_special_filters(filepath):\n filters = {}\n with open(filepath, \"r\") as f:\n reader = csv.DictReader(f, delimiter=';')\n for row in reader:\n function = row[\"Function\"]\n filters.setdefault(function, {})\n filters[function][\"description\"] = row[\"Description\"]\n filters[function][\"parameters\"] = row[\"Parameters\"].split(\",\")\n filters[function][\"example\"] = row[\"Example\"]\n return filters", "def filter(self, fn):\n self.__filter_chain.append(fn)", "def filter_sff_file(flowgrams, header, filter_list, out_fh):\r\n\r\n write_sff_header(header, out_fh)\r\n\r\n l = 0\r\n for f in flowgrams:\r\n passed = True\r\n for filter in filter_list:\r\n passed = passed and filter(f)\r\n if not passed:\r\n # bail out\r\n break\r\n if (passed):\r\n out_fh.write(f.createFlowHeader() + \"\\n\")\r\n l += 1\r\n return l", "def add_filter(self, f):\n raise NotImplementedError", "def load_filter(filename):\n # parse config file\n if not os.path.isfile(filename):\n raise IOError('File \"%s\" does not exist' % filename)\n try:\n f = open(filename)\n except IOError:\n raise IOError('Could not open file \"%s\"' % filename)\n\n cfg_items = []\n for (i, line) in enumerate(f):\n try:\n # remove all comments and unnecessary whitespace\n normalizer = shlex.shlex(line)\n normalizer.wordchars += '.-'\n normal_line = ' '.join([t for t in normalizer])\n if normal_line:\n # split up normalized line and build dictionary\n cfg_item = {}\n for part in normal_line.split(','):\n cfg_split = shlex.split(part)\n key = cfg_split.pop(0)\n value = cfg_split\n cfg_item[key] = value\n cfg_items.append(cfg_item)\n except (IndexError, ValueError):\n raise RuntimeError( \\\n 'Could not parse line %i of file \"%s\"' % (i, filename))\n\n # look for global bit settings\n bits_global = None\n factor_bits_global = None\n norm_bits_global = None\n for cfg_item in cfg_items:\n if 'bits_global' in cfg_item:\n if bits_global is None:\n [bits_global] = cfg_item.pop('bits_global')\n bits_global = int(bits_global)\n else:\n raise RuntimeError( \\\n 'bits_global must not be specified more than once')\n if 'factor_bits_global' in cfg_item:\n if factor_bits_global is None:\n [factor_bits_global] = cfg_item.pop('factor_bits_global')\n factor_bits_global = int(factor_bits_global)\n else:\n raise RuntimeError( \\\n 'factor_bits_global must not be specified more than once')\n if 'norm_bits_global' in cfg_item:\n if norm_bits_global is None:\n [norm_bits_global] = cfg_item.pop('norm_bits_global')\n norm_bits_global = int(norm_bits_global)\n else:\n raise RuntimeError( \\\n 'norm_bits_global must not be specified more than once')\n\n # remove empty items from cfg_items, only node definitions should 
be left\n cfg_items = filter(None, cfg_items)\n\n # look for filter nodes\n filter_nodes = {}\n adjacency = {}\n input_node = None\n output_node = None\n for cfg_item in cfg_items:\n # mandatory settings\n try:\n [node] = cfg_item['node']\n except KeyError:\n raise RuntimeError('Node type not specified')\n try:\n [name] = cfg_item['name']\n except KeyError:\n raise RuntimeError('Name not specified')\n # optional settings\n if 'bits' in cfg_item:\n [bits] = map(int, cfg_item['bits'])\n else:\n bits = bits_global\n if 'connect' in cfg_item:\n connect = cfg_item['connect']\n else:\n connect = []\n if 'input' in cfg_item:\n if input_node is None:\n input_node = name\n else:\n raise RuntimeError('More than one input node specified')\n if 'output' in cfg_item:\n if output_node is None:\n output_node = name\n else:\n raise RuntimeError('More than one output node specified')\n\n # make filter node\n if name not in filter_nodes:\n if bits is not None:\n if node == 'Const':\n filter_nodes[name] = Const(bits)\n elif node == 'Add':\n filter_nodes[name] = Add(bits)\n elif node == 'Delay':\n filter_nodes[name] = Delay(bits)\n elif node == 'Multiply':\n if 'factor_bits' in cfg_item:\n [factor_bits] = cfg_item['factor_bits']\n factor_bits = int(factor_bits)\n else:\n factor_bits = factor_bits_global\n if 'norm_bits' in cfg_item:\n [norm_bits] = cfg_item['norm_bits']\n norm_bits = int(norm_bits)\n else:\n norm_bits = norm_bits_global\n if (factor_bits is not None and norm_bits is not None):\n filter_nodes[name] = Multiply(\n bits, factor_bits, norm_bits)\n if 'factor' in cfg_item:\n [factor] = cfg_item['factor']\n factor = float(factor)\n filter_nodes[name].set_factor(factor, norm=True)\n else:\n raise ValueError('Unknown node type: %s' % node)\n else:\n raise RuntimeError('Number of bits for node \"%s\" not specified' \\\n % name)\n adjacency[name] = connect\n else:\n raise RuntimeError('Node \"%s\" already present' % name)\n\n # make filter\n if input_node is None:\n raise RuntimeError('No input node specified')\n elif output_node is None:\n raise RuntimeError('No output node specified')\n else:\n return Filter(filter_nodes, adjacency, input_node, output_node)", "def readFiltered(f):\n line = f.readline()\n while line:\n line = line.strip()\n if len(line) != 0:\n if line == \"### NEW EXPERIMENT ###\":\n # print (\"readFiltered: ''\")\n yield \"\"\n elif line[0] != \"#\":\n # print (\"readFiltered: '\",line,\"'\")\n yield line\n line = f.readline()\n # print (\"readFiltered: '\",line,\"'\")\n return line", "def prefilter(json_arg, initial_prefilter):\n\n if not initial_prefilter:\n logging.info(\"prefilter not found!\")\n # whether it is filtered or not, return as json so it can be handled uniformly from now on\n return json.loads(json_arg)\n\n with open(initial_prefilter) as f:\n lines = f.read().splitlines()\n logging.info(\"prefilter:lines in prefilter file: %d \", len(lines))\n lines = filter(lambda k: not k.startswith(\"#\"), lines)\n logging.info(\"prefilter:lines after removing comments: %d \", len(lines))\n json_args_as_json = json.loads(json_arg)\n for filtering_line in lines:\n json_args_as_json = apply_filter(json_args_as_json, filtering_line)\n\n return json_args_as_json", "def get_filter_word_list(self):\n self.filter_words = self.read_word_file(self.filter_word_file)", "def filterRansac():\n pass", "def _load_filter(self, fname, **kwargs):\n with self as current_lib:\n return UnitLickIndex(fname, current_lib._content[fname])", "def fromfile(cls, f):\n raise 
NotImplementedError(\"ScalableRedisLocalBloomFilter not support fromfile\")", "def read_filter(filter_file):\n\n fd = open(filter_file, \"r\")\n lines = fd.readlines()\n fd.close()\n\n wavelengths = []\n weights = []\n for line in lines:\n line = line.strip()\n words = line.split()\n wavelengths.append(float(words[0]))\n weights.append(float(words[1]))\n\n return (wavelengths, weights)" ]
[ "0.594911", "0.5432073", "0.5307598", "0.5264026", "0.5253835", "0.52506196", "0.5130263", "0.49939936", "0.49880716", "0.49808767", "0.4957064", "0.494303", "0.49124965", "0.49102247", "0.49089", "0.48704535", "0.48687115", "0.48383683", "0.48080763", "0.47974768", "0.47960532", "0.47910362", "0.4785495", "0.47619236", "0.47166225", "0.47120875", "0.47058204", "0.4693129", "0.4689234", "0.46866822" ]
0.73677015
0
Given a list of lines of English/French text, creates a DataFrame with train/val/test split labels.
def create_nmt_data(text,train_pct=0.7,val_pct=0.15): if train_pct + val_pct >= 1: raise Exception("train_pct + val_pct must be < 1.0") source = [] target = [] for line in text: text = line.split('\t') source.append(text[0]) target.append(text[1]) text_df = pd.DataFrame({'source_language':source,'target_language':target}) text_df['split'] = 'train' text_df = text_df.sample(frac=1).reset_index(drop=True) idx = int(len(text_df)*train_pct) text_df.loc[:idx,'split'] = 'train' idx2 = idx + int(len(text_df)*val_pct) text_df.loc[idx:idx2,'split'] = 'val' text_df.loc[idx2:,'split'] = 'test' return text_df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_training_data_file(list_of_word_lines, language):\r\n # To store each feature vector\r\n feature_vector = []\r\n\r\n # To store the entire dataset\r\n data = []\r\n\r\n for sentence in list_of_word_lines:\r\n\r\n # Contains Q\r\n CONTAINS_Q = 'N'\r\n\r\n # Contains Q\r\n CONTAINS_X = 'N'\r\n\r\n # Contains more than 1 vowel\r\n VOWELS = 'N'\r\n\r\n # Contains common dutch substrings\r\n DUTCH_SUBSTRING = 'N'\r\n\r\n # Contains is-was\r\n ISWAS = 'N'\r\n\r\n # Contains come\r\n COME = 'N'\r\n\r\n # Contains common english words\r\n COMMON_ENGLISH_WORDS = 'N'\r\n\r\n # Contains common dutch words\r\n DUTCH_WORDS = 'N'\r\n\r\n # Contains dutch ij\r\n IJ = 'N'\r\n\r\n # Contains and\r\n AND = 'N'\r\n\r\n # Contains they, he, she\r\n COLLECTIVES = 'N'\r\n\r\n for word in sentence:\r\n\r\n if re.match('[0-9]*', word):\r\n word = re.sub('[0-9]*', '', word)\r\n\r\n if re.match('[!?~`@#$%&)(_=+/.,\"»;«-]', word):\r\n word = re.sub('[!?~`@#$%&)(_=+/.,\"»;«-]', '', word)\r\n\r\n word = word.lower()\r\n if \"de\" == word or \"het\" == word or \"dat\" == word or \"en\" == word or \"een\" == word or \"voor\" == word or \"van\" == word or \"welke\" == word \\\r\n or \"te\" == word or \"hij\" == word or \"zij\" == word or \"op\" == word or \"ik\" == word or \"bij\" == word:\r\n DUTCH_WORDS = 'Y'\r\n\r\n if \"ij\" in word:\r\n IJ = 'Y'\r\n\r\n if \"the\" == word or \"but\" == word or \"for\" == word or \"which\" == word or \"that\" == word or \"and\" == word or \"not\" == word \\\r\n or \"to\" == word or \"in\" == word:\r\n COMMON_ENGLISH_WORDS = 'Y'\r\n\r\n if \"q\" in word:\r\n CONTAINS_Q = 'Y'\r\n\r\n if \"x\" in word:\r\n CONTAINS_X = 'Y'\r\n\r\n if \"aa\" in word or \"ee\" in word or \"ii\" in word or \"uu\" in word:\r\n VOWELS = 'Y'\r\n\r\n if \"ijk\" in word or \"sch\" in word or \"ijn\" in word:\r\n DUTCH_SUBSTRING = 'Y'\r\n\r\n if \"is\" == word or \"of\" == word or \"was\" == word or \"all\" in word:\r\n ISWAS = 'Y'\r\n\r\n if \"come\" == word or \"a\" == word:\r\n COME = 'Y'\r\n\r\n if \"and\" == word:\r\n AND = 'Y'\r\n\r\n if \"he\" == word or \"she\" == word or \"it\" == word or \"they\" == word:\r\n COLLECTIVES = 'Y'\r\n\r\n feature_vector.append([DUTCH_WORDS, IJ, COMMON_ENGLISH_WORDS, CONTAINS_Q, CONTAINS_X,\r\n VOWELS, DUTCH_SUBSTRING, ISWAS,\r\n COME, AND, COLLECTIVES, language])\r\n\r\n data.append(feature_vector)\r\n feature_vector = []\r\n return data", "def read_traindata (filename, labels = ['pos', 'neg']):\n def split (l):\n \"\"\"split one line into words and label\"\"\"\n segs = l.strip().split ('\\t')\n label = segs [-1]\n words = segs [:-1]\n return words, label\n \n encoding = chardet.detect(open (filename).read ()) ['encoding']\n \n with codecs.open (filename, 'r', encoding) as f:\n for line in f.readlines ():\n row = split (line)\n assert len (row) == 2\n assert isinstance(row [0], list)\n assert isinstance(row [1], basestring)\n print row [1]\n assert row [1] in labels\n yield row", "def splits(cls, text_field, label_field, root='.data',\n train='training.1600000.processed.noemoticon.csv', \n test='testdata.manual.2009.06.14.csv', \n neutral = None, **kwargs):\n \n path_train = root + train\n path_test = root + test\n \n if not os.path.exists(root):\n os.mkdir(root)\n \n if not os.path.exists(path_train) or not os.path.exists(path_test):\n path = cls.download(root)\n path_train = path + train\n path_test = path + test\n \n train_dataset = Sentiment140(path_train, text_field, label_field, neutral=neutral, **kwargs)\n test_dataset = Sentiment140(path_test, 
text_field, label_field, **kwargs)\n \n return train_dataset, test_dataset", "def feature_extraction(inputFile, text, label):\r\n df = pd.read_csv(inputFile, encoding=\"utf8\")\r\n df[text].replace(np.nan, '', inplace=True)\r\n for idx, line in df.iterrows():\r\n try:\r\n words = line[text]\r\n newWords = ''.join(words.split())\r\n df.set_value(idx, text, newWords)\r\n except:\r\n pass\r\n tf = TfidfVectorizer(analyzer='char', encoding=\"utf8\", min_df=10)\r\n\r\n x = tf.fit_transform(df[text])\r\n x = x.toarray()\r\n print(x.shape)\r\n y = df[label]\r\n\r\n return x, y", "def create_train_test(dataframe_all):\n label_encoder=LabelEncoder()\n split = StratifiedShuffleSplit(n_splits=1, test_size=0.1, random_state=42)\n for train_index, test_index in split.split(dataframe_all['word_values'], dataframe_all['document_label']):\n strat_train_set = dataframe_all.loc[train_index]\n strat_test_set = dataframe_all.loc[test_index]\n\n strat_train_set = strat_train_set.dropna(subset=['word_values'])\n strat_test_set = strat_test_set.dropna(subset=['word_values'])\n pipe=su.pipe()\n x_train, y_train = pipe.fit_transform(strat_train_set), label_encoder.fit_transform(\n strat_train_set['document_label'])\n x_test, y_test = pipe.transform(strat_test_set), label_encoder.fit_transform(\n strat_test_set['document_label'])\n\n return x_train,x_test,y_train,y_test", "def split_data(name, is_train = True):\r\n data = pd.read_csv(name, header = 0, encoding = 'ISO-8859-1')\r\n X = data['text']\r\n if is_train:\r\n Y = data['polarity']\r\n return X, Y\r\n return X", "def textFeature(mode):\r\n \r\n classlist = ['negative', 'positive']\r\n data = pd.DataFrame()\r\n\r\n for label in classlist:\r\n path = 'C:\\\\Users\\\\Tom\\\\Documents\\\\Informatiekunde\\\\Thesis\\\\data\\\\' + mode + '\\\\' + label + '\\\\'\r\n allFiles = glob.glob(path + \"*.txt\")\r\n df1 = pd.DataFrame()\r\n for review in allFiles:\r\n title = review.strip('.txt').split('\\\\')[-1]\r\n text = open(review, 'r', encoding='utf8').read()\r\n df = pd.DataFrame({'File': [title], 'Text': [text], 'Label': [label]}).set_index('File')\r\n df1 = df1.append(df)\r\n data = data.append(df1)\r\n \r\n return data", "def _read(self, documents):\n data = []\n X,Y = [], []\n for document in documents:\n d_ata = pd.read_csv(document, sep='\\t', names=['review','label'])\n data.append(d_ata)\n data = pd.concat(data)\n self.data = data\n Y = data.label\n self.vec.fit(data.review)\n X = self.preprocess(data)\n \n return train_test_split(X,Y)", "def build_dataframe(textline):\n column_names = []\n records = [line.split(u',') for line in textline]\n records = [pd.np.nan if token in (u'\\\\N', 'NULL') else token for token in records]\n # df_line = pd.read_csv(textline, header=None, names=column_names)\n df = pd.DataFrame(records, columns=column_names)\n df = df.convert_objects(convert_numeric=True)\n df.set_index('msisdn', inplace=True)\n print('-----', df.dtypes)\n return df", "def load_dataset(train_path, test_path, tokenizer):\n train_dataset = TextDataset(\n tokenizer=tokenizer,\n file_path=train_path,\n block_size=128)\n\n test_dataset = TextDataset(\n tokenizer=tokenizer,\n file_path=test_path,\n block_size=128)\n\n data_collator = DataCollatorForLanguageModeling(\n tokenizer=tokenizer, mlm=False,\n )\n return train_dataset, test_dataset, data_collator", "def load_text_and_label(data_file):\n # load data from file\n\n # splite by word\n dfRaw = pd.read_csv(data_file)\n dfRec = dfRaw[['Review Text', 'Recommended IND']].dropna()\n pos_examples = 
dfRec[dfRec['Recommended IND'] == 1]['Review Text'].tolist()\n neg_examples = dfRec[dfRec['Recommended IND'] == 0]['Review Text'].tolist()\n\n x_text = pos_examples + neg_examples\n x_text = np.array([clean_str(sentence) for sentence in x_text])\n # generate label (y)\n pos_labels = [[0,1] for _ in pos_examples]\n neg_labels = [[1,0] for _ in neg_examples]\n y = np.array(pos_labels + neg_labels)\n return [x_text, y]", "def convert_text_to_df(text):\n new_list = [i.strip() for i in text.splitlines() if i.strip() != \"\"]\n new_dict = {}\n col_name = new_list[0].strip().split()\n index_name = new_list[1].strip()\n for item in new_list[2:]:\n index, *others = item.split()\n others = [float(i) for i in others]\n new_dict[index] = others\n new_df = pd.DataFrame(new_dict).transpose()\n new_df.index.name = index_name\n new_df.columns = col_name\n return new_df", "def create_pandas_dataframes():\n train, test = Email.load_emails_from_data()\n\n train_y = [int(t.is_spam) for t in train]\n test_y = [int(t.is_spam) for t in test]\n\n vocab = get_vocabulary_vector(train)\n print(\"[ INF ] Vocab Size:\", len(vocab))\n\n train = [t.vectorize_tokens(vocab) for t in train]\n test = [t.vectorize_tokens(vocab) for t in test]\n\n train = pd.DataFrame.from_records(train, columns=vocab)\n test = pd.DataFrame.from_records(test, columns=vocab)\n\n train['is_spam'] = train_y\n test['is_spam'] = test_y\n\n return train, test", "def convert_examples_to_features_for_train(examples, label_list, max_seq_length, tokenizer):\r\n label_map = {label : i for i, label in enumerate(label_list)} #label -> i index dictionary\r\n features = []\r\n for (ex_index, example) in enumerate(examples):\r\n label_list = example.label.split(' ')\r\n\r\n tokens = []\r\n labels = []\r\n for i, word in enumerate(example.text_a.split(' ')): #textlist\r\n token_wordpiece = tokenizer.tokenize(word)\r\n tokens.extend(token_wordpiece)\r\n label_current = label_list[i]\r\n for m in range(len(token_wordpiece)):\r\n if m == 0:\r\n labels.append(label_current)\r\n else:\r\n labels.append('X')\r\n\r\n # max_seq_length-1\r\n if len(tokens) >= max_seq_length - 1:\r\n tokens = tokens[0:(max_seq_length - 2)]\r\n labels = labels[0:(max_seq_length - 2)]\r\n\r\n ntokens = []\r\n segment_ids = []\r\n label_ids = []\r\n\r\n ntokens.append('[CLS]')\r\n segment_ids.append(0)\r\n label_ids.append(label_map['[CLS]'])\r\n # print(tokens, labels)\r\n for i, token in enumerate(tokens):\r\n ntokens.append(token)\r\n segment_ids.append(0)\r\n label_ids.append(label_map[labels[i]])\r\n\r\n ntokens.append('[SEP]')\r\n segment_ids.append(0)\r\n label_ids.append(label_map['[SEP]'])\r\n\r\n input_ids = tokenizer.convert_tokens_to_ids(ntokens)\r\n input_mask = [1] * len(input_ids)\r\n\r\n #if the length is short, tianbu 0\r\n while len(input_ids) < max_seq_length:\r\n input_ids.append(0)\r\n input_mask.append(0)\r\n segment_ids.append(0)\r\n #we do not concerned about it\r\n label_ids.append(0)\r\n ntokens.append('NULL')\r\n\r\n assert len(input_ids) == max_seq_length\r\n assert len(input_mask) == max_seq_length\r\n assert len(segment_ids) == max_seq_length\r\n assert len(label_ids) == max_seq_length\r\n\r\n features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_ids=label_ids))\r\n return features", "def create_lm_dataset(opt, logger=None):\n # Using spacy to tokenize text\n spacy_en = spacy.load('en')\n # Add <unk> special case is due to wiki text which has raw <unk>\n spacy_en.tokenizer.add_special_case(\"<unk>\", [{ORTH: 
\"<unk>\"}])\n\n def tokenize(text):\n \"\"\"tokenize sentence\"\"\"\n return [item.text for item in spacy_en.tokenizer(text)]\n\n is_lower = True\n if opt.data_type == \"ptb\":\n is_lower = False\n TEXT = torchtext.data.Field(\n sequential=True,\n tokenize=tokenize,\n lower=is_lower\n )\n\n resources_dir = os.path.expanduser(opt.resources_dir)\n if opt.data_type == \"wiki3\":\n train, valid, test = torchtext.datasets.WikiText103.splits(\n text_field=TEXT,\n root=resources_dir\n )\n if opt.data_type == \"wiki2\":\n train, valid, test = torchtext.datasets.WikiText2.splits(\n text_field=TEXT,\n root=resources_dir\n )\n if opt.data_type == \"ptb\":\n train, valid, test = torchtext.datasets.PennTreebank.splits(\n text_field=TEXT,\n root=resources_dir\n )\n\n if logger:\n logger.info(f\"train token: {len(train.examples[0].text)}\")\n logger.info(f\"test token: {len(test.examples[0].text)}\")\n logger.info(f\"valid token: {len(valid.examples[0].text)}\")\n\n device = torch.device(opt.device)\n if opt.input_vector is not None:\n opt.input_vector = os.path.expanduser(opt.input_vector)\n head, tail = os.path.split(opt.input_vector)\n torchtext_vectors = torchtext.vocab.Vectors(name=tail, cache=head)\n torchtext_vectors.vectors.to(device)\n # print(f\"len: {len(torchtext_vectors.stoi)}\")\n # print(f\"size: {torchtext_vectors.vectors.size()}\")\n # Here the list of list is to simulate the real dataset\n # where first dim is sentence and second is word.\n limited_train = [[word] for word in torchtext_vectors.stoi.keys()]\n TEXT.build_vocab(limited_train, vectors=torchtext_vectors)\n else:\n TEXT.build_vocab(train)\n\n train_iter, val_iter, test_iter = torchtext.data.BPTTIterator.splits(\n (train, valid, test),\n batch_size=opt.batch_size,\n bptt_len=opt.bptt_len,\n device=device,\n repeat=False\n )\n return (TEXT, train_iter, test_iter, val_iter)", "def train_validation_test_split(col_stratify='Kind of offensive language',\n train_percent=0.6,\n validate_percent=0.2,\n test_percent=0.2,\n random_state=101):\n\n data = pd.read_csv('cleaned_data.csv', header=0)\n\n if train_percent + validate_percent + test_percent != 1.0:\n raise ValueError(f'Sum of train, validate and test is not 1.0')\n\n if col_stratify not in data.columns:\n raise ValueError(f'{col_stratify} is not a column in the dataframe')\n\n X = data\n y = data[[col_stratify]]\n\n # Split original dataframe into train and temp dataframes.\n data_train, data_temp, y_train, y_temp = train_test_split(X,\n y,\n stratify=y,\n test_size=(\n 1.0 - train_percent),\n random_state=random_state)\n # Split the temp dataframe into val and test dataframes.\n test_to_split = test_percent / (validate_percent + test_percent)\n data_val, data_test, y_val, y_val = train_test_split(data_temp,\n y_temp,\n stratify=y_temp,\n test_size=test_to_split,\n random_state=random_state)\n\n assert len(data) == len(data_train) + len(data_val) + len(data_test)\n\n return data_train, data_val, data_test, y_train, y_val, y_val", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n\n label_map = {label: i for i, label in enumerate(label_list, 1)}\n\n features = []\n for (ex_index, example) in enumerate(examples):\n textlist = example.text_a.split(' ')\n labellist = example.label\n tokens = []\n labels = []\n valid = []\n label_mask = []\n for i, word in enumerate(textlist):\n token = tokenizer.tokenize(word)\n tokens.extend(token)\n label_1 = labellist[i]\n for m in range(len(token)):\n if m == 0:\n labels.append(label_1)\n valid.append(1)\n 
label_mask.append(True)\n else:\n valid.append(0)\n if len(tokens) >= max_seq_length - 1:\n tokens = tokens[0:(max_seq_length - 2)]\n labels = labels[0:(max_seq_length - 2)]\n valid = valid[0:(max_seq_length - 2)]\n label_mask = label_mask[0:(max_seq_length - 2)]\n ntokens = []\n segment_ids = []\n label_ids = []\n ntokens.append(\"[CLS]\")\n segment_ids.append(0)\n valid.insert(0, 1)\n label_mask.insert(0, True)\n label_ids.append(label_map[\"[CLS]\"])\n for i, token in enumerate(tokens):\n ntokens.append(token)\n segment_ids.append(0)\n if len(labels) > i:\n label_ids.append(label_map[labels[i]])\n ntokens.append(\"[SEP]\")\n segment_ids.append(0)\n valid.append(1)\n label_mask.append(True)\n label_ids.append(label_map[\"[SEP]\"])\n input_ids = tokenizer.convert_tokens_to_ids(ntokens)\n input_mask = [1] * len(input_ids)\n label_mask = [True] * len(label_ids)\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n label_ids.append(0)\n valid.append(1)\n label_mask.append(False)\n while len(label_ids) < max_seq_length:\n label_ids.append(0)\n label_mask.append(False)\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n assert len(label_ids) == max_seq_length\n assert len(valid) == max_seq_length\n assert len(label_mask) == max_seq_length\n\n features.append(\n InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_ids,\n valid_ids=valid,\n label_mask=label_mask))\n return features", "def build_data_cv(file, split_dict, label_dict, clean_string=False):\n revs = []\n f = open(file)\n vocab = defaultdict(float)\n \n for index, line in enumerate(f.readlines()): \n rev = []\n rev.append(line.strip())\n if clean_string:\n orig_rev = clean_str(\" \".join(rev))\n else:\n orig_rev = \" \".join(rev)\n words = set(orig_rev.split())\n for word in words:\n vocab[word] += 1\n datum = {\"y\":label_dict[index], \n \"text\": orig_rev, \n \"num_words\": len(orig_rev.split()),\n \"split\": split_dict[index]}#1 or 2\n revs.append(datum)\n\n return revs, vocab", "def _create_examples(self, lines: List[str], mode: Split):\n examples = []\n text_index = 1 if mode == Split.test else 0\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (mode.value, i)\n text_a = line[text_index]\n if len(line) > text_index + 1:\n label = line[text_index + 1]\n else:\n label = None\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples", "def _create_examples(self, lines: List[str], mode: Split):\n # id,title,content,label\n test_mode = mode == Split.test\n title_index = 1\n content_index = 2\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (mode.value, line[0])\n try:\n text_a = line[title_index]\n text_b = line[content_index]\n if test_mode:\n label = None\n else:\n label = line[3]\n except IndexError:\n continue\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def split_train_test(df_train, labels):\n n_train = np.shape(df_train)[0]\n X = {'train': [], 'holdout': []} # features\n Y = {'train': [], 'holdout': []} # labels\n p10 = int(0.1 * n_train)\n X['holdout'] = df_train.iloc[-p10:]\n Y['holdout'] = labels[-p10:]\n X['train'] = df_train.iloc[:(n_train - p10)]\n Y['train'] = labels[:(n_train - p10)]\n return X, Y", "def load_dataset(self, fn):\n df = 
pandas.read_csv(fn,\n sep = self.sep,\n header = 0,\n keep_default_na = False)\n\n # Encode one-hot representation of the labels\n if self.classes_() is None:\n self.encoder.fit(df.label.values)\n\n # Split according to sentences and encode\n sents = self.get_sents_from_df(df)\n return (self.encode_inputs(sents),\n self.encode_outputs(sents))", "def load_data_and_labels(data_file=train_file):\n \"\"\"\n There are 7 categories - \n 1. DEMO\n 2. DISE\n 3. TRMT\n 4. GOAL\n 5. PREG\n 6. FMLY\n 7. SOCL\n \"\"\"\n d = {}\n d['DEMO'] = [1, 0, 0, 0, 0, 0, 0]\n d['DISE'] = [0, 1, 0, 0, 0, 0, 0]\n d['TRMT'] = [0, 0, 1, 0, 0, 0, 0]\n d['GOAL'] = [0, 0, 0, 1, 0, 0, 0]\n d['PREG'] = [0, 0, 0, 0, 1, 0, 0]\n d['FAML'] = [0, 0, 0, 0, 0, 1, 0]\n d['SOCL'] = [0, 0, 0, 0, 0, 0, 1]\n\n max_len = -1\n\n #Load data from files\n samples = []\n with open(data_file, 'rb') as csvfile:\n spamreader = csv.reader(csvfile, delimiter='\\t', quotechar='|')\n for i, row in enumerate(spamreader):\n if (row[0] == \"Category\"):\n continue\n print (i, row[1])\n #samples.append([row[0], row[2]])\n #getting class and title = row[0] and row[1] respectively\n samples.append([row[1], row[2], row[0]])\n #split by words\n\n return samples", "def load_data(trainfile, testfile):\n raw_train = pd.read_csv(trainfile, header=None)\n raw_test = pd.read_csv(testfile, header=None)\n train = raw_train.values\n test = raw_test.values\n train_features = train[0::, 1::]\n train_label = train[::, 0]\n test_features = test[0::, 1::]\n test_label = test[::, 0]\n train, cv , train_label, cv_label = train_test_split(train_features,train_label, test_size=0.33, random_state=42)\n return train, train_label, \\\n cv, cv_label, \\\n test_features, test_label", "def _convert_loops_to_df(text_loops):\n \n # Convert the list to a table\n df_loop = DataFrame(text_loops, columns=[u'text'])\n \n # Append columns which classify each row as a loop tag,\n # stop tag, label tab, or data values\n df_loop = _set_loops(df_loop)\n df_loop = _set_labels(df_loop)\n df_loop = _set_stops(df_loop)\n df_loop = _set_values(df_loop)\n \n # Extract the data into a table\n df_list = _extract_loop_data(df_loop)\n \n return df_list", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n\n # label_map = {label : i for i, label in enumerate(label_list)}\n\n features = []\n exindex = {}\n passagelens = []\n\n sum_of_labels = 0\n\n for (ex_index, example) in tqdm(enumerate(examples), desc=\"Tokenizing:\"):\n if example.text_a not in tokenmap.keys():\n tokens_a = tokenizer.tokenize(example.text_a)\n tokenmap[example.text_a] = tokens_a\n else:\n tokens_a = tokenmap[example.text_a]\n\n tokens_b = None\n if example.text_b:\n if example.text_b not in tokenmap.keys():\n tokens_b = tokenizer.tokenize(example.text_b)\n tokenmap[example.text_b] = tokens_b\n else:\n tokens_b = tokenmap[example.text_b]\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n\n passagelens.append(len(tokens_a) + len(tokens_b) + 3)\n\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . 
[SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambigiously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n segment_ids = [0] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [\"[SEP]\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n segment_ids += padding\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n # label_id = label_map[example.label]\n label_id = example.label\n\n sum_of_labels += label_id\n\n if ex_index < 5:\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\" % (example.guid))\n logger.info(\"tokens: %s\" % \" \".join(\n [str(x) for x in tokens]))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n logger.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n logger.info(\"label: %s (id = %d)\" % (str(example.label), 0))\n\n exindex[ex_index] = example.guid\n features.append(\n InputFeatures(uuid=ex_index,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id))\n\n print(\"Passage Token Lengths Distribution\", passagelens[-1], np.percentile(passagelens, 50),\n np.percentile(passagelens, 90), np.percentile(passagelens, 95), np.percentile(passagelens, 99))\n return features, exindex", "def pre_process_df(train_data, test_data):\n train_data[\"text\"] = train_data[\"sentence1\"] + \", \" + train_data[\"sentence2\"] # noqa\n test_data[\"text\"] = test_data[\"sentence1\"] + \", \" + test_data[\"sentence2\"]\n train_data.drop([\"sentence1\", \"sentence2\"], axis=1, inplace=True)\n test_data.drop([\"sentence1\", \"sentence2\"], axis=1, inplace=True)\n train_data = train_data[[\"text\", \"label\"]]\n test_data = test_data[[\"text\", \"label\"]]\n simple_pre_process_text_df(train_data)\n simple_pre_process_text_df(test_data)\n return train_data, test_data", "def convert_examples_to_features(tokens_set, labels_set, max_seq_length, tokenizer):\r\n\r\n #label_map = {label: i for i, label in enumerate(label_list, 1)}\r\n\r\n input_ids, input_masks, segment_ids, labels = [], [], [], []\r\n for index in tqdm_notebook(range(len(tokens_set)),desc=\"Converting examples to features\"):\r\n textlist = tokens_set[index] #example.text_a.split(' ')\r\n labellist = labels_set[index]\r\n input_id, input_mask, segment_id,label = convert_single_example(\r\n textlist, labellist,max_seq_length,tokenizer\r\n )\r\n input_ids.append(input_id)\r\n input_masks.append(input_mask)\r\n segment_ids.append(segment_id)\r\n 
labels.append(label)\r\n return (\r\n np.array(input_ids),\r\n np.array(input_masks),\r\n np.array(segment_ids),\r\n np.array(labels)\r\n )", "def get_data(train_path,\n test_path,\n tokenize='spacy',\n max_vocab_size=25000,\n train_valid_split=0.8,\n toy=False):\n train_data = pd.read_csv(train_path)\n test_data = pd.read_csv(test_path)\n\n if toy:\n train_data = train_data.head(100)\n test_data = test_data.head(100)\n\n train_data, test_data = pre_process_df(train_data, test_data)\n\n train_data_path = \"train_processed.csv\"\n test_data_path = \"test_processed.csv\"\n\n train_data.to_csv(train_data_path, header=False, index=False)\n test_data.to_csv(test_data_path, header=False, index=False)\n\n if tokenize == 'spacy':\n TEXT = data.Field(tokenize=tokenize)\n else:\n TEXT = data.Field()\n\n LABEL = data.LabelField(dtype=torch.float)\n train = data.TabularDataset(path=train_data_path,\n format=\"csv\",\n fields=[('text', TEXT),\n ('label', LABEL)])\n test = data.TabularDataset(path=test_data_path,\n format=\"csv\",\n fields=[('text', TEXT),\n ('label', LABEL)])\n\n os.remove(train_data_path)\n os.remove(test_data_path)\n\n train, valid = train.split(train_valid_split)\n\n TEXT.build_vocab(train, max_size=max_vocab_size)\n LABEL.build_vocab(train)\n\n return TEXT, LABEL, train, valid, test", "def _create_examples(self, df, mode):\n idx_tr, idx_te = next(ShuffleSplit(test_size=0.3, random_state=1234).split(df.title, df.totalViews))\n\n examples = []\n\n iterind = idx_tr if mode == \"train\" else idx_te\n\n for i in iterind:\n examples.append(\n InputExample(guid=i, text_a=df.title.values[i], label=df.totalViews.values[i]))\n\n return examples" ]
[ "0.6267126", "0.6177644", "0.6156957", "0.614642", "0.6129366", "0.612271", "0.6122403", "0.60591143", "0.5948062", "0.59287256", "0.592589", "0.5915757", "0.58811563", "0.5854081", "0.58518916", "0.5847722", "0.58189434", "0.58174044", "0.58112806", "0.58017504", "0.5774996", "0.57625914", "0.5726544", "0.5725933", "0.5721964", "0.5704764", "0.57015544", "0.56984866", "0.5696608", "0.5686199" ]
0.6742829
0
Reads a glove word embedding text file and generates a DataFrame with the embeddings.
def process_glove_data(filename): word_list = [] embed_list = [] with open(filename,encoding="utf8") as file: lines = file.readlines() for line in lines: toks = line.split(' ') word_list.append(toks[0]) vec = [float(tok) for tok in toks[1:]] embed_list.append(vec) embed = np.array(embed_list,dtype=float) embed_df = pd.DataFrame(embed,index=word_list) embed_df.index = embed_df.index.str.lower() return embed_df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_embeddings(filename):\n labels = []\n rows = []\n with open(filename, encoding='utf-8') as infile:\n for i, line in enumerate(infile):\n items = line.rstrip().split(' ')\n if len(items) == 2:\n # This is a header row giving the shape of the matrix\n continue\n labels.append(items[0])\n values = np.array([float(x) for x in items[1:]], 'f')\n rows.append(values)\n\n arr = np.vstack(rows)\n return pd.DataFrame(arr, index=labels, dtype='f')", "def load_glove_embeddings():\n data = open(\"glove.6B.50d.txt\",'r',encoding=\"utf-8\")\n embeddings = []\n word_index_dict = {'UNK':0}\n index = 1\n for lines in data:\n wordVector = lines.split(\" \")\n if(wordVector[0] in string.punctuation or any(char.isdigit() for char in wordVector[0])):\n continue\n embeddings.append(wordVector[1:-1])\n word_index_dict[wordVector[0]] = index\n index+=1\n print(\"done\")\n\n return embeddings, word_index_dict", "def load_glove_data():\n glove_path = path.join('..', 'data', 'glove', 'glove.twitter.27B.200d.txt')\n f = open(glove_path,'r')\n \n model = {}\n for line in f:\n splitLine = line.split()\n word = splitLine[0]\n embedding = np.array([float(val) for val in splitLine[1:]])\n model[word] = embedding\n \n return model", "def load_embedding(self, glove_dir='glove.6B/'):\n\n f = open(os.path.join(glove_dir, 'glove.6B.100d.txt'))\n for line in f:\n values = line.split()\n word = values[0]\n self.embeddings_index[word] = np.asarray(values[1:], dtype='float32')\n f.close()", "def read_txt_embeddings(path, params):\n word2id = {}\n vectors = []\n\n # load pretrained embeddings\n _emb_dim_file = params.emb_dim\n with io.open(path, 'r', encoding='utf-8', newline='\\n', errors='ignore') as f:\n for i, line in enumerate(f):\n if i == 0:\n split = line.split()\n assert len(split) == 2\n assert _emb_dim_file == int(split[1])\n continue\n word, vect = line.rstrip().split(' ', 1)\n vect = np.fromstring(vect, sep=' ')\n if word in word2id:\n logger.warning(\"Word \\\"%s\\\" found twice!\" % word)\n continue\n if not vect.shape == (_emb_dim_file,):\n logger.warning(\"Invalid dimension (%i) for word \\\"%s\\\" in line %i.\"\n % (vect.shape[0], word, i))\n continue\n assert vect.shape == (_emb_dim_file,)\n word2id[word] = len(word2id)\n vectors.append(vect[None])\n\n assert len(word2id) == len(vectors)\n logger.info(\"Loaded %i pretrained word embeddings from %s\" % (len(vectors), path))\n\n # compute new vocabulary / embeddings\n embeddings = np.concatenate(vectors, 0)\n embeddings = torch.from_numpy(embeddings).float()\n\n assert embeddings.size() == (len(word2id), params.emb_dim)\n return word2id, embeddings", "def load_embeddings(embedding_path):\n print('loading word embeddings from %s' % embedding_path)\n weight_vectors = []\n word_idx = {}\n with codecs.open(embedding_path, encoding='utf-8') as f:\n for line in f:\n word, vec = line.split(u' ', 1)\n word_idx[word] = len(weight_vectors)\n weight_vectors.append(np.array(vec.split(), dtype=np.float32))\n # Annoying implementation detail; '(' and ')' are replaced by '-LRB-' and\n # '-RRB-' respectively in the parse-trees.\n word_idx[u'-LRB-'] = word_idx.pop(u'(')\n word_idx[u'-RRB-'] = word_idx.pop(u')')\n # Random embedding vector for unknown words.\n weight_vectors.append(np.random.uniform(\n -0.05, 0.05, weight_vectors[0].shape).astype(np.float32))\n return np.stack(weight_vectors), word_idx", "def construct_embedding(self):\n i = 0\n self.load_dicts()\n embedding_shape = (max(self.word2idx.values()) + 1,\n self.embedding_size)\n self.embedding = 
np.zeros(embedding_shape)\n\n with open(self.config.word_vec_fi_glove, 'r') as fi:\n for line in fi:\n word_vec = line.split(\" \")[1:]\n self.embedding[i, :] = np.array(word_vec, dtype=np.float32)\n i += 1\n\n self.write_embedding()", "def gen_embedding(path):\r\n word_emb = {}\r\n with open(path, encoding='utf-8') as f:\r\n for line in tqdm(f):\r\n values = line.split()\r\n word_emb[values[0]] = np.asarray(values[1:], dtype='float32')\r\n return word_emb", "def load_embeddings(filename):\n count = 0\n matrix = []\n word_map = {}\n with open(filename, encoding=\"utf8\") as f:\n # with open(filename) as f:\n for line in f:\n line = line.strip()\n items = line.split()\n word = items[0]\n rest = items[1:]\n # print(\"word:\", word)\n word_map[word] = count\n count += 1\n\n rest = list(map(float, rest))\n matrix.append(rest)\n matrix = np.array(matrix)\n return word_map, matrix", "def get_glove_embedding():\n embedding = {}\n N = 400_000\n print(\"Reading glove embedding...\")\n with open(GLOVE_EMBD_PATH, \"rb\") as f:\n for line in tqdm(f, total=N):\n line = line.decode().split()\n word = line[0].lower()\n vector = np.array(line[1:]).astype(np.float32)\n embedding[word] = vector\n\n return embedding", "def load_pretrained_words_data(embeddings_filename, vocab):\n words = dict()\n emb_dim = None\n with gzip.open(cached_path(embeddings_filename), 'rb') as embeddings_file:\n for line in embeddings_file:\n fields = line.decode('utf-8').strip().split(' ')\n if len(fields) == 0:\n continue\n word = fields[0]\n if emb_dim is None:\n emb_dim = len(fields) - 1\n if emb_dim < 10: # my pretrained file is poisonous 😭\n emb_dim = None\n else:\n assert emb_dim == len(fields) - 1, \"{}, {}\".format(emb_dim, len(fields) - 1)\n words.update({word: [float(i) for i in fields[1:]]})\n print(\"Embedding dim: {}\".format(emb_dim))\n tokens = vocab.get_index_to_token_vocabulary(\"tokens\")\n n_tokens = len(tokens)\n data = []\n for i in tokens:\n if tokens[i] in words:\n data.append(words[tokens[i]])\n else:\n data.append([0] * emb_dim)\n return torch.tensor(data), emb_dim", "def load_embed_text(embed_file):\n \n emb_dict = dict()\n emb_size = None\n with codecs.getreader(\"utf-8\")(tf.gfile.GFile(embed_file, \"rb\")) as f:\n for line in f:\n tokens = line.strip().split(\" \")\n word = tokens[0]\n vec = list(map(float, tokens[1:]))\n emb_dict[word] = vec\n if emb_size:\n assert emb_size == len(vec), \"All embeddings should be same size\"\n else:\n emb_size = len(vec)\n return emb_dict, emb_size", "def load_embeddings(glove_path, vocab):\n vocab_size = vocab.get_vocab_size()\n words_to_keep = set(vocab.get_index_to_token_vocabulary().values())\n glove_embeddings = {}\n embedding_dim = None\n\n logger.info(\"Reading GloVe embeddings from {}\".format(glove_path))\n with open(glove_path) as glove_file:\n for line in tqdm(glove_file,\n total=get_num_lines(glove_path)):\n fields = line.strip().split(\" \")\n word = fields[0]\n if word in words_to_keep:\n vector = np.asarray(fields[1:], dtype=\"float32\")\n if embedding_dim is None:\n embedding_dim = len(vector)\n else:\n assert embedding_dim == len(vector)\n glove_embeddings[word] = vector\n\n all_embeddings = np.asarray(list(glove_embeddings.values()))\n embeddings_mean = float(np.mean(all_embeddings))\n embeddings_std = float(np.std(all_embeddings))\n logger.info(\"Initializing {}-dimensional pretrained \"\n \"embeddings for {} tokens\".format(\n embedding_dim, vocab_size))\n embedding_matrix = torch.FloatTensor(\n vocab_size, embedding_dim).normal_(\n 
embeddings_mean, embeddings_std)\n # Manually zero out the embedding of the padding token (0).\n embedding_matrix[0].fill_(0)\n # This starts from 1 because 0 is the padding token, which\n # we don't want to modify.\n for i in range(1, vocab_size):\n word = vocab.get_token_from_index(i)\n\n # If we don't have a pre-trained vector for this word,\n # we don't change the row and the word has random initialization.\n if word in glove_embeddings:\n embedding_matrix[i] = torch.FloatTensor(glove_embeddings[word])\n return embedding_matrix", "def get_word_embeddings(self):\n embedding_index = {}\n with open('./glove/glove.6B.100d.txt', encoding=\"utf8\") as f:\n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embedding_index[word] = coefs\n return embedding_index", "def read_old_glove(filepath):\n print('reading glove files:', filepath)\n\n word2idx = {}\n word_embed = [['0'] * 300] # word_embed[0] = [0] * 300, represent the <PAD>\n\n with open(filepath, 'r') as f:\n for idx, line in enumerate(f):\n line_list = line.split()\n word = ' '.join(line_list[: len(line_list)-300])\n embed = [num for num in line_list[len(line_list)-300:]]\n\n word2idx[word] = idx + 1\n word_embed.append(embed)\n\n return word2idx, word_embed", "def load_embeddings(path):\r\n\r\n embeds = dict() # dictionary mapping words to vectors\r\n for line in open(path, encoding='utf-8'):\r\n row = line.strip().split('\\t')\r\n embeds[row[0]] = np.array(row[1:], dtype=np.float32)\r\n\r\n embeddings_dim = embeds[list(embeds)[0]].shape[0]\r\n\r\n return embeds, embeddings_dim", "def load_word_vectors(filepath, word_index, vector_size):\n embedding_matrix = np.zeros((len(word_index) + 1, vector_size))\n\n fin = io.open(filepath, \"r\", encoding=\"utf-8\", newline=\"\\n\", errors=\"ignore\")\n n, d = map(int, fin.readline().split())\n\n for line in fin:\n tokens = line.rstrip().split(\" \")\n if tokens[0] in word_index:\n w = word_index[tokens[0]]\n embedding_matrix[w] = np.fromiter(map(float, tokens[1:]), \"float\")\n\n return embedding_matrix", "def load_embedding_file(self):\n if self.language == 'en':\n embed_file_dir = self.embedding_path\n wv = KeyedVectors.load_word2vec_format(embed_file_dir, binary=True)\n self.pretrained_embedding = {}\n for word in wv.vocab.keys():\n normalized_word = normalization.process(self.language.upper(), word, letters_to_keep='', letters_to_remove='',\n lowercase=True, remove_repetitions_count=-1, remove_punct=True,\n remove_digits=True, remove_vowels=False, remove_diacritics=True,\n remove_spaces=False, remove_apostrophe=True, copy_through=False,\n keep_romanized_text=False)\n self.pretrained_embedding[normalized_word] = wv[word]\n self.embed_dim = 300\n\n else:\n embed_file_dir = self.embedding_path\n fin = open(embed_file_dir, 'r', encoding='utf-8', newline='\\n', errors='ignore')\n data = {}\n for line in fin:\n if len(line.split()) == 2: # header\n continue\n tokens = line.rstrip().split(' ')\n word = tokens[0]\n normalized_word = normalization.process(self.language.upper(), word, letters_to_keep='', letters_to_remove='',\n lowercase=True, remove_repetitions_count=-1, remove_punct=True,\n remove_digits=True, remove_vowels=False, remove_diacritics=True,\n remove_spaces=False, remove_apostrophe=True, copy_through=False,\n keep_romanized_text=False)\n data[normalized_word] = np.array(tokens[1:])\n self.pretrained_embedding = data\n self.embed_dim = 300", "def load_embed(file_name, vocab_size):\n\n with tf.io.gfile.Open(file_name, 'r') as 
embed_file:\n vocab = []\n embeds = []\n depth = -1\n for index, line in enumerate(embed_file):\n if vocab_size > 0 and index >= vocab_size:\n break\n line = line.strip()\n tokens = line.strip().split(' ')\n word = tokens[0]\n vocab.append(word)\n if depth == -1:\n embed = [float(token) for token in tokens[1:]]\n else:\n embed = [float(token) for token in tokens[-depth:]]\n d = len(embed)\n if depth == -1:\n depth = d\n if d != depth:\n raise ValueError('Inconsistent embedding sizes')\n embeds.append(embed)\n\n embeds = np.stack(embeds)\n\n return vocab, embeds, depth", "def load_embedding(embedding_file_path, word_index, embedding_dim):\n # Create a Numpy Placeholder for Embedding\n max_features = len(word_index)+1\n embedding_weights = np.random.random([max_features, embedding_dim])\n count = 0\n glove_file = open(embedding_file_path)\n for line in glove_file:\n word, vector = line.split(' ')[0], line.split(' ')[1:]\n if word in word_index and word_index[word] <= max_features:\n count += 1\n vector = list(map(float, vector))\n embedding_weights[word_index[word]] = [float(i) for i in vector]\n\n print('Fraction found in glove {}'.format(count/len(embedding_weights)))\n return embedding_weights", "def glove_embedding(self, texts, file):\n self.embedding_dict = dict()\n glove_file = open(file, encoding='utf-8')\n for line in glove_file:\n word_vector = line.split()\n word = word_vector[0]\n word_vector_arr = np.asarray(word_vector[1:], dtype='float32')\n self.embedding_dict[word] = word_vector_arr\n glove_file.close()\n \n i = 0\n with pgb.ProgressBar(max_value=len(texts)) as bar:\n for text in texts:\n vec = []\n text = text.split()\n for t in text:\n try:\n vec.append(self.embedding_dict[t.lower()])\n except KeyError:\n pass\n ## There are no matched words\n if len(vec) == 0:\n print(\"len 0 vec\")\n self.word_vec.append(np.zeros((100)))\n else:\n #print(np.array(vec))\n #print(np.array(vec).shape)\n sentence = self.sentence_vec(np.array(vec))\n #print(sentence)\n #print(sentence.shape)\n self.word_vec.append(sentence)\n i += 1\n bar.update(i)\n self.word_vec = np.array(self.word_vec)\n print(self.word_vec.shape)", "def load_data(self, file_path):\n \n dataset = []\n \n for line in open(file_path):\n arr = line.strip().split('\\t')\n label = [w for w in arr[0].split(' ')]\n sentence = [w for w in arr[1].split(' ')]\n cname = ' '.join(label)\n \n # The line is useless if the class is\n # not in the class dictionary.\n if cname not in self.class_list:\n raise Exception(\"{} not in class list.\".format(cname))\n \n # Build the sample dictionary.\n sample = {}\n sample['sentence_w2v'] = []\n \n for word in sentence:\n if word not in self.w2v.vocab.keys():\n continue # ignore sentence\n \n # In the loading embedding (see self.load_embedding()), we\n # stack one additional layer of zeros in front to handle padding.\n # Thus here we append the embedding index plus one.\n sample['sentence_w2v'].append(torch.Tensor([self.w2v.vocab[word].index + 1]))\n\n sample['length'] = len(sample['sentence_w2v'])\n sample['label_onehot'] = self.onehot(self.class_indices[cname])\n sample['label_w2v'] = self.class_w2v[cname]\n dataset.append(sample)\n \n return dataset", "def load_text_dims(file: Union[str, bytes, int, PathLike],\n lossy: bool = False) -> Embeddings:\n with open(file, encoding='utf8',\n errors='replace' if lossy else 'strict') as inf:\n rows, cols = next(inf).split()\n return _load_text(inf, int(rows), int(cols))", "def load_embeddings(embeddings_path):\n\n embeddings_index = {}\n f = 
open(embeddings_path, encoding='utf-8')\n for line in tqdm(f):\n values = line.rstrip().split(' ')\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n f.close()\n print('Found {} word vectors.'.format(len(embeddings_index)))\n return embeddings_index", "def getWordEmbeddingsMatrix(script_directory, embedding_file):\n translator = str.maketrans('', '', string.punctuation)\n all_words = []\n print(\"Loading vocab from text files in:\")\n for d in os.listdir(script_directory):\n print(d)\n for fname in os.listdir(\"%s/%s\" % (script_directory, d)):\n with open(\"%s/%s/%s\" % (script_directory, d, fname), 'r') as f:\n words = [w.translate(translator) for w in f.read().split() if w.translate(translator) != \"\"]\n all_words.extend(words)\n\n model = KeyedVectors.load_word2vec_format(embedding_file, binary=True)\n vocab = {\"PAD\" : 0, \"EOS\" : 1}\n vocab.update({w : i + 2 for i,w in enumerate([w1 for w1 in set(all_words) if w1 in model]) })\n inv_dict = vocab.keys()\n ## Take a minute to load...\n\n vocab_size = len(inv_dict)\n emb_size = 300 # or whatever the size of your embeddings\n embeddings = np.zeros((vocab_size + 1, emb_size))\n for k,v in vocab.items():\n embeddings[v] = model[k]\n vocab[\"UNK\"] = len(vocab.keys())\n embeddings[vocab[\"UNK\"]] = np.ones(emb_size)\n del model\n ## Now we have a numpy matrix of embeddings...\n # x_model = tf.placeholder(tf.int32, shape=[None, input_size])\n # with tf.device(\"/cpu:0\"):\n # embedded_x = tf.nn.embedding_lookup(embeddings, x_model)\n return embeddings, vocab", "def create_embedding_matrix(filepath, word_index, embedding_dim):\n vocab_size = len(word_index) + 1 # Adding again 1 because of reserved 0 index\n embedding_matrix = np.zeros((vocab_size, embedding_dim))\n\n with open(filepath) as f:\n for line in f:\n word, *vector = line.split()\n if word in word_index:\n idx = word_index[word] \n embedding_matrix[idx] = np.array(\n vector, dtype=np.float32)[:embedding_dim]\n\n return embedding_matrix", "def load_embeddings(filepath, vocabulary, retain):\n \n word2index = dict()\n word_vectors = list()\n\n def add_entry(word, vector):\n word2index[word] = len(word2index)\n word_vectors.append(vector)\n\n model = gensim.models.KeyedVectors.load(filepath)\n\n # adding special tokens <FIL>, <UNK> and <NUM>\n dim = model.vector_size\n add_entry('<fil>', np.zeros((dim,)))\n for special in ['<unk>', '<num>']:\n vector = np.random.uniform(-0.025, 0.025, (dim,))\n add_entry(special, vector)\n\n if retain:\n for word, _ in model.vocab.items():\n add_entry(word, model[word])\n else:\n for word in vocabulary:\n if word in model:\n add_entry(word, model[word])\n\n vocabulary = vocabulary.intersection(word2index.keys())\n return word2index, np.asarray(word_vectors)", "def get_embeddings():\n embeddings = dict(get_coefs(*o.strip().split()) for o in open(EMBEDDING_FILE))\n return embeddings", "def load_glove_embeddings():\n\n emmbed_file = Path(\"./embeddings.pkl\")\n if emmbed_file.is_file():\n # embeddings already serialized, just load them\n print(\"Local Embeddings pickle found, loading...\")\n with open(\"./embeddings.pkl\", 'rb') as f:\n return pk.load(f)\n else:\n # create the embeddings\n print(\"Building embeddings dictionary...\")\n data = open(\"glove.6B.50d.txt\", 'r', encoding=\"utf-8\")\n embeddings = [[0] * EMBEDDING_SIZE]\n word_index_dict = {'UNK': 0} # first row is for unknown words\n index = 1\n for line in data:\n splitLine = line.split()\n word = tf.compat.as_str(splitLine[0])\n 
embedding = [float(val) for val in splitLine[1:]]\n embeddings.append(embedding)\n word_index_dict[word] = index\n index += 1\n data.close()\n\n # pickle them\n with open('./embeddings.pkl', 'wb') as f:\n print(\"Creating local embeddings pickle for faster loading...\")\n # Pickle the 'data' dictionary using the highest protocol available.\n pk.dump((embeddings, word_index_dict), f, pk.HIGHEST_PROTOCOL)\n\n return embeddings, word_index_dict", "def set_glove_embedding(self,fpath,embedding_dim):\n\t\temb = np.random.randn(self._count,embedding_dim)\n#\ttf.logging.info(emb[0])\n\t\twith open(fpath) as f: #python 3.x support \n\t\t\tfor k,line in enumerate(f):\n\t\t\t\tfields = line.split()\n\t\t\t\tif len(fields) - 1 != embedding_dim:\n\t\t\t\t\t# Sometimes there are funny unicode parsing problems that lead to different\n\t\t\t\t\t# fields lengths (e.g., a word with a unicode space character that splits\n\t\t\t\t\t# into more than one colum n). We skip those lines. Note that if you have\n\t\t\t\t\t# some kind of long header, this could result in all of your lines getting\n\t\t\t\t\t# skipped. It's hard to check for that here; you just have to look in the\n\t\t\t\t\t# embedding_misses_file and at the model summary to make sure things look\n\t\t\t\t\t# like they are supposed to.\n\t\t\t\t\t#logger.warning(\"Found line with wrong number of dimensions (expected %d, was %d): %s\",\n\t\t\t\t\t\t\t# embedding_dim, len(fields) - 1, line)\n\t\t\t\t\traise Exception(\"Found line with wrong number of dimensions (expected %d, was %d): %s\",\n\t\t\t\t\t\t\t\t\t\t\t embedding_dim, len(fields) - 1, line)\n\t\t\t\t\tcontinue\n\t\t\t\tword = fields[0]\n\t\t\t\tif word in self._word_to_id:\n\t\t\t\t\tvector = np.asarray(fields[1:], dtype='float32')\n\t\t\t\t\temb[self._word_to_id[word]] = vector\n#\t\tif k%1000 == 0:\n#\t\t tf.logging.info('glove : %d',k)\n\t\tself.glove_emb = emb" ]
[ "0.7637871", "0.72984564", "0.72906333", "0.7238396", "0.7030938", "0.6931128", "0.69254005", "0.6910939", "0.6879712", "0.6877971", "0.68639934", "0.6831232", "0.6824664", "0.68067014", "0.679522", "0.6794145", "0.6751845", "0.67482585", "0.67427385", "0.66889936", "0.666392", "0.665094", "0.6634601", "0.6600874", "0.65529066", "0.6495853", "0.6495826", "0.6474231", "0.6467967", "0.6462704" ]
0.7851951
0
Processes a list of splits by modifying any positions as needed.
def handle_splits(self, splits): total_leftover_cash = 0 for instrument, ratio in splits: if instrument in self.positions: self._dirty_stats = True # Make the position object handle the split. It returns the # leftover cash from a fractional share, if there is any. position = self.positions[instrument] leftover_cash = position.handle_split(instrument, ratio) total_leftover_cash += leftover_cash return total_leftover_cash
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split(self, splits, catchall=False):\r\n raise NotImplementedError()", "def _setup_splits(self):\n #ntot = self.reredux_conf['nperfile']\n ntot = self.reredux_conf['Ngals']\n npersplit = self.runconf['nper']\n\n self.beglist, self.endlist = get_splits(ntot, npersplit)", "def addSplit(self):\n pass", "def split(self, place_leaf_splitted):\n raise NotImplementedError", "def split(self, X):", "def _splitPoints(self, points, split):\n # validate split\n if not split:\n return [points]\n\n # complete split with adding start and end frames\n if split[0] != 0:\n split.insert(0, 0)\n\n if split[-1] != len(points):\n split.append(len(points))\n\n # make sure split is sorted and doesn't contain any duplicates\n split = list(set(split))\n split.sort()\n\n # split range for looping\n splitA = split[:-1]\n splitB = split[1:]\n\n # get lists\n return [points[a:b + 1] for a, b in zip(splitA, splitB)]", "def data_process(self):\n logging.info('Processing the data and split files')\n lines = Utility.file_len(self.fname)\n self.lines_to_be, self.split_files = Utility.split_files(self.fname, lines,\n cpu_count().real)", "def split(self):\n st = time()\n tokens = self._build_args.tokens\n\n for token_split in IStorage._tokens_partitions(tokens, config.min_number_of_tokens,\n config.number_of_partitions):\n storage_id = uuid.uuid4()\n log.debug('assigning to %s %d tokens', str(storage_id), len(token_split))\n new_args = self._build_args._replace(tokens=token_split, storage_id=storage_id)\n self.__class__._store_meta(new_args)\n\n yield self.__class__.build_remotely(new_args)\n log.debug('completed split of %s in %f', self.__class__.__name__, time() - st)", "def nextSplit(self):\n pass", "def place_at_splits(data):\n groups = defaultdict(list)\n for runner_idx, runner in enumerate(data):\n splits = runner['splits']\n for split in splits:\n split['runner_idx'] = runner_idx\n groups[split['split_dist']].append(split)\n\n ranks = []\n srt_keys = sorted(groups, key=groups.get)\n for key in srt_keys:\n group = groups[key]\n srt_group = sorted(group, key=lambda t: t['split_mins'])\n ranked_group = []\n for rank, split in enumerate(srt_group):\n split['rank'] = rank\n ranked_group.append(split)\n ranks.append(ranked_group)\n\n return data, ranks", "def unsplit(self, variant_groups):\n for vargroup in variant_groups:\n self.variant_list.extend(vargroup.variant_list)\n self.pos = min([var.start for var in self.variant_list])\n self.end = max([var.end for var in self.variant_list])", "def go(self):\n num_fofs = self.fofs['fofid'].max()\n fof_splits = split.get_splits(num_fofs, self['chunksize'])\n\n for isplit,fof_split in enumerate(fof_splits):\n logger.info('%s %s' % (isplit,fof_split))\n self._write_split(isplit, fof_split)", "def process(lines):\n lines = list(map(_clean, lines))\n # lines = list(map(_split, lines))\n return lines", "def split(self):\n if self.split_level <= 0:\n self.doSplit = False\n return []\n else:\n vel_polar = cart_to_polar(self.pos[1])\n\n ang1 = vel_polar[1] + math.pi / 4\n ang2 = vel_polar[1] - math.pi / 4\n\n vel1 = polar_to_cart( [vel_polar[0] * 1.5, ang1] )\n vel2 = polar_to_cart( [vel_polar[0] * 1.5, ang2] )\n\n offset1 = polar_to_cart( [self.radius, ang1] )\n offset2 = polar_to_cart( [self.radius, ang2] )\n\n pos1 = self.pos[0] + offset1\n pos2 = self.pos[0] + offset2\n\n return [ Asteroid([pos1, vel1], [self.ang[0] + 0.01, self.ang[1] * 1.5],\n scale = self.scale / 1.4,\n color_idx = self.color_idx,\n doSplit = True,\n split_level = self.split_level - 1\n ),\n Asteroid([pos2, 
vel2], [self.ang[0] - 0.01, -self.ang[1] * 1.5],\n scale = self.scale / 1.4,\n color_idx = self.color_idx,\n doSplit = True,\n split_level = self.split_level - 1\n )\n ]", "def spliter(temp,split1,split2):\n for x in range(len(temp)):\n if x<len(temp)/2:\n split1.append(temp[x])\n else:\n split2.append(temp[x])", "def splits(self) -> List[int]:\n if self._splits is None:\n self.RefreshStats()\n return self._splits", "def split(\n items: typing.List[typing.Any],\n sizes: typing.List[float],\n random_state: int = 42,\n stratify: typing.Sequence[typing.Hashable] = None,\n group: typing.Sequence[typing.Hashable] = None,\n preserve: typing.Sequence[typing.Optional[int]] = None,\n) -> typing.Sequence[typing.Any]:\n splits: typing.List[typing.List[typing.Any]] = [[] for _ in range(len(sizes))]\n if group is None:\n group = list(range(len(items)))\n if stratify is None:\n stratify = [0] * len(items)\n if preserve is not None:\n assert len(items) == len(\n preserve\n ), \"When preserve is provided, it must be the same length as items.\"\n for item, preserveIdx in zip(items, preserve):\n if preserveIdx is not None:\n splits[preserveIdx].append(item)\n ideal_counts = [s * len(items) for s in sizes]\n items, stratify, group = [\n [\n entry\n for entry, preserveIdx in zip(current_list, preserve)\n if preserveIdx is None\n ]\n for current_list in [items, stratify, group]\n ]\n if len(items) == 0:\n # There's nothing left to split.\n return splits\n # Rebalance sizes so that we shuffle the remaining\n # items into the splits to try and match the originally\n # desired sizes.\n offsets = [\n max(target - len(split), 0) for split, target in zip(splits, ideal_counts)\n ]\n sizes = [offset / sum(offsets) for offset in offsets]\n assert (\n 0.99 < sum(sizes) < 1.01\n ), f\"The sizes must add up to 1.0 (they added up to {sum(sizes)}).\"\n assert len(group) == len(items), \"group must be the same length as the collection.\"\n assert len(stratify) == len(\n items\n ), \"stratify must be the same length as the collection.\"\n rng = np.random.default_rng(seed=random_state)\n grouped = [\n {**dict(zip([\"idxs\", \"stratifiers\"], zip(*grouper))), \"group\": g}\n for g, grouper in groupby_unsorted(\n list(zip(range(len(stratify)), stratify)),\n key=lambda v: typing.cast(typing.Sequence[typing.Hashable], group)[v[0]],\n )\n ]\n hashes = {\n h: list(g)\n for h, g in groupby_unsorted(\n grouped, key=lambda g: hash(tuple(set(g[\"stratifiers\"])))\n )\n }\n for subgroups in hashes.values():\n for a, u in zip(\n rng.choice(len(sizes), size=len(subgroups), p=sizes),\n subgroups,\n ):\n splits[a].extend(items[idx] for idx in u[\"idxs\"])\n return splits", "def fillBestSplitsInDataByInfoGainIntoDict(self, data, structure, colName, numOfSplits, splitsList, indexToInsert):\n if len(data) <= 0 or numOfSplits <= 0:\n return []\n colIndex = structure[colName]['index']\n split = self.findBestSplitInDataByInfoGain(data, structure, colName)\n if str(indexToInsert) in splitsList:\n splitsList[str(indexToInsert)] += [split]\n else:\n splitsList[str(indexToInsert)] = [split]\n indexToInsert, numOfSplits = indexToInsert + 1, numOfSplits - 1\n\n if split:\n newDataBellowSplit = list(filter(lambda y: float(y[colIndex]) <= split[0], data))\n newDataAboveSplit = list(filter(lambda y: float(y[colIndex]) > split[0], data))\n self.fillBestSplitsInDataByInfoGainIntoDict(newDataBellowSplit, structure, colName, numOfSplits, splitsList, indexToInsert)\n self.fillBestSplitsInDataByInfoGainIntoDict(newDataAboveSplit, structure, colName, numOfSplits, 
splitsList, indexToInsert)", "def setSplit(self,split):\n self.split=split", "def split_train(splits, val_size, groups=None, **kwargs):\n new_splits = []\n for train_val, test in splits:\n sub_groups = None if groups is None else groups[train_val]\n train, val = train_test_split_groups(\n train_val, val_size=val_size, groups=sub_groups, **kwargs) if val_size > 0 else (train_val, [])\n new_splits.append([train, val, test])\n return new_splits", "def process(self, lists, subqueries):\n pass", "def split_chunks(item_list, num_items_in_list):\n for item in range(0, len(item_list), num_items_in_list):\n # Create an index range for item_list of num_items_in_list items:\n yield item_list[item:item + num_items_in_list]", "def split(self, num_or_size_splits, shuffle=False):\n raise NotImplementedError", "def split(self, user, number=2, piece='a', comment=None, force_refresh=True):\n if comment is None:\n comment = 'Split sample into {0} pieces'.format(number)\n\n process = Process.objects.create(title='Split Sample',\n comment=comment,\n user=user,\n type_id='split-process')\n nodes = []\n\n branch = self.get_piece(piece)\n for i in range(number):\n if i == 0:\n new_piece = piece\n else:\n new_piece = self._get_next_piece()\n # Note: Issue #248 in django-mptt causes the tree to not be properly\n # updated when inserting objects if parent is set. Workaround\n # is to set parent_id instead. This fixes methods such as\n # MPTTModel.get_descendants(). Since you are referencing an\n # object that has changed in the database (process_tree),\n # the lft and rght items are not updated properly. Workarounds\n # include manually updating the root node or requerying for\n # the sample object which will force a refresh.\n nodes.append(self._insert_node(process, new_piece, i + 1, branch))\n if force_refresh: # workaround to force the root node to update\n self.refresh_tree()\n return nodes", "def set_split(self):\n #Regular expressions; try 1 first, then 2, etc.\n rex1 = re.compile('F?LD')\n rex2 = re.compile('[LF]?LQ')\n \n #For regular expression, check if there is a match that is >10 AA from the end\n if re.search(rex1, self.sequence) and len(re.split(rex1, self.sequence)[-1]) > 10:\n start, end = [m.span() for m in rex1.finditer(self.sequence)][-1]\n# end += 16 #TODO why +15/16?\n elif re.search(rex2, self.sequence) and len(re.split(rex2,self.sequence)[-1]) > 10:\n start, end = [m.span() for m in rex2.finditer(self.sequence)][-1]\n# end += 15\n else:\n self.split_index = -1\n self.core = self.sequence\n self.leader = ''\n return\n self.split_index = end\n self.leader = self.sequence[:end]\n self.core = self.sequence[end:]", "def split(base_list):\n list_mid_pointer=len(base_list)//2\n return base_list[:list_mid_pointer],base_list[list_mid_pointer:]", "def split_all(self):\n for domino in self.dominoes[:]:\n self.split(domino)", "def apply_split(\n sid: str,\n old_shares: int,\n new_shares: int\n ) -> list[dict[str, Union[str, int]]]:\n params = {\n \"sid\": sid,\n \"old_shares\": old_shares,\n \"new_shares\": new_shares\n }\n response = houston.patch(\"/blotter/positions\", params=params)\n houston.raise_for_status_with_json(response)\n return response.json()", "def process_commands():\n # Parse and handle each different command\n args = parse_arguments()\n\n pdfsplit.pdf_split(args.file, args.pages, args.offset)", "def make_splits(self):\n # produce fold/portion splits of the training indexes: these output indexes to the tr. 
indexes themselves\n if self.folds is not None:\n meta_trainval_idx = kfold_split(self.train_idx, self.folds, self.seed, self.labels, self.label_info)\n elif self.portion is not None:\n meta_trainval_idx = portion_split(self.train_idx, self.portion, self.seed, self.labels, self.label_info)\n else:\n meta_trainval_idx = [(np.arange(len(self.train_idx)), np.arange(0, dtype=np.int32))]\n # \"dereference\" the metaindexes to point to the data themselves\n self.trainval_idx = []\n for (tidx, vidx) in meta_trainval_idx:\n self.trainval_idx.append((self.train_idx[tidx], self.train_idx[vidx]))" ]
[ "0.69268346", "0.57384413", "0.5705373", "0.5634635", "0.56108665", "0.5545612", "0.5541551", "0.5517076", "0.5466351", "0.54659456", "0.54127634", "0.5412377", "0.53936285", "0.5370275", "0.53439856", "0.5326818", "0.53085357", "0.5286673", "0.5274086", "0.5254431", "0.52331835", "0.52204424", "0.5218673", "0.5206883", "0.51222473", "0.5118309", "0.5107839", "0.5102903", "0.51009226", "0.5100633" ]
0.6112161
1
Given a list of dividends whose ex_dates are all the next trading day, calculate and store the cash and/or stock payments to be paid on each dividend's pay date.
def earn_dividends(self, cash_dividends, stock_dividends): for cash_dividend in cash_dividends: self._dirty_stats = True # only mark dirty if we pay a dividend # Store the earned dividends so that they can be paid on the # dividends' pay_dates. div_owed = self.positions[cash_dividend.instrument].earn_dividend( cash_dividend, ) try: self._unpaid_dividends[cash_dividend.pay_date].append(div_owed) except KeyError: self._unpaid_dividends[cash_dividend.pay_date] = [div_owed] for stock_dividend in stock_dividends: self._dirty_stats = True # only mark dirty if we pay a dividend div_owed = self.positions[ stock_dividend.instrument ].earn_stock_dividend(stock_dividend) try: self._unpaid_stock_dividends[stock_dividend.pay_date].append( div_owed, ) except KeyError: self._unpaid_stock_dividends[stock_dividend.pay_date] = [ div_owed, ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pay_dividends(self, next_trading_day):\n net_cash_payment = 0.0\n\n try:\n payments = self._unpaid_dividends[next_trading_day]\n # Mark these dividends as paid by dropping them from our unpaid\n del self._unpaid_dividends[next_trading_day]\n except KeyError:\n payments = []\n\n # representing the fact that we're required to reimburse the owner of\n # the stock for any dividends paid while borrowing.\n for payment in payments:\n net_cash_payment += payment['amount']\n\n # Add stock for any stock dividends paid. Again, the values here may\n # be negative in the case of short positions.\n try:\n stock_payments = self._unpaid_stock_dividends[next_trading_day]\n except KeyError:\n stock_payments = []\n\n for stock_payment in stock_payments:\n payment_instrument = stock_payment['payment_instrument']\n share_count = stock_payment['share_count']\n # note we create a Position for stock dividend if we don't\n # already own the instrument\n if payment_instrument in self.positions:\n position = self.positions[payment_instrument]\n else:\n position = self.positions[payment_instrument] = Position(\n payment_instrument,\n )\n\n position.amount += share_count\n\n return net_cash_payment", "def get_dividends(self, stock_list, start_date=None, end_date=None):\n df_dict = {}\n df_list = []\n file_in_path = [year.replace(\".csv\", \"\") for year in self.get_csv_in_path(self.dividend_eps_path)]\n if not start_date:\n start_date = file_in_path[0]\n if not end_date:\n end_date = file_in_path[-1]\n if start_date > end_date:\n return df_dict\n for year in range(int(start_date), int(end_date)+1):\n target_path = \"{}/{}.csv\".format(self.dividend_eps_path, year)\n df = pd.read_csv(target_path, index_col=\"名稱\")\n self.replace_nan_to_other(df, \"\")\n for stock in stock_list:\n pd_index = df.index.to_list()\n old_list = []\n if stock in pd_index:\n data = df.loc[stock]\n\n # print(\"日期 = {}\".format(data.get(\"除息交易日\")))\n if df_dict.get(stock):\n old_list = df_dict.get(stock)\n\n # check data is available\n dict = {}\n if data.get(\"現金股利\") != \"\":\n dict.update({\"除息交易日\": \"{}{}\".format(year, data.get(\"除息交易日\").split(\"'\")[1].replace(\"/\", \"\")) if data.get('除息交易日') else \"\",\n \"現金股利\": data.get(\"現金股利\"),\n })\n if data.get(\"股票股利\") != \"\":\n dict.update({\"除權交易日\": \"{}{}\".format(year, data.get(\"除權交易日\").split(\"'\")[1].replace(\"/\", \"\")) if data.get('除權交易日') else \"\",\n \"股票股利\": data.get(\"股票股利\"),\n })\n if dict:\n old_list.append(dict)\n df_dict.update({stock: old_list})\n\n return df_dict", "def list_dividends(\n self,\n ticker: Optional[str] = None,\n ticker_lt: Optional[str] = None,\n ticker_lte: Optional[str] = None,\n ticker_gt: Optional[str] = None,\n ticker_gte: Optional[str] = None,\n ex_dividend_date: Optional[Union[str, date]] = None,\n ex_dividend_date_lt: Optional[Union[str, date]] = None,\n ex_dividend_date_lte: Optional[Union[str, date]] = None,\n ex_dividend_date_gt: Optional[Union[str, date]] = None,\n ex_dividend_date_gte: Optional[Union[str, date]] = None,\n record_date: Optional[Union[str, date]] = None,\n record_date_lt: Optional[Union[str, date]] = None,\n record_date_lte: Optional[Union[str, date]] = None,\n record_date_gt: Optional[Union[str, date]] = None,\n record_date_gte: Optional[Union[str, date]] = None,\n declaration_date: Optional[Union[str, date]] = None,\n declaration_date_lt: Optional[Union[str, date]] = None,\n declaration_date_lte: Optional[Union[str, date]] = None,\n declaration_date_gt: Optional[Union[str, date]] = None,\n declaration_date_gte: 
Optional[Union[str, date]] = None,\n pay_date: Optional[Union[str, date]] = None,\n pay_date_lt: Optional[Union[str, date]] = None,\n pay_date_lte: Optional[Union[str, date]] = None,\n pay_date_gt: Optional[Union[str, date]] = None,\n pay_date_gte: Optional[Union[str, date]] = None,\n frequency: Optional[Union[int, Frequency]] = None,\n cash_amount: Optional[float] = None,\n cash_amount_lt: Optional[float] = None,\n cash_amount_lte: Optional[float] = None,\n cash_amount_gt: Optional[float] = None,\n cash_amount_gte: Optional[float] = None,\n dividend_type: Optional[Union[str, DividendType]] = None,\n limit: Optional[int] = None,\n sort: Optional[Union[str, Sort]] = None,\n order: Optional[Union[str, Order]] = None,\n params: Optional[Dict[str, Any]] = None,\n raw: bool = False,\n options: Optional[RequestOptionBuilder] = None,\n ) -> Union[Iterator[Dividend], HTTPResponse]:\n url = \"/v3/reference/dividends\"\n\n return self._paginate(\n path=url,\n params=self._get_params(self.list_dividends, locals()),\n raw=raw,\n deserializer=Dividend.from_dict,\n options=options,\n )", "def calculate_payments(yearly_payments_percentage, cost_reductions,\n days_with_payments, days_for_discount_rate):\n\n return [period_payment(yearly_payments_percentage, ccr,\n days_with_payments[i], days_for_discount_rate[i])\n for i, ccr in enumerate(cost_reductions)]", "def price_generator(self, start, end, periods):\r\n tickers = [self.SelectedTicker]\r\n tick_yahoo = YahooFinancials(tickers)\r\n data = tick_yahoo.get_historical_price_data(start, \r\n end, \r\n periods)\r\n \r\n df = pd.DataFrame({\r\n a: {x['formatted_date']: x['adjclose'] for x in data[a]['prices']} for a in tickers})\r\n \r\n self.prices = df.dropna()\r\n self.returns = self.prices.pct_change().dropna()\r\n try:\r\n self.div_yield = tick_yahoo.get_dividend_yield()\r\n #print(self.div_yield[self.SelectedTicker])\r\n if self.div_yield[self.SelectedTicker] == None:\r\n self.div_yield = 0.00\r\n else:\r\n self.div_yield = self.div_yield[self.SelectedTicker]\r\n except:\r\n print(\"no dividend yield\")", "def _recompute(self):\n current_date = self.start_date\n self.quarterly_date_list = []\n self.daily_date_list = []\n while current_date <= self.end_date:\n current_quarter = get_quarter(current_date)\n current_year = current_date.year\n next_year, next_quarter = add_quarter(current_year, current_quarter)\n next_start_quarter_date = date(next_year, get_month(next_quarter),\n 1)\n\n days_till_next_quarter = (next_start_quarter_date -\n current_date).days\n days_till_end = (self.end_date - current_date).days\n if days_till_next_quarter <= days_till_end:\n current_start_quarter_date = date(current_year,\n get_month(current_quarter), 1)\n if current_start_quarter_date == current_date:\n self.quarterly_date_list.append(\n (current_year, current_quarter, lambda x: True))\n current_date = next_start_quarter_date\n elif days_till_next_quarter > self.balancing_point:\n self.quarterly_date_list.append(\n (current_year, current_quarter,\n lambda x: date(x['date_filed']) >= self.start_date))\n current_date = next_start_quarter_date\n else:\n while current_date < next_start_quarter_date:\n self.daily_date_list.append(current_date)\n current_date += timedelta(days=1)\n else:\n if days_till_end > self.balancing_point:\n if days_till_next_quarter - 1 == days_till_end:\n self.quarterly_date_list.append(\n (current_year, current_quarter, lambda x: True))\n current_date = next_start_quarter_date\n else:\n self.quarterly_date_list.append(\n (current_year, 
current_quarter,\n lambda x: date(x['date_filed']) <= self.end_date))\n current_date = self.end_date\n else:\n while current_date <= self.end_date:\n self.daily_date_list.append(current_date)\n current_date += timedelta(days=1)", "def running_total(date_list):\n return sum(d.price for d in date_list)", "def fill_prices_using_dates(ls_ls_prices, ls_ls_dates, ls_master_dates):\n dict_corrections = {}\n dict_errors = []\n for indiv_ind, ls_prices in enumerate(ls_ls_prices):\n for day_ind, price in enumerate(ls_prices):\n if price != price:\n relative_day = 0\n while (day_ind + relative_day < len(ls_master_dates)-1) and\\\n (ls_ls_prices[indiv_ind][day_ind + relative_day] !=\\\n ls_ls_prices[indiv_ind][day_ind + relative_day]):\n relative_day += 1\n next_valid_date = ls_ls_dates[indiv_ind][day_ind + relative_day]\n # if next_valid_date is not None (end of series full of None)\n if next_valid_date and next_valid_date != '--':\n try:\n # could have bad info in date (check with regex?)\n next_valid_date_int = int(u'20%s%s%s' %(next_valid_date[6:],\n next_valid_date[3:5],\n next_valid_date[:2]))\n # next date must be the same or anterior to the current date\n if next_valid_date_int <= int(ls_master_dates[day_ind]):\n ls_ls_prices[indiv_ind][day_ind] = ls_ls_prices[indiv_ind][day_ind + relative_day]\n dict_corrections.setdefault(indiv_ind, []).append(day_ind)\n except:\n dict_errors.setdefault(indiv_ind, []).append(day_ind)\n return (ls_ls_prices, dict_corrections, dict_errors)", "def compute_portvals(start_date, end_date, orders_file, start_val):\n \n #Read order file\n orders = pd.read_csv( orders_file, parse_dates = [0])\n \n #Get symbols making up the portfolio\n stock_symbols = list( set( orders[\"Symbol\"] ) )\n dates = pd.date_range(start_date, end_date)\n \n #Read stock prices\n stock_prices = get_data(stock_symbols, dates)\n \n #Create a portfolio keeping track of positions, \n #_CASH column indicates cash position, _VALUE total portfolio value\n #_LEVERAGE the leverage of portfolio when we allow for short selling\n symbols = stock_symbols[:] #Shallow copy of the list\n symbols.append(\"_CASH\")\n symbols.append(\"_VALUE\")\n symbols.append(\"_LEVERAGE\")\n \n #Index contains only business days, same dates as stock prices\n portfolio = pd.DataFrame(index=stock_prices.index, columns = symbols )\n portfolio.fillna(0) \n portfolio[\"_CASH\"][0] = start_val\n portfolio[\"_VALUE\"][0] = start_val\n \n #Snapshot of a portfolio at any time. 
To avoid using numerical indexes\n portfolio_snapshot = dict.fromkeys ( symbols, 0 )\n portfolio_snapshot[\"_CASH\"] = start_val\n portfolio[\"_VALUE\"] = start_val\n \n #Now calcualte portfolio day by day\n for date in portfolio.index:\n #Check transactions for the day\n day_orders = orders[ orders[\"Date\"] == date ] \n \n for ord in day_orders.iterrows():\n symbol = ord[1][ \"Symbol\"] \n stock_price = stock_prices[ symbol ][ date ]\n shares = ord[1][\"Shares\" ]\n side = ord[1][\"Order\"]\n \n if side == \"BUY\":\n portfolio_snapshot[ \"_CASH\" ] -= stock_price * shares\n portfolio_snapshot[ symbol ] += shares \n elif side == \"SELL\":\n portfolio_snapshot[ \"_CASH\" ] += stock_price * shares\n portfolio_snapshot[ symbol ] -= shares\n else:\n raise \"Order not recognized.\"\n \n #Compute portfolio value\n portfolio_snapshot[ \"_VALUE\" ] = portfolio_snapshot[ \"_CASH\" ]\n shorts = longs = 0\n for symbol in stock_symbols: \n stock_price = stock_prices[ symbol ][ date ]\n shares = portfolio_snapshot[ symbol ]\n notional = stock_price*shares\n if shares > 0:\n longs += notional\n else:\n shorts += notional\n \n portfolio_snapshot[ \"_VALUE\" ] += notional\n \n #Compute leverage\n leverage = (longs+shorts)/(longs-shorts + portfolio_snapshot[ \"_CASH\" ] )\n portfolio_snapshot[ \"_LEVERAGE\" ] = leverage\n \n #Assert we never achieve a leverage > 2.0\n if leverage > 2:\n raise \"Leverage > 2.0 achieved\"\n \n #Update portfolio from the daily snapshot\n #TODO: Is this causing performance issues?\n for symbol in portfolio.keys():\n portfolio[ symbol ][ date ] = portfolio_snapshot[ symbol ]\n \n return portfolio", "def get_returns(self, start_date=None, end_date=None, stocks=None):\n if stocks is None:\n stocks = self.stocks\n\n if start_date is None:\n start_date = self.dates[0]\n\n if end_date is None:\n end_date = self.dates[-1]\n\n if type(end_date) is not datetime.datetime and type(end_date) is not pd.tslib.Timestamp:\n end_date = datetime.datetime.strptime(end_date, \"%Y-%m-%d\")\n\n if type(start_date) is not datetime.datetime and type(start_date) is not pd.tslib.Timestamp:\n start_date = datetime.datetime.strptime(start_date, \"%Y-%m-%d\")\n\n dates_to_check = self.dates[self.dates.index(start_date): self.dates.index(end_date) + 1]\n\n stock_money = []\n\n for date in dates_to_check:\n stock_money += [self.get_day_returns(stocks, date)]\n\n stock_money = pd.DataFrame({\"stock value\": stock_money}).set_index([self.dates])\n\n return_info = join_features(stock_money, self.cash)\n return_info['value'] = return_info['cash'] + return_info['stock value']\n\n return return_info", "def _get_financials_by_chunk(self, args):\n (istart, iend) = args\n comp_index = self.components.index\n # download financials\n browser=webdriver.Chrome()\n for sym in comp_index[istart:iend]:\n print('Chunk %s-%s: downloading financial data for %s' %(comp_index[istart], comp_index[iend], sym))\n stock = Symbol(sym)\n if 'Exchange' in self.components.columns:\n exch = self.components['Exchange'][sym]\n if type(exch) == pd.Series:\n # unexpected duplicates, e.g. 
AMOV\n exch = exch.iloc[0]\n if type(exch) == str:\n stock.exch = exch\n stock.get_financials(browser=browser)\n stock.save_financial_data()\n browser.quit()\n return", "def get_financials(self, update_list=True, sym_start=str(), sym_end=str(), num_procs=9):\n if self.components.empty or update_list:\n self.get_compo_list(update_list=True)\n # slice symbols\n comp_index = self.components.index\n istart = 0\n iend = len(comp_index)\n if len(sym_start) > 0 and sym_start in comp_index:\n istart = comp_index.get_loc(sym_start)\n if len(sym_end) > 0 and sym_end in comp_index:\n iend = comp_index.get_loc(sym_end)\n if istart > iend:\n (istart, iend) = (iend, istart) # make sure end is greater than start\n # download financials\n pool = mp.Pool(processes=num_procs)\n steps = np.round(np.linspace(istart, iend, num_procs+1)).astype(int)\n args = [(steps[i-1], steps[i]-1) for i in range(1,len(steps))]\n stats = pool.map(self._get_financials_by_chunk, args)\n return", "def period_payment(yearly_payments_percentage, client_cost_reduction,\n days_with_payments, days_for_discount_rate):\n\n yearly_payments_percentage = Fraction(str(yearly_payments_percentage))\n client_cost_reduction = Fraction(str(client_cost_reduction))\n\n if days_with_payments == 0:\n payments = Fraction(0)\n else:\n payments = Fraction(days_with_payments, days_for_discount_rate)\n return (yearly_payments_percentage * client_cost_reduction * payments)", "def get_prices(start, end):\n\n tickers = TICKERS # fetch tickers from config.py\n df_final = pd.DataFrame() # declared for merging purposes (inside loops)\n\n for ticker in tickers: # Loop over tickers to fetch individual price series\n\n r = requests.get(\"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol=\" + ticker\n + \"&outputsize=full&apikey=\" + ALPHAVANTAGE_KEY)\n r_dict = r.json()\n\n dates = np.array([]) # this loop makes the index into an index of datetime objects. Note the format.\n for i in r_dict['Time Series (Daily)'].keys():\n datetime_obj = datetime.datetime.strptime(i, '%Y-%m-%d')\n dates = np.append(dates, datetime_obj)\n\n prices = np.array([]) # This loop extracts all prices and put them into an array\n for i in r_dict['Time Series (Daily)']:\n x = r_dict['Time Series (Daily)'][i]['5. adjusted close']\n prices = np.append(prices, x)\n\n open_prices = np.array([]) # grab opening prices as well\n for i in r_dict['Time Series (Daily)']:\n x = r_dict['Time Series (Daily)'][i]['1. open']\n open_prices = np.append(open_prices, x)\n\n df = pd.DataFrame({ # This dataframe contains each individual stock\n 'Date': dates,\n str(ticker + '_' + 'adjclose'): prices,\n str(ticker + '_' + 'open'): open_prices\n })\n df = df.set_index('Date')\n\n df_final = pd.DataFrame(data=df_final,\n index=dates) # these few lines are for merging the individual dataframes\n df_final.index.name = 'Date'\n df_final = df.merge(df_final, left_index=True, right_index=True)\n\n for ticker in tickers: # convert to numeric values. 
Prices are just \"objects\"\n df_final[str(ticker + '_' + 'adjclose')] = pd.to_numeric(df_final[str(ticker + '_' + 'adjclose')])\n df_final[str(ticker + '_' + 'open')] = pd.to_numeric(df_final[str(ticker + '_' + 'open')])\n\n df_final = df_final.iloc[::-1]\n\n return df_final[start: end] # slice the dataframe at the end, only return the specified date-range.", "def compute_portvals(start_date, end_date, trades_df, start_val):\n # SETTING UP ORDERS DATAFRAME\n # Read orders file into a dataframe http://pandas.pydata.org/pandas-docs/stable/io.html#io-read-csv-table \n orders = trades_df\n symbols = np.unique(orders['Symbol']).tolist() # List of all the symbols used in orders\n\n # SETTING UP PRICES DATAFRAME\n # Read in adjusted closing prices for given symbols, date range... drop non-trading days... add cash column\n dates = pd.date_range(start_date, end_date)\n prices = get_data(symbols, dates, addSPY=False).dropna()\n prices['cash'] = 1.00\n\n # SETTING UP TRADES DATAFRAME\n # Daily snapshot of portfolio changes (+ = Buy Order, - = Sell Order) with cash adjustments\n trades = pd.DataFrame(0.00, index=prices.index, columns=symbols)\n trades['cash'] = 0.00\n\n for row_index, row in orders.iterrows():\n try:\n if row.Order == 'SELL':\n trades.ix[row.Date,row.Symbol] += (-1 * row.Shares) # Subtract ShareAmount for Sell \n trades.ix[row.Date,'cash'] += (row.Shares * prices.ix[row.Date, row.Symbol]) #adjust cash value for Sell\n elif row.Order == 'BUY':\n trades.ix[row.Date,row.Symbol] += (row.Shares) # Add ShareAmount for Buy\n trades.ix[row.Date,'cash'] += (-1 * row.Shares * prices.ix[row.Date, row.Symbol]) #adjust cash value for Buy\n else:\n print 'ERROR: order type not recognized, looking for BUY or SELL'\n except:\n print 'Unknown Error:'\n\n\n # SETTING UP HOLDINGS DATAFRAME \n # accumulating trades into holdings dataframe, snapshot of shares and cash for given day\n holdings = pd.DataFrame(0.00, index=prices.index, columns=symbols)\n holdings['cash'] = 0.00\n holdings.ix[start_date,'cash'] = start_val # add starting cash value\n previous_row = holdings.iloc[0]\n for row_index, row in holdings.iterrows():\n holdings.ix[row_index] = previous_row + trades.ix[row_index] #previous day's value + trades\n previous_row = row\n\n #SETTING UP VALUES DATAFRAME\n # convert shares into their respective dollar amounts\n values = pd.np.multiply(holdings, prices)\n #DAILY VALUE OF THE PORTFOLIO\n portvals = values.sum(axis=1)\n return portvals", "def calculateDailyBill(service):\n bill = [] # initialize the empty list called bill, storing bill amount for each AC serviced for a particular day\n for service_ele in service:\n total = (service_ele[-1] + service_ele[-2]) * 1.05 # iterate the service list and sum up the labour_charge and cost replaced multiply 5% gov tax \n bill.append(total) # append the total amount value to bill list\n # service[service.index(service_ele)] = total # using index to locate the list element and add the total value to the last in each list element\n return bill", "def calc_price_for_period(prev_price):\n result = []\n for i in range(1, N+1):\n price = prev_price + calc_price_delta(prev_price, i)\n prev_price = price\n result.append(price)\n return result", "def get_day_returns(self, stocks=None, date=None):\n if stocks is None:\n stocks = self.stocks\n\n if date is None:\n date = self.date\n\n if type(date) is not datetime.datetime and type(date) is not pd.tslib.Timestamp:\n date = datetime.datetime.strptime(date, \"%Y-%m-%d\")\n\n stock_money = 0\n for stock in stocks:\n 
stock_day = self.stock_data[stock]\n # TODO find a better way than avging open and cloase\n stock_money += stock_day.position['Position'][date] *\\\n (stock_day.market['Close'][date] + stock_day.market['Open'][date])/2\n\n return stock_money", "def build_dividend_lists(portfolio_dict):\n # ETF dividend list\n dow_dividends = lookup_dividends(yf.Ticker(\"DIA\")) \n sp500_dividends = lookup_dividends(yf.Ticker(\"SPY\")) \n nasdaq_dividends = lookup_dividends(yf.Ticker(\"QQQ\")) \n totalmarket_dividends = lookup_dividends(yf.Ticker(\"VTI\")) \n \n # Portfolio dividends\n portfolio_dividend_dict = {}\n for key in portfolio_dict:\n portfolio_dividend_dict[key] = lookup_dividends(yf.Ticker(key))\n \n return (dow_dividends, sp500_dividends, nasdaq_dividends, totalmarket_dividends, portfolio_dividend_dict)", "def get_daily_percent_change(ticker_symbols_as_list):\n\tyahoo_finance_url = 'http://query.yahooapis.com/v1/public/yql'\n\n\t# when we make the request, pass along additional preferences, eg the SQL query\n\tticker_symbols_as_string = ','.join(ticker_symbols_as_list)\n\tdata = {'q': \"select Symbol, PercentChange from yahoo.finance.quotes where symbol in (%s)\" % ticker_symbols_as_string, \n\t'format': 'json',\n\t'diagnostics':'false',\n\t'env': 'http://datatables.org/alltables.env',}\n\n\tencoded_data = urllib.urlencode(data)\n\turl = \"%s?%s\" % (yahoo_finance_url, encoded_data)\n\n\tyahoo_response = urllib.urlopen(url).read()\n\tyahoo_json = json.loads(yahoo_response)\n\n\tdaily_percent_change_keyed_by_ticker_symbol = {}\n\n\tfor quote_result in yahoo_json['query']['results']['quote']:\n\n\t\tsymbol = quote_result['Symbol'] \n\t\tdaily_percent_change = quote_result['PercentChange'] \n\n\t\tif daily_percent_change is None:\n\t\t\tprint 'warning: no value found for percent change', symbol\n\t\t\tcontinue\n\t\t\t\n\t\t# we noticed that the percent change is often reported as a string like \"+14.35%\"...\n\t\t# let's get rid of the leading \"+\" and the trailing \"%\"\n\t\tif daily_percent_change.startswith('+'):\n\t\t\t# 'slice' the string (my_value[start_index:stop_index]); define the start index,\n\t\t\t# and in this case, no need to specify the end index\n\t\t\tdaily_percent_change = daily_percent_change[1:] \n\t\t\n\t\t# get rid of the trailing \"%\" \n\t\tif daily_percent_change.endswith('%'):\n\t\t\t# 'slice' the string. this time, no need to define the start index, but definiely define the end index\n\t\t\tdaily_percent_change = daily_percent_change[:-1]\n\n\t\tprint symbol, daily_percent_change\n\n\t\tdaily_percent_change_keyed_by_ticker_symbol[symbol] = daily_percent_change\n\n\treturn daily_percent_change_keyed_by_ticker_symbol", "def gains_btw_dates(self, date_ini='Ini', date_fin='today', pct=False):\n assert date_fin == 'today' or isinstance(date_fin, date), 'Error! You have to pass a datetime.date istance to date parameters.'\n assert date_ini == 'Ini' or isinstance(date_ini, date), 'Error! You have to pass a datetime.date istance to date parameters.'\n assert isinstance(pct, bool), 'Error! The pct parameter must be boolean.'\n if date_fin == 'today':\n date_fin = self.data.index[-1]\n if date_ini == 'Ini':\n date_ini = self.data.index[0]\n assert date_ini >= self.data.index[0], 'Error ! Invalid Initial Date'\n assert date_fin >= self.data.index[0], 'Error ! 
Invalid Final Date'\n date_fin = self._first_good_date(date_fin)\n if date_ini == self.data.index[0]:\n profit = self.data.loc[date_fin, 'Profit/Loss']\n else:\n #date_ini = self._first_good_date(self._first_good_date(date_ini) - timedelta(1))\n date_ini = self._first_good_date(date_ini - timedelta(1))\n profit = self.data.loc[date_fin, 'Profit/Loss'] - self.data.loc[date_ini, 'Profit/Loss']\n if pct:\n return round(profit / self.value(date_ini) * 100, 2)\n else:\n return round(profit, 2)", "def daily_price():\n for item in data:\n if valid_date(item):\n yield data[item]['daily_value']", "def get_price_data(ticker, days_befoure):\r\n #config_file=raw_input('config file: ')\r\n config_file=\"d:/tmp/moex.json\" \r\n try:\r\n with open(config_file) as config_file: \r\n conn_data = json.load(config_file)\r\n except:\r\n print \"Error: Unable to read config file. \"\r\n sys.exit(1)\r\n\r\n username = conn_data['username']\r\n password = conn_data['password']\r\n my_config = Config(user=username, password=password, proxy_url='')\r\n\r\n my_auth = MicexAuth(my_config)\r\n date = datetime.datetime.now() - datetime.timedelta(days_befoure)\r\n \r\n #ticker = 'SBER' # for tesing...\r\n \r\n if my_auth.is_real_time():\r\n iss = MicexISSClient(my_config, my_auth, MyDataHandler, MyData)\r\n iss.get_history_securities('stock',\r\n 'shares',\r\n 'tqbr',\r\n ticker, \r\n date.strftime(\"%Y-%m-%d\")\r\n #here to be start end dates\r\n )\r\n #print iss.handler.data.history\r\n return iss.handler.data.as_dataframe()", "def calculate_prices(self, merged_data):\n calculated_prices = []\n for record in merged_data:\n prices_dict = dict()\n supplier_price_id = record.get('supplier_detail').get('identifier') # get the supplier price id\n session_id = record.get('supplier_transaction').get('session_id') # get the transaction session\n supplier_trans_fee_price = self.compute_fee_price(\n record) # Get the fee price for each transaction if needed\n supplier_trans_time_price = self.compute_time_price(\n record) # Get the time price for each transaction if needed\n supplier_trans_kwh_price = self.compute_kwh_price(record)\n total_price = supplier_trans_fee_price + supplier_trans_time_price + supplier_trans_kwh_price\n prices_dict.update({'fee_price': supplier_trans_fee_price,\n 'time_price': supplier_trans_time_price,\n 'kwh_price': supplier_trans_kwh_price,\n 'total_price': total_price,\n 'session_id': session_id,\n 'supplier_price_id': supplier_price_id})\n calculated_prices.append(prices_dict)\n\n return calculated_prices", "def compute_costs(timesheet, biller, date1=None, date2=None): \n # Slice\n f = slice_by_dates(timesheet, date1, date2)\n\n # Resample and add start/end dates\n if biller.freq is not None:\n freq = biller.freq\n f = timesheet.set_index('date')[['duration']].resample(freq).sum()\n f = f.reset_index()\n f['period'] = f['date'].map(lambda x: pd.Period(x, freq))\n f['start_date'] = f['period'].map(lambda x: x.start_time)\n f['end_date'] = f['period'].map(lambda x: x.end_time)\n else:\n start_date, end_date = f['date'].min(), f['date'].max()\n f['start_date'] = start_date\n f['end_date'] = end_date\n\n # Get bins for aggregating\n if biller.base_fee:\n bins = [0] + biller.bins\n else:\n bins = biller.bins\n\n def my_agg(group):\n d = OrderedDict()\n d['start_date'] = group['start_date'].iat[0]\n d['end_date'] = group['end_date'].iat[0]\n t = group['duration'].iat[0]\n d['duration'] = pd.Series(decompose(t, bins))\n c1 = d['duration'].cumsum().map(biller)\n c2 = c1.shift(1).fillna(0)\n cost = c1 - c2\n 
d['rate'] = cost/d['duration']\n d['cost'] = cost\n return pd.DataFrame(d)\n \n f = f.groupby('date').apply(my_agg\n ).reset_index().drop(['level_1', 'date'], axis=1)\n\n # Drop NaN rate items\n f = f.dropna(subset=['rate'])\n\n return f", "async def daily(self, ctx):\r\n # TODO: Asssess whether this can be cleaned up. \r\n # As it stands, very similar to inv()\r\n author = ctx.author\r\n with DB() as db:\r\n company = await self.get_active_company(ctx, db, author)\r\n stock = self.iex.get_held_stocks(db, company.id)\r\n inventory = []\r\n for s in stock:\r\n close = await self.get_latest_close(ctx, db, s.symbol)\r\n inventory.append([s.symbol, s.quantity, s.purchase_price, close.close, s.quantity*close.close - s.quantity*s.purchase_price ]) \r\n inv_df = pd.DataFrame(inventory, columns=['Symbol', 'Quantity', 'Purchase Price', 'Close', 'Current Value'])\r\n inv_df['sign'] = np.where(inv_df['Current Value']>=0, '+', '-')\r\n inv_df['%'] = abs(((inv_df['Close'] - inv_df['Purchase Price']) / inv_df['Purchase Price']) * 100)\r\n inv_df['%'] = inv_df['%'].round(1)\r\n inv_df = inv_df.sort_values(['Symbol'])\r\n inv_df = inv_df[['sign', '%', 'Symbol', 'Quantity', 'Purchase Price', 'Close', 'Current Value']]\r\n aggregated = tabulate(inv_df.values.tolist(), headers=['Δ', '%', 'Symbol', 'Quantity', 'Purchase Price', 'Close', 'Current Value'])\r\n await ctx.send(f'```diff\\n{aggregated}```')", "def get_adjusted_data(stockSymbol, df):\n\n events = ['SPLIT', 'BONUS']\n arr = ['Open Price', 'High Price', 'Low Price',\n 'Last Price', 'Close Price', 'Average Price']\n\n stockSymbol = stockSymbol.replace('&', '%26')\n\n if(df.empty):\n print(\"Please check data. Dataframe is empty\")\n return df\n\n df.index = pd.to_datetime(df.index)\n df.sort_index(inplace=True)\n\n try:\n df = df.drop(['Prev Close'], axis=1)\n except KeyError:\n pass\n\n for event in events:\n\n ratio, dates = scrape_bonus_splits(stockSymbol, event)\n for i in range(len(dates)):\n\n date = datetime.datetime.strptime(dates[i], '%d-%b-%Y')\n print(event, \" on : \", dates[i], \" and ratio is : \", ratio[i])\n\n changed_data = df.loc[df.index < date]\n same_data = df.loc[df.index >= date]\n\n for j in arr:\n\n try:\n changed_data.loc[:, j] = changed_data.loc[:, j]/ratio[i]\n except TypeError:\n pass\n\n df = pd.concat([changed_data, same_data])\n\n return df", "def YahooFinancials_Data(Ticker=[],Start='',End ='',Frequency ='daily'):\n\n\n \n import pandas as pd\n from yahoofinancials import YahooFinancials\n import datetime as dt \n \n Ticker = Ticker or input(\"Enter Tcikers separated by',': \").split(',')\n Start = Start or input(\"Enter Start Date separated by '-': \") or (dt.date.today()-\n dt.timedelta(1825)).strftime(\"%Y-%m-%d\")\n End = End or input(\"Enter End Date separated by '-': \") or (dt.date.today()).strftime(\"%Y-%m-%d\")\n Frequency = Frequency or input(\"Enter Frequency like 'daily','weekly': \") or 'daily'\n \n data = pd.DataFrame()\n for i in range(len(Ticker)):\n try:\n yahoo_financials = YahooFinancials(Ticker[i])\n Json_obj = yahoo_financials.get_historical_price_data(Start, End, Frequency)\n Ohlv = Json_obj[Ticker[i]]['prices']\n temp = pd.DataFrame(Ohlv)[[\"formatted_date\",\"adjclose\"]]\n temp.set_index(\"formatted_date\", inplace = True)\n temp = temp[~temp.index.duplicated(keep = 'first')]\n data[Ticker[i]] = temp['adjclose']\n \n except:\n print(f\"Unable to get the Data for: {Ticker[i]}\")\n continue\n \n return data", "def quant(date, bid, ask, voodoo):\n\n future = 200\n voodoo[:] = ask-bid\n for i in 
xrange(0, future):\n voodoo += (ask-bid + ask-bid + ask-bid + ask-bid\n +ask-bid + ask-bid + ask-bid + ask-bid\n ) / 8\n voodoo[:] = voodoo / future", "def get_portfolio_prices(stocks: list, funds: list, etfs: list, start_date: str, end_date=today) -> pd.DataFrame:\r\n data_frames_stocks = get_assets_data_frames(\r\n stocks, inv.get_stock_historical_data, 'brazil', start_date=start_date, end_date=end_date)\r\n data_frames_funds = get_assets_data_frames(\r\n funds, inv.get_fund_historical_data, 'brazil', start_date=start_date, end_date=end_date)\r\n data_frames_etfs = get_assets_data_frames(\r\n etfs, inv.get_etf_historical_data, 'brazil', start_date=start_date, end_date=end_date)\r\n\r\n data_frames = [*data_frames_stocks, *data_frames_funds, *data_frames_etfs]\r\n\r\n assets = [*stocks, *funds, *etfs]\r\n\r\n portfolio_prices = build_multi_index_data_frame(\r\n data_frames, assets, ['Close', 'Open', 'High', 'Low'])\r\n\r\n return portfolio_prices" ]
[ "0.6943807", "0.61015797", "0.59446824", "0.5888466", "0.575217", "0.57305545", "0.57155085", "0.55656976", "0.5565659", "0.55222803", "0.55169284", "0.54701203", "0.5462021", "0.53821945", "0.53751284", "0.53381366", "0.5314847", "0.53086144", "0.5304677", "0.529244", "0.5266538", "0.5231422", "0.5222594", "0.52073574", "0.51993316", "0.5194166", "0.5183944", "0.513712", "0.5112574", "0.51103634" ]
0.62604964
1
Returns a cash payment based on the dividends that should be paid out according to the accumulated bookkeeping of earned, unpaid, and stock dividends.
def pay_dividends(self, next_trading_day):
    net_cash_payment = 0.0

    try:
        payments = self._unpaid_dividends[next_trading_day]
        # Mark these dividends as paid by dropping them from our unpaid
        # table.
        del self._unpaid_dividends[next_trading_day]
    except KeyError:
        payments = []

    # Add cash for each pending payment. Amounts may be negative for short
    # positions, representing the fact that we're required to reimburse the
    # owner of the stock for any dividends paid while borrowing.
    for payment in payments:
        net_cash_payment += payment['amount']

    # Add stock for any stock dividends paid. Again, the values here may
    # be negative in the case of short positions.
    try:
        stock_payments = self._unpaid_stock_dividends[next_trading_day]
    except KeyError:
        stock_payments = []

    for stock_payment in stock_payments:
        payment_instrument = stock_payment['payment_instrument']
        share_count = stock_payment['share_count']
        # Note: we create a Position for the stock dividend if we don't
        # already own the instrument.
        if payment_instrument in self.positions:
            position = self.positions[payment_instrument]
        else:
            position = self.positions[payment_instrument] = Position(
                payment_instrument,
            )

        position.amount += share_count

    return net_cash_payment
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cash_flow(self):\n _cash_flow = self.after_tax_profit() + self.depreciation()\n return _cash_flow", "def cash(self, qtt_100s, qtt_50s, qtt_20s):\n return (qtt_100s * 100) + (qtt_50s * 50) + (qtt_20s * 20)", "def test_discounted_payment_below_debit(self):\n debit_jobs([(self.job, A(600), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(20), A(0))], D(480))\n self.assert_balances(\n bank=A(480, 0, 0),\n balance=A(100), # debited (600) + credited (-500) = balance (100)\n debited=A(600),\n invoiced=A(600), # debited (600) + adjustment (0) = invoiced (600)\n paid=A(-500),\n credited=A(-500), # payment (-500) + adjustment (0) = credited (-500)\n promised=A(100),\n partial=A(480).net_amount,\n tax=A(480).tax_amount,\n )", "def earn_dividends(self, cash_dividends, stock_dividends):\n for cash_dividend in cash_dividends:\n self._dirty_stats = True # only mark dirty if we pay a dividend\n\n # Store the earned dividends so that they can be paid on the\n # dividends' pay_dates.\n div_owed = self.positions[cash_dividend.instrument].earn_dividend(\n cash_dividend,\n )\n try:\n self._unpaid_dividends[cash_dividend.pay_date].append(div_owed)\n except KeyError:\n self._unpaid_dividends[cash_dividend.pay_date] = [div_owed]\n\n for stock_dividend in stock_dividends:\n self._dirty_stats = True # only mark dirty if we pay a dividend\n\n div_owed = self.positions[\n stock_dividend.instrument\n ].earn_stock_dividend(stock_dividend)\n try:\n self._unpaid_stock_dividends[stock_dividend.pay_date].append(\n div_owed,\n )\n except KeyError:\n self._unpaid_stock_dividends[stock_dividend.pay_date] = [\n div_owed,\n ]", "def test_split_payment_with_discount_and_adjustment(self):\n debit_jobs(\n [\n (self.job, A(480), Entry.FLAT_DEBIT),\n (self.job2, A(480), Entry.WORK_DEBIT),\n ]\n )\n self.assertEquals(A(480), self.job2.account.balance)\n self.assert_balances(promised=A(960), balance=A(480), invoiced=A(480))\n credit_jobs(\n [\n (self.job, A(440), A(0), A(40)), # adjusted\n (self.job2, A(460), A(20), A(0)), # discounted\n ],\n D(900),\n )\n self.assert_balances(\n bank=A(900, 0, 0),\n debited=A(480),\n invoiced=A(440), # debited (480) + adjustment (-40) = invoiced (440)\n paid=A(-440),\n credited=A(-480), # payment (-440) + adjustment (-40) = credited (-480)\n partial=A(900).net_amount,\n tax=A(900).tax_amount,\n )\n self.assert_balances(\n bank=A(900, 0, 0),\n debited=A(480),\n invoiced=A(480), # debited (480) + adjustment (0) = invoiced (480)\n paid=A(-480),\n credited=A(-480), # payment (-480) + adjustment (0) = credited (-480)\n partial=A(900).net_amount,\n tax=A(900).tax_amount,\n switch_to_job=self.job2,\n )", "def test_discounted_payment_matching_debit(self):\n debit_jobs([(self.job, A(500), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(20), A(0))], D(480))\n self.assert_balances(\n bank=A(480, 0, 0),\n debited=A(500),\n invoiced=A(500), # debited (500) + adjustment (0) = invoiced (500)\n paid=A(-500),\n credited=A(-500), # payment (-500) + adjustment (0) = credited (-500)\n partial=A(480).net_amount,\n tax=A(480).tax_amount,\n )", "def get_cash(self):\r\n return self.cash", "def cash_income(df):\n return (df.aftertax_income -\n (1 - tc.HOUSING_CASH_SHARE) * df.housing_ben -\n (1 - tc.MCAID_CASH_SHARE) * df.mcaid_ben -\n (1 - tc.MCARE_CASH_SHARE) * df.mcare_ben -\n (1 - tc.OTHER_CASH_SHARE) * df.other_ben -\n (1 - tc.SNAP_CASH_SHARE) * df.snap_ben -\n (1 - tc.SSI_CASH_SHARE) * df.ssi_ben -\n (1 - tc.TANF_CASH_SHARE) * df.tanf_ben -\n (1 - tc.VET_CASH_SHARE) * df.vet_ben -\n (1 - 
tc.WIC_CASH_SHARE) * df.wic_ben)", "def cash_ratio(self):\n return self.cash / self.current_liabilities", "def test_adjusted_payment_still_below_invoice(self):\n debit_jobs([(self.job, A(600), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(0), A(20))], D(480))\n self.assert_balances(\n bank=A(480, 0, 0),\n balance=A(100), # debited (600) + credited (-500) = balance (100)\n debited=A(600),\n invoiced=A(580), # debited (600) + adjustment (-20) = invoiced (580)\n paid=A(-480),\n credited=A(-500), # payment (-480) + adjustment (-20) = credited (-500)\n promised=A(100),\n partial=A(480).net_amount,\n tax=A(480).tax_amount,\n )", "def calculate(self):\r\n if self.__calculation_type == self.__DIFFERENTIATED_PAY:\r\n for month in range(1, self.__principal_term+1):\r\n self.__differentiated_pay.append(\r\n ceil(\r\n (self.__credit_principal/self.__principal_term)\r\n + self.__credit_interest*(self.__credit_principal\r\n - (self.__credit_principal\r\n * (month-1))\r\n / self.__principal_term)\r\n )\r\n )\r\n self.__overpayment = sum(self.__differentiated_pay) - self.__credit_principal\r\n\r\n for i, dp in enumerate(self.__differentiated_pay, 1):\r\n print(f'Month {i}: paid out {dp}')\r\n print()\r\n print(f'Overpayment = {self.__overpayment}')\r\n\r\n elif self.__calculation_type == self.__ANNUITY:\r\n if self.__user_choice == self.__SEEK_ANNUITY_MONTHLY:\r\n self.__annuity_monthly = ceil(\r\n self.__credit_principal * ((self.__credit_interest\r\n * pow(1+self.__credit_interest\r\n , self.__principal_term)\r\n )\r\n / (pow(1+self.__credit_interest\r\n , self.__principal_term)\r\n - 1)\r\n )\r\n )\r\n self.__overpayment = (self.__annuity_monthly * self.__principal_term\r\n - self.__credit_principal\r\n )\r\n print(f'Your annuity payment = {self.__annuity_monthly}!')\r\n\r\n elif self.__user_choice == self.__SEEK_TERM:\r\n self.__principal_term = ceil(\r\n log(self.__annuity_monthly / (self.__annuity_monthly\r\n - (self.__credit_interest\r\n * self.__credit_principal))\r\n , 1+self.__credit_interest)\r\n )\r\n self.__overpayment = ceil(self.__annuity_monthly\r\n * self.__principal_term\r\n - self.__credit_principal\r\n )\r\n years = self.__principal_term // 12\r\n months = self.__principal_term % 12\r\n\r\n print(f'You need {years} year{\"s\" if self.__principal_term > 1 else \"\"}'\r\n f'{\" and \" + str(months) + \" months\" if months > 0 else \"\"}'\r\n f' to repay this credit!')\r\n\r\n elif self.__user_choice == self.__SEEK_CREDIT_PRINCIPAL:\r\n self.__credit_principal = ceil(\r\n self.__annuity_monthly\r\n / ((self.__credit_interest\r\n * pow(1+self.__credit_interest, self.__principal_term)\r\n )\r\n / (pow(1+self.__credit_interest, self.__principal_term)\r\n - 1)\r\n )\r\n )\r\n self.__overpayment = ceil(self.__annuity_monthly\r\n * self.__principal_term\r\n - self.__credit_principal)\r\n\r\n print(f'Your credit principal = {self.__credit_principal}!')\r\n print(f'Overpayment = {self.__overpayment}')\r\n\r\n else:\r\n print('Incorrect parameters')\r\n self.usage()", "def cash(self) -> float:\n return self._cash", "def test_adjusted_payment_matching_invoice(self):\n debit_jobs([(self.job, A(500), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(0), A(20))], D(480))\n self.assert_balances(\n bank=A(480, 0, 0),\n debited=A(500),\n invoiced=A(480), # debited (500) + adjustment (-20) = invoiced (480)\n paid=A(-480),\n credited=A(-500), # payment (-480) + adjustment (-20) = credited (-500)\n partial=A(480).net_amount,\n tax=A(480).tax_amount,\n )", "def _vcash(totmoney, totcftable, 
cashobj):\n cashl = []\n cashl.append(totmoney + totcftable.iloc[0].cash)\n for i in range(len(totcftable) - 1):\n date = totcftable.iloc[i + 1].date\n delta = totcftable.iloc[i + 1].cash\n if delta < 0:\n cashl.append(\n myround(\n delta\n / cashobj.price[cashobj.price[\"date\"] <= date].iloc[-1].netvalue\n )\n )\n else:\n cashl.append(delta)\n datadict = {\"date\": totcftable.loc[:, \"date\"], \"mf\": cashl}\n return pd.DataFrame(data=datadict)", "def test_payment(self):\n debit_jobs([(self.job, A(480), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(0), A(0))], D(480))\n self.assert_balances(\n bank=A(480, 0, 0),\n invoiced=A(480),\n paid=A(-480),\n partial=A(480).net_amount,\n tax=A(480).tax_amount,\n )", "def calc_cash_flow(self):\n s = self # shortcut variable\n\n # determine the changes caused by the heat pump on an annual basis.\n # First calculate annual totals for base case and heat pump case and\n # then calculate the change.\n ann_base = s.df_mo_dol_base.sum()\n ann_hp = s.df_mo_dol_hp.sum()\n ann_chg = ann_hp - ann_base\n initial_cost = np.zeros(s.hp_life+1)\n \n # Am not automatically adding sales tax to the initial cost as the user was\n # supposed to includes sales tax in their input.\n initial_cost[0] = -s.capital_cost * (1 - s.pct_financed) + s.rebate_dol\n loan_pmt = npf.pmt(s.loan_interest, s.loan_term, s.capital_cost * s.pct_financed)\n if loan_pmt < -0.01: # loan payment is negative\n loan_cost = [0.0] + [loan_pmt] * s.loan_term + [0.0] * (s.hp_life - s.loan_term)\n loan_cost = np.array(loan_cost)\n else:\n loan_cost = 0.0\n op_cost = -s.op_cost_chg * make_pattern(s.inflation_rate, s.hp_life)\n fuel_cost = -ann_chg.secondary_fuel_dol * make_pattern(s.fuel_esc_rate, s.hp_life)\n elec_cost = -ann_chg.elec_dol * make_pattern(s.elec_esc_rate, s.hp_life)\n cash_flow = initial_cost + loan_cost + op_cost + fuel_cost + elec_cost\n\n # calculate cumulative, discounted cash flow.\n disc_factor = np.ones(s.hp_life) * (1 + s.discount_rate)\n disc_factor = np.insert(disc_factor.cumprod(), 0, 1.0)\n cum_disc_cash_flow = np.cumsum(cash_flow / disc_factor)\n \n s.df_cash_flow = pd.DataFrame(\n {'initial_cost': initial_cost,\n 'loan_cost': loan_cost,\n 'op_cost': op_cost,\n 'fuel_cost': fuel_cost,\n 'elec_cost': elec_cost,\n 'cash_flow': cash_flow,\n 'cum_disc_cash_flow': cum_disc_cash_flow,\n }\n )\n s.df_cash_flow.index.name = 'year'\n \n # Calculate IRR and NPV for w/ and w/o PCE.\n s.summary['irr'] = npf.irr(s.df_cash_flow.cash_flow)\n s.summary['npv'] = npf.npv(s.discount_rate, s.df_cash_flow.cash_flow)\n \n # Add some summary fuel and electric usage and unit cost info\n s.summary['fuel_use_base'] = ann_base.secondary_fuel_units\n s.summary['fuel_use_hp'] = ann_hp.secondary_fuel_units\n s.summary['fuel_use_chg'] = ann_chg.secondary_fuel_units\n if ann_chg.secondary_fuel_units != 0.0:\n s.summary['fuel_price_incremental'] = ann_chg.secondary_fuel_dol / ann_chg.secondary_fuel_units\n else:\n s.summary['fuel_price_incremental'] = np.nan\n s.summary['elec_use_base'] = ann_base.elec_kwh\n s.summary['elec_use_hp'] = ann_hp.elec_kwh\n s.summary['elec_use_chg'] = ann_chg.elec_kwh\n s.summary['elec_rate_avg_base'] = ann_base.elec_dol / ann_base.elec_kwh\n s.summary['elec_rate_avg_hp'] = ann_hp.elec_dol / ann_hp.elec_kwh\n s.summary['elec_rate_incremental'] = ann_chg.elec_dol / ann_chg.elec_kwh", "def available_cash(self):\n return self._cash", "def calc_earning(self, data=None):\n result = Result()\n if data is None:\n data = self.security\n self.calcDecision()\n 
first_purchase_method = self.check_first_purchase_method()\n for i in np.arange(len(data['Close'])):\n if data['FinalDecision'].iloc[i] is None:\n pass\n elif data['FinalDecision'].iloc[i] == TransactionType.BUY:\n if data['FinalDecision'].iloc[i-1] == TransactionType.BUY:\n pass\n else:\n if (self.buys_made + self.sells_made) == 0:\n if first_purchase_method == FirstTransactionType.INIT_CAPITAL:\n self.shares_own = int((self.init_capital/data['Close'].iloc[i]))\n self.buys_made += 1\n elif first_purchase_method == FirstTransactionType.STOCK_QUANTITY:\n self.shares_own = self.stock_quantity\n self.buys_made += 1\n else:\n self.shares_own = int(self.final_capital / data['Close'].iloc[i])\n self.final_capital = self.final_capital % data['Close'].iloc[i]\n #print(self.shares_own)\n\n elif data['FinalDecision'].iloc[i] == TransactionType.SELL:\n if data['FinalDecision'].iloc[i-1] == TransactionType.SELL:\n pass\n else:\n if (self.buys_made + self.sells_made) == 0:\n pass\n else:\n self.final_capital += self.shares_own * data['Close'].iloc[i]\n self.shares_own = 0\n self.sells_made +=1\n #Checar si es el momento mas alto o bajo de ganancias\n if self.shares_own == 0:\n if (self.highest_point is None\n or self.highest_point < self.final_capital):\n self.highest_point = self.final_capital\n if (self.lowest_point is None\n or self.lowest_point > self.final_capital\n or self.lowest_point == 0):\n self.lowest_point = self.final_capital\n else:\n if (self.highest_point is None\n or self.highest_point < (self.shares_own * data['Close'].iloc[i])):\n self.highest_point = self.final_capital\n if (self.lowest_point is None\n or self.lowest_point > (self.shares_own * data['Close'].iloc[i])\n or self.lowest_point == 0):\n self.lowest_point = self.final_capital\n self.calcRealFinalCapital()\n self.calcDiferencePercentage()", "def cash_flow(self, request, pk=None, **kwargs):\n # Get the goal even though we don't need it (we could ust use the pk)\n # so we can ensure we have permission to do so.\n goal = self.get_object()\n txs = Transaction.objects.filter(Q(to_goal=goal) | Q(from_goal=goal),\n status=Transaction.STATUS_EXECUTED,\n reason__in=Transaction.CASH_FLOW_REASONS)\n txs = txs.order_by('executed').values_list('to_goal', 'executed', 'amount')\n return Response([(dt2ed(tx[1]), tx[2] if tx[0] else -tx[2]) for tx in txs])", "def remaining_payroll(session, employee):\n\n scontract = \\\n session.query(Contract).filter(\n Contract.employee == employee)\n sherees_paychecks_due = session.query(Invoice).filter(\n Invoice.contract == scontract, Invoice.voided == 0,\n Invoice.prcleared == 0, Invoice.posted == 1)\n do_not_delete_items = []\n total_due = 0\n for pc in sherees_paychecks_due:\n iitems = session.query(Iitem).filter(Iitem.invoice == pc)\n pay = 0\n for i in iitems:\n do_not_delete_items.append(i)\n pay += i.quantity * i.cost\n total_due += pay\n return sherees_paychecks_due, do_not_delete_items, total_due", "def cash_coupon(certificate, percentage):\n return sum(stake for name, stake in certificate['underlyings'].items()) * percentage", "def getCash(self) -> int:\n return self.state[CASH]", "def withdraw_cash(self, qtt_100s, qtt_50s, qtt_20s):\n amount = PaperMoneyCounter().cash(qtt_100s, qtt_50s, qtt_20s)\n if (self.__is_logged_in) and (amount <= self.__balance) and (amount <= 1000):\n self.__balance = float(Decimal(str(self.__balance - amount)))\n self.register_operation(self.ACTIONS['WITHDRAWING'], amount)\n return True\n\n return False", "def period_payment(yearly_payments_percentage, 
client_cost_reduction,\n days_with_payments, days_for_discount_rate):\n\n yearly_payments_percentage = Fraction(str(yearly_payments_percentage))\n client_cost_reduction = Fraction(str(client_cost_reduction))\n\n if days_with_payments == 0:\n payments = Fraction(0)\n else:\n payments = Fraction(days_with_payments, days_for_discount_rate)\n return (yearly_payments_percentage * client_cost_reduction * payments)", "def duty_free(price: int, discount: int, holiday_cost: int) -> int:\n if holiday_cost == 500:\n return holiday_cost\n\n discount /= 100\n price = holiday_cost / (price * discount)\n price = int(price)\n return price", "def get_current_pending_charge(self, partial_payments, validated_data):\n if partial_payments:\n previous_payments_sum = 0\n for partial_payment in partial_payments:\n previous_payments_sum += partial_payment.valorPagado\n pending_amount = previous_payments_sum % 1_000_000\n new_charge = 1_000_000 - (pending_amount + int(validated_data['valorPagado']))\n return new_charge", "def total_clearance(self):\n total_clearances = 0\n debit = 0 #variable to track the remaining debit\n clearances = self.clearance_set.all() #grab all the previous clerances\n for clearance in clearances:\n total_clearances += clearance.paid_value\n return total_clearances", "def test_discounted_payment_below_debit_with_recognized_revenue(self):\n debit_jobs([(self.job, A(600), Entry.WORK_DEBIT)], recognize_revenue=True)\n credit_jobs([(self.job, A(480), A(20), A(0))], D(480))\n self.assert_balances(\n bank=A(480, 0, 0),\n balance=A(100),\n invoiced=A(600),\n paid=A(-500),\n income=A(600).net_amount,\n tax=A(580).tax_amount,\n discounts=A(-20).net_amount,\n )", "def calculator(self, income):\n annuity = float(config.basic(income)) # 社保总额\n out = []\n if float(income) > 3500.00:\n taxable_income = (float(income) - float(annuity) - 3500.00) # 课税对象金额\n taxrate = self.tax_rate(taxable_income) # 税率\n deduction = deductions[taxrate] # 速算扣除数\n tax = taxable_income * taxrate - deduction # 个税金额\n after = float(income) - float(tax) - float(annuity) # 税后工资\n # print(\"社保总额:{}, 个税金额:{}, 税后工资:{}\".format(annuity, tax, after))\n else:\n tax = 0.00 # 个税金额\n after = float(income) - annuity\n for i in [annuity, tax, after]:\n out.append('{:.2f}'.format(i))\n return out", "def budget(self):\n\n budget = (_House.closing_cost*self.vars['after_repair_value']) - self.vars['purchase_price'] - self.vars['profit'] - _House.broker_fee\n return float(round(budget, 2))" ]
[ "0.65616626", "0.6471666", "0.6241464", "0.6134051", "0.6083674", "0.6017929", "0.59529567", "0.59450686", "0.57963526", "0.5787191", "0.5777359", "0.57426834", "0.5719649", "0.56672525", "0.5616136", "0.5607058", "0.55491143", "0.5521608", "0.5502035", "0.5492246", "0.5451199", "0.5405619", "0.5401534", "0.53903913", "0.53402203", "0.5334028", "0.53300625", "0.5308315", "0.5307092", "0.53025806" ]
0.7525171
0