query            string (lengths 9 to 9.05k)
document         string (lengths 10 to 222k)
metadata         dict
negatives        sequence (length 30)
negative_scores  sequence (length 30)
document_score   string (lengths 4 to 10)
document_rank    string (2 classes)
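A minimal sketch of how a dataset with this column layout could be loaded and inspected with the Hugging Face datasets library; the dataset identifier used below is a placeholder, not the real one.

from datasets import load_dataset

# "user/code-retrieval-triplets" is a hypothetical identifier used for illustration.
ds = load_dataset("user/code-retrieval-triplets", split="train")

row = ds[0]
print(row["query"])                 # natural-language description of the code
print(row["document"][:200])        # the matching code snippet (can be very long)
print(len(row["negatives"]))        # 30 non-matching code snippets
print(len(row["negative_scores"]))  # 30 scores aligned with the negatives
print(row["document_score"], row["document_rank"])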
Gets `features` and `labels`.
def features_and_labels(self):
  if self.is_dataset:
    if self._iterator is None:
      raise RuntimeError('Internal error: Must call dataset_initializer_hook '
                         'before calling features_and_labels(). Please file '
                         'a bug!')
    return _Inputs._parse_inputs(self._iterator.get_next())

  return (self._features, self._labels)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_feature_labels(self):\n return self.feature_labels", "def get_labels(info):\n return info.features[\"labels\"].names", "def get_labels(info):\n return info.features[\"labels\"].names", "def get_features_and_labels(self, dataframe):\n features = dataframe.drop(columns=self._label, axis=1)\n labels = dataframe[self._label]\n\n return features, labels", "def get_features(self):\n return self._features", "def get_feature_labels(self):\n\t\tfeature_labels = []\n\t\tfor feature, i in zip(self.feature_names,self.feature_mask):\n\t\t\tif i == True:\n\t\t\t\tfeature_labels.append(feature)\n\t\treturn feature_labels", "def get_labels(self):\n return self.labels", "def get_features(self):\n return []", "def get_labels(self):\n raise NotImplementedError", "def get_labels(self):\r\n raise NotImplementedError()", "def feature_label(features):\n f=[]\n l=[]\n for item in features:\n f.append(item[0])\n l.append(item[1])\n return f,l", "def get_labels(self):\r\n raise NotImplementedError()", "def get_labels(self):\r\n raise NotImplementedError()", "def get_labels(self):\r\n raise NotImplementedError()", "def get_labels(self):\r\n raise NotImplementedError()", "def get_labels(self):\r\n raise NotImplementedError()", "def get_labels(self):\r\n raise NotImplementedError()", "def get_labels(self):\r\n raise NotImplementedError()", "def get_labels(self):\r\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()" ]
[ "0.79877245", "0.734456", "0.734456", "0.7326892", "0.7230395", "0.71708345", "0.71536416", "0.7029519", "0.70181954", "0.7005192", "0.7003384", "0.69360304", "0.69360304", "0.69360304", "0.69360304", "0.69360304", "0.69360304", "0.69360304", "0.69360304", "0.69250166", "0.69250166", "0.69250166", "0.69250166", "0.69250166", "0.69250166", "0.69250166", "0.69250166", "0.69250166", "0.69250166", "0.69250166" ]
0.7678803
1
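The metadata field of each row declares a (query, document, negatives) triplet objective. A hedged sketch, not part of the dataset tooling, of turning one row into a single training triplet by picking the highest-scoring (hardest) negative:

def row_to_triplet(row):
    # negatives and negative_scores are parallel lists of 30 entries;
    # the scores are stored as strings, so cast before comparing.
    scores = [float(s) for s in row["negative_scores"]]
    hardest = max(range(len(scores)), key=scores.__getitem__)
    return row["query"], row["document"], row["negatives"][hardest]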
Returns the `Signals` from `_Inputs`.
def signals(self):
  if self._current_inputs is None:
    raise RuntimeError(
        'Internal Error: The current inputs have not been properly '
        'generated. First call features_and_labels, then call signals.')
  signals = self._current_inputs['signals']
  self._current_inputs = None
  return signals
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetSignals(cls):\n return []", "def get_signals(self):\n return QFDataFrame(data=self._signals, index=self._signals_dates)", "def read_all_signals(self):\n return [pio.sample(signal_idx)\n for signal_idx in self._signals_idx]", "def inputs(self):\n return self._inputs", "def inputs(self):\n return self._inputs", "def inputs(self):\n return self._inputs", "def inputs(self):\n return self._inputs", "def inputs(self):\n return self.inputs", "def get_inputs(self):\n return self.inputs", "def inputs(self) -> InputType:\n return self._inputs", "def get_signals(self):\n raise NotImplementedError('Agent is an abstract base class')", "def signal(self) -> list:\n raise NotImplementedError(\"You must implement signal\")", "def get_inputs(self):\n return self.attributes[\"inputs\"]", "def get_raw_signals(self):\n signals, fields = wfdb.rdsamp(self.patient_number, pb_dir='mitdb', warn_empty=True)\n logging.info(\"Patient {} additional info: {}\".format(self.patient_number, fields))\n return signals, fields", "def calculate_signals(self):\n\t\traise NotImplementedError(\n\t\t\t\"Should implement calculate_signals()\\n\" + \\\n\t\t\t\"By calling this method to calculate 'Signal' Events\"\n\t\t)", "def _get_inputs(self):\n return self.__inputs", "def _get_inputs(self):\n return self.__inputs", "def _get_inputs(self):\n return self.__inputs", "def Signals(sigtype, num_sigs):\n assert isinstance(sigtype, (bool, intbv))\n sigs = [Signal(sigtype) for _ in range(num_sigs)]\n return sigs", "def calculate_signals(self):\n raise NotImplementedError(\"Should implement calculate_signals()\")", "def inputs(self) -> 'Input':\n return self.Input", "def generate_signals(self):\n\n raise NotImplementedError('''\n Must implement generate_signals. Call help() for details.\n ''')", "def getEDFsignals(edf):\n n = edf.signals_in_file\n samples = edf.getNSamples()[0]\n signals = np.zeros((n, samples))\n for i in range(n):\n try:\n signals[i,:] = edf.readSignal(i)\n except:\n pass\n return signals", "def get_inputs(self):\n flight_snapshot = self.flight()\n orbit_snapshot = self.orbit()\n\n\n inputs = [flight_snapshot.heading / 360, flight_snapshot.pitch / 90, flight_snapshot.roll / 360, flight_snapshot.speed / 2000,\n flight_snapshot.horizontal_speed / 500, flight_snapshot.vertical_speed / 500, self.throttle(),\n min(self.liquid_fuel(), self.oxidizer())/100, orbit_snapshot.apoapsis_altitude / 100000,\n orbit_snapshot.periapsis_altitude /100000, orbit_snapshot.inclination, orbit_snapshot.eccentricity,\n flight_snapshot.dynamic_pressure / 1000]\n return inputs", "def pick_signals(processor, source = 'input'):\n\n if source == 'input':\n bin_edges = processor.input_parameters['bin_edges']\n raw_signal = processor.input_signal\n elif source == 'output':\n bin_edges = processor.output_parameters['bin_edges']\n raw_signal = processor.output_signal\n else:\n raise ValueError('Unknown value for the data source')\n t = np.zeros(len(raw_signal)*4)\n bins = np.zeros(len(raw_signal)*4)\n signal = np.zeros(len(raw_signal)*4)\n value = 1.\n\n for i, edges in enumerate(bin_edges):\n t[4*i] = edges[0]\n t[4*i+1] = edges[0]\n t[4*i+2] = edges[1]\n t[4*i+3] = edges[1]\n bins[4*i] = 0.\n bins[4*i+1] = value\n bins[4*i+2] = value\n bins[4*i+3] = 0.\n signal[4*i] = 0.\n signal[4*i+1] = raw_signal[i]\n signal[4*i+2] = raw_signal[i]\n signal[4*i+3] = 0.\n value *= -1\n\n z = t * c\n return (t, z, bins, signal)", "def inputs(self) -> List[str]:\n return self._model.inputs", "def getListOfInputs(self, *args):\n return 
_libsbml.Transition_getListOfInputs(self, *args)", "def inputs(self):\n inputs = []\n for transition in self.transitions:\n if transition.event and not self._is_local(transition.event):\n inputs.append(Input(transition.event, self))\n return EventSet(inputs)", "def inputs(self):\n\n inputs = []\n for arg in self.arguments:\n if arg.IN:\n inputs.append(arg)\n\n return inputs", "def get_signal_info(self, signal_names):\n result = []\n for name in signal_names:\n description = self._pio.signal_description(name)\n domain_type = self._pio.signal_domain_type(name)\n aggregation, format_type, behavior = self._pio.signal_info(name)\n result.append((name, description, domain_type, aggregation, format_type, behavior))\n return result" ]
[ "0.7630647", "0.674216", "0.66904575", "0.62987953", "0.62987953", "0.62987953", "0.62970096", "0.62616074", "0.62526375", "0.61903083", "0.6189783", "0.6183568", "0.61779135", "0.609748", "0.6075958", "0.60570484", "0.60570484", "0.60570484", "0.60139763", "0.6005559", "0.6000842", "0.5999183", "0.5906557", "0.5892425", "0.58822393", "0.58733946", "0.5873061", "0.5813232", "0.5784658", "0.5768716" ]
0.7737252
0
Inserts stopping_signal into the dataset via _map_fn. Here we change the data structure in the dataset, such that the return value is now a dictionary in which `features`, `labels`, and `signals` are three distinct keys. This provides a cleaner structure, which makes it easier to decompose the inputs later (see `features_and_labels`).
def insert_stopping_signal(stop, batch_size, add_padding=False):
  def _map_fn(*args):
    """The map fn to insert signals."""
    if len(args) == 1:
      # Unpack the single Tensor/dict argument as features. This is required
      # when the input_fn returns no labels.
      args = args[0]
    features, labels = _Inputs._parse_inputs(args)
    new_input_dict = {}

    if add_padding:
      padding_mask, features, labels = (
          _PaddingSignals.pad_features_and_labels(
              features, labels, batch_size))

      new_input_dict['features'] = features
      if labels is not None:
        new_input_dict['labels'] = labels

    else:
      new_input_dict['features'] = features
      if labels is not None:
        new_input_dict['labels'] = labels
      padding_mask = None

    new_input_dict['signals'] = _StopSignals(
        stop=stop, batch_size=batch_size, padding_mask=padding_mask).as_dict()

    return new_input_dict

  return _map_fn
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _map_fn(*args):\n if len(args) == 1:\n # Unpack the single Tensor/dict argument as features. This is required\n # for the input_fn returns no labels.\n args = args[0]\n features, labels = _Inputs._parse_inputs(args)\n new_input_dict = {}\n\n if add_padding:\n padding_mask, features, labels = (\n _PaddingSignals.pad_features_and_labels(\n features, labels, batch_size))\n\n new_input_dict['features'] = features\n if labels is not None:\n new_input_dict['labels'] = labels\n\n else:\n new_input_dict['features'] = features\n if labels is not None:\n new_input_dict['labels'] = labels\n padding_mask = None\n\n new_input_dict['signals'] = _StopSignals(\n stop=stop, batch_size=batch_size, padding_mask=padding_mask).as_dict()\n\n return new_input_dict", "def map(self, func: Callable[[Trajectory, Any], Tuple[Trajectory, Any]]) -> Data:\n trajs, labels = [], []\n for traj, label in zip(self.trajs, self.labels):\n traj, label = func(traj, label)\n trajs.append(traj)\n labels.append(label)\n return Data(trajs, labels)", "def process_data(self):\n\n # direct and opposite mappings for items\n idpool = itertools.count(start=1)\n FVMap = collections.namedtuple('FVMap', ['dir', 'opp'])\n self.fvmap = FVMap(dir={}, opp={})\n\n # mapping features to ids\n for i in range(len(self.names) - 1):\n feats = sorted(self.feats[i])\n\n # try to rangify this feature\n if self.intvs and len(feats) > len(self.intvs) + 1:\n feats = self.rangify(feats, i)\n self.feats[i] = set(feats)\n\n if len(feats) != 2:\n for l in feats:\n self.fvmap.dir[(self.names[i], l)] = next(idpool)\n else:\n var = next(idpool)\n self.fvmap.dir[(self.names[i], feats[0])] = var\n self.fvmap.dir[(self.names[i], feats[1])] = -var\n\n # use ranges for updating samples\n if self.vimap:\n for i, s in enumerate(self.samps):\n self.samps[i] = [self.vimap[j][v] if j in self.vimap and v != '' else v for j, v in enumerate(s)]\n\n # recomputing the weights\n counter = collections.Counter()\n for s, w in zip(self.samps, self.wghts):\n counter[tuple(s)] += w\n\n self.samps = []\n self.wghts = []\n for s, w in six.iteritems(counter):\n self.samps.append(list(s))\n self.wghts.append(w)\n\n # all labels are marked with distinct ids\n for l in sorted(self.feats[-1]):\n self.fvmap.dir[(self.names[-1], l)] = next(idpool)\n\n # opposite mapping\n for key, val in six.iteritems(self.fvmap.dir):\n self.fvmap.opp[val] = key\n\n # encoding samples and filtering out features with only 1 value\n for i in range(len(self.samps)):\n self.samps[i] = [self.fvmap.dir[(self.names[j], self.samps[i][j])] for j in range(len(self.samps[i])) if self.samps[i][j] and len(self.feats[j]) > 1]\n\n # determining feature variables (excluding class variables)\n for v, pair in six.iteritems(self.fvmap.opp):\n if pair[0] == self.names[-1]:\n self.fvars = v - 1\n break", "def as_dict(self):\n shape = [self._batch_size, 1]\n dtype = dtypes.bool\n\n if self._stop:\n stopping = array_ops.ones(shape=shape, dtype=dtype)\n else:\n stopping = array_ops.zeros(shape=shape, dtype=dtype)\n\n signals = {'stopping': stopping}\n if self._padding_mask is not None:\n signals['padding_mask'] = self._padding_mask\n return signals", "def input_features_labels(device, signal, subject_ID):\n\n directory = f'data/feature_label_tables/feature_{device}_{signal}/feature_S{subject_ID}_all_axis_{device}_{signal}'\n data = pd.read_csv(directory)\n data = data.dropna()\n\n # since all zero_crossing and mean_crossing metrics are zero and 200, respectively,\n # regardless of the signal and the activity, we ignore this 
feature.\n features = data.drop(columns=[f'x_{signal}_zero_crossing', f'x_{signal}_mean_crossing',\n f'y_{signal}_zero_crossing', f'y_{signal}_mean_crossing',\n f'z_{signal}_zero_crossing', f'z_{signal}_mean_crossing',\n 'Activity_ID'])\n\n all_labels = data[['Activity_ID']]\n\n feature_train, feature_test, label_train, label_test = train_test_split(\n features, all_labels, test_size=0.2, shuffle=True)\n # feature normalization\n scalar = StandardScaler().fit(feature_train)\n normalized_feature_train = scalar.transform(feature_train)\n normalized_feature_test = scalar.transform(feature_test)\n normalized_all_feature = scalar.transform(features)\n # convert 'numpy.ndarray' to pandas dataframe\n normalized_feature_train = pd.DataFrame(normalized_feature_train)\n normalized_feature_test = pd.DataFrame(normalized_feature_test)\n normalized_all_feature = pd.DataFrame(normalized_all_feature)\n\n return normalized_feature_train, normalized_feature_test, label_train, label_test, normalized_all_feature, all_labels", "def signal_to_training( # pylint: disable=too-many-locals\n self,\n signal: Union[Dict, List[Dict]]\n ) -> Tuple[np.ndarray, Tuple[np.ndarray, ...], np.ndarray, Dict[str, Any]]:\n dict_list = list(signal) if isinstance(signal, list) else list((signal, ))\n\n # Initialize the return values\n time_length = len(dict_list[0]['signal']['time']['data']) # type: ignore\n length = int(time_length / 2)\n signals = np.zeros((0, time_length))\n result_r = np.zeros((0, length))\n result_b = np.zeros((0, length))\n result_h = np.zeros((0, length))\n result_m = np.zeros((0, length))\n result_p = np.zeros((0, length))\n answer = np.zeros((0, length))\n config = {\n 'SNR': [],\n 'count': [],\n 'frequencies': [],\n 'amplitudes': [],\n 'minamplitude': [],\n 'mindist': []\n } # type: Dict[str, Any]\n\n # Calculate window functions\n window_bartlett = np.bartlett(time_length)\n window_hanning = np.hanning(time_length)\n window_meyer = self._meyer_wavelet(time_length)\n window_poisson = exponential(time_length, sym=True, tau=(time_length/2)*(8.69/60.0))\n\n # Loop all data entries\n for data in dict_list:\n time = np.asarray(data['signal']['time']['data'])\n signals = np.concatenate((signals, np.reshape(time, (1,) + time.shape)))\n config['SNR'].append(data['signal']['SNR'])\n\n # Assemble the FFTs\n fft = np.fft.fft(time)[:length] / time_length\n result_r = np.concatenate((result_r, np.reshape(fft, (1,) + fft.shape)))\n fft = np.fft.fft(time * window_bartlett)[:length] / time_length\n result_b = np.concatenate((result_b, np.reshape(fft, (1,) + fft.shape)))\n fft = np.fft.fft(time * window_hanning)[:length] / time_length\n result_h = np.concatenate((result_h, np.reshape(fft, (1,) + fft.shape)))\n fft = np.fft.fft(time * window_meyer)[:length] / time_length\n result_m = np.concatenate((result_m, np.reshape(fft, (1,) + fft.shape)))\n fft = np.fft.fft(time * window_poisson)[:length] / time_length\n result_p = np.concatenate((result_p, np.reshape(fft, (1,) + fft.shape)))\n\n # Assemble all the frequencies and amplitudes\n count = 0\n freqs = []\n ampls = []\n counting = np.zeros((1, length))\n for subsig in data['signal']['parts']:\n if subsig['signal']['type'] == 'SingleOscillation':\n count += 1\n freq = subsig['signal']['frequency']\n counting[0, int(max(0, min(length - 1, round(freq))))] += 1\n freqs.append(freq)\n ampls.append(subsig['signal']['amplitude'])\n config['count'].append(count)\n\n # Sort frequencies and amplitudes by frequency\n np_freqs = np.asarray(freqs)\n sorting = 
np.unravel_index(np.argsort(np_freqs), np_freqs.shape)\n np_freqs = np_freqs[sorting]\n np_ampls = np.asarray(ampls)[sorting]\n\n # Assemble some statistics\n config['mindist'].append(999999. if len(np_freqs) < 2 else np.min(np.diff(np_freqs)))\n config['minamplitude'].append(np.min(np_ampls) if len(np_ampls) > 0 else 999999.)\n config['frequencies'].append(np_freqs)\n config['amplitudes'].append(np_ampls)\n answer = np.concatenate((answer, counting))\n\n # Assemble results\n ffts = (result_r, result_b, result_h, result_m, result_p)\n return signals, ffts, answer, config", "def _create_label_data(\n self,\n training_data: \"TrainingData\",\n label_id_dict: Dict[Text, int],\n attribute: Text,\n ) -> \"SessionDataType\":\n\n # Collect one example for each label\n labels_idx_example = []\n for label_name, idx in label_id_dict.items():\n label_example = self._find_example_for_label(\n label_name, training_data.intent_examples, attribute\n )\n labels_idx_example.append((idx, label_example))\n\n # Sort the list of tuples based on label_idx\n labels_idx_example = sorted(labels_idx_example, key=lambda x: x[0])\n labels_example = [example for (_, example) in labels_idx_example]\n\n # Collect features, precomputed if they exist, else compute on the fly\n if self._check_labels_features_exist(labels_example, attribute):\n features = self._extract_labels_precomputed_features(\n labels_example, attribute\n )\n else:\n features = self._compute_default_label_features(labels_example)\n\n label_data = {}\n self._add_to_session_data(label_data, \"label_features\", features)\n self._add_mask_to_session_data(label_data, \"label_mask\", \"label_features\")\n\n return label_data", "def __train_input_fn(self):\n ## To ensure unbiased training, grab random labels to define batch\n labels = np.random.choice(np.unique(self.labels_train), self.batch_size)\n ## Then grab a random spectrum from each label\n spectra = np.zeros((self.batch_size, len(self.spectra_train[0])))\n for i,l in enumerate(labels):\n good = self.labels_train == l\n idx = np.random.choice(np.sum(good))\n spectra[i] = self.spectra_train[good][idx]\n ## Recast into dictionary for estimator\n features = {'flux': spectra}\n ## Convert labels to integers\n ilabels = [self.label_index_lookup[l] for l in labels]\n return features, ilabels", "def _add_stops_to_df(self, stop_coords, signal_coords, route_df):\n\n self.stop_nn_indicies, self.stop_coord_nn = knn.find_knn(\n 1,\n route_df.geometry.values,\n stop_coords\n )\n\n\n signal_nn_indicies, singal_coord_nn = knn.find_knn(\n 1,\n route_df.geometry.values,\n signal_coords)\n\n route_df = route_df.assign(\n is_bus_stop = ([False] * len(route_df.index))\n )\n\n route_df = route_df.assign(\n is_signal = ([False] * len(route_df.index))\n )\n\n route_df = route_df.assign(\n is_stop = ([False] * len(route_df.index))\n )\n \n for i in self.stop_nn_indicies.ravel()[::3]:\n route_df.at[i, 'is_bus_stop'] = True\n route_df.at[i, 'is_stop'] = True\n \n for i in signal_nn_indicies.ravel()[::3]:\n route_df.at[i, 'is_stop'] = True\n route_df.at[i, 'is_signal'] = True\n\n # route_df.at[0, 'is_bus_stop'] = True\n # route_df.at[-1, 'is_bus_stop'] = True\n\n return route_df", "def extract_features(ds, config):\n feature_type = tf.constant(config[\"type\"], tf.string)\n args = _feature_extraction_kwargs_to_args(config)\n tf_device = _get_device_or_default(config)\n\n logger.info(\"Extracting '%s' features on device '%s' with arguments:\\n %s\", config[\"type\"], tf_device, \"\\n \".join(repr(a) for a in args[1:]))\n\n def 
_append_features(x):\n with tf.device(tf_device):\n features = tf_utils.extract_features(x[\"signal\"], x[\"sample_rate\"], *args)\n feature_types = tf.repeat(feature_type, tf.shape(features)[0])\n return dict(x, input=features, feature_type=feature_types)\n\n if \"group_by_input_length\" in config:\n max_batch_size = config[\"group_by_input_length\"][\"max_batch_size\"]\n logger.info(\"Grouping signals by length, creating batches of max size %d from each group\", max_batch_size)\n ds = group_by_axis_length(ds, \"signal\", max_batch_size, axis=0)\n else:\n batch_size = tf.constant(config.get(\"batch_size\", 1), tf.int64)\n logger.info(\"Batching signals with batch size %s, extracting features in batches.\", batch_size.numpy())\n ds = ds.batch(batch_size)\n\n return (ds.prefetch(TF_AUTOTUNE)\n .map(_append_features, num_parallel_calls=TF_AUTOTUNE)\n .unbatch())", "def input_fn():\n features = {\n feature_name: tf.constant(features_np_list[i])\n for i, feature_name in enumerate(feature_names)\n }\n return tf.data.Dataset.zip((tf.data.Dataset.from_tensors(features),\n tf.data.Dataset.from_tensors(label_np),))", "def predict_step(unused_scalar_stopping_signal):\n inputs = dequeue_fn()\n features, labels = inputs.features_and_labels()\n stopping_signals = inputs.signals()\n\n assert stopping_signals is not None, (\n 'Internal Error: `signals` is missing.')\n\n tpu_estimator_spec = self._call_model_fn(\n features, labels, is_export_mode=False)\n if not isinstance(tpu_estimator_spec, TPUEstimatorSpec):\n raise RuntimeError(\n 'estimator_spec used by TPU prediction must have type'\n '`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))\n\n captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)\n to_record = {}\n identity_fn = lambda **kwargs: kwargs\n # TODO(xiejw): Adds validation for prediction dictionrary.\n # TODO(xiejw): Adds support for single tensor as predictions.\n if not isinstance(tpu_estimator_spec.predictions, dict):\n raise TypeError('TPUEstimatorSpec.predictions must be dict of Tensors.')\n to_record['predictions'] = [identity_fn, tpu_estimator_spec.predictions]\n to_record['signals'] = [identity_fn, stopping_signals]\n if tpu_estimator_spec.host_call is not None:\n to_record['host_call'] = tpu_estimator_spec.host_call\n host_calls.record(to_record)\n\n with ops.control_dependencies(host_calls.create_enqueue_op()):\n return _StopSignals.as_scalar_stopping_signal(stopping_signals)", "def featurize(data):\n features = {}\n missing_weight = False\n for fieldname in STATIC_FIELDS:\n # Static fields use -1 to denote that the value was not measured.\n if data[fieldname][0][1] == -1:\n features[fieldname] = NAN_REPLACE\n else:\n features[fieldname] = float(data[fieldname][0][1])\n for fieldname in FIELDS:\n # Time-series fields may or may not be measured, but if they are present\n # in the dataset, then the value will be valid (i.e. 
nonnegative).\n if fieldname in data:\n values = [float(d[1]) for d in data[fieldname]]\n if -1 in values and fieldname == 'Weight':\n # Record that weight was missing for this record id.\n missing_weight = True\n field_features = set_features_to_nan(fieldname)\n else:\n field_features = {}\n field_features['{}_min'.format(fieldname)] = min(values)\n field_features['{}_max'.format(fieldname)] = max(values)\n field_features['{}_mean'.format(fieldname)] = np.mean(values)\n field_features['{}_first'.format(fieldname)] = values[0]\n field_features['{}_last'.format(fieldname)] = values[-1]\n field_features['{}_diff'.format(fieldname)] = values[-1] - values[0]\n else:\n field_features = set_features_to_nan(fieldname)\n features.update(field_features)\n return features, missing_weight", "def create_features(energy_data, label=None):\n energy_data['date'] = energy_data.index\n energy_data['hour'] = energy_data['Datetime'].dt.hour\n energy_data['dayofweek'] = energy_data['Datetime'].dt.dayofweek\n energy_data['month'] = energy_data['Datetime'].dt.month\n energy_data['quarter'] = energy_data['Datetime'].dt.quarter\n energy_data['year'] = energy_data['Datetime'].dt.year\n energy_data['dayofyear'] = energy_data['Datetime'].dt.dayofyear\n energy_data['dayofmonth'] = energy_data['Datetime'].dt.day\n energy_data['weekofyear'] = energy_data['Datetime'].dt.weekofyear\n energy_data['pjme_2_hrs_lag'] = energy_data['PJME_MW'].shift(2)\n energy_data['pjme_4_hrs_lag'] = energy_data['PJME_MW'].shift(4)\n energy_data['pjme_8_hrs_lag'] = energy_data['PJME_MW'].shift(8)\n energy_data['pjme_12_hrs_lag'] = energy_data['PJME_MW'].shift(12)\n energy_data['pjme_24_hrs_lag'] = energy_data['PJME_MW'].shift(24)\n energy_data['pjme_4_hrs_mean'] = energy_data['PJME_MW'].rolling(window=4).mean()\n energy_data['pjme_8_hrs_mean'] = energy_data['PJME_MW'].rolling(window=8).mean()\n energy_data['pjme_12_hrs_mean'] = energy_data['PJME_MW'].rolling(window=12).mean()\n energy_data['pjme_24_hrs_mean'] = energy_data['PJME_MW'].rolling(window=24).mean()\n energy_data['pjme_4_hrs_std'] = energy_data['PJME_MW'].rolling(window=4).std()\n energy_data['pjme_8_hrs_std'] = energy_data['PJME_MW'].rolling(window=8).std()\n energy_data['pjme_12_hrs_std'] = energy_data['PJME_MW'].rolling(window=12).std()\n energy_data['pjme_24_hrs_std'] = energy_data['PJME_MW'].rolling(window=24).std()\n energy_data['pjme_4_hrs_max'] = energy_data['PJME_MW'].rolling(window=4).max()\n energy_data['pjme_8_hrs_max'] = energy_data['PJME_MW'].rolling(window=8).max()\n energy_data['pjme_12_hrs_max'] = energy_data['PJME_MW'].rolling(window=12).max()\n energy_data['pjme_24_hrs_max'] = energy_data['PJME_MW'].rolling(window=24).max()\n energy_data['pjme_4_hrs_min'] = energy_data['PJME_MW'].rolling(window=4).min()\n energy_data['pjme_8_hrs_min'] = energy_data['PJME_MW'].rolling(window=8).min()\n energy_data['pjme_12_hrs_min'] = energy_data['PJME_MW'].rolling(window=12).min()\n energy_data['pjme_24_hrs_min'] = energy_data['PJME_MW'].rolling(window=24).min()\n\n features = energy_data[['hour', 'dayofweek', 'quarter', 'month', 'year',\n 'dayofyear', 'dayofmonth', 'weekofyear', 'pjme_2_hrs_lag', 'pjme_4_hrs_lag',\n 'pjme_8_hrs_lag', 'pjme_12_hrs_lag', 'pjme_24_hrs_lag', 'pjme_4_hrs_mean',\n \"pjme_8_hrs_mean\", \"pjme_12_hrs_mean\", \"pjme_24_hrs_mean\", \"pjme_4_hrs_std\",\n \"pjme_8_hrs_std\", \"pjme_12_hrs_std\", \"pjme_24_hrs_std\",\n \"pjme_4_hrs_max\", \"pjme_8_hrs_max\", \"pjme_12_hrs_max\", \"pjme_24_hrs_max\",\n \"pjme_4_hrs_min\", \"pjme_8_hrs_min\", 
\"pjme_12_hrs_min\", \"pjme_24_hrs_min\"]]\n if label:\n label = energy_data[label]\n return features, label\n return features", "def modify_data(self, sim, dat_in, dat_out, dat_out_raw, dat_out_oracle,\n sequential):\n dat_in, dat_out, dat_out_raw, dat_out_oracle, scl_grps = (\n self.__create_buckets(\n sim, dat_in, dat_out, dat_out_raw, dat_out_oracle, sequential)\n if self.rtt_buckets else (\n self.__create_windows(dat_in, dat_out, sequential)\n if self.windows else (\n dat_in, dat_out, dat_out_raw, dat_out_oracle,\n # Each feature is part of its own scaling group.\n list(range(len(dat_in.dtype.names))))))\n return dat_in, dat_out, dat_out_raw, dat_out_oracle, scl_grps", "def train_weak_signals(data, weak_signal_data, num_weak_signal):\n\n train_data, train_labels = data['training_data']\n val_data, val_labels = data['validation_data']\n test_data, test_labels = data['test_data']\n\n n, d = train_data.shape\n\n weak_signal_train_data = weak_signal_data[0]\n weak_signal_val_data = weak_signal_data[1]\n weak_signal_test_data = weak_signal_data[2]\n\n weak_signals = []\n stats = np.zeros(num_weak_signal)\n w_sig_probabilities = []\n w_sig_test_accuracies = []\n weak_val_accuracy = []\n\n\n for i in range(num_weak_signal):\n # fit model\n model = LogisticRegression(solver = \"lbfgs\", max_iter= 1000)\n model.fit(weak_signal_train_data[i], train_labels)\n weak_signals.append(model)\n\n # evaluate probability of P(X=1)\n probability = model.predict_proba(weak_signal_val_data[i])[:, 1]\n score = val_labels * (1 - probability) + (1 - val_labels) * probability\n stats[i] = np.sum(score) / score.size\n w_sig_probabilities.append(probability)\n\n # evaluate accuracy for validation data\n weak_val_accuracy.append(accuracy_score(val_labels, np.round(probability)))\n\n # evaluate accuracy for test data\n test_predictions = model.predict(weak_signal_test_data[i])\n w_sig_test_accuracies.append(accuracy_score(test_labels, test_predictions))\n\n\n model = {}\n model['models'] = weak_signals\n model['probabilities'] = np.array(w_sig_probabilities)\n model['error_bounds'] = stats\n model['validation_accuracy'] = weak_val_accuracy\n model['test_accuracy'] = w_sig_test_accuracies\n\n return model", "def _make_features(self):\n self.features = {}\n self.labels = {}\n for key in ['train', 'cv', 'test']:\n if self.radius is not None:\n feat, label = self._sliding_window(self.images[key], self.masks[key], window_radius=self.radius)\n self.features[key] = feat\n self.labels[key] = label\n else:\n self.features[key] = self.images[key].reshape(-1, 3)\n self.labels[key] = self.masks[key].ravel()", "def process_datapoints(datapoints):\n point_dict = {}\n\n ddata = [p for p in datapoints]\n for point in ddata:\n point_dict[hash_datapoint(point)] = {'results': [],\n 'time': [],\n 'features': point['features']}\n\n for point in ddata:\n point_dict[hash_datapoint(point)]['results'].append(point['result'])\n point_dict[hash_datapoint(point)]['time'].append(point['time'])\n\n for e in point_dict:\n result_array = np.array(point_dict[e]['results'])\n point_dict[e]['n'] = len(point_dict[e]['results'])\n point_dict[e]['mu'] = np.mean(result_array)\n point_dict[e]['sigma'] = np.std(result_array)\n del point_dict[e]['results']\n\n return point_dict", "def Map(dataset, map_func, input_columns=None):\n return dataset.map(map_func)", "def _feedward_signal(self, sample):\r\n if sample.shape[1] != self.input_dimension:\r\n raise ValueError\r\n\r\n input_layer = sample\r\n\r\n for layer in self.layers:\r\n output_layer = 
layer.function_signal(input_layer)\r\n input_layer = output_layer\r\n\r\n return output_layer", "def feature_dict(sent, i):\n # WORK HERE!!\n return {}", "def preprocessing_fn(inputs):\n outputs = {}\n for key in _DENSE_FLOAT_FEATURE_KEYS:\n # Preserve this feature as a dense float, setting nan's to the mean.\n outputs[_transformed_name(key)] = tft.scale_to_z_score(\n _fill_in_missing(inputs[key]))\n\n for key in _VOCAB_FEATURE_KEYS:\n # Build a vocabulary for this feature.\n outputs[_transformed_name(key)] = tft.compute_and_apply_vocabulary(\n _fill_in_missing(inputs[key]),\n top_k=_VOCAB_SIZE,\n num_oov_buckets=_OOV_SIZE)\n\n for key in _BUCKET_FEATURE_KEYS:\n outputs[_transformed_name(key)] = tft.bucketize(\n _fill_in_missing(inputs[key]), _FEATURE_BUCKET_COUNT,\n always_return_num_quantiles=False)\n\n for key in _CATEGORICAL_FEATURE_KEYS:\n outputs[_transformed_name(key)] = _fill_in_missing(inputs[key])\n\n # Was this passenger a big tipper?\n taxi_fare = _fill_in_missing(inputs[_FARE_KEY])\n tips = _fill_in_missing(inputs[_LABEL_KEY])\n outputs[_transformed_name(_LABEL_KEY)] = tf.where(\n tf.is_nan(taxi_fare),\n tf.cast(tf.zeros_like(taxi_fare), tf.int64),\n # Test if the tip was > 20% of the fare.\n tf.cast(\n tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))), tf.int64))\n\n return outputs", "def prepare_label_feature(self, label2id: dict):\n text, wp_text, label, wp_label, wp_mark = [], [], [], [], []\n sorted_labels = sorted(label2id.items(), key=lambda x: x[1])\n for label_name, label_id in sorted_labels:\n if label_name == '[PAD]':\n continue\n tmp_text = self.convert_label_name(label_name)\n tmp_wp_text = self.tokenizer.tokenize(' '.join(tmp_text))\n text.extend(tmp_text)\n wp_text.extend(tmp_wp_text)\n label.extend(['O'] * len(tmp_text))\n wp_label.extend(['O'] * len(tmp_wp_text))\n wp_mark.extend([0] + [1] * (len(tmp_wp_text) - 1))\n label_item = self.data_item2feature_item(DataItem(text, label, wp_text, wp_label, wp_mark), 0)\n label_input = self.get_test_model_input(label_item)\n return label_input, label_item", "def process_dataset(dataset, func):\n new_dataset = copy.copy(dataset)\n del new_dataset[\"val\"]\n new_dataset.update(func(dataset))\n return new_dataset", "def setup_signals():\n # The total number of signals in the recording\n num_signals = 32\n\n # What brain region each signal was recorded from\n regions = [\"SUB\"] * 2 + [\"RSC\"] * 2 + [\"SUB\"] * 28\n\n # If the wires were bundled, or any other kind of grouping existed\n # If no grouping, groups = [i for i in range(num_signals)]\n groups = [\"LFP\", \"LFP\", \"LFP\", \"LFP\"]\n for i in range(2, 9):\n groups.append(i)\n groups.append(i)\n groups.append(i)\n groups.append(i)\n\n # The sampling rate in Hz of each signal\n sampling_rate = [250] * num_signals\n channel_type = [\"eeg\"] * num_signals\n\n # This just passes the information on\n output_dict = {\n \"num_signals\": num_signals,\n \"region\": regions,\n \"group\": groups,\n \"sampling_rate\": sampling_rate,\n \"channel_type\": channel_type,\n }\n\n return output_dict", "def tfds_map(self, example):\n\t\tif len(self.get_labels()) > 1:\n\t\t\texample.label = self.get_labels()[int(example.label)]\n\t\treturn example", "def tfds_map(self, example):\r\n if len(self.get_labels()) > 1:\r\n example.label = self.get_labels()[int(example.label)]\r\n return example", "def prepare_features(features, subject_labels):\n data = {}\n labels = {}\n for stage in STAGES:\n labels[stage] = []\n features_combined = []\n\n for subject in subject_labels.keys():\n 
current = []\n for feature, columns in features:\n if feature[stage][subject].size == 0:\n # do not look at empty arrays\n continue\n # collect features for current stage and subject\n if len(feature[stage][subject].shape) == 2:\n # feature is 2-dimensional, just use transpose\n current.append(feature[stage][subject].T)\n elif len(feature[stage][subject].shape) == 3:\n # feature is 3-dimensional, manually reshape to 2-dimensional\n # np.reshape does not work here\n reshaped = []\n for electrode in range(feature[stage][subject].shape[0]):\n for band in range(feature[stage][subject].shape[2]):\n if len(feature[stage][subject].shape) != 3:\n continue\n reshaped.append(feature[stage][subject][electrode, :, band])\n current.append(np.array(reshaped).T)\n\n if len(current) == 0:\n continue\n\n # merge the features for the current stage and subject\n features_combined.append(np.concatenate(current, axis=1))\n\n # concatenate the label name for the current subject as often as there are samples\n labels[stage] += [subject_labels[subject]] * features_combined[-1].shape[0]\n\n # concatenate the features for all subjects\n data[stage] = np.concatenate(features_combined, axis=0)\n labels[stage] = np.array(labels[stage])\n\n return data, labels", "def feature_mapping(x, y, power, as_ndarray=False):\n # data = {}\n # # inclusive\n # for i in np.arange(power + 1):\n # for p in np.arange(i + 1):\n # data[\"f{}{}\".format(i - p, p)] = np.power(x, i - p) * np.power(y, p)\n\n data = {\"f{}{}\".format(i - p, p): np.power(x, i - p) * np.power(y, p)\n for i in np.arange(power + 1)\n for p in np.arange(i + 1)\n }\n\n if as_ndarray:\n return pd.DataFrame(data).as_matrix()\n else:\n return pd.DataFrame(data)", "def preprocessing_fn(inputs):\n outputs = {}\n\n for key in ONE_HOT_FEATURES.keys():\n dim = ONE_HOT_FEATURES[key]\n int_value = tft.compute_and_apply_vocabulary(\n fill_in_missing(inputs[key]), top_k=dim + 1\n )\n outputs[transformed_name(key)] = convert_num_to_one_hot(\n int_value, num_labels=dim + 1\n )\n\n for key, bucket_count in BUCKET_FEATURES.items():\n temp_feature = tft.bucketize(\n convert_zip_code(fill_in_missing(inputs[key])),\n bucket_count,\n )\n outputs[transformed_name(key)] = convert_num_to_one_hot(\n temp_feature, num_labels=bucket_count + 1\n )\n\n for key in TEXT_FEATURES.keys():\n outputs[transformed_name(key)] = fill_in_missing(inputs[key])\n\n outputs[transformed_name(LABEL_KEY)] = fill_in_missing(inputs[LABEL_KEY])\n\n return outputs" ]
[ "0.6490509", "0.5323185", "0.50812894", "0.50604886", "0.50385696", "0.49665913", "0.4957273", "0.48859507", "0.48683363", "0.47766396", "0.47728422", "0.4761893", "0.47422934", "0.47389808", "0.47379407", "0.47214484", "0.46899843", "0.46709707", "0.4644301", "0.46401665", "0.46310106", "0.46255326", "0.4618445", "0.46141833", "0.46118987", "0.46082118", "0.4601172", "0.45850053", "0.45782673", "0.457735" ]
0.64638627
1
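A simplified, self-contained sketch of the restructuring described above, under the assumption of a plain tf.data pipeline rather than the estimator internals: every element becomes a dict with 'features', 'labels', and 'signals' as keys.

import tensorflow as tf

BATCH = 4

def to_dict(features, labels):
    # Mirror the structure produced by _map_fn: three distinct keys per element.
    signals = {"stopping": tf.zeros([BATCH, 1], dtype=tf.bool)}
    return {"features": features, "labels": labels, "signals": signals}

ds = tf.data.Dataset.from_tensor_slices((tf.zeros([8, 4]), tf.zeros([8])))
ds = ds.batch(BATCH, drop_remainder=True).map(to_dict)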
Pads out the batch dimension of features and labels.
def pad_features_and_labels(features, labels, batch_size):
  real_batch_size = array_ops.shape(
      _PaddingSignals._find_any_tensor(features))[0]

  batch_size_tensor = constant_op.constant(batch_size, dtypes.int32)

  check_greater = check_ops.assert_greater_equal(
      batch_size_tensor, real_batch_size,
      data=(batch_size_tensor, real_batch_size),
      message='The real batch size should not be greater than batch_size.')

  with ops.control_dependencies([check_greater]):
    missing_count = batch_size_tensor - real_batch_size

  def pad_single_tensor(tensor):
    """Pads out the batch dimension of a tensor to the complete batch_size."""
    rank = len(tensor.shape)
    assert rank > 0
    padding = array_ops.stack([[0, missing_count]] + [[0, 0]] * (rank - 1))
    padded_shape = (batch_size,) + tuple(tensor.shape[1:])
    padded_tensor = array_ops.pad(tensor, padding)
    padded_tensor.set_shape(padded_shape)
    return padded_tensor

  def nest_pad(tensor_or_dict):
    return nest.map_structure(pad_single_tensor, tensor_or_dict)

  features = nest_pad(features)
  if labels is not None:
    labels = nest_pad(labels)

  padding_mask = _PaddingSignals._padding_mask(
      real_batch_size, missing_count, batch_size)

  return padding_mask, features, labels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _pad_feature_sequences(sequences, pad=PAD, feature_dims=768):\n lengths = [tf.shape(x)[0] for x in sequences]\n padded_size = tf.reduce_max(lengths)\n padded_sequences = tf.stack([\n tf.pad(x,\n paddings=[[0, padded_size - lengths[i]], [0, 0]],\n mode='CONSTANT',\n constant_values=pad) for i, x in enumerate(sequences)\n ])\n return padded_sequences, lengths", "def add_dummy_batch_dim(x):\n return x.view(1, x.size()[0], x.size()[1], x.size()[2])", "def pad_examples(x, desired_batch_size):\n batch_pad = desired_batch_size - x.shape[0]\n tile_dims = [1] * len(x.shape)\n tile_dims[0] = batch_pad\n return np.concatenate([x, np.tile(x[-1], tile_dims)], axis=0)", "def pad_labellings(labels):\n target_length = max([len(labels) for labels in labels])\n padded = []\n\n for label in labels:\n padding_size = target_length - len(label)\n\n padded_label = label + [0] * padding_size\n\n assert len(padded_label) > 0\n\n padded.append(padded_label)\n\n return padded", "def batch_size(features, labels):\n return extract_batch_length(features)", "def batchify(batch):\n\n\tquestion_len = list()\n\tlabel_list = list()\n\tfor ex in batch:\n\t\tquestion_len.append(len(ex[0]))\n\t\tlabel_list.append(ex[1])\n\n\t'''\n\tPadding the labels - unequal length sequences for sequenial data like we have. \n\tSince actual labels are 0/1 - we pad with -1, and will use this when 'masking' labels during loss and\n\taccuracy evaluation.\n\t'''\n\ttarget_labels = torch.nn.utils.rnn.pad_sequence([torch.tensor(y) for y in label_list], padding_value=-1).t()\n\n\t# dimension is dimension of every feature vector = n_guesses in this homework setting\n\tdim = batch[0][0].shape[1]\n\n\t# similar padding happens for the feature vectors, with vector of all zeros appended.\n\tx1 = torch.FloatTensor(len(question_len), max(question_len), dim).zero_()\n\tfor i in range(len(question_len)):\n\t\tquestion_feature_vec = batch[i][0]\n\t\tvec = torch.FloatTensor(question_feature_vec)\n\t\tx1[i, :len(question_feature_vec)].copy_(vec)\n\tq_batch = {'feature_vec': x1, 'len': torch.FloatTensor(question_len), 'labels': target_labels}\n\treturn q_batch", "def _set_shapes(self, batch_size, features_in, labels_in):\n features_in['mcts_features'] = tf.reshape(\n features_in['mcts_features'], [batch_size, self._env_state_space],\n name='mcts_feature_reshape')\n\n features_in['policy_features'] = tf.reshape(\n features_in['policy_features'], [batch_size, self._env_state_space],\n name='policy_feature_reshape')\n\n labels_in['action_tensor'] = tf.reshape(\n labels_in['action_tensor'], [batch_size, self._env_action_space],\n name='action_reshape')\n\n labels_in['mean_tensor'] = tf.reshape(\n labels_in['mean_tensor'], [batch_size, self._env_action_space],\n name='mean_reshape')\n\n labels_in['logstd_tensor'] = tf.reshape(\n labels_in['logstd_tensor'], [batch_size, self._env_action_space],\n name='logstd_reshape')\n\n labels_in['value_tensor'] = tf.reshape(\n labels_in['value_tensor'], [batch_size], name='value_reshape')\n\n labels_in['return_tensor'] = tf.reshape(\n labels_in['return_tensor'], [batch_size], name='return_reshape')\n\n labels_in['old_neg_logprob_tensor'] = tf.reshape(\n labels_in['old_neg_logprob_tensor'], [batch_size], name='log_reshape')\n\n labels_in['mcts_enable_tensor'] = tf.reshape(\n labels_in['mcts_enable_tensor'], [batch_size], name='mcts_reshape')\n\n labels_in['policy_action_tensor'] = tf.reshape(\n labels_in['policy_action_tensor'], [batch_size, self._env_action_space],\n name='policy_action_reshape')\n\n 
labels_in['policy_value_tensor'] = tf.reshape(\n labels_in['policy_value_tensor'], [batch_size],\n name='policy_value_reshape')\n\n labels_in['policy_return_tensor'] = tf.reshape(\n labels_in['policy_return_tensor'], [batch_size],\n name='policy_return_reshape')\n\n labels_in['policy_old_neg_logprob_tensor'] = tf.reshape(\n labels_in['policy_old_neg_logprob_tensor'], [batch_size],\n name='log_reshape')\n\n return features_in, labels_in", "def pad_samples(features, maxlen=50, pad=0):\n padded_features = []\n for feature in features:\n if len(feature) >= maxlen:\n padded_feature = feature[:maxlen]\n else:\n padded_feature = feature\n while len(padded_feature) < maxlen:\n padded_feature.append(pad)\n padded_features.append(padded_feature)\n return padded_features", "def pad(batch):\n batch_split = list(zip(*batch))\n seqs, num, targs, lengths, visits = batch_split[0], batch_split[1], batch_split[2], batch_split[3], batch_split[4]\n num = torch.vstack([torch.as_tensor(sample, dtype=torch.float32) for sample in zip(*num)]).T\n visits = [torch.as_tensor(s, dtype=torch.long) for s in visits]\n return [list(seqs), num, torch.as_tensor(lengths, dtype=torch.long), visits], \\\n torch.as_tensor(targs, dtype=torch.float32)", "def provide_label(self):\n return [(k, tuple([self.batch_size] + list(v.shape[1:]))) for k, v in self._label]", "def zero_pad_features(features: List[np.ndarray],\n target_shape: tuple) -> List[np.ndarray]:\n pad_features = []\n for feature in features:\n feature_shape = feature.shape\n if len(feature_shape) < len(target_shape): # add extra dimensions\n for i in range(len(target_shape) - len(feature_shape)):\n feature = np.expand_dims(feature, axis=len(feature.shape) + 1)\n feature_shape = feature.shape\n elif len(feature_shape) > len(target_shape):\n raise ValueError(\"Provided target shape must be bigger then the original \"\n \"shape. (provided: {}, original {})\".format(len(target_shape), len(feature_shape)))\n diff_shape = np.subtract(target_shape, feature_shape) # pylint: disable=assignment-from-no-return\n if np.any(diff_shape < 0):\n raise ValueError(\"Provided target values must be bigger then the original \"\n \"values for each dimension. (provided: {}, original {})\".format(target_shape, feature_shape))\n # pad format: ((before_1, after_1), ... 
(before_N, after_N))\n diff_shape = [[0, d] for d in diff_shape] # pylint: disable=not-an-iterable\n p = np.pad(feature, diff_shape, 'constant', constant_values=0)\n pad_features.append(p)\n return pad_features", "def batch_features_labels(features, labels, batch_size):\n for start in range(0, len(features), batch_size):\n end = min(start + batch_size, len(features))\n #print(labels[start:end])\n yield features[start:end], labels[start:end]", "def fixed_padding(inputs, kernel_size, data_format='channels_first'):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n if data_format == 'channels_first':\n padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],\n [pad_beg, pad_end], [pad_beg, pad_end]])\n else:\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],\n [pad_beg, pad_end], [0, 0]])\n\n return padded_inputs", "def create_feature_and_label(inputs, feature_size: int):\n input_token_ids = inputs['features']\n labels = inputs['labels']\n num_tokens = inputs['num_tokens']\n\n input_mask = tf.sequence_mask(num_tokens, feature_size, dtype=tf.int32)\n type_id = tf.sequence_mask(num_tokens, feature_size, dtype=tf.int32)\n features = [input_token_ids, input_mask, type_id]\n\n return features, labels", "def fixed_padding(inputs, kernel_size, data_format):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n\n if data_format == 'channels_first':\n padded_inputs = tf.pad(inputs, [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]])\n else:\n padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])\n\n return padded_inputs", "def zero_pad_features(features, depth):\n\n n = int(features.get_shape().dims[-1])\n extra_feature_count = depth - n\n assert n >= 0\n if n > 0:\n padding = tf.tile(features[:, :, :, :1] * 0,\n [1, 1, 1, extra_feature_count])\n features = tf.concat([features, padding], 3)\n return features", "def pad_dataset(dataset, padding=0):\n max_l = max(len(x) for x in dataset[\"input_ids\"])\n for name in PADDED_INPUTS:\n dataset[name] = [x + [padding if name != \"lm_labels\" else -100] * (max_l - len(x)) for x in dataset[name]]\n return dataset", "def Batch_Size_Normalization(batch, batch_len, pad_token, batch_size):\n max_length = max(batch_len)\n current_batch_len = len(batch)\n need_more = batch_size-current_batch_len\n if need_more==0:\n return batch\n\n padding_array = np.ones(max_length)*pad_token\n for i in range(need_more):\n batch.append(padding_array)\n return batch", "def pad_input(X):\n num_features = len(X)\n if not float(np.log2(num_features)).is_integer():\n size_needed = pow(2, math.ceil(math.log(num_features) / math.log(2)))\n X = np.pad(X, (0, size_needed - num_features), \"constant\")\n return X", "def adapt_batch(batch):\n image_arrays, labellings = batch\n\n current_batch_size = len(labellings)\n\n images = np.array(image_arrays).reshape(current_batch_size, *image_arrays[0].shape)\n\n padded_labellings = pad_labellings(labellings)\n\n labels = np.array(padded_labellings, dtype=np.int32).reshape(current_batch_size, -1)\n\n input_lengths = compute_input_lengths(image_arrays)\n\n label_lengths = np.array([len(labelling) for labelling in labellings],\n dtype=np.int32).reshape(current_batch_size, 1)\n\n return [images, labels, input_lengths, label_lengths], labels", "def reformat(dataset, labels):\n n_dataset = dataset.reshape((-1, IMAGE_SIZE * IMAGE_SIZE)).astype(np.float32)\n\n # Convert to the one hot format\n n_labels = (np.arange(NUM_LABELS) == 
labels[:, None]).astype(np.float32)\n\n return n_dataset, n_labels", "def batch_features_labels(features, labels, batch_size):\r\n for start in range(0, len(features), batch_size):\r\n end = min(start + batch_size, len(features))\r\n yield features[start:end], labels[start:end]", "def batch_features_labels(features, labels, batch_size):\n for start in range(0, len(features), batch_size):\n end = min(start + batch_size, len(features))\n yield features[start:end], labels[start:end]", "def batch_features_labels(features, labels, batch_size):\n for start in range(0, len(features), batch_size):\n end = min(start + batch_size, len(features))\n yield features[start:end], labels[start:end]", "def batch_features_labels(features, labels, batch_size):\n for start in range(0, len(features), batch_size):\n end = min(start + batch_size, len(features))\n yield features[start:end], labels[start:end]", "def embed_features(batch, f_size):\n for f in range(Config.num_feature):\n feature_val = batch[:, f]\n num_cat_value = Config.schema[f]\n\n if f == 0:\n if num_cat_value == 1:\n vector = tf.reshape(feature_val, [-1, 1])\n else:\n vector = tf.nn.embedding_lookup(embed_dict[f], tf.cast(\n feature_val, tf.int32))\n else:\n if num_cat_value == 1:\n vector = tf.concat(1, [vector, tf.reshape(feature_val,\n [-1, 1])])\n else:\n vector = tf.concat(1, [vector, tf.nn.embedding_lookup(\n embed_dict[f], tf.cast(feature_val, tf.int32))])\n\n result = tf.reshape(vector, [-1, 1, f_size])\n\n return result", "def pad_packed_collate(batch):\n if isinstance(batch[0], np.ndarray):\n pass\n elif isinstance(batch[0], collections.Sequence):\n transposed = zip(*batch)\n return [pad_packed_collate(samples) for samples in transposed]\n\n # pad sequence as TxBx*\n # T is length[0] longest seq, B is batch, * is feature\n # length and padded is sorted in descending order\n if len(batch) == 1:\n sorted_batch = batch\n padded_batch = batch[0][:, None, :] # add batch dimension\n lengths = [padded_batch.shape[0]]\n else:\n # sort\n sorted_batch = sorted(batch, key=lambda x: x.shape[0], reverse=True)\n lengths = [s.shape[0] for s in sorted_batch]\n\n # pad\n max_len, n_feats = sorted_batch[0].shape\n padded_batch = \\\n [np.concatenate((s, np.zeros((max_len - s.shape[0], n_feats),\n dtype=np.float32)), axis=0)\n if s.shape[0] != max_len else s for s in sorted_batch]\n\n # stack\n padded_batch = np.stack(padded_batch, axis=1)\n\n # pack\n packed_batch = pack_padded_sequence(Variable(t.from_numpy(padded_batch)), lengths,\n batch_first=False)\n\n return packed_batch", "def _pad_to_full_instance_dim(keypoint_coords, keypoint_scores, instance_inds,\n max_instances):\n batch_size, _, num_keypoints, _ = (\n shape_utils.combined_static_and_dynamic_shape(keypoint_coords))\n kpt_coords_transposed = tf.transpose(keypoint_coords, [1, 0, 2, 3])\n kpt_scores_transposed = tf.transpose(keypoint_scores, [1, 0, 2])\n instance_inds = tf.expand_dims(instance_inds, axis=-1)\n kpt_coords_scattered = tf.scatter_nd(\n indices=instance_inds,\n updates=kpt_coords_transposed,\n shape=[max_instances, batch_size, num_keypoints, 2])\n kpt_scores_scattered = tf.scatter_nd(\n indices=instance_inds,\n updates=kpt_scores_transposed,\n shape=[max_instances, batch_size, num_keypoints])\n keypoint_coords_padded = tf.transpose(kpt_coords_scattered, [1, 0, 2, 3])\n keypoint_scores_padded = tf.transpose(kpt_scores_scattered, [1, 0, 2])\n return keypoint_coords_padded, keypoint_scores_padded", "def _padding(inputs, paddings, data_format):\n if data_format == 'channels_first':\n 
padded_inputs = tf.pad(\n inputs, [[0, 0], [0, 0], paddings, paddings])\n else:\n padded_inputs = tf.pad(\n inputs, [[0, 0], paddings, paddings, [0, 0]])\n return padded_inputs", "def apply_padding(self, batch_list):\n max_len = max([len(idx_seq) for idx_seq in batch_list])\n padded = [idx_seq + [self.vocab.pad_id] * (max_len - len(idx_seq)) for idx_seq in batch_list]\n return padded" ]
[ "0.65667975", "0.6381753", "0.6247113", "0.62424535", "0.6191029", "0.6129018", "0.5991808", "0.5989984", "0.596495", "0.59362715", "0.59317976", "0.5920891", "0.5863416", "0.5861643", "0.58507115", "0.5837591", "0.58372873", "0.5837181", "0.5827929", "0.5821655", "0.57975", "0.5791395", "0.5791093", "0.5791093", "0.5791093", "0.57886297", "0.5776075", "0.57744837", "0.57701826", "0.5753416" ]
0.7201086
0
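A minimal NumPy sketch of the same padding idea, assuming a single feature array rather than a nested structure: pad a partial batch up to batch_size and record a padding mask (0 for real rows, 1 for padded rows), which is exactly what the following slice_tensor_or_dict entry relies on to recover the real data.

import numpy as np

def pad_batch(features, batch_size):
    # features: array of shape [real_batch_size, ...] with real_batch_size <= batch_size
    real = features.shape[0]
    missing = batch_size - real
    pad_widths = [(0, missing)] + [(0, 0)] * (features.ndim - 1)
    padded = np.pad(features, pad_widths)            # zero-pad the batch dimension
    padding_mask = np.concatenate([np.zeros(real), np.ones(missing)])
    return padded, padding_mask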
Slices the real Tensors according to the padding mask in signals.
def slice_tensor_or_dict(tensor_or_dict, signals):
  padding_mask = signals['padding_mask']
  batch_size = array_ops.shape(padding_mask)[0]

  def verify_batch_size(tensor):
    check_batch_size = math_ops.equal(batch_size, tensor.shape[0])
    with ops.control_dependencies([check_batch_size]):
      return array_ops.identity(tensor)

  def slice_single_tensor(tensor):
    rank = len(tensor.shape)
    assert rank > 0
    real_batch_size = batch_size - math_ops.reduce_sum(padding_mask)
    return verify_batch_size(tensor)[0:real_batch_size]

  # As we split the Tensors to all TPU cores and concat them back, it is
  # important to ensure the real data is placed before padded ones, i.e.,
  # order is preserved. By that, the sliced padding mask should have all 0's.
  # If this assertion failed, the slice logic here would not hold.
  sliced_padding_mask = slice_single_tensor(padding_mask)
  assert_padding_mask = math_ops.equal(
      math_ops.reduce_sum(sliced_padding_mask), 0)

  with ops.control_dependencies([assert_padding_mask]):
    should_stop = _StopSignals.should_stop(
        _StopSignals.as_scalar_stopping_signal(signals))

  is_full_batch = math_ops.equal(math_ops.reduce_sum(padding_mask), 0)

  def slice_fn(tensor):
    # If the current batch is full batch or part of stopping signals, we do
    # not need to slice to save performance.
    return control_flow_ops.cond(
        math_ops.logical_or(should_stop, is_full_batch),
        (lambda: verify_batch_size(tensor)),
        (lambda: slice_single_tensor(tensor)))

  return nest.map_structure(slice_fn, tensor_or_dict)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def slice(tensor):\n out = tensor[:, 444:524, :]\n return out", "def slice_signal(file, window_size, stride, sample_rate):\n wav, sr = librosa.load(file, sr=sample_rate)\n hop = int(window_size * stride)\n slices = []\n for end_idx in range(window_size, len(wav), hop):\n start_idx = end_idx - window_size\n slice_sig = wav[start_idx:end_idx]\n #print(type(slice_sig),' ',slice_sig.shape,'begin:',start_idx,'end_idx:',end_idx)\n slices.append(slice_sig)\n\n if(len(slices)*window_size<len(wav)):\n slice_sig = np.zeros((window_size,))\n temp = wav[len(slices)*window_size:]\n slice_sig[:len(temp)] = temp\n slices.append(slice_sig)\n #print(type(slice_sig), ' ', slice_sig.shape,'begin:',0,'end_idx:',len(temp))\n\n return slices", "def _slice(tensor, size, i):\n return tensor[:, i * size : (i + 1) * size]", "def getslice(arr: tf.Tensor, slice: tf.Tensor, axis: int) -> tf.Tensor:\n if arr is None:\n return None\n return tf.boolean_mask(arr, slice, axis=axis)", "def generate_mask_aligned(data, length, tp_union):\n mask = np.zeros((data.shape[0], tp_union.shape[0]))\n e_data = torch.zeros((data.shape[0], tp_union.shape[0], data.shape[2]))\n e_data = e_data.to(data.device)\n r_arr = []\n\n for i, l in enumerate(length):\n mask[i, :l] = 1\n e_data[i, :l] = data[i, :l]\n r_arr.append(np.where(mask[i] == 1)[0])\n\n return mask, e_data, r_arr", "def slice(ds, timedelta_input, timedelta_output, to_predict, stepwidth, input_sampling, output_sampling):\n\n inputs = []\n outputs = []\n\n start_input_frame = ds.index[0]\n while start_input_frame + timedelta_input + timedelta_output <= ds.index[-1]:\n\n end_input_frame = start_input_frame + timedelta_input\n end_output_frame = end_input_frame+timedelta_output\n\n input_frame = ds[start_input_frame:end_input_frame]\n output_frame = ds[end_input_frame:end_output_frame]\n\n input_frame = input_frame.resample(input_sampling)\n output_frame = output_frame.resample(output_sampling)\n\n for k in output_frame.keys():\n if k not in to_predict:\n del output_frame[k]\n\n input_shape = input_frame.shape\n output_shape = output_frame.shape\n\n inputs.append(input_frame.as_matrix().flatten())\n outputs.append(output_frame.as_matrix().flatten())\n\n #Move forward\n start_input_frame = start_input_frame + stepwidth\n\n\n return (inputs, input_shape), (outputs, output_shape)", "def input_slice(self, inputs):\n result = []\n for i in range(int(len(inputs) / self.window_size)):\n result.append(inputs[i * self.window_size:(i + 1) * self.window_size])\n return result", "def create_padding_mask(seq):\r\n seq = tf.cast(tf.math.equal(seq, 0), tf.float32)\r\n return seq[:, tf.newaxis, tf.newaxis, :] # (batch_size, 1, 1, seq_len)\r", "def _forward_padded(self, x, x_mask):\n # Compute sorted sequence lengths\n lengths = x_mask.data.eq(0).long().sum(1).squeeze()\n _, idx_sort = torch.sort(lengths, dim=0, descending=True)\n _, idx_unsort = torch.sort(idx_sort, dim=0)\n\n lengths = list(lengths[idx_sort])\n idx_sort = Variable(idx_sort)\n idx_unsort = Variable(idx_unsort)\n\n # Sort x\n x = x.index_select(0, idx_sort)\n\n # Transpose batch and sequence dims\n x = x.transpose(0, 1)\n\n # Pack it up\n rnn_input = nn.utils.rnn.pack_padded_sequence(x, lengths)\n\n # Encode all layers\n outputs = [rnn_input]\n for i in range(self.num_layers):\n rnn_input = outputs[-1]\n\n # Apply dropout to input\n if self.dropout_rate > 0:\n dropout_input = F.dropout(rnn_input.data,\n p=self.dropout_rate,\n training=self.training)\n rnn_input = nn.utils.rnn.PackedSequence(dropout_input,\n 
rnn_input.batch_sizes)\n outputs.append(self.rnns[i](rnn_input)[0])\n\n # Unpack everything\n for i, o in enumerate(outputs[1:], 1):\n outputs[i] = nn.utils.rnn.pad_packed_sequence(o)[0]\n\n # Concat hidden layers or take final\n if self.concat_layers:\n output = torch.cat(outputs[1:], 2)\n else:\n output = outputs[-1]\n\n # Transpose and unsort\n output = output.transpose(0, 1)\n output = output.index_select(0, idx_unsort)\n\n # Dropout on output layer\n if self.dropout_output and self.dropout_rate > 0:\n output = F.dropout(output,\n p=self.dropout_rate,\n training=self.training)\n return output", "def unset_padding(self):\n if self.metadata.Signal.has_item('pad_tuple'):\n Npy, Npx = self.metadata.Signal.pad_tuple\n else:\n # If no padding was done, return the same signal\n return self\n Nx, Ny = self.axes_manager.signal_shape\n s=self.deepcopy()\n del s.metadata.Signal.pad_tuple\n if self.axes_manager.navigation_dimension == 0:\n s.data = s.data[Npy[0]:(Ny-Npy[1]), Npx[0]:(Nx-Npx[1])]\n s.get_dimensions_from_data()\n elif self.axes_manager.navigation_dimension > 0:\n s.data = s.data[..., Npy[0]:(Ny-Npy[1]), Npx[0]:(Nx-Npx[1])]\n s.get_dimensions_from_data()\n # copy in case of non-linear defoci\n s.axes_manager.navigation_axes[0].axis = self.axes_manager.navigation_axes[0].axis.copy()\n return s", "def unpad(x, i0, i1):\n return x[..., i0:i1]", "def padding_mask(lens):\n bs, max_len = len(lens), max(lens)\n mask = torch.zeros(bs, 1, max_len)\n for i, l in enumerate(lens):\n mask[i, :, :l] = 1\n mask = mask > 0\n return mask", "def _special_handle_slice(cls, op, X, W):\n tensor_list = []\n # slice add starts, ends, axes, steps\n append_inputs = {\n \"starts\": op.starts,\n \"ends\": op.ends,\n \"axes\": op.axes,\n \"steps\": op.steps,\n }\n for tmp_name, append_input in append_inputs.items():\n node_name = op.name + \":\" + tmp_name\n tensor_list.append(\n numpy_helper.from_array(np.array(append_input), node_name))\n return tensor_list", "def slice_tensors(data, tensor_slice):\n\n def _slice_tensor(tensor, tensor_slice):\n return tensor[tensor_slice]\n\n return recursively_apply(_slice_tensor, data, tensor_slice)", "def __call__(self, x):\n batch_shape = x.shape[:-2]\n signal_shape = x.shape[-2:]\n x = x.reshape((-1, 1) + signal_shape)\n if not self.pre_pad:\n x = self.padding_module(x)\n\n # Note: PyTorch is not effective to pad signals of size N-1 with N\n # elements, thus we had to add this fix.\n if self.pad_size[0] == self.input_size[0]:\n x = torch.cat([x[:, :, 1, :].unsqueeze(2), x, x[:, :, x.shape[2] - 2, :].unsqueeze(2)], 2)\n if self.pad_size[2] == self.input_size[1]:\n x = torch.cat([x[:, :, :, 1].unsqueeze(3), x, x[:, :, :, x.shape[3] - 2].unsqueeze(3)], 3)\n\n output = x.new_zeros(x.shape + (2,))\n output[..., 0] = x\n output = output.reshape(batch_shape + output.shape[-3:])\n return output", "def splice(features, left_num, right_num):\n shape = tf.shape(features)\n splices = []\n pp = tf.pad(features, [[left_num, right_num], [0, 0]])\n for i in range(left_num + right_num + 1):\n splices.append(tf.slice(pp, [i, 0], shape))\n splices = tf.concat(axis=1, values=splices)\n\n return splices", "def _process_decoder_input(self,target_data, tgt_sos_id, batch_size):\n x = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])\n y = tf.concat([tf.fill([batch_size, 1], tgt_sos_id), x], 1)\n\n for item in y:\n item.remove(3)\n return y", "def torch_aligned_random_crop(waves, frame_length):\r\n n, t = waves[0].shape\r\n crop_t = frame_length * (t//frame_length - 1)\r\n # offsets 
= [tf.random.uniform(shape=(), minval=0,\r\n # maxval=t-crop_t, dtype=tf.int32)\r\n # for _ in range(n)]\r\n offsets = [np.random.randint(size=(),low=0,high=t-crop_t,dtype=torch.int32)\r\n for _ in range(n)]\r\n\r\n # waves_unbatched = [tf.split(w, n, axis=0) for w in waves]\r\n waves_unbatched = [torch.split(w, n, dim=0) for w in waves]\r\n\r\n # wave_crops = [[tf.slice(w, begin=[0, o], size=[1, crop_t])\r\n # for w, o in zip(ws, offsets)] for ws in waves_unbatched]\r\n wave_crops = [[torch.narrow(torch.narrow(w,0,0,0+1),1,start=o,length=o+crop_t)\r\n for w, o in zip(ws, offsets)] for ws in waves_unbatched]\r\n\r\n #wave_crops = [tf.concat(wc, axis=0) for wc in wave_crops]\r\n wave_crops = [torch.cat(wc, dim=0) for wc in wave_crops]\r\n\r\n return wave_crops", "def generate_mask(data, tps, length, tp_union):\n tp_map = {tp_union[i].item(): i for i in range(len(tp_union))}\n\n mask = np.zeros((data.shape[0], tp_union.shape[0]))\n e_data = torch.zeros((data.shape[0], tp_union.shape[0], data.shape[2]))\n e_data = e_data.to(data.device)\n r_arr = []\n\n for i in range(len(mask)):\n inds = [tp_map[tps[i][j].item()] for j in range(length[i])]\n mask[i, inds] = 1\n e_data[i, inds] = data[i, :length[i]]\n r_arr.append(np.where(mask[i] == 1)[0])\n\n return mask, e_data, r_arr", "def _slice_at_axis(sl, axis):\n return (slice(None),) * axis + (sl,) + (...,)", "def trim(self, trim_samples):\n n = len(self.timestamps)\n self.timestamps = self.timestamps[:n - trim_samples]\n self.labels = self.labels[:n - trim_samples]\n self.emg = [x[:n - trim_samples] for x in self.emg]\n self.accel = [x[:n - trim_samples] for x in self.accel]\n self.gyro = [x[:n - trim_samples] for x in self.gyro]\n self.orient = [x[:n - trim_samples] for x in self.orient]", "def get_mask(tensor, padding_idx=0):\n mask = torch.ones(size=list(tensor.size()), dtype=torch.bool)\n mask[tensor == padding_idx] = False \n\n return mask", "def extract_signal_from_mask(data, mask):\r\n affine = data[0].affine\r\n resample_mask = resample_img(mask,affine)\r\n signal = apply_mask(data, resample_mask, ensure_finite=True)\r\n print(signal.shape, type(signal))\r\n\r\n return signal", "def get_padding_mask(inputs, padding_value=0):\n mask = tf.cast(tf.equal(inputs, padding_value), 'float32') \n mask = mask[:, tf.newaxis, tf.newaxis, :]\n return mask", "def slice_vec_bands(doc_vectors, start=0, end=None,\n ndims=300, drop_imag=False):\n return numpy.array([\n flatten_fft(unflatten_vec(dv, ndims=ndims), start, end, drop_imag)\n for dv in doc_vectors\n ])", "def reduce_X(X, mask):\n return X[:, mask]", "def extract_unpadded(self, data, ind):\n batch_range = torch.arange(0, data.shape[0], dtype=torch.int64).to(self.device)\n indices = torch.stack([batch_range, ind], dim=1)\n res = data[indices.transpose(0, 1).tolist()]\n return res", "def _prepare_onnx_paddings__tensorrt(g, input, pad):\n ctx = FUNCTION_REWRITER.get_context()\n torch_version = version_parse(torch.__version__)\n if torch_version.major == 1 and torch_version.minor < 10:\n return ctx.origin_func(g, input, pad)\n # The desired order of paddings is\n # dim_0_begin, dim_1_begin, ... 
, dim_0_end, ..., dim_n_end.\n # n is the dimension of input.\n # Assume zero-dimensions in the beginning, pad the \"pad\" sequence with\n # zeros in the beginning\n pad_len = torch.onnx.symbolic_opset9.size(\n g, pad, g.op('Constant', value_t=torch.tensor([0])))\n # Set extension = [0] * (dim * 2 - len(pad))\n rank = sym_help._get_tensor_rank(input)\n if rank is None:\n rank = g.op('Size', g.op('Shape', input))\n else:\n rank = g.op('Constant', value_t=torch.tensor(rank, dtype=torch.int64))\n extension = g.op(\n 'Sub',\n g.op('Mul', rank,\n g.op('Constant', value_t=torch.tensor(2, dtype=torch.int64))),\n pad_len)\n # Concat pad with extension: paddings = [dim_n_begin, dim_n_end,\n # dim_n-1_begin, dim_n-1_end, 0, 0, ... ]\n # Currently ONNX only supports int64 type for Pad\n pad = g.op('Cast', pad, to_i=sym_help.cast_pytorch_to_onnx['Long'])\n paddings = g.op(\n 'Concat',\n pad,\n g.op(\n 'ConstantOfShape',\n extension,\n value_t=torch.tensor([0], dtype=torch.int64)),\n axis_i=0)\n # Reshape and reverse order and collate first beginnings and then ends\n # paddings = [[..., 0, dim_n-1_begin, dim_n_begin],\n # [..., 0, dim_n-1_end, dim_n_end]]\n # Reshape back to 1-D paddings = [..., 0, dim_n - 1_begin, dim_n_begin,\n # ..., 0, dim_n - 1_end, dim_n_end]\n\n # replace original Constant-Transpose-Constant with Slices and Concat.\n paddings = torch.onnx.symbolic_opset10.flip(g, paddings, [0])\n begins = sym_help._slice_helper(\n g, paddings, axes=[0], starts=[1], ends=[0xffff], steps=[2])\n ends = sym_help._slice_helper(\n g, paddings, axes=[0], starts=[0], ends=[0xffff], steps=[2])\n paddings = g.op('Concat', begins, ends, axis_i=0)\n padding_c = g.op(\n 'Cast', paddings, to_i=sym_help.cast_pytorch_to_onnx['Long'])\n return padding_c", "def subArray(self,left,rightplus):\n lst=self.boolVals[:]\n \n sm=\"\"\n for i in range(len(lst)):\n if lst[i]:\n sm+=\"1\"\n else:\n sm+=\"0\"\n newlst=sm[left:rightplus]\n newlst=newlst[::-1]\n final=hex(int(newlst,2))\n final=final[2:]\n\n return BoolArray(final,rightplus-left)", "def get_sample_mask(self):" ]
[ "0.6374091", "0.56844383", "0.55673355", "0.5520634", "0.5495231", "0.538921", "0.53742844", "0.5365133", "0.5315971", "0.52649224", "0.52371657", "0.5231514", "0.52025896", "0.51906943", "0.51897913", "0.5185912", "0.51694137", "0.51603997", "0.5145077", "0.50990677", "0.50931466", "0.50547314", "0.5050602", "0.50373733", "0.5037264", "0.5027677", "0.50158036", "0.50128084", "0.4997449", "0.49925423" ]
0.65732765
0
Plots the train accuracy, test accuracy, test precision and test recall, and saves them in the data folder. Also saves the scores in the data folder in 'scores.csv'
def plot_scores(self): results_path = DATA_PATH.joinpath("results") if not results_path.is_dir(): os.mkdir(results_path) scores = DataFrame(columns=["Import", "Months", "Train Accuracy", "Test Accuracy", "Precision", "Recall"]) for importance, tree_list in self.trees.items(): for i, tree in enumerate(tree_list): train_accuracy, test_accuracy, precision, recall = tree.get_scores() scores.loc[len(scores)] = [importance, i+1, train_accuracy, test_accuracy, precision, recall] for i, col in enumerate(scores.columns.values): if col == "Months" or col == "Import": continue pivot_scores = pandas.pivot_table(scores, values=col, index="Months", columns="Import") ax = pivot_scores.plot(kind='bar') ax.set_ylim([0, 1]) plt.xticks(rotation=None) plt.title(col) plt.xlabel("Months of assessments") plt.ylabel(col) plt.legend(loc="lower right") path = str(results_path.joinpath(col + ".png")) plt.savefig(path) score_path = str(results_path.joinpath("scores.csv")) scores.to_csv(score_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_accuracy(model_fit, save_folder): \n train_acc = model_fit.history['binary_accuracy']\n val_acc = model_fit.history['val_binary_accuracy']\n epoch_axis = np.arange(1, len(train_acc) + 1)\n plt.title('Train vs Validation Accuracy')\n plt.plot(epoch_axis, train_acc, 'b', label='Train Acc')\n plt.plot(epoch_axis, val_acc,'r', label='Val Acc')\n plt.xlim([1, len(train_acc)])\n plt.xticks(np.arange(min(epoch_axis), max(epoch_axis) + 1, round((len(train_acc) / 10) + 0.5)))\n plt.legend(loc='lower right')\n plt.ylabel('Accuracy')\n plt.xlabel('Epochs')\n plt.savefig(save_folder + '/accuracy.png')\n plt.show()\n plt.close()", "def cross_validation_visualization_accuracy(epochs, accs, save=False, filename=\"cross_validation_acc\"):\n plt.plot(epochs, accs, marker=\".\", color='r', label='accuracy')\n plt.xlabel(\"epoch\")\n plt.ylabel(\"accuracy\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n if (save):\n plt.savefig(filename)", "def main():\n args = parse_args()\n\n with open(args.train_details_json, mode='r', encoding='utf-8') as json_f:\n results_dict = json.load(json_f)[-1]\n\n losses_plot = plt.figure()\n plt.plot(range(1, len(results_dict['train_loss']) + 1),\n results_dict['train_loss'])\n plt.plot(range(1, len(results_dict['val_loss']) + 1),\n results_dict['val_loss'])\n plt.plot(range(1, len(results_dict['test_loss']) + 1),\n results_dict['test_loss'])\n plt.legend(['train', 'val', 'test'])\n plt.title(f'loss vs epoch for {args.model} model on {args.dataset} dataset')\n plt.xlabel('epoch')\n plt.ylabel('loss')\n plt.grid(True)\n losses_plot.set_size_inches((8, 8))\n losses_plot.savefig(\n os.path.join(FIGURES_DIR,\n f'{args.dataset}_{args.model}_losses_plot.png'))\n\n accuracies_plot = plt.figure()\n plt.plot(range(1, len(results_dict['train_acc']) + 1),\n results_dict['train_acc'])\n plt.plot(range(1, len(results_dict['val_acc']) + 1),\n results_dict['val_acc'])\n plt.plot(range(1, len(results_dict['test_acc']) + 1),\n results_dict['test_acc'])\n plt.legend(['train', 'val', 'test'])\n plt.title(f'accuracy vs epoch for {args.model} '\n f'model on {args.dataset} dataset')\n plt.xlabel('epoch')\n plt.ylabel('accuracy')\n plt.grid(True)\n accuracies_plot.set_size_inches((8, 8))\n accuracies_plot.savefig(\n os.path.join(FIGURES_DIR,\n f'{args.dataset}_{args.model}_accuracies_plot.png'))", "def plot_train_test_data(train, test):\n fig = plt.figure()\n ax1 = fig.add_subplot(1, 2, 1)\n ax1.spy(train, precision=0.01, markersize=0.5)\n ax1.set_xlabel(\"Users\")\n ax1.set_ylabel(\"Items\")\n ax1.set_title(\"Training data\")\n ax2 = fig.add_subplot(1, 2, 2)\n ax2.spy(test, precision=0.01, markersize=0.5)\n ax2.set_xlabel(\"Users\")\n ax2.set_ylabel(\"Items\")\n ax2.set_title(\"Test data\")\n plt.tight_layout()\n plt.savefig(\"../results/train_test\")\n plt.show()", "def train_visualization(output_path): \n log_path = output_path + 'output.log'\n Train_Cost, Valid_Cost, Test_Cost, Train_Acc, Valid_Acc, Test_Acc = log_reader(log_path)\n n_epoch = len(Train_Cost)\n\n x1 = range(n_epoch)\n x2 = range(n_epoch)\n y1 = Train_Cost\n y2 = Valid_Cost\n y3 = Test_Cost\n y4 = Train_Acc\n y5 = Valid_Acc\n y6 = Test_Acc\n plt.subplot(2, 1, 1)\n plt.plot(x1, y1, label=\"Train_Cost\", linewidth=2)\n plt.plot(x1, y2, label=\"Valid_Cost\", linewidth=2)\n plt.plot(x1, y3, label=\"Test_Cost\", linewidth=2)\n\n plt.title('binary cross entropy vs. 
epoches')\n plt.ylabel('binary cross entropy')\n plt.legend(loc='best')\n plt.subplot(2, 1, 2)\n plt.plot(x2, y4, label=\"Train_Acc\", linewidth=2)\n plt.plot(x2, y5, label=\"Valid_Acc\", linewidth=2)\n plt.plot(x2, y6, label=\"Test_Acc\", linewidth=2)\n plt.xlabel('Accuracy@20 vs. epoches')\n plt.ylabel('Accuracy@20')\n plt.legend(loc='best')\n plt.savefig(output_path + 'loss_fig.png')\n # plt.show()", "def plot_model_performance(train_loss, train_acc, train_val_loss, train_val_acc, save_figure_path):\n\n green = '#72C29B'\n orange = '#FFA577'\n\n with plt.xkcd():\n fig, (ax1, ax2) = plt.subplots(2, figsize=(10, 8))\n ax1.plot(range(1, len(train_loss) + 1), train_loss, green, linewidth=5,\n label='training')\n ax1.plot(range(1, len(train_val_loss) + 1), train_val_loss, orange,\n linewidth=5, label='validation')\n ax1.set_xlabel('# epoch')\n ax1.set_ylabel('loss')\n ax1.tick_params('y')\n ax1.legend(loc='upper right', shadow=False)\n ax1.set_title('Model loss through #epochs', fontweight='bold')\n\n ax2.plot(range(1, len(train_acc) + 1), train_acc, green, linewidth=5,\n label='training')\n ax2.plot(range(1, len(train_val_acc) + 1), train_val_acc, orange,\n linewidth=5, label='validation')\n ax2.set_xlabel('# epoch')\n ax2.set_ylabel('accuracy')\n ax2.tick_params('y')\n ax2.legend(loc='lower right', shadow=False)\n ax2.set_title('Model accuracy through #epochs', fontweight='bold')\n\n plt.tight_layout()\n plt.show()\n fig.savefig(save_figure_path)\n plt.close(fig)", "def plot_accuracy(self):\n plot_title, img_title = self.prep_titles(\"\")\n test_legend = ['training data', 'test data']\n\n # Data for plotting x- and y-axis\n x = np.arange(1, CFG.EPOCHS + 1)\n y = [self.tr_accuracy, self.test_accuracy]\n\n # prints x and y-axis values\n print(f'x: {x}')\n print(f'training: {self.tr_accuracy}')\n print(f'test: {self.test_accuracy}')\n\n plt.figure(figsize=(CFG.FIG_WIDTH, CFG.FIG_HEIGHT))\n\n # Create the lineplot\n for line in range(2):\n ax = sns.lineplot(x=x, y=y[line], color=CFG.COLOR_ACCURACY[line], label=test_legend[line])\n\n if CFG.ANNOTATE:\n ax.set(xlabel='Epochs',\n ylabel='Accuracy (%)',\n title=plot_title,\n xlim=(1, CFG.EPOCHS + 2),\n ylim=(0, 119))\n\n for line in range(2):\n for e in range(0, CFG.EPOCHS):\n if y[line][e] > CFG.ANNOTATE_LEVEL:\n value = \"{:.2f}\".format(y[line][e])\n label = \"epoch \" + str(e + 1) + \"\\n\" + value + \"%\"\n plt.annotate(label,\n xy=(x[e], y[line][e]),\n alpha=1,\n size=9,\n rotation=45,\n textcoords='offset pixels', xytext=(0, 7),\n ha='left', va='bottom')\n else:\n ax.set(xlabel='Epochs',\n ylabel='Accuracy (%)',\n title=plot_title,\n xlim=(1, CFG.EPOCHS),\n ylim=(0, 102))\n\n ax.legend(loc='best')\n\n self.save_plot(img_title)\n plt.show()", "def plot_data(losses, accuracies, name):\n # convert accuracies to percentages\n accuracies['Train'] = [acc * 100 for acc in accuracies['Train']]\n accuracies['Valid'] = [acc * 100 for acc in accuracies['Valid']]\n # set fontsize\n plt.rcParams.update({'font.size': 13})\n fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(8,5))\n ax1.set_xlabel('Number of Epochs')\n ax1.set_ylabel('Cross Entropy Loss')\n ax1.set_ylim(0,2)\n ax1.plot(losses['Train'], label='Training')\n ax1.plot(losses['Valid'], label='Validation')\n ax1.legend(loc='upper right')\n\n ax2.set_xlabel('Number of Epochs')\n ax2.set_ylabel('Accuracy (%)')\n ax2.set_ylim(0,100)\n ax2.plot(accuracies['Train'], label='Training')\n ax2.plot(accuracies['Valid'], label='Validation')\n ax2.legend(loc='upper left')\n\n fig.tight_layout()\n 
fig.savefig('../outputs/' + name)", "def plot_observations():\n plt.plot(history.history['loss'], label='training_loss')\n plt.plot(history.history['val_loss'], label='val_loss ')\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.show()\n\n plt.plot(history.history['acc'], label='accuracy')\n plt.plot(history.history['val_acc'], label='val_accuracy')\n plt.xlabel('Epoch')\n plt.ylabel('Accuracy')\n plt.legend(loc='lower right')\n plt.show()\n\n test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)\n print(\"Test Accuracy:\", test_acc)", "def plot_acc(acc_v, acc_t, save_plots_path):\n\n plt.figure()\n plt.plot(acc_v, label='Validation acc')\n plt.plot(acc_t, label='Training acc')\n plt.legend()\n title = 'Accuracy per epoch'\n plt.title(title)\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Accuracy\")\n plt.savefig(save_plots_path + \"swag_accuracy_plot.png\")", "def plot_results(\n train_data: tuple[Tensor, Tensor],\n test_data: tuple[Tensor, Tensor],\n correct_class: Tensor\n):\n #fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(21,7), subplot_kw=dict(box_aspect=1))\n fig1, (ax1, ax2) = plt.subplots(1, 2, figsize=(14,7), subplot_kw=dict(box_aspect=1))\n fig2, ax3 = plt.subplots(figsize=(7,7), subplot_kw=dict(box_aspect=1))\n ax1.set_title('Training data')\n plot_dataset(train_data, ax1)\n\n ax2.set_title('Test data')\n plot_dataset(test_data, ax2)\n\n ax3.set_title('Test prediction correctness')\n plot_dataset((test_data[0], correct_class.int()), ax3, cmap={0: '#ff0000', 1: '#00ff00'})\n \n fig1.savefig('plots/datasets')\n fig2.savefig('plots/predictions')\n plt.show()", "def plot_train_results(metrics2record, loss_metric,\n train_metrics, test_metrics):\n pyplot.figure(figsize=(10, 5))\n min_, max_ = np.min(loss_metric), np.max(loss_metric)\n lg, = pyplot.plot(loss_metric)\n pyplot.yticks(min_ + np.arange(5) * (max_ - min_))\n # if learning_rate is not None:\n # lg, = pyplot.plot(learning_rate)\n pyplot.title('Loss')\n pyplot.xlabel('Epoch')\n pyplot.yscale('log')\n pyplot.show()\n\n for prm in basic_metrics:\n if prm in metrics2record:\n leg = []\n met_idx = metrics2record.index(prm)\n pyplot.figure(figsize=(10, 5))\n lg, = pyplot.plot(train_metrics[:, met_idx], label=('train'))\n leg.append(lg)\n lg, = pyplot.plot(test_metrics[:, met_idx], label=('test'))\n leg.append(lg)\n\n pyplot.legend(handles=leg)\n pyplot.title(prm)\n pyplot.xlabel('Epoch')\n pyplot.show()\n\n has_prf = any([(prm in PRF_metrics) for prm in metrics2record])\n if has_prf:\n pyplot.figure(figsize=(10, 5))\n leg = []\n for prm in PRF_metrics:\n if prm in metrics2record:\n met_idx = metrics2record.index(prm)\n lg, = pyplot.plot(train_metrics[:, met_idx],\n label=(prm + ':train'))\n leg.append(lg)\n\n for prm in PRF_metrics:\n if prm in metrics2record:\n met_idx = metrics2record.index(prm)\n lg, = pyplot.plot(test_metrics[:, met_idx],\n label=(prm + ':test'))\n leg.append(lg)\n\n pyplot.legend(handles=leg)\n pyplot.title('Precision / Recall')\n pyplot.xlabel('Epoch')\n pyplot.show()", "def save_only_test_results(path, losses_test, accuracy_test, accuracy_det_test):\n print('Save pretrained model losses and accuracies on test dataset..')\n np.save(os.path.join(path, 'losses_test.npy'), np.array(losses_test))\n np.save(os.path.join(path, 'accuracy_test.npy'), np.array(accuracy_test))\n np.save(os.path.join(path, 'accuracy_detailed_test.npy'), np.array(accuracy_det_test))\n plot_dataframe(pd.DataFrame(losses_test, columns = ['Batch', 'Loss']),\n save_path=path, save_name='losses_test', title='Losses [test 
dataset]',\n x_name='Batch', y_name='Loss', ending='.png', ylog=False, figsize=(10,5),\n xints=float, yints=float)\n plot_dataframe(pd.DataFrame(accuracy_test, columns = ['Batch', 'Accuracy']),\n save_path=path, save_name='accuracy_test', title='Accuracy [test dataset] in %',\n x_name='Batch', y_name='Accuracy', ending='.png', ylog=False, figsize=(10,5),\n xints=int, yints=int)", "def accuracy_plot(training, test, layers, data_size, n_neighbours, learning_rate, dropout_rate):\n\n plt.figure()\n plt.plot(training, label=\"Training\")\n plt.plot(test, label=\"Test\")\n plt.xlabel(\"Iterations\", size='medium')\n plt.ylabel(\"Accuracy function (%)\", size='medium')\n plt.suptitle(\"Accuracy function while training the neural network\", size='medium', ha='center')\n plt.title(\"layers: {} with dropout rate of {}, learning rate: {}\".format(layers, dropout_rate, learning_rate),\n size='small', ha='center')\n if n_neighbours == 0:\n plt.figtext(0.83, 0.80, \"Neighbours\\nexcluded\", size='medium')\n else:\n plt.figtext(0.83, 0.80, \"{} neighbours\\nincluded\".format(n_neighbours), size='medium')\n plt.figtext(0.83, 0.70, \"{}\\nsamples\".format(data_size), size='medium')\n plt.legend(loc='right', bbox_to_anchor=(1.3, 0.5))\n plt.subplots_adjust(right=0.8)\n\n working_dir = os.path.dirname(os.path.abspath(__file__))\n saving(working_dir + \"/output_ANN/accuracy_plots/{}_accuracy_{}\".format(n_neighbours, data_size))", "def plot_training_results(clfs, train_scores, test_scores):\n\n # Set graph format\n sns.set_style(\"whitegrid\")\n sns.set_context(\"paper\", font_scale=1, rc={\"lines.linewidth\": 1})\n ax = plt.subplot(111)\n w = 0.5\n x = np.arange(len(train_scores))\n ax.set_yticks(x + w)\n ax.legend((train_scores[0], test_scores[0]), (\"Train Scores\", \"Test Scores\"))\n names = ['SVC', 'LR', 'KNN', 'GNB', 'ADA', 'RF']\n\n # Loop throuugh classifiers\n # clfnames = []\n # for i in range(0, len(clfs)):\n # # Define temporary variables\n # clfname = clfnames[i]\n # # clf_name = clf.__class__.__name__\n # # Create and store name\n # name = \"{}\".format(clf_name)\n # names.append(name)\n\n # Plot all names in horizontal bar plot\n ax.set_yticklabels((names), fontsize=20)\n plt.xlim(0.5, 0.56)\n plt.barh(x, test_scores, color='b', alpha=0.6)\n plt.title(\"Test Data Accuracy Scores\", fontsize=30)\n fig = plt.figure(1)\n\n plt.show()", "def plot_train_test_data(train, test):\n fig = plt.figure()\n ax1 = fig.add_subplot(1, 2, 1)\n ax1.spy(train, precision=0.01, markersize=0.5)\n ax1.set_xlabel(\"Users\")\n ax1.set_ylabel(\"Items\")\n ax1.set_title(\"Training data\")\n ax2 = fig.add_subplot(1, 2, 2)\n ax2.spy(test, precision=0.01, markersize=0.5)\n ax2.set_xlabel(\"Users\")\n ax2.set_ylabel(\"Items\")\n ax2.set_title(\"Test data\")\n plt.tight_layout()\n plt.savefig(\"train_test\")\n plt.show()", "def plot_train_test_data(train, test):\n fig = plt.figure(figsize=(16, 8))\n ax1 = fig.add_subplot(1, 2, 1)\n ax1.spy(train, precision=0.01, markersize=0.05, aspect=\"auto\")\n ax1.set_xlabel(\"Users\")\n ax1.set_ylabel(\"Items\")\n ax1.set_title(\"Training data\")\n ax2 = fig.add_subplot(1, 2, 2)\n ax2.spy(test, precision=0.01, markersize=0.05, aspect=\"auto\")\n ax2.set_xlabel(\"Users\")\n ax2.set_ylabel(\"Items\")\n ax2.set_title(\"Test data\")\n plt.tight_layout()\n plt.savefig(\"train_test\")\n plt.show()", "def plot_acc(model_dir):\n ## extract loss from csv\n file_dir = os.path.join(model_dir, 'acc.csv')\n data = pd.read_csv(file_dir)\n epochs = data['epoch'].ravel()\n acc_train = 
data['acc_train'].ravel()\n acc_test = data['acc_test'].ravel()\n # epoch,acc_train,acc_test\n\n ## Theoretical Loss\n fig, ax = plt.subplots(1, 1, figsize=(7, 5), sharey=True, sharex=True, dpi=400)\n ax.plot(epochs, acc_train, label='train', color='green', alpha=0.8)\n ax.plot(epochs, acc_test, label='test', color='red', alpha=0.8)\n ax.set_ylabel('Accuracy', fontsize=10)\n ax.set_xlabel('Epoch', fontsize=10)\n ax.legend(loc='lower right', prop={\"size\": 15}, ncol=3, framealpha=0.5)\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n plt.tight_layout()\n\n ## create saving directory\n acc_dir = os.path.join(model_dir, 'figures', 'acc')\n os.makedirs(acc_dir, exist_ok=True)\n file_name = os.path.join(acc_dir, 'accuracy.png')\n plt.savefig(file_name, dpi=400)\n print(\"Plot saved to: {}\".format(file_name))\n file_name = os.path.join(acc_dir, 'accuracy.pdf')\n plt.savefig(file_name, dpi=400)\n plt.close()\n print(\"Plot saved to: {}\".format(file_name))", "def save_history(history, save_folder_path=None, params=['acc', 'val_acc']):\n \n print(\"[INFO] Showing train and test accuracy plot...\")\n\n # Plot all lines in parameters\n for param in params:\n plt.plot(history.history[param])\n \n plt.title('Model accuracy')\n plt.ylabel('Accuracy')\n plt.xlabel('Epoch')\n plt.legend(['Train', 'Test'], loc='upper left')\n\n if save_folder_path is not None:\n save_path = \"{}/{}\".format(save_folder_path, \"train_history.png\")\n history_fig = plt.gcf() # get current figure\n history_fig.savefig(save_path)\n print(\"[INFO] Plot saved to {0}\".format(save_path))\n\n plt.show()", "def save_accuracy_chart(self):\n history = self.model.history.history\n fig = plt.figure()\n plt.plot(history['accuracy'], label='Training Accuracy')\n plt.plot(history['val_accuracy'],label='Validation Set Accuracy')\n plt.legend()\n fig.savefig('model_accuracy.png')", "def make_accuracy_plot(num_trials=10):\n data = load_digits()\n # print data.DESCR\n train_percentages = range(5, 95, 5)\n test_accuracies = numpy.zeros(len(train_percentages))\n\n for i in range(len(train_percentages)):\n individual_trial_accuracies = []\n for j in range(num_trials):\n X_train, X_test, y_train, y_test = train_test_split(data.data, data.target, train_size=train_percentages[i]*.01)\n model = LogisticRegression(C=10**-10)\n model.fit(X_train, y_train)\n individual_trial_accuracies.append(model.score(X_test, y_test))\n test_accuracies[i] = numpy.mean(individual_trial_accuracies)\n\n fig = plt.figure()\n plt.plot(train_percentages, test_accuracies, 'b')\n plt.xlabel('Percentage of Data Used for Training')\n plt.ylabel('Accuracy on Test Set')\n plt.show()", "def plot_model_performance(train_loss, train_acc, train_val_loss, train_val_acc):\n \n green = '#72C29B'\n orange = '#FFA577'\n \n with plt.xkcd():\n # plot model loss\n fig, ax1 = plt.subplots()\n ax1.plot(range(1, len(train_loss) + 1), train_loss, green, linewidth=5,\n label='training')\n ax1.plot(range(1, len(train_val_loss) + 1), train_val_loss, orange,\n linewidth=5, label='validation')\n ax1.set_xlabel('# epoch')\n ax1.set_ylabel('loss')\n ax1.tick_params('y')\n ax1.legend(loc='upper right', shadow=False)\n # plot model accuracy\n fig, ax2 = plt.subplots()\n ax2.plot(range(1, len(train_acc) + 1), train_acc, green, linewidth=5,\n label='training')\n ax2.plot(range(1, len(train_val_acc) + 1), train_val_acc, orange,\n linewidth=5, label='validation')\n ax2.set_xlabel('# epoch')\n ax2.set_ylabel('accuracy')\n ax2.tick_params('y')\n ax2.legend(loc='lower right', 
shadow=False)\n plt.show()", "def cross_validation_visualization_accuracy_multiple(epochs, accs, save=False, filename=\"cross_validation_acc_multiple\"):\n \n for i in range(accs.shape[0]):\n plt.plot(epochs, accs[i], marker=\".\", color='r', label=str(i+1)+'th accuracy')\n \n plt.xlabel(\"epoch\")\n plt.ylabel(\"accuracy\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n if (save):\n plt.savefig(filename)", "def plot_results(self):\n\n\n f1, ax1 = plt.subplots()\n h1, = ax1.plot(self.history[\"step\"], self.history[\"trainLoss\"],\\\n \"b-\", label=\"Loss - Train\")\n h2, = ax1.plot(self.history[\"step\"], self.history[\"validLoss\"],\\\n \"b.\", label=\"Loss - Validation\")\n\n ax1.set_ylabel(\"Loss\", color = \"blue\")\n ax1.tick_params(\"y\", color = \"blue\")\n ax1.yaxis.label.set_color(\"blue\")\n ax1.set_xlabel(\"Training Steps [{}]\".format(self.FLAGS.eval_every))\n\n ax2 = ax1.twinx()\n h3, = ax2.plot(self.history[\"step\"], self.history[\"trainAccr\"], \"r-\",\\\n label = \"Accuracy - Train\")\n h4, = ax2.plot(self.history[\"step\"], self.history[\"validAccr\"], \"r.\",\\\n label = \"Accuracy - Validation\")\n\n ax2.set_ylabel(\"Accuracy\", color = \"red\")\n ax2.tick_params(\"y\", color = \"red\")\n ax2.yaxis.label.set_color(\"red\")\n\n hds = [h1,h2,h3,h4]\n lbs = [l.get_label() for l in hds]\n ax1.legend(hds, lbs)\n f1.tight_layout()\n plt.savefig(\"trainingHistory.png\")\n\n plt.close(f1)\n #plt.show()", "def plot_testacc_numlabels(dataset, models, results, path, suffix):\n res = results[results['dataset'] == dataset]\n plt.close()\n for mod in models:\n res_model = res[res['model'] == mod]\n res_model.sort_values(by=['num_labels'], inplace=True)\n plt.scatter(res_model['num_labels'], res_model['test_acc'],\n label=mod, marker='o')\n plt.ylabel('test accuracy')\n plt.xlabel('num labels')\n plt.title('Dataset: {}'.format(dataset))\n plt.legend()\n plt.tight_layout()\n if suffix is None:\n plt.savefig('{}{}_performance.png'.format(path, dataset))\n else:\n plt.savefig('{}{}_performance_{}.png'.format(path, dataset,\n suffix))", "def save_learning_curves(history, run_name, base_path=\"plots/\"):\n path = os.path.join(base_path, run_name)\n if not os.path.isdir(path):\n os.makedirs(path)\n losses = {k: history[k] for k in ['loss', 'val_loss']}\n accuracies = {k: history[k] for k in ['acc', 'val_acc']}\n x = range(len(losses['loss']))\n fn_losses = os.path.join(path, \"loss.png\")\n fn_accuracies = os.path.join(path, \"accuracy.png\")\n ut.save_plot(x, ys=losses, xlabel=\"epoch\", ylabel=\"loss\",\n title=run_name, path=fn_losses)\n ut.save_plot(x, ys=accuracies, xlabel=\"epoch\", ylabel=\"accuracy\",\n title=run_name, path=fn_accuracies)", "def cross_validation_visualization(lambds, score_tr, score_te):\n plt.semilogx(lambds, score_tr, marker=\".\", color='b', label='train score');\n plt.semilogx(lambds, score_te, marker=\".\", color='r', label='test score');\n plt.xlabel(\"lambda\")\n plt.ylabel(\"score\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(\"cross_validation_test\")", "def plot(training_losses, validation_losses, epochs, directory_name):\n plt.figure(figsize=(20, 10))\n\n x = np.linspace(1, epochs, epochs)\n training_losses = np.array(training_losses)\n validation_losses = np.array(validation_losses)\n\n plt.title(\"Learning curve over Epochs\")\n\n plt.xlabel(\"Epochs\")\n plt.ylabel(\"Average Loss\")\n\n plt.plot(x, training_losses, color='purple', marker=\".\", label='Training loss')\n plt.plot(x, 
validation_losses, color='orange', marker=\".\", label='Validation loss')\n plt.legend()\n plt.savefig('./' + directory_name + '/Learning_curves-' + str(epochs) + '.png')\n pass", "def plot_cv_train_test(test_avg, train_avg, lambdas, path):\n\n plt.plot(lambdas, test_avg, marker = \"o\", color=\"green\", label=\"validating cv error\")\n plt.plot(lambdas, train_avg, marker = \"v\", color=\"blue\", label=\"training cv error\" )\n \n print(train_avg[0])\n print(test_avg[0])\n \n plt.title(\"Cross Validation Error for Different Regularization Parameters\")\n plt.ylabel(\"10f cv RMSE\")\n plt.ylim(0.86 , 0.99)\n plt.xlabel(\"$\\lambda$\")\n plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n plt.grid()\n plt.savefig(\"../results/\"+path)\n plt.show()", "def plot_training_info(case, metrics, save, history):\n val = False\n if 'val_accuracy' in history and 'val_loss' in history:\n val = True\n plt.ioff()\n if 'accuracy' in metrics:\n fig = plt.figure()\n plt.plot(history['accuracy'])\n if val:\n plt.plot(history['val_accuracy'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n if val:\n plt.legend(['train', 'val'], loc='upper left')\n else:\n plt.legend(['train'], loc='upper left')\n if save:\n plt.savefig(case + 'accuracy.png')\n plt.gcf().clear()\n else:\n plt.show()\n plt.close(fig)\n\n # summarize history for loss\n if 'loss' in metrics:\n fig = plt.figure()\n plt.plot(history['loss'])\n if val:\n plt.plot(history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n # plt.ylim(1e-3, 1e-2)\n plt.yscale(\"log\")\n if val:\n plt.legend(['train', 'val'], loc='upper left')\n else:\n plt.legend(['train'], loc='upper left')\n if save:\n plt.savefig(case + 'loss.png')\n plt.gcf().clear()\n else:\n plt.show()\n plt.close(fig)" ]
[ "0.7224669", "0.71921223", "0.71734667", "0.71697706", "0.7110309", "0.7072749", "0.70446795", "0.7005062", "0.6988307", "0.6936626", "0.693535", "0.6925998", "0.69201785", "0.6885906", "0.6869337", "0.6860697", "0.6812449", "0.6787487", "0.67683053", "0.6765857", "0.6717797", "0.6645104", "0.6630031", "0.66063106", "0.6606292", "0.66051084", "0.6588572", "0.6527335", "0.651657", "0.65049833" ]
0.7269786
0
Test remove missing feature in base marshmallow Schema Also test opting out by setting a pure marshmallow Schema for base
def test_marshmallow_base_schema_remove_missing(self, base_schema): # Typically, we'll use it in all our schemas, so let's define base # Document and EmbeddedDocument classes using this base schema class @self.instance.register class MyDocument(Document): MA_BASE_SCHEMA_CLS = base_schema class Meta: abstract = True @self.instance.register class MyEmbeddedDocument(EmbeddedDocument): MA_BASE_SCHEMA_CLS = base_schema class Meta: abstract = True @self.instance.register class Accessory(MyEmbeddedDocument): brief = fields.StrField() value = fields.IntField() @self.instance.register class Bag(MyDocument): item = fields.EmbeddedField(Accessory) content = fields.ListField(fields.EmbeddedField(Accessory)) data = { 'item': {'brief': 'sportbag'}, 'content': [ {'brief': 'cellphone'}, {'brief': 'lighter'}] } dump = { 'id': None, 'content': [ {'brief': 'cellphone', 'value': None}, {'brief': 'lighter', 'value': None} ], 'item': {'brief': 'sportbag', 'value': None} } remove_missing_dump = { 'item': {'brief': 'sportbag'}, 'content': [ {'brief': 'cellphone'}, {'brief': 'lighter'} ] } expected_dump = { BaseMarshmallowSchema: remove_missing_dump, ma.Schema: dump, }[base_schema] bag = Bag(**data) ma_schema = Bag.schema.as_marshmallow_schema() assert ma_schema().dump(bag) == expected_dump
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_base_schema_ignores_unknown_fields():\n assert BaseSchema().load({\"unknown\": \"field\"}) == {}", "def test_schema_strict():\n path = os.path.join(extensiondir, 'release-schema.json')\n if os.path.isfile(path):\n with open(path) as f:\n data = json.load(f)\n\n original = deepcopy(data)\n add_validation_properties(data)\n\n assert data == original, f'{path} is missing validation properties, run: ocdskit schema-strict {path}'", "def test_required_fields_schema_version(self):\n\n del self.validator.adata.uns[\"schema_version\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: adata has no schema definition in 'adata.uns'. \"\n \"Validation cannot be performed.\"\n ],\n )", "def test_build_schema_badschema(self):\n dummy_meta = {\n 'schema': '',\n 'version': '1.0.0',\n 'update': datetime.datetime.utcnow().isoformat(),\n }\n\n with pytest.raises(jsonschema.exceptions.ValidationError):\n metadata = schema_utils.build_metadata(\n self.dummy_schema,\n dummy_meta,\n schema_utils.Update.first_run\n )", "def test_schema_default_missing_validator_openapi():\n converter = ce.ObjectTypeConverter(ce.OAS3TypeConversionDispatcher())\n test_schemas = [\n Mapping,\n Missing,\n Default,\n Validator,\n DefaultMissing,\n DefaultValidator,\n MissingValidator,\n DefaultMissingValidator,\n DefaultDropValidator,\n DefaultDropRequired,\n ]\n for schema in test_schemas:\n converted = converter.convert_type(schema())\n assert converted == schema.schema_expected, f\"Schema for [{schema.__name__}] not as expected\"", "def get_schema():\n if TEST_COLLECTION:\n return TestSchema()\n return MySchema()", "def test_create_feature_set_missing_schema_name(test_app, create_schema):\n APP.dependency_overrides[crud.get_db] = lambda: (yield create_schema) # Give the \"server\" the same db session\n\n response = test_app.post('/feature-sets', json=feature_set_no_schema, auth=basic_auth)\n\n logger.info(f'status: {response.status_code}, -- message: {response.json()}')\n\n assert response.status_code in range(400,500), 'Should fail because the feature set has no schema name'", "def test_schema_default_missing_validator_combinations(test_case):\n evaluate_test_cases([test_case])", "def test_build_schema_no_update(self):\n metadata = schema_utils.build_metadata(\n self.dummy_schema,\n self.fake_metadata,\n schema_utils.Update.no_update,\n )\n assert metadata == self.fake_metadata", "def __init__(self):\n super(ObjectSchema, self).__init__()\n self.is_allow_undefined = False", "def test_name_mandatory(self):\n field = self.base_field\n del field['name']\n with self.assertRaises(FieldSchemaError):\n SchemaField(field)\n # no blank\n field = self.base_field\n field['name'] = ''\n with self.assertRaises(FieldSchemaError):\n SchemaField(field)", "def testNoDefaultSchemata(self):\n self.failUnless('default' not in self.person.schema.getSchemataNames())", "def test_validate_error_wrong_schema(tmp_config): # noqa # pylint: disable=W0621\n from canarieapi.api import APP # isort: skip # noqa\n\n APP.config.update({\n \"SERVICES\": {\"random\": \"bad\"},\n \"PLATFORM\": {\"invalid\": \"error\"},\n })\n\n with pytest.raises(jsonschema.ValidationError):\n validate_config_schema(False)", "def test_attribute_missing_validation():\n\n @attr.s\n class Foo(object):\n something = attr.ib()\n\n with pytest.raises(UnextractableSchema):\n extract_jsonschema(Foo)", "def _patch_schema(self):\n fields = get_json()['data']['attributes'].keys()\n return make_entity_schema(\n self.SCHEMA, 
self.RESOURCE_NAME,\n make_data_schema(\n self.SCHEMA, id_required=True,\n only=fields, partial=True\n )\n )", "def test_custom_schema():\n graph = create_object_graph(\"example\", testing=True)\n codec = graph.pubsub_message_schema_registry.find(DerivedSchema.MEDIA_TYPE)\n assert_that(codec.schema, is_(instance_of(DerivedSchema)))", "def testLazySchemaForCreation(self):\n api = self.ApiFromDiscoveryDoc(self.__TEST_DISCOVERY_DOC)\n for schema in ['Activity', 'Comment', 'ActivityObject']:\n self.assertTrue(isinstance(api._schemas[schema], Schema))", "def test_model(base, fake_session):\n\n # Make a dummy model\n\n # these fields should be ignored and should not appear in the model\n ignored = (\"field1\", \"field2\", \"field3\")\n\n # these fields are in the model, but should not get dumped to json\n loadonly = (\"field6\", \"field7\")\n\n @add_schema\n class MyModel(base):\n fields = dict(ignore=ignored, load_only=loadonly)\n\n # load the model from dummy data\n values = range(10)\n keys = [\"field{}\".format(x) for x in values]\n data = dict(zip(keys, values))\n m = MyModel.load_from(data, fake_session)\n\n return m, ignored, loadonly, data, MyModel", "def test_meta_data_is_not_inherited(self):", "def schema() -> None:\n pass", "def test_compare_schemas_happypath(self):\n status = schema_utils.compare_schemas(\n self.base_schema,\n self.base_schema\n )\n\n assert status == schema_utils.Update.no_update", "def test_can_import_xmlschema(self):\n self.assertEqual(xmlschema.__name__, 'xmlschema')", "async def upgradeSchema(self) -> None:", "def test_validate_business_schema_on_empty_schema():\n data = {\n 'business': {\n 'legalType': 'BC'\n }\n }\n\n is_valid, errors = validate(data, 'business', validate_schema=True)\n\n for err in errors:\n print(err.message)\n\n assert not is_valid", "def test_empty_schema_cant_find_module() -> None:\n with patch(\"inspect.getmodule\", return_value=None):\n cv.empty_config_schema(\"test_domain\")({\"test_domain\": {\"foo\": \"bar\"}})", "def test_method_missing_from_schema(monkeypatch) -> None:\n monkeypatch.setattr(django_settings, 'SWAGGER_TESTER', {'PATH': yml_path})\n monkeypatch.setattr('django_swagger_tester.static_schema.loader.LoadStaticSchema.get_schema', ret_schema)\n with pytest.raises(\n ImproperlyConfigured,\n match='Method \\`gets\\` is invalid. Should be one of: GET, POST, PUT, PATCH, DELETE, OPTIONS, HEAD.',\n ):\n LoadStaticSchema('api/v1/trucks/correct', 'gets', status_code=200)", "def DefaultsDataclassField(feature_type: str):\n\n\n class DefaultMarshmallowField(fields.Field):\n \"\"\"Custom marshmallow field that deserializes a dict for a valid defaults config from the feature_registry\n and creates a corresponding JSON schema for external usage.\"\"\"\n\n def _deserialize(self, value, attr, data, **kwargs):\n if value is None:\n return None\n if isinstance(value, dict):\n input_feature_class = input_mixin_registry[feature_type]\n output_feature_class = output_mixin_registry.get(feature_type, None)\n try:\n input_schema = input_feature_class.Schema().load(value)\n if output_feature_class:\n output_schema = output_feature_class.Schema().load(value)\n combined = input_schema + output_schema\n else:\n combined = input_schema\n return combined\n except (TypeError, ValidationError) as error:\n raise ValidationError(f'Invalid params: {value}, see `{attr}` definition. 
Error: {error}')\n raise ValidationError(f'Invalid params: {value}')\n\n @staticmethod\n def _jsonschema_type_mapping():\n input_feature_cls = input_mixin_registry.get(feature_type)\n output_feature_cls = output_mixin_registry.get(feature_type, None)\n input_props = schema_utils.unload_jsonschema_from_marshmallow_class(input_feature_cls)['properties']\n if output_feature_cls:\n output_props = schema_utils.unload_jsonschema_from_marshmallow_class(output_feature_cls)['properties']\n combined_props = {**output_props, **input_props}\n else:\n combined_props = input_props\n return {'type': 'object', 'properties': combined_props, 'additionalProperties': False, 'title': 'defaults_options'}\n try:\n input_cls = input_mixin_registry[feature_type]\n output_cls = output_mixin_registry.get(feature_type, None)\n dump_default = input_cls.Schema().dump({'type': feature_type})\n if output_cls:\n output_dump = output_cls.Schema().dump({'type': feature_type})\n dump_default = {**output_dump, **dump_default}\n load_default = input_cls.Schema().load({'type': feature_type})\n if output_cls:\n output_load = output_cls.Schema().load({'type': feature_type})\n for k in dump_default.keys():\n if getattr(load_default, k, -1) == -1:\n setattr(load_default, k, getattr(output_load, k))\n return field(metadata={'marshmallow_field': DefaultMarshmallowField(allow_none=False, dump_default=dump_default, load_default=load_default)}, default_factory=lambda : load_default)\n except Exception as e:\n raise ValidationError(f'Unsupported feature type: {feature_type}. See input_type_registry. Details: {e}')", "def test_schema_exists(self):\n return exclusions.open()", "def test_merge_schemas(registry):\n test_schema = registry[TYPES][unit_test_type].schema\n test_subschema = test_schema['properties']['attachment']\n res = merge_schemas(test_subschema, registry[TYPES])\n assert res\n assert res != test_subschema\n assert res['properties']['attachment']['attachment'] is True", "def test_non_attrs_object():\n class Foo(object):\n def __init__(self, x):\n self.x = x\n\n with pytest.raises(UnextractableSchema):\n extract_jsonschema(Foo)" ]
[ "0.6704261", "0.6257809", "0.62339044", "0.6202627", "0.6178689", "0.6136276", "0.61212945", "0.6067049", "0.6038067", "0.6000049", "0.5999241", "0.59988236", "0.59154654", "0.5914625", "0.5912379", "0.5896713", "0.58911383", "0.5890401", "0.58775854", "0.58499223", "0.58296186", "0.5822896", "0.58006346", "0.58003414", "0.5795165", "0.5781637", "0.57799006", "0.5760616", "0.5758323", "0.57545763" ]
0.76797485
0
Creates the initial alignment values for the `AttentionWrapper` class. This is important for AttentionMechanisms that use the previous alignment to calculate the alignment at the next time step (e.g. monotonic attention). The default behavior is to return a tensor of all zeros.
def initial_alignments(self, batch_size, dtype): max_time = self._word_alignments_size * self._alignments_size return _zero_state_tensors(max_time, batch_size, dtype)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_default_paddings(self, inputs: JTensor) -> JTensor:\n in_shape = list(inputs.shape)\n assert len(in_shape) > 1\n in_shape[-1] = 1\n return jnp.zeros(in_shape, dtype=inputs.dtype)", "def _GetDefaultPaddings(self, inputs):\n return tf.zeros(\n tf.concat([tf.shape(inputs)[:-1], [1]], 0), dtype=inputs.dtype)", "def _GetDefaultPaddings(self, inputs):\n return tf.zeros(\n tf.concat([tf.shape(inputs)[:-1], [1]], 0), dtype=inputs.dtype)", "def adjusted_matrix(self):\n mat = AlignerExtract().transform_matrix(self.aligned[\"matrix\"],\n self.aligned[\"size\"],\n self.aligned[\"padding\"])\n return mat", "def to_alignment(self):\n alignment = dict()\n alignment[\"x\"] = self.x\n alignment[\"w\"] = self.w\n alignment[\"y\"] = self.y\n alignment[\"h\"] = self.h\n alignment[\"frame_dims\"] = self.frame_dims\n alignment[\"landmarksXY\"] = self.landmarksXY\n return alignment", "def initial_state(self):\n h_0 = tf.zeros([1, self._num_units], self._dtype)\n context_0 = self._compute_context(h_0)\n h_0 = context_0 * 0\n\n if self._dec_init_states is None:\n batch_size = tf.shape(self._memory)[0]\n cell_states = self._cell.zero_state(batch_size, self._dtype)\n else:\n cell_states = self._dec_init_states\n\n attn_state_0 = AttnState(cell_states, h_0, context_0)\n\n return attn_state_0", "def _initialize(self):\n scaled_signal = self.get_read(raw=True, scale=True)\n raw_signal = self.get_read(raw=True, scale=False)\n # add raw signal information to AlignedSignal\n aligned_signal = AlignedSignal(scaled_signal)\n aligned_signal.add_raw_signal(raw_signal)\n return aligned_signal", "def rotateAlignXform(self):\n r = np.concatenate(([self.x], [self.y], [self.z]), 0)\n r = np.concatenate((r, np.array([[0,0,0]])), 0)\n r = np.concatenate((r, np.array([0,0,0,1]).reshape(-1,1)), 1)\n return r", "def zero_state(self, batch_size, dtype):\n with ops.name_scope(\n type(self).__name__ + 'ZeroState', values = [batch_size]\n ):\n if self._initial_cell_state is not None:\n cell_state = self._initial_cell_state\n else:\n cell_state = self._cell.zero_state(batch_size, dtype)\n error_message = (\n 'When calling zero_state of AttentionWrapper %s: '\n % self._base_name\n + 'Non-matching batch sizes between the memory '\n '(encoder output) and the requested batch size. Are you using '\n 'the BeamSearchDecoder? 
If so, make sure your encoder output has '\n 'been tiled to beam_width via tf.contrib.seq2seq.tile_batch, and '\n 'the batch_size= argument passed to zero_state is '\n 'batch_size * beam_width.'\n )\n with tf.control_dependencies(\n self._batch_size_checks(batch_size, error_message)\n ):\n cell_state = nest.map_structure(\n lambda s: tf.identity(s, name = 'checked_cell_state'),\n cell_state,\n )\n return tf.contrib.seq2seq.AttentionWrapperState(\n cell_state = cell_state,\n time = tf.zeros([], dtype = tf.int32),\n attention = _zero_state_tensors(\n self._attention_layer_size, batch_size, dtype\n ),\n alignments = self._item_or_tuple(\n attention_mechanism.initial_alignments(batch_size, dtype)\n for attention_mechanism in self._attention_mechanisms\n ),\n attention_state = self._item_or_tuple(\n attention_mechanism.initial_state(batch_size, dtype)\n for attention_mechanism in self._attention_mechanisms\n ),\n # since we need to read the alignment history several times, so we need set clear_after_read to False\n alignment_history = self._item_or_tuple(\n tf.TensorArray(\n dtype = dtype,\n size = 0,\n clear_after_read = False,\n dynamic_size = True,\n )\n if self._alignment_history\n else ()\n for _ in self._attention_mechanisms\n ),\n )", "def aligned(self):\n return self.__aligned", "def init_basic_aligner(allow_mismatches=False):\n a = Align.PairwiseAligner()\n if allow_mismatches:\n a.mismatch_score = -1\n a.gap_score = -3\n a.target_gap_score = -np.inf\n if not allow_mismatches:\n a.mismatch = -np.inf\n a.mismatch_score = -np.inf\n return a", "def horizontal_alignment(self):\n self.update()\n return self._horizontal_alignment", "def get_alignment_offset(self):\n\n return 0", "def unaligned(self):\n new_alignment = Alignment()\n new_alignment.datatype = self.datatype\n for name, seq in self.items():\n new_seq = re.sub(_INDEL, '', str(seq))\n if new_seq != '':\n new_alignment[name] = new_seq\n return new_alignment", "def init_aligner(allow_target_gaps=False, allow_target_mismatches=False):\n a = Align.PairwiseAligner()\n a.mismatch = -np.inf\n a.mismatch_score = -np.inf\n\n # Don't allow for gaps or mismatches with the target sequence\n if not allow_target_gaps:\n a.target_gap_score = -np.inf\n\n # Do not let matching items overwhelm determining where gaps should go\n if not allow_target_gaps:\n a.match = 10\n else:\n a.match = 200\n\n if allow_target_mismatches:\n a.mismatch = 200\n\n # Generally, prefer to extend gaps than to create them\n a.query_extend_gap_score = 99\n a.query_open_gap_score = 49\n\n # Set slight preference for open gaps on the edges, however, if present, strongly prefer single edge gaps\n a.query_end_open_gap_score = 50\n a.query_end_extend_gap_score = 100\n\n return a", "def align(self) -> np.ndarray:\n vel = self.state[:, :, Boids.Attr.VEL]\n vel_norm = np.linalg.norm(vel, axis=0)\n orientation = vel / (vel_norm + EPSILON)\n mut_influence = self._perceive(self.p_range)\n desired_orientation = np.dot(orientation, mut_influence)\n desired_orientation = np.multiply(desired_orientation, \n vel_norm + EPSILON)\n return desired_orientation - orientation", "def test_default_alignment(self):\n # Should give same result than test_template_alignment\n reg = ElasticRegistration()\n register = reg.fit_transform(self.unimodal_samples)\n\n values = register([-.25, -.1, 0, .1, .25])\n\n expected = [[[0.599058], [0.997427], [0.772248],\n [0.412342], [0.064725]],\n [[0.626875], [0.997155], [0.791649],\n [0.382181], [0.050098]],\n [[0.620992], [0.997369], [0.785886],\n [0.376556], 
[0.048804]]]\n\n np.testing.assert_allclose(values, expected, atol=1e-4)", "def call_alignment(self, orthologs):\n alignment = aminoCons.build_alignment(orthologs)\n self.alignment = alignment\n return alignment", "def _nn_initial_values(structure):\n\n # Use Xavier uniform initializer\n initializer=tf.glorot_uniform_initializer()\n\n output=[]\n last_width=None\n\n # Add biases & weights per layer\n for l in structure:\n output.append(tf.zeros(shape=[l])) # layer l biases\n if last_width is not None: # Exclude weights from layer 0\n output.append(initializer(shape=[last_width, l])) # layer l weights\n last_width=l\n\n return output", "def soft_attention_alignment(input_1, input_2):\n\n attention = Dot(axes=-1)([input_1, input_2])\n\n w_att_1 = Lambda(lambda x: softmax(x, axis=1),\n output_shape=unchanged_shape)(attention)\n w_att_2 = Permute((2, 1))(Lambda(lambda x: softmax(x, axis=2),\n output_shape=unchanged_shape)(attention))\n in1_aligned = Dot(axes=1)([w_att_1, input_1])\n in2_aligned = Dot(axes=1)([w_att_2, input_2])\n return in1_aligned, in2_aligned", "def zero_state(self, batch_size, dtype):\n with ops.name_scope(type(self).__name__ + \"ZeroState\", values=[batch_size]):\n if self._initial_cell_state is not None:\n cell_state = self._initial_cell_state\n else:\n cell_state = self._cell.zero_state(batch_size, dtype)\n error_message = (\n \"When calling zero_state of AttentionWrapper %s: \" % self._base_name +\n \"Non-matching batch sizes between the memory \"\n \"(encoder output) and the requested batch size. Are you using \"\n \"the BeamSearchDecoder? If so, make sure your encoder output has \"\n \"been tiled to beam_width via tf.contrib.seq2seq.tile_batch, and \"\n \"the batch_size= argument passed to zero_state is \"\n \"batch_size * beam_width.\")\n with ops.control_dependencies(\n self._batch_size_checks(batch_size, error_message)):\n cell_state = nest.map_structure(\n lambda s: array_ops.identity(s, name=\"checked_cell_state\"),\n cell_state)\n return AttentionWrapperState(\n cell_state=cell_state,\n time=array_ops.zeros([], dtype=dtypes.int32),\n attention=_zero_state_tensors(self._attention_layer_size, batch_size,\n dtype),\n alignments=self._item_or_tuple(\n attention_mechanism.initial_alignments(batch_size, dtype)\n for attention_mechanism in self._attention_mechanisms),\n attention_state=self._item_or_tuple(\n attention_mechanism.initial_state(batch_size, dtype)\n for attention_mechanism in self._attention_mechanisms),\n alignment_history=self._item_or_tuple(\n tensor_array_ops.TensorArray(dtype=dtype, size=0,\n dynamic_size=True)\n if self._alignment_history else ()\n for _ in self._attention_mechanisms))", "def _align(self, sum_heads_encoded, sum_bodies_encoded):\n _tmp_heads_encoded = tf.expand_dims(sum_heads_encoded, 1)\n vector_attn = tf.reduce_sum(\n tf.multiply(tf.nn.l2_normalize(sum_bodies_encoded, 2), tf.nn.l2_normalize(_tmp_heads_encoded, 2)), axis=2,\n keepdims=True)\n return tf.nn.softmax(vector_attn, axis=1)", "def zeros_like(self):\n return MultiterminalDevice.zeros(\n self.dims,\n self.center.shape,\n tuple(i.shape for i in self.leads),\n self.connections,\n )", "def zero_state(self, batch_size, dtype):\n with ops.name_scope(type(self).__name__ + \"ZeroState\", values=[batch_size]):\n if self._initial_cell_state is not None:\n cell_state = self._initial_cell_state\n else:\n cell_state = self._cell.zero_state(batch_size, dtype)\n error_message = (\n \"When calling zero_state of AttentionWrapper %s: \" % self._base_name +\n \"Non-matching batch sizes between 
the memory \"\n \"(encoder output) and the requested batch size. Are you using \"\n \"the BeamSearchDecoder? If so, make sure your encoder output has \"\n \"been tiled to beam_width via tf.contrib.seq2seq.tile_batch, and \"\n \"the batch_size= argument passed to zero_state is \"\n \"batch_size * beam_width.\")\n with ops.control_dependencies(\n self._batch_size_checks(batch_size, error_message)):\n cell_state = nest.map_structure(\n lambda s: array_ops.identity(s, name=\"checked_cell_state\"),\n cell_state)\n return AttentionWrapperState(\n cell_state=cell_state,\n time=array_ops.zeros([], dtype=dtypes.int32),\n attention=_zero_state_tensors(self._attention_layer_size, batch_size,\n dtype),\n alignments=self._item_or_tuple(\n attention_mechanism.initial_alignments(batch_size, dtype)\n for attention_mechanism in self._attention_mechanisms),\n alignment_history=self._item_or_tuple(\n tensor_array_ops.TensorArray(dtype=dtype, size=0,\n dynamic_size=True)\n if self._alignment_history else ()\n for _ in self._attention_mechanisms))", "def get_paddings(self):\n return tf.constant([[0, 0,],\n [self._settings.half_patch_size, self._settings.half_patch_size],\n [self._settings.half_patch_size, self._settings.half_patch_size],\n [0, 0]])", "def initial(self) -> np.ndarray:\n return self._dist['initial']", "def affine(self) -> torch.Tensor:\n return self.meta.get(MetaKeys.AFFINE, self.get_default_affine())", "def initial_state(self):\n r = np.full((self.xlen, 2), self.log0, dtype=np.float32)\n r[0, 1] = self.log_probs[0, self.blank]\n for i in range(1, self.xlen):\n r[i, 1] = r[i - 1, 1] + self.log_probs[i, self.blank]\n return r", "def createAlignment(sequences, alphabet):\n align = Alignment(alphabet)\n counter = 0\n for sequence in sequences:\n name = \"sequence\" + str(counter)\n align.add_sequence(name, sequence)\n counter+=1\n return align", "def GetAlignment(self):\r\n\r\n return self.alignment" ]
[ "0.6028425", "0.59444785", "0.59444785", "0.5852683", "0.5841245", "0.5833352", "0.5652988", "0.5560669", "0.55605567", "0.55293924", "0.5496568", "0.5492979", "0.5479976", "0.54668605", "0.5441797", "0.5423778", "0.5401625", "0.5400675", "0.5371976", "0.5336401", "0.53245115", "0.53008115", "0.5276369", "0.52328324", "0.5232698", "0.5221485", "0.52136207", "0.5193206", "0.5188632", "0.517786" ]
0.74215364
0
Computes the attention and alignments for a given attention_mechanism.
def _compute_attention(attention_mechanism, batch_size, cell_output, previous_alignments, attention_layer): line_alignments, word_alignments, hier_alignments = attention_mechanism( cell_output, batch_size, previous_alignments=previous_alignments) # Reshape from [batch_size, memory_time] to [batch_size, 1, memory_time] # Context is the inner product of alignments and values along the # memory time dimension. # alignments shape is # [batch_size, 1, memory_time] # attention_mechanism.values shape is # [batch_size, memory_time, memory_size] # the batched matmul is over memory_time, so the output shape is # [batch_size, 1, memory_size]. # we then squeeze out the singleton dim. expanded_line_alignments = array_ops.expand_dims(line_alignments, 1) line_context = math_ops.matmul(expanded_line_alignments, attention_mechanism.values) line_attention = array_ops.squeeze(line_context, [1]) return line_attention, line_alignments, word_alignments, hier_alignments
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_attention(attention_mechanism, initial_state, previous_alignments,\n attention_layer):\n alignments, final_state = attention_mechanism(\n initial_state, previous_alignments=previous_alignments)\n\n # Reshape from [batch_size, memory_time] to [batch_size, 1, memory_time]\n expanded_alignments = tf.expand_dims(alignments, 1)\n # Context is the inner product of alignments and values along the\n # memory time dimension.\n # alignments shape is\n # [batch_size, 1, memory_time]\n # attention_mechanism.values shape is\n # [batch_size, memory_time, memory_size]\n # the batched matmul is over memory_time, so the output shape is\n # [batch_size, 1, memory_size].\n # we then squeeze out the singleton dim.\n context = tf.matmul(expanded_alignments, attention_mechanism.values)\n context = tf.squeeze(context, [1])\n\n if attention_layer is not None:\n attention = attention_layer(context)\n else:\n attention = context\n\n return attention, alignments, final_state", "def call(self, inputs, state):\n if not isinstance(state, tf.contrib.seq2seq.AttentionWrapperState):\n raise TypeError(\n 'Expected state to be instance of AttentionWrapperState. '\n 'Received type %s instead.' % type(state)\n )\n\n # Step 1: Calculate the true inputs to the cell based on the\n # previous attention value.\n cell_inputs = self._cell_input_fn(inputs, state.attention)\n cell_state = state.cell_state\n cell_output, next_cell_state = self._cell(cell_inputs, cell_state)\n\n cell_batch_size = cell_output.shape[0].value or tf.shape(cell_output)[0]\n error_message = (\n 'When applying AttentionWrapper %s: ' % self.name\n + 'Non-matching batch sizes between the memory '\n '(encoder output) and the query (decoder output). Are you using '\n 'the BeamSearchDecoder? You may need to tile your memory input via '\n 'the tf.contrib.seq2seq.tile_batch function with argument '\n 'multiple=beam_width.'\n )\n with tf.control_dependencies(\n self._batch_size_checks(cell_batch_size, error_message)\n ):\n cell_output = tf.identity(cell_output, name = 'checked_cell_output')\n\n if self._is_multi:\n previous_alignments = state.alignments\n previous_alignment_history = state.alignment_history\n else:\n previous_alignments = [state.alignments]\n previous_alignment_history = [state.alignment_history]\n\n all_alignments = []\n all_attentions = []\n all_histories = []\n\n for i, attention_mechanism in enumerate(self._attention_mechanisms):\n print(attention_mechanism)\n if self.coverage:\n # if we use coverage mode, previous alignments is coverage vector\n # alignment history stack has shape: decoder time * batch * atten_len\n # convert it to coverage vector\n previous_alignments[i] = tf.cond(\n previous_alignment_history[i].size() > 0,\n lambda: tf.reduce_sum(\n tf.transpose(\n previous_alignment_history[i].stack(), [1, 2, 0]\n ),\n axis = 2,\n ),\n lambda: tf.zeros_like(previous_alignments[i]),\n )\n # debug\n # previous_alignments[i] = tf.Print(previous_alignments[i],[previous_alignment_history[i].size(), tf.shape(previous_alignments[i]),previous_alignments[i]],message=\"atten wrapper:\")\n attention, alignments, next_attention_state = _compute_attention(\n attention_mechanism,\n cell_output,\n previous_alignments[i],\n self._attention_layers[i] if self._attention_layers else None,\n )\n alignment_history = (\n previous_alignment_history[i].write(state.time, alignments)\n if self._alignment_history\n else ()\n )\n\n all_alignments.append(alignments)\n all_histories.append(alignment_history)\n all_attentions.append(attention)\n\n attention 
= tf.concat(all_attentions, 1)\n next_state = tf.contrib.seq2seq.AttentionWrapperState(\n time = state.time + 1,\n cell_state = next_cell_state,\n attention = attention,\n alignments = self._item_or_tuple(all_alignments),\n attention_state = self._item_or_tuple(all_alignments),\n alignment_history = self._item_or_tuple(all_histories),\n )\n\n if self._output_attention:\n return attention, next_state\n else:\n return cell_output, next_state", "def _build_attention(\n self, encoder_outputs, encoder_sequence_length, attention_bias\n ):\n with tf.variable_scope('AttentionMechanism'):\n attention_depth = self.params['attention_layer_size']\n if self.params['attention_type'] == 'location':\n attention_mechanism = LocationSensitiveAttention(\n num_units=attention_depth,\n memory=encoder_outputs,\n memory_sequence_length=encoder_sequence_length,\n probability_fn=tf.nn.softmax,\n dtype=tf.get_variable_scope().dtype,\n use_bias=attention_bias,\n )\n elif self.params['attention_type'] == 'bahdanau':\n bah_normalize = self.params.get('bahdanau_normalize', False)\n attention_mechanism = BahdanauAttention(\n num_units=attention_depth,\n memory=encoder_outputs,\n normalize=bah_normalize,\n memory_sequence_length=encoder_sequence_length,\n probability_fn=tf.nn.softmax,\n dtype=tf.get_variable_scope().dtype,\n )\n else:\n raise ValueError('Unknown Attention Type')\n return attention_mechanism", "def calc_attention(self, encoder_hidden_states):\n\n params = self.dec_params\n if len(encoder_hidden_states.shape) == 3:\n # Squeeze the first dimension\n encoder_hidden_states = np.squeeze(encoder_hidden_states, axis=0)\n\n # T x Attn_vec_size\n attn_enc_term = np.matmul(encoder_hidden_states, params.attn_enc_w)\n\n def attention(dec_state):\n attn_dec_term = (np.matmul(dec_state, params.attn_dec_w) +\n params.attn_dec_b) # T x A\n attn_sum = np.tanh(attn_enc_term + attn_dec_term) # T x A\n attn_logits = np.squeeze(np.matmul(attn_sum, params.attn_v)) # T\n attn_probs = softmax(attn_logits)\n\n context_vec = np.matmul(attn_probs, encoder_hidden_states)\n # The attention probabilities are necessary for coverage penalty calculation\n return (context_vec, attn_probs)\n\n return attention", "def compute_attention(self, decoder_state, forward_encoder_states, backward_encoder_states):\r\n\r\n\t\tassert len(forward_encoder_states) == len(backward_encoder_states)\r\n\t\tmax_encoding_len = len(forward_encoder_states)\r\n\t\toutput_states=[]\r\n\t\tfor i in range(max_encoding_len):\r\n\t\t\tstate = np.concatenate([forward_encoder_states[i]['h'], backward_encoder_states[i]['h']])\r\n\t\t\toutput_states.append(state)\r\n\r\n\t\ttiled_outputs = np.concatenate(output_states, 1)\r\n\t\ttiled_decoder = np.tile(decoder_state['h'], (1, max_encoding_len))\r\n\r\n\t\talpha = np.zeros((len(output_states), 1))\r\n\t\tfor i in range(max_encoding_len):\r\n\t\t\talpha[i] = np.matmul(self.attentionV.transpose(), np.tanh(np.matmul(self.memoryLayer, output_states[i]) + np.matmul(self.queryLayer, decoder_state['h'])))\r\n\t\talpha = softmax(alpha)\r\n\r\n\t\tu = np.zeros(output_states[0].shape)\r\n\t\tfor i in range(max_encoding_len):\r\n\t\t\tu += alpha[i, 0] * output_states[i]\r\n\t\ta = np.matmul(self.attentonLayer, np.concatenate([decoder_state['h'], u]))\r\n\r\n\t\treturn a, alpha", "def calculate_all_attentions(\n self, xs, ilens, ys, olens, spembs=None, extras=None, *args, **kwargs\n ):\n with torch.no_grad():\n # remove unnecessary padded part (for multi-gpus)\n xs = xs[:, : max(ilens)]\n ys = ys[:, : max(olens)]\n if extras is not 
None:\n extras = extras[:, : max(ilens)].squeeze(-1)\n\n # forward propagation\n outs = self._forward(\n xs, ilens, ys, olens, spembs=spembs, ds=extras, is_inference=False\n )[1]\n\n att_ws_dict = dict()\n for name, m in self.named_modules():\n if isinstance(m, MultiHeadedAttention):\n attn = m.attn.cpu().numpy()\n if \"encoder\" in name:\n attn = [a[:, :l, :l] for a, l in zip(attn, ilens.tolist())]\n elif \"decoder\" in name:\n if \"src\" in name:\n attn = [\n a[:, :ol, :il]\n for a, il, ol in zip(attn, ilens.tolist(), olens.tolist())\n ]\n elif \"self\" in name:\n attn = [a[:, :l, :l] for a, l in zip(attn, olens.tolist())]\n else:\n logging.warning(\"unknown attention module: \" + name)\n else:\n logging.warning(\"unknown attention module: \" + name)\n att_ws_dict[name] = attn\n att_ws_dict[\"predicted_fbank\"] = [\n m[:l].T for m, l in zip(outs.cpu().numpy(), olens.tolist())\n ]\n\n return att_ws_dict", "def call(self, inputs, state):\n if not isinstance(state, AttentionWrapperState):\n raise TypeError(\"Expected state to be instance of AttentionWrapperState. \"\n \"Received type %s instead.\" % type(state))\n\n # Step 1: Calculate the true inputs to the cell based on the\n # previous attention value.\n cell_inputs = self._cell_input_fn(inputs, state.attention)\n cell_state = state.cell_state\n cell_output, next_cell_state = self._cell(cell_inputs, cell_state)\n\n cell_batch_size = (\n cell_output.shape[0].value or array_ops.shape(cell_output)[0])\n error_message = (\n \"When applying AttentionWrapper %s: \" % self.name +\n \"Non-matching batch sizes between the memory \"\n \"(encoder output) and the query (decoder output). Are you using \"\n \"the BeamSearchDecoder? You may need to tile your memory input via \"\n \"the tf.contrib.seq2seq.tile_batch function with argument \"\n \"multiple=beam_width.\")\n with ops.control_dependencies(\n self._batch_size_checks(cell_batch_size, error_message)):\n cell_output = array_ops.identity(\n cell_output, name=\"checked_cell_output\")\n\n if self._is_multi:\n previous_attention_state = state.attention_state\n previous_alignment_history = state.alignment_history\n else:\n previous_attention_state = [state.attention_state]\n previous_alignment_history = [state.alignment_history]\n\n all_alignments = []\n all_attentions = []\n all_attention_states = []\n maybe_all_histories = []\n for i, attention_mechanism in enumerate(self._attention_mechanisms):\n attention, alignments, next_attention_state = _compute_attention(\n attention_mechanism, cell_output, previous_attention_state[i],\n self._attention_layers[i] if self._attention_layers else None)\n alignment_history = previous_alignment_history[i].write(\n state.time, alignments) if self._alignment_history else ()\n\n all_attention_states.append(next_attention_state)\n all_alignments.append(alignments)\n all_attentions.append(attention)\n maybe_all_histories.append(alignment_history)\n\n attention = array_ops.concat(all_attentions, 1)\n next_state = AttentionWrapperState(\n time=state.time + 1,\n cell_state=next_cell_state,\n attention=attention,\n attention_state=self._item_or_tuple(all_attention_states),\n alignments=self._item_or_tuple(all_alignments),\n alignment_history=self._item_or_tuple(maybe_all_histories))\n\n if self._output_attention:\n return attention, next_state\n else:\n return cell_output, next_state", "def call(self, inputs, state):\n if not isinstance(state, AttentionWrapperState):\n raise TypeError(\"Expected state to be instance of AttentionWrapperState. 
\"\n \"Received type %s instead.\" % type(state))\n\n # Step 1: Calculate the true inputs to the cell based on the\n # previous attention value.\n cell_inputs = self._cell_input_fn(inputs, state.attention)\n _, initial_state = self._cell(cell_inputs, state.cell_state)\n cell_state = state.cell_state\n\n\n error_message = (\n \"When applying AttentionWrapper %s: \" % self.name +\n \"Non-matching batch sizes between the memory \"\n \"(encoder output) and the query (decoder output). Are you using \"\n \"the BeamSearchDecoder? You may need to tile your memory input via \"\n \"the tf.contrib.seq2seq.tile_batch function with argument \"\n \"multiple=beam_width.\")\n\n if self._is_multi:\n previous_alignments = state.alignments\n previous_alignment_history = state.alignment_history\n else:\n previous_alignments = [state.alignments]\n previous_alignment_history = [state.alignment_history]\n\n all_alignments = []\n all_attentions = []\n all_histories = []\n for i, attention_mechanism in enumerate(self._attention_mechanisms):\n attention, alignments, next_cell_state = _compute_attention(\n attention_mechanism, initial_state, previous_alignments[i],\n self._attention_layers[i] if self._attention_layers else None)\n alignment_history = previous_alignment_history[i].write(\n state.time, alignments) if self._alignment_history else ()\n\n all_alignments.append(alignments)\n all_histories.append(alignment_history)\n all_attentions.append(attention)\n\n attention = tf.concat(all_attentions, 1)\n next_state = AttentionWrapperState(\n time=state.time + 1,\n cell_state=next_cell_state,\n attention=attention,\n alignments=self._item_or_tuple(all_alignments),\n alignment_history=self._item_or_tuple(all_histories))\n\n return attention, next_state", "def attention(query, use_attention=False):\n attn_weights = []\n ds = [] # Results of attention reads will be stored here.\n for i in xrange(num_heads):\n with variable_scope.variable_scope(\"Attention_%d\" % i):\n y = rnn_cell._linear(query, attention_vec_size, True)\n y = array_ops.reshape(y, [-1, 1, 1, attention_vec_size])\n # Attention mask is a softmax of v^T * tanh(...).\n s = math_ops.reduce_sum(\n v[i] * math_ops.tanh(hidden_features[i] + y), [2, 3])\n if use_attention is False: # apply mean pooling\n weights = tf.tile(sequence_length, tf.pack([attn_length]))\n weights = array_ops.reshape(weights, tf.shape(s))\n a = array_ops.ones(tf.shape(s), dtype=dtype) / math_ops.to_float(weights)\n # a = array_ops.ones(tf.shape(s), dtype=dtype) / math_ops.to_float(tf.shape(s)[1])\n else:\n a = nn_ops.softmax(s)\n attn_weights.append(a)\n # Now calculate the attention-weighted vector d.\n d = math_ops.reduce_sum(\n array_ops.reshape(a, [-1, attn_length, 1, 1]) * hidden,\n [1, 2])\n ds.append(array_ops.reshape(d, [-1, attn_size]))\n return attn_weights, ds", "def __init__(self,\n cell,\n attention_mechanism,\n attention_layer_size=None,\n alignment_history=False,\n cell_input_fn=None,\n output_attention=True,\n initial_cell_state=None,\n name=None):\n super(AttentionWrapper, self).__init__(name=name)\n if not rnn_cell_impl._like_rnncell(cell): # pylint: disable=protected-access\n raise TypeError(\n \"cell must be an RNNCell, saw type: %s\" % type(cell).__name__)\n if isinstance(attention_mechanism, (list, tuple)):\n self._is_multi = True\n attention_mechanisms = attention_mechanism\n for attention_mechanism in attention_mechanisms:\n if not isinstance(attention_mechanism, AttentionMechanism):\n raise TypeError(\n \"attention_mechanism must contain only instances of \"\n 
\"AttentionMechanism, saw type: %s\"\n % type(attention_mechanism).__name__)\n else:\n self._is_multi = False\n if not isinstance(attention_mechanism, AttentionMechanism):\n raise TypeError(\n \"attention_mechanism must be an AttentionMechanism or list of \"\n \"multiple AttentionMechanism instances, saw type: %s\"\n % type(attention_mechanism).__name__)\n attention_mechanisms = (attention_mechanism,)\n\n if cell_input_fn is None:\n cell_input_fn = (\n lambda inputs, attention: array_ops.concat([inputs, attention], -1))\n else:\n if not callable(cell_input_fn):\n raise TypeError(\n \"cell_input_fn must be callable, saw type: %s\"\n % type(cell_input_fn).__name__)\n\n if attention_layer_size is not None:\n attention_layer_sizes = tuple(\n attention_layer_size\n if isinstance(attention_layer_size, (list, tuple))\n else (attention_layer_size,))\n if len(attention_layer_sizes) != len(attention_mechanisms):\n raise ValueError(\n \"If provided, attention_layer_size must contain exactly one \"\n \"integer per attention_mechanism, saw: %d vs %d\"\n % (len(attention_layer_sizes), len(attention_mechanisms)))\n self._attention_layers = tuple(\n layers_core.Dense(\n attention_layer_size,\n name=\"attention_layer\",\n use_bias=False,\n dtype=attention_mechanisms[i].dtype)\n for i, attention_layer_size in enumerate(attention_layer_sizes))\n self._attention_layer_size = sum(attention_layer_sizes)\n else:\n self._attention_layers = None\n self._attention_layer_size = sum(\n attention_mechanism.values.get_shape()[-1].value\n for attention_mechanism in attention_mechanisms)\n\n self._cell = cell\n self._attention_mechanisms = attention_mechanisms\n self._cell_input_fn = cell_input_fn\n self._output_attention = output_attention\n self._alignment_history = alignment_history\n with ops.name_scope(name, \"AttentionWrapperInit\"):\n if initial_cell_state is None:\n self._initial_cell_state = None\n else:\n final_state_tensor = nest.flatten(initial_cell_state)[-1]\n state_batch_size = (\n final_state_tensor.shape[0].value\n or array_ops.shape(final_state_tensor)[0])\n error_message = (\n \"When constructing AttentionWrapper %s: \" % self._base_name +\n \"Non-matching batch sizes between the memory \"\n \"(encoder output) and initial_cell_state. Are you using \"\n \"the BeamSearchDecoder? 
You may need to tile your initial state \"\n \"via the tf.contrib.seq2seq.tile_batch function with argument \"\n \"multiple=beam_width.\")\n with ops.control_dependencies(\n self._batch_size_checks(state_batch_size, error_message)):\n self._initial_cell_state = nest.map_structure(\n lambda s: array_ops.identity(s, name=\"check_initial_cell_state\"),\n initial_cell_state)", "def forward(self, queries, keys, mask=None, attn_prior=None, speaker_embed=None):\n if speaker_embed is not None:\n keys = keys + self.key_spk_proj(speaker_embed.unsqueeze(1).expand(\n -1, keys.shape[-1], -1\n )).transpose(1, 2)\n queries = queries + self.query_spk_proj(speaker_embed.unsqueeze(1).expand(\n -1, queries.shape[-1], -1\n )).transpose(1, 2)\n keys_enc = self.key_proj(keys) # B x n_attn_dims x T2\n queries_enc = self.query_proj(queries)\n\n # Simplistic Gaussian Isotopic Attention\n attn = (queries_enc[:, :, :, None] - keys_enc[:, :, None]) ** 2 # B x n_attn_dims x T1 x T2\n attn = -self.temperature * attn.sum(1, keepdim=True)\n\n if attn_prior is not None:\n #print(f\"AlignmentEncoder \\t| mel: {queries.shape} phone: {keys.shape} mask: {mask.shape} attn: {attn.shape} attn_prior: {attn_prior.shape}\")\n attn = self.log_softmax(attn) + torch.log(attn_prior[:, None] + 1e-8)\n #print(f\"AlignmentEncoder \\t| After prior sum attn: {attn.shape}\")\n\n attn_logprob = attn.clone()\n\n if mask is not None:\n attn.data.masked_fill_(mask.permute(0, 2, 1).unsqueeze(2), -float(\"inf\"))\n\n attn = self.softmax(attn) # softmax along T2\n return attn, attn_logprob", "def __init__(self,\n cell,\n attention_mechanism,\n rl=False,\n attention_layer_size=None,\n alignment_history=False,\n cell_input_fn=None,\n output_attention=True,\n initial_cell_state=None,\n name=None):\n super(AttentionWrapper, self).__init__(name=name)\n if not rnn_cell_impl._like_rnncell(cell): # pylint: disable=protected-access\n raise TypeError(\n \"cell must be an RNNCell, saw type: %s\" % type(cell).__name__)\n if isinstance(attention_mechanism, (list, tuple)):\n self._is_multi = True\n attention_mechanisms = attention_mechanism\n for attention_mechanism in attention_mechanisms:\n if not isinstance(attention_mechanism, AttentionMechanism):\n raise TypeError(\n \"attention_mechanism must contain only instances of \"\n \"AttentionMechanism, saw type: %s\"\n % type(attention_mechanism).__name__)\n else:\n self._is_multi = False\n if not isinstance(attention_mechanism, AttentionMechanism):\n raise TypeError(\n \"attention_mechanism must be an AttentionMechanism or list of \"\n \"multiple AttentionMechanism instances, saw type: %s\"\n % type(attention_mechanism).__name__)\n attention_mechanisms = (attention_mechanism,)\n\n if cell_input_fn is None:\n cell_input_fn = (\n lambda inputs, attention: array_ops.concat([inputs, attention], -1))\n else:\n if not callable(cell_input_fn):\n raise TypeError(\n \"cell_input_fn must be callable, saw type: %s\"\n % type(cell_input_fn).__name__)\n\n if attention_layer_size is not None:\n attention_layer_sizes = tuple(\n attention_layer_size\n if isinstance(attention_layer_size, (list, tuple))\n else (attention_layer_size,))\n if len(attention_layer_sizes) != len(attention_mechanisms):\n raise ValueError(\n \"If provided, attention_layer_size must contain exactly one \"\n \"integer per attention_mechanism, saw: %d vs %d\"\n % (len(attention_layer_sizes), len(attention_mechanisms)))\n self._attention_layers = tuple(\n layers_core.Dense(\n attention_layer_size, name=\"attention_layer\", use_bias=False)\n for 
attention_layer_size in attention_layer_sizes)\n self._attention_layer_size = sum(attention_layer_sizes)\n else:\n self._attention_layers = None\n self._attention_layer_size = sum(\n attention_mechanism.values.get_shape()[-1].value\n for attention_mechanism in attention_mechanisms)\n\n self._cell = cell\n self._attention_mechanisms = attention_mechanisms\n self._rl = rl\n self._cell_input_fn = cell_input_fn\n self._output_attention = output_attention\n self._alignment_history = alignment_history\n with ops.name_scope(name, \"AttentionWrapperInit\"):\n if initial_cell_state is None:\n self._initial_cell_state = None\n else:\n final_state_tensor = nest.flatten(initial_cell_state)[-1]\n state_batch_size = (\n final_state_tensor.shape[0].value\n or array_ops.shape(final_state_tensor)[0])\n error_message = (\n \"When constructing AttentionWrapper %s: \" % self._base_name +\n \"Non-matching batch sizes between the memory \"\n \"(encoder output) and initial_cell_state. Are you using \"\n \"the BeamSearchDecoder? You may need to tile your initial state \"\n \"via the tf.contrib.seq2seq.tile_batch function with argument \"\n \"multiple=beam_width.\")\n with ops.control_dependencies(\n self._batch_size_checks(state_batch_size, error_message)):\n self._initial_cell_state = nest.map_structure(\n lambda s: array_ops.identity(s, name=\"check_initial_cell_state\"),\n initial_cell_state)", "def attention(self, decoder_state, coverage=None):\n with tf.variable_scope(\"attention_compute\"):\n attn_size = 2*self.hidden_dim\n batch_size = tf.shape(self.encoder_states)[0]\n # Reshape encoder_states (need to insert a dim)\n encoder_states = tf.expand_dims(self.encoder_states, axis=2) # now is shape (batch_size, attn_len, 1, attn_size)\n # To calculate attention, we calculate v^T tanh(W_h h_i + W_s s_t + b_attn)\n # where h_i is an encoder state, and s_t a decoder state.\n # attn_vec_size is the length of the vectors v, b_attn, (W_h h_i) and (W_s s_t).\n # We set it to be equal to the size of the encoder states.\n attention_vec_size = attn_size\n\n # Get the weight matrix W_h and apply it to each encoder state to get (W_h h_i), the encoder features\n W_h = tf.get_variable(\"W_h\", [1, 1, attn_size, attention_vec_size])\n encoder_features = tf.nn.conv2d(encoder_states, W_h, [1, 1, 1, 1], \"SAME\") # shape (batch_size,attn_length,1,attention_vec_size)\n\n # Pass the decoder state through a linear layer (this is W_s s_t + b_attn in the paper)\n decoder_features = self.linear(decoder_state, attention_vec_size, True) # shape (batch_size, attention_vec_size)\n decoder_features = tf.expand_dims(tf.expand_dims(decoder_features, 1), 1) # reshape to (batch_size, 1, 1, attention_vec_size)\n\n w_c = tf.get_variable(\"w_cvg\", [1, 1, 1, attention_vec_size])\n if self.use_coverage and coverage is not None: # non-first step of coverage\n # Multiply coverage vector by w_c to get coverage_features.\n coverage_features = tf.nn.conv2d(coverage, w_c, [1, 1, 1, 1],\n \"SAME\") # c has shape (batch_size, attn_length, 1, attention_vec_size)\n\n # Calculate v^T tanh(W_h h_i + W_s s_t + w_c c_i^t + b_attn)\n e = tf.reduce_sum(tf.tanh(encoder_features + decoder_features + coverage_features), [2, 3]) # calculate e\n\n # Calculate attention distribution\n attn_dist = tf.nn.softmax(e * self.enc_padding_mask) # masked_attention(e)\n masked_sums = tf.reduce_sum(attn_dist, axis=1) # shape (batch_size)\n attn_dist /= tf.reshape(masked_sums, [-1, 1]) # re-normalize\n # Update coverage vector\n coverage += tf.reshape(attn_dist, 
[tf.shape(self.encoder_states)[0], -1, 1, 1])\n else:\n # Calculate v^T tanh(W_h h_i + W_s s_t + b_attn)\n e = tf.reduce_sum(tf.tanh(encoder_features + decoder_features), [2, 3]) # calculate e\n # Calculate attention distribution\n attn_dist = tf.nn.softmax(e * self.enc_padding_mask) # masked_attention(e)\n masked_sums = tf.reduce_sum(attn_dist, axis=1) # shape (batch_size)\n attn_dist /= tf.reshape(masked_sums, [-1, 1]) # re-normalize\n if self.use_coverage: # first step of training\n coverage = tf.expand_dims(tf.expand_dims(attn_dist, 2), 2) # initialize coverage\n\n # Calculate the context vector from attn_dist and encoder_states\n context_vector = tf.reduce_sum(tf.reshape(attn_dist, [tf.shape(self.encoder_states)[0], -1, 1, 1]) * encoder_states, [1, 2]) # shape (batch_size, attn_size).\n context_vector = tf.reshape(context_vector, [-1, attn_size])\n\n return context_vector, attn_dist, coverage", "def __call__(self, query, batch_size, previous_alignments):\n with variable_scope.variable_scope(None, \"custom_attention\", [query]):\n line_scores = _luong_score(query, self._keys, self._scale)\n word_scores = _luong_word_score(query, self._word_values, self._scale, self._alignments_size, self._hierarchy, batch_size)\n \n line_alignments = self._soft_weight*self._probability_fn(line_scores)\n word_alignments = self._soft_weight*self._probability_fn(word_scores)\n\n if self._hierarchy:\n temp_word_alignments = tf.transpose(word_alignments, [0,2,1])\n temp_line_alignments = tf.expand_dims(line_alignments, 1)\n hier_alignments = math_ops.multiply(temp_word_alignments, temp_line_alignments)\n hier_alignments = tf.transpose(hier_alignments, [0,2,1])\n else:\n shape = word_scores.get_shape().as_list()[-1]\n word_scores = tf.reshape(word_scores, [self._batch_size, -1])\n word_alignments = self._probability_fn(word_scores)\n word_alignments = tf.reshape(word_alignments, tf.stack([self._batch_size, -1, shape]))\n hier_alignments = word_alignments\n #[batch_size x memory_size x sentence_size]\n hier_alignments = tf.reshape(hier_alignments, [self._batch_size, -1])\n return line_alignments, word_alignments, hier_alignments", "def call(self, inputs):\n (from_tensor, to_tensor, attention_mask) = tf_utils.unpack_inputs(inputs)\n\n # Scalar dimensions referenced here:\n # B = batch size (number of sequences)\n # F = `from_tensor` sequence length\n # T = `to_tensor` sequence length\n # N = `num_attention_heads`\n # H = `size_per_head`\n # `query_tensor` = [B, F, N ,H]\n query_tensor = self.query_dense(from_tensor)\n\n # `key_tensor` = [B, T, N, H]\n key_tensor = self.key_dense(to_tensor)\n\n # `value_tensor` = [B, T, N, H]\n value_tensor = self.value_dense(to_tensor)\n\n # Take the dot product between \"query\" and \"key\" to get the raw\n # attention scores.\n attention_scores = tf.einsum(\"BTNH,BFNH->BNFT\", key_tensor, query_tensor)\n attention_scores = tf.multiply(attention_scores,\n 1.0 / math.sqrt(float(self.size_per_head)))\n\n if attention_mask is not None:\n # `attention_mask` = [B, 1, F, T]\n attention_mask = tf.expand_dims(attention_mask, axis=[1])\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n adder = (1.0 - tf.cast(attention_mask, attention_scores.dtype)) * -10000.0\n\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n attention_scores += adder\n\n 
# Normalize the attention scores to probabilities.\n # `attention_probs` = [B, N, F, T]\n attention_probs = tf.nn.softmax(attention_scores)\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_probs = self.attention_probs_dropout(attention_probs)\n\n # `context_layer` = [B, F, N, H]\n context_tensor = tf.einsum(\"BNFT,BTNH->BFNH\", attention_probs, value_tensor)\n\n return context_tensor, attention_scores", "def _compute_attention(\n self, query_tensor, key_tensor, value_tensor, attention_mask=None\n ):\n # Take the dot product between \"query\" and \"key\" to get the raw\n # attention scores.\n attention_scores = tf.einsum( # pragma: no cover\n self._dot_product_equation, key_tensor, query_tensor\n )\n attention_scores = tf.multiply( # pragma: no cover\n attention_scores, 1.0 / math.sqrt(float(self._key_size))\n )\n\n # Normalize the attention scores to probabilities.\n # `attention_scores` = [B, N, T, S]\n attention_scores = self._masked_softmax(\n attention_scores, attention_mask\n ) # pragma: no cover\n\n # This is actually dropping out entire tokens to attend to, which might\n # seem a bit unusual, but is taken from the original Transformer paper.\n attention_scores_dropout = self._dropout_layer(\n attention_scores\n ) # pragma: no cover\n\n # `context_layer` = [B, T, N, H]\n attention_output = tf.einsum(\n self._combine_equation, attention_scores_dropout, value_tensor\n ) # pragma: no cover\n return attention_output, attention_scores # pragma: no cover", "def task_specific_attention(inputs, output_size, sequence_lengths,\n initializer=layers.xavier_initializer(),\n activation_fn=tf.tanh, scope=None):\n assert len(inputs.get_shape()) == 3 and inputs.get_shape()[-1].value is not None\n\n with tf.variable_scope(scope or 'attention') as scope:\n attention_context_vector = tf.get_variable(name='attention_context_vector',\n shape=[output_size],\n initializer=initializer,\n dtype=tf.float32)\n \n input_projection = layers.fully_connected(inputs, output_size,\n activation_fn=activation_fn,\n scope=scope)\n\n vector_attn = tf.reduce_sum(tf.multiply(input_projection, attention_context_vector), axis=2) \n mask = tf.sequence_mask(sequence_lengths, dtype=tf.float32) \n attention_weights = tf.nn.softmax(vector_attn, axis=1)\n attention_weights = attention_weights*mask\n norms = tf.reduce_sum(attention_weights, axis = 1, keepdims = True) + 1e-6 \n attention_weights = attention_weights / norms\n attention_weights = tf.expand_dims(attention_weights, axis = 2) \n \n weighted_projection = inputs*attention_weights\n outputs = tf.reduce_sum(weighted_projection, axis=1)\n\n return outputs", "def rel_attn_core(\n self,\n q_head,\n k_head_h,\n v_head_h,\n k_head_r,\n seg_mat=None,\n attn_mask=None,\n head_mask=None,\n output_attentions=False,\n ):\n\n # content based attention score\n ac = torch.einsum(\"ibnd,jbnd->bnij\", q_head + self.r_w_bias , k_head_h)#mixout(self.r_w_bias, self.r_w_bias_target, self.mixout_p, self.training)\n\n # position based attention score\n bd = torch.einsum(\"ibnd,jbnd->bnij\", q_head + self.r_r_bias, k_head_r)#mixout(self.r_r_bias, self.r_r_bias_target, self.mixout_p, self.training)\n bd = self.rel_shift_bnij(bd, klen=ac.shape[3])\n\n # segment based attention score\n if seg_mat is None:\n ef = 0\n else:\n #ef = torch.einsum(\"ibnd,snd->ibns\", q_head + mixout(self.r_s_bias, self.r_s_bias_target, self.mixout_p, self.training), mixout(self.seg_embed, 
self.seg_embed_target, self.mixout_p, self.training))\n ef = torch.einsum(\"ibnd,snd->ibns\", q_head + self.r_s_bias, seg_embed)\n\n ef = torch.einsum(\"ijbs,ibns->bnij\", seg_mat, ef)\n\n # merge attention scores and perform masking\n attn_score = (ac + bd + ef) * self.scale\n if attn_mask is not None:\n # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask\n if attn_mask.dtype == torch.float16:\n attn_score = attn_score - 65500 * torch.einsum(\"ijbn->bnij\", attn_mask)\n else:\n attn_score = attn_score - 1e30 * torch.einsum(\"ijbn->bnij\", attn_mask)\n\n # attention probability\n attn_prob = F.softmax(attn_score, dim=3)\n attn_prob = self.dropout(attn_prob)\n\n # Mask heads if we want to\n if head_mask is not None:\n attn_prob = attn_prob * torch.einsum(\"ijbn->bnij\", head_mask)\n\n # attention output\n attn_vec = torch.einsum(\"bnij,jbnd->ibnd\", attn_prob, v_head_h)\n\n if output_attentions:\n return attn_vec, torch.einsum(\"bnij->ijbn\", attn_prob)\n\n return attn_vec", "def call(self, inputs):\n (input_tensor, attention_mask) = tf_utils.unpack_inputs(inputs)\n with tf.name_scope('attention'):\n attention_output, attention_scores = self.attention_layer(\n from_tensor=input_tensor,\n to_tensor=input_tensor,\n attention_mask=attention_mask)\n with tf.name_scope('output'):\n attention_output = self.attention_output_dense(attention_output)\n attention_output = self.attention_dropout(attention_output)\n # Use float32 in keras layer norm and the gelu activation in the\n # intermediate dense layer for numeric stability\n attention_output = self.attention_layer_norm(input_tensor +\n attention_output)\n if self.float_type == tf.float16:\n attention_output = tf.cast(attention_output, tf.float16)\n\n with tf.name_scope('intermediate'):\n intermediate_output = self.intermediate_dense(attention_output)\n if self.float_type == tf.float16:\n intermediate_output = tf.cast(intermediate_output, tf.float16)\n\n with tf.name_scope('output'):\n layer_output = self.output_dense(intermediate_output)\n layer_output = self.output_dropout(layer_output)\n # Use float32 in keras layer norm for numeric stability\n layer_output = self.output_layer_norm(layer_output + attention_output)\n if self.float_type == tf.float16:\n layer_output = tf.cast(layer_output, tf.float16)\n return layer_output, attention_scores", "def apply_attention(inputs,\n attention_mode=None,\n attention_in=None,\n use_5d_mode=False,\n data_format='channels_last'):\n assert data_format == 'channels_last'\n\n h_ch_loc = 2 if use_5d_mode else 1\n\n if attention_mode == 'peer':\n attn = softmax_merge_peer_attentions(attention_in, data_format)\n else:\n attn = tf.reduce_mean(inputs, [h_ch_loc, h_ch_loc+1])\n attn = tf.layers.dense(\n inputs=attn,\n units=inputs.shape[-1],\n kernel_initializer=tf.random_normal_initializer(stddev=.01))\n attn = tf.math.sigmoid(attn)\n channel_attn = tf.expand_dims(tf.expand_dims(attn, h_ch_loc), h_ch_loc)\n\n inputs = tf.multiply(inputs, channel_attn)\n\n return inputs", "def soft_attention_alignment(input_1, input_2):\n\n attention = Dot(axes=-1)([input_1, input_2])\n\n w_att_1 = Lambda(lambda x: softmax(x, axis=1),\n output_shape=unchanged_shape)(attention)\n w_att_2 = Permute((2, 1))(Lambda(lambda x: softmax(x, axis=2),\n output_shape=unchanged_shape)(attention))\n in1_aligned = Dot(axes=1)([w_att_1, input_1])\n in2_aligned = Dot(axes=1)([w_att_2, input_2])\n return in1_aligned, in2_aligned", "def _attention(self, inputs):\n attn_weights = K.batch_dot(x=inputs,\n y=K.permute_dimensions(inputs,\n 
pattern=(0, 2, 1)))\n return K.permute_dimensions(attn_weights, (0, 2, 1))", "def get_corpus_alignments(self,bitext,f_vocab,e_vocab):\n alignments = [] # all alignments in the corpus\n sentence_alignments = {} ## associated alignments for each snetence pair\n sent_count = 0\n for pair in bitext:\n sentence_alignments[sent_count] = []\n f_sent = pair[\"fr\"]\n e_sent = pair[\"en\"]\n e_count = len(e_sent) # number of wrods in each sentence\n f_count = len(f_sent)\n ## generate all combinations of alignments\n tuple_sets = []\n # all possible e->f mappings for each english word in separate list\n for i in range(e_count): # getting english words count of sets of ali tuples\n list = []\n iv_idx = e_vocab.index(e_sent[i]) ## getting corresponding index of word in the the vocabulary list\n for j in range(f_count):\n jv_idx = f_vocab.index(f_sent[j])\n list.append((iv_idx,jv_idx)) #of form (e,f)\n tuple_sets.append(list)\n for combination in product(*tuple_sets): ## change thos for more than 3 words\n alignments.append(combination)\n sentence_alignments[sent_count].append(len(alignments)-1)\n sent_count += 1\n #print(alignments)\n return alignments,sentence_alignments", "def __call__(self, affine, representations_list, aatype):\n act = [\n common_modules.Linear( # pylint: disable=g-complex-comprehension\n self.config.num_channel,\n name='input_projection')(jax.nn.relu(x))\n for x in representations_list\n ]\n # Sum the activation list (equivalent to concat then Linear).\n act = sum(act)\n\n final_init = 'zeros' if self.global_config.zero_init else 'linear'\n\n # Mapping with some residual blocks.\n for _ in range(self.config.num_residual_block):\n old_act = act\n act = common_modules.Linear(\n self.config.num_channel,\n initializer='relu',\n name='resblock1')(\n jax.nn.relu(act))\n act = common_modules.Linear(\n self.config.num_channel,\n initializer=final_init,\n name='resblock2')(\n jax.nn.relu(act))\n act += old_act\n\n # Map activations to torsion angles. Shape: (num_res, 14).\n num_res = act.shape[0]\n unnormalized_angles = common_modules.Linear(\n 14, name='unnormalized_angles')(\n jax.nn.relu(act))\n unnormalized_angles = jnp.reshape(\n unnormalized_angles, [num_res, 7, 2])\n angles = l2_normalize(unnormalized_angles, axis=-1)\n\n outputs = {\n 'angles_sin_cos': angles, # jnp.ndarray (N, 7, 2)\n 'unnormalized_angles_sin_cos':\n unnormalized_angles, # jnp.ndarray (N, 7, 2)\n }\n\n # Map torsion angles to frames.\n backb_to_global = r3.rigids_from_quataffine(affine)\n\n # Jumper et al. (2021) Suppl. Alg. 
24 \"computeAllAtomCoordinates\"\n\n # r3.Rigids with shape (N, 8).\n all_frames_to_global = all_atom.torsion_angles_to_frames(\n aatype,\n backb_to_global,\n angles)\n\n # Use frames and literature positions to create the final atom coordinates.\n # r3.Vecs with shape (N, 14).\n pred_positions = all_atom.frames_and_literature_positions_to_atom14_pos(\n aatype, all_frames_to_global)\n\n outputs.update({\n 'atom_pos': pred_positions, # r3.Vecs (N, 14)\n 'frames': all_frames_to_global, # r3.Rigids (N, 8)\n })\n return outputs", "def forward(self, encoder_outputs, decoder_h, previous_attention=None, mode=\"soft\"):\n if mode not in [\"soft\", \"hard\"]:\n raise ValueError(\"Invalid forward mode {} for attention; \\\n accept only soft and hard mode\".format(mode))\n\n if mode == \"soft\":\n\n #alpha_rec = cuda_benchmark(super().soft_recursive, encoder_outputs, decoder_h, previous_attention)\n #alpha = cuda_benchmark(super().soft, encoder_outputs, decoder_h, previous_attention)\n #alpha_rec = super().soft_recursive(encoder_outputs, decoder_h, previous_attention)\n alpha = super()(encoder_outputs, decoder_h, previous_attention, mode=\"soft\")\n # sum_of_alpha = torch.sum(alpha, dim=-1)\n # assert torch.allclose(sum_of_alpha, alpha.new_ones(alpha.size(0)),\n # atol=1e-3,\n # rtol=1e-3), \"{}\".format(sum_of_alpha)\n chunk_energy = self.chunk_energy(encoder_outputs, decoder_h)\n beta = self.my_soft(alpha, chunk_energy)\n return alpha, beta\n\n elif mode == \"hard\":\n monotonic_attention = super()(encoder_outputs, decoder_h,\n previous_attention, mode=\"hard\")\n chunk_energy = self.chunk_energy(encoder_outputs, decoder_h)\n masked_energy = self.hard(\n monotonic_attention, chunk_energy)\n chunkwise_attention = self.softmax(masked_energy)\n chunkwise_attention.masked_fill_(\n chunkwise_attention != chunkwise_attention,\n 0) # a trick to replace nan value with 0\n return monotonic_attention, chunkwise_attention", "def formatted_alignments(self,chosen_a_idxs,bitext,alignments,e_words,f_words):\n output =[]\n output_idxs = []\n for key in chosen_a_idxs.keys():\n temp = []\n temp_idx = []\n idx = chosen_a_idxs[key]\n alignment = alignments[idx]\n for t in alignment:\n temp.append((e_words[t[0]],f_words[t[1]]))\n temp_idx.append((bitext[key][\"en\"].index(e_words[t[0]]),bitext[key][\"fr\"].index(f_words[t[1]])))\n output.append(temp)\n output_idxs.append(temp_idx)\n return output,output_idxs", "def attention(query, step):\n \n if nest.is_sequence(query): # If the query is a tuple, flatten it.\n query_list = nest.flatten(query)\n query = array_ops.concat(query_list, 1)\n _tmp = math_ops.matmul(query, w) + b\n _tmp = array_ops.reshape(_tmp, [-1, 1, 1, attn_size])\n # Attention mask is a softmax of v^T * tanh(...).\n s = math_ops.reduce_sum(v * math_ops.tanh(hidden_features + _tmp), [2, 3])\n # beta = math_ops.multiply(nn_ops.softmax(s, name=\"beta_%d\" % step), beta_scalar)\n beta = nn_ops.softmax(s, name=\"beta_%d\" % step)\n # Now calculate the attention-weighted vector d.\n \n hidden_attn = math_ops.reduce_sum(array_ops.reshape(beta, [-1, attn_length, 1, 1]) * hidden,\n [1, 2])\n return hidden_attn, beta", "def _Attention(self, name, is_causal=True):\n p = self.params\n tr_atten_p = TransformerAttentionLayer.Params().Set(\n name='transformer_atten',\n input_dim=p.model_dim,\n hidden_dim=p.attention_hidden_dim or p.model_dim,\n is_masked=is_causal,\n num_heads=p.num_heads,\n residual_dropout_prob=p.residual_dropout_prob,\n atten_dropout_prob=p.atten_dropout_prob,\n fprop_dtype=p.fprop_dtype,\n 
add_unnormalized_input=p.selfatten_add_unnormalized_input,\n )\n tr_atten_p.atten_tpl.use_bias = p.use_bias\n tr_atten_p.atten_tpl.enable_value_proj = p.selfatten_enable_value_proj\n tr_atten_p.atten_tpl.enable_query_scale = p.enable_query_scale\n tr_atten_p.atten_tpl.enable_per_dim_scale = p.enable_per_dim_scale\n tr_atten_p.atten_tpl.device_mesh = p.device_mesh\n tr_atten_p.atten_tpl.weight_split_dims_mapping = (\n p.weight_split_dims_mapping.dnh)\n tr_atten_p.atten_tpl.activation_split_dims_mapping.blnh = (\n p.activation_split_dims_mapping.blnh)\n tr_atten_p.atten_tpl.activation_split_dims_mapping.bld = (\n p.activation_split_dims_mapping.bld)\n if p.deterministic_dropout:\n tr_atten_p.dropout_tpl = layers.DeterministicDropoutLayer.Params()\n tr_atten_p.atten_p.dropout_tpl = layers.DeterministicDropoutLayer.Params()\n\n return self._Graph(\n name,\n ['i'], # input NestedMap with {vec, paddings}\n ['o'], # output NestedMap with {vec, paddings}\n ('i.vec->split_i',\n self.MeshSplit('input_split', p.activation_split_dims_mapping.bld)),\n ('split_i,split_i,i.paddings->o.vec,unused_prob', tr_atten_p),\n ('i.paddings->o.paddings', self._Id('id')))", "def __call__(self, initial_state, previous_alignments):\n with tf.variable_scope(None, \"rnn_score_attention\", [initial_state]):\n score, final_state = rnn_score(initial_state, self._keys, self._cell, self._memory_sequence_length)\n alignments = self._probability_fn(score, previous_alignments)\n return alignments, final_state", "def attention_decoder(decoder_inputs,\n attention_states,\n cell,\n output_size=None,\n dtype=None,\n scope=None):\n if not decoder_inputs:\n raise ValueError(\"Must provide at least 1 input to attention decoder.\")\n if output_size is None:\n output_size = cell.output_size\n \n # ==================================scope=================================================\n with variable_scope.variable_scope(scope or \"TemporalAttn\", dtype=dtype) as scope:\n \n dtype = scope.dtype\n batch_size = array_ops.shape(decoder_inputs[0])[0] # Needed for reshaping.\n attn_length = attention_states.get_shape()[1].value\n attn_size = attention_states.get_shape()[2].value\n \n # To calculate W1 * h_t we use a 1-by-1 convolution, need to reshape before.\n hidden = array_ops.reshape(attention_states, [-1, attn_length, 1, attn_size])\n # U_d * h_i for i in range(T) (filter)\n u = variable_scope.get_variable(\"AttnDecoderU\", [1, 1, attn_size, attn_size], dtype=dtype)\n hidden_features = nn_ops.conv2d(hidden, u, [1, 1, 1, 1], \"SAME\")\n \n v = variable_scope.get_variable(\"AttnDecoderV\", [attn_size], dtype=dtype)\n \n # how to get the initial_state\n initial_state_size = array_ops.stack([batch_size, cell.output_size])\n initial_state = [array_ops.zeros(initial_state_size, dtype=dtype) for _ in xrange(2)]\n state = initial_state\n \n w = variable_scope.get_variable(\"AttnDecoderW\", [2*cell.output_size, attn_size], dtype=dtype)\n b = variable_scope.get_variable(\"AttnDecoderb\", [attn_size], dtype=dtype)\n \n # beta_scalar = variable_scope.get_variable(\"BetaScalar\", [attn_length])\n \n def attention(query, step):\n \"\"\"\n Put attention masks on hidden using hidden_features and query.\n \"\"\"\n \n if nest.is_sequence(query): # If the query is a tuple, flatten it.\n query_list = nest.flatten(query)\n query = array_ops.concat(query_list, 1)\n _tmp = math_ops.matmul(query, w) + b\n _tmp = array_ops.reshape(_tmp, [-1, 1, 1, attn_size])\n # Attention mask is a softmax of v^T * tanh(...).\n s = math_ops.reduce_sum(v * 
math_ops.tanh(hidden_features + _tmp), [2, 3])\n # beta = math_ops.multiply(nn_ops.softmax(s, name=\"beta_%d\" % step), beta_scalar)\n beta = nn_ops.softmax(s, name=\"beta_%d\" % step)\n # Now calculate the attention-weighted vector d.\n \n hidden_attn = math_ops.reduce_sum(array_ops.reshape(beta, [-1, attn_length, 1, 1]) * hidden,\n [1, 2])\n return hidden_attn, beta\n\n outputs = []\n attns = []\n with variable_scope.variable_scope(\"Attn\"):\n h_t, attn_t = attention(state, 0)\n attns.append(attn_t)\n # =============================recurrent===========================\n for i, inp in enumerate(decoder_inputs):\n if i > 0:\n variable_scope.get_variable_scope().reuse_variables()\n \n # LSTM_d([\\tilde{\\mathbf{h}}_{t}; \\mathbf{y}_t], \\hat{\\mathbf{y}}_{t}, \\mathbf{s}^d_{t})\n with variable_scope.variable_scope(\"DecoderOutput\"):\n x = tf.concat([inp, h_t], 1)\n cell_output, state = cell(x, state)\n outputs.append(cell_output)\n\n with variable_scope.variable_scope(\"Attn\"):\n h_t, attn_t = attention(state, i+1)\n attns.append(attn_t)\n \n with variable_scope.variable_scope(\"AttnDecoderOutput\"):\n inputs = tf.concat([cell_output, h_t], 1)\n output = Linear(inputs, output_size, True)(inputs)\n outputs.append(output)\n \n return outputs, state, attns" ]
[ "0.7185696", "0.6218817", "0.6158965", "0.6057099", "0.6030939", "0.60308385", "0.5955116", "0.5597623", "0.55930084", "0.5510469", "0.54661316", "0.5414076", "0.5378174", "0.53511596", "0.53483367", "0.525885", "0.5242506", "0.5172489", "0.5112989", "0.5112013", "0.51053816", "0.50916195", "0.5070893", "0.5050957", "0.50339323", "0.495676", "0.49530762", "0.49079856", "0.48924407", "0.48878893" ]
0.727797
0
Construct the `AttentionWrapper`. NOTE If you are using the `BeamSearchDecoder` with a cell wrapped in
def __init__(self, cell, attention_mechanism, rl=False, attention_layer_size=None, alignment_history=False, cell_input_fn=None, output_attention=True, initial_cell_state=None, name=None): super(AttentionWrapper, self).__init__(name=name) if not rnn_cell_impl._like_rnncell(cell): # pylint: disable=protected-access raise TypeError( "cell must be an RNNCell, saw type: %s" % type(cell).__name__) if isinstance(attention_mechanism, (list, tuple)): self._is_multi = True attention_mechanisms = attention_mechanism for attention_mechanism in attention_mechanisms: if not isinstance(attention_mechanism, AttentionMechanism): raise TypeError( "attention_mechanism must contain only instances of " "AttentionMechanism, saw type: %s" % type(attention_mechanism).__name__) else: self._is_multi = False if not isinstance(attention_mechanism, AttentionMechanism): raise TypeError( "attention_mechanism must be an AttentionMechanism or list of " "multiple AttentionMechanism instances, saw type: %s" % type(attention_mechanism).__name__) attention_mechanisms = (attention_mechanism,) if cell_input_fn is None: cell_input_fn = ( lambda inputs, attention: array_ops.concat([inputs, attention], -1)) else: if not callable(cell_input_fn): raise TypeError( "cell_input_fn must be callable, saw type: %s" % type(cell_input_fn).__name__) if attention_layer_size is not None: attention_layer_sizes = tuple( attention_layer_size if isinstance(attention_layer_size, (list, tuple)) else (attention_layer_size,)) if len(attention_layer_sizes) != len(attention_mechanisms): raise ValueError( "If provided, attention_layer_size must contain exactly one " "integer per attention_mechanism, saw: %d vs %d" % (len(attention_layer_sizes), len(attention_mechanisms))) self._attention_layers = tuple( layers_core.Dense( attention_layer_size, name="attention_layer", use_bias=False) for attention_layer_size in attention_layer_sizes) self._attention_layer_size = sum(attention_layer_sizes) else: self._attention_layers = None self._attention_layer_size = sum( attention_mechanism.values.get_shape()[-1].value for attention_mechanism in attention_mechanisms) self._cell = cell self._attention_mechanisms = attention_mechanisms self._rl = rl self._cell_input_fn = cell_input_fn self._output_attention = output_attention self._alignment_history = alignment_history with ops.name_scope(name, "AttentionWrapperInit"): if initial_cell_state is None: self._initial_cell_state = None else: final_state_tensor = nest.flatten(initial_cell_state)[-1] state_batch_size = ( final_state_tensor.shape[0].value or array_ops.shape(final_state_tensor)[0]) error_message = ( "When constructing AttentionWrapper %s: " % self._base_name + "Non-matching batch sizes between the memory " "(encoder output) and initial_cell_state. Are you using " "the BeamSearchDecoder? You may need to tile your initial state " "via the tf.contrib.seq2seq.tile_batch function with argument " "multiple=beam_width.") with ops.control_dependencies( self._batch_size_checks(state_batch_size, error_message)): self._initial_cell_state = nest.map_structure( lambda s: array_ops.identity(s, name="check_initial_cell_state"), initial_cell_state)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self,\n cell,\n attention_mechanism,\n attention_layer_size=None,\n alignment_history=False,\n cell_input_fn=None,\n output_attention=True,\n initial_cell_state=None,\n name=None):\n super(AttentionWrapper, self).__init__(name=name)\n if not rnn_cell_impl._like_rnncell(cell): # pylint: disable=protected-access\n raise TypeError(\n \"cell must be an RNNCell, saw type: %s\" % type(cell).__name__)\n if isinstance(attention_mechanism, (list, tuple)):\n self._is_multi = True\n attention_mechanisms = attention_mechanism\n for attention_mechanism in attention_mechanisms:\n if not isinstance(attention_mechanism, AttentionMechanism):\n raise TypeError(\n \"attention_mechanism must contain only instances of \"\n \"AttentionMechanism, saw type: %s\"\n % type(attention_mechanism).__name__)\n else:\n self._is_multi = False\n if not isinstance(attention_mechanism, AttentionMechanism):\n raise TypeError(\n \"attention_mechanism must be an AttentionMechanism or list of \"\n \"multiple AttentionMechanism instances, saw type: %s\"\n % type(attention_mechanism).__name__)\n attention_mechanisms = (attention_mechanism,)\n\n if cell_input_fn is None:\n cell_input_fn = (\n lambda inputs, attention: array_ops.concat([inputs, attention], -1))\n else:\n if not callable(cell_input_fn):\n raise TypeError(\n \"cell_input_fn must be callable, saw type: %s\"\n % type(cell_input_fn).__name__)\n\n if attention_layer_size is not None:\n attention_layer_sizes = tuple(\n attention_layer_size\n if isinstance(attention_layer_size, (list, tuple))\n else (attention_layer_size,))\n if len(attention_layer_sizes) != len(attention_mechanisms):\n raise ValueError(\n \"If provided, attention_layer_size must contain exactly one \"\n \"integer per attention_mechanism, saw: %d vs %d\"\n % (len(attention_layer_sizes), len(attention_mechanisms)))\n self._attention_layers = tuple(\n layers_core.Dense(\n attention_layer_size,\n name=\"attention_layer\",\n use_bias=False,\n dtype=attention_mechanisms[i].dtype)\n for i, attention_layer_size in enumerate(attention_layer_sizes))\n self._attention_layer_size = sum(attention_layer_sizes)\n else:\n self._attention_layers = None\n self._attention_layer_size = sum(\n attention_mechanism.values.get_shape()[-1].value\n for attention_mechanism in attention_mechanisms)\n\n self._cell = cell\n self._attention_mechanisms = attention_mechanisms\n self._cell_input_fn = cell_input_fn\n self._output_attention = output_attention\n self._alignment_history = alignment_history\n with ops.name_scope(name, \"AttentionWrapperInit\"):\n if initial_cell_state is None:\n self._initial_cell_state = None\n else:\n final_state_tensor = nest.flatten(initial_cell_state)[-1]\n state_batch_size = (\n final_state_tensor.shape[0].value\n or array_ops.shape(final_state_tensor)[0])\n error_message = (\n \"When constructing AttentionWrapper %s: \" % self._base_name +\n \"Non-matching batch sizes between the memory \"\n \"(encoder output) and initial_cell_state. Are you using \"\n \"the BeamSearchDecoder? 
You may need to tile your initial state \"\n \"via the tf.contrib.seq2seq.tile_batch function with argument \"\n \"multiple=beam_width.\")\n with ops.control_dependencies(\n self._batch_size_checks(state_batch_size, error_message)):\n self._initial_cell_state = nest.map_structure(\n lambda s: array_ops.identity(s, name=\"check_initial_cell_state\"),\n initial_cell_state)", "def _build_attention(\n self, encoder_outputs, encoder_sequence_length, attention_bias\n ):\n with tf.variable_scope('AttentionMechanism'):\n attention_depth = self.params['attention_layer_size']\n if self.params['attention_type'] == 'location':\n attention_mechanism = LocationSensitiveAttention(\n num_units=attention_depth,\n memory=encoder_outputs,\n memory_sequence_length=encoder_sequence_length,\n probability_fn=tf.nn.softmax,\n dtype=tf.get_variable_scope().dtype,\n use_bias=attention_bias,\n )\n elif self.params['attention_type'] == 'bahdanau':\n bah_normalize = self.params.get('bahdanau_normalize', False)\n attention_mechanism = BahdanauAttention(\n num_units=attention_depth,\n memory=encoder_outputs,\n normalize=bah_normalize,\n memory_sequence_length=encoder_sequence_length,\n probability_fn=tf.nn.softmax,\n dtype=tf.get_variable_scope().dtype,\n )\n else:\n raise ValueError('Unknown Attention Type')\n return attention_mechanism", "def call(self, inputs, state):\n if not isinstance(state, AttentionWrapperState):\n raise TypeError(\"Expected state to be instance of AttentionWrapperState. \"\n \"Received type %s instead.\" % type(state))\n\n # Step 1: Calculate the true inputs to the cell based on the\n # previous attention value.\n cell_inputs = self._cell_input_fn(inputs, state.attention)\n _, initial_state = self._cell(cell_inputs, state.cell_state)\n cell_state = state.cell_state\n\n\n error_message = (\n \"When applying AttentionWrapper %s: \" % self.name +\n \"Non-matching batch sizes between the memory \"\n \"(encoder output) and the query (decoder output). Are you using \"\n \"the BeamSearchDecoder? You may need to tile your memory input via \"\n \"the tf.contrib.seq2seq.tile_batch function with argument \"\n \"multiple=beam_width.\")\n\n if self._is_multi:\n previous_alignments = state.alignments\n previous_alignment_history = state.alignment_history\n else:\n previous_alignments = [state.alignments]\n previous_alignment_history = [state.alignment_history]\n\n all_alignments = []\n all_attentions = []\n all_histories = []\n for i, attention_mechanism in enumerate(self._attention_mechanisms):\n attention, alignments, next_cell_state = _compute_attention(\n attention_mechanism, initial_state, previous_alignments[i],\n self._attention_layers[i] if self._attention_layers else None)\n alignment_history = previous_alignment_history[i].write(\n state.time, alignments) if self._alignment_history else ()\n\n all_alignments.append(alignments)\n all_histories.append(alignment_history)\n all_attentions.append(attention)\n\n attention = tf.concat(all_attentions, 1)\n next_state = AttentionWrapperState(\n time=state.time + 1,\n cell_state=next_cell_state,\n attention=attention,\n alignments=self._item_or_tuple(all_alignments),\n alignment_history=self._item_or_tuple(all_histories))\n\n return attention, next_state", "def call(self, inputs, state):\n if not isinstance(state, AttentionWrapperState):\n raise TypeError(\"Expected state to be instance of AttentionWrapperState. 
\"\n \"Received type %s instead.\" % type(state))\n\n # Step 1: Calculate the true inputs to the cell based on the\n # previous attention value.\n cell_inputs = self._cell_input_fn(inputs, state.attention)\n cell_state = state.cell_state\n cell_output, next_cell_state = self._cell(cell_inputs, cell_state)\n\n cell_batch_size = (\n cell_output.shape[0].value or array_ops.shape(cell_output)[0])\n error_message = (\n \"When applying AttentionWrapper %s: \" % self.name +\n \"Non-matching batch sizes between the memory \"\n \"(encoder output) and the query (decoder output). Are you using \"\n \"the BeamSearchDecoder? You may need to tile your memory input via \"\n \"the tf.contrib.seq2seq.tile_batch function with argument \"\n \"multiple=beam_width.\")\n with ops.control_dependencies(\n self._batch_size_checks(cell_batch_size, error_message)):\n cell_output = array_ops.identity(\n cell_output, name=\"checked_cell_output\")\n\n if self._is_multi:\n previous_attention_state = state.attention_state\n previous_alignment_history = state.alignment_history\n else:\n previous_attention_state = [state.attention_state]\n previous_alignment_history = [state.alignment_history]\n\n all_alignments = []\n all_attentions = []\n all_attention_states = []\n maybe_all_histories = []\n for i, attention_mechanism in enumerate(self._attention_mechanisms):\n attention, alignments, next_attention_state = _compute_attention(\n attention_mechanism, cell_output, previous_attention_state[i],\n self._attention_layers[i] if self._attention_layers else None)\n alignment_history = previous_alignment_history[i].write(\n state.time, alignments) if self._alignment_history else ()\n\n all_attention_states.append(next_attention_state)\n all_alignments.append(alignments)\n all_attentions.append(attention)\n maybe_all_histories.append(alignment_history)\n\n attention = array_ops.concat(all_attentions, 1)\n next_state = AttentionWrapperState(\n time=state.time + 1,\n cell_state=next_cell_state,\n attention=attention,\n attention_state=self._item_or_tuple(all_attention_states),\n alignments=self._item_or_tuple(all_alignments),\n alignment_history=self._item_or_tuple(maybe_all_histories))\n\n if self._output_attention:\n return attention, next_state\n else:\n return cell_output, next_state", "def to_attention_instance(self):\n if self.label == self.label_mapping[\"entails\"] or self.label == self.label_mapping[\"contradicts\"]:\n new_label = \"attention_true\"\n elif self.label == self.label_mapping[\"neutral\"]:\n new_label = \"attention_false\"\n else:\n raise RuntimeError(\"Can't convert \" + str(self.label) + \" to an attention label\")\n return SnliInstance(self.first_sentence, self.second_sentence, new_label, self.index)", "def call(self, inputs, state):\n if not isinstance(state, tf.contrib.seq2seq.AttentionWrapperState):\n raise TypeError(\n 'Expected state to be instance of AttentionWrapperState. '\n 'Received type %s instead.' % type(state)\n )\n\n # Step 1: Calculate the true inputs to the cell based on the\n # previous attention value.\n cell_inputs = self._cell_input_fn(inputs, state.attention)\n cell_state = state.cell_state\n cell_output, next_cell_state = self._cell(cell_inputs, cell_state)\n\n cell_batch_size = cell_output.shape[0].value or tf.shape(cell_output)[0]\n error_message = (\n 'When applying AttentionWrapper %s: ' % self.name\n + 'Non-matching batch sizes between the memory '\n '(encoder output) and the query (decoder output). Are you using '\n 'the BeamSearchDecoder? 
You may need to tile your memory input via '\n 'the tf.contrib.seq2seq.tile_batch function with argument '\n 'multiple=beam_width.'\n )\n with tf.control_dependencies(\n self._batch_size_checks(cell_batch_size, error_message)\n ):\n cell_output = tf.identity(cell_output, name = 'checked_cell_output')\n\n if self._is_multi:\n previous_alignments = state.alignments\n previous_alignment_history = state.alignment_history\n else:\n previous_alignments = [state.alignments]\n previous_alignment_history = [state.alignment_history]\n\n all_alignments = []\n all_attentions = []\n all_histories = []\n\n for i, attention_mechanism in enumerate(self._attention_mechanisms):\n print(attention_mechanism)\n if self.coverage:\n # if we use coverage mode, previous alignments is coverage vector\n # alignment history stack has shape: decoder time * batch * atten_len\n # convert it to coverage vector\n previous_alignments[i] = tf.cond(\n previous_alignment_history[i].size() > 0,\n lambda: tf.reduce_sum(\n tf.transpose(\n previous_alignment_history[i].stack(), [1, 2, 0]\n ),\n axis = 2,\n ),\n lambda: tf.zeros_like(previous_alignments[i]),\n )\n # debug\n # previous_alignments[i] = tf.Print(previous_alignments[i],[previous_alignment_history[i].size(), tf.shape(previous_alignments[i]),previous_alignments[i]],message=\"atten wrapper:\")\n attention, alignments, next_attention_state = _compute_attention(\n attention_mechanism,\n cell_output,\n previous_alignments[i],\n self._attention_layers[i] if self._attention_layers else None,\n )\n alignment_history = (\n previous_alignment_history[i].write(state.time, alignments)\n if self._alignment_history\n else ()\n )\n\n all_alignments.append(alignments)\n all_histories.append(alignment_history)\n all_attentions.append(attention)\n\n attention = tf.concat(all_attentions, 1)\n next_state = tf.contrib.seq2seq.AttentionWrapperState(\n time = state.time + 1,\n cell_state = next_cell_state,\n attention = attention,\n alignments = self._item_or_tuple(all_alignments),\n attention_state = self._item_or_tuple(all_alignments),\n alignment_history = self._item_or_tuple(all_histories),\n )\n\n if self._output_attention:\n return attention, next_state\n else:\n return cell_output, next_state", "def __init__(self,\n num_units,\n line_memory,\n word_memory=None,\n soft_weight=None,\n hierarchy=True,\n line_memory_sequence_length=None,\n word_memory_sequence_length=None,\n scale=False,\n probability_fn=None,\n score_mask_value=float(\"-inf\"),\n name=\"CustomAttention\"):\n # For LuongAttention, we only transform the memory layer; thus\n # num_units **must** match expected the query depth.\n if probability_fn is None:\n probability_fn = nn_ops.softmax\n wrapped_probability_fn = lambda score: probability_fn(score)\n super(CustomAttention, self).__init__(\n query_layer=None,\n line_memory_layer=layers_core.Dense(\n num_units, name=\"line_memory_layer\", use_bias=False),\n line_memory=line_memory,\n word_memory=word_memory,\n probability_fn=wrapped_probability_fn,\n line_memory_sequence_length=line_memory_sequence_length,\n word_memory_sequence_length=word_memory_sequence_length,\n score_mask_value=score_mask_value,\n name=name)\n self._num_units = num_units\n self._scale = scale\n self._name = name\n self._hierarchy = hierarchy\n self._soft_weight = soft_weight", "def _build_attention(self, qkv_rank):\n if self._attention_axes is None:\n self._attention_axes = tuple(range(1, qkv_rank - 2))\n else:\n self._attention_axes = tuple(self._attention_axes) # pragma: no cover\n (\n 
self._dot_product_equation,\n self._combine_equation,\n attn_scores_rank,\n ) = _build_attention_equation(qkv_rank, attn_axes=self._attention_axes)\n norm_axes = tuple(\n range(attn_scores_rank - len(self._attention_axes), attn_scores_rank)\n )\n self._masked_softmax = MaskedSoftmax(\n mask_expansion_axes=[1], normalization_axes=norm_axes\n )\n self._dropout_layer = layers.Dropout(rate=self._dropout)", "def build(self, unused_input_shapes):\n self.query_dense = self._projection_dense_layer(\"query\")\n self.key_dense = self._projection_dense_layer(\"key\")\n self.value_dense = self._projection_dense_layer(\"value\")\n self.attention_probs_dropout = tf.keras.layers.Dropout(\n rate=self.attention_probs_dropout_prob)\n super(CustomAttention, self).build(unused_input_shapes)", "def _Attention(self, name, is_causal=True):\n p = self.params\n tr_atten_p = TransformerAttentionLayer.Params().Set(\n name='transformer_atten',\n input_dim=p.model_dim,\n hidden_dim=p.attention_hidden_dim or p.model_dim,\n is_masked=is_causal,\n num_heads=p.num_heads,\n residual_dropout_prob=p.residual_dropout_prob,\n atten_dropout_prob=p.atten_dropout_prob,\n fprop_dtype=p.fprop_dtype,\n add_unnormalized_input=p.selfatten_add_unnormalized_input,\n )\n tr_atten_p.atten_tpl.use_bias = p.use_bias\n tr_atten_p.atten_tpl.enable_value_proj = p.selfatten_enable_value_proj\n tr_atten_p.atten_tpl.enable_query_scale = p.enable_query_scale\n tr_atten_p.atten_tpl.enable_per_dim_scale = p.enable_per_dim_scale\n tr_atten_p.atten_tpl.device_mesh = p.device_mesh\n tr_atten_p.atten_tpl.weight_split_dims_mapping = (\n p.weight_split_dims_mapping.dnh)\n tr_atten_p.atten_tpl.activation_split_dims_mapping.blnh = (\n p.activation_split_dims_mapping.blnh)\n tr_atten_p.atten_tpl.activation_split_dims_mapping.bld = (\n p.activation_split_dims_mapping.bld)\n if p.deterministic_dropout:\n tr_atten_p.dropout_tpl = layers.DeterministicDropoutLayer.Params()\n tr_atten_p.atten_p.dropout_tpl = layers.DeterministicDropoutLayer.Params()\n\n return self._Graph(\n name,\n ['i'], # input NestedMap with {vec, paddings}\n ['o'], # output NestedMap with {vec, paddings}\n ('i.vec->split_i',\n self.MeshSplit('input_split', p.activation_split_dims_mapping.bld)),\n ('split_i,split_i,i.paddings->o.vec,unused_prob', tr_atten_p),\n ('i.paddings->o.paddings', self._Id('id')))", "def _compute_attention(attention_mechanism, batch_size, cell_output, previous_alignments,\n attention_layer):\n line_alignments, word_alignments, hier_alignments = attention_mechanism(\n cell_output, batch_size, previous_alignments=previous_alignments)\n\n # Reshape from [batch_size, memory_time] to [batch_size, 1, memory_time]\n # Context is the inner product of alignments and values along the\n # memory time dimension.\n # alignments shape is\n # [batch_size, 1, memory_time]\n # attention_mechanism.values shape is\n # [batch_size, memory_time, memory_size]\n # the batched matmul is over memory_time, so the output shape is\n # [batch_size, 1, memory_size].\n # we then squeeze out the singleton dim.\n expanded_line_alignments = array_ops.expand_dims(line_alignments, 1)\n line_context = math_ops.matmul(expanded_line_alignments, attention_mechanism.values)\n line_attention = array_ops.squeeze(line_context, [1])\n\n return line_attention, line_alignments, word_alignments, hier_alignments", "def __init__(self, kernel_size=11, log_t=False):\n super(Attention, self).__init__()\n assert kernel_size % 2 == 1, \"Kernel size should be odd for 'same' conv.\"\n padding = (kernel_size - 1) // 2\n 
self.conv = nn.Conv1d(1, 1, kernel_size, padding=padding)\n self.log_t = log_t", "def __init__(\n self,\n wrapper,\n embedder,\n transformer,\n ):\n dims = (\n wrapper.num_people,\n wrapper.num_samples,\n wrapper.num_people - 1,\n )\n super().__init__(wrapper, dims)\n self.embedder = embedder\n self.transformer = transformer", "def __init__(self,\n num_units,\n memory,\n cell,\n memory_sequence_length=None,\n scale=False,\n probability_fn=None,\n score_mask_value=float(\"-inf\"),\n name=\"LuongAttention\"):\n # For LuongAttention, we only transform the memory layer; thus\n # num_units **must** match expected the query depth.\n if probability_fn is None:\n probability_fn = nn_ops.softmax\n wrapped_probability_fn = lambda score, _: probability_fn(score)\n super(RnnScoreAttention, self).__init__(\n query_layer=None,\n memory_layer=layers_core.Dense(\n num_units, name=\"memory_layer\", use_bias=False),\n memory=memory,\n probability_fn=wrapped_probability_fn,\n memory_sequence_length=memory_sequence_length,\n score_mask_value=score_mask_value,\n name=name)\n self._num_units = num_units\n self._scale = scale\n self._name = name\n self._memory_sequence_length = memory_sequence_length\n self._cell = cell", "def __init__(\n self,\n wrapper,\n embedder,\n transformer,\n ):\n dims = (wrapper.num_people, wrapper.num_people, wrapper.num_samples)\n super().__init__(wrapper, dims)\n self.embedder = embedder\n self.transformer = transformer", "def _parse_anno_info(self, annotations):\n gt_bboxes, gt_bboxes_ignore = [], []\n gt_masks, gt_masks_ignore = [], []\n gt_labels = []\n for ann in annotations:\n if ann.get('iscrowd', False):\n gt_bboxes_ignore.append(ann['bbox'])\n gt_masks_ignore.append(ann.get('segmentation', None))\n else:\n gt_bboxes.append(ann['bbox'])\n gt_labels.append(ann['category_id'])\n gt_masks.append(ann.get('segmentation', None))\n if gt_bboxes:\n gt_bboxes = np.array(gt_bboxes, dtype=np.float32)\n gt_labels = np.array(gt_labels, dtype=np.int64)\n else:\n gt_bboxes = np.zeros((0, 4), dtype=np.float32)\n gt_labels = np.array([], dtype=np.int64)\n\n if gt_bboxes_ignore:\n gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)\n else:\n gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)\n\n ann = dict(\n bboxes=gt_bboxes,\n labels=gt_labels,\n bboxes_ignore=gt_bboxes_ignore,\n masks_ignore=gt_masks_ignore,\n masks=gt_masks)\n\n return ann", "def __init__(self, anno):\n self._x1 = float(anno[0])\n self._y1 = float(anno[1])\n self._w = float(anno[2])\n self._h = float(anno[3])\n self._blur = int(anno[4])\n self._expression = int(anno[5])\n self._illumination = int(anno[6])\n self._invalid = int(anno[7])\n self._occlusion = int(anno[8])\n self._pose = int(anno[9])", "def forward(self, encoder_state, context, context_lens):\n attn = self.attention(context, encoder_state.squeeze(0), context_lens)\n return attn", "def make_anno(wf, entities, entity_meta, gt_proportion, padding, acwe=False, plot_all=True):\r\n combined_clustered_pts, classwise_entities = organize_entities(\r\n wf.vols[0], entities, entity_meta, plot_all=plot_all\r\n )\r\n wf.params[\"entity_meta\"] = entity_meta\r\n anno_masks, anno_all = make_pseudomasks(\r\n wf,\r\n classwise_entities,\r\n acwe=acwe,\r\n padding=padding,\r\n core_mask_radius=(12, 12, 12),\r\n )\r\n\r\n return anno_masks, anno_all, entities", "def __init__(\n self,\n word_embeddings: BasicTextFieldEmbedder,\n input_dim: int,\n hidden_dim: int,\n attn_dim: int,\n features_to_idx: dict = None,\n feature_dim: int = None,\n dropout: float = 0.5,\n 
):\n super(AttentiveNER, self).__init__()\n self.word_embeddings = word_embeddings\n self.hidden_dim = hidden_dim\n self.left_bilstm = PytorchSeq2SeqWrapper(\n nn.LSTM(\n input_dim, hidden_dim, batch_first=True, bidirectional=True\n )\n )\n self.right_bilstm = PytorchSeq2SeqWrapper(\n nn.LSTM(\n input_dim, hidden_dim, batch_first=True, bidirectional=True\n )\n )\n\n self.attn = Attention(hidden_dim * 2, attn_dim)\n\n if features_to_idx is not None:\n self.feat_embs = nn.Embedding(\n len(features_to_idx) + 1, feature_dim, padding_idx=0\n )\n self.feat_to_idx = features_to_idx\n\n self.output_dim = (\n 2 * hidden_dim + input_dim + feature_dim\n ) # 50 is for features\n else:\n self.feat_to_idx = None\n self.output_dim = 2 * hidden_dim + input_dim\n\n self.dropout = nn.Dropout(dropout)", "def __init__(\n self,\n wrapper,\n embedder,\n ):\n dims = (wrapper.num_people, wrapper.num_samples, wrapper.num_samples)\n super().__init__(wrapper, dims)\n self.embedder = embedder", "def call(self, inputs, state):\n if not isinstance(state, AttentionWrapperState):\n raise TypeError(\"Expected state to be instance of AttentionWrapperState. \"\n \"Received type %s instead.\" % type(state))\n\n # Step 1: Calculate the true inputs to the cell based on the\n # previous attention value.\n # inputs, val, position_emb = inputs\n # check = tf.constant(3)\n\n # def f1():\n # cell_inputs = position_emb\n # cell_state = state.cell_state\n # cell_output, next_cell_state = self._cell(cell_inputs, cell_state)\n # next_cell_state = cell_state\n # return cell_output, next_cell_state\n\n # def f2():\n # cell_inputs = self._cell_input_fn(inputs, state.attention)\n # cell_state = state.cell_state\n # cell_output, next_cell_state = self._cell(cell_inputs, cell_state)\n # return cell_output, next_cell_state\n\n # cellStep = tf.cond(tf.less(val, check), f2, f1)\n\n # cell_output, next_cell_state = cellStep(inputs, position_emb, state)\n\n '''\n if self._rl:\n # with tf.variable_scope(self._name):\n inputs, position_emb = inputs\n cell_inputs = position_emb\n else:\n inputs, null = inputs\n cell_inputs = self._cell_input_fn(inputs, state.attention)\n '''\n\n #inputs, null = inputs\n cell_inputs = self._cell_input_fn(inputs, state.attention)\n\n # cell_inputs = inputs\n cell_state = state.cell_state\n cell_output, next_cell_state = self._cell(cell_inputs, cell_state)\n\n '''\n if self._rl:\n next_cell_state = cell_state\n '''\n \n cell_batch_size = (\n cell_output.shape[0].value or array_ops.shape(cell_output)[0])\n error_message = (\n \"When applying AttentionWrapper %s: \" % self.name +\n \"Non-matching batch sizes between the memory \"\n \"(encoder output) and the query (decoder output). Are you using \"\n \"the BeamSearchDecoder? 
You may need to tile your memory input via \"\n \"the tf.contrib.seq2seq.tile_batch function with argument \"\n \"multiple=beam_width.\")\n with ops.control_dependencies(\n self._batch_size_checks(cell_batch_size, error_message)):\n cell_output = array_ops.identity(\n cell_output, name=\"checked_cell_output\")\n\n if self._is_multi:\n previous_alignments = state.alignments\n previous_alignment_history = state.alignment_history\n else:\n previous_alignments = [state.alignments]\n previous_alignment_history = [state.alignment_history]\n\n all_alignments = []\n all_attentions = []\n all_histories = []\n for i, attention_mechanism in enumerate(self._attention_mechanisms): \n line_attention, line_alignments, word_alignments, hier_alignments = _compute_attention(\n attention_mechanism, cell_batch_size, cell_output, previous_alignments[i],\n self._attention_layers[i] if self._attention_layers else None)\n alignment_history = previous_alignment_history[i].write(\n state.time, alignments) if self._alignment_history else ()\n\n all_alignments.append(hier_alignments)\n all_histories.append(alignment_history)\n all_attentions.append(line_attention)\n\n attention = array_ops.concat(all_attentions, 1)\n next_state = AttentionWrapperState(\n time=state.time + 1,\n cell_state=next_cell_state,\n attention=attention,\n alignments=self._item_or_tuple(all_alignments),\n alignment_history=self._item_or_tuple(all_histories))\n\n p_gens = tf.sigmoid(linear([cell_state, cell_inputs], 1, True))\n\n if self._output_attention:\n return attention, next_state\n else:\n return (cell_output, hier_alignments, p_gens), next_state", "def __init__(\n self, buffer_size, keys_dim, vals_dim,\n query_dim=None, num_heads=1, last_timestep_only=False,\n postprocess=None, dense=True, name=None):\n super(CausalAttention, self).__init__(name=name)\n with self._enter_variable_scope():\n if isinstance(keys_dim, int):\n keys = Conv1x1(keys_dim * num_heads, name='key_module')\n if isinstance(vals_dim, int):\n vals = Conv1x1(vals_dim * num_heads, name='vals_module')\n if isinstance(query_dim, int):\n query = Conv1x1(query_dim * num_heads, name='query_module')\n if postprocess:\n postprocess = snt.Sequential(layer_factory(postprocess, name_prefix='output_module')[0])\n if keys_dim is None:\n keys = Identity()\n if query_dim is None:\n query = keys\n\n self._cores = Struct(keys=keys, vals=vals, query=query, postprocess=postprocess)\n self._keys_dim = keys_dim\n self._vals_dim = vals_dim\n self._buffer_size = buffer_size\n self._num_heads = num_heads\n self._dense = dense\n self._last_timestep_only = last_timestep_only", "def __init__(self,\n W_regularizer=None, b_regularizer=None,\n W_constraint=None, b_constraint=None,\n bias=True,\n return_attention=False,\n **kwargs):\n self.supports_masking = True\n self.return_attention = return_attention\n self.init = initializers.get('glorot_uniform')\n\n self.W_regularizer = regularizers.get(W_regularizer)\n self.b_regularizer = regularizers.get(b_regularizer)\n\n self.W_constraint = constraints.get(W_constraint)\n self.b_constraint = constraints.get(b_constraint)\n\n self.bias = bias\n super(Attention, self).__init__(**kwargs)", "def _get_imganno(self, idx):\n raise NotImplementedError", "def copy_annotations_from_unaligned(aligned_seqrec: SeqRecord, unaligned_seqrec: SeqRecord):\n # NCBI Blast id includes description, whereas alignment does not\n assert aligned_seqrec.id in unaligned_seqrec.id, f\"{aligned_seqrec.id} <> {unaligned_seqrec.id}\"\n # copy annotations from previous\n newrec = 
deepcopy(aligned_seqrec)\n newrec.annotations = unaligned_seqrec.annotations\n # clear any letter annotations added during deepcopy\n newrec.letter_annotations = dict()\n # original sequence and letter annotations\n seq = unaligned_seqrec.seq\n letter_annotations = unaligned_seqrec.letter_annotations\n # index to track position in original sequence\n i = 0\n for j, letter in enumerate(aligned_seqrec.seq):\n if letter in [gap_letter, stop_letter]:\n for key, values in letter_annotations.items():\n # convert strings into lists of characters,\n # then combine into string at end of loop\n if key == \"seqnums\":\n letter_annotation = None\n elif all(isinstance(value, str) for value in values):\n letter_annotation = gap_letter\n else:\n letter_annotation = None\n newrec.letter_annotations.setdefault(key, list()).append(letter_annotation)\n else:\n while seq[i] in [gap_letter, stop_letter]:\n i += 1\n assert letter == seq[i], f\"letter {letter} at {j} <> seq {seq[i]} at {i}\"\n for key in letter_annotations.keys():\n newrec.letter_annotations.setdefault(key, list()).append(letter_annotations[key][i])\n i += 1\n # convert list of chars into string\n for key, values in letter_annotations.items():\n if isinstance(values, str):\n newrec.letter_annotations[key] = \"\".join(newrec.letter_annotations[key])\n return newrec", "def __init__(self, params):\n super().__init__(params)\n p = self.params\n assert p.primary_source_key in [\n x for x, _ in p.source_atten_tpls\n ], 'Source attention must have the primary source key.'\n for source_key, atten_p in p.source_atten_tpls:\n if isinstance(atten_p, list):\n child_p_list = []\n for atten in atten_p:\n child_p = atten.Copy()\n if child_p.hidden_dim <= 0:\n child_p.hidden_dim = p.hidden_dim\n if child_p.input_dim <= 0:\n child_p.input_dim = p.input_dim\n child_p_list.append(child_p)\n self.CreateChildren('atten_%s' % source_key, child_p_list)\n else:\n child_p = atten_p.Copy()\n if child_p.hidden_dim <= 0:\n child_p.hidden_dim = p.hidden_dim\n if child_p.input_dim <= 0:\n child_p.input_dim = p.input_dim\n self.CreateChild('atten_%s' % source_key, child_p)\n\n # Initialize source context vector merging layer.\n merger_p = p.atten_merger_tpl.Copy()\n merger_p.name = 'atten_merger'\n merger_p.source_dim = p.input_dim\n merger_p.query_dim = p.input_dim\n self.CreateChild('atten_merger', merger_p)", "def build(self, unused_input_shapes):\n\n dense3d_impl = Dense3D\n dense2dprojection_impl = Dense2DProjection\n\n self.attention_layer = CustomAttention(\n num_attention_heads=self.num_attention_heads,\n size_per_head=self.attention_head_size,\n attention_probs_dropout_prob=self.attention_probs_dropout_prob,\n initializer_range=self.initializer_range,\n backward_compatible=self.backward_compatible,\n name=\"self\")\n\n self.attention_output_dense = dense3d_impl(\n num_attention_heads=self.num_attention_heads,\n size_per_head=int(self.hidden_size / self.num_attention_heads),\n kernel_initializer=get_initializer(self.initializer_range),\n output_projection=True,\n backward_compatible=self.backward_compatible,\n name=\"dense\")\n self.attention_dropout = tf.keras.layers.Dropout(\n rate=self.hidden_dropout_prob)\n self.attention_layer_norm = (\n tf.keras.layers.LayerNormalization(\n name=LAYER_NORM_NAME, axis=-1, epsilon=1e-12,\n # We do layer norm in float32 for numeric stability.\n dtype=tf.float32))\n\n self.intermediate_dense = dense2dprojection_impl(\n output_size=self.intermediate_size,\n kernel_initializer=get_initializer(self.initializer_range),\n 
activation=self.intermediate_activation,\n # Uses float32 so that gelu activation is done in float32.\n fp32_activation=True, name='dense')\n self.output_dense = dense2dprojection_impl(\n output_size=self.hidden_size,\n kernel_initializer=get_initializer(self.initializer_range),\n name='dense')\n self.output_dropout = tf.keras.layers.Dropout(rate=self.hidden_dropout_prob)\n self.output_layer_norm = tf.keras.layers.LayerNormalization(axis=-1,\n epsilon=1e-12, dtype=tf.float32, name=LAYER_NORM_NAME)\n super(CustomTransformerBlock, self).build(unused_input_shapes)", "def parse_annotation_instance(annotation):\n\n text = annotation['utf8_string']\n language = annotation['language']\n legible = int(annotation['legibility'] == 'legible')\n\n mask = np.reshape(np.array(annotation['mask'], np.int32), (-1, 2))\n box = cv2.boxPoints(cv2.minAreaRect(mask))\n quadrilateral = [int(x) for x in box.reshape([-1])]\n\n xmin = min(quadrilateral[0::2])\n xmax = max(quadrilateral[0::2])\n\n ymin = min(quadrilateral[1::2])\n ymax = max(quadrilateral[1::2])\n\n word_annotation = {\n 'bbox': [xmin, ymin, xmax - xmin, ymax - ymin],\n 'segmentation': [quadrilateral],\n 'attributes': {\n 'transcription': text,\n 'legible': legible,\n 'language': language,\n }\n }\n\n return word_annotation", "def get_attention(self, X):\n if self.bn:\n layer = 16\n else:\n layer = 14\n inputs = [K.learning_phase()] + [self.model.inputs[0]]\n _attention_f = K.function(inputs, [\n self.model.layers[layer].output])\n \n return _attention_f([0] + [X])" ]
[ "0.72406137", "0.61116153", "0.6106595", "0.5868271", "0.58216625", "0.574263", "0.5419795", "0.5395449", "0.53780705", "0.5358682", "0.5326316", "0.5276871", "0.52745825", "0.5269008", "0.526074", "0.51900357", "0.5189963", "0.5186625", "0.5131551", "0.5088417", "0.50647956", "0.50638986", "0.50319695", "0.5018762", "0.4995462", "0.4979305", "0.49637875", "0.495368", "0.49333343", "0.49187532" ]
0.7107153
1
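A minimal usage sketch for the wrapper whose call() step is quoted in the row above, assuming the TF 1.x tf.contrib.seq2seq API; the tensor sizes and variable names here are illustrative stand-ins, not values taken from the dataset row.

import tensorflow as tf

batch_size, src_len, hidden = 4, 7, 128
encoder_outputs = tf.random.normal([batch_size, src_len, hidden])  # stand-in encoder states
encoder_lengths = tf.fill([batch_size], src_len)

cell = tf.nn.rnn_cell.LSTMCell(hidden)
mechanism = tf.contrib.seq2seq.LuongAttention(
    num_units=hidden, memory=encoder_outputs,
    memory_sequence_length=encoder_lengths)
attn_cell = tf.contrib.seq2seq.AttentionWrapper(
    cell, mechanism, attention_layer_size=64, alignment_history=True)

state = attn_cell.zero_state(batch_size, tf.float32)
step_input = tf.random.normal([batch_size, 32])
output, state = attn_cell(step_input, state)  # one decode step, i.e. one run of the call() above

Each such call advances state.time by one and refreshes state.attention and state.alignments, exactly as the quoted method computes them.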
Returns `seq` as a tuple or as its single element. Which one is returned is determined by how the AttentionMechanism(s) were passed to the constructor.
def _item_or_tuple(self, seq):
  t = tuple(seq)
  if self._is_multi:
    return t
  else:
    return t[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _item_or_tuple(self, seq):\n t = tuple(seq)\n if self._is_multi:\n return t\n else:\n return t[0]", "def _item_or_tuple(self, seq):\n t = tuple(seq)\n if self._is_multi:\n return t\n else:\n return t[0]", "def to_sequence(arg, seq_type=None):\n def return_type(t):\n if t:\n if t is tuple:\n return (arg, ) if arg else tuple()\n elif t is dict:\n return {arg: True} if arg else dict()\n elif t is set:\n return {arg, } if arg else set()\n return [arg] if arg else list()\n\n if not is_sequence(arg):\n return return_type(seq_type)\n elif seq_type is not None and type(arg) is not seq_type:\n return seq_type(arg)\n return arg", "def format_seq(seq, new_seq):\n if type(seq) == str:\n return \"\".join(new_seq)\n elif type(seq) == tuple:\n return tuple(new_seq)\n else:\n return new_seq", "def get(self, seq):\n return self._get_node(seq).element", "def get_sequence_string(seq):\n if type(seq) == Bio.SeqRecord:\n seqstr = seq.seq.tostring()\n elif type(seq) == Bio.Seq.Seq:\n seqstr = seq.tostring()\n else:\n seqstr = seq\n return seqstr", "def get_seq(self): # -> list[Unknown]:\n ...", "def tuple_from_sequence(*args):\n return tuple(args)", "def getSequence(self):\n if self.sequence != None: # a sequence has been assigned\n return self.sequence\n elif self.seqscores != None: # inferred by parsimony but not yet assigned\n return None # determine most parsimonous sequence, not yet implemented", "def sequence(self) -> Any:\n return self.__seq", "def ensure_sequence(obj):\n if isinstance(obj, (tuple, list)):\n return obj\n else:\n return (obj,)", "def sequence_type(self) -> str:\n raise NotImplementedError()", "def seq(self):\n return self.__seq", "def get_rtl_seq(seq):\n # Sequences with ZWJ in them will reflect. Fitzpatrick modifiers\n # however do not, so if we reflect we make a pass to swap them back into their\n # logical order.\n # Used to check for TAG_END 0xe007f as well but Android fontchain_lint\n # dislikes the resulting mangling of flags for England, Scotland, Wales.\n\n ZWJ = 0x200d\n def is_fitzpatrick(cp):\n return 0x1f3fb <= cp <= 0x1f3ff\n\n if ZWJ not in seq:\n return ()\n\n rev_seq = list(seq)\n rev_seq.reverse()\n for i in range(1, len(rev_seq)):\n if is_fitzpatrick(rev_seq[i-1]):\n tmp = rev_seq[i]\n rev_seq[i] = rev_seq[i-1]\n rev_seq[i-1] = tmp\n return tuple(rev_seq)", "def try_flatten(sequence):\n # type: (Sequence[T]) -> Union[T, Sequence[T]]\n if sequence is None or len(sequence) == 0:\n return None\n if len(sequence) == 1:\n return tuple(sequence)[0]\n return sequence", "def _get_sequence(self):\n return self.__sequence", "def _get_sequence(self):\n return self.__sequence", "def _get_sequence(self):\n return self.__sequence", "def _get_sequence(self):\n return self.__sequence", "def _get_sequence(self):\n return self.__sequence", "def _get_sequence(self):\n return self.__sequence", "def _get_sequence(self):\n return self.__sequence", "def _get_sequence(self):\n return self.__sequence", "def _get_sequence(self):\n return self.__sequence", "def _get_sequence(self):\n return self.__sequence", "def _get_sequence(self):\n return self.__sequence", "def _get_sequence(self):\n return self.__sequence", "def _get_sequence(self):\n return self.__sequence", "def _get_sequence(self):\n return self.__sequence", "def _get_sequence(self):\n return self.__sequence" ]
[ "0.6926655", "0.6926655", "0.58118045", "0.57991827", "0.57862526", "0.56921035", "0.5684707", "0.5637408", "0.5624004", "0.5592931", "0.551967", "0.54796165", "0.5422755", "0.53580755", "0.53534436", "0.53325117", "0.53325117", "0.53325117", "0.53325117", "0.53325117", "0.53325117", "0.53325117", "0.53325117", "0.53325117", "0.53325117", "0.53325117", "0.53325117", "0.53325117", "0.53325117", "0.53325117" ]
0.6938356
0
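A tiny, self-contained sketch of the behaviour documented above, using a hypothetical stand-in class instead of the real AttentionWrapper; the `_is_multi` flag is the only piece of state the helper needs.

class _Wrapper(object):
  def __init__(self, is_multi):
    self._is_multi = is_multi

  def _item_or_tuple(self, seq):
    t = tuple(seq)
    if self._is_multi:
      return t
    else:
      return t[0]

print(_Wrapper(is_multi=True)._item_or_tuple([3, 5]))   # (3, 5), one entry per attention mechanism
print(_Wrapper(is_multi=False)._item_or_tuple([3]))     # 3, the bare element for a single mechanism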
The `state_size` property of `AttentionWrapper`.
def state_size(self):
  return AttentionWrapperState(
      cell_state=self._cell.state_size,
      time=tensor_shape.TensorShape([]),
      attention=self._attention_layer_size,
      alignments=self._item_or_tuple(
          a.alignments_size for a in self._attention_mechanisms),
      alignment_history=self._item_or_tuple(
          () for _ in self._attention_mechanisms))  # sometimes a TensorArray
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def state_size(self):\n raise NotImplementedError(\"Please implement this method\")", "def state_size(self):\n return AttentionWrapperState(\n cell_state=self._cell.state_size,\n time=tensor_shape.TensorShape([]),\n attention=self._attention_layer_size,\n alignments=self._item_or_tuple(\n a.alignments_size for a in self._attention_mechanisms),\n attention_state=self._item_or_tuple(\n a.state_size for a in self._attention_mechanisms),\n alignment_history=self._item_or_tuple(\n () for _ in self._attention_mechanisms)) # sometimes a TensorArray", "def state_size(self):\n raise NotImplementedError(\"Abstract method\")", "def state_size(self):\n raise NotImplementedError(\"Abstract method\")", "def state_size(self):\n\t\treturn (\n\t\t\ttf.TensorShape([self.args[\"kb_node_max_len\"], self.args[\"mp_state_width\"]]),\n\t\t)", "def get_state_size(self) -> Tuple[int, int]:\n return self.height, self.width", "def _cell_state_size(self):\n state_sizes = self._cells[0].state_size\n if isinstance(state_sizes, tuple):\n return sum(state_sizes)\n return state_sizes", "def state_size(self):\n return [tf.TensorShape([self.dmodel]),tf.TensorShape([self.dmodel]),tf.TensorShape([self.dmodel])]", "def state_size(self):\n # estimated state, its covariance, and the step number\n return [[self.dim_x], [self.dim_x * self.dim_x], [1]]", "def actual_size(self, size, mode='normal', state='on'):\n raise NotImplementedError", "def get_max_state_size(self) -> int:\n return self._j_checkpoint_storage.getMaxStateSize()", "def state(self):\n decimals = 2\n size_mb = round(self._size/1e6, decimals)\n return size_mb", "def size(self):\n return self.getattr('size')", "def _get_state_sizes(self):\n ds = self.builder.nodes[self.ds_inputs[0]]\n return [[ds.xdim]]", "def size(self):\n return self.properties.get('size')", "def state_size(self):\n return DualPointerWrapperState(\n cell_state=self._cell.state_size,\n time=tf.TensorShape([]),\n alignments_a=self._memory_a_ids.shape[1].value,\n alignments_b=self._memory_a_ids.shape[1].value,\n coverage_a=self._memory_a_ids.shape[1].value,\n coverage_b=self._memory_b_ids.shape[1].value\n )", "def size(self):\n return self.states.size() * self.n_pop", "def size(self):\n return self.size_number", "def size(self):\r\n return self._size", "def size(self):\r\n return self._size", "def size(self):\n\t\treturn self._size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size(self):\n return self._size", "def size(self):\n return self._size" ]
[ "0.8413178", "0.83095956", "0.76917756", "0.76917756", "0.76524323", "0.74709535", "0.73154217", "0.724257", "0.7135943", "0.70932657", "0.6971657", "0.69473106", "0.6929372", "0.69267815", "0.6837609", "0.67772937", "0.6741656", "0.6741119", "0.6716011", "0.66940117", "0.6692383", "0.6690668", "0.6690668", "0.6690668", "0.6690668", "0.6690668", "0.6690668", "0.6690668", "0.6690668", "0.6690668" ]
0.83416754
1
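A short sketch of what the property above evaluates to, again assuming the TF 1.x contrib API; the cell and memory sizes are arbitrary examples, and newer releases add an attention_state field that this older version of the property does not include.

import tensorflow as tf

memory = tf.zeros([2, 5, 32])  # [batch, memory_time, depth]
cell = tf.nn.rnn_cell.GRUCell(32)
mech = tf.contrib.seq2seq.LuongAttention(num_units=32, memory=memory)
wrapper = tf.contrib.seq2seq.AttentionWrapper(cell, mech, attention_layer_size=16)

print(wrapper.state_size)
# e.g. AttentionWrapperState(cell_state=32, attention=16, time=TensorShape([]),
#                            alignments=5, alignment_history=())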
This will cache the contents of a template fragment for a given amount of time, with the extra bonus of limiting the dogpile/stampede effect. You can easily replace the default template cache: just change the load statement from ``{% load cache %}`` to ``{% load cors_cache %}``.
def do_cache(parser, token, endparse='endcache', noda=CacheNode):
    nodelist = parser.parse((endparse,))
    parser.delete_first_token()
    tokens = token.contents.split()
    if len(tokens) < 3:
        raise TemplateSyntaxError(u"'%r' tag requires at least 2 arguments." % tokens[0])
    try:
        expire_time = int(tokens[1])
    except ValueError:
        raise TemplateSyntaxError(u"First argument to '%r' must be an integer (got '%s')." % (tokens[0], tokens[1]))
    cache = 'default'
    links = ''
    if len(tokens) > 3:
        ntokens = []
        for item in tokens:
            if item[0:6] == 'cache=':
                cache = item[6:]
            elif item[0:6] == 'links=':
                links = item[6:]
            else:
                ntokens.append(item)
        tokens = ntokens
    return noda(nodelist, expire_time, tokens[2].strip('"'), tokens[3:], cache=cache, links=links)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def render_cached(self, cache_key, render_cls, max_age, cache_time=0, *args, **kwargs):\r\n\r\n # Default the cache to be the same as our max age if not\r\n # supplied.\r\n cache_time = cache_time or max_age\r\n\r\n # Postfix the cache key with the subreddit name\r\n # This scopes all the caches by subreddit\r\n cache_key = cache_key + '-' + c.site.name\r\n\r\n # Get the etag and content from the cache.\r\n hit = g.rendercache.get(cache_key)\r\n if hit:\r\n etag, content = hit\r\n else:\r\n # Generate and cache the content along with an etag.\r\n content = render_cls(*args, **kwargs).render()\r\n etag = '\"%s\"' % datetime.utcnow().isoformat()\r\n g.rendercache.set(cache_key, (etag, content), time=cache_time)\r\n\r\n # Check if the client already has the correct content and\r\n # throw 304 if so. Note that we want to set the max age in the\r\n # 304 response, we can only do this by using the\r\n # pylons.response object just like the etag_cache fn does\r\n # within pylons (it sets the etag header). Setting it on the\r\n # c.response won't work as c.response isn't used when an\r\n # exception is thrown. Note also that setting it on the\r\n # pylons.response will send the max age in the 200 response\r\n # (just like the etag header is sent in the response).\r\n response.headers['Cache-Control'] = 'max-age=%d' % max_age\r\n etag_cache(etag)\r\n\r\n # Return full response using our cached info.\r\n c.response.content = content\r\n return c.response", "def cache():\n is_conditional = request.headers.get(\"If-Modified-Since\") or request.headers.get(\n \"If-None-Match\"\n )\n\n if is_conditional is None:\n response = view_get()\n response.headers[\"Last-Modified\"] = http_date()\n response.headers[\"ETag\"] = uuid.uuid4().hex\n return response\n else:\n return status_code(304)", "def slow(request):\n time.sleep(.1)\n return TemplateResponse(request, 'slow.html', {})", "def cache_control(value):\n response = view_get()\n response.headers[\"Cache-Control\"] = \"public, max-age={0}\".format(value)\n return response", "def cache_handler(event, context):\n events.cache()", "def never_cache_preview(response):\n response.cache_control.max_age = 0\n response.cache_control.no_cache = True\n response.cache_control.must_revalidate = True\n response.cache_control.no_store = True\n return response", "def test_cachefile_timestamp(self):\n data = EngineTest.testdata['test_cachefile']\n filenames = { 'layout': 'layout.pyhtml',\n 'page': 'account_create.pyhtml',\n 'form': 'account_form.pyhtml',\n }\n expected = data['expected']\n context = { 'params': { } }\n cache_filenames = ['account_create.pyhtml.cache', 'account_form.pyhtml.cache']\n try:\n for key, filename in filenames.items():\n write_file(filename, data[key])\n props = { 'prefix': 'account_', 'postfix':'.pyhtml', 'layout':'layout.pyhtml', 'cache':True }\n ## create cache files and check them\n time.sleep(1)\n curr_time = time.time()\n engine = tenjin.Engine(**props)\n output = engine.render(':create', context)\n for fname in filenames.values():\n self.assertExists(fname) # file created?\n self.assertTrue(engine.get_template(fname).timestamp < curr_time)\n self.assertEquals(os.path.getmtime(fname), engine.get_template(fname).timestamp)\n ## save current cached object\n cached = {}\n for fname in filenames.values():\n cached[fname] = engine.get_template(fname)\n ## confirm that get_template() returns the same object\n for fname in filenames.values():\n self.assertEquals(id(engine.get_template(fname)), id(cached[fname]))\n ## change timestamp of 
templates to be old\n for fname in filenames.values():\n atime = mtime = os.path.getmtime(fname) - 10\n os.utime(fname, (atime, mtime))\n ## check whether new caches are created\n for fname in filenames.values():\n t = engine.get_template(fname)\n self.assertNotEqual(id(t), id(cached[fname]))\n self.assertEquals(os.path.getmtime(fname), t.timestamp)\n finally:\n _remove_files(filenames.values())", "def do_jmbocache(parser, token):\n nodelist = parser.parse(('endjmbocache',))\n parser.delete_first_token()\n tokens = token.split_contents()\n if len(tokens) < 3:\n raise TemplateSyntaxError(\"'%r' tag requires at least 2 arguments.\" % tokens[0])\n return JmboCacheNode(nodelist,\n parser.compile_filter(tokens[1]),\n tokens[2], # fragment_name can't be a variable.\n [parser.compile_filter(token) for token in tokens[3:]])", "def cached_examples():\n examples = ExampleModel.query()\n return render_template('list_examples_cached.html', examples=examples)", "def __call__(self, template_name, template_vars, cache_key=None,\n cache_type=None, cache_expire=None):\n # Create a render callable for the cache function\n def render_template():\n # Grab a template reference\n template = self.loader.load(template_name)\n return Markup(template(template_vars).render())\n\n return cached_template(template_name, render_template,\n cache_key=cache_key, cache_type=cache_type,\n cache_expire=cache_expire)", "def cache_stats(request, template_name=\"admin/cache_stats.html\"):\n cache_stats = get_cache_stats()\n\n return render_to_response(template_name, RequestContext(request, {\n 'cache_hosts': cache_stats,\n 'cache_backend': cache.__module__,\n 'title': _(\"Server Cache\"),\n 'root_path': settings.SITE_ROOT + \"admin/db/\"\n }))", "def never_cache_preview(self, response):\n response.cache_control.max_age = 0\n response.cache_control.no_cache = True\n response.cache_control.must_revalidate = True\n response.cache_control.no_store = True\n return response", "def cache(cache_path):\n def cache_decorator(generator):\n def wrapper():\n return cached(cache_path, generator)\n return wrapper\n return cache_decorator", "def cache():\n if request.method == 'GET':\n cache_info = in_water.cache_info()\n return json.dumps({\n 'hits': cache_info.hits,\n 'misses': cache_info.misses,\n 'maxsize': cache_info.maxsize,\n 'currsize': cache_info.currsize,\n })", "def renderPage(c, page, request = None, response = None, cache = True, indexing = False):\n if request is None:\n # page rendered within a feed or batch context\n key = \"soup:\" + '_' + page.headers['name']\n else:\n # page rendered for online viewing or indexing\n key = \"soup:\" + page.headers['name']\n if not cache:\n return subRender(c,page,request,response,indexing)\n else:\n if \"x-cache-control\" in page.headers.keys():\n control = page.headers[\"x-cache-control\"].lower()\n m = MAX_AGE_REGEX.match(control)\n if m:\n seconds = int(m.group(3))\n try:\n if (c.cache.mtime(key) + seconds) < time.time():\n del(c.cache[key])\n except KeyError:\n pass\n try:\n if c.store.mtime(page.headers['name']) > c.cache.mtime(key):\n del(c.cache[key])\n raise KeyError\n else:\n return c.cache[key]\n except KeyError:\n c.cache[key] = buffer = subRender(c,page,request,response,indexing)\n return buffer\n # end else", "def docache(minutes=5, content_type='application/json; charset=utf-8'):\n def fwrap(f):\n @wraps(f)\n def wrapped_f(*args, **kwargs):\n r = f(*args, **kwargs)\n then = datetime.now() + timedelta(minutes=minutes)\n rsp = Response(r, content_type=content_type)\n 
rsp.headers.add('Expires', then.strftime(\"%a, %d %b %Y %H:%M:%S GMT\"))\n rsp.headers.add('Cache-Control', 'public,max-age=%d' % int(60 * minutes))\n return rsp\n return wrapped_f\n return fwrap", "def cache(timeout):\n def cached(func, *args, **kwargs):\n \"\"\"\n Cache data wrapper.\n \"\"\"\n lock = threading.Lock()\n key = func.__name__\n\n with lock:\n if key in CACHE:\n age = time() - CACHE[key]['time']\n if age < timeout:\n return CACHE[key]['result']\n\n result = func(*args, **kwargs)\n CACHE[key] = {\n 'result': result,\n 'time': time()\n }\n return result\n return decorator(cached)", "def cachedeterministic(parser, token):\r\n nodelist = parser.parse(('endcachedeterministic',))\r\n parser.delete_first_token()\r\n tokens = token.contents.split()\r\n if len(tokens) != 3:\r\n raise TemplateSyntaxError(u\"'%r' tag requires 2 arguments.\" % tokens[0])\r\n return CacheNode(nodelist, tokens[1], tokens[2])", "def static_cachebust(timestamp, filename):\n path = os.path.join(app.static_folder, filename)\n mtime = os.stat(path).st_mtime\n if abs(mtime - timestamp) > 1:\n abort(404)\n else:\n resp = send_from_static(filename)\n resp.headers.set(\n \"cache-control\", \"public, immutable, max-age=%s\" % (60 * 60 * 24 * 365,)\n )\n if \"expires\" in resp.headers:\n resp.headers.remove(\"expires\")\n return resp", "def cached(time=1200):\n def decorator(function):\n @functools.wraps(function)\n def wrapper(*args, **kwargs):\n key = '%s%s%s' % (function.__name__, str(args), str(kwargs))\n value = memcache.get(key)\n logging.debug('Cache lookup for %s, found? %s', key, value != None)\n if not value:\n value = function(*args, **kwargs)\n memcache.set(key, value, time=time)\n return value\n return wrapper\n return decorator", "def test_client_max_age_3600(self, sess):\r\n r = sess.get(self.url)\r\n assert self.cache.get(self.url) == r.raw\r\n\r\n # request that we don't want a new one unless\r\n r = sess.get(self.url, headers={'Cache-Control': 'max-age=3600'})\r\n assert r.from_cache is True\r\n\r\n # now lets grab one that forces a new request b/c the cache\r\n # has expired. 
To do that we'll inject a new time value.\r\n resp = self.cache.get(self.url)\r\n resp.headers['date'] = 'Tue, 15 Nov 1994 08:12:31 GMT'\r\n r = sess.get(self.url)\r\n assert not r.from_cache", "def test_client_max_age_0(self, sess):\r\n print('first request')\r\n r = sess.get(self.url)\r\n assert self.cache.get(self.url) == r.raw\r\n\r\n print('second request')\r\n r = sess.get(self.url, headers={'Cache-Control': 'max-age=0'})\r\n\r\n # don't remove from the cache\r\n assert self.cache.get(self.url)\r\n assert not r.from_cache", "def content():\n try:\n url = request.args.get('url')\n if not url:\n raise Exception('Expected url parameter')\n return render(cached_content(url=url), template='content.jinja2')\n except Exception, e:\n traceback.print_exc()\n return render({'url': request.url, 'error': str(e)},\n template='error.jinja2')", "def named_cache_page(cache_timeout):\n def wrapper(func):\n def foo(*args, **kwargs):\n key = func.__name__\n if kwargs:\n key += ':' + ':'.join([kwargs[key] for key in kwargs])\n\n response = cache.get(key)\n if not response:\n response = func(*args, **kwargs)\n cache.set(key, response, cache_timeout)\n return response\n return foo\n return wrapper", "def showifcached(parser, token):\r\n tokens = token.contents.split()\r\n if len(tokens) != 2:\r\n raise TemplateSyntaxError(u\"'%r' tag requires 1 argument.\" % tokens[0])\r\n return ShowIfCachedNode(tokens[1])", "def cache(self, file_name, content):\n self.files_loaded[file_name] = content", "def test_render_cached(self, mock_from_string, mock_sha1):\n template = SnippetTemplateFactory(code='asdf')\n cache_key = mock_sha1.return_value.hexdigest.return_value\n jinja_template = Mock()\n mock_cache = {cache_key: jinja_template}\n\n with patch('snippets.base.models.template_cache', mock_cache):\n result = template.render({})\n\n mock_sha1.assert_called_with('asdf')\n ok_(not mock_from_string.called)\n jinja_template.render.assert_called_with({'snippet_id': 0})\n eq_(result, jinja_template.render.return_value)", "def render(template_name, extra_vars=None, cache_key=None,\n cache_type=None, cache_expire=None):\n # Create a render callable for the cache function\n def render_template():\n # Pull in extra vars if needed\n globs = extra_vars or {}\n\n # Grab a template reference\n template = _LOOKUP.get_template(template_name)\n\n return literal(template.render_unicode(**globs))\n\n return _cached_template(template_name, render_template,\n cache_key=cache_key,\n cache_type=cache_type, cache_expire=cache_expire)", "def do_cache(*args, **kws):\n resp = self.response\n out = resp.out\n namespace = ''\n if self.cache_nsfuncs.get(func, None):\n namespace = self.cache_nsfuncs[func](self.request)\n p = urlsplit(self.request.url)[2]\n c = memcache.get(p, namespace)\n if c:\n # in case cache is found, use it \n # instead of rendering by calling function.\n out.write(c['body'])\n for k, i in c['hdr'].items():\n resp.headers[k] = i\n return\n\n r = func(*args, **kws)\n expire = self.cache_expires.get(func, 0)\n if expire == 0:\n return\n out.seek(0)\n try:\n p = urlsplit(self.request.url)[2]\n memcache.set(p, {'hdr':resp.headers,'body':out.read()},\n expire, namespace=namespace)\n logging.debug('%s is cahed' % p)\n except:\n memcache.flush_all()\n logging.debug('memcache is flashed.')", "def cache_page(*args, **kwargs):\n if len(args) != 1 or callable(args[0]):\n raise TypeError(\"cache_page has a single mandatory positional argument: timeout\")\n cache_timeout = args[0]\n cache_alias = kwargs.pop('cache', None)\n key_prefix = 
kwargs.pop('key_prefix', None)\n if kwargs:\n raise TypeError(\"cache_page has two optional keyword arguments: cache and key_prefix\")\n\n return decorator_from_middleware_with_args(LocalCacheMiddleware)(\n cache_timeout=cache_timeout, cache_alias=cache_alias, key_prefix=key_prefix\n )" ]
[ "0.59044135", "0.571624", "0.56906414", "0.55681616", "0.5532053", "0.55177927", "0.5490605", "0.54303783", "0.541575", "0.54028773", "0.5383336", "0.5373419", "0.53715813", "0.53494227", "0.53489083", "0.53416514", "0.52813894", "0.5276173", "0.52550995", "0.5205229", "0.52037543", "0.5201268", "0.5197503", "0.5194998", "0.5143358", "0.513292", "0.51317114", "0.51305836", "0.513052", "0.51130766" ]
0.5767859
1
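A hedged registration-and-usage sketch for the compile function above. The row does not show how the tag is registered, so the `cache` tag name and the template usage below are assumptions based on the default endparse='endcache' argument and the docstring's ``{% load cors_cache %}`` hint.

from django import template

register = template.Library()
register.tag('cache', do_cache)  # do_cache is the compile function shown in this row

# Template usage matching the token layout do_cache parses
# (expire seconds, quoted fragment name, optional vary-on args and cache=/links= options):
#   {% load cors_cache %}
#   {% cache 600 "sidebar" request.user.username cache=default %}
#       ...expensive fragment...
#   {% endcache %}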
Outputs a .txt file that contains the names of my songs
def generate_playlist():
    with open(r'C:\Users\adria\OneDrive\Desktop\Muzica.txt', 'w+', encoding='utf-8') as playlist:
        playlist_songs = os.listdir('D:\\Muzica\\')
        for song in playlist_songs:
            playlist.write(song + '\n')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_artist_songs(genius, name, max_songs, out_dir):\n\tartist = genius.search_artist(name, max_songs=max_songs, sort=\"title\")\n\tif artist is not None and artist.songs is not None:\n\t\tsongs = list(filter(lambda x: x is not None, map(lambda x: x.lyrics, artist.songs)))\n\t\twith open(out_dir + name + '.txt', 'w') as f:\n\t\t\tf.write(''.join(songs))\n\t\treturn len(artist.songs)", "def last_50_played_songs_to_txt(username,scope,redirect_uri,client_id,client_secret):\r\n sp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope=scope, \r\n client_id=CLIENT_ID,\r\n client_secret=CLIENT_SECRET,\r\n redirect_uri=redirect_uri,\r\n username = username\r\n ))\r\n \r\n with open('last_50_played_songs.txt','w',encoding=\"utf-8\") as txt:\r\n txt.write('Listing the last ({}) played songs:'.format(len(sp.current_user_recently_played()['items']))+'\\n')\r\n txt.write('-------------------------------------------------------'+'\\n')\r\n txt.write('-------------------------------------------------------'+'\\n')\r\n for i in range(len(sp.current_user_recently_played()['items'])): \r\n txt.write('artist(s): '+str(sp.current_user_recently_played()['items'][i]['track']['artists'][0]['name'])+'\\n')\r\n txt.write('song title: '+str(sp.current_user_recently_played()['items'][i]['track']['name'])+'\\n')\r\n txt.write('album: '+str(sp.current_user_recently_played()['items'][i]['track']['album']['name'])+'\\n')\r\n txt.write('popularity: '+str(sp.current_user_recently_played()['items'][i]['track']['popularity'])+'\\n')\r\n txt.write('href: '+str(sp.current_user_recently_played()['items'][i]['track']['href'])+'\\n')\r\n txt.write('-------------------------------------------------------'+'\\n')\r\n txt.write(str(sp.current_user_recently_played()['items'][i]['played_at'])+'\\n')\r\n txt.write('-------------------------------------------------------'+'\\n')\r\n txt.write(str(sp.current_user_recently_played()['items'][i]['context'])+'\\n')\r\n txt.write('-------------------------------------------------------'+'\\n')\r\n \r\n \r\n return None", "def print_songs(self):\n\t\tfor i,s in enumerate(self._songs):\n\t\t\tprint('{0}. 
{1}'.format(i, s.print_info()))", "def create_checkfile(artist_list):\n\n print(\"Creating checkfile...\")\n\n with open(\"checkfile.txt\", \"w\") as checkfile:\n\n for artist in artist_list:\n print(artist.name)\n for album in artist.albums:\n print(\"\\t\", album.name, album.year)\n for song in album.tracks:\n print(\"\\t\\t\", song.title)\n print(f\"{artist.name}\\t{album.name}\\t{album.year}\\t{song.title}\", file=checkfile)\n\n print(\"Checkfile created.\")\n print()\n print(\"=\" * 40)\n print()", "def saveToTextFile(self, file_name):\n with open(file_name, 'w') as file_obj:\n for item in self.items:\n print(item, file=file_obj)", "def save_to_file(content, song_name):\n file = open(\"./assets/homemade_partitions.txt\", \"a+\")\n # Move to the start of the file\n file.seek(0)\n # Read the total lines\n total_lines = len(file.readlines())\n # Move to the end of the file\n file.seek(0, 2)\n # Write the song's name\n file.write(f\"#{int(total_lines / 2 + 1)} {song_name}\\n\")\n # Write the song's partition\n file.write(content + \"\\n\")\n file.close()", "def song2text(song):\n text = \"\"\n for tup in song:\n if len(tup) == 2:\n f, d = tup\n text += \"%s %s; \" % (_getNoteFromFrequency(f), d)\n elif len(tup) == 3:\n f1, f2, d = tup\n text += \"%s %s %s; \" % (_getNoteFromFrequency(f1),\n _getNoteFromFrequency(f2), d)\n return text", "def write_player_names_to_outfile(self) -> None:\n if not self.outfile:\n return\n\n with open(self.outfile, \"w+\") as outfile:\n # write the player names to the outfile\n outfile.write(f\"{' '.join(self.player_names)}\\n\")", "def create_checkfile(artists):\n with open('./data/checkfile.txt', 'w') as checkfile:\n for new_artist in artists:\n for new_album in new_artist.albums:\n for new_song in new_album.tracks:\n print('{0.name}\\t{1.name}\\t{1.year}\\t{2.title}'.format(new_artist, new_album, new_song), file=checkfile)", "def create_chceckfile(artist_list):\n with open(\"Udemy_Course/Object_Oriented_Programing_and_Classes/OOP_Song_Class/checkfile.txt\", \"w\") as checkfile:\n for new_artist in artist_list:\n for new_album in new_artist.albums:\n for new_song in new_album.tracks:\n print(\"{0.name}\\t{1.name}\\t{1.year}\\t{2.title}\".format\n (new_artist, new_album, new_song), file=checkfile)", "def songInfo():\n \n global songFile, currentRadio\n \n lines = songFile.readlines()\n if len(lines) > 0:\n\n songFile.seek(0)\n title = formatSong(lines[0]).strip()\n \n with canvas(device) as draw:\n invert(draw, 0, 0, names[currentRadio][0], True)\n if len(title)<19:\n draw.text((72-4*(len(title)), 20), title , fill=\"white\")\n else:\n lineNum = len(title)\n if lineNum > 72:\n lineNum = 72\n thelist = [title[i:i+19] for i in range(0, lineNum, 19)]\n for i in range(len(thelist)): \n draw.text((81-4*(len(thelist[i].strip())), 19+10*i), thelist[i] , fill=\"white\")", "def create_checkfile(artist_list):\n with open(\"checkfile4.txt\", 'w') as checkfile: # we are creating new file named checkfile, hence method r for write\n for new_artist in artist_list:\n for new_album in new_artist.albums:\n for new_song in new_album.tracks: # NOTE: we change below from 2.name back to 2.title\n print(\"{0.name}\\t{1.name}\\t{1.year}\\t{2.title}\".format(new_artist, new_album, new_song),\n file=checkfile)\n\n # NOTE: python 2 does not allow print above where you have {0.name} etc\n # To run this pring format in python 2, you need to import print_function at the top of code using:\n # from __future__ import print_function", "def saveSong(song, filename, append = 1):\n if append:\n 
mode = \"w+\"\n else:\n mode = \"w\"\n fp = open(filename, mode) # will append it if it exists\n if type(song) in [list]:\n for tup in song:\n if len(tup) == 2:\n f, d = tup\n fp.write(\"%s %s\\n\" % (_getNoteFromFrequency(f), d))\n elif len(tup) == 3:\n f1, f2, d = tup\n fp.write(\"%s %s %s\\n\" % (_getNoteFromFrequency(f),\n _getNoteFromFrequency(f), d))\n else: # string\n song = song.replace(\"\\n\", \";\")\n lines = song.split(\";\")\n for line in lines:\n fp.write(line + \"\\n\")\n fp.close()", "def getSongTextInfo():\n sids = []\n documents = []\n sFile = open('../txt/two__Lastfm_song_Docs.txt')\n lines = sFile.readlines()\n index = 0\n for line in lines:\n line.strip('\\n')\n line.strip('\\r\\n')\n items = line.split('>>')\n sid = int(items[0])\n text = items[1]\n documents.append(text)\n sids.append(sid)\n sFile.close()\n print 'len = ',len(sids)\n print 'len = ',len(documents)\n return sids,documents", "def dump_pinball_music():\n\texport_sounds(song_labels, os.path.join(conf.path, 'music'), 'Music_')", "def updatesong(song, fpath):\n song.filename = fpath\n song.save()\n return \"[U] %s\\n\" % song.title", "def text_to_file(phase, filename):\n path = \"sons/%s\" % filename # caminho para arquivo\n\n # gera e salva frase pelo gTTS\n voice = gTTS(phase, lang='pt')\n voice.save(path)\n\n return path", "def write_to_file(file, name):\n with open(file, \"a\") as player_list:\n player_list.writelines(name)", "def newtwogfile(ntf_twogs):\n outfile = open(\"Twogs.txt\", \"w\")\n for x in ntf_twogs:\n outfile.write(\"%s\\n\" % x)\n outfile.close()", "def getNames(self):\r\n ListFiles = os.listdir(\"Save\")\r\n centering = \" \"\r\n stringFiles = centering + \"List of {} files in your Save folder : \\n \\n\".format(\r\n \"PVP\" if self.PVP else \"AI\"\r\n )\r\n if self.PVP:\r\n for k in ListFiles:\r\n if self.PVP and \"PVP_mode\" == k[:8]:\r\n realName = k[9:]\r\n stringFiles += \" - \" + realName + \"\\n\"\r\n else:\r\n stringFiles += \" Files where AI is playing white : \\n\"\r\n for k in ListFiles:\r\n if \"AI_mode\" == k[:7] and k[8] == \"B\":\r\n realName = k[8:]\r\n stringFiles += \" - \" + realName + \"\\n\"\r\n stringFiles += \"\\n Files where AI is playing black : \\n\"\r\n for k in ListFiles:\r\n if \"AI_mode\" == k[:7] and k[8] == \"W\":\r\n realName = k[8:]\r\n stringFiles += \" - \" + realName + \"\\n\"\r\n self.existingFiles.setText(stringFiles)", "def create_wav_file(self, ):\n\n f_out = open(self.wav_file, 'w')\n u_utt2spk = open(self.utt2spk, 'w')\n for file in glob.glob(self.wav_folder+'/*.wav'):\n base = os.path.basename(file).split('.')[0]\n # write to scp file\n f_out.write(base + '\\t' + file + '\\n')\n u_utt2spk.write(base + '\\t' + 'tts' + '\\n')", "def write_to_txt(self):\r\n file = open(self.output_path, 'w')\r\n for question_id in self.question_ids:\r\n file.write(self.questions[question_id].question_string+str(self.questions[question_id].answer)+'\\n')\r\n file.close()", "def build_transcript(speaker_label_transcript):\n with open('main_transcript.txt', 'a') as the_file:\n for t in speaker_label_transcript:\n the_file.write(f\"{t['speaker']}:\\n\")\n the_file.write(f\"{t['content']}\\n\\n\")", "def update():\n\tglobal songList\n\tglobal songs\n\tsongList=os.listdir(\"./music/\")\n\tsongs=['```']\n\tfor song in songList:\n\t\tif len(songs[-1])>1800:\n\t\t\tsongs[-1]+='```'\n\t\t\tsongs.append('```')\n\t\tif '.mp3' in song:\n\t\t\tsongs[-1]+=song.replace('.mp3','')\n\t\t\tsongs[-1]+='\\n'\n\tsongs[-1]+='```'", "def write_txt_file(title, abstract, f_out):\n\n 
print(\n '*' * 40,\n '\\n',\n '[Title] {}'.format(title),\n '\\n',\n '[Abstract] {}'.format(abstract),\n file=f_out\n )", "def writeRatingsToSongs(self):\n judgeNotesLogger.info(\"writeRatingsToSongs: Writing file containing songs for each rating\")\n try:\n os.chdir(self.fileDir)\n sortedRatings = sorted(self.ratingsToSongs.keys(), key=float)\n fileName = \"ratingsToSongs_\" + self.judgeName + \".txt\"\n with open(fileName, 'w') as outFile:\n\n # Write out the normal ratings first.\n for rating in sortedRatings:\n songsInRating = self.ratingsToSongs[rating]\n outFile.write(\"[\"+str(rating)+\"/10]\")\n for song in songsInRating:\n if song[2] != \"\":\n outFile.write(\"\\n--> \" + str(song[0]) + \" {\" + str(song[1]) + \"} (\"+str(song[2]) + \")\")\n else:\n outFile.write(\"\\n--> \" + str(song[0]) + \" {\" + str(song[1]) + \"}\")\n outFile.write(\"\\n\\n\")\n\n # Write out the special ratings after.\n sortedRatings = sorted(self.specialRatingsToSongs.keys(), key=str.lower)\n for rating in sortedRatings:\n songsInRating = self.specialRatingsToSongs[rating]\n outFile.write(\"[\"+str(rating)+\"]\")\n for song in songsInRating:\n if song[2] != \"\":\n outFile.write(\"\\n--> \" + str(song[0]) + \" {\" + str(song[1]) + \"} (\"+str(song[2]) + \")\")\n else:\n outFile.write(\"\\n--> \" + str(song[0]) + \" {\" + str(song[1]) + \"}\")\n outFile.write(\"\\n\\n\")\n \n outFile.close()\n judgeNotesLogger.info(\"writeRatingsToSongs: Successfully wrote file '%s'\", fileName)\n except:\n judgeNotesLogger.warning(\"writeRatingsToSongs: {0}: {1}\".format(sys.exc_info()[0].__name__,\n str(sys.exc_info()[1])))", "def gravar():\n frase = input(\"Digite a frase a ser gravada: \")\n filename = frase.replace(\" \", \"\").lower() + '.mp3'\n txt = \"{};{}\\n\".format(frase, filename)\n\n # adiciona texto ao arquivo\n with open('frases', 'a') as file:\n file.write(txt)\n\n play_async(text_to_file(frase, filename))", "def get_file_name(self):\n\n return \"%s - %s\" % (self.get_tags()[\"artist\"], self.get_tags()[\"title\"])", "def PrintMetadata(self):\n def PrintTrack(trackno, track):\n output = [f\"File {str(trackno + 1).zfill(2)}:\"]\n with IgnoreKeyError:\n output.append(f\"Disc {track['disc']}\")\n with IgnoreKeyError:\n output.append(f\"Side {track['side']}\")\n output.append(f\"Track {track['track'].ljust(2)}\")\n with IgnoreKeyError:\n output.append(f\"Phase {track['phase']}\")\n with IgnoreKeyError:\n output.append(f\"Subindex {track['subindex']}\")\n output.append(f\"Time {track['start_time']}\")\n output.append(f'\"{track[\"title\"]}\"')\n with IgnoreKeyError:\n output[-1] = f'{output[-1][:-1]}: {track[\"subtitle\"]}\"'\n print(' '.join(output))\n\n print(self)\n for trackno, track in enumerate(self.tracks):\n PrintTrack(trackno, track)\n filename = self.GetOutputFilename().replace(ext.WAV, ext.MKA)\n print(\"Filename:\", filename)", "def detect_netease_music_name(file_path, dist_path, KEEP_SOURCE=True):\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0\"\n }\n url_base = \"http://music.163.com/api/song/detail/?id={}&ids=[{}]\"\n\n if not os.path.exists(dist_path):\n os.mkdir(dist_path)\n\n for file_name in os.listdir(file_path):\n if not file_name.endswith(\".mp3\"):\n continue\n if not len(file_name.split(\"-\")) == 3:\n print(\n \">>>> File %s not in format <song id>-<bite rate>-<random number>.mp3\"\n % (file_name)\n )\n continue\n\n try:\n song_id = file_name.split(\"-\")[0]\n url_target = url_base.format(song_id, song_id)\n resp = 
requests.get(url_target, headers=headers)\n rr = json.loads(resp.text)\n\n tt = eyed3.load(os.path.join(file_path, file_name))\n tt.tag.title = rr[\"songs\"][0][\"name\"].replace(\"\\xa0\", \" \")\n tt.tag.artist = rr[\"songs\"][0][\"artists\"][0][\"name\"]\n tt.tag.album = rr[\"songs\"][0][\"album\"][\"name\"]\n tt.tag.album_artist = rr[\"songs\"][0][\"album\"][\"artists\"][0][\"name\"]\n print(\n \"song_id = %s, tt.tag title = %s, artist = %s, album = %s, album_artist = %s\"\n % (\n song_id,\n tt.tag.title,\n tt.tag.artist,\n tt.tag.album,\n tt.tag.album_artist,\n )\n )\n tt.tag.save()\n except UnicodeEncodeError as e:\n print(\n \">>>> UnicodeEncodeError, try again later: file_name = %s, error = %s\"\n % (file_name, str(e))\n )\n continue\n except:\n print(\">>>> Some other error happens: file_name = %s\" % (file_name))\n continue\n\n dist_name = (\n os.path.join(\n dist_path,\n \"%s - %s\"\n % (tt.tag.artist.replace(\"/\", \" \"), tt.tag.title.replace(\"/\", \" \")),\n )\n + \".mp3\"\n )\n \n if KEEP_SOURCE == True:\n shutil.copyfile(os.path.join(file_path, file_name), dist_name)\n else:\n os.rename(os.path.join(file_path, file_name), dist_name)" ]
[ "0.69582075", "0.68970346", "0.678131", "0.6512336", "0.648723", "0.6459645", "0.63525665", "0.6315502", "0.6313916", "0.628889", "0.6231965", "0.6108531", "0.61006314", "0.60837126", "0.6061707", "0.6027118", "0.60271174", "0.60226905", "0.5982154", "0.59551275", "0.5949267", "0.59329003", "0.59185886", "0.5917795", "0.58936924", "0.585445", "0.58116114", "0.579759", "0.57962376", "0.5747023" ]
0.77204114
0
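A slightly more reusable variant of the function above, offered only as a sketch: the source and destination paths become parameters instead of the hard-coded ones in the row, but the behaviour is otherwise the same.

import os

def write_playlist(music_dir, playlist_path):
    with open(playlist_path, 'w+', encoding='utf-8') as playlist:
        for song in os.listdir(music_dir):
            playlist.write(song + '\n')

# Reproduces the original behaviour on the author's machine:
# write_playlist('D:\\Muzica\\', r'C:\Users\adria\OneDrive\Desktop\Muzica.txt')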
Get the processes table from an overcloud node.
def get_overcloud_node_processes_table(ssh_client: ssh.SSHClientType):
    output = sh.execute(
        "ps -axw -o \"%U\" -o \"DELIM%p\" -o \"DELIM%P\" -o \"DELIM%C\" -o "
        "\"DELIM%z\" -o \"DELIM%x\" -o \"DELIM%c\" -o \"DELIM%a\" |grep -v "
        "ps|sed 's/\"/''/g'",
        ssh_client=ssh_client).stdout
    stream = io.StringIO(output)
    table: pandas.DataFrame = pandas.read_csv(
        stream, sep='DELIM', header=None, skiprows=1)
    table.replace(to_replace=' ', value="", regex=True, inplace=True)
    table.columns = ['USER', 'PID', 'PPID', 'CPU', 'VSZ', 'TIME', 'PROCESS',
                     'PROCESS_ARGS']
    # pylint: disable=unsupported-assignment-operation
    hostname = sh.get_hostname(ssh_client=ssh_client)
    table['overcloud_node'] = hostname
    LOG.debug("Successfully got overcloud nodes processes status table")
    return table
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_tables(self) -> pd.DataFrame:\n return self.server._execute_extract(\n \"SELECT tablename FROM pg_catalog.pg_tables WHERE schemaname = '{}'\".format(\n self.name\n )\n )", "def get_tables(self):\n logging.debug(f\"\"\"get_tables\"\"\")\n conn = self.connect(cxRepo)\n sql = f\"\"\"select table_name,server1_select,server2_select,schema1,\n schema2,tips from {self.schemaRepo}.tablediff\n where step = 0 and result = 'init' order by id\"\"\"\n with conn:\n with conn.cursor() as curs:\n try:\n curs.execute(sql)\n except conn.DatabaseError as exc:\n error, = exc.args\n logging.error(f\"\"\"error executing {sql} : {error}\"\"\")\n rows = curs.fetchall()\n return rows", "async def test_tornado_list_tables(self):\n\n tables = self.r.table_list().run(self.conn)\n assert isinstance(tables, list)", "def main():\n mvip, user, user_pass, mvip_node = get_inputs()\n headers, url = build_auth(mvip, user, user_pass, mvip_node)\n payload = build_payload()\n response_json = connect_cluster(headers, url, payload)\n account_table = create_table(response_json)\n print(account_table)", "def list_tables(service):\n r = _post(service)\n if 'tables' in r:\n return [table(p) for p in r['tables']]\n return None", "def load_status_table():", "def get_overcloud_nodes_running_process(process):\n oc_procs_df = overcloud.get_overcloud_nodes_dataframe(\n get_overcloud_node_processes_table)\n oc_nodes_running_process = oc_procs_df.query('PROCESS==\"{}\"'.format(\n process))['overcloud_node'].unique()\n return oc_nodes_running_process", "def show_tablespaces(self):\n sql = \"SELECT TABLESPACE_NAME FROM DBA_TABLESPACES WHERE CONTENTS <> 'TEMPORARY' ORDER BY 1\"\n self.cur.execute(sql)\n res = self.cur.fetchall()\n key = ['{#TABLESPACE}']\n lst = []\n for i in res:\n d = dict(zip(key, i))\n lst.append(d)\n print(json.dumps({'data': lst}))", "def get_tables(self, db_name):\n pass", "def processor():\n conn = pymssql.connect(server, user, password, \"database\")\n cursor = conn.cursor()\n\n cursor.execute('SELECT COUNT(*) FROM Processor')\n num=cursor.fetchone()[0]\n\n cursor.execute('SELECT Processor.Processor_Id, Processor.Name, Processor.CoreCount, Processor.Architecture_nm, Company.Name, Component.year\\\n FROM Processor JOIN Component on Processor.Processor_Id=Component.Component_Id\\\n join Company on Company.Company_Id =Component.Company_Id') \n row = cursor.fetchone() \n items=[None]*num\n i=0\n while row:\n items[i]=Processor(row[0], row[1], row[2], row[3], row[4], row[5])\n i+=1\n row = cursor.fetchone()\n conn.close()\n\n return render_template(\n 'table.html',\n title='Processor',\n table=ProcessorTable(items),\n )", "def select_all_topologies(conn):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM topologies_topology\")\n \n rows = cur.fetchall()\n \n for row in rows:\n print(row)", "def _get_global_table_all_regions(table_name: str) -> List[dict]:\n description = _describe_table(table_name=table_name)\n replicas = description['Table'].get('Replicas', [])\n return replicas", "def _getTables(self, oids):\n repetitions = self._maxOidsPerRequest / len(oids)\n t = self.snmpProxy.getTable(oids,\n timeout=self.snmpConnInfo.zSnmpTimeout,\n retryCount=self.snmpConnInfo.zSnmpTries,\n maxRepetitions=repetitions,\n limit=sys.maxint)\n return t", "def getTable(table):\n\n return session.query(table).all()", "def get_table():\n response = dict(blocks=[])\n block_ids = DB.get_sched_block_instance_ids()\n for index, block_id in enumerate(block_ids):\n block = DB.get_block_details([block_id]).__next__()\n info = [\n 
index,\n block['id'],\n block['sub_array_id'],\n len(block['processing_blocks'])\n ]\n response['blocks'].append(info)\n return response, HTTPStatus.OK", "def connect():\n conn = None\n try:\n conn = psycopg2.connect(\n host=\"db\",\n database=\"app_db\",\n user=\"app_user\",\n password=\"app_pass\")\n cur = conn.cursor()\n datalist = []\n# cur.execute('SELECT version()')\n cur.execute('CREATE TABLE IF NOT EXISTS apptable \\\n (id SERIAL PRIMARY KEY, value VARCHAR(255) NOT NULL)')\n sql = f\"INSERT INTO apptable (value) VALUES \\\n ({ res_text['main']['temp'] })\"\n cur.execute(sql)\n\n cur.execute('SELECT * FROM apptable')\n rows = cur.fetchall()\n for row in rows:\n datalist.append(row)\n conn.commit()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\n return datalist", "def tables(self):\n result = self.execute(self.commands.get_tables(self.name))\n return [x[0] for x in result]", "def get_tables(self):\n\t\tbuild = 'SELECT * FROM pg_catalog.pg_tables WHERE schemaname != \\'pg_catalog\\' AND schemaname != \\'information_schema\\';'\n\t\tself.cur.execute(build)\n\t\ttotal = self.cur.fetchall()\n\t\ttable_list = []\n\t\tfor a in total:\n\t\t\ttable_list.append(a[1])\n\t\treturn table_list", "def db_get_all_tasks():\n sql = \"SELECT * FROM {};\".format(TABLE_NAME)\n return db_query(sql)", "def __read_all_tables( self, dbfile, iteration=2000 ): #limit=None ):\n conn = sql3.connect( dbfile )\n tnames = pd.read_sql(\"SELECT name FROM sqlite_master WHERE type='table' ORDER BY name\", conn)\n tables = {}\n for tname in tnames.name.values:\n #print tname\n tmp = pd.read_sql( 'select * from %s limit 3' % tname, conn )\n if tname != 'motif_infos' and 'iteration' in tmp.columns.values.tolist():\n query = 'select * from %s where iteration=' + str(iteration)\n else:\n query = 'select * from %s'\n table = pd.read_sql(query % tname, conn)\n if tname == 'motif_infos':\n table = table[ table.iteration == iteration ]\n tables[ tname ] = table\n\n conn.close()\n table = tables[ 'meme_motif_sites' ]\n table = table.ix[ np.in1d( table.motif_info_id, tables[ 'motif_infos' ].index.values ) ]\n tables[ 'meme_motif_sites' ] = table\n return tables", "def run(port, like):\n ports = get_ports(port, like)\n table = get_table(ports)\n print(table)", "def get_psql(npi_list):\n\n npi_dict = {}\n conn = pg2.connect(dbname='medicare', user='postgres')\n cur = conn.cursor()\n\n for dr in npi_list:\n query = \"SELECT hcpcs_desc FROM util_payments_2013 WHERE npi='{0}';\".format(dr)\n cur.execute(query)\n npi_dict[dr] = cur.fetchall()\n \n return npi_dict", "def process(self, device, results, log):\n log.info('processing %s for device %s', self.name(), device.id)\n getdata, tabledata = results\n \n ltmnode_table = tabledata.get(\"ltmNodeAddrTable\")\n \n # Grab the second table and append it to the first\n status_table = tabledata.get(\"ltmNodeStatusTable\")\n for oid, data in status_table.items():\n for key, value in data.items():\n if key not in ltmnode_table[oid]:\n ltmnode_table[oid][key] = value\n \n maps = []\n rm = self.relMap()\n # Get the list of name patterns to search for\n node_name_filter = getattr(device, 'zF5BigipNodesNameFilter', None)\n log.debug(\"Picked up Filter List of: %s\" , node_name_filter)\n for oid, data in ltmnode_table.items():\n # log.debug(\"%s : %s\\n\", oid, data)\n #\n om = self.objectMap(data)\n binclude = True\n if node_name_filter != None and node_name_filter != \"\":\n # If there is a regex filter supplied, lets use 
it\n if re.search(node_name_filter, om.ltmNodeAddrScreenName) == None:\n binclude = False\n if binclude == True:\n # The value fetched is a packed hex representation of the IP\n # Try and unpack the address, and check if route_domains\n # are in use\n address, route_domain = unpack_address_to_string(oid, \n om.ltmNodeAddrAddr)\n if address != \"\":\n om.ltmNodeAddrAddr = address\n if route_domain != \"\":\n om.ltmNodeAddrRouteDomain = route_domain\n om.id = self.prepId(om.ltmNodeAddrAddr)\n om.snmpindex = oid\n\n om.ltmNodeAddrStatusEnabledState = \\\n enable_state_values[om.ltmNodeAddrStatusEnabledState]\n om.ltmNodeAddrStatusAvailState = \\\n avail_status_values[om.ltmNodeAddrStatusAvailState]\n rm.append(om)\n log.debug(rm)\n return [rm]", "def sys_service_memory():\n sort_cmd = [\"sort\", \"-k\", \"2nr\"]\n\n p_table = prettytable.PrettyTable(\n ['Service',\n 'Resident Set Size (MiB)',\n ], caching=False)\n p_table.align = 'l'\n p_table.align['Resident Set Size (MiB)'] = 'r'\n\n try:\n output = pipe_command(GREP_CMD, AWK_CMD, sort_cmd,\n cwd=MEMPATH + \"system.slice\")\n LOG.debug(\n 'command: %s\\n%s',\n ' '.join(GREP_CMD + [MEMPATH] + AWK_CMD + sort_cmd), output)\n except subprocess.CalledProcessError as error:\n LOG.error('Could not get total_rss memory, error=%s', error)\n return 1\n\n for line in output.split(\"\\n\"):\n service = line.split(\"memory.stat:total_rss \")[0]\n rss_mem = line.split(\"memory.stat:total_rss \")[-1]\n p_table.add_row(\n [service,\n mem_to_mebibytes(rss_mem),\n ])\n\n # Delete first row wich display total system.slice rss\n p_table.del_row(0)\n return p_table", "def get_my_tables(self):\n qnum = self.master('sql', att={'type': 'table'}) # it's a Table._call_() function call\n if self.run():\n return (self.table_factory(self.get_table_info(result[0])) for result in self.results[qnum])\n else:\n print('An error has occurred when initializing the database.')", "def batch_looper(dimacontainer, pg=False):\n d = db('dima')\n tablelist = None\n while tablelist is None:\n print('gathering tables within dimas..')\n tablelist = table_collector(dimacontainer)\n else:\n print('creating csvs for each table..')\n for table in tablelist:\n if pg!=True:\n looper(dimacontainer, table, csv=True)\n else:\n df = looper(dimacontainer,table,csv=False)\n if 'ItemType' in df.columns:\n # if one of the non-vegetation bsne tables, use 'new_tablename' ,\n # function to produce a new tablename: 'tblHorizontalFlux' or\n # 'tblDustDeposition'\n newtablename = new_tablename(df)\n if tablecheck(newtablename):\n print('MWACK')\n ingesterv2.main_ingest(df, newtablename, d.str, 10000)\n else:\n table_create(df, newtablename, 'dima')\n print('llegue a 2')\n ingesterv2.main_ingest(df, newtablename, d.str, 10000)\n\n else:\n print(\"NOT A HORFLUX TABLE\")\n newtablename = table\n if tablecheck(table):\n print(\"FOUND THE TABLE IN PG\")\n ingesterv2.main_ingest(df, newtablename, d.str, 10000)\n\n else:\n print(\"DID NOT FIND TABLE IN PG, CREATING...\")\n table_create(df, table, 'dima')\n ingesterv2.main_ingest(df, newtablename, d.str, 10000)", "def top(name):\n\n try:\n container = CLIENT.containers.get(name)\n print(str(pd.DataFrame(data=container.top()['Processes'],\n columns=container.top()['Titles'])))\n except (docker.errors.NotFound, docker.errors.APIError) as err:\n print(err)", "def run(self):\n with open(self.source_file) as file:\n for index, mem_access in enumerate(file):\n access_type = mem_access.split(' ')[0]\n address = int(mem_access.split(' ')[1], 16)\n 
self.page_table.query(address, access_type, index)\n return {\"memory_accesses\": self.mem_accesses,\n \"page_faults\": self.page_table.page_faults,\n \"writes_to_disk\": self.page_table.writes_to_disk}", "def get_table_list(self):\n # the \\\"{{}}\\\" is where the sql command will be added via a second `.format()`\n container_command = \"docker exec {} sh -c \\\"{{}}\\\"\".format(self.mysql_container)\n sql_command = \"mysql {} --execute='SHOW TABLES FROM {};'\".format(self.mysql_credentials, self.database_name)\n table_list = self.shell(container_command.format(sql_command))\n table_list = table_list.split(\"\\n\")\n assert table_list[0] == \"Tables_in_{}\".format(self.database_name)\n return table_list[1:]", "def vm_table_view(vlab_api, info):\n vm_body = []\n vm_header = ['Name', 'IPs', 'Type', 'Version', 'Powered', 'Networks']\n for vm, data in info.items():\n body = {'url': data['console']}\n network = data.get('networks', ['?'])\n kind = data['meta']['component']\n version = data['meta']['version']\n power = data['state'].replace('powered', '')\n row = [vm, '\\n'.join(data['ips']), kind, version, power, ','.join(network)]\n vm_body.append(row)\n if not vm_body:\n table = None\n else:\n table = tabulate(vm_body, headers=vm_header, tablefmt='presto')\n return table" ]
[ "0.5725424", "0.5656669", "0.559338", "0.5493022", "0.5485279", "0.54482675", "0.54397696", "0.54324937", "0.5425778", "0.54190934", "0.54075503", "0.53506416", "0.5315462", "0.5313187", "0.5285384", "0.52728575", "0.5259367", "0.5251954", "0.52515304", "0.52458316", "0.52287155", "0.52163064", "0.52150434", "0.5174113", "0.5162899", "0.5157383", "0.51559466", "0.51303726", "0.5122471", "0.5121655" ]
0.7119928
0
Checks that the oc_procs_df dataframe has OVN processes running on the expected overcloud node or nodes
def ovn_overcloud_processes_validations(self): if not neutron.has_ovn(): LOG.info("Networking OVN not configured") return True for process_dict in self.ovn_processes_to_check_per_node: if not self.oc_procs_df.query('PROCESS=="{}"'.format( process_dict['name'])).empty: LOG.info("overcloud processes status checks: " f"process {process_dict['name']} is " "in running state") ovn_proc_filtered_df = self.oc_procs_df.query( 'PROCESS=="{}"'.format(process_dict['name'])) if (process_dict['node_group'] not in topology.list_openstack_node_groups()): LOG.debug(f"{process_dict['node_group']} is not " "a node group part of this Openstack cloud") continue node_list = [node.name for node in topology.list_openstack_nodes( group=process_dict['node_group'])] node_names_re = re.compile(r'|'.join(node_list)) node_filter = (ovn_proc_filtered_df.overcloud_node. str.match(node_names_re)) # obtain the processes running on a specific type of nodes ovn_proc_filtered_per_node_df = \ ovn_proc_filtered_df[node_filter] if type(process_dict['number']) == int: assert process_dict['number'] == \ len(ovn_proc_filtered_per_node_df), ( "Unexpected number" f" of processes {process_dict['name']} running on " f"{process_dict['node_group']} nodes") elif process_dict['number'] == 'all': num_nodes = len(node_list) assert num_nodes == len(ovn_proc_filtered_per_node_df), ( "Unexpected number of processes " f"{process_dict['name']} running on " f"{process_dict['node_group']} nodes") else: raise RuntimeError("Unexpected value:" f"{process_dict['node_group']}") # process successfully validated LOG.debug(f"{process_dict['name']} successfully validated on " f"{process_dict['node_group']} nodes") # if all procs are running we can return true return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_if_process_running_on_overcloud(process):\n oc_procs_df = overcloud.get_overcloud_nodes_dataframe(\n get_overcloud_node_processes_table)\n if not oc_procs_df.query('PROCESS==\"{}\"'.format(process)).empty:\n return True\n else:\n return False", "def basic_overcloud_processes_running(self):\n for attempt_number in range(600):\n\n try:\n\n for process_name in self.processes_to_check:\n # osp16/python3 process is \"neutron-server:\"\n if process_name == 'neutron-server' and \\\n self.oc_procs_df.query('PROCESS==\"{}\"'.format(\n process_name)).empty:\n process_name = 'neutron-server:'\n # osp17 mysqld process name is mysqld_safe\n if process_name == 'mysqld' and \\\n self.oc_procs_df.query('PROCESS==\"{}\"'.format(\n process_name)).empty:\n process_name = 'mysqld_safe'\n # redis not deployed on osp17 by default, only if some\n # other services such as designate and octavia are deployed\n if (process_name == 'redis-server' and\n not overcloud.is_redis_expected()):\n redis_message = (\"redis-server not expected on OSP 17 \"\n \"and later releases by default\")\n if self.oc_procs_df.query(\n f'PROCESS==\"{process_name}\"').empty:\n LOG.info(redis_message)\n continue\n else:\n raise OvercloudProcessesException(\n process_error=redis_message)\n\n if not self.oc_procs_df.query('PROCESS==\"{}\"'.format(\n process_name)).empty:\n LOG.info(\"overcloud processes status checks: \"\n \"process {} is \"\n \"in running state\".format(process_name))\n continue\n else:\n LOG.info(\"Failure : overcloud processes status checks:\"\n \"process {} is not running \".format(\n process_name))\n raise OvercloudProcessesException(\n process_error=\"process {} is not running \".format(\n process_name))\n # if all procs are running we can return true\n return True\n except OvercloudProcessesException:\n LOG.info('Retrying overcloud processes checks attempt '\n '{} of 360'.format(attempt_number))\n time.sleep(1)\n self.oc_procs_df = overcloud.get_overcloud_nodes_dataframe(\n get_overcloud_node_processes_table)\n # exhausted all retries\n tobiko.fail('Not all overcloud processes are running !\\n')", "def get_overcloud_nodes_running_process(process):\n oc_procs_df = overcloud.get_overcloud_nodes_dataframe(\n get_overcloud_node_processes_table)\n oc_nodes_running_process = oc_procs_df.query('PROCESS==\"{}\"'.format(\n process))['overcloud_node'].unique()\n return oc_nodes_running_process", "def verify_vn_in_control_nodes(self):\n self.api_s_route_targets = self.api_s_inspect.get_cs_route_targets(\n vn_id=self.uuid)\n\n self.cn_verification_flag = True\n for cn in self.inputs.bgp_ips:\n cn_config_vn_obj = self.cn_inspect[cn].get_cn_config_vn(\n vn_name=self.vn_name, project=self.project_name, domain=self.domain_name)\n if not cn_config_vn_obj:\n self.logger.warn('Control-node %s does not have VN %s info ' %\n (cn, self.vn_name))\n self.cn_verification_flag = self.cn_verification_flag and False\n return False\n self.logger.debug(\"Control-node %s : VN object is : %s\" %\n (cn, cn_config_vn_obj))\n if self.vn_fq_name not in cn_config_vn_obj['node_name']:\n self.logger.debug(\n 'IFMAP View of Control-node does not yet have the VN detail',\n ' of %s' % (self.vn_fq_name))\n self.cn_verification_flag = self.cn_verification_flag and False\n return False\n # TODO UUID verification to be done once the API is available\n cn_object = self.cn_inspect[\n cn].get_cn_routing_instance(ri_name=self.ri_name)\n if not cn_object:\n self.logger.debug(\n 'No Routing Instance found in CN %s with name %s' %\n (cn, self.ri_name))\n 
self.cn_verification_flag = self.cn_verification_flag and False\n return False\n try:\n rt_names = self.api_s_inspect.get_cs_rt_names(\n self.api_s_route_targets)\n if cn_object['export_target'][0] not in rt_names:\n self.logger.debug(\n \"Route target %s for VN %s is not found in Control-node %s\" %\n (rt_names, self.vn_name, cn))\n self.cn_verification_flag = self.cn_verification_flag and False\n return False\n except Exception as e:\n self.logger.exception(\n \"Got exception from control node verification as %s\" % (e))\n self.cn_verification_flag = self.cn_verification_flag and False\n return False\n # end for\n self.logger.info(\n 'On all control nodes, Config, RI and RT verification for VN %s '\n 'passed' % (self.vn_name))\n self.cn_verification_flag = self.cn_verification_flag and True\n return True", "def check_if_cluster_was_upgraded():\n return True if \"replaces\" in get_ocs_csv().get().get(\"spec\") else False", "def instanceha_deployed():\n if overcloud.has_overcloud():\n return get_overcloud_nodes_running_pcs_resource(\n resource='nova-evacuate')\n else:\n return False", "def get_overcloud_node_processes_table(ssh_client: ssh.SSHClientType):\n\n output = sh.execute(\n \"ps -axw -o \\\"%U\\\" -o \\\"DELIM%p\\\" -o \\\"DELIM%P\\\" -o \\\"DELIM%C\\\" -o \"\n \"\\\"DELIM%z\\\" -o \\\"DELIM%x\\\" -o \\\"DELIM%c\\\" -o \\\"DELIM%a\\\" |grep -v \"\n \"ps|sed 's/\\\"/''/g'\",\n ssh_client=ssh_client).stdout\n stream = io.StringIO(output)\n table: pandas.DataFrame = pandas.read_csv(\n stream, sep='DELIM', header=None, skiprows=1)\n table.replace(to_replace=' ', value=\"\", regex=True, inplace=True)\n table.columns = ['USER', 'PID', 'PPID', 'CPU', 'VSZ', 'TIME', 'PROCESS',\n 'PROCESS_ARGS']\n # pylint: disable=unsupported-assignment-operation\n hostname = sh.get_hostname(ssh_client=ssh_client)\n table['overcloud_node'] = hostname\n\n LOG.debug(\"Successfully got overcloud nodes processes status table\")\n return table", "def check_all_critical_processes_running(duthost):\n processes_status = duthost.all_critical_process_status()\n for container_name, processes in processes_status.items():\n if processes[\"status\"] is False or len(processes[\"exited_critical_process\"]) > 0:\n return False\n\n return True", "def validate_openvpn_pid(result):\n for ps in result:\n if 'openvpn --daemon' in ps:\n print 'OpenVPN Process - OK'\n return True\n print 'OpenVPN Process - DOWN'\n return False", "def oci_compute_attack_surface_open_tcp_port_check(cache, awsAccountId, awsRegion, awsPartition, ociTenancyId, ociUserId, ociRegionName, ociCompartments, ociUserApiKeyFingerprint):\n # ISO Time\n iso8601Time = datetime.datetime.now(datetime.timezone.utc).isoformat()\n for instance in get_oci_compute_instances(cache, ociTenancyId, ociUserId, ociRegionName, ociCompartments, ociUserApiKeyFingerprint):\n # B64 encode all of the details for the Asset\n assetJson = json.dumps(instance,default=str).encode(\"utf-8\")\n assetB64 = base64.b64encode(assetJson)\n instanceId = instance[\"id\"]\n instanceName = instance[\"display_name\"]\n compartmentId = instance[\"compartment_id\"]\n imageId = instance[\"image_id\"]\n shape = instance[\"shape\"]\n lifecycleState = instance[\"lifecycle_state\"]\n # Get the VNIC info\n instanceVnic = get_compute_instance_vnic(ociTenancyId, ociUserId, ociRegionName, ociUserApiKeyFingerprint, compartmentId, instanceId)\n # Skip over instances that are not public\n pubIp = instanceVnic[\"public_ip\"]\n if instanceVnic[\"public_ip\"] is None:\n continue\n # Submit details to the scanner 
function\n scanner = scan_host(pubIp, instanceName, \"OCI Cloud Compute instance\")\n # NoneType returned on KeyError due to Nmap errors\n if scanner == None:\n continue\n else:\n # Loop the results of the scan - starting with Open Ports which require a combination of\n # a Public Instance, an open SG rule, and a running service/server on the host itself\n # use enumerate and a fixed offset to product the Check Title ID number\n for index, p in enumerate(scanner[pubIp][\"ports\"]):\n # Parse out the Protocol, Port, Service, and State/State Reason from NMAP Results\n checkIdNumber = str(int(index + 1))\n portNumber = int(p[\"portid\"])\n if portNumber == 8089:\n serviceName = 'SPLUNKD'\n elif portNumber == 10250:\n serviceName = 'KUBERNETES-API'\n elif portNumber == 5672:\n serviceName = 'RABBITMQ'\n elif portNumber == 4040:\n serviceName = 'SPARK-WEBUI'\n else:\n try:\n serviceName = str(p[\"service\"][\"name\"]).upper()\n except KeyError:\n serviceName = \"Unknown\"\n serviceStateReason = str(p[\"reason\"])\n serviceState = str(p[\"state\"])\n # This is a failing check\n if serviceState == \"open\":\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{instanceId}/oci-attack-surface-compute-instance-open-{serviceName}-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{instanceId}/oci-attack-surface-compute-instance-open-{serviceName}-check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\n \"Software and Configuration Checks/AWS Security Best Practices/Network Reachability\",\n \"TTPs/Discovery\"\n ],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"HIGH\"},\n \"Confidence\": 99,\n \"Title\": f\"[AttackSurface.OCI.ComputeInstance.{checkIdNumber}] Cloud Compute instances should not be publicly reachable on {serviceName}\",\n \"Description\": f\"Oracle Cloud Compute instance {instanceName} in Compartment {compartmentId} in {ociRegionName} is publicly reachable on port {portNumber} which corresponds to the {serviceName} service. When Services are successfully fingerprinted by the ElectricEye Attack Surface Management Auditor it means the instance is public (mapped 'public_ip` in the associated vNIC), has an open Security List or Network Security Group, and a running service on the host which adversaries can also see. Refer to the remediation insturctions for an example of a way to secure OCI Cloud Compute instances.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"OCI Cloud Compute instances should only have the minimum necessary ports open to achieve their purposes, allow traffic from authorized sources, and use other defense-in-depth and hardening strategies. 
For a basic view on traffic authorization into your instances refer to the Public IP Addresses section of the Oracle Cloud Infrastructure Documentation for Networks.\",\n \"Url\": \"https://docs.oracle.com/en-us/iaas/Content/Network/Tasks/managingpublicIPs.htm#Public_IP_Addresses\"\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"OCI\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": ociTenancyId,\n \"AssetRegion\": ociRegionName,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Compute\",\n \"AssetService\": \"Oracle Cloud Compute\",\n \"AssetComponent\": \"Instance\"\n },\n \"Resources\": [\n {\n \"Type\": \"OciCloudComputeInstance\",\n \"Id\": instanceId,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"TenancyId\": ociTenancyId,\n \"CompartmentId\": compartmentId,\n \"Region\": ociRegionName,\n \"Name\": instanceName,\n \"Id\": instanceId,\n \"ImageId\": imageId,\n \"Shape\": shape,\n \"LifecycleState\": lifecycleState\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"FAILED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.AC-3\",\n \"NIST SP 800-53 Rev. 4 AC-1\",\n \"NIST SP 800-53 Rev. 4 AC-17\",\n \"NIST SP 800-53 Rev. 4 AC-19\",\n \"NIST SP 800-53 Rev. 4 AC-20\",\n \"NIST SP 800-53 Rev. 4 SC-15\",\n \"AICPA TSC CC6.6\",\n \"ISO 27001:2013 A.6.2.1\",\n \"ISO 27001:2013 A.6.2.2\",\n \"ISO 27001:2013 A.11.2.6\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.2.1\",\n \"MITRE ATT&CK T1040\",\n \"MITRE ATT&CK T1046\",\n \"MITRE ATT&CK T1580\",\n \"MITRE ATT&CK T1590\",\n \"MITRE ATT&CK T1592\",\n \"MITRE ATT&CK T1595\"\n ]\n },\n \"Workflow\": {\"Status\": \"NEW\"},\n \"RecordState\": \"ACTIVE\"\n }\n yield finding\n else:\n finding = {\n \"SchemaVersion\": \"2018-10-08\",\n \"Id\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{instanceId}/oci-attack-surface-compute-instance-open-{serviceName}-check\",\n \"ProductArn\": f\"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default\",\n \"GeneratorId\": f\"{ociTenancyId}/{ociRegionName}/{compartmentId}/{instanceId}/oci-attack-surface-compute-instance-open-{serviceName}-check\",\n \"AwsAccountId\": awsAccountId,\n \"Types\": [\n \"Software and Configuration Checks/AWS Security Best Practices/Network Reachability\",\n \"TTPs/Discovery\"\n ],\n \"FirstObservedAt\": iso8601Time,\n \"CreatedAt\": iso8601Time,\n \"UpdatedAt\": iso8601Time,\n \"Severity\": {\"Label\": \"INFORMATIONAL\"},\n \"Confidence\": 99,\n \"Title\": f\"[AttackSurface.OCI.ComputeInstance.{checkIdNumber}] Cloud Compute instances should not be publicly reachable on {serviceName}\",\n \"Description\": f\"Oracle Cloud Compute instance {instanceName} in Compartment {compartmentId} in {ociRegionName} is not publicly reachable on port {portNumber} which corresponds to the {serviceName} service due to {serviceStateReason}. OCI Cloud Compute instances and their respective Security Lists and/or Network Security Groups should still be reviewed for minimum necessary access.\",\n \"Remediation\": {\n \"Recommendation\": {\n \"Text\": \"OCI Cloud Compute instances should only have the minimum necessary ports open to achieve their purposes, allow traffic from authorized sources, and use other defense-in-depth and hardening strategies. 
For a basic view on traffic authorization into your instances refer to the Public IP Addresses section of the Oracle Cloud Infrastructure Documentation for Networks.\",\n \"Url\": \"https://docs.oracle.com/en-us/iaas/Content/Network/Tasks/managingpublicIPs.htm#Public_IP_Addresses\"\n }\n },\n \"ProductFields\": {\n \"ProductName\": \"ElectricEye\",\n \"Provider\": \"OCI\",\n \"ProviderType\": \"CSP\",\n \"ProviderAccountId\": ociTenancyId,\n \"AssetRegion\": ociRegionName,\n \"AssetDetails\": assetB64,\n \"AssetClass\": \"Compute\",\n \"AssetService\": \"Oracle Cloud Compute\",\n \"AssetComponent\": \"Instance\"\n },\n \"Resources\": [\n {\n \"Type\": \"OciCloudComputeInstance\",\n \"Id\": instanceId,\n \"Partition\": awsPartition,\n \"Region\": awsRegion,\n \"Details\": {\n \"Other\": {\n \"TenancyId\": ociTenancyId,\n \"CompartmentId\": compartmentId,\n \"Region\": ociRegionName,\n \"Name\": instanceName,\n \"Id\": instanceId,\n \"ImageId\": imageId,\n \"Shape\": shape,\n \"LifecycleState\": lifecycleState\n }\n }\n }\n ],\n \"Compliance\": {\n \"Status\": \"PASSED\",\n \"RelatedRequirements\": [\n \"NIST CSF V1.1 PR.AC-3\",\n \"NIST SP 800-53 Rev. 4 AC-1\",\n \"NIST SP 800-53 Rev. 4 AC-17\",\n \"NIST SP 800-53 Rev. 4 AC-19\",\n \"NIST SP 800-53 Rev. 4 AC-20\",\n \"NIST SP 800-53 Rev. 4 SC-15\",\n \"AICPA TSC CC6.6\",\n \"ISO 27001:2013 A.6.2.1\",\n \"ISO 27001:2013 A.6.2.2\",\n \"ISO 27001:2013 A.11.2.6\",\n \"ISO 27001:2013 A.13.1.1\",\n \"ISO 27001:2013 A.13.2.1\",\n \"MITRE ATT&CK T1040\",\n \"MITRE ATT&CK T1046\",\n \"MITRE ATT&CK T1580\",\n \"MITRE ATT&CK T1590\",\n \"MITRE ATT&CK T1592\",\n \"MITRE ATT&CK T1595\"\n ]\n },\n \"Workflow\": {\"Status\": \"RESOLVED\"},\n \"RecordState\": \"ARCHIVED\"\n }\n yield finding", "def has_nodes_for_process_type(self, process_type):\n return self.get_nodes_for_process_type(process_type).exists()", "def check_process(dbcur, process_id):\n sql = \"\"\"select * from process where id = '\"\"\" + process_id + \"\"\"'\"\"\"\n dbcur.execute(sql)\n result = dbcur.fetchall()\n\n return len(result) == 0", "def has_nodes_for_process(self, uuid, clean=True):\n return self.get_nodes_for_process(uuid, clean).exists()", "def test_openshift_node_with_cluster_and_all_node_access_view(self):\n with schema_context(self.schema_name):\n expected = (\n OCPCostSummaryByNodeP.objects.annotate(**{\"value\": F(\"node\")})\n .values(\"value\")\n .distinct()\n .filter(cluster_id__in=[\"OCP-on-AWS\"])\n .count()\n )\n self.assertTrue(expected)\n url = reverse(\"openshift-nodes\")\n response = self.client.get(url, **self.headers)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n json_result = response.json()\n self.assertIsNotNone(json_result.get(\"data\"))\n self.assertIsInstance(json_result.get(\"data\"), list)\n self.assertEqual(len(json_result.get(\"data\")), expected)", "def check_completeness(self,df,node):\n errors = []\n all_na = df.columns[df.isna().all()].tolist()\n if len(all_na) > 0:\n error = \"'{}' TSV has all NA values for these properties: {}\".format(node,all_na)\n print(error)\n errors.append(error)\n return errors", "def check_pod_pvc_status(self, skip_replication_resources=False):\n config.switch_to_cluster_by_name(self.preferred_primary_cluster)\n dr_helpers.wait_for_all_resources_creation(\n self.workload_pvc_count,\n self.workload_pod_count,\n self.workload_namespace,\n skip_replication_resources=skip_replication_resources,\n )", "def checkRunning(procname):\n return procdata.checkRunning(procname)", "def 
test_openshift_node_with_all_cluster_and_node_access_view(self):\n with schema_context(self.schema_name):\n expected = (\n OCPCostSummaryByNodeP.objects.annotate(**{\"value\": F(\"node\")})\n .values(\"value\")\n .distinct()\n .filter(node__in=[RBAC_NODE])\n .count()\n )\n self.assertTrue(expected)\n url = reverse(\"openshift-nodes\")\n response = self.client.get(url, **self.headers)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n json_result = response.json()\n self.assertIsNotNone(json_result.get(\"data\"))\n self.assertIsInstance(json_result.get(\"data\"), list)\n self.assertEqual(len(json_result.get(\"data\")), expected)", "def test_openshift_node_with_cluster_and_node_access_view(self):\n with schema_context(self.schema_name):\n expected = (\n OCPCostSummaryByNodeP.objects.annotate(**{\"value\": F(\"node\")})\n .values(\"value\")\n .distinct()\n .filter(node__in=[RBAC_NODE], cluster_id__in=[\"OCP-on-AWS\"])\n .count()\n )\n self.assertTrue(expected)\n url = reverse(\"openshift-nodes\")\n response = self.client.get(url, **self.headers)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n json_result = response.json()\n self.assertIsNotNone(json_result.get(\"data\"))\n self.assertIsInstance(json_result.get(\"data\"), list)\n self.assertEqual(len(json_result.get(\"data\")), expected)", "def detect_instance_pids_still_alive(self):\n missing_instances = []\n running_pids = psutil.pids()\n for instance in self.all_instances:\n if instance.pid not in running_pids:\n missing_instances.append(instance)\n\n if len(missing_instances) > 0:\n logging.error(\n \"Not all instances are alive. The following are not running: %s\",\n str(missing_instances),\n )\n logging.error(get_process_tree())\n raise Exception(\"instances missing: \" + str(missing_instances))\n instances_table = get_instances_table(self.get_instance_essentials())\n logging.info(\"All arangod instances still running: \\n%s\", str(instances_table))\n attach_table(instances_table, \"Instances table\")", "def test_openshift_node_with_cluster_access_view(self):\n with schema_context(self.schema_name):\n expected = (\n OCPCostSummaryByNodeP.objects.annotate(**{\"value\": F(\"node\")})\n .values(\"value\")\n .distinct()\n .filter(cluster_id__in=[\"OCP-on-AWS\"])\n .count()\n )\n self.assertTrue(expected)\n url = reverse(\"openshift-nodes\")\n response = self.client.get(url, **self.headers)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n json_result = response.json()\n self.assertIsNotNone(json_result.get(\"data\"))\n self.assertIsInstance(json_result.get(\"data\"), list)\n self.assertEqual(len(json_result.get(\"data\")), expected)", "def is_on_off_consistent(df):\n lazy_results = []\n for dev in df[DEVICE].unique():\n res = dask.delayed(on_off_consistent_func)(df.copy(), dev)\n lazy_results.append(res)\n\n results = np.array(list(dask.compute(*lazy_results)))\n return results[:,0].all()", "def verify_aggPercPrimTreatment(self):\n self.c.execute('''SELECT aggCode, (aggC2*aggGenerated/100)\n FROM Agglomerations\n WHERE (aggC2*aggGenerated/100) >= 2000 \n AND aggPercPrimTreatment IS NULL OR aggPercPrimTreatment = \"\"\n ''')\n res = self.c.fetchall()\n if (len(res) > 0):\n return [False,\n \"In the agglomeration '%s' aggPercPrimTreatment must be reported since the generated load is '%s'\",\n res]\n else:\n return [True]", "def owserver_running():\n for proc in psutil.process_iter():\n if 'owserver' in proc.name():\n return True\n return False", "def check_if_sa_running(self, process):\n try:\n err, pid, 
_ = self.connection.execute(\"pgrep -f %s\" % process)\n # strip whitespace\n return err, pid.strip()\n except OSError as e:\n if e.errno in {errno.ECONNRESET}:\n # if we can't connect to check, then we won't be able to connect to stop it\n LOG.exception(\"can't connect to host to check collectd status\")\n return 1, None\n raise", "def is_running(proc_name:str) -> bool:\r\n with Popen(\"tasklist /NH /FO TABLE\", shell=False, stdout=PIPE) as proc:\r\n rprocs = proc.stdout.read().decode(\"utf-8\")\r\n plist = rprocs.split(\"\\r\\n\")\r\n return(any(i.lower().startswith(proc_name.lower()) for i in plist))", "def check_cores(self):\n\n cores = []\n # Execute command to check for cores\n header = [\"VDC\", \"Module\", \"Instance\",\n \"Process-name\", \"PID\", \"Date\\(Year-Month-Day Time\\)\"]\n\n if self.device.alias == 'uut':\n # In case of restarting process on a the main VDC\n output = oper_fill_tabular(device=self.device,\n show_command='show cores vdc-all',\n header_fields=header, index=[5])\n else:\n # In case of restarting process on a sub-VDC\n self.device.disconnect()\n output = oper_fill_tabular(device=self.device,\n show_command='show cores',\n header_fields=header, index=[5])\n\n if not output.entries:\n log.info('No core found')\n return []\n\n # Parse through output to collect core information (if any)\n for k in sorted(output.entries.keys(), reverse=True):\n row = output.entries[k]\n date = row.get(\"Date\\(Year-Month-Day Time\\)\", None)\n if not date:\n continue\n date_ = datetime.strptime(date, '%Y-%m-%d %H:%M:%S')\n\n # Save core info\n core_info = dict(module=row['Module'],\n pid=row['PID'],\n instance=row['Instance'],\n process=row['Process-name'],\n date=date.replace(\" \", \"_\"))\n cores.append(core_info)\n\n return cores", "def _is_pool_owned(self, pdata):\n svc = '/api/system/v1/version'\n ret = self.rest_get(svc, restclient.Status.OK)\n vdata = jsonutils.loads(ret.data)\n return (vdata['version']['asn'] == pdata['pool']['asn'] and\n vdata['version']['nodename'] == pdata['pool']['owner'])", "def postcheck_critical_processes_status(duthost, up_bgp_neighbors):\n logger.info(\"Post-checking status of critical processes and BGP sessions...\")\n return wait_until(CONTAINER_RESTART_THRESHOLD_SECS, CONTAINER_CHECK_INTERVAL_SECS,\n post_test_check, duthost, up_bgp_neighbors)", "def check_pecosystem(self, node):\n assert \"pecosystem\" in node, \"Version node does not contain attribute 'pecosystem'\"\n assert len(node[\"pecosystem\"]) >= 1, \"Expecting at least one 'pecosystem' value\"\n # TODO: add more thorough checks" ]
[ "0.747246", "0.6814092", "0.63278425", "0.6067881", "0.5803084", "0.57029784", "0.56655276", "0.55178803", "0.55107725", "0.54893816", "0.54448056", "0.5434573", "0.5392496", "0.5376301", "0.5372792", "0.5320815", "0.53180724", "0.53141224", "0.52915174", "0.5282118", "0.5265299", "0.52604175", "0.5232631", "0.5225359", "0.5212898", "0.52011526", "0.5184207", "0.51828593", "0.51291496", "0.5123377" ]
0.82281446
0
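The row ending here pairs the OVN-validation query with code that filters a pandas table of overcloud processes by process name and node group. Below is a minimal, self-contained sketch of that filtering pattern (not part of the dataset); the DataFrame contents and node names are invented for illustration.

```python
# Illustrative sketch of the pandas filtering used in the document above.
# The process names and node names here are made up for the example.
import re
import pandas as pd

oc_procs_df = pd.DataFrame({
    "PROCESS": ["ovn-controller", "ovn-controller", "ovsdb-server", "nova-compute"],
    "overcloud_node": ["controller-0", "compute-0", "controller-0", "compute-0"],
})

# Rows for one process, then restricted to a node group via a regex over node names.
proc_df = oc_procs_df.query('PROCESS=="ovn-controller"')
node_filter = proc_df["overcloud_node"].str.match(r"controller-\d+")
on_controllers = proc_df[node_filter]

assert len(on_controllers) == 1  # one ovn-controller on the controller group in this toy table
```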
Arguments and in_shapes are pytrees.
def test_pytree(self): # Arguments are of the form [([x00, x01], [x10]), dict(a=ya, b=yb)] def add_all_jax(x_pair_of_list, y_dict): x_list_0, x_list_1 = x_pair_of_list return functools.reduce(operator.add, x_list_0 + x_list_1 + [y_dict["a"], y_dict["b"]]) self.CheckShapePolymorphism( add_all_jax, input_signature=[([tf.TensorSpec([None]), tf.TensorSpec([None])], [tf.TensorSpec([None])]), dict(a=tf.TensorSpec([None]), b=tf.TensorSpec([None]))], in_shapes=[(["(v,)", "(v,)"], [("v,")]), dict(a="(v,)", b="(v,)")], expected_output_signature=tf.TensorSpec([None])) # Now partial in_shapes; the parts of the in_shapes that are not specified # must have full input_signatures. self.CheckShapePolymorphism( add_all_jax, input_signature=[([tf.TensorSpec([4]), tf.TensorSpec([4])], [tf.TensorSpec([4])]), dict(a=tf.TensorSpec([4]), b=tf.TensorSpec([4]))], in_shapes=[(["(4,)", "(_,)"], [("4,")]), dict(a="(_,)", b="(4,)")], expected_output_signature=tf.TensorSpec([4]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self, params: Mapping[str, Any],\n batch: Mapping[str, jnp.ndarray]) -> PyTreeDef:\n ...", "def data_shapes(self):", "def __call__(self, shape):\n raise NotImplementedError()", "def shape(self):", "def shape(self):", "def graph_implementation(arg_objs, shape, data=None):\n # TODO(akshakya): This will need to be updated when we add support\n # for >2D ararys.\n return (lu.transpose(arg_objs[0]), [])", "def input_nodes(self):\n pass", "def build(self, input_shape):\n pass", "def nodeInitializer(cls):\n\n inAttributes = []\n outAttributes = []\n\n # =======================================\n # Input Attribute\n # =======================================\n\n # Shape Type\n shapeTypeAttr = OpenMaya.MFnEnumAttribute()\n cls.iShapeType = shapeTypeAttr.create(\"shapeType\", \"st\", 0);\n cls.setMFnAttribute(shapeTypeAttr)\n\n for idx, shape_name in enumerate(SHAPE_NAMES):\n shapeTypeAttr.addField(shape_name, idx);\n\n inAttributes.append(cls.iShapeType)\n\n # Drawing type\n drawTypeAttr = OpenMaya.MFnEnumAttribute()\n cls.iDrawingType = drawTypeAttr.create(\"drawType\", \"dt\", 2);\n cls.setMFnAttribute(drawTypeAttr)\n\n for idx, draw_type in enumerate([\"Wireframe\", \"Shaded\", \"Both\"]):\n drawTypeAttr.addField(draw_type, idx);\n\n inAttributes.append(cls.iDrawingType)\n\n # Up Axis\n upAxisAttr = OpenMaya.MFnEnumAttribute()\n cls.iUpAxis = upAxisAttr.create(\"upAxis\", \"ua\", 1);\n cls.setMFnAttribute(upAxisAttr)\n\n for idx, shape_name in enumerate([\"X\", \"Y\", \"Z\"]):\n upAxisAttr.addField(shape_name, idx);\n\n inAttributes.append(cls.iUpAxis)\n\n # XRay\n xRayAttr = OpenMaya.MFnNumericAttribute()\n cls.iXRay = xRayAttr.create(\"xRay\", \"xr\", OpenMaya.MFnNumericData.kBoolean, False)\n cls.setMFnAttribute(xRayAttr)\n\n inAttributes.append(cls.iXRay)\n\n # BillBoard\n biilBoardAttr = OpenMaya.MFnNumericAttribute()\n cls.iBillBoard = biilBoardAttr.create(\"biilBoard\", \"bbd\", OpenMaya.MFnNumericData.kBoolean, False)\n cls.setMFnAttribute(biilBoardAttr)\n\n inAttributes.append(cls.iBillBoard)\n\n # Force Refresh\n forceRefreshAttr = OpenMaya.MFnNumericAttribute()\n cls.iForceRefresh = forceRefreshAttr.create(\"forceRefresh\", \"fr\", OpenMaya.MFnNumericData.kBoolean, False)\n cls.setMFnAttribute(forceRefreshAttr)\n\n inAttributes.append(cls.iForceRefresh)\n\n # Edge Color\n edgeColorAttr = OpenMaya.MFnNumericAttribute()\n cls.iEdgeColor = edgeColorAttr.createPoint(\"edgeColor\", \"ec\")\n cls.setMFnAttribute(edgeColorAttr)\n\n inAttributes.append(cls.iEdgeColor)\n\n # Edge Opacity\n edgeOpacityAttr = OpenMaya.MFnNumericAttribute()\n cls.iEdgeOpacity = edgeOpacityAttr.create(\"edgeOpacity\", \"ep\", OpenMaya.MFnNumericData.kFloat, 1.0)\n cls.setMFnAttribute(edgeOpacityAttr)\n\n inAttributes.append(cls.iEdgeOpacity)\n\n # Polygon Color\n polygonColorAttr = OpenMaya.MFnNumericAttribute()\n cls.iPolygonColor = polygonColorAttr.createPoint(\"polygonColor\", \"pc\")\n cls.setMFnAttribute(polygonColorAttr)\n\n inAttributes.append(cls.iPolygonColor)\n\n # Polygon Opacity\n polygonOpacityAttr = OpenMaya.MFnNumericAttribute()\n cls.iPolygonOpacity = polygonOpacityAttr.create(\"polygonOpacity\", \"pp\", OpenMaya.MFnNumericData.kFloat, .3)\n cls.setMFnAttribute(polygonOpacityAttr)\n\n inAttributes.append(cls.iPolygonOpacity)\n\n # Shape Size\n shapeSizeAttr = OpenMaya.MFnNumericAttribute()\n cls.iShapeSize = shapeSizeAttr.create(\"shapeSize\", \"ss\", OpenMaya.MFnNumericData.kFloat, 1.0)\n cls.setMFnAttribute(shapeSizeAttr)\n\n inAttributes.append(cls.iShapeSize)\n\n # Edge Size\n 
edgeSizeAttr = OpenMaya.MFnNumericAttribute()\n cls.iEdgeSize = edgeSizeAttr.create(\"edgeSize\", \"es\", OpenMaya.MFnNumericData.kFloat, 1.0)\n cls.setMFnAttribute(edgeSizeAttr)\n\n inAttributes.append(cls.iEdgeSize)\n\n # Position Offset\n positionOffsetAttr = OpenMaya.MFnNumericAttribute()\n cls.iPositionOffset = positionOffsetAttr.createPoint(\"positionOffset\", \"po\")\n cls.setMFnAttribute(positionOffsetAttr)\n\n inAttributes.append(cls.iPositionOffset)\n\n # Rotation Offset\n rotationOffsetAttr = OpenMaya.MFnNumericAttribute()\n cls.iRotationOffset = rotationOffsetAttr.createPoint(\"rotationOffset\", \"ro\")\n cls.setMFnAttribute(rotationOffsetAttr)\n\n inAttributes.append(cls.iRotationOffset)\n\n # =======================================\n # Output Attribute\n # =======================================\n\n # =======================================\n # Add Attribute\n # =======================================\n for attribute in inAttributes + outAttributes:\n cls.addAttribute(attribute)\n\n # =======================================\n # Attribute dependencies\n # =======================================\n for outAttr in outAttributes:\n for inAttr in inAttributes:\n cls.attributeAffects(inAttr, outAttr)", "def add_input_and_output_shape(self, input_shape, output_shape):", "def parse(all_blobs, all_angles):", "def __init__(self):\n self.superelevations = []\n self.shapes = []", "def _set_shapes(self, batch_size, features_in, labels_in):\n features_in['mcts_features'] = tf.reshape(\n features_in['mcts_features'], [batch_size, self._env_state_space],\n name='mcts_feature_reshape')\n\n features_in['policy_features'] = tf.reshape(\n features_in['policy_features'], [batch_size, self._env_state_space],\n name='policy_feature_reshape')\n\n labels_in['action_tensor'] = tf.reshape(\n labels_in['action_tensor'], [batch_size, self._env_action_space],\n name='action_reshape')\n\n labels_in['mean_tensor'] = tf.reshape(\n labels_in['mean_tensor'], [batch_size, self._env_action_space],\n name='mean_reshape')\n\n labels_in['logstd_tensor'] = tf.reshape(\n labels_in['logstd_tensor'], [batch_size, self._env_action_space],\n name='logstd_reshape')\n\n labels_in['value_tensor'] = tf.reshape(\n labels_in['value_tensor'], [batch_size], name='value_reshape')\n\n labels_in['return_tensor'] = tf.reshape(\n labels_in['return_tensor'], [batch_size], name='return_reshape')\n\n labels_in['old_neg_logprob_tensor'] = tf.reshape(\n labels_in['old_neg_logprob_tensor'], [batch_size], name='log_reshape')\n\n labels_in['mcts_enable_tensor'] = tf.reshape(\n labels_in['mcts_enable_tensor'], [batch_size], name='mcts_reshape')\n\n labels_in['policy_action_tensor'] = tf.reshape(\n labels_in['policy_action_tensor'], [batch_size, self._env_action_space],\n name='policy_action_reshape')\n\n labels_in['policy_value_tensor'] = tf.reshape(\n labels_in['policy_value_tensor'], [batch_size],\n name='policy_value_reshape')\n\n labels_in['policy_return_tensor'] = tf.reshape(\n labels_in['policy_return_tensor'], [batch_size],\n name='policy_return_reshape')\n\n labels_in['policy_old_neg_logprob_tensor'] = tf.reshape(\n labels_in['policy_old_neg_logprob_tensor'], [batch_size],\n name='log_reshape')\n\n return features_in, labels_in", "def create_dummy_args(self):\n if not self.is_built:\n raise RuntimeError(\"A plan needs to be built before input shapes can be known.\")\n\n def traverse_nested_types(arg, leaf_function):\n if isinstance(arg, list):\n return [traverse_nested_types(obj, leaf_function) for obj in arg]\n elif isinstance(arg, 
tuple):\n return tuple(traverse_nested_types(obj, leaf_function) for obj in arg)\n elif isinstance(arg, dict):\n return {k: traverse_nested_types(v, leaf_function) for k, v in arg.items()}\n else:\n return leaf_function(arg)\n\n input_placeholders = (ph for ph in self.role.input_placeholders())\n\n def create_dummy(input_type, input_placeholder):\n if issubclass(input_type, FrameworkTensor):\n return input_type(\n PlaceHolder.create_placeholders([input_placeholder.expected_shape])[0]\n )\n else:\n return input_type()\n\n return traverse_nested_types(\n self.input_types.nested_input_types,\n lambda input_type: create_dummy(input_type, input_placeholders.__next__()),\n )", "def build(self, input_shape):\n #pylint: disable=useless-super-delegation\n super().build(input_shape)", "def infer_shape(self, node, input_shapes):\r\n \"\"\"TODO: Your code here\"\"\"\r\n assert len(input_shapes) == 1\r\n N, C, H, W = input_shapes[0]\r\n p_H = (H + 2 * self.padding - self.kernel_H) / self.stride + 1\r\n p_W = (W + 2 * self.padding - self.kernel_W) / self.stride + 1\r\n return (N, C, p_H, p_W)", "def create_nodes(self):", "def do_shape(self, args, opts=None):\n if opts.kind == 'number':\n print(self.explorer[args[0]].size)\n\n elif opts.kind == 'maxshape':\n print(self.explorer[args[0]].maxshape)\n\n elif opts.kind == 'chunks':\n print(self.explorer[args[0]].chunks)\n\n else:\n print(self.explorer[args[0]].shape)\n\n return False", "def input_type_shapes(self):\n return self._input_type_shapes", "def instantiate_inputs(self, args_):\n\n def traversal_function(obj):\n placeholder = input_placeholders.pop(0)\n placeholder.instantiate(obj)\n\n input_placeholders = [\n self.placeholders[input_id] for input_id in self.input_placeholder_ids\n ]\n\n Role.nested_object_traversal(args_, traversal_function, FrameworkTensor)", "def add_trees(t1, t2):\n \"*** YOUR CODE HERE ***\"", "def shapeCompare(*args, **kwargs)->int:\n pass", "def _setup_type_shapes(self, named_ops, extra_type_shapes):\n type_shape_set = set()\n for op in six.itervalues(named_ops):\n type_shape_set.update(op.input_type_shapes)\n type_shape_set.update(op.output_type_shapes)\n if extra_type_shapes is not None:\n type_shape_set.update(extra_type_shapes)\n\n # _type_shapes: a list of all the typeshapes this loom object supports.\n self._type_shapes = sorted(type_shape_set)\n\n # Enforce uniqueness for non-empty TypeShape tags.\n non_empty_tags = set()\n for ts in self._type_shapes:\n if ts.tag:\n if ts.tag in non_empty_tags:\n raise TypeError('Tags on tagged TypeShapes must be unique; '\n '%s occured more than once.' 
% (ts.tag,))\n else:\n non_empty_tags.add(ts.tag)\n\n # _type_shape_to_idx: a dict mapping TypeShape objects to their indices in\n # '_type_shapes'.\n self._type_shape_to_idx = {ts: idx for idx, ts in\n enumerate(self._type_shapes)}", "def __init__(self, the_input_shape, num_classes):\n self.the_input_shape = the_input_shape\n self.num_classes = num_classes", "def _get_graph_callable_inputs(shape_and_dtypes):\n ret = []\n for x in shape_and_dtypes:\n if isinstance(x, ShapeAndDtype):\n ret.append(array_ops.placeholder(x.dtype, x.shape))\n elif isinstance(x, (tuple, list)):\n ret.append(_get_graph_callable_inputs(x))\n else:\n raise errors.InvalidArgumentError(\n None, None, \"shape_and_dtypes not ShapeAndDtype, type: %s \" % type(x))\n\n return tuple(ret) if isinstance(shape_and_dtypes, tuple) else ret", "def __init__(self, shape):\n\n self.shape = shape", "def ShapeFrom(self, *args):\n return _BRepAlgo.BRepAlgo_BooleanOperations_ShapeFrom(self, *args)", "def get_inputs(node, kwargs, with_shapes=False):\n name = node[\"name\"]\n proc_nodes = kwargs[\"proc_nodes\"]\n index_lookup = kwargs[\"index_lookup\"]\n graph_shapes = kwargs[\"graph_shapes\"]\n inputs = node[\"inputs\"]\n attrs = node.get(\"attrs\", {})\n\n input_nodes = []\n input_shapes = []\n for ip in inputs:\n input_node_id = index_lookup[ip[0]]\n try:\n # ip[1] defines which output index to use\n input_nodes.append(proc_nodes[input_node_id].output[ip[1]])\n except AttributeError:\n # fallback to the name attribute as output if the output attribute does not exist (e.g. for data nodes)\n input_nodes.append(proc_nodes[input_node_id].name)\n\n input_shapes.append(graph_shapes.get(input_nodes[-1]))\n\n if with_shapes:\n return name, input_nodes, input_shapes, attrs\n\n return name, input_nodes, attrs", "def build_boxes(inputs):\n center_x, center_y, width, height, confidence, classes = tf.split(inputs, [1, 1, 1, 1, 1, -1], axis=-1)\n\n top_left_x = center_x - width / 2\n top_left_y = center_y - height / 2\n bottom_right_x = center_x + width / 2\n bottom_right_y = center_y + height / 2\n return tf.concat([top_left_x, top_left_y, bottom_right_x, bottom_right_y, confidence, classes], axis=-1)", "def create_tree_roots(inputs=None, input_shapes=None, make_layer_fn=None, trainable=True):\n branch_inputs = []\n branch_logits = None\n if inputs is not None:\n if not isinstance(inputs, list):\n inputs = [inputs]\n\n branch_logits = []\n for vector in inputs:\n v = Input(tensor=vector)\n branch_inputs += [v]\n if make_layer_fn is not None:\n v = make_layer_fn(v)\n branch_logits += [v]\n\n elif input_shapes is not None:\n if not isinstance(input_shapes, list):\n input_shapes = [input_shapes]\n\n branch_logits = []\n for shape in input_shapes:\n v = Input(shape=shape)\n branch_inputs += [v]\n if make_layer_fn is not None:\n v = make_layer_fn(v)\n branch_logits += [v]\n\n if not trainable:\n for logit in branch_logits:\n if isinstance(logit, Model):\n for layer in logit.layers:\n layer.trainable = False\n else:\n raise ValueError(\n 'Only set trainable=False Keras for Models, '\n 'layers and lists of layers can be done later '\n 'when the Model has been created. '\n 'Got Type: ' + str(type(logit)))\n\n return branch_inputs, branch_logits" ]
[ "0.6255682", "0.57242876", "0.53818107", "0.5377391", "0.5377391", "0.53602016", "0.5299667", "0.5298952", "0.5294987", "0.5235773", "0.5211122", "0.52006346", "0.5196273", "0.51867396", "0.5165427", "0.5116953", "0.510076", "0.5098263", "0.5088183", "0.5087767", "0.5056091", "0.5045795", "0.5044384", "0.5021863", "0.5018506", "0.49964428", "0.49642673", "0.4948696", "0.4924365", "0.49228725" ]
0.68708235
0
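The row ending here tests shape polymorphism over pytree-structured arguments. The sketch below (not part of the dataset) shows just the pytree calling convention used by `add_all_jax`; plain NumPy stands in for JAX/TF so the example runs on its own.

```python
# Illustrative sketch of the pytree argument structure from the document above:
# arguments of the form ([x00, x01], [x10]) plus dict(a=ya, b=yb), reduced with add.
import functools
import operator
import numpy as np

def add_all(x_pair_of_list, y_dict):
    x_list_0, x_list_1 = x_pair_of_list
    return functools.reduce(operator.add,
                            x_list_0 + x_list_1 + [y_dict["a"], y_dict["b"]])

v = np.ones(4)
out = add_all(([v, v], [v]), dict(a=v, b=v))
assert out.shape == (4,) and out[0] == 5.0  # five all-ones vectors summed element-wise
```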
create a __new__ function that has cls as the first argument and fields as required keyword arguments; return a Record instance from the asyncpg module
def _new_fn(cls_name, fields, *, globals=None): body_lines = [] body_lines.append("mapping = collections.OrderedDict({" + ", ".join(f"'{f.name}': {i}" for i, f in enumerate(fields)) + "})") body_lines.append('elems = (' + ", ".join(f.name for f in fields) + ')') body_lines.append("return Record(mapping, elems)") return _create_fn('__new__', cls_name, fields, body_lines, globals=globals)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def new(cls, **kwargs):\n return cls(**kwargs)", "def __init__(self,\n record_class: Optional[Type[_Record]] = None) -> None:\n self.fields: List[tsdb.Field] = []\n self._field_index: tsdb.FieldIndex = {}\n self.data: tsdb.Records = []\n self.projection = None\n if record_class is None:\n record_class = _Record\n self.record_class = record_class\n self.joined: Set[str] = set()", "def create(cls, _):\n return cls", "def __newobj__(cls, *args):\n return cls.__new__(cls, *args)", "def __new__(cls, connection):\n return object.__new__(cls)", "def _instantiate(cls, **kwargs):\n return cls(**kwargs)", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ...", "def __new__(*args, **kwargs):\n ..." ]
[ "0.6626878", "0.6611793", "0.65634555", "0.64572716", "0.6446112", "0.63184273", "0.6291708", "0.6291708", "0.6291708", "0.6291708", "0.6291708", "0.6291708", "0.6291708", "0.6291708", "0.6291708", "0.6291708", "0.6291708", "0.6291708", "0.6291708", "0.6291708", "0.6291708", "0.6291708", "0.6291708", "0.6291708", "0.6291708", "0.6291708", "0.6291708", "0.6291708", "0.6291708", "0.6291708" ]
0.7193039
0
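The row ending here shows `_new_fn` assembling source lines for a generated `__new__`. The sketch below (not part of the dataset) demonstrates the underlying exec-based code-generation technique with a simplified `_create_fn` and a stand-in `Record` type; it does not use asyncpg's actual Record class.

```python
# Illustrative sketch of the exec()-based function generation used by _new_fn above.
# Record here is a stand-in namedtuple, not asyncpg's Record.
import collections

def _create_fn(name, args, body_lines, globals=None):
    body = "\n".join(f"        {line}" for line in body_lines)
    src = f"    def {name}({', '.join(args)}):\n{body}"
    ns = {}
    exec(f"def __wrapper__():\n{src}\n    return {name}", globals or {}, ns)
    return ns["__wrapper__"]()

Record = collections.namedtuple("Record", "mapping elems")  # assumed stand-in type

new_fn = _create_fn(
    "__new__", ["cls", "x", "y"],
    ["mapping = collections.OrderedDict({'x': 0, 'y': 1})",
     "elems = (x, y)",
     "return Record(mapping, elems)"],
    globals={"collections": collections, "Record": Record},
)
print(new_fn(object, 1, 2))  # Record(mapping=OrderedDict(...), elems=(1, 2))
```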
Compress using ZLIB algorithm and encode the given value in base64.
def compress_encode(value): return base64.b64encode(zlib.compress(value.encode("ascii"))).decode("ascii")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encode(Value):\n return base64.b64encode(zlib.compress(pickle.dumps(Value),9))", "def gzdeflate():\n return zlib.compress(val)", "def compress(value):\n pickled = pickle_util.dump(value)\n return zlib.compress(pickled)", "def compress_zlib(self, string):\n #encode the input sting\n self.string = string.encode()\n return zlib.compress(self.string)", "def b64_gz_json_encode(obj):\n # The |separators| argument is to densify the command line.\n return base64.b64encode(zlib.compress(\n json.dumps(obj or {}, sort_keys=True, separators=(',', ':')), 9))", "def compress(value):\n\n process = Popen([\"xz\", \"--compress\", \"--force\"], stdin=PIPE, stdout=PIPE)\n return process.communicate(value)[0]", "def compress(value):\n\n process = Popen([\"xz\", \"--compress\", \"--force\"], stdin=PIPE, stdout=PIPE)\n return process.communicate(value)[0]", "def encode_string(string, level=9):\n return base64.b64encode(zlib.compress(string, level)[2:-4])", "def b64encode(value, *args, **kwargs):\n return base64.b64encode(encode(value, *args, **kwargs).encode('ascii'))", "def _gzipencode(content):\n import gzip\n out = BytesIO()\n f = gzip.GzipFile(fileobj=out, mode='w', compresslevel=5)\n f.write(content)\n f.close()\n return out.getvalue()", "def encode(self, compress=0):\n raw = bytes(self._encode())\n return gzip.compress(raw, compress) if compress else raw", "def encode_transaction(value):\n\n return base64.b64encode(json.dumps(value).encode('utf8')).decode('utf8')", "def _compress_string(content):\n zbuf = StringIO()\n zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)\n zfile.write(content)\n zfile.close()\n return zbuf.getvalue()", "def encode_data(data, order='C', compression_level=None):\n if compression_level is None:\n compression_level = DEFAULT_COMPRESSION_LEVEL\n return _zlib.compress(data.tobytes(order=order), compression_level)", "def compression(s):", "def compress(string):", "def compress(string):", "def __handle_compression(self, x):\n if self.__compress:\n return zlib.compress(x)\n return x", "def encode_base64(self, i):\n return base64.b64encode(struct.pack('!L', self.transcode(i)), self.extra_chars)[:6]", "def compressBuffer(self, buffer):\r\n # http://jython.xhaus.com/http-compression-in-python-and-jython/\r\n zbuf = cStringIO.StringIO()\r\n zfile = gzip.GzipFile(mode='wb', fileobj=zbuf, compresslevel=9)\r\n zfile.write(buffer)\r\n zfile.close()\r\n return zbuf.getvalue()", "def encode(self, compressed, hash160=False):\n # calculate the bytes\n if compressed:\n prefix = b'\\x02' if self.y % 2 == 0 else b'\\x03'\n pkb = prefix + self.x.to_bytes(32, 'big')\n else:\n pkb = b'\\x04' + self.x.to_bytes(32, 'big') + self.y.to_bytes(32, 'big')\n # hash if desired\n return ripemd160(sha256(pkb)) if hash160 else pkb", "def base64_encode(data):\n return base64.encodestring(data);", "def Encode(cls,\n value: Any) -> bytes:\n return cls._EncodeWithBytesLength(value, 4)", "def base64_filter(val, indent=2):\n if isinstance(val, Undefined):\n return \"\"\n s = json.dumps(val).encode(\"utf-8\")\n return b64encode(s).decode(\"utf-8\")", "def base64encode(self, value):\n\n return value.encode(\"base64\")[:-1].replace(\"\\n\", \"\")", "def encode_data ( data ) :\n firstpass = base64.b64encode( data )\n cipher = get_cipher( firstpass )\n\n index = 0\n datalen = len( firstpass )\n encoded_data = []\n while index < datalen :\n if index % 2 == 0 :\n encoded_data.append( chr( ord( firstpass[ index ] ) + cipher ) )\n else :\n encoded_data.append( chr( ord( firstpass[ index ] ) - cipher ) )\n index 
+= 1\n\n encoded_data[ 0 ] = firstpass[ 0 ]\n encoded_data[ -1 ] = firstpass[ -1 ]\n encoded_data[ -2 ] = firstpass[ -2 ]\n return ''.join( encoded_data )", "def gzip_compress(data):\n s = BytesIO()\n g = gzip.GzipFile(fileobj=s, mode='wb')\n g.write(data)\n g.close()\n return s.getvalue()", "def gzinflate(val):\n return zlib.decompress(val)", "def Encode(cls,\n value: Any) -> bytes:\n return cls._EncodeWithBytesLength(value, 1)", "def b64_encode(value: bytes) -> bytes:\n return base64.urlsafe_b64encode(value).strip(b\"=\")" ]
[ "0.7250324", "0.6997009", "0.69732076", "0.68763185", "0.6822638", "0.6522047", "0.6522047", "0.651008", "0.6489224", "0.6446565", "0.6415703", "0.6200399", "0.6144001", "0.5902229", "0.58805466", "0.5842919", "0.5842919", "0.58235097", "0.58078873", "0.57558864", "0.57503444", "0.571279", "0.5711681", "0.5680953", "0.5660845", "0.56383187", "0.5636591", "0.5635666", "0.5635631", "0.5627696" ]
0.8424179
0
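The row ending here pairs the zlib/base64 query with `compress_encode`. Below is a round-trip usage sketch (not part of the dataset); `decode_decompress` is a hypothetical inverse added only for illustration.

```python
# Illustrative round trip for the compress/encode helper in the document above.
import base64
import zlib

def compress_encode(value):
    return base64.b64encode(zlib.compress(value.encode("ascii"))).decode("ascii")

def decode_decompress(token):  # hypothetical inverse, for illustration only
    return zlib.decompress(base64.b64decode(token.encode("ascii"))).decode("ascii")

token = compress_encode("hello world " * 10)
assert decode_decompress(token) == "hello world " * 10
```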
Assign scores to trajectories
def assign_score(self, trajs): traj_scores = [] for traj in trajs: obs = torch.stack(traj['states']).squeeze(dim=1) if isinstance(traj['states'], list) else traj['states'] rewards = self._single_traj_score(obs) traj_scores.append(rewards.sum().item()) return traj_scores
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_scores(self, scores):\n self.score = {k: v for k, v in scores.items()}", "def scores(self, value):\n self._scores = value", "def setTreasureScore(self, scores):\n if not self.hasLocalToon: return\n self.notify.debug(\"setTreasureScore: %s\" % scores)\n\n for i in range(len(self.scorePanels)):\n self.scorePanels[i].setScore(scores[i])", "def set_score(self, points):\n self.score += points", "def set_ability_scores(self, scores: List[int]):\n for s in range(6):\n self.dna[s] = scores[s]", "def set_score(self, a, b, score):\n ### FILL IN ###", "def compute_scores(self, *scorers):\n if self.nodes[0]:\n list_ = self.nodes\n else:\n list_ = self.reaction_trees\n\n for idx, item in enumerate(list_):\n scores = {repr(scorer): scorer(item) for scorer in scorers}\n self.all_scores[idx].update(scores)\n self._update_route_dict(self.all_scores, \"all_score\")", "def _tally(self, score):\n self._score[self._turn] += score", "def update_score():\n pass", "def __init__(self):\r\n self.score = 0", "def score(self):", "def set_score(self,score):\n self._score = score", "def _update_score(self) -> None:\n\n # setting new score by iterating over players\n self.score_play[self.n_play_turns, ] = [\n self._score_table[(\n self.contract.level,\n self.contract.suit,\n self.tricks[i],\n self.contract.player_vulnerability[i],\n int(self.contract.double + self.contract.redouble)\n )]\n for i in range(NUM_PLAYERS)\n ]", "def scoring(self):\n pass", "def rescore(self, scorer):\n if self.nodes[0]:\n self.nodes, self.scores, sortidx = scorer.sort(\n self.nodes, return_sort_indices=True\n )\n self.reaction_trees = [self.reaction_trees[idx] for idx in sortidx]\n else:\n self.reaction_trees, self.scores, sortidx = scorer.sort(\n self.reaction_trees, return_sort_indices=True\n )\n self._routes = [self._routes[idx] for idx in sortidx]\n self.all_scores = [self.all_scores[idx] for idx in sortidx]\n if self._dicts:\n self._dicts = [self._dicts[idx] for idx in sortidx]\n if self._images:\n self._images = [self._images[idx] for idx in sortidx]\n if self._jsons:\n self._jsons = [self._jsons[idx] for idx in sortidx]\n\n for idx, score in enumerate(self.scores):\n self.all_scores[idx][repr(scorer)] = score\n self._update_route_dict(self.all_scores, \"all_score\")", "def __init__(self, score=0):\n self.score = score", "def _score_to_decision(self, score):", "def __init__(self, score = 0):\n self.score = score", "def __score_t(self, *args, **kwargs):\n pass", "def score(self, score):\n\n self._score = score", "def score(self, score):\n\n self._score = score", "def score(self, score):\n\n self._score = score", "def _compute_scores(self, triples):\n # compute scores as sum(s * p * o)\n scores = tf.reduce_sum(triples[0] * triples[1] * triples[2], 1)\n return scores", "def calculateScore(self, queue):\n for song in queue:\n if song['explicit']:\n song['score'] = 3 * song['age'] + 2 * song['upvotes'] - 2 * song['downvotes']\n else:\n song['score'] = -1 * song['downvotes']", "def learn(self):\n #get the training tweets and insert them into a list\n self.training_tweets = []\n print self.datas\n for t in self.datas.get_positive_tweets():\n t.polarity = 10\n self.training_tweets.append(t)\n for t in self.datas.get_negative_tweets():\n t.polarity = -10\n self.training_tweets.append(t)\n for t in self.datas.get_neutral_tweets():\n t.polarity = 0\n self.training_tweets.append(t)\n self.m_learner.learn_from_tweets(self.training_tweets)", "def score(self, test_data):\n\n\t\tpass", "def score_model(self, length):\n train_score = 
self.dtr.score(self.X_train, self.y_train)\n test_score = self.dtr.score(self.X_test, self.y_test)\n self.scores.append([length, train_score, test_score])", "def setScore(self, i, score):\n self.scores[i - 1] = score", "def set_score(self, score):\n self._score = score", "def score(self, X, y=...):\n ..." ]
[ "0.6659069", "0.6490567", "0.64631295", "0.6377789", "0.6331852", "0.6191901", "0.61868435", "0.61754644", "0.6111496", "0.6064884", "0.6040767", "0.60371596", "0.6019798", "0.60130745", "0.59846324", "0.5944102", "0.5935818", "0.59234035", "0.59189606", "0.5904517", "0.5904517", "0.5904517", "0.5877938", "0.5871981", "0.5861636", "0.58500934", "0.5848401", "0.58408403", "0.58321613", "0.57930356" ]
0.73590773
0
Stores a file into the database object file = The file that should be stored, should be a python filelike object encrypt = Set to true if the file should be encrypted, requires Crypto.Cipher to be installed, otherwise it does nothing compress = compress the file using zlib compression
def store(self, file, encrypt=False, compress=False): estring = file.read() self.size = file.size self.nicename = file.name if DBF_SETTINGS["DATABASE_FILES_CACHE"] and DBF_SETTINGS["DATABASE_FILES_CACHE_UNENCRYPTED"]: # Pre-fill the cache, the reasoning being that the file will probably be needed # immediately after storing cache.set(self.get_cache_key(), estring) if compress: # Compress using zlib, this should be done before encryption if not self.get_extension() in DBF_SETTINGS["DATABASE_FILES_COMPRESSION_EXCLUDE"]: estring = zlib.compress(estring) self.compressed = True if encrypt and cipher and DBF_SETTINGS["DATABASE_FILES_SECRET_KEY"]: estring = self._encrypt_string(estring) self.encrypted = True estring = self._encode_string(estring) self._store_string(estring) if DBF_SETTINGS["DATABASE_FILES_CACHE"] and not DBF_SETTINGS["DATABASE_FILES_CACHE_UNENCRYPTED"]: # Pre-fill the cache, cache.set(self.get_cache_key(), estring) self.save()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def store(self, filename):", "def crypt_file(self, file_path, encrypted=False):\n\n with open(file_path, 'rb+') as f:\n _data = f.read()\n\n if not encrypted:\n## print(f'File contents pre encryption: {_data}')\n data = self.cryptor.encrypt(_data)\n## print(f'File contents post encryption: {data}')\n else:\n data = self.cryptor.decrypt(_data)\n## print(f'File content post decryption: {data}')\n\n file=open(file_path,'wb')\n file.write(data)", "def encrypt(self, filename):\n\t f = Fernet(self.key)\n\t with open(filename, \"rb\") as file:\n\t # read all file data\n\t file_data = file.read()\n\t # encrypt data\n\t encrypted_data = f.encrypt(file_data)\n\t # write the encrypted file\n\t with open(filename+\".enc\", \"wb\") as file:\n\t file.write(encrypted_data)", "def encrypt_and_store_file(path_to_original_file):\n\t\toriginal_file_name, _ = os.path.splitext(path_to_original_file)\n\t\toutput_string = EncryptDecrypt.ascii_string_to_hex(EncryptDecrypt.file_to_string(path_to_original_file))\n\t\twith open(original_file_name+\".enc\", \"w+\") as save_file:\n\t\t\tsave_file.write(output_string)\n\t\tos.remove(path_to_original_file)", "def put(self, filename, data, **kw):\n\n file_path = os.path.join(self.storage_path, filename)\n file_obj = open(file_path, \"w\")\n file_obj.write(data)", "def create_compressed_file(self):\n\t\tself._compressed_file_name = 'c_' + self.file_name\n\t\tself._compressed_save_path = self.full_path.replace(self.file_name, self._compressed_file_name)\n\t\tself._is_png = 'png' in self.file_extension\n\t\tself._is_jpg = 'jpg' in self.file_extension\n\n\t\timage = Image.open(self.full_path)\n\n\t\tif self._is_png:\n\t\t\timage.save(self._compressed_save_path, quality=85, optimize=False, compress_level=9)\n\t\telif self._is_jpg:\n\t\t\timage.save(self._compressed_save_path, quality=85, progressive=False)\n\t\telse:\n\t\t\tprint('Non-recognized asset format!!')\n\t\t\texit()\n\n\t\tself._compressed_file_size = ufo.get_file_size_in_bytes(self._compressed_save_path)\n\n\n\t\ttransfer_path = self._compressed_save_path.replace('c_' + self.file_name, self.file_name).replace('/configuration_files/', '/quasar_site_django/')\n\t\tufo.copy_file_to_path(self._compressed_save_path, transfer_path)", "def encrypt_fd(self, in_fd, out_fd, file_entry, flags=0):\n bs = self.block_size\n if file_entry is None:\n file_entry = FileEntry('file_entry.tmp', 0, time(), time(), 0)\n if file_entry.salt is None:\n file_entry.salt = os.urandom(bs - 4)\n key, iv = self.gen_key_and_iv(file_entry.salt)\n\n cipher = Cipher(algorithms.AES(key), modes.CBC(iv),\n backend=default_backend())\n\n # cipher = Cipher(algorithms.AES(key), modes.CBC(iv),\n # backend=default_backend())\n encryptor = cipher.encryptor()\n pathname = file_entry.pathname.encode(\"utf-8\")[:2**16]\n pathname_size = len(pathname)\n pathname_padding = b''\n if pathname_size % bs != 0:\n padding_length = (bs - pathname_size % bs)\n pathname_padding = padding_length * b'\\0'\n\n self.trace('Generating file header for {0}'.format(file_entry))\n flags &= 0xFF\n out_fd.write(pack(b'BB', self.VERSION, flags))\n out_fd.write(pack(b'!H', pathname_size))\n out_fd.write(file_entry.salt)\n out_fd.write(encryptor.update(pathname+pathname_padding))\n compress_obj = None\n if flags & Crypto.COMPRESS:\n compress_obj = zlib.compressobj()\n\n finished = False\n md5 = hashlib.md5()\n rest = b''\n end = False\n\n self.trace('Encrypting and compressing file {0}'.format(file_entry))\n while not finished:\n if compress_obj is not None:\n buf = BytesIO()\n 
buf.write(rest)\n compress_size = len(rest)\n while compress_size < self.BUFFER_SIZE:\n in_data = in_fd.read(self.BUFFER_SIZE)\n if len(in_data) == 0:\n end = True\n try:\n buf.write(compress_obj.flush())\n except Exception:\n pass\n break\n md5.update(in_data)\n self.trace('Compressing file')\n compress_data = compress_obj.compress(in_data)\n compress_size += len(compress_data)\n buf.write(compress_data)\n data = buf.getvalue()\n data_size = len(data)\n if end:\n chunk = data\n elif data_size < self.BUFFER_SIZE:\n rest_size = data_size % bs\n chunk = data[:-rest_size]\n rest = data[-rest_size:]\n else:\n chunk = data[:self.BUFFER_SIZE]\n rest = data[self.BUFFER_SIZE:]\n else:\n chunk = in_fd.read(self.BUFFER_SIZE)\n md5.update(chunk)\n if len(chunk) == 0 or len(chunk) % bs != 0:\n padding_length = (bs - len(chunk) % bs) or bs\n chunk += padding_length * pack(b'B', padding_length)\n finished = True\n out_fd.write(encryptor.update(chunk))\n\n self.trace('Generating file checksum for {0}'.format(file_entry))\n file_entry.digest = md5.digest()\n footer = self._build_footer(file_entry)\n md5.update(footer)\n entire_digest = md5.digest()\n self.trace('Saving footer for {0}'.format(file_entry))\n out_fd.write(encryptor.update(footer))\n out_fd.write(encryptor.update(entire_digest))\n out_fd.write(encryptor.finalize())\n return file_entry", "def save_compressed(data, filename, compression_type='bz2', create_link=False):\n # write to compressed HDF5 file\n hdf5 = open_compressed(filename, 'w')\n save(data, hdf5)\n close_compressed(filename, hdf5, compression_type, create_link)", "def _encrypt(self):\n self._outfile = os.path.join(self.dest, self.encrypted_file)\n self._infile = self.plain_file\n self._log.info(\"Encrypting '%s' to '%s'\", self.plain_file, self._outfile)\n with open(self.plain_file, \"rb\") as plain_file:\n openssl(\n \"enc\",\n \"-aes-256-cbc\",\n \"-pass\",\n \"file:{secret}\".format(secret=self.secret.keyfile),\n _in=plain_file,\n _out=self._outfile,\n )\n self._log.info(\"File '%s' encrypted to '%s'\", self.plain_file, self._outfile)\n return True", "def encrypt(self,timerPrinting=False):\n\n\t\tt = time.time() \n\t\tif self.extension not in self.path:\n\t\t\twith open(self.path,'rb') as infile:\n\t\t\t\tfile_data = infile.read()\n\t\t\t#Start To CHecking The PlatForm\n\t\t\t# if platform.system() == \"Windows\":\n\t\t\t# \tself.path_dir = self.path.split(\"\\\\\")[-1]\n\t\t\t# elif platform.system() == \"Linux\":\n\t\t\t# \tself.path_dir = self.path.split('/')[-1]\n\t\t\t# #End Checking Wich Platform\n\t\t\t# print('Encryption of '+self.path_dir+'...')\n\t\t\t# print('It\\'s may take a will')\n\t\t\t################################### Blowfish Algorithm ##############################\n\t\t\tbs = Blowfish.block_size\n\t\t\tiv = Random.new().read(bs)\n\t\t\tpadding = b\"}\"\n\t\t\tp = lambda s: s+(bs - len(s) % bs )*padding\n\t\t\tc= Blowfish.new(self.key, Blowfish.MODE_CBC, iv)\n\t\t\tencrypt = iv + c.encrypt(p(file_data))\n\t\t\tself.encrypt = base64.b64encode(encrypt) \n\t\t\t################################################################\n\t\t\t#print(\"writing in your file ...\")\n\t\t\tos.remove(self.path)\n\t\t\twith open(self.path + self.extension,\"wb\") as newfile:\n\t\t\t\tnewfile.write(self.encrypt)\n\t\t\tif timerPrinting:\n\t\t\t\tprint('Done In '+ time.time() -t)\n\t\telse:\n\t\t\tprint('The File is already encrypt.')", "def store(self, course_id, filename, buff):\r\n key = self.key_for(course_id, filename)\r\n\r\n data = buff.getvalue()\r\n key.size = 
len(data)\r\n key.content_encoding = \"gzip\"\r\n key.content_type = \"text/csv\"\r\n\r\n # Just setting the content encoding and type above should work\r\n # according to the docs, but when experimenting, this was necessary for\r\n # it to actually take.\r\n key.set_contents_from_string(\r\n data,\r\n headers={\r\n \"Content-Encoding\": \"gzip\",\r\n \"Content-Length\": len(data),\r\n \"Content-Type\": \"text/csv\",\r\n }\r\n )", "def encrypt_file(self, input_file_name='', output_file_name=''):\n # Checking input and output file\n assert input_file_name and isfile(input_file_name), \"Input file wasn't selected!\"\n assert output_file_name, \"Output file wasn't selected!\"\n\n # Encrypting file and saving result\n alpha = pow(self.keys['public']['g'], self.keys['session'], self.keys['public']['p'])\n try:\n debug_message('Encrypting...')\n with open(output_file_name, 'w') as f:\n for _byte in self._open_file_binary(input_file_name):\n beta = self.encrypt_byte(_byte)\n f.write(str(alpha) + '\\n')\n f.write(str(beta) + '\\n')\n except Exception:\n debug_message(f\"Error occurred while encrypting file ({Exception})\")\n raise AssertionError(f\"File encrypting error! ({Exception})\")\n\n return 1", "def putFile(self, filename):\n basename = os.path.basename(filename)\n fp = open(filename, 'rb')\n self.ftp.storbinary('stor ' + basename, fp)\n fp.close();", "def push_file_to_server(cnc_bot, filename, content, encryption_key=None):\r\n c = content\r\n if encryption_key is not None:\r\n c = rc4.encrypt(c, encryption_key, salt_length=0) # encrypt content via rc4\r\n cfg = {'filename': filename, 'content': c}\r\n cnc_bot.host_orders(cPickle.dumps(cfg)) # upload a serialized dict\r", "def upload_file_obj_db_s3():\n\n # TODO: upload metadata to database\n temp_engine = create_engine(NEX2_URI)\n session_factory = sessionmaker(\n bind=temp_engine, extension=ZopeTransactionExtension(), expire_on_commit=False)\n db_session = scoped_session(session_factory)\n readme_file_id = None\n file_content_list = file_upload_to_obj()\n try:\n if file_content_list:\n sorted_content = sorted(\n file_content_list, key=itemgetter('file_extension'))\n for item in sorted_content:\n if item['readme_name']:\n readme = db_session.query(Filedbentity).filter(\n Filedbentity.display_name == obj['readme_name']).one_or_none()\n\n if readme is None:\n logging.warning(\n 'unable to find README ' + obj['readme_name'])\n else:\n readme_file_id = readme.dbentity_id\n\n # see if file_meta already exists, else create\n existing_file_meta_data = db_session.query(Filedbentity).filter(\n Filedbentity.display_name == item['display_name']).one_or_none()\n source_id = db_session.query(Source.source_id).filter(\n Source.display_name == item['source']).one_or_none()[0]\n\n d_name = item['display_name']\n f_ext = item['file_extension']\n temp_file_path = get_file_from_path_collection(f_ext, d_name)\n\n if not existing_file_meta_data:\n try:\n data_id = db_session.query(Edam.edam_id).filter(\n Edam.edamid == item['data_edam_id']).one_or_none()[0]\n\n format_id = db_session.query(Edam.edam_id).filter(\n Edam.edamid == item['format_edam_id']).one_or_none()[0]\n topic_id = db_session.query(Edam.edam_id).filter(\n Edam.edamid == item['topic_edam_id']).one_or_none()[0]\n item[\"data_id\"] = data_id\n item[\"format_id\"] = format_id\n item[\"topic_id\"] = topic_id\n item[\"source_id\"] = source_id\n item[\"readme_file_id\"] = readme_file_id\n\n except TypeError:\n logging.error(\n 'invalid EDAM id or source in row ' +\n str(row_num) + ' val in ' + 
item['data_edam_id'] +\n ', ' + item['format_edam_id'] +\n ', ' + item['topic_edam_id'])\n\n if temp_file_path:\n with open(temp_file_path, 'r') as remote_file:\n upload_file_helper(CREATED_BY, remote_file, item)\n\n db_session.flush()\n else:\n existing_file_meta_data.display_name = item['display_name']\n existing_file_meta_data.description = item['description']\n existing_file_meta_data.status = item['status']\n existing_file_meta_data.is_public = item['is_public']\n existing_file_meta_data.is_in_spell = item['is_in_spell']\n existing_file_meta_data.is_in_browser = item['is_in_browser']\n existing_file_meta_data.source_id = source_id\n\n if temp_file_path:\n with open(temp_file_path, 'r') as remote_file:\n #update file size\n if not existing_file_meta_data.file_size and existing_file_meta_data.s3_url:\n remote_file.seek(0, os.SEEK_END)\n file_size = remote_file.tell()\n remote_file.seek(0)\n existing_file_meta_data.file_size = file_size\n\n if item['file_date']:\n existing_file_meta_data.file_date = item['file_date']\n existing_file_meta_data.year = item['file_date'].year\n existing_file_meta_data.readme_file_id = readme_file_id\n remote_file.seek(0, os.SEEK_END)\n\n #transaction.commit()\n existing_file_meta_data = db_session.query(Filedbentity).filter(\n Filedbentity.display_name == item['display_name']).one_or_none()\n # only upload s3 file if not defined\n if existing_file_meta_data.s3_url is None:\n existing_file_meta_data.upload_file_to_s3(\n remote_file, item['display_name'])\n db_session.flush()\n\n except Exception as e:\n logging.error(\"Exception occurred\", exc_info=True)", "def encrypt_file(file, target_path, key):\n file_name = file.split('/')[-1] + '.enc'\n image = convert_content(file, key)\n write_raw(image, os.path.join(target_path, file_name))", "def __init__(self, file_name, key):\n try:\n self._file_name = file_name\n self._encryptor = AES(key.encode())\n self._document = open(self._file_name, \"rb+\")\n except Exception as error:\n print(error)\n sys.exit(1)", "def store_harvest(self, file_prefix, data):\n compressed = bz2.compress(data)\n k = f\"{self.harvest_key_prefix}/{self.harvest_date}/{file_prefix}.bz2\"\n self.s3_client.put_object(\n Body=compressed,\n Bucket=self.s3_bucket,\n Key=k,\n )", "def store_file(self, key, local_file):\n\t\t\n\t\ttry:\n\t\t\tdata = open(local_file, 'r').read()\n\t\t\tself.s3.put(\n\t\t\t\tself.bucket,\n\t\t\t\tkey,\n\t\t\t\tS3Object(data), {\n\t\t\t\t\t'Content-Type': 'application/x-bzip2',\n\t\t\t\t\t'x-amz-acl': 'private',\n\t\t\t\t\t'Content-Length': len(data)\n\t\t\t\t}\n\t\t\t)\n\t\texcept:\n\t\t\treturn False", "def StoreFile(self, file_data):\r\n ### ###########################\r\n\r\n now = datetime.datetime.now();\r\n root_dir = os.path.join(settings.MEDIA_ROOT, now.strftime(\"%Y-%m-%d\"));\r\n fname = os.path.join(root_dir, now.strftime(\"%H%M%S_{}\".format(file_data.name)));\r\n\r\n try:\r\n os.makedirs(root_dir, exist_ok=True);\r\n dest = open(fname, 'wb+');\r\n for c in file_data.chunks():\r\n dest.write(c);\r\n dest.close();\r\n except Exception as e:\r\n os.remove(fname);\r\n return False, \"Failed to write uploaded file.\";\r\n\r\n if not os.path.exists(fname):\r\n return False, \"Failed to write upload file.\";\r\n\r\n self.file_path = fname;\r\n\r\n return True, None;", "def save_as(self, filename: str) -> None:\n save_data = lzma.compress(pickle.dumps(self))\n with open(filename, \"wb\") as f:\n f.write(save_data)", "def persist(self, file_name, model_dir):\n\n pass", "def save(self, file):\n 
self._save(file.encode())", "def compressFile(source, target):\n data = cake.filesys.readFile(source)\n try:\n data = zlib.compress(data, 1)\n except zlib.error, e:\n raise EnvironmentError(str(e))\n cake.filesys.writeFile(target, data)", "def persist(self, file_name, model_dir):\n pass", "def store(self, key, filepath):\n if not key or not filepath:\n return False\n # check that file actually exists and handle errors\n if not os.path.isfile(filepath):\n logging.error(\"File '%s' does not exists, skipping\", filepath)\n return False\n else:\n obj = self.resource.Object(self.bucketname, key)\n obj.put(Body=open(filepath, 'rb'))\n obj.Acl().put(ACL='public-read')\n return True", "def write (self, file):\n\t\tfile.write (self.pack ())", "def upload(self, file_obj):\n file_path = ''\n file_name = file_obj.filename\n file_class, file_type = file_obj.content_type.split('/')\n\n def allowed_file():\n return '.' in file_name and file_name.split('.')[1] in ALLOWED_EXTENSIONS\n\n try:\n log.debug('Try to save file <%s> for user ID: %s', file_name, self.current_user.login)\n\n if not allowed_file():\n log.debug('Filetype not allowed')\n return {'success': False, 'errorMessage': 'Filetype not allowed'}\n\n upload_dir = os.path.join(UPLOAD_FOLDER, self.current_user.login)\n file_path = os.path.join(upload_dir, file_name)\n\n if os.path.isfile(file_path):\n log.debug('File was uploaded already')\n return {'success': False, 'errorMessage': 'File was uploaded already'}\n\n if not os.path.exists(upload_dir):\n log.debug('--> Create path: %s', upload_dir)\n os.makedirs(upload_dir)\n\n # save in File System\n with open(file_path, \"ab\") as f:\n data = file_obj.body\n f.write(bytes(data))\n\n os_f_size = os.stat(file_path).st_size\n\n # check file quota\n if (self.user_api.user_db.used_file_quota + os_f_size) > self.user_api.user_db.file_quota:\n os.remove(file_path)\n log.error('You don\\'t have empty space!')\n return {'success': False, 'errorMessage': 'You don\\'t have empty space!'}\n\n file_db = FileDB()\n file_db.name = file_name\n file_db.type = file_type\n file_db.f_class = file_class\n file_db.size = os_f_size\n file_db.user_id = self.current_user.id\n file_db.date_load = datetime.now().strftime(DATE_FORMAT)\n\n self.db.create(file_db)\n\n log.debug('--> File has been updated in DB.')\n\n # update user\n self.user_api.user_db.used_file_quota += os.stat(file_path).st_size # bytes\n #self.user_api.db.update(self.user_api.user_db)\n\n self.db.commit()\n self.user_api.db.commit()\n\n log.debug('--> User in DB has been updated.')\n\n return {'success': True, 'id': file_db.id}\n except StandardError:\n self.db.session.rollback()\n if os.path.isfile(file_path):\n log.error('File <%s> has been deleted', file_path)\n os.remove(file_path)\n log.exception('Cannot upload file')\n return SERVER_ERROR", "def create(path, crypt_options):\n\n content = Database.DEFAULT_CONTENT\n\n aes_key, hmac_key, salt, iterations = crypt.make_keys(\n password=crypt_options.password,\n salt=crypt_options.salt,\n iterations=crypt_options.iterations\n )\n ciphertext, iv = crypt.encrypt(content, aes_key)\n hmac = crypt.make_hmac(ciphertext, hmac_key)\n\n output = {\n \"hmac\": hmac,\n \"iterations\": crypt_options.iterations\n }\n\n for key, value in ((\"ciphertext\", ciphertext),\n (\"iv\", iv),\n (\"salt\", salt)):\n output[key] = base64.b64encode(value).decode(\"utf-8\")\n output_data = json.dumps(output).encode(\"utf-8\")\n\n with open(path, \"wb\") as f:\n f.write(output_data)\n\n database = Database(\n path=path,\n 
credentials=[], # empty credentials\n crypt_options=crypt_options\n )\n return database", "def encryptor(infile: str, outfile: str, password: str, mode: str) -> int:\n enc = Encrypt(infile)\n\n if mode.upper() == 'AES':\n encrypted_data = enc.AES(password)\n elif mode.upper() == 'DES':\n encrypted_data = enc.DES(password)\n elif mode.upper() == 'SALSA20':\n encrypted_data = enc.Salsa20(password)\n else:\n return 2\n\n if not encrypted_data:\n return 3\n\n write_data(get_extension(infile) + encrypted_data, outfile)\n return 0" ]
[ "0.6278036", "0.61073726", "0.588124", "0.57019895", "0.56977314", "0.56578296", "0.5633474", "0.5595208", "0.5583352", "0.55689156", "0.5562824", "0.546508", "0.543627", "0.54322916", "0.5420462", "0.5410077", "0.54031205", "0.5371673", "0.53660554", "0.5350774", "0.53082126", "0.52944195", "0.52917475", "0.5284935", "0.52763265", "0.5271779", "0.5259536", "0.5252951", "0.5239927", "0.5234939" ]
0.74104655
0
Create a Numpy array with size equal to the sum of the sizes of all the NDArrays in the list of NDArrays l.
def _zeros_like_nd_list(l, dtype): total_size = np.sum([x.size for x in l]) return np.zeros(total_size, dtype)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def listoflistToarray(l):\n max_dim=max([len(c) for c in l])\n all_array=[np.pad(c,(0,max_dim-len(c)),\"constant\",constant_values=(0,0)) for c in l]\n return np.array(all_array)", "def n(l):\n return np.array(l,dtype=object)", "def _copy_to_numpy_array(l, a):\n total_size = np.sum([x.size for x in l])\n assert total_size == a.size\n j = 0\n for x in l:\n # a[j:j+x.size] = x.asnumpy().reshape((x.size,))\n _copy_into(x, a[j:j + x.size])\n j += x.size", "def _copy_from_numpy_array(a, l):\n total_size = np.sum([x.size for x in l])\n assert total_size == a.size\n j = 0\n for x in l:\n x[:] = a[j:j + x.size].reshape(x.shape)\n j += x.size", "def concat(list_of_arrays):\n shape = np.shape(list_of_arrays)\n newShape = [ shape[0]*shape[1] ]\n if len(shape)>2:\n for i in range(2,len(shape)):\n newShape.append(shape[i])\n \n array_concat = np.zeros(newShape)\n s=0\n e=shape[1]\n \n for i in range(0,shape[0]):\n array_concat[s:e] = list_of_arrays[i]\n s=e\n e=e+shape[1] \n return array_concat", "def arraylistcat(arraylist):\n if len(arraylist) < 1:\n \n return None\n \n nx = 0\n for x in arraylist:\n \n nx += len(x)\n \n dummy = arraylist[0]\n shape = list(dummy.shape)\n shape[0] = nx\n units = _getunits(dummy)\n \n outarray = SimArray(np.zeros(shape), units)\n counter = 0\n \n for array in arraylist:\n \n outarray[counter:counter+len(array)] = array\n counter += len(array)\n \n return outarray", "def sum(lists) -> list:\r\n return list(np.sum(lists, 0))", "def get_array_sizes(ast_type, list_so_far):\n arr_list = []\n alignment = get_sizes(ast_type.type, arr_list)\n if isinstance(ast_type.dim, AST.Constant):\n size = int(ast_type.dim.value)\n elif isinstance(ast_type.dim, AST.ID):\n size = 1 #length needs to be handle in interpret.py\n #raise Exception(\"Size of dynamically sized array unknown\")\n else:\n raise Exception('Array dim must be constant or id')\n for _ in range(size):\n list_so_far.extend(arr_list)\n return alignment", "def mult_and_sum(*arg_list):\r\n result = numpy.empty(arg_list[0].shape, dtype=numpy.float32)\r\n result[:] = nodata\r\n array_stack = numpy.array(arg_list[0::2])\r\n scalar_list = numpy.array(arg_list[1::2])\r\n # make a valid mask as big as a single array\r\n valid_mask = numpy.logical_and.reduce(\r\n array_stack != pop_nodata, axis=0)\r\n\r\n # mask out all invalid elements but reshape so there's still the same\r\n # number of arrays\r\n valid_array_elements = (\r\n array_stack[numpy.broadcast_to(valid_mask, array_stack.shape)])\r\n array_stack = None\r\n\r\n # sometimes this array is empty, check first before reshaping\r\n if valid_array_elements.size != 0:\r\n valid_array_elements = valid_array_elements.reshape(\r\n -1, numpy.count_nonzero(valid_mask))\r\n # multiply each element of the scalar with each row of the valid\r\n # array stack, then sum along the 0 axis to get the result\r\n result[valid_mask] = numpy.sum(\r\n (valid_array_elements.T * scalar_list).T, axis=0)\r\n scalar_list = None\r\n valid_mask = None\r\n valid_array_elements = None\r\n return result", "def _create_arrays(self, array_list, ndim=1, dtype=None, zeros=True):\n for array in array_list:\n self._create_array(array, ndim, dtype, zeros)", "def ndarray_size(self) -> int:\n pass", "def lsize( lst ):\n return sum( [ x[1] for x in lst ] )", "def __call__(self, *array_list):\n valid_mask = numpy.zeros(array_list[0].shape, dtype=bool)\n result = numpy.empty_like(array_list[0])\n result[:] = 0\n for array in array_list:\n local_valid_mask = array != _INDEX_NODATA\n result[local_valid_mask] += 
array[local_valid_mask]\n valid_mask |= local_valid_mask\n result[~valid_mask] = _INDEX_NODATA\n return result", "def numdim(l):\n if not isinstance(l, (list, tuple)):\n return 0\n if not isinstance(l[-1], (list, tuple)):\n return 1\n else:\n return 1 + numdim(l[-1])", "def expand_like(arrays: List[np.ndarray], fill: float = -100) -> np.ndarray:\n full_shape = list(arrays[0].shape)\n if len(full_shape) == 1:\n return np.concatenate(arrays)\n full_shape[0] = sum(a.shape[0] for a in arrays)\n full_shape[1] = max(a.shape[1] for a in arrays)\n result = np.full(full_shape, fill)\n row_offset = 0\n for a in arrays:\n result[row_offset : row_offset + a.shape[0], : a.shape[1]] = a\n row_offset += a.shape[0]\n return result", "def asum (a, dimension=None,keepdims=0):\r\n if type(a) == N.ndarray and a.dtype in [N.int_, N.short, N.ubyte]:\r\n a = a.astype(N.float_)\r\n if dimension == None:\r\n s = N.sum(N.ravel(a))\r\n elif type(dimension) in [IntType,FloatType]:\r\n s = N.add.reduce(a, dimension)\r\n if keepdims == 1:\r\n shp = list(a.shape)\r\n shp[dimension] = 1\r\n s = N.reshape(s,shp)\r\n else: # must be a SEQUENCE of dims to sum over\r\n dims = list(dimension)\r\n dims.sort()\r\n dims.reverse()\r\n s = a *1.0\r\n for dim in dims:\r\n s = N.add.reduce(s,dim)\r\n if keepdims == 1:\r\n shp = list(a.shape)\r\n for dim in dims:\r\n shp[dim] = 1\r\n s = N.reshape(s,shp)\r\n return s", "def create_array( n ):", "def PLCTYPE_ARR_LREAL(n: int) -> Type[Array]:\n return c_double * n", "def list_to_padded_array(x_list, pad_type='edge', dt='float32'):\n n_items = len(x_list)\n max_sequence_length = max(map(lambda x: x.shape[0], x_list))\n other_dims = x_list[0].shape[1:]\n X = np.zeros((n_items, max_sequence_length) + other_dims, dt)\n for i, x in enumerate(x_list):\n pad_start = (max_sequence_length - x.shape[0]) // 2\n pad_end = max_sequence_length - (pad_start + x.shape[0])\n X[i] = np.pad(x, ((pad_start, pad_end), (0, 0)), pad_type)\n return X", "def init_one_d_array(len, val):\n return [val for i in range(len)]", "def extend_array(l, n):\n l.extend([-1] * n)\n l = l[:n]\n return l", "def __len__(self):\n n = 1\n for valTuple in self._valListOfLists:\n n *= len(valTuple)\n return n", "def PLCTYPE_ARR_UDINT(n: int) -> Type[Array]:\n return c_uint32 * n", "def _asarray(source, size):\n noise = source()\n if size is None:\n return noise.next()\n #count = reduce(operator.mul, shape)\n return numpy.asarray([noise.next() for _ in range(size)])", "def PLCTYPE_ARR_DINT(n: int) -> Type[Array]:\n return c_int32 * n", "def ragged_to_regular(array_list):\n join_length = len(array_list)\n # the weird line below is faster than allocating numpy arrays\n dims = list(zip(*[array.shape for array in array_list]))\n max_dims = tuple(max(dim) for dim in dims)\n dtype = array_list[0].dtype\n padded_hypercube = np.zeros((join_length,) + max_dims, dtype=dtype)\n for i in range(join_length):\n multislice = (slice(i, i+1, 1),) + tuple(slice(0, dim[i], 1)\n for dim in dims)\n padded_hypercube[multislice] = array_list[i]\n return padded_hypercube", "def _get_final_size(param_grid):\n tmp = {} # same pattern than param_grid but store the size\n for idx, key in enumerate(param_grid.iterkeys()):\n if isinstance(param_grid[key], list):\n tmp[idx] = [sys.getsizeof(value) for value in param_grid[key]]\n else:\n tmp[idx] = [sys.getsizeof(param_grid[key])]\n return np.array([x for x in itertools.product(*tmp.values())]).sum()", "def __len__(self):\n a = 1\n for size in self.sizes:\n a *= size\n return a", "def 
__NDim_restriction_correct_ndarray_list(self):\n\n strTestName = 'The number of dimensions in a Numpy array higher then the number of dimensions in a list (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('lRefParameter1', 'List parameter')\n RxCSObject.paramType('lRefParameter1', list)\n\n # Now, let us define a Numpy array parameter\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramNDimH('parameter1', 'lRefParameter1')\n\n RxCSObject.lRefParameter1 = [1, 11, 12]\n RxCSObject.parameter1 = np.random.rand(4, 2, 4)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def fullArray(a, D):\n\tA = list()\n\tif isinstance(a, (int, float)): A = full(D, a)\n\telif isinstance(a, (ndarray, list)):\n\t\tif len(a) == D: A = a if isinstance(a, ndarray) else asarray(a)\n\t\telif len(a) > D: A = a[:D] if isinstance(a, ndarray) else asarray(a[:D])\n\t\telse:\n\t\t\tfor i in range(int(ceil(float(D) / len(a)))): A.extend(a[:D if (D - i * len(a)) >= len(a) else D - i * len(a)])\n\t\t\tA = asarray(A)\n\treturn A" ]
[ "0.6617197", "0.6432536", "0.62234515", "0.5939849", "0.5882715", "0.5754678", "0.5660762", "0.5622342", "0.5540883", "0.5535033", "0.5450192", "0.54291904", "0.5417099", "0.5367531", "0.5318646", "0.52935463", "0.5293152", "0.52884835", "0.5279245", "0.5263805", "0.5259671", "0.5242475", "0.5239431", "0.52356243", "0.5214374", "0.52034646", "0.5201529", "0.5194698", "0.5173135", "0.5167688" ]
0.6924306
0
Copy the values from the given ndarray into the given (preallocated) numpy array. This can be used to avoid extra memory allocation that ndarray.asnumpy() performs.
def _copy_into(ndarray, nparray): assert nparray.size == ndarray.size assert nparray.flags.f_contiguous and nparray.flags.behaved # NOTE: The copy=False variant of NDArray.astype does not seem to work if ndarray.dtype != nparray.dtype: ndarray = ndarray.astype(nparray.dtype) mx.base.check_call(mx.base._LIB.MXNDArraySyncCopyToCPU( ndarray.handle, nparray.ctypes.data_as(ctypes.c_void_p), ctypes.c_size_t(ndarray.size)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copyto(self, nparray):\n np.copyto(self.__np_array, nparray)", "def copy(a):\n return array(a, copy=True)", "def copyto(self, nparray):\n if self.__parity[0]==0:\n np.copyto(self.__np_array1, nparray)\n else:\n np.copyto(self.__np_array2, nparray)", "def _copy_from_NdArray(vecObj, NdArray):\n vecObj.getNdArray()[:] = NdArray\n return", "def assign(ary, out):\n\n from . import _bh\n\n if not np.isscalar(ary):\n (ary, out) = broadcast_arrays(ary, out)[0]\n # We ignore self assignments\n if _bh.same_view(ary, out):\n return\n\n # Assigning empty arrays doesn't do anything\n if hasattr(ary, \"size\"):\n if ary.size == 0:\n return\n if hasattr(out, \"size\"):\n if out.size == 0:\n return\n\n # We use a tmp array if the in-/out-put has memory conflicts\n if overlap_conflict(out, ary):\n tmp = array_create.empty_like(out)\n assign(ary, tmp)\n return assign(tmp, out)\n\n if bhary.check(out):\n _bh.ufunc(UFUNCS[\"identity\"].info['id'], (out, ary))\n else:\n if bhary.check(ary):\n if \"BH_SYNC_WARN\" in os.environ:\n import warnings\n warnings.warn(\"BH_SYNC_WARN: Copying the array to NumPy\", RuntimeWarning, stacklevel=2)\n ary = ary.copy2numpy()\n out[...] = ary", "def copytobuffer(self, nparray):\n if self.__parity[0]==0:\n np.copyto(self.__np_array2, nparray)\n else:\n np.copyto(self.__np_array1, nparray)", "def memcopy(dst, src, offset=0, length=None):\n length = length if length is not None else len(src)\n assert type(dst) == np.ndarray, 'invalid type for \"dst\" in memcopy'\n if type(src) is not np.ndarray:\n if type(src) is str and six.PY3:\n src = src.encode()\n src = np.frombuffer(src, dtype='uint8', count=len(src))\n\n dst[:] = src[offset:offset + length]", "def _copy_to_numpy_array(l, a):\n total_size = np.sum([x.size for x in l])\n assert total_size == a.size\n j = 0\n for x in l:\n # a[j:j+x.size] = x.asnumpy().reshape((x.size,))\n _copy_into(x, a[j:j + x.size])\n j += x.size", "def test_ndarray_copy(self):\r\n assert copy(numpy.ndarray) is numpy.ndarray\r\n assert deepcopy(numpy.ndarray) is numpy.ndarray", "def _copy_from_numpy_array(a, l):\n total_size = np.sum([x.size for x in l])\n assert total_size == a.size\n j = 0\n for x in l:\n x[:] = a[j:j + x.size].reshape(x.shape)\n j += x.size", "def ndarray_copy(func):\n\n def wrapper(*args, **kw):\n args = list(args)\n\n # copy args\n for idx, arg in enumerate(args):\n if type(arg) == np.ndarray:\n args[idx] = arg.copy()\n\n # Copy key args\n for key, value in kw.items():\n if type(value) == np.ndarray:\n kw[key] = value.copy()\n\n return func(*args, **kw)\n\n return wrapper", "def _asarray1d(arr, copy=False):\n if copy:\n return asarray(arr).flatten()\n else:\n return asarray(arr).ravel()", "def test_setitem_rightvalue_ndarray_fails():\r\n a = numpy.arange(3 * 4 * 5)\r\n a.resize((3, 4, 5))\r\n a = theano._asarray(a, dtype='float32')\r\n _a = cuda_ndarray.CudaNdarray(a)\r\n\r\n b = theano._asarray([7, 8, 9, 10], dtype='float32')\r\n _b = cuda_ndarray.CudaNdarray(b)\r\n b5 = theano._asarray([7, 8, 9, 10, 11], dtype='float32')\r\n _b5 = cuda_ndarray.CudaNdarray(b)\r\n\r\n # attempt to assign the ndarray b with setitem\r\n _a[:, :, 1] = _b\r\n a[:, :, 1] = b\r\n assert numpy.allclose(numpy.asarray(_a), a)\r\n\r\n #test direct transfert from numpy to contiguous region\r\n # attempt to assign the ndarray b with setitem\r\n # same number of dim\r\n mat = numpy.random.rand(4, 5).astype('float32')\r\n _a[2, :, :] = mat\r\n a[2, :, :] = mat\r\n assert numpy.allclose(numpy.asarray(_a), a)\r\n\r\n # without same number of dim\r\n try:\r\n 
_a[0, :, :] = mat\r\n #a[0, :, :] = mat\r\n #assert numpy.allclose(numpy.asarray(_a), a)\r\n except ValueError, e:\r\n pass\r\n\r\n #test direct transfert from numpy with broadcast\r\n _a[0, :, :] = b5\r\n a[0, :, :] = b5\r\n assert numpy.allclose(numpy.asarray(_a), a)\r\n\r\n #test direct transfert from numpy to not contiguous region\r\n # attempt to assign the ndarray b with setitem\r\n _a[:, :, 2] = b\r\n a[:, :, 2] = b\r\n assert numpy.allclose(numpy.asarray(_a), a)", "def test_setitem_rightvalue_ndarray_fails():\n a = numpy.arange(3 * 4 * 5)\n a.resize((3, 4, 5))\n a = theano._asarray(a, dtype='float32')\n _a = cuda_ndarray.CudaNdarray(a)\n\n b = theano._asarray([7, 8, 9, 10], dtype='float32')\n _b = cuda_ndarray.CudaNdarray(b)\n b5 = theano._asarray([7, 8, 9, 10, 11], dtype='float32')\n _b5 = cuda_ndarray.CudaNdarray(b)\n\n # attempt to assign the ndarray b with setitem\n _a[:, :, 1] = _b\n a[:, :, 1] = b\n assert numpy.allclose(numpy.asarray(_a), a)\n\n # test direct transfert from numpy to contiguous region\n # attempt to assign the ndarray b with setitem\n # same number of dim\n mat = numpy.random.rand(4, 5).astype('float32')\n _a[2, :, :] = mat\n a[2, :, :] = mat\n assert numpy.allclose(numpy.asarray(_a), a)\n\n # without same number of dim\n try:\n _a[0, :, :] = mat\n #a[0, :, :] = mat\n #assert numpy.allclose(numpy.asarray(_a), a)\n except ValueError as e:\n pass\n\n # test direct transfert from numpy with broadcast\n _a[0, :, :] = b5\n a[0, :, :] = b5\n assert numpy.allclose(numpy.asarray(_a), a)\n\n # test direct transfert from numpy to not contiguous region\n # attempt to assign the ndarray b with setitem\n _a[:, :, 2] = b\n a[:, :, 2] = b\n assert numpy.allclose(numpy.asarray(_a), a)", "def copy(a, order='C', subok=False):\n\n if not use_origin_backend(a):\n return dpnp_copy(a, order, subok)\n\n return call_origin(numpy.copy, a, order, subok)", "def assign(array1, array2):\n for i in range(len(array1)):\n array2[i] = array1[i]", "def copy(self):\n copy_arr = DynamicArray(self._growth_factor) # Create new array to store values\n for i in range(self._length): # Append all values from original\n copy_arr.append(self._arr[i])\n return copy_arr", "def test_reference_to_array(self):\n arr = numpy.arange(0.0, 10.0, 0.1)\n arr = numpy.reshape(arr, (25, 4))\n vtk_arr = array_handler.array2vtk(arr)\n arr1 = array_handler.vtk2array(vtk_arr)\n # Now make sure these are using the same memory.\n arr[0][0] = 100.0\n self.assertEqual(arr[0][0], arr1[0][0])\n self.assertEqual(arr.shape, arr1.shape)", "def copyto(self, src, where=None):\n for k in self.containers:\n v = self.__dict__[k]\n if isinstance(v, np.ndarray):\n np.copyto(v, src.__dict__[k], where=where)\n else:\n v.copyto(src.__dict__[k], where=where)", "def double(arr):\n newarr = np.array([(xx,xx) for xx in arr]).ravel()\n return newarr", "def test_safe_array_cast(self):\n msg = '^Copying array of size \\(5, 5\\) to convert it in the ' \\\n 'right format$'\n with self.assertWarnsRegex(RuntimeWarning, msg):\n PoissonRegression._safe_array(self.X.astype(int))\n\n msg = '^Copying array of size \\(3, 5\\) to create a ' \\\n 'C-contiguous version of it$'\n with self.assertWarnsRegex(RuntimeWarning, msg):\n PoissonRegression._safe_array(self.X[::2])\n\n np.testing.assert_array_equal(self.X,\n PoissonRegression._safe_array(self.X))", "def cpointer_to_ndarray(ptr, size, dtype, shape):\n buf = np.core.multiarray.int_asbuffer(\n ctypes.addressof(ptr.contents), np.dtype(dtype).itemsize * size)\n arr = np.ndarray(shape, dtype=dtype, 
buffer=buf)\n return arr", "def encode_from_ndarray(self, stream, array):\n # Check for bad input.\n assert isinstance( array, numpy.ndarray ), \\\n \"Expected a numpy.ndarray, not {}\".format( type(array) )\n assert array.dtype == self._voxels_metadata.dtype, \\\n \"Wrong dtype. Expected {}, got {}\".format( self._metainfo.dtype, array.dtype )\n\n # Unfortunately, if the array isn't F_CONTIGUOUS, we have to copy it.\n if not array.flags['F_CONTIGUOUS']:\n array_copy = numpy.empty_like(array, order='F')\n array_copy[:] = array[:]\n array = array_copy\n\n buf = numpy.getbuffer(array)\n self._send_from_buffer(buf, stream)", "def array(a: any,\n dtype: any = None,\n order: {'C', 'F', 'A', 'K'} = 'K',\n *,\n alignment: int = 16,\n copy: bool = True,\n **kwargs) -> np.ndarray:\n\n # Store reference to the original array\n _a = a\n\n # Get array\n a = np.asarray(_a, dtype=dtype, order=order)\n\n # Check if a new copy is created\n _new = a is not _a\n\n # Get dtype, size and alignment\n dtype = a.dtype\n shape = a.shape\n size = np.prod(shape)\n order = 'C' if a.flags.c_contiguous else 'F'\n alignment = int(alignment)\n\n # Check alignment is compatible\n if alignment % dtype.itemsize:\n raise ValueError(\n f\"{dtype} is not compatible with 'alignment={alignment}'\")\n\n # If new, check alignment and eventually return if already aligned\n if (_new or not copy) and isaligned(a, alignment=alignment):\n return a\n\n # Get max_shift\n max_shift = alignment // dtype.itemsize\n\n # If _new, resize\n if _new:\n # Resize memory\n a.resize(size + max_shift)\n\n # Reference to buffer\n buffer = a\n\n # Return to the orginal size\n a = a[:size]\n\n # Otherwise, get new buffer\n else:\n buffer = np.empty((size + max_shift,), dtype=dtype, order=order)\n\n # Get right shift\n shift = (alignment - (buffer.ctypes.data % alignment)) // dtype.itemsize\n assert (shift <= max_shift)\n\n # Re-align if needed\n buffer = buffer[shift:size + shift]\n\n # Reshape\n buffer = np.reshape(buffer, shape, order=order)\n\n # Check alignment\n assert (isaligned(buffer, alignment=alignment))\n\n # Copy if a was provided\n np.copyto(buffer, np.reshape(a, shape, order=order))\n\n # Return buffer\n return buffer", "def adapt_array(self,array):\n import io\n import array,numpy\n out = io.BytesIO()\n numpy.save(out, array)\n out.seek(0)\n \n return out.read()", "def _resize_arr(self, new_capacity):\n if new_capacity < self._length:\n raise RuntimeError('New capacity is lower than length')\n\n # Copy values to array with new capacity\n longer_arr = self._create_array(new_capacity)\n for i in range(self._length):\n longer_arr[i] = self._arr[i]\n\n # Set the arr to the new array\n self._arr = longer_arr\n self._capacity = new_capacity", "def _hpat_ensure_array_capacity(new_size, arr):\n\n k = len(arr)\n if k >= new_size:\n return arr\n\n n = k\n while n < new_size:\n n = 2 * n\n res = numpy.empty(n, arr.dtype)\n res[:k] = arr[:k]\n return res", "def array(self, src) -> None:\n self.set_array(src)", "def to_numpy(array):\n if not CUPY_LOADED:\n return array\n else:\n return xp.asnumpy(array)", "def resize_array(src, dest, fill=0):\n old_shape = np.shape(src)\n new_array = np.full_like(dest, fill)\n if len(old_shape) is 2: # Matrix\n height, width = old_shape\n new_array[:height, :width] = src\n else: # Vector\n length = old_shape[0]\n new_array[:length] = src\n return new_array" ]
[ "0.69130385", "0.63658416", "0.63439447", "0.6323527", "0.6044379", "0.6023708", "0.60227984", "0.601236", "0.5889105", "0.5846941", "0.5768551", "0.54975843", "0.5460591", "0.543775", "0.54138625", "0.5381993", "0.5331947", "0.5294816", "0.5264773", "0.5249572", "0.52417374", "0.52365416", "0.5175633", "0.51534206", "0.5152428", "0.5125215", "0.512184", "0.5116065", "0.5115446", "0.5114505" ]
0.7299799
0
Copy values from each NDArray in the list l to the numpy array a (in order).
def _copy_to_numpy_array(l, a): total_size = np.sum([x.size for x in l]) assert total_size == a.size j = 0 for x in l: # a[j:j+x.size] = x.asnumpy().reshape((x.size,)) _copy_into(x, a[j:j + x.size]) j += x.size
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _copy_from_numpy_array(a, l):\n total_size = np.sum([x.size for x in l])\n assert total_size == a.size\n j = 0\n for x in l:\n x[:] = a[j:j + x.size].reshape(x.shape)\n j += x.size", "def listoflistToarray(l):\n max_dim=max([len(c) for c in l])\n all_array=[np.pad(c,(0,max_dim-len(c)),\"constant\",constant_values=(0,0)) for c in l]\n return np.array(all_array)", "def to_numpy(a: List[tvm.nd.NDArray]) -> List[np.ndarray]:\n assert a is not None, \"Empty result cannot be converted to numpy\"\n return [x.numpy() for x in a]", "def n(l):\n return np.array(l,dtype=object)", "def a(a,N): \n a=np.ravel(a, order='F') # Same order\n return a", "def a(a,N): \n a=np.ravel(a, order='F') # Same order\n return a", "def to_tvm_ndarray(a: List[np.ndarray]) -> List[tvm.nd.NDArray]:\n assert a is not None, \"Empty result cannot be converted to TVM NDArray\"\n return [tvm.nd.array(x) for x in a]", "def list_to_array(llist, target):\r\n while llist.is_empty() == False:\r\n target.append(llist.pop(0))\r\n \r\n return", "def from_list(cls, l):\n max_cols = max([len(r) for r in l])\n output = cls(len(l), max_cols)\n\n for i, r in enumerate(l):\n for j, c in enumerate(r):\n output[i, j] = c\n\n return output", "def flatten_list(l):\n obj = []\n\n def recurse(ll):\n if isinstance(ll, list) or isinstance(ll, np.ndarray):\n for i, _ in enumerate(ll):\n recurse(ll[i])\n else:\n obj.append(ll)\n\n recurse(l)\n return obj", "def concatMatrix(self, a, l, cols):\n l_i = l * np.identity(cols)\n concat = np.concatenate((a, l_i))\n\n return concat", "def _zeros_like_nd_list(l, dtype):\n total_size = np.sum([x.size for x in l])\n return np.zeros(total_size, dtype)", "def from_numpy(self, a):\n raise NotImplementedError(\"from_numpy\")", "def cell2mat2(l, max_len=None) -> nptyp.NDArray[float]:\n if max_len is None:\n max_len = np.amax([len(l1) for l1 in l])\n \n n = len(l)\n m = np.zeros([n, max_len]) + np.nan\n \n for ii in range(n):\n l1 = l[ii]\n if len(l1) > max_len:\n m[ii,:] = l1[:max_len]\n elif len(l1) < max_len:\n m[ii,:len(l1)] = l1\n else:\n m[ii,:] = l1\n\n return m", "def np(self, *args, **kwargs):\n return plist([np.array(x, *args, **kwargs) for x in self], root=self.__root__)", "def _get_list_from_dict(d, l):\n\n new_list = []\n\n for val in l:\n subdict = d[val]\n inner_list = []\n for subval in l:\n inner_list.append(subdict[subval])\n new_list.append(inner_list)\n\n return np.array(new_list)", "def copy(a):\n return array(a, copy=True)", "def translate_pandas_to_numpy(data_list:list) -> list:\n list_size = len(data_list)\n for i in range(list_size):\n data_list[i] = data_list[i].values.astype('float32')\n return data_list", "def copyto(self, nparray):\n np.copyto(self.__np_array, nparray)", "def to_np_arr_and_then_mean(list_of_lists):\n # print(list_of_lists)\n np_arr = np.array(list_of_lists)\n return np_arr.mean(axis=0)", "def __setitem__( self, l, c_l ) :\n\n if( l == len( self ) ) :\n self.coefficients.append( float( c_l ) )\n else :\n self.coefficients[l] = float( c_l )", "def to_np_arr_and_then_mean(list_of_lists):\n np_arr = np.array(list_of_lists)\n return np_arr.mean(axis=0)", "def extend_array(l, n):\n l.extend([-1] * n)\n l = l[:n]\n return l", "def putarowlist(self,sub_,ptrb_,ptre_,asub_,aval_):\n num_ = None\n if num_ is None:\n num_ = len(sub_)\n elif num_ != len(sub_):\n raise IndexError(\"Inconsistent length of array sub\")\n if num_ is None:\n num_ = len(ptrb_)\n elif num_ != len(ptrb_):\n raise IndexError(\"Inconsistent length of array ptrb\")\n if num_ is None:\n num_ = len(ptre_)\n 
elif num_ != len(ptre_):\n raise IndexError(\"Inconsistent length of array ptre\")\n if sub_ is None:\n raise ValueError(\"Argument sub cannot be None\")\n if sub_ is None:\n raise ValueError(\"Argument sub may not be None\")\n if isinstance(sub_, numpy.ndarray) and sub_.dtype is numpy.dtype(numpy.int32) and sub_.flags.contiguous:\n _sub_copyarray = False\n _sub_tmp = ctypes.cast(sub_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif sub_ is not None:\n _sub_copyarray = True\n _sub_np_tmp = numpy.zeros(len(sub_),numpy.dtype(numpy.int32))\n _sub_np_tmp[:] = sub_\n assert _sub_np_tmp.flags.contiguous\n _sub_tmp = ctypes.cast(_sub_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _sub_copyarray = False\n _sub_tmp = None\n \n if ptrb_ is None:\n raise ValueError(\"Argument ptrb cannot be None\")\n if ptrb_ is None:\n raise ValueError(\"Argument ptrb may not be None\")\n if isinstance(ptrb_, numpy.ndarray) and ptrb_.dtype is numpy.dtype(numpy.int64) and ptrb_.flags.contiguous:\n _ptrb_copyarray = False\n _ptrb_tmp = ctypes.cast(ptrb_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif ptrb_ is not None:\n _ptrb_copyarray = True\n _ptrb_np_tmp = numpy.zeros(len(ptrb_),numpy.dtype(numpy.int64))\n _ptrb_np_tmp[:] = ptrb_\n assert _ptrb_np_tmp.flags.contiguous\n _ptrb_tmp = ctypes.cast(_ptrb_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _ptrb_copyarray = False\n _ptrb_tmp = None\n \n if ptre_ is None:\n raise ValueError(\"Argument ptre cannot be None\")\n if ptre_ is None:\n raise ValueError(\"Argument ptre may not be None\")\n if isinstance(ptre_, numpy.ndarray) and ptre_.dtype is numpy.dtype(numpy.int64) and ptre_.flags.contiguous:\n _ptre_copyarray = False\n _ptre_tmp = ctypes.cast(ptre_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif ptre_ is not None:\n _ptre_copyarray = True\n _ptre_np_tmp = numpy.zeros(len(ptre_),numpy.dtype(numpy.int64))\n _ptre_np_tmp[:] = ptre_\n assert _ptre_np_tmp.flags.contiguous\n _ptre_tmp = ctypes.cast(_ptre_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _ptre_copyarray = False\n _ptre_tmp = None\n \n if asub_ is None:\n raise ValueError(\"Argument asub cannot be None\")\n if asub_ is None:\n raise ValueError(\"Argument asub may not be None\")\n if isinstance(asub_, numpy.ndarray) and asub_.dtype is numpy.dtype(numpy.int32) and asub_.flags.contiguous:\n _asub_copyarray = False\n _asub_tmp = ctypes.cast(asub_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif asub_ is not None:\n _asub_copyarray = True\n _asub_np_tmp = numpy.zeros(len(asub_),numpy.dtype(numpy.int32))\n _asub_np_tmp[:] = asub_\n assert _asub_np_tmp.flags.contiguous\n _asub_tmp = ctypes.cast(_asub_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _asub_copyarray = False\n _asub_tmp = None\n \n if aval_ is None:\n raise ValueError(\"Argument aval cannot be None\")\n if aval_ is None:\n raise ValueError(\"Argument aval may not be None\")\n if isinstance(aval_, numpy.ndarray) and aval_.dtype is numpy.dtype(numpy.float64) and aval_.flags.contiguous:\n _aval_copyarray = False\n _aval_tmp = ctypes.cast(aval_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif aval_ is not None:\n _aval_copyarray = True\n _aval_np_tmp = numpy.zeros(len(aval_),numpy.dtype(numpy.float64))\n _aval_np_tmp[:] = aval_\n assert _aval_np_tmp.flags.contiguous\n _aval_tmp = ctypes.cast(_aval_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _aval_copyarray = False\n 
_aval_tmp = None\n \n res = __library__.MSK_XX_putarowlist64(self.__nativep,num_,_sub_tmp,_ptrb_tmp,_ptre_tmp,_asub_tmp,_aval_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def make_A(\n As, ns, device=torch.device(\"cpu\"), dtype=torch.float32\n): # pylint: disable=unused-argument\n As = [A_j.to(device, dtype) for A_j in As]\n return As", "def flatten(self, l):\n if self.left:\n self.left.flatten(l)\n l.append(self.data)\n if self.right:\n self.right.flatten(l)", "def update(self):\n for i in range(self.min_y, self.max_y + 1):\n for j in range(self.min_x, self.max_x + 1):\n try:\n DIMENSIONAL_ARRAY[i-1][j-1] = self.lis[i-self.min_y][j-self.min_x]\n except IndexError:\n pass", "def array_to_list(llist, source):\r\n while source: #a list is considered True as long as it is not empty\r\n llist.append(source.pop(0))\r\n \r\n return", "def toVector(tupL,n,v):\n v = numpy.zeros(n)\n for (i,val) in tupL:\n #print i,v\n v[i]=val\n return v", "def merge_AllLeft(lsts):\r\n new_lsts = []\r\n for row in lsts:\r\n array1 = add_tiles(row)\r\n new_lsts.append(array1)\r\n lsts = new_lsts\r\n\r\n return lsts" ]
[ "0.8242542", "0.63269496", "0.6141394", "0.5692313", "0.55791426", "0.55791426", "0.55682117", "0.54690933", "0.5439151", "0.5422417", "0.5382426", "0.5348696", "0.5288169", "0.5280279", "0.525362", "0.5152643", "0.514693", "0.5124691", "0.51072955", "0.5106846", "0.509638", "0.5051394", "0.5027678", "0.50275606", "0.50215125", "0.50204736", "0.5018302", "0.49812308", "0.49628145", "0.4961679" ]
0.8206372
1
Copy values from subarrays of the numpy array a to the NDArrays in the list l. The sizes of the sub arrays correspond to the sizes of the NDArrays, so that this performs a copy in the reverse direction of copy_to_numpy_array(). Entries of l can have different dtype than a.
def _copy_from_numpy_array(a, l): total_size = np.sum([x.size for x in l]) assert total_size == a.size j = 0 for x in l: x[:] = a[j:j + x.size].reshape(x.shape) j += x.size
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _copy_to_numpy_array(l, a):\n total_size = np.sum([x.size for x in l])\n assert total_size == a.size\n j = 0\n for x in l:\n # a[j:j+x.size] = x.asnumpy().reshape((x.size,))\n _copy_into(x, a[j:j + x.size])\n j += x.size", "def to_numpy(a: List[tvm.nd.NDArray]) -> List[np.ndarray]:\n assert a is not None, \"Empty result cannot be converted to numpy\"\n return [x.numpy() for x in a]", "def putarowlist(self,sub_,ptrb_,ptre_,asub_,aval_):\n num_ = None\n if num_ is None:\n num_ = len(sub_)\n elif num_ != len(sub_):\n raise IndexError(\"Inconsistent length of array sub\")\n if num_ is None:\n num_ = len(ptrb_)\n elif num_ != len(ptrb_):\n raise IndexError(\"Inconsistent length of array ptrb\")\n if num_ is None:\n num_ = len(ptre_)\n elif num_ != len(ptre_):\n raise IndexError(\"Inconsistent length of array ptre\")\n if sub_ is None:\n raise ValueError(\"Argument sub cannot be None\")\n if sub_ is None:\n raise ValueError(\"Argument sub may not be None\")\n if isinstance(sub_, numpy.ndarray) and sub_.dtype is numpy.dtype(numpy.int32) and sub_.flags.contiguous:\n _sub_copyarray = False\n _sub_tmp = ctypes.cast(sub_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif sub_ is not None:\n _sub_copyarray = True\n _sub_np_tmp = numpy.zeros(len(sub_),numpy.dtype(numpy.int32))\n _sub_np_tmp[:] = sub_\n assert _sub_np_tmp.flags.contiguous\n _sub_tmp = ctypes.cast(_sub_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _sub_copyarray = False\n _sub_tmp = None\n \n if ptrb_ is None:\n raise ValueError(\"Argument ptrb cannot be None\")\n if ptrb_ is None:\n raise ValueError(\"Argument ptrb may not be None\")\n if isinstance(ptrb_, numpy.ndarray) and ptrb_.dtype is numpy.dtype(numpy.int64) and ptrb_.flags.contiguous:\n _ptrb_copyarray = False\n _ptrb_tmp = ctypes.cast(ptrb_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif ptrb_ is not None:\n _ptrb_copyarray = True\n _ptrb_np_tmp = numpy.zeros(len(ptrb_),numpy.dtype(numpy.int64))\n _ptrb_np_tmp[:] = ptrb_\n assert _ptrb_np_tmp.flags.contiguous\n _ptrb_tmp = ctypes.cast(_ptrb_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _ptrb_copyarray = False\n _ptrb_tmp = None\n \n if ptre_ is None:\n raise ValueError(\"Argument ptre cannot be None\")\n if ptre_ is None:\n raise ValueError(\"Argument ptre may not be None\")\n if isinstance(ptre_, numpy.ndarray) and ptre_.dtype is numpy.dtype(numpy.int64) and ptre_.flags.contiguous:\n _ptre_copyarray = False\n _ptre_tmp = ctypes.cast(ptre_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif ptre_ is not None:\n _ptre_copyarray = True\n _ptre_np_tmp = numpy.zeros(len(ptre_),numpy.dtype(numpy.int64))\n _ptre_np_tmp[:] = ptre_\n assert _ptre_np_tmp.flags.contiguous\n _ptre_tmp = ctypes.cast(_ptre_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _ptre_copyarray = False\n _ptre_tmp = None\n \n if asub_ is None:\n raise ValueError(\"Argument asub cannot be None\")\n if asub_ is None:\n raise ValueError(\"Argument asub may not be None\")\n if isinstance(asub_, numpy.ndarray) and asub_.dtype is numpy.dtype(numpy.int32) and asub_.flags.contiguous:\n _asub_copyarray = False\n _asub_tmp = ctypes.cast(asub_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif asub_ is not None:\n _asub_copyarray = True\n _asub_np_tmp = numpy.zeros(len(asub_),numpy.dtype(numpy.int32))\n _asub_np_tmp[:] = asub_\n assert _asub_np_tmp.flags.contiguous\n _asub_tmp = ctypes.cast(_asub_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n 
else:\n _asub_copyarray = False\n _asub_tmp = None\n \n if aval_ is None:\n raise ValueError(\"Argument aval cannot be None\")\n if aval_ is None:\n raise ValueError(\"Argument aval may not be None\")\n if isinstance(aval_, numpy.ndarray) and aval_.dtype is numpy.dtype(numpy.float64) and aval_.flags.contiguous:\n _aval_copyarray = False\n _aval_tmp = ctypes.cast(aval_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif aval_ is not None:\n _aval_copyarray = True\n _aval_np_tmp = numpy.zeros(len(aval_),numpy.dtype(numpy.float64))\n _aval_np_tmp[:] = aval_\n assert _aval_np_tmp.flags.contiguous\n _aval_tmp = ctypes.cast(_aval_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _aval_copyarray = False\n _aval_tmp = None\n \n res = __library__.MSK_XX_putarowlist64(self.__nativep,num_,_sub_tmp,_ptrb_tmp,_ptre_tmp,_asub_tmp,_aval_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putacollist(self,sub_,ptrb_,ptre_,asub_,aval_):\n num_ = None\n if num_ is None:\n num_ = len(sub_)\n elif num_ != len(sub_):\n raise IndexError(\"Inconsistent length of array sub\")\n if num_ is None:\n num_ = len(ptrb_)\n elif num_ != len(ptrb_):\n raise IndexError(\"Inconsistent length of array ptrb\")\n if num_ is None:\n num_ = len(ptre_)\n elif num_ != len(ptre_):\n raise IndexError(\"Inconsistent length of array ptre\")\n if sub_ is None:\n raise ValueError(\"Argument sub cannot be None\")\n if sub_ is None:\n raise ValueError(\"Argument sub may not be None\")\n if isinstance(sub_, numpy.ndarray) and sub_.dtype is numpy.dtype(numpy.int32) and sub_.flags.contiguous:\n _sub_copyarray = False\n _sub_tmp = ctypes.cast(sub_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif sub_ is not None:\n _sub_copyarray = True\n _sub_np_tmp = numpy.zeros(len(sub_),numpy.dtype(numpy.int32))\n _sub_np_tmp[:] = sub_\n assert _sub_np_tmp.flags.contiguous\n _sub_tmp = ctypes.cast(_sub_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _sub_copyarray = False\n _sub_tmp = None\n \n if ptrb_ is None:\n raise ValueError(\"Argument ptrb cannot be None\")\n if ptrb_ is None:\n raise ValueError(\"Argument ptrb may not be None\")\n if isinstance(ptrb_, numpy.ndarray) and ptrb_.dtype is numpy.dtype(numpy.int64) and ptrb_.flags.contiguous:\n _ptrb_copyarray = False\n _ptrb_tmp = ctypes.cast(ptrb_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif ptrb_ is not None:\n _ptrb_copyarray = True\n _ptrb_np_tmp = numpy.zeros(len(ptrb_),numpy.dtype(numpy.int64))\n _ptrb_np_tmp[:] = ptrb_\n assert _ptrb_np_tmp.flags.contiguous\n _ptrb_tmp = ctypes.cast(_ptrb_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _ptrb_copyarray = False\n _ptrb_tmp = None\n \n if ptre_ is None:\n raise ValueError(\"Argument ptre cannot be None\")\n if ptre_ is None:\n raise ValueError(\"Argument ptre may not be None\")\n if isinstance(ptre_, numpy.ndarray) and ptre_.dtype is numpy.dtype(numpy.int64) and ptre_.flags.contiguous:\n _ptre_copyarray = False\n _ptre_tmp = ctypes.cast(ptre_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif ptre_ is not None:\n _ptre_copyarray = True\n _ptre_np_tmp = numpy.zeros(len(ptre_),numpy.dtype(numpy.int64))\n _ptre_np_tmp[:] = ptre_\n assert _ptre_np_tmp.flags.contiguous\n _ptre_tmp = ctypes.cast(_ptre_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _ptre_copyarray = False\n _ptre_tmp = None\n \n if asub_ is None:\n raise ValueError(\"Argument asub cannot be None\")\n if asub_ 
is None:\n raise ValueError(\"Argument asub may not be None\")\n if isinstance(asub_, numpy.ndarray) and asub_.dtype is numpy.dtype(numpy.int32) and asub_.flags.contiguous:\n _asub_copyarray = False\n _asub_tmp = ctypes.cast(asub_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif asub_ is not None:\n _asub_copyarray = True\n _asub_np_tmp = numpy.zeros(len(asub_),numpy.dtype(numpy.int32))\n _asub_np_tmp[:] = asub_\n assert _asub_np_tmp.flags.contiguous\n _asub_tmp = ctypes.cast(_asub_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _asub_copyarray = False\n _asub_tmp = None\n \n if aval_ is None:\n raise ValueError(\"Argument aval cannot be None\")\n if aval_ is None:\n raise ValueError(\"Argument aval may not be None\")\n if isinstance(aval_, numpy.ndarray) and aval_.dtype is numpy.dtype(numpy.float64) and aval_.flags.contiguous:\n _aval_copyarray = False\n _aval_tmp = ctypes.cast(aval_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif aval_ is not None:\n _aval_copyarray = True\n _aval_np_tmp = numpy.zeros(len(aval_),numpy.dtype(numpy.float64))\n _aval_np_tmp[:] = aval_\n assert _aval_np_tmp.flags.contiguous\n _aval_tmp = ctypes.cast(_aval_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _aval_copyarray = False\n _aval_tmp = None\n \n res = __library__.MSK_XX_putacollist64(self.__nativep,num_,_sub_tmp,_ptrb_tmp,_ptre_tmp,_asub_tmp,_aval_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def from_numpy(self, a):\n raise NotImplementedError(\"from_numpy\")", "def copy(a):\n return array(a, copy=True)", "def listoflistToarray(l):\n max_dim=max([len(c) for c in l])\n all_array=[np.pad(c,(0,max_dim-len(c)),\"constant\",constant_values=(0,0)) for c in l]\n return np.array(all_array)", "def polylinedatasetarraycopy(l,ratio,x_offset,y_offset,layername,arraycount): \r\n if layername in globalconfig.JUSTCOPYLIST: #根据图层名称判断是按中心放缩率直接放大后复制还是按多种放缩率放大后做边上的点的延伸或者不延伸的操作\r\n dataset=datasetjustcopy(l,globalconfig.CENTER_RATIO,x_offset,y_offset)\r\n elif layername in globalconfig.EXTENDCOPYLIST:\r\n dataset=datasetratiocopy_extend(l,ratio,x_offset,y_offset)\r\n else:\r\n if arraycount==0: #判断是最左边的图案\r\n dataset=datasetratiocopy_xl_extend(l,ratio,x_offset,y_offset)\r\n elif arraycount==globalconfig.X_ARRAY_NUM-1: #判断是最右边的图案\r\n dataset=datasetratiocopy_xr_extend(l,ratio,x_offset,y_offset)\r\n else: #判断是中间的图案 \r\n dataset=datasetratiocopy_notextend(l,ratio,x_offset,y_offset)\r\n return dataset", "def extend_array(l, n):\n l.extend([-1] * n)\n l = l[:n]\n return l", "def distribute_list(a):\n if isinstance(a, int):\n a = range(a)\n out_idxs = []\n ratio = (0.5*(1 + np.sqrt(5)) - 1)*len(a) # Golden fraction\n idx = -ratio\n while len(out_idxs) < len(a):\n idx += ratio\n idx %= len(a)\n idx_int = int(idx)\n while idx_int in out_idxs:\n idx_int += 1\n idx_int %= len(a)\n out_idxs.append(idx_int)\n out = copy.copy(a)\n try:\n for idx_out, idx_a in enumerate(out_idxs):\n out[idx_out] = a[idx_a]\n except TypeError:\n out = list(copy.copy(a))\n for idx_out, idx_a in enumerate(out_idxs):\n out[idx_out] = a[idx_a]\n\n return out", "def copyto(self, nparray):\n np.copyto(self.__np_array, nparray)", "def extend(a, b):\n if np.isscalar(a):\n return a\n # CRUFT: python 2.7 support\n extra_dims = (1,)*(b.ndim-a.ndim)\n return a.reshape(a.shape + extra_dims)\n # python 3 uses\n #extra_dims = (np.newaxis,)*(b.ndim-a.ndim)\n #return a[(..., *extra_dims)]", "def to_tvm_ndarray(a: List[np.ndarray]) -> 
List[tvm.nd.NDArray]:\n assert a is not None, \"Empty result cannot be converted to TVM NDArray\"\n return [tvm.nd.array(x) for x in a]", "def putarowlist(self,sub,ptrb,ptre,asub,aval): # 3\n num_ = None\n if num_ is None:\n num_ = len(sub)\n elif num_ != len(sub):\n raise IndexError(\"Inconsistent length of array sub\")\n if num_ is None:\n num_ = len(ptrb)\n elif num_ != len(ptrb):\n raise IndexError(\"Inconsistent length of array ptrb\")\n if num_ is None:\n num_ = len(ptre)\n elif num_ != len(ptre):\n raise IndexError(\"Inconsistent length of array ptre\")\n if num_ is None: num_ = 0\n if sub is None: raise TypeError(\"Invalid type for argument sub\")\n if sub is None:\n sub_ = None\n else:\n try:\n sub_ = memoryview(sub)\n except TypeError:\n try:\n _tmparr_sub = array.array(\"i\",sub)\n except TypeError:\n raise TypeError(\"Argument sub has wrong type\")\n else:\n sub_ = memoryview(_tmparr_sub)\n \n else:\n if sub_.format != \"i\":\n sub_ = memoryview(array.array(\"i\",sub))\n \n if ptrb is None: raise TypeError(\"Invalid type for argument ptrb\")\n if ptrb is None:\n ptrb_ = None\n else:\n try:\n ptrb_ = memoryview(ptrb)\n except TypeError:\n try:\n _tmparr_ptrb = array.array(\"q\",ptrb)\n except TypeError:\n raise TypeError(\"Argument ptrb has wrong type\")\n else:\n ptrb_ = memoryview(_tmparr_ptrb)\n \n else:\n if ptrb_.format != \"q\":\n ptrb_ = memoryview(array.array(\"q\",ptrb))\n \n if ptre is None: raise TypeError(\"Invalid type for argument ptre\")\n if ptre is None:\n ptre_ = None\n else:\n try:\n ptre_ = memoryview(ptre)\n except TypeError:\n try:\n _tmparr_ptre = array.array(\"q\",ptre)\n except TypeError:\n raise TypeError(\"Argument ptre has wrong type\")\n else:\n ptre_ = memoryview(_tmparr_ptre)\n \n else:\n if ptre_.format != \"q\":\n ptre_ = memoryview(array.array(\"q\",ptre))\n \n if asub is None: raise TypeError(\"Invalid type for argument asub\")\n if asub is None:\n asub_ = None\n else:\n try:\n asub_ = memoryview(asub)\n except TypeError:\n try:\n _tmparr_asub = array.array(\"i\",asub)\n except TypeError:\n raise TypeError(\"Argument asub has wrong type\")\n else:\n asub_ = memoryview(_tmparr_asub)\n \n else:\n if asub_.format != \"i\":\n asub_ = memoryview(array.array(\"i\",asub))\n \n if aval is None: raise TypeError(\"Invalid type for argument aval\")\n if aval is None:\n aval_ = None\n else:\n try:\n aval_ = memoryview(aval)\n except TypeError:\n try:\n _tmparr_aval = array.array(\"d\",aval)\n except TypeError:\n raise TypeError(\"Argument aval has wrong type\")\n else:\n aval_ = memoryview(_tmparr_aval)\n \n else:\n if aval_.format != \"d\":\n aval_ = memoryview(array.array(\"d\",aval))\n \n res = self.__obj.putarowlist64(num_,sub_,ptrb_,ptre_,asub_,aval_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def putacollist(self,sub,ptrb,ptre,asub,aval): # 3\n num_ = None\n if num_ is None:\n num_ = len(sub)\n elif num_ != len(sub):\n raise IndexError(\"Inconsistent length of array sub\")\n if num_ is None:\n num_ = len(ptrb)\n elif num_ != len(ptrb):\n raise IndexError(\"Inconsistent length of array ptrb\")\n if num_ is None:\n num_ = len(ptre)\n elif num_ != len(ptre):\n raise IndexError(\"Inconsistent length of array ptre\")\n if num_ is None: num_ = 0\n if sub is None: raise TypeError(\"Invalid type for argument sub\")\n if sub is None:\n sub_ = None\n else:\n try:\n sub_ = memoryview(sub)\n except TypeError:\n try:\n _tmparr_sub = array.array(\"i\",sub)\n except TypeError:\n raise TypeError(\"Argument sub has wrong 
type\")\n else:\n sub_ = memoryview(_tmparr_sub)\n \n else:\n if sub_.format != \"i\":\n sub_ = memoryview(array.array(\"i\",sub))\n \n if ptrb is None: raise TypeError(\"Invalid type for argument ptrb\")\n if ptrb is None:\n ptrb_ = None\n else:\n try:\n ptrb_ = memoryview(ptrb)\n except TypeError:\n try:\n _tmparr_ptrb = array.array(\"q\",ptrb)\n except TypeError:\n raise TypeError(\"Argument ptrb has wrong type\")\n else:\n ptrb_ = memoryview(_tmparr_ptrb)\n \n else:\n if ptrb_.format != \"q\":\n ptrb_ = memoryview(array.array(\"q\",ptrb))\n \n if ptre is None: raise TypeError(\"Invalid type for argument ptre\")\n if ptre is None:\n ptre_ = None\n else:\n try:\n ptre_ = memoryview(ptre)\n except TypeError:\n try:\n _tmparr_ptre = array.array(\"q\",ptre)\n except TypeError:\n raise TypeError(\"Argument ptre has wrong type\")\n else:\n ptre_ = memoryview(_tmparr_ptre)\n \n else:\n if ptre_.format != \"q\":\n ptre_ = memoryview(array.array(\"q\",ptre))\n \n if asub is None: raise TypeError(\"Invalid type for argument asub\")\n if asub is None:\n asub_ = None\n else:\n try:\n asub_ = memoryview(asub)\n except TypeError:\n try:\n _tmparr_asub = array.array(\"i\",asub)\n except TypeError:\n raise TypeError(\"Argument asub has wrong type\")\n else:\n asub_ = memoryview(_tmparr_asub)\n \n else:\n if asub_.format != \"i\":\n asub_ = memoryview(array.array(\"i\",asub))\n \n if aval is None: raise TypeError(\"Invalid type for argument aval\")\n if aval is None:\n aval_ = None\n else:\n try:\n aval_ = memoryview(aval)\n except TypeError:\n try:\n _tmparr_aval = array.array(\"d\",aval)\n except TypeError:\n raise TypeError(\"Argument aval has wrong type\")\n else:\n aval_ = memoryview(_tmparr_aval)\n \n else:\n if aval_.format != \"d\":\n aval_ = memoryview(array.array(\"d\",aval))\n \n res = self.__obj.putacollist64(num_,sub_,ptrb_,ptre_,asub_,aval_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def a(a,N): \n a=np.ravel(a, order='F') # Same order\n return a", "def a(a,N): \n a=np.ravel(a, order='F') # Same order\n return a", "def copy(self):\n obj = type(self)(self.a_n[:], domain=self.domain, name=self.name)\n if isinstance(obj.a_n, np.ndarray):\n obj.a_n = obj.a_n.copy()\n return obj", "def flatten(self, l, ltypes=(list, tuple)):\n i = 0\n while i < len(l):\n while isinstance(l[i], ltypes):\n if not l[i]:\n l.pop(i)\n if not len(l):\n break\n else:\n l[i:i + 1] = list(l[i])\n i += 1\n return l", "def putvarboundlist(self,sub_,bkx_,blx_,bux_):\n num_ = None\n if num_ is None:\n num_ = len(sub_)\n elif num_ != len(sub_):\n raise IndexError(\"Inconsistent length of array sub\")\n if num_ is None:\n num_ = len(bkx_)\n elif num_ != len(bkx_):\n raise IndexError(\"Inconsistent length of array bkx\")\n if num_ is None:\n num_ = len(blx_)\n elif num_ != len(blx_):\n raise IndexError(\"Inconsistent length of array blx\")\n if num_ is None:\n num_ = len(bux_)\n elif num_ != len(bux_):\n raise IndexError(\"Inconsistent length of array bux\")\n if sub_ is None:\n raise ValueError(\"Argument sub cannot be None\")\n if sub_ is None:\n raise ValueError(\"Argument sub may not be None\")\n if isinstance(sub_, numpy.ndarray) and sub_.dtype is numpy.dtype(numpy.int32) and sub_.flags.contiguous:\n _sub_copyarray = False\n _sub_tmp = ctypes.cast(sub_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif sub_ is not None:\n _sub_copyarray = True\n _sub_np_tmp = numpy.zeros(len(sub_),numpy.dtype(numpy.int32))\n _sub_np_tmp[:] = sub_\n assert 
_sub_np_tmp.flags.contiguous\n _sub_tmp = ctypes.cast(_sub_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _sub_copyarray = False\n _sub_tmp = None\n \n if bkx_ is None:\n raise ValueError(\"Argument bkx cannot be None\")\n if bkx_ is None:\n raise ValueError(\"Argument bkx may not be None\")\n if bkx_ is not None:\n _bkx_tmp = (ctypes.c_int32 * len(bkx_))(*bkx_)\n else:\n _bkx_tmp = None\n if blx_ is None:\n raise ValueError(\"Argument blx cannot be None\")\n if blx_ is None:\n raise ValueError(\"Argument blx may not be None\")\n if isinstance(blx_, numpy.ndarray) and blx_.dtype is numpy.dtype(numpy.float64) and blx_.flags.contiguous:\n _blx_copyarray = False\n _blx_tmp = ctypes.cast(blx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif blx_ is not None:\n _blx_copyarray = True\n _blx_np_tmp = numpy.zeros(len(blx_),numpy.dtype(numpy.float64))\n _blx_np_tmp[:] = blx_\n assert _blx_np_tmp.flags.contiguous\n _blx_tmp = ctypes.cast(_blx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _blx_copyarray = False\n _blx_tmp = None\n \n if bux_ is None:\n raise ValueError(\"Argument bux cannot be None\")\n if bux_ is None:\n raise ValueError(\"Argument bux may not be None\")\n if isinstance(bux_, numpy.ndarray) and bux_.dtype is numpy.dtype(numpy.float64) and bux_.flags.contiguous:\n _bux_copyarray = False\n _bux_tmp = ctypes.cast(bux_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif bux_ is not None:\n _bux_copyarray = True\n _bux_np_tmp = numpy.zeros(len(bux_),numpy.dtype(numpy.float64))\n _bux_np_tmp[:] = bux_\n assert _bux_np_tmp.flags.contiguous\n _bux_tmp = ctypes.cast(_bux_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _bux_copyarray = False\n _bux_tmp = None\n \n res = __library__.MSK_XX_putvarboundlist(self.__nativep,num_,_sub_tmp,_bkx_tmp,_blx_tmp,_bux_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def flatten_list(l):\n obj = []\n\n def recurse(ll):\n if isinstance(ll, list) or isinstance(ll, np.ndarray):\n for i, _ in enumerate(ll):\n recurse(ll[i])\n else:\n obj.append(ll)\n\n recurse(l)\n return obj", "def reshape_append_ones (a1, a2):\n l = [a1, a2]\n if (isinstance(a1, numpy.ndarray) and isinstance(a2, numpy.ndarray)):\n len1 = len(a1.shape)\n len2 = len(a2.shape)\n if (len1 == len2 or len1 == 0 or len2 == 0 or\n a1.shape[0] != a2.shape[0]):\n return l;\n elif (len1 < len2):\n d = len1\n maxLength = len2\n i = 0\n else:\n d = len2\n maxLength = len1\n i = 1\n while (d < maxLength):\n l[i] = numpy.expand_dims(l[i], d)\n d = d + 1\n return l", "def copy(a, order='C', subok=False):\n\n if not use_origin_backend(a):\n return dpnp_copy(a, order, subok)\n\n return call_origin(numpy.copy, a, order, subok)", "def _get_list_from_dict(d, l):\n\n new_list = []\n\n for val in l:\n subdict = d[val]\n inner_list = []\n for subval in l:\n inner_list.append(subdict[subval])\n new_list.append(inner_list)\n\n return np.array(new_list)", "def copyto(self, src, where=None):\n for k in self.containers:\n v = self.__dict__[k]\n if isinstance(v, np.ndarray):\n np.copyto(v, src.__dict__[k], where=where)\n else:\n v.copyto(src.__dict__[k], where=where)", "def _zeros_like_nd_list(l, dtype):\n total_size = np.sum([x.size for x in l])\n return np.zeros(total_size, dtype)", "def _copy_into(ndarray, nparray):\n assert nparray.size == ndarray.size\n assert nparray.flags.f_contiguous and nparray.flags.behaved\n # NOTE: The copy=False variant of NDArray.astype does not seem to 
work\n if ndarray.dtype != nparray.dtype:\n ndarray = ndarray.astype(nparray.dtype)\n mx.base.check_call(mx.base._LIB.MXNDArraySyncCopyToCPU(\n ndarray.handle,\n nparray.ctypes.data_as(ctypes.c_void_p),\n ctypes.c_size_t(ndarray.size)))", "def n(l):\n return np.array(l,dtype=object)", "def copy(a):\n height = len(a)\n width = len(a[0])\n new_a = create_board(width, height)\n\n for row in range(1, height - 1):\n for col in range(1, width - 1):\n new_a[row][col] = a[row][col]\n\n return new_a", "def list_copy(l: List[Any]) -> List[Any]:\n return [item for item in l]" ]
[ "0.824406", "0.5672978", "0.56350243", "0.5510067", "0.5456553", "0.52112097", "0.51868254", "0.5168819", "0.51417094", "0.5063647", "0.5061758", "0.49632236", "0.49518946", "0.494873", "0.4942277", "0.49309397", "0.49309397", "0.49226636", "0.48952842", "0.4880583", "0.4874069", "0.4798011", "0.4792793", "0.47747847", "0.47699744", "0.47626457", "0.47303686", "0.47249272", "0.4724855", "0.4724629" ]
0.8338329
0
Make a deep copy of the input arg_dict (dict param_name to mx.nd)
def _deep_copy_arg_dict(input_arg_dict): output_arg_dict = {} for name, param in input_arg_dict.items(): output_arg_dict[name] = param.copy() return output_arg_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ndarray_copy(func):\n\n def wrapper(*args, **kw):\n args = list(args)\n\n # copy args\n for idx, arg in enumerate(args):\n if type(arg) == np.ndarray:\n args[idx] = arg.copy()\n\n # Copy key args\n for key, value in kw.items():\n if type(value) == np.ndarray:\n kw[key] = value.copy()\n\n return func(*args, **kw)\n\n return wrapper", "def clone(self, *args, **kwargs):\n new_self = copy.copy(self)\n kwargs = self.get_arguments(args, kwargs, onlykeys=True, onlyused=True)\n _map_parameters = getattr(self, \"_map_parameters\", None)\n for key in kwargs:\n if _map_parameters is not None and key in _map_parameters:\n setattr(new_self, _map_parameters[key], kwargs[key])\n else:\n setattr(new_self, key, kwargs[key])\n return new_self", "def varcopy(self, vars):", "def copy_obs_dict(obs):\n return {k: np.copy(v) for k, v in obs.items()}", "def copy(self):\n obj = type(self)(self.a_n[:], domain=self.domain, name=self.name)\n if isinstance(obj.a_n, np.ndarray):\n obj.a_n = obj.a_n.copy()\n return obj", "def copy_dict(in_dict):\n\n if in_dict is None:\n return None\n\n out_dict = {}\n\n for key, val in in_dict.items():\n if isinstance(val, np.ndarray):\n out_dict[key] = val.copy()\n elif isinstance(val, dict):\n out_dict[key] = copy_dict(val)\n\n else:\n out_dict[key] = val\n\n return out_dict", "def copy(self) -> \"Param\":\n copied = super().copy()\n copied._stack = OrderedDiot(\n [(key, param.copy()) for key, param in self._stack.items()]\n )\n return copied", "def update_args(self, a_dict):\n for k, v in a_dict.items():\n if k is \"id\":\n self.id = v\n elif k is \"width\":\n self.__width = v\n elif k is \"height\":\n self.__height = v\n elif k is \"x\":\n self.__x = v\n elif k is \"y\":\n self.__y = v", "def clone_state_dict(state_dict):\n return OrderedDict([(name, clone(param)) for name, param in state_dict.items()])", "def _create_param_dict(self, func_args):\n for i, a in enumerate(func_args):\n self.fn.args[i].name = str(a)\n self.param_dict[a] = self.fn.args[i]", "def __call__(self, par_dict: dict) -> np.ndarray:", "def rebuild_param(self,vec,**kwargs):\n from collections import OrderedDict\n tmp = OrderedDict([('lengthscale',None),( 'variance',None),( 'gstds',None)])\n for key,val in kwargs.items():\n assert val!=None, \"Can't have None as fixed values\"\n tmp[key]=val\n for key,val in tmp.items():\n if val==None:\n tmp[key]=vec[0]\n vec = np.delete(vec,0)\n return np.array([tmp[key] for key in tmp])", "def copy(self):\n return TightBinding(dict((k,v.copy()) for k,v in self.__m__.items()), dimensions = self.dims, shape = self.shape)", "def getCloneArgs(self):\n\n values = {\n \"dict_arg\": self.subnode_dict_arg.makeClone(),\n \"key\": self.subnode_key.makeClone(),\n }\n\n values.update(self.getDetails())\n\n return values", "def _attrdict_copy(d):\n if isinstance(d, collections.Iterable):\n return AttrDict(dict(d))\n else:\n return AttrDict(vars(d))", "def copy (self, **kwargs):\n out = copy.deepcopy (self)\n out.update (**kwargs)\n return out", "def _prepare_input_for_pytorch(args, kwargs):\n if isinstance(args, (torch.Tensor, dict)):\n args = (args,)\n # In-place operators will update input tensor data as well.\n # Thus inputs are replicated before every forward call.\n args = copy.deepcopy(args)\n if kwargs:\n kwargs = copy.deepcopy(kwargs)\n else:\n kwargs = {}\n return args, kwargs", "def build_dict(arg):\n # helper function to the Evaluator.to_property_di_graph() method that\n # packages the dictionaries returned by the \"associate_\" family of\n # functions and then supplies 
the master dict (one_dict) to the Vertex\n # obj as **kwargs\n one_dict = {}\n for ar in arg:\n one_dict.update(ar)\n return one_dict", "def reverse_params(cls, ax_params, space): # pylint: disable=unused-argument\n orion_params = copy.deepcopy(ax_params)\n return orion_params", "def __deepcopy__(self, memodict=None):\n return self.__class__(self.m, self.n, deepcopy(self.data))", "def _copy_from_NdArray(vecObj, NdArray):\n vecObj.getNdArray()[:] = NdArray\n return", "def getCloneArgs(self):\n\n values = {\n \"locals_arg\": self.subnode_locals_arg.makeClone(),\n }\n\n values.update(self.getDetails())\n\n return values", "def copy_params(source, target):\n for target_param, param in zip(target.parameters(), source.parameters()):\n target_param.data.copy_(param.data)", "def copy(self):\n dyn = type(self)(self._hyperparams)\n dyn.Fm = np.copy(self.Fm)\n dyn.fv = np.copy(self.fv)\n dyn.dyn_covar = np.copy(self.dyn_covar)\n return dyn", "def copy(self):\n dyn = type(self)(self._hyperparams)\n dyn.Fm = np.copy(self.Fm)\n dyn.fv = np.copy(self.fv)\n dyn.dyn_covar = np.copy(self.dyn_covar)\n return dyn", "def reset_parameters(self, p: Dict[str, ArrayType]) -> None:\n self.p = self.opt.parameters.dict2vec(p)\n self._p_dict = self.opt.parameters.vec2dict(self.p)", "def copy_params(self):\n tf.get_default_session().run(self.copy_ops)", "def getCloneArgs(self):\n\n values = {\n \"dict_arg\": self.subnode_dict_arg.makeClone(),\n \"value\": self.subnode_value.makeClone(),\n }\n\n values.update(self.getDetails())\n\n return values", "def getCloneArgs(self):\n\n values = {\n \"value\": self.subnode_value.makeClone(),\n \"dict_arg\": self.subnode_dict_arg.makeClone(),\n \"key\": self.subnode_key.makeClone(),\n }\n\n values.update(self.getDetails())\n\n return values", "def test_named_params(self):\n varargs = ()\n kwargs = {'arg1' : \"arg1_val\", 'default' : \"default_val\"}\n method = getattr(self.foo,'f_onearg_and_default')\n var_dict = reassign_function_arguments(method, varargs, kwargs)\n self.assertEquals(kwargs, var_dict)" ]
[ "0.6190199", "0.5896156", "0.5724149", "0.56482667", "0.56246066", "0.56068045", "0.55655664", "0.5564277", "0.5551959", "0.55463356", "0.55164564", "0.54720664", "0.5424602", "0.5411418", "0.5409501", "0.54047245", "0.537435", "0.5354888", "0.5348229", "0.5329177", "0.5297332", "0.52586085", "0.5255011", "0.52543336", "0.52543336", "0.5248199", "0.5240473", "0.5224388", "0.52199316", "0.52193767" ]
0.76881003
0
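For readers of this sample, a minimal usage sketch of _deep_copy_arg_dict from the document field above; the parameter names, shapes, and values are hypothetical and only illustrate that the copy is independent of the original (assumes MXNet is installed and the function above is in scope):

import mxnet as mx

# Hypothetical arg_dict: parameter name -> mx.nd.NDArray, as used by the executors
arg_dict = {
    'noise_variance': mx.nd.array([0.1]),
    'lengthscale': mx.nd.array([1.0, 2.0]),
}

snapshot = _deep_copy_arg_dict(arg_dict)

# In-place mutation of the original (as the LBFGS executor would do) leaves the snapshot untouched
arg_dict['lengthscale'][:] = 0.0
print(snapshot['lengthscale'].asnumpy())  # still [1. 2.]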
In order to initialize LBFGS from multiple starting points, this function makes it possible to randomize, in place, an arg_dict (as used by executors to communicate parameters to LBFGS). The randomization is centered on mean_arg_dict, with standard deviation std.
def _inplace_arg_dict_randomization(arg_dict, mean_arg_dict, bounds, std=STARTING_POINT_RANDOMIZATION_STD): # We check that arg_dict and mean_arg_dict are compatible assert arg_dict.keys() == mean_arg_dict.keys() for name, param in arg_dict.items(): assert param.shape == mean_arg_dict[name].shape assert param.dtype == mean_arg_dict[name].dtype assert param.context == mean_arg_dict[name].context # We apply a sort to make the for loop deterministic (especially with the internal calls to mx.random) for name, param in sorted(arg_dict.items()): arg_dict[name][:] = mean_arg_dict[name] + mx.random.normal(0.0, std, shape=param.shape, dtype=param.dtype, ctx=param.context) lower, upper = bounds[name] lower = lower if lower is not None else -np.inf upper = upper if upper is not None else np.inf # We project back arg_dict[name] within its specified lower and upper bounds # (in case of we would have perturbed beyond those bounds) arg_dict[name][:] = mx.nd.maximum(lower, mx.nd.minimum(upper, arg_dict[name]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_random_params(scale, layer_sizes, seed=0):\n rs = npr.RandomState(seed)\n return [(rs.randn(insize, outsize) * scale, # weight matrix\n rs.randn(outsize) * scale) # bias vector\n for insize, outsize in zip(layer_sizes[:-1], layer_sizes[1:])]", "def apply_lbfgs_with_multiple_starts(\n exec_func, arg_dict, grad_dict, bounds, n_starts=N_STARTS, **kwargs):\n\n assert n_starts >= 1\n\n copy_of_initial_arg_dict = _deep_copy_arg_dict(arg_dict)\n best_objective_over_restarts = None\n best_arg_dict_over_restarts = copy_of_initial_arg_dict\n\n # Loop over restarts\n ret_infos = []\n for iter in range(n_starts):\n if iter > 0:\n _inplace_arg_dict_randomization(\n arg_dict, copy_of_initial_arg_dict, bounds)\n decorator = ExecutorDecorator(exec_func)\n ret_info = apply_lbfgs(\n decorator.exec_func, arg_dict, grad_dict, bounds, **kwargs)\n ret_infos.append(ret_info)\n if ret_info is None and (\n best_objective_over_restarts is None or\n decorator.best_objective < best_objective_over_restarts):\n best_objective_over_restarts = decorator.best_objective\n best_arg_dict_over_restarts = _deep_copy_arg_dict(arg_dict)\n\n # We copy back the values of the best parameters into arg_dict (again,\n # inplace, as required by the executor)\n for name in arg_dict.keys():\n arg_dict[name][:] = best_arg_dict_over_restarts[name]\n return ret_infos", "def _random_weight_initialization(self, mean=0, stddev=1):\n self.slp_weights = np.random.normal(mean, stddev, size=(self.n_rbfs, self.n_targets))", "def init_random_params(scale, layer_sizes, rs=npr.RandomState(0)):\n return [(scale * rs.randn(m, n), # weight matrix\n scale * rs.randn(n)) # bias vector\n for m, n in zip(layer_sizes[:-1], layer_sizes[1:])]", "def sampling(args: tuple):\n\n # unpack the input tuple\n z_mean, z_log_var = args\n\n # mini-batch size\n mb_size = K.shape(z_mean)[0]\n\n # latent space size\n dim = K.int_shape(z_mean)[1]\n\n # random normal vector with mean=0 and std=1.0\n epsilon = K.random_normal(shape=(mb_size, dim))\n\n return z_mean + K.exp(0.5 * z_log_var) * epsilon", "def sampling(args):\n z_mean, z_log_sigma = args\n epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim), mean = 0., stddev=0.1)\n return z_mean + K.exp(z_log_sigma) * epsilon", "def init(self, rng_key, num_warmup, init_params, model_args, model_kwargs):\n raise NotImplementedError", "def initBoundedParams(bounds, sn=[]):\n hypinit = {\n 'cov': np.zeros(len(bounds)),\n 'lik': np.atleast_1d(np.log(sn)),\n 'mean': np.array([])\n }\n # Sample from a uniform distribution\n for idx, pair in enumerate(bounds):\n # Randomize only if bounds are specified\n if isinstance(pair, collections.Iterable):\n hypinit['cov'][idx] = np.random.uniform(pair[0], pair[1])\n # If no bounds, then keep default value always\n else:\n hypinit['cov'][idx] = pair\n return hypinit", "def sampling(args):\r\n\r\n z_mean, z_log_var = args\r\n batch = K.shape(z_mean)[0]\r\n dim = K.int_shape(z_mean)[1]\r\n # by default, random_normal has mean=0 and std=1.0\r\n epsilon = K.random_normal(shape=(batch, dim))\r\n return z_mean + K.exp(0.5 * z_log_var) * epsilon", "def sampling(args):\n\n z_mean, z_log_var = args\n batch = K.shape(z_mean)[0]\n dim = K.int_shape(z_mean)[1]\n # by default, random_normal has mean=0 and std=1.0\n epsilon = K.random_normal(shape=(batch, dim))\n return z_mean + K.exp(0.5 * z_log_var) * epsilon", "def sampling(args):\n\n z_mean, z_log_var = args\n batch = K.shape(z_mean)[0]\n dim = K.int_shape(z_mean)[1]\n # by default, random_normal has mean=0 and std=1.0\n epsilon = 
K.random_normal(shape=(batch, dim))\n return z_mean + K.exp(0.5 * z_log_var) * epsilon", "def sampling(args):\n\n z_mean, z_log_var = args\n batch = K.shape(z_mean)[0]\n dim = K.int_shape(z_mean)[1]\n # by default, random_normal has mean=0 and std=1.0\n epsilon = K.random_normal(shape=(batch, dim))\n return z_mean + K.exp(0.5 * z_log_var) * epsilon", "def sampling(args):\n\n z_mean, z_log_var = args\n batch = K.shape(z_mean)[0]\n dim = K.int_shape(z_mean)[1]\n # by default, random_normal has mean=0 and std=1.0\n epsilon = K.random_normal(shape=(batch, dim))\n return z_mean + K.exp(0.5 * z_log_var) * epsilon", "def sampling(args):\n\n z_mean, z_log_var = args\n batch = K.shape(z_mean)[0]\n dim = K.int_shape(z_mean)[1]\n # by default, random_normal has mean=0 and std=1.0\n epsilon = K.random_normal(shape=(batch, dim))\n return z_mean + K.exp(0.5 * z_log_var) * epsilon", "def sampling(args):\n\n z_mean, z_log_var = args\n batch = K.shape(z_mean)[0]\n dim = K.shape(z_mean)[1]\n epsilon = K.random_normal(shape=(batch, dim))\n return z_mean + K.exp(0.5 * z_log_var) * epsilon", "def init_random_params(self,scale, layer_sizes, rs=npr.RandomState(0)):\n return [(rs.randn(insize, outsize) * scale, # weight matrix\n rs.randn(outsize) * scale) # bias vector\n for insize, outsize in zip(self.layer_sizes[:-1], self.layer_sizes[1:])]", "def sampling(args):\n\n z_mean, z_log_var = args\n batch = K.shape(z_mean)[0]\n dim = K.int_shape(z_mean)[1]\n # by default, random_normal has mean = 0 and std = 1.0\n epsilon = K.random_normal(shape=(batch, dim))\n return z_mean + K.exp(0.5 * z_log_var) * epsilon", "def sampling(args):\r\n z_mean,z_log_var = args\r\n batch = K.shape(z_mean)[0]\r\n dim = K.shape(z_mean)[1]\r\n epsilon = K.random_normal(shape=(batch,dim),\r\n mean=0.0,stddev=1.0)\r\n std_epsilon = 1e-4\r\n return z_mean + (z_log_var + std_epsilon) * epsilon", "def GaussianPosteriorSample(bs, ls) :\n def gps(args) :\n mu, log_var = args\n eps = K.random_normal(shape=(bs, ls), mean=0.0, stddev=1.0) # 10 x 2\n return mu + K.exp(log_var / 2.) 
* eps\n return gps", "def sampling(args):\n z_mean, z_log_var = args\n batch = K.shape(z_mean)[0]\n dim = K.int_shape(z_mean)[1]\n # by default, random_normal has mean=0 and std=1.0\n epsilon = K.random_normal(shape=(batch, dim))\n return z_mean + K.exp(-0.5 * z_log_var) * epsilon", "def custom_init(init_params, seed=0):\n import numpy as np\n new_params = []\n rng = jax.random.PRNGKey(seed)\n i = 0\n number_layers = len([0 for l1 in init_params if len(l1) != 0])\n for l1 in init_params:\n if (len(l1)) == 0: new_params.append(()); continue\n new_l1 = []\n for l2 in l1:\n if len(l2.shape) == 1:\n # Zero init biases\n new_l1.append(jnp.zeros_like(l2))\n else:\n n = max(l2.shape)\n first = int(i == 0)\n last = int(i == number_layers - 1)\n mid = int((i != 0) * (i != number_layers - 1))\n mid *= i\n\n std = 1.0 / np.sqrt(n)\n std *= 2.2 * first + 0.58 * mid + n * last\n\n if std == 0:\n raise NotImplementedError(\"Wrong dimensions for MLP\")\n\n new_l1.append(jax.random.normal(rng, l2.shape) * std)\n rng += 1\n i += 1\n\n new_params.append(new_l1)\n\n return new_params", "def sampling(self, args):\n self.z_mean, self.z_log_var = args\n batch = K.shape(self.z_mean)[0]\n dim = K.int_shape(self.z_mean)[1]\n epsilon = K.random_uniform(shape=(batch, dim))\n \n return self.z_mean + K.exp(0.5 * self.z_log_var) * epsilon", "def sampling(self, args):\n z_mean, z_log_var = args\n batch = K.shape(z_mean)[0]\n dim = K.shape(z_mean)[1]\n epsilon = K.random_normal(shape=(batch, dim))\n return z_mean + K.exp(0.5 * z_log_var) * epsilon", "def randomize(self, rand_gen=None, *args, **kwargs):\n if rand_gen is None:\n rand_gen = np.random.normal\n # first take care of all parameters (from N(0,1))\n x = rand_gen(size=self._size_transformed(), *args, **kwargs)\n updates = self.update_model()\n self.update_model(False) # Switch off the updates\n self.optimizer_array = x # makes sure all of the tied parameters get the same init (since there's only one prior object...)\n # now draw from prior where possible\n x = self.param_array.copy()\n #Py3 fix\n #[np.put(x, ind, p.rvs(ind.size)) for p, ind in self.priors.iteritems() if not p is None]\n [np.put(x, ind, p.rvs(ind.size)) for p, ind in self.priors.items() if not p is None]\n unfixlist = np.ones((self.size,),dtype=np.bool)\n unfixlist[self.constraints[__fixed__]] = False\n self.param_array.flat[unfixlist] = x.view(np.ndarray).ravel()[unfixlist]\n self.update_model(updates)", "def random():\n gauss_scale = 10**np.random.uniform(1, 3)\n lorentz_scale = 10**np.random.uniform(1, 3)\n cor_length_static = 10**np.random.uniform(0, 3)\n cor_length_dynamic = 10**np.random.uniform(0, 3)\n pars = dict(\n #background=0,\n scale=1,\n gauss_scale=gauss_scale,\n lorentz_scale=lorentz_scale,\n cor_length_static=cor_length_static,\n cor_length_dynamic=cor_length_dynamic,\n )\n return pars", "def reset_parameters(self, param_init):\n logger.info('===== Initialize %s with uniform distribution =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n init_with_uniform(n, p, param_init)", "def random_init(constr=None):\n if constr is not None:\n pass\n else:\n constr = {}\n if \"PERIODS\" in constr.keys():\n periods = constr[\"PERIODS\"]\n else:\n periods = np.random.randint(2, 20)\n if \"AGENTS\" in constr.keys():\n agents = constr[\"AGENTS\"]\n else:\n agents = np.random.randint(100, 5000)\n if \"SEED\" in constr.keys():\n seed = constr[\"SEED\"]\n else:\n seed = np.random.randint(1000, 10000)\n if \"SHARE\" in constr.keys():\n share = constr[\"SHARE\"]\n else:\n share 
= np.random.uniform(0.1, 0.8)\n if \"FILE\" in constr.keys():\n file = constr[\"FILE\"]\n else:\n file = str(uuid.uuid4()).upper().replace(\"-\", \"\")[0:8]\n\n init_dict = {\"SIMULATION\": {}, \"PARAMS\": {}, \"DIST\": {}}\n\n init_dict[\"SIMULATION\"][\"periods\"] = periods\n init_dict[\"SIMULATION\"][\"agents\"] = agents\n init_dict[\"SIMULATION\"][\"share\"] = share\n init_dict[\"SIMULATION\"][\"seed\"] = seed\n init_dict[\"SIMULATION\"][\"file\"] = file\n\n init_dict[\"PARAMS\"][\"alpha\"] = np.random.normal(1, 0.25)\n init_dict[\"PARAMS\"][\"theta\"] = np.random.normal(0.1, 0.025)\n\n init_dict[\"DIST\"][\"beta\"] = np.random.normal(0.75, 0.1)\n init_dict[\"DIST\"][\"mu\"] = np.random.normal(0.5, 0.1)\n\n print_dict(init_dict)\n\n return init_dict", "def initialize_parameters_random(layers_dims):\n # set random seed\n np.random.seed(3)\n parameters = {}\n L = len(layers_dims)\n\n for l in range(1, L):\n parameters['W' + str(l)] = np.random.randn(layers_dims[l], layers_dims[l - 1]) * 10\n parameters['b' + str(l)] = np.zeros((layers_dims[l], 1))\n\n return parameters", "def _get_random_features_initializer(initializer, shape):\n\n def _get_cauchy_samples(loc, scale, shape):\n probs = np.random.uniform(low=0., high=1., size=shape)\n return loc + scale * np.tan(np.pi * (probs - 0.5))\n\n random_features_initializer = initializer\n if isinstance(initializer, six.string_types):\n if initializer.lower() == 'gaussian':\n random_features_initializer = init_ops.random_normal_initializer(\n stddev=1.0)\n elif initializer.lower() == 'laplacian':\n random_features_initializer = init_ops.constant_initializer(\n _get_cauchy_samples(loc=0.0, scale=1.0, shape=shape))\n\n else:\n raise ValueError(\n 'Unsupported kernel type: \\'{}\\'. Supported kernel types: {}.'.format(\n random_features_initializer, _SUPPORTED_RBF_KERNEL_TYPES))\n return random_features_initializer", "def reset_parameters(self):\n logger.info('===== Initialize %s with normal distribution =====' % self.__class__.__name__)\n for n, p in self.named_parameters():\n init_like_transformer_xl(n, p, std=0.02)" ]
[ "0.61080253", "0.60588044", "0.60554767", "0.60480046", "0.59878653", "0.5974961", "0.5911697", "0.5907807", "0.5863012", "0.58475083", "0.58475083", "0.58475083", "0.58475083", "0.58475083", "0.58393437", "0.58366144", "0.5830874", "0.58203185", "0.5815766", "0.5807405", "0.5805584", "0.57745296", "0.57418025", "0.5707347", "0.5696349", "0.5694071", "0.5621302", "0.5618989", "0.5614795", "0.5602467" ]
0.70326656
0
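A minimal sketch of how _inplace_arg_dict_randomization (document field above) might drive multiple restarts; the bounds, std value, and parameter name are hypothetical assumptions, and _deep_copy_arg_dict from the previous sample is assumed to be in scope:

import mxnet as mx

arg_dict = {'lengthscale': mx.nd.array([1.0])}
mean_arg_dict = _deep_copy_arg_dict(arg_dict)   # remember the original starting point
bounds = {'lengthscale': (1e-6, None)}          # None means unbounded above

for restart in range(3):
    if restart > 0:
        # Perturb arg_dict in place around the original values, then clip to the bounds
        _inplace_arg_dict_randomization(arg_dict, mean_arg_dict, bounds, std=0.5)
    print(restart, arg_dict['lengthscale'].asnumpy())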
When dealing with nonconvex problems (e.g., optimizing the marginal likelihood), we typically need to start from several starting points. This function applies this logic around apply_lbfgs, randomizing the starting points around the initial values provided in arg_dict (see "copy_of_initial_arg_dict" below). The first optimization happens exactly at arg_dict, so that the case n_starts=1 coincides exactly with the previously used apply_lbfgs. Importantly, the communication with the LBFGS solver happens via arg_dict, hence all operations on arg_dict are performed in place. We catch exceptions and return ret_infos about them. If none of the restarts succeeded, arg_dict is not modified.
def apply_lbfgs_with_multiple_starts( exec_func, arg_dict, grad_dict, bounds, n_starts=N_STARTS, **kwargs): assert n_starts >= 1 copy_of_initial_arg_dict = _deep_copy_arg_dict(arg_dict) best_objective_over_restarts = None best_arg_dict_over_restarts = copy_of_initial_arg_dict # Loop over restarts ret_infos = [] for iter in range(n_starts): if iter > 0: _inplace_arg_dict_randomization( arg_dict, copy_of_initial_arg_dict, bounds) decorator = ExecutorDecorator(exec_func) ret_info = apply_lbfgs( decorator.exec_func, arg_dict, grad_dict, bounds, **kwargs) ret_infos.append(ret_info) if ret_info is None and ( best_objective_over_restarts is None or decorator.best_objective < best_objective_over_restarts): best_objective_over_restarts = decorator.best_objective best_arg_dict_over_restarts = _deep_copy_arg_dict(arg_dict) # We copy back the values of the best parameters into arg_dict (again, # inplace, as required by the executor) for name in arg_dict.keys(): arg_dict[name][:] = best_arg_dict_over_restarts[name] return ret_infos
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_valid_initial_params(rng, model, *model_args, init_strategy=init_to_uniform,\n param_as_improper=False, prototype_params=None, **model_kwargs):\n init_strategy = jax.partial(init_strategy, skip_param=not param_as_improper)\n\n def cond_fn(state):\n i, _, _, is_valid = state\n return (i < 100) & (~is_valid)\n\n def body_fn(state):\n i, key, _, _ = state\n key, subkey = random.split(key)\n\n # Wrap model in a `substitute` handler to initialize from `init_loc_fn`.\n # Use `block` to not record sample primitives in `init_loc_fn`.\n seeded_model = substitute(model, substitute_fn=block(seed(init_strategy, subkey)))\n model_trace = trace(seeded_model).get_trace(*model_args, **model_kwargs)\n constrained_values, inv_transforms = {}, {}\n for k, v in model_trace.items():\n if v['type'] == 'sample' and not v['is_observed']:\n if v['intermediates']:\n constrained_values[k] = v['intermediates'][0][0]\n inv_transforms[k] = biject_to(v['fn'].base_dist.support)\n else:\n constrained_values[k] = v['value']\n inv_transforms[k] = biject_to(v['fn'].support)\n elif v['type'] == 'param' and param_as_improper:\n constraint = v['kwargs'].pop('constraint', real)\n transform = biject_to(constraint)\n if isinstance(transform, ComposeTransform):\n base_transform = transform.parts[0]\n inv_transforms[k] = base_transform\n constrained_values[k] = base_transform(transform.inv(v['value']))\n else:\n inv_transforms[k] = transform\n constrained_values[k] = v['value']\n params = transform_fn(inv_transforms,\n {k: v for k, v in constrained_values.items()},\n invert=True)\n potential_fn = jax.partial(potential_energy, model, model_args, model_kwargs, inv_transforms)\n pe, param_grads = value_and_grad(potential_fn)(params)\n z_grad = ravel_pytree(param_grads)[0]\n is_valid = np.isfinite(pe) & np.all(np.isfinite(z_grad))\n return i + 1, key, params, is_valid\n\n if prototype_params is not None:\n init_state = (0, rng, prototype_params, False)\n else:\n init_state = body_fn((0, rng, None, None))\n _, _, init_params, is_valid = while_loop(cond_fn, body_fn, init_state)\n return init_params, is_valid", "def csminwell(fun, x0, args,\n options={'x_tol' : 1e-10,\n 'f_tol' : 1e-10,\n 'g_tol' : 1e-10,\n 'show_trace' : False,\n 'max_iter' : 100},\n **kwargs):\n\n xtol, ftol, grtol = options['x_tol'], options['f_tol'], options['g_tol']\n iterations = options['max_iter']\n show_trace = options['show_trace']\n\n H0 = 1e-5 * np.eye(len(x0))\n\n # Unpack dimensions\n nx = x0.shape[0]\n\n # Count function and gradient calls\n f_calls, g_calls, h_calls = 0, 0, 0\n\n # Maintain current state in x and previous state in x_previous\n x, x_previous = copy.copy(x0), copy.copy(x0)\n\n # start with Initial Hessian\n H = H0\n\n # start rc parameter at 0\n rc = 0\n\n f_x = fun(x0, args)\n f_calls += 1\n\n if (f_x == np.inf) | (f_x == np.nan):\n raise ArgumentError(\"Bad initial guess, 'f_x' returned Inf. Try again\")\n elif f_x > 1e50:\n raise ArgumentError(\"Bad initial guess. 
Try again\")\n\n # Create gradient\n def _grad(f, X, args, h = 1e-4):\n nx = len(X)\n I = np.eye(nx)\n results = np.zeros(nx)\n for i in range(nx):\n e = I[i]\n X_fh = X + h * e\n X_bh = X - h * e\n results[i] = (f(X_fh, args) - f(X_bh, args))/(2*h)\n return results\n\n def _gradwrap(f, x, args):\n # df = grad(f)\n # stor = df(x, args)\n stor = _grad(f, x, args)\n bad_grads = abs(stor) >= 1e15\n stor[bad_grads] = 0.0\n return stor, any(bad_grads)\n\n gr, badg = _gradwrap(fun, x0, args)\n g_calls += 1\n\n # Count interations\n iteration = 0\n\n # Maintain a trace\n # TBD\n\n # set objects to their starting values\n retcode3 = 101\n\n # set up return variables so they are available outside while loop\n fh = np.copy(f_x)\n xh = np.copy(x0)\n gh = np.copy(x0)\n retcodeh = 1000\n\n # Assess multiple types of convergence\n x_converged, f_converged, gr_converged = False, False, False\n\n # Iterate until convergence or exhaustion\n converged = False\n while (not converged) & (iteration < iterations):\n iteration += 1\n f1, x1, fc, retcode1 = csminit(fun, x, f_x, gr, badg, H, args, show_trace)\n f_calls += fc\n\n if retcode1 != 1:\n if (retcode1 == 2) | (retcode1 == 4):\n wall1, badg1 = True, True\n else:\n g1, badg1 = _gradwrap(fun, x1, args)\n g_calls += 1\n wall1 = badg1\n\n if wall1 & (len(H) > 1):\n\n Hcliff = H + np.diag(np.diag(H) * np.random.rand(nx))\n\n # print('Cliff. Perturbing search direction.\\n')\n\n f2, x2, fc, retcode2 = csminit(fun, x, f_x, gr, badg, Hcliff, args, show_trace)\n f_calls += fc\n\n if f2 < f_x:\n if (retcode2 == 2) or (retcode2 == 4):\n wall2, badg2 = True, True\n else:\n g2, badg2 = _gradwrap(fun, x2, args)\n g_calls += 1\n wall2 = badg2\n badg2\n\n if wall2:\n # print(\"Cliff again. Try traversing\\n\")\n if np.linalg.norm(x2-x1) < 1e-13:\n f3 = f_x\n x3 = x\n badg3 = True\n retcode3 = 101\n else:\n gcliff = ((f2-f1) / ((np.linalg.norm(x2-x1))**2)) * (x2-x1)\n if len(x0.shape) == 2:\n gcliff = gcliff.conj().T\n f3, x3, fc, retcode3 = csminit(fun, x, f_x, gcliff, False, np.eye(nx), args, show_trace)\n f_calls += fc\n\n if (retcode3 == 2) or (retcode3==4):\n wall3 = True\n badg3 = True\n else:\n g3, badg3 = _gradwrap(fun, x3, args)\n g_calls += 1\n wall3 = badg3\n else:\n f3 = f_x\n x3 = x\n badg3 = True\n retcode3 = 101\n else:\n f3 = f_x\n x3 = x\n badg3 = True\n retcode3 = 101\n else:\n f2, f3 = f_x, f_x\n badg2, badg3 = True, True\n retcode2, retcode3 = 101, 101\n else:\n f1, f2, f3 = f_x, f_x, f_x\n retcode2, retcode3 = retcode1, retcode1\n #badg1, badg2, badg3 = False, False, False\n\n if (f3 < f_x - ftol) & (badg3 == 0):\n ih = 2\n fh = f3\n xh = x3\n gh = g3\n badgh = badg3\n retcodeh = retcode3\n elif (f2 < f_x - ftol) & (badg2 == 0):\n ih = 1\n fh = f2\n xh = x2\n gh = g2\n badgh = badg2\n retcodeh = retcode2\n elif (f1 < f_x - ftol) & (badg1 == 0):\n ih = 0\n fh = f1\n xh = x1\n gh = g1\n badgh = badg1\n retcodeh = retcode1\n else:\n fh = np.min([f1, f2, f3])\n ih = np.argmin([f1, f2, f3])\n\n if ih == 0:\n xh = x1\n retcodeh = retcode1\n elif ih == 1:\n xh = x2\n retcodeh = retcode2\n elif ih == 2:\n xh = x3\n retcodeh = retcode3\n\n try:\n nogh = not gh\n except:\n nogh = True\n\n if nogh:\n gh, badgh = _gradwrap(fun, xh, args)\n g_calls += 1\n\n badgh = True\n\n stuck = (abs(fh-f_x) < ftol)\n if (not badg) & (not badgh) & (not stuck):\n H = bfgsi(H, gh-gr, xh-x)\n\n if show_trace:\n print('Improvement on iteration {0} = {1:.9f}\\n'.format(iteration, fh-f_x))\n\n x_previous = np.copy(x)\n # Update before next iteration\n f_x_previous, f_x = f_x, fh\n x 
= xh\n gr = gh\n badg = badgh\n\n x_converged, f_converged, gr_converged, converged = \\\n assess_convergence(x, x_previous, f_x, f_x_previous, gr, xtol, ftol, grtol)\n\n result = optimize.OptimizeResult()\n result.initial_x = x0\n result.x = x\n result.success = converged\n result.hess = H\n result.iterations = iteration\n return result", "def test_insufficient_initial_values_in_fit_mle(self):\n # Create a variable for the arguments to the fit_mle function.\n # Note `None` is the argument passed when using the init_shapes,\n # init_intercepts and init_coefs keyword arguments.\n fit_args = [None]\n\n # Create base set of incorrect kwargs for fit_mle function\n kwargs = {\"init_intercepts\": self.fake_intercepts,\n \"init_coefs\": None,\n \"print_res\": False}\n\n kwargs_2 = {\"init_intercepts\": None,\n \"init_coefs\": None,\n \"print_res\": False}\n\n for bad_kwargs in [kwargs, kwargs_2]:\n # Test to ensure that the ValueError when not passing\n # kwarg with an incorrect number of parameters\n self.assertRaisesRegexp(ValueError,\n \"must pass init_coefs\",\n self.base_clog.fit_mle,\n *fit_args, **bad_kwargs)\n\n return None", "def lbfgs(self, pstart, Nepochs=50, bounds=None, disp_p=False,\n maxfun=15000, args=(), pass_self=False, res=False,\n callback=None):\n\n self.params = pstart\n self.bounds = self._parse_bounds(bounds)\n self.Nepochs = Nepochs\n self.disp_p = disp_p \n\n # Restart the counters\n self.iteration = 0\n self.t_store = time.time()\n self.of_list = []\n\n # Get initial of value\n of = self.objective(self.params, *args)\n self.of_list.append(self._get_value(of)) \n\n def of(params, *args, **kwargs):\n \"\"\"Modify the objective function slightly to allow storing\n intermediate objective values without re-evaluating the function\n \"\"\"\n if pass_self == True:\n arglist = list(args)\n arglist.append(self)\n args = tuple(arglist)\n\n out = value_and_grad(self.objective)(params, *args, **kwargs)\n self.of_last = self._get_value(out[0])\n return out\n\n def cb(xk):\n \"\"\"Callback function for the SciPy minimizer\n \"\"\"\n self.iteration += 1\n t_current = time.time()\n t_elapsed = t_current - self.t_store\n self.t_store = t_current\n \n self.of_list.append(self.of_last)\n self.params = xk\n self._disp(t_elapsed)\n\n # Call the custom callback function if any\n if callback is not None:\n callback(self)\n\n res_opt = minimize(of, self.params, args=args, method='L-BFGS-B',\n jac=True, bounds=self.bounds, tol=None, callback=cb,\n options={'disp': False,\n 'maxcor': 10,\n 'ftol': 1e-8,\n 'gtol': 1e-5,\n 'eps': 1e-08,\n 'maxfun': maxfun,\n 'maxiter': Nepochs,\n 'iprint': -1,\n 'maxls': 20})\n\n if res == False:\n return (res_opt.x, self.of_list)\n else:\n return (res_opt.x, self.of_list, res_opt)", "def get_initial_state_args(value_and_gradients_function,\n initial_position,\n grad_tolerance,\n control_inputs=None):\n if control_inputs:\n with tf.control_dependencies(control_inputs):\n f0, df0 = value_and_gradients_function(initial_position)\n else:\n f0, df0 = value_and_gradients_function(initial_position)\n return dict(\n converged=_check_within_tolerance(df0, grad_tolerance),\n failed=tf.convert_to_tensor(value=False),\n num_iterations=tf.convert_to_tensor(value=0),\n num_objective_evaluations=tf.convert_to_tensor(value=1),\n position=initial_position,\n objective_value=f0,\n objective_gradient=df0)", "def apply_lbfgs(exec_func, arg_dict, grad_dict, bounds, **kwargs):\n\n param_names = sorted(\n [name for name, value in grad_dict.items() \\\n if value is not None])\n 
name_to_index = _get_name_to_index(arg_dict, param_names)\n # Construct initial evaluation point (NumPy)\n param_nd_arrays = [arg_dict[name] for name in param_names]\n param_numpy_array = _zeros_like_nd_list(param_nd_arrays, _dtype)\n _copy_to_numpy_array(param_nd_arrays, param_numpy_array)\n\n return _apply_lbfgs_internal(\n exec_func, arg_dict, grad_dict, param_names, param_numpy_array,\n name_to_index, bounds, return_results=False, **kwargs)", "def LBFGS(fun, x, args=(), jac=None, x_old=None, maxcor=5, gtol = None, g2tol=1e-10, maxiter=10000,\n maxls=20, store_iterates=\"iterate\", printdb=donothing, linesearch_options={}):\n\n\n\n if x_old is None:\n x_old = x.copy()\n\n x,grad,x_old,grad_old = steepest_descent_wolfe2(x_old, fun, jac,**linesearch_options)\n\n k=1\n\n n = x.size # Dimension of x\n\n gamma = 1\n\n S = np.zeros((n, 0))\n Y = np.zeros((n, 0))\n R = np.zeros((0, 0))\n STgrad = np.array((1, maxcor))\n YTgrad = np.array((1, maxcor))\n\n grad = np.asarray(jac(x))\n grad2 = np.sum(grad**2)\n grad_old = np.asarray(jac(x_old))\n\n alpha=0\n\n # Start loop\n\n iterates = list()\n if store_iterates == 'iterate':\n iterate = scipy.optimize.OptimizeResult(\n {'x': x_old.copy(),\n 'fun': fun(x_old),\n 'jac': grad_old.copy()})\n iterates.append(iterate)\n\n iterate = scipy.optimize.OptimizeResult(\n {'x': x.copy(),\n 'fun': fun(x),\n 'jac': grad})\n iterates.append(iterate)\n\n\n while True:\n #printdb(k)\n #printdb(\"grads\")\n #printdb(grad)\n #printdb(grad_old)\n\n # Update Sk,Yk\n if k > maxcor:\n S = np.roll(S, -1)\n S[:, -1] = (x - x_old).flat\n Y = np.roll(Y, -1)\n Y[:, -1] = (grad - grad_old).flat\n\n else:\n S = np.hstack([S, x - x_old])\n Y = np.hstack([Y, grad - grad_old])\n #printdb(\"S: {}\".format(S))\n #printdb(\"Y: {}\".format(Y))\n\n # 2.\n grad2prev = grad2.copy()\n grad2 = np.sum(grad ** 2) # ok\n\n # check if job is done\n if ((grad2 < g2tol if g2tol is not None else True) and\n (np.max(np.abs(grad)) < gtol if gtol is not None else True)):\n result = scipy.optimize.OptimizeResult({'success': True,\n 'x': x,\n 'nit': k,\n 'iterates': iterates})\n\n #if iterates:\n # result['iterates'] = iterates\n return result\n\n if k > maxiter:\n result = scipy.optimize.OptimizeResult({'success': False,\n 'x': x,\n 'nit': k,\n 'iterates':iterates})\n\n #if iterates:\n # result['iterates'] = iterates\n return result\n\n STgrad_prev = STgrad.copy()\n YTgrad_prev = YTgrad.copy()\n\n STgrad = np.dot(S.T, grad)\n YTgrad = np.dot(Y.T, grad) # OK, this is new\n\n #printdb(\"STgrad : {}\".format(STgrad))\n #printdb(\"YTgrad: {}\".format(YTgrad))\n\n if k > maxcor:\n w = np.vstack([STgrad_prev, gamma * YTgrad_prev])\n S_now_T_grad_prev = np.roll(STgrad_prev,-1)\n S_now_T_grad_prev[-1] = - alpha * gamma * grad2prev - alpha * w.T.dot(p)\n else : # straightforward Version\n S_now_T_grad_prev = np.dot(S.T, grad_old)\n\n #printdb(\"S_now_T_grad_prev {}\".format(S_now_T_grad_prev))\n #np.testing.assert_allclose(S_now_T_grad_prev,np.dot(S.T, grad_old),\n # err_msg=\"Maybe the assumption of Theorem 2.2\"\n # \"is not valid: sk-1Tyk-1 = {}\".format(S[:,-1].T.dot(Y[:,-1])))\n\n # 3. 
# TOOPTIMIZE\n #sprevTgradprev = np.dot(S[:, -1].T, grad_old) # sk-1T gk-1\n\n #%% 4.\n #ykm12 = np.dot(Y[:, -1].T, Y[:, -1]) #TOOPTIMIZE\n\n #printdb(\"before\")\n #printdb(\"R: {}\".format(R))\n if k > maxcor:\n R = np.roll(R, (-1, -1), axis=(0, 1)) # mxm Matrix hold by all Processors\n R[-1, :] = 0\n STym1 = STgrad - S_now_T_grad_prev\n R[:, -1] = STym1.flat #O(m x n)\n\n elif k == 1:\n R = np.triu(np.dot(S.T, Y))\n else:\n R = np.vstack([R, np.zeros(k - 1)])\n R = np.hstack([R, np.dot(S.T, Y[:, -1]).reshape(k, 1)])\n\n #np.testing.assert_allclose(R, np.triu(np.dot(S.T, Y)))\n\n if k > maxcor:\n D = np.roll(D, (-1, -1), axis=(0, 1))\n # D[-1,-1] = np.dot(Y[:,-1],Y[:,-1])# yk-1Tyk-1 # TOOPTIMIZE\n D[-1, -1] = R[-1,-1]\n else:\n #D = np.diag(np.einsum(\"ik,ik -> k\", S, Y))\n D=np.diag(R.diagonal())\n\n assert D[-1,-1] >0, \"k = {}: \".format(k) # Assumption of Theorem 2.2\n #np.testing.assert_allclose(np.diag(D), np.diag(R))\n\n # YTY = np.dot(Y.T,Y) #TOPTIMIZED\n if k > maxcor:\n YTY = np.roll(YTY, (-1, -1), axis=(0, 1))\n #printdb(YTgrad)\n #printdb(YTgrad_prev)\n YTY[-1, :-1] = YTY[:-1, -1] = (YTgrad[:-1] - YTgrad_prev[1:]).flat\n YTY[-1, -1] = grad2prev - grad2 + 2 * YTgrad[-1]\n else:\n YTY = np.dot(Y.T, Y)\n #np.testing.assert_allclose(YTY, np.dot(Y.T, Y))\n ##\n #printdb(\"after\")\n #printdb(\"R: {}\".format(R))\n #printdb(\"YTY: {}\".format(YTY))\n #printdb(\"D: {}\".format(D))\n\n #%% 5.\n gamma = D[-1, -1] / YTY[-1,-1] # n.b. D[-1,-1] = sk-1T yk-1 = yk-1T sk-1\n\n #%% 6.\n #Rinv = np.linalg.inv(R)\n Rinv = scipy.linalg.solve_triangular(R,np.eye(min(k,maxcor)))\n\n RiSg = Rinv.dot(STgrad)\n\n p = np.vstack([Rinv.T.dot(D + gamma * YTY).dot(RiSg) - gamma * Rinv.T.dot(YTgrad)\n , - RiSg])\n\n #%% 7.\n Hgrad = gamma * grad + np.hstack([S, gamma * Y]).dot(p)\n\n #%% linesearch\n\n #reslinesearch = scipy.optimize.minimize_scalar(fun=lambda alpha: fun(x - Hgrad * alpha), bounds=(0, 10), method=\"bounded\")\n #assert reslinesearch.success, \"Linesearch not converged\"\n # line_search did cause problems, maybe because of the differentz interpretation of the arrays\n #alpha,fc,gc,new_fval,old_fval,new_slope = scipy.optimize.line_search(fun,lambda x_ : fprime(x_).flatten(),x, - Hgrad.flatten() , c1=1e-4,c2=0.9,maxiter=20)\n\n #printdb(\"assert descent direction\")\n #assert fun(x - Hgrad * 0.001) - fun(x) < 0\n #printdb(fun(x - Hgrad * 0.001) - fun(x))\n \n alpha,phi,phi0,derphi = scipy.optimize.linesearch.scalar_search_wolfe2(lambda alpha: fun(x - Hgrad * alpha), lambda alpha: np.dot(jac(x - Hgrad * alpha).T, -Hgrad),maxiter = maxls,**linesearch_options)\n\n if derphi is None:\n import matplotlib.pyplot as plt\n figdebug,axdebug=plt.subplots()\n alphas = np.linspace(-1,10)\n\n axdebug.plot(alphas,[fun(x - a * Hgrad) for a in alphas] )\n figdebug.show()\n printdb(\"scalar line search did not converge\")\n printdb(\"alpha: {}\".format(alpha))\n plt.show(block=True)\n\n assert derphi is not None, \"scalar line-search did not converge\"\n #assert new_fval is not None, \"Line-search didn't converge\"\n #printdb(\"x: {}\".format(x))\n x_old[:] = x\n #printdb(\"x_old: {}\".format(x_old))\n x = x - Hgrad * alpha\n #printdb(\"x: {}\".format(x))\n #printdb(\"x_old: {}\".format(x_old))\n #printdb(\"x = {}\".format(x))\n #assert phi < phi0, \"f(x) >= f(x_old) ! 
\"\n grad_old[:] = grad\n grad = jac(x)\n #assert fun(x) <= fun(x_old) + 1e-4 * alpha * grad_old.T.dot(-Hgrad), \"First Wolfe Condition not fullfilled\"\n #assert grad.T.dot(-Hgrad) >= 0.9 * grad_old.T.dot(-Hgrad), \"second Wolfe Condition not fullfilled\"\n #printdb(\"dx * -Hgrad:{}\".format((x-x_old).T.dot(-Hgrad)))\n #printdb(alpha)\n #assert (grad - grad_old).T.dot(x - x_old) > 0, \"msg derphi = {}\".format(derphi)\n if store_iterates == 'iterate':\n iterate = scipy.optimize.OptimizeResult(\n {'x': x.copy(),\n 'fun': phi,\n 'jac': grad})\n iterates.append(iterate)\n\n k = k + 1", "def calibrate(self, param_start):\n method = 'L-BFGS-B'\n# method = 'Nelder-Mead'\n basin = False\n options = {'maxiter': 500, 'disp': False}\n minimizer_kwargs = {'method': method, 'bounds': self.bounds()}\n if basin:\n res = sco.basinhopping(self.objective, param_start, niter=100,\n disp=options['disp'],\n minimizer_kwargs=minimizer_kwargs)\n else:\n res = sco.minimize(self.objective, param_start, method=method,\n options=options, bounds=self.bounds())\n return self.get_pnames(), res.x", "def bd_init(trajectory, rep=10, method='Nelder-Mead', mp=True,\n fix_N=None, brute=True, fit_func=bd_nll):\n\n # Make sure that there are no DP values = 0 in dataset\n trajectory.loc[trajectory['DP'] == 0, 'DP'] = (\n trajectory[trajectory.DP != 0].DP.mean())\n trajectory['DP'] = trajectory['DP'].astype(int)\n\n # Select random parameter initiations\n fitness_range = np.linspace(fitness_bounds[0], fitness_bounds[1], rep+1)\n t0_range = np.linspace(0, trajectory.iloc[0].age-1, rep+1).astype('int')\n\n # the number of wild type stem cells can be fix using fix_N parameter\n if fix_N is None:\n # if fix_N is not set, then compute a range of valid stem cell counts\n N_w_range = np.linspace(N_w_bounds[0], N_w_bounds[1], rep+1, dtype=int)\n else:\n N_w_range = [fix_N]\n\n # Create all possible combinations of initial parameters\n params_init = list(product(fitness_range, N_w_range, t0_range))\n\n # Set fitting function\n if brute is False:\n partial_func = partial(bd_fit,\n trajectory=trajectory,\n method=method,\n fit_func=fit_func)\n else:\n # Set fit_func function to return parameters\n fit_func_return_params = partial(fit_func,\n return_params=True)\n partial_func = partial(fit_func_return_params,\n trajectory=trajectory)\n\n if mp is True:\n with Pool(8) as p:\n model_list = list(p.map(partial_func, params_init))\n else:\n model_list = list(map(partial_func, params_init))\n\n if brute is False:\n # Optimal model\n optimal_model = model_list[0]\n for model in model_list:\n if model.fit.fun < optimal_model.fit.fun:\n optimal_model = model\n\n else:\n # Unpack nll and parameter values from model_list\n nll, params = zip(*model_list)\n s, N_w, t0 = zip(*params)\n\n # Convert negative log-likelihoods to likelihood\n likelihood = np.exp(-np.array(nll))\n # Create DataFrame with the nll for each combination of parameters\n brute_force_df = pd.DataFrame({'fitness': s,\n 'N_w': N_w,\n 't0': t0,\n 'likelihood': likelihood})\n # find dataframe row of optimal nll\n optimal_idx = brute_force_df['likelihood'].idxmax()\n\n # Fit new model with optimal parameters as initial parameters\n model_fit = bd_fit(params=[brute_force_df.iloc[optimal_idx].fitness,\n brute_force_df.iloc[optimal_idx].N_w,\n brute_force_df.iloc[optimal_idx].t0],\n trajectory=trajectory,\n method='Nelder-Mead',\n fit_func=fit_func)\n\n # Create model class object from optimal model\n optimal_model = trajectory_model(model_type='bd_process',\n fit=model_fit.fit,\n 
data=trajectory)\n\n # Create heatmaps for each combination of 2 parameters\n heatmaps = likelihood_heatmaps(brute_force_df)\n conditional_distribution_plots = conditional_distributions(\n brute_force_df)\n\n # Add dataframe and heatmap as attribute of the class object.\n optimal_model.brute_force_df = brute_force_df\n optimal_model.heatmap = heatmaps\n optimal_model.conditional_dist = conditional_distribution_plots\n\n return optimal_model", "def fit(self, h_init):\n M = h_init.shape[0]\n\n cn_states = self.create_cn_states(M, 2, self.max_copy_number, self.max_copy_number_diff)\n cn_states = np.array([cn_states] * self.N)\n cn_states[:, :, 0, :] = self.normal_copies[:, np.newaxis, :]\n\n # Remap cn states\n cn_states = cn_states[self.seg_rev_remap, :, :, :]\n\n brk_states = self.create_brk_states(M, self.max_copy_number, self.max_copy_number_diff)\n\n self.model = remixt.bpmodel.RemixtModel(\n M,\n self.N1,\n self.num_breakpoints,\n self.normal_contamination,\n cn_states,\n brk_states,\n h_init,\n self.l1,\n self.x1[:, 2],\n self.x1[:, 0:2],\n self.is_telomere,\n self.breakpoint_idx,\n self.breakpoint_orient,\n self.transition_log_prob,\n self.divergence_weight,\n )\n\n self.model.total_likelihood_mask = self._total_likelihood_mask.astype(int)\n self.model.allele_likelihood_mask = self._allele_likelihood_mask.astype(int)\n\n if self.breakpoint_init is not None:\n p_breakpoint = np.ones((self.model.self.num_breakpoints, self.model.num_brk_states))\n brk_states = np.array(self.model.brk_states)\n\n for k, bp in enumerate(self.breakpoints):\n cn = self.breakpoint_init[bp]\n\n for s in range(self.model.num_brk_states):\n if np.all(cn == brk_states[s]):\n p_breakpoint[k, s] = 1000.\n\n p_breakpoint /= np.sum(p_breakpoint, axis=-1)[:, np.newaxis]\n\n self.model.p_breakpoint = p_breakpoint\n\n self.model.transition_model = self.transition_model\n\n if self.prev_elbo is None:\n self.prev_elbo = self.model.calculate_elbo()\n\n for i in range(self.num_em_iter):\n for j in range(self.num_update_iter):\n self.variational_update()\n\n if self.do_h_update:\n self.em_update_h()\n\n self.em_update_params()\n\n elbo = self.model.calculate_elbo()\n\n self.prev_elbo_diff = elbo - self.prev_elbo\n self.prev_elbo = elbo\n\n print ('[{}] completed iteration {}'.format(_gettime(), i))\n print ('[{}] elbo: {:.10f}'.format(_gettime(), self.prev_elbo))\n print ('[{}] elbo diff: {:.10f}'.format(_gettime(), self.prev_elbo_diff))\n print ('[{}] h = {}'.format(_gettime(), np.asarray(self.model.h)))\n for name, value in self.get_likelihood_param_values().items():\n print ('[{}] {} = {}'.format(_gettime(), name, value))", "def LBFGS_run(seed, alpha, rho, x0=5, n0=100, iter_count=1000, mu_1=2, mu_2=5, sigma_1=1, sigma_2=1, SAA_seed=None):\n np.random.seed(seed)\n begin = datetime.datetime.now()\n res = minimize(estimate_w_grad, np.array([x0]),\n args=(n0, alpha, rho, mu_1, mu_2, sigma_1, sigma_2, SAA_seed),\n method='L-BFGS-B',\n jac=True,\n bounds=[(-10, 10)],\n options={'disp': True,\n 'maxiter': iter_count,\n 'maxfun': iter_count,\n # 'return_all': True\n }\n )\n print(res)\n x_list = res.x.tolist()\n now = datetime.datetime.now()\n print('done time: %s' % (now - begin))\n print('call count: %d' % call_count)\n # np.save(\"sa_out/normal/BFGS_\" + rho + \"_\" + str(alpha) + \"_iter_\" + str(iter_count) + \"_x.npy\", x_list)\n return x_list", "def optimize(self, num_restarts=1, max_iters=100, max_f_eval=300.0, method='Anneal'):\n dic = DictVectorizer()\n # flatten the parameters\n 
init_params,bounds=dic.fit_transform(self.params)\n #we minimise minus the marginal likelihood\n def objective(params_flatten):\n self.params=dic.inverse_transform(params_flatten,bounds)\n val = -self.log_marginal_likelihood()\n return val# we want to maximize it\n \n \n #run ptimisation with multiple restarts\n optml=np.inf\n for i in range(num_restarts):\n #minimise function\n if method=='Anneal':\n res=dual_annealing(objective,bounds, maxiter=max_iters, maxfun=max_f_eval, x0=init_params)\n else:\n \n res = minimize(objective, init_params, \n bounds=bounds, method=method,options={'maxiter': max_iters, 'disp': False})\n #print(\"Iteration \"+str(i)+\" \",-res.fun)\n if res.fun<optml:\n params_best=res.x #init_params \n optml=res.fun\n init_params=bounds[:,0]+(bounds[:,1]-bounds[:,0])*np.random.rand(len(bounds[:,0]))\n print(\"Iteration \"+str(i)+\" \",-res.fun)\n #params_best=res.x\n #optml=res.fun\n self.params=dic.inverse_transform(params_best,bounds)\n return -optml", "def initial_parameters(ship_data: dict) -> dict:\n\n mask = df_parameters[\"brix_lambda\"].notnull()\n df_parameters.loc[mask, \"brix_prime\"] = df_parameters.loc[mask].apply(\n calculate_prime, ship_parameters=ship_data, axis=1\n )\n\n df_parameters[\"prime\"] = df_parameters[\"brix_prime\"]\n\n df_parameters.loc[\"Ydelta\", \"prime\"] = 0.003 # Just guessing\n df_parameters.loc[\"Ndelta\", \"prime\"] = (\n -df_parameters.loc[\"Ydelta\", \"prime\"] / 2\n ) # Just guessing\n\n df_parameters.loc[\"Nu\", \"prime\"] = 0\n df_parameters.loc[\"Nur\", \"prime\"] = 0\n # df_parameters.loc[\"Xdelta\", \"prime\"] = -0.001\n df_parameters.loc[\"Xr\", \"prime\"] = 0\n df_parameters.loc[\"Xrr\", \"prime\"] = 0.000\n df_parameters.loc[\"Xu\", \"prime\"] = 0\n df_parameters.loc[\"Xuu\", \"prime\"] = 0\n df_parameters.loc[\"Xv\", \"prime\"] = 0\n df_parameters.loc[\"Xvr\", \"prime\"] = 0\n df_parameters.loc[\"Yu\", \"prime\"] = 0\n df_parameters.loc[\"Yur\", \"prime\"] = 0.00\n\n df_parameters.loc[\"Nuv\", \"prime\"] = 0.0\n df_parameters.loc[\"Xthrust\", \"prime\"] = 1.0\n df_parameters.loc[\"Yrdeltadelta\", \"prime\"] = 0.0\n df_parameters.loc[\"Xvdelta\", \"prime\"] = 0.0\n df_parameters.loc[\"Xdeltadelta\", \"prime\"] = 0.0\n df_parameters.loc[\"Yvdeltadelta\", \"prime\"] = 0.0\n df_parameters.loc[\"Nrdeltadelta\", \"prime\"] = 0.0\n df_parameters.loc[\"Yuv\", \"prime\"] = 0.0\n df_parameters.loc[\"Nvdeltadelta\", \"prime\"] = 0.0\n\n df_parameters.loc[\"Ythrustdelta\", \"prime\"] = 0.0\n df_parameters.loc[\"Nthrustdelta\", \"prime\"] = 0.0\n\n parameters = df_parameters[\"prime\"].dropna().to_dict()\n\n return parameters", "def mle_parameters(self, start=None, random_seed=None):\n\n if random_seed is not None:\n np.random.seed(random_seed)\n\n if start is None:\n start = self.sample_parameters_posterior(1)[0, :]\n\n bounds = self.get_bounds_parameters\n\n objective_function = self.objective_llh\n grad_function = self.grad_llh\n\n optimization = Optimization(\n LBFGS_NAME,\n objective_function,\n bounds,\n grad_function,\n minimize=False)\n\n return optimization.optimize(start)", "def _inplace_arg_dict_randomization(arg_dict, mean_arg_dict, bounds, std=STARTING_POINT_RANDOMIZATION_STD):\n\n # We check that arg_dict and mean_arg_dict are compatible\n assert arg_dict.keys() == mean_arg_dict.keys()\n for name, param in arg_dict.items():\n assert param.shape == mean_arg_dict[name].shape\n assert param.dtype == mean_arg_dict[name].dtype\n assert param.context == mean_arg_dict[name].context\n\n # We apply a sort to make the for loop 
deterministic (especially with the internal calls to mx.random)\n for name, param in sorted(arg_dict.items()):\n\n arg_dict[name][:] = mean_arg_dict[name] + mx.random.normal(0.0, std, shape=param.shape, dtype=param.dtype, ctx=param.context)\n\n lower, upper = bounds[name]\n lower = lower if lower is not None else -np.inf\n upper = upper if upper is not None else np.inf\n\n # We project back arg_dict[name] within its specified lower and upper bounds\n # (in case of we would have perturbed beyond those bounds)\n arg_dict[name][:] = mx.nd.maximum(lower, mx.nd.minimum(upper, arg_dict[name]))", "def init_kernel(init_params,\n num_warmup,\n step_size=1.0,\n inverse_mass_matrix=None,\n adapt_step_size=True,\n adapt_mass_matrix=True,\n dense_mass=False,\n target_accept_prob=0.8,\n trajectory_length=2*math.pi,\n max_tree_depth=10,\n find_heuristic_step_size=False,\n model_args=(),\n model_kwargs=None,\n rng_key=random.PRNGKey(0)):\n step_size = lax.convert_element_type(step_size, canonicalize_dtype(jnp.float64))\n nonlocal wa_update, trajectory_len, max_treedepth, vv_update, wa_steps\n wa_steps = num_warmup\n trajectory_len = trajectory_length\n max_treedepth = max_tree_depth\n if isinstance(init_params, ParamInfo):\n z, pe, z_grad = init_params\n else:\n z, pe, z_grad = init_params, None, None\n pe_fn = potential_fn\n if potential_fn_gen:\n if pe_fn is not None:\n raise ValueError('Only one of `potential_fn` or `potential_fn_gen` must be provided.')\n else:\n kwargs = {} if model_kwargs is None else model_kwargs\n pe_fn = potential_fn_gen(*model_args, **kwargs)\n\n find_reasonable_ss = None\n if find_heuristic_step_size:\n find_reasonable_ss = partial(find_reasonable_step_size,\n pe_fn,\n kinetic_fn,\n momentum_generator)\n\n wa_init, wa_update = warmup_adapter(num_warmup,\n adapt_step_size=adapt_step_size,\n adapt_mass_matrix=adapt_mass_matrix,\n dense_mass=dense_mass,\n target_accept_prob=target_accept_prob,\n find_reasonable_step_size=find_reasonable_ss)\n\n rng_key_hmc, rng_key_wa, rng_key_momentum = random.split(rng_key, 3)\n wa_state = wa_init(z, rng_key_wa, step_size,\n inverse_mass_matrix=inverse_mass_matrix,\n mass_matrix_size=jnp.size(ravel_pytree(z)[0]))\n r = momentum_generator(z, wa_state.mass_matrix_sqrt, rng_key_momentum)\n vv_init, vv_update = velocity_verlet(pe_fn, kinetic_fn)\n vv_state = vv_init(z, r, potential_energy=pe, z_grad=z_grad)\n energy = kinetic_fn(wa_state.inverse_mass_matrix, vv_state.r)\n hmc_state = HMCState(0, vv_state.z, vv_state.z_grad, vv_state.potential_energy, energy,\n 0, 0., 0., False, wa_state, rng_key_hmc)\n return device_put(hmc_state)", "def test_init_coefs_length_error_in_fit_mle(self):\n # Create a variable for the arguments to the fit_mle function.\n # Note `None` is the argument passed when using the init_intercepts\n # and init_coefs keyword arguments.\n fit_args = [None]\n\n # Create base set of kwargs for fit_mle function\n kwargs = {\"init_intercepts\": self.fake_intercepts,\n \"print_res\": False}\n\n # Note there is only one beta, so we can't go lower than zero betas.\n for i in [1, -1]:\n # This will ensure we have too many or too few intercepts\n num_coefs = self.fake_betas.shape[0] + i\n kwargs[\"init_coefs\"] = np.arange(num_coefs)\n\n # Test to ensure that the ValueError when using an init_intercepts\n # kwarg with an incorrect number of parameters\n self.assertRaises(ValueError, self.base_clog.fit_mle,\n *fit_args, **kwargs)\n\n return None", "def init(self, rng_key, num_warmup, init_params, model_args, model_kwargs):\n raise 
NotImplementedError", "def get_likelihood(\n self,\n qb,\n inv_fish,\n map_tag=None,\n null_first_cmb=False,\n lmin=33,\n lmax=250,\n mcmc=True,\n alpha_tags=[\"95\", \"150\"],\n beam_tags=[\"95\", \"150\"],\n r_prior=[0, np.inf],\n alpha_prior=[0, np.inf],\n res_prior=None,\n beam_prior=[0, 1],\n betad_prior=[0, 1],\n dust_amp_prior=[0, np.inf],\n dust_ellind_prior=[0, 1],\n num_walkers=50,\n num_steps=20000,\n converge_criteria=0.01,\n reset_backend=None,\n file_tag=None,\n ):\n\n for x in [\n r_prior,\n alpha_prior,\n res_prior,\n beam_prior,\n betad_prior,\n dust_amp_prior,\n dust_ellind_prior,\n ]:\n if x is not None:\n x[:] = [float(x[0]), float(x[1])]\n\n save_name = \"like_mcmc\"\n if not mcmc:\n alpha_prior = None\n res_prior = None\n beam_prior = None\n betad_prior = None\n dust_amp_prior = None\n dust_ellind_prior = None\n\n # no template cleaning if there aren't any templates specified\n if not getattr(self, \"template_cleaned\", False):\n alpha_prior = None\n\n # null out unused priors\n self.template_alpha = getattr(self, \"template_alpha\", None)\n if self.template_alpha is None or all(\n [x is None for x in self.template_alpha.values()]\n ):\n alpha_prior = None\n\n # count alpha parameters to fit\n alpha_tags = [x for x in alpha_tags if x in self.map_tags_orig]\n if not len(alpha_tags):\n alpha_prior = None\n\n num_alpha = 0\n if alpha_prior is not None:\n num_alpha = len(alpha_tags)\n\n # count beam parameters to fit\n beam_tags = [x for x in beam_tags if x in self.map_tags_orig]\n if not len(beam_tags):\n beam_prior = None\n\n num_beam = 0\n if beam_prior is not None:\n num_beam = len(beam_tags)\n\n if not any([k.startswith(\"res_\") for k in qb]):\n res_prior = None\n\n if np.any(\n [\n betad_prior is not None,\n dust_amp_prior is not None,\n dust_ellind_prior is not None,\n ]\n ):\n dust_ell_fit = True\n else:\n dust_ell_fit = False\n\n # bookkeeping: ordered priors\n priors = {\n \"r_prior\": r_prior,\n \"alpha_prior\": alpha_prior,\n \"res_prior\": res_prior,\n \"beam_prior\": beam_prior,\n \"betad_prior\": betad_prior,\n \"dust_amp_prior\": dust_amp_prior,\n \"dust_ellind_prior\": dust_ellind_prior,\n }\n # priors on quantities that affect Dmat_obs or gmat (precalculated)\n obs_priors = [alpha_prior]\n\n # check parameter space\n if all([x is None for x in priors.values()]):\n raise RuntimeError(\"Empty parameter space\")\n\n out = dict(\n r_prior=r_prior,\n alpha_prior=alpha_prior,\n res_prior=res_prior,\n beam_prior=beam_prior,\n betad_prior=betad_prior,\n dust_amp_prior=dust_amp_prior,\n dust_ellind_prior=dust_ellind_prior,\n alpha_tags=alpha_tags,\n num_walkers=num_walkers,\n null_first_cmb=null_first_cmb,\n apply_gcorr=self.apply_gcorr,\n weighted_bins=self.weighted_bins,\n lmin=lmin,\n lmax=lmax,\n )\n\n if mcmc and reset_backend is None:\n ret = self.load_data(\n save_name,\n \"likelihood\",\n bp_opts=True,\n to_attrs=False,\n map_tag=map_tag,\n value_ref=out,\n extra_tag=file_tag,\n )\n if ret is not None and ret.get(\"converged\", False):\n if converge_criteria >= ret.get(\"converge_criteria\", 0.01):\n return ret\n if ret is not None:\n for pname, pval in priors.items():\n if np.all(pval != ret.get(pname, None)):\n ret = None\n # clear chain cache if rerunning, otherwise append to chain by default\n reset_backend = ret is None\n\n out.update(converge_criteria=converge_criteria)\n\n # save state\n if mcmc and reset_backend:\n self.save_data(\n save_name, map_tag=map_tag, extra_tag=file_tag, bp_opts=True, **out\n )\n\n # clear pre-computed quantities\n 
self.clear_precalc()\n use_precalc = all([x is None for x in obs_priors])\n\n cls_input, cls_noise, cls_debias = self.get_data_spectra()\n\n # extract residual bins, ignoring bins outside of lmin/lmax\n if res_prior is not None:\n bin_def_orig = copy.deepcopy(self.bin_def)\n nbins_res_orig = self.nbins_res\n qb_res = OrderedDict()\n num_res = 0\n for k in list(qb):\n if k.startswith(\"res_\"):\n bd = self.bin_def[k]\n good = np.where((bd[:, 1] > lmin) & (bd[:, 0] < lmax))[0]\n # use all qb res in range lmin, lmax\n self.bin_def[k] = bd[good]\n v = qb.pop(k)[good]\n num_res += len(v)\n\n # use average qb res in good range per map\n # self.bin_def[k] = np.array([[lmin, lmax + 1]])\n # v = np.array([(qb.pop(k)[good]).mean()])\n # num_res += 1\n qb_res[k] = v\n self.nbins_res = num_res\n\n # set CMB model bandpowers to unity, since we are computing\n # the likelihood of this model given the data\n if r_prior is None:\n self.log(\"Computing model spectrum\", \"debug\")\n self.warn(\"Beam variation not implemented for case of no r fit\")\n cbl = self.bin_cl_template(map_tag=map_tag)\n cls_model = self.get_model_spectra(qb, cbl, delta=True, cls_noise=cls_noise)\n else:\n qb = copy.deepcopy(qb)\n for spec in self.specs:\n stags = [\"cmb_{}\".format(spec), \"fg_{}\".format(spec)]\n for stag in stags:\n if stag not in qb:\n continue\n qb[stag] = np.ones_like(qb[stag])\n\n self.log(\"Computing r model spectrum\", \"debug\")\n cls_shape_scalar = self.get_signal_shape(\n r=1.0, save=False, component=\"scalar\"\n )\n\n cls_shape_tensor = self.get_signal_shape(\n r=1.0, save=False, component=\"tensor\"\n )\n\n # load tensor and scalar terms separately\n cbl_scalar = self.bin_cl_template(cls_shape_scalar, map_tag)\n cls_model_scalar = self.get_model_spectra(\n qb, cbl_scalar, delta=True, cls_noise=cls_noise\n )\n cbl_tensor = self.bin_cl_template(cls_shape_tensor, map_tag)\n cls_model_tensor = self.get_model_spectra(\n qb, cbl_tensor, delta=False, res=False\n )\n if beam_prior is not None:\n # load beam error term for tensor and scalar\n cbl_scalar_beam = self.bin_cl_template(\n cls_shape_scalar, map_tag, beam_error=True\n )\n cls_mod_scal_beam = self.get_model_spectra(\n qb, cbl_scalar_beam, delta=True, res=False\n )\n cbl_tensor_beam = self.bin_cl_template(\n cls_shape_tensor, map_tag, beam_error=True\n )\n cls_mod_tens_beam = self.get_model_spectra(\n qb, cbl_tensor_beam, delta=False, res=False\n )\n\n # load foreground shape\n if dust_ell_fit:\n cls_shape_dust = self.get_signal_shape(save=False, component=\"fg\")\n # if dust_ellind_prior is None:\n # # can preload shape since not varying ell index\n cbl_fg = self.bin_cl_template(cls_shape_dust, map_tag=map_tag)\n if beam_prior is not None:\n cbl_fg_beam = self.bin_cl_template(\n cls_shape_dust, map_tag, beam_error=True\n )\n\n cbl = copy.deepcopy(cbl_scalar)\n cls_model = copy.deepcopy(cls_model_scalar)\n\n # XXX TODO\n # how to marginalize over the garbage bin?\n\n def parse_params(theta):\n \"\"\"\n Parse array of parameters into a dict\n \"\"\"\n params = {}\n if r_prior is not None:\n params[\"r\"] = theta[0]\n theta = theta[1:]\n if alpha_prior is not None:\n params[\"alpha\"] = theta[:num_alpha]\n theta = theta[num_alpha:]\n if res_prior is not None:\n params[\"res\"] = theta[:num_res]\n theta = theta[num_res:]\n if beam_prior is not None:\n params[\"beam\"] = theta[:num_beam]\n theta = theta[num_beam:]\n if betad_prior is not None:\n params[\"betad\"] = theta[0]\n theta = theta[1:]\n if dust_amp_prior is not None:\n # param for ee and bb\n 
params[\"dust_amp\"] = theta[:2]\n theta = theta[2:]\n if dust_ellind_prior is not None:\n params[\"dust_ellind\"] = theta[0]\n theta = theta[1:]\n if len(theta):\n raise ValueError(\"Too many parameters to parse\")\n return params\n\n def log_prior(\n r=None,\n alpha=None,\n res=None,\n beam=None,\n betad=None,\n dust_amp=None,\n dust_ellind=None,\n ):\n \"\"\"\n Log prior function constructed from input options\n \"\"\"\n values = {\n \"r_prior\": r,\n \"alpha_prior\": alpha,\n \"res_prior\": res,\n \"dust_amp_prior\": dust_amp,\n }\n for v, pval in values.items():\n prior = priors[v]\n if pval is not None and prior is not None:\n if np.any(pval < prior[0]) or np.any(pval > prior[1]):\n return -np.inf\n\n values_gauss = {\n \"beam_prior\": beam,\n \"betad_prior\": betad,\n \"dust_ellind_prior\": dust_ellind,\n }\n # for beam and betad, use gaussian prior\n log_prob = 0.0\n for v, pval in values_gauss.items():\n prior = priors[v]\n if pval is not None and prior is not None:\n pval = np.atleast_1d(pval)\n norm = np.log(1.0 / (prior[1] * np.sqrt(2 * np.pi)))\n chi = (pval - prior[0]) / prior[1]\n log_prob += np.sum(norm - chi ** 2 / 2.0)\n\n return log_prob\n\n def log_like(\n r=None,\n alpha=None,\n res=None,\n beam=None,\n betad=None,\n dust_amp=None,\n dust_ellind=None,\n ):\n \"\"\"\n Log likelihood function constructed from input options\n \"\"\"\n cls_model0 = copy.deepcopy(cls_model)\n\n # compute new template subtracted data spectra\n if alpha is None:\n clsi = cls_input\n else:\n self.get_masked_data(template_alpha=OrderedDict(zip(alpha_tags, alpha)))\n clsi = self.get_data_spectra(do_noise=False)\n\n if beam is not None:\n beam = dict(zip(beam_tags, beam))\n beam_coeffs = dict()\n for xname, (m0, m1) in self.map_pairs_orig.items():\n d = {}\n b0, b1 = [beam.get(m, None) for m in (m0, m1)]\n if b0 is not None:\n d[\"b1\"] = b0\n if b1 is not None:\n d[\"b2\"] = b1\n if b0 is not None:\n d[\"b3\"] = b0 * b1\n beam_coeffs[xname] = d\n\n # compute new signal shape by scaling tensor component by r\n if r is not None:\n for stag, d in cls_model0.items():\n comp, spec = stag.split(\"_\", 1)\n if spec not in [\"ee\", \"bb\"] or comp not in [\"cmb\", \"total\"]:\n continue\n ctag = \"cmb_{}\".format(spec)\n for xname, dd in d.items():\n dd[:] = (\n cls_model_scalar[stag][xname]\n + r * cls_model_tensor[ctag][xname]\n )\n\n if beam is None:\n continue\n beam_term = 0\n for bn, bc in beam_coeffs[xname].items():\n beam_term += bc * (\n cls_mod_scal_beam[ctag][xname][bn]\n + r * cls_mod_tens_beam[ctag][xname][bn]\n )\n dd[:] += beam_term\n\n elif beam is not None:\n for stag, d in cls_model0.items():\n comp, spec = stag.split(\"_\", 1)\n if spec not in [\"ee\", \"bb\"] or comp not in [\"cmb\", \"total\"]:\n continue\n ctag = \"cmb_{}\".format(spec)\n for xname, dd in d.items():\n beam_term = 0\n for bn, bc in beam_coeffs[xname].items():\n beam_term += bc * cls_mod_scal_beam[ctag][xname][bn]\n dd[:] = cls_model_scalar[stag][xname] + beam_term\n\n # fg term, including beam modifications. 
Because mix terms are\n # dependent on dust amp, get model specs here.\n if dust_ell_fit:\n if dust_amp is None:\n qb[\"fg_ee\"][:] = 1\n qb[\"fg_bb\"][:] = 1\n else:\n qb[\"fg_ee\"][:] = dust_amp[0]\n qb[\"fg_bb\"][:] = dust_amp[1]\n if betad is None:\n qb[\"delta_beta\"][:] = 0\n else:\n qb[\"delta_beta\"][:] = betad\n if dust_ellind is not None:\n cbl_fg0 = self.bin_cl_template(\n cls_shape_dust, map_tag=map_tag, fg_ell_ind=dust_ellind\n )\n if beam is not None:\n cbl_fg_beam0 = self.bin_cl_template(\n cls_shape_dust,\n map_tag,\n fg_ell_ind=dust_ellind,\n beam_error=True,\n )\n else:\n cbl_fg0 = cbl_fg\n if beam is not None:\n cbl_fg_beam0 = cbl_fg_beam\n\n cls_model_fg = self.get_model_spectra(\n qb, cbl_fg0, delta=True, res=False\n )\n if beam is not None:\n cls_mod_fg_beam = self.get_model_spectra(\n qb, cbl_fg_beam0, delta=True, res=False\n )\n # add fg field to model, and add fg to total model\n for stag, d in cls_model_fg.items():\n comp, spec = stag.split(\"_\", 1)\n if spec not in [\"ee\", \"bb\"] or comp not in [\"fg\", \"total\"]:\n continue\n ftag = \"fg_{}\".format(spec)\n if stag not in cls_model0:\n cls_model0[stag] = OrderedDict()\n for xname, dd in d.items():\n if xname not in cls_model0[stag]:\n cls_model0[stag][xname] = cls_model_fg[ftag][xname]\n else:\n cls_model0[stag][xname] += cls_model_fg[ftag][xname]\n\n # add beam terms to fg and total fields\n if beam is not None:\n beam_term = 0\n for bn, bc in beam_coeffs[xname].items():\n beam_term += bc * cls_mod_fg_beam[ftag][xname][bn]\n cls_model0[stag][xname] += beam_term\n\n # compute noise model terms\n if res is None:\n clsm = cls_model0\n else:\n res = pt.arr_to_dict(res, qb_res)\n clsm = copy.deepcopy(cls_model0)\n cls_res = self.get_model_spectra(res, cbl)\n for stag, d in cls_res.items():\n if stag not in clsm:\n clsm[stag] = OrderedDict()\n for xname, dd in d.items():\n if xname not in clsm[stag]:\n clsm[stag][xname] = dd\n else:\n clsm[stag][xname] += dd\n\n # compute likelihood\n like = self.fisher_calc(\n qb,\n cbl,\n clsi,\n cls_noise=cls_noise,\n cls_debias=cls_debias,\n cls_model=clsm,\n null_first_cmb=null_first_cmb,\n likelihood=True,\n use_precalc=use_precalc,\n like_lmin=lmin,\n like_lmax=lmax,\n )\n return like\n\n def log_prob(theta):\n \"\"\"\n Log posterior probability from prior and likelihood\n\n Returns log_prior with each step\n \"\"\"\n params = parse_params(theta)\n prior = log_prior(**params)\n if not np.isfinite(prior):\n return -np.inf, -np.inf\n like = log_like(**params)\n if not np.isfinite(like):\n return -np.inf, prior\n return prior + like, prior\n\n # initial values\n x0 = []\n brute_force = True if not mcmc else False # only vary r\n if r_prior is not None:\n x0 += [0.01]\n if alpha_prior is not None:\n alphas = [self.template_alpha[tag] for tag in alpha_tags]\n x0 += [0.01 if a == 0 else a for a in alphas]\n brute_force = False\n if res_prior is not None:\n x0 += list(pt.dict_to_arr(qb_res, flatten=True))\n brute_force = False\n if beam_prior is not None:\n # add a beam term for each frequency\n x0 += [0.01] * len(beam_tags)\n brute_force = False\n if betad_prior is not None:\n x0 += [0.01]\n brute_force = False\n if dust_amp_prior is not None:\n x0 += [1, 1]\n brute_force = False\n if dust_ellind_prior is not None:\n x0 += [0.01]\n brute_force = False\n\n ndim = len(x0)\n if ndim * 2 > num_walkers:\n num_walkers = int(np.round(ndim / float(num_walkers)) * num_walkers * 2)\n self.warn(\n \"Found {} parameters, increasing number of MCMC walkers to {}\".format(\n ndim, 
num_walkers\n )\n )\n x0 = np.array(x0)[None, :] * (1 + 1e-4 * np.random.randn(num_walkers, len(x0)))\n\n if brute_force or (r_prior is not None and ndim == 1):\n self.log(\"Computing brute-force r profile likelihood\", \"info\")\n likefile = self.get_filename(\n save_name, ext=\".txt\", map_tag=map_tag, extra_tag=file_tag, bp_opts=True\n )\n rs = np.linspace(0, 3, 500)\n likes = np.zeros_like(rs)\n for idx, r in enumerate(rs):\n like = log_like(r=r)\n if idx % 20 == 0:\n self.log(\"r = {:.3f}, loglike = {:.2f}\".format(r, like), \"debug\")\n likes[idx] = like\n header = \"{} r likelihood\\nColumns: r, loglike\".format(\n \"Multi-map\" if map_tag is None else \"Map {}\".format(map_tag)\n )\n np.savetxt(likefile, np.column_stack((rs, likes)), header=header)\n\n if not mcmc:\n return [rs, likes]\n\n # run chains!\n import emcee\n\n # setup sampler output file\n filename = self.get_filename(\n save_name, ext=\".h5\", map_tag=map_tag, extra_tag=file_tag, bp_opts=True\n )\n backend_exists = os.path.exists(filename)\n backend = emcee.backends.HDFBackend(filename)\n if backend_exists and backend.shape != (num_walkers, ndim):\n self.warn(\n \"Expected backend of shape ({}, {}), found {}. Resetting\".format(\n num_walkers, ndim, backend.shape\n )\n )\n reset_backend = True\n if reset_backend:\n backend.reset(num_walkers, ndim)\n\n # initialize sampler\n self.log(\"Initializing sampler\", \"info\")\n sampler = emcee.EnsembleSampler(num_walkers, ndim, log_prob, backend=backend)\n if not reset_backend and backend_exists:\n # grab the last sample if appending to an existing run\n x0 = sampler.run_mcmc(None, 1)\n\n # track autocorrelation time\n old_tau = np.inf\n converged = False\n\n self.log(\n \"Starting {} iterations with {} parameters\".format(num_steps, ndim), \"info\"\n )\n for sample in sampler.sample(x0, iterations=num_steps):\n if not sampler.iteration % 10:\n self.log(\"MCMC iteration {}\".format(sampler.iteration), \"debug\")\n # check convergence every 100 steps\n if sampler.iteration % 100:\n continue\n\n # compute autocorrelation time\n tau = sampler.get_autocorr_time(tol=0)\n\n # check convergence\n converged = np.all(tau / converge_criteria < sampler.iteration)\n converged &= np.all(np.abs(old_tau - tau) / tau < converge_criteria)\n self.log(\n \"MCMC iteration {} autocorr time: mean {:.1f} min {:.1f} max {:.1f}\".format(\n sampler.iteration, np.mean(tau), np.min(tau), np.max(tau)\n ),\n \"info\",\n )\n if converged:\n break\n old_tau = tau\n\n out.update(converged=converged, num_steps=sampler.iteration)\n\n # converged posterior distribution\n if converged:\n self.log(\n \"MCMC converged in {} iterations\".format(sampler.iteration), \"info\"\n )\n tau = sampler.get_autocorr_time()\n burnin = int(2 * np.max(tau))\n thin = int(0.5 * np.min(tau))\n samples = sampler.get_chain(discard=burnin, thin=thin, flat=True)\n out.update(tau=tau, burnin=burnin, thin=thin, samples=samples)\n else:\n self.warn(\"MCMC not converged in {} iterations\".format(num_steps))\n\n if res_prior is not None:\n self.bin_def = bin_def_orig\n self.nbins_res = nbins_res_orig\n\n # save and return\n return self.save_data(\n save_name, map_tag=map_tag, extra_tag=file_tag, bp_opts=True, **out\n )", "def optimize(self, startPoint=0, epsilon=1e-5, maxIterations=100):\n n = len(startPoint)\n alpha = 1\n Hk = numpy.eye(n)\n I = numpy.eye(n)\n k = 0\n xk = startPoint\n gk = self.g(xk)\n \n while 1:\n # Compute the norm of the gradient.\n gradNorm = numpy.sqrt(numpy.dot(gk, gk))\n\n # Display the function value for the 
current iteration.\n fk = f(xk)\n print \"%d: fval = %d, norm = %f\" % (k, fk, gradNorm) \n \n # Termination based on tolerenace criterion.\n if (gradNorm <= epsilon):\n print \"Terminating: Tolerence %f (fval = %f, norm = %f)\"\\\n % (epsilon, fk, gradNorm)\n return {'optimalPoint':xk, 'functVal':fk}\n\n # Termination due to maximum iterations.\n if (k > maxIterations):\n print \"Terminating: Max iterations %d (fval = %f, norm = %f)\" \\\n % (i, fk, gradNorm) \n return {'optimalPoint':xk, 'functVal':fk}\n\n # Computing the search direction.\n pk = -numpy.dot(Hk, gk)\n sk = alpha * pk\n xk1 = xk + sk\n gk1 = self.g(xk1)\n yk = gk1 - gk\n\n # Computing Hk1.\n rhok = 1.0 / numpy.dot(yk, sk)\n A = I - (rhok * numpy.outer(sk, yk))\n B = rhok * numpy.outer(sk, sk)\n Hk = numpy.dot(numpy.dot(A, Hk), A) + B\n\n # Update the variables for the next iteration.\n xk = xk1\n gk = gk1\n k += 1\n pass \n pass", "def beta_iter(b,px,py,pyx_c,pm_size,restarts,iterations):\n candidates = []\n for r in range(restarts):\n\t # initialize distribution for bottleneck variable\n\t pm = np.random.rand(pm_size)+1\n\t pm /= pm.sum()\n\t pym_c = np.random.rand(py.size,pm.size)+1 # Starting point for the algorithm\n\t pym_c /= pym_c.sum(axis=0)\n\t # iterate the BA algorithm\n\t for i in range(iterations):\n\t\t pmx_c, z = p_mx_c(pm,px,py,pyx_c,pym_c,b)\n\t\t pm = p_m(pmx_c,px)\n\t\t pym_c = p_ym_c(pm,px,py,pyx_c,pmx_c)\n\t\t if i>0 and np.allclose(pmx_c,pmx_c_old,rtol=1e-3,atol=1e-3):\n\t\t\t\t# if the x->m mapping is not updating any more, we're at convergence and we can stop\n\t\t\t break\n\t\t pmx_c_old = pmx_c\n\t candidates.append({'past_info' : mi_x1x2_c(pm, px, pmx_c),\n\t\t\t\t\t\t 'future_info' : mi_x1x2_c(py, pm, pym_c),\n\t\t\t\t\t\t 'functional' : -np.log2(np.inner(z,px))})\n\t# among the restarts, select the result that gives the minimum\n\t# value for the functional we're actually minimizing (eq 29 in\n\t# Tishby et al 2000).\n selected_candidate = min(candidates, key=lambda c: c['functional'])\n i_p = selected_candidate['past_info']\n i_f = selected_candidate['future_info']\n return [i_p,i_f,b]", "def implicitly_restarted_arnoldi_method(\n matvec, args, initial_state, num_krylov_vecs, numeig, which, eps, maxiter,\n res_thresh) -> Tuple[List[Tensor], List[Tensor]]:\n N = np.prod(initial_state.shape)\n p = num_krylov_vecs - numeig\n num_krylov_vecs = np.min([num_krylov_vecs, N])\n if (p <= 1) and (num_krylov_vecs < N):\n raise ValueError(f\"`num_krylov_vecs` must be between `numeig` + 1 <\"\n f\" `num_krylov_vecs` <= N={N},\"\n f\" `num_krylov_vecs`={num_krylov_vecs}\")\n\n dtype = initial_state.dtype\n # initialize arrays\n krylov_vectors = jax.numpy.zeros(\n (num_krylov_vecs + 1, jax.numpy.ravel(initial_state).shape[0]),\n dtype=dtype)\n H = jax.numpy.zeros((num_krylov_vecs + 1, num_krylov_vecs), dtype=dtype)\n # perform initial arnoldi factorization\n Vm_tmp, Hm_tmp, numits, converged = arnoldi_fact(matvec, args,\n initial_state,\n krylov_vectors, H, 0,\n num_krylov_vecs, eps)\n # obtain an m-step arnoldi factorization\n Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, numits)\n\n it = 0\n if which == 'LR':\n _which = 0\n elif which == 'LM':\n _which = 1\n else:\n raise ValueError(f\"which = {which} not implemented\")\n # make sure the dtypes are matching\n if maxiter > 0:\n if Vm.dtype == np.float64:\n dtype = np.complex128\n elif Vm.dtype == np.float32:\n dtype = np.complex64\n elif Vm.dtype == np.complex128:\n dtype = Vm.dtype\n elif Vm.dtype == np.complex64:\n dtype = Vm.dtype\n else:\n raise 
TypeError(f'dtype {Vm.dtype} not supported')\n Vm = Vm.astype(dtype)\n Hm = Hm.astype(dtype)\n fm = fm.astype(dtype)\n\n while (it < maxiter) and (not converged):\n evals, _ = jax.numpy.linalg.eig(Hm)\n krylov_vectors, H, fk, converged = shifted_QR(Vm, Hm, fm, evals, numeig,\n p, _which, res_thresh)\n if converged:\n break\n v0 = jax.numpy.reshape(fk, initial_state.shape)\n # restart\n Vm_tmp, Hm_tmp, _, converged = arnoldi_fact(matvec, args, v0,\n krylov_vectors, H, numeig,\n num_krylov_vecs, eps)\n Vm, Hm, fm = update_data(Vm_tmp, Hm_tmp, num_krylov_vecs)\n it += 1\n\n ev_, U_ = np.linalg.eig(np.array(Hm))\n eigvals = jax.numpy.array(ev_)\n U = jax.numpy.array(U_)\n _, inds = LR_sort(eigvals, _which)\n vectors = get_vectors(Vm, U, inds, numeig)\n\n return eigvals[inds[0:numeig]], [\n jax.numpy.reshape(vectors[n, :], initial_state.shape)\n for n in range(numeig)\n ]", "def do_fitting(flam, ferr, object_lam_obs, lsf, starting_z, resampling_lam_grid, \\\n model_lam_grid, total_models, model_comp_spec, model_spec_hdu):\n\n #flam_obs, ferr_obs, lam_obs, lsf, redshift, resampling_lam_grid, \\\n #model_lam_grid, total_models, model_comp_spec, bc03_all_spec\n\n # Get resampled spectra for bootstrap based \n # on errors and assuming gaussian errors.\n # Resampling is done in observed frame.\n num_samp_to_draw = 1\n\n if num_samp_to_draw > 1:\n resampled_spec = get_resamples(flam, ferr, num_samp_to_draw)\n\n # loop over bootstrap runs\n for i in range(int(num_samp_to_draw)):\n \n # find if you're only doing one run or multiple bootstrap runs\n if num_samp_to_draw > 1:\n flam = resampled_spec[i]\n\n print \"Starting at redshift\", starting_z\n\n previous_z = starting_z\n num_iter = 0\n while 1:\n \n print \"\\n\", \"Currently testing redshift:\", previous_z\n\n # modify the model to be able to compare with data\n model_comp_spec_modified = \\\n model_modifications_wrapper(object_lam_obs, model_lam_grid, model_comp_spec, resampling_lam_grid, total_models, lsf, previous_z)\n print \"Model mods done at current z:\", previous_z, \"\\n\", \"Total time taken up to now --\", time.time() - start, \"seconds.\"\n\n # now get teh best fit model spectrum\n best_fit_model_in_objlamgrid, best_fit_model_whole, bestalpha, min_chi2, age, tau, tauv = \\\n get_best_fit_model(flam, ferr, object_lam_obs, resampling_lam_grid, \\\n model_comp_spec_modified, previous_z, model_spec_hdu)\n\n print \"Current min Chi2:\", \"{:.2}\".format(min_chi2)\n print \"Current best fit log(age [yr]):\", \"{:.2}\".format(age)\n print \"Current best fit Tau [Gyr]:\", \"{:.2}\".format(tau)\n print \"Current best fit Tau_V:\", tauv\n #plot_fit_and_residual(object_lam_obs, flam, ferr, best_fit_model_in_objlamgrid, bestalpha)\n\n #print \"Current best fit model parameters are:\"\n #print \"Age:\"\n #print \"Metallicity: Solar (this was kept fixed)\"\n #print \"Tau (i.e. exponential SFH time scale):\"\n #print \"Tau_V:\", , \"A_V:\",\n\n # now shift in wavelength range and get new_z\n new_z = \\\n shift_in_wav_get_new_z(flam, ferr, object_lam_obs, resampling_lam_grid, previous_z, \\\n len(best_fit_model_in_objlamgrid), best_fit_model_whole)\n print \"Current Old and New redshifts\", previous_z, new_z\n\n # Need to add code to stop it from getting worse\n # Can keep track of the minimum chi2 and if it increases after any run \n # then you know you've gone too far in that direction.\n\n # Stop if maximum iterations are reached\n num_iter += 1\n if num_iter >= 20:\n print \"Maximum iterations reached. 
Current grism redshift is\", new_z\n print \"Exiting out of iterative loop.\"\n break \n\n # if the relative error is less than 1% then stop\n if abs(new_z - previous_z) < 0.01 * new_z:\n break\n\n previous_z = new_z\n\n plot_fit_and_residual(object_lam_obs, flam, ferr, best_fit_model_in_objlamgrid, bestalpha)\n\n print \"New refined redshift\", new_z\n\n return None", "def _initialize_model(rngs):\n init_model_state, init_params = model_def.init(\n rngs, *dummy_input, train=False, debug=False).pop('params')\n # Set bias in the head to low value, such that loss is small initially.\n if config.get('init_head_bias', None) is not None:\n init_params = flax.core.unfreeze(init_params)\n init_params['output_projection'] = optimizers.tree_map_with_names(\n lambda p: jnp.full_like(p, config.init_head_bias),\n init_params['output_projection'],\n match_name_fn=lambda name: 'bias' in name)\n init_params = flax.core.freeze(init_params)\n return init_params, init_model_state", "def _fit_single(\n self,\n X,\n indices_ones,\n n1,\n n2,\n early_stop=None,\n init_params=None,\n in_place=False,\n run_number=None,\n ):\n old_ll = -self._np.inf\n success = False\n\n if init_params:\n if init_params is True:\n if (\n self.pi_ is not None\n and self.alpha_1_ is not None\n and self.alpha_2_ is not None\n and self.tau_1_ is not None\n and self.tau_2_ is not None\n ):\n alpha_1, alpha_2, tau_1, tau_2, pi = (\n self._np.asarray(self.alpha_1_),\n self._np.asarray(self.alpha_2_),\n self._np.asarray(self.tau_1_),\n self._np.asarray(self.tau_2_),\n self._np.asarray(self.pi_),\n )\n else:\n assert False\n else:\n (pi, alpha_1, alpha_2, tau_1, tau_2) = init_params\n else:\n alpha_1, alpha_2, tau_1, tau_2, pi = self._init_LBM_random(\n n1, n2, self.n_row_clusters, self.n_column_clusters, X.nnz\n )\n\n # Repeat EM step until convergence.\n for iteration in range(self.max_iter):\n if early_stop and iteration >= early_stop:\n ll = self._compute_likelihood(\n indices_ones, pi, alpha_1, alpha_2, tau_1, tau_2\n )\n break\n if iteration % 5 == 0:\n ll = self._compute_likelihood(\n indices_ones, pi, alpha_1, alpha_2, tau_1, tau_2\n )\n if (ll - old_ll) < (self.atol + self.rtol * self._np.abs(ll)):\n success = True\n break\n\n log_txt = f\"\\t EM Iter: {iteration:5d} \\t log-like:{ll.get() if self.use_gpu else ll:.4f} \\t diff:{self._np.abs(old_ll - ll).get() if self.use_gpu else self._np.abs(old_ll - ll):.6f}\"\n if self.verbosity > 1:\n logger.info(log_txt)\n else:\n logger.debug(log_txt)\n old_ll = ll\n pi, alpha_1, alpha_2, tau_1, tau_2 = self._step_EM(\n X, indices_ones, pi, alpha_1, alpha_2, tau_1, tau_2, n1, n2\n )\n else:\n success = True\n if self.verbosity > 1 and run_number:\n logger.info(\n f\"Run {run_number:3d} / {self.n_init:3d} \\t success : {success} \\t log-like: {ll.get() if self.use_gpu else ll:.4f} \\t nb_iter: {iteration:5d}\"\n )\n\n if in_place:\n self.loglikelihood_ = ll.get() if self.use_gpu else ll\n self.trained_successfully_ = True\n self.pi_ = pi.get() if self.use_gpu else pi\n self.alpha_1_ = alpha_1.get() if self.use_gpu else alpha_1\n self.alpha_2_ = alpha_2.get() if self.use_gpu else alpha_2\n self.tau_1_ = tau_1.get() if self.use_gpu else tau_1\n self.tau_2_ = tau_2.get() if self.use_gpu else tau_2\n\n return success, ll, pi, alpha_1, alpha_2, tau_1, tau_2", "def test_init_vals_length_error_in_fit_mle(self):\n # Note there is only one beta, so we can't go lower than zero betas.\n original_intercept_ref_position = self.fake_intercept_ref_pos\n for intercept_ref_position in [None, 
original_intercept_ref_position]:\n self.base_clog.intercept_ref_position = intercept_ref_position\n for i in [1, -1]:\n # This will ensure we have too many or too few intercepts\n num_coefs = self.fake_betas.shape[0] + i\n\n # Test to ensure that the ValueError when using an\n # init_intercepts kwarg with an incorrect number of parameters\n self.assertRaisesRegexp(ValueError,\n \"dimension\",\n self.base_clog.fit_mle,\n np.arange(num_coefs),\n print_res=False)\n\n return None", "def guess_fit_parameters(self):\n\n def errfcn(pars):\n lnl = -self._lnprob(pars)\n p = list(pars)\n p.append(lnl)\n logging.info(p)\n return lnl if np.isfinite(lnl) else np.sign(lnl) * 9e9\n\n if self.vary_bin_frac:\n initial_pars = [0.5, 0.5]\n bounds_list = [[0.0, 1.0], [0, 0.999]]\n else:\n initial_pars = [0.5]\n bounds_list = [[0, 0.999]]\n out = minimize(errfcn, initial_pars, bounds=bounds_list)\n self.guess_pars = out.x\n return out.x", "def gfit(func, x0, fprime, args=(), kwargs=dict(), maxiter=2000, ftol=0.001, factor=1., disp=False, bounds=None):\n # 2013-08-09 10:37 IJMC: Created\n # 2013-08-11 16:06 IJMC: Added a missing boolean flag\n \n if bounds is not None:\n bounds = np.array(bounds)\n\n def applyBounds(params):\n if bounds is not None:\n params = np.vstack((params, bounds[:,0])).max(0)\n params = np.vstack((params, bounds[:,1])).min(0)\n return params\n\n bestparams = applyBounds(x0)\n nx = bestparams.size\n\n metric = func(x0, *args, **kwargs)\n dmetric = 9e9\n keepFitting = True\n lastIterSaidToStop = False\n \n\n iter = 0\n recalcGrad = True\n if disp:\n fmtstr = '%7i %1.'+str(np.abs(np.log(ftol)).astype(int)+2)+'f %1.5e %1.3e'\n print ' ITER METRIC FACTOR DMETRIC'\n while iter<maxiter and keepFitting:\n iter += 1\n if recalcGrad: grad = fprime(bestparams, *args, **kwargs)\n newparam = applyBounds(bestparams - factor * grad)\n newmetric = func(newparam, *args, **kwargs)\n if newmetric < metric:\n bestparams = newparam.copy()\n dmetric = newmetric - metric\n metric = newmetric\n if recalcGrad is True: factor *= 1.5 # we updated twice in a row!\n recalcGrad = True\n if np.abs(dmetric) < ftol:\n if disp: print \"Met termination criterion\"\n if lastIterSaidToStop:\n keepFitting = False\n else:\n lastIterSaidToStop = True\n else:\n factor /= 2\n recalcGrad = False\n lastIterSaidToStop = False\n \n if disp: print fmtstr % (iter, metric, factor, dmetric)\n\n return bestparams, metric, iter", "def fit(events_generator, n_params, n_obs, n_bg, sigma, prior, out_put_dir, bg_model='vary',\n resume=False, verbose=True, n_live_points=5000, evidence_tolerance=0.1, sampling_efficiency=0.3, **kwargs):\n if bg_model == 'vary':\n def lgl(cube, ndim, nparams):\n n_signal = events_generator(cube)\n likelihood = np.zeros(n_obs.shape[0])\n for i in range(n_obs.shape[0]):\n n_bg_list = np.arange(max(0, int(n_bg[i] - 2*np.sqrt(n_bg[i]))), max(10, int(n_bg[i] + 2*np.sqrt(n_bg[i]))))\n for nbgi in n_bg_list:\n likelihood[i] += quad(lambda a: _poisson(n_obs[i], (1 + a) * n_signal[i] + nbgi) *\n _gaussian(a, 0, sigma), -3 * sigma, 3 * sigma)[0] * _poisson(n_bg[i], nbgi)\n prod_like = np.prod(likelihood)\n return np.log(prod_like) if prod_like > 0 else -np.inf\n elif bg_model == 'shape':\n def lgl(cube, ndim, nparams):\n n_signal = events_generator(cube)\n prod_like = 0\n nbg_total = sum(n_bg)\n for nbg in np.arange(int(nbg_total - 2*np.sqrt(nbg_total)), int(nbg_total + 2*np.sqrt(nbg_total))):\n likelihood = np.zeros(n_obs.shape[0])\n for i in range(n_obs.shape[0]):\n if n_bg[i] <= 0:\n likelihood[i] = 1\n continue\n 
likelihood[i] += quad(lambda a: _poisson(n_obs[i], (1 + a)*n_signal[i] + n_bg[i]*nbg/nbg_total) *\n _gaussian(a, 0, sigma), -3 * sigma, 3 * sigma)[0]\n prod_like += np.prod(likelihood) * _poisson(nbg_total, nbg)\n return np.log(prod_like) if prod_like > 0 else -np.inf\n elif bg_model == 'fixed':\n def lgl(cube, ndim, nparams):\n n_signal = events_generator(cube)\n likelihood = np.zeros(n_obs.shape[0])\n for i in range(n_obs.shape[0]):\n if n_bg[i] <= 0:\n likelihood[i] = 1\n continue\n likelihood[i] = quad(lambda a: _poisson(n_obs[i], (1 + a) * n_signal[i] + n_bg[i]) *\n _gaussian(a, 0, sigma), -3 * sigma, 3 * sigma)[0]\n return np.sum(np.log(likelihood))\n else:\n raise Exception('background model not implemented!')\n\n def prr(cube, ndim, nparams):\n prior(cube)\n\n pymultinest.run(lgl, prr, n_params, outputfiles_basename=out_put_dir+'_',\n resume=resume, verbose=verbose, n_live_points=n_live_points,\n evidence_tolerance=evidence_tolerance, sampling_efficiency=sampling_efficiency,\n **kwargs)", "def SPGP_train(X,Y,num_pseudo_inputs,num_starts=1):\n \n (n,dim) = X.shape\n m = np.min([num_pseudo_inputs,n])\n \n # center data\n mu_y = np.mean(Y)\n y0 = Y - mu_y\n \n min_lik = np.inf\n for i in range(num_starts): \n # randomly choose initial points\n\t# should randomly sample, but hacking this in for the ACR since\n\t# the pandas version is older\n\t#xb_init = np.array(X.sample(m))\n\txb_init = np.array(X.iloc[:m,:])\n \n # initialize hyperparameters\n hyp_ARD = np.array([-2*np.log((X.max() - X.min() + 0.1) / 2)])\n hyp_coeff = np.array([[np.log(Y.var() + 0.1)]])\n hyp_noise = np.array([[np.log(Y.var() / 4 + 0.01)]])\n hyperparams = pack_hyps(xb_init, hyp_ARD, hyp_coeff, hyp_noise)\n \n # minimize neg. log likelihood\n # min_result = minimize(SPGP_likelihood, hyperparams, args=(y0,np.array(X),m), method='BFGS', jac=True)\n #iter_res = np.reshape(min_result.x, (1,(m+1)*dim + 2))\n #lik = SPGP_likelihood(iter_res,y0,np.array(X),m,compute_deriv=False)\n #st = time.time()\n (iter_res, lik, i) = minimize(hyperparams, SPGP_likelihood, args=(y0,np.array(X),m), maxnumfuneval=200)\n #print(time.time() - st)\n if(lik[0] < min_lik):\n min_lik = lik[0]\n opt_res = iter_res\n \n # extract minimizing hyperparameters\n (xb, hyp_ARD, hyp_coeff, hyp_noise) = unpack_hyps(opt_res, m, dim)\n \n hyperparams = (hyp_ARD, hyp_coeff, hyp_noise)\n \n return xb, hyperparams #, mu_y" ]
[ "0.57707477", "0.56678426", "0.5657085", "0.5636293", "0.5565462", "0.5560217", "0.5544692", "0.5538522", "0.5508883", "0.54963434", "0.5412181", "0.5406637", "0.537584", "0.5374977", "0.5325472", "0.53251934", "0.53189695", "0.53159976", "0.530835", "0.5276131", "0.5253891", "0.5239956", "0.52396727", "0.5238809", "0.5213594", "0.51966673", "0.51920253", "0.5185895", "0.51835847", "0.5166685" ]
0.70813787
0
Initialize the table, create default roles, set profile image
def __init__(self, **kwargs): super(User, self).__init__(**kwargs) Role.insert_roles() if self.role is None: if self.email == current_app.config["FLASKY_ADMIN"]: self.role = Role.query.filter_by(permissions=0xff).first() if self.role is None: self.role = Role.query.filter_by(default=True).first() # self.image_url = photos.url("user/default.png") self.image_url = self.avatar(128)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize_role_table():\n roles = [\n {\n \"name\": \"user\",\n \"description\": \"registered user permission\",\n \"raw_permissions\": Role.Permissions.REGISTERED.value\n },\n {\n \"name\": \"editor\",\n \"description\": \"user has ability to edit all content and comments\",\n \"raw_permissions\": (Role.Permissions.REGISTERED | Role.Permissions.EDITOR).value\n },\n {\n \"name\": \"admin\",\n \"description\": \"administrator user with access to all of the application\",\n \"raw_permissions\": (Role.Permissions.REGISTERED | Role.Permissions.EDITOR | Role.Permissions.ADMINISTRATOR).value\n }\n ]\n with session_manager() as session:\n for r in roles:\n role = Role.query.filter(Role.name == r.get(\"name\")).one_or_none()\n\n # is there no existing role by a given name?\n if role is None:\n role = Role(\n name=r.get(\"name\"),\n description=r.get(\"description\"),\n raw_permissions=r.get(\"raw_permissions\")\n )\n # otherwise, need to update existing role permissions\n else:\n role.description = r.get(\"description\")\n role.raw_permissions = r.get(\"raw_permissions\")\n\n db.session.add(role)\n db.session.commit()", "def setup(self):\n # TODO: refactor database cleanup\n with gus.config.get_db_conn().cursor() as c:\n c.execute(\"TRUNCATE TABLE chef_roles, chef_roles_xref_projects CASCADE\")\n self.role_name = 'www'\n self.role_id = chef_role.create(self.role_name, True)", "def do_createuser(self, *args):\n self.connection_obj.initialize_table()\n print(\"UserTable Created Successful\")", "def initialize():\n DATABASE.connect()\n DATABASE.create_tables([User], safe=True)\n DATABASE.close()", "def initialize():\n DATABASE.connect()\n DATABASE.create_tables([User, Entry], safe=True)\n DATABASE.close()", "def setup_table(self):\n\n self.setup.create_basic_table_in_dev()\n self.setup.insert_random_records_into_dev()", "def create_table(self):\n pass", "def init_db():\n db = get_db()\n Page.create_table(db)\n PageVersion.create_table(db)\n User.create_table(db)", "def initialize():\n \n db.connect()\n db.create_tables([Product], safe=True)", "def _create_schema(self): \n q = (\"CREATE TABLE IF NOT EXISTS \" + \\\n \"profiles (username text, body text, epoch numeric)\",)\n for x in q: self.cursor.execute(x)\n self.conn.commit()", "def initialize():\n db.connect()\n db.create_tables([Expense], safe=True)", "def init_db_command():\n # db.create_all()\n Role.insert_roles()\n admin = User(email=current_app.config[\"FLASK_ADMIN\"],\n password=\"secure\", username=\"zhangzhi_up\", confirmed=True)\n db.session.add(admin)\n db.session.commit()\n click.echo('Initialized the database.')", "def init():\n database.create_tables([Tracker])\n database.commit()", "def insert_roles():\n roles = {\n 'User': [Permission.CRUD_OWNED],\n 'Usermanager': [Permission.CRUD_OWNED, Permission.CRUD_USERS],\n 'Administrator': [Permission.CRUD_OWNED, Permission.CRUD_USERS,\n Permission.ADMIN],\n }\n default_role = 'User'\n for rol in roles:\n role = Role.query.filter_by(name=rol).first()\n if role is None:\n role = Role(name=rol)\n role.reset_permissions()\n for perm in roles[rol]:\n role.add_permission(perm)\n role.default = (role.name == default_role)\n db.session.add(role)\n db.session.commit()", "def bootstrap():\n db.create_all()\n os.environ['ADMIN_EMAIL'] = '[email protected]'\n os.environ['ADMIN_PASSWORD'] = '1111'\n admin = User(email='[email protected]', username='Maxim', password='1111')\n admin.gravatar()\n db.session.commit()\n db.session.add(admin)\n User._bootstrap()\n Interest._bootstrap()", "def 
init_roles(self):\n self.role_owner = Role.objects.get_or_create(\n name=PROJECT_ROLE_OWNER, rank=ROLE_RANKING[PROJECT_ROLE_OWNER]\n )[0]\n self.role_delegate = Role.objects.get_or_create(\n name=PROJECT_ROLE_DELEGATE, rank=ROLE_RANKING[PROJECT_ROLE_DELEGATE]\n )[0]\n self.role_contributor = Role.objects.get_or_create(\n name=PROJECT_ROLE_CONTRIBUTOR,\n rank=ROLE_RANKING[PROJECT_ROLE_CONTRIBUTOR],\n )[0]\n self.role_guest = Role.objects.get_or_create(\n name=PROJECT_ROLE_GUEST, rank=ROLE_RANKING[PROJECT_ROLE_GUEST]\n )[0]\n self.role_finder = Role.objects.get_or_create(\n name=PROJECT_ROLE_FINDER,\n rank=ROLE_RANKING[PROJECT_ROLE_FINDER],\n project_types=[PROJECT_TYPE_CATEGORY],\n )[0]", "def _init_db(self):\n cursor = self._main_connection.cursor()\n cursor.execute(self.sql[\"create_table\"])\n self._main_connection.commit()", "def init_tables(self):\n\n settings.Base.metadata.tables[\n 'session_master'].drop(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].drop(bind=settings.engine)\n\n settings.Base.metadata.tables[\n 'session_master'].create(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].create(bind=settings.engine)\n\n logging.info(\"Sessionization Tables created\")", "def initialize():\n db.connect()\n db.create_tables([Entry], safe=True)", "def initialize():\n db.connect()\n db.create_tables([Entry], safe=True)", "def initDB():\n global DATABASE\n\n uid0 = generate_resource_uid('Admin1', 0)\n\n DATABASE[\"users\"] = {\n \"Admin1\": {\n \"Type\": \"admin\",\n \"Password\": \"AdminPass\",\n \"Quota\": int(sys.maxsize),\n \"Resources\": {uid0},\n \"Created\": 1,\n },\n \"User1\": {\n \"Type\": \"user\",\n \"Password\": \"UserPass\",\n \"Quota\": int(sys.maxsize),\n \"Resources\": set([]),\n \"Created\": 0,\n }\n }\n\n DATABASE[\"resources\"] = {\n uid0: \"Admin1\",\n }", "def setUp(self):\r\n\t\tself.u1 = User.objects.create(username='Gabby')\r\n\t\tself.u1.profile.bio = \"I'm a female profile with inserted components\"\r\n\t\tself.u1.profile.birth_date = datetime.now()\r\n\t\tself.u1.profile.gender = 'female'\r\n\t\tself.u1.profile.save()", "def __init__(self, **kwargs):\n super(User, self).__init__(**kwargs)\n\n # Set default role for a regular new User\n self.role = Role.query.filter_by(default=True).first()", "def setUp(self):\n # ensure there is no data in the test database when the test starts\n db.session.commit()\n db.drop_all()\n db.create_all()\n usRoles = [\"Guest\",\"Couple\",\"2nd line\",\"Wedding party\"]\n\n for i in usRoles:\n roleAdd = User_roles(role = i)\n db.session.add(roleAdd)\n db.session.commit()\n\n # create test admin user\n admin = User(first_name=\"admin\", last_name=\"admin\",permission=\"Couple\", email=\"[email protected]\", password=\"admin2016\")\n\n # create test non-admin user\n employee = User(first_name=\"test\", last_name=\"user\",permission = \"Guest\", email=\"[email protected]\", password=\"test2016\")\n\n # save users to database\n db.session.add(admin)\n db.session.add(employee)\n db.session.commit()", "def insert_roles_staging(self):\n\n self.load_wikidata(\"roles\", ROLES_SPARQL_QUERY, INSERT_ROLE_SQL_QUERY,\n INSERT_ROLE_MAP_COLUMNS)", "def initialize():\n\n db.connect() # Se conecta\n db.create_tables([Entry], safe=True) # Crea las tablas\n # safe=true evita crear modelos ya creados", "def initialize_db(self) -> None:\n if not self.check_schema_initialized():\n self._create_genes_table()\n self._create_meta_data_table()", "def test_profiles_table_populated(self):\n print('(' + 
self.test_profiles_table_populated.__name__ + ')',\n self.test_profiles_table_populated.__doc__)\n test_table_populated(self, USERS_PROFILE_TABLE,\n INITIAL_USERS_PROFILE_COUNT)", "async def _create_table(self, table: TableSchema) -> None:\n try:\n await self.conn.execute(get_create_table(table))\n except PostgresError: # Only DB related exceptions\n print(f\"Failed to execute CREATE TABLE for {table['name']}\")\n raise\n # Initialize migration level (so that it can be altered in future)\n await self.conn.execute('INSERT INTO tinymud_migrations (table_name, level) VALUES ($1, $2)', table['name'], 0)", "def setUp(self):\n\n self.user_1 = User.objects.create_user(\n username='testuser', password='12345',\n email='[email protected]'\n )\n\n # self.profile_1 = Profile.objects.create(user=self.user_1,\n # image='profile_default.jpg')" ]
[ "0.7216759", "0.6458455", "0.6416841", "0.6405134", "0.6322869", "0.6314916", "0.61332387", "0.61158174", "0.60757625", "0.604426", "0.6002373", "0.5999153", "0.59768623", "0.5972818", "0.5942051", "0.5905494", "0.5901315", "0.5885008", "0.58837014", "0.58837014", "0.5876468", "0.5827163", "0.5784197", "0.5778841", "0.57723945", "0.57693064", "0.57598966", "0.5713701", "0.5686294", "0.5658105" ]
0.6746913
1
Returns the temp_files class where all decompressed data is stored. If you want a file, request it from this class and it will look for it, decompress it if it exists, and return it. It will also deal with cleanup when nearing the memory cap.
def temp_files(self) -> misc_.TempFilesContainer: return self._temp_files
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_temproot(cls):\n import tempfile\n\n return local(tempfile.gettempdir())", "def _tempfile(self):\n fd, path = tempfile.mkstemp(dir = os.path.join(self.root, \"temporary\"))\n try:\n return os.fdopen(fd, \"wb\"), path\n except:\n os.unlink(path)\n os.close(fd)\n raise", "def get_temp_file(self, delete: bool = False, close: bool = False):\n prefix = str(self._tmp_folder / f\"pysimt_{os.getpid()}\")\n t = tempfile.NamedTemporaryFile(\n mode='w', prefix=prefix, delete=delete)\n self.register_tmp_file(t.name)\n if close:\n t.close()\n return t", "def get_zip(self):\n self.zip.rewind()\n return self.zip.in_memory_zip", "def _tmp(self):\n tmpfn = tempfile.NamedTemporaryFile(prefix='tmp',\n suffix='.out',\n delete=False)\n return tmpfn.name", "def pg_get_temporary_file(self) -> Optional[TempFileInfo]:\n if self.failed_queries.temp_file_query_failed:\n # prevent a spam of errors in PostgreSQL logs if we already failed once\n # for lack of privilege or timeout\n return None\n\n if self.pg_num_version >= 120000:\n query = queries.get(\"get_temporary_files_post_120000\")\n elif self.pg_num_version >= 90100:\n query = queries.get(\"get_temporary_files_post_090100\")\n else:\n query = queries.get(\"get_temporary_files_oldest\")\n\n try:\n pg.execute(\n self.pg_conn,\n sql.SQL(\"SET statement_timeout TO {}\").format(sql.Literal(\"400ms\")),\n )\n return pg.fetchone(self.pg_conn, query, mkrow=TempFileInfo)\n except pg.InsufficientPrivilege:\n # superuser or pg_read_server_files are required (Issue #278)\n self.failed_queries.temp_file_query_failed = True\n logger.info(\n \"Insufficient privilege to fetch the tempfile data. \"\n \"The feature was disabled. Please use --no-tempfiles or a platform specific setting (eg. --rds).\"\n )\n\n return None\n except pg.QueryCanceled:\n # if an excessive amount of tempfile exists, the query could be very long\n # to avoid such a case we set a statement_timeout shorter than the lowest\n # refresh rate. This could end up spamming the PostgreSQL logs.\n self.failed_queries.temp_file_query_failed = True\n logger.info(\n \"The tempfile query ended in a timeout. \"\n \"The feature was disabled. 
Check the temporary files on the server.\"\n )\n return None\n finally:\n pg.execute(self.pg_conn, queries.get(\"reset_statement_timeout\"))", "def get_temp_dir():\n return settings.FILE_STORE_TEMP_DIR", "def managed_temp_object():\n import tempfile\n _, temp_object = tempfile.mkstemp()\n try:\n yield temp_object\n finally:\n os.remove(temp_object)", "def load_temp_dir():\n\n temp_subname = 'GETURLS_TMP_{}'.format(int(time.time()))\n tmp_dir = tempfile.TemporaryDirectory(prefix=temp_subname)\n return tmp_dir", "def getTempFile():\n root = getDir(tempDir)\n for i in range(100):\n path = os.path.join(root, '%d-%d' % (\n os.getpid(), random.randint(100000, 999999)))\n if not os.path.isfile(path):\n return path\n raise NotImplementedError(\"getTempFile() appears to be failing\")", "def get_temp_file(self, prefix=template, suffix=\"\"):\n ret = NamedTemporaryFile(delete=False, prefix=prefix, suffix=suffix)\n self._tempfiles.append(ret)\n if is_win():\n ret.close()\n return ret", "def _tmpfile(self,filename=None):\n\t\tif self._tmpdir is None:\n\t\t\tself._tmpdir = TemporaryDirectory(prefix=\"jitcxde_\")\n\t\t\n\t\tif filename is None:\n\t\t\treturn self._tmpdir.name\n\t\telse:\n\t\t\treturn path.join(self._tmpdir.name, filename)", "def _CreateStorageFile(self):\n if self._storage_type == definitions.STORAGE_TYPE_TASK:\n return gzip_file.GZIPStorageFile(storage_type=self._storage_type)\n\n return ZIPStorageFile(\n maximum_buffer_size=self._buffer_size,\n storage_type=self._storage_type)", "def a_temp_file():\n filename = None\n try:\n tmpfile = tempfile.NamedTemporaryFile(delete=False)\n filename = tmpfile.name\n yield tmpfile\n finally:\n if filename and os.path.exists(filename):\n os.remove(filename)", "def simple_files_data(tmpdir):\n return simple(tmpdir)[\"data\"]", "def _get_compressed_file(files, password=None):\n multiple_files = len(files) > 1\n # Replace the data and report type with just `.zip`.\n zipfile = re.sub(r'(_(\\w+))?\\.(\\w+)$', '.zip', files[0].name)\n compression = pyminizip.compress_multiple if multiple_files else pyminizip.compress\n compression([f.name for f in files] if multiple_files else files[0].name, zipfile, password, COMPRESSION_LEVEL)\n return zipfile", "def gettempdir():\n\tpass", "def temp_directory(self):\n\n return self.get_raw(\"temp_directory\")", "def copy_to_temp(object):\n temp_file = NamedTemporaryFile(delete=False)\n _copy_and_close(object, temp_file)\n return temp_file.name", "def _simple_files(tmpdir):\n return simple(tmpdir)[\"files\"]", "def _build_b(self):\r\n\r\n use_file = self.store and self.content_l >= self.file_limit\r\n if use_file: return tempfile.NamedTemporaryFile(mode = \"w+b\")\r\n else: return netius.legacy.BytesIO()", "def __enter__(self):\n self.temporary_directory = tempfile.mkdtemp(**self.options)\n logger.debug(\"Created temporary directory: %s\", self.temporary_directory)\n return self.temporary_directory", "def _create_temp_batch_file(self):\n return tempfile.NamedTemporaryFile(delete=False)", "def make_temp_file():\n with tempfile.NamedTemporaryFile() as f:\n return f.name", "def get_raster_file(self):\n self.log('Getting raster file from storage')\n\n raster_workdir = getattr(settings, 'RASTER_WORKDIR', None)\n self.tmpdir = tempfile.mkdtemp(dir=raster_workdir)\n\n # Access rasterfile and store in a temp folder\n rasterfile = open(os.path.join(self.tmpdir, self.rastername), 'wb')\n for chunk in self.rasterlayer.rasterfile.chunks():\n rasterfile.write(chunk)\n rasterfile.close()\n\n # If the raster file is compressed, 
decompress it\n fileName, fileExtension = os.path.splitext(self.rastername)\n\n if fileExtension == '.zip':\n\n # Open and extract zipfile\n zf = zipfile.ZipFile(os.path.join(self.tmpdir, self.rastername))\n zf.extractall(self.tmpdir)\n\n # Remove zipfile\n os.remove(os.path.join(self.tmpdir, self.rastername))\n\n # Get filelist from directory\n raster_list = glob.glob(os.path.join(self.tmpdir, \"*.*\"))\n\n # Check if only one file is found in zipfile\n if len(raster_list) > 1:\n self.log(\n 'WARNING: Found more than one file in zipfile '\n 'using only first file found. This might lead '\n 'to problems if its not a raster file.'\n )\n\n # Return first one as raster file\n self.rastername = os.path.basename(raster_list[0])", "def get_tmp_dir():\n tmpdir_obj = tempfile.TemporaryDirectory()\n tmpdir = tmpdir_obj.name\n return tmpdir, tmpdir_obj", "def _OpenFileObject(self):\n try:\n if self._decompress_stream:\n self._zip_file.extract(self.name, self._temporary_path)\n else:\n self._file_object = self._zip_file.open(self.name, mode=b'r')\n return\n\n except KeyError as exception:\n raise IOError(\n 'Unable to open stream with error: {0!s}'.format(exception))\n\n self._stream_file_path = os.path.join(self._temporary_path, self.name)\n self._file_object = open(self._stream_file_path, 'rb')", "def get_temp_dir():\n return tempfile.mkdtemp()", "def _get_tempdir(self):\n #return os.path.normpath(systmp() + '/' + self._get_instance())\n return self.tmpdir", "def temp_dir():\n global _temp_dir\n warnings.warn(\n \"Please use the :mod:`tempfile` module from the standard library\",\n DeprecationWarning\n )\n _create_temp_dir()\n return _temp_dir" ]
[ "0.59913236", "0.58054686", "0.5762157", "0.5731633", "0.5715007", "0.57014346", "0.5626013", "0.561726", "0.5587675", "0.5576853", "0.5558325", "0.5551208", "0.5544451", "0.550435", "0.547272", "0.5469176", "0.5460168", "0.5456278", "0.54530966", "0.54210013", "0.54140574", "0.5317902", "0.53133696", "0.5298985", "0.5293773", "0.52550024", "0.5253712", "0.52535313", "0.52223015", "0.52177674" ]
0.68818074
0
Returns the game identifier for the game currently loaded. Returns None if load_game has not been called.
def game_identifier(self) -> Union[str, None]: if self.game_functions is not None: if hasattr(self.game_functions, 'game_identifier'): return self.game_functions.game_identifier else: raise AttributeError else: return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_game_id(self) -> str:\n return self.game_name_entry.get()", "def get_last_game_id():\n\t\ttry:\n\t\t\tf = open(game_id_file, 'r')\n\t\t\tid = int(f.read())\n\t\t\tf.close()\n\t\texcept IOError:\n\t\t\tprint('IOError raised, returning zero (0)')\n\t\t\treturn 0\n\t\treturn id", "def get_id():\n global UNIQUE_GAME_ID\n with threadLock:\n UNIQUE_GAME_ID += 1\n return str(UNIQUE_GAME_ID)", "def get_game(self, request):\n return games_ctrl.get_game(request.urlsafe_game_key)", "def get_game(request):\n active_game = str(uuid4())\n GAMES[active_game] = Board()\n return {'id': active_game}", "def get_game(self):\n return self._game_board", "def get_player_id(self):\n return self.game.get_player_id()", "def league_id(self):\n if self.league_string == NBA_STRING:\n return NBA_GAME_ID_PREFIX\n elif self.league_string == WNBA_STRING:\n return WNBA_GAME_ID_PREFIX\n elif self.league_string == G_LEAGUE_STRING:\n return G_LEAGUE_GAME_ID_PREFIX", "def load_game(self):\n self.game = db.get_game(self.game_id)", "def get(self, game_id):\n ret = None\n game = _games.get(game_id)\n if game is not None:\n ret = game.to_dict()\n return ret", "def get_game_by_id(game_id):\n\n return Game.query.get(game_id)", "def get_gym_game_name():\n return 'SpaceInvadersNoFrameskip-v4'", "def get_game_version(uuid: UUID) -> Optional[str]:\n scenario = store.get_scenario(uuid)\n if scenario:\n return scenario.game_version\n return None", "def get_game(user_id):\n data = g.db.find_one({'_id': user_id})\n if data:\n return Game(data['game'])\n return None", "def get_game(self, request):\n game = get_by_urlsafe(request.urlsafe_game_key, Game)\n if game:\n if game.game_over:\n return game.to_form('Game is over.')\n else:\n return game.to_form('Make a move!')\n else:\n raise endpoints.NotFoundException('Game not found!')", "def get_game(self, game_id: UUID) -> Game:\n logger.info('Getting game with game_id = %s', game_id)\n try:\n return self.games[game_id]\n except KeyError:\n logger.exception(\n 'Game \"%s\" doesnt exist. 
Existing games: %s',\n game_id,\n self.games.keys()\n )\n raise DoesNotExist('Game with id \"%s\" doesnt exist' % game_id)", "def board_game_geek_id(title):\n pass", "def get_current_player(self):\r\n\r\n return self.players[(self.turn_number) % len(self.players)].get_id()", "def getUncachedGame(self, theKey):\n cachedGame = loadGameFromCache(theKey)\n if cachedGame != None:\n return cachedGame\n # Request the game directly on a cache miss.\r\n return RemoteGameRepository(self.theRepoURL).getGame(theKey)", "def get_gamepad():\n try:\n gamepad = devices.gamepads[0]\n except IndexError:\n raise UnpluggedError(\"No gamepad found.\")\n return gamepad.read()", "async def get_game(game_id):\n return await ex.conn.fetchrow(\"SELECT gameid, player1, player2, bid1, bid2, channelid FROM blackjack.games WHERE gameid = $1\", game_id)", "def set_game_id(self, game_name):\n dic = {(''.join(filter(str.isalpha, key))): v for key, v in self.games_map.items()}\n dic = dic[self.league]\n dic = {(''.join(filter(str.isalpha,key))):v for key,v in dic.items()}\n self.game_id = dic[game_name][0]\n self.game_time = dic[game_name][1]", "def get_game(self, request):\n game = get_by_urlsafe(request.urlsafe_key, Game)\n if not game:\n raise endpoints.NotFoundException('Game Not Found')\n else:\n if game.game_over:\n return game.to_form(message=\"This Game has ended!\")\n else:\n return game.to_form(message=\"Game in Progress!\")", "def get_game(self, request):\n game = get_by_urlsafe(request.urlsafe_game_key, Game)\n if game:\n return game.to_form('Time to make a move!')\n else:\n raise endpoints.NotFoundException('Game not found!')", "def load_game(self):\n print('Game loaded!')\n return pickle.load(open(\"save.dat\", 'rb'))", "async def fetch_game(self, id: int) -> Game:\n if not Game.MAPPING:\n await self.fill_game_list()\n\n game = Game(game_id=id)\n if game.name is None:\n raise ValueError(f'{id} is not a recognized game id.')\n\n return game", "def get_game(username):\n user = User.objects.get(username=username)\n if user.two_player_game_id != None:\n return TwoPlayerGame.objects.get(\n game_id=user.two_player_game_id), \"two\"\n if user.four_player_game_id != None:\n return FourPlayerGame.objects.get(\n game_id=user.four_player_game_id), \"four\"\n return None, None", "def do_load_game(game_id, this_player_id):\n c = common_db.Common_DB()\n\n logger.info(f\"Starting to load game for ID '{game_id}'\")\n game = Game()\n game.state.game_id = game_id\n game.state.this_player_id = this_player_id\n logger.debug(\"Created game\")\n\n players = get_users_for_game(game_id, c.common_engine)\n logger.debug(f\"players for game {game_id} identified: {players}\")\n game.players = []\n for player in players:\n p = Player(player)\n if p.ID == this_player_id:\n logger.debug(\"Found this player with ID %s\", this_player_id)\n game.this_player = p\n game.players.append(p)\n\n if not game.load(c.common_engine):\n logger.error(f\"Failed to load game {game_id}\")\n\n logger.info(f\"do_load_game complete for game {game_id}\")\n return game", "def game_choice(game):\n global set_game\n set_game = game\n return set_game", "async def fetch_game(self, id: int | Game) -> FetchedGame | None:\n id = id if isinstance(id, int) else id.id\n resp = await self.http.get_game(id)\n if resp is None:\n return None\n data = resp[str(id)]\n if not data[\"success\"]:\n return None\n return FetchedGame(self._connection, data[\"data\"])" ]
[ "0.7608188", "0.68220633", "0.66524506", "0.6388743", "0.62113184", "0.6166356", "0.61501443", "0.6094548", "0.60707545", "0.5975456", "0.58981115", "0.57935846", "0.5776967", "0.57671946", "0.5766691", "0.5746554", "0.56954575", "0.56759334", "0.5648986", "0.5631534", "0.5609831", "0.55686384", "0.5551821", "0.5550341", "0.55484277", "0.55201864", "0.5517718", "0.5509572", "0.5501638", "0.54966736" ]
0.77910143
0
Returns the dictionary mapping file name string to Forge class.
def forge_files(self) -> Dict[str, BaseForge]: return self._forge_files
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filenames(self) -> dict[str, str]:\r\n ...", "def get_cls_dict(config_path):\n return {i: n for i, n in enumerate(get_names(config_path))}", "def pre_lookup(self, file):\n return {}", "def file_parser(file):\n\n # Copy of the file instance to save it\n new_file = file\n dict_file = {}\n # We find the right function depending on the extension of the file\n meta_func = find_meta_function(find_extension(file))\n if callable(meta_func):\n dict_file = meta_func(new_file)\n return dict_file", "def classes(class_name):\r\n\td = {}\r\n\tfor k, v in class_name.__dict__.items():\r\n\t\tif not (k.startswith('__') and k.endswith('__')):\r\n\t\t\td[k] = v\r\n\treturn d", "def map_file_data(file_obj, file_events):\n file_as_dict = {\n \"premis:originalName\": file_obj.currentlocation,\n \"original_name\": escape(file_obj.originallocation),\n # needs investigation\n \"sanitized_file_name\": get_sanitized_file_name(\n get_file_name_cleanup(file_events)\n ),\n \"prov:generatedAtTime\": file_obj.modificationtime.strftime(\n \"%Y-%m-%dT%H:%M:%SZ\"\n ),\n \"premis:fixity\": {\n \"checksum_type\": convert_to_premis_hash_function(file_obj.checksumtype),\n \"Checksum\": file_obj.checksum,\n },\n \"premis:identifier\": file_obj.uuid,\n \"premis:size\": file_obj.size,\n \"file_name\": file_obj.label,\n # not sure if this is the file name or if we should stick with\n \"dct:FileFormat\": map_file_format_info(\n get_file_format_event(file_events), get_file_validation_event(file_events)\n ),\n \"file_validation\": map_file_validation_info(\n get_file_validation_event(file_events)\n ),\n \"file_normalization\": map_file_normalization_info(\n get_file_normalization_event(file_events)\n ),\n \"events\": list_file_events(file_events),\n }\n return file_as_dict", "def get_class(fileName):\n module = __import__(fileName)\n return getattr(module, fileName)", "def _get_extension_to_type_map(file_types):\n extension_to_type = dict()\n for file_type in file_types:\n for file_ext in file_type['extensions']:\n if file_ext not in extension_to_type:\n extension_to_type[file_ext] = file_type\n return extension_to_type", "def get_dict(modfile):\n import codecs\n\n odict = dict()\n of = codecs.open(modfile, 'r', encoding='utf-8')\n for line in of:\n # Dictionary lines should be like:\n # /path/filename.suffix: mo_mod1 mo_mod2\n ll = line.rstrip().split(':')\n fname = ll[0]\n mods = ll[1].strip().split(' ')\n for m in mods:\n odict[m] = fname\n of.close()\n\n return odict", "def get_filename(target, mode, file_type):\n data = CyBootloaderMapParser.get_json(CY_BOOTLOADER_MAP)\n for json_target in data:\n if json_target.lower().strip() in target.lower().strip():\n for json_mode in data[json_target]:\n if mode == json_mode:\n return data[json_target][json_mode][file_type]\n return None", "def get_elf_class(afile):\n if afile in g_elf_class_db:\n return g_elf_class_db[afile]\n cmd = 'readelf -h ' + cmd_quote(afile) + ' | grep \"Class:\" || true'\n output = get_shell_cmd_output(cmd)\n tokens = output.split(\":\")\n elf_class = tokens[1].strip()\n verbose(afile + \" ELF Class is \" + elf_class, LEVEL_2)\n g_elf_class_db[afile] = elf_class\n return elf_class", "def read_class_names(class_file_name):\n names = {}\n with open(class_file_name, 'r') as f:\n for idx, name in enumerate(f):\n names[idx] = name.strip('\\n')\n return names", "def forges():\n\n forges = {}\n\n for forge_path in sorted(glob.glob(\"/opt/service/forge/*.yaml\")):\n if forge_path.split(\"/\")[-1] not in [\"fields.yaml\", \"values.yaml\"]:\n with open(forge_path, 
\"r\") as forge_file:\n forges[forge_path.split(\"/\")[-1].split(\".\")[0]] = yaml.safe_load(forge_file)[\"description\"]\n\n return forges", "def load_class_index(filename_class_index):\n class_dictionary = np.load(filename_class_index).item()\n return class_dictionary", "def build_basenames():\r\n dict = {}\r\n with open(STREETS_FILE) as file:\r\n for line in file:\r\n dict[line.strip()] = True\r\n return dict", "def _GetSymbolNameToFilename(build_directory):\n symbol_extractor.CheckLlvmNmExists()\n path = os.path.join(build_directory, 'obj')\n object_filenames = cyglog_to_orderfile.GetObjectFilenames(path)\n pool = multiprocessing.Pool()\n symbol_names_filename = zip(\n pool.map(symbol_extractor.SymbolNamesFromLlvmBitcodeFile,\n object_filenames),\n object_filenames)\n pool.close()\n result = {}\n for (symbol_names, filename) in symbol_names_filename:\n stripped_filename = filename[len(build_directory):]\n if stripped_filename.startswith('/obj/'):\n stripped_filename = stripped_filename[len('/obj/'):]\n for s in symbol_names:\n result[s] = stripped_filename\n return result", "def get_class_file_name(name):\n name = _strip_class_name(name)\n return name + FILE_EXTENSION", "def __load_class_representation(self, filename):\n\n # Reads in the reverse dictionary from the given file.\n with open(filename) as file:\n return json.load(file)", "def process_path(module_path):\n\n if module_path == 'numpy.ndarray':\n return 'StorageNumpy', 'hecuba.hnumpy'\n if module_path == 'StorageDict':\n return 'StorageDict', 'hecuba.hdict'\n last = 0\n for key, i in enumerate(module_path):\n if i == '.' and key > last:\n last = key\n module = module_path[:last]\n class_name = module_path[last + 1:]\n return class_name, module", "def metadata_name(filename):\n\tif test_hachoir_extension(filename):\n\t\tmetadata = metadata_for_file(filename)\n\t\tif metadata:\n\t\t\tdata = dict([\n\t\t\t\t(data.key, data.values[0].value)\n\t\t\t\tfor data in metadata\n\t\t\t\tif data.values\n\t\t\t\t])\n\t\telse:\n\t\t\tdata=None\n\telif test_3D_extension(filename):# 3D not in the extention \n\t\tdata = {'mime_type':'model'}\n\telse:\n\t\tdata=None\n\treturn data", "def _dct_key(self):\n return self.__class__.__module__ + '.' + self.__class__.__name__", "def for_popen(self):\n return {compat.filename_str(k): compat.filename_str(v) for k, v in self.items()}", "def _get_attribute_dict(self, attributes, classname=None):\n if attributes and isinstance(attributes, six.string_types):\n return {\n 'class': attributes\n }\n if not attributes:\n attributes = {}\n if not classname:\n classname = self.DEFAULT_CLASS_NAME\n attributes.setdefault('class', classname)\n return attributes", "def compute_products(self):\r\n src_to_classfiles = defaultdict(list)\r\n for pcd_entry in self.pcd_entries:\r\n srcfile = pcd_entry[1]\r\n # In the file classes are represented with slashes, not dots. 
E.g., com/foo/bar/Baz.\r\n src_to_classfiles[srcfile].append(pcd_entry[0] + '.class')\r\n return src_to_classfiles", "def loadFileNameByModel(self, inputDir):\n fileNames = walktree(inputDir)\n fileByModel = {}\n for file in fileNames:\n modelName = file.split('/')[-1]\n modelName = modelName.replace('.txt', '')\n fileByModel[modelName] = file\n return fileByModel", "def filemap(self) -> GQAFilemap:\n return self._filemap", "def get_names():\n\n #Initialize entities dictionary\n entities = {'entity': 'source_file'}\n\n # Construct the raw_directory path\n project_root = os.environ['PYTHONPATH']\n raw_directory = '{}/data/raw/'.format(project_root)\n \n for file in os.listdir(raw_directory):\n if file.endswith('.json'):\n \n # Construct the full file path\n full_path = '{}{}'.format(raw_directory, file)\n \n # Open each JSON file\n with open(full_path, 'r') as source_file:\n data = source_file.read()\n parsed_data = json.loads(data)\n \n # Iterate through the dictionary parsed_data\n for key in parsed_data:\n if 'SocialTag' in key:\n name = parsed_data[key]['name']\n entities.update({name: file})\n\n return entities", "def str_to_class(referance_name):\n return getattr(sys.modules[__name__], referance_name)", "def get_classes():\n file_name = 'imagenet_class_index.json'\n file_origin = os.path.join(FILE_PATH, file_name)\n file_path = get_file(file_name, file_origin, cache_subdir='models')\n with open(file_path) as f:\n class_dict = json.load(f)\n return [class_dict[str(i)][1] for i in range(len(class_dict))]", "def Extract_gene_type(gtf_file):\n gene_type_dic = {}\n for i in range(0,len(gtf_file)):\n if '##' not in gtf_file[i]:\n row = gtf_file[i].strip().split('\\t')\n if row[2] == 'transcript':\n trans_id = row[8].split('transcript_id \"')[1].split('\";')[0]\n #print trans_id\n gene_type_dic[trans_id] = row[8].split('transcript_type \"')[1].split('\";')[0]\n return gene_type_dic" ]
[ "0.56962377", "0.5674024", "0.5554048", "0.54324174", "0.53717715", "0.53676784", "0.53674424", "0.53652143", "0.5341359", "0.5310244", "0.5296541", "0.5288931", "0.5247922", "0.5225876", "0.52206045", "0.5177535", "0.51539", "0.5116209", "0.51110125", "0.5104048", "0.5101229", "0.50936145", "0.50840265", "0.5077131", "0.5066886", "0.5061728", "0.50347555", "0.50327027", "0.50277835", "0.50237256" ]
0.6105684
0
NewsList a model defined in Swagger
def __init__(self, news: List[News]=None): self.swagger_types = { 'news': List[News] } self.attribute_map = { 'news': 'news' } self._news = news
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_news_list():\r\n\tnews_list = Page.objects.filter(tags='news').order_by('-created')\r\n\treturn {'news_list': news_list}", "def news(self) -> List[News]:\n return self._news", "def newsList(request):\n\n news_count = New.objects.count() # Pocet vsech zaznamu novinek\n news_list = New.objects.all().order_by(\"date\") # Sort by date ... and only part of list\n # misto vsech zaznamu ziskat jen ty v intervalu start - stop -> API\n\n pictureOfWeek = PhotoOfWeek.objects.last()\n context = {'news_list': news_list, 'news_count': news_count, 'pictureOfWeek': pictureOfWeek}\n return render(request, 'news/newsList.html', context)", "def configure_news_api(self):\n self.news_api = newsapi.NewsApiClient(self.api_key)", "def news(self):\n\n # Get articles with search term, if available, from each News API source\n news_api_articles = pd.DataFrame()\n\n q = urllib.parse.quote(\" OR \".join(self.search_terms), safe='')\n\n response = requests.get(\"https://newsapi.org/v2/everything?q=\" + q + \"&from=\" + datetime.now().strftime(\n \"%Y-%m-%d\") + \"&sortBy=popularity&pageSize=100&apiKey=\" + self.__news_api_key)\n\n if response.status_code == 200:\n data = json.loads(response.text)\n\n source_articles = []\n\n for article in data['articles']:\n source_articles.append([article['title'],\n article['description'],\n article['url'],\n article['publishedAt']])\n\n source_articles = pd.DataFrame(source_articles, columns=['title', 'description', 'url', 'publishedAt'])\n news_api_articles = pd.concat([news_api_articles, source_articles])\n\n news_api_articles = news_api_articles.reset_index(drop='True')\n\n news_api_articles['publishedAt'] = news_api_articles['publishedAt'].apply(pd.to_datetime)\n\n news_api_articles = news_api_articles.fillna(' ')\n\n term_in_title = news_api_articles['title'].apply(self.any_term)\n\n news_api_articles = news_api_articles[term_in_title]\n\n if (len(news_api_articles) > 10):\n news_api_articles = news_api_articles[0:10]\n\n else:\n print(\"News API failed to return any items\")\n\n # Create shortened links using bitly if access token is provided\n if self.__bitly_access_token != '':\n\n bitly_urls = []\n\n for index, article in news_api_articles.iterrows():\n url = article['url']\n bitly_response = requests.get(\"https://api-ssl.bitly.com/v3/shorten\",\n params={'longUrl': url, 'access_token': self.__bitly_access_token})\n\n if bitly_response.status_code == 200:\n data = json.loads(bitly_response.text)\n bitly_urls.append(data['data']['url'])\n\n news_api_articles['url'] = bitly_urls\n\n # Store final list to TwitterBot object\n self.list = news_api_articles\n\n return", "async def news(self):\n url = f\"https://newsapi.org/v2/top-headlines?country=nz&apiKey={self.bot.news_api_key}\"\n async with ClientSession() as session:\n async with session.get(url) as response:\n r = await response.json()\n firstArticle = r[\"articles\"][0]\n nSource = firstArticle[\"source\"][\"name\"]\n nTitle = firstArticle[\"title\"]\n nTimestamp = firstArticle[\"publishedAt\"]\n embed = discord.Embed(\n title=f\"News Title: {nTitle}\", description=f\"News Source: {nSource}\"\n )\n embed.add_field(name=\"News Content\", value=firstArticle[\"description\"])\n embed.set_image(url=firstArticle[\"urlToImage\"])\n embed.set_footer(text=f\"News Timestamp: {nTimestamp}\")\n\n channel = self.bot.get_channel(self.bot.main_channel_id)\n await channel.send(embed=embed)", "def get(self):\n\n return {\"message\": \"Welcome to the news API. 
\"}", "def top_news():\n data = get_top_news()\n return jsonify(data)", "def get(self):\n return GlobalNews.retrieve()", "def get_news(request):\n return get_all_posts(request, PostType.NEWS)", "def news(self, news: List[News]):\n\n self._news = news", "def from_dict(cls, dikt) -> 'NewsList':\n return deserialize_model(dikt, cls)", "def GET(self, *args):\n all_news= self.get_all_news()\n all_news.sort( key=lambda n : n['date'], reverse=True)\n if len(args):\n n_last=int(args[0])\n all_news = all_news[:n_last]\n\n return json.dumps(all_news)", "def news_list(request, year=None, month=None, day=None):\n query = models.NewsItem.objects\n filter_ = Q()\n if year is not None:\n filter_ &= Q(pub_date__year=int(year))\n if month is not None:\n filter_ &= Q(pub_date__month=int(month))\n if day is not None:\n filter_ &= Q(pub_date__day=int(day))\n if filter_ is not None:\n query = query.filter(filter_)\n\n return render(request, 'news/list.html', {\n 'object_list': query,\n 'months': models.NewsItem.objects.dates('pub_date', 'month',\n order='DESC'),\n })", "def news(request, start_id):\n\n MAX_NEWS = 10\n end_id = string.atoi(start_id) + 10\n\n news_count = New.objects.count() # Pocet vsech zaznamu novinek\n news_list = New.objects.all().order_by(\"-date\")[start_id:end_id] # Sort by date ... and only part of list\n # misto vsech zaznamu ziskat jen ty v intervalu start - stop -> API\n\n # Vypocet prvniho ID z predchozi skupiny novinek (jedna skupina = MAX_NEWS) \n start_id_num = string.atoi(start_id)\n if (start_id_num + MAX_NEWS) < news_count:\n preview_start_id = start_id_num + MAX_NEWS\n else:\n preview_start_id = start_id_num\n\n # Vypocet prvniho ID z nasledujici skupiny novinek (jedna skupina = MAX_NEWS) \n next_start_id = start_id_num - MAX_NEWS # prvni ID nasledujicich novinek\n if next_start_id < 0:\n next_start_id = 0;\n\n pictureOfWeek = PhotoOfWeek.objects.last()\n context = {'news_list': news_list, 'news_count': news_count, 'pictureOfWeek': pictureOfWeek, 'start_id': start_id,\n 'preview_start_id': preview_start_id, 'next_start_id': next_start_id}\n return render(request, 'news/news.html', context)", "def all_news(request):\n\n all_news = News.objects.all().order_by(\"-date_added\")\n context = {\n 'news': all_news,\n 'show_without_bag': True\n }\n return render(request, 'news/news.html', context)", "def list(self, request):\n a_viewset = [\n 'uses actions (list,create,retreive,update,partial_update)',\n 'Automatically maps to URLs using routers',\n 'provides more functionality with less code',\n ]\n return Response({'message': 'Hello!', 'a_viewset': a_viewset})", "def list_ticker_news(\n self,\n ticker: Optional[str] = None,\n ticker_lt: Optional[str] = None,\n ticker_lte: Optional[str] = None,\n ticker_gt: Optional[str] = None,\n ticker_gte: Optional[str] = None,\n published_utc: Optional[str] = None,\n published_utc_lt: Optional[str] = None,\n published_utc_lte: Optional[str] = None,\n published_utc_gt: Optional[str] = None,\n published_utc_gte: Optional[str] = None,\n limit: Optional[int] = None,\n sort: Optional[Union[str, Sort]] = None,\n order: Optional[Union[str, Order]] = None,\n params: Optional[Dict[str, Any]] = None,\n raw: bool = False,\n options: Optional[RequestOptionBuilder] = None,\n ) -> Union[Iterator[TickerNews], HTTPResponse]:\n url = \"/v2/reference/news\"\n\n return self._paginate(\n path=url,\n params=self._get_params(self.list_ticker_news, locals()),\n raw=raw,\n deserializer=TickerNews.from_dict,\n options=options,\n )", "def news():\n\n # ensure parameters 
are present\n # geo = request.args.get(\"geo\")\n geo = '95060'\n if not geo:\n raise RuntimeError(\"missing geo\")\n\n # lookup articles and store them as JSON array\n article_list = lookup(geo)\n\n # TODO\n print(article_list)\n news = jsonify(article_list) \n print(news)\n # return render_template(\"index.html\")\n return article_list", "def list(self, request):\n queryset = Article.objects.all()\n serializer_context = {'request': request}\n page = self.paginate_queryset(queryset)\n serializer = self.serializer_class(\n page,\n context=serializer_context,\n many=True\n )\n output = self.get_paginated_response(serializer.data)\n return output", "def news(request):\n articles = News.objects.all()\n return render(request, 'news.html', {\"articles\": articles})", "def articleList():\n articles = get_news(\n 5, since=news.YESTERDAY.strftime(\"%yyyy-%mm-%dd\"), query=\"covid\"\n )\n title_list = []\n desc_list = []\n url_list = []\n image_list = []\n source_list = []\n for art in articles:\n image_list.append(art.image)\n title_list.append(art.title)\n source_list.append(art.source)\n desc_list.append(art.description)\n url_list.append(art.url)\n socketio.emit(\n ARTICLE,\n {\n \"title\": title_list,\n \"desc\": desc_list,\n \"url\": url_list,\n \"img\": image_list,\n \"sources\": source_list,\n },\n )\n return True", "def list_cmd(ctx):\n client = ctx.obj['CLIENT']\n models = client.list_models()\n\n x = PrettyTable()\n x.field_names = [\"Name\",\"Tag\",\"Created\"]\n for m in models:\n x.add_row([m[\"name\"],m[\"tag\"],m[\"uploaded_at\"]])\n print(x)", "def news():\r\n with open('config.json', 'r') as cfile:\r\n config = json.load(cfile)\r\n news_api_key = config[\"news_api_key\"]\r\n response = requests.get(\"https://newsapi.org/v2/top-headlines?\"\r\n \"sources=bbc-news&apiKey=\" + news_api_key)\r\n resp_json = response.json()\r\n with open(\"news.json\", 'w') as file:\r\n json.dump(resp_json, file)\r\n file.close()", "def user_view_list_data():\n video = VideoFactory()\n collection = video.collection\n moira_list = factories.MoiraListFactory()\n collection.view_lists.set([moira_list])\n return SimpleNamespace(video=video, moira_list=moira_list, collection=collection)", "def get_one_news(self): # pylint: disable=no-self-use\n return operations.get_one_news()", "def show_list():\n\n response = []\n docs = SUPERHEROES.stream()\n for doc in docs:\n response.append(doc.to_dict())\n return jsonify(response), 201", "def list(self, request):\n\n a_viewset = [\n 'Uses action (list, create, reteieve, update, partial_update)',\n 'Automatically maps the urls using routers',\n 'provide more functionality with less code',\n ]\n\n return Response({'message': 'Hello', 'a_viewset': a_viewset})", "def json_news(request):\n if request.method == 'GET':\n feed = request.GET['feed']\n return JsonResponse(\n {\n 'news': get_news(feed),\n }\n )", "def fetch_news(n):\n\n # This is the list we will use the pass back the news information.\n data = []\n\n # Get news stories from the MEN RSS feed.\n response = feedparser.parse('https://www.manchestereveningnews.co.uk/?service=rss')\n\n # Loop through the news items, and the pull out the data we need.\n for news in response.entries[:n]:\n data.append({\n 'headline': news.title,\n 'content': news.description,\n })\n\n return data" ]
[ "0.67651105", "0.6221242", "0.6082616", "0.60183597", "0.60091746", "0.59793127", "0.59738564", "0.5931639", "0.58304185", "0.5824736", "0.58020616", "0.57277834", "0.5725878", "0.5699052", "0.5653614", "0.5636298", "0.55957717", "0.5594664", "0.55653083", "0.55634767", "0.55420387", "0.5533317", "0.55239606", "0.5505271", "0.55011857", "0.5500553", "0.549321", "0.5489157", "0.5461236", "0.5419514" ]
0.79788524
0
Gets the news of this NewsList. List of all sites.
def news(self) -> List[News]: return self._news
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self):\n return GlobalNews.retrieve()", "def get_local_news_items(self):\n catalog = api.portal.get_tool(name='portal_catalog')\n default_lang = api.portal.get_tool(\n \"portal_languages\").getDefaultLanguage()\n results = catalog.searchResults(\n portal_type=\"News Item\",\n sort_on=\"effective\",\n sort_order=\"descending\",\n review_state='published',\n Language=[default_lang, ''],\n )\n return results", "def get_one_news(self): # pylint: disable=no-self-use\n return operations.get_one_news()", "def get_list_of_sites(self):\n\n return self.site_db_obj.get_list_of_sites()", "def get_news(self, news_api_key, company):\r\n news_params = {\r\n \"q\": company,\r\n \"apiKey\": news_api_key\r\n }\r\n\r\n news_resp = requests.get(NEWS_API_ENDPOINT, params=news_params)\r\n news_resp.raise_for_status()\r\n news_data = news_resp.json()\r\n return news_data", "def get_news(self):\n if self.api_key_entry.get() == \"\":\n return None\n api = nac(api_key=self.api_key_entry.get())\n now = datetime.datetime.utcnow()\n two_weeks = (now-datetime.timedelta(days=14))\n #today = now.strftime()\n query = \"\"\n for cat in self.sorted_categories():\n query += f\"{cat},\"\n search = api.get_top_headlines(q=query,\n sources=\"bbc-news,the-verge\",\n language=\"en\")\n news = \"\"\n for article in search[\"articles\"]:\n news += f\"{search['articles'][article]['title']}\\n\"\n self.news_box.delete('1.0', tk.END)\n self.news_box.insert('1.0', news)", "async def getofficialnews(self, appID: int = None) -> typing.List:\n appID = appID if appID is not None else self.appID\n\n news = await SteamNewsPost.asyncgetnewsforapp(\n appID=appID, count=15, maxlength=600\n )\n logging.info(f\"{len(news)} {self._parsername} post(s) returned by Steam's API\")\n officialnews = [\n item for item in news if self.RLnewsfilter(item, self.psyonixstaff)\n ]\n\n logging.info(f\"Found {len(officialnews)} official {self._parsername} post(s)\")\n return officialnews", "def fetch_news(n):\n\n # This is the list we will use the pass back the news information.\n data = []\n\n # Get news stories from the MEN RSS feed.\n response = feedparser.parse('https://www.manchestereveningnews.co.uk/?service=rss')\n\n # Loop through the news items, and the pull out the data we need.\n for news in response.entries[:n]:\n data.append({\n 'headline': news.title,\n 'content': news.description,\n })\n\n return data", "def get_news(request):\n return get_all_posts(request, PostType.NEWS)", "def news(self):\n\n # Get articles with search term, if available, from each News API source\n news_api_articles = pd.DataFrame()\n\n q = urllib.parse.quote(\" OR \".join(self.search_terms), safe='')\n\n response = requests.get(\"https://newsapi.org/v2/everything?q=\" + q + \"&from=\" + datetime.now().strftime(\n \"%Y-%m-%d\") + \"&sortBy=popularity&pageSize=100&apiKey=\" + self.__news_api_key)\n\n if response.status_code == 200:\n data = json.loads(response.text)\n\n source_articles = []\n\n for article in data['articles']:\n source_articles.append([article['title'],\n article['description'],\n article['url'],\n article['publishedAt']])\n\n source_articles = pd.DataFrame(source_articles, columns=['title', 'description', 'url', 'publishedAt'])\n news_api_articles = pd.concat([news_api_articles, source_articles])\n\n news_api_articles = news_api_articles.reset_index(drop='True')\n\n news_api_articles['publishedAt'] = news_api_articles['publishedAt'].apply(pd.to_datetime)\n\n news_api_articles = news_api_articles.fillna(' ')\n\n term_in_title = 
news_api_articles['title'].apply(self.any_term)\n\n news_api_articles = news_api_articles[term_in_title]\n\n if (len(news_api_articles) > 10):\n news_api_articles = news_api_articles[0:10]\n\n else:\n print(\"News API failed to return any items\")\n\n # Create shortened links using bitly if access token is provided\n if self.__bitly_access_token != '':\n\n bitly_urls = []\n\n for index, article in news_api_articles.iterrows():\n url = article['url']\n bitly_response = requests.get(\"https://api-ssl.bitly.com/v3/shorten\",\n params={'longUrl': url, 'access_token': self.__bitly_access_token})\n\n if bitly_response.status_code == 200:\n data = json.loads(bitly_response.text)\n bitly_urls.append(data['data']['url'])\n\n news_api_articles['url'] = bitly_urls\n\n # Store final list to TwitterBot object\n self.list = news_api_articles\n\n return", "async def news(self):\n url = f\"https://newsapi.org/v2/top-headlines?country=nz&apiKey={self.bot.news_api_key}\"\n async with ClientSession() as session:\n async with session.get(url) as response:\n r = await response.json()\n firstArticle = r[\"articles\"][0]\n nSource = firstArticle[\"source\"][\"name\"]\n nTitle = firstArticle[\"title\"]\n nTimestamp = firstArticle[\"publishedAt\"]\n embed = discord.Embed(\n title=f\"News Title: {nTitle}\", description=f\"News Source: {nSource}\"\n )\n embed.add_field(name=\"News Content\", value=firstArticle[\"description\"])\n embed.set_image(url=firstArticle[\"urlToImage\"])\n embed.set_footer(text=f\"News Timestamp: {nTimestamp}\")\n\n channel = self.bot.get_channel(self.bot.main_channel_id)\n await channel.send(embed=embed)", "def show_news_list():\r\n\tnews_list = Page.objects.filter(tags='news').order_by('-created')\r\n\treturn {'news_list': news_list}", "def select_news(self):\n data = self.soup.findAll('item')\n for item in data:\n news_data = dict()\n for tag in ['title', 'link']:\n news_data[tag] = item.find(tag).get_text()\n\n news_data['pubDate'] = parse(item.find('pubDate').get_text())\n media = item.find('media:content')\n\n if media:\n news_data['media'] = media.get('url')\n else:\n news_data['media'] = None\n\n yield news_data", "def Sites(self):\n if self._sites is None or len(self._sites) == 0:\n return None\n return self._sites", "def get_news(keywords, news='all'):\n if news is 'all':\n return news_client.get_everything(q=keywords)\n elif news is 'top':\n return news_client.get_top_headlines(q=keywords)\n else:\n raise AttributeError(\"Optional argument news expected 'top' or 'all'\")", "def get_news(rss_feed):\r\n\r\n class _CurrentData(object):\r\n \"\"\"Class holding a set of current attributes.\"\"\"\r\n item = None\r\n text = None\r\n\r\n def _start_element_handler(name, attrs):\r\n \"\"\"Handle XML start-elements.\"\"\"\r\n if name == 'item':\r\n # Allocate a new item.\r\n current.item = NewsItem()\r\n\r\n def _end_element_handler(name):\r\n \"\"\"Handle XML end-elements.\"\"\"\r\n if name == 'item':\r\n news_items.append(current.item)\r\n elif name in ('title', 'description', 'link', 'category'):\r\n try:\r\n setattr(current.item, name, current.text)\r\n except AttributeError:\r\n # The parser has run into a non-news item.\r\n pass\r\n\r\n def _char_data_handler(data):\r\n \"\"\"Handle XML element character data.\"\"\"\r\n current.text = data\r\n\r\n news_items = list()\r\n current = _CurrentData()\r\n\r\n parser = expat.ParserCreate()\r\n parser.StartElementHandler = _start_element_handler\r\n parser.EndElementHandler = _end_element_handler\r\n parser.CharacterDataHandler = 
_char_data_handler\r\n\r\n news_handle = urllib2.urlopen(rss_feed)\r\n xml_data = news_handle.read()\r\n \r\n parser.Parse(xml_data)\r\n\r\n return news_items", "def get_remote_news_items(self):\n items = []\n params = {\n \"base_url\": self.osha_json_url,\n \"lang\": api.portal.get_tool(\"portal_languages\").getPreferredLanguage(),\n \"query_tags\": self.remote_news_query_tags,\n }\n qurl = \"{base_url}/{lang}/services/hw/news/{query_tags}\".format(**params)\n result = urlopen(qurl)\n if result.code == 200:\n json = load(result)\n for node in json.get(\"nodes\"):\n item = node.get(\"node\")\n pd = item.get('publication_date', '')\n items.append({\n 'remote_item': True,\n 'Title': item['title'],\n 'Date': (\n pd and DateTime(pd, datefmt=\"international\").strftime(\n \"%Y/%m/%d %H:%M\") or \"\"),\n 'getURL': item.get('path'),\n 'path': item.get('path'),\n 'Description': item.get('summary', '') or item.get('body', ''),\n 'text': item.get('summary', '') and item.get('body', '') or '',\n 'remote_image': item.get('image', ''),\n 'node_id': item.get('nid'),\n })\n return items", "def sites(self):\n return self._sites", "def sites(self):\n return self._sites", "def get_news(company_name: str) -> list[dict]:\n news_params = {\n \"q\": company_name,\n \"apiKey\": config.NEWS_API_KEY\n }\n response = requests.get(\"https://newsapi.org/v2/everything\", params=news_params)\n response.raise_for_status()\n news_data = response.json()\n return news_data[\"articles\"][:3]", "def sites(self):\n return self.data.sites.values", "def iter_sites(self):\n return iter(self.site_list)", "def find_all(client):\n return list(map(lambda s: Site(s), client.get_api_resource(\"self/sites\")))", "def _sites(self):\n return self.properties.get('sites',\n SitePropertiesCollection(self.context, ResourcePath(\"sites\", self.resource_path)))", "def get_newsletters(self):\n return SubscriptionNewsletter.objects.filter(contact=self)", "def get_list_of_sites(self) -> list:\n ah_write = self.get_iis_object()\n section = ah_write.GetAdminSection(\"system.applicationHost/sites\", \"MACHINE/WEBROOT/APPHOST\")\n collection = section.Collection\n result = []\n\n for i in range(collection.Count):\n\n site = collection[i]\n prop = site.Properties\n # site_id = prop[\"id\"].Value\n name = prop[\"name\"].Value\n default_app = self.get_default_app(site)\n bindings = self.get_site_bindings(site.ChildElements)\n applications = self.get_applications(site)\n if default_app and not os.path.exists(self.core.expandvars(default_app[\"physicalPath\"])):\n # не показывать сайты для которых нет физ. директории для иис экспреса\n continue\n site = Site(name, bindings, default_app, applications)\n if hasattr(site, 'port') and site.port != 0:\n result.append(site)\n\n return result", "def get(cls):\n SiteModel.check_dates()\n _user_id = get_jwt_identity()\n\n # return all sites as json\n return {'sites': [x.json() for x in SiteModel.query.filter_by(\n user_id=_user_id).all()]}, 200", "def __get_local_g1_news(soup):\n news = []\n anchors = soup.find_all(\n 'a', class_='feed-post-link gui-color-primary gui-color-hover')\n\n for a in anchors:\n title = a.string\n link = a['href']\n news.append(dict(title=title, link=link))\n return news", "def get_news_feed(self):\n try:\n news_feed_url = self.base_url + \"/actualite-en-continu/\"\n except:\n news_feed_url = None\n return news_feed_url", "def sites(self):\n return self.properties.get('sites',\n SiteCollection(self.context, ResourcePath(\"sites\", self.resource_path)))" ]
[ "0.707582", "0.64522296", "0.6345483", "0.6317189", "0.6273085", "0.62567925", "0.621038", "0.61679095", "0.61474234", "0.6131414", "0.59921557", "0.59887254", "0.5983498", "0.59675586", "0.59466046", "0.5912889", "0.5902706", "0.5902237", "0.5902237", "0.58978075", "0.58638406", "0.5836537", "0.58254874", "0.5824435", "0.58072793", "0.57958436", "0.5782693", "0.5771183", "0.57648855", "0.57297564" ]
0.7832009
0
Sets the news of this NewsList. List of all sites.
def news(self, news: List[News]): self._news = news
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def news(self) -> List[News]:\n return self._news", "def insert_into_news_pool(self, news):\n self.news_pool.append(news)", "def __init__(self, news: List[News]=None):\n self.swagger_types = {\n 'news': List[News]\n }\n\n self.attribute_map = {\n 'news': 'news'\n }\n\n self._news = news", "def updateNewsFeed(self):\n try:\n news, events, categories, eventCategories = self.requestData()\n for language in NEWSFEED_LANGUAGES:\n self.newsFeedModel.update(news[language], events[language], categories[language],\n eventCategories[language], language=language)\n except Exception as e:\n print(\"there was a problem while updating the news feed\")\n raise e", "def websites(self, websites):\n\n self._websites = websites", "def update_news_to_model(self):\n internet = 1\n news_page_url_base = 'https://news.mydrivers.com/update/'\n url_to_get_list = []\n if self.dates_to_get[0] == 'Date entered error':\n self.show_in_browser.emit(f'Date entered error: {self.dates_to_get[1]}')\n self.show_in_browser.emit('Please check the entered date.')\n self.thread.quit()\n return\n for date_to_get in self.dates_to_get:\n if pd.to_datetime(date_to_get).date() > (datetime.date.today() + datetime.timedelta(days=1)):\n break\n url_to_get = news_page_url_base + date_to_get + '_1.htm'\n url_to_get_list.append(url_to_get)\n if url_to_get_list == []:\n self.show_in_browser.emit(f'Date entered error: {self.dates_to_get}')\n self.show_in_browser.emit('Please check the entered date.')\n self.thread.quit()\n return\n\n def get_url_content(url_to_get):\n try:\n r = requests.get(url_to_get)\n except BaseException as e:\n print('Cannot establish Internet connection with server.\\n', e)\n self.show_in_browser.emit('Cannot establish Internet connection with server.\\n')\n self.show_in_browser.emit(str(e))\n nonlocal internet\n internet = 0\n return\n status_code = r.status_code\n if status_code == 200:\n bs = BeautifulSoup(r.content, 'html.parser')\n news_pages_list = bs.select('#newsleft > div > a')\n news_summary_list = bs.select('#newsleft > li')\n else:\n news_pages_list = []\n news_summary_list = []\n return status_code, news_pages_list, news_summary_list\n\n def extract_news_pages(news_pages_list: list) -> list:\n news_page_url_list = []\n for item in news_pages_list:\n news_page_url_tail = item.get('href')\n if item.text.isnumeric() and news_page_url_tail:\n news_page_url = news_page_url_base + news_page_url_tail\n news_page_url_list.append(news_page_url)\n return news_page_url_list\n\n def extract_news_summary(news_summary_list: list):\n for item in news_summary_list:\n news_title = item.find('h3').find('a').text\n news_link = 'https:' + item.find('h3').find('a').get('href')\n news_author = item.find(class_=\"newstiao4\").text\n news_datetime_text = item.find(class_=\"news_plun hui2\") \\\n .find('li').text\n match = re.search('\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}',\n news_datetime_text)\n news_datetime = pd.to_datetime(match.group())\n # Naive Bayes classifier\n seg_list = jieba.cut(news_title)\n p_news_like_product = 1 # p_1*p_2*...*p_n\n p_news_like_1_product = 1 # (1-p_1)*(1-p_2)*...*(1-p_n)\n p_news_nolike_product = 1\n p_news_nolike_1_product = 1\n for word in seg_list:\n sql_query = f\"SELECT * FROM words WHERE word = '{word}';\"\n rows = self.conn.execute(sql_query).fetchall()\n if rows == []:\n p_word_like = 0.4\n p_word_nolike = 0.4\n else:\n p_word_like = rows[0][1] / rows[0][3]\n p_word_nolike = rows[0][2] / rows[0][3]\n p_news_like_product = p_news_like_product * p_word_like\n p_news_like_1_product = 
p_news_like_1_product * (1 - p_word_like)\n p_news_nolike_product = p_news_nolike_product * p_word_nolike\n p_news_nolike_1_product = p_news_nolike_1_product * (1 - p_word_nolike)\n p_news_like = 100 * p_news_like_product * self.p_like / (\n p_news_like_product * self.p_like + p_news_like_1_product * (1 - self.p_like))\n p_news_nolike = 100 * p_news_nolike_product * self.p_nolike / (\n p_news_nolike_product * self.p_nolike + p_news_nolike_1_product * (1 - self.p_nolike))\n p_news_like = round(p_news_like, 2)\n p_news_nolike = round(p_news_nolike, 2)\n\n self.news_df = self.news_df.append(\n pd.DataFrame([[news_datetime,\n news_title,\n news_link,\n news_author,\n p_news_like,\n p_news_nolike]],\n columns=self.news_df.columns),\n ignore_index=True)\n\n self.news_df = pd.DataFrame({'datetime': [], 'title': [], 'link': [],\n 'author': [], 'like': [], 'no_like': []})\n\n for url_to_get in url_to_get_list:\n try:\n status_code, news_pages_list, news_summary_list = \\\n get_url_content(url_to_get)\n except:\n break\n if status_code == 200:\n print(url_to_get, ' loaded.')\n self.show_in_browser.emit(url_to_get + ' loaded.')\n extract_news_summary(news_summary_list)\n else:\n print(url_to_get, 'is not available. Status Code:', status_code)\n self.show_in_browser.emit(url_to_get + ' is not available. Status Code: ' + str(status_code))\n continue\n\n for news_page_url in extract_news_pages(news_pages_list):\n status_code, news_pages_list, news_summary_list = \\\n get_url_content(news_page_url)\n if status_code == 200:\n extract_news_summary(news_summary_list)\n print(news_page_url, ' loaded.')\n self.show_in_browser.emit(news_page_url + ' loaded.')\n else:\n print(url_to_get, 'is not available. Status Code:',\n status_code)\n self.show_in_browser.emit(url_to_get + ' is not available. 
Status Code: ' + str(status_code))\n continue\n\n if internet == 0:\n self.thread.quit()\n return\n\n if (pd.to_datetime(self.dates_to_get[0]).date() == self.news_latest_datetime.date()):\n self.news_df.drop(self.news_df[self.news_df.datetime < self.news_latest_datetime].index,\n inplace=True)\n try:\n for link in self.news_latest_link:\n self.news_df.drop(self.news_df[self.news_df.link == link].index, inplace=True)\n except BaseException as e:\n print(e)\n else:\n sql_query = \"\"\"SELECT datetime FROM news_unread\"\"\"\n tmp_news_datetime = pd.read_sql_query(sql_query, self.conn,\n parse_dates='datetime')\n try: # Raise error when news_read is empty.\n sql_query = \"\"\"SELECT datetime FROM news_read\"\"\"\n tmp2_news_datetime = pd.read_sql_query(sql_query, self.conn,\n parse_dates='datetime')\n tmp_news_datetime = tmp_news_datetime.append(tmp2_news_datetime, ignore_index=True)\n del tmp2_news_datetime\n except:\n pass\n tmp_news_datetime.datetime = tmp_news_datetime.datetime.apply(lambda x: x.date())\n for date in self.dates_to_get:\n date = pd.to_datetime(date).date()\n if (tmp_news_datetime.datetime == date).any():\n sql_query = \"\"\"SELECT link FROM news_unread\"\"\"\n links = pd.read_sql_query(sql_query, self.conn)\n try:\n sql_query = \"\"\"SELECT link FROM news_read\"\"\"\n links2 = pd.read_sql_query(sql_query, self.conn)\n links = links.append(links2, ignore_index=True)\n except:\n pass\n links = links.link.tolist()\n self.news_df = self.news_df.drop(self.news_df[self.news_df.link.isin(links)].index)\n break\n\n self.news_df.sort_values('datetime', inplace=True)\n self.news_df.to_sql('news_unread', self.conn, index=False,\n if_exists='append')\n sql_query = \"\"\"\n SELECT * FROM news_unread ORDER BY datetime ASC\n \"\"\"\n self._data = pd.read_sql_query(sql_query, self.conn,\n parse_dates='datetime')\n self._data['datetime'] = self._data['datetime'].dt.strftime('%m-%d %H:%M')\n self.update_finished.emit()", "def news(self):\n\n # Get articles with search term, if available, from each News API source\n news_api_articles = pd.DataFrame()\n\n q = urllib.parse.quote(\" OR \".join(self.search_terms), safe='')\n\n response = requests.get(\"https://newsapi.org/v2/everything?q=\" + q + \"&from=\" + datetime.now().strftime(\n \"%Y-%m-%d\") + \"&sortBy=popularity&pageSize=100&apiKey=\" + self.__news_api_key)\n\n if response.status_code == 200:\n data = json.loads(response.text)\n\n source_articles = []\n\n for article in data['articles']:\n source_articles.append([article['title'],\n article['description'],\n article['url'],\n article['publishedAt']])\n\n source_articles = pd.DataFrame(source_articles, columns=['title', 'description', 'url', 'publishedAt'])\n news_api_articles = pd.concat([news_api_articles, source_articles])\n\n news_api_articles = news_api_articles.reset_index(drop='True')\n\n news_api_articles['publishedAt'] = news_api_articles['publishedAt'].apply(pd.to_datetime)\n\n news_api_articles = news_api_articles.fillna(' ')\n\n term_in_title = news_api_articles['title'].apply(self.any_term)\n\n news_api_articles = news_api_articles[term_in_title]\n\n if (len(news_api_articles) > 10):\n news_api_articles = news_api_articles[0:10]\n\n else:\n print(\"News API failed to return any items\")\n\n # Create shortened links using bitly if access token is provided\n if self.__bitly_access_token != '':\n\n bitly_urls = []\n\n for index, article in news_api_articles.iterrows():\n url = article['url']\n bitly_response = requests.get(\"https://api-ssl.bitly.com/v3/shorten\",\n 
params={'longUrl': url, 'access_token': self.__bitly_access_token})\n\n if bitly_response.status_code == 200:\n data = json.loads(bitly_response.text)\n bitly_urls.append(data['data']['url'])\n\n news_api_articles['url'] = bitly_urls\n\n # Store final list to TwitterBot object\n self.list = news_api_articles\n\n return", "def select_news(self):\n data = self.soup.findAll('item')\n for item in data:\n news_data = dict()\n for tag in ['title', 'link']:\n news_data[tag] = item.find(tag).get_text()\n\n news_data['pubDate'] = parse(item.find('pubDate').get_text())\n media = item.find('media:content')\n\n if media:\n news_data['media'] = media.get('url')\n else:\n news_data['media'] = None\n\n yield news_data", "def setSiteids(self):\n self.siteids = []\n for site in self.sites:\n self.siteids.append(site.siteid)", "def create_and_update_menu(self, list_of_news):\n self.create_menu(list_of_news)", "def load_sites(self, site_list: list = None):\n try:\n sites = self.api.get(host=self.host, endpoint=f\"/api/v1/orgs/{self.oid}/sites\")\n except Exception as e:\n logger.error(f\"{TextColors.FAIL}Error getting org sites:{TextColors.ENDC} {e}\")\n raise e\n if site_list:\n sites = [s for s in sites if s['name'] in site_list]\n self.sites = sites", "async def news(self):\n url = f\"https://newsapi.org/v2/top-headlines?country=nz&apiKey={self.bot.news_api_key}\"\n async with ClientSession() as session:\n async with session.get(url) as response:\n r = await response.json()\n firstArticle = r[\"articles\"][0]\n nSource = firstArticle[\"source\"][\"name\"]\n nTitle = firstArticle[\"title\"]\n nTimestamp = firstArticle[\"publishedAt\"]\n embed = discord.Embed(\n title=f\"News Title: {nTitle}\", description=f\"News Source: {nSource}\"\n )\n embed.add_field(name=\"News Content\", value=firstArticle[\"description\"])\n embed.set_image(url=firstArticle[\"urlToImage\"])\n embed.set_footer(text=f\"News Timestamp: {nTimestamp}\")\n\n channel = self.bot.get_channel(self.bot.main_channel_id)\n await channel.send(embed=embed)", "def update(system):\n session = system.db_session.create_session()\n for sett in session.query(system.db_session.Settings).filter(\n system.db_session.Settings.name == 'news'):\n session.delete(sett)\n session.commit()\n system.module_news = set()\n apply_news(system.module_news)", "def urls(self, urls):\n\n self._urls = urls", "def store_news(list_of_news, connection, url):\n cursor = connection.cursor()\n list_of_values = []\n for item in list_of_news:\n new_date = item.date.strftime('%Y%m%d')\n new_article = [item.title, item.link, item.date, new_date, item.source, item.description, item.image]\n list_of_values.append(new_article)\n\n sql = \"INSERT OR REPLACE INTO news VALUES (?, ?, ?, ?, ?, ?, ?, ?)\"\n cursor.execute(sql, (new_article[0], new_article[1], new_article[2], new_article[3], new_article[4],\n new_article[5], new_article[6], url))\n connection.commit()", "def index_news_articles(self):\n # Get the RSS feed\n print('Fetching the RSS feed')\n item_list = rss_fetch.get_all_feed_urls(self.rss_url_file)\n # Index all the feed items into ES\n print('Going to index {0} news articles...'.format(len(item_list)))\n drop_count=0\n for item in item_list:\n try:\n # Use item specific id while indexing to avoid duplication\n self.es.index(index=self.index, doc_type=self.doc_type, id=item['id'], body=item)\n except KeyError:\n drop_count += 1\n traceback.print_exc()\n except elasticsearch.exceptions.RequestError:\n drop_count += 1\n traceback.print_exc()\n\n print('Indexed {0} Dropped 
{1}'.format(len(item_list)-drop_count, drop_count))\n print('Current index size {0}'.format(self.get_index_size()))", "def parse(self, response):\n \n response.selector.register_namespace('n', 'http://www.sitemaps.org/schemas/sitemap/0.9')\n news_urls = response.xpath(\"//n:url/n:loc/text()\").extract()\n for url in news_urls:\n yield Request(url, callback = self.parse_news)", "def parse_news(news):\n default_value = '---'\n\n news_list = []\n for entry in news:\n title = entry.get('title', default_value)\n link = entry.get('link', default_value)\n published = entry.get('published', default_value)\n source = entry.get('source', default_value)\n description = entry.get('description', default_value)\n media_content = entry.get('media_content', default_value)\n\n source_title = default_value\n if source != default_value:\n source_title = source['title']\n\n image = default_value\n if media_content != image:\n image = media_content[0]['url']\n\n article = Article(title, link, published, source_title, description, image)\n news_list.append(article)\n\n return news_list", "def get_local_news_items(self):\n catalog = api.portal.get_tool(name='portal_catalog')\n default_lang = api.portal.get_tool(\n \"portal_languages\").getDefaultLanguage()\n results = catalog.searchResults(\n portal_type=\"News Item\",\n sort_on=\"effective\",\n sort_order=\"descending\",\n review_state='published',\n Language=[default_lang, ''],\n )\n return results", "def __local_se(soup):\n news = []\n ns = get_ns('localSE')\n\n anchors = soup.find('div', class_='coluna3 bordaTopoCinza').find_all('a')\n\n for a in anchors:\n title = a.string\n link = ns.url + a['href']\n news.append(dict(title=title, link=link))\n return news", "def __local_se(soup):\n news = []\n ns = get_ns('localSE')\n\n anchors = soup.find('div', class_='coluna3 bordaTopoCinza').find_all('a')\n\n for a in anchors:\n title = a.string\n link = ns.url + a['href']\n news.append(dict(title=title, link=link))\n return news", "def links(self, links):\n self._links = links", "def hosts(self, hosts):\n self._hosts = hosts", "def get_article_webpage_list(self, news_feed_webpage):\n url_list = list()\n # Use HTML parser to extract appropriates urls\n lemonde_parser = LeMondeHTMLParser()\n lemonde_parser.feed(news_feed_webpage)\n partial_url_list = lemonde_parser.links\n\n\n # add the base url of the website if not present in the article url\n for url in partial_url_list:\n if not 'http' in url:\n url_list.append(self.base_url + url)\n else:\n url_list.append(url)\n\n return url_list", "def fetch_news(n):\n\n # This is the list we will use the pass back the news information.\n data = []\n\n # Get news stories from the MEN RSS feed.\n response = feedparser.parse('https://www.manchestereveningnews.co.uk/?service=rss')\n\n # Loop through the news items, and the pull out the data we need.\n for news in response.entries[:n]:\n data.append({\n 'headline': news.title,\n 'content': news.description,\n })\n\n return data", "def parse_news(self, response):\n \n loader = NewsLoader(item=NewsItem(), response=response)\n loader.add_xpath('title', '//header//h1/text()')\n author = ''.join(response.xpath('//span[@class=\"byline\"]').extract())\n author = remove_tags(author).replace(\"by\", '').replace(' and ', ', ')\n loader.add_value('author', author)\n timestamp = response.xpath('//meta[@name=\"DC.date.issued\"][1]/@content').extract()[0]\n timestamp = du.normalize_timestamp(timestamp)\n loader.add_value('date', timestamp.split(' ')[0])\n loader.add_value('time', 
timestamp.split(' ')[1])\n list_of_contents = response.xpath(\n '//div[@id=\"storytext\"]/*[not(@class=\"cnnplayer\") and '\n 'not(@class=\"storytimestamp\")]').extract()\n content = ' '.join(list_of_contents)\n loader.add_value('content', content)\n loader.add_xpath('tags', '//meta[@name=\"keywords\"]/@content')\n return loader.load_item()", "def analyze_news(self, soup_news) -> list:\n logging.info(\"analyzing news collected\")\n analyzed_news = []\n for news in soup_news:\n main_column = news.find(\"div\", {\"class\": \"column size-3-4 column-main\"})\n tag_list = main_column.find_all(\"div\", {\"class\": re.compile(r\"etiqueta etiqueta-[0-9]\")})\n fake = self.__is_fake(tag_list)\n if fake:\n analyzed_news.append(main_column)\n\n return analyzed_news", "def pos_process_news(cls):\n # get from db\n df = Database.get_all_crawled_news(Database.db_news)\n\n # erase news without title or text\n df = df.loc[df['news_site_title'] != '']\n df = df.loc[df['news_site_text'] != '']\n\n # erase number words title\n df['title_len'] = df.apply(lambda row: Utils.count_words(row['news_site_title']), axis=1)\n df = df.loc[df['title_len'] > 3]\n\n # sort by date so that we keep the last news\n df['arquivo_date'] = pd.to_datetime(df.arquivo_date)\n df = df.sort_values(by=['arquivo_date'], ascending=False)\n df = df.set_index(['arquivo_date'])\n df['arquivo_date'] = df.index\n\n # drop news that have title repeated\n df_title_text = df[['news_site_title']]\n df_title_text = df_title_text.drop_duplicates()\n df = df.loc[df.index.isin(df_title_text.index)]\n\n # insert in mongo db\n Database.save_posprocessed_crawled_news(Database.db_pos_processed_news,df)", "def test_sites(self, test_sites):\n\n self._test_sites = test_sites", "def update_sites(self, sites):\n\n self.labels = {}\n\n with open(sites) as f:\n for line in f:\n (website, label) = line.split()\n self.labels[website] = label\n\n self.sites = list(self.labels.keys())" ]
[ "0.655942", "0.5809342", "0.57910275", "0.5721592", "0.5674233", "0.5635913", "0.56231123", "0.55927944", "0.54833764", "0.5420062", "0.541851", "0.5358861", "0.52144605", "0.5196891", "0.5148625", "0.5148489", "0.51216763", "0.5053114", "0.50516135", "0.50410926", "0.50410926", "0.50372756", "0.50305957", "0.50193655", "0.50007564", "0.49952987", "0.49888188", "0.4988356", "0.4986362", "0.4974113" ]
0.8127856
0
You can set attributes for the station by providing a dictionary Parameters. project & uri must be a list lat, lon, eas, must be convertible to float everything else is a string.
def setStation(self, attrib=None): if not isinstance(attrib, dict): return # minimal sanity check checkFloat = ['lat', 'lon', 'eas'] checkList = ['project', 'uri'] # create 'keys' without the underscore keys = [k.strip('_') for k in list(self.__dict__)] for a in attrib: a = a.strip('_') if a in keys and a in checkFloat: try: self.__setattr__('_' + a, float(attrib[a])) except: continue elif a in keys and a in checkList: try: self.__setattr__('_' + a, list(attrib[a])) except: continue else: self.__setattr__('_' + a, attrib[a])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_data(self, record):\n state = {}\n if 'lon' in record:\n state[ATTR_LONGITUDE] = record['lon']\n if 'lat' in record:\n state[ATTR_LATITUDE] = record['lat']\n if 'alt' in record:\n state[ATTR_ELEVATION] = record['alt']\n if 'ubi' in record:\n state[ATTR_STATION_NAME] = record['ubi']\n if 'prec' in record:\n state[ATTR_WEATHER_PRECIPITATION] = record['prec']\n if 'pres' in record:\n state[ATTR_WEATHER_PRESSURE] = record['pres']\n if 'ta' in record:\n state[ATTR_WEATHER_TEMPERATURE] = record['ta']\n if 'hr' in record:\n state[ATTR_WEATHER_HUMIDITY] = record['hr']\n if 'fint' in record:\n state[ATTR_LAST_UPDATE] = record['fint']\n if 'vis' in record:\n state[ATTR_WEATHER_VISIBILITY] = record['vis']\n if 'nieve' in record:\n state[ATTR_WEATHER_SNOW] = record['nieve']\n if 'vv' in record:\n state[ATTR_WEATHER_WIND_SPEED] = record['vv'] * 3.6 # m/s to km/h\n if 'dv' in record:\n state[ATTR_WEATHER_WIND_BEARING] = record['dv']\n self.data = state", "def __init__(self, lat, lon, grid, api):\n self.lat = lat\n self.lon = lon\n self.city = grid['city']\n self.county = grid['county']\n self.village = grid['village']\n self.api = api\n self.result = {}", "def __setAttributes(self):\n values = {\"f\":\"json\"}\n layerInfo = self._getEsriRESTJSON(self.url,values)\n #Geometry Type\n geometryType = getGeometryType(layerInfo['geometryType'])\n self.geometryType = geometryType\n #Name\n name=arcpy.ValidateTableName(layerInfo['name'])\n self.name=name\n #Spatial Reference - both the wkid and the arcpy SpatialReference object\n #in case it's in a wkt\n try:\n wkid = layerInfo['extent']['spatialReference']['wkid']\n except:\n wkid = 4326\n sr = arcpy.SpatialReference()\n sr.factoryCode = int(wkid)\n sr.create()\n self.sr = sr\n self.wkid = wkid\n #field used to update the feature class are a subset of all the fields in a feature class\n fields = layerInfo['fields']\n updateFields = []\n for field in fields:\n if (field['type'] in ['esriFieldTypeOID','esriFieldTypeGeometry','esriFieldTypeGUID'] or 'shape' in field['name'].lower() or field['name'] in self.userFields):\n pass\n else:\n updateFields.append(field)\n updateFields.insert(0, {\"name\":'Shape@', \"type\":\"esriFieldTypeGeometry\"})\n self.updateFields = updateFields\n #Max values\n if layerInfo.has_key('maxRecordCount'):\n self.maxRecordCount = int(layerInfo['maxRecordCount'])\n else:\n self.maxRecordCount = 1000", "def __init__(self):\n\n self._stationId = None # shortName like HTM or SE-NOR\n self._valid = False # if stationId is set and valid, return True\n self._name = None # longName\n self._theme = None # AS | ES | OS\n self._icosclass = None # 1 | 2 | Associated\n self._siteType = None # description of site\n\n # locations\n self._lat = None # latitude\n self._lon = None # longitude\n self._eas = None # elevation above sea level\n\n # pi information\n self._firstName = None # Station PI first name\n self._lastName = None # Station PI last name\n self._email = None # Station PI email\n\n # other information\n self._country = None\n self._project = None # list, project affiliation,\n self._uri = None # list, links to ressources, landing pages\n\n # data and products\n self._datacheck = False # check if data and products have been asked for already\n self._data = None # list of associated data objects\n self._products = None # list of available products", "async def set_station(self: SimpleNWS, station: Optional[str] = None) -> None:\n if station:\n self.station = station\n if not self.stations:\n self.stations = [self.station]\n else:\n 
self.stations = await self.get_points_stations()\n self.station = self.stations[0]", "def set_standard_attrs(da):\n da.coords[\"lat\"].attrs = OrderedDict(\n [\n (\"standard_name\", \"latitude\"),\n (\"units\", \"degrees_north\"),\n (\"axis\", \"Y\"),\n (\"long_name\", \"latitude\"),\n (\"out_name\", \"lat\"),\n (\"stored_direction\", \"increasing\"),\n (\"type\", \"double\"),\n (\"valid_max\", \"90.0\"),\n (\"valid_min\", \"-90.0\"),\n ]\n )\n da.coords[\"lon\"].attrs = OrderedDict(\n [\n (\"standard_name\", \"longitude\"),\n (\"units\", \"degrees_east\"),\n (\"axis\", \"X\"),\n (\"long_name\", \"longitude\"),\n (\"out_name\", \"lon\"),\n (\"stored_direction\", \"increasing\"),\n (\"type\", \"double\"),\n (\"valid_max\", \"180.0\"),\n (\"valid_min\", \"-180.0\"),\n ]\n )\n da.coords[\"depth_coord\"].attrs = OrderedDict(\n [\n (\"standard_name\", \"depth\"),\n (\"units\", \"m\"),\n (\"axis\", \"Z\"),\n (\"long_name\", \"ocean depth coordinate\"),\n (\"out_name\", \"lev\"),\n (\"positive\", \"down\"),\n (\"stored_direction\", \"increasing\"),\n (\"valid_max\", \"12000.0\"),\n (\"valid_min\", \"0.0\"),\n ]\n )\n da.coords[\"time\"].attrs = OrderedDict(\n [\n (\"standard_name\", \"time\"),\n (\"axis\", \"T\"),\n (\"long_name\", \"time\"),\n (\"out_name\", \"time\"),\n (\"stored_direction\", \"increasing\"),\n ]\n )\n da.coords[\"time\"].encoding[\"units\"] = \"days since '1900-01-01'\"\n\n return da", "def set_coordinate(self):\n airqual_dictionary = self.realtime_data['stations'][0] #get the very first(recent) data/result\n self.latitude = airqual_dictionary['lat']\n self.longitude = airqual_dictionary['lng']", "def __init__(self, lat, longitude):\n\n self.lat = lat\n self.long = longitude", "def __init__(self, lat, lon, api):\n self.lat = lat\n self.lon = lon\n self.api = api\n self.result = {}", "def __init__(self, kwargs):\n\n self.postcode = kwargs[\"postcode\"]\n self.east = float(kwargs[\"east\"])\n self.north = float(kwargs[\"north\"])\n self.latitude = kwargs[\"latitude\"]\n self.longitude = kwargs[\"longitude\"]", "def __init__(self, id, name, stat, base, boost):\n super(BaseStation, self).__init__(id, name)\n self.__stat = stat\n self.__base = base\n self.__boost = boost", "def __init__(self, lat, lng):\n self.summary = \"Lorem ipsum\"", "def updateParameters(self, parameters):\r\n\t\tin_wikiplace_IRI = parameters[0]\r\n\t\tin_location_property = parameters[1]\r\n\t\tin_relation_degree = parameters[2]\r\n\t\tout_location = parameters[3]\r\n\t\tout_points_name = parameters[4]\r\n\t\t\r\n\t\tif in_wikiplace_IRI.value:\r\n\t\t\tinputFeatureClassName = in_wikiplace_IRI.valueAsText\r\n\t\t\tlastIndexOFGDB = inputFeatureClassName.rfind(\"\\\\\")\r\n\t\t\tfeatureClassName = inputFeatureClassName[(lastIndexOFGDB+1):]\r\n\t\t\tcurrentWorkspace = inputFeatureClassName[:lastIndexOFGDB]\r\n\r\n\t\t\tarcpy.env.workspace = currentWorkspace\r\n\t\t\tout_location.value = currentWorkspace\r\n\r\n\t\t\t# get all the IRI from input point feature class of wikidata places\r\n\t\t\tinplaceIRIList = []\r\n\t\t\tcursor = arcpy.SearchCursor(inputFeatureClassName)\r\n\t\t\tfor row in cursor:\r\n\t\t\t\tinplaceIRIList.append(row.getValue(\"URL\"))\r\n\t\t\t\r\n\t\t\t# get all the property URL which are used in the input feature class. 
their objects are geographic locations which have coordinates, I call them location common properties\r\n\t\t\tlocationCommonPropertyJSONObj = SPARQLQuery.locationCommonPropertyQuery(inplaceIRIList)\r\n\t\t\tlocationCommonPropertyJSON = locationCommonPropertyJSONObj[\"results\"][\"bindings\"]\r\n\r\n\t\t\tLocationPropertyPath.locationCommonPropertyURLList = []\r\n\t\t\tLocationPropertyPath.locationCommonPropertyCountList = []\r\n\t\t\tfor jsonItem in locationCommonPropertyJSON:\r\n\t\t\t\tLocationPropertyPath.locationCommonPropertyURLList.append(jsonItem[\"p\"][\"value\"])\r\n\t\t\t\tLocationPropertyPath.locationCommonPropertyCountList.append(jsonItem[\"NumofSub\"][\"value\"])\r\n\r\n\t\t\tlocationCommonPropertyCountDict = dict(zip(LocationPropertyPath.locationCommonPropertyURLList, LocationPropertyPath.locationCommonPropertyCountList))\r\n\r\n\t\t\t# get the english label for each location common property\r\n\t\t\tlocationCommonPropertyLabelJSON = SPARQLQuery.locationCommonPropertyLabelQuery(LocationPropertyPath.locationCommonPropertyURLList)\r\n\t\t\t# locationCommonPropertyLabelJSON = locationCommonPropertyLabelJSONObj[\"results\"][\"bindings\"]\r\n\r\n\t\t\t# a dictionary object: key: propertyNameCount, value: propertyURL\r\n\t\t\tLocationPropertyPath.locationCommonPropertyDict = dict()\r\n\t\t\tLocationPropertyPath.locationCommonPropertyNameCountList = []\r\n\t\t\tLocationPropertyPath.locationCommonPropertyURLList = []\r\n\t\t\tLocationPropertyPath.locationCommonPropertyCountList = []\r\n\r\n\t\t\tfor jsonItem in locationCommonPropertyLabelJSON:\r\n\t\t\t\tpropertyURL = jsonItem[\"p\"][\"value\"]\r\n\t\t\t\tLocationPropertyPath.locationCommonPropertyURLList.append(propertyURL)\r\n\r\n\t\t\t\tpropertyName = jsonItem[\"propertyLabel\"][\"value\"]\r\n\r\n\t\t\t\tpropertyCount = locationCommonPropertyCountDict[propertyURL]\r\n\t\t\t\tLocationPropertyPath.locationCommonPropertyCountList.append(propertyCount)\r\n\r\n\t\t\t\tpropertyNameCount = propertyName + \"(\" + propertyCount + \")\"\r\n\t\t\t\tLocationPropertyPath.locationCommonPropertyNameCountList.append(propertyNameCount)\r\n\t\t\t\tLocationPropertyPath.locationCommonPropertyDict[propertyNameCount] = propertyURL\r\n\r\n\t\t\tin_location_property.filter.list = LocationPropertyPath.locationCommonPropertyNameCountList\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif in_location_property.value and in_relation_degree.value and out_points_name.valueAsText == None:\r\n\t\t\t\tpropertyName = in_location_property.valueAsText\r\n\t\t\t\trelationdegree = in_relation_degree.valueAsText\r\n\r\n\t\t\t\tlastIndex = propertyName.rfind(\"(\")\r\n\t\t\t\tpropertyName = propertyName[:lastIndex]\r\n\r\n\t\t\t\tpropertyName = propertyName.replace(\" \", \"_\")\r\n\r\n\t\t\t\tif featureClassName.endswith(\".shp\"):\r\n\t\t\t\t\tlastIndex = featureClassName.rfind(\".\")\r\n\t\t\t\t\tfeatureClassNameNoShp = featureClassName[:lastIndex]\r\n\t\t\t\t\tout_points_name.value = featureClassNameNoShp + \"_D\" + relationdegree + \"_\" + propertyName + \".shp\"\r\n\t\t\t\telse:\r\n\t\t\t\t\tout_points_name.value = featureClassName + \"_D\" + relationdegree + \"_\" + propertyName\r\n\r\n\r\n\t\t\t\tif arcpy.Exists(out_points_name.valueAsText):\r\n\t\t\t\t\tarcpy.AddError(\"The output feature class name already exists in current workspace!\")\r\n\t\t\t\t\traise arcpy.ExecuteError\r\n\r\n\t\t\t\r\n\r\n\r\n\t\t\tif in_relation_degree.value:\r\n\t\t\t\trelationDegree = int(in_relation_degree.valueAsText)\r\n\t\t\t\tif relationDegree > 4:\r\n\t\t\t\t\tin_relation_degree.value = 
4\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\r\n\r\n\t\treturn", "def __init__(self, raw_facil, raw_gir, raw_geo, proj):\n address1 = raw_facil.get('address1')\n address2 = raw_facil.get('address2')\n\n lon_lat = None\n if raw_geo:\n lon_lat = proj(\n raw_geo['longitude'],\n raw_geo['latitude'],\n inverse=True\n )\n\n self._init_attributes()\n self.source = 'facil-location'\n self.bldg_id = raw_facil['id']\n self.type = 'building'\n self.tags = []\n self.banner_abbreviation = raw_facil.get('abbreviation')\n self.name = raw_facil.get('name')\n self.campus = self._get_pretty_campus(raw_facil.get('campus'))\n self.address = self._get_address(address1, address2)\n self.city = raw_facil.get('city')\n self.state = raw_facil.get('state')\n self.zip = raw_facil.get('zip')\n self.geo_location = self._create_geo_location(\n lon_lat[0] if lon_lat else None,\n lon_lat[1] if lon_lat else None\n )\n self.geometry = self._create_geometry(\n raw_geo['coordinatesType'] if raw_geo else None,\n raw_geo['coordinates'] if raw_geo else None\n )\n self.gir_count = raw_gir['count'] if raw_gir else 0\n self.gir_limit = bool(raw_gir['limit'].strip()) if raw_gir and raw_gir['limit'] else None\n self.gir_locations = raw_gir['all'].strip() if raw_gir else None\n self.arcgis_abbreviation = (\n (raw_geo.get('abbreviation') if raw_geo else None)\n or (raw_gir.get('abbreviation') if raw_gir else None)\n )\n self.relationships = {'services': {'data': []}}\n self.merge = False\n self.open_hours = None\n self.description = None\n self.descriptionHtml = None\n self.images = None\n self.thumbnails = []\n self.website = None\n self.synonyms = None", "def __init__(self, address, latitude, longitude):\n self.address = address\n self.latitude = latitude\n self.longitude = longitude", "def define_attributes(new_cube):\n\n time_cmip5 = new_cube.coord('time').points # Defining the time variable\n lats_cmip5 = new_cube.coord('latitude').points # Defining the lat variable\n lons_cmip5 = new_cube.coord('longitude').points # Defining the lon variable\n\n return time_cmip5, lats_cmip5, lons_cmip5", "def __init__(self, latitude=None, longitude=None, altitude=0):\n if latitude and longitude:\n self.latitude = latitude\n self.longitude = longitude\n else:\n loc = requests.get(self.GEO_URL).json()\n self.latitude = loc['latitude']\n self.longitude = loc['longitude']\n print loc\n self.altitude = altitude\n # For first run update the satellite list and wait for it to complete\n self.update_satellite_list(block=True)", "def update(self, *args, **kwargs):\n if args is None or len(args) == 0:\n for i in kwargs:\n if hasattr(self, i):\n setattr(self, i, kwargs[i])\n largs = list(args)\n latts = [\"id\", \"width\", \"height\", \"x\", \"y\"]\n for i in range(len(largs)):\n setattr(self, latts[i], largs[i])", "def __init__(self, api_key, station_id):\n self._station_id = station_id\n self._api_key = api_key\n self.data = {}", "def __init__(self, name, token_path, latitude, longitude):\n self._api = VoiNearestScooterApi(token_path)\n self._name = name\n self._latitude = latitude\n self._longitude = longitude\n self._state = None\n self._attributes = {}", "def update():\n\n # ensure parameters are present\n if not request.args.get(\"sw\"):\n raise RuntimeError(\"missing sw\")\n if not request.args.get(\"ne\"):\n raise RuntimeError(\"missing ne\")\n\n # ensure parameters are in lat,lng format\n if not re.search(\"^-?\\d+(?:\\.\\d+)?,-?\\d+(?:\\.\\d+)?$\", request.args.get(\"sw\")):\n raise RuntimeError(\"invalid sw\")\n if not 
re.search(\"^-?\\d+(?:\\.\\d+)?,-?\\d+(?:\\.\\d+)?$\", request.args.get(\"ne\")):\n raise RuntimeError(\"invalid ne\")\n\n # explode southwest corner into two variables\n (sw_lat, sw_lng) = [float(s) for s in request.args.get(\"sw\").split(\",\")]\n\n # explode northeast corner into two variables\n (ne_lat, ne_lng) = [float(s) for s in request.args.get(\"ne\").split(\",\")]\n\n # find stations within view\n if (sw_lng <= ne_lng):\n # doesn't cross the antimeridian\n\n stations = Station.query.join(Place).\\\n filter(db.and_(\n sw_lat <= Place.lat, Place.lat <= ne_lat,(db.and_(\n sw_lng <= Place.lng, Place.lng <= ne_lng)))).all()\n\n else:\n # crosses the antimeridian\n\n stations = Station.query.join(Place).\\\n filter(db.and_(\n sw_lat <= Place.lat, Place.lat <= ne_lat,(db.or_(\n sw_lng <= Place.lng, Place.lng <= ne_lng)))).all()\n\n result = geo_stations.dump(stations)\n\n return jsonify(result.data)", "def setStation(self, isStation: bool) -> None:", "def __init__(self, stopId: int, shortName: str, longName: str, xCoordinate: float, yCoordinate: float) -> None:\n self.stop_id = stopId\n self.short_name = shortName\n self.long_name = longName\n self.x_coord = xCoordinate\n self.y_coord = yCoordinate\n self.station = True", "def set_file_attr(self):\n if self.resolution == 1000:\n satellite_type = ['AQUA', 'TERRA']\n if self.satellite in satellite_type:\n try:\n h4r = SD(self.in_file, SDC.READ)\n self.file_attr = attrs2dict(h4r.attributes())\n except Exception as e:\n print(str(e))\n else:\n raise ValueError(\n 'Cant read this satellite`s data.: {}'.format(self.satellite))\n else:\n raise ValueError(\n \"Cant handle this resolution: \".format(self.resolution))", "def __init__(self, path):\r\n self.path = path\r\n # 0.75 deg per grid box latitudinally\r\n self.lat_unit = 360", "def __init__(self, path, local_name, uri, version='', properties = None):\n ConfigElement.__init__(self, path, local_name, properties)\n if uri is None:\n raise MultiProjectException(\"Invalid scm entry having no uri attribute for path %s\"%path)\n self.uri = uri.rstrip('/') # strip trailing slashes if defined to not be too strict #3061\n self.version = version", "def set_gps_location(self,file_name, lat, lng):\n lat_deg = self.to_deg(lat, [\"S\", \"N\"])\n lng_deg = self.to_deg(lng, [\"W\", \"E\"])\n\n print lat_deg\n print lng_deg\n\n # class pyexiv2.utils.Rational(numerator, denominator) => convert decimal coordinates into degrees, munutes and seconds\n exiv_lat = (ev.Rational(lat_deg[0]*60+lat_deg[1],60),ev.Rational(lat_deg[2]*100,6000), ev.Rational(0, 1))\n exiv_lng = (ev.Rational(lng_deg[0]*60+lng_deg[1],60),ev.Rational(lng_deg[2]*100,6000), ev.Rational(0, 1))\n\n exiv_image = ev.ImageMetadata(file_name)\n exiv_image.read()\n\n # modify GPSInfo of image\n exiv_image[\"Exif.GPSInfo.GPSLatitude\"] = exiv_lat\n exiv_image[\"Exif.GPSInfo.GPSLatitudeRef\"] = lat_deg[3]\n exiv_image[\"Exif.GPSInfo.GPSLongitude\"] = exiv_lng\n exiv_image[\"Exif.GPSInfo.GPSLongitudeRef\"] = lng_deg[3]\n exiv_image[\"Exif.Image.GPSTag\"] = 654\n exiv_image[\"Exif.GPSInfo.GPSMapDatum\"] = \"WGS-84\"\n exiv_image[\"Exif.GPSInfo.GPSVersionID\"] = '2 2 0 0'\n exiv_image.write()", "def __init__(self, number):\n super(DSS,self).__init__()\n self.number = number\n dsn = get_geodetic_coords()\n self.lon = -dsn[number][0]*math.pi/180.\n self.lat = dsn[number][1]*math.pi/180.\n self.elevation = dsn[number][2]\n self.timezone = dsn[number][3]\n self.name = dsn[number][4]\n self.diam = dsn[number][5]\n xyz = get_cartesian_coordinates()\n self.xyz 
= xyz[\"DSS %2d\" % number]", "def __init__(self):\n self.site = ('http://vortex.plymouth.edu/cgi-bin/gen_statlog-u.cgi')\n \"\"\"Root of URL to query for data.\"\"\"\n yesterday = datetime.today() - timedelta(days=1)\n self.year = yesterday.year\n \"\"\"Year to get data for.\"\"\"\n self.month = yesterday.month\n \"\"\"Month to get data for.\"\"\"\n self.day = yesterday.day\n \"\"\"Day to get data for.\"\"\"\n self.stns = dict(yvr=\"CYVR\",\n sandheads=\"CWVF\")\n \"\"\"Mapping of common station names to official station IDs.\"\"\"", "def updateParameters(self, parameters):\r\n\t\tin_buf_query_center = parameters[0]\r\n\t\tin_place_type = parameters[1]\r\n\t\tin_is_directed_instance = parameters[2]\r\n\t\tin_radius = parameters[3]\r\n\t\tout_location = parameters[4]\r\n\t\tout_points_name = parameters[5]\r\n\t\tout_place_type_url = parameters[6]\r\n\r\n\t\toutLocation = out_location.valueAsText\r\n\t\toutFeatureClassName = out_points_name.valueAsText\r\n\t\t\r\n\t\tarcpy.env.workspace = outLocation\r\n\r\n\t\tif out_points_name.value and arcpy.Exists(os.path.join(outLocation, outFeatureClassName)):\r\n\t\t\tarcpy.AddError(\"The Output Point Feature Class Name already exists in the current workspace!\")\r\n\t\t\traise arcpy.ExecuteError\r\n\r\n\t\tif in_place_type.value:\r\n\t\t\tenterTypeText = in_place_type.valueAsText\r\n\t\t\tif \"(\" in enterTypeText:\r\n\t\t\t\tlastIndex = enterTypeText.rfind(\"(\")\r\n\t\t\t\tplaceType = enterTypeText[:lastIndex]\r\n\t\t\telse:\r\n\t\t\t\tplaceType = enterTypeText\r\n\t\t\t# messages.addMessage(\"Use Input Type: {0}.\".format(in_place_type.valueAsText))\r\n\t\t\tqueryPrefix = \"\"\"PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\r\n\t\t\t\t\t\t\t\t\t\tPREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\r\n\t\t\t\t\t\t\t\t\t\tPREFIX wdt: <http://www.wikidata.org/prop/direct/>\"\"\"\r\n\r\n\t\t\tentityTypeQuery = queryPrefix + \"\"\"SELECT ?entityType ?entityTypeLabel\r\n\t\t\t\t\t\t\t\t\t\t\tWHERE\r\n\t\t\t\t\t\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t\t\t\t\t #?entity wdt:P31 ?entityType.\r\n\t\t\t\t\t\t\t\t\t\t\t ?entityType wdt:P279* wd:Q2221906.\r\n\t\t\t\t\t\t\t\t\t\t\t # retrieve the English label\r\n\t\t\t\t\t\t\t\t\t\t\t ?entityType rdfs:label ?entityTypeLabel .\r\n\t\t\t\t\t\t\t\t\t\t\t FILTER (LANG(?entityTypeLabel) = \"en\")\r\n\t\t\t\t\t\t\t\t\t\t\t FILTER REGEX(?entityTypeLabel, '\"\"\" + placeType + \"\"\"')\r\n\t\t\t\t\t\t\t\t\t\t\t # show results ordered by distance\r\n\t\t\t\t\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\t\t\t\t\t\"\"\"\r\n\r\n\t\t\t# sparqlParam = {'query':'SELECT ?item ?itemLabel WHERE{ ?item wdt:P31 wd:Q146 . 
SERVICE wikibase:label { bd:serviceParam wikibase:language \"en\" }}', 'format':'json'}\r\n\t\t\tentityTypeSparqlParam = {'query': entityTypeQuery, 'format': 'json'}\r\n\t\t\t# headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}\r\n\t\t\tsparqlRequest = requests.get('https://query.wikidata.org/sparql', params=entityTypeSparqlParam)\r\n\t\t\tprint(sparqlRequest.url)\r\n\t\t\t# messages.addMessage(\"URL: {0}.\".format(sparqlRequest.url))\r\n\t\t\tentityTypeJson = sparqlRequest.json()[\"results\"][\"bindings\"]\r\n\r\n\t\t\tif len(entityTypeJson) == 0:\r\n\t\t\t\tarcpy.AddError(\"No entity type matches the user's input.\")\r\n\t\t\t\traise arcpy.ExecuteError\r\n\t\t\telse:\r\n\t\t\t\tin_place_type.filter.list = [enterTypeText]\r\n\t\t\t\tself.entityTypeLabel = []\r\n\t\t\t\tself.entityTypeURLList = []\r\n\t\t\t\tfor jsonItem in entityTypeJson:\r\n\t\t\t\t\tlabel = jsonItem[\"entityTypeLabel\"][\"value\"]\r\n\t\t\t\t\twikiURL = jsonItem[\"entityType\"][\"value\"]\r\n\t\t\t\t\twikiURLLastIndex = wikiURL.rfind(\"/\")\r\n\t\t\t\t\twikiURLLastName = wikiURL[(wikiURLLastIndex+1):]\r\n\t\t\t\t\tself.entityTypeLabel.append(label+\"(\"+\"wd:\"+wikiURLLastName+\")\")\r\n\t\t\t\t\tself.entityTypeURLList.append(wikiURL)\r\n\t\t\t\t\t# in_place_type.filter.list.append(jsonItem[\"entityTypeLabel\"][\"value\"])\r\n\r\n\t\t\t\tin_place_type.filter.list = in_place_type.filter.list + self.entityTypeLabel\r\n\r\n\t\t\tfor i in range(len(self.entityTypeLabel)):\r\n\t\t\t\t# messages.addMessage(\"Label: {0}\".format(self.entityTypeLabel[i]))\r\n\t\t\t\tif in_place_type.valueAsText == self.entityTypeLabel[i]:\r\n\t\t\t\t\tout_place_type_url.value = self.entityTypeURLList[i]\r\n\r\n\t\treturn" ]
[ "0.571317", "0.55146784", "0.5513319", "0.5414872", "0.53760904", "0.53628534", "0.5342563", "0.5324857", "0.52393043", "0.518832", "0.5161087", "0.51601505", "0.5153292", "0.5135026", "0.51113105", "0.5096146", "0.5095356", "0.50654036", "0.5047394", "0.5043429", "0.50406104", "0.50322866", "0.5002017", "0.5000329", "0.49931148", "0.49600968", "0.49506754", "0.49416652", "0.49191245", "0.49059764" ]
0.7062441
0
Query the SPARQL endpoint for data products submitted by this station, adjust latitude and longitude, and store a list of data specifications and data objects (PIDs).
def _setData(self): if not self.stationId: return """ # get the ressource url and adjust lat and lon from data portal query = sparqls.stationResource(self.stationId) key, val = RunSparql(query, 'array').run() if val: self.url = val[0][0] self.lat = float(val[0][2]) self.lon = float(val[0][3]) """ # it is possible, that a station id has multiple URI # ask for all URI query = sparqls.stationData(self.uri, 'all') data = RunSparql(query, 'pandas').run() if not data.empty: self._data = data else: self._data = 'no data available' # check if data is available and extract the 'unique' data products if isinstance(self._data, pd.DataFrame): p = self._data['specLabel'].unique() self._products = pd.DataFrame(p) # replace samplingheight=None with empty string self._data.samplingheight.replace(to_replace=[None], value="", inplace=True) else: self._products = 'no data available'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query_datacube(product,latitude,longitude,time,measurements):\r\n\r\n dc = datacube.Datacube(app=\"Query\")\r\n\r\n xarr = dc.load(\r\n product=product, \r\n longitude=longitude, \r\n latitude=latitude,\r\n # Time format YYYY-MM-DD\r\n time=time, \r\n measurements=measurements\r\n )\r\n\r\n return xarr", "def query_radar_data(station,product,start,\n minute_delta=0,hour_delta=0,day_delta=0):\n \n end = start+timedelta(days=day_delta, minutes=minute_delta, hours=hour_delta)\n \n print(f\"query start time:{start}\")\n print(f\"query end time:{end}\")\n rs = RadarServer('http://thredds-aws.unidata.ucar.edu/thredds/radarServer/nexrad/level2/S3/')\n query = rs.query()\n rs.validate_query(query)\n print(rs.stations[station])\n\n query.stations(station).time_range(start,end).variables(product)\n catalog = rs.get_catalog(query)\n file_station = str(catalog.datasets[0])\n file_station = file_station[0:4]\n \n file_list = list(catalog.datasets.values())\n for t in file_list: print(t)\n LatLonBox = [rs.stations[station].longitude-3,rs.stations[station].longitude+3,\n rs.stations[station].latitude-2,rs.stations[station].latitude+2]\n \n return file_list,LatLonBox", "def process_location(pid, data_source, out_loc, start_date, end_date, debug=False):\n\n #query = client.query_data_points(page_size=PAGE_SIZE, source=pid)\n query = try_query(pid)\n\n location_query = try_filter(query, pid, 'pdk-location', start_date, end_date)\n \"\"\" location_query = query.filter(source=pid, \n generator_identifier='pdk-location',\n created__gte=start_date,\n created__lte=end_date).order_by('created')\n \"\"\"\n tot_count = location_query.count()\n count = 0\n frac = int(tot_count / 100)\n\n loc_df = pd.DataFrame()\n for point in location_query:\n point_df = pd.DataFrame.from_dict(point).iloc[0].to_frame().transpose()\n metadata_df = pd.Series(point['passive-data-metadata']).to_frame().transpose()\n # TODO check if ignoring errors is safe\n metadata_df = metadata_df.drop(['latitude', 'longitude'], axis='columns', errors=\"ignore\")\n point_df.reset_index(inplace=True, drop=True)\n point_df = pd.concat([metadata_df, point_df], axis=1, sort=True)\n \n point_df.drop('passive-data-metadata', axis='columns', inplace=True)\n missing_cols = [col for col in loc_df.columns.values if col not in point_df.columns.values]\n \n if len(missing_cols) > 0 and loc_df.shape[0] > 0:\n for col in missing_cols:\n point_df[col] = np.nan\n point_df = point_df[loc_df.columns]\n loc_df = loc_df.append(point_df)\n count += 1\n if debug and (count % frac == 0):\n print(\"{0:.2f}% complete\".format(float(count)/float(tot_count)*100))\n\n loc_df['pid'] = pid \n loc_df['data_source'] = data_source\n print(loc_df.shape)\n \n pickle.dump(loc_df, open(\"{}/pdk-location/{}.df\".format(out_loc, pid), 'wb'), -1)", "def stations(): \n # creating the Docstring\n session = Session(engine)\n\n # creat the Query stations\n\n stations_qu = session.query(measurement.station).group_by(measurement.station).all()\n\n # Converting the list of tuples into a normal list\n stations_qu_dict = list(np.ravel(stations_qu))\n session.close()\n\n return jsonify(stations_qu_dict)", "def updateParameters(self, parameters):\r\n\t\tin_buf_query_center = parameters[0]\r\n\t\tin_place_type = parameters[1]\r\n\t\tin_is_directed_instance = parameters[2]\r\n\t\tin_radius = parameters[3]\r\n\t\tout_location = parameters[4]\r\n\t\tout_points_name = parameters[5]\r\n\t\tout_place_type_url = parameters[6]\r\n\r\n\t\toutLocation = out_location.valueAsText\r\n\t\toutFeatureClassName 
= out_points_name.valueAsText\r\n\t\t\r\n\t\tarcpy.env.workspace = outLocation\r\n\r\n\t\tif out_points_name.value and arcpy.Exists(os.path.join(outLocation, outFeatureClassName)):\r\n\t\t\tarcpy.AddError(\"The Output Point Feature Class Name already exists in the current workspace!\")\r\n\t\t\traise arcpy.ExecuteError\r\n\r\n\t\tif in_place_type.value:\r\n\t\t\tenterTypeText = in_place_type.valueAsText\r\n\t\t\tif \"(\" in enterTypeText:\r\n\t\t\t\tlastIndex = enterTypeText.rfind(\"(\")\r\n\t\t\t\tplaceType = enterTypeText[:lastIndex]\r\n\t\t\telse:\r\n\t\t\t\tplaceType = enterTypeText\r\n\t\t\t# messages.addMessage(\"Use Input Type: {0}.\".format(in_place_type.valueAsText))\r\n\t\t\tqueryPrefix = \"\"\"PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\r\n\t\t\t\t\t\t\t\t\t\tPREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\r\n\t\t\t\t\t\t\t\t\t\tPREFIX wdt: <http://www.wikidata.org/prop/direct/>\"\"\"\r\n\r\n\t\t\tentityTypeQuery = queryPrefix + \"\"\"SELECT ?entityType ?entityTypeLabel\r\n\t\t\t\t\t\t\t\t\t\t\tWHERE\r\n\t\t\t\t\t\t\t\t\t\t\t{\r\n\t\t\t\t\t\t\t\t\t\t\t #?entity wdt:P31 ?entityType.\r\n\t\t\t\t\t\t\t\t\t\t\t ?entityType wdt:P279* wd:Q2221906.\r\n\t\t\t\t\t\t\t\t\t\t\t # retrieve the English label\r\n\t\t\t\t\t\t\t\t\t\t\t ?entityType rdfs:label ?entityTypeLabel .\r\n\t\t\t\t\t\t\t\t\t\t\t FILTER (LANG(?entityTypeLabel) = \"en\")\r\n\t\t\t\t\t\t\t\t\t\t\t FILTER REGEX(?entityTypeLabel, '\"\"\" + placeType + \"\"\"')\r\n\t\t\t\t\t\t\t\t\t\t\t # show results ordered by distance\r\n\t\t\t\t\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\t\t\t\t\t\"\"\"\r\n\r\n\t\t\t# sparqlParam = {'query':'SELECT ?item ?itemLabel WHERE{ ?item wdt:P31 wd:Q146 . SERVICE wikibase:label { bd:serviceParam wikibase:language \"en\" }}', 'format':'json'}\r\n\t\t\tentityTypeSparqlParam = {'query': entityTypeQuery, 'format': 'json'}\r\n\t\t\t# headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}\r\n\t\t\tsparqlRequest = requests.get('https://query.wikidata.org/sparql', params=entityTypeSparqlParam)\r\n\t\t\tprint(sparqlRequest.url)\r\n\t\t\t# messages.addMessage(\"URL: {0}.\".format(sparqlRequest.url))\r\n\t\t\tentityTypeJson = sparqlRequest.json()[\"results\"][\"bindings\"]\r\n\r\n\t\t\tif len(entityTypeJson) == 0:\r\n\t\t\t\tarcpy.AddError(\"No entity type matches the user's input.\")\r\n\t\t\t\traise arcpy.ExecuteError\r\n\t\t\telse:\r\n\t\t\t\tin_place_type.filter.list = [enterTypeText]\r\n\t\t\t\tself.entityTypeLabel = []\r\n\t\t\t\tself.entityTypeURLList = []\r\n\t\t\t\tfor jsonItem in entityTypeJson:\r\n\t\t\t\t\tlabel = jsonItem[\"entityTypeLabel\"][\"value\"]\r\n\t\t\t\t\twikiURL = jsonItem[\"entityType\"][\"value\"]\r\n\t\t\t\t\twikiURLLastIndex = wikiURL.rfind(\"/\")\r\n\t\t\t\t\twikiURLLastName = wikiURL[(wikiURLLastIndex+1):]\r\n\t\t\t\t\tself.entityTypeLabel.append(label+\"(\"+\"wd:\"+wikiURLLastName+\")\")\r\n\t\t\t\t\tself.entityTypeURLList.append(wikiURL)\r\n\t\t\t\t\t# in_place_type.filter.list.append(jsonItem[\"entityTypeLabel\"][\"value\"])\r\n\r\n\t\t\t\tin_place_type.filter.list = in_place_type.filter.list + self.entityTypeLabel\r\n\r\n\t\t\tfor i in range(len(self.entityTypeLabel)):\r\n\t\t\t\t# messages.addMessage(\"Label: {0}\".format(self.entityTypeLabel[i]))\r\n\t\t\t\tif in_place_type.valueAsText == self.entityTypeLabel[i]:\r\n\t\t\t\t\tout_place_type_url.value = self.entityTypeURLList[i]\r\n\r\n\t\treturn", "def updateParameters(self, parameters):\r\n\t\tin_wikiplace_IRI = parameters[0]\r\n\t\tin_location_property = parameters[1]\r\n\t\tin_relation_degree = 
parameters[2]\r\n\t\tout_location = parameters[3]\r\n\t\tout_points_name = parameters[4]\r\n\t\t\r\n\t\tif in_wikiplace_IRI.value:\r\n\t\t\tinputFeatureClassName = in_wikiplace_IRI.valueAsText\r\n\t\t\tlastIndexOFGDB = inputFeatureClassName.rfind(\"\\\\\")\r\n\t\t\tfeatureClassName = inputFeatureClassName[(lastIndexOFGDB+1):]\r\n\t\t\tcurrentWorkspace = inputFeatureClassName[:lastIndexOFGDB]\r\n\r\n\t\t\tarcpy.env.workspace = currentWorkspace\r\n\t\t\tout_location.value = currentWorkspace\r\n\r\n\t\t\t# get all the IRI from input point feature class of wikidata places\r\n\t\t\tinplaceIRIList = []\r\n\t\t\tcursor = arcpy.SearchCursor(inputFeatureClassName)\r\n\t\t\tfor row in cursor:\r\n\t\t\t\tinplaceIRIList.append(row.getValue(\"URL\"))\r\n\t\t\t\r\n\t\t\t# get all the property URL which are used in the input feature class. their objects are geographic locations which have coordinates, I call them location common properties\r\n\t\t\tlocationCommonPropertyJSONObj = SPARQLQuery.locationCommonPropertyQuery(inplaceIRIList)\r\n\t\t\tlocationCommonPropertyJSON = locationCommonPropertyJSONObj[\"results\"][\"bindings\"]\r\n\r\n\t\t\tLocationPropertyPath.locationCommonPropertyURLList = []\r\n\t\t\tLocationPropertyPath.locationCommonPropertyCountList = []\r\n\t\t\tfor jsonItem in locationCommonPropertyJSON:\r\n\t\t\t\tLocationPropertyPath.locationCommonPropertyURLList.append(jsonItem[\"p\"][\"value\"])\r\n\t\t\t\tLocationPropertyPath.locationCommonPropertyCountList.append(jsonItem[\"NumofSub\"][\"value\"])\r\n\r\n\t\t\tlocationCommonPropertyCountDict = dict(zip(LocationPropertyPath.locationCommonPropertyURLList, LocationPropertyPath.locationCommonPropertyCountList))\r\n\r\n\t\t\t# get the english label for each location common property\r\n\t\t\tlocationCommonPropertyLabelJSON = SPARQLQuery.locationCommonPropertyLabelQuery(LocationPropertyPath.locationCommonPropertyURLList)\r\n\t\t\t# locationCommonPropertyLabelJSON = locationCommonPropertyLabelJSONObj[\"results\"][\"bindings\"]\r\n\r\n\t\t\t# a dictionary object: key: propertyNameCount, value: propertyURL\r\n\t\t\tLocationPropertyPath.locationCommonPropertyDict = dict()\r\n\t\t\tLocationPropertyPath.locationCommonPropertyNameCountList = []\r\n\t\t\tLocationPropertyPath.locationCommonPropertyURLList = []\r\n\t\t\tLocationPropertyPath.locationCommonPropertyCountList = []\r\n\r\n\t\t\tfor jsonItem in locationCommonPropertyLabelJSON:\r\n\t\t\t\tpropertyURL = jsonItem[\"p\"][\"value\"]\r\n\t\t\t\tLocationPropertyPath.locationCommonPropertyURLList.append(propertyURL)\r\n\r\n\t\t\t\tpropertyName = jsonItem[\"propertyLabel\"][\"value\"]\r\n\r\n\t\t\t\tpropertyCount = locationCommonPropertyCountDict[propertyURL]\r\n\t\t\t\tLocationPropertyPath.locationCommonPropertyCountList.append(propertyCount)\r\n\r\n\t\t\t\tpropertyNameCount = propertyName + \"(\" + propertyCount + \")\"\r\n\t\t\t\tLocationPropertyPath.locationCommonPropertyNameCountList.append(propertyNameCount)\r\n\t\t\t\tLocationPropertyPath.locationCommonPropertyDict[propertyNameCount] = propertyURL\r\n\r\n\t\t\tin_location_property.filter.list = LocationPropertyPath.locationCommonPropertyNameCountList\r\n\r\n\r\n\r\n\r\n\r\n\r\n\t\t\tif in_location_property.value and in_relation_degree.value and out_points_name.valueAsText == None:\r\n\t\t\t\tpropertyName = in_location_property.valueAsText\r\n\t\t\t\trelationdegree = in_relation_degree.valueAsText\r\n\r\n\t\t\t\tlastIndex = propertyName.rfind(\"(\")\r\n\t\t\t\tpropertyName = propertyName[:lastIndex]\r\n\r\n\t\t\t\tpropertyName = 
propertyName.replace(\" \", \"_\")\r\n\r\n\t\t\t\tif featureClassName.endswith(\".shp\"):\r\n\t\t\t\t\tlastIndex = featureClassName.rfind(\".\")\r\n\t\t\t\t\tfeatureClassNameNoShp = featureClassName[:lastIndex]\r\n\t\t\t\t\tout_points_name.value = featureClassNameNoShp + \"_D\" + relationdegree + \"_\" + propertyName + \".shp\"\r\n\t\t\t\telse:\r\n\t\t\t\t\tout_points_name.value = featureClassName + \"_D\" + relationdegree + \"_\" + propertyName\r\n\r\n\r\n\t\t\t\tif arcpy.Exists(out_points_name.valueAsText):\r\n\t\t\t\t\tarcpy.AddError(\"The output feature class name already exists in current workspace!\")\r\n\t\t\t\t\traise arcpy.ExecuteError\r\n\r\n\t\t\t\r\n\r\n\r\n\t\t\tif in_relation_degree.value:\r\n\t\t\t\trelationDegree = int(in_relation_degree.valueAsText)\r\n\t\t\t\tif relationDegree > 4:\r\n\t\t\t\t\tin_relation_degree.value = 4\r\n\r\n\r\n\r\n\r\n\r\n\t\t\t\r\n\r\n\t\treturn", "def search_items(\n self,\n *,\n product_names: Optional[List[str]] = None,\n time: Optional[Tuple[datetime, datetime]] = None,\n bbox: Tuple[float, float, float, float] = None,\n intersects: BaseGeometry = None,\n limit: int = 500,\n offset: int = 0,\n full_dataset: bool = False,\n dataset_ids: Sequence[UUID] = None,\n order: ItemSort = ItemSort.DEFAULT_SORT,\n ) -> Generator[DatasetItem, None, None]:\n geom = func.ST_Transform(DATASET_SPATIAL.c.footprint, 4326)\n\n columns = [\n geom.label(\"geometry\"),\n func.Box2D(geom).cast(String).label(\"bbox\"),\n # TODO: dataset label?\n DATASET_SPATIAL.c.region_code.label(\"region_code\"),\n DATASET_SPATIAL.c.creation_time,\n DATASET_SPATIAL.c.center_time,\n ]\n\n # If fetching the whole dataset, we need to join the ODC dataset table.\n if full_dataset:\n query: Select = select(\n (*columns, *_utils.DATASET_SELECT_FIELDS)\n ).select_from(\n DATASET_SPATIAL.join(\n ODC_DATASET, onclause=ODC_DATASET.c.id == DATASET_SPATIAL.c.id\n )\n )\n # Otherwise query purely from the spatial table.\n else:\n query: Select = select(\n (*columns, DATASET_SPATIAL.c.id, DATASET_SPATIAL.c.dataset_type_ref)\n ).select_from(DATASET_SPATIAL)\n\n # Add all the filters\n query = self._add_fields_to_query(\n query,\n product_names=product_names,\n time=time,\n bbox=bbox,\n intersects=intersects,\n dataset_ids=dataset_ids,\n )\n\n # Maybe sort\n if order == ItemSort.DEFAULT_SORT:\n query = query.order_by(DATASET_SPATIAL.c.center_time, DATASET_SPATIAL.c.id)\n elif order == ItemSort.UNSORTED:\n ... # Nothing! 
great!\n elif order == ItemSort.RECENTLY_ADDED:\n if not full_dataset:\n raise NotImplementedError(\n \"Only full-dataset searches can be sorted by recently added\"\n )\n query = query.order_by(ODC_DATASET.c.added.desc())\n else:\n raise RuntimeError(\n f\"Unknown item sort order {order!r} (perhaps this is a bug?)\"\n )\n\n query = query.limit(limit).offset(\n # TODO: Offset/limit isn't particularly efficient for paging...\n offset\n )\n\n for r in self._engine.execute(query):\n yield DatasetItem(\n dataset_id=r.id,\n bbox=_box2d_to_bbox(r.bbox) if r.bbox else None,\n product_name=self.index.products.get(r.dataset_type_ref).name,\n geometry=_get_shape(r.geometry, self._get_srid_name(r.geometry.srid))\n if r.geometry is not None\n else None,\n region_code=r.region_code,\n creation_time=r.creation_time,\n center_time=r.center_time,\n odc_dataset=(\n _utils.make_dataset_from_select_fields(self.index, r)\n if full_dataset\n else None\n ),\n )", "def getPhaseData(bounds = None,radius=None,starttime = None,endtime = None,\n magrange = None,catalog = None,contributor = None,\n eventid = None,eventProperties=None,productProperties=None,verbose=False):\n\n #Make sure user is not specifying bounds search AND radius search\n if bounds is not None and radius is not None:\n raise Exception,'Cannot choose bounds search AND radius search.'\n \n if catalog is not None and catalog not in checkCatalogs():\n raise Exception,'Unknown catalog %s' % catalog\n if contributor is not None and contributor not in checkContributors():\n raise Exception,'Unknown contributor %s' % contributor\n \n #if someone asks for a specific eventid, then we can shortcut all of this stuff\n #below, and just parse the event json\n if eventid is not None:\n try:\n phaseml = __getEventPhase(eventid)\n return [phaseml]\n except Exception,msg:\n sys.stderr.write('Could not retrieve phase data for eventid \"%s\" - error \"%s\"\\n' % (eventid,str(msg)))\n return None\n\n #start creating the url parameters\n urlparams = {}\n urlparams['producttype'] = 'phase-data'\n if starttime is not None:\n urlparams['starttime'] = starttime.strftime(TIMEFMT)\n if endtime is None:\n urlparams['endtime'] = ShakeDateTime.utcnow().strftime(TIMEFMT)\n if endtime is not None:\n urlparams['endtime'] = endtime.strftime(TIMEFMT)\n if starttime is None:\n urlparams['starttime'] = ShakeDateTime(1900,1,1,0,0,0).strftime(TIMEFMT)\n\n #we're using a rectangle search here\n if bounds is not None:\n urlparams['minlongitude'] = bounds[0]\n urlparams['maxlongitude'] = bounds[1]\n urlparams['minlatitude'] = bounds[2]\n urlparams['maxlatitude'] = bounds[3]\n\n #fix possible issues with 180 meridian crossings\n minwest = urlparams['minlongitude'] > 0 and urlparams['minlongitude'] < 180\n maxeast = urlparams['maxlongitude'] < 0 and urlparams['maxlongitude'] > -180\n if minwest and maxeast:\n urlparams['maxlongitude'] += 360\n\n if radius is not None:\n urlparams['latitude'] = radius[0]\n urlparams['longitude'] = radius[1]\n urlparams['maxradiuskm'] = radius[2]\n\n if magrange is not None:\n urlparams['minmagnitude'] = magrange[0]\n urlparams['maxmagnitude'] = magrange[1]\n \n if catalog is not None:\n urlparams['catalog'] = catalog\n if contributor is not None:\n urlparams['contributor'] = contributor\n\n #search parameters we're not making available to the user (yet)\n urlparams['orderby'] = 'time-asc'\n urlparams['format'] = 'geojson'\n params = urllib.urlencode(urlparams)\n url = URLBASE % params\n fh = getURLHandle(url)\n #fh = urllib2.urlopen(url)\n feed_data = 
fh.read()\n fh.close()\n\n fdict = json.loads(feed_data)\n outfiles = []\n eqlist = []\n ic = 0\n for feature in fdict['features']:\n eid = feature['id']\n #REMOVE\n sys.stderr.write('Fetching event %s (%i of %i)\\n' % (eid,ic+1,len(fdict['features'])))\n location = feature['properties']['place']\n ptypes = feature['properties']['types'].strip(',').split(',')\n if 'phase-data' not in ptypes:\n continue\n try:\n phaseml = __getEventPhase(eid)\n eqlist.append(phaseml)\n except Exception,msg:\n if verbose:\n sys.stderr.write('Could not retrieve data for eventid \"%s\" - error \"%s\"\\n' % (eid,str(msg)))\n ic += 1\n return eqlist", "def query(self, *, sparql: str) -> Result:\n pass", "def getStationData(self):\n dtime = datetime.strptime(self.refTime, \"%y%m%d/%H%M\")\n trange = TimeRange()\n trange.setStart(dtime)\n trange.setEnd(dtime)\n dataTime = DataTime(refTime=dtime, validPeriod=trange)\n req = StationDataRequest()\n req.setPluginName(self.pluginName)\n req.setStationId(self.stationId)\n req.setRefTime(dataTime)\n req.setParmList(self.parmList)\n req.setPartNumber(self.partNumber)\n resp = self.client.sendRequest(req)\n\n for i, rec in enumerate(resp):\n resp[i] = {\n key.decode() if isinstance(key, bytes) else key:\n val.decode() if isinstance(val, bytes) else val\n for key, val in rec.items()\n }\n\n return resp", "def prepare_data_with_location(self,from_date,to_date,locations,all_products):\n data_dict = {}\n stock_quant_obj=self.env['stock.quant']\n for loc in locations:\n all_locations = self.get_all_locations(warehouse=False, location=loc)\n if not all_locations:\n continue\n #here we are finding the opening stock for these we are using base query\n #of inventory at date v10\n result = self.get_product_qty(all_locations,from_date)\n qty_dict = dict((x,y) for x, y in result)\n \n for product in all_products:\n last_sales = ''\n qty_purchase_in_duration = 0\n qty_sales_in_duration = 0\n last_purchase_date = ''\n scrap_location_qty = 0\n adjusted_qty_in_duration = 0\n warehouse_out_qty = 0\n warehouse_in_qty = 0\n# here from result of inventory at date we are seaching for specific product.\n opening_product_qty = qty_dict.get(product.id)\n\n #finding last sales qty\n last_sales = self.find_last_sales_qty(from_date,to_date,False,all_locations,product)\n #finding last purchase date of product\n last_purchase_date = self.find_last_purchase_date(from_date,to_date,all_locations,product)\n #fiding date purchase qty in duration for specific product\n qty_purchase_in_duration = self.find_purchase_qty_in_duration(from_date,to_date,all_locations,product)\n #fiding scrap qty of precific product\n scrap_location_qty = self.find_scap_location_qty(from_date,to_date,product,all_locations)\n #finding sales qty in duration\n qty_sales_in_duration = self.find_sale_qty_in_duration(from_date,to_date,False,all_locations,product)\n #fidning adjusted qty in duration\n adjusted_qty_in_duration = self.find_adjusted_qty_in_duration(from_date, to_date, product, all_locations)\n\n # dest_location_lst = self.get_other_wahouse_locations(warehouse)\n \n # if any(all_locations) and any(dest_location_lst):\n # #fidning warehouse in qty \n # warehouse_in_qty = self.find_warehouse_transer_in_qty(product, all_locations, dest_location_lst,from_date,to_date)\n # #fidning warehouse out qty for specific product.\n # warehouse_out_qty = self.find_warehouse_transer_out_qty(product, all_locations, dest_location_lst,from_date,to_date)\n \n # if warehouse_out_qty:\n # warehouse_out_qty = warehouse_out_qty and 
warehouse_out_qty[0][0] or ''\n # if warehouse_in_qty:\n # warehouse_in_qty = warehouse_in_qty and warehouse_in_qty[0][0] or ''\n \n if adjusted_qty_in_duration:\n adjusted_qty_in_duration = adjusted_qty_in_duration and adjusted_qty_in_duration[0][0] or '' \n if scrap_location_qty:\n scrap_location_qty = scrap_location_qty and scrap_location_qty[0][0] or ''\n \n # if qty_sales_in_duration:\n # qty_sales_in_duration = qty_sales_in_duration and qty_sales_in_duration[0][0] or ''\n # if qty_purchase_in_duration:\n # qty_purchase_in_duration = qty_purchase_in_duration or ''\n if last_sales:\n last_sales = datetime.strptime(last_sales and last_sales[0][0], '%Y-%m-%d %H:%M:%S').strftime('%d-%m-%Y') or ''\n \n if last_purchase_date:\n last_purchase_date = datetime.strptime(last_purchase_date and last_purchase_date[0][0], '%Y-%m-%d %H:%M:%S').strftime('%d-%m-%Y') or ''\n \n if data_dict.has_key(loc.id):\n data_lst=data_dict.get(loc.id)\n data_lst.append({'product':product,'sku':product.default_code or '','name':product.name,\n 'Cost':product.standard_price or '','sales_price':product.lst_price or '',\n 'opening_qty':opening_product_qty or 0,'last_sales':last_sales or '',\n 'last_purchase_date':last_purchase_date or '','qty_purchase_in_duration':qty_purchase_in_duration or 0,\n 'qty_sales_in_duration': qty_sales_in_duration or 0,'scrap_location_qty':scrap_location_qty or 0,\n 'adjusted_qty_in_duration':adjusted_qty_in_duration or 0\n ,'warehouse_in_qty':warehouse_in_qty or 0,\n 'warehouse_out_qty':warehouse_out_qty or 0 \n })\n data_dict.update({loc.id:data_lst})\n continue\n data_dict.update({loc.id:[{'product':product,'sku':product.default_code or '','name':product.name,\n 'Cost':product.standard_price or '','sales_price':product.lst_price or '',\n 'opening_qty':opening_product_qty or 0,\n 'last_sales':last_sales or '','last_purchase_date':last_purchase_date or '',\n 'qty_purchase_in_duration':qty_purchase_in_duration or 0,\n 'qty_sales_in_duration': qty_sales_in_duration or 0,\n 'scrap_location_qty':scrap_location_qty or 0,\n 'adjusted_qty_in_duration':adjusted_qty_in_duration or 0,\n 'warehouse_in_qty':warehouse_in_qty or 0,\n 'warehouse_out_qty':warehouse_out_qty or 0\n }]})\n return data_dict", "def stationdata():\n # * Return a JSON list of stations from the dataset.\n # as this should be a list, I'm just grabbing the station name\n session = Session(engine)\n results = session.query(Station.name).all()\n session.close()\n\n stations = list(np.ravel(results))\n return jsonify(stations)", "def listings_data():\n\n stmt = db.session.query(nyc).statement\n df = pd.read_sql_query(stmt, db.session.bind)\n df[\"latitude\"] = pd.to_numeric(df[\"latitude\"])\n df[\"longitude\"] = pd.to_numeric(df[\"longitude\"])\n df[\"accommodates\"] = pd.to_numeric(df[\"accommodates\"])\n\n data = df.to_dict(orient='index')\n # Create a dictionary entry for each row of metadata information\n # data = {}\n # for result in results:\n #\n # data[\"ID\"] = result[0]\n # data[\"LISTING_URL\"] = result[1]\n # data[\"NAME\"] = result[2]\n # data[\"HOST_ID\"] = result[3]\n # data[\"NEIGHBORHOOD\"] = result[4]\n # data[\"NEIGHBORHOOD_GROUP\"] = result[5]\n # data[\"CITY\"] = result[6]\n # data[\"ZIPCODE\"] = result[7]\n # data[\"LAT\"] = float(result[8])\n # data[\"LON\"] = float(result[9])\n #\n # print(data)\n\n return jsonify(data)", "def query_list(self):\r\n self.plot_list.clear()\r\n self.settings.send_to_databox_header(self.plot_list)\r\n \r\n self.label_list_status.set_text('Getting frequencies and powers.')\r\n 
self.window.process_events()\r\n \r\n fs = self.api.get_list_frequencies()\r\n ps = self.api.get_list_powers()\r\n if fs == None or ps == None: return\r\n \r\n if not len(fs) == len(ps):\r\n print(\"ERROR query_list(): List lengths do not match. len(fs)=\"+str(len(fs))+' len(ps)='+str(len(ps)) )\r\n \r\n N = len(fs)\r\n self.plot_list['n'] = _n.linspace(0, N-1, N)\r\n self.plot_list['f_Hz'] = fs\r\n self.plot_list['P_dBm'] = ps\r\n \r\n self.label_list_status.set_text(str(N) + ' points in list memory')\r\n self.plot_list.plot()\r\n self.button_send_list.disable()\r\n self.window.process_events()", "def data_products(self, observation_id, product_id=None):\n pass", "def get_select_datapoints(self, endpoint, attrList= []):\r\n if type(attrList) is str:\r\n attrList = [attrList]\r\n result = {}\r\n if not attrList:\r\n raise ValueError(\"Please give a valid attribute list\")\r\n for symbol in self.symbolList:\r\n try: \r\n ep = self.data_set[symbol][endpoint]\r\n except:\r\n raise IEXEndpointError(endpoint)\r\n temp = {}\r\n for attr in attrList:\r\n try:\r\n query = ep[attr]\r\n except:\r\n raise IEXDatapointError(endpoint, attr)\r\n temp.update({attr: query})\r\n result.update({symbol:temp})\r\n return result", "def daily_search(term, state='', property=''):\n geo = \"\"\n if state == '':\n geo = state = 'US'\n else:\n geo = 'US-' + state\n\n out = service.getGraph(terms=term,\n restrictions_startDate='2008-01',\n restrictions_endDate='2008-07',\n restrictions_geo=geo,\n restrictions_property=property).execute().get('lines')[0].get('points')\n\n next = service.getGraph(terms=term,\n restrictions_startDate='2008-07',\n restrictions_endDate='2009-01',\n restrictions_geo=geo,\n restrictions_property=property).execute().get('lines')[0].get('points')\n\n #out['orig_value'] = out['value']\n #next['orig_value'] = next['value']\n\n multiplier = 1\n if next[30].get('value') != 0:\n multiplier = out[-1].get('value')/next[30].get('value')\n if multiplier == 0:\n multiplier = 1\n\n for i in next:\n i['value'] = i['value']*multiplier\n\n out = out + next[31:]\n\n for i in range(2009,2019):\n print(term + ', ' + property + ', ' + str(i))\n n = i + 1\n for j in range(1,3):\n if j == 1:\n s = str(i) + '-01'\n e = str(i) + '-07'\n else:\n s = str(i) + '-07'\n e = str(i + 1) + '-01'\n\n next = service.getGraph(terms=term,\n restrictions_startDate=s,\n restrictions_endDate=e,\n restrictions_geo=geo,\n restrictions_property=property).execute().get('lines')[0].get('points')\n\n #next['orig_value'] = next['value']\n\n multiplier = 1\n if next[30].get('value') != 0:\n multiplier = out[-1].get('value')/next[30].get('value')\n if multiplier == 0:\n multiplier = 1\n\n for k in next:\n k['value'] = k['value']*multiplier\n\n out = out + next[31:]\n\n\n\n\n if property == '':\n property = 'web'\n\n df1 = pd.DataFrame(out)\n\n df1['term'] = term\n df1['state'] = state\n df1['property'] = property\n\n return df1", "def Find_nearest_dwd_stations(inpt_data,\r\n date_start='20051201',\r\n date_end='20201231',\r\n dwd_time_format='%Y%m%d%H',\r\n data_category='air_temperature',\r\n temp_resolution='hourly',\r\n no_of_nearest_stations=4,\r\n memory_save=True,\r\n Output='True'):\r\n if isinstance(data_category,list):\r\n if len(list(data_category)) > 1:\r\n print(\r\n 'Currently only one dwd category allowed, please run function multiple times for each category'\r\n )\r\n return None\r\n \r\n #convert time to datetime\r\n dt_start=datetime.strptime(date_start,'%Y%m%d')\r\n dt_end=datetime.strptime(date_end,'%Y%m%d')\r\n 
print('Start quering data from DWD')\r\n #define the database folder\r\n pypath = os.path.dirname(os.path.abspath(__file__))\r\n table_dir = pypath + '\\\\' + 'tables'\r\n dbase_dir = pypath + '\\\\' + 'dbase' \r\n #%% we check all available stations and create a valid list\r\n filename_stations=update_stationlist(time_res='hourly',dbase_dir=table_dir)\r\n stations_all=pd.read_csv(filename_stations, dtype={'STATIONS_ID': object})\r\n # delete all stations which do not cover the category\r\n dwd_stations=stations_all[stations_all[data_category]==True].copy()\r\n #correct to datetime\r\n dwd_stations['date_end']=pd.to_datetime(stations_all.date_end,format='%Y%m%d')\r\n dwd_stations['date_start']=pd.to_datetime(stations_all.date_start,format='%Y%m%d')\r\n # clean to stations which cover the campaign time #dt_low <= dt <= dt_high:\r\n dwd_stations=dwd_stations[(dwd_stations.date_start<=dt_start) & (dwd_stations.date_end>=dt_end)]\r\n #make a geodataframe out of it\r\n dwd_stations=gpd.GeoDataFrame(dwd_stations,geometry=gpd.points_from_xy(dwd_stations.geo_lon, dwd_stations.geo_lat))\r\n \r\n #loop through all rows to get the n closest points\r\n distances=pd.DataFrame()\r\n for _, station in dwd_stations.iterrows():\r\n distances[station.STATIONS_ID]=inpt_data.distance(station.geometry)\r\n \r\n #%% get the n stations with smallest distance and update database\r\n id_nearest_stations=distances.apply(lambda s: s.nsmallest(no_of_nearest_stations).index.tolist(), axis=1).values.tolist() #station ids\r\n #get them as unique values by sum a list of lists https://bit.ly/353iZQB\r\n id_dwd_stations=list(set(sum(id_nearest_stations,[])))\r\n \r\n #update the database\r\n db_dwd_stations=import_stations(time_res=temp_resolution,time_format=dwd_time_format,campaign_time=[dt_start,dt_end],data_category=data_category,station_ids=id_dwd_stations,dbase_dir=dbase_dir,Output=Output,table_dir=table_dir,memory_save=memory_save)\r\n \r\n #distance of nearest stattions\r\n dist_nearest_stations=pd.DataFrame(np.sort(distances.values)[:,:no_of_nearest_stations]).values.tolist() #distances themself\r\n #create new columns in the input data\r\n station_col_nm=list()\r\n for i in range(0,no_of_nearest_stations):\r\n station_col_nm.append(data_category+'_station_'+str(i))\r\n for i in range(0,no_of_nearest_stations):\r\n station_col_nm.append(data_category+'_distance_'+str(i))\r\n #create new dataframe\r\n distance_data=pd.concat([pd.DataFrame(id_nearest_stations).astype(int),pd.DataFrame(dist_nearest_stations)],axis=1)\r\n distance_data.columns=station_col_nm\r\n #add to main dataset\r\n inpt_data=pd.concat([inpt_data, distance_data],axis=1) \r\n \r\n return inpt_data,db_dwd_stations", "def fetchwikidata(a_wid):\n\n sparql = SPARQLWrapper(\"https://query.wikidata.org/sparql\", 'natural_earth_name_localizer v1.1.1 (github.com/nvkelso/natural-earth-vector)')\n query_template = \"\"\"\n SELECT\n ?e ?i ?r ?population\n ?name_ar\n ?name_bn\n ?name_de\n ?name_el\n ?name_en\n ?name_es\n ?name_fa\n ?name_fr\n ?name_he\n ?name_hi\n ?name_hu\n ?name_id\n ?name_it\n ?name_ja\n ?name_ko\n ?name_nl\n ?name_pl\n ?name_pt\n ?name_ru\n ?name_sv\n ?name_tr\n ?name_uk\n ?name_ur\n ?name_vi\n ?name_zh\n ?name_zh_hans\n ?name_zh_hant\n WHERE {\n {\n SELECT DISTINCT ?e ?i ?r\n WHERE{\n VALUES ?i { wd:Q2102493 wd:Q1781 }\n OPTIONAL{ ?i owl:sameAs ?r. 
}\n BIND(COALESCE(?r, ?i) AS ?e).\n }\n }\n SERVICE wikibase:label {bd:serviceParam wikibase:language \"en\".}\n OPTIONAL{?e wdt:P1082 ?population .}\n OPTIONAL{?e rdfs:label ?name_ar FILTER((LANG(?name_ar))=\"ar\").}\n OPTIONAL{?e rdfs:label ?name_bn FILTER((LANG(?name_bn))=\"bn\").}\n OPTIONAL{?e rdfs:label ?name_de FILTER((LANG(?name_de))=\"de\").}\n OPTIONAL{?e rdfs:label ?name_el FILTER((LANG(?name_el))=\"el\").}\n OPTIONAL{?e rdfs:label ?name_en FILTER((LANG(?name_en))=\"en\").}\n OPTIONAL{?e rdfs:label ?name_es FILTER((LANG(?name_es))=\"es\").}\n OPTIONAL{?e rdfs:label ?name_fa FILTER((LANG(?name_fa))=\"fa\").}\n OPTIONAL{?e rdfs:label ?name_fr FILTER((LANG(?name_fr))=\"fr\").}\n OPTIONAL{?e rdfs:label ?name_he FILTER((LANG(?name_he))=\"he\").}\n OPTIONAL{?e rdfs:label ?name_hi FILTER((LANG(?name_hi))=\"hi\").}\n OPTIONAL{?e rdfs:label ?name_hu FILTER((LANG(?name_hu))=\"hu\").}\n OPTIONAL{?e rdfs:label ?name_id FILTER((LANG(?name_id))=\"id\").}\n OPTIONAL{?e rdfs:label ?name_it FILTER((LANG(?name_it))=\"it\").}\n OPTIONAL{?e rdfs:label ?name_ja FILTER((LANG(?name_ja))=\"ja\").}\n OPTIONAL{?e rdfs:label ?name_ko FILTER((LANG(?name_ko))=\"ko\").}\n OPTIONAL{?e rdfs:label ?name_nl FILTER((LANG(?name_nl))=\"nl\").}\n OPTIONAL{?e rdfs:label ?name_pl FILTER((LANG(?name_pl))=\"pl\").}\n OPTIONAL{?e rdfs:label ?name_pt FILTER((LANG(?name_pt))=\"pt\").}\n OPTIONAL{?e rdfs:label ?name_ru FILTER((LANG(?name_ru))=\"ru\").}\n OPTIONAL{?e rdfs:label ?name_sv FILTER((LANG(?name_sv))=\"sv\").}\n OPTIONAL{?e rdfs:label ?name_tr FILTER((LANG(?name_tr))=\"tr\").}\n OPTIONAL{?e rdfs:label ?name_uk FILTER((LANG(?name_uk))=\"uk\").}\n OPTIONAL{?e rdfs:label ?name_ur FILTER((LANG(?name_ur))=\"ur\").}\n OPTIONAL{?e rdfs:label ?name_vi FILTER((LANG(?name_vi))=\"vi\").}\n OPTIONAL{?e rdfs:label ?name_zh FILTER((LANG(?name_zh))=\"zh\").}\n OPTIONAL{?e rdfs:label ?name_zh_hans FILTER((LANG(?name_zh_hans))=\"zh-hans\").}\n OPTIONAL{?e rdfs:label ?name_zh_hant FILTER((LANG(?name_zh_hant))=\"zh-hant\").}\n }\n\n \"\"\"\n\n wikidata_sparql_ids = \"\"\n for wid in a_wid:\n wikidata_sparql_ids += \" wd:\"+wid\n\n print(\"fetch: \", wikidata_sparql_ids.split()[1], \"... \", wikidata_sparql_ids.split()[-1])\n ne_query = query_template.replace('wd:Q2102493 wd:Q1781', wikidata_sparql_ids)\n\n # compress the Query - removing the extra spaces\n while ' ' in ne_query:\n ne_query = ne_query.replace(' ', ' ')\n\n results = None\n retries = 0\n while results is None and retries < 8:\n try:\n results = None\n sparql.setQuery(ne_query)\n sparql.setTimeout(1000)\n sparql.setReturnFormat(JSON)\n results = sparql.query().convert()\n\n except SPARQLExceptions.EndPointNotFound:\n print('ERRwikidata-SPARQLExceptions-EndPointNotFound: Retrying in 30 seconds.')\n time.sleep(30)\n retries += 1\n continue\n\n except SPARQLExceptions.EndPointInternalError as e:\n print(\"ERRwikidata-SPARQLExceptions-EndPointInternalError: Retrying in 30 seconds.\",e)\n time.sleep(30)\n retries += 1\n continue\n\n except SPARQLExceptions.QueryBadFormed as e:\n print(\"ERRwikidata-SPARQLExceptions-QueryBadFormed : Check! \",e)\n return \"error\"\n\n except TimeoutError as e:\n print(\"ERRwikidata-SPARQLExceptions TimeOut : Retrying in 1 seconds.\",e)\n time.sleep(1)\n retries += 1\n continue\n\n except KeyboardInterrupt:\n # quit\n sys.exit()\n\n except:\n wait = retries*5\n print(\"ERRwikidata: other error. 
Retrying in \"+str(wait)+\" seconds.\")\n print('error: %s ' % sys.exc_info()[0])\n time.sleep(3)\n retries += 1\n continue\n\n if results is None and retries >= 8:\n print(\"Wikidata request failed ; system stopped! \")\n sys.exit(1)\n\n\n return results", "def linkSearch(self):\n self.identificationParams = []\n try:\n url = 'https://shopee.sg/api/v2/search_items/?by=relevancy&keyword=' + self.searchParameters + '&limit=' + str(\n self.itemQuantity) + '&newest=' + str(\n self.items_per_page) + '&order=desc&page_type=search' # Base URL\n print(url)\n r = requests.get(url, headers=self.HEADERS).json()\n for item in r['items']: # Store name, price, stocks left and amount sold in respective lists\n self.identificationParams.append((item['shopid'], item['itemid']))\n except AttributeError:\n self.identificationParams = []", "def get_station_parameters2(stn_df, st_dt, end_dt, engine): \n # Get stn IDs\n assert len(stn_df) > 0, 'ERROR: Please select at least one station.'\n stn_df['station_id'].drop_duplicates(inplace=True)\n stn_ids = stn_df['station_id'].values.astype(int).tolist()\n\n # Convert dates\n st_dt = dt.datetime.strptime(st_dt, '%Y-%m-%d')\n end_dt = dt.datetime.strptime(end_dt, '%Y-%m-%d')\n \n # Query db\n bind_pars = ','.join(':%d' % i for i in range(len(stn_ids))) \n\n bind_pars = ','.join(':%d' % i for i in range(len(stn_ids)))\n sql = (\"SELECT DISTINCT parameter_id, \"\n \" name AS parameter_name, \"\n \" unit \"\n \"FROM nivadatabase.wcv_calk \"\n \"WHERE station_id IN (%s) \"\n \"AND sample_date >= :st_dt \"\n \"AND sample_date <= :end_dt \" \n \"ORDER BY name, \"\n \" unit\" % bind_pars)\n\n par_dict = {'end_dt':end_dt,\n 'st_dt':st_dt}\n bind_dict = {'%d' % idx:item for idx, item in enumerate(stn_ids)}\n par_dict.update(bind_dict) \n df = pd.read_sql(sql, params=par_dict, con=engine) \n \n return df", "def get_data(\n begin_date, end_date, stationid, product, datum=None, bin_num=None,\n interval=None, units='metric', time_zone='gmt'):\n # Convert dates to datetime objects so deltas can be calculated\n begin_datetime = parse_known_date_formats(begin_date)\n end_datetime = parse_known_date_formats(end_date)\n delta = end_datetime - begin_datetime\n\n # If the length of our data request is less or equal to 31 days,\n # we can pull the data from API in one request\n if delta.days <= 31:\n data_url = build_query_url(\n begin_datetime.strftime(\"%Y%m%d %H:%M\"),\n end_datetime.strftime(\"%Y%m%d %H:%M\"),\n stationid, product, datum, bin_num, interval, units, time_zone)\n\n df = url2pandas(data_url, product, num_request_blocks=1)\n\n # If the length of the user specified data request is less than 365 days\n # AND the product is hourly_height or high_low, we can pull data directly\n # from the API in one request\n elif delta.days <= 365 and (\n product == 'hourly_height' or product == 'high_low'):\n data_url = build_query_url(\n begin_date, end_date, stationid, product, datum, bin_num, interval,\n units, time_zone)\n\n df = url2pandas(data_url, product, num_request_blocks=1)\n\n # If the length of the user specified data request is greater than 365 days\n # AND the product is hourly_height or high_low, we need to load data from\n # the API in365 day blocks.\n elif product == 'hourly_height' or product == 'high_low':\n # Find the number of 365 day blocks in our desired period,\n # constrain the upper limit of index in the for loop to follow\n num_365day_blocks = int(math.floor(delta.days / 365))\n\n df = pd.DataFrame([]) # Empty dataframe for data from API requests\n\n # Loop 
through in 365 day blocks,\n # adjust the begin_datetime and end_datetime accordingly,\n # make a request to the NOAA CO-OPS API\n for i in range(num_365day_blocks + 1):\n begin_datetime_loop = begin_datetime + timedelta(days=(i * 365))\n end_datetime_loop = begin_datetime_loop + timedelta(days=365)\n\n # If end_datetime_loop of the current 365 day block is greater\n # than end_datetime specified by user, use end_datetime\n if end_datetime_loop > end_datetime:\n end_datetime_loop = end_datetime\n\n # Build url for each API request as we proceed through the loop\n data_url = build_query_url(\n begin_datetime_loop.strftime('%Y%m%d'),\n end_datetime_loop.strftime('%Y%m%d'),\n stationid, product, datum, bin_num, interval, units, time_zone)\n \n df_new = url2pandas(data_url, product, num_365day_blocks) # Get dataframe for block\n df = df.append(df_new) # Append to existing dataframe\n \n # If the length of the user specified data request is greater than 31 days\n # for any other products, we need to load data from the API in 31 day\n # blocks\n else:\n # Find the number of 31 day blocks in our desired period,\n # constrain the upper limit of index in the for loop to follow\n num_31day_blocks = int(math.floor(delta.days / 31))\n\n df = pd.DataFrame([]) # Empty dataframe for data from API requests\n\n # Loop through in 31 day blocks,\n # adjust the begin_datetime and end_datetime accordingly,\n # make a request to the NOAA CO-OPS API\n for i in range(num_31day_blocks + 1):\n begin_datetime_loop = begin_datetime + timedelta(days=(i * 31))\n end_datetime_loop = begin_datetime_loop + timedelta(days=31)\n\n # If end_datetime_loop of the current 31 day block is greater\n # than end_datetime specified by user, use end_datetime\n if end_datetime_loop > end_datetime:\n end_datetime_loop = end_datetime\n\n # Build URL for each API request as we proceed through the loop\n data_url = build_query_url(\n begin_datetime_loop.strftime('%Y%m%d'),\n end_datetime_loop.strftime('%Y%m%d'),\n stationid, product, datum, bin_num, interval, units, time_zone)\n \n df_new = url2pandas(data_url, product, num_31day_blocks) # Get dataframe for block\n df = df.append(df_new) # Append to existing dataframe\n \n # Rename output dataframe columns based on requested product\n # and convert to useable data types\n if product == 'water_level':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 'q': 'QC', 's': 'sigma',\n 't': 'date_time', 'v': 'water_level'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['flags', 'QC', 'date_time'])\n df[data_cols] = df[data_cols].apply(\n pd.to_numeric, axis=1, errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'hourly_height':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 's': 'sigma',\n 't': 'date_time', 'v': 'water_level'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['flags', 'date_time'])\n df[data_cols] = df[data_cols].apply(\n pd.to_numeric, axis=1, errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'high_low':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 'ty': 'high_low',\n 't': 'date_time', 'v': 'water_level'},\n inplace=True)\n\n # Separate to high and low dataframes\n df_HH = df[df['high_low'] == \"HH\"].copy()\n df_HH.rename(columns={'date_time': 'date_time_HH',\n 
'water_level': 'HH_water_level'},\n inplace=True)\n\n df_H = df[df['high_low'] == \"H \"].copy()\n df_H.rename(columns={'date_time': 'date_time_H',\n 'water_level': 'H_water_level'},\n inplace=True)\n\n df_L = df[df['high_low'].str.contains(\"L \")].copy()\n df_L.rename(columns={'date_time': 'date_time_L',\n 'water_level': 'L_water_level'},\n inplace=True)\n\n df_LL = df[df['high_low'].str.contains(\"LL\")].copy()\n df_LL.rename(columns={'date_time': 'date_time_LL',\n 'water_level': 'LL_water_level'},\n inplace=True)\n\n # Extract dates (without time) for each entry\n dates_HH = [x.date() for x in pd.to_datetime(df_HH['date_time_HH'])]\n dates_H = [x.date() for x in pd.to_datetime(df_H['date_time_H'])]\n dates_L = [x.date() for x in pd.to_datetime(df_L['date_time_L'])]\n dates_LL = [x.date() for x in pd.to_datetime(df_LL['date_time_LL'])]\n\n # Set indices to datetime\n df_HH['date_time'] = dates_HH\n df_HH.index = df_HH['date_time']\n df_H['date_time'] = dates_H\n df_H.index = df_H['date_time']\n df_L['date_time'] = dates_L\n df_L.index = df_L['date_time']\n df_LL['date_time'] = dates_LL\n df_LL.index = df_LL['date_time']\n\n # Remove flags and combine to single dataframe\n df_HH = df_HH.drop(\n columns=['flags', 'high_low'])\n df_H = df_H.drop(columns=['flags', 'high_low',\n 'date_time'])\n df_L = df_L.drop(columns=['flags', 'high_low',\n 'date_time'])\n df_LL = df_LL.drop(columns=['flags', 'high_low',\n 'date_time'])\n\n # Keep only one instance per date (based on max/min)\n maxes = df_HH.groupby(df_HH.index).HH_water_level.transform(max)\n df_HH = df_HH.loc[df_HH.HH_water_level == maxes]\n maxes = df_H.groupby(df_H.index).H_water_level.transform(max)\n df_H = df_H.loc[df_H.H_water_level == maxes]\n mins = df_L.groupby(df_L.index).L_water_level.transform(max)\n df_L = df_L.loc[df_L.L_water_level == mins]\n mins = df_LL.groupby(df_LL.index).LL_water_level.transform(max)\n df_LL = df_LL.loc[df_LL.LL_water_level == mins]\n\n df = df_HH.join(df_H, how='outer')\n df = df.join(df_L, how='outer')\n df = df.join(df_LL, how='outer')\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(\n ['date_time', 'date_time_HH', 'date_time_H', 'date_time_L',\n 'date_time_LL'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df.index)\n df['date_time_HH'] = pd.to_datetime(df['date_time_HH'])\n df['date_time_H'] = pd.to_datetime(df['date_time_H'])\n df['date_time_L'] = pd.to_datetime(df['date_time_L'])\n df['date_time_LL'] = pd.to_datetime(df['date_time_LL'])\n\n elif product == 'predictions':\n if interval == 'h':\n # Rename columns for clarity\n df.rename(columns={'t': 'date_time', 'v': 'predicted_wl'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time'])\n\n elif interval == 'hilo':\n # Rename columns for clarity\n df.rename(columns={'t': 'date_time', 'v': 'predicted_wl',\n 'type': 'hi_lo'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time', 'hi_lo'])\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'currents':\n # Rename columns for clarity\n df.rename(columns={'b': 'bin', 'd': 'direction',\n 's': 'speed', 't': 'date_time'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n 
errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'wind':\n # Rename columns for clarity\n df.rename(columns={'d': 'dir', 'dr': 'compass',\n 'f': 'flags', 'g': 'gust_spd',\n 's': 'spd', 't': 'date_time'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time', 'flags', 'compass'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'air_pressure':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 't': 'date_time', 'v': 'air_press'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time', 'flags'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'air_temperature':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 't': 'date_time', 'v': 'air_temp'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time', 'flags'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n elif product == 'water_temperature':\n # Rename columns for clarity\n df.rename(columns={'f': 'flags', 't': 'date_time', 'v': 'water_temp'},\n inplace=True)\n\n # Convert columns to numeric values\n data_cols = df.columns.drop(['date_time', 'flags'])\n df[data_cols] = df[data_cols].apply(pd.to_numeric, axis=1,\n errors='coerce')\n\n # Convert date & time strings to datetime objects\n df['date_time'] = pd.to_datetime(df['date_time'])\n\n # Set datetime to index (for use in resampling)\n df.index = df['date_time']\n df = df.drop(columns=['date_time'])\n\n # Handle hourly requests for water_level and currents data\n if (product == 'water_level') | (product == 'currents') & (\n interval == 'h'):\n df = df.resample('H').first() # Only return the hourly data\n\n return df", "def get_product_data_off(self):\n list_products_name = []\n for x in self.list_categories: \n \"\"\"get products' data from openfoodfacts api with string as paramaters\"\"\"\n parameters = {\n 'action': 'process',\n 'json': 1,\n 'countries': 'France',\n 'page_size': 100,\n 'page': 1,\n 'tagtype_0': 'categories',\n 'tag_contains_0': 'contains',\n 'tag_0': x\n }\n r = requests.get('https://fr.openfoodfacts.org/cgi/search.pl',\n params=parameters) # passing parameters in URL\n print(r.url)\n data = r.json() # r. 
from requests module decodes json file\n products = data['products'] #access dictionnary items by referring to its key name, products ordered by id\n list_products_name.append(products) \n self.list_products = list_products_name # list_categories_name is passed in the instance property", "def build_query_url(\n begin_date, end_date, stationid, product, datum=None, bin_num=None,\n interval=None, units='metric', time_zone='gmt'):\n base_url = 'http://tidesandcurrents.noaa.gov/api/datagetter?'\n\n # If the data product is water levels, check that a datum is specified\n if product == 'water_level':\n if datum is None:\n raise ValueError('No datum specified for water level data.See'\n ' https://tidesandcurrents.noaa.gov/api/#datum '\n 'for list of available datums')\n else:\n # Compile parameter string for use in URL\n parameters = {'begin_date': begin_date,\n 'end_date': end_date,\n 'station': stationid,\n 'product': product,\n 'datum': datum,\n 'units': units,\n 'time_zone': time_zone,\n 'application': 'py_noaa',\n 'format': 'json'}\n\n elif product == 'hourly_height':\n if datum is None:\n raise ValueError('No datum specified for water level data.See'\n ' https://tidesandcurrents.noaa.gov/api/#datum '\n 'for list of available datums')\n else:\n # Compile parameter string for use in URL\n parameters = {'begin_date': begin_date,\n 'end_date': end_date,\n 'station': stationid,\n 'product': product,\n 'datum': datum,\n 'units': units,\n 'time_zone': time_zone,\n 'application': 'py_noaa',\n 'format': 'json'}\n elif product == 'high_low':\n if datum is None:\n raise ValueError('No datum specified for water level data.See'\n ' https://tidesandcurrents.noaa.gov/api/#datum '\n 'for list of available datums')\n else:\n # Compile parameter string for use in URL\n parameters = {'begin_date': begin_date,\n 'end_date': end_date,\n 'station': stationid,\n 'product': product,\n 'datum': datum,\n 'units': units,\n 'time_zone': time_zone,\n 'application': 'py_noaa',\n 'format': 'json'}\n\n elif product == 'predictions':\n # If no interval provided, return 6-min predictions data\n if interval is None:\n # Compile parameter string for use in URL\n parameters = {'begin_date': begin_date,\n 'end_date': end_date,\n 'station': stationid,\n 'product': product,\n 'datum': datum,\n 'units': units,\n 'time_zone': time_zone,\n 'application': 'py_noaa',\n 'format': 'json'}\n\n else:\n # Compile parameter string, including interval, for use in URL\n parameters = {'begin_date': begin_date,\n 'end_date': end_date,\n 'station': stationid,\n 'product': product,\n 'datum': datum,\n 'interval': interval,\n 'units': units,\n 'time_zone': time_zone,\n 'application': 'py_noaa',\n 'format': 'json'}\n\n # If the data product is currents, check that a bin number is specified\n elif product == 'currents':\n if bin_num is None:\n raise ValueError(\n 'No bin specified for current data. 
Bin info can be '\n 'found on the station info page'\n ' (e.g., https://tidesandcurrents.noaa.gov/cdata/StationInfo?id=PUG1515)')\n else:\n # Compile parameter string for use in URL\n parameters = {'begin_date': begin_date,\n 'end_date': end_date,\n 'station': stationid,\n 'product': product,\n 'bin': str(bin_num),\n 'units': units,\n 'time_zone': time_zone,\n 'application': 'py_noaa',\n 'format': 'json'}\n\n # For all other data types (e.g., meteoroligcal conditions)\n else:\n # If no interval provided, return 6-min met data\n if interval is None:\n # Compile parameter string for use in URL\n parameters = {'begin_date': begin_date,\n 'end_date': end_date,\n 'station': stationid,\n 'product': product,\n 'units': units,\n 'time_zone': time_zone,\n 'application': 'py_noaa',\n 'format': 'json'}\n else:\n # Compile parameter string, including interval, for use in URL\n parameters = {'begin_date': begin_date,\n 'end_date': end_date,\n 'station': stationid,\n 'product': product,\n 'interval': interval,\n 'units': units,\n 'time_zone': time_zone,\n 'application': 'py_noaa',\n 'format': 'json'}\n\n # Build URL with requests library\n query_url = requests.Request(\n 'GET', base_url, params=parameters).prepare().url\n\n return query_url", "def set_store_details(self):\n query = db.select([self.tables.columns.ProductName,\n self.tables.columns.QuantityPerUnit,\n self.tables.columns.UnitPrice,\n self.tables.columns.UnitsInStock])\n print(query)\n ResultProxy = self.connection.execute(query)\n ResultSet = ResultProxy.fetchall()\n return ResultSet", "def _query_data(self, index, tag):\n version, datapoints = yield self.quasar.stream_get(self.name, tag, tag+(15*qdf.MINUTE))\n values = np.empty((BLOCK_SIZE,), dtype=(type(datapoints[0])))\n values[:] = None\n \n for point in datapoints:\n time = float(point.time - tag)\n time_index = int(round(time*SAMPLE_RATE/qdf.SECOND))\n values[time_index] = point\n\n self.cache[index][CACHE_INDEX_TAG] = tag\n self.cache[index][CACHE_INDEX_DATA] = values", "def product_search(obj, query):\n client = get_client(obj)\n\n pgs = client.product_list(q=query)\n\n print(json.dumps(pgs, indent=4))", "def get_data(link):\n data = re.get(link)\n jsondata = data.json()\n for weatherstation in jsondata['weatherStations']:\n FetchandStore.sensordict.update({weatherstation[\"id\"]:weatherstation[\"sensorValues\"]})\n for sensorvalue in weatherstation[\"sensorValues\"]:\n FetchandStore.sensors.append({\"id\": sensorvalue[\"roadStationId\"], \"name\": sensorvalue[\"oldName\"],\n \"value\": sensorvalue[\"sensorValue\"], \"unit\": sensorvalue[\"sensorUnit\"],\n \"datetime\": sensorvalue[\"measuredTime\"]})\n return FetchandStore.sensors", "def query(self, page) -> [str, dict]:\n params = {'size': self.max_page_size,\n 'sort': 'displayLabel', 'page': page, 'query': self.querystring}\n url = '{base_url}/v1/stations/rsql'.format(base_url=self.base_url)\n return [url, params]", "def station():\n session = Session(engine)\n results_station = session.query(Station.station, Station.name,Station.latitude,Station.longitude,Station.elevation).all()\n session.close()\n\n # Create a dictionary from the row data and append to a list of all_passengers\n station = []\n for station, name, lat,lon,ele in results_station:\n station_dict = {}\n station_dict[\"station\"] = station\n station_dict[\"name\"] = name\n station_dict[\"latitude\"] = lat\n station_dict[\"longitude\"] = lon\n station_dict[\"elevation\"] = ele\n station.append(station_dict)\n\n return jsonify(station)" ]
[ "0.5754376", "0.5710004", "0.55562365", "0.55338526", "0.5481048", "0.54356885", "0.53965205", "0.5357878", "0.53457266", "0.53261817", "0.53222287", "0.53217673", "0.52814996", "0.52286285", "0.5211386", "0.5211245", "0.52014244", "0.5185626", "0.51776993", "0.51525617", "0.5123917", "0.50962645", "0.50962114", "0.5060024", "0.50456536", "0.5041709", "0.5035327", "0.5031843", "0.50253534", "0.50133884" ]
0.6863099
0
This function is a short cut to return the getSamplingHeight(). For documentation, please refer to .getSamplingHeight().
def sh(self, product=None):
    return self.getSamplingHeight(product)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_dimension_height(self):\n pass", "def stride_height(self):\n\t\treturn self.strides_shape_param('H')", "def get_height(self) -> int:\n return self.rsimulator.get_frame_height()", "def getHeight(self):\n return self.height", "def getHeight(self):\n return self.height", "def getHeight(self):\n return _libsbml.Dimensions_getHeight(self)", "def output_height(self):\n\t\treturn self.output_shape_param('H')", "def getHeight(self):\n return self._height", "def get_height(self):\n return self.__height", "def getHeight(self):\r\n height = 1\r\n if self.orientation == \"v\":\r\n height = self.size\r\n return height", "def get_frame_height(self) -> int:\n return self.__sim.frame_size()[1]", "def height(self):\n return self.__size[1]", "def get_height(self):\n height = 0\n for layer, ldata in self.conf['Layers'].items():\n layer_t = ldata['params']['thickness']\n height += layer_t\n return height", "def height(self):\n return self.client.call('GET', self.name + 'height')", "def get_height(self):\r\n return self._height", "def get_height(self):\r\n return self._height", "def get_height(self):\r\n return self._height", "def get_height(self):\r\n return self.state['h']", "def getHeight(self):\n return frameHeight", "def height(self):\n # type: () -> float\n return self._height", "def get_height(self):\n return self._height", "def get_height(self):\n return self._height", "def get_height(self):\n return self._height", "def get_height(self):\n return self._height", "def height(self):\n return _libsbml.Dimensions_height(self)", "def kernel_height(self):\n\t\treturn self.kernel_shape_param('H')", "def get_height(self):\n height = np.size(self.img, 1)\n return height", "def height(self) -> int:\n return self._image_data.height", "def pixheight(self):\n return self._labelHeight * self.y_sign * self.transform.scale[1]", "def get_height(self):\n return 'width'" ]
[ "0.7334868", "0.7218686", "0.7208123", "0.7113875", "0.7113875", "0.70994365", "0.7040811", "0.7000414", "0.69715154", "0.69591147", "0.6952543", "0.6943306", "0.6932998", "0.6928708", "0.69241124", "0.69241124", "0.69241124", "0.6922861", "0.6917013", "0.69131047", "0.6902513", "0.6902513", "0.6902513", "0.6902513", "0.6897537", "0.68771064", "0.68707293", "0.68663716", "0.6841566", "0.6818399" ]
0.74387443
0
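As an editorial aside: a minimal, hypothetical sketch of the shortcut pattern in the record above — the Station class and its hard-coded heights are assumptions made purely for illustration, not part of the dataset.

# Minimal, hypothetical illustration of the sh() shortcut in the record above.
# The Station class and its hard-coded heights are assumptions for the example.
class Station:
    def getSamplingHeight(self, product=None):
        heights = {"co2": [10.0, 50.0, 120.0]}     # pretend per-product lookup
        return heights.get(product, [''])

    def sh(self, product=None):
        return self.getSamplingHeight(product)     # shortcut: pure delegation

station = Station()
assert station.sh("co2") == station.getSamplingHeight("co2")
print(station.sh("co2"))       # [10.0, 50.0, 120.0]
print(station.sh("unknown"))   # [''] when the product is not available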
A list of unique values for sampling heights for the specified data product. In case of no sampling heights, or if the product is not found for this station, an empty list is returned. A shortcut function .sh() is defined, which calls this function.
def getSamplingHeight(self, product=None):
    # default return empty list
    sh = ['']
    # check if product is available for station
    if not product in self._data.values:
        return sh

    # if product is available but no sampling height is defined, return
    # count returns zero, if no sampling heights found
    if not self._data['samplingheight'][self._data.specLabel.str.match(product)].count():
        return sh

    # finally get all sampling heights and create a unique list
    sh = self._data.samplingheight[self._data.specLabel == product].unique()
    sh = list(filter(None, sh))
    if not sh:
        return ['']

    # at this point we have to assume we have an unsorted list of
    # samplingheights as strings. cast to float and sort.
    sh = [float(s) for s in sh]
    sh.sort()
    return sh
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sh(self, product=None):\n return self.getSamplingHeight(product)", "def get_sealed_products_data(\n self, set_code: str\n ) -> List[MtgjsonSealedProductObject]:\n LOGGER.info(f\"Getting booster data for {set_code}\")\n products_list = []\n for sealed_product_name, sealed_product in self.sealed_products.get(\n set_code.lower(), {}\n ).items():\n product_obj = MtgjsonSealedProductObject()\n product_obj.name = sealed_product_name\n product_obj.release_date = sealed_product.get(\"release_date\")\n\n try:\n product_obj.category = getattr(\n MtgjsonSealedProductCategory,\n sealed_product.get(\"category\", \"UNKNOWN\").upper(),\n )\n except AttributeError:\n product_obj.category = None\n try:\n product_obj.subtype = getattr(\n MtgjsonSealedProductSubtype,\n sealed_product.get(\"subtype\", \"UNKNOWN\").upper(),\n )\n except AttributeError:\n product_obj.subtype = None\n\n product_obj.raw_purchase_urls = sealed_product.get(\"purchase_url\", {})\n products_list.append(product_obj)\n\n for location, identifier in sealed_product.get(\"identifiers\", {}).items():\n setattr(product_obj.identifiers, location, identifier)\n return products_list", "def _get_highcharts_data(self, data_product_id='', visualization_parameters=None):\n\n # An empty dict is returned in case there is no data in coverage\n empty_hc = []\n\n # error check\n if not data_product_id:\n raise BadRequest(\"The data_product_id parameter is missing\")\n\n use_direct_access = False\n if visualization_parameters == {}:\n visualization_parameters = None\n\n # Extract the parameters. Definitely init first\n query = None\n if visualization_parameters:\n #query = {'parameters':[]}\n query = {}\n # Error check and damage control. Definitely need time\n if 'parameters' in visualization_parameters and len(visualization_parameters['parameters']) > 0:\n if not 'time' in visualization_parameters['parameters']:\n visualization_parameters['parameters'].append('time')\n\n query['parameters'] = visualization_parameters['parameters']\n\n # The times passed from UI are system times so convert them to NTP\n if 'start_time' in visualization_parameters:\n query['start_time'] = int(visualization_parameters['start_time'])\n\n if 'end_time' in visualization_parameters:\n query['end_time'] = int((visualization_parameters['end_time']))\n\n # stride time\n if 'stride_time' in visualization_parameters:\n try:\n query['stride_time'] = int(visualization_parameters['stride_time'])\n except TypeError: \n # There are some (rare) situations where the AJAX request has 'null' in the request\n # Example:\n # {\"query_type\":\"google_dt\",\"parameters\":[],\"start_time\":-2208988800,\"end_time\":-2208988800,\"stride_time\":null,\"use_direct_access\":0}\n query['stride_time'] = 1\n else:\n query['stride_time'] = 1\n\n # direct access parameter\n if 'use_direct_access' in visualization_parameters:\n if (int(visualization_parameters['use_direct_access']) == 1):\n use_direct_access = True\n else:\n use_direct_access = False\n\n # get the dataset_id and objs associated with the data_product. 
Need it to do the data retrieval\n ds_ids,_ = self.clients.resource_registry.find_objects(data_product_id, PRED.hasDataset, RT.Dataset, True)\n\n if ds_ids is None or not ds_ids:\n raise NotFound(\"Could not find dataset associated with data product\")\n stream_def_ids, _ = self.clients.resource_registry.find_objects(data_product_id, PRED.hasStreamDefinition, id_only=True)\n if not stream_def_ids:\n raise NotFound('Could not find stream definition associated with data product')\n stream_def_id = stream_def_ids[0]\n try:\n\n if use_direct_access:\n retrieved_granule = DataRetrieverService.retrieve_oob(ds_ids[0], query=query, delivery_format=stream_def_id)\n else:\n #replay_granule = self.clients.data_retriever.retrieve(ds_ids[0],{'start_time':0,'end_time':2})\n retrieved_granule = self.clients.data_retriever.retrieve(ds_ids[0], query=query, delivery_format=stream_def_id)\n except BadRequest:\n dp = self.container.resource_registry.read(data_product_id)\n log.exception('Problem visualizing data product: %s (%s).\\n_get_highcharts_data(data_product_id=\"%s\", visualization_parameters=%s)', dp.name, data_product_id, data_product_id, visualization_parameters)\n raise\n\n # If thereis no data, return an empty dict\n if retrieved_granule is None:\n return simplejson.dumps(empty_hc)\n\n # send the granule through the transform to get the google datatable\n hc_pdict_id = self.clients.dataset_management.read_parameter_dictionary_by_name('highcharts',id_only=True)\n hc_stream_def = self.clients.pubsub_management.create_stream_definition('HighCharts_out', parameter_dictionary_id=hc_pdict_id)\n\n hc_data_granule = VizTransformHighChartsAlgorithm.execute(retrieved_granule, params=hc_stream_def, config=visualization_parameters)\n\n if hc_data_granule == None:\n return simplejson.dumps(empty_hc)\n\n hc_rdt = RecordDictionaryTool.load_from_granule(hc_data_granule)\n # Now go through this redundant step of converting the hc_data into a non numpy version\n hc_data_np = (get_safe(hc_rdt, \"hc_data\"))[0]\n hc_data = []\n\n for series in hc_data_np:\n s = {}\n for key in series:\n if key == \"data\":\n s[\"data\"] = series[\"data\"].tolist()\n continue\n s[key] = series[key]\n hc_data.append(s)\n\n # return the json version of the table\n return json.dumps(hc_data)", "def bv_data():\n heights = [1000., 1500., 2000., 2500.] * units('m')\n potential_temperatures = [[290., 290., 290., 290.],\n [292., 293., 293., 292.],\n [294., 296., 293., 293.],\n [296., 295., 293., 296.]] * units('K')\n return heights, potential_temperatures", "def inventory_report(prod_list):\n prod_list = list(set(prod_list))\n x = 0\n price = 0\n weight = 0\n flammability = 0\n stealability = 0\n for item in prod_list:\n x += 1\n price += item.price\n weight += item.weight\n flammability += item.flammability\n if stealability != 'Not so stealable...':\n stealability += 1\n\n avg_price = price / x\n avg_weight = weight / x\n avg_flammability = flammability / x\n print(f'There are {x} unique products in this list. 
The average price is {avg_price}, '\n f'average weight is {avg_weight},'\n f'and the average flammability is {avg_flammability}.')\n if stealability >= len(prod_list) / 2:\n print('Many of these items are highly stealable!')\n return avg_price, avg_weight, avg_flammability", "def getDataVariableNames(self, product):\r\n\r\n h = product.getSceneRasterHeight()\r\n\r\n # 10m resolution\r\n if h == 10980:\r\n return self.return_available_variables(product, DATA_VARIABLE_NAMES_10m)\r\n\r\n # 20m resolution\r\n elif h == 5490:\r\n return self.return_available_variables(product, DATA_VARIABLE_NAMES_20m)\r\n\r\n # 20m resolution\r\n elif h == 1830:\r\n return self.return_available_variables(product, DATA_VARIABLE_NAMES_60m)", "def heights(self):\r\n H = self.MNR()\r\n He = []\r\n for h in H:\r\n he=[]\r\n for i in range(0,len(self.p_rho)):\r\n c = 0\r\n for g in h:\r\n if ((i+1) in g):\r\n c = c+1\r\n he.append(c-1)\r\n He.append(sum(he))\r\n return He", "def calculate_soil_demand_probabilities(self):\n soil_damand_probabilities = []\n for y in range(self.controller.image_height_map.size):\n print(\"Calculating soil demand probabilities: Row: \" + str(y))\n row = []\n for x in range(self.controller.image_height_map.size):\n if self.vegetation.soil_demand.id == self.controller.soil_ids_map.image[y][x]:\n probability = 1.0\n else:\n probability = 0.0\n row.append(probability)\n soil_damand_probabilities.append(row)\n return soil_damand_probabilities", "def products_at_locations():\r\n db = get_db()\r\n b_id = session.get(\"user_id\")\r\n locs = check_warehouse()\r\n warehouse = {}\r\n for ids in locs:\r\n l = []\r\n prods = db.execute(\r\n \"SELECT prod_id, qty FROM warehouse where b_id = ? AND loc_id = ?\",\r\n (b_id, ids,),\r\n ).fetchall()\r\n locname = db.execute(\r\n \"SELECT location_name FROM location WHERE location_id = ? AND for_business = ?\",\r\n (ids, b_id,),\r\n ).fetchone()[\"location_name\"]\r\n for data in prods:\r\n prodname = db.execute(\r\n \"SELECT product_name FROM product WHERE for_business = ? AND product_id = ?\",\r\n (b_id, data[\"prod_id\"],),\r\n ).fetchone()[\"product_name\"]\r\n l.append([data[\"prod_id\"] + \" \" + prodname, data[\"qty\"]])\r\n warehouse[locname] = l\r\n return warehouse", "def get_all_data(self, site: str) -> List[Tuple[int, str, str, float]]:\n try:\n self._cursor.execute(f\"SELECT productId,productIdStr,imageUrl,dateAdded FROM {site}\")\n except sqlite3.OperationalError:\n raise sqlite3.OperationalError(f\"Table '{site}' does not exist. 
You can create it by the `create_table_safe` method.\")\n return self._cursor.fetchall()", "def get_products_statistics(loop, products):\n description_placeholder = config[\"placeholders\"][\"description\"]\n img_url_placeholder = config[\"placeholders\"][\"img_url\"]\n\n # collect offers for all products asynchronously\n futures = [get_offers_async(product[\"productId\"]) for product in products]\n all_offers = loop.run_until_complete(asyncio.gather(*futures))\n\n for product, offers in zip(products, all_offers):\n product[\"normalized_title\"] = clean_string(product[\"title\"])\n product[\"min_price\"] = sys.maxsize\n product[\"max_price\"] = 0\n product[\"description\"] = description_placeholder\n product[\"img_url\"] = img_url_placeholder\n\n for offer in offers:\n product[\"min_price\"] = min(product[\"min_price\"], offer[\"price\"])\n product[\"max_price\"] = max(product[\"max_price\"], offer[\"price\"])\n\n if product[\"description\"] == description_placeholder and offer.get(\"description\"):\n product[\"description\"] = offer[\"description\"]\n\n if product[\"img_url\"] == img_url_placeholder and offer.get(\"img_url\"):\n product[\"img_url\"] = offer[\"img_url\"]\n\n return products", "def get_slotted_data(data, maximum):\n\tstep = maximum / len(data)\n\treturn [(int((i + 1) * step), d) for i, d in enumerate(data)]", "def generate_data(self, start_height, end_height, chunk_blocks=10):\n data = []\n\n for height in range(start_height, end_height + 1):\n\n # Supports due this block\n supports = [1E8*s[1] for s in self.supports if s[0] == height]\n\n if height % chunk_blocks == 0:\n if len(supports) > 0:\n # height, max, min, sum, count, unique\n row = [height,\n max(supports), min(supports), sum(supports),\n len(supports), 1]\n data.append(row)\n\n return data", "def inventory_report(products):\n unique_names = []\n total_price = 0\n total_weight = 0\n total_flammability = 0\n num_products = len(products)\n for i in range(num_products):\n if products[i].name not in unique_names:\n unique_names.append(products[i].name) \n total_price += products[i].price\n total_weight += products[i].weight\n total_flammability += products[i].flammability\n mean_price = total_price / num_products\n mean_weight = total_weight / num_products\n mean_flammability = total_flammability / num_products\n print('ACME CORPORATION OFFICIAL INVENTORY REPORT')\n print(f'Unique product names: {len(unique_names)}')\n print(f'Average price: {mean_price}')\n print(f'Average weight {mean_weight}')\n print(f'Average flammabilitiy {mean_flammability}')\n return unique_names, mean_price, mean_weight, mean_flammability", "def executeScriptToGetData():\n ulv = random.randrange(42, 420)\n llv = random.randrange(42, 420)\n urv = random.randrange(42, 420)\n lrv = ulv + llv + urv\n return {\n 'title': random.choice(['Sensors title', None]),\n 'description': random.choice(['Sensors description', None]),\n 'big-value': random.randrange(214, 514),\n 'upper-left-label': 'Critical:',\n 'upper-left-value': ulv,\n 'lower-left-label': 'Major:',\n 'lower-left-value': llv,\n 'upper-right-label': 'Minor:',\n 'upper-right-value': urv,\n 'lower-right-label': 'All:',\n 'lower-right-value': lrv\n }", "def hwt(data):\n sz = len(data)\n i = 0\n res1 = []\n res2 = []\n while i < sz:\n s0 = data[i]\n s1 = data[i+1]\n res1.append((s0+s1)/2.)\n res2.append((s0-s1)/2.)\n i += 2\n return (res1,res2)", "def GetSampleDistribution(self) :\r\n\t\ttry :\r\n\t\t\tSetList = self.SQLCMDs['SetList']\r\n\t\t\tSetIDs = [0,]*len(SetList)\r\n\t\t\tSetDistr = 
[0,]*len(SetList)\r\n\t\t\tfor ii,setname in enumerate(SetList) :\r\n\t\t\t\tself.DB_Cursor.execute(self.SQLCMDs['SelectSetID'],(setname,))\r\n\t\t\t\tSetIDs[ii] = self.DB_Cursor.fetchone()[0]\r\n\t\t\t\tself.DB_Cursor.execute(self.SQLCMDs['SampleSetCount'],(SetIDs[ii],))\r\n\t\t\t\tSetDistr[ii] = float(self.DB_Cursor.fetchone()[0])\r\n\t\texcept Exception as detail :\r\n\t\t\tlogging.info(\"Failed to retrieve sample set distribution: %s\"%detail)\r\n\t\t\tSetIDs = (0,1,2)\r\n\t\t\tSetDistr = (0,0,0)\r\n\t\treturn SetIDs,SetDistr", "def data_quartiles(self):\n data = []\n for graph in self._graphs.values():\n data += graph.data.values()\n data.sort()\n datalen = len(data)\n return(data[0], data[datalen/4], data[datalen/2],\n data[3*datalen/4], data[-1])", "def Render(shelf, **options):\n #low, high = options.pop('low', None), options.pop('high', None)\n steps = options.pop('steps')\n low = steps.min()\n high = steps.max()\n n = len(steps)\n\n print(n)\n\n xs = numpy.linspace(low, high, 1001)\n \n ds = shelf.Density(xs)\n return xs, ds", "def get_data():\n data = [np.array([32.,595.]),\n np.array([30.,599.]),\n np.array([18.,622.]),\n np.array([51.,606.]),\n np.array([38.,578.])]\n return data", "def getDataVariableNames(self, product):\r\n return []", "def _calculate_hash(self, product):\n product_path = self._product_path(product)\n if not product_path:\n raise Error(\"no data available for product '%s' (%s)\" % (product.core.product_name, product.core.uuid))\n\n # Get the product type specific plug-in.\n plugin = self.product_type_plugin(product.core.product_type)\n\n # Determine product hash\n if plugin.use_enclosing_directory:\n paths = [os.path.join(product_path, basename) for basename in os.listdir(product_path)]\n else:\n paths = [product_path]\n return util.product_hash(paths)", "def _hydro_metrics(self) -> list:\n\n return self._minimal() + [\n 'fdc_flv', 'fdc_fhv',\n 'kge', 'kge_np', 'kge_mod', 'kge_bound', 'kgeprime_c2m', 'kgenp_bound',\n 'nse', 'nse_alpha', 'nse_beta', 'nse_mod', 'nse_bound']", "def information(counts: list) -> list:\n heights = []\n # magic\n e = (1 / math.log(2)) * ((4 - 1) / (2 * sum([counts[1][base] for base in \"ACGT\"])))\n for column_count in counts:\n relative_frqs = {base: column_count[base] / sum(column_count.values()) for base in \"ACGT\"}\n H = -1 * sum([relative_frqs[base] * math.log2(relative_frqs[base]) for base in \"ACGT\"])\n R = math.log2(4) - (H + e)\n heights.append({base: relative_frqs[base] * R for base in \"ACGT\"})\n # end magic\n return heights", "def get_pids_formatted(self, site: str) -> List[str]:\n try:\n self._cursor.execute(f\"SELECT productIdStr FROM {site}\")\n except sqlite3.OperationalError:\n raise sqlite3.OperationalError(f\"Table '{site}' does not exist. 
You can create it by the `create_table_safe` method.\")\n rows: List[Tuple[int]] = self._cursor.fetchall()\n return [row[0] for row in rows]", "def get_data(dataset, **kwargs):\n age_df = load_age_sample_from_mcmc_chains(dataset, **kwargs)\n hr_df = load_hr()\n age_df, hr_df = clean_data(age_df, hr_df)\n snids = age_df.index.unique().tolist()\n return snids, age_df, hr_df", "def calculate_sh(self):\n if self.data.get('Specific_Humidity') is None:\n if self.data.get('Mixing_Ratio') is None:\n raise KeyError('Calculate mixing ratio first!')\n else:\n w_kg = self.data['Mixing_Ratio'] / 1000\n self.data['Specific_Humidity'] = (w_kg / (w_kg + 1)) * 1000", "def get_data():\n \n data = {\n 'loadAvg1Min': 0, #load average 1 min\n 'loadAvg5Min': 0, #load average 5 min\n 'loadAvg15Min': 0, #load average 15 min\n 'cpuUsage': [], #usage distribution for each cpu\n 'memUsage': {}, #memory usage \n 'networkReads': [], #network reads per second for each interface\n 'networkWrites': [], #network writes per second for each interface\n 'diskReads': [], #disk reads per second for each disk\n 'diskWrites': [] #disk writes per second for each disk\n }\n \n #metrics that doesnt need sampling\n data['loadAvg1Min'], data['loadAvg5Min'], data['loadAvg15Min'] = get_load_avg() #get load avg\n data['memUsage'].update(get_mem_usage()) #memory usage\n \n #metrics that needs sampling\n #they are written as a generator so that we can sleep before collection again\n sampling_duration = 1\n cpu_usage_gen = get_cpu_usage(sampling_duration) #generator for cpu usage\n net_rw_gen = get_net_rw(sampling_duration) #generator for network read write\n disk_rw_gen = get_disk_rw(sampling_duration) #generator for disk read write\n \n while 1: #now start sampling, whenever we have walid data, we can exit the loop\n cpu_usage = next(cpu_usage_gen)\n net_rw = next(net_rw_gen)\n disk_rw = next(disk_rw_gen)\n \n if cpu_usage or net_rw or disk_rw: #we have valid data\n break\n \n time.sleep(sampling_duration)\n \n #append cpu usage for each cpu core\n for cpu, usage in cpu_usage.items():\n data['cpuUsage'].append({'name': cpu, 'value': usage})\n \n #append network read and write for each interface\n for interface, rw in net_rw.items():\n data['networkReads'].append({'name': interface, 'value': rw['reads']})\n data['networkWrites'].append({'name': interface, 'value': rw['writes']}) \n \n #append disk read and write for each logical disk\n for device, rw in disk_rw.items():\n data['diskReads'].append({'name': device, 'value': rw['reads']})\n data['diskWrites'].append({'name': device, 'value': rw['writes']})\n \n return data", "def inventory_report(self):\n mean_price = sum(Product.price for Product in sample) / len(sample)\n mean_weight = sum(Product.weight for Product in sample) / len(sample)\n mean_flam = sum(Product.flammability for Product in sample) / len(sample)\n return 'Unique Product Names: ', sample.unique, '/n Average Price: ', mean_price, \n '/n Average Weight: ', mean_weight, '/n Average Flammability: ', mean_flam", "def getInfoVariableNames(self, product):\r\n\r\n h = product.getSceneRasterHeight()\r\n\r\n # 10m resolution\r\n if h == 10980:\r\n return self.return_available_variables(product, INFO_VARIABLE_NAMES_10m)\r\n\r\n # 20m resolution\r\n elif h == 5490:\r\n return self.return_available_variables(product, INFO_VARIABLE_NAMES_20m)\r\n\r\n # 20m resolution\r\n elif h == 1830:\r\n return self.return_available_variables(product, INFO_VARIABLE_NAMES_60m)\r\n return []" ]
[ "0.6176316", "0.54340583", "0.51967186", "0.5194723", "0.51105845", "0.5095346", "0.50621283", "0.49846748", "0.49410042", "0.49322653", "0.48972636", "0.4896876", "0.48816198", "0.48781964", "0.48656213", "0.48565215", "0.4841422", "0.4831362", "0.4813755", "0.48079696", "0.48063248", "0.4805139", "0.48012534", "0.47822034", "0.47729573", "0.4772173", "0.47426125", "0.4740697", "0.47388482", "0.47257054" ]
0.7926699
0
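For illustration only, a standalone sketch of the height-cleaning steps described in the record above (unique values, dropped empties, float cast, sort); the sample input lists are invented for the example and are not taken from the dataset.

# Standalone sketch of the sampling-height cleanup described above.
# The raw_heights inputs are invented examples, not data from the record.
def clean_sampling_heights(raw_heights):
    unique = list(dict.fromkeys(raw_heights))      # unique values, first occurrence kept
    non_empty = [h for h in unique if h]           # drop empty strings / None
    if not non_empty:
        return ['']                                # placeholder when nothing is defined
    return sorted(float(h) for h in non_empty)     # cast the strings to float and sort

print(clean_sampling_heights(['50.0', '', '10.0', '50.0']))  # [10.0, 50.0]
print(clean_sampling_heights(['', '']))                      # ['']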
Function to match attributes from the converter to the instrument's attributes
def _match_type_object_attributes_to_instrument_attributes(self, converted_data):
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testattributes(self):\n for attr in ('ST', 'DX', 'IQ', 'MA', 'Dam', 'Hit'):\n AttributeAbility([attr,])", "def testattributes(self):\n for attr in AmuletAbility.attributes:\n a = AmuletAbility('Attribute', attr=attr)\n self.assert_(attr in str(a))\n self.assertEqual(a.attribute, attr)\n self.assertTrue(isinstance(a.AC, int))\n self.assertTrue(isinstance(a.description(), str))", "def proc_attr(inp):\n dic = {}\n for att in inp.attrs.keys():\n if getattr(inp.attrs[att], \"dtype\", None) is None:\n dic[att] = inp.attrs[att]\n elif inp.attrs[att].dtype.char == 'S':\n dic[att] = [\n x.strip() for x in inp.attrs[att].tostring().decode('ascii').split(',')\n ]\n else:\n dic[att] = (\n inp.attrs[att][0]\n if isinstance(inp.attrs[att],np.ndarray) and\n inp.attrs[att].size==1\n else inp.attrs[att]\n )\n return dic\n pass", "def match_attribute_names(*arrays):\n rep = arrays[0].sdbtype.full_rep\n result = [arrays[0]]\n for a in arrays[1:]:\n renames = []\n reserved = list(a.att_names) # reserved att names\n for r in a.sdbtype.full_rep:\n nm = r[0]\n if _att_match(rep, r):\n reserved.append(nm)\n continue\n newname = _find_rename(rep, r, reserved)\n if newname is None:\n raise ValueError(\"Cannot rename %s in %s\" % (nm, a))\n renames.extend((nm, newname))\n reserved.append(newname)\n if renames:\n a = a.attribute_rename(a, *renames)\n result.append(a)\n return tuple(result)", "def _get_representation_attrs(frame, units, kwargs):\n frame_attr_names = frame.representation_component_names.keys()\n repr_attr_classes = frame.representation_type.attr_classes.values()\n\n valid_kwargs = {}\n for frame_attr_name, repr_attr_class, unit in zip(\n frame_attr_names, repr_attr_classes, units\n ):\n value = kwargs.pop(frame_attr_name, None)\n if value is not None:\n try:\n valid_kwargs[frame_attr_name] = repr_attr_class(value, unit=unit)\n except u.UnitConversionError as err:\n error_message = (\n f\"Unit '{unit}' ({unit.physical_type}) could not be applied to\"\n f\" '{frame_attr_name}'. This can occur when passing units for some\"\n \" coordinate components when other components are specified as\"\n \" Quantity objects. Either pass a list of units for all components\"\n \" (and unit-less coordinate data), or pass Quantities for all\"\n \" components.\"\n )\n raise u.UnitConversionError(error_message) from err\n\n # also check the differentials. 
They aren't included in the units keyword,\n # so we only look for the names.\n\n differential_type = frame.differential_type\n if differential_type is not None:\n for frame_name, repr_name in frame.get_representation_component_names(\n \"s\"\n ).items():\n diff_attr_class = differential_type.attr_classes[repr_name]\n value = kwargs.pop(frame_name, None)\n if value is not None:\n valid_kwargs[frame_name] = diff_attr_class(value)\n\n return valid_kwargs", "def test_get_attributes(self):\n pass", "def test_adding_attributes(self):\n self.assertEqual(self.compound.get_attribute(\"What\"), \"Everything\")", "def _check_attrs(ds_in, dset_attrs):\n attrs = dset_attrs['attrs']\n for key, value in attrs.items():\n src_value = ds_in.attrs.get(key)\n if src_value:\n if isinstance(src_value, bytes):\n src_value = src_value.decode('utf-8')\n\n if src_value != value:\n msg = ('Attr {} value ({}) does not match '\n 'source value ({}), using source value.'\n .format(key, value, src_value))\n logger.warning(msg)\n warn(msg)\n\n dset_attrs['attrs'][key] = src_value\n\n return dset_attrs", "def getter_attributes_test(name, from_xml, from_dict, result):\n assert getattr(from_xml, name) == result\n assert getattr(from_dict, name) == result", "def test_init(attributes):\n namespace = Namespace(miriam_id=\"MIR:00000022\", prefix=\"go\", pattern=r\"^GO:\\d{7}$\")\n instance = CompartmentAnnotation(namespace=namespace, **attributes)\n for attr, value in attributes.items():\n assert getattr(instance, attr) == value", "def test_attributes(self):\n comp = str(self.test1)\n attr = ['BaseModel', 'id', 'created_at', 'updated_at']\n counter = 0\n for a in attr:\n if a in attr:\n counter += 1\n self.assertTrue(counter == 4)", "def test_attributes(self):\n attributes = {}\n header = BDFHeader.from_path(TestData.bdf_256)\n\n for key in BDFHeader.MAPPING.keys():\n attributes[key] = getattr(header, key)\n assert attributes == {\n \"identification_code\": b\"\\xffBIOSEMI\",\n \"subject_identification\": \"\",\n \"local_recording_identification\": \"\",\n \"start_date\": datetime.date(year=2001, month=11, day=5),\n \"start_time\": time.strptime(\n \"19:38:42\",\n \"%H:%M:%S\",\n ),\n \"bytes_in_header\": 4608,\n \"data_format_version\": \"24BIT\",\n \"nb_data_records\": 60,\n \"data_duration\": 1,\n \"nb_channels\": 17,\n \"channel_labels\": [\n \"A1\",\n \"A2\",\n \"A3\",\n \"A4\",\n \"A5\",\n \"A6\",\n \"A7\",\n \"A8\",\n \"A9\",\n \"A10\",\n \"A11\",\n \"A12\",\n \"A13\",\n \"A14\",\n \"A15\",\n \"A16\",\n \"Status\",\n ],\n \"transducer_types\": [\n \"ActiveElectrode,pintype\",\n \"ActiveElectrode,pintype\",\n \"ActiveElectrode,pintype\",\n \"ActiveElectrode,pintype\",\n \"ActiveElectrode,pintype\",\n \"ActiveElectrode,pintype\",\n \"ActiveElectrode,pintype\",\n \"ActiveElectrode,pintype\",\n \"ActiveElectrode,pintype\",\n \"ActiveElectrode,pintype\",\n \"ActiveElectrode,pintype\",\n \"ActiveElectrode,pintype\",\n \"ActiveElectrode,pintype\",\n \"ActiveElectrode,pintype\",\n \"ActiveElectrode,pintype\",\n \"ActiveElectrode,pintype\",\n \"TriggersandStatus\",\n ],\n \"dimensions\": [\n \"uV\",\n \"uV\",\n \"uV\",\n \"uV\",\n \"uV\",\n \"uV\",\n \"uV\",\n \"uV\",\n \"uV\",\n \"uV\",\n \"uV\",\n \"uV\",\n \"uV\",\n \"uV\",\n \"uV\",\n \"uV\",\n \"Boolean\",\n ],\n \"min_dimensions\": [\n -262144,\n -262144,\n -262144,\n -262144,\n -262144,\n -262144,\n -262144,\n -262144,\n -262144,\n -262144,\n -262144,\n -262144,\n -262144,\n -262144,\n -262144,\n -262144,\n -8388608,\n ],\n \"max_dimensions\": [\n 262144,\n 262144,\n 
262144,\n 262144,\n 262144,\n 262144,\n 262144,\n 262144,\n 262144,\n 262144,\n 262144,\n 262144,\n 262144,\n 262144,\n 262144,\n 262144,\n 8388607,\n ],\n \"min_digital\": [\n -8388608,\n -8388608,\n -8388608,\n -8388608,\n -8388608,\n -8388608,\n -8388608,\n -8388608,\n -8388608,\n -8388608,\n -8388608,\n -8388608,\n -8388608,\n -8388608,\n -8388608,\n -8388608,\n -8388608,\n ],\n \"max_digital\": [\n 8388607,\n 8388607,\n 8388607,\n 8388607,\n 8388607,\n 8388607,\n 8388607,\n 8388607,\n 8388607,\n 8388607,\n 8388607,\n 8388607,\n 8388607,\n 8388607,\n 8388607,\n 8388607,\n 8388607,\n ],\n \"prefiltering\": [\n \"HP:DC;LP:113Hz\",\n \"HP:DC;LP:113Hz\",\n \"HP:DC;LP:113Hz\",\n \"HP:DC;LP:113Hz\",\n \"HP:DC;LP:113Hz\",\n \"HP:DC;LP:113Hz\",\n \"HP:DC;LP:113Hz\",\n \"HP:DC;LP:113Hz\",\n \"HP:DC;LP:113Hz\",\n \"HP:DC;LP:113Hz\",\n \"HP:DC;LP:113Hz\",\n \"HP:DC;LP:113Hz\",\n \"HP:DC;LP:113Hz\",\n \"HP:DC;LP:113Hz\",\n \"HP:DC;LP:113Hz\",\n \"HP:DC;LP:113Hz\",\n \"Nofiltering\",\n ],\n \"samples_per_record\": [\n 256,\n 256,\n 256,\n 256,\n 256,\n 256,\n 256,\n 256,\n 256,\n 256,\n 256,\n 256,\n 256,\n 256,\n 256,\n 256,\n 256,\n ],\n \"reserved\": [\n \"Reserved\",\n \"Reserved\",\n \"Reserved\",\n \"Reserved\",\n \"Reserved\",\n \"Reserved\",\n \"Reserved\",\n \"Reserved\",\n \"Reserved\",\n \"Reserved\",\n \"Reserved\",\n \"Reserved\",\n \"Reserved\",\n \"Reserved\",\n \"Reserved\",\n \"Reserved\",\n \"Reserved\",\n ],\n }", "def test_attributes(self):\n attributes = storage.attributes()[\"Review\"]\n b = Review()\n for k, v in attributes.items():\n self.assertTrue(hasattr(b, k))\n self.assertEqual(type(getattr(b, k, None)), v)", "def buildMatch(self, att, state):\n att_info = cons.env.format_data.attribute_info[att]\n #-------------------------------------------------------\n # CONTINUOUS ATTRIBUTE\n #-------------------------------------------------------\n if att_info[0]:\n attribute_range = att_info[1][1] - att_info[1][0]\n range_radius = random.randint(25,75)*0.01*attribute_range / 2.0 #Continuous initialization domain radius.\n low = state[att] - range_radius\n high = state[att] + range_radius\n condition_list = [low,high] #ALKR Representation, Initialization centered around training instance with a range between 25 and 75% of the domain size.\n #-------------------------------------------------------\n # DISCRETE ATTRIBUTE\n #-------------------------------------------------------\n else:\n condition_list = state[att] #State already formatted like GABIL in DataManagement\n\n return condition_list", "def prepare_attributes(attributes):\n new_attributes = []\n for attribute in attributes:\n new_attributes.append(\"e_\" + attribute)\n return new_attributes", "def __convertAttributes__(xml_source):\n attributes = {}\n for attrName, attrValue in xml_source.attributes.items():\n attributes[attrName] = attrValue\n return attributes", "def _arg_attr(identifier, attr1, attr2):\n return attr1 if identifier.startswith('t') else attr2", "def selector(self, dataset, attributes, target_attr):\n\n best_gain = 0.0\n best_attr = None\n \n for attr in attributes:\n gain = self.splitmetric(dataset, attr, target_attr)\n if (gain >= best_gain and attr != target_attr):\n best_gain = gain\n best_attr = attr\n \n return best_attr", "def test_required_attributes(self):\n\n required_attributes = ('ID', )\n\n for attribute in required_attributes:\n self.assertIn(attribute, dir(DatasetLoader_Jakob2019))", "def _instrument_estimator_attribute(\n self, estimator: BaseEstimator, attributes: Attributes = None\n 
):\n attribs = self.recurse_attribs.get(estimator.__class__, [])\n for attrib in attribs:\n attrib_value = getattr(estimator, attrib)\n if isinstance(attrib_value, Sequence):\n for value in attrib_value:\n self.instrument_estimator(\n estimator=value, attributes=attributes\n )\n elif isinstance(attrib_value, MutableMapping):\n for value in attrib_value.values():\n self.instrument_estimator(\n estimator=value, attributes=attributes\n )\n else:\n self.instrument_estimator(\n estimator=attrib_value, attributes=attributes\n )", "def UseAttribute(self) -> bool:", "def get_image_attributes(self, element):", "def _attributes(self, ext1, ext2):\n errorlist = []\n for attr in ['data', 'mask', 'variance', 'OBJMASK', 'OBJCAT']:\n attr1 = getattr(ext1, attr, None)\n attr2 = getattr(ext2, attr, None)\n if (attr1 is None) ^ (attr2 is None):\n errorlist.append(f'Attribute error for {attr}: '\n f'{attr1 is not None} v {attr2 is not None}')\n elif attr1 is not None:\n if isinstance(attr1, Table):\n if len(attr1) != len(attr2):\n errorlist.append(f'attr lengths differ: '\n f'{len(attr1)} v {len(attr2)}')\n else: # everything else is pixel-like\n if attr1.dtype.name != attr2.dtype.name:\n errorlist.append(f'Datatype mismatch for {attr}: '\n f'{attr1.dtype} v {attr2.dtype}')\n if attr1.shape != attr2.shape:\n errorlist.append(f'Shape mismatch for {attr}: '\n f'{attr1.shape} v {attr2.shape}')\n if 'int' in attr1.dtype.name:\n try:\n assert_most_equal(attr1, attr2, max_miss=self.max_miss)\n except AssertionError as e:\n errorlist.append(f'Inequality for {attr}: '+str(e))\n else:\n try:\n assert_most_close(attr1, attr2, max_miss=self.max_miss,\n rtol=self.rtol, atol=self.atol)\n except AssertionError as e:\n errorlist.append(f'Mismatch for {attr}: '+str(e))\n return errorlist", "def testAttributes(self):\n ddict = {\n \"group\": {\"dataset\": 100, \"@group_attr1\": 10},\n \"dataset\": 200,\n \"@root_attr\": 11,\n \"dataset@dataset_attr\": \"12\",\n \"group@group_attr2\": 13,\n }\n dictdump.dicttonx(ddict, self.h5_fname)\n ddict = dictdump.nxtodict(self.h5_fname, include_attributes=True)\n self.assertEqual(ddict[\"group\"][\"@group_attr1\"], 10)\n self.assertEqual(ddict[\"@root_attr\"], 11)\n self.assertEqual(ddict[\"dataset@dataset_attr\"], \"12\")\n self.assertEqual(ddict[\"group\"][\"@group_attr2\"], 13)", "def is_attribute(tag, kmip_version=None):\n kmip_1_0_attribute_tags = [\n Tags.UNIQUE_IDENTIFIER,\n Tags.NAME,\n Tags.OBJECT_TYPE,\n Tags.CRYPTOGRAPHIC_ALGORITHM,\n Tags.CRYPTOGRAPHIC_LENGTH,\n Tags.CRYPTOGRAPHIC_PARAMETERS,\n Tags.CRYPTOGRAPHIC_DOMAIN_PARAMETERS,\n Tags.CERTIFICATE_TYPE,\n Tags.CERTIFICATE_IDENTIFIER,\n Tags.CERTIFICATE_SUBJECT,\n Tags.CERTIFICATE_ISSUER,\n Tags.DIGEST,\n Tags.OPERATION_POLICY_NAME,\n Tags.CRYPTOGRAPHIC_USAGE_MASK,\n Tags.LEASE_TIME,\n Tags.USAGE_LIMITS,\n Tags.STATE,\n Tags.INITIAL_DATE,\n Tags.ACTIVATION_DATE,\n Tags.PROCESS_START_DATE,\n Tags.PROTECT_STOP_DATE,\n Tags.DEACTIVATION_DATE,\n Tags.DESTROY_DATE,\n Tags.COMPROMISE_OCCURRENCE_DATE,\n Tags.COMPROMISE_DATE,\n Tags.REVOCATION_REASON,\n Tags.ARCHIVE_DATE,\n Tags.OBJECT_GROUP,\n Tags.LINK,\n Tags.APPLICATION_SPECIFIC_INFORMATION,\n Tags.CONTACT_INFORMATION,\n Tags.LAST_CHANGE_DATE,\n Tags.CUSTOM_ATTRIBUTE\n ]\n kmip_1_1_attribute_tags = copy.deepcopy(kmip_1_0_attribute_tags) + [\n Tags.CERTIFICATE_LENGTH,\n Tags.X_509_CERTIFICATE_IDENTIFIER,\n Tags.X_509_CERTIFICATE_SUBJECT,\n Tags.X_509_CERTIFICATE_ISSUER,\n Tags.DIGITAL_SIGNATURE_ALGORITHM,\n Tags.FRESH\n ]\n kmip_1_2_attribute_tags = 
copy.deepcopy(kmip_1_1_attribute_tags) + [\n Tags.ALTERNATIVE_NAME,\n Tags.KEY_VALUE_PRESENT,\n Tags.KEY_VALUE_LOCATION,\n Tags.ORIGINAL_CREATION_DATE\n ]\n kmip_1_3_attribute_tags = copy.deepcopy(kmip_1_2_attribute_tags) + [\n Tags.RANDOM_NUMBER_GENERATOR\n ]\n kmip_1_4_attribute_tags = copy.deepcopy(kmip_1_3_attribute_tags) + [\n Tags.PKCS12_FRIENDLY_NAME,\n Tags.DESCRIPTION,\n Tags.COMMENT,\n Tags.SENSITIVE,\n Tags.ALWAYS_SENSITIVE,\n Tags.EXTRACTABLE,\n Tags.NEVER_EXTRACTABLE\n ]\n kmip_2_0_attribute_tags = copy.deepcopy(kmip_1_4_attribute_tags) + [\n Tags.CERTIFICATE_SUBJECT_CN,\n Tags.CERTIFICATE_SUBJECT_O,\n Tags.CERTIFICATE_SUBJECT_OU,\n Tags.CERTIFICATE_SUBJECT_EMAIL,\n Tags.CERTIFICATE_SUBJECT_C,\n Tags.CERTIFICATE_SUBJECT_ST,\n Tags.CERTIFICATE_SUBJECT_L,\n Tags.CERTIFICATE_SUBJECT_UID,\n Tags.CERTIFICATE_SUBJECT_SERIAL_NUMBER,\n Tags.CERTIFICATE_SUBJECT_TITLE,\n Tags.CERTIFICATE_SUBJECT_DC,\n Tags.CERTIFICATE_SUBJECT_DN_QUALIFIER,\n Tags.CERTIFICATE_ISSUER_CN,\n Tags.CERTIFICATE_ISSUER_O,\n Tags.CERTIFICATE_ISSUER_OU,\n Tags.CERTIFICATE_ISSUER_EMAIL,\n Tags.CERTIFICATE_ISSUER_C,\n Tags.CERTIFICATE_ISSUER_ST,\n Tags.CERTIFICATE_ISSUER_L,\n Tags.CERTIFICATE_ISSUER_UID,\n Tags.CERTIFICATE_ISSUER_SERIAL_NUMBER,\n Tags.CERTIFICATE_ISSUER_TITLE,\n Tags.CERTIFICATE_ISSUER_DC,\n Tags.CERTIFICATE_ISSUER_DN_QUALIFIER,\n Tags.KEY_FORMAT_TYPE,\n Tags.NIST_KEY_TYPE,\n Tags.OPAQUE_DATA_TYPE,\n Tags.PROTECTION_LEVEL,\n Tags.PROTECTION_PERIOD,\n Tags.PROTECTION_STORAGE_MASK,\n Tags.QUANTUM_SAFE,\n Tags.SHORT_UNIQUE_IDENTIFIER,\n Tags.ATTRIBUTE\n ]\n kmip_2_0_attribute_tags.remove(Tags.CERTIFICATE_IDENTIFIER)\n kmip_2_0_attribute_tags.remove(Tags.CERTIFICATE_SUBJECT)\n kmip_2_0_attribute_tags.remove(Tags.CERTIFICATE_ISSUER)\n kmip_2_0_attribute_tags.remove(Tags.OPERATION_POLICY_NAME)\n kmip_2_0_attribute_tags.remove(Tags.CUSTOM_ATTRIBUTE)\n\n if kmip_version == KMIPVersion.KMIP_1_0:\n return tag in kmip_1_0_attribute_tags\n elif kmip_version == KMIPVersion.KMIP_1_1:\n return tag in kmip_1_1_attribute_tags\n elif kmip_version == KMIPVersion.KMIP_1_2:\n return tag in kmip_1_2_attribute_tags\n elif kmip_version == KMIPVersion.KMIP_1_3:\n return tag in kmip_1_3_attribute_tags\n elif kmip_version == KMIPVersion.KMIP_1_4:\n return tag in kmip_1_4_attribute_tags\n elif kmip_version == KMIPVersion.KMIP_2_0:\n return tag in kmip_2_0_attribute_tags\n else:\n all_attribute_tags = set(\n kmip_1_0_attribute_tags +\n kmip_1_1_attribute_tags +\n kmip_1_2_attribute_tags +\n kmip_1_3_attribute_tags +\n kmip_1_4_attribute_tags +\n kmip_2_0_attribute_tags\n )\n return tag in all_attribute_tags", "def load_attributes():\n\n # <attribute_id> <attribute_name>\n attributes_file = open(PROJECT_ROOT +'/data/attributes.txt').readlines()\n attributes_file = [i.strip().split() for i in attributes_file]\n\n # <certainty_id> <certainty_name>\n certainties_file = open(PROJECT_ROOT +'/data/CUB_200_2011/attributes/certainties.txt').readlines()\n certainties_file = [i.strip().split() for i in certainties_file]\n\n # <image_id> <attribute_id> <is_present> <certainty_id> <time>\n labels_file = open(PROJECT_ROOT +'/data/CUB_200_2011/attributes/image_attribute_labels.txt').readlines()\n labels_file = [i.strip().split() for i in labels_file]\n\n attribute_ids = {}\n for i in attributes_file:\n attribute_ids[i[1]] = int(i[0])\n\n certainty_ids = {}\n for i in certainties_file:\n certainty_ids[i[1]] = int(i[0])\n\n label_ids = {}\n for i in labels_file:\n label_ids[(int(i[0]), int(i[1]))] = list(map(lambda x:int(float(x)), i[2:]))\n\n 
return attribute_ids, certainty_ids, labels_file, label_ids", "def _get_attribute_dic(self, attrs):\n attr_dic = {}\n for attr_pair in attrs:\n attr_dic[attr_pair[0]] = attr_pair[1]\n return attr_dic", "def parse_attribute(self,attr,table):\n \n attr_dict = {}\n \n for child in attr:\n name = child.attrib['name']\n \n #attributes can either have string or bool as the value we need\n #checking for boolean\n if 'val' in child[0].attrib:\n val = child[0].attrib['val']\n \n if val == 'true':\n flag = True\n else:\n flag = False\n \n attr_dict[name] = flag\n \n #else it's string stroed as text\n else:\n attr_dict[name] = super(DatabaseUmlParser,self).stripHashtags(child[0].text)\n \n attr = db_attribute.DbAttribute(table,attr_dict)\n \n return attr", "def attribute(self, data, model, model_name):", "def scan_attrs(self, prob):\r\n \r\n # Keep track of the names of objects in the current problem\r\n # (useful to determine if attributes are referring to other objects)\r\n object_names = []\r\n for fig in prob['figures'].values():\r\n for object_name in fig.keys():\r\n if not object_name in object_names:\r\n object_names.append(object_name)\r\n \r\n if not 'attributes' in self.kb:\r\n self.kb['attributes'] = {}\r\n \r\n attrs = self.kb['attributes']\r\n \r\n # process the attributes in the current problem\r\n for fig in prob['figures'].values():\r\n for obj in fig.values():\r\n for attr, subvalues in obj.items():\r\n if not attr in attrs:\r\n attrs[attr] = {'values': [],\r\n 'relative': 'unknown',\r\n 'multi': 'unknown',\r\n 'count': 0}\r\n data = attrs[attr]\r\n \r\n data['count'] += 1\r\n \r\n if data['multi'] == 'unknown':\r\n if len(subvalues) > 1:\r\n data['multi'] = 'yes'\r\n else:\r\n data['multi'] = 'no'\r\n else:\r\n if len(subvalues) > 1 and data['multi'] == 'no':\r\n data['multi'] = 'sometimes'\r\n elif len(subvalues) == 1 and data['multi'] == 'yes':\r\n data['multi'] = 'sometimes'\r\n \r\n # process each subvalue\r\n values = data['values']\r\n for subvalue in subvalues:\r\n # check to see if this attr refers to other objects\r\n relative = False\r\n if subvalue in object_names:\r\n relative = True\r\n if data['relative'] == 'unknown':\r\n data['relative'] = 'yes'\r\n elif data['relative' ] == 'no':\r\n data['relative'] = 'sometimes'\r\n else:\r\n if data['relative'] == 'unknown':\r\n data['relative'] = 'no'\r\n elif data['relative'] == 'yes':\r\n data['relative'] = 'sometimes'\r\n \r\n # add this to the seen values if it isn't already\r\n # in there and it isn't a relative value\r\n if not relative and not subvalue in values:\r\n values.append(subvalue)\r\n \r\n # update the kb's attribute priorities based upon frequency of encounters\r\n \r\n sorted_attrs = sorted(attrs.items(), key=lambda attr: attr[1]['count'], reverse=True)\r\n priorities = self.kb['attribute_priorities'] = []\r\n for attr in sorted_attrs:\r\n priorities.append(attr[0])" ]
[ "0.6252583", "0.6062729", "0.56403756", "0.5609397", "0.5576679", "0.55505896", "0.547663", "0.547364", "0.5460027", "0.54585034", "0.54178935", "0.5402644", "0.53940463", "0.5393521", "0.5341078", "0.5337362", "0.5318856", "0.52873087", "0.5284203", "0.5272638", "0.5266676", "0.5245445", "0.52277523", "0.5206858", "0.5206486", "0.51995504", "0.5197951", "0.519501", "0.51949894", "0.51947594" ]
0.67192864
0
Get meta data from single cdf file. So far only manually for 'SOHO_ERNE-HED_L2-1MIN' and 'SOHO_ERNE-LED_L2-1MIN'
def _get_metadata(dataset, path_to_cdf): metadata = [] cdf = cdflib.CDF(path_to_cdf) if dataset=='SOHO_ERNE-HED_L2-1MIN' or dataset=='SOHO_ERNE-LED_L2-1MIN': if dataset=='SOHO_ERNE-HED_L2-1MIN': m = 'H' if dataset=='SOHO_ERNE-LED_L2-1MIN': m = 'L' metadata = {'He_E_label': cdf.varget('He_E_label')[0], 'He_energy': cdf.varget('He_energy'), 'He_energy_delta': cdf.varget('He_energy_delta'), f'A{m}_LABL': cdf.varattsget(f'A{m}')['LABLAXIS'], f'A{m}_UNITS': cdf.varattsget(f'A{m}')['UNITS'], f'A{m}_FILLVAL': cdf.varattsget(f'A{m}')['FILLVAL'], 'P_E_label': cdf.varget('P_E_label')[0], 'P_energy': cdf.varget('P_energy'), 'P_energy_delta': cdf.varget('P_energy_delta'), f'P{m}_LABL': cdf.varattsget(f'P{m}')['LABLAXIS'], f'P{m}_UNITS': cdf.varattsget(f'P{m}')['UNITS'], f'P{m}_FILLVAL': cdf.varattsget(f'P{m}')['FILLVAL'], } channels_dict_df_He = pd.DataFrame(cdf.varget('He_E_label')[0], columns=['ch_strings']) channels_dict_df_He['lower_E'] = cdf.varget("He_energy")-cdf.varget("He_energy_delta") channels_dict_df_He['upper_E'] = cdf.varget("He_energy")+cdf.varget("He_energy_delta") channels_dict_df_He['DE'] = cdf.varget("He_energy_delta") # channels_dict_df_He['mean_E'] = np.sqrt(channels_dict_df_He['upper_E'] * channels_dict_df_He['lower_E']) channels_dict_df_He['mean_E'] = cdf.varget("He_energy") channels_dict_df_p = pd.DataFrame(cdf.varget('P_E_label')[0], columns=['ch_strings']) channels_dict_df_p['lower_E'] = cdf.varget("P_energy")-cdf.varget("P_energy_delta") channels_dict_df_p['upper_E'] = cdf.varget("P_energy")+cdf.varget("P_energy_delta") channels_dict_df_p['DE'] = cdf.varget("P_energy_delta") # channels_dict_df_p['mean_E'] = np.sqrt(channels_dict_df_p['upper_E'] * channels_dict_df_p['lower_E']) channels_dict_df_p['mean_E'] = cdf.varget("P_energy") metadata.update({'channels_dict_df_He': channels_dict_df_He}) metadata.update({'channels_dict_df_p': channels_dict_df_p}) return metadata
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def metadata(filename):\n import numpy as np\n import pandas as pd\n\n infos = \"\"\"IGRAID 1- 11 Character\nWMOID 13- 17 Integer\nNAME 19- 48 Character\nNAMFLAG 50- 50 Character\nLATITUDE 52- 60 Real\nLATFLAG 62- 62 Character\nLONGITUDE 64- 72 Real\nLONFLAG 74- 74 Character\nELEVATION 76- 81 Real\nELVFLAG 83- 83 Character\nYEAR 85- 88 Integer\nMONTH 90- 91 Integer\nDAY 93- 94 Integer\nHOUR 96- 97 Integer\nDATEIND 99- 99 Integer\nEVENT 101-119 Character\nALTIND 121-122 Character\nBEFINFO 124-163 Character\nBEFFLAG 164-164 Character\nLINK 166-167 Character\nAFTINFO 169-208 Character\nAFTFLAG 209-209 Character\nREFERENCE 211-235 Character\nCOMMENT 236-315 Character\nUPDCOM 316-346 Character\nUPDDATE 348-354 Character\n\"\"\"\n\n colspecs = []\n header = []\n types = {}\n for iline in infos.splitlines():\n if iline == '':\n continue\n ih = iline[0:11].strip().lower()\n header.append(ih)\n ii = int(iline[13:16]) - 1\n ij = int(iline[17:20])\n colspecs.append((ii, ij))\n it = iline[22:].strip()\n if it == 'Character':\n it = 'str'\n\n elif it == 'Real':\n it = 'float'\n\n else:\n it = 'int'\n\n types[ih] = it\n\n data = pd.read_fwf(filename, colspecs=colspecs, header=None, dtype=types, names=header)\n data = data.replace('nan', '')\n data['date'] = pd.to_datetime((data.year * 1000000 +\n np.where(data.month.values == 99, 6, data.month.values) * 10000 +\n np.where(data.day.values == 99, 15, data.day.values) * 100 +\n np.where(data.hour.values == 99, 0, data.hour.values)).apply(str), format='%Y%m%d%H')\n return data", "def extract_metadata(rawfile,codeversions={}):\r\n import datetime\r\n add_standard_metadata(rawfile)\r\n # get monochromator-related information\r\n mom = average_metadata(rawfile['$entry/instrument/crystal/omega'])\r\n tk_angle = average_metadata(rawfile['$entry/instrument/crystal/takeoff_angle'])\r\n # get the date\r\n date_form = datetime.datetime.strptime(str(rawfile['$entry/start_time']),\"%Y-%m-%d %H:%M:%S\")\r\n mono_change = datetime.datetime(2009,04,01)\r\n if date_form < mono_change:\r\n monotype = \"115\"\r\n else:\r\n monotype = \"335\"\r\n hklval = pick_hkl(mom - tk_angle/2.0,monotype)\r\n if len(hklval)==3: # i.e. 
h,k,l found\r\n rawfile.add_metadata(\"_pd_instr_monochr_pre_spec\",\r\n hklval + \" reflection from Ge crystal, \"+monotype+\" cut\",tag=\"CIF\")\r\n wavelength = calc_wavelength(hklval,tk_angle)\r\n rawfile.add_metadata(\"_diffrn_radiation_wavelength\",\"%.3f\" % wavelength,tag=\"CIF\")\r\n rawfile.add_metadata(\"_[local]_diffrn_radiation_wavelength_determination\",\r\n \"Wavelength is calculated from monochromator hkl and takeoff angle and is therefore approximate\",\r\n tag=\"CIF\")\r\n # The following is changed later if the primary collimator is found to be inserted\r\n rawfile.add_metadata(\"_pd_instr_divg_eq_src/mono\",\"%.3f\" % (0.099*2.0*wavelength),tag=\"CIF\")\r\n # Do some logic to obtain collimator positions\r\n pcr = average_metadata(rawfile[\"$entry/instrument/collimator/primary_collimator_rotation\"])\r\n pcx = average_metadata(rawfile[\"$entry/instrument/collimator/primary_collimator_translation\"])\r\n if pcx > 120:\r\n if abs(pcr-360.0)<5 or abs(pcr) < 5: # 5' collimator\r\n coll_string = \"A 5' primary collimator pre-monochromator\"\r\n rawfile.add_metadata(\"_pd_instr_divg_eq_src/mono\",\"0.0833\",tag=\"CIF\")\r\n else:\r\n coll_string = \"A 10' primary collimator pre-monochromator\"\r\n rawfile.add_metadata(\"_pd_instr_divg_eq_src/mono\",\"0.1667\",tag=\"CIF\")\r\n else: coll_string = \"No primary monochromator \"\r\n try:\r\n scr = average_metadata(rawfile['$entry/sample/secondary_collimator'])\r\n if scr>0.5:\r\n coll_string += \" and a 10' secondary collimator post-monochromator.\"\r\n rawfile.add_metadata(\"_pd_instr_divg_eq_mono/spec\",\"0.1667\",tag=\"CIF\")\r\n else:\r\n coll_string += \" and no secondary collimator.\"\r\n rawfile.add_metadata(\"_diffrn_radiation_collimation\",coll_string,tag=\"CIF\")\r\n except AttributeError: #some early files are missing secondary collimator\r\n pass\r\n # These values were in the CIF writing area of the Java routines, best put here\r\n try:\r\n program_release = str(rawfile[\"$entry/program_revision\"])\r\n except AttributeError:\r\n program_release = str(rawfile[\"$entry/sics_release\"])\r\n rawfile.add_metadata(\"_computing_data_collection\",str(rawfile[\"$entry/program_name\"]) + \" \" + \\\r\n program_release,\"CIF\")\r\n # List the code versions used for data reduction\r\n codelist = \"\"\r\n for key in codeversions.keys():\r\n codelist += \"%-20s: %s\\n\" % (key,codeversions[key])\r\n rawfile.add_metadata(\"_computing_data_reduction\", str(\"Gumtree Echidna/Python routines, Git versions:\\n\" + codelist),\"CIF\")\r\n rawfile.add_metadata(\"_pd_spec_special_details\",sanitize(str(rawfile[\"$entry/sample/name\"])),\"CIF\")\r\n rawfile.add_metadata(\"_[local]_data_collection_description\",str(rawfile[\"$entry/sample/description\"]),\"CIF\")\r\n start_time = str(rawfile[\"$entry/start_time\"]).replace(\" \",\"T\")\r\n end_time = str(rawfile[\"$entry/end_time\"]).replace(\" \",\"T\")\r\n rawfile.add_metadata(\"_pd_meas_datetime_initiated\", start_time,\"CIF\")\r\n rawfile.add_metadata(\"_[local]_datetime_completed\", end_time,\"CIF\")\r\n try:\r\n username = str(rawfile[\"user_name\"])\r\n except:\r\n username = \"?\"\r\n rawfile.add_metadata(\"_pd_meas_info_author_name\", sanitize(username),\"CIF\")\r\n rawfile.add_metadata(\"_pd_meas_info_author_email\", str(rawfile[ \"$entry/user/email\"]),\"CIF\")\r\n rawfile.add_metadata(\"_pd_meas_info_author_phone\", str(rawfile[ \"$entry/user/phone\"]),\"CIF\")\r\n rawfile.add_metadata(\"_pd_instr_2theta_monochr_pre\",\"%.3f\" % tk_angle,\"CIF\")\r\n 
rawfile.add_metadata(\"_pd_instr_dist_mono/spec\", \"%.1f\" % average_metadata(rawfile[ \"$entry/sample/mono_sample_mm\"]),\"CIF\")\r\n rawfile.add_metadata(\"_pd_instr_dist_spec/detc\",\"%.1f\" % average_metadata(rawfile[\"$entry/instrument/detector/radius\"]),\"CIF\")\r\n try:\r\n rawfile.add_metadata(\"_diffrn_source_power\", \"%.2f\" % (average_metadata(rawfile[\"$entry/instrument/source/power\"])*1000),\"CIF\")\r\n except AttributeError: #sometimes source power is missing\r\n pass\r\n # imgCIF information about geometry\r\n # axis loop\r\n names = (('_axis.id','_axis.type','_axis.equipment','_axis.depends_on'),)\r\n values = [['source','gravity','stth','horizontal','vertical'],\r\n ['.','.','rotation','rotation','translation'],\r\n ['source','gravity','detector','detector','detector'],\r\n ['.','.','.','stth','stth']]\r\n rawfile.__dict__['ms'].AddCifItem((names,(values,)))\r\n radius = rawfile.__dict__['ms'][\"_pd_instr_dist_spec/detc\"]\r\n # add the vectors:\r\n \"\"\"\r\n source 0 0 1 . . .\r\n gravity -1 0 0 . . .\r\n stth 1 0 0 . . .\r\n horizontal 1 0 0 . . .\r\n vertical 1 0 0 0 0 -728\r\n \"\"\"\r\n vector_dict = {\"_axis.vector[1]\":['0','-1','1','1','1'],\r\n \"_axis.vector[2]\":['0','0','0','0','0'],\r\n \"_axis.vector[3]\":['1','0','0','0','0'],\r\n \"_axis.offset[1]\":['.','.','.','.','.'],\r\n \"_axis.offset[2]\":['.','.','.','.','.'],\r\n \"_axis.offset[3]\":['1','0','0','0',\"-\"+radius]}\r\n rawfile.__dict__['ms'].AddToLoop('_axis.id',vector_dict)\r\n # Add information about the stth positions for later use\r\n rawfile.add_metadata(\"_diffrn_scan.id\",\"1\",\"CIF\")\r\n rawfile.add_metadata(\"_diffrn_scan.frames\",rawfile.shape[0],\"CIF\")\r\n frame_ids = map(lambda a:\"%d\" % a,range(rawfile.shape[0]))\r\n stths = rawfile.stth[:]\r\n names = ((\"_diffrn_scan_frame.frame_id\",\"_diffrn_scan_frame.frame_number\"),)\r\n values = [frame_ids,range(1,rawfile.shape[0]+1)] #Spec says start from 1\r\n rawfile.__dict__['ms'].AddCifItem((names,(values,)))\r\n names = ((\"_diffrn_scan_frame_axis.frame_id\",\"_diffrn_scan_frame_axis.axis_id\",\r\n \"_diffrn_scan_frame_axis.angle\"),)\r\n values = [frame_ids,['stth']*rawfile.shape[0],map(float,stths)]\r\n rawfile.__dict__['ms'].AddCifItem((names,(values,)))\r\n return rawfile", "def build_plasticc_metadata(fname_meta: str, snana_dir: str, out_fname,\n screen=False, extragal=True, field='DDF'):\n\n # map between zenodo and SNANA types\n SNANA_types = {90:11, 62:{1:3, 2:13}, 42:{1:2, 2:12, 3:14},\n 67:41, 52:43, 64:51, 95:60, 994:61, 992:62,\n 993:63, 15:64, 88:70, 92:80, 65:81, 16:83,\n 53:84, 991:90, 6:{1:91, 2:93}}\n \n extragal_zenodo_types = [15, 42, 52, 62, 64, 67, 88, 90, 95]\n \n # read zenodo metadata\n meta = pd.read_csv(fname_meta)\n\n if field == 'DDF':\n # identify only DDF objects\n ddf_flag = meta['ddf_bool'].values == 1\n elif field == 'WFD':\n ddf_flag = meta['ddf_bool'].values == 0\n else:\n ddf_flag = np.array([True for i in range(meta.shape[0])])\n\n # get ids\n ids = meta['object_id'].values[ddf_flag] \n\n names = get_SNR_headers()\n\n if not os.path.isfile(out_fname):\n op = open(out_fname, 'w+')\n for item in names[:-1]:\n op.write(item + ',')\n op.write(names[-1] + '\\n')\n\n else: \n op = open(out_fname, 'a+')\n \n # which group to search for\n if extragal:\n search_group = extragal_zenodo_types\n else:\n search_group = list(SNANA_types.keys())\n \n for code_zenodo in search_group:\n \n if screen:\n print('code_zenodo: ', code_zenodo)\n\n if code_zenodo not in [62, 42, 6]:\n code_snana = 
SNANA_types[code_zenodo]\n\n for n in range(1, 11):\n fname2 = snana_dir + 'LSST_DDF_MODEL' + str(code_snana).zfill(2) + \\\n '/LSST_DDF_NONIa-00' + str(n).zfill(2) + '_PHOT.FITS.gz'\n\n photo = read_fits(fname2)\n\n for indx in range(photo[0].shape[0]):\n # read data for 1 object\n snid_raw = photo[0]['SNID'].values[indx]\n snid = int(re.sub(\"[^0-9]\", \"\", str(snid_raw)))\n\n if snid in ids: \n line = calculate_SNR(snid=snid, \n code_zenodo=code_zenodo,\n photo_data=photo[1],\n head_data=photo[0],\n snana_file_index=n,\n code_snana=code_snana)\n \n if len(line) > 0:\n for item in line[:-1]:\n op.write(str(item) + ',')\n op.write(str(line[-1]) + '\\n')\n \n del photo\n \n else:\n for subtype in SNANA_types[code_zenodo].keys():\n code_snana = SNANA_types[code_zenodo][subtype]\n \n for n in range(1, 11): \n fname2 = snana_dir + 'LSST_DDF_MODEL' + str(code_snana).zfill(2) + \\\n '/LSST_DDF_NONIa-00' + str(n).zfill(2) + '_PHOT.FITS.gz'\n\n photo = read_fits(fname2)\n\n for indx in range(photo[0].shape[0]):\n\n # read data for 1 object\n snid_raw = photo[0]['SNID'].values[indx]\n snid = int(re.sub(\"[^0-9]\", \"\", str(snid_raw)))\n\n if snid in ids:\n line = calculate_SNR(snid=snid, \n code_snana=code_snana,\n code_zenodo=code_zenodo,\n photo_data=photo[1], \n head_data=photo[0],\n snana_file_index=n)\n \n if len(line) > 0:\n for item in line[:-1]:\n op.write(str(item) + ',')\n op.write(str(line[-1]) + '\\n')\n \n del photo\n \n op.close()", "def read_meta(metafn=None):\n\n metadata = {}\n\n # potential future improvement: strip quotation marks from strings, where applicable. Will then need to adjust\n # the indices used to get the dates and times in the functions above \n # (get_DEM_img_times: dtstrings = {\"sourceImage1\":(5,19, '%Y%m%d%H%M%S')})\n\n #each key is equated with '='. 
This loop strips and seperates then fills the dictonary.\n with open(metafn) as f: \n for line in f:\n if not line.strip(';') == \"END\":\n val = line.strip().split('=')\n if len(val) == 1:\n continue\n else:\n metadata.setdefault(val[0].strip(), []).append(val[1].strip().strip(';')) \n else:\n break\n\t\n return metadata", "def create_meta_dict_L1(adcp_meta):\n meta_dict = {}\n with open(adcp_meta) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n line_count = 0\n next(csv_reader, None) # Skip header row\n for row in csv_reader:\n # extract all metadata from csv file into dictionary -- some items not passed to netCDF file but are extracted anyway\n if row[0] == '' and row[1] == '':\n print('Metadata file contains a blank row; skipping this row !')\n elif row[0] != '' and row[1] == '':\n print('Metadata item in csv file has blank value; skipping this row '\n 'in metadata file !')\n else:\n meta_dict[row[0]] = row[1]\n\n # Add conventions metadata to meta_dict\n meta_dict['deployment_type'] = 'Sub Surface'\n meta_dict['flag_meaning'] = 'no_quality_control, good_value, probably_good_value, probably_bad_value, ' \\\n 'bad_value, changed_value, value_below_detection, value_in_excess, ' \\\n 'interpolated_value, missing_value'\n meta_dict['flag_references'] = 'BODC SeaDataNet'\n meta_dict['flag_values'] = '0, 1, 2, 3, 4, 5, 6, 7, 8, 9'\n meta_dict['keywords'] = 'Oceans > Ocean Circulation > Ocean Currents'\n meta_dict['keywords_vocabulary'] = 'GCMD Science Keywords'\n meta_dict['naming_authority'] = 'BODC, MEDS, CF v72'\n meta_dict['variable_code_reference'] = 'BODC P01'\n meta_dict['Conventions'] = \"CF-1.8\"\n\n return meta_dict", "def read_meta(self):\n meta = cPickle.load(open('../sugar_analysis_data/META-CABALLO2.pkl'))\n self.meta_sn_name_list = []\n self.meta_zcmb = []\n self.meta_x0 =[]\n self.meta_x0_err = []\n self.meta_x1 =[]\n self.meta_x1_err = []\n self.meta_c = []\n self.meta_c_err = []\n self.meta_mb = []\n self.meta_mb_err = []\n self.meta_cov_x0_x1 = [] \n self.meta_cov_x0_c = []\n self.meta_cov_x1_c = []\n self.meta_cov_mb_x1 = []\n self.meta_cov_mb_c = [] \n self.meta_zhl = []\n self.meta_zhl_err = []\n self.meta_idr = []\n for meta_sn_name in meta.keys(): \n \n if meta[meta_sn_name]['idr.subset'] != 'bad' and meta[meta_sn_name]['idr.subset'] != 'auxiliary':\n \n self.meta_sn_name_list.append(meta_sn_name)\n self.meta_zhl_err.append(meta[meta_sn_name]['host.zhelio.err'])\n self.meta_zhl.append(meta[meta_sn_name]['host.zhelio'])\n self.meta_zcmb.append(meta[meta_sn_name]['host.zcmb'])\n self.meta_x0.append(meta[meta_sn_name]['salt2.X0'])\n self.meta_x0_err.append(meta[meta_sn_name]['salt2.X0.err'])\n self.meta_x1.append(meta[meta_sn_name]['salt2.X1'])\n self.meta_x1_err.append(meta[meta_sn_name]['salt2.X1.err'])\n self.meta_c.append(meta[meta_sn_name]['salt2.Color'])\n self.meta_c_err.append(meta[meta_sn_name]['salt2.Color.err'])\n self.meta_mb.append(meta[meta_sn_name]['salt2.RestFrameMag_0_B'])\n self.meta_mb_err.append(meta[meta_sn_name]['salt2.RestFrameMag_0_B.err'])\n self.meta_cov_x0_x1.append(meta[meta_sn_name]['salt2.CovX0X1'])\n self.meta_cov_x0_c.append(meta[meta_sn_name]['salt2.CovColorX0'])\n self.meta_cov_x1_c.append(meta[meta_sn_name]['salt2.CovColorX1'])\n self.meta_cov_mb_x1.append(meta[meta_sn_name]['salt2.CovRestFrameMag_0_BX1'])\n self.meta_cov_mb_c.append(meta[meta_sn_name]['salt2.CovColorRestFrameMag_0_B'])\n self.meta_idr.append(meta[meta_sn_name]['idr.subset'])\n \n self.meta_idr = np.array(self.meta_idr)\n self.meta_zcmb = 
np.array(self.meta_zcmb)\n self.meta_zhl = np.array(self.meta_zhl)\n self.meta_zhl_err = np.array(self.meta_zhl_err)\n self.meta_x0 = np.array(self.meta_x0)\n self.meta_x0_err = np.array(self.meta_x0_err)\n self.meta_x1 = np.array(self.meta_x1)\n self.meta_x1_err = np.array(self.meta_x1_err) \n self.meta_c = np.array(self.meta_c)\n self.meta_c_err = np.array(self.meta_c_err)\n self.meta_mb = np.array(self.meta_mb)\n self.meta_mb_err = np.array(self.meta_mb_err)\n self.meta_cov_x0_x1 = np.array(self.meta_cov_x0_x1)\n self.meta_cov_x0_c = np.array(self.meta_cov_x0_c)\n self.meta_cov_x1_c = np.array(self.meta_cov_x1_c)\n self.meta_cov_mb_x1 = np.array(self.meta_cov_mb_x1)\n self.meta_cov_mb_c = np.array(self.meta_cov_mb_c)", "def identify_cell_measures_metadata(cfreader, filename):\n metadata = {}\n\n try:\n # This could be None if cube.var_name isn't defined\n cmor_name = os.path.basename(filename).split('_')[0]\n metadata['var_name'] = cmor_name\n metadata['units'] = str(cfreader.cf_group[cmor_name].\n getncattr('units'))\n metadata['long_name'] = cfreader.cf_group[cmor_name].getncattr(\n 'long_name')\n metadata['standard_name'] = cfreader.cf_group[cmor_name].getncattr(\n 'standard_name')\n metadata['time_units'] = None\n metadata['calendar'] = None\n # CMIP5 doesn't have an activity id and so supply a default\n metadata['activity_id'] = cfreader.cf_group.global_attributes.get(\n 'activity_id', 'HighResMIP')\n try:\n metadata['institute'] = (cfreader.cf_group.\n global_attributes['institution_id'])\n except KeyError:\n # CMIP5 uses institute_id but we should not be processing CMIP5\n # data but handle it just in case\n metadata['institute'] = (cfreader.cf_group.\n global_attributes['institute_id'])\n except Exception as exc:\n msg = ('Unable to extract metadata from the contents of file {}\\n{}'.\n format(filename, exc.__str__()))\n raise FileValidationError(msg)\n\n return metadata", "def get_ctffind_4_1_0_meta(file_name: str) -> pd.DataFrame:\n extract_dict: typing.Dict[str, str]\n ctffind_meta_data: pd.DataFrame\n lines: typing.List[str]\n match: typing.Optional[typing.Match[str]]\n non_string_values: typing.Set[str]\n\n extract_dict = get_ctffind_4_1_0_extract_dict()\n ctffind_meta_data = pd.DataFrame(index=[0], columns=extract_dict.keys())\n with open(file_name, 'r') as read:\n lines = read.readlines()\n\n non_string_values = set([\n 'MicrographNameNoDW',\n 'version'\n ])\n for line in lines:\n for key, value in extract_dict.items():\n match = re.match(value, line)\n if match is not None:\n try:\n ctffind_meta_data[key] = float(match.group(1))\n except ValueError:\n assert key in non_string_values, f'{key}: {match.group(1)}'\n ctffind_meta_data[key] = match.group(1)\n else:\n pass\n return ctffind_meta_data", "def identify_filename_metadata(filename, file_format='CMIP6'):\n if file_format == 'CMIP5':\n components = ['cmor_name', 'table', 'climate_model', 'experiment',\n 'rip_code', 'date_string']\n elif file_format == 'CMIP6':\n components = ['cmor_name', 'table', 'climate_model', 'experiment',\n 'rip_code', 'grid', 'date_string']\n else:\n raise NotImplementedError('file_format must be CMIP5 or CMIP6')\n\n basename = os.path.basename(filename)\n directory = os.path.dirname(filename)\n metadata = {'basename': basename, 'directory': directory}\n\n # split the filename into sections\n if basename.endswith('-clim.nc'):\n filename_sects = basename.rpartition('-clim.nc')[0].split('_')\n else:\n filename_sects = basename.rpartition('.nc')[0].split('_')\n\n # but if experiment present_day was 
in the filename, join these sections\n # back together. This should only occur in pre-PRIMAVERA data.\n if filename_sects[3] == 'present' and filename_sects[4] == 'day':\n filename_sects[3] += '_' + filename_sects.pop(4)\n\n # deduce as much as possible from the filename\n try:\n for cmpt_name, cmpt in zip(components, filename_sects):\n if cmpt_name == 'date_string':\n frequency = _get_frequency(metadata['table'])\n start_date, end_date = cmpt.split('-')\n try:\n metadata['start_date'] = _make_partial_date_time(\n start_date, frequency)\n metadata['end_date'] = _make_partial_date_time(\n end_date, frequency)\n except ValueError:\n msg = 'Unknown date format in filename: {}'.format(\n filename)\n raise FileValidationError(msg)\n else:\n metadata[cmpt_name] = cmpt\n except ValueError:\n msg = 'Unknown filename format: {}'.format(filename)\n raise FileValidationError(msg)\n\n # fixed variables won't have a time range and so create blank values\n potential_missing_values = ['start_date', 'end_date']\n for missing_value in potential_missing_values:\n if missing_value not in metadata:\n metadata[missing_value] = None\n\n metadata['filesize'] = os.path.getsize(filename)\n\n for freq in FREQUENCY_VALUES:\n if freq in metadata['table'].lower():\n metadata['frequency'] = freq\n break\n if 'frequency' not in metadata:\n # set a blank frequency if one hasn't been found\n metadata['frequency'] = ''\n\n return metadata", "def get_Metadata(metafile):\n\n mslist_file = open(metafile, 'r')\n LINES = mslist_file.readlines()\n mslist_file.close()\n\n nBlocks = 6 # these are the number of correlator cards (PILOT survey value)\n \n obs_date = 'Observed from'\n code = 'Code'\n duration = 'Total elapsed time'\n antenna = 'antennas'\n frame = 'Frame'\n \n for i in range(len(LINES)):\n line = LINES[i]\n if line.find(antenna) >=0:\n TOKS = line.split()\n n_ant = TOKS[5][-2:]\n if line.find(obs_date) >=0:\n TOKS = line.split()\n start_obs_date = TOKS[6]\n end_obs_date = TOKS[8]\n if line.find(duration) >=0:\n TOKS = line.split()\n tobs = float(TOKS[10]) # in second\n if line.find(code) >= 0:\n next_line = LINES[i+1]\n TOKS = next_line.split()\n field = TOKS[5]\n ra = TOKS[6][:-5]\n dec = TOKS[7][:-4]\n if line.find(frame) >= 0:\n next_line = LINES[i+1]\n TOKS = next_line.split()\n total_obs_bw = float(TOKS[10])*nBlocks/1000.0 # kHz to MHz \n \n return n_ant, start_obs_date, end_obs_date, tobs, field, ra, dec, total_obs_bw", "def return_hdr(self):\r\n # information contained in .erd\r\n orig = self._hdr['erd']\r\n if orig['patient_id']:\r\n subj_id = orig['patient_id']\r\n else:\r\n subj_id = (orig['pat_first_name'] + orig['pat_middle_name'] +\r\n orig['pat_last_name'])\r\n\r\n start_time = self._hdr['stc']['creation_time']\r\n s_freq = orig['sample_freq']\r\n\r\n # information contained in .stc\r\n n_samples = self._hdr['stamps'][-1]['end_stamp']\r\n\r\n try:\r\n ent_file = join(self.filename, self._basename + '.ent')\r\n if not exists(ent_file):\r\n ent_file = join(self.filename, self._basename + '.ent.old')\r\n ent_notes = _read_ent(ent_file)\r\n except (FileNotFoundError, PermissionError):\r\n lg.warning('could not find .ent file, channels have arbitrary '\r\n 'names')\r\n chan_name = ['chan{0:03}'.format(x) for x in\r\n range(orig['num_channels'])]\r\n else:\r\n # use the last montage, hoping that it's the most accurate\r\n for ent_note in reversed(ent_notes):\r\n try:\r\n chan_name = _find_channels(ent_note['value'])\r\n chan_name = chan_name[:orig['num_channels']]\r\n except:\r\n continue\r\n else:\r\n 
break\r\n\r\n try:\r\n vtc_file = join(self.filename, self._basename + '.vtc')\r\n orig['vtc'] = _read_vtc(vtc_file)\r\n except (FileNotFoundError, PermissionError):\r\n orig['vtc'] = None\r\n\r\n try:\r\n snc_file = join(self.filename, self._basename + '.snc')\r\n orig['snc'] = _read_snc(snc_file)\r\n except (FileNotFoundError, PermissionError):\r\n orig['snc'] = None\r\n\r\n return subj_id, start_time, s_freq, chan_name, n_samples, orig", "def get_meta(filename):\n with fiona.open(filename) as collection:\n return collection.meta", "def get_meta_from_wrf( ds ):\n import pyproj, rasterio\n\n wgs84 = pyproj.Proj( '+units=m +proj=latlong +datum=WGS84' )\n pargs = dict()\n # get some metadata from the RAW WRF file we got from Peter.\n cen_lon = ds.CEN_LON\n cen_lat = ds.CEN_LAT\n dx = ds.DX\n dy = ds.DY\n pargs['lat_1'] = ds.TRUELAT1\n pargs['lat_2'] = ds.TRUELAT2\n pargs['lat_0'] = ds.MOAD_CEN_LAT\n pargs['lon_0'] = ds.STAND_LON\n pargs['center_lon'] = ds.CEN_LON\n proj_id = ds.MAP_PROJ\n\n # setup the projection information from the information in the raw file\n # Polar stereo\n p4 = '+proj=stere +lat_ts={lat_1} +lon_0={lon_0} +lat_0=90.0' \\\n '+x_0=0 +y_0=0 +a=6370000 +b=6370000'\n p4 = p4.format( **pargs )\n\n proj = pyproj.Proj(p4)\n if proj is None:\n raise RuntimeError('WRF proj not understood: {}'.format(p4))\n\n # get dims from xarray dataset\n nx = ds.dims['west_east']\n ny = ds.dims['south_north']\n\n meta = dict()\n # make grid in polar coordinate system of SNAP-WRF\n e, n = pyproj.transform(wgs84, proj, cen_lon, cen_lat)\n # [ NOTE ]: these are centroid x0,y0 values of the lower-left...\n x0 = -(nx-1) / 2. * dx + e # DL corner\n y0 = -(ny-1) / 2. * dy + n # DL corner\n\n # flip the origin (UPPER LEFT CENTROID)\n res = 20000 # meters\n y0_ulcen = y0 + ((ny-1)*dy)\n x0_ulcen = x0\n\n ulx_cen = np.arange( x0_ulcen, x0_ulcen + ((dx*nx)), step=res )\n uly_cen = np.arange( y0_ulcen, y0_ulcen + (-(dy*ny)), step=-res )\n\n xc, yc = np.meshgrid( ulx_cen, uly_cen )\n\n # upper left corner coordinate origin... 
NOT CENTROID for the proper affine\n origin = (x0_ulcen-(res/2.0), y0_ulcen+(res/2.0))\n transform = rasterio.transform.from_origin( origin[0], origin[1], res, res )\n \n # build affine transform\n meta.update( resolution=(nx, ny), \n origin=origin, \n shape=(dx, dy),\n crs=proj,\n origin_corner='UPPER-LEFT', \n xc=xc, yc=yc, \n transform=transform )\n\n return meta", "def read_file(file_name):\n fits_file = fits.open(file_name)\n\n header = fits_file[0].header\n image_data = fits_file[1].data\n\n segmentation_data = fits_file[2].data\n\n header_keywords = {'CRVAL3': 0, 'CRPIX3': 0, 'CD3_3': 0}\n # clause to differentiate between CDELT3 and CD3_3\n\n for hdr_key, hdr_value in header_keywords.items():\n # finding required header values\n hdr_value = header[hdr_key]\n header_keywords[hdr_key] = hdr_value\n\n return header_keywords, image_data, segmentation_data", "def metadata(filename, header=fits.PrimaryHDU().header, clear=True):\n\n if clear:\n header.clear()\n\n header.append(('comment', ''), end=True)\n header.append(('comment', '*'*60), end=True)\n header.append(('comment', '*'*18 + ' Time and Pointing Data ' + '*'*18), end=True)\n header.append(('comment', '*'*60), end=True)\n header.append(('comment', ''), end=True)\n\n try:\n origname = re.sub('.*CRSA', '', re.sub('.fits', '', filename))\n header.append(('origname', origname, 'Original file ID number'), end=True)\n except:\n pass\n\n ####################################################################\n # Attempt to get the mean time of the exposure. Try three things:\n # 1. The mean of mjd-str and mjd-end in the main header (HDU 0)\n # 2. mjd in the main header (HDU 0)\n # 3. The mean acquisition time in the headers of the individual \n # reads, computed as acqtime in HDU 1 plus 1.48s/2*nreads\n ####################################################################\n\n mjd_ok = True\n try:\n head = fits.open(filename)[0].header\n try:\n mean_mjd = 0.5*(head['mjd-str'] + head['mjd-end'])\n except:\n try:\n mean_mjd = head['mjd'] + 1.48*0.5*len(fits.open(filename))/86400\n except:\n ########################################################\n # Note: acqtime is unreliable--doesn't always update.\n ########################################################\n #head1 = fits.open(filename)[1].header\n #mean_mjd = head1['acqtime'] - 2400000.5\n #mean_mjd += 1.48*0.5*len(fits.open(filename))/86400\n ########################################################\n # This is pretty bad: use the checksum time of the\n # middle read as the time stamp of last resort.\n ########################################################\n head1 = fits.open(filename)[len(fits.open(filename))//2].header\n t = head1.comments['checksum'].split()[-1]\n t = Time(t, format='isot')\n t.format = 'mjd'\n mean_mjd = float(str(t)) \n except:\n mjd_ok = False\n mean_mjd = np.nan\n utc_date = 'unavailable'\n utc_time = 'unavailable'\n\n pos_ok = True\n\n ####################################################################\n # Need RA and Dec to compute parallactic angle\n ####################################################################\n\n try:\n head = fits.open(filename)[0].header\n ra, dec = [head['ra'], head['dec']]\n except:\n #ra, dec = ['05:02:27.5438', '+07:27:39.265']\n \t#ra, dec = ['04:37:36.182', '-02:28:25.87']\n pos_ok = False\n \n if mjd_ok:\n\n ################################################################\n # Subaru's coordinates in degrees\n ################################################################\n \n lng, lat = [-155.4760187, 19.825504]\n subaru = 
(str(lng) + 'd', str(lat) + 'd')\n t = Time(mean_mjd, format='mjd', location=subaru)\n \n if pos_ok:\n\n ############################################################\n # Precess from J2000 to the appropriate epoch\n ############################################################\n\n c = coord.SkyCoord(ra=ra, dec=dec, unit=(u.hourangle, u.deg), frame='fk5')\n \n equinox = 'J%.5f' %(2000 + (mean_mjd - 51544.5)/365.25)\n c = c.transform_to(coord.FK5(equinox=equinox))\n\n ################################################################\n # Compute hour angle to get parallactic angle\n ################################################################\n\n ha = (t.sidereal_time('apparent') - c.ra).rad\n lat = lat*np.pi/180\n \n pa = -np.arctan2(-np.sin(ha), np.cos(c.dec.rad)*np.tan(lat)\n - np.sin(c.dec.rad)*np.cos(ha))\n pa = float(pa%(2*np.pi))\n else:\n pa = np.nan\n\n t.format = 'isot'\n utc_date = str(t).split('T')[0]\n utc_time = str(t).split('T')[1]\n else:\n pa = np.nan\n\n if not np.isfinite(mean_mjd):\n mean_mjd = utc_date = utc_time = 'unavailable'\n\n header['mjd'] = (mean_mjd, 'Mean MJD of exposure') \n header['utc-date'] = (utc_date, 'UTC date of exposure') \n header['utc-time'] = (utc_time, 'Mean UTC time of exposure')\n\n ####################################################################\n # Attempt to fetch useful/important keywords from the original\n # file's FITS header\n ####################################################################\n\n header.append(_fetch('ra', filename, comment='RA of telescope pointing'))\n header.append(_fetch('dec', filename, comment='DEC of telescope pointing'))\n\n if np.isfinite(pa):\n header['parang'] = (pa*180/np.pi, 'Mean parallactic angle (degrees)')\n else:\n header['parang'] = ('unavailable', 'Mean parallactic angle (degrees)')\n header.append(_fetch('d_imrpap', filename, comment='Image rotator pupil position angle (degrees)'))\n\n header.append(_fetch('HIERARCH CHARIS.FILTER.NAME', filename, \n comment='CHARIS filter name', newkey='filtname'))\n header.append(_fetch('HIERARCH CHARIS.FILTER.SLOT', filename, \n comment='CHARIS filter slot', newkey='filtpos'))\n header.append(_fetch('HIERARCH CHARIS.SHUTTER', filename, \n comment='CHARIS shutter position', newkey='shutter'))\n\n return header", "def get_meta_file_type(metaDictionary, logger, filename):\n # GENETIC_ALTERATION_TYPE DATATYPE meta\n alt_type_datatype_to_meta = {\n # cancer type\n (\"CANCER_TYPE\", \"CANCER_TYPE\"): MetaFileTypes.CANCER_TYPE,\n # clinical and timeline\n (\"CLINICAL\", \"PATIENT_ATTRIBUTES\"): MetaFileTypes.PATIENT_ATTRIBUTES,\n (\"CLINICAL\", \"SAMPLE_ATTRIBUTES\"): MetaFileTypes.SAMPLE_ATTRIBUTES,\n (\"CLINICAL\", \"TIMELINE\"): MetaFileTypes.TIMELINE,\n # rppa\n (\"PROTEIN_LEVEL\", \"LOG2-VALUE\"): MetaFileTypes.RPPA,\n (\"PROTEIN_LEVEL\", \"Z-SCORE\"): MetaFileTypes.RPPA,\n # cna\n (\"COPY_NUMBER_ALTERATION\", \"DISCRETE\"): MetaFileTypes.CNA,\n (\"COPY_NUMBER_ALTERATION\", \"CONTINUOUS\"): MetaFileTypes.CNA_CONTINUOUS, \n (\"COPY_NUMBER_ALTERATION\", \"LOG2-VALUE\"): MetaFileTypes.CNA_LOG2,\n (\"COPY_NUMBER_ALTERATION\", \"SEG\"): MetaFileTypes.SEG,\n # expression\n (\"MRNA_EXPRESSION\", \"CONTINUOUS\"): MetaFileTypes.EXPRESSION,\n (\"MRNA_EXPRESSION\", \"Z-SCORE\"): MetaFileTypes.EXPRESSION,\n (\"MRNA_EXPRESSION\", \"DISCRETE\"): MetaFileTypes.EXPRESSION,\n # mutations\n (\"MUTATION_EXTENDED\", \"MAF\"): MetaFileTypes.MUTATION,\n # others\n (\"METHYLATION\", \"CONTINUOUS\"): MetaFileTypes.METHYLATION,\n (\"FUSION\", \"FUSION\"): MetaFileTypes.FUSION,\n # 
cross-sample molecular statistics (for gene selection)\n (\"GISTIC_GENES_AMP\", \"Q-VALUE\"): MetaFileTypes.GISTIC_GENES,\n (\"GISTIC_GENES_DEL\", \"Q-VALUE\"): MetaFileTypes.GISTIC_GENES,\n (\"MUTSIG\", \"Q-VALUE\"): MetaFileTypes.MUTATION_SIGNIFICANCE\n }\n result = None\n if 'genetic_alteration_type' in metaDictionary and 'datatype' in metaDictionary:\n genetic_alteration_type = metaDictionary['genetic_alteration_type']\n data_type = metaDictionary['datatype']\n if (genetic_alteration_type, data_type) in alt_type_datatype_to_meta:\n result = alt_type_datatype_to_meta[(genetic_alteration_type, data_type)]\n else:\n logger.error(\n 'Could not determine the file type. Please check your meta files for correct configuration.',\n extra={'filename_': filename,\n 'cause': ('genetic_alteration_type: %s, '\n 'datatype: %s' % (\n metaDictionary['genetic_alteration_type'],\n metaDictionary['datatype']))})\n elif 'cancer_study_identifier' in metaDictionary and 'type_of_cancer' in metaDictionary:\n result = MetaFileTypes.STUDY\n elif 'type_of_cancer' in metaDictionary:\n result = MetaFileTypes.CANCER_TYPE\n else:\n logger.error('Could not determine the file type. Did not find expected meta file fields. Please check your meta files for correct configuration.',\n extra={'filename_': filename})\n return result", "def get_metadata(diagnostics_dir, verbose=False):\n metafile = find_metadata_file(diagnostics_dir, 'mslist-2*txt', verbose=False)\n\n with open(metafile, 'r') as mslist_file:\n lines = mslist_file.readlines()\n\n nBlocks = 6 # these are the number of correlator cards (PILOT survey value)\n \n obs_metadata = ObservationMetadata()\n\n obs_date = 'Observed from'\n fields = 'Fields'\n code = 'Code'\n duration = 'Total elapsed time'\n antenna = 'antennas'\n frame = 'Frame'\n \n field_list = []\n\n for i in range(len(lines)):\n line = lines[i]\n if line.find(antenna) >=0:\n toks = line.split()\n obs_metadata.n_ant = toks[5][-2:]\n if line.find(obs_date) >=0:\n toks = line.split()\n obs_metadata.start_obs_date = toks[6]\n obs_metadata.end_obs_date = toks[8]\n if line.find(duration) >=0:\n toks = line.split()\n obs_metadata.tobs = float(toks[10]) # in second\n\n # Field details\n if line.find(fields) >=0:\n toks = line.split()\n obs_metadata.num_fields = int(toks[-1])\n\n if line.find(code) >= 0:\n for j in range(obs_metadata.num_fields):\n field_metadata = FieldMetadata()\n field_line = lines[i+j+1]\n toks = field_line.split()\n field_metadata.name = toks[5]\n field_metadata.ra = toks[6][:-5]\n field_metadata.dec = toks[7][:-4]\n field_metadata.num_rows = int(toks[9])\n obs_metadata.fields.append(field_metadata)\n\n if line.find(frame) >= 0:\n next_line = lines[i+1]\n toks = next_line.split()\n obs_metadata.total_obs_bw = float(toks[10])*nBlocks/1000.0 # kHz to MHz \n \n return obs_metadata #n_ant, start_obs_date, end_obs_date, tobs, field, ra, dec, total_obs_bw", "def header(fpath):\n # If you want to change something, instead of overwriting a bug, add a new\n # key with the desired functionallity. 
This way, prior code doesn't break.\n # One can be very waste full with this function as it is fast anyways.\n\n\n ret = {}\n with open(fpath) as f:\n for line in f:\n if line[0] is not \"#\":\n break\n # Strip comment marker\n line = line[2:]\n name, value = line.split(\"=\")\n # Strip newline\n ret[name] = value[:-1]\n\n # To have some compatibility between spe veronica and viktor files,\n # we further unify some of the namings\n ret['gain'] = ret.get('Gain')\n\n exp_time = ret.get('ExposureTime [s]')\n if exp_time:\n ret['exposure_time'] = datetime.timedelta(seconds=float(exp_time))\n\n hbin = ret.get('HBin')\n if hbin:\n ret['hbin'] = {'ON': True}.get(value, False)\n\n cw = ret.get('Central-Wavelength')\n if cw:\n ret['central_wl'] = float(cw)\n\n vis_wl = ret.get('vis-Wavelength')\n if vis_wl:\n ret['vis_wl'] = float(vis_wl)\n\n syringe_pos = ret.get('Syringe Pos')\n if syringe_pos:\n ret['syringe_pos'] = int(syringe_pos)\n\n cursor = ret.get(\"Cursor\")\n if cursor:\n ret['cursor'] = tuple([int(elm) for elm in cursor.split('\\t')])\n\n x_mirror = ret.get('x-mirror')\n if x_mirror:\n ret['x_mirror'] = {'ON': True}.get(x_mirror, False)\n\n calib_coeff = ret.get('calib Coeff')\n if calib_coeff:\n ret['calib Coeff'] = tuple([float(elm) for elm in calib_coeff.split('\\t')])\n # Index 0 is actually central_wl during calibration,\n ret['calib_central_wl'] = ret['calib Coeff'][0]\n\n\n # For np.poly1d the calibration coefficents need to be in decreasing\n # order and no zero values are not allowed\n _cc = np.array(ret['calib Coeff'][1:])\n ret['calib_coeff'] = _cc[np.nonzero(_cc)][::-1]\n\n scan_start_time = ret.get('Scan Start time')\n if scan_start_time:\n ret['date'] = datetime.datetime.strptime(scan_start_time, '%d.%m.%Y %H:%M:%S')\n\n scan_stop_time = ret.get('Scan Stop time')\n if scan_stop_time:\n ret['date_stop'] = datetime.datetime.strptime(scan_stop_time, '%d.%m.%Y %H:%M:%S')\n\n timedelay = ret.get('Timedelay')\n if timedelay:\n ret['timedelay'] = np.array([int(elm) for elm in timedelay.split('\\t')])\n\n timedelay_pos= ret.get('Timedelay Pos')\n if timedelay_pos:\n ret['timedel_pos'] = np.array([int(elm) for elm in timedelay_pos.split('\\t')])\n\n return ret", "def read_metadata_file():\n metadata = None\n if not os.path.isfile(META_DATA_FILE):\n ppg.log_info(\"No metadata found. 
The earthquake splitting might have not been ran yet.\")\n else:\n ppg.log_info(\"Found metadata file\")\n metadata = pd.read_csv(META_DATA_FILE)\n return metadata", "def read_grp(fname):\n global DAYS\n uint_types = [DAYS,\n 'Current crop type', \n 'Current residue on ground type', \n 'Previous residue on ground type', \n 'Old residue on ground type', \n 'Current dead root type', \n 'Previous dead root type', \n 'Old dead root type']\n\n meta = {}\n data = None\n header = []\n\n meta['fname'] = fname\n meta['id'] = ''.join([L for L in fname if L in '0123456789'])\n \n fid = open(fname, 'rb')\n for i, line in enumerate(fid.readlines()):\n line_as_list = line.strip().split()\n\n if len(line_as_list) == 0:\n continue\n\n elif line_as_list[0][0] == '#':\n continue\n\n elif line_as_list[0] == 'int':\n try:\n meta[line[1]] = int(line[2])\n except:\n pass\n \n elif line_as_list[0] == 'float':\n try:\n meta[line[1]] = float(line[2])\n except:\n pass\n\n elif line_as_list[0] == 'char':\n continue\n\n elif line_as_list[0][0] == '{':\n cname = line.strip()[1:-1].replace(r'kg/m', r'kg*m**-1') \\\n .replace(r'kg/m**2', r'kg*m**-2') \\\n .replace(r'kg/m**3', r'kg*m**-3') \\\n .replace(r'kg/m**4', r'kg*m**-4') \\\n .replace(r'mm/hr', r'mm*hr**-1') \\\n .replace(r'mm/h', r'mm*hr**-1') \\\n .replace(r'm/day', r'm*day**-1') \\\n .replace(r'g/cc', r'g*cc**-1') \\\n .replace(r'kg-s/m**4', r'kg-s*m**-4') \\\n .replace(r's/m', r's*m**-1') \\\n .replace(r'Irrigation_volume_supplied/unit_area',\n r'Irrigation_volume_supplied*unit_area**-1')\n header.append(cname)\n\n else:\n if len(header) == len(line_as_list):\n \n # if we are here and data == None we need to initialize the data dictionary\n if data == None:\n data = {}\n for cname in header:\n typecode = ('f', 'h')[any([cname==s for s in uint_types])]\n data[cname] = array.array(typecode)\n\n for (cname, string) in zip(header, line_as_list):\n if any([cname==s for s in uint_types]):\n value = int(string)\n else:\n value = float(string)\n\n if cname == DAYS:\n\n if value in set(data[DAYS]):\n break\n\n data[cname].append(value)\n\n else:\n raise Exception('Failed to parse line %i, unexpected number of columns.'%(i+1))\n \n fid.close()\n\n # pack the table data into numpy arrays\n for (cname, v) in data.items():\n dtype = (np.float32, np.int16)[any([cname==s for s in uint_types])]\n data[cname] = np.array(v, dtype=dtype)\n\n return (meta, data)", "def readenergyfile(filename):\n def parsemeta(metalines):\n \"\"\"Parse metadata lines to get metadata object (ordered dict)\n\n Allow only numbers, lists of numbers and strings\n \"\"\"\n def parseline(line):\n res = [val.strip() for val in line[5:].split(u':', 1)]\n key, value = (res[0], res[1]) if len(res) == 2 else (res[0], u'')\n if re.match(r'^-?\\d*[\\.|,]?\\d+$', value):\n value = float(value)\n elif re.match(r'^\\[(.*)\\]', value):\n value = [val.strip() for val in value[1:-1].split(u',')]\n value = [float(val) if re.match(r'^-?\\d*[\\.|,]?\\d+$', val) else val for val in value]\n return key, value\n return OrderedDict(parseline(line) for line in metalines if line.startswith(u'#CTE_'))\n\n with io.open(filename, 'r') as datafile:\n components, meta = [], []\n for ii, line in enumerate(datafile):\n line = line.strip()\n if (line == '') or line.startswith('vector'):\n continue\n elif line.startswith('#'):\n meta.append(line)\n else:\n fields = line.split('#', 1)\n data = [x.strip() for x in fields[0].split(',')]\n comment = fields[1] if len(fields) > 1 else ''\n carrier, ctype, originoruse = data[0:3]\n values = 
[float(v.strip()) for v in data[3:]]\n\n if ctype not in ('PRODUCCION', 'CONSUMO'):\n raise ValueError(\"Carrier type is not 'CONSUMO' or 'PRODUCCION' in line %i\\n\\t%s\" % (ii+2, line))\n if originoruse not in ('EPB', 'NEPB', 'INSITU', 'COGENERACION'):\n raise ValueError((\"Origin or end use is not 'EPB', 'NEPB', 'INSITU' or 'COGENERACION'\"\n \" in line %i\\n\\t%s\" % (ii+2, line)))\n\n components.append({ \"carrier\": carrier, \"ctype\": ctype,\n \"originoruse\": originoruse,\n \"values\": values, \"comment\": comment })\n numsteps = [len(c['values']) for c in components]\n if max(numsteps) != min(numsteps):\n raise ValueError(\"All input must have the same number of timesteps.\")\n return (parsemeta(meta), components)", "def find_meta(filename, source_directory):\n metafile = os.path.join(source_directory, filename + '_Metadata.csv')\n metadf = pd.read_csv(metafile)\n metadf = metadf.rename(str.lower, axis='columns')\n\n schfile = metadf['schedule_file_name'][0].split('\\\\')[-1].split('.sdu')[0].split('-')[1]\n param = schfile.replace('_', '.')\n\n return param", "def read_cli(fname):\n \n meta = {}\n data = None\n header = []\n\n meta['fname'] = fname\n meta['id'] = ''.join([L for L in fname if L in '0123456789'])\n \n fid = open(fname, 'r')\n meta['CLIGEN Version'] = fid.readline().strip()\n fid.readline()\n meta['Station'] = ' '.join(fid.readline().strip().split())\n\n fid.readline()\n line = fid.readline().strip().split()\n meta['Latitude'] = float(line[0])\n meta['Longitude'] = float(line[1])\n meta['Elevation'] = float(line[2])\n meta['Obs. Years'] = float(line[3])\n meta['Beginning Year'] = float(line[4])\n meta['Years Simulated'] = float(line[5])\n meta['Command Line'] = ' '.join(line[6:])\n\n fid.readline()\n meta['Observed monthly ave max temperature (C)'] = \\\n list(map(float, fid.readline().split()))\n\n fid.readline()\n meta['Observed monthly ave min temperature (C)'] = \\\n list(map(float, fid.readline().split()))\n\n fid.readline()\n meta['Observed monthly ave solar radiation (Langleys/day)'] = \\\n list(map(float, fid.readline().split()))\n\n fid.readline()\n meta['Observed monthly ave precipitation (mm)'] = \\\n list(map(float, fid.readline().split()))\n\n header = fid.readline().strip().split()\n \n fid.readline()\n\n _data = []\n for line in fid.readlines():\n cells = line.split()\n\n if len(cells) != len(header):\n break\n\n _data.append([float(c) for c in cells])\n \n data = {}\n for h,v in zip(header, zip(*_data)):\n data[h] = v\n\n del _data\n del header\n\n return (meta,data)", "def open_igra_metadata(filename):\n import pandas as pd\n infos = \"\"\"\n IGRAID 1- 11 Character\n WMOID 13- 17 Integer\n NAME 19- 48 Character\n NAMFLAG 50- 50 Character\n LATITUDE 52- 60 Real\n LATFLAG 62- 62 Character\n LONGITUDE 64- 72 Real\n LONFLAG 74- 74 Character\n ELEVATION 76- 81 Real\n ELVFLAG 83- 83 Character\n YEAR 85- 88 Integer\n MONTH 90- 91 Integer\n DAY 93- 94 Integer\n HOUR 96- 97 Integer\n DATEIND 99- 99 Integer\n EVENT 101-119 Character\n ALTIND 121-122 Character\n BEFINFO 124-163 Character\n BEFFLAG 164-164 Character\n LINK 166-167 Character\n AFTINFO 169-208 Character\n AFTFLAG 209-209 Character\n REFERENCE 211-235 Character\n COMMENT 236-315 Character\n UPDCOM 316-346 Character\n UPDDATE 348-354 Character\n \"\"\"\n import numpy as np\n colspecs = []\n header = []\n types = {}\n for iline in infos.splitlines():\n if iline == '':\n continue\n ih = iline[0:11].strip().lower()\n header.append(ih)\n ii = int(iline[13:16]) - 1\n ij = int(iline[17:20])\n 
colspecs.append((ii, ij))\n it = iline[22:].strip()\n if it == 'Character':\n it = 'str'\n elif it == 'Real':\n it = 'float'\n else:\n it = 'int'\n types[ih] = it\n\n data = pd.read_fwf(filename, colspecs=colspecs, header=None, dtype=types, names=header)\n data = data.replace('nan', '')\n data['date'] = pd.to_datetime((data.year * 1000000 +\n np.where(data.month.values == 99, 6, data.month.values) * 10000 +\n np.where(data.day.values == 99, 15, data.day.values) * 100 +\n np.where(data.hour.values == 99, 0, data.hour.values)).apply(str), format='%Y%m%d%H')\n return data", "def get_data(found_file, created_file):\n\n try:\n fh_f = fits.open(found_file)\n print('Found file has: ', fh_f.info())\n except Exception:\n print(' FATAL ERROR: Unable to open found file ', found_file)\n\n try:\n fh_c = fits.open(created_file)\n print('Created file has: ', fh_c.info())\n except Exception:\n print(' FATAL ERROR: Unable to open created file ', created_file)\n\n try:\n data_f = fh_f['SCI'].data\n except Exception:\n print(' FATAL ERROR: data for found data was expected in SCI extension')\n\n try:\n data_c = fh_c['SCI'].data\n except Exception:\n try:\n data_c = fh_c[0].data\n except Exception:\n print(' FATAL ERROR: created data expected in either SCI or 0 extensions')\n\n return fh_f, fh_c, data_f, data_c", "def _construct_standardized_metadata(self):\n return None", "def create_file_meta_data(vk4_container, args):\n log.debug(\"Entering create_file_meta_data()\")\n\n header_list = list()\n header_list.append(args.layer)\n header_list.append('\\n')\n header_list.append('File name')\n header_list.append(args.input)\n header_list.append('Title')\n header_list.append(args.input[:-4])\n header_list.append('Measurement date')\n header_list.append(str(vk4_container.measurement_conditions['month']) + '\\\\' +\n str(vk4_container.measurement_conditions['day']) + '\\\\' +\n str(vk4_container.measurement_conditions['year']))\n header_list.append('Measurement time')\n header_list.append(str(vk4_container.measurement_conditions['hour']) + ':' +\n str(vk4_container.measurement_conditions['minute']) + ':' +\n str(vk4_container.measurement_conditions['second']))\n # User mode?\n header_list.append('Objective lens')\n header_list.append(vk4_container.string_data['lens_name'] + ' ' +\n str(vk4_container.measurement_conditions['lens_magnification'] / 10.0) + 'x')\n header_list.append('Numerical Aperture')\n header_list.append(vk4_container.measurement_conditions['num_aperture'] / 1000.0)\n # Size? Standard?\n # Mode? Surface profile?\n # RPD? OFF?\n header_list.append('Quality')\n header_list.append('Skip 4 lines')\n header_list.append('Pitch (um)')\n header_list.append(vk4_container.measurement_conditions['pitch'] / 1000.0)\n header_list.append('Z measurement distance (um)')\n header_list.append(vk4_container.measurement_conditions['distance'] / 1000.0)\n # Double scan? OFF?\n header_list.append('Brightness 1')\n header_list.append(vk4_container.measurement_conditions['PMT_gain'])\n header_list.append('Brightness 2')\n br_2 = vk4_container.measurement_conditions['PMT_gain_2']\n header_list.append('---') if br_2 == 0 else header_list.append(br_2)\n # Not sure how they got ND filter to 30% in example csv\n header_list.append('ND filter (%)')\n header_list.append(vk4_container.measurement_conditions['ND_filter'] * 30)\n header_list.append('Optical zoom')\n header_list.append(vk4_container.measurement_conditions['optical_zoom'] / 10.0)\n # Average count? 1 time?\n # Filter? OFF?\n # Fine mode? 
ON?\n header_list.append('Line count')\n l_count = vk4_container.measurement_conditions['number_of_lines']\n header_list.append(l_count)\n\n header_list.append('Line position1')\n if l_count == 0:\n header_list.append('---')\n else:\n header_list.append(vk4_container.measurement_conditions['reserved_1'][0])\n\n header_list.append('Line position2')\n if l_count == 0:\n header_list.append('---')\n else:\n header_list.append(vk4_container.measurement_conditions['reserved_1'][1])\n\n header_list.append('Line position3')\n if l_count == 0:\n header_list.append('---')\n else:\n header_list.append(vk4_container.measurement_conditions['reserved_1'][2])\n\n header_list.append('Camera gain (db)')\n header_list.append(vk4_container.measurement_conditions['camera_gain'] * 6)\n header_list.append('Shutter speed')\n header_list.append(vk4_container.measurement_conditions['shutter_speed'])\n header_list.append('White balance mode')\n wb_mode = vk4_container.measurement_conditions['white_balance_mode']\n header_list.append('Auto') if wb_mode == 1 else header_list.append(wb_mode)\n header_list.append('White balance R')\n header_list.append(vk4_container.measurement_conditions['white_balance_red'])\n header_list.append('White balance B')\n header_list.append(vk4_container.measurement_conditions['white_balance_blue'])\n header_list.append('Intensity correction mode')\n header_list.append('Gamma correction')\n header_list.append('Gamma correction value')\n header_list.append(vk4_container.measurement_conditions['gamma'] / 100.0)\n header_list.append('Gamma offset (%)')\n header_list.append(vk4_container.measurement_conditions['gamma_correction_offset'] /\n 65536.0)\n # W/B inversion? OFF?\n # Head type? VK-X110?\n # Correct intensity eccentricity? OFF?\n # Correct field curvature? OFF?\n header_list.append('XY calibration (nm/pixel)')\n header_list.append(vk4_container.measurement_conditions['x_length_per_pixel'] / 1000.0)\n header_list.append('Z calibration (nm/digit)')\n header_list.append(vk4_container.measurement_conditions['z_length_per_digit'] / 1000.0)\n # Saturation?\n # Contrast?\n # Brightness?\n # AI noise elimination? Auto(ON)?\n # Angled surface noise filter? Auto(OFF)?\n header_list.append('Width')\n header_list.append(vk4_container.image_width)\n header_list.append('Height')\n header_list.append(vk4_container.image_height)\n # Skip amount? 
1?\n\n out_type = args.type\n if out_type == 'hcsv':\n log.debug(\"Exiting create_file_meta_data() where out_type == %s\" % out_type)\n return np.reshape(header_list, (len(header_list) // 2, 2))\n else:\n # Can use a dict to attach info to an image using PILs Image module\n meta_dict = dict()\n for n in range(0, len(header_list), 2):\n meta_dict[header_list[n]] = header_list[n + 1]\n\n log.debug(\"Exiting create_file_meta_data() where out_type == %s\" % out_type)\n return meta_dict", "def _GetImageInfo(self,path):\n hd = Header(path, scan=True)\n hdr = hd.hdr\n self.hdr = hdr\n if hdr is None:\n# Either a ref.dat file or it isn't an imaging file.\n if 'ref' in path and 'dat' in path:\n self.refdats[os.path.realpath(path)] = True\n info = {'type':'refdat'}\n return info\n else:\n return None\n elif hdr['filetype'] == 'dicom' and not path.endswith('.yaml'):\n# Write a yaml file to the raw data directory if possible.\n dirname, outfile = self._yaml_filename(path)\n yaml_name = '%s/%s' % (dirname, outfile)\n if not os.path.exists(yaml_name):\n# Create yaml file using dirname,\n# e.g., ../anatomicals/S2_EFGRE3D/s2_efgre3d.yaml\n try:\n hd.write_hdr_to_yaml('%s/%s' % (dirname,outfile))\n except IOError:\n# This is a nonessential function, so ignore exceptions\n# such as access violations.\n pass\n elif hdr['filetype'] == 'dicom' or hdr['filetype'] == 'ge_ifile':\n if not os.path.isdir(path):\n path = os.path.dirname(path)\n shdr = hdr['subhdr']\n nhdr = hdr['native_header']\n self.shdr = shdr\n if 'dti' in shdr.get('PulseSequenceName','').lower() \\\n or 'dti' in nhdr.get('PulseSequenceFile',''):\n psdname = 'dti'\n else:\n psdname = os.path.basename((shdr.get('PulseSequenceName','').strip()).lower())\n info = {'psdname':psdname, \\\n 'acqtime':shdr['AcqTime'], \\\n 'series':int(shdr['SeriesNumber']), \\\n 'plane':hdr['plane'].strip(), \\\n 'type':self.imgtype.get(psdname,None), \\\n 'plane':hdr['plane'], \\\n 'acqtime':shdr['SeriesTime'], \\\n# 'fmapdir':None, \\\n 'refdat':None, \\\n 'imgfile':None, \\\n 'base':None, \\\n 'tdim':int(hdr['tdim']), \\\n 'echo_spacing':None, \\\n 'filetype':'brik', \\\n 'suffix':self.suffix.get(hdr['filetype'], 'brik'), \\\n 'data_filetype':hdr['filetype']}\n if info['type'] == 'localizer':\n# Don't process the localizer.\n return info\n if isinstance(info['acqtime'], int):\n info['acquisition_time'] = time.ctime(info['acqtime'])\n if nhdr.get('ImageFormat',('unknown'))[0] == 'DERIVED' and info['type'] == 'epi':\n# Sometimes screenshots are defined as epis.\n info['type'] = None\n\n# Call the method appropriate to the type of scan in this series.\n stat = apply( self.GetInfoMethods.get(info['type'], self._NoInfo), \\\n [info, path])\n if stat:\n info = {'type':'break'}\n return info\n info['suffix'] = self.suffix.get(info['filetype'], 'brik')\n return info", "def load_plothrm_detailed(filename):\n\n filename = Path(filename)\n\n book = xlrd.open_workbook(filename)\n xlsx_file = pd.ExcelFile(filename)\n\n df_seq = None\n df_meta = None\n\n for sheetidx, sheetname in enumerate(xlsx_file.sheet_names):\n\n print(f'Parsing \"{filename}\" sheet \"{sheetname}\"', flush=True)\n\n try:\n\n sheetnumber = sheetidx + 1\n worksheet = book.sheet_by_name(sheetname)\n\n # try to load a value from the row and column\n def value_default(row, col, d):\n if row >= worksheet.nrows or col >= worksheet.ncols:\n return d\n s = worksheet.cell_value(row, col)\n if type(s) is str and len(s) == 0:\n return d\n else:\n return s\n\n # load metadata\n meta = dict()\n meta['sheetname'] = 
sheetname\n meta['sheetnumber'] = sheetnumber\n meta['plothrm_version'] = str(worksheet.cell_value(0, 1))\n meta['data_filename'] = str(worksheet.cell_value(1, 1))\n meta['seq_filename'] = str(worksheet.cell_value(2, 1))\n meta['nchan'] = int(worksheet.cell_value(3, 1))\n meta['nsamp'] = int(worksheet.cell_value(4, 1))\n meta['sampling_hz'] = float(worksheet.cell_value(5, 1))\n meta['opt_zero_above'] = float(value_default(6, 2, np.nan))\n meta['opt_remove_baselines'] = bool (value_default(6, 4, 0))\n meta['opt_remove_sync'] = bool (value_default(6, 6, 0))\n meta['opt_channel_smooth'] = bool (value_default(6, 8, 0))\n meta['opt_sample_smooth'] = float(value_default(6, 10, np.nan))\n meta['height_res'] = float(worksheet.cell_value(7, 1))\n meta['sync_bound'] = float(worksheet.cell_value(8, 1))\n # TODO load region data at rows 9, 10, 11 (Excel 1-based index rows 10-12)\n\n for k, v in meta.items():\n meta[k] = [v]\n meta = pd.DataFrame(meta)\n\n if df_meta is None:\n df_meta = meta\n else:\n df_meta = df_meta.append(meta)\n\n # load sequence data\n df = pd.read_excel(filename, sheetname, header=13, na_values=['ERROR', 'Infinity'])\n df['sheetname'] = sheetname\n df['sheetnumber'] = sheetnumber\n if df_seq is None:\n df_seq = df\n else:\n df_seq = df_seq.append(df)\n\n except Exception as e:\n print(f' error parsing worksheet \"{sheetname}\": {e}')\n continue\n\n return df_seq.reset_index(drop=True), df_meta.reset_index(drop=True)", "def _extract_metadata_from_zipfile(self, extra_metadata):\n\n try:\n\n if type(self) == SAFESentinel2:\n zip_metadata = sentinel2.Sentinel2Scan(self.fname).sentinel_metadata\n elif type(self) == SAFESentinel3:\n zip_metadata = sentinel3.Sentinel3Scan(self.fname).sentinel_metadata\n\n extra_metadata[\"solar_zenith\"] = zip_metadata.return_dict()\n except:\n return \n \n datatake_attr = \"Datatake Type\"\n if hasattr(zip_metadata, datatake_attr):\n extra_metadata[\"product_info\"][datatake_attr] = getattr(zip_metadata, datatake_attr)\n\n cloud_attr = \"Cloud Coverage Assessment\"\n if hasattr(zip_metadata, cloud_attr):\n extra_metadata[\"quality_info\"] = {cloud_attr: float(getattr(zip_metadata, cloud_attr))}" ]
[ "0.6787354", "0.66897845", "0.6515701", "0.6452773", "0.6449452", "0.63816357", "0.6350765", "0.62949073", "0.62506527", "0.6211634", "0.6130489", "0.60176396", "0.6001111", "0.59593594", "0.5942799", "0.59364325", "0.59356487", "0.59337986", "0.59134674", "0.5909135", "0.59081376", "0.5902693", "0.5855389", "0.5830416", "0.58228153", "0.579719", "0.57736266", "0.5772647", "0.5766275", "0.5766225" ]
0.7197249
0
Implementation of the analytical solution to Empirical Variational Bayes Matrix Factorization. This function can be used to calculate the analytical solution to empirical VBMF.
def EVBMF(Y, sigma2=None, H=None): L,M = Y.shape #has to be L<=M if H is None: H = L alpha = L/M tauubar = 2.5129*np.sqrt(alpha) #SVD of the input matrix, max rank of H _,s,_ = torch.svd(Y) s = s[:H] #Calculate residual residual = 0. if H<L: # residual = np.sum(np.sum(Y**2)-np.sum(s**2)) residual = torch.sum(torch.sum(Y**2)-torch.sum(s**2)) #Estimation of the variance when sigma2 is unspecified if sigma2 is None: xubar = (1+tauubar)*(1+alpha/tauubar) eH_ub = int(np.min([np.ceil(L/(1+alpha))-1, H]))-1 upper_bound = (torch.sum(s**2)+residual)/(L*M) lower_bound = np.max([s[eH_ub+1]**2/(M*xubar), torch.mean(s[eH_ub+1:]**2)/M]) scale = 1.#/lower_bound s = s*np.sqrt(scale) residual = residual*scale lower_bound = float(lower_bound)*scale upper_bound = float(upper_bound)*scale sigma2_opt = minimize_scalar(EVBsigma2, args=(L,M,s,residual,xubar), bounds=[lower_bound, upper_bound], method='Bounded') sigma2 = sigma2_opt.x #Threshold gamma term threshold = np.sqrt(M*sigma2*(1+tauubar)*(1+alpha/tauubar)) pos = torch.sum(s>threshold) if pos == 0: return np.array([]) #Formula (15) from [2] d = torch.mul(s[:pos]/2, 1-(L+M)*sigma2/s[:pos]**2 + torch.sqrt( (1-((L+M)*sigma2)/s[:pos]**2)**2 - (4*L*M*sigma2**2)/s[:pos]**4) ) return torch.diag(d)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vbmstep(self):\n for k in range(self.k):\n self.beta_k[k] = self.beta_0 + self.counts[k]\n self.m_k[k] = (1 / self.beta_k[k]) * (self.beta_0 * self.m_0 +\n self.counts[k] * self.means[k])\n\n tmp = (self.beta_0 * self.counts[k]) / (self.beta_0 + self.counts[k])\n tmp2 = (self.means[k] - self.m_0)\n tmp = np.linalg.inv(self.W_0) + self.counts[k] * self.covars[k] + tmp * tmp2 @ tmp2.T\n self.w_k[k] = np.linalg.inv(tmp)\n self.nu_k[k] = self.nu_0 + self.counts[k]\n self.alpha_k[k] = self.alpha_0[k] + self.counts[k]", "def vbe_step(self):\n\n digam_alpha = digamma(np.sum(self.alpha_k))\n for k in range(self.k):\n\n # compute estimate over ln det(lamb)\n tmp = sum(digamma((self.nu_k[k] + 1 - j) / 2) for j in range(self.dim))\n\n det = np.linalg.det(self.w_k[k])\n self.log_lamb[k] = tmp + self.dim * np.log(2) + np.log(det)\n\n # compute estimate for ln pi\n self.log_pi[k] = digamma(self.alpha_k[k]) - digam_alpha\n\n for n in range(self.n):\n tmp = self.x[n] - self.m_k[k]\n # compute estimate over mu and lambda\n self.estimate[n, k] = self.dim * (1 / self.beta_k[k]) + self.nu_k[k] * (tmp.T @ self.w_k[k] @ tmp)", "def viterbi(self):\n # initialisation\n self.phi = zeros((self.noOfEmmittingStates+2, self.T + 1))\n self.phi[0,0] = 1.0\n for i in range(1,self.noOfEmmittingStates+2):\n self.phi[i,0] = 0.0\n for t in range(1,self.T+1):\n self.phi[0,t] = 0.0\n self.traceback = zeros((self.noOfEmmittingStates+1, self.T+1))\n\n # main recursion\n for t in range(1, self.T + 1):\n for j in range(1, self.noOfEmmittingStates + 1):\n phiTemp = zeros((self.noOfEmmittingStates + 1, 1))\n for k in range(self.noOfEmmittingStates+1):\n phiTemp[k,0] = self.phi[k,t-1] * self.transitionMatrix[k, j-1]\n self.traceback[j-1,t-1] = nonzero(phiTemp == phiTemp.max(0))[0][0]\n self.phi[j, t] = phiTemp.max(0) * self.b[j-1, t-1]\n\n # last column - set states which can't reach term to 0, sub for term\n for j in range(1,self.noOfEmmittingStates + 1):\n if self.transitionMatrix[j,-1] == 0:\n self.phi[j,-1] = 0\n phiTemp = zeros((self.noOfEmmittingStates+1, 1))\n for k in range(self.noOfEmmittingStates + 1):\n phiTemp[k,0] = self.phi[k,-1] * self.transitionMatrix[k,-1]\n self.traceback[-1,-1] = nonzero(phiTemp == phiTemp.max(0))[0][0]\n self.phi[-1,-1] = phiTemp.max(0)", "def big_analysis(beta0s=[0.5, 0.8, 1.1, 1.4, 1.7], ks=range(6), betaps=[1.2, 1.5, 2, 3]):", "def solve_VFI(self):\r\n dimC = self.dimA ; dimA = self.dimA ; dimW = self.dimW \r\n C = self.c_grid ; A = self.a_grid ; W = self.W_grid\r\n tol = self.tol ; Niter = self.Niter ; R = self.R\r\n beta = self.beta ; Pi = self.Pi\r\n \r\n V0 = np.zeros((dimA,dimC,dimW))\r\n V1 = np.zeros((dimA,dimC,dimW))\r\n Pol = np.zeros((dimA,dimC,dimW))\r\n U = np.zeros((dimA,dimC,dimW))\r\n \r\n t0 = time()\r\n diff = 1 ; niter = 0\r\n \r\n while diff > tol:\r\n niter += 1\r\n # Value update step\r\n for ia in range(dimA):\r\n for ic in range(dimC):\r\n for iw in range(dimW):\r\n c = W[iw] + R*A[ia] - A\r\n x = C[ic]\r\n \r\n c[c < 0] = np.nan \r\n if x < 0:\r\n x = np.nan\r\n \r\n u = self.u(c,x) \r\n U[:,ic,iw] = u \r\n \r\n Objective = U + beta * V0 @ Pi.T\r\n V1[ia,:,:] = np.nanmax(Objective, axis = 0)\r\n Pol[ia,:,:] = np.nanargmax(Objective, axis = 0)\r\n \r\n # Evaluate distance between the value functions\r\n diff = np.max(np.max(np.abs(V1 - V0))) \r\n V0[:] = V1\r\n \r\n # Break the while loop if too many iterations\r\n #print(\"The current error is \"+str(diff))\r\n if niter > Niter:\r\n print('Ops, no convergence')\r\n break\r\n \r\n t1 = time()\r\n #print('VFI 
algorithm took {0:0d} iterations and {1:.2f} seconds.'.format(niter, t1 - t0))\r\n \r\n self.V1 = V1 ; self.Pol = Pol", "def _exec_vector(self, a, bd, mask):\n\n npt = bd.shape[0]\n n = self.X_ADJUSTED.shape[0]\n zero_index = None\n zero_value = False\n\n a_inv = scipy.linalg.inv(a)\n\n if np.any(np.absolute(bd) <= self.eps):\n zero_value = True\n zero_index = np.where(np.absolute(bd) <= self.eps)\n\n b = np.zeros((npt, n+1, 1))\n b[:, :n, 0] = - self.variogram_function(self.variogram_model_parameters, bd)\n if zero_value:\n b[zero_index[0], zero_index[1], 0] = 0.0\n b[:, n, 0] = 1.0\n\n if (~mask).any():\n mask_b = np.repeat(mask[:, np.newaxis, np.newaxis], n+1, axis=1)\n b = np.ma.array(b, mask=mask_b)\n\n x = np.dot(a_inv, b.reshape((npt, n+1)).T).reshape((1, n+1, npt)).T\n kvalues = np.sum(x[:, :n, 0] * self.VALUES, axis=1)\n sigmasq = np.sum(x[:, :, 0] * -b[:, :, 0], axis=1)\n\n return kvalues, sigmasq", "def __init__(self,bvec,sqivar,amatrix):\n from numpy.linalg import svd\n #\n # Save the inputs\n #\n #self.bvec = bvec\n self.sqivar = sqivar\n self.amatrix = amatrix\n if len(amatrix.shape) > 1:\n self.nstar = amatrix.shape[1]\n else:\n self.nstar = 1\n self.bvec = bvec*sqivar\n self.mmatrix = self.amatrix * np.tile(sqivar,self.nstar).reshape(self.nstar,bvec.size).transpose()\n mm = np.dot(self.mmatrix.T,self.mmatrix)\n self.uu,self.ww,self.vv = svd(mm,full_matrices=False)\n self.mmi = np.dot((self.vv.T / np.tile(self.ww,self.nstar).reshape(self.nstar,self.nstar)),self.uu.T)\n return", "def a_test2_bbvi():\n model = ARIMAX(formula=\"y ~ x1 + x2\", data=data, ar=1, ma=1, family=Exponential())\n x = model.fit('BBVI',iterations=100)\n assert(len(model.latent_variables.z_list) == 5)\n lvs = np.array([i.value for i in model.latent_variables.z_list])\n assert(len(lvs[np.isnan(lvs)]) == 0)", "def a_test2_bbvi():\n model = ARIMAX(formula=\"y ~ x1 + x2\", data=data, ar=1, ma=1, family=Exponential())\n x = model.fit('BBVI',iterations=100)\n assert(len(model.latent_variables.z_list) == 5)\n lvs = np.array([i.value for i in model.latent_variables.z_list])\n assert(len(lvs[np.isnan(lvs)]) == 0)", "def a_test_bbvi():\n model = ARIMAX(formula=\"y ~ x1\", data=data, ar=1, ma=1, family=Exponential())\n x = model.fit('BBVI',iterations=100)\n assert(len(model.latent_variables.z_list) == 4)\n lvs = np.array([i.value for i in model.latent_variables.z_list])\n assert(len(lvs[np.isnan(lvs)]) == 0)", "def __q2v_ff(self, vm, beta, q):\n return float((vm * beta - np.sqrt(np.power(vm * beta, 2) - 4 * vm * beta * q)) / (2 * vm))", "def fAVM(RHOB,Dw,Ds,Df,Dc1,PHIc1,Ck,Dk,PHIk,RSK):\n#\n# 5.1.1 Initialise Outputs & Check for missing values in inputs:\n# --------------------------------------------------------------\n\tPHIt=MissingValue\n\tPHIe=MissingValue\n\tCBW=MissingValue\n\tBVW=MissingValue\n\tHCPV=MissingValue\n\tVf=MissingValue\n\tVs=MissingValue\n\tSwt=MissingValue\n\tSwe=MissingValue\n\tVc1=MissingValue\n\tVc2=MissingValue\n\tVc3=MissingValue\n\tVk=MissingValue\n\tToc=MissingValue\n\tQc=MissingValue\n\tGDen=MissingValue\n\tif MissingValue in (RHOB,Dw,Ds,Df,Dc1,PHIc1,Ck,Dk,PHIk,RSK):\n\t\treturn PHIt,PHIe,CBW,BVW,HCPV,Vf,Vs,Swt,Swe,Vc1,Vc2,Vc3,Vk,Toc,Qc,GDen\n#\n# 5.1.2 Initialise parameters:\n# ----------------------------\n\tNIter=0\n\tNIterMax=100\n\tErrIter=10000\n\tTolErrIter=0.0001\n\tIterEnd=0\n\tVk=0.000 # Initially assumme no kerogen\n\tDh=Df\n#\n#\t5.1.3 Start interative loop:\n#\t-----------------------------\n\twhile IterEnd==0:\n#\n# 5.5.3.1 Organic and Inorganic Component Density Values:\n# 
-------------------------------------------------------\n\t\tDBI=(1-PHIc1)*Dc1+(PHIc1*Dw) # Bulk Density of Inorganic Component\n\t\tDBO=(1-PHIk)*Dk+(PHIk*Dh)# Bulk Density of Organic Component\n#\n# 5.1.3.2 Compute Volume of Organic and Inorganic Component:\n# ----------------------------------------------------------\n\t\tVOR=(DBI-RHOB)/(DBI-DBO)\n\t\tVOR=ImposeLimits(VOR,0,1)\n\t\tVIN=(1-VOR)\n#\n# 5.1.3.3 Compute Volumetrics, Total & Effective Porosity and Total & Effective Water Saturation:\n# ---------------------------------------\t-------------------------------------------------------\n\t\tVc1=VIN*(1-PHIc1)\n\t\tVc2=0.000\n\t\tVc3=0.000\n\t\tVk=VOR*(1-PHIk)\n\t\tPHIt=VIN*PHIc1+VOR*PHIk\n\t\tPHIe=VOR*PHIk\n\t\tSwt=1-((VOR*PHIk)/PHIt)\n\t\tSwt=ImposeLimits(Swt,0,1)\n\t\tSwe=0.000\n\t\tSxot=Swt\n\t\tSxoe=Swe\n#\n# 5.1.3.4 Compute Bulk Volume of Water, Hydrocarbon Pore Volume and Pore Space Fluid Properties:\n# ---------------------------------------\t------------------------------------------------------\n\t\tBVW=PHIe*Swe\n\t\tHCPV=PHIe*(1-Swe)\n\t\tVs=RSK*Vk # Estimate volume of adsorbed (sorbed) hydrocarbon\n\t\tVs=ImposeLimits(Vs,0,HCPV)\n\t\tVf=(HCPV-Vs)\n\t\tVf=ImposeLimits(Vf,0,(HCPV-Vs))\n#\n# 5.1.3.5 Recompute hydrocarbon properties in the pore space:\n# -----------------------------------------------------------\n\t\tSum=Vs+Vf\n\t\tif(Sum<=0.000):\n\t\t\tDh=Df\n\t\telse:\n\t\t\tDh=(Ds*Vs+Df*Vf)/(Vs+Vf)\n#\n# 5.1.4 Test for interative computations:\n# ---------------------------------------\n\t\tNIter=NIter+1\n\t\tif(NIter>=NIterMax):\n\t\t\tIterEnd=1\n\t\telse:\t\t\t\n\t\t\tif(NIter<=2):\n\t\t\t\tResultOld=[1,1,1,1,1,1,1,1,1] # Initial Setting\n\t\t\t\tResultNew=[Vc1,Vc2,Vc3,Vk,Vs,Vf,PHIe,Swt,Swe] # Current Results\n\t\t\t\tErrIter=ComputeMatrixDifference(ResultOld,ResultNew)\n\t\t\t\tResultOld=ResultNew\n\t\t\telse:\n\t\t\t\tResultNew=[Vc1,Vc2,Vc3,Vk,Vs,Vf,PHIe,Swt,Swe] # Current Results\n\t\t\t\tErrIter=ComputeMatrixDifference(ResultOld,ResultNew)\n\t\t\t\tResultOld=ResultNew\n\t\t\t\tif(ErrIter<=TolErrIter):\n\t\t\t\t\tIterEnd=1\n#\n# 5.1.6 Preoutput computations:\n# ------------------------------\n\tQc=MissingValue\n\tDc2=0.00\n\tDc3=0.00\n\tCBW=PHIt-PHIe # The assumption is that all microporosity can be considered to be clay bound water.\n\tToc=fToc_Wtf(Vc1,Vc2,Vc3,Vk,0,Ck,Dc1,Dc2,Dc3,Dk,Dw) # TOC-wt fraction. Note: Vrw=0 in fToc_Wtf(Vc1,Vc2,Vc3,Vk,Vrw,Ck,Dc1,Dc2,Dc3,Dk,Dw)\n\tGDen=fOrmGDen(Vc1,Vc2,Vc3,Vk,0,Dc1,Dc2,Dc3,Dk,Dw) # Grain Density. 
Note: Vrw=0 in fOrmGDen(Vc1,Vc2,Vc3,Vk,Vrw,Dc1,Dc2,Dc3,Dk,Dw)\n#\n# 5.5.7 Output Results:\n# \t-------------------\n\treturn PHIt,PHIe,CBW,BVW,HCPV,Vf,Vs,Swt,Swe,Vc1,Vc2,Vc3,Vk,Toc,Qc,GDen", "def Empirical_Bayes_Estimator(df_train, df_test):\n\n # number of individual forecasts and number of periods\n K = df_test.shape[1]\n T = df_train.shape[0]\n\n # define the prior weights (simple average, with intercept equal zero)\n beta_0 = np.append(np.array(0), np.full(K, fill_value=1/K, dtype=float))\n\n # design matrix (intercept + individual forecasts)\n F = df_train.iloc[:, 1:].values\n F = np.insert(F, 0, 1, axis=1)\n\n # define y (observed values)\n y = df_train.iloc[:, 0].values\n\n # OLS weights\n beta_hat = np.dot(np.linalg.inv(np.dot(np.transpose(F), F)),\n np.dot(np.transpose(F), y))\n\n # sigma\n sigma_hat_sq = np.dot(\n np.transpose(y - np.dot(F, beta_hat)),\n y - np.dot(F, beta_hat)\n ) / T\n\n # tau\n num = np.dot(np.transpose(beta_hat - beta_0), beta_hat - beta_0)\n denum = np.trace(np.linalg.inv(np.dot(np.transpose(F), F)))\n tau_hat_sq = num / denum - sigma_hat_sq\n\n # combining weights\n shrinkage = 1 - sigma_hat_sq / (sigma_hat_sq + tau_hat_sq)\n beta_1_hat = beta_0 + shrinkage*(beta_hat - beta_0)\n\n # predictions\n df_pred = pd.DataFrame(\n {\"Empirical Bayes Estimator\":\n beta_1_hat[0] + np.dot(df_test, beta_1_hat[1:])},\n index=df_test.index)\n\n return df_pred", "def test_bayes_factor_b(self):\n model_1 = ufloat(2, 1)\n model_2 = ufloat(4, 1)\n expected_result = ufloat(-4, 2.82842712474619032)\n actual_result = utils.bayes_factor(model_1, model_2)\n assert_almost_equal(actual_result.n, expected_result.n)\n assert_almost_equal(actual_result.s, expected_result.s)", "def _beta(self):\n return _handle_ab(self.solution, self.use_const)[1]", "def bvarm(b):\n return np.nanmean((b - np.nanmean(b, axis=1)[:, np.newaxis])**2, axis=1)", "def vecvari1(array,W,B=None,sqrt=False,BB=False,BS=False,verbose=False,sizz=1,\r\n KCD=False,mulb=False,mul2=False,v3=0,**kwargs):\r\n \r\n arrs=array.shape\r\n #array=np.expand_dims(array,len(array.shape)//2)\r\n ashp=W.shape\r\n dstp=arrs[0]-1 if not((arrs[0]-1)==0) else 1\r\n if verbose:\r\n print(\"VECVARI1:: B? 
{},SQRT {}, BB {}, BS {}, SIZZ {}, KCD {}, MULB {}, MUL2 {}\".format(\r\n not(B is None),bool(sqrt),bool(BB),bool(BS),sizz,bool(KCD),bool(mulb),bool(mul2)))\r\n print('arrayshape',arrs)\r\n if verbose==2:\r\n print('Wsample',W[:,:,-1,-1])\r\n else:\r\n print('Wsample',W[:,:,-1,-1])\r\n if not(B is None):\r\n print(\"Bsamp\",B)\r\n print('wshape',ashp)\r\n if B is None:\r\n B=np.zeros((1,1,1,1),dtype=np.float32)#channel\r\n bt=len(B.shape)==2\r\n xi=(-2,-1)#xi=(-1,-2)\r\n x2=(-3,-2,-1)\r\n if len(ashp)==5 :#not all data and all weights == 3d data\r\n xi=(-3,-2,-1)\r\n x2=(-4,-3,-2,-1)\r\n if v3:\r\n if mulb:#probably a bad idea\r\n mul=array+B\r\n else:\r\n mul=array\r\n else:\r\n if mulb:#probably a bad idea\r\n B=np.reshape(B,(*B.shape,*[1 for _ in range(len(ashp)-len(B.shape))]))\r\n mul=(array*W)+B\r\n else:\r\n mul=array*W\r\n size=np.sum(W,axis=xi,keepdims=True)#shape=(outputs, channel)\r\n\r\n if BB :\r\n B=np.reshape(B,(*B.shape,*[1 for _ in range(len(ashp)-len(B.shape))]))\r\n if verbose:\r\n if verbose==2:\r\n print('mulsamp',mul[:,-1,-1,::dstp],'arrsamp',array[-1,-1,:])\r\n else:\r\n print('mulsamp',mul[-1,-1,-1],'arrsamp',array[-1,-1,-1])\r\n print('sizsamp',size)\r\n print('bbb',B.shape)\r\n print(\"size\",size.shape)\r\n if sizz==1:#not a good idea\r\n mean=np.sum((mul),axis=xi,keepdims=True)/size\r\n else:\r\n mean=np.sum((mul),axis=xi,keepdims=True)/np.broadcast_to([ashp[-2]*ashp[-1]],(ashp[1],1,1))\r\n if verbose:\r\n if verbose==2:\r\n print(\"meanshape\",mean.shape)\r\n print(\"meansamp\",mean[:,:,:,::dstp,-1,-1,-1])\r\n else:\r\n print(\"meansamp\",mean[-1,:,:,-1,-1,-1,-1])\r\n print(\"etst\",mean.shape)\r\n if verbose==2:\r\n print(\"ameanshp\",(mul-mean).shape)\r\n print(\"amean\",(mul-mean)[:,:,:,::dstp,-1,-1])\r\n else:\r\n print(\"amean\",(mul-mean)[-1,-1,-1])\r\n if mul2:\r\n if mulb:#probably a bad idea\r\n mul=((array-mean)*W)+B\r\n else:\r\n mul=((array-mean)*W)\r\n i=(np.square(mul))/size\r\n else:\r\n if v3==1:\r\n if BB:\r\n i=(np.square(((array-mean)*W)+B)/size)#B could be included\r\n else:\r\n i=(np.square(((array-mean)*W))/size)#B could be included\r\n if v3==2:#not a good idea\r\n if BB:\r\n i=((np.square(array-mean)*W)+B)/size#B could be included\r\n else:\r\n i=((np.square(array-mean)*W))/size#B could be included\r\n if v3==3:\r\n if BB:\r\n i=((np.square(array-mean)/size)*W)+B#B could be included\r\n else:\r\n i=((np.square(array-mean)/size)*W)#B could be included\r\n else:\r\n if BB:\r\n i=(np.square((mul)-mean)+B)/size\r\n else:\r\n i=(np.square((mul)-mean))/size\r\n if KCD:\r\n out=np.sum(i,axis=xi)\r\n else:\r\n out=np.rollaxis(np.sum(i,axis=x2),-1,1)\r\n if verbose:\r\n print(i.shape)\r\n if verbose==2:\r\n print('ishp',i.shape)\r\n print('isample',i[:,-1,-1,::dstp],i.dtype)\r\n else:\r\n print('isample',i[-1,-1,-1],i.dtype)\r\n if sqrt:\r\n out=np.sqrt(out)\r\n if verbose:\r\n if verbose==2:\r\n print('oushp',out.shape)\r\n print(\"outsample\",out[:,::dstp,-1,-1])\r\n else:\r\n print(\"outsample\",out[-1,-1,-1])\r\n print(\"out\",out.shape,(arrs[0],ashp[0],arrs[1],arrs[2]))\r\n if KCD:\r\n out=np.reshape(out,(arrs[0],ashp[0]*arrs[-3],arrs[1],arrs[2]))\r\n else:\r\n assert out.shape==(arrs[0],ashp[0],arrs[1],arrs[2])\r\n if not(BB)and BS:\r\n B=np.reshape(B,(*B.shape,*[1 for _ in range(len(ashp)-len(B.shape))]))\r\n return(out+B[:,0])\r\n else:\r\n return(out)", "def compare():\n eb = 1\n p = 0.5\n R = 0.5\n N = 150 # set the error count to reach\n\n domain = np.linspace(0, 10, 11)\n hard_domain = np.linspace(0, 8, 9)\n soft_domain = 
np.linspace(0, 6, 7)\n hard_decibels = 10 ** (hard_domain / 10)\n soft_decibels = 10 ** (soft_domain / 10)\n\n # get results for hard decoding\n nos = eb / hard_decibels\n hard_results = []\n for no in nos:\n print(np.where(nos == no)[0][0])\n experiment = ViterbiHard(eb, no / R)\n bit_count = 0\n error_count = 0\n for output in experiment.run():\n bit_count += 1\n if output[0] != output[1]:\n error_count += 1\n if error_count == N:\n hard_results.append(error_count / bit_count)\n break\n\n # get results for soft decoding\n nos = eb / soft_decibels\n soft_results = []\n for no in nos:\n print(np.where(nos == no)[0][0])\n experiment = ViterbiSoft(eb, no / R)\n bit_count = 0\n error_count = 0\n for output in experiment.run():\n bit_count += 1\n if output[0] != output[1]:\n error_count += 1\n if error_count == N:\n soft_results.append(error_count / bit_count)\n break\n\n decibels = 10 ** (domain / 10)\n nos = eb / decibels\n taus = nos / 4 / np.sqrt(eb) * np.log((1 - p) / p)\n plt.semilogy(hard_domain, np.array(hard_results), label='Hard-decision')\n plt.semilogy(soft_domain, np.array(soft_results), label='Soft-decision')\n plt.semilogy(domain, norm.sf((taus + np.sqrt(eb)) / np.sqrt(nos / 2)) * (1 - p) +\n norm.sf((np.sqrt(eb) - taus) / np.sqrt(nos / 2)) * p, label='Expectation ($p=0.5$)')\n plt.xlabel('$E_b/N_0$ (dB)')\n plt.ylabel('Probability of bit error')\n plt.legend(loc='lower left')\n plt.show()", "def vecvari10(array,W,B=None,sqrt=False,BB=1,verbose=False,BS=False,sizz=0,\r\n KCD=False,mulb=False,mul2=False,v3=0,**kwargs):\r\n arrs=array.shape\r\n ashp=W.shape\r\n dstp=arrs[0]-1 if not((arrs[0]-1)==0) else 1\r\n #array=np.expand_dims(array,len(array.shape)//2)\r\n if verbose:\r\n print(\"VECVARI10:: B? {},SQRT {}, BB {}, BS {}, SIZZ {}, KCD {}, MULB {}, MUL2 {}\".format(\r\n not(B is None),bool(sqrt),bool(BB),bool(BS),sizz,bool(KCD),bool(mulb),bool(mul2)))\r\n print('arrayshape',arrs)\r\n if verbose==2:\r\n print('Wsample',W[:,:,-1,-1])\r\n else:\r\n print('Wsample',W[:,:,-1,-1])\r\n if not(B is None):\r\n print(\"Bsamp\",B)\r\n print('wshape',ashp)\r\n xi=(-2,-1)\r\n x2=(-3,-2,-1)\r\n if B is None:\r\n B=np.zeros((1,1,1,1),dtype=np.float32)#channel\r\n #if len(ashp)==5 :#not all data and all weights == 3d data\r\n # xi=(-3,-2,-1)\r\n # x2=(-4,-3,-2,-1)\r\n if v3:\r\n if mulb:#probably a bad idea\r\n mul=array+B\r\n else:\r\n mul=array\r\n else:\r\n if mulb:#probably a bad idea\r\n B=np.reshape(B,(*B.shape,*[1 for _ in range(len(ashp)-len(B.shape))]))\r\n mul=(array*W)+B\r\n else:\r\n mul=array*W\r\n if not(B is None)and BB:\r\n size=(np.sum(W,axis=xi,keepdims=True)+np.sum(B,axis=xi[-len(B.shape):],keepdims=True))\r\n else:\r\n size=np.sum(W,axis=xi,keepdims=True)#shape=(outputs, channel)\r\n if B is None:\r\n B=np.zeros((ashp[1]))#channel\r\n if verbose:\r\n if verbose==2:\r\n print('mulsamp',mul[:,-1,-1,::dstp],'arrsamp',array[-1,-1,:])\r\n else:\r\n print('mulsamp',mul[-1,-1,-1],'arrsamp',array[-1,-1,-1])\r\n print('sizsamp',size)\r\n print('bbb',B.shape)\r\n print(\"size\",size.shape)\r\n print('array',array.shape,'w',W.shape)\r\n ######################################\r\n if sizz==1:#not a good idea\r\n mean=np.sum(mul,xi,keepdims=1)/size\r\n else:\r\n mean=np.sum(mul,xi,keepdims=1)/np.broadcast_to([ashp[-2]*ashp[-1]],(3,1,1))\r\n if verbose:\r\n if verbose==2:\r\n print(\"meanshp\",mean.shape)\r\n print(\"meansamp\",mean[:,:,:,::dstp,-1,-1,-1])\r\n else:\r\n print(\"meansamp\",mean[-1,:,:,-1,-1,-1,-1])\r\n print(\"etst\",mean.shape)\r\n if verbose==2:\r\n 
print(\"ameanshp\",(mul-mean).shape)\r\n print(\"amean\",(mul-mean)[:,:,:,::dstp,-1,-1])\r\n else:\r\n print(\"amean\",(mul-mean)[-1,-1,-1])\r\n B=np.reshape(B,(*B.shape,*[1 for _ in range(len(ashp)-len(B.shape))]))\r\n if mul2:\r\n if mulb:#probably a bad idea\r\n mul=((array-mean)*W)+B\r\n \r\n else:\r\n mul=((array-mean)*W)\r\n i=np.square(mul)/size\r\n else:\r\n if v3==1:\r\n if BB:\r\n i=(np.square(((array-mean)*W)+B)/size)\r\n else:\r\n i=(np.square(((array-mean)*W))/size)#B could be included\r\n if v3==2:#not a good idea\r\n if BB:\r\n i=((np.square(array-mean)*W)+B)/size\r\n else:\r\n i=((np.square(array-mean)*W))/size#B could be included\r\n if v3==3:\r\n if BB:\r\n i=((np.square(array-mean)/size)*W)+B\r\n else:\r\n i=((np.square(array-mean)/size)*W)#B could be included\r\n else:\r\n if BB:\r\n i=(np.square((mul)-mean)+B)/size\r\n else:\r\n i=(np.square((mul)-mean))/size\r\n if KCD:\r\n out=np.sum(i,axis=xi)\r\n else:\r\n out=np.rollaxis(np.sum(i,axis=x2),-1,1)\r\n if verbose:\r\n print('ishp',i.shape)\r\n if verbose==2:\r\n \r\n print('isample',i[:,-1,-1,::dstp],i.dtype)\r\n else:\r\n print('isample',i[-1,-1,-1],i.dtype)\r\n if sqrt:\r\n out=np.sqrt(out)\r\n if verbose:\r\n if verbose==2:\r\n print('oushp',out.shape)\r\n print(\"outsample\",out[:,::dstp,-1,-1])\r\n else:\r\n print(\"outsample\",out[-1,-1,-1])\r\n print(\"out\",out.shape,(arrs[0],ashp[0],arrs[1],arrs[2]))\r\n if KCD:\r\n out=np.reshape(out,(arrs[0],ashp[0]*arrs[-3],arrs[1],arrs[2]))\r\n else:\r\n assert out.shape==(arrs[0],ashp[0],arrs[1],arrs[2])\r\n if not(BB) and BS:\r\n B=np.reshape(B,(*B.shape,*[1 for _ in range(len(ashp)-len(B.shape))]))\r\n return(out+B[:,0])\r\n else:\r\n return(out)", "def perform_bayesian_optimization(self):\n bounds = {'hunits': (self.hunits_lower, self.hunits_upper),\n 'embedding_dim': (self.embedding_dim_lower, self.embedding_dim_upper)}\n optimizer = BayesianOptimization(f=self.lstm_score, pbounds=bounds, random_state=1)\n optimizer.maximize(init_points=2, n_iter=self.iterations)\n print(optimizer.max)\n print(optimizer.res)", "def test_vector_class():\n points = 10\n riskfree = .03\n maturity = 30/365\n moneyness = np.linspace(-.04, .04, points)\n premium = np.ones_like(moneyness) * .05\n call = True\n data = {'riskfree': riskfree, 'maturity': maturity,\n 'moneyness': moneyness, 'call': call, 'premium': premium}\n\n sigma = np.ones(points) * .13\n bsm = BSmodel(sigma, data)\n\n print(bsm.premium())\n\n weights = np.ones(points) * .63\n means = np.vstack([np.ones(points) * -.01, np.ones(points) * .09])\n stds = np.vstack([np.ones(points) * .16, np.ones(points) * .05])\n param = np.vstack([weights, means, stds])\n mbs = MBSmodel(param, data)\n\n print(mbs.premium())\n\n param_a, param_p = np.ones(points) * 4.5, np.ones(points) * 2\n param_c = -.05 * np.ones(points)\n gb2 = GB2model([param_a, param_p, param_c], data)\n\n print(gb2.premium())", "def test_bayes_factor_a(self):\n model_1 = 2\n model_2 = 4\n expected_result = -4\n actual_result = utils.bayes_factor(model_1, model_2)\n assert_almost_equal(actual_result, expected_result)", "def max_evidence(self):\n self.A = np.linalg.inv(self.Sn)\n A_eigval = np.linalg.eigvals(self.A)\n gamma = 0\n for i in range(len(A_eigval)):\n gamma += A_eigval[i]/(self.alpha + A_eigval[i])\n new_alpha = gamma/([email protected])\n\n sum = 0\n for i in range(self.n):\n sum +=(self.t[i][email protected]_matrix[i])**2\n new_beta = 1/((1/(self.n-gamma))*sum)\n\n return new_alpha, new_beta", "def run_test(d):\n\n ######### Problem Specification\n\n # Data 
generation parameters\n prior_mu_z = np.zeros(d, dtype=np.float32) # Prior mean\n prior_sigma_z = np.eye(d, dtype=np.float32) # Prior covariance matrix\n\n # True model parameters\n num_range = np.arange(-(d-1)/2, (d+1)/2, dtype=np.float32)\n\n t_delta = num_range / 5 \n\n if d == 1:\n t_sigma = np.ones(1)\n else: \n # Allow sigma to range from 0.1 to 1\n t_sigma = 36/(10*(d-1)**2) * num_range**2 + 0.1 \n\n ######### Variable Initialization\n\n # Initial model parameters - same across all methods\n init_delta = prior_mu_z.copy()\n init_log_sigma = 3 * np.ones(d)\n\n # Initial HVAE variational parameters\n init_T = 5.\n init_eps = 0.005 * np.ones(d)\n max_eps = params['max_eps'] * np.ones(d)\n init_logit_eps = np.log(init_eps/(max_eps - init_eps))\n init_log_T_0 = np.log(init_T - 1)\n\n # Initial NF variational parameters\n init_u_pre_reparam = scipy.stats.truncnorm.rvs(-2, 2, scale=0.1, size=d)\n init_w = scipy.stats.truncnorm.rvs(-2, 2, scale=0.1, size=d)\n init_b = 0.1\n\n # Initial VAE parameters\n init_mu_z = prior_mu_z.copy()\n init_log_sigma_z = np.ones(d)\n\n ######### Set up models\n\n HVAE_model_1 = HVAE(\n ['delta', 'log_sigma', 'logit_eps', 'log_T_0'],\n [init_delta, init_log_sigma, init_logit_eps, init_log_T_0], \n 'HVAE_1', d, params['HVAE_K_1'])\n HVAE_model_2 = HVAE(\n ['delta', 'log_sigma', 'logit_eps', 'log_T_0'],\n [init_delta, init_log_sigma, init_logit_eps, init_log_T_0], \n 'HVAE_2', d, params['HVAE_K_2'])\n\n HVAE_model_notemp_1 = HVAE(\n ['delta', 'log_sigma', 'logit_eps'],\n [init_delta, init_log_sigma, init_logit_eps], \n 'HVAE_notemp_1', d, params['HVAE_K_1'])\n HVAE_model_notemp_2 = HVAE(\n ['delta', 'log_sigma', 'logit_eps'], \n [init_delta, init_log_sigma, init_logit_eps],\n 'HVAE_notemp_2', d, params['HVAE_K_2'])\n\n NF_model_1 = NF(\n ['delta', 'log_sigma', 'u_pre_reparam', 'w', 'b'],\n [init_delta, init_log_sigma, init_u_pre_reparam, init_w, init_b],\n 'NF_1', d, params['NF_K_1'])\n NF_model_2 = NF(\n ['delta', 'log_sigma', 'u_pre_reparam', 'w', 'b'],\n [init_delta, init_log_sigma, init_u_pre_reparam, init_w, init_b],\n 'NF_2', d, params['NF_K_2'])\n\n VB_model = VB(['delta', 'log_sigma', 'mu_z', 'log_sigma_z'], \n [init_delta, init_log_sigma, init_mu_z, init_log_sigma_z], 'VB', d)\n\n model_list = [HVAE_model_1, HVAE_model_2, HVAE_model_notemp_1, \n HVAE_model_notemp_2, NF_model_1, NF_model_2, VB_model]\n \n ######### Generate Training Data & Save - One for each test\n\n train_data_list = []\n\n for i in range(params['n_tests']):\n z = np.random.multivariate_normal(prior_mu_z, prior_sigma_z)\n x = np.random.multivariate_normal(z + t_delta, np.diag(t_sigma**2), \n size=params['n_data'])\n train_data_list.append(x)\n\n # Folder should have already been created in the initializations\n data_path = os.path.join('save', str(d), 'train_data.p')\n pickle.dump(train_data_list, open(data_path, 'wb')) \n\n ######### Train models\n\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n\n # Store the final parameter values for all test runs in this dictionary\n final_params = {}\n\n for m in model_list:\n\n final_values = []\n\n for i in range(params['n_tests']):\n (delta, sigma) = m.train(sess, train_data_list[i], i)\n final_values.append((delta, sigma))\n\n final_params[m.model_name] = final_values.copy()\n\n ######### Test models using difference between parameters\n\n param_diffs = {}\n\n for m in model_list:\n\n diffs = []\n\n for i in range(params['n_tests']):\n delta = final_params[m.model_name][i][0]\n sigma = final_params[m.model_name][i][1]\n\n 
delta_diff = np.sum((delta - t_delta)**2)\n sigma_diff = np.sum((sigma - t_sigma)**2)\n\n diffs.append((delta_diff, sigma_diff))\n\n param_diffs[m.model_name] = diffs.copy()\n\n # Save parameter differences in a pickle file\n diff_path = os.path.join('save', str(d), 'all_diffs.p')\n pickle.dump(param_diffs, open(diff_path, 'wb'))", "def updateBeta(self):\n\n priorBeta = np.copy(self.beta) # 返り値で更新幅を与えるので初期値を保持しておく\n W = self.__genW() # diag Matrix\n # update beta : Fisher Scoring Update\n result = np.matmul(np.matmul(self.X.T, W), self.X)\n result = np.matmul(np.linalg.inv(result), self.X.T)\n result = np.matmul(result, W)\n # claimFreq=0の人は, firstIterationでmu=0の0割が必ず発生する. 適切な対処法は+epsilonで良い?\n z = (self.Y - self.mu)/(self.mu + DoubleGLM.EPSILON) + np.log(self.mu + DoubleGLM.EPSILON)\n self.beta = np.matmul(result, z)\n\n # update current mu\n self.mu = np.exp(np.matmul(self.X, self.beta))\n # update current deviance\n d1 = self.Y * (self.Y**(1-self.p) - self.mu**(1-self.p)) / (1-self.p)\n d2 = (self.Y**(2-self.p) - self.mu**(2-self.p)) / (2-self.p)\n self.d = 2*self.w * (d1 - d2)\n\n return np.abs(priorBeta - self.beta)", "def run_vqe(\n self,\n backend=Aer.get_backend(\"statevector_simulator\"),\n var_form=None,\n optimizer=None,\n reps=None,\n mode=\"min_val\",\n ):\n # N=int(np.ceil(np.log2(len(self.mat))))\n # hk = np.zeros((2**N,2**N),dtype='complex')\n # hk[:self.mat.shape[0], :self.mat.shape[1]] = self.mat\n N = self.n_qubits()\n if mode == \"max_val\":\n Hamil_mat = aqua.operators.MatrixOperator(-1 * self.mat)\n # Hamil_mat = MatrixOperator(-1 * self.mat)\n else:\n Hamil_mat = aqua.operators.MatrixOperator(self.mat)\n # Hamil_mat = MatrixOperator(self.mat)\n Hamil_qop = aqua.operators.op_converter.to_weighted_pauli_operator(\n Hamil_mat\n )\n if var_form is None:\n if reps is None:\n reps = 2\n # reps=5\n from qiskit.circuit.library import EfficientSU2\n\n var_form = EfficientSU2(N, reps=reps)\n if optimizer is None:\n vqe = aqua.algorithms.VQE(Hamil_qop, var_form)\n # vqe = VQE(Hamil_qop, var_form)\n else:\n vqe = aqua.algorithms.VQE(Hamil_qop, var_form, optimizer)\n # vqe = VQE(Hamil_qop, var_form, optimizer)\n vqe_result = vqe.run(backend)\n en = np.real(vqe_result[\"eigenvalue\"])\n # params=vqe.optimal_params\n # circuit=vqe.construct_circuit(params)\n if mode == \"max_val\":\n en = -1 * en\n # states = np.sort(\n # np.real(\n # vqe.expectation.convert(\n # StateFn(vqe.operator, is_measurement=True)\n # ).to_matrix()\n # )\n # )\n return en, vqe_result, vqe", "def svm_admm(X, y, mylambda=1., rho=1., rel_par=1., QUIET = False, MAX_ITER = 200, ABSTOL = 1e-6, RELTOL = 1e-2):\n if not QUIET:\n tic = time.time()\n m, n = X.shape \n y_raveld = y.ravel() \n # A is a matrix given by [-y_j*x_j -y_j]\n A = - np.dot(np.diag(y_raveld), np.concatenate((X, np.ones((m, 1))), axis = 1))\n\n #Data preprocessing\n m, n = A.shape\n \n #ADMM solver\n x = np.zeros((n, N))\n z = np.zeros((n, N))\n u = np.zeros((n, N))\n\n if not QUIET:\n print('\\n%3s\\t%10s\\t%10s\\t%10s\\t%10s\\t%10s' %('iter',\n 'r np.linalg.norm', \n 'eps pri', \n 's np.linalg.norm', \n 'eps dual', \n 'objective'))\n\n # Saving state\n h = {}\n h['objval'] = np.zeros(MAX_ITER)\n h['r_norm'] = np.zeros(MAX_ITER)\n h['s_norm'] = np.zeros(MAX_ITER)\n h['eps_pri'] = np.zeros(MAX_ITER)\n h['eps_dual'] = np.zeros(MAX_ITER)\n\n for k in range(MAX_ITER):\n # x-update \n for i in range(N):\n A_temp = A[i * num_per_batch: (i + 1) * num_per_batch, :]\n y_temp = y[i * num_per_batch: (i + 1) * num_per_batch, :]\n #\n # temp1 = -z[:, i] + 
u[:, i]\n # fun = lambda x: np.sum(np.maximum(np.dot(A_temp, x.reshape((n, 1))) + 1, np.zeros((num_per_batch, 1)))) + \\\n # rho/2. * np.dot(x + temp1, x + temp1)\n # # np.random.uniform(-1, 1, (n,1))\n # result = scipy.optimize.minimize(fun, 0.1 * np.ones((n, 1)), tol = 1e-8, method = 'Nelder-Mead')\n # x_temp = result.x\n #\n x_var = Variable(n)\n constraints = []\n objective = Minimize(sum_entries(pos( A_temp * x_var + 1)) + rho/2. * sum_squares((x_var - z[:, i] + u[:, i])))\n prob = Problem(objective, constraints)\n result = prob.solve()\n x_temp = x_var.value\n\n x_temp = x_temp.reshape((x_temp.shape[0], 1))\n x[:, i] = x_temp.ravel()\n\n xave = np.mean(x, axis = 1)\n\n # z-update\n zold = np.copy(z)\n x_hat = rel_par * x + (1. - rel_par) * zold\n z = N * rho/(1./mylambda + N * rho) * np.mean(x_hat + u, axis = 1)\n z = z.reshape((z.shape[0], 1))\n z = np.dot(z, np.ones((1, N))) # N columns of the same values\n\n # u-update\n u = u + x_hat - z\n\n # diagnostics, reporting, termination checks\n h['objval'][k] = myobjective(A, mylambda, x, z)\n h['r_norm'][k] = np.linalg.norm(x - z)\n h['s_norm'][k] = np.linalg.norm(rho * (z - zold))\n h['eps_pri'][k] = np.sqrt(n) * ABSTOL+ RELTOL * np.maximum(np.linalg.norm(x), np.linalg.norm(-z))\n h['eps_dual'][k] = np.sqrt(n) * ABSTOL + RELTOL * np.linalg.norm(rho * u)\n if not QUIET:\n print('%4d\\t%10.4f\\t%10.4f\\t%10.4f\\t%10.4f\\t%10.2f' %(k + 1,\\\n h['r_norm'][k],\\\n h['eps_pri'][k],\\\n h['s_norm'][k],\\\n h['eps_dual'][k],\\\n h['objval'][k]))\n\n if (h['r_norm'][k] < h['eps_pri'][k]) and (h['s_norm'][k] < h['eps_dual'][k]):\n break\n\n if not QUIET:\n toc = time.time()-tic\n print(\"\\nElapsed time is %.2f seconds\"%toc)\n\n return z, h", "def run_viterbi(emission_scores, trans_scores, start_scores, end_scores):\r\n\r\n L = start_scores.shape[0]\r\n assert end_scores.shape[0] == L\r\n assert trans_scores.shape[0] == L\r\n assert trans_scores.shape[1] == L\r\n assert emission_scores.shape[1] == L\r\n N = emission_scores.shape[0]\r\n\r\n #T - Score matrix same as in assignement pdf\r\n T = np.zeros(shape=(L,N))\r\n #Back pointers - to store the previous best tag for word at (i-1)th position\r\n #that resulted into current best tag for (i)th word \r\n back_pointer = np.full((L,N), -1)\r\n\r\n for i in xrange(L):\r\n emission = emission_scores[0][i]\r\n combined = emission + start_scores[i]\r\n T[i][0] = combined\r\n\r\n # Loop over all the words in a sequesnce\r\n for i in xrange(1, N):\r\n # Loop over all the tags for the word at index i \r\n for j in xrange(L):\r\n # Varibale for maximum tag score from previous word (word at i-1)\r\n tmp_max = float('-inf')\r\n tmp_max_idx = -1\r\n #Emission value of word at idx i from state (i.e tag) j\r\n emission = emission_scores[i][j]\r\n #Loop over all the possibile tags for previous word T[tag (1..L), word at i-1]\r\n #and get max among them. 
Store the corresponding back pointer for there T[tag (1..L), word at i-1]\r\n for k in xrange(L):\r\n transition = trans_scores[k][j]\r\n prev_path = T[k][i-1]\r\n combined = transition + prev_path\r\n if (tmp_max < combined):\r\n tmp_max = combined\r\n tmp_max_idx = k\r\n\r\n back_pointer[j][i] = tmp_max_idx\r\n T[j][i] = tmp_max + emission\r\n\r\n # Doing this step outside because if N == 1 then above loop will not run\r\n # Variable for maximum tag score\r\n tag_max = float('-inf')\r\n # Variable for back pointer(previous T[tag, word])\r\n tag_max_idx = -1\r\n for i in xrange(L):\r\n T[i][N-1] = T[i][N-1] + end_scores[i]\r\n if (tag_max < T[i][N-1]):\r\n tag_max = T[i][N-1]\r\n tag_max_idx = i\r\n # print(\"Max tag -> \" + str(tag_max_idx))\r\n\r\n #Variable to track the path length - should be equal to N\r\n path_length = 0\r\n #Variable to back track on the tags\r\n tag_idx = tag_max_idx\r\n #Varibale to track the word index in N\r\n word_idx = N-1 \r\n #Path strored using backtracking\r\n y = []\r\n\r\n #Getting the best path using backtracking on back_pointers\r\n while path_length != N-1:\r\n y.append(back_pointer[tag_idx][word_idx])\r\n tag_idx = back_pointer[tag_idx][word_idx]\r\n word_idx = word_idx - 1\r\n path_length = path_length + 1\r\n\r\n #Reversing the backtracked path\r\n y = y[::-1]\r\n #Adding the tag for the last word idx in N\r\n y.append(tag_max_idx)\r\n # print(\"Path -> \" + str(y))\r\n\r\n return (tag_max, y)", "def fit_fembv_varx(X, n_components=2, max_tv_norm=None, memory=0,\n n_init=10, tolerance=1e-4, max_iterations=500,\n verbose=0, random_state=None):\n\n rng = check_random_state(random_state)\n\n best_fit = None\n best_weights = None\n\n for i in range(n_init):\n\n fit = FEMBVVARX(n_components=n_components, max_tv_norm=max_tv_norm,\n memory=memory, tolerance=tolerance,\n max_iterations=max_iterations, verbose=verbose,\n random_state=rng)\n\n weights = fit.fit_transform(X)\n\n if best_fit is None or fit.cost_ < best_fit.cost_:\n best_fit = deepcopy(fit)\n best_weights = weights.copy()\n\n return best_fit, best_weights", "def bellman_operator(self, v):\n new_v = np.empty(v.shape)\n for i in range(self.N):\n for j in range(self.N):\n # stay put\n v1 = self.theta[i] + self.epsilon[j] + self.beta * v[i, j]\n\n # new job\n v2 = (self.theta[i] + self.G_mean + self.beta *\n np.dot(v[i, :], self.G_probs))\n\n # new life\n v3 = (self.G_mean + self.F_mean + self.beta *\n np.dot(self.F_probs, np.dot(v, self.G_probs)))\n new_v[i, j] = max(v1, v2, v3)\n return new_v" ]
[ "0.6568823", "0.6196563", "0.6113917", "0.6092184", "0.6078253", "0.6074075", "0.6015432", "0.59387404", "0.59387404", "0.592884", "0.58894825", "0.5857658", "0.5835821", "0.57507086", "0.5727656", "0.5717088", "0.57143795", "0.5711402", "0.5685124", "0.5682099", "0.5679074", "0.567222", "0.5671618", "0.56697303", "0.56450874", "0.5616761", "0.5603962", "0.55981374", "0.559571", "0.5580341" ]
0.63317454
1
Update the entity list to make things quicker; reuse entity ctrls if possible. First, if there are more controls currently in the list than are needed, remove those that are redundant. Then work through the list of entities, grabbing an existing control and updating it if possible. Create new controls only as necessary.
def update_entity_list(self): list_sizer = self.scroll_entity_list.GetSizer() number_of_ctrls = self.get_number_of_entity_ctrls() number_of_entities = self.get_number_of_entities() difference = number_of_ctrls - number_of_entities if difference > 0: for unused_temp in range(difference): redundant_ctrl = self.entity_ctrl_list.pop() redundant_ctrl.Destroy() for entity_index in range(number_of_entities): entity = self.entities[entity_index] if entity_index + 1 > number_of_ctrls: self.add_entity_ctrl(entity) else: # set the entity on the existing control self.entity_ctrl_list[entity_index].set_entity( entity ) list_sizer.Layout()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updateAllEntities():\n entityKeys=list(registeredEntities.keys())\n for currKey in entityKeys:\n try:\n currEntity=registeredEntities[currKey]\n currEntity.update()\n except KeyError:\n #this should only be called if an entity is deleted (like if a rock got hit by a bullet)\n continue", "def update_entities(self):\n raise NotImplementedError()", "def update(self):\n self.platform_list.update()\n #self.enemy_list.update()\n self.enemy_list.update()\n self.bullet_list.update()\n self.active_sprite_list.update()", "def _update_attr_list(self) -> None:\n old_flex_grid = self.flex_grid\n self.flex_grid = wx.FlexGridSizer(cols=3, vgap=5, hgap=10)\n wx_elements = []\n for attr_id in self.attr_ids:\n button = self.attr_buttons[attr_id]\n label_input = self.attr_labels[attr_id]\n value_input = self.attr_values[attr_id]\n wx_elements.extend([\n (button, 0, wx.ALIGN_CENTER_VERTICAL),\n (label_input, 0, wx.EXPAND),\n (value_input, 1, wx.EXPAND)\n ])\n self.flex_grid.AddGrowableCol(2, 1)\n self.flex_grid.AddMany(wx_elements)\n if old_flex_grid is not None:\n self.box.Replace(old_flex_grid, self.flex_grid)\n self.box.Layout()", "def updateWidgets(self):\n super(AdminRulesForm, self).updateWidgets()\n available_templates = getUtility(IVocabularyFactory, name='collective.imagetags.templates')(self.context)\n skins = getToolByName(self.context, 'portal_skins')\n path = skins.getSkinPath(skins.getDefaultSkin())\n paths = [i.strip() for i in path.split(',')]\n include = False\n improved_templates = []\n for template in available_templates.by_value:\n # If template directory is available and (is before 'plone_content' or 'plone_content' isn't available)...\n include = (template in paths and 'plone_content' in paths and paths.index(template)<paths.index('plone_content')) or \\\n (template in paths and not 'plone_conent' in paths)\n \n # ... 
then check it\n if include:\n term = available_templates.getTerm(template)\n improved_templates.append(term.token)\n\n for template in self.widgets['improved_templates'].items:\n template['checked'] = template['value'] in improved_templates", "def _modify_controls(self, catalog: cat.Catalog) -> cat.Catalog:\n logger.debug(f'modify specify catalog {catalog.metadata.title} for profile {self._profile.metadata.title}')\n self._catalog_interface = CatalogInterface(catalog)\n param_dict: Dict[str, prof.SetParameter] = {}\n alters: Optional[List[prof.Alter]] = None\n # find the modify and alters\n # build a param_dict for all the modifys\n if self._profile.modify is not None:\n if self._profile.modify.set_parameters is not None:\n param_list = self._profile.modify.set_parameters\n for param in param_list:\n param_dict[param.param_id] = param\n alters = self._profile.modify.alters\n\n if alters is not None:\n for alter in alters:\n if alter.control_id is None:\n raise TrestleError('Alters must have control id specified.')\n if alter.removes is not None:\n raise TrestleError('Alters not supported for removes.')\n if alter.adds is None:\n raise TrestleError('Alter has no adds to perform.')\n for add in alter.adds:\n if add.position is not None and add.position.name is not None and add.position.name != 'after':\n raise TrestleError('Alter position must be \"after\" or None.')\n control = self._catalog_interface.get_control(alter.control_id)\n if add.by_id is not None:\n self._add_to_parts(control, add.by_id, add.parts)\n self._catalog_interface.replace_control(control)\n continue\n if add.props is not None:\n if add.by_id is not None:\n TrestleError('Alter cannot add props by id.')\n if not control.props:\n control.props = []\n control.props.extend(add.props)\n continue\n TrestleError('Alter must either add parts or props')\n\n # use the param_dict to apply all modifys\n control_ids = self._catalog_interface.get_control_ids()\n for control_id in control_ids:\n control = self._catalog_interface.get_control(control_id)\n if control.parts is not None:\n for part in control.parts:\n self._replace_part_prose(control, part, param_dict)\n self._catalog_interface.replace_control(control)\n\n catalog = self._catalog_interface._catalog\n\n # update the original profile metadata with new contents\n # roles and responsible-parties will be pulled in with new uuid's\n new_metadata = self._profile.metadata\n new_metadata.title = f'{catalog.metadata.title}: Resolved by profile {self._profile.metadata.title}'\n links: List[common.Link] = []\n for import_ in self._profile.imports:\n links.append(common.Link(**{'href': import_.href, 'rel': 'resolution-source'}))\n new_metadata.links = links\n # move catalog controls from dummy group 'catalog' into the catalog\n if catalog.groups:\n for group in catalog.groups:\n if group.id == 'catalog':\n catalog.controls = group.controls\n catalog.groups = [group for group in catalog.groups if group.id != 'catalog']\n break\n\n catalog.metadata = new_metadata\n\n return catalog", "def update_editor ( self ):\n list_pane = self.control\n editor = self._editor\n\n # Disconnext the editor from any control about to be destroyed: \n for control in list_pane.GetChildren():\n if hasattr( control, '_editor' ):\n control._editor.control = None\n \n # Get rid of any previous contents:\n list_pane.SetSizer( None )\n list_pane.DestroyChildren()\n \n # Create all of the list item trait editors:\n trait_handler = self.factory.trait_handler\n resizable = ((trait_handler.minlen != 
trait_handler.maxlen) and\n (self.kind != 'readonly_editor'))\n item_trait = trait_handler.item_trait\n list_sizer = wx.FlexGridSizer( 0, 1 + resizable, 0, 0 )\n list_sizer.AddGrowableCol( resizable )\n values = self.value\n index = 0\n width, height = 100, 18\n is_fake = (resizable and (len( values ) == 0))\n if is_fake:\n values = [ item_trait.default_value()[1] ]\n \n for value in values:\n width = height = 0\n if resizable: \n control = ImageControl( list_pane, \n bitmap_cache( 'list_editor', False ),\n -1, self.popup_menu ) \n width, height = control.GetSize()\n width += 4\n try:\n proxy = ListItemProxy( self.object, self.name, index, \n item_trait, value )\n peditor = editor( self.ui, proxy, 'value', self.description, \n list_pane )\n pcontrol = peditor.control\n pcontrol.proxy = proxy\n if resizable:\n control.proxy = proxy\n except:\n if not is_fake:\n raise\n pcontrol = wx.Button( list_pane, -1, 'sample' )\n width2, height2 = pcontrol.GetSize()\n width += width2\n height = max( height, height2 )\n if resizable:\n list_sizer.Add( control, 0, wx.LEFT | wx.RIGHT, 2 )\n list_sizer.Add( pcontrol, 1, wx.EXPAND )\n index += 1\n \n list_pane.SetSizer( list_sizer )\n \n if is_fake:\n self._cur_control = control \n self.empty_list()\n control.Destroy() \n pcontrol.Destroy()\n \n rows = [ self.factory.rows, 1 ][ self.kind == 'simple_editor' ]\n list_pane.SetSize( wx.Size( \n width + ((trait_handler.maxlen > rows) * scrollbar_dx), \n height * rows ) )\n list_pane.SetScrollRate( 16, height )\n list_pane.SetVirtualSize( list_sizer.GetMinSize() )\n list_pane.GetParent().Layout()", "def _update_attr_list(self) -> None:\n old_flex_grid = self.flex_grid\n self.flex_grid = wx.FlexGridSizer(cols=3, vgap=5, hgap=10)\n wx_elements = []\n for attr_id in self.attr_req_ids:\n button = self.attr_req_buttons[attr_id]\n attr_req_label_ctrl = self.attr_req_labels[attr_id]\n attr_req_element_ctrl = self.attr_req_elements[attr_id]\n wx_elements.extend([\n (button, 0, wx.ALIGN_CENTER_VERTICAL),\n (attr_req_label_ctrl, 0, wx.EXPAND),\n (attr_req_element_ctrl, 1, wx.EXPAND)\n ])\n self.flex_grid.AddMany(wx_elements)\n if old_flex_grid is not None:\n self.box.Replace(old_flex_grid, self.flex_grid)\n self.box.Layout()", "def __reload_caption(self, new_list):\n # Remove all UI elements\n for item in self.__ui_controls:\n if self.__ui_controls.get(item) is None:\n continue\n self.__ui_controls.get(item).delete()\n for item in self.__teachpanel_sliders:\n item.delete()\n self.__teachpanel_sliders = []\n # Restore the caption\n self.scene.caption = self.__default_caption\n # Create the updated caption.\n self.__load_mode_ui(new_list)", "def update(self):\n self.platform_list.update()\n self.enemy_list.update()", "async def refresh_entity_state(self):", "def try_remove_and_add(self):\n\n async def _async_remove_and_add():\n await self.async_remove(force_remove=True)\n self.entity_id = None\n await self.platform.async_add_entities([self])\n\n if self.hass and self.platform:\n self.hass.add_job(_async_remove_and_add)", "def updateFields(self):\n super(AdminRulesForm, self).updateFields()\n self.fields['improved_templates'].widgetFactory = CheckBoxFieldWidget\n self.fields['iframe_enabled'].widgetFactory = SingleCheckBoxFieldWidget\n self.fields['js_enabled'].widgetFactory = SingleCheckBoxFieldWidget\n self.fields['inline_images'].widgetFactory = SingleCheckBoxFieldWidget", "def update_entities(self, entities: List[str]) -> None:\n self._entities = entities or []", "def update_controller():\n update_items(inst, 
mikrotik_controller, async_add_entities, switches)", "def reload_sizer ( self, controls, extra = 0 ):\n sizer = self.control.GetSizer()\n for i in xrange( 2 * len( controls ) + extra ):\n sizer.Remove( 0 )\n index = 0\n for control, pcontrol in controls:\n sizer.Add( control, 0, wx.LEFT | wx.RIGHT, 2 )\n sizer.Add( pcontrol, 1, wx.EXPAND )\n control.proxy.index = index\n index += 1\n sizer.Layout()\n self.control.SetVirtualSize( sizer.GetMinSize() )", "def make_changes(self):\n dd = mg.DATADETS_OBJ\n if not self.read_only:\n ## NB must run Validate on the panel because the objects are\n ## contained by that and not the dialog itself.\n ## http://www.nabble.com/validator-not-in-a-dialog-td23112169.html\n if not self.panel.Validate(): ## runs validators on all assoc ctrls\n raise Exception(_('Invalid table design.'))\n gui_tblname = self.txt_tblname.GetValue()\n if self.new:\n try:\n del self.tblname_lst[0] ## empty ready to repopulate\n except Exception:\n pass ## OK to fail to delete item in list if already empty\n self.tblname_lst.append(gui_tblname)\n self.make_new_tbl()\n dd.set_tbl(tbl=gui_tblname)\n else:\n if not self.read_only:\n orig_tblname = self.tblname_lst[0]\n del self.tblname_lst[0] ## empty ready to repopulate\n self.tblname_lst.append(gui_tblname)\n dd.set_tbl(tbl=orig_tblname) ## The new one hasn't hit the database yet\n self.modify_tbl()\n self.changes_made = True", "def update(self):\n for object in reversed(self.addList):\n self.objects.append(object)\n self.addList.remove(object)\n\n for object in reversed(self.removeList):\n self.objects.remove(object)\n self.removeList.remove(object)\n\n self.objects = sorted(self.objects,key=priority)\n\n for object in self.objects:\n object.update()", "def update_listed_eqns(self):\n self.clear_rightside()\n self.lst_eqns.clear()\n self.lst_eqns.addItems(list(self.eqn_data[self.cmb_sections.currentText()]))", "def run(self):\n split_old_type = self.old_type.split(\"/\")\n types_list = None\n \n if isinstance(self.object, scg_objects.SCgNode):\n types_list = scg_alphabet.get_node_types()\n elif isinstance(self.object, scg_objects.SCgPair):\n types_list = scg_alphabet.get_pair_types()\n else:\n raise Exception(\"Unknown object type %s\" % str(self.object))\n \n assert len(types_list) > 0\n \n split_types = [] \n for _type in types_list:\n split_types.append(_type.split(\"/\"))\n \n # parse possible values and create controls for changing them\n for idx in xrange(0, len(split_types[0])):\n values = []\n for _type in split_types:\n if not _type[idx] in values:\n values.append(_type[idx])\n \n ctrl = ChoiceControl(values, split_old_type[idx], self.panel, self.updateType)\n \n self.controls.append(ctrl)\n \n # layout created controls\n height = 5\n \n for ctrl in self.controls:\n ctrl.panel.setPosition(3, height)\n height += ctrl.panel.getHeight() + 5\n width = ctrl.panel.getWidth()\n \n self.button_ok.setPosition(10, height + 10)\n self.button_cancel.setPosition(65, height + 10)\n \n pos = render_engine.pos3dTo2dWindow(self.object.getPosition() + self.object.getScale() / 2)\n # make some offset\n width += 20\n height += 45 \n self.panel.setSize(width, height)\n \n # sure that dialog isn't placed out of screen \n x, y = pos\n x2 = x + width\n y2 = y + height\n \n if x2 >= render_engine.Window.width:\n x = render_engine.Window.width - width\n elif x < 0:\n x = 0\n \n if y2 >= render_engine.Window.height:\n y = render_engine.Window.height - height\n elif y < 0:\n y = 0 \n \n self.panel.setPosition(x, y)\n \n # show panel\n 
self.panel.setVisible(True)", "def get_new(self, env):\n for sprite in env.new_sprites:\n if not isinstance(sprite, Dirt):\n if not isinstance(sprite, tako.Tako):\n self.widget_sprites.add(sprite)\n else:\n self.all_sprites.add(sprite)\n env.new_sprites.remove(sprite)", "def update_list_view(self):\n # Clear the list/tree view.\n self.list_view.clear()\n\n # Find all the selected things in Maya.\n selected = cmds.ls(selection=True)\n\n # For each of the selected things, create a widget item.\n for thing in selected:\n item = QtGui.QListWidgetItem(thing)\n item.setFlags(item.flags() | QtCore.Qt.ItemIsEditable)\n self.list_view.addItem(item)\n # Set the flags on the widget item so it is editable.", "def update_view(self):\n for row in self.view.obj_list:\n for obj in row:\n obj._update(self.model)", "def update(self):\n self.platform_list.update()\n self.exit_sprite.update()\n self.bagGroup.update()\n self.enemy_list.update()", "def updateControl(self, event):\r\n print(\"updating...\")\r\n product_dict = [\r\n {\"title\":\"Core Python Programming\", \"author\":\"Wesley Chun\",\r\n \"isbn\":\"0132269937\", \"mfg\":\"Prentice Hall\"},\r\n {\"title\":\"Python Programming for the Absolute Beginner\",\r\n \"author\":\"Michael Dawson\", \"isbn\":\"1598631128\",\r\n \"mfg\":\"Course Technology\"},\r\n {\"title\":\"Learning Python\", \"author\":\"Mark Lutz\",\r\n \"isbn\":\"0596513984\", \"mfg\":\"O'Reilly\"}\r\n ]\r\n data = self.products + product_dict\r\n self.dataOlv.SetObjects(data)", "def update_associations(self):\n for dt_format, old_value, new_value in self.own_list:\n DescriptorFormatTypeManager.own(dt_format, self.entity, old_value, new_value)", "def update_buttons(self):\n # Enable the Add/Remove step buttons if a Generator is loaded\n enable = self.mgr.obj is not None\n self.addButton.setEnabled(enable)\n self.removeButton.setEnabled(enable)\n self.upButton.setEnabled(enable)\n self.downButton.setEnabled(enable)", "def _refresh_model_fields(self) -> None:\n if self.model and self.model.actions:\n self.crud.add_multi(terms=self.model.actions)\n if self.model and not self.model.actions:\n self.crud.reset()", "def _adjust_widgets(self):\n\t\teditEnabled = self.directoryList.get_sensitive()\n\t\tselection = self.directoryList.get_selection()\n\t\tmodel, selected = selection.get_selected()\n\t\tnRows = self.directoryModel.get_row_count()\n\t\thasSelection = selected is not None\n\t\tcanMoveUp = False\n\t\tcanMoveDown = False\n\t\tif hasSelection:\n\t\t\tpath = model.get_path(selected)\n\t\t\tiRow = path[0]\n\t\t\tcanMoveUp = iRow > 0\n\t\t\tcanMoveDown = iRow < (nRows -1)\n\t\tself.buttonNew.set_sensitive(editEnabled)\n\t\tself.buttonEdit.set_sensitive(hasSelection and editEnabled)\n\t\tself.buttonUp.set_sensitive(canMoveUp and editEnabled)\n\t\tself.buttonDown.set_sensitive(canMoveDown and editEnabled)\n\t\tself.buttonRemove.set_sensitive(hasSelection and editEnabled)\n\n\t\tself.logView.set_sensitive(not editEnabled)\n\t\tself.buttonImport.set_sensitive(bool(nRows))\n\t\tif self.buttonImport.get_active() and not bool(nRows):\n\t\t\tself.buttonImport.set_active(False)", "def update_listbox(self):\n self.listbox.delete(0, tk.END)\n self.listbox.insert(tk.END, \"..\")\n for item in self.traverser.content_nice():\n self.listbox.insert(tk.END, item)" ]
[ "0.57126147", "0.5587496", "0.5402336", "0.5387726", "0.5372032", "0.53281385", "0.52710485", "0.5269086", "0.52376354", "0.5144977", "0.5120092", "0.5041901", "0.5036595", "0.5007323", "0.49805024", "0.4979292", "0.49675938", "0.4917916", "0.49164057", "0.49124953", "0.49018002", "0.4887826", "0.48814753", "0.4877588", "0.4873045", "0.48349115", "0.48268196", "0.48263422", "0.4823942", "0.4814529" ]
0.85483783
0
r"""Collapses cube coordinates and calculate percentiled data. Calculate percentiled data over a given coordinate by collapsing that coordinate. Typically used to convert realization data into percentiled data, but may calculate over any dimension coordinate. Alternatively calling this with a dataset containing probabilities will convert those to percentiles using the ensemble coupla coupling plugin. If no particular percentiles are given at which to calculate values and no 'number of percentiles' to calculate are specified, the following defaults will be used. '[0, 5, 10, 20, 25, 30, 40, 50, 60, 70, 75, 80, 90, 95, 100]'
def process(cube, coordinates=None, ecc_bounds_warning=False,
            percentiles=None, no_of_percentiles=None):
    if no_of_percentiles is not None:
        percentiles = choose_set_of_percentiles(no_of_percentiles,
                                                sampling="quantile")
    # TODO: Correct when formal cf-standards exists
    if 'probability_of_' in cube.name():
        result = GeneratePercentilesFromProbabilities(
            ecc_bounds_warning=ecc_bounds_warning).process(
                cube, percentiles=percentiles)
        if coordinates:
            warnings.warn("Converting probabilities to percentiles. The "
                          "provided COORDINATES_TO_COLLAPSE variable will "
                          "not be used.")
    else:
        if not coordinates:
            raise ValueError("To collapse a coordinate to calculate "
                             "percentiles, a coordinate or list of "
                             "coordinates must be provided.")

        # Switch back to use the slow scipy method if the cube contains masked
        # data which the numpy method cannot handle.
        fast_percentile_method = True

        if np.ma.is_masked(cube.data):
            # Check for masked points:
            fast_percentile_method = False
        elif np.ma.isMaskedArray(cube.data):
            # Check if we have a masked array with an empty mask. If so,
            # replace it with a non-masked array:
            cube.data = cube.data.data

        result = PercentileConverter(
            coordinates, percentiles=percentiles,
            fast_percentile_method=fast_percentile_method).process(cube)
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_up_percentiles_cube():\n\n test_data = np.full((5, 4, 4), -1, dtype=float)\n for i in range(5):\n test_data[i].fill(100*i + 200)\n\n percentiles = DimCoord(np.linspace(0, 100, 5), long_name=\"percentiles\",\n units=\"%\")\n grid_x = DimCoord(np.arange(4), standard_name=\"projection_x_coordinate\",\n units=\"km\")\n grid_y = DimCoord(np.arange(4), standard_name=\"projection_y_coordinate\",\n units=\"km\")\n test_cube = iris.cube.Cube(test_data, long_name=\"snow_level\", units=\"m\",\n dim_coords_and_dims=[(percentiles, 0),\n (grid_y, 1), (grid_x, 2)])\n return test_cube", "def test_percentile_coord(self):\n result = Plugin()._probabilities_to_percentiles(self.cube, self.percentiles)\n self.assertIsInstance(result.coord(\"percentile\"), DimCoord)\n self.assertArrayEqual(result.coord(\"percentile\").points, self.percentiles)\n self.assertEqual(result.coord(\"percentile\").units, unit.Unit(\"%\"))", "def matlab_percentile(in_data, percentiles):\n data = np.sort(in_data)\n p_rank = 100.0 * (np.arange(data.size) + 0.5) / data.size\n perc = np.interp(percentiles, p_rank, data, left=data[0], right=data[-1])\n return perc", "def test_coordinate_collapse(self):\n result = self.plugin_instance.create_probability_cube(\n self.percentiles_cube, self.orography_cube)\n with self.assertRaises(CoordinateNotFoundError):\n result.coord_dims(self.percentile_coordinate)", "def test_all_equal_percentiles(self):\n self.percentiles_cube.data[:, :, 0:2].fill(300.)\n expected = set_reference_probabilities()\n expected[0:2, 0:2] = 0\n expected[2:, 0:2] = 1\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def perc(data,percentile):\n clip_value = np.percentile(data,percentile)\n data = clip(data,clip_value)\n return data", "def main(argv=None):\n parser = ArgParser(\n description=\"Calculate percentiled data over a given coordinate by \"\n \"collapsing that coordinate. Typically used to convert realization \"\n \"data into percentiled data, but may calculate over any \"\n \"dimension coordinate. Alternatively, calling this CLI with a dataset\"\n \" containing probabilities will convert those to percentiles using \"\n \"the ensemble copula coupling plugin. If no particular percentiles \"\n \"are given at which to calculate values and no 'number of percentiles'\"\n \" to calculate are specified, the following defaults will be used: \"\n \"[0, 5, 10, 20, 25, 30, 40, 50, 60, 70, 75, 80, 90, 95, 100]\")\n parser.add_argument(\"input_filepath\", metavar=\"INPUT_FILE\",\n help=\"A path to an input NetCDF file to be processed\")\n parser.add_argument(\"output_filepath\", metavar=\"OUTPUT_FILE\",\n help=\"The output path for the processed NetCDF\")\n parser.add_argument(\"--coordinates\", metavar=\"COORDINATES_TO_COLLAPSE\",\n nargs=\"+\",\n help=\"Coordinate or coordinates over which to collapse\"\n \" data and calculate percentiles; e.g. \"\n \"'realization' or 'latitude longitude'. This argument \"\n \"must be provided when collapsing a coordinate or \"\n \"coordinates to create percentiles, but is redundant \"\n \"when converting probabilities to percentiles and may \"\n \"be omitted. 
This coordinate(s) will be removed \"\n \"and replaced by a percentile coordinate.\")\n parser.add_argument('--ecc_bounds_warning', default=False,\n action='store_true',\n help='If True, where calculated percentiles are '\n 'outside the ECC bounds range, raise a warning '\n 'rather than an exception.')\n group = parser.add_mutually_exclusive_group(required=False)\n group.add_argument(\"--percentiles\", metavar=\"PERCENTILES\",\n nargs=\"+\", default=None, type=float,\n help=\"Optional definition of percentiles at which to \"\n \"calculate data, e.g. --percentiles 0 33.3 66.6 100\")\n group.add_argument('--no-of-percentiles', default=None, type=int,\n metavar='NUMBER_OF_PERCENTILES',\n help=\"Optional definition of the number of percentiles \"\n \"to be generated, these distributed regularly with the \"\n \"aim of dividing into blocks of equal probability.\")\n\n args = parser.parse_args(args=argv)\n\n # Load Cube\n cube = load_cube(args.input_filepath)\n\n # Process Cube\n result = process(cube, args.coordinates, args.ecc_bounds_warning,\n args.percentiles, args.no_of_percentiles)\n\n # Save Cube\n save_netcdf(result, args.output_filepath)", "def test_equal_percentiles(self):\n self.percentiles_cube.data[0, :, :].fill(300.)\n expected = set_reference_probabilities()\n expected[np.where(expected < 0.25)] = 0.\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def _percentile(self, data, percent):\n if not data:\n return None\n k = (len(data) - 1) * percent\n f = math.floor(k)\n c = math.ceil(k)\n if f == c:\n return data[int(k)]\n d0 = data[int(f)] * (c - k)\n d1 = data[int(c)] * (k - f)\n return d0 + d1", "def test_lots_of_percentiles(self):\n data = np.array(\n [\n [[14.4, -46, 10.2], [-73.0, -46, -89.2], [-89.2, -93.25, -94.6]],\n [[19.2, 8.25, 10.6], [-19, 8.25, -67.6], [-67.6, -79.75, -83.8]],\n [[24.0, 8.75, 11.0], [8.33333, 8.75, -46.0], [-46.0, -66.25, -73.0]],\n [[28.8, 9.25, 11.4], [9.0, 9.25, -24.4], [-24.4, -52.75, -62.2]],\n [[33.6, 9.75, 11.8], [9.666667, 9.75, -2.8], [-2.8, -39.25, -51.4]],\n [\n [38.4, 10.333333, 16.8],\n [10.333333, 10.2, 8.5],\n [8.333333, -25.75, -40.6],\n ],\n [[43.2, 11.0, 26.4], [11.0, 10.6, 9.5], [9.0, -12.25, -29.8]],\n [\n [48.0, 11.666667, 36.0],\n [11.666667, 11.0, 10.5],\n [9.666667, 1.25, -19.0],\n ],\n [[52.8, 24, 45.6], [24, 11.4, 11.5], [10.5, 8.5, -8.2]],\n [[57.6, 48, 55.2], [48, 11.8, 36.0], [11.5, 9.5, 2.6]],\n ],\n dtype=np.float32,\n )\n\n percentiles = np.arange(5, 100, 10)\n result = Plugin()._probabilities_to_percentiles(self.cube, percentiles)\n self.assertArrayAlmostEqual(result.data, data, decimal=5)", "def test_check_data_specifying_percentiles(self):\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n result = Plugin().process(self.cube, percentiles=[25, 50, 75])\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def test_check_data_specifying_no_of_percentiles(self):\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n result = Plugin().process(self.cube, no_of_percentiles=3)\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def prctile(data, p_vals=[0, 25, 50, 75, 100], sorted_=False):\r\n ps = [p_vals] if np.isscalar(p_vals) else p_vals\r\n\r\n if not sorted_:\r\n data = sorted(data)\r\n n = len(data)\r\n d = []\r\n for 
p in ps:\r\n fi = p * n / 100 - 0.5\r\n if fi <= 0: # maybe extrapolate?\r\n d.append(data[0])\r\n elif fi >= n - 1:\r\n d.append(data[-1])\r\n else:\r\n i = int(fi)\r\n d.append((i+1 - fi) * data[i] + (fi - i) * data[i+1])\r\n return d[0] if np.isscalar(p_vals) else d", "def calc_percentile(self, p):\n bounds = self.range_bins\n r = []\n v = []\n for b in bounds:\n d = self._get_data_distance(0., b)\n if len(d) < 1:\n continue\n r.append(b)\n v.append(np.percentile(d, p * 100.)) # percentile value\n\n r = np.asarray(r)\n np.asarray(v)\n\n o = {'r': np.asarray(r), 'value': np.asarray(v)}\n if 'percentiles' not in self.statistic.keys():\n self.statistic.update({'percentiles': {}})\n\n self.statistic['percentiles'].update({p: o})", "def write_preflop_percentiles():\n suited_result = [\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # not used\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # 2\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # 5\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # 6\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # 7\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # 8\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # 9\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # 10\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # J\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # Q\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # K\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] # A\n ]\n unsuited_result = [\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # not used\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # 2\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # 5\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # 6\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # 7\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # 8\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # 9\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # 10\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # J\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # Q\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], # K\n [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] # A\n ]\n hands = combinations(LookupTables.deck, 2)\n # This could be faster, but it doesn't run very much so whatever\n for hand in hands:\n sorted_rank = sorted([hand[0].rank, hand[1].rank])\n if hand[0].suit == hand[1].suit:\n preflop_order = LookupTables.Two.preflop_order_matrix[sorted_rank[1] - 2][sorted_rank[0] - 2]\n else:\n preflop_order = LookupTables.Two.preflop_order_matrix[sorted_rank[0] - 2][sorted_rank[1] - 2]\n\n # this is fraction of hands you beat\n preflop_percentile = 1 - sum(LookupTables.Two.preflop_count_matrix[0:preflop_order - 1]) / \\\n LookupTables.Two.preflop_count_sum\n\n if hand[0].suit == hand[1].suit:\n suited_result[hand[0].rank][hand[1].rank] = preflop_percentile\n else:\n unsuited_result[hand[0].rank][hand[1].rank] = preflop_percentile\n \n print \"suited_ranks_to_percentile = [\"\n for sublist in suited_result:\n print sublist\n print \"]\"\n \n print \"unsuited_ranks_to_percentile = [\"\n for sublist in unsuited_result:\n print sublist\n print \"]\"", "def test_values(self):\n expected = set_reference_probabilities()\n\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def test_equal_percentiles_inverse_ordering(self):\n self.percentiles_cube.data[0, :, :].fill(300.)\n # Invert the values associated with the percentiles.\n self.percentiles_cube.data = 
np.flipud(self.percentiles_cube.data)\n expected = set_reference_probabilities()\n expected[np.where(expected <= 0.25)] = 0.\n expected = 1.0 - expected\n probability_cube = ProbabilitiesFromPercentiles2D(\n self.percentiles_cube, 'new_name').percentile_interpolation(\n self.orography_cube, self.percentiles_cube)\n self.assertArrayAlmostEqual(probability_cube.data, expected)", "def test_check_data_specifying_single_percentile(self):\n expected_data = np.array(self.percentile_25)\n result = Plugin().process(self.cube, percentiles=[25])\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def quartiles(x, percentile):\n length = len(x)\n\n if percentile == 25:\n center = length // 4\n elif percentile == 75:\n center = length // 2 + length // 4\n\n x.sort()\n\n if length % 2 == 0:\n return (x[center - 1] + x[center]) / 2\n else:\n return x[center]", "def test_preservation_of_single_valued_dimension(self):\n percentiles_cube = set_up_percentiles_cube()\n new_model_coord = build_coordinate([0],\n long_name='leading_coord',\n coord_type=DimCoord,\n data_type=int)\n percentiles_cube.add_aux_coord(new_model_coord)\n percentiles_cube = iris.util.new_axis(percentiles_cube,\n scalar_coord='leading_coord')\n plugin_instance = ProbabilitiesFromPercentiles2D(\n percentiles_cube, 'new_name')\n probability_cube = plugin_instance.process(self.orography_cube)\n self.assertEqual(percentiles_cube.coords(dim_coords=True)[0],\n probability_cube.coords(dim_coords=True)[0])", "def test_check_data_not_specifying_percentiles(self):\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n result = Plugin().process(self.cube)\n self.assertArrayAlmostEqual(result.data, expected_data, decimal=5)", "def scale_pc_data_matrix(coords, pct_var):\n return coords * (pct_var / pct_var.max())", "def percentile(self, values, percent):\r\n if not values:\r\n return None\r\n k = (len(values)-1) * percent\r\n f = math.floor(k)\r\n c = math.ceil(k)\r\n if f == c:\r\n return values[int(k)]\r\n d0 = values[int(f)] * (c-k)\r\n d1 = values[int(c)] * (k-f)\r\n return d0+d1", "def _pctiles_from_sorted(self, values, pcs):\n if not all(0 <= pc <= 100 for pc in pcs):\n raise ValueError(\"pctiles must be between 0 and 100\")\n nvals = len(values)\n pctiles = []\n for pc in pcs:\n if pc == 0:\n new_pct = values[0]\n elif pc == 100:\n new_pct = values[nvals-1]\n else:\n n = pc * nvals / 100\n if n == int(n):\n new_pct = (values[int(n)-1] + values[int(n)]) / 2\n else:\n new_pct = values[floor(n)]\n pctiles.append(new_pct)\n return pctiles", "def scalar_percentiles(**kwargs):\n attributes = [\"ping_type\", \"os\", \"app_version\", \"app_build_id\", \"channel\"]\n fixed_attributes = [\"app_version\", \"channel\"]\n cubed_attributes = [x for x in attributes if x not in fixed_attributes]\n\n return dict(\n # TODO: be consistent with naming of attributes (e.g. 
attributes_list)\n attributes=attributes,\n cubed_attributes=cubed_attributes,\n attribute_combinations=compute_datacube_groupings(cubed_attributes),\n aggregate_attributes=\"\"\"\n metric,\n metric_type,\n key\n \"\"\",\n **kwargs,\n )", "def Quartiles(data):\n q = np.percentile(data, [25, 50, 75])\n\n return q[0], q[1], q[2]", "def percentile_plots_ref(plot_var, i_case, plot_var_ref, i_case_ref, plot_settings_abs, plot_settings_rel):\n column_titles = [\"5th percentile\", \"32nd percentile\", \"50th percentile\"]\n row_titles = ['Absolute value', 'Relative to reference case']\n plot_var_suffix = [\"_perc5\", \"_perc32\", \"_perc50\"]\n\n # Read data from NetCDF source file.\n plot_items = [[], []]\n plot_data_max, plot_data_relative_max = 0, 0\n for s in plot_var_suffix:\n d = nc.variables[plot_var+s][i_case, :, :]\n if plot_var[0] == \"p\":\n d *= 1e-3\n plot_items[0].append({'data': d})\n if np.amax(d) > plot_data_max:\n plot_data_max = np.amax(d)\n\n d_ref = nc.variables[plot_var_ref+s][i_case_ref, :, :]\n if plot_var[0] == \"p\":\n d_ref *= 1e-3\n d_relative = d/d_ref\n plot_items[1].append({'data': d_relative})\n if np.amax(d_relative) > plot_data_relative_max:\n plot_data_relative_max = np.amax(d_relative)\n\n print(\"Max absolute and relative value are respectively {:.2f} and {:.2f}\"\n .format(plot_data_max, plot_data_relative_max))\n\n # Mapping plot properties and splitting up into individual and shared properties.\n plot_handling = plot_settings_abs[\"plot_handling\"]\n contour_fill_levels = plot_handling[\"contour_fill_levels\"]\n contour_line_levels = plot_handling.get(\"contour_line_levels\", 3*[contour_fill_levels])\n colorbar_ticks = plot_handling.get(\"colorbar_ticks\", contour_fill_levels)\n\n contour_fill_levels_rel = plot_settings_rel[\"contour_fill_levels\"]\n contour_line_levels_rel = plot_settings_rel.get(\"contour_line_levels\", 3*[contour_fill_levels_rel])\n colorbar_ticks_rel = plot_settings_rel.get(\"colorbar_ticks\", contour_fill_levels_rel)\n\n # Write the contour handling to plot_items.\n for i, plot_item in enumerate(plot_items[0]):\n plot_item['contour_line_levels'] = contour_line_levels[i]\n for i, plot_item in enumerate(plot_items[1]):\n plot_item['contour_line_levels'] = contour_line_levels_rel[i]\n\n # Write the row dependent settings to row_items.\n row_items = []\n for i in range(2):\n row_items.append({\n 'title': row_titles[i],\n })\n row_items[0]['colorbar_ticks'] = colorbar_ticks\n row_items[0]['colorbar_label'] = plot_settings_abs[\"color_label\"]\n row_items[0]['contour_fill_levels'] = contour_fill_levels\n if 'colorbar_tick_fmt' in plot_handling:\n row_items[0]['colorbar_tick_fmt'] = plot_handling[\"colorbar_tick_fmt\"]\n row_items[0]['contour_line_label_fmt'] = '%.1f'\n\n row_items[1]['colorbar_ticks'] = colorbar_ticks_rel\n row_items[1]['colorbar_label'] = \"Increase factor [-]\"\n row_items[1]['contour_fill_levels'] = contour_fill_levels_rel\n if 'colorbar_tick_fmt' in plot_settings_rel:\n row_items[1]['colorbar_tick_fmt'] = plot_settings_rel[\"colorbar_tick_fmt\"]\n row_items[1]['extend'] = plot_settings_rel.get('extend', \"neither\")\n\n plot_panel_2x3(plot_items, column_titles, row_items)", "def _calc_perc(arr: np.array, p: Sequence[float] = None):\n if p is None:\n p = [50]\n\n nan_count = np.isnan(arr).sum(axis=-1)\n out = np.moveaxis(np.percentile(arr, p, axis=-1), 0, -1)\n nans = (nan_count > 0) & (nan_count < arr.shape[-1])\n if np.any(nans):\n out_mask = np.stack([nans] * len(p), axis=-1)\n # arr1 = arr.reshape(int(arr.size / 
arr.shape[-1]), arr.shape[-1])\n # only use nanpercentile where we need it (slow performance compared to standard) :\n out[out_mask] = np.moveaxis(\n np.nanpercentile(arr[nans], p, axis=-1), 0, -1\n ).ravel()\n return out", "def memory_percentage_for_pivoting(self, memory_percentage_for_pivoting):\n\n self._memory_percentage_for_pivoting = memory_percentage_for_pivoting", "def _calculate_percentile_cutoff(run_numbers):\n mcp_values = []\n andor_values = []\n for run_number in run_numbers:\n current_data_path = ''.join([DATA_PATH, 'run', str(run_number), 'allevts.h5'])\n f = h5py.File(current_data_path, 'r')\n current_phot = _get_photon_energy(f, run_number)\n current_mcp = np.array(f['Acqiris2']['acq'])\n current_mcp = current_mcp[(current_phot > 781) & (current_phot < 782)]\n mcp_values.extend(current_mcp)\n current_andor = np.array(f['Andor']['signal'])\n current_andor = current_andor[(current_phot > 781) & (current_phot < 782)]\n andor_values.extend(current_andor)\n #plt.figure()\n #plt.scatter(mcp_values, andor_values)\n mcp_percentile_cutoff = min([percentileofscore(andor_values, 4000), 99.9])\n return mcp_percentile_cutoff" ]
[ "0.6452051", "0.60577875", "0.59541893", "0.589441", "0.5876835", "0.57781804", "0.57659763", "0.5726601", "0.5721728", "0.5720009", "0.5646027", "0.5432934", "0.5351184", "0.53393996", "0.5324208", "0.5303023", "0.52893645", "0.5286576", "0.526351", "0.5245997", "0.51418597", "0.50957394", "0.50623274", "0.50437236", "0.49769554", "0.49030238", "0.489358", "0.48759606", "0.487201", "0.48542044" ]
0.6905458
0
Check we receive a warning about weldx_widgets not being available.
def test_redirection_weldx_widgets_not_found():
    orig_import = __import__  # Store original __import__

    def import_mock(name, *args, **kwargs):
        if "weldx_widgets" in name:
            raise ModuleNotFoundError("weldx_widgets not found")
        if "matplotlib" in name:
            raise ModuleNotFoundError("matplotlib not found")
        return orig_import(name, *args, **kwargs)

    pattern = ".*weldx_widget.*unavailable"
    with patch("builtins.__import__", side_effect=import_mock):
        with pytest.warns(match=pattern):
            import weldx.visualization as vs

        # ensure that using declared features emits the warning again.
        for name in vs.__all__:
            with pytest.warns(match=pattern):
                obj = getattr(vs, name)
                obj()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_warnings_active(self) -> bool:", "def has_warnings(self) -> bool:", "def check_alive(cw: CustomWidget) -> NoReturn:\r\n ...", "def check_warnings():\n user_warned = False\n # Warn the user about problematic key bindings that may conflict with\n # vimode.\n # The solution is to remove these key bindings, but that's up to the user.\n infolist = weechat.infolist_get(\"key\", \"\", \"default\")\n problematic_keybindings = []\n while weechat.infolist_next(infolist):\n key = weechat.infolist_string(infolist, \"key\")\n command = weechat.infolist_string(infolist, \"command\")\n if re.match(REGEX_PROBLEMATIC_KEYBINDINGS, key):\n problematic_keybindings.append(\"%s -> %s\" % (key, command))\n if problematic_keybindings:\n user_warned = True\n print_warning(\"Problematic keybindings detected:\")\n for keybinding in problematic_keybindings:\n print_warning(\" %s\" % keybinding)\n print_warning(\"These keybindings may conflict with vimode.\")\n print_warning(\"You can remove problematic key bindings and add\"\n \" recommended ones by using /vimode bind_keys, or only\"\n \" list them with /vimode bind_keys --list\")\n print_warning(\"For help, see: %s\" % FAQ_KEYBINDINGS)\n del problematic_keybindings\n # Warn tmux/screen users about possible Esc detection delays.\n if \"STY\" in os.environ or \"TMUX\" in os.environ:\n if user_warned:\n weechat.prnt(\"\", \"\")\n user_warned = True\n print_warning(\"tmux/screen users, see: %s\" % FAQ_ESC)\n if (user_warned and not\n weechat.config_string_to_boolean(vimode_settings['no_warn'])):\n if user_warned:\n weechat.prnt(\"\", \"\")\n print_warning(\"To force disable warnings, you can set\"\n \" plugins.var.python.vimode.no_warn to 'on'\")", "def get_unsupported_widgets(self):\n return self._unsupported_widgets", "def no_xb_gui():\n logger.warning(\"Could not import the GUI.\")\n logger.warning(\"For instructions on how to install the GUI,\")\n logger.warning(\"check the docs janclemenslab.org/das/install.html.\")", "def test_filter_not_available_plugins(plugin_dialog_constructor):\n item = plugin_dialog_constructor.available_list.item(0)\n widget = plugin_dialog_constructor.available_list.itemWidget(item)\n if widget:\n assert not widget.action_button.isEnabled()\n assert widget.warning_tooltip.isVisible()\n\n item = plugin_dialog_constructor.available_list.item(1)\n widget = plugin_dialog_constructor.available_list.itemWidget(item)\n assert widget.action_button.isEnabled()\n assert not widget.warning_tooltip.isVisible()", "def has_warn(self):\r\n return self._arm.has_warn", "def has_warnings(self) -> bool:\n return len(self.warnings) > 0", "def warning_function():\r\n app = QApplication(sys.argv)\r\n ex = WarningBox()\r\n sys.exit(app.exec_())", "def has_off_hook_warning(self) -> bool:", "def has_warning(self) -> bool:\n return self._has_warning", "def XPIsWidgetVisible(inWidget):\n pass", "def _maybe_show_deprecation_warning(self):\n if self._deprecation_warning is not None:\n show_deprecation_warning(self._deprecation_warning)", "def is_widget_supported(self, major, minor=None):\n assert isinstance(major, int)\n assert isinstance(minor, int) or minor is None\n\n # no restrictions exists\n if 'supported_by' not in self.config:\n return True\n\n if minor is not None:\n version_specific = 'wx%s%s' % (major, minor)\n if version_specific in self.config['supported_by']:\n return True\n\n version_generic = 'wx%s' % major\n if version_generic in self.config['supported_by']:\n return True\n\n return False", "def _check_xml_deprecated_qweb_directive(self):\n 
valid_versions = set(self.linter._all_options[\n 'valid_odoo_versions'].config.valid_odoo_versions)\n if not valid_versions & {'10.0', '11.0'}:\n return True\n\n deprecated_directives = {\n 't-esc-options',\n 't-field-options',\n 't-raw-options',\n }\n directive_attrs = '|'.join('@%s' % d for d in deprecated_directives)\n xpath = '|'.join(\n '/%s//template//*[%s]' % (tag, directive_attrs)\n for tag in ('odoo', 'openerp')\n )\n\n self.msg_args = []\n for xml_file in self.filter_files_ext('xml', relpath=False):\n doc = self.parse_xml(xml_file)\n if isinstance(doc, string_types):\n continue\n for node in doc.xpath(xpath):\n # Find which directive was used exactly.\n directive = next(\n iter(set(node.attrib) & deprecated_directives))\n self.msg_args.append((\n '%s:%d' % (xml_file, node.sourceline), directive))\n return not bool(self.msg_args)", "def is_warning(self) -> bool:\n return not self.get_warning()", "def is_warning(self) -> bool:\n return not self.get_warning()", "def has_err_warn(self):\r\n return self._arm.has_err_warn", "def has_warnings_active(self) -> bool:\n return len(self.warnings_active) > 0", "def is_available(self) -> bool:\n return self.on_hand > self.warn_limit", "def handle_uncaught_event(self, event):\r\n if self.get_visible():\r\n for i in self.widgets:\r\n if i.get_visible():\r\n if i.handle_uncaught_event(event):\r\n return True\r\n return False", "def warn():\n pass", "def needs_migrations(self):\n # TODO(majklk): also check models etc.\n if len(self.widgets) > 0:\n return True\n return False", "def has_warning(self):\n \n if self['n_madloop_calls'] > 0:\n fraction = self['exceptional_points']/float(self['n_madloop_calls'])\n else:\n fraction = 0.0\n \n if self['skipped_subchannel'] > 0:\n return True\n elif fraction > 1.0e-4:\n return True\n else:\n return False", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass", "def log_check_warnings(self):\n pass" ]
[ "0.65154344", "0.6439584", "0.6356937", "0.6303061", "0.61383975", "0.6066735", "0.6012545", "0.5965545", "0.5897515", "0.5897374", "0.5860277", "0.5803844", "0.57967067", "0.5743022", "0.5737353", "0.5673264", "0.563405", "0.563405", "0.563183", "0.56248", "0.55991155", "0.5572572", "0.5508209", "0.55043864", "0.549366", "0.54486406", "0.54486406", "0.54486406", "0.54486406", "0.54486406" ]
0.6596005
0
Tiles x on dimension dim count times.
def tile(x, count, dim=0):
    perm = list(range(len(x.size())))
    if dim != 0:
        perm[0], perm[dim] = perm[dim], perm[0]
        x = x.permute(perm).contiguous()
    out_size = list(x.size())
    out_size[0] *= count
    x = x.repeat(count, *(1,) * x.dim()).transpose(0, 1).contiguous().view(*out_size)
    if dim != 0:
        x = x.permute(perm).contiguous()
    return x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tile(x, count, dim=0):\n perm = list(range(len(x.shape)))\n if dim != 0:\n perm[0], perm[dim] = perm[dim], perm[0]\n x = x.permute(perm).contiguous()\n out_size = list(x.shape)\n out_size[0] *= count\n batch = x.shape[0]\n x = x.view(batch, -1) \\\n .transpose(0, 1) \\\n .repeat(count, 1) \\\n .transpose(0, 1) \\\n .contiguous() \\\n .view(*out_size)\n if dim != 0:\n x = x.permute(perm).contiguous()\n return x", "def tile(x: Tensor, count: int, dim=0) -> Tensor:\n if isinstance(x, tuple):\n h, c = x\n return tile(h, count, dim=dim), tile(c, count, dim=dim)\n\n perm = list(range(len(x.size())))\n if dim != 0:\n perm[0], perm[dim] = perm[dim], perm[0]\n x = x.permute(perm).contiguous()\n out_size = list(x.size())\n out_size[0] *= count\n batch = x.size(0)\n x = (\n x.view(batch, -1)\n .transpose(0, 1)\n .repeat(count, 1)\n .transpose(0, 1)\n .contiguous()\n .view(*out_size)\n )\n if dim != 0:\n x = x.permute(perm).contiguous()\n return x", "def tile(X, rows, cols):\n tiling = np.zeros((rows * X.shape[1], cols * X.shape[2], X.shape[3]), dtype = X.dtype)\n for i in range(rows):\n for j in range(cols):\n idx = i * cols + j\n if idx < X.shape[0]:\n img = X[idx,...]\n tiling[\n i*X.shape[1]:(i+1)*X.shape[1],\n j*X.shape[2]:(j+1)*X.shape[2],\n :] = img\n return tiling", "def tile(X, rows, cols):\n tiling = np.zeros((rows * X.shape[1], cols * X.shape[2], X.shape[3]), dtype = X.dtype)\n for i in range(rows):\n for j in range(cols):\n idx = i * cols + j\n if idx < X.shape[0]:\n img = X[idx,...]\n tiling[\n i*X.shape[1]:(i+1)*X.shape[1],\n j*X.shape[2]:(j+1)*X.shape[2],\n :] = img\n return tiling", "def split_into_tiles(self, x: torch.Tensor):\n tiles, self._coords, self._overlap = self._get_tiles_and_coords(x)\n self._num_tiles = tiles.shape[0]\n return tiles", "def repeat_to_batch(x, batch_size):\n shape = tf.shape(x)\n rnk = tf.rank(x)\n tileshp = tf.ones([rnk - 1], dtype=tf.int32)\n tileshpfinal = tf.concat([[batch_size], tileshp], 0)\n return tf.tile(x, tileshpfinal)", "def tile(x, reps, ndim=None):\r\n\r\n try:\r\n assert python_all([int(i) == i for i in iter(reps)])\r\n except (TypeError, AssertionError):\r\n raise ValueError(\"reps argument to tile must be a constant (e.g. 
\"\r\n \"tuple, list of integers)\")\r\n if len(reps) != x.ndim:\r\n raise ValueError(\"len(reps) != x.ndim not currently supported\")\r\n elif (ndim is not None) and ndim != x.ndim:\r\n raise ValueError(\"if specified, ndim must be equal to both x.ndim and \"\r\n \"len(reps)\")\r\n\r\n if not hasattr(tile, 'op'):\r\n tile.op = {}\r\n\r\n if ndim is None:\r\n ndim = len(reps)\r\n\r\n # backport\r\n # ndim = len(reps) if ndim is None else ndim\r\n # not sure if len(shp) is going to work.\r\n if ndim not in tile.op:\r\n tile.op[ndim] = Tile(ndim)\r\n return tile.op[ndim](x, reps)", "def split_image_into_number_of_tiles(\n arr: Image, x_ntiles: int, y_ntiles: int, overlap: int\n):\n img_width, img_height = arr.shape[-1], arr.shape[-2]\n tile_w = img_width // x_ntiles\n tile_h = img_height // y_ntiles\n return split_image_into_tiles_of_size(arr, tile_w, tile_h, overlap)", "def arrange_tiles(self, layer):\n\n # número de tiles en 'x'\n width = self.width\n arranged_tiles = layer.arranged_tiles\n\n row = -1\n\n # convierte una lista en un diccionario\n for col, tile in enumerate(layer.tiles):\n # calcula la ubicación en dos dimensiones (fila y columna) de cada tile,\n # los tiles originalmente están ordenados en línea\n col %= width\n if col == 0:\n row += 1\n\n # excluye los tiles con id 0,\n # id 0 representa un espacio vacío en el tilemap\n if tile != 0:\n arranged_tiles[(row, col)] = tile\n\n # libera la memoria ocupada por la lista de tiles\n layer.tiles = None", "def make_tiles(input_path, save_path, dimension):\n for filename in os.listdir(input_path):\n if filename.endswith(\".png\"):\n image_path = input_path + filename\n\n width, height = Image.open(image_path).size\n\n # Ensures image is square.\n assert width == height\n # Ensures the image can be cut into the desired dimensions.\n assert width % dimension == 0\n n_tiles = (width / dimension) ** 2\n\n tiles = image_slicer.slice(image_path, n_tiles, save=False)\n image_slicer.save_tiles(\n tiles, directory=save_path, prefix=filename[0:2], format=\"png\"\n )", "def image_to_tiles(img, tile_size):\n padding_argument = [(0,0),(0,0),(0,0)]\n for input_dim in [0,1]:\n padding_argument[input_dim] = (0, (tile_size - img.shape[input_dim]) % tile_size)\n img = np.pad(img, padding_argument, mode='constant')\n tiles = img.reshape((img.shape[0]//tile_size, \n tile_size,\n img.shape[1]//tile_size,\n tile_size,\n img.shape[2]\n )).swapaxes(1,2)\n return tiles", "def repeat(x, repeats):\n x = tf.expand_dims(x, 1)\n max_repeats = tf.reduce_max(repeats)\n tile_repeats = [1, max_repeats]\n arr_tiled = tf.tile(x, tile_repeats)\n mask = tf.less(tf.range(max_repeats), tf.expand_dims(repeats, 1))\n result = tf.reshape(tf.boolean_mask(arr_tiled, mask), [-1])\n return result", "def _assemble_tiles(i, n, tile, tsincr_g, output_dir, outtype):\n # pylint: disable=too-many-arguments\n tsincr_file = os.path.join(output_dir, '{}_{}.npy'.format(outtype, n))\n tsincr = np.load(file=tsincr_file)\n tsincr_g[tile.top_left_y:tile.bottom_right_y, tile.top_left_x:tile.bottom_right_x] = tsincr[:, :, i]", "def repeat(x, repeats, axis=None):\r\n return RepeatOp(axis=axis)(x, repeats)", "def tile_images(img, img_size=32, rows=4, cols=4, spacing=1):\n images = np.ones([3, rows * (img_size + spacing) - spacing, cols * (img_size + spacing)], dtype=np.float32)\n coords = [(i, j) for i in range(rows) for j in range(cols)]\n\n for (i, j), image in zip(coords, img):\n x = i * (img_size + spacing)\n y = j * (img_size + spacing)\n images[:, x: x+img_size, y:y+img_size] = image\n\n 
return images", "def tile_images(image_stack):\n assert len(image_stack.shape) == 4\n image_list = [image_stack[i, :, :, :] for i in range(image_stack.shape[0])]\n tiled_images = np.concatenate(image_list, axis=1)\n return tiled_images", "def batch(size, iterable):\r\n return list(xbatch(size, iterable))", "def fill_tiles(tiles, fill_func):\n return np.array([[fill_func(x) for x in row] for row in tiles])", "def repeat_elements(x, rep, axis):\n x_shape = x.get_shape().as_list()\n if x_shape[axis] is None:\n raise ValueError('Axis ' + str(axis) + ' of input tensor '\n 'should have a defined dimension, but is None. '\n 'Full tensor shape: ' + str(tuple(x_shape)) + '. '\n 'Typically you need to pass a fully-defined '\n '`input_shape` argument to your first layer.')\n # slices along the repeat axis\n splits = array_ops.split(value=x, num_or_size_splits=x_shape[axis], axis=axis)\n # repeat each slice the given number of reps\n x_rep = [s for s in splits for _ in range(rep)]\n return concatenate(x_rep, axis)", "def tile(arrayin, N, M = None):\r\n if M == None :\r\n M = N\r\n Ny, Nx = arrayin.shape\r\n arrayout = np.zeros((Ny * N, Nx * M), dtype = arrayin.dtype) \r\n for i in range(N):\r\n for j in range(M):\r\n arrayout[i * Ny : (i+1) * Nx, j * Ny : (j+1) * Nx] = np.copy(arrayin)\r\n return arrayout", "def repeat_batch(t, K, dim=0):\n shape = t.shape\n tiling = [1] * (len(shape) + 1)\n tiling[dim + 1] = K\n tiled = t.unsqueeze(dim + 1).repeat(tiling)\n old_bsz = shape[dim]\n new_bsz = old_bsz * K\n new_shape = list(shape[:dim]) + [new_bsz] + list(shape[dim+1:])\n return tiled.view(new_shape)", "def forward_tiled(self, image: numpy.ndarray, tile_size: int) -> numpy.ndarray:\n # Constant that only really gets repeated a ton here.\n context = 7\n context2 = context + context\n\n # Notably, numpy is used here because it makes this fine manipulation a lot simpler.\n # Scaling first - repeat on axis 2 and axis 3 (Y & X)\n image = image.repeat(2, 2).repeat(2, 3)\n\n # Resulting image buffer. This is made before the input is padded,\n # since the input has the padded shape right now.\n image_out = numpy.zeros(image.shape)\n\n # Padding next. Note that this padding is done on the whole image.\n # Padding the tiles would lose critical context, cause seams, etc.\n image = numpy.pad(image, [[0, 0], [0, 0], [context, context], [context, context]], mode = \"edge\")\n\n # Now for tiling.\n # The output tile size is the usable output from an input tile (tile_size).\n # As such, the tiles overlap.\n out_tile_size = tile_size - context2\n for out_y in range(0, image_out.shape[2], out_tile_size):\n for out_x in range(0, image_out.shape[3], out_tile_size):\n # Input is sourced from the same coordinates, but some stuff ought to be\n # noted here for future reference:\n # + out_x/y's equivalent position w/ the padding is out_x + context.\n # + The output, however, is without context. 
Input needs context.\n # + Therefore, the input rectangle is expanded on all sides by context.\n # + Therefore, the input position has the context subtracted again.\n # + Therefore:\n in_y = out_y\n in_x = out_x\n # not shown: in_w/in_h = tile_size (as opposed to out_tile_size)\n # Extract tile.\n # Note that numpy will auto-crop this at the bottom-right.\n # This will never be a problem, as tiles are specifically chosen within the padded section.\n tile = image[:, :, in_y:in_y + tile_size, in_x:in_x + tile_size]\n # Extracted tile dimensions -> output dimensions\n # This is important because of said cropping, otherwise it'd be interior tile size.\n out_h = tile.shape[2] - context2\n out_w = tile.shape[3] - context2\n # Process tile.\n tile_t = Tensor(tile)\n tile_fwd_t = self.forward(tile_t)\n # Replace tile.\n image_out[:, :, out_y:out_y + out_h, out_x:out_x + out_w] = tile_fwd_t.numpy()\n\n return image_out", "def test_generate_tiles(self):\n tile_list = utils.generate_tiles()\n self.assertEqual(len(tile_list), utils.TILE_COUNT)", "def num_tiles(self):\n return self.num_row_tiles * self.num_col_tiles", "def replicate(self, nx, ny, nz):\n contents_list = []\n numreplicate = 0\n for i in range(nx):\n for j in range(ny):\n for k in range(nz):\n new_df = self.contents.copy()\n new_df['X'] += i * self.lengthx\n new_df['Y'] += j * self.lengthy\n new_df['Z'] += k * self.lengthz\n contents_list.append(new_df)\n numreplicate += 1\n self.numatom *= numreplicate\n self.contents = pd.concat(contents_list)", "def tile_and_bind(s, tensor, y, x, y_factor, x_factor=None):\n x_factor = x_factor or y_factor\n yo, xo, yi, xi = s[tensor].tile(y, x, y_factor, x_factor)\n s[tensor].bind(xo, te.thread_axis(\"blockIdx.x\"))\n s[tensor].bind(xi, te.thread_axis(\"threadIdx.x\"))\n s[tensor].bind(yo, te.thread_axis(\"blockIdx.y\"))\n s[tensor].bind(yi, te.thread_axis(\"threadIdx.y\"))\n return yo, xo, yi, xi", "def tile(self, n, keys=None, axis=1, wrap_kwargs=None):\n tiled = reshape_fns.tile(self._obj, n, axis=axis)\n if keys is not None:\n if axis == 1:\n new_columns = index_fns.combine_indexes(keys, self.wrapper.columns)\n return tiled.vbt.wrapper.wrap(\n tiled.values, **merge_dicts(dict(columns=new_columns), wrap_kwargs))\n else:\n new_index = index_fns.combine_indexes(keys, self.wrapper.index)\n return tiled.vbt.wrapper.wrap(\n tiled.values, **merge_dicts(dict(index=new_index), wrap_kwargs))\n return tiled", "def tile_window(shape, px):\n nr, nc = shape\n lu = product(range(0, nc, px), range(0, nr, px))\n\n ## create the window\n for l, u in lu:\n h = min(px, nr - u)\n w = min(px, nc - l)\n yield (l, u, w, h)", "def image_tiles(bqsession, image_service_url, tile_size=64):\n dims = bqsession.fetchxml(image_service_url, dims='')\n x = int(dims.xpath('//tag[@name=\"image_num_x\"]')[0].attrib[ 'value'])\n y = int(dims.xpath('//tag[@name=\"image_num_y\"]')[0].attrib[ 'value'])\n \n for ix in range(int(x/tile_size)-1):\n for iy in range(int(y/tile_size)-1):\n yield bqsession.c.prepare_url(image_service_url, tile='0,%s,%s,%s' % (str(ix), str(iy), str(tile_size)))", "def draw(self):\n\n super().draw()\n \n self.dim = self.getdim()\n start_x, start_y, = self.x(), self.y()\n\n for y in range(self.r):\n for x in range(self.c):\n x_pos, y_pos = start_x + (self.dim * x), start_y + (self.dim * y)\n self.tiles[y][x].resize(x_pos, y_pos, self.dim, self.dim)" ]
[ "0.7995799", "0.7743472", "0.72251785", "0.72251785", "0.6892366", "0.68706846", "0.6489282", "0.63585883", "0.6300268", "0.6292242", "0.6080058", "0.606337", "0.60176843", "0.59804887", "0.5967274", "0.5953083", "0.5943857", "0.5932811", "0.59011155", "0.58835983", "0.58835024", "0.5882617", "0.58666354", "0.58633816", "0.58547026", "0.5822757", "0.580127", "0.57851195", "0.5777354", "0.5763576" ]
0.8115603
0
r""" Generate a square mask for the sequence. The masked positions are filled with float('inf'). Unmasked positions are filled with float(0.0).
def generate_square_subsequent_mask(sz: int, device: torch.device) -> torch.Tensor:
    mask = (torch.triu(torch.ones(sz, sz, device=device)) == 1).transpose(0, 1)
    mask = (
        mask.float()
        .masked_fill(mask == 0, float("-inf"))
        .masked_fill(mask == 1, float(0.0))
    )
    return mask
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _generate_square_subsequent_mask(self, sz: int):\n mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float(\n '-inf')).masked_fill(mask == 1, float(0.0))\n return mask", "def generate_square_subsequent_mask(self, sz: int) -> Tensor:\n\t\tmask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n\t\tmask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n\t\treturn mask", "def _generate_mask(self):\r\n mask = np.zeros((self.width, self.height), np.uint8)\r\n size = int((self.width + self.height) * 0.01)\r\n if self.width < 32 or self.height < 32:\r\n raise Exception(\"Width and Height of mask must be at least 64!\")\r\n for _ in range(randint(1,int(0.5*self.width))):\r\n x1 = randint(0, self.width-1)\r\n thickness = 1\r\n cv2.line(mask, (0, x1),(self.height-1, x1), 1, thickness)\r\n return 1 - mask", "def gen_square_subsequent_mask(sz: int) -> torch.Tensor:\n return torch.triu(torch.ones(sz, sz) * float('-inf'), diagonal=1)", "def generate_square_subsequent_mask(sz: int) -> Tensor:\n return torch.triu(torch.ones(sz, sz) * float('-inf'), diagonal=1)", "def fix_generate_mask(self):\r\n mask = np.zeros((self.width, self.height), np.uint8)\r\n size = int((self.width + self.height) * 0.01)\r\n if self.width < 32 or self.height < 32:\r\n raise Exception(\"Width and Height of mask must be at least 64!\")\r\n i = 0\r\n for _ in range(1, int(0.3 * self.width)):\r\n index_xline = random.sample(range(0, self.width), int(0.5 * self.width))\r\n x1 = index_xline[i]\r\n i += 1\r\n thickness = 1\r\n cv2.line(mask, (0, x1), (self.height - 1, x1), 1, thickness)\r\n\r\n return 1 - mask", "def __generate_mask(self):\n mask = np.concatenate([np.ones(len(self.fixed[0])),\n np.zeros(self.num_points),\n np.ones(len(self.fixed[1]))])\n return mask", "def mask_region(self, ypos, xpos, r):\r\n for j, i in product(np.arange(ypos - r, ypos + r + 1), np.arange(xpos - r, xpos + 1 + r)): # Create square\r\n if (j - ypos) ** 2 + (i - xpos) ** 2 <= r ** 2 and 0 <= j<= self.shapes[0] - 1 and 0<= i <=self.shapes[1] - 1:\r\n j = int(j)\r\n i = int(i)\r\n self.masked[j, i] = 0", "def Mask(self) -> int:", "def square_mask(ys_in_pad, ignore_id):\n ys_mask = (ys_in_pad != ignore_id).unsqueeze(-2)\n ymax = ys_mask.size(-1)\n ys_mask_tmp = ys_mask.transpose(1, 2).repeat(1, 1, ymax)\n ys_mask = ys_mask.repeat(1, ymax, 1) & ys_mask_tmp\n\n return ys_mask", "def mask(self):\n return np.ones((self.size, self.size))", "def mask(self):", "def generate_mask(sequence_length):\n if FLAGS.mask_strategy == 'random':\n mask = []\n for seq in range(FLAGS.batch_size):\n p = np.random.choice(\n [True, False],\n size=[sequence_length[seq]],\n p=[FLAGS.is_present_rate, 1. 
- FLAGS.is_present_rate])\n while p.size<FLAGS.sequence_length:\n p = np.append(p, np.array([True]))\n mask.append(p)\n p = np.array(mask)\n\n elif FLAGS.mask_strategy == 'contiguous':\n mask = []\n for seq in range(FLAGS.batch_size):\n masked_length = int((1 - FLAGS.is_present_rate) * sequence_length[seq]) - 1\n # Determine location to start masking.\n start_mask = np.random.randint(\n 1, sequence_length[seq] - masked_length + 1, size=None)\n p = np.full([sequence_length[seq]], True, dtype=bool)\n #print(masked_length)\n # Create contiguous masked section to be False.\n p[start_mask:start_mask + masked_length] = False\n #print(p)\n\n while p.size<FLAGS.sequence_length:\n #print(p.size, FLAGS.sequence_length)\n #input('maskk')\n p = np.append(p, np.array([True]))\n #print(p)\n mask.append(p)\n p = np.array(mask) \n else:\n raise NotImplementedError\n\n return p", "def make_square_mask(img, size, xy_center=None, angle=None):\n offset = 2 # from center\n xcen, ycen = img.shape[0] // 2, img.shape[1] // 2\n if xy_center is None: # use the middle of the image\n y, x = np.unravel_index(np.argmax(img), img.shape)\n xy_center = [x, y]\n # check if near edge\n if np.any([abs(x - xcen) > offset, abs(y - ycen) > offset]):\n print(\"Brightest star detected is far from the center.\")\n print(\"Aperture mask is placed at the center instead.\\n\")\n xy_center = [xcen, ycen]\n mask = np.zeros_like(img, dtype=bool)\n mask[ycen - size : ycen + size + 1, xcen - size : xcen + size + 1] = True\n # if angle:\n # #rotate mask\n # mask = rotate(mask, angle, axes=(1, 0), reshape=True, output=bool, order=0)\n return mask", "def filter_squares(sequences):\r\n for i in range(1, len(sequences) - 1):\r\n for j in range(1, len(sequences[0])):\r\n if all([sequences[i-1][j-1] == 0,\r\n sequences[i-1][j] == 1,\r\n sequences[i][j-1] == 1,\r\n sequences[i][j] == 0]):\r\n if r.random() > 0.5:\r\n sequences[i][j] = 1\r\n else:\r\n sequences[i-1][j] = 0\r\n return sequences", "def mask_tile(self):\n if self.size == 1:\n m = np.array([[0]])\n elif self.size ==2:\n m = np.array([[1,2],[4,3]])\n else:\n m = 9 * np.ones((self.size, self.size))\n m[0,0] = 1\n m[0,-1] = 2\n m[-1,-1] = 3\n m[-1,0] = 4 \n m[0,1:-1] = 5 * np.ones(self.size-2)\n m[1:-1,-1] = 6 * np.ones(self.size-2)\n m[-1,1:-1] = 7 * np.ones(self.size-2)\n m[1:-1,0] = 8 * np.ones(self.size-2)\n return m.astype(np.int8)", "def generate_square_subsequent_mask(dim1: int, dim2: int) -> Tensor:\n return torch.triu(torch.ones(dim1, dim2) * float('-inf'), diagonal=1)", "def mask(self, mask):\n return MaskedDistribution(self, mask)", "def mask_tokens(self, sequence):\n n_tokens = len(sequence)\n n_masked_tokens = int(self.masking_proportion*n_tokens/100)\n indexes = [random.randint(0, n_tokens-1) for i in range(n_masked_tokens)]\n while len(set(indexes))!=n_masked_tokens:\n indexes = [random.randint(0, n_tokens-1) for i in range(n_masked_tokens)]\n sequence = np.array(sequence)\n sequence[indexes] = 4\n return list(sequence)", "def cartesian_mask(self, shape, centred=False, uniform=True):\n\t\tR = self.R\n\t\tsample_n = self.sample_n\n\t\tif uniform:\n\t\t\tN, Nx, Ny = int(np.prod(shape[:-2])), shape[-2], shape[-1]\n\t\t\tn_lines = int(Nx / R)\n\t\t\t\n\t\t\tmask = np.zeros((N, Nx, Ny))\n\t\t\tfor i in range(N):\n\t\t\t\tidx = np.arange(0,Nx,R)\n\t\t\t\tmask[i, idx, :] = 1\n\t\t\t\t\n\t\t\tif sample_n:\n\t\t\t\tmask[:, Nx//2-sample_n//2:(Nx//2+sample_n//2),:] = 1\n \n\t\telse:\n\t\t\tN, Nx, Ny = int(np.prod(shape[:-2])), shape[-2], shape[-1]\n\t\t\tpdf_x = self.normal_pdf(Nx, 
0.5/(Nx/10.)**2)\n\t\t\tlmda = Nx/(2.*R)\n\t\t\tn_lines = int(Nx / R)\n \n # add uniform distribution\n\t\t\tpdf_x += lmda * 1./Nx\n \n\t\t\tif sample_n:\n\t\t\t\tpdf_x[Nx//2-sample_n//2:Nx//2+sample_n//2] = 0\n\t\t\t\tpdf_x /= np.sum(pdf_x)\n\t\t\t\tn_lines -= sample_n\n \n\t\t\tmask = np.zeros((N, Nx))\n\t\t\tfor i in range(N):\n\t\t\t\tidx = np.random.choice(Nx, n_lines, False, pdf_x)\n\t\t\t\tmask[i, idx] = 1\n \n\t\t\tif sample_n:\n\t\t\t\tmask[:, Nx//2-sample_n//2:Nx//2+sample_n//2] = 1\n \n\t\t\tsize = mask.itemsize\n\t\t\tmask = as_strided(mask, (N, Nx, Ny), (size * Nx, size, 0))\n \n\t\t\tmask = mask.reshape(shape)\n \n\t\tif not centred:\n\t\t\tmask = np.fft.ifftshift(mask, axes=(-1, -2))\n \n\t\treturn mask", "def mask(self):\n\n mask = np.zeros(shape=(self._info.height, self._info.width), dtype=np.uint8)\n\n self.draw(image=mask, color=constants.COLOR_WHITE_MONO)\n\n mask_with_border = np.pad(mask, 1, 'constant', constant_values=255)\n\n cv2.floodFill(image=mask,\n mask=mask_with_border,\n seedPoint=(int(self.middle_point[0]), int(self.middle_point[1])),\n newVal=constants.COLOR_WHITE_MONO)\n\n return mask", "def static_mask(self, frames):\r\n c, t, h, w = frames.shape\r\n rand_t = random.randint(0, t-1)\r\n mask_size_ratio = random.uniform(self.mask_size_ratio[0], self.mask_size_ratio[1])\r\n mask_size_x, mask_size_y = [int(w*mask_size_ratio), int(h*mask_size_ratio)]\r\n start_x = random.randint(0, w-mask_size_x)\r\n start_y = random.randint(0, h-mask_size_y)\r\n frames_out = frames[:, rand_t].unsqueeze(1).expand(-1, t, -1, -1).clone()\r\n frames_out[:, :, start_y:start_y+mask_size_y, start_x:start_x+mask_size_x] = frames[\r\n :, :, start_y:start_y+mask_size_y, start_x:start_x+mask_size_x\r\n ]\r\n return frames_out", "def mask(n):\n if n >= 0:\n return 2**n - 1\n else:\n return 0", "def mask(n):\n if n >= 0:\n return 2**n - 1\n else:\n return 0", "def test(shape=(1000,2000)):\n mask = Mask()\n mask.addCircle(400,300,250)\n mask.subtractCircle(400,300,150)\n mask.addRectangle(350,250,1500,700)\n plt.imshow( mask.getMask(shape) )\n return mask", "def getMask(self):\r\n mask = np.array(self.array, dtype=np.float32)\r\n mask[mask == 0] = np.nan\r\n return mask", "def mask(self):\n return ((2**(self.width) - 1) << self.lsb)", "def get_mask(self):\n w, h = self.rect.w, self.rect.h\n colorkey = (0, 0, 0)\n surface = pg.Surface((w, h))\n surface.set_colorkey(colorkey)\n # fill the surface with the spherical object\n color, center, radius = (255, 255, 255), self.rect.center, round(self.rect.w/2)\n pg.draw.circle(surface, color, center, radius)\n mask = pg.mask.from_surface(surface)\n return mask", "def mask(self):\n return list(self._mask_generator())", "def get_sample_mask(self):" ]
[ "0.7153673", "0.68876415", "0.6759619", "0.6754629", "0.6736664", "0.6734393", "0.6703915", "0.653634", "0.64891696", "0.6488388", "0.63707256", "0.62912", "0.60452783", "0.6013539", "0.6002349", "0.5912298", "0.59067696", "0.59046054", "0.59028065", "0.58848965", "0.5863874", "0.58383", "0.5836374", "0.5836374", "0.58066905", "0.5791868", "0.57741505", "0.5744578", "0.573433", "0.57157737" ]
0.7210654
0
Constructs an instance of ResourceSpec from yaml data.
def FromYaml(cls, yaml_data, api_version=None):
    if not yaml_data:
        return None
    collection = registry.GetAPICollection(
        yaml_data['collection'], api_version=api_version)
    attributes = ParseAttributesFromData(
        yaml_data.get('attributes'), collection.detailed_params)
    return cls(
        resource_collection=collection.full_name,
        resource_name=yaml_data['name'],
        api_version=collection.api_version,
        disable_auto_completers=yaml_data['disable_auto_completers'],
        plural_name=yaml_data.get('plural_name'),
        **{attribute.parameter_name: attribute for attribute in attributes})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def FromData(cls, data):\n if not data:\n return None\n\n attribute_name = data['attribute_name']\n parameter_name = data['parameter_name']\n help_text = data['help']\n completion_id_field = data.get('completion_id_field', None)\n completion_request_params_list = data.get('completion_request_params', [])\n completion_request_params = {\n param.get('fieldName'): param.get('value')\n for param in completion_request_params_list\n }\n\n # Add property fallthroughs.\n fallthroughs = []\n prop = properties.FromString(data.get('property', ''))\n if prop:\n fallthroughs.append(deps_lib.PropertyFallthrough(prop))\n default_config = DEFAULT_RESOURCE_ATTRIBUTE_CONFIGS.get(attribute_name)\n if default_config:\n fallthroughs += [\n f for f in default_config.fallthroughs if f not in fallthroughs]\n # Add fallthroughs from python hooks.\n fallthrough_data = data.get('fallthroughs', [])\n fallthroughs_from_hook = [\n deps_lib.Fallthrough(util.Hook.FromPath(f['hook']), hint=f['hint'])\n for f in fallthrough_data\n ]\n fallthroughs += fallthroughs_from_hook\n return cls(\n name=attribute_name,\n help_text=help_text,\n fallthroughs=fallthroughs,\n completion_id_field=completion_id_field,\n completion_request_params=completion_request_params,\n parameter_name=parameter_name)", "def from_yaml_documents(cls, component_yaml: str) -> 'ComponentSpec':\n\n def extract_description(component_yaml: str) -> Union[str, None]:\n heading = '# Description: '\n multi_line_description_prefix = '# '\n index_of_heading = 2\n if heading in component_yaml:\n description = component_yaml.splitlines()[index_of_heading]\n\n # Multi line\n comments = component_yaml.splitlines()\n index = index_of_heading + 1\n while comments[index][:len(multi_line_description_prefix\n )] == multi_line_description_prefix:\n description += '\\n' + comments[index][\n len(multi_line_description_prefix) + 1:]\n index += 1\n\n return description[len(heading):]\n else:\n return None\n\n pipeline_spec_dict, platform_spec_dict = load_documents_from_yaml(\n component_yaml)\n\n is_v1 = 'implementation' in set(pipeline_spec_dict.keys())\n if is_v1:\n v1_component = _load_component_spec_from_component_text(\n component_yaml)\n return cls.from_v1_component_spec(v1_component)\n else:\n component_spec = ComponentSpec.from_ir_dicts(\n pipeline_spec_dict, platform_spec_dict)\n if not component_spec.description:\n component_spec.description = extract_description(\n component_yaml=component_yaml)\n return component_spec", "def from_yaml(cls, y):\n return cls(yaml.load(y, AttrLoader))", "def from_yaml(cls, b):\n return cls.from_dict(yaml.safe_load(b))", "def parse_spec(inp_file):\n try:\n y_spec = yaml.load(inp_file, Loader=yaml.SafeLoader)\n spec = create_spec(y_spec)\n except jsonschema.exceptions.RefResolutionError:\n logging.error(\"Could not load specification. Check your network or try again\")\n raise err.BeaconTestError()\n except openapi_spec_validator.exceptions.OpenAPIValidationError:\n logging.error(\"Could not read specification. 
Check tat your file is valid\")\n raise err.BeaconTestError()\n return spec", "def load(cls, data: TextIO) -> \"OpenAPI\":\n return cls(yaml.safe_load(data))", "def from_dict(cls: Type[\"GraphSet\"], data: Dict[str, Any]) -> \"GraphSet\":\n resources: List[Resource] = []\n name = data[\"name\"]\n start_time = data[\"start_time\"]\n end_time = data[\"end_time\"]\n version = data[\"version\"]\n errors = data[\"errors\"]\n stats = MultilevelCounter.from_dict(data[\"stats\"])\n for resource_id, resource_data in data[\"resources\"].items():\n resource = Resource.from_dict(resource_id, resource_data)\n resources.append(resource)\n return cls(\n name=name,\n version=version,\n start_time=start_time,\n end_time=end_time,\n resources=resources,\n errors=errors,\n stats=stats,\n )", "def from_yaml(cls, yaml_string=None, filename=None, encoding='utf-8', errors='strict', loader=yaml.SafeLoader, **kwargs):\n bx_args = {}\n for arg in kwargs.copy():\n if arg in BOX_PARAMETERS:\n bx_args[arg] = kwargs.pop(arg)\n data = _from_yaml(yaml_string=yaml_string, filename=filename, encoding=encoding, errors=errors, Loader=loader, **kwargs)\n if not isinstance(data, dict):\n raise BoxError('yaml data not returned as a dictionarybut rather a {0}'.format(type(data).__name__))\n return cls(data, **bx_args)", "def from_yaml(cls, yml: str):\n\n return cls.from_dict(feast_yaml.yaml_loader(yml, load_single=True))", "def from_yaml(self, yaml):\n self.hwAddress = yaml.get('hwAddress')\n if self.hwAddress:\n self.hwAddress = self.hwAddress.lower()\n self.ip = yaml.get('IP')\n self.formulas = {}\n for f in yaml:\n if isinstance(yaml[f], dict):\n self.formulas[f] = yaml[f]\n\n self.hwtype = yaml.get('hwtype')", "def from_yaml(cls: Type[R], rkey: str, location: str, serialization: str) -> R:\n\n attrs = parse_yaml(serialization)\n\n return cls.from_dict(rkey, location, serialization, attrs)", "def from_yaml(c: Any, s: str, de: Type[Deserializer[str]] = YamlDeserializer, **opts: Any) -> Any:\n return from_dict(c, de.deserialize(s, **opts), reuse_instances=False)", "def _build_pod_spec(self):\n logger.debug(\"Building Pod Spec\")\n crds = []\n try:\n crds = [\n yaml.load(Path(f).read_text())\n for f in [\n \"files/configs.config.gatekeeper.sh.yaml\",\n \"files/constrainttemplates.templates.gatekeeper.sh.yaml\",\n \"files/constraintpodstatuses.status.gatekeeper.sh.yaml\",\n \"files/constrainttemplatepodstatuses.status.gatekeeper.sh.yaml\",\n ]\n ]\n except yaml.YAMLError as exc:\n logger.error(\"Error in configuration file:\", exc)\n\n crd_objects = [\n CustomResourceDefintion(crd[\"metadata\"][\"name\"], crd[\"spec\"])\n for crd in crds\n ]\n\n config = self.model.config\n spec_template = {}\n with open(\"files/pod-spec.yaml.jinja2\") as fh:\n spec_template = Template(fh.read())\n\n try:\n image_details = self.image.fetch()\n except OCIImageResourceError as e:\n self.model.unit.status = e.status\n return\n\n template_args = {\n \"crds\": crd_objects,\n \"image_details\": image_details,\n \"imagePullPolicy\": config[\"imagePullPolicy\"],\n \"app_name\": self.app.name,\n \"audit_cli_args\": self._audit_cli_args(),\n \"namespace\": os.environ[\"JUJU_MODEL_NAME\"],\n }\n\n spec = yaml.load(spec_template.render(**template_args))\n\n print(f\"Pod spec: {spec}\")\n return spec", "def from_yaml(self, content):\r\n if yaml is None:\r\n raise UnsupportedDeserializationFormat(\"Usage of the YAML aspects requires yaml.\")\r\n\r\n return yaml.load(content, Loader=DeliciousCakeLoader)", "def from_dict(cls, data):\n requirements = {k: v for k, 
v in data['params']['requirements'].items()}\n return cls(requirements)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n kms_key: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n schedule_config: Optional[pulumi.Input[pulumi.InputType['DataIntegrationScheduleConfigArgs']]] = None,\n source_uri: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n __props__=None):\n ...", "def createSpecWithMetadataInYamlAndUserdata(self):\n metadataYamlFilePath = os.path.join(os.path.dirname(\n os.path.realpath(__file__)),\n 'sample_metadata.yaml')\n userdataFilePath = os.path.join(os.path.dirname(\n os.path.realpath(__file__)),\n 'sample_userdata')\n with open(metadataYamlFilePath, \"r\") as fp:\n self.metadata = fp.read().rstrip('\\n')\n with open(userdataFilePath, \"r\") as fp:\n self.userdata = fp.read().rstrip('\\n')\n self.createCloudinitDataSpec('specWithMetadataInYamlAndUserdata',\n 'linux cloud-init data customization spec'\n 'with metadata in yaml format and '\n 'userdata')", "def load_spec(spec_file, caption_file=None, pandoc_processor=None):\n if isinstance(spec_file, Sequence) and not isinstance(spec_file, str):\n spec = spec_file\n else:\n try:\n with open(spec_file) as f:\n spec = yaml.safe_load(f.read())\n except TypeError as err:\n secho(str(err), err=True, fg=\"red\")\n raise err\n\n if caption_file is not None:\n captions = load_captions(caption_file)\n spec = list(integrate_captions(spec, captions))\n\n return spec", "def from_yaml_string(cls, yaml_string: Text, check_params=False):\n Params._check_yaml_import()\n import yaml\n\n lparams = yaml.safe_load(yaml_string)\n if check_params:\n return cls(**lparams)\n else:\n return cls.from_dict(lparams, return_instance=True, return_unused=False)", "def from_yaml(input_yaml: Dict) -> \"DBRevision\":\n return DBRevision(input_yaml[\"revision_name\"],\n set(input_yaml.get(\"dependencies\")),\n input_yaml[\"sql_text\"],\n input_yaml[\"active\"],\n input_yaml.get(\"description\"))", "def _from_yaml(cls, loader: yaml.Loader, node: yaml.Node) -> \"BotoError\":\n value = loader.construct_mapping(node, deep=True)\n return cls(value)", "def FromDict(resource, raw_config):\r\n resource.name = raw_config[\"name\"]\r\n resource.raw_config = raw_config", "def from_yaml_string(cls, string):\n return cls(_yaml_load(string))", "def __init__(self, specs, resources, properties=None):\n if not properties:\n properties = {}\n self.init_collections()\n self.properties = properties\n self.set_paths(specs, resources)\n self.parse_paths()\n self.find_resources()", "def from_json_spec(cls,\n name: str = None,\n k8s_name: str = None,\n run_name: str = None,\n json_spec: str = None):\n\n spec = json.loads(json_spec)\n return cls(name=name, k8s_name=k8s_name, run_name=run_name, spec=spec)", "def from_yaml(config_file : str, template=None):\n with open(config_file, \"r\") as stream:\n yaml_dict = yaml.load(stream, Loader=Loader)\n return Config(yaml_dict)", "def from_data(cls, data):\n self = object.__new__(cls)\n self.required = parse_required(data)\n self.title = parse_title(data)\n self.type = parse_type(data)\n self.values = parse_values(data)\n return self", "def from_yaml_file(cls, file_path: str) -> Config:\n return cls(**read_yaml_file(file_path))", "def LoadTPUResourceSpecs(custom_help=None):\n resource_file_contents = pkg_resources.GetResource(TPU_YAML_RESOURCE_PATH,\n 
'resources.yaml')\n if not resource_file_contents:\n raise calliope_exceptions.BadFileException(\n 'Resources not found in path [{}]'.format(TPU_YAML_RESOURCE_PATH))\n\n resource_dict = yaml.load(resource_file_contents)\n resource_specs = []\n for resource_name in TPU_YAML_SPEC_TEMPLATE:\n spec = resource_dict.get(resource_name, None)\n if not spec:\n raise ValueError(\n 'Resource spec [{}] not found in resource spec {}.yaml'.format(\n resource_name, TPU_YAML_RESOURCE_PATH))\n\n # Don't modify template\n temp_spec = copy.deepcopy(TPU_YAML_SPEC_TEMPLATE[resource_name])\n\n temp_spec['spec'] = spec\n if custom_help and custom_help.get(resource_name):\n temp_spec['help_text'] = custom_help[resource_name]\n resource_specs.append(resource_arg_schema.YAMLResourceArgument.FromData(\n temp_spec))\n return resource_specs", "def from_yaml(cls, yaml_filename):\n # TODO: Implement YAML validation routine?\n # Note - a try/catch block sin't really necessary here because this can\n # fail in two ways:\n # 1) IO error, which will probably be a FileNotFoundError\n # 2) YAML decoding error.\n logger.info(\"Reading Experiment configuration from {}\".format(\n yaml_filename\n ))\n with open(yaml_filename, \"rb\") as f:\n yaml_data = yaml.safe_load(f)\n\n exp_kwargs = yaml_data.copy()\n\n # Try to instantiate cases\n logger.debug(\"Reading case\")\n cases = []\n for case_short, case_kws in exp_kwargs['cases'].items():\n logger.debug(\" {}: {}\".format(case_short, case_kws))\n cases.append(Case(case_short, **case_kws))\n exp_kwargs['cases'] = cases\n\n # Create and return the Experiment\n exp = cls(**exp_kwargs)\n logger.debug(exp)\n\n return exp" ]
[ "0.6480427", "0.63688", "0.6167031", "0.61638653", "0.6083558", "0.6032994", "0.5951733", "0.59479475", "0.5927682", "0.5812073", "0.5791038", "0.5777207", "0.5646751", "0.5624482", "0.56070876", "0.5604127", "0.55825603", "0.5564748", "0.5553098", "0.55462116", "0.55418426", "0.5532589", "0.55139375", "0.55029076", "0.54868513", "0.54648066", "0.5451223", "0.544671", "0.544459", "0.5442906" ]
0.64526975
1
Initializes a ResourceSpec. To use a ResourceSpec, give a collection path such as 'cloudiot.projects.locations.registries', and optionally an API version. For each parameter in the collection path, an attribute is added to the resource spec. Names can be created by default or overridden in the attribute_configs dict, which maps from the parameter name to a ResourceParameterAttributeConfig object. ResourceParameterAttributeConfigs also contain information about the help text that describes the attribute.
def __init__(self, resource_collection, resource_name='resource',
             api_version=None, disable_auto_completers=True, plural_name=None,
             **kwargs):
  self._name = resource_name
  self.plural_name = plural_name
  self.collection = resource_collection
  self._resources = resources.REGISTRY.Clone()
  self._collection_info = self._resources.GetCollectionInfo(
      resource_collection, api_version=api_version)
  self.disable_auto_completers = disable_auto_completers
  collection_params = self._collection_info.GetParams('')
  self._attributes = []
  self._param_names_map = {}

  orig_kwargs = list(six.iterkeys(kwargs))
  # Add attributes.
  anchor = False
  for i, param_name in enumerate(collection_params):
    if i == len(collection_params) - 1:
      anchor = True
    attribute_config = kwargs.pop(param_name,
                                  ResourceParameterAttributeConfig())
    attribute_name = self._AttributeName(param_name, attribute_config,
                                         anchor=anchor)
    new_attribute = Attribute(
        name=attribute_name,
        help_text=attribute_config.help_text,
        required=True,
        fallthroughs=attribute_config.fallthroughs,
        completer=attribute_config.completer,
        value_type=attribute_config.value_type,
        completion_request_params=attribute_config.completion_request_params,
        completion_id_field=attribute_config.completion_id_field)
    self._attributes.append(new_attribute)
    # Keep a map from attribute names to param names. While attribute names
    # are used for error messaging and arg creation/parsing, resource parsing
    # during command runtime requires parameter names.
    self._param_names_map[new_attribute.name] = param_name
  if not self._attributes:
    raise ResourceConfigurationError('Resource [{}] has no parameters; no '
                                     'arguments will be generated'.format(
                                         self._name))
  if kwargs:
    raise ResourceConfigurationError('Resource [{}] was given an attribute '
                                     'config for unknown attribute(s): '
                                     'Expected [{}], Found [{}]'
                                     .format(self._name,
                                             ', '.join(collection_params),
                                             ', '.join(orig_kwargs)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, specs, resources, properties=None):\n if not properties:\n properties = {}\n self.init_collections()\n self.properties = properties\n self.set_paths(specs, resources)\n self.parse_paths()\n self.find_resources()", "def __init__(__self__,\n resource_name: str,\n args: Optional[ResourceDefaultVersionArgs] = None,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n kind: Optional[pulumi.Input[Union[str, 'Kind']]] = None,\n location: Optional[pulumi.Input[str]] = None,\n managed_network_group_name: Optional[pulumi.Input[str]] = None,\n managed_network_name: Optional[pulumi.Input[str]] = None,\n management_groups: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResourceIdArgs']]]]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n subnets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResourceIdArgs']]]]] = None,\n subscriptions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResourceIdArgs']]]]] = None,\n virtual_networks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ResourceIdArgs']]]]] = None,\n __props__=None):\n ...", "def __init__(__self__, resource_name, opts=None, block_device_mappings=None, capacity_reservation_specification=None, credit_specification=None, description=None, disable_api_termination=None, ebs_optimized=None, elastic_gpu_specifications=None, elastic_inference_accelerator=None, iam_instance_profile=None, image_id=None, instance_initiated_shutdown_behavior=None, instance_market_options=None, instance_type=None, kernel_id=None, key_name=None, license_specifications=None, monitoring=None, name=None, name_prefix=None, network_interfaces=None, placement=None, ram_disk_id=None, security_group_names=None, tag_specifications=None, tags=None, user_data=None, vpc_security_group_ids=None, __props__=None, __name__=None, __opts__=None):\n if __name__ is not None:\n warnings.warn(\"explicit use of __name__ is deprecated\", DeprecationWarning)\n resource_name = __name__\n if __opts__ is not None:\n warnings.warn(\"explicit use of __opts__ is deprecated, use 'opts' instead\", DeprecationWarning)\n opts = __opts__\n if opts is None:\n opts = pulumi.ResourceOptions()\n if not isinstance(opts, pulumi.ResourceOptions):\n raise TypeError('Expected resource options to be a ResourceOptions instance')\n if opts.version is None:\n opts.version = utilities.get_version()\n if opts.id is None:\n if __props__ is not None:\n raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')\n __props__ = dict()\n\n __props__['block_device_mappings'] = block_device_mappings\n __props__['capacity_reservation_specification'] = capacity_reservation_specification\n __props__['credit_specification'] = credit_specification\n __props__['description'] = description\n __props__['disable_api_termination'] = disable_api_termination\n __props__['ebs_optimized'] = ebs_optimized\n __props__['elastic_gpu_specifications'] = elastic_gpu_specifications\n __props__['elastic_inference_accelerator'] = elastic_inference_accelerator\n __props__['iam_instance_profile'] = iam_instance_profile\n __props__['image_id'] = image_id\n __props__['instance_initiated_shutdown_behavior'] = instance_initiated_shutdown_behavior\n __props__['instance_market_options'] = instance_market_options\n __props__['instance_type'] = instance_type\n __props__['kernel_id'] = kernel_id\n 
__props__['key_name'] = key_name\n __props__['license_specifications'] = license_specifications\n __props__['monitoring'] = monitoring\n __props__['name'] = name\n __props__['name_prefix'] = name_prefix\n __props__['network_interfaces'] = network_interfaces\n __props__['placement'] = placement\n __props__['ram_disk_id'] = ram_disk_id\n __props__['security_group_names'] = security_group_names\n __props__['tag_specifications'] = tag_specifications\n __props__['tags'] = tags\n __props__['user_data'] = user_data\n __props__['vpc_security_group_ids'] = vpc_security_group_ids\n __props__['arn'] = None\n __props__['default_version'] = None\n __props__['latest_version'] = None\n super(LaunchTemplate, __self__).__init__(\n 'aws:ec2/launchTemplate:LaunchTemplate',\n resource_name,\n __props__,\n opts)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n attributes: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n description: Optional[pulumi.Input[str]] = None,\n disable_status_check: Optional[pulumi.Input[bool]] = None,\n email: Optional[pulumi.Input[str]] = None,\n masters: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project_id: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n ttl: Optional[pulumi.Input[int]] = None,\n type: Optional[pulumi.Input[str]] = None,\n value_specs: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n access_config: Optional[pulumi.Input[pulumi.InputType['RuntimeAccessConfigArgs']]] = None,\n labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n location: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n request_id: Optional[pulumi.Input[str]] = None,\n runtime_id: Optional[pulumi.Input[str]] = None,\n software_config: Optional[pulumi.Input[pulumi.InputType['RuntimeSoftwareConfigArgs']]] = None,\n virtual_machine: Optional[pulumi.Input[pulumi.InputType['VirtualMachineArgs']]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n application_name: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n resource_group_id: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n config_bundle: Optional[pulumi.Input[str]] = None,\n detect_md5hash: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n org_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n api_management_id: Optional[pulumi.Input[str]] = None,\n display_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: RuntimeArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(self, api_groups=None, attribute_restrictions=None, non_resource_ur_ls=None, resource_names=None, resources=None, verbs=None):\n self.swagger_types = {\n 'api_groups': 'list[str]',\n 'attribute_restrictions': 'RuntimeRawExtension',\n 'non_resource_ur_ls': 'list[str]',\n 'resource_names': 
'list[str]',\n 'resources': 'list[str]',\n 'verbs': 'list[str]'\n }\n\n self.attribute_map = {\n 'api_groups': 'apiGroups',\n 'attribute_restrictions': 'attributeRestrictions',\n 'non_resource_ur_ls': 'nonResourceURLs',\n 'resource_names': 'resourceNames',\n 'resources': 'resources',\n 'verbs': 'verbs'\n }\n\n self._api_groups = api_groups\n self._attribute_restrictions = attribute_restrictions\n self._non_resource_ur_ls = non_resource_ur_ls\n self._resource_names = resource_names\n self._resources = resources\n self._verbs = verbs", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n name: Optional[pulumi.Input[str]] = None,\n name_prefix: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n user_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n client_id: Optional[pulumi.Input[str]] = None,\n client_secret: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n display_name: Optional[pulumi.Input[str]] = None,\n metadata_endpoint: Optional[pulumi.Input[str]] = None,\n opid: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n service_name: Optional[pulumi.Input[str]] = None,\n use_in_api_documentation: Optional[pulumi.Input[bool]] = None,\n use_in_test_console: Optional[pulumi.Input[bool]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n key: Optional[pulumi.Input[str]] = None,\n values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n config: Optional[pulumi.Input[pulumi.InputType['ConfigArgs']]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n application_id: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n display_name: Optional[pulumi.Input[str]] = None,\n source_resource_type: Optional[pulumi.Input[Union[str, 'ApplicationSourceResourceType']]] = None,\n __props__=None):\n ...", "def __init__(__self__, *,\n spec: Optional[pulumi.Input['InstanceSpecArgs']] = None):\n if spec is not None:\n pulumi.set(__self__, \"spec\", spec)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n etag: Optional[pulumi.Input[str]] = None,\n file_shares: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FileShareConfigArgs']]]]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n kms_key_name: Optional[pulumi.Input[str]] = None,\n labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n location: Optional[pulumi.Input[str]] = None,\n networks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NetworkConfigArgs']]]]] = None,\n project: Optional[pulumi.Input[str]] = None,\n tier: Optional[pulumi.Input['InstanceTier']] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: 
Optional[pulumi.ResourceOptions] = None,\n name: Optional[pulumi.Input[str]] = None,\n virtual_hub_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n type_name: Optional[pulumi.Input[str]] = None,\n type_version_arn: Optional[pulumi.Input[str]] = None,\n version_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n metadata: Optional[pulumi.Input[pulumi.InputType['SyntheticsPrivateLocationMetadataArgs']]] = None,\n name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n endpoint_id: Optional[pulumi.Input[str]] = None,\n labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n location: Optional[pulumi.Input[str]] = None,\n network: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n request_id: Optional[pulumi.Input[str]] = None,\n severity: Optional[pulumi.Input['EndpointSeverity']] = None,\n threat_exceptions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n traffic_logs: Optional[pulumi.Input[bool]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n certificate_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n ipv6_gateway_name: Optional[pulumi.Input[str]] = None,\n resource_group_id: Optional[pulumi.Input[str]] = None,\n spec: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n active: Optional[pulumi.Input[bool]] = None,\n annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n builtin: Optional[pulumi.Input[bool]] = None,\n checksum: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n external_id: Optional[pulumi.Input[str]] = None,\n labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n ui_url: Optional[pulumi.Input[str]] = None,\n url: Optional[pulumi.Input[str]] = None,\n whitelist_domains: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n discovery_spec: Optional[pulumi.Input[pulumi.InputType['ZoneDiscoverySpecArgs']]] = None,\n display_name: Optional[pulumi.Input[str]] = None,\n labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n lake: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n resource_spec: Optional[pulumi.Input[pulumi.InputType['ZoneResourceSpecArgs']]] = None,\n type: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def 
__init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n droplet_id: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def resource_spec(self) -> pulumi.Input['ZoneResourceSpecArgs']:\n return pulumi.get(self, \"resource_spec\")", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n block_size_bytes: Optional[pulumi.Input[int]] = None,\n container_id: Optional[pulumi.Input[str]] = None,\n disk_file_format: Optional[pulumi.Input[Union[str, 'DiskFileFormat']]] = None,\n disk_size_gb: Optional[pulumi.Input[float]] = None,\n dynamic: Optional[pulumi.Input[bool]] = None,\n extended_location: Optional[pulumi.Input[pulumi.InputType['ExtendedLocationArgs']]] = None,\n hyper_v_generation: Optional[pulumi.Input[Union[str, 'HyperVGeneration']]] = None,\n location: Optional[pulumi.Input[str]] = None,\n logical_sector_bytes: Optional[pulumi.Input[int]] = None,\n physical_sector_bytes: Optional[pulumi.Input[int]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n virtual_hard_disk_name: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n label: Optional[pulumi.Input[str]] = None,\n permissions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n __props__=None):\n ..." ]
[ "0.6471468", "0.5993948", "0.5986623", "0.5976555", "0.5975396", "0.5903518", "0.5798204", "0.5712617", "0.56915796", "0.5652143", "0.56374675", "0.56139106", "0.56022555", "0.5597308", "0.5595672", "0.5583856", "0.55816966", "0.5577824", "0.5571314", "0.5570658", "0.5567015", "0.5560553", "0.5546839", "0.554557", "0.5544707", "0.5544643", "0.55377024", "0.55339193", "0.55319893", "0.5530949" ]
0.7300264
0
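A note on the entry above: the __init__ shown builds one Attribute per parameter of the collection path, treats the last parameter as the anchor, and rejects keyword arguments that do not match a parameter. A minimal construction sketch follows; the parameter names for the cloudiot collection and the keyword names accepted by ResourceParameterAttributeConfig (name, help_text) are assumptions inferred from the attributes read in __init__, not taken verbatim from this dataset.

# Hypothetical usage sketch; see the assumptions noted above.
registry_spec = ResourceSpec(
    'cloudiot.projects.locations.registries',
    resource_name='registry',
    api_version='v1',
    # Keyword keys must match the collection parameters exactly, otherwise
    # __init__ raises ResourceConfigurationError for the unknown attribute.
    projectsId=ResourceParameterAttributeConfig(
        name='project', help_text='Project of the registry.'),
    locationsId=ResourceParameterAttributeConfig(
        name='region', help_text='Cloud region of the registry.'),
    registriesId=ResourceParameterAttributeConfig(
        name='registry', help_text='ID of the registry.'))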
A map from all attribute names to param names.
def attribute_to_params_map(self):
  return self._param_names_map
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_param_names(self):\n temp_params = {'function': self.function, 'target': self.target}\n\n temp_params.update(self.kwargs)\n\n return temp_params", "def _attrs_map(self) -> \"dict[int, str]\":\n return {i: attr.name for i, attr in enumerate(self._attrs())}", "def param_name_dict():\n\n layer = caffe_pb2.LayerParameter()\n # get all parameter names (typically underscore case) and corresponding\n # type names (typically camel case), which contain the layer names\n # (note that not all parameters correspond to layers, but we'll ignore that)\n param_names = [s for s in dir(layer) if s.endswith('_param')]\n param_type_names = [type(getattr(layer, s)).__name__ for s in param_names]\n # strip the final '_param' or 'Parameter'\n param_names = [s[:-len('_param')] for s in param_names]\n param_type_names = [s[:-len('Parameter')] for s in param_type_names]\n return dict(zip(param_type_names, param_names))", "def get_param_names(self):\n return list(self.params.keys())", "def _identifying_params(self) -> Mapping[str, Any]:\n return {**{\"model_name\": self.model_name}, **self._default_params}", "def parameters_dict(self):\n return dict(zip(self.parameters_names(), self.parameters_list))", "def get_params(self) -> dict:\n # initialize dictionary\n params = dict()\n\n # loop through parameters, adding to parameter dictionary\n for key in self._get_param_names():\n params[key] = getattr(self, key)\n\n return params", "def _create_param_dict(self, func_args):\n for i, a in enumerate(func_args):\n self.fn.args[i].name = str(a)\n self.param_dict[a] = self.fn.args[i]", "def get_params(self, deep=True):\n return {p: getattr(self, p) for p in self.params}", "def get_params(self):\n outputs = ['sample',\n 'ratio_params',\n 'despike_params',\n 'autorange_params',\n 'bkgcorrect_params']\n\n out = {}\n for o in outputs:\n out[o] = getattr(self, o)\n\n out['filter_params'] = self.filt.params\n out['filter_sequence'] = self.filt.sequence\n out['filter_used'] = self.filt.make_keydict()\n\n return out", "def get_likelihood_param_values(self):\n likelihood_param_values = {}\n for name in self.likelihood_params:\n likelihood_param_values[name] = getattr(self.model, name)\n return likelihood_param_values", "def attributes(self):\n params = self.model.param_array\n return {'parameters': params}", "def _get_parameters(self, *keys):\n return {k: v for k, v in self.param.items() if k in keys}", "def _get_param_names(self):\r\n return sorted([p\r\n for p in self.__dict__\r\n if p != 'additional_args'])", "def AttributeName(self, param_name):\n for attribute_name, p in six.iteritems(self.attribute_to_params_map):\n if p == param_name:\n return attribute_name", "def get_params(self, deep=True):\n if not hasattr(self, \"_non_sklearn_base\"):\n return super().get_params(deep)\n import inspect\n return {param.name:getattr(self, param.name) for param in inspect.signature(self.__init__).parameters.values()}", "def _get_parameter_dict(par):\n\n filter_func = lambda x: x[0:2] != '__' and x != '_numba_type_'\n attrs = list(filter(filter_func, par.__dir__()))\n\n par_dict = {}\n for attr in attrs:\n par_dict[attr] = par.__getattribute__(attr)\n\n return par_dict", "def ParamName(self, attribute_name):\n if attribute_name not in self.attribute_to_params_map:\n raise ValueError(\n 'No param name found for attribute [{}]. 
Existing attributes are '\n '[{}]'.format(attribute_name,\n ', '.join(sorted(self.attribute_to_params_map.keys()))))\n return self.attribute_to_params_map[attribute_name]", "def parameters(self):\n return [p for _, a in vars(self).items() for p in self._params(a)]", "def parameter_names(self) -> List[str]:", "def _hack_get_named_params(self):\n named_params = OrderedDict()\n params = self.get_parameters()\n if params:\n # See if we can hack to gether what the param names are\n unused = OrderedDict(sorted(self.__dict__.items()))\n for p in params:\n found = False\n for key, value in list(unused.items()):\n if p is value:\n named_params[key] = p\n unused.pop(key)\n found = True\n if not found:\n key = '__UNKNOWN_PARAM_NAME_{}__'.format(len(named_params))\n named_params[key] = p\n return named_params", "def get_parameters(self):\n params = {}\n for p in self.DEFAULT_VALUES.keys():\n params[p] = getattr(self, p)\n return params", "def get_params(self):\r\n param_names = ['aws_access_key_id', 'aws_secret_access_key',\r\n 'is_secure', 'port', 'proxy', 'proxy_port',\r\n 'proxy_user', 'proxy_pass',\r\n 'debug', 'https_connection_factory']\r\n params = {}\r\n for name in param_names:\r\n params[name] = getattr(self, name)\r\n return params", "def configuration(self) -> Dict[str, Any]:\n return {self.__class__.__qualname__: self._param_names}", "def get_str_param_names(self):\n # Exclude self.api and self.names from the command string\n return self.get_attribute_names(FormattedParameter)", "def parameters_names(cls):\n return cls._Parameters._fields", "def _getAttrMap(self):\r\n if not getattr(self, 'attrMap'):\r\n self.attrMap = {}\r\n for (key, value) in self.attrs:\r\n self.attrMap[key] = value\r\n return self.attrMap", "def _get_attribute_dic(self, attrs):\n attr_dic = {}\n for attr_pair in attrs:\n attr_dic[attr_pair[0]] = attr_pair[1]\n return attr_dic", "def parameters_list(self):\n return [getattr(self.parameters, p) for p in self.parameters_names()]", "def parameters(self) -> Mapping[str, str]:\n return pulumi.get(self, \"parameters\")" ]
[ "0.71225196", "0.70732814", "0.6803996", "0.6746649", "0.6736647", "0.6706346", "0.6622288", "0.65926117", "0.65598536", "0.64562553", "0.6440021", "0.6430904", "0.64086884", "0.6398749", "0.6383543", "0.63519055", "0.6318669", "0.6314571", "0.63041973", "0.63021564", "0.63005644", "0.62899417", "0.628183", "0.6259626", "0.6246907", "0.61942184", "0.613071", "0.61145157", "0.6113645", "0.6107139" ]
0.8711842
0
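For the registry_spec sketched earlier, the property in this entry simply hands back the mapping that __init__ accumulated, attribute name to API parameter name. The values below follow from that hypothetical construction and are illustrative only:

registry_spec.attribute_to_params_map
# {'project': 'projectsId', 'region': 'locationsId', 'registry': 'registriesId'}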
Chooses attribute name for a param name. If attribute_config gives an attribute name, that is used. Otherwise, if the param is an anchor attribute, 'name' is used, or if not, param_name is used.
def _AttributeName(self, param_name, attribute_config, anchor=False):
  if attribute_config.attribute_name:
    return attribute_config.attribute_name
  if anchor:
    return 'name'
  return param_name.replace('Id', '_id').lower()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AttributeName(self, param_name):\n for attribute_name, p in six.iteritems(self.attribute_to_params_map):\n if p == param_name:\n return attribute_name", "def ParamName(self, attribute_name):\n if attribute_name not in self.attribute_to_params_map:\n raise ValueError(\n 'No param name found for attribute [{}]. Existing attributes are '\n '[{}]'.format(attribute_name,\n ', '.join(sorted(self.attribute_to_params_map.keys()))))\n return self.attribute_to_params_map[attribute_name]", "def attribute_name(name: str) -> str:\n return text.snake_case(utils.safe_snake(name))", "def param_name(self, value):\n self._param_name = value", "def get_param(self, param_name, memo=None):\n # Cast param_name to str once, for convenience:\n # (This is needed because Parameter members are Enum objects,\n # which can't be used in place of string-valued indexes)\n param_name = str(param_name)\n explicit_attr = getattr(self, param_name)\n if explicit_attr is not None:\n return explicit_attr\n else:\n return self.build_param(param_name, memo=memo)", "def param_name(self):\n return self._param_name", "def rename_param(self, param, name):\n old_name = param.name\n new_name = self._get_unique_param_name(name, param.mode)\n \n param._name = new_name\n \n if param.mode == NodeParam.INPUT:\n self._input_params.pop(old_name)\n self._input_params[new_name] = param\n else:\n self._output_params.pop(old_name)\n self._output_params[new_name] = param\n \n return new_name", "def _get_one_param(self, param_name):\n return getattr(self, '__' + param_name)", "def attr_namer(name, renames=renames):\n if name in renames:\n return renames[name]\n return name", "def get_param_with_name(self, param_name):\n return self.params[param_name]", "def _get_unique_param_name(self, name, mode):\n _name = name\n inc = 1\n \n if mode == NodeParam.INPUT:\n existing_params = self._input_params\n else:\n existing_params = self._output_params\n \n while _name in existing_params:\n _name = \"%s%i\" % (name, inc) \n inc += 1 \n return _name", "def rewrite_attribute_name(name, default_namespace='html'):\n\n # Handle any namespaces (just in case someday we support XHTML)\n if ':' in name:\n ns, name = name.split(':', 1)\n elif '__' in name:\n ns, name = name.split('__', 1)\n elif name == 'xmlns':\n ns = ''\n else:\n ns = default_namespace\n\n name.replace('__', '-')\n if ns == 'html':\n # We have an HTML attribute, fix according to DTD\n if name == 'content_type': # MIME type such as in <a> and <link> elements\n name = 'type'\n elif name == 'content_id': # moin historical convention\n name = 'id'\n elif name in ('css_class', 'css'): # to avoid python word 'class'\n name = 'class'\n elif name.startswith('on'): # event handler hook\n name = name.lower()\n return ns, name", "def name(self, value):\n\n if value is not None:\n assert is_string(value), (\n ('\"{0}\" attribute: \"{1}\" is not a \"string\" like object!'\n ).format('name', value))\n self._name = value", "def Attribute(name, value=None):\r\n if value:\r\n return '{}=\"{}\"'.format(name, value)\r\n else:\r\n return ''", "def get_param(self, param_name):\n if hasattr(self, param_name):\n return getattr(self, param_name)\n else:\n return None", "def getAttrName(self, context):\r\n return self.attr if self.attr is not None else context.attr", "def setName(self, *args):\n return _libsbml.Parameter_setName(self, *args)", "def __getattr__(self, name):\n #TODO: Make all parameters case-insensitive.\n try:\n return super(\n JobSubmission,\n self).__getattribute__('params')[str(name)]\n\n except 
KeyError:\n raise AttributeError(\"'JobSubmission' object has no attribute or \"\n \"parameter: {atr}\".format(atr=name))", "def set_name(self, a_name):\n self.set_parameter('name', a_name)\n return self", "def getattr(self, name, *default):\n for attr in self.attributes:\n if attr.name.lower() == name.lower():\n return attr\n else:\n if default:\n return default[0]\n raise KeyError(name)", "def get_name(self) -> str:\n # read the original value passed by the command\n name = self.raw_param.get(\"name\")\n\n # this parameter does not need dynamic completion\n # this parameter does not need validation\n return name", "def _get_custom_attribute_field_name(self, attribute):\n return 'attribute_{0}'.format(attribute.id)", "def __getattr__(self,attributeName):\n attributeValue = None\n if (attributeName in StackParameterNames):\n attributeValue = StackParameters.get(attributeName)\n else:\n raise AttributeError(\"%s is not a StackParameterName\" % attributeName)\n #endIf\n \n return attributeValue", "def get_param_filter_name(cls, param, rel=None):\n # check for empty param\n if not param:\n return param\n\n # strip the rel prefix from the param name.\n prefix = '%s%s' % (rel or '', LOOKUP_SEP)\n if rel and param.startswith(prefix):\n param = param[len(prefix):]\n\n # Attempt to match against filters with lookups first. (username__endswith)\n if param in cls.base_filters:\n return param\n\n # Attempt to match against exclusion filters\n if param[-1] == '!' and param[:-1] in cls.base_filters:\n return param[:-1]\n\n # Match against relationships. (author__username__endswith).\n # Preference more specific filters. eg, `note__author` over `note`.\n for name in reversed(sorted(cls.related_filters)):\n # we need to match against '__' to prevent eager matching against\n # like names. eg, note vs note2. Exact matches are handled above.\n if param.startswith(\"%s%s\" % (name, LOOKUP_SEP)):\n return name", "def apply_mapping_attr_name(self, attr_name, domain_taxon_set, range_taxon_set=None):\n return self.apply_mapping_func(lambda x: getattr(x, attr_name), domain_taxon_set=domain_taxon_set, range_taxon_set=range_taxon_set)", "def get_name(self, ):\n return self.get_parameter('name')", "def get_attribute(self, name):\n\n pass", "def getName(self):\n return _libsbml.Parameter_getName(self)", "def get_attribute_by_name(attributes, attributeName):\n for attrib in attributes:\n if attrib['name'] == attributeName:\n return attrib\n return None", "def getParam(self, params, name):\n return params.get(name)" ]
[ "0.7922777", "0.7051831", "0.6452999", "0.6426694", "0.61500454", "0.60328346", "0.601027", "0.5978671", "0.585918", "0.58558106", "0.57965505", "0.5747554", "0.57251555", "0.57141495", "0.5692946", "0.56223017", "0.5602715", "0.5579822", "0.55760527", "0.5572492", "0.556937", "0.55475366", "0.55373555", "0.5475503", "0.54437417", "0.54373884", "0.5429453", "0.54090613", "0.5401365", "0.5396753" ]
0.8635494
0
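The three branches of _AttributeName can be exercised in isolation; the snippet below re-implements the default-naming rule as a standalone function purely for illustration, mirroring the override/anchor/snake-case logic shown above.

def default_attribute_name(param_name, anchor=False, override=None):
    # Explicit override wins; the anchor parameter collapses to 'name';
    # everything else is snake_cased by turning 'Id' into '_id'.
    if override:
        return override
    if anchor:
        return 'name'
    return param_name.replace('Id', '_id').lower()

assert default_attribute_name('projectsId') == 'projects_id'
assert default_attribute_name('registriesId', anchor=True) == 'name'
assert default_attribute_name('locationsId', override='region') == 'region'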
Given an attribute name, gets the param name for resource parsing.
def ParamName(self, attribute_name):
  if attribute_name not in self.attribute_to_params_map:
    raise ValueError(
        'No param name found for attribute [{}]. Existing attributes are '
        '[{}]'.format(attribute_name,
                      ', '.join(sorted(self.attribute_to_params_map.keys()))))
  return self.attribute_to_params_map[attribute_name]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AttributeName(self, param_name):\n for attribute_name, p in six.iteritems(self.attribute_to_params_map):\n if p == param_name:\n return attribute_name", "def _AttributeName(self, param_name, attribute_config, anchor=False):\n if attribute_config.attribute_name:\n return attribute_config.attribute_name\n if anchor:\n return 'name'\n return param_name.replace('Id', '_id').lower()", "def get_param_with_name(self, param_name):\n return self.params[param_name]", "def _get_one_param(self, param_name):\n return getattr(self, '__' + param_name)", "def get_param(self, param_name, memo=None):\n # Cast param_name to str once, for convenience:\n # (This is needed because Parameter members are Enum objects,\n # which can't be used in place of string-valued indexes)\n param_name = str(param_name)\n explicit_attr = getattr(self, param_name)\n if explicit_attr is not None:\n return explicit_attr\n else:\n return self.build_param(param_name, memo=memo)", "def get_param(self, param_name):\n if hasattr(self, param_name):\n return getattr(self, param_name)\n else:\n return None", "def __getattr__(self, attribute):\n return self.parameters.get(attribute, None)", "def param_name(self):\n return self._param_name", "def get_attribute_by_name(attributes, attributeName):\n for attrib in attributes:\n if attrib['name'] == attributeName:\n return attrib\n return None", "def getParam(self, params, name):\n return params.get(name)", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", six.ensure_str(param_name))\n if m is not None:\n param_name = m.group(1)\n return param_name", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", six.ensure_str(param_name))\n if m is not None:\n param_name = m.group(1)\n return param_name", "def __getattr__(self,attributeName):\n attributeValue = None\n if (attributeName in StackParameterNames):\n attributeValue = StackParameters.get(attributeName)\n else:\n raise AttributeError(\"%s is not a StackParameterName\" % attributeName)\n #endIf\n \n return attributeValue", "def _get_arg_name(self, arg, variable_name):", "def _get_variable_name(param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def find_attribute_id(self, 
attribute_name):\n attribute_id = None\n pr = self.layer.dataProvider()\n for field_id, field in enumerate(pr.fields()):\n if field.name() == attribute_name:\n attribute_id = field_id\n return attribute_id\n # In case the attribute has not been found, raise exception\n raise AttributeError('Attribute name %s not found' % attribute_name)", "def get_attr(attributes, name):\n try:\n return attributes.getValue(name)\n except KeyError:\n return None", "def get_arg(self, name):\n return getattr(self.args, f\"{self.key}_{self.alias}_{name}\")", "def input_param(self, name):\n return self._input_params.get(name, None)", "def attribute_name(name: str) -> str:\n return text.snake_case(utils.safe_snake(name))", "def get_attribute(self, attribute: str) -> str:\n pass", "def get_param_name(self, param_id, syselem):\n\n with self.__connection.cursor() as cursor:\n query = \"SELECT NAME FROM %s WHERE PID= '%s' AND SYSTEM_ELEMENT= '%s'\" % (self.__schema, param_id, syselem)\n cursor.execute(query)\n result = cursor.fetchone()\n return result['NAME']" ]
[ "0.7986248", "0.75536495", "0.6849631", "0.66188323", "0.6480736", "0.639842", "0.6344476", "0.6323198", "0.6190031", "0.61724794", "0.61705315", "0.61705315", "0.61705315", "0.61705315", "0.61705315", "0.6147806", "0.6147806", "0.61333215", "0.6112564", "0.6066664", "0.6046501", "0.6046501", "0.6046501", "0.60303175", "0.59845567", "0.597347", "0.5972449", "0.596699", "0.596658", "0.5916794" ]
0.7948914
1
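ParamName is the strict direction of the lookup: asking for an attribute the spec does not define raises a ValueError that lists the attributes that do exist. A hypothetical session using the registry_spec sketched earlier:

registry_spec.ParamName('registry')   # -> 'registriesId'
registry_spec.ParamName('project')    # -> 'projectsId'
registry_spec.ParamName('zone')
# ValueError: No param name found for attribute [zone]. Existing attributes
# are [project, region, registry]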
Given a param name, gets the attribute name.
def AttributeName(self, param_name):
  for attribute_name, p in six.iteritems(self.attribute_to_params_map):
    if p == param_name:
      return attribute_name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _AttributeName(self, param_name, attribute_config, anchor=False):\n if attribute_config.attribute_name:\n return attribute_config.attribute_name\n if anchor:\n return 'name'\n return param_name.replace('Id', '_id').lower()", "def ParamName(self, attribute_name):\n if attribute_name not in self.attribute_to_params_map:\n raise ValueError(\n 'No param name found for attribute [{}]. Existing attributes are '\n '[{}]'.format(attribute_name,\n ', '.join(sorted(self.attribute_to_params_map.keys()))))\n return self.attribute_to_params_map[attribute_name]", "def get_param_with_name(self, param_name):\n return self.params[param_name]", "def _get_one_param(self, param_name):\n return getattr(self, '__' + param_name)", "def get_param(self, param_name, memo=None):\n # Cast param_name to str once, for convenience:\n # (This is needed because Parameter members are Enum objects,\n # which can't be used in place of string-valued indexes)\n param_name = str(param_name)\n explicit_attr = getattr(self, param_name)\n if explicit_attr is not None:\n return explicit_attr\n else:\n return self.build_param(param_name, memo=memo)", "def param_name(self):\n return self._param_name", "def attribute_name(name: str) -> str:\n return text.snake_case(utils.safe_snake(name))", "def get_param(self, param_name):\n if hasattr(self, param_name):\n return getattr(self, param_name)\n else:\n return None", "def getAttrName(self, *args):\n return _libsbml.XMLToken_getAttrName(self, *args)", "def get_attribute(self, name):\n\n pass", "def getParam(self, params, name):\n return params.get(name)", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def get_param_name(self, param_id, syselem):\n\n with self.__connection.cursor() as cursor:\n query = \"SELECT NAME FROM %s WHERE PID= '%s' AND SYSTEM_ELEMENT= '%s'\" % (self.__schema, param_id, syselem)\n cursor.execute(query)\n result = cursor.fetchone()\n return result['NAME']", "def getAttribute(self, name):\n \n return self[self._name][name]", "def __getattr__(self,attributeName):\n attributeValue = None\n if (attributeName in StackParameterNames):\n attributeValue = StackParameters.get(attributeName)\n else:\n raise AttributeError(\"%s is not a StackParameterName\" % attributeName)\n #endIf\n \n return attributeValue", "def getattribute(self, name):\n return self.attributes[name]", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", six.ensure_str(param_name))\n if m is not None:\n param_name = m.group(1)\n return param_name", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", six.ensure_str(param_name))\n if m is not None:\n param_name = m.group(1)\n return param_name", "def get_attribute_by_name(attributes, attributeName):\n for attrib in attributes:\n if attrib['name'] == attributeName:\n return 
attrib\n return None", "def get_attribute_by_name(self, name):\n if name in self._attributes:\n return self._attributes[name]", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def _get_variable_name(self, param_name):\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name", "def get(self, attrname):\n return self.__dict__['_'+attrname]", "def getAttrName(self, context):\r\n return self.attr if self.attr is not None else context.attr", "def find_attribute_id(self, attribute_name):\n attribute_id = None\n pr = self.layer.dataProvider()\n for field_id, field in enumerate(pr.fields()):\n if field.name() == attribute_name:\n attribute_id = field_id\n return attribute_id\n # In case the attribute has not been found, raise exception\n raise AttributeError('Attribute name %s not found' % attribute_name)" ]
[ "0.8341303", "0.7594844", "0.7139493", "0.7003457", "0.6891393", "0.6869919", "0.68379277", "0.6723565", "0.67190063", "0.6652686", "0.6646669", "0.64897734", "0.64897734", "0.64897734", "0.64897734", "0.64897734", "0.648274", "0.6474743", "0.646516", "0.64248013", "0.64187026", "0.64187026", "0.6398612", "0.63944167", "0.6354959", "0.6354959", "0.6354959", "0.6318829", "0.6318288", "0.6271294" ]
0.89631
0
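AttributeName is the reverse lookup, and because it only returns inside the matching branch it falls off the end and yields None for a parameter the spec does not know, rather than raising. For known parameters the two lookups round-trip; again this uses the hypothetical spec from the earlier sketch:

for attr in ('project', 'region', 'registry'):
    assert registry_spec.AttributeName(registry_spec.ParamName(attr)) == attr

assert registry_spec.AttributeName('zonesId') is None  # silent miss, no error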
Initializes a resource given its fallthroughs. If the attributes have a property or arg fallthrough but the full resource name is provided to the anchor attribute flag, the information from the resource name is used over the properties and args. This preserves typical resource parsing behavior in existing surfaces.
def Initialize(self, fallthroughs_map, parsed_args=None):
  params = {}

  # Returns a function that can be used to parse each attribute, which will be
  # used only if the resource parser does not receive a fully qualified
  # resource name.
  def LazyGet(name):
    f = lambda: deps_lib.Get(name, fallthroughs_map, parsed_args=parsed_args)
    return f

  for attribute in self.attributes:
    params[self.ParamName(attribute.name)] = LazyGet(attribute.name)
  self._resources.RegisterApiByName(self._collection_info.api_name,
                                    self._collection_info.api_version)
  try:
    return self._resources.Parse(
        deps_lib.Get(
            self.anchor.name, fallthroughs_map, parsed_args=parsed_args),
        collection=self.collection,
        params=params)
  except deps_lib.AttributeNotFoundError as e:
    raise InitializationError(
        'The [{}] resource is not properly specified.\n'
        '{}'.format(self.name, six.text_type(e)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(__self__,\n resource_name: str,\n args: AclBindingRuleArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: Optional[AclArgs] = None,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: AclRuleArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(self, resource, *args):\n self.args = list(args)\n self.flags = OrderedDict()\n self.additional_flags = []\n self._AddCommonFlags(resource)", "def __init__(__self__,\n resource_name: str,\n args: RuleArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__, *,\n non_resource_attributes: Optional[pulumi.Input['NonResourceAttributesArgs']] = None,\n resource_attributes: Optional[pulumi.Input['ResourceAttributesArgs']] = None):\n if non_resource_attributes is not None:\n pulumi.set(__self__, \"non_resource_attributes\", non_resource_attributes)\n if resource_attributes is not None:\n pulumi.set(__self__, \"resource_attributes\", resource_attributes)", "def FromData(cls, data):\n if not data:\n return None\n\n attribute_name = data['attribute_name']\n parameter_name = data['parameter_name']\n help_text = data['help']\n completion_id_field = data.get('completion_id_field', None)\n completion_request_params_list = data.get('completion_request_params', [])\n completion_request_params = {\n param.get('fieldName'): param.get('value')\n for param in completion_request_params_list\n }\n\n # Add property fallthroughs.\n fallthroughs = []\n prop = properties.FromString(data.get('property', ''))\n if prop:\n fallthroughs.append(deps_lib.PropertyFallthrough(prop))\n default_config = DEFAULT_RESOURCE_ATTRIBUTE_CONFIGS.get(attribute_name)\n if default_config:\n fallthroughs += [\n f for f in default_config.fallthroughs if f not in fallthroughs]\n # Add fallthroughs from python hooks.\n fallthrough_data = data.get('fallthroughs', [])\n fallthroughs_from_hook = [\n deps_lib.Fallthrough(util.Hook.FromPath(f['hook']), hint=f['hint'])\n for f in fallthrough_data\n ]\n fallthroughs += fallthroughs_from_hook\n return cls(\n name=attribute_name,\n help_text=help_text,\n fallthroughs=fallthroughs,\n completion_id_field=completion_id_field,\n completion_request_params=completion_request_params,\n parameter_name=parameter_name)", "def __init__(self, client, href=None, resource=None):\n self.client = client\n self.href = href\n self.resource = resource\n if resource is not None:\n self.href = resource.get('href')\n self.name = resource.get('name')", "def __init__(self, resource_collection, resource_name='resource',\n api_version=None, disable_auto_completers=True, plural_name=None,\n **kwargs):\n self._name = resource_name\n self.plural_name = plural_name\n self.collection = resource_collection\n self._resources = resources.REGISTRY.Clone()\n self._collection_info = self._resources.GetCollectionInfo(\n resource_collection, api_version=api_version)\n self.disable_auto_completers = disable_auto_completers\n collection_params = self._collection_info.GetParams('')\n self._attributes = []\n self._param_names_map = {}\n\n orig_kwargs = list(six.iterkeys(kwargs))\n # Add attributes.\n anchor = False\n for i, param_name in enumerate(collection_params):\n if i == len(collection_params) - 1:\n anchor = True\n attribute_config = kwargs.pop(param_name,\n ResourceParameterAttributeConfig())\n attribute_name = self._AttributeName(param_name, attribute_config,\n anchor=anchor)\n new_attribute = 
Attribute(\n name=attribute_name,\n help_text=attribute_config.help_text,\n required=True,\n fallthroughs=attribute_config.fallthroughs,\n completer=attribute_config.completer,\n value_type=attribute_config.value_type,\n completion_request_params=attribute_config.completion_request_params,\n completion_id_field=attribute_config.completion_id_field)\n self._attributes.append(new_attribute)\n # Keep a map from attribute names to param names. While attribute names\n # are used for error messaging and arg creation/parsing, resource parsing\n # during command runtime requires parameter names.\n self._param_names_map[new_attribute.name] = param_name\n if not self._attributes:\n raise ResourceConfigurationError('Resource [{}] has no parameters; no '\n 'arguments will be generated'.format(\n self._name))\n if kwargs:\n raise ResourceConfigurationError('Resource [{}] was given an attribute '\n 'config for unknown attribute(s): '\n 'Expected [{}], Found [{}]'\n .format(self._name,\n ', '.join(collection_params),\n ', '.join(orig_kwargs)))", "def __init__(__self__,\n resource_name: str,\n args: BundleArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: MetaTagArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: EnvironmentArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: RuntimeArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: AssessmentArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: TagArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: FlowArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: ImageInitArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n direction: Optional[pulumi.Input[str]] = None,\n dry_run: Optional[pulumi.Input[bool]] = None,\n listener_id: Optional[pulumi.Input[str]] = None,\n priority: Optional[pulumi.Input[int]] = None,\n rule_actions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RuleRuleActionArgs']]]]] = None,\n rule_conditions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RuleRuleConditionArgs']]]]] = None,\n rule_name: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: PrefixArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n account_alias: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: WorkflowArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: AccessConfigurationArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(self, resource):\n if resource:\n self.__resource = str(os.path.realpath(str(resource)));\n else:\n self.__resource = '';", "def __init__(__self__,\n resource_name: str,\n args: Optional[InstanceArgs] = None,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: 
RouteMapArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n disable_bgp_route_propagation: Optional[pulumi.Input[bool]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n routes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RouteTableRouteArgs']]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n acl_id: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n dest_cidr: Optional[pulumi.Input[str]] = None,\n dest_port_range: Optional[pulumi.Input[str]] = None,\n direction: Optional[pulumi.Input[str]] = None,\n ip_protocol: Optional[pulumi.Input[str]] = None,\n policy: Optional[pulumi.Input[str]] = None,\n priority: Optional[pulumi.Input[int]] = None,\n source_cidr: Optional[pulumi.Input[str]] = None,\n source_port_range: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(self, resource_path):\n self.resource_path = resource_path", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n definition: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n role_arn: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: ConnectorMappingArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ..." ]
[ "0.66332495", "0.6537106", "0.65020514", "0.6459604", "0.62304676", "0.6204393", "0.6201695", "0.618227", "0.61458963", "0.6075082", "0.60665816", "0.6006086", "0.5999377", "0.5999337", "0.5999146", "0.59941", "0.59921694", "0.5991608", "0.5982053", "0.59734714", "0.5961451", "0.5955558", "0.59505904", "0.5946522", "0.59368753", "0.5932529", "0.59289914", "0.5918373", "0.591313", "0.5911476" ]
0.69918567
0
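Initialize wires each attribute to a lazy getter over its fallthroughs and lets the resource registry's Parse do the rest, which is how a fully qualified name supplied for the anchor can win over property and flag fallthroughs. The shape of the fallthroughs_map it expects is sketched below; the fallthrough classes appear elsewhere in these entries, but the concrete property name, hints, and values are made-up assumptions.

# Hypothetical map: attribute name -> ordered list of fallthroughs to try.
fallthroughs_map = {
    'project': [deps_lib.PropertyFallthrough(properties.FromString('core/project'))],
    'region': [deps_lib.Fallthrough(lambda: 'us-central1', hint='default region')],
    'registry': [deps_lib.Fallthrough(lambda: args.registry, hint='--registry flag')],
}
registry_ref = registry_spec.Initialize(fallthroughs_map, parsed_args=args)
# If args.registry held a full name such as
# 'projects/p/locations/us-central1/registries/r', Parse() takes every piece
# from that name and the project/region fallthroughs are never consulted.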
Helper for parsing a list of results from a plural fallthrough.
def _ParseFromPluralValue(self, attribute_to_args_map, base_fallthroughs_map,
                          plural_attribute, parsed_args):
  attribute_name = plural_attribute.name
  fallthroughs_map = self.BuildFullFallthroughsMap(
      attribute_to_args_map, base_fallthroughs_map, plural=True,
      with_anchor_fallthroughs=False)
  current_fallthroughs = fallthroughs_map.get(attribute_name, [])
  # Iterate through the values provided to the argument, creating for
  # each a separate parsed resource.
  parsed_resources = []
  for fallthrough in current_fallthroughs:
    try:
      values = fallthrough.GetValue(parsed_args)
    except deps_lib.FallthroughNotFoundError:
      continue
    for value in values:
      def F(return_value=value):
        return return_value
      new_fallthrough = deps_lib.Fallthrough(
          F, fallthrough.hint, active=fallthrough.active)
      fallthroughs_map[attribute_name] = [new_fallthrough]
      # Add the anchor fallthroughs for this particular value, so that the
      # error messages will contain the appropriate hints.
      self._AddAnchorFallthroughs(plural_attribute, fallthroughs_map)
      parsed_resources.append(
          self.Initialize(
              fallthroughs_map, parsed_args=parsed_args))
  return parsed_resources
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plural(record, sequence, zero=None, one=None, two=None, more=''):\n\n l = len (sequence(record))\n \n if l == 0 and zero is not None:\n return zero(record)\n elif l == 1 and one is not None:\n return one(record)\n elif l == 2 and two is not None:\n return two(record)\n else:\n return more(record)", "def sentence_parse(list_of_posts): \n for parsedPosts in nlp.pipe(line_review(list_of_posts)):\n for sent in parsedPosts.sents:\n yield str(sent)", "def parse_results_from_list(retrieved_l):\n for l in retrieved_l:\n #fields = l.split()\n query_name = l[0]\n ranks = [int(rank) for rank in l[1::2]]\n yield (query_name, list(zip(ranks, l[2::2])) )", "def merge_singular_plural(ctx):\n asyncio.run(merge_singular_plural_impl(ctx.obj[\"config\"]))", "def plural(num, one, many):\n\n return \"%i %s\" % (num, one if num == 1 else many)", "def lf_findall_abnormal_interp(report):\n\n if 'interpretation' in report.sections.keys():\n interpretation = report.sections['interpretation']\n interp_text = interpretation['text']\n return abnormal_interp_in_text(interp_text)\n else:\n candtext = get_section_with_name(SEIZURE_SECTION_NAMES_LOWER, report)\n if candtext:\n return abnormal_interp_in_text(candtext)\n else:\n return ABSTAIN_VAL", "def pluralisation(self, plural):\n return \"item\"", "def parse_list(cls, data):\n results = ResultSet()\n data = data or []\n for obj in data:\n if obj:\n results.append(cls.parse(obj))\n return results", "def parse(name: unicode) -> List[unicode]:\n ...", "def parser(sent_list): #input: list of sentences", "def parse_search_results(search_results):\n return \", \".join(search_results)", "def lf_abnormal_interp(report):\n if 'interpretation' in report.sections.keys():\n interpretation = report.sections['interpretation']\n interp_text = interpretation['text']\n return abnormal_interp_in_text(interp_text)\n elif 'summary' in report.sections:\n return abnormal_interp_in_text(report.sections['summary']['text'])\n elif 'findings' in report.sections: # fall back to look in the findings \n if 'summary' in report.sections['findings']: # fall back to look for a summary instead\n return abnormal_interp_in_text(report.sections['findings']['summary'])\n if 'impression' in report.sections['findings']:\n return abnormal_interp_in_text(report.sections['findings']['impression'])\n return ABSTAIN_VAL\n elif 'narrative' in report.sections: # fall back to look in the findings \n ky = 'narrative'\n if 'summary' in report.sections[ky]: # fall back to look for a summary instead\n return abnormal_interp_in_text(report.sections[ky]['summary'])\n if 'impression' in report.sections[ky]:\n return abnormal_interp_in_text(report.sections[ky]['impression'])\n # could try running on all of findings but would really need to check then\n \n return ABSTAIN_VAL \n else:\n return ABSTAIN_VAL", "def _format_response(self, response):\n texts = []\n for result in response.results: \n texts.append(result.alternatives[0].transcript)\n return texts", "def test_list_representation(self):\n \n lr = ['- L1\\n- L2\\n- L3',\n 'text\\n- L1\\n- L2\\ntext\\n- L3',\n '* H\\n- L1\\n - L2\\n** H\\n- L3',\n ' - L1\\n - L2\\n - L3',\n '- L1\\n - L2\\n - L3'\n ]\n\n for l in lr:\n self.assertEqual(l, str(parser.parse(l)))", "def generate_results_string(player_list, singular_result, plural_result):\n string = \"\"\n plural = len(player_list) > 1\n player_number = 1\n if len(player_list) != 0:\n string += \"Player \"\n for player in player_list:\n string += player.get_name()\n if player_number < len(player_list) - 1:\n string 
+= \", \"\n elif player_number < len(player_list):\n string += \" & \"\n player_number += 1\n if plural:\n string = string[:6] + \"s\" + string[6:] + plural_result\n else:\n string += singular_result\n return string", "def Parse(self, attribute_to_args_map, base_fallthroughs_map,\n parsed_args=None, plural=False, allow_empty=False):\n raise NotImplementedError", "def format_plural(text):\n if ',' in text:\n index = text.rfind(',') + 2\n text = text[:index] + 'and ' + text[index:]\n return text", "def _apply_default_format(self, results_list):\n if len(results_list) != 0:\n result_names = [res[\"record_name\"] for res in results_list]\n results_list = [f\"{res}\\n\" for res in result_names]\n formatted_results = [\n (self.style[\"unselected\"], res) for res in results_list\n ]\n else:\n formatted_results = []\n return formatted_results", "def parse_search_results(fields, results):\n my_results = []\n for result in results:\n my_results.append(SearchAnimeResult(fields, result))\n return my_results", "def process_input(\n input_data: List[ArticleInfoStemmed]\n) -> List[ArticleInfoStemmed]:\n stop_words = stopwords.words('russian')\n stop_words.extend(['что', 'это', 'так', 'вот', 'быть', 'как', 'в', 'к', 'на'])\n\n my_stemmer = Mystem()\n result = []\n\n for elem in input_data:\n title_without_sw = ' '.join(\n [\n elem1\n for elem1 in elem.title.split(' ')\n if elem1 not in stop_words and elem1 not in string.punctuation\n ]\n )\n\n text_without_sw = ' '.join(\n [\n elem1\n for elem1 in elem.text.split(' ')\n if elem1 not in stop_words and elem1 not in string.punctuation\n ]\n )\n\n final_title = ' '.join(\n my_stemmer.lemmatize(title_without_sw)\n ).replace(\n '\\n', ' '\n ).replace(\n '.', ' '\n ).replace(\n '-', ' '\n )\n\n final_text = ' '.join(\n my_stemmer.lemmatize(text_without_sw)\n ).replace(\n '\\n', ''\n ).replace(\n '.', ''\n ).replace(\n '-', ' '\n )\n\n result.append(\n ArticleInfoStemmed(\n title=final_title,\n text=final_text\n )\n )\n\n return result", "def process_list(a_list: list):\n\n return ', '.join(str(s) for s in a_list) if a_list else Presenter.DEFAULT", "def lf_findall_interp_with_discont(report):\n\n if 'interpretation' in report.sections.keys():\n interpretation = report.sections['interpretation']\n interp_text = interpretation['text']\n return abnormal_interp_with_discont(interp_text)\n else:\n candtext = get_section_with_name(SEIZURE_SECTION_NAMES_LOWER, report)\n if candtext:\n return abnormal_interp_with_discont(candtext)\n else:\n return ABSTAIN_VAL", "def test_with_4_items(self):\n self.assertEqual(humanize_list(['a', 'b', 'c', 'd']),\n 'a, b, c, and d')", "def transliterate(trans, item):\n if isinstance(item, str):\n return trans.get(item, item)\n if isinstance(item, list) and len(item) > 0:\n if isinstance(item[0], str):\n return [trans.get(i, i) for i in item]\n if isinstance(item[0], list):\n return [[trans.get(i, i) for i in first] for first in item]\n return item", "def simplelist(inline):\n return paramfinder.findall(inline)", "def lf_findall_interp_with_spikes(report):\n\n if 'interpretation' in report.sections.keys():\n interpretation = report.sections['interpretation']\n interp_text = interpretation['text']\n return abnormal_interp_with_spikes(interp_text)\n else:\n candtext = get_section_with_name(SEIZURE_SECTION_NAMES_LOWER, report)\n if candtext:\n return abnormal_interp_with_spikes(candtext)\n else:\n return ABSTAIN_VAL", "def parse_ingredients(inp):\n # Standardize to uppercase\n # inp = inp.upper()\n parsed, _ = _parse(inp)\n return parsed", 
"def role_list_helper(item, value):\n\n if isinstance(value, Message):\n # translate the message\n return translate(value, context=getRequest())\n\n elif any([isinstance(value, t) for t in (str, unicode)]):\n # is it a string or unicode or a subtype of them?\n return translate_participation_role(value)\n\n elif any([isinstance(value, t) for t in (list, tuple, set,\n PersistentList)]):\n # if it's a list, lets iterate over it\n translated_values = []\n for role in value:\n translated_values.append(translate_participation_role(role))\n return ', '.join(translated_values)\n\n else:\n return value", "def parse_listing(self, raw_output):\n return output_parser.listing(raw_output)", "def get_text(parent, tag, plural = False):\n text = None\n for item in parent.findall(tag):\n t = item.text\n if not text:\n if plural:\n text = [t]\n else:\n text = t\n elif isinstance(text, list):\n text.append(t)\n else:\n text = [text, t]\n return text" ]
[ "0.5154282", "0.5042861", "0.5010712", "0.4999677", "0.49668226", "0.49542627", "0.4946884", "0.4855669", "0.48430166", "0.48237655", "0.47569048", "0.47480172", "0.47471583", "0.473727", "0.4729921", "0.47268867", "0.4725763", "0.47083244", "0.4693469", "0.46605954", "0.4631673", "0.46286935", "0.46247318", "0.4623542", "0.46079552", "0.45993003", "0.4591976", "0.45859528", "0.45801136", "0.45729485" ]
0.58729494
0
Builds map of all fallthroughs including arg names. Fallthroughs are a list of objects that, when called, try different ways of getting values for attributes (see googlecloudsdk.calliope.concepts.deps_lib._Fallthrough). This method builds a map from the name of each attribute to its fallthroughs, including the "primary" fallthrough representing its corresponding argument value in parsed_args if any, and any fallthroughs that were configured for the attribute beyond that.
def BuildFullFallthroughsMap(self, attribute_to_args_map, base_fallthroughs_map, plural=False, with_anchor_fallthroughs=True): fallthroughs_map = {} for attribute in self.attributes: fallthroughs_map[attribute.name] = ( self.GetArgAndBaseFallthroughsForAttribute(attribute_to_args_map, base_fallthroughs_map, attribute, plural=plural)) if not with_anchor_fallthroughs: return fallthroughs_map for attribute in self.attributes: if self.IsAnchor(attribute): self._AddAnchorFallthroughs(attribute, fallthroughs_map) return fallthroughs_map
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetArgAndBaseFallthroughsForAttribute(self,\n attribute_to_args_map,\n base_fallthroughs_map,\n attribute,\n plural=False):\n attribute_name = attribute.name\n attribute_fallthroughs = []\n # The only args that should be lists are anchor args for plural\n # resources.\n attribute_is_plural = self.IsAnchor(attribute) and plural\n\n # Start the fallthroughs list with the primary associated arg for the\n # attribute.\n arg_name = attribute_to_args_map.get(attribute_name)\n if arg_name:\n attribute_fallthroughs.append(\n deps_lib.ArgFallthrough(arg_name, plural=attribute_is_plural))\n\n given_fallthroughs = base_fallthroughs_map.get(attribute_name, [])\n for fallthrough in given_fallthroughs:\n if attribute_is_plural:\n fallthrough = copy.deepcopy(fallthrough)\n fallthrough.plural = attribute_is_plural\n attribute_fallthroughs.append(fallthrough)\n return attribute_fallthroughs", "def Parse(self, attribute_to_args_map, base_fallthroughs_map,\n parsed_args=None, plural=False, allow_empty=False):\n raise NotImplementedError", "def Parse(self, attribute_to_args_map, base_fallthroughs_map,\n parsed_args=None, plural=False, allow_empty=False):\n if not plural:\n fallthroughs_map = self.BuildFullFallthroughsMap(\n attribute_to_args_map, base_fallthroughs_map,\n with_anchor_fallthroughs=False)\n try:\n return self.Initialize(\n fallthroughs_map, parsed_args=parsed_args)\n except InitializationError:\n if allow_empty:\n return None\n raise\n\n results = self._ParseFromPluralValue(attribute_to_args_map,\n base_fallthroughs_map,\n self.anchor,\n parsed_args)\n if results:\n return results\n\n if allow_empty:\n return []\n fallthroughs_map = self.BuildFullFallthroughsMap(\n attribute_to_args_map, base_fallthroughs_map)\n return self.Initialize(\n base_fallthroughs_map, parsed_args=parsed_args)", "def Initialize(self, fallthroughs_map, parsed_args=None):\n params = {}\n\n # Returns a function that can be used to parse each attribute, which will be\n # used only if the resource parser does not receive a fully qualified\n # resource name.\n def LazyGet(name):\n f = lambda: deps_lib.Get(name, fallthroughs_map, parsed_args=parsed_args)\n return f\n\n for attribute in self.attributes:\n params[self.ParamName(attribute.name)] = LazyGet(attribute.name)\n self._resources.RegisterApiByName(self._collection_info.api_name,\n self._collection_info.api_version)\n try:\n return self._resources.Parse(\n deps_lib.Get(\n self.anchor.name, fallthroughs_map, parsed_args=parsed_args),\n collection=self.collection,\n params=params)\n except deps_lib.AttributeNotFoundError as e:\n raise InitializationError(\n 'The [{}] resource is not properly specified.\\n'\n '{}'.format(self.name, six.text_type(e)))", "def apply(self, attributes):\n return {\n self.attribute_names[k]: v\n for k, v in attributes.items()\n }", "def _ParseFromPluralValue(self, attribute_to_args_map, base_fallthroughs_map,\n plural_attribute, parsed_args):\n attribute_name = plural_attribute.name\n fallthroughs_map = self.BuildFullFallthroughsMap(\n attribute_to_args_map, base_fallthroughs_map, plural=True,\n with_anchor_fallthroughs=False)\n current_fallthroughs = fallthroughs_map.get(attribute_name, [])\n # Iterate through the values provided to the argument, creating for\n # each a separate parsed resource.\n parsed_resources = []\n for fallthrough in current_fallthroughs:\n try:\n values = fallthrough.GetValue(parsed_args)\n except deps_lib.FallthroughNotFoundError:\n continue\n for value in values:\n def F(return_value=value):\n return 
return_value\n\n new_fallthrough = deps_lib.Fallthrough(\n F, fallthrough.hint, active=fallthrough.active)\n fallthroughs_map[attribute_name] = [new_fallthrough]\n # Add the anchor fallthroughs for this particular value, so that the\n # error messages will contain the appropriate hints.\n self._AddAnchorFallthroughs(plural_attribute, fallthroughs_map)\n parsed_resources.append(\n self.Initialize(\n fallthroughs_map, parsed_args=parsed_args))\n return parsed_resources", "def _attrs_map(self) -> \"dict[int, str]\":\n return {i: attr.name for i, attr in enumerate(self._attrs())}", "def fetch_mapped_args(self, args_mapping,\n atom_name=None, scope_walker=None,\n optional_args=None):\n def _extract_first_from(name, sources):\n \"\"\"Extracts/returns first occurrence of key in list of dicts.\"\"\"\n for i, source in enumerate(sources):\n if not source:\n continue\n if name in source:\n return (i, source[name])\n raise KeyError(name)\n if optional_args is None:\n optional_args = []\n if atom_name:\n source, _clone = self._atomdetail_by_name(atom_name)\n injected_sources = [\n self._injected_args.get(atom_name, {}),\n source.meta.get(META_INJECTED, {}),\n ]\n if scope_walker is None:\n scope_walker = self._scope_fetcher(atom_name)\n else:\n injected_sources = []\n if not args_mapping:\n return {}\n get_results = lambda atom_name: \\\n self._get(atom_name, 'last_results', 'failure',\n _EXECUTE_STATES_WITH_RESULTS, states.EXECUTE)\n mapped_args = {}\n for (bound_name, name) in args_mapping.items():\n if LOG.isEnabledFor(logging.TRACE):\n if atom_name:\n LOG.trace(\"Looking for %r <= %r for atom '%s'\",\n bound_name, name, atom_name)\n else:\n LOG.trace(\"Looking for %r <= %r\", bound_name, name)\n try:\n source_index, value = _extract_first_from(\n name, injected_sources)\n mapped_args[bound_name] = value\n if LOG.isEnabledFor(logging.TRACE):\n if source_index == 0:\n LOG.trace(\"Matched %r <= %r to %r (from injected\"\n \" atom-specific transient\"\n \" values)\", bound_name, name, value)\n else:\n LOG.trace(\"Matched %r <= %r to %r (from injected\"\n \" atom-specific persistent\"\n \" values)\", bound_name, name, value)\n except KeyError:\n try:\n maybe_providers = self._reverse_mapping[name]\n except KeyError:\n if bound_name in optional_args:\n LOG.trace(\"Argument %r is optional, skipping\",\n bound_name)\n continue\n raise exceptions.NotFound(\"Name %r is not mapped as a\"\n \" produced output by any\"\n \" providers\" % name)\n locator = _ProviderLocator(\n self._transients,\n functools.partial(self._fetch_providers,\n providers=maybe_providers), get_results)\n searched_providers, providers = locator.find(\n name, scope_walker=scope_walker)\n if not providers:\n raise exceptions.NotFound(\n \"Mapped argument %r <= %r was not produced\"\n \" by any accessible provider (%s possible\"\n \" providers were scanned)\"\n % (bound_name, name, len(searched_providers)))\n provider, value = _item_from_first_of(providers, name)\n mapped_args[bound_name] = value\n LOG.trace(\"Matched %r <= %r to %r (from %s)\",\n bound_name, name, value, provider)\n return mapped_args", "def get_argdict(cls, toolchain, args):\n return {} # Empty must be overloaded (if required)", "def parse_arguments(args: List[Dict]) -> 'Dict[str, Argument]':\n if not args:\n return {}\n result = {}\n for a in args:\n if not a:\n continue\n arg = Argument(a)\n result[arg.name] = arg\n return result", "def _set_attr_name_map(self):\n self.attr_name_map = {}\n for object_query in self.query:\n object_name = object_query[\"object_name\"]\n 
object_class = self.object_map[object_name]\n aliases = AttributeInfo.gather_aliases(object_class)\n self.attr_name_map[object_class] = {}\n for key, value in aliases.items():\n filter_by = None\n if isinstance(value, dict):\n filter_name = value.get(\"filter_by\", None)\n if filter_name is not None:\n filter_by = getattr(object_class, filter_name, None)\n value = value[\"display_name\"]\n if value:\n self.attr_name_map[object_class][value.lower()] = (key.lower(),\n filter_by)\n custom_attrs = AttributeInfo.get_custom_attr_definitions(\n object_class)\n for key, definition in custom_attrs.items():\n if not key.startswith(\"__custom__:\") or \\\n \"display_name\" not in definition:\n continue\n try:\n # Global custom attribute definition can only have a single id on\n # their name, so it is safe for that. Currently the filters do not\n # work with object level custom attributes.\n attr_id = definition[\"definition_ids\"][0]\n except KeyError:\n continue\n filter_by = CustomAttributeValue.mk_filter_by_custom(object_class,\n attr_id)\n name = definition[\"display_name\"].lower()\n self.attr_name_map[object_class][name] = (name, filter_by)", "def buildTagMap(default, *args):\r\n built = {}\r\n for portion in args:\r\n if hasattr(portion, 'items'):\r\n #It's a map. Merge it.\r\n for k,v in portion.items():\r\n built[k] = v\r\n elif isList(portion):\r\n #It's a list. Map each item to the default.\r\n for k in portion:\r\n built[k] = default\r\n else:\r\n #It's a scalar. Map it to the default.\r\n built[portion] = default\r\n return built", "def _map_args_kwargs_to_input(self, *args, **kwargs) -> Dict[str, Any]:\n input_dict = {k: v for k, v in zip(self.inputs, args)}\n input_dict.update(kwargs)\n\n return input_dict", "def _get_attribute_dic(self, attrs):\n attr_dic = {}\n for attr_pair in attrs:\n attr_dic[attr_pair[0]] = attr_pair[1]\n return attr_dic", "def _parse_args(self, args : dict):\n result = {}\n for key, value in args.items():\n if key in self._subparsers:\n # if it's a list, it is because it's a preset\n if isinstance(value, list):\n result[key] = value[0]\n else:\n result[key] = self._subparsers[key]._parse_args(value)\n elif key in self._actions:\n result[key] = self._actions[key](value)\n else:\n raise ValueError(f\"Unknown argument {key}\")\n\n return result", "def build_default_catch_all_map(self):\n self.default_catch_all_map = self.data['catchall']", "def map_attributes(order: dict) -> dict:\n map_dict = {\n \"has_batteries\": FactoryMapping.cast_str_to_bool,\n \"has_glow\": FactoryMapping.cast_str_to_bool,\n \"has_lactose\": FactoryMapping.cast_str_to_bool,\n \"has_nuts\": FactoryMapping.cast_str_to_bool,\n \"min_age\": int,\n \"num_rooms\": int,\n \"num_sound\": int,\n \"pack_size\": int,\n \"dimensions\": lambda x: float(x.replace(\",\", '.')),\n \"spider_type\": SpiderType.map_str_to_enum,\n \"colour\": Colours.map_str_to_enum,\n \"variety\": ToffeeVariety.map_str_to_enum,\n \"stuffing\": Stuffing.map_str_to_enum,\n \"size\": Size.map_str_to_enum,\n \"fabric\": Fabric.map_str_to_enum\n }\n for key, value in map_dict.items():\n if key in order:\n order[key] = value(order[key])\n return order", "def parts(self):\n if hasattr(self, '_parts'):\n return self._parts\n argspec = inspect.getargspec(self.primary_preprocessor)\n needed_attrs = {attr_name: getattr(self, attr_name)\n for attr_name in argspec.args}\n if argspec.varargs:\n needed_attrs[argspec.varargs] = getattr(self, argspec.varargs)\n\n if argspec.keywords:\n needed_attrs[argspec.keywords] = getattr(self, 
argspec.keywords)\n\n return needed_attrs", "def mof_metadata(self):\n\n arg_dict = collections.defaultdict(dict)\n\n arg_dict[self.name]['type'] = self.arg_type\n arg_dict[self.name]['qualifiers'] = self.qualifiers\n arg_dict[self.name]['valuemap'] = self.valuemap\n\n return dict(arg_dict)", "def args_map_custom(cls) -> dict:\n args = {}\n args.update(cls.args_map_export())\n args.update({\"json_flat\": False})\n return args", "def process_attrs(attrs):\n if attrs.getLength() == 0:\n return {}\n tmp_dict = {}\n for name in attrs.getNames():\n tmp_dict[name] = attrs.getValue(name)\n return tmp_dict", "def _AddAnchorFallthroughs(self, anchor, fallthroughs_map):\n anchor_fallthroughs = fallthroughs_map.get(anchor.name, [])\n for attribute in self.attributes:\n if attribute == anchor:\n continue\n anchor_based_fallthroughs = self._GetAttributeAnchorFallthroughs(\n anchor_fallthroughs, attribute)\n fallthroughs_map[attribute.name] = (\n anchor_based_fallthroughs + fallthroughs_map[attribute.name])", "def default_flaky_attributes(max_runs=None, min_passes=None, rerun_filter=None):\n if max_runs is None:\n max_runs = 2\n if min_passes is None:\n min_passes = 1\n if min_passes <= 0:\n raise ValueError('min_passes must be positive')\n if max_runs < min_passes:\n raise ValueError('min_passes cannot be greater than max_runs!')\n\n return {\n FlakyNames.MAX_RUNS: max_runs,\n FlakyNames.MIN_PASSES: min_passes,\n FlakyNames.CURRENT_RUNS: 0,\n FlakyNames.CURRENT_PASSES: 0,\n FlakyNames.RERUN_FILTER: FilterWrapper(rerun_filter or _true),\n }", "def parse_args_dict(args=None):\n return vars(parse_args(args))", "def action_map(self) -> Dict[str, CLIActionType]:\n return add_dicts({\n \"dump\": self.dump_action,\n \"dump-macrosizes\": self.dump_macrosizes_action,\n \"dump_macrosizes\": self.dump_macrosizes_action,\n \"synthesis\": self.synthesis_action,\n \"syn\": self.synthesis_action,\n \"par\": self.par_action,\n \"synthesis_to_par\": self.synthesis_to_par_action,\n \"synthesis-to-par\": self.synthesis_to_par_action,\n \"syn_to_par\": self.synthesis_to_par_action,\n \"syn-to-par\": self.synthesis_to_par_action,\n \"synthesis_par\": self.synthesis_par_action,\n \"synthesis-par\": self.synthesis_par_action,\n \"syn_par\": self.synthesis_par_action,\n \"syn-par\": self.synthesis_par_action,\n \"hier_par_to_syn\": self.hier_par_to_syn_action,\n \"hier-par-to-syn\": self.hier_par_to_syn_action,\n \"par_to_drc\": self.par_to_drc_action,\n \"par-to-drc\": self.par_to_drc_action,\n \"par_to_lvs\": self.par_to_lvs_action,\n \"par-to-lvs\": self.par_to_lvs_action,\n \"drc\": self.drc_action,\n \"lvs\": self.lvs_action\n }, self.all_hierarchical_actions)", "def as_kwargs(self) -> Dict[str, Any]:\n ret = {}\n for arg in self.args.values():\n ret[arg.name] = arg.value\n return ret", "def _build_kwargs(element, plugin):\n lookup_table = PrefixLookupDict(plugin['args'])\n kwargs = {}\n for attr in element.attributes:\n if attr.name in lookup_table:\n kwargs[lookup_table[attr.name]] = attr.value\n element.removeAttribute(attr.name)\n return kwargs", "def parse_attributes(data, attributes):\n result = {}\n for key, val in attributes.items():\n if type(val) is list:\n attr_val = get_tree_data(data, val)\n else:\n attr_val = data.get(val, None)\n if attr_val is not None:\n result[key] = attr_val\n return result", "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n\n return {\n \"bug\":[self.from_entity(\n entity=\"bug\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"beverage\": 
[self.from_entity(\n entity=\"beverage\", \n intent=\"inform\"), \n self.from_text(\n intent=\"inform\")],\n \"second_person_plural\": [self.from_entity(\n entity=\"second_person_plural\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"cot_caught\": [self.from_entity(\n entity=\"cot_caught\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"rain_sun\": [self.from_entity(\n entity=\"rain_sun\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"crawfish\": [self.from_entity(\n entity=\"crawfish\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"halloween\": [self.from_entity(\n entity=\"halloween\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"sandwich\": [self.from_entity(\n entity=\"sandwich\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"side_road\": [self.from_entity(\n entity=\"side_road\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"shoes\": [self.from_entity(\n entity=\"shoes\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"highway\": [self.from_entity(\n entity=\"highway\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"yard_sale\": [self.from_entity(\n entity=\"yard_sale\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"rubbernecking\": [self.from_entity(\n entity=\"rubbernecking\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"frosting\": [self.from_entity(\n entity=\"frosting\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"lawyer\": [self.from_entity(\n entity=\"lawyer\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"kitty_corner\": [self.from_entity(\n entity=\"kitty_corner\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"firefly\": [self.from_entity(\n entity=\"firefly\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"verge\": [self.from_entity(\n entity=\"verge\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"brew_thru\": [self.from_entity(\n entity=\"brew_thru\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")],\n \"water_fountain\": [self.from_entity(\n entity=\"water_fountain\", \n intent=\"inform\"),\n self.from_text(\n intent=\"inform\")]\n }", "def standing_level_access_map(self) -> dict:\n names_map = {\n self.StandingLevel.NONE: \"NONE\",\n self.StandingLevel.TERRIBLE: \"TERRIBLE\",\n self.StandingLevel.BAD: \"BAD\",\n self.StandingLevel.NEUTRAL: \"NEUTRAL\",\n self.StandingLevel.GOOD: \"GOOD\",\n self.StandingLevel.EXCELLENT: \"EXCELLENT\",\n }\n return {\n names_map[self.StandingLevel(level)]: (\n self.allow_access_with_standings and level >= self.standing_level\n )\n for level in self.StandingLevel.values\n }" ]
[ "0.66037655", "0.64677685", "0.60660607", "0.5610746", "0.5434453", "0.54003733", "0.52738345", "0.5240895", "0.51854205", "0.5154537", "0.5153767", "0.51498824", "0.5075057", "0.50633425", "0.5063299", "0.49745247", "0.48896033", "0.4885709", "0.48491773", "0.4836923", "0.4833068", "0.48197696", "0.4816996", "0.47789592", "0.47633553", "0.4750599", "0.47038147", "0.47029147", "0.46942383", "0.46864238" ]
0.6850987
0
Helper to get anchor-dependent fallthroughs for a specific attribute.
def _GetAttributeAnchorFallthroughs(self, anchor_fallthroughs, attribute): parameter_name = self.ParamName(attribute.name) anchor_based_fallthroughs = [ deps_lib.FullySpecifiedAnchorFallthrough( anchor_fallthrough, self.collection_info, parameter_name) for anchor_fallthrough in anchor_fallthroughs ] return anchor_based_fallthroughs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetArgAndBaseFallthroughsForAttribute(self,\n attribute_to_args_map,\n base_fallthroughs_map,\n attribute,\n plural=False):\n attribute_name = attribute.name\n attribute_fallthroughs = []\n # The only args that should be lists are anchor args for plural\n # resources.\n attribute_is_plural = self.IsAnchor(attribute) and plural\n\n # Start the fallthroughs list with the primary associated arg for the\n # attribute.\n arg_name = attribute_to_args_map.get(attribute_name)\n if arg_name:\n attribute_fallthroughs.append(\n deps_lib.ArgFallthrough(arg_name, plural=attribute_is_plural))\n\n given_fallthroughs = base_fallthroughs_map.get(attribute_name, [])\n for fallthrough in given_fallthroughs:\n if attribute_is_plural:\n fallthrough = copy.deepcopy(fallthrough)\n fallthrough.plural = attribute_is_plural\n attribute_fallthroughs.append(fallthrough)\n return attribute_fallthroughs", "def _AddAnchorFallthroughs(self, anchor, fallthroughs_map):\n anchor_fallthroughs = fallthroughs_map.get(anchor.name, [])\n for attribute in self.attributes:\n if attribute == anchor:\n continue\n anchor_based_fallthroughs = self._GetAttributeAnchorFallthroughs(\n anchor_fallthroughs, attribute)\n fallthroughs_map[attribute.name] = (\n anchor_based_fallthroughs + fallthroughs_map[attribute.name])", "def BuildFullFallthroughsMap(self, attribute_to_args_map,\n base_fallthroughs_map, plural=False,\n with_anchor_fallthroughs=True):\n fallthroughs_map = {}\n for attribute in self.attributes:\n fallthroughs_map[attribute.name] = (\n self.GetArgAndBaseFallthroughsForAttribute(attribute_to_args_map,\n base_fallthroughs_map,\n attribute,\n plural=plural))\n if not with_anchor_fallthroughs:\n return fallthroughs_map\n for attribute in self.attributes:\n if self.IsAnchor(attribute):\n self._AddAnchorFallthroughs(attribute, fallthroughs_map)\n return fallthroughs_map", "def anchor(self):\n # self.attributes cannot be empty; will cause an error on init.\n return self.attributes[-1]", "def anchor_pairs(self):\n # TODO unit test for this method\n def _anchors(given_anchor):\n if given_anchor is not None:\n yield given_anchor\n else:\n yield from anchors.Anchor\n for src_anch in _anchors(self.orig_anchor):\n for dest_anch in _anchors(self.dest_anchor):\n yield (src_anch, dest_anch)", "def a_attr_dict (self) :\n return dict (href = self.abs_href)", "def _getanchor(self, line):\n m = anchor_re.match(line)\n if not m:\n return self._getsec(line)\n self.anchors.append(m.group(1))", "def select_attribute(instances, available_attributes, domain):\n\n\n entropies = {}\n for att in available_attributes:\n entropies[att] = entropy_new(instances, att, domain)\n \n next_attrib, (_ent, leaves) = min(list(entropies.items()), key=lambda x: x[1][0])\n \n return next_attrib, leaves", "def html_anchor_tags(self):\n return self.findall_markdown_cells(r'<a [^>]*>')", "def A(ball, goal, tolerance_ball = 0.2, tolerance_goal = 0.3):\n return Aligned(ball, goal, tolerance_ball, tolerance_goal)", "def get_attrs(foreground, background, style):\n return foreground + (background << 4) + style", "def Parse(self, attribute_to_args_map, base_fallthroughs_map,\n parsed_args=None, plural=False, allow_empty=False):\n if not plural:\n fallthroughs_map = self.BuildFullFallthroughsMap(\n attribute_to_args_map, base_fallthroughs_map,\n with_anchor_fallthroughs=False)\n try:\n return self.Initialize(\n fallthroughs_map, parsed_args=parsed_args)\n except InitializationError:\n if allow_empty:\n return None\n raise\n\n results = 
self._ParseFromPluralValue(attribute_to_args_map,\n base_fallthroughs_map,\n self.anchor,\n parsed_args)\n if results:\n return results\n\n if allow_empty:\n return []\n fallthroughs_map = self.BuildFullFallthroughsMap(\n attribute_to_args_map, base_fallthroughs_map)\n return self.Initialize(\n base_fallthroughs_map, parsed_args=parsed_args)", "def follow(link, cold_data, behavior_data, coming_from, seq_idx, cold_followed):\n to_ret = []\n (link_index, link_length) = parse_arrayindexandlength(link)\n for (cold_order_idx, cold_index) in enumerate(range(link_index, link_index+link_length)):\n full_cold_index = '{}_{}'.format(seq_idx, cold_index)\n if full_cold_index in cold_followed:\n continue\n else:\n cold_followed.add(full_cold_index)\n try:\n cold = cold_data[cold_index]\n except IndexError:\n broken_id = 'broken_{}_{}'.format(seq_idx, cold_order_idx)\n print(' {} [label=<BROKEN> {}];'.format(broken_id, style_broken_cold))\n to_ret.append((coming_from, broken_id, None))\n return to_ret\n (link_id, bindex) = parse_linkidandlinkedbehavior(cold['LinkIdAndLinkedBehavior'])\n behavior = behavior_data[bindex]\n going_to = 'behavior_{}_{}'.format(seq_idx, bindex)\n delay = round(float(cold['ActivateDelay']), 6)\n if delay == 0:\n delay_extra = ''\n else:\n if int(delay) == delay:\n delay = round(delay)\n delay_extra = '<br/>d{}'.format(delay)\n to_ret.append((coming_from, going_to,\n 'taillabel=<{}<br/>[{}]<br/>{}{}>'.format(cold_order_idx, cold_index, link_id, delay_extra)))\n to_ret.extend(follow(behavior['OutputLinks']['ArrayIndexAndLength'],\n cold_data,\n behavior_data,\n going_to,\n seq_idx,\n cold_followed,\n ))\n return to_ret", "def test_can_filter_attributes(self):\n text = '<b><a href=\"\" target=\"_blank\">Example</a></b>'\n filter = Bleach(\n tags=['a'],\n attributes=dict(a=['href', 'title'])\n )\n filtered = filter.filter(text)\n expected = '<a href=\"\">Example</a>'\n self.assertEquals(expected, filtered)", "def generate_resources(self, attr):\n resource = self._branching_cls(\n self.resource_name, attr, predicate=self.predicate)\n return ((path, resource) for path in chain([attr], self.aliases))", "def get_links(tag, attribute, url, content):\n soup = BeautifulSoup(content, 'html.parser')\n links = []\n for link in soup.findAll(tag):\n link = link.get(attribute)\n links.append(sanitize_link(link, url))\n return links", "def _mkanchors(ws, hs, x_ctr, y_ctr):\n ws = ws[:, np.newaxis]\n hs = hs[:, np.newaxis]\n anchors = np.hstack((x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1), x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1)))\n return anchors", "def _mkanchors(ws, hs, x_ctr, y_ctr):\n\n ws = ws[:, np.newaxis]\n hs = hs[:, np.newaxis]\n anchors = np.hstack((x_ctr - 0.5 * (ws - 1),\n y_ctr - 0.5 * (hs - 1),\n x_ctr + 0.5 * (ws - 1),\n y_ctr + 0.5 * (hs - 1)))\n return anchors", "def get_attributes_from_page(pagetree, css, attribute):\n elements = get_elements_from_page(pagetree, css)\n attributes = [element.attrib[attribute] for element in elements]\n return attributes", "def handle_starttag(self, tag, attrs):\n # Identify anchor tags\n if tag == \"a\":\n for(attribute, value) in attrs:\n # Anchor tags may have more than 1 attribute, but handle_starttag will only target href\n # Attribute examples: href, target, rel, etc\n # Attribute list can be found at: https://www.w3schools.com/tags/tag_a.asp\n if attribute == \"href\":\n # Form an absolute URL based on the relative URL\n absoluteUrl = urljoin(self.baseURL, value)\n # We want to avoid href values that are not http/https\n # 
Example: <a href=\"mailto:[email protected]\">Send Email Now!</a>\n if urlparse(absoluteUrl).scheme in [\"http\", \"https\"]:\n # Once a new hyperlink is obtained, add it to the set\n self.pageLinks.add(absoluteUrl)", "def _mkanchors(ws, hs, x_ctr, y_ctr):\n ws = ws[:, np.newaxis]\n hs = hs[:, np.newaxis]\n anchors = np.hstack(\n (\n x_ctr - 0.5 * (ws - 1),\n y_ctr - 0.5 * (hs - 1),\n x_ctr + 0.5 * (ws - 1),\n y_ctr + 0.5 * (hs - 1),\n )\n )\n return anchors", "def AB(ball, goal, tolerance = 0.3, des_bearing = 0.5):\n return AlignedWithBearing(ball, goal, tolerance, des_bearing)", "def get_anchor_points(self):\n rows, cols = np.where(self.overlap_mask)\n self.anchor_points = tuple(zip(rows, cols))[:: self.sampling_int]\n print(\"# of anchors: {}\".format(len(self.anchor_points)))", "def _ParseFromPluralValue(self, attribute_to_args_map, base_fallthroughs_map,\n plural_attribute, parsed_args):\n attribute_name = plural_attribute.name\n fallthroughs_map = self.BuildFullFallthroughsMap(\n attribute_to_args_map, base_fallthroughs_map, plural=True,\n with_anchor_fallthroughs=False)\n current_fallthroughs = fallthroughs_map.get(attribute_name, [])\n # Iterate through the values provided to the argument, creating for\n # each a separate parsed resource.\n parsed_resources = []\n for fallthrough in current_fallthroughs:\n try:\n values = fallthrough.GetValue(parsed_args)\n except deps_lib.FallthroughNotFoundError:\n continue\n for value in values:\n def F(return_value=value):\n return return_value\n\n new_fallthrough = deps_lib.Fallthrough(\n F, fallthrough.hint, active=fallthrough.active)\n fallthroughs_map[attribute_name] = [new_fallthrough]\n # Add the anchor fallthroughs for this particular value, so that the\n # error messages will contain the appropriate hints.\n self._AddAnchorFallthroughs(plural_attribute, fallthroughs_map)\n parsed_resources.append(\n self.Initialize(\n fallthroughs_map, parsed_args=parsed_args))\n return parsed_resources", "def cross_link_attribute(self, attribute_name, node_list1, node_list2):\n W = self.link_attribute(attribute_name)\n return W[node_list1, :][:, node_list2]", "def _mkanchors(ws, hs, x_ref, y_ref):\n\n ws = ws[:, np.newaxis]\n hs = hs[:, np.newaxis]\n\n anchors = np.hstack(\n (\n x_ref - 0.5 * (ws - 1),\n y_ref - 0.5 * (hs - 1),\n x_ref + 0.5 * (ws - 1),\n y_ref + 0.5 * (hs - 1)\n )\n )\n return anchors", "def getAB(self):\n\n return self.alt_base_1, self.alt_base_2", "def handle_a(self, tag, attrs):\n ad = dict(attrs)\n if 'href' in ad.keys() \\\n and ad['href'].startswith('http:') \\\n and 'target' not in ad.keys():\n self.errmsg(\"External link with no target attribute\")", "def __attr_index(self, attribute: str) -> Optional[int]:\n attr_index = None\n for i, (key_node, _) in enumerate(self.yaml_node.value):\n if key_node.value == attribute:\n attr_index = i\n break\n return attr_index", "def get_fasta(self):\n\t\tif not self.fastas:\n\t\t\treturn None\n\t\telif self.fastas.get('twodirections') is not None:\n\t\t\treturn self.fastas.get('twodirections')\n\t\telif self.fastas.get('template') is not None:\n\t\t\treturn self.fastas.get('template')\n\t\telif self.fastas.get('complement') is not None:\n\t\t\treturn self.fastas.get('complement')" ]
[ "0.64066035", "0.62550104", "0.5431331", "0.53608054", "0.5328746", "0.47851092", "0.47500005", "0.46948504", "0.4661473", "0.45768866", "0.45704085", "0.45176873", "0.4516535", "0.44741994", "0.44652265", "0.4453551", "0.4448734", "0.44419", "0.44351763", "0.44257838", "0.44205403", "0.4408743", "0.43915898", "0.43904948", "0.4389876", "0.43760988", "0.4365485", "0.43337035", "0.4331928", "0.43288022" ]
0.80926263
0
Helper for adding anchor fallthroughs to the fallthroughs map.
def _AddAnchorFallthroughs(self, anchor, fallthroughs_map): anchor_fallthroughs = fallthroughs_map.get(anchor.name, []) for attribute in self.attributes: if attribute == anchor: continue anchor_based_fallthroughs = self._GetAttributeAnchorFallthroughs( anchor_fallthroughs, attribute) fallthroughs_map[attribute.name] = ( anchor_based_fallthroughs + fallthroughs_map[attribute.name])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def BuildFullFallthroughsMap(self, attribute_to_args_map,\n base_fallthroughs_map, plural=False,\n with_anchor_fallthroughs=True):\n fallthroughs_map = {}\n for attribute in self.attributes:\n fallthroughs_map[attribute.name] = (\n self.GetArgAndBaseFallthroughsForAttribute(attribute_to_args_map,\n base_fallthroughs_map,\n attribute,\n plural=plural))\n if not with_anchor_fallthroughs:\n return fallthroughs_map\n for attribute in self.attributes:\n if self.IsAnchor(attribute):\n self._AddAnchorFallthroughs(attribute, fallthroughs_map)\n return fallthroughs_map", "def adjust_anchors(self):\n pass", "def _GetAttributeAnchorFallthroughs(self, anchor_fallthroughs, attribute):\n parameter_name = self.ParamName(attribute.name)\n anchor_based_fallthroughs = [\n deps_lib.FullySpecifiedAnchorFallthrough(\n anchor_fallthrough, self.collection_info, parameter_name)\n for anchor_fallthrough in anchor_fallthroughs\n ]\n return anchor_based_fallthroughs", "def _mkanchors(ws, hs, x_ctr, y_ctr):\n ws = ws[:, np.newaxis]\n hs = hs[:, np.newaxis]\n anchors = np.hstack((x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1), x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1)))\n return anchors", "def _mkanchors(ws, hs, x_ctr, y_ctr):\n\n ws = ws[:, np.newaxis]\n hs = hs[:, np.newaxis]\n anchors = np.hstack((x_ctr - 0.5 * (ws - 1),\n y_ctr - 0.5 * (hs - 1),\n x_ctr + 0.5 * (ws - 1),\n y_ctr + 0.5 * (hs - 1)))\n return anchors", "def mapAdd(block, posMap):\n for (x, y) in block.coords:\n theFallener(x + block.x, y + block.y, block.color, posMap)", "def _mkanchors(ws, hs, x_ctr, y_ctr):\n ws = ws[:, np.newaxis]\n hs = hs[:, np.newaxis]\n anchors = np.hstack(\n (\n x_ctr - 0.5 * (ws - 1),\n y_ctr - 0.5 * (hs - 1),\n x_ctr + 0.5 * (ws - 1),\n y_ctr + 0.5 * (hs - 1),\n )\n )\n return anchors", "def _do_anchor(self, anchor):\n if anchor:\n for x in anchor.split(\"-\"):\n A, P = None, None\n if x.startswith(\"A\") and len(self.chunks) > 0: # anchor\n A, P = x, x.replace(\"A\",\"P\")\n self._anchors[A] = self.chunks[-1]\n if x.startswith(\"P\") and len(self.pnp) > 0: # attachment (PNP)\n A, P = x.replace(\"P\",\"A\"), x\n self._anchors[P] = self.pnp[-1]\n if A in self._anchors and P in self._anchors and not self._anchors[P].anchor:\n pnp = self._anchors[P]\n pnp.anchor = self._anchors[A]\n pnp.anchor.attachments.append(pnp)", "def generate_anchors(self):\n self.anchors = np.zeros((self.anchor_num, 4), dtype=np.float32)\n size = self.stride * self.stride\n count = 0\n for r in self.ratios:\n ws = int(math.sqrt(size * 1. 
/ r))\n hs = int(ws * r)\n\n for s in self.scales:\n w = ws * s\n h = hs * s\n self.anchors[count][:] = [-w * 0.5, -h * 0.5, w * 0.5, h * 0.5][:]\n count += 1", "def _mkanchors(ws, hs, x_ref, y_ref):\n\n ws = ws[:, np.newaxis]\n hs = hs[:, np.newaxis]\n\n anchors = np.hstack(\n (\n x_ref - 0.5 * (ws - 1),\n y_ref - 0.5 * (hs - 1),\n x_ref + 0.5 * (ws - 1),\n y_ref + 0.5 * (hs - 1)\n )\n )\n return anchors", "def mk_anchor(hs, ws, h_c, w_c):\n ws = ws[:, np.newaxis]\n hs = hs[:, np.newaxis]\n anchors = np.hstack((w_c - 0.5 * ws, h_c - 0.5 * hs, w_c + 0.5 * ws, h_c + 0.5 * hs))\n return anchors", "def generate_anchors(base_size=16, feat_stride=16, anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2)):\n anchors = generate_base_anchors(base_size=base_size, ratios=np.array(anchor_ratios), scales=np.array(anchor_scales))\n A = anchors.shape[0]\n shift_x = np.arange(0, IM_SCALE // feat_stride) * feat_stride\n shift_x, shift_y = np.meshgrid(shift_x, shift_x)\n shifts = np.stack([shift_x, shift_y, shift_x, shift_y], -1)\n all_anchors = shifts[:, :, None] + anchors[None, None]\n return all_anchors", "def _mkanchors(ws, ctr):\n ws = ws[:, np.newaxis]\n anchors = np.hstack(\n (\n ctr - 0.5 * ws,\n ctr + 0.5 * ws,\n )\n )\n return anchors", "def _shift_anchors(anchors, direction):\n new_anchors = deepcopy(anchors)\n if direction == 'center':\n pass\n\n elif direction == 'top':\n heights = new_anchors[:,3] - new_anchors[:,1] + 1\n heights = heights[:,np.newaxis]\n new_anchors[:,[1,3]] = new_anchors[:,[1,3]] - heights/2\n\n elif direction == 'bottom':\n heights = new_anchors[:,3] - new_anchors[:,1] + 1\n heights = heights[:,np.newaxis]\n new_anchors[:,[1,3]] = new_anchors[:,[1,3]] + heights/2\n\n elif direction == 'right':\n widths = new_anchors[:,2] - new_anchors[:,0] + 1\n widths = widths[:,np.newaxis]\n new_anchors[:,[0,2]] = new_anchors[:,[0,2]] + widths/2\n\n elif direction == 'left':\n widths = new_anchors[:,2] - new_anchors[:,0] + 1\n widths = widths[:,np.newaxis]\n new_anchors[:,[0,2]] = new_anchors[:,[0,2]] - widths/2\n\n return new_anchors", "def adjust_regular_roi_anchors(bounds: QRectF, anchors: list):\n for point in anchors:\n off = point.boundingRect().width() / 2\n if point.position == AnchorPosition.LEFT:\n point.setPos(bounds.left() - off, bounds.top() - off + bounds.height() / 2)\n elif point.position == AnchorPosition.RIGHT:\n point.setPos(bounds.right() - off, bounds.top() - off + bounds.height() / 2)\n elif point.position == AnchorPosition.TOP:\n point.setPos(bounds.left() - off + bounds.width() / 2, bounds.top() - off)\n elif point.position == AnchorPosition.TOP_LEFT:\n point.setPos(bounds.left() - off, bounds.top() - off)\n elif point.position == AnchorPosition.TOP_RIGHT:\n point.setPos(bounds.right() - off, bounds.top() - off)\n elif point.position == AnchorPosition.BOTTOM:\n point.setPos(bounds.left() - off + bounds.width() / 2, bounds.bottom() - off)\n elif point.position == AnchorPosition.BOTTOM_LEFT:\n point.setPos(bounds.left() - off, bounds.bottom() - off)\n elif point.position == AnchorPosition.BOTTOM_RIGHT:\n point.setPos(bounds.right() - off, bounds.bottom() - off)", "def set_anchors(mc):\n H, W, C = _get_output_shape(mc)\n B = mc.ANCHOR_PER_GRID\n X = np.array(mc.INITIAL_ANCHOR_SHAPES)\n X[:,0] *= mc.IMAGE_WIDTH\n X[:,1] *= mc.IMAGE_HEIGHT\n anchor_shapes = np.reshape( # it refers to the anchor width and height\n [X] * H * W,\n (H, W, B, 2)\n )\n center_x = np.reshape(\n np.transpose(\n np.reshape(\n np.array([np.arange(1, W+1)*float(mc.IMAGE_WIDTH)/(W+1)]*H*B), \n (B, 
H, W)\n ),\n (1, 2, 0)\n ),\n (H, W, B, 1)\n )\n center_y = np.reshape(\n np.transpose(\n np.reshape(\n np.array([np.arange(1, H+1)*float(mc.IMAGE_HEIGHT)/(H+1)]*W*B),\n (B, W, H)\n ),\n (2, 1, 0)\n ),\n (H, W, B, 1)\n )\n anchors = np.reshape(\n np.concatenate((center_x, center_y, anchor_shapes), axis=3),\n (-1, 4)\n )\n\n return anchors", "def __call__(self, loc, scores, anchors, img_size):\n anchors = bbox.loc2bbox(anchor)", "def anchor_target_layer(rpn_cls_score, gt_boxes, im_info, _feat_stride, all_anchors, num_anchors):\n A = num_anchors\n total_anchors = all_anchors.shape[0]\n K = total_anchors / num_anchors\n\n # allow boxes to sit over the edge by a small amount\n _allowed_border = 0\n\n # map of shape (..., H, W)\n height, width = rpn_cls_score.shape[1:3]\n\n # only keep anchors inside the image\n inds_inside = np.where(\n (all_anchors[:, 0] >= -_allowed_border) &\n (all_anchors[:, 1] >= -_allowed_border) &\n (all_anchors[:, 2] < im_info[1] + _allowed_border) & # width\n (all_anchors[:, 3] < im_info[0] + _allowed_border) # height\n )[0]\n\n # keep only inside anchors\n anchors = all_anchors[inds_inside, :]\n\n # label: 1 is positive, 0 is negative, -1 is dont care\n labels = np.empty((len(inds_inside),), dtype=np.float32)\n labels.fill(-1)\n\n # overlaps between the anchors and the gt boxes\n # overlaps (ex, gt)\n overlaps = bbox_overlaps(\n np.ascontiguousarray(anchors, dtype=np.float),\n np.ascontiguousarray(gt_boxes, dtype=np.float))\n argmax_overlaps = overlaps.argmax(axis=1)\n max_overlaps = overlaps[np.arange(len(inds_inside)), argmax_overlaps]\n gt_argmax_overlaps = overlaps.argmax(axis=0)\n gt_max_overlaps = overlaps[gt_argmax_overlaps,\n np.arange(overlaps.shape[1])]\n gt_argmax_overlaps = np.where(overlaps == gt_max_overlaps)[0]\n\n if not cfg.TRAIN.RPN_CLOBBER_POSITIVES:\n # assign bg labels first so that positive labels can clobber them\n # first set the negatives\n labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0\n\n # fg label: for each gt, anchor with highest overlap\n labels[gt_argmax_overlaps] = 1\n\n # fg label: above threshold IOU\n labels[max_overlaps >= cfg.TRAIN.RPN_POSITIVE_OVERLAP] = 1\n\n if cfg.TRAIN.RPN_CLOBBER_POSITIVES:\n # assign bg labels last so that negative labels can clobber positives\n labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0\n\n # subsample positive labels if we have too many\n num_fg = int(cfg.TRAIN.RPN_FG_FRACTION * cfg.TRAIN.RPN_BATCHSIZE)\n fg_inds = np.where(labels == 1)[0]\n if len(fg_inds) > num_fg:\n disable_inds = npr.choice(\n fg_inds, size=(len(fg_inds) - num_fg), replace=False)\n labels[disable_inds] = -1\n\n # subsample negative labels if we have too many\n num_bg = cfg.TRAIN.RPN_BATCHSIZE - np.sum(labels == 1)\n bg_inds = np.where(labels == 0)[0]\n if len(bg_inds) > num_bg:\n disable_inds = npr.choice(\n bg_inds, size=(len(bg_inds) - num_bg), replace=False)\n labels[disable_inds] = -1\n\n bbox_targets = np.zeros((len(inds_inside), 4), dtype=np.float32)\n bbox_targets = _compute_targets(anchors, gt_boxes[argmax_overlaps, :])\n\n bbox_inside_weights = np.zeros((len(inds_inside), 4), dtype=np.float32)\n # only the positive ones have regression targets\n bbox_inside_weights[labels == 1, :] = np.array(cfg.TRAIN.RPN_BBOX_INSIDE_WEIGHTS)\n\n bbox_outside_weights = np.zeros((len(inds_inside), 4), dtype=np.float32)\n if cfg.TRAIN.RPN_POSITIVE_WEIGHT < 0:\n # uniform weighting of examples (given non-uniform sampling)\n num_examples = np.sum(labels >= 0)\n positive_weights = np.ones((1, 4)) * 1.0 / num_examples\n 
negative_weights = np.ones((1, 4)) * 1.0 / num_examples\n else:\n assert ((cfg.TRAIN.RPN_POSITIVE_WEIGHT > 0) &\n (cfg.TRAIN.RPN_POSITIVE_WEIGHT < 1))\n positive_weights = (cfg.TRAIN.RPN_POSITIVE_WEIGHT /\n np.sum(labels == 1))\n negative_weights = ((1.0 - cfg.TRAIN.RPN_POSITIVE_WEIGHT) /\n np.sum(labels == 0))\n bbox_outside_weights[labels == 1, :] = positive_weights\n bbox_outside_weights[labels == 0, :] = negative_weights\n\n # map up to original set of anchors\n labels = _unmap(labels, total_anchors, inds_inside, fill=-1)\n bbox_targets = _unmap(bbox_targets, total_anchors, inds_inside, fill=0)\n bbox_inside_weights = _unmap(bbox_inside_weights, total_anchors, inds_inside, fill=0)\n bbox_outside_weights = _unmap(bbox_outside_weights, total_anchors, inds_inside, fill=0)\n\n # labels\n labels = labels.reshape((1, height, width, A)).transpose(0, 3, 1, 2)\n labels = labels.reshape((1, 1, A * height, width))\n rpn_labels = labels\n\n # bbox_targets\n bbox_targets = bbox_targets \\\n .reshape((1, height, width, A * 4))\n\n rpn_bbox_targets = bbox_targets\n # bbox_inside_weights\n bbox_inside_weights = bbox_inside_weights \\\n .reshape((1, height, width, A * 4))\n\n rpn_bbox_inside_weights = bbox_inside_weights\n\n # bbox_outside_weights\n bbox_outside_weights = bbox_outside_weights \\\n .reshape((1, height, width, A * 4))\n\n rpn_bbox_outside_weights = bbox_outside_weights\n return rpn_labels, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights", "def add_anchors(annotation_sets, anchored_nodes):\n for (hash, node) in anchored_nodes.iteritems():\n filename = node.location.file.name\n if filename not in annotation_sets:\n continue\n\n annotation_set = annotation_sets[filename]\n annotation_set.add_tag('span',\n [('id', str(node.hash))],\n node.extent)", "def get_anchor_points(self):\n rows, cols = np.where(self.overlap_mask)\n self.anchor_points = tuple(zip(rows, cols))[:: self.sampling_int]\n print(\"# of anchors: {}\".format(len(self.anchor_points)))", "def _add_mapping(self, mother_element: GraphElement,\n daughter_element: GraphElement) -> None:\n pass", "def __create_anchors(self, sizes, aspects):\n k = len(sizes) * len(aspects)\n img_anchors = []\n for i in sizes:\n for j in aspects:\n img_anchors.append(\n [0, 0, 2 * i * j[0] / (j[0] + j[1]), 2 * i * j[1] / (j[0] + j[1])])\n\n self.anchors = np.asarray(img_anchors)", "def anchor(self, drifting_t, anchored_t):\n\n drifting_t = T(drifting_t)\n anchored_t = T(anchored_t)\n\n assert (drifting_t in self) and (drifting_t.drifting)\n assert anchored_t.anchored\n\n if anchored_t not in self:\n self.add_node(anchored_t)\n\n self._merge(drifting_t, anchored_t)", "def update_schedule_phase_anchors(self, new_anchors):\n for anchor in new_anchors:\n id = anchor[0] - 1\n new_anchor = anchor[1]\n try:\n list(self.get_schedule_phases())[id].change_anchor(new_anchor)\n except IndexError:\n pass # That phase wasn't found, so we can't change it's anchor. 
Ignore it", "def processJumpTable(jt_ea):", "def fix_jump(self):\n pass", "def setup_maps(self):\n super().setup_maps()\n sprite_classes = {\n \"walls\": Wall,\n \"play\": Background,\n \"exit\": Background,\n }\n island_map = TiledMap((\"images/qwerty_game_1.tmx\"), sprite_classes)\n self.add_map(island_map)", "def add_teacher(base_map, n_steps=1, danger='H', empty='F', teacher='T'):\n if n_steps == 0:\n return base_map\n base_map = np.array([list(s) for s in base_map])\n\n base_danger_mask = base_map == danger\n base_empty_mask = base_map == empty\n\n previous_mask = np.copy(base_danger_mask)\n for _ in range(n_steps):\n reach_mask = one_step_reach(previous_mask)\n previous_mask[:] |= reach_mask[:]\n\n new_map = np.copy(base_map)\n new_map[reach_mask & base_empty_mask] = teacher\n new_map_list = [''.join(row) for row in new_map]\n return new_map_list", "def add_landing(self, exit_waypoints=None, exit_times=None, add_on_der_fixed=dict(x=None,y=None,z=None,yaw=None)):\n\n exit_ID = self.exit_ID\n\n exit_time_unit = self.exit_time\n\n if exit_waypoints is None:\n # Default load\n if self.last_position is not None:\n # Set start to above drone\n exit_waypoints = dict()\n for key in self.last_position.keys():\n exit_waypoints[key] = self.last_position[key]\n\n exit_waypoints['z'] += 0.4\n\n q = np.array([self.last_orientation['w'],\n self.last_orientation['x'],\n self.last_orientation['y'],\n self.last_orientation['z']])\n\n exit_waypoints['yaw'] = transforms3d.euler.quat2euler(q,'rzyx')[0]\n\n # TODO ([email protected]) make this an input\n exit_times = np.array([exit_time_unit])\n else:\n # set to a default 0 0 0\n exit_waypoints = dict(x=0.0,y=0.0,z=0.0,yaw=0.0)\n\n # TODO ([email protected]) make this an input\n exit_times = np.array([exit_time_unit])\n elif exit_times is None:\n exit_times = np.array([exit_time_unit]*exit_waypoints['x'].shape[1])\n\n #Set last waypoints\n exit_waypoints = utils.form_waypoints_polytraj(exit_waypoints,self.qr_polytraj.n_der)\n exit_waypoints, exit_der_fixed = utils.form_entry_or_exit_waypoints(\"exit\",self.qr_polytraj, exit_waypoints, exit_ID, add_on_der_fixed=add_on_der_fixed)\n print(\"exit waypoints are: {}\".format(exit_waypoints))\n # Create new qr_polytraj\n self.qr_p_exit = traj_qr.traj_qr(exit_waypoints,\n costs=self.qr_polytraj.costs,order=self.qr_polytraj.order,\n seed_times=exit_times,\n curv_func=self.curv_func,\n der_fixed=exit_der_fixed,path_weight=None,yaw_to_traj=False)\n self.qr_p_exit.run_astro()\n\n # Create Controls and trajectory for display\n print(\"forming landing\")\n self.update_path_markers(qr_type=\"exit\")\n self.interactive_marker_worker_exit.make_controls(self.qr_p_exit.waypoints)\n acc_wp = self.get_accel_at_waypoints(\"exit\")\n self.interactive_marker_worker_exit.update_controls(self.qr_p_exit.waypoints,acc_wp=acc_wp)", "def setAnchor(self,a):\n self.anchor = a" ]
[ "0.6128773", "0.5863155", "0.5690524", "0.5404224", "0.5352024", "0.5344817", "0.5318971", "0.5303139", "0.5210181", "0.52095926", "0.51509917", "0.5122511", "0.5094019", "0.4990103", "0.4977628", "0.4973901", "0.4971675", "0.49588776", "0.49393559", "0.4937994", "0.49252173", "0.48898232", "0.48638478", "0.4780729", "0.47596642", "0.47579652", "0.4743247", "0.4715571", "0.47071743", "0.4706386" ]
0.8732982
0
Parses a list of ResourceParameterAttributeConfig from yaml data.
def ParseAttributesFromData(attributes_data, expected_param_names): raw_attributes = [ ResourceParameterAttributeConfig.FromData(a) for a in attributes_data ] registered_param_names = [a.parameter_name for a in raw_attributes] final_attributes = [] # TODO(b/78851830): improve the time complexity here. for expected_name in expected_param_names: if raw_attributes and expected_name == raw_attributes[0].parameter_name: # Attribute matches expected, add it and continue checking. final_attributes.append(raw_attributes.pop(0)) elif expected_name in IGNORED_FIELDS: # Attribute doesn't match but is being ignored. Add an auto-generated # attribute as a substitute. # Currently, it would only be the project config. attribute_name = IGNORED_FIELDS[expected_name] ignored_attribute = DEFAULT_RESOURCE_ATTRIBUTE_CONFIGS.get(attribute_name) # Manually add the parameter name, e.g. project, projectId or projectsId. ignored_attribute.parameter_name = expected_name final_attributes.append(ignored_attribute) else: # It doesn't match (or there are no more registered params) and the # field is not being ignored, error. raise InvalidResourceArgumentLists(expected_param_names, registered_param_names) if raw_attributes: # All expected fields were processed but there are still registered # attribute params remaining, they must be extra. raise InvalidResourceArgumentLists(expected_param_names, registered_param_names) return final_attributes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def FromYaml(cls, yaml_data, api_version=None):\n if not yaml_data:\n return None\n collection = registry.GetAPICollection(\n yaml_data['collection'], api_version=api_version)\n attributes = ParseAttributesFromData(\n yaml_data.get('attributes'), collection.detailed_params)\n return cls(\n resource_collection=collection.full_name,\n resource_name=yaml_data['name'],\n api_version=collection.api_version,\n disable_auto_completers=yaml_data['disable_auto_completers'],\n plural_name=yaml_data.get('plural_name'),\n **{attribute.parameter_name: attribute for attribute in attributes})", "def list(self):\n for attrname in dir(self.config):\n if PARAM_PAT.match(attrname):\n yield attrname", "def FromData(cls, data):\n if not data:\n return None\n\n attribute_name = data['attribute_name']\n parameter_name = data['parameter_name']\n help_text = data['help']\n completion_id_field = data.get('completion_id_field', None)\n completion_request_params_list = data.get('completion_request_params', [])\n completion_request_params = {\n param.get('fieldName'): param.get('value')\n for param in completion_request_params_list\n }\n\n # Add property fallthroughs.\n fallthroughs = []\n prop = properties.FromString(data.get('property', ''))\n if prop:\n fallthroughs.append(deps_lib.PropertyFallthrough(prop))\n default_config = DEFAULT_RESOURCE_ATTRIBUTE_CONFIGS.get(attribute_name)\n if default_config:\n fallthroughs += [\n f for f in default_config.fallthroughs if f not in fallthroughs]\n # Add fallthroughs from python hooks.\n fallthrough_data = data.get('fallthroughs', [])\n fallthroughs_from_hook = [\n deps_lib.Fallthrough(util.Hook.FromPath(f['hook']), hint=f['hint'])\n for f in fallthrough_data\n ]\n fallthroughs += fallthroughs_from_hook\n return cls(\n name=attribute_name,\n help_text=help_text,\n fallthroughs=fallthroughs,\n completion_id_field=completion_id_field,\n completion_request_params=completion_request_params,\n parameter_name=parameter_name)", "def load_yaml_file(yaml_file):\n try:\n # Get the configuration parameters which contain the region, vpc name, template filename, VPC CIDR blocks\n s = open(yaml_file).read()\n config = list(yaml.load_all(s))[0]\n\n except Exception as e:\n # We're expecting the user parameters to be encoded as YAML\n # so we can pass multiple values. 
If the YAML can't be decoded\n # then return failure with a helpful message.\n print(e)\n raise Exception('Input configuration parameters could not be decoded as YAML')\n\n return config", "def _yamlSpeciallyHandledAttributes(self):\n return []", "def load_config(config_path):\n with open(config_path, \"r\") as f:\n conf = yaml.load(f)\n\n params = conf['param']\n return params", "def load_parameters(self):\n with open(INTERNAL_DATA_DIR / self.name_default_params, 'r') as f:\n return yaml.load(f, Loader=yaml.FullLoader)", "def parse_configurations(payload):\n configurations = []\n\n for conf in payload:\n configurations.append((conf, payload[conf]))\n\n return configurations", "def read_attributes(self, dataset):\n if 'attributes' in self.configs:\n for key, value in self.configs['attributes'].items():\n setattr(dataset, key, value)", "def parse_yaml_config(config_file):\n\n def construct_keras_model(loader, node):\n from tensorflow.keras.models import load_model\n\n model_str = loader.construct_scalar(node)\n return load_model(model_str)\n\n yaml.add_constructor('!keras_model', construct_keras_model, Loader=yaml.SafeLoader)\n\n print('Loading configuration from', config_file)\n with open(config_file) as file:\n parsed_config = yaml.safe_load(file)\n return parsed_config", "def test_takes_param_list_attributes(self):\n class Test(pyperry.Base):\n def _config(cls):\n cls.attributes('id', 'name', 'poop')\n\n self.assertEqual(Test.defined_attributes, set(['id', 'name', 'poop']))", "def _read_attributes(root):\n output_list = []\n for _, value in enumerate(root[0][2]):\n attr = Attribute(value)\n output_list.append(attr)\n return output_list", "def config_params0(data,parameter):\n model = []\n #Range of value of p\n acf = sm.graphics.tsa.acf(data.diff().dropna())\n for i in range(len(acf)):\n acf[i] = abs(acf[i]*10)\n if (ceil(acf[i])) <= 2:\n p = range(ceil(acf[i])-1,ceil(acf[i])+2)\n break\n\n #range of value of q\n pacf = sm.graphics.tsa.pacf(data.diff().dropna())\n for i in range(len(pacf)):\n pacf[i] = abs(pacf[i]*10)\n if (ceil(pacf[i])) <= 2:\n q = range(ceil(pacf[i])-1,ceil(pacf[i])+2)\n break\n\n\t# define config lists\n p_params = p\n d_params = parameter['d']\n q_params = q\n m_params = parameter['m']\n #P_params = p\n #D_params = [0, 1]\n #Q_params = q\n \n pdq_m = list(itertools.product(p_params, d_params, q_params,m_params)) #Generate all different combinations of p, q and q triplets\n params = [[(x[0], x[1], x[2]),(x[0], x[1], x[2], x[3])] for x in pdq_m]\n return params", "def yamlConfigForParsingPlugins():\n parameters = \"\"\"\njoinPaths: !joinPaths\n - a\n - b\n - \"c\"\nrunPageTemplates: !findRunPageTemplates\n - \"templates\"\nbcrypt: !bcrypt\n bcryptLogRounds: 12\n user: \"pass\"\nbcryptNoUser: !bcrypt\n bcryptLogRounds: 12\n null: null\nsecretKey: !secretKey 12345\nsecretKeyGen: !secretKey null\n \"\"\"\n # Load parameters\n parameters = yaml.load(parameters, Loader = yaml.SafeLoader)\n return parameters", "def read_params(self):\n detector_params = namedtuple('params', ['image_path', 'circle_region',\n 'vis_circle_det',\n 'template_path', 'score_thres',\n 'vis_det'])\n with open(self.yaml_path, 'r') as stream:\n try:\n parameters = yaml.load(stream)\n except:\n raise FileNotFoundError\n\n detector_params.image_path = parameters.get('image_path', None)\n if detector_params.image_path is None:\n raise ValueError('Please provide path to image.')\n\n detector_params.circle_region = read_circle_region_params(parameters)\n detector_params.vis_circle_det = 
read_vis_circle_det(parameters)\n\n detector_params.template_path = parameters.get('template_path', None)\n if detector_params.template_path is None:\n raise ValueError('Please provide path to template image.')\n\n detector_params.vis_det = parameters.get('vis_det', True)\n detector_params.score_thres = parameters.get('score_thres', 0.3)\n\n return detector_params", "def __init__ (self, config_yaml):\n configdef = yaml.safe_load(io.StringIO(config_yaml)) \n\n if \"filters\" not in configdef:\n configdef = dict(filters=[configdef])\n\n self._configs = []\n\n for definition in configdef[\"filters\"]:\n config = Bunch( valid_from = None\n , volume_follows = False\n , copy_last_price = False\n , copy_last_volume = False\n , qualifier_include_filters = []\n , qualifier_exclude_filters = []\n , exclude_filters = [] )\n\n if \"filter\" in definition and definition[\"filter\"] != None:\n for exclude_filter in definition[\"filter\"]:\n parts = exclude_filter.split(\",\")\n if parts[0] == \"floor\":\n config.exclude_filters.append(FloorFilter(float(parts[1]), \"price\"))\n elif parts[0] == \"cap\":\n config.exclude_filters.append(CapFilter(float(parts[1]), \"price\")) \n elif parts[0] == \"step\":\n config.exclude_filters.append(StepFilter(int(parts[1]), float(parts[2]), float(parts[3]), \"price\"))\n else:\n raise Exception(\"Unknown filter (%s)\" % (parts[0])) \n \n if \"remove\" in definition and definition[\"remove\"] != None:\n for exclude_filter in definition[\"remove\"]:\n config.qualifier_exclude_filters.append(QualifierFilter(exclude_filter))\n \n if \"allow\" in definition and definition[\"allow\"] != None:\n for include_filter in definition[\"allow\"]:\n config.qualifier_include_filters.append(QualifierFilter(include_filter))\n\n if \"volFollows\" in definition: config.volume_follows = definition[\"volFollows\"] \n if \"copyLast\" in definition and definition[\"copyLast\"] != None:\n config.copy_last_price = definition[\"copyLast\"] \n config.copy_last_volume = definition[\"copyLast\"] \n if \"volumeLimit\" in definition and definition[\"volumeLimit\"] != None:\n config.exclude_filters.append(CapFilter(definition[\"volumeLimit\"], \"volume\"))\n if \"validFrom\" in definition and definition[\"validFrom\"] != None:\n valid_from = datetime.datetime.strptime(definition[\"validFrom\"], \"%Y-%m-%d %H:%M:%S\")\n valid_from.replace(tzinfo=pytz.utc)\n config.valid_from = common.Time.tick(valid_from)\n if \"weekTimezone\" in definition and definition[\"weekTimezone\"] != None:\n config.exclude_filters.append(WeekendFilter(definition[\"weekTimezone\"], definition[\"weekEnd\"], definition[\"weekStart\"]))\n\n self._configs.append(config)\n \n self._config_index = 0\n self._config_count = len(self._configs)", "def test_expected_configurations_parameters(self):\n allowed_attrs = [\"configuration-parameters\"]\n instance_info.dbaas.configuration_parameters.parameters(\n instance_info.dbaas_datastore,\n instance_info.dbaas_datastore_version)\n resp, body = instance_info.dbaas.client.last_response\n attrcheck = AttrCheck()\n config_parameters_dict = json.loads(body.decode())\n attrcheck.contains_allowed_attrs(\n config_parameters_dict, allowed_attrs,\n msg=\"Configurations parameters\")\n # sanity check that a few options are in the list\n config_params_list = config_parameters_dict['configuration-parameters']\n config_param_keys = []\n for param in config_params_list:\n config_param_keys.append(param['name'])\n expected_configs = self.expected_default_datastore_configs()\n expected_config_params = 
expected_configs.get('parameters_list')\n # check for duplicate configuration parameters\n msg = \"check for duplicate configuration parameters\"\n assert_equal(len(config_param_keys), len(set(config_param_keys)), msg)\n for expected_config_item in expected_config_params:\n assert_true(expected_config_item in config_param_keys)", "def load_config(path: str) -> None:\n with open(path, \"r\") as file:\n data = yaml.safe_load(file.read())\n\n if data is None:\n data = {}\n\n if not isinstance(data, dict):\n raise RuntimeError(f\"The configuration file: {path} \" f\"does not contain a top-level mapping\")\n\n for name, value in data.items():\n parameter = Parameter.by_name.get(name)\n if parameter is None:\n raise RuntimeError(f\"Unknown parameter: {name} \" f\"specified in the configuration file: {path}\")\n\n if isinstance(value, str):\n assert parameter.parser is not None\n try:\n value = parameter.parser(value)\n except BaseException:\n raise RuntimeError( # pylint: disable=raise-missing-from\n f\"Invalid value: {value} \"\n f\"for the parameter: {name} \"\n f\"specified in the configuration file: {path}\"\n )\n\n parameter.value = value", "def load_params_file(filename):\n with open(filename, 'r') as f:\n params = yaml.safe_load(f)\n return params", "def read_params(config_path):\n with open(config_path) as yaml_file:\n config = yaml.safe_load(yaml_file)\n return config", "def _parse_args(self, cmd_line_list):\n parser = ArgumentParser()\n parser.add_argument('--yaml', help='yaml file specifying config to run')\n args = parser.parse_args(cmd_line_list)\n return vars(args)", "def read_params(config_path):\n\n with open(config_path) as yaml_file:\n config = yaml.safe_load(yaml_file)\n return config", "def _parse_yaml_configs(args, anon_component_prefix=\"anon_app\"):\n # Configuration files are basically nested dictionaries and the command-line arguments\n # are a list with each element being a dictionary. If the dict in the args has the key\n # 'class', then it is anonymous and we should just give it a sequential unique name to\n # ensure it is run. 
If, however, it does not, then we should assume that it's a NAMED\n # configuration and so we can actually use that to overwrite/modify the configurations\n # pulled in from a file.\n\n new_configs = {}\n for arg in args:\n try:\n arg = yaml.load(arg)\n except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e:\n raise ValueError(\"error parsing manual configuration: %s\\nError:%s\" % (arg, e))\n\n # If this config is anonymous, give it a unique name and add it to configs\n # since it couldn't possibly overwrite another config entry.\n # NOTE: if user specified a 'name' entry directly, we will still take that later on...\n if 'class' in arg:\n # TODO: perhaps register these names somewhere to ensure uniqueness?\n global __scale_client_n_anon_apps_added__\n unique_key = anon_component_prefix + str(__scale_client_n_anon_apps_added__)\n __scale_client_n_anon_apps_added__ += 1\n new_configs[unique_key] = arg\n else:\n try:\n new_configs.update(arg)\n except TypeError as e:\n raise ValueError(\"error in your manual configuration: %s\\n\"\n \"couldn't be interpreted as a dict due to error: %s\" % (arg, e))\n\n return new_configs", "def __get_attributes(self, config_details):\n attributes = []\n\n if 'attribute' in config_details:\n if type(config_details['attribute']) == dict:\n attributes.append(config_details['attribute'])\n else:\n attributes = config_details['attribute']\n\n return attributes", "def __mapAzResourceConfigObject(self, azResourceConfigObj: Dict) -> List[Dict[str, str]]:\n resources = []\n for armType in azResourceConfigObj:\n armResources = azResourceConfigObj[armType]\n\n # Flatten the structure by compiling all the resources corresponding to each ARM type.\n mappedResources = [{ARM_TYPE: armType,\n **armResources[instance]} for instance in armResources]\n resources.extend(mappedResources)\n\n return resources", "def parse_attributes(data, attributes):\n result = {}\n for key, val in attributes.items():\n if type(val) is list:\n attr_val = get_tree_data(data, val)\n else:\n attr_val = data.get(val, None)\n if attr_val is not None:\n result[key] = attr_val\n return result", "def load_attributes():\n\n # <attribute_id> <attribute_name>\n attributes_file = open(PROJECT_ROOT +'/data/attributes.txt').readlines()\n attributes_file = [i.strip().split() for i in attributes_file]\n\n # <certainty_id> <certainty_name>\n certainties_file = open(PROJECT_ROOT +'/data/CUB_200_2011/attributes/certainties.txt').readlines()\n certainties_file = [i.strip().split() for i in certainties_file]\n\n # <image_id> <attribute_id> <is_present> <certainty_id> <time>\n labels_file = open(PROJECT_ROOT +'/data/CUB_200_2011/attributes/image_attribute_labels.txt').readlines()\n labels_file = [i.strip().split() for i in labels_file]\n\n attribute_ids = {}\n for i in attributes_file:\n attribute_ids[i[1]] = int(i[0])\n\n certainty_ids = {}\n for i in certainties_file:\n certainty_ids[i[1]] = int(i[0])\n\n label_ids = {}\n for i in labels_file:\n label_ids[(int(i[0]), int(i[1]))] = list(map(lambda x:int(float(x)), i[2:]))\n\n return attribute_ids, certainty_ids, labels_file, label_ids", "def applyAttrs(data, attrs):\n\tassert(len(data[0]) == len(attrs) + 1)\n\tnum_attrs = len(attrs)\n\tnum_instances = len(data)\n\n\tout = [None] * len(data)\n\tfor row in range(num_instances):\n\t\tinstance = data[row]\n\t\tout[row] = [instance[0]] + ['?' if instance[i+1] == '?' 
else attrs[i]['vals'][int(instance[i+1])] for i in range(num_attrs)]\n\n\treturn out", "def _parse_pipeline_from(self, config_json: str):\n try:\n configs: List[Dict] = json.loads(config_json)\n augmentors = [\n self._get_augmentor(config[\"type\"], config[\"params\"])\n for config in configs\n ]\n rates = [config[\"prob\"] for config in configs]\n except Exception as e:\n raise ValueError(\"Failed to parse the augmentation config json: %s\" % str(e))\n return augmentors, rates", "def parse_configurations(payload):\n # Initialize variables.\n index = 1\n configurations = []\n\n # Get the configurations from the payload.\n num_configurations = payload[index]\n index += 1\n for i in range(num_configurations):\n sensor_id = payload[index]\n index += 1\n value = int.from_bytes(bytearray(payload[index:index + 4]), \"big\")\n index += 4\n configurations.append((sensor_id, value))\n\n return configurations" ]
[ "0.5625115", "0.5370975", "0.5180128", "0.51600766", "0.5148526", "0.5116538", "0.50916874", "0.50824744", "0.5073696", "0.50610805", "0.50529665", "0.50239223", "0.49906877", "0.49881876", "0.49795628", "0.49705", "0.49678966", "0.49577042", "0.4937298", "0.49367967", "0.49333644", "0.49265626", "0.49003685", "0.48908076", "0.4884593", "0.48826063", "0.4865977", "0.48652598", "0.48628512", "0.4851016" ]
0.65686333
0
Updates suspicion level of all users
def _update_suspicion_1(self):

    for bucket in self.used_buckets:
        multiplier = 1 if bucket.attacked else 0
        for user in bucket.users:
            user.suspicion += multiplier
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_suspicion_1(self):\n\n for bucket in self.buckets:\n multiplier = 1 if bucket.attacked else 0\n for user in bucket.users:\n user.suspicion += multiplier", "def _update_suspicion_2(self):\n\n for bucket in self.buckets:\n multiplier = 1 if bucket.attacked else -1\n for user in bucket.users:\n user.suspicion += (1 / len(bucket)) * multiplier", "def _update_suspicion_0(self):\n\n for bucket in self.buckets:\n multiplier = 1 if bucket.attacked else 0\n for user in bucket.users:\n user.suspicion += (1 / len(bucket)) * multiplier", "def _update_suspicion_2(self):\n\n for bucket in self.used_buckets:\n multiplier = 1 if bucket.attacked else -1\n for user in bucket.users:\n user.suspicion += (1 / len(bucket)) * multiplier", "def _update_suspicion_0(self):\n\n for bucket in self.used_buckets:\n multiplier = 1 if bucket.attacked else 0\n for user in bucket.users:\n user.suspicion += (1 / len(bucket)) * multiplier", "async def silentlevels(self, ctx):\n database = db[str(ctx.guild.id)]\n player = ctx.message.author\n guild = ctx.guild\n doc = database.find_one({\"user_id\": player.id, \"server\": guild.id})\n\n try:\n notifications = doc.get('notifications')\n except KeyError:\n notifications = True\n\n if notifications is None or notifications:\n database.update_one({\"user_id\": player.id, \"server\": guild.id},\n {\"$set\": {\"notifications\": False}})\n await ctx.send(f\"{player.display_name} will no longer receive rank up notifications.\")\n return\n\n if notifications is False:\n database.update_one({\"user_id\": player.id, \"server\": guild.id},\n {\"$set\": {\"notifications\": True}})\n await ctx.send(f\"{player.display_name} will now receive rank up notifications.\")\n return", "async def update_level():\n await ex.conn.execute(f\"UPDATE currency.Levels SET {command} = $1 WHERE UserID = $2\", level, user_id)", "def update_user():", "def on_update(self):\n\t\tusers = frappe.get_all('User', filters={'role_profile_name': self.name})\n\t\troles = [role.role for role in self.roles]\n\t\tfor d in users:\n\t\t\tuser = frappe.get_doc('User', d)\n\t\t\tuser.set('roles', [])\n\t\t\tuser.add_roles(*roles)", "def upgrade(message, target, num):\n return\n users = hf.get_users()\n\n for user in users:\n if user[\"name\"] != target:\n continue\n try:\n user[\"approval_level\"] = int(num)\n except Exception:\n message.reply(\":x: That's not a number, ya dingus. :)\")\n return\n\n hf.save_users(users)\n\n message.reply(\"Successfully upgraded user {} to approval level \"\n \"{}.\".format(target, num))", "def test_userprofile_modification(self):\n self.user.userprofile.save(update_fields=['enabled'])\n self.assertTrue(AuditTrail.objects.count() >= 2)\n self.assertEqual(\n AuditTrail.objects.last().level, AuditTrail.LEVEL_INFO)", "def update_user():\n #TODO user update \n pass", "def update_users(self):\n conn = sqlite3.connect(self.__DB)\n cursor = conn.cursor()\n\n users_data = []\n unsaved_histories_data = []\n for key, user in self.__users.items(): # here, key it's actually users id\n users_data.append((user.get_balance(), key))\n for register in user.get_history():\n register_str, is_saved = register\n if not is_saved:\n unsaved_histories_data.append((register_str, key))\n\n cursor.executemany('''\n UPDATE users\n SET balance=?\n WHERE id=?;\n ''', users_data)\n\n cursor.executemany('''\n INSERT INTO history (register, owner)\n VALUES (?, ?);\n ''', unsaved_histories_data)\n\n conn.commit()\n conn.close()\n\n self.load_users() # RELOADING!!! 
Pew, pew, pew, pew, pew...", "def patch(self, username, level):\n try:\n UserService.set_user_mapping_level(username, level)\n return {\"Success\": \"Level set\"}, 200\n except UserServiceError as e:\n return {\"Error\": str(e).split(\"-\")[1], \"SubCode\": str(e).split(\"-\")[0]}, 400", "async def level_up(user, channel):\n server = db[str(user.guild.id)]\n stats = list(server.find({'id': user.id}))\n lvl_start = stats[-1]['level']\n print(lvl_start)\n experience = stats[-1]['experience']\n x = 35\n cnt = 1\n while (x < experience):\n x = 2 * x + 10\n cnt += 1\n\n if experience >= x:\n lvl_end = cnt - 1\n else:\n lvl_end = lvl_start\n print(lvl_end)\n\n if lvl_start < lvl_end:\n new_stats = {\"$set\": {'level': lvl_end}}\n server.update_one(stats[-1], new_stats)\n ls = lvl_end * 150\n server = db[str(user.guild.id)]\n stats = list(server.find({'id': user.id}))\n cred = stats[-1]['credits'] + ls\n new_stats = {\"$set\": {'credits': cred}}\n server.update_one(stats[-1], new_stats)\n embed = discord.Embed(title=f'{user} has leveled up to {lvl_end}.', description=f'You have been given\\\n{ls} tears for your active-ness.\\n\\\nSaving {ls} tears in your vault of tears.', color=discord.Color.teal())\n embed.set_footer(text='😭')\n await channel.send(embed=embed)", "def level_up(self):\n self.user_access_level += 1", "def update_all_users():\n\tfor user in User.query.all():\n\t\tadd_or_update_user(user.name)", "async def set_level(user_id, level, command):\n async def update_level():\n \"\"\"Updates a user's level.\"\"\"\n await ex.conn.execute(f\"UPDATE currency.Levels SET {command} = $1 WHERE UserID = $2\", level, user_id)\n\n count = ex.first_result(await ex.conn.fetchrow(f\"SELECT COUNT(*) FROM currency.Levels WHERE UserID = $1\", user_id))\n if not count:\n await ex.conn.execute(\"INSERT INTO currency.Levels VALUES($1, NULL, NULL, NULL, NULL, 1)\", user_id)\n await update_level()\n else:\n await update_level()", "def updateSkillForPlayer(self, userid, name, level):\r\n if not isinstance(userid, int):\r\n userid = self.getUserIdFromSteamId(userid)\r\n self.execute(\"UPDATE Skill SET level=? WHERE UserID=? 
AND name=?\", level, userid, name)", "def updateBuddy(self,username,online,evilness,signontime,idletime,userclass,away):\n print \"status changed for\",username", "def update_rankings(conn, users):\n users.sort(key=lambda user: user[\"rank\"])\n # Set tau and draw_probability to more reasonable values than the defaults\n if config.COMPETITION_FINALS_PAIRING:\n trueskill.setup(tau=0.0, draw_probability=0.001)\n else:\n trueskill.setup(tau=0.008, draw_probability=0.001)\n teams = [[trueskill.Rating(mu=user[\"mu\"], sigma=user[\"sigma\"])]\n for user in users]\n new_ratings = trueskill.rate(teams)\n for user, rating in zip(users, new_ratings):\n new_score = rating[0].mu - 3*rating[0].sigma\n conn.execute(model.bots.update().where(\n (model.bots.c.user_id == user[\"user_id\"]) &\n (model.bots.c.id == user[\"bot_id\"]) &\n # Filter on version so we don't update the score for an old\n # version of the bot\n (model.bots.c.version_number == user[\"version_number\"])\n ).values(\n mu=rating[0].mu,\n sigma=rating[0].sigma,\n score=new_score,\n ))\n\n # Update the hackathon scoring tables\n hackathons = conn.execute(sqlalchemy.sql.select([\n model.hackathons.c.id.label(\"hackathon_id\"),\n ]).select_from(\n model.hackathon_participants.join(\n model.hackathons,\n (model.hackathon_participants.c.hackathon_id == model.hackathons.c.id) &\n (model.hackathon_participants.c.user_id == user[\"user_id\"])\n )\n ).where(\n (model.hackathons.c.start_date <= sqlalchemy.sql.func.now()) &\n (model.hackathons.c.end_date > sqlalchemy.sql.func.now())\n ))\n\n for hackathon in hackathons.fetchall():\n hackathon_id = hackathon[\"hackathon_id\"]\n try:\n # Try and insert\n insert_values = {\n \"hackathon_id\": hackathon_id,\n \"user_id\": user[\"user_id\"],\n \"bot_id\": user[\"bot_id\"],\n \"score\": new_score,\n \"mu\": rating[0].mu,\n \"sigma\": rating[0].sigma,\n \"version_number\": user[\"version_number\"],\n \"language\": user[\"language\"],\n \"games_played\": 1,\n }\n\n conn.execute(\n model.hackathon_snapshot.insert().values(\n **insert_values))\n except sqlalchemy.exc.IntegrityError:\n # Row exists, update\n condition = ((model.hackathon_snapshot.c.hackathon_id ==\n hackathon_id) &\n (model.hackathon_snapshot.c.user_id ==\n user[\"user_id\"]) &\n (model.hackathon_snapshot.c.bot_id ==\n user[\"bot_id\"]))\n conn.execute(\n model.hackathon_snapshot.update().values(\n score=new_score,\n mu=rating[0].mu,\n sigma=rating[0].sigma,\n version_number=user[\"version_number\"],\n language=user[\"language\"],\n games_played=model.hackathon_snapshot.c.games_played + 1,\n ).where(condition))", "async def level(self, ctx, user: discord.User = None):\n settings = config.load_settings()\n if settings['guilds'][str(ctx.guild.id)][\"leveling\"] is True:\n guild = ctx.guild.id\n if user is None:\n id = ctx.author.id\n name = ctx.author.display_name\n else:\n id = user.id\n name = user.display_name\n xp = config.load_xp()\n exp = 0\n level = 0\n if str(guild) in xp['guilds']:\n if str(id) in xp['guilds'][str(guild)]:\n exp = xp['guilds'][str(guild)][str(id)]['xp']\n level = xp['guilds'][str(guild)][str(id)]['level']\n await ctx.send(name + \" is currently level: \" + str(level) + \" with \" + str(exp) + \" experience!\")\n else:\n await ctx.send(\"leveling is currently disabled on this server!\")", "async def setwarns(self, ctx, user: discord.Member, warnings: int = None):\r\n server = ctx.message.guild\r\n await self._create_warn(server, user)\r\n dataIO.save_json(self.JSON, self.data)\r\n if not warnings:\r\n del 
self.data[str(server.id)][\"user\"][str(user.id)]\r\n dataIO.save_json(self.JSON, self.data)\r\n await ctx.send(\"**{}'s** warnings have been reset\".format(user.name))\r\n return\r\n if warnings == 0:\r\n del self.data[str(server.id)][\"user\"][str(user.id)]\r\n dataIO.save_json(self.JSON, self.data)\r\n await ctx.send(\"**{}'s** warnings have been reset\".format(user.name))\r\n return\r\n if warnings <= 0:\r\n await ctx.send(\"You can set warnings to 1-4 only :no_entry:\")\r\n return\r\n if warnings >= 5:\r\n await ctx.send(\"You can set warnings to 1-4 only :no_entry:\")\r\n return\r\n self.data[str(server.id)][\"user\"][str(user.id)][\"warnings\"] = warnings\r\n dataIO.save_json(self.JSON, self.data)\r\n await ctx.send(\"**{}'s** warnings have been set to **{}**\".format(user.name, warnings))", "async def add_player_status(self, user_id):\n await self.delete_player_status(user_id)\n await ex.conn.execute(\"INSERT INTO blackjack.currentstatus (userid, stand, total) VALUES ($1, $2, $2)\", user_id, 0)", "def test_set_user_status(self):\n pass", "def update_user_login(sender, user, **kwargs):\n user.userlogin_set.create(timestamp=timezone.now())\n user.save()\n\n bonus_wallet = BonusWallet.objects.filter(user=user)\n if not bonus_wallet.exists():\n bonus_wallet = BonusWallet.objects.create(user=user)\n bonus_wallet.save()\n else:\n bonus_wallet = bonus_wallet[0]\n\n login_bonus = LoginBonus.objects.create(wallet=bonus_wallet)\n bonus_wallet.value += Decimal(login_bonus.value)\n bonus_wallet.save()", "def upgrage_level(self):\n print('level is upgraded on one point')\n self.level += 1", "def update_info(self):\n\n with Player.client as client:\n db = client.game_db\n user = db.players.find_one({\"id\": self.id})\n db.players.update({\"_id\": user[\"_id\"]}, {\"$inc\": {\"games_num\": 1},\n \"$set\": {\"rating\": self.rating}})", "def user_list_update(self):\n\t\tclient_log.debug(f'Запрос списка известных пользователей {self.username}')\n\t\treq = {\n\t\t\tACTION: USERS_REQUEST,\n\t\t\tTIME: time.time(),\n\t\t\tACCOUNT_NAME: self.username\n\t\t}\n\t\twith socket_lock:\n\t\t\tsend_message(self.transport, req)\n\t\t\tans = get_message(self.transport)\n\t\tif RESPONSE in ans and ans[RESPONSE] == 202:\n\t\t\tself.database.add_users(ans[LIST_INFO])\n\t\telse:\n\t\t\tclient_log.error('Не удалось обновить список известных пользователей.')", "async def levelup(self, ctx, value: bool):\n await queries.update_setting(ctx, \"guild_settings\", \"levelup_messages\", value)\n self.bot.cache.levelupmessage[str(ctx.guild.id)] = value\n if value:\n await util.send_success(ctx, \"Level up messages are now **enabled**\")\n else:\n await util.send_success(ctx, \"Level up messages are now **disabled**\")" ]
[ "0.71854013", "0.6843317", "0.6716192", "0.670385", "0.66000074", "0.6202914", "0.62022865", "0.60776985", "0.595668", "0.59536815", "0.59249663", "0.5896137", "0.58041495", "0.574681", "0.57329714", "0.5728912", "0.56205404", "0.557669", "0.55413103", "0.5537747", "0.5410775", "0.5398757", "0.5382766", "0.5381747", "0.5341746", "0.5338988", "0.53190196", "0.53145415", "0.53052354", "0.5295403" ]
0.7047431
1
Returns the similarity score between the query data and a snack
def get_score(snack_data, percentage_data, snack, snack_query, protein_query, carb_query, fat_query):
    start_time = time.time()

    #Load necessary data
    """
    with open ('../../../Data/percentagesDict.pickle', 'rb') as f:
        percentage_data = pickle.load(f)
    with open ('../../../Data/FINAL_snacks_data.pickle', 'rb') as f:
        snack_data = pickle.load(f)"""

    #Set constants
    LOW_FAT = .3
    HIGH_FAT = .6
    LOW_CARB = .1
    HIGH_CARB = .2
    LOW_PRO = .2
    HIGH_PRO = .4

    #Convert macro percentages to 'high', 'med', 'low' categories
    fat = percentage_data[snack]['fat']
    protein = percentage_data[snack]['protein']
    carb = percentage_data[snack]['carb']

    if fat > HIGH_FAT:
        fat_content = 'high'
    elif fat < LOW_FAT:
        fat_content = 'low'
    else:
        fat_content = 'med'

    if protein > HIGH_PRO:
        protein_content = 'high'
    elif protein < LOW_PRO:
        protein_content = 'low'
    else:
        protein_content = 'med'

    if carb > HIGH_CARB:
        carb_content = 'high'
    elif carb < LOW_CARB:
        carb_content = 'low'
    else:
        carb_content = 'med'

    #Set x values
    x1 = fat_query == fat_content
    x2 = carb_query == carb_content
    x3 = protein_query == protein_content
    x4 = cooccur(snack_data, snack, snack_query)
    x5 = snack_data[snack]['rating']

    w1 = 1
    w2 = 1
    w3 = 1
    w4 = 1
    w5 = 1

    #print('x1: {}, x2: {}, x3: {}, x4: {}, x5: {}'.format(x1, x2, x3, x4, x5))
    #print("get_score() time: --- %s seconds ---" % (time.time() - start_time))

    return w1*x1 + w2*x2 + w3*x3 + w4*x4 + w5*x5
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_score(result):\n sample1=result['Sample1']\n sample2=result['Sample2']\n string1=paragraph_to_list(sample1)\n string2=paragraph_to_list(sample2)\n \n return round( strings_similarity(string1, string2), 2)\n #method_dict=strings_count_compare(string1, string2)/ max(len(string1), len(string2))\n #return round(0.5*(method_difflab+method_dict), 2)", "def find_food(snack_data, query):\n\n\tstart_time = time.time()\n\n\tdocs =[title for title in snack_data.keys()]\n\tdocs.insert(0, query)\n\t\n\tvectorizer=TfidfVectorizer()\n\tmatrix=vectorizer.fit_transform(docs)\n\tcs=cosine_similarity(matrix[0], matrix)\n\n\tsorted_row = np.argsort(cs, axis=1)[0][::-1]\n\n\tprint(\"find_food() time: --- %s seconds ---\" % (time.time() - start_time))\n\n\treturn docs[sorted_row[1]]", "def similarity_score(self, lhs, rhs):\n pass", "def dataset_query_similarity(query_file, dataset_root, sim_measure):\n\n scorer = SimilarityScorer(query_file)\n score_func = getattr(scorer, sim_measure)\n dataloader = DirectoryTxtDataLoader(dataset_root)\n sentence_iter = dataloader.sentence_iter()\n all_scores = [score_func(sent.split(' ')) for sent in sentence_iter]\n return np.mean(all_scores)", "def __score(self, name, summary):\n score = 0\n for queryTerm in self.__query:\n if queryTerm.lower() in name.lower():\n score += 4\n if queryTerm.lower() == name.lower():\n score += 4\n \n if queryTerm.lower() in summary.lower():\n if QRegExp(r'\\b{0}\\b'.format(QRegExp.escape(queryTerm)),\n Qt.CaseInsensitive).indexIn(summary) != -1:\n # word match gets even higher score\n score += 2\n else:\n score += 1\n \n return score", "def score(\n self, query_meta: Dict, old_match_scores: Dict, match_meta: Dict\n ) -> \"np.ndarray\":\n from Levenshtein import distance\n\n new_scores = [\n (\n match_id,\n -distance(query_meta['text'], match_meta[match_id]['text']),\n )\n for match_id, old_score in old_match_scores.items()\n ]\n return np.array(\n new_scores,\n dtype=[(self.COL_MATCH_ID, np.object), (self.COL_SCORE, np.float64)],\n )", "def score(self, index, query, doc_id):\n query_vec = self._get_query_representation(query, index)\n doc_vec = self._get_document_representation(doc_id, index)\n return self._similarity(query_vec, doc_vec)", "def score(self, model, probe):\n return scipy.spatial.distance.euclidean(model, probe)", "def similarity_search(self):\n self.ssr = {gene: self.ssw.get_phenotypically_similar_genes(phenotypes, taxon=self.taxon) for gene, phenotypes in self.gene2phenotype_associations.items()}\n self.results = [ssr.get_results() for ssr in self.ssr.values()]\n self.phenogene_score = reduce(lambda x, y: pd.merge(x, y, on='id').set_index('id').sum(axis=1), self.results)", "def get_scores(self, query):\n score = np.zeros(self.data['corpus_size'])\n doc_len = np.array(self.data['doc_len'])\n for q in query:\n q_freq = np.array([(doc.get(q) or 0) for doc in self.data['doc_freqs']])\n score += (self.data['idf'].get(q) or 0) * (q_freq * (self.data['k1'] + 1) /\n (q_freq + self.data['k1'] * (\n 1 - self.data['b'] + self.data['b'] * doc_len /\n self.data['average_docs_len'])))\n return score", "def get_score(self,sentence_1, sentence_2):\n\t return self.DELTA * self.semantic_similarity(sentence_1, sentence_2, True) + (1.0 - self.DELTA) * self.word_order_similarity(sentence_1, sentence_2)", "def similarityQuestions(self, row):\n q1 = self.stemQuestion(row[3])\n q2 = self.stemQuestion(row[4])\n \n # Compute similarity of the two questions#\n sim = seqmatch(None, q1, q2).ratio()\n #sim = self.computeSimilarity(q1, q2)\n 
if sim > 0.6: #we guess they are duplicate questions\n if row[5] == \"1\": #true positive\n self.tp += 1\n else: #false positive\n self.fp += 1\n else: #we guess they are different questions\n if row[5] == \"0\": #true negative\n self.tn += 1\n else: #false negative\n self.fn += 1", "def _get_similarity_score(self, dict1, dict2):\n try:\n majorScoreDeterminer1 = ['primaryGenreId']\n majorScoreDeterminer2 = ['genreIds']\n Score = 0 # Base Score\n for items in majorScoreDeterminer2:\n\n for item1 in self._get_app_param_info(dict1, resultCount=1, resultKey=items):\n if item1 in self._get_app_param_info(dict2, resultCount=1, resultKey=items):\n if Score == 0: # Add 50% base score for this category.\n Score += 2 * .5\n Score += 2 * .5 / len(self._get_app_param_info(dict1, resultCount=1, resultKey=items))\n\n for items in majorScoreDeterminer1:\n if str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) in str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) and str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) and str(\n self._get_app_param_info(dict1, resultCount=1, resultKey=items)):\n Score += (3 / len(majorScoreDeterminer1))\n\n nameMatchScore = difflib.SequenceMatcher(None,\n self._get_app_param_info(dict1, resultCount=1,\n resultKey='trackName'),\n self._get_app_param_info(dict2, resultCount=1,\n resultKey='trackName')).ratio()\n Score += nameMatchScore\n\n minorScoreDeterminer = ['isGameCenterEnabled', 'languageCodesISO2A', 'contentAdvisoryRating', 'artistId',\n 'formattedPrice']\n\n for items in minorScoreDeterminer:\n if items == \"formattedPrice\":\n if str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) == \"Free\" and str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) == \"Free\":\n Score += (4 / (len(minorScoreDeterminer)))\n elif str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) == \"Free\" and str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) != \"Free\":\n continue\n elif str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) != \"Free\" and str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) == \"Free\":\n continue\n elif str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) != \"Free\" and str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)) != \"Free\":\n Score += (4 / (len(minorScoreDeterminer)))\n else:\n if str(self._get_app_param_info(dict1, resultCount=1, resultKey=items)) in str(\n self._get_app_param_info(dict2, resultCount=1, resultKey=items)):\n Score += (4 / (len(minorScoreDeterminer)))\n Score = round(Score, 1)\n log_str = \"id\" + str(self._get_app_param_info(dict2, resultCount=1, resultKey='trackId')) + \" - \" + str(\n self._get_app_param_info(dict2, resultCount=1, resultKey='trackName')) + \"\\tScore: \" + str(Score)\n except AssertionError as e:\n print(\"Error: _get_similarity_score %s\" % e)\n sys.exit(\"Exit script with error code %s\" % e)\n except TypeError as e:\n print(\"Error: _get_similarity_score %s\" % e)\n sys.exit(\"Exit script with error code %s\" % e)\n except:\n e = sys.exc_info()[0]\n print(\"Error: _get_similarity_score %s\" % e)\n sys.exit(\"Exit script with error code %s\" % e)\n else:\n return log_str", "def findSimilarityScore(self, source, destination):\n\n\n if self.similarityScores is not None:\n return self.similarityScores[source][destination]\n\n # Project graph (if a meta path was provided)\n if self.metaPath is None:\n 
projectedGraph = self.graph\n else:\n if self.metaPath[0] == self.metaPath[-1]: # Homogeneous projection?\n projectedGraph = self.metaPathUtility.createHomogeneousProjection(self.graph, self.metaPath)\n else:\n projectedGraph = self.metaPathUtility.createHeterogeneousProjection(self.graph, self.metaPath)\n\n # Build initial similarity scores\n self.similarityScores = defaultdict(dict)\n nodes = self.graph.getNodes()\n for a, b in itertools.product(nodes, nodes):\n self.similarityScores[a][b] = 1 if a is b else 0\n\n self.similarityScores = self.__simRank(projectedGraph, self.similarityScores, SimRankStrategy.k)\n\n return self.similarityScores[source][destination]", "def pair(self, reference: Spectrum, query: Spectrum) -> float:\n binned_reference = self.model.spectrum_binner.transform([reference])[0]\n binned_query = self.model.spectrum_binner.transform([query])[0]\n reference_vector = self.model.base.predict(self._create_input_vector(binned_reference))\n query_vector = self.model.base.predict(self._create_input_vector(binned_query))\n\n return cosine_similarity(reference_vector[0, :], query_vector[0, :])", "def search(self, query):\n top_texts, top_ids, top_scores = self.retrieve_topn(query)\n return self.similarity.re_rank(query, top_texts, top_ids)", "def similarity(self, q1, q2, casing=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/{1}'.format(self.get_url(), 'similarity')\r\n\r\n request = http.Request('GET', url, params)\r\n\r\n return request, parsers.parse_json", "def artists_match_fixup2(\n song: Song, result: Result, score: float, search_query: Optional[str] = None\n) -> float:\n\n if score > 70 or not result.verified:\n # Don't fixup the score\n # if the artist match is already high\n # or if the result is not verified\n return score\n\n # Slugify some variables\n slug_song_artist = slugify(song.artists[0])\n slug_song_name = slugify(song.name)\n slug_result_name = slugify(result.name)\n slug_result_artists = slugify(\", \".join(result.artists)) if result.artists else \"\"\n\n # Check if the main artist is simlar\n has_main_artist = (score / (2 if len(song.artists) > 1 else 1)) > 50\n\n match_str1, match_str2 = create_match_strings(song, result, search_query)\n\n # Add 10 points to the score\n # if the name match is greater than 75%\n if ratio(match_str1, match_str2) >= 75:\n score += 10\n\n # If the result doesn't have the same number of artists but has\n # the same main artist and similar name\n # we add 25% to the artist match\n if (\n result.artists\n and len(result.artists) < len(song.artists)\n and slug_song_artist.replace(\"-\", \"\")\n in [\n slug_result_artists.replace(\"-\", \"\"),\n slug_result_name.replace(\"-\", \"\"),\n ]\n ):\n score += 25\n\n # Check if the song album name is very similar to the result album name\n # if it is, we increase the artist match\n if result.album:\n if (\n ratio(\n slugify(result.album),\n slugify(song.album_name),\n )\n >= 85\n ):\n score += 10\n\n # Check if other song artists are in the result name\n # if they are, we increase the artist match\n # (main artist is already checked, so we skip it)\n artists_to_check = song.artists[int(has_main_artist) :]\n for artist in artists_to_check:\n artist = slugify(artist).replace(\"-\", \"\")\n if artist in match_str2.replace(\"-\", \"\"):\n score += 5\n\n # if the artist match is still too low,\n # we fallback to matching all song artist names\n # with the result's artists\n if score <= 70:\n # Artists from song/result name without the song/result name words\n artist_list1 
= create_clean_string(song.artists, slug_song_name, True)\n artist_list2 = create_clean_string(\n list(result.artists) if result.artists else [result.author],\n slug_result_name,\n True,\n )\n\n artist_title_match = ratio(artist_list1, artist_list2)\n\n if artist_title_match > score:\n score = artist_title_match\n\n return score", "def get_score(self, query_kps, query_descriptor, kps, descriptor):\n\t\tratio = 0.7\n\t\tminMatches = 100\n\t\tmatcher = cv2.DescriptorMatcher_create(\"BruteForce\")\n\t\trawMatches = matcher.knnMatch(descriptor, query_descriptor, 2)\n\t\tmatches = []\n\n\t\t#Matching as per pyImageSearch\n\n\t\tfor m in rawMatches:\n\t\t\t# ensure the distance is within the ratio in the paper of SIFT\n\t\t\tif len(m) == 2 and m[0].distance < m[1].distance * ratio:\n\t\t\t\tmatches.append((m[0].trainIdx, m[0].queryIdx))\n\t\tif len(matches) > minMatches:\n\t\t\t# construct the two sets of points\n\t\t\tptsA = np.float32([query_kps[i] for (i, _) in matches])\n\t\t\tptsB = np.float32([kps[j] for (_, j) in matches])\n\n\t\t\t# compute the homography between the two sets of points\n\t\t\t# and compute the ratio of matched points\n\t\t\t(_, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC, 4.0)\n\n\t\t\t# return the ratio of the number of matched keypoints\n\t\t\t# to the total number of keypoints\n\t\t\treturn float(status.sum()) / status.size\n\n\t\t# no matches were found\n\t\treturn -1.0", "def getScore(data):\n return score", "def _calculate_scores(self, query, key):\n if self.hyperbolc_input:\n scores = self.manifold.single_query_attn_scores(query, key, self.c)\n else:\n scores = tf.linalg.matmul(query, key, transpose_b=True)\n\n if self.scale is not None:\n scores *= self.scale\n\n return scores", "def similarity(query,id):\n similarity = 0.0\n for term in query:\n if term in dictionary:\n similarity += inverse_document_frequency(term)*imp(term,id)\n similarity = similarity / length[id]\n return similarity", "def _find_matches(self, query, min_match):\n\t\tresult_list = []\n\t\tl_query = query.lower()\n\n\t\t#The card dictionary main keys are the sets card belongs to\n\t\tfor exp in self.card_dict:\n\t\t\tfor card in self.card_dict[exp]:\n\t\t\t\t#Change all uppercase letters to lowercase in preparation for string comparisons\n\t\t\t\tl_cardname = card['name'].lower()\n\n\t\t\t\tpercent_match = 0\n\n\t\t\t\tsearch_words = {}\n\n\t\t\t\t#Create a sub dictionary for each search word in the query\n\t\t\t\tfor word in l_query.split(' '):\n\t\t\t\t\tsearch_words.update({word : {}})\n\n\t\t\t\tcard_words = l_cardname.split(' ')\n\n\t\t\t\t#Calculate the match percentage between every search word and every card word\n\t\t\t\tfor search_word in search_words:\n\t\t\t\t\tfor card_word in card_words:\n\t\t\t\t\t\tmatch = 1 - (Searcher.levenshtein_distance(search_word, card_word) / \n\t\t\t\t\t\t\tmax(len(search_word), len(card_word)))\n\n\t\t\t\t\t\tif search_word not in search_words.keys():\n\t\t\t\t\t\t\tsearch_words[search_word] = {card_word: { 'match' : match } }\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tsearch_words[search_word].update( {card_word: { 'match' : match } } )\n\n\t\t\t\t#Calculates the total match mercentage for the entire query and the card name\n\t\t\t\tfor search_word in search_words:\n\n\t\t\t\t\tmax_value_key = list(search_words[search_word].keys())[0]\n\t\t\t\t\tmax_value = search_words[search_word][max_value_key]\n\n\t\t\t\t\tfor card_word in search_words[search_word]:\n\t\t\t\t\t\tif search_words[search_word][card_word]['match'] > 
max_value['match']:\n\t\t\t\t\t\t\tmax_value_key = card_word\n\t\t\t\t\t\t\tmax_value = search_words[search_word][card_word]\n\n\t\t\t\t\tpercent_card_match = len(max_value_key) / len(l_cardname.replace(\" \", \"\"))\n\t\t\t\t\tpercent_query_match = len(search_word) / len(l_query.replace(\" \", \"\"))\n\n\t\t\t\t\t#These weights emphasizes matching the query more than the entire card\n\t\t\t\t\tcard_match_weight = 0.25\n\t\t\t\t\tquery_match_weight = 1 - card_match_weight\n\n\t\t\t\t\tpercent_match += (percent_query_match * max_value['match'] * query_match_weight + \n\t\t\t\t\t\tpercent_card_match * max_value['match'] * card_match_weight)\n\n\t\t\t\tif percent_match >= min_match:\n\t\t\t\t\tresult_list.append([card, percent_match])\n\n\t\treturn result_list", "def similarity_score(a,b):\n\tjsc_scaler = 15\n\tocs_scaler = 5\n\ttcss_scaler = 0.05\n\n\tjaccard_similarity_coefficient_score = jsc_scaler * jaccard_similarity_coefficient(a,b)\n\toverlap_coefficient_score = ocs_scaler * overlap_coefficient(a,b)\n\ttotal_char_similarity_score = tcss_scaler * total_char_similarity(a,b)\n\ttotal_score = jaccard_similarity_coefficient_score + overlap_coefficient_score + total_char_similarity_score\n\t\n\treturn total_score", "def similarity_score(a,b):\n\tjsc_scaler = 15\n\tocs_scaler = 5\n\ttcss_scaler = 0.05\n\n\tjaccard_similarity_coefficient_score = jsc_scaler * jaccard_similarity_coefficient(a,b)\n\toverlap_coefficient_score = ocs_scaler * overlap_coefficient(a,b)\n\ttotal_char_similarity_score = tcss_scaler * total_char_similarity(a,b)\n\ttotal_score = jaccard_similarity_coefficient_score + overlap_coefficient_score + total_char_similarity_score\n\t\n\treturn total_score", "def reviewer_similarity_score(self, other: _Vertex) -> float:\n if self.degree() == 0 or other.degree == 0:\n return 0.0\n else:\n neighbours = self.neighbours\n other_neighbours = other.neighbours\n same_neighbours = neighbours.keys() & other_neighbours.keys()\n union = len(self.neighbours) + len(other.neighbours)\n sim_score_so_far = 0\n\n for vertex in same_neighbours:\n # 'bothered reviewing' bonus:\n sim_score_so_far += 1\n # 'love' bonus\n if self.neighbours[vertex] >= 9 and other.neighbours[vertex] >= 9:\n sim_score_so_far += 2\n # 'like' bonus\n elif self.neighbours[vertex] >= 7 and other.neighbours[vertex] >= 7:\n sim_score_so_far += 1\n\n return sim_score_so_far / union", "def score_match(phrase, song):\n return SequenceMatcher(None, phrase, song.title).ratio()\n ## Examples of other score metrics and modifiers:\n ## Penalize based on difference in phrase length (word count)\n # return -abs(len(song.split()) - len(phrase.split()))\n ## Penalize based on missing words\n # return -len([w for w in phrase.split() if w not in song.split()])", "def get_score(self, pos):\n score = 0\n gap_count = 0\n align_query, align_target = '', ''\n for xval, yval in pos:\n xunit = self.get_seq(self.target, xval)\n yunit = self.get_seq(self.query, yval)\n align_query += yunit\n align_target += xunit\n if xval != 0 and yval != 0:\n score += self.subs_matrix[xunit][yunit]\n gap_count = 0\n else:\n gap_count += 1\n if gap_count > 1 and len(self.gap_penatly) > 1:\n score += self.gap_penatly[1]\n else:\n score += self.gap_penatly[0]\n return score, align_query, align_target", "def match_score(seq1, seq2):\n\n seq1 = get_sequence_string(seq1)\n seq2 = get_sequence_string(seq2)\n score = align.localxx(seq1, seq2)[0][2]\n return score", "def score(self, index, query, doc_id):\n return 1" ]
[ "0.65184444", "0.63786924", "0.62425673", "0.61836904", "0.6133173", "0.61231256", "0.6054935", "0.59781015", "0.59664744", "0.58743554", "0.58663404", "0.58427", "0.583546", "0.583208", "0.5831294", "0.5812179", "0.58055884", "0.5791727", "0.57866764", "0.5785247", "0.5771111", "0.5747286", "0.57377523", "0.5737158", "0.5737158", "0.57334685", "0.5719802", "0.56847453", "0.5677357", "0.56687" ]
0.7251874
0
Returns the top n snacks with the highest similarity scores to the query snack
def top_n_scores(snack_data, percentage_data, n, snack_query, protein_query, carb_query, fat_query):
    start_time = time.time()

    #Loop through the snacks in dictionary and compute the score for each one
    scores_list = []
    for title, info in snack_data.items():
        score = get_score(snack_data, percentage_data, title, snack_query, protein_query, carb_query, fat_query)
        scores_list.append((title, score))

    scores_list.sort(key=lambda tup: tup[1], reverse=True)

    print("top_n_scores() time: --- %s seconds ---" % (time.time() - start_time))

    return scores_list[0:n]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def top_n_satisfy2(content, n):\n #print(n)\n sum_satisfy = 0.0\n query_num = 0.0\n for qid in content:\n label_sort = []\n score = []\n all_info = content[qid]\n num_label1 = 0\n for info in all_info:\n if info[0] > 0:\n num_label1 += 1\n label_sort.append([info[0], info[1]])\n label_sort.sort(key=take_second, reverse=True)\n satisfy = 0.0\n count = 0\n size = len(label_sort)\n for i in range(min(n, size)):\n cur_label = label_sort[i][0]\n if cur_label > 0:\n satisfy += 1\n cur_satisfy = satisfy / min(n, num_label1)\n sum_satisfy += cur_satisfy\n query_num += 1\n return sum_satisfy / query_num", "def top_matches(prefs, person, n=5, similarity=sim_pearson):\n scores = [(similarity(prefs, person, other), other)\n for other in prefs if other != person]\n\n scores.sort()\n scores.reverse()\n return scores[0:n]", "def get_top_n_motif_scores(score_list,top_n):\r\n\treturn score_list.argsort()[-top_n:],score_list[score_list.argsort()[-top_n:]]", "def top_n_satisfy(content, n):\n sum_satisfy = 0.0\n query_num = 0.0\n for qid in content:\n label_sort = []\n score = []\n all_info = content[qid]\n for info in all_info:\n label_sort.append([info[0], info[1]])\n score.append(info[1])\n label_sort.sort(key=take_first, reverse=True)\n score.sort(reverse=True)\n satisfy = 0.0\n count = 0\n size = len(label_sort)\n for i in range(size):\n cur_label = label_sort[i][0]\n cur_score = label_sort[i][1]\n if cur_label < 1:\n break\n if i >= n:\n break\n index = score.index(cur_score)\n count += 1\n if index < n:\n satisfy += 1\n if count == 0:\n sum_satisfy += 0.0\n query_num += 1\n else:\n sum_satisfy += satisfy / float(count)\n query_num += 1\n return sum_satisfy / query_num", "def topMatches(prefs, person, n=5, similarity=sim_pearson):\n all_matches = [(similarity(prefs, person, other), other) \n for other in prefs.keys()\n if person != other]\n all_matches.sort()\n all_matches.reverse()\n return all_matches[0:n]", "def top_by_num_of_ratings(self, n):\n return top_movies", "def get_top_answers(self, N):\n return sorted(\n self.answers.iteritems(), key=operator.itemgetter(1),\n reverse=True)[:N]", "def top_n(self, n):\n top = {}\n for code, feat_set in self.iteritems():\n tuples = sorted(feat_set.items(), reverse=True, key=itemgetter(1))\n best = {feat for feat, _ in tuples[:n]}\n top[code] = best\n return top", "def get_top_n(predictions, n):\n # First map the predictions to each user.\n top_n = defaultdict(list)\n for uid, iid, true_r, est, _ in predictions:\n top_n[uid].append((iid, est))\n\n # Then sort the predictions for each user and retrieve the k highest ones.\n for uid, user_ratings in top_n.items():\n user_ratings.sort(key=lambda x: x[1], reverse=True)\n top_n[uid] = user_ratings[:n]\n\n return top_n", "def top_by_ratings(self, n, metric=average):\n return top_movies", "def test_most_similar_topn(self):\n self.assertEqual(len(self.vectors.most_similar('dog.n.01', topn=5)), 5)\n self.assertEqual(len(self.vectors.most_similar('dog.n.01', topn=10)), 10)\n\n predicted = self.vectors.most_similar('dog.n.01', topn=None)\n self.assertEqual(len(predicted), len(self.vectors.vocab) - 1)\n self.assertEqual(predicted[-1][0], 'gallant_fox.n.01')", "def top_sentences(query, sentences, idfs, n):\n\n # claculate idfs of each sentence\n sent_score = dict()\n for sentence in sentences:\n sent_score[sentence] = 0\n for query_word in query:\n if query_word in sentences[sentence]:\n sent_score[sentence] += idfs[query_word]\n\n # create sorted list of sentences\n sorted_sentences = sorted(sent_score, key= lambda 
item: sent_score[item], reverse= True)\n\n # re-order sentences with the same rank of idfs according to query term density\n loop_sentences = sorted_sentences.copy()\n for sentence1 in loop_sentences:\n for sentence2 in loop_sentences:\n if sentence1 != sentence2:\n if sent_score[sentence1] == sent_score[sentence2]:\n qtd1 = query_term_density(sentence1, query, sentences)\n qtd2 = query_term_density(sentence2, query, sentences)\n index1 = sorted_sentences.index(sentence1)\n index2 = sorted_sentences.index(sentence2)\n if qtd1 > qtd2:\n if index1 > index2:\n sorted_sentences[index2], sorted_sentences[index1] = sorted_sentences[index1], sorted_sentences[index2]\n elif qtd1 < qtd2:\n if index1 < index2:\n sorted_sentences[index2], sorted_sentences[index1] = sorted_sentences[index1], sorted_sentences[index2]\n\n # get list contains top n sentences\n top_sentences = []\n for index in range(n):\n top_sentences.append(sorted_sentences[index]) \n\n return top_sentences", "def top_sentences(query, sentences, idfs, n):\n # identifies the sentences that are the best match for the query.\n top_sens = dict()\n for sentence, tokens in sentences.items():\n # add query rank to the idfs dictionary\n # top_sens is a dictionary of two columns, both initally empty\n query_tokens = len([word for word in tokens if word in query])\n value = query_tokens / (len(tokens))\n for word, idf_score in idfs.items():\n if word in query and word in tokens:\n # 'matching word measure'\n value += idf_score\n top_sens[sentence] = value\n # if a tie, prefer a higher 'query term density' -- /= : divide by and update value\n # defined as the proportion of words in the sentence that are also words in the query. For example, if a sentence has 10 words, 3 of which are in the query, then the sentence’s query term density is 0.3.\n # list of sentences to query ranked according to idf x[1] and if a tie, then density x[2] ; reverse=True: descending order\n # sentence list x[0] of length n ( [:n] )\n top_sens_rank = sorted(top_sens, key=top_sens.get, reverse=True)\n return top_sens_rank[0:n]", "def get_top_n(predictions, n=10):\n\n # First map the predictions to each user.\n top_n = defaultdict(list)\n for uid, iid, true_r, est, _ in predictions:\n top_n[uid].append((iid, est))\n\n # Then sort the predictions for each user and retrieve the k highest ones.\n for uid, user_ratings in top_n.items():\n user_ratings.sort(key=lambda x: x[1], reverse=True)\n top_n[uid] = user_ratings[:n]\n\n return top_n", "def get_top_spammers(self, n):\n sql_command = \"SELECT * FROM points ORDER BY amount DESC;\"\n cursor, connection = self.execute_command_get_connection(sql_command, [])\n all = cursor.fetchall()\n\n return all[:n]", "def top_sentences(query, sentences, idfs, n):\n rank = []\n\n for sentence in sentences:\n sentence_values = [sentence, 0, 0]\n\n for word in query:\n if word in sentences[sentence]:\n # Compute matching word measure. Sum of IDF values.\n sentence_values[1] += idfs[word]\n # Compute query term density. 
Proportion of words in a sentence that are in the query.\n sentence_values[2] += sentences[sentence].count(\n word) / len(sentences[sentence])\n\n rank.append(sentence_values)\n\n rank = sorted(rank, key=lambda x: (x[1], x[2]), reverse=True)[:n]\n \n return [sentence for sentence, mwm, qtd in rank]", "def get_tops(similarities, k):\n tops = similarities.argsort(axis=1)[:, :k].tolist()\n return tops", "def most_popular(n=5):\n cars = Car.objects.annotate(review_number=models.Count('reviews'))\n sorted_cars = cars.order_by('review_number')\n return sorted_cars[:n]", "def top_5_similar_2(list_string, my_nlp=nlp1, model_type=my_model, doc_topic=my_doc_topic):\n vec = my_nlp.transform(list_string)\n vtrans = model_type.transform(vec)\n array_5 = pairwise_distances(vtrans, doc_topic, metric='cosine').argsort()[0][0:5]\n # result_df = df_reviews[['game_link']].iloc[array_5]\n return df_reviews[['game']].iloc[array_5]\n # return(\"test\")\n return result_df", "def recommend_from_scores(scores: List[List[float]], n: int) -> List[List[int]]:\n\n def top_idx(scores):\n return np.array(scores).argsort()[::-1][:n]\n\n return [top_idx(s) for s in scores]", "def top_k_result(self, score, k=1):\n self.candidates = []\n self.candidate_sub = []\n self.top_k = heapq.nlargest(n=k, iterable=score)\n\n # Iterate the Top k scores, print each score and standard term\n for top_i in self.top_k:\n max_index = score.index(top_i)\n\n # Find the synonym term (might include standard terms)\n match_standard = self.synonym_term[max_index]\n\n # Find the standard term\n p_index = self.standard_synonym.tolist().index(match_standard)\n synonym_term = self.knowledge[p_index, 1]\n standard_term = self.knowledge[p_index, 0]\n\n # These lines of codes are mainly for Sub-words frequency\n temp_pre_name = remove_punctuation(term=synonym_term)\n temp_pre_name, _ = find_English_term(term=temp_pre_name)\n synonym_term_sub = self.subword_embed_calss.get_subword(term=temp_pre_name, is_print=False)\n synonym_term_sub = ' '.join(synonym_term_sub)\n self.candidates.append(standard_term)\n self.candidate_sub.append(synonym_term_sub)\n\n print('Top 10 Mapping ::: ', self.input_str, '----->', synonym_term, '----->', standard_term, ' (Similarity: ', top_i, ')')\n return standard_term", "def get_similarities(self, query, n=5):\n scores = self.get_scores(query)\n rank_n = np.argsort(scores)[::-1]\n if n > 0:\n rank_n = rank_n[:n]\n return [self.corpus[i] for i in rank_n]", "def most_similar(self, queries: Union[str, List[str], Dict[str, str]], topn: int = 10,\n score_function: str = \"cos_sim\"):\n result = {}\n if self.corpus_embeddings and self.index is None:\n logger.warning(f\"No index found. Please add corpus and build index first, e.g. with `build_index()`.\"\n f\"Now returning slow search result.\")\n return super().most_similar(queries, topn, score_function=score_function)\n if not self.corpus_embeddings:\n logger.error(\"No corpus_embeddings found. Please add corpus first, e.g. 
with `add_corpus()`.\")\n return result\n if isinstance(queries, str) or not hasattr(queries, '__len__'):\n queries = [queries]\n if isinstance(queries, list):\n queries = {id: query for id, query in enumerate(queries)}\n result = {qid: {} for qid, query in queries.items()}\n queries_texts = list(queries.values())\n queries_embeddings = self._get_vector(queries_texts)\n # We use hnswlib knn_query method to find the top_k_hits\n corpus_ids, distances = self.index.knn_query(queries_embeddings, k=topn)\n # We extract corpus ids and scores for each query\n for i, (qid, query) in enumerate(queries.items()):\n hits = [{'corpus_id': id, 'score': 1 - distance} for id, distance in zip(corpus_ids[i], distances[i])]\n hits = sorted(hits, key=lambda x: x['score'], reverse=True)\n for hit in hits:\n result[qid][hit['corpus_id']] = hit['score']\n\n return result", "def top_sentences(query, sentences, idfs, n):\n ranking = {}\n qtd = {}\n\n for s in sentences:\n value = 0\n # Calculate qtm for each sentence\n for w in sentences[s]:\n if w in query:\n value += 1\n qtd[s] = value/len(sentences[s])\n # calculate sum of idfs for each sentence\n value = 0\n for word in query:\n if word in sentences[s]:\n value += idfs[word]\n ranking[s] = value\n # sort the ranking according to the values\n sortedRank = sorted(ranking.items(), key=lambda x: x[1], reverse=True)\n # if they have same idfs, sort according to qtd\n change = True\n while change:\n change = False\n for i, s in enumerate(sortedRank):\n if i == len(sortedRank)-1:\n break\n if s[1] == sortedRank[i+1][1]:\n if qtd[s[0]] < qtd[sortedRank[i+1][0]]:\n sortedRank[i], sortedRank[i+1] = sortedRank[i+1], sortedRank[i]\n change = True\n break\n finalRank = []\n for j,s in enumerate(sortedRank):\n if j == n:\n break\n finalRank.append(s[0])\n return finalRank", "def top_n_combined(self, n):\n top = set()\n for feat_set in self.itervalues():\n tuples = sorted(feat_set.items(), reverse=True, key=itemgetter(1))\n best = {feat for feat, _ in tuples[:n]}\n top |= best\n return top", "def top_controversial(self, n):\n return top_movies", "def get_n_highest(self, candidate_scores, n=1):\n res = {}\n for k in candidate_scores.keys():\n cands = candidate_scores[k]\n top_n = sorted(cands, key=lambda x: x[1], reverse=True)[:n]\n res[k] = top_n\n return res", "def get_similar_products(user_input_emb, ref_catalog, n = 5):\r\n sim_list = []\r\n for i in range(len(ref_catalog)):\r\n desc_id = ref_catalog.iloc[i]['id']\r\n emb = ref_catalog.iloc[i]['desc_embedding']\r\n cos_sim = compute_cosine_sim(emb,user_input_emb)\r\n sim_list.append((desc_id, cos_sim))\r\n top_n = sorted(sim_list, key= lambda tup: tup[1], reverse = True)[:n]\r\n return top_n", "def top_sentences(query, sentences, idfs, n):\n sentence_scores = dict()\n\n for sentence, words in sentences.items():\n words_in_query = query.intersection(words)\n \n # idf value of sentence\n idf = 0\n for word in words_in_query:\n idf += idfs[word]\n \n # query term density of sentence\n num_words_in_query = sum(map(lambda x: x in words_in_query, words))\n query_term_density = num_words_in_query / len(words)\n\n # update sentence scores with idf and query term density values\n sentence_scores[sentence] = {'idf': idf, 'qtd': query_term_density}\n \n # rank sentences by idf then query term density\n ranked_sentences = sorted(sentence_scores.items(), key=lambda x: (x[1]['idf'], x[1]['qtd']), reverse=True)\n ranked_sentences = [x[0] for x in ranked_sentences]\n\n return ranked_sentences[:n]", "def get_top_nationalities(result, 
n=5):\n nat_freq=pd.DataFrame(result['country'].value_counts())\n ratios=nat_freq[:n]/nat_freq.sum()*100\n res='The most common visitors are from'\n for i in range(0,len(ratios)):\n if i!=len(ratios)-1:\n res=res+f' {ratios.index[i]} ({np.round(ratios.country[i],2)}%),'\n else:\n res=res+f' and {ratios.index[i]} ({np.round(ratios.country[i],2)}%).'\n return res" ]
[ "0.70479095", "0.7016488", "0.6949201", "0.68908536", "0.67934215", "0.6757787", "0.664486", "0.6607535", "0.65049905", "0.64892024", "0.64891934", "0.6486207", "0.6456038", "0.64506954", "0.6432667", "0.64052105", "0.63956976", "0.6391418", "0.63406956", "0.6337601", "0.63213044", "0.63080406", "0.6307078", "0.62918293", "0.62796026", "0.62161803", "0.61499214", "0.61422586", "0.6115883", "0.60916895" ]
0.7778652
0
Gets list of buses from database
def get_bus_list(): buses = db.session.query(Bus.bus_name).all() return buses
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_bt(self):\n return list(self.collection.find({\"sensor_type\": \"bt\"}, {\"_id\": False})) # Return a list", "def list(cls, context, filters=None, limit=3000, marker=1,\n sort_key='id', sort_dir='asc'):\n db_boars = cls.dbapi.get_boar_list(\n context, limit=limit, marker=marker, sort_key=sort_key,\n sort_dir=sort_dir, filters=filters)\n\n #import pdb; pdb.set_trace()\n return [Boar._from_db_object(cls(context), obj) for obj in db_boars]", "def filter_buses(list_of_buses):\n for bus in list_of_buses:\n return bus", "def do_bay_list(cs, args):\n bays = cs.bays.list(marker=args.marker, limit=args.limit,\n sort_key=args.sort_key,\n sort_dir=args.sort_dir)\n columns = ['uuid', 'name', 'node_count', 'master_count', 'status']\n columns += utils._get_list_table_columns_and_formatters(\n args.fields, bays,\n exclude_fields=(c.lower() for c in columns))[0]\n utils.print_list(bays, columns,\n {'versions': magnum_utils.print_list_field('versions')},\n sortby_index=None)", "def get_list_of_bbs(self):\n return self.mfp.get_list_of_bbs()", "def getBusesOfRoute(self,routeCode):\n foundBuses = []\n for bus in self.__busRepo.getAll():\n if bus.getRouteCode() == routeCode:\n foundBuses.append(bus)\n return foundBuses", "def getAllBusesAndRoutes(self):\n return self.__busRepo.getAll(),self.__routeRepo.getAll()", "def get_all_casks(self):", "def __sync_bulbs__() -> list:\n\n bulbs = list()\n\n try:\n discovered_bulbs = discover_bulbs(timeout=2)\n except Exception as e:\n raise Exception(str(e))\n\n for bulb in discovered_bulbs:\n ip = bulb['ip']\n port = bulb['port']\n model = bulb['capabilities']['model']\n name = bulb['capabilities']['name']\n name = name if name != '' else ip\n identifier = bulb['capabilities']['id']\n\n found_bulb = Bulb(\n ip=ip,\n port=port,\n model=model\n )\n\n found_bulb.set_name(name)\n properties = found_bulb.get_properties()\n\n bulbs.append({\n 'bulb': found_bulb,\n 'name': name,\n 'model': model,\n 'ip': ip,\n 'metadata':\n {\n 'id': identifier,\n 'ip': ip,\n 'name': name,\n 'model': model,\n 'properties': properties\n }\n })\n\n return bulbs", "def list_brands(self, **kwargs):\n url = self.api_url('brands')\n\n return requests.get(\n url,\n headers=self.auth_header,\n params=kwargs,\n ).json()", "def get_biases(self):\n return []", "def get_blists(self):\n return self.blists[:]", "def get_plant_batches(db_path: str) -> List[PlantBatch]:\n plant_batches: List[PlantBatch] = []\n\n conn: Connection = sqlite3.connect(path.join(db_path, 'batches.db'))\n cur: Cursor = conn.cursor()\n\n for row in cur.execute('SELECT Plant, Location, Tray, n_trays, planting_time FROM batches'):\n # print('\\n\\n')\n # for i in row:\n # print(f\"{type(i)}: {i}\")\n\n batch: PlantBatch = parse_batch_db_entry(row)\n\n plant_batches.append(batch)\n\n cur.close()\n conn.close()\n return plant_batches", "def demo_get_all_books(self):\n results = []\n self.cursor.execute(\"\"\"SELECT ISBN FROM book\"\"\")\n for book in self.cursor.fetchall():\n results.append(book[0])\n return results", "def beer(id):\n try:\n return BreweryDb.beer(id, {'withBreweries': 'Y'})['data']\n except Exception:\n return []", "def get_all(self):\n return self.db", "def get_all_bank_names() -> List[str]:\n\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select name from bank\"\n cursor.execute(query)\n data = cursor.fetchall()\n r_list = [x[0] for x in data]\n db.disconnect()\n return r_list", "def get_bulbs(ip=None, name=None, model=None, metadata=False) -> list:\n bulbs = list()\n\n param = 
'ip'\n value = ip\n return_all = False\n\n if name:\n param = 'name'\n value = name\n elif model:\n param = 'model'\n value = model\n elif not ip:\n return_all = True\n elif ip:\n ipaddress.ip_address(str(ip))\n\n for bulb in __sync_bulbs__():\n if bulb[param] == value or return_all:\n bulbs.append(bulb['metadata'] if metadata else bulb['bulb'])\n return bulbs", "def get_blocks(self):\n cmd = \"\"\" SELECT * FROM %s; \"\"\" %(TABLE_BLOCKCHAIN)\n\n self.__dbcursor.execute(cmd)\n return self.__dbcursor.fetchall()", "def get_bus_details():\n\n\tbus_detail = db.session.query(Bus.bus_name == (Bus.bus_name)).one()\n\n \n\treturn bus_detail", "def get_all_borrowed_books():\n return BorrowBook.query.all()", "def getBoogies(self):\n return self.boogies", "def get(self):\n query = Boat.query()\n results = query.fetch(limit = MAX_BOATS)\n boat_dicts = []\n for match in results:\n boat_dicts.append({'id': match.id, 'name': match.name, 'type': match.type,\n 'length': match.length, 'at_sea': match.at_sea })\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(boat_dicts))", "def get_all_boards():\n return [board for board in GRAPH_DB.find(\"board\")]", "def vbd_list(name=None, call=None):\n if call == \"function\":\n raise SaltCloudSystemExit(\n \"This function must be called with -a, --action argument.\"\n )\n if name is None:\n return \"A name kwarg is rquired\"\n ret = {}\n data = {}\n session = _get_session()\n vms = session.xenapi.VM.get_by_name_label(name)\n if len(vms) == 1:\n vm = vms[0]\n vbds = session.xenapi.VM.get_VBDs(vm)\n if vbds is not None:\n x = 0\n for vbd in vbds:\n vbd_record = session.xenapi.VBD.get_record(vbd)\n data[\"vbd-{}\".format(x)] = vbd_record\n x += 1\n ret = data\n return ret", "def MostUsedBuses(self):\n busKM = lambda bus: bus.getTimesUsedRoute() * self.__routeRepo.getObj(bus.getRouteCode()).getLength()\n buses = self.__busRepo.getAll()\n sortedBuses = sorted(buses,key = busKM,reverse=True)\n return sortedBuses", "def get_all(self):\n cursor = self._dbcon.cursor()\n cursor.execute(u\"select rowid,* from books\")\n result = cursor.fetchall()\n cursor.close()\n return [self._book_from_query_result(x) for x in result]", "def test_get_all_boats(self):\n pass", "def list(refresh):\n # This works too, but is much slower:\n # ogrinfo WFS:http://openmaps.gov.bc.ca/geo/ows?VERSION=1.1.0\n for table in bcdata.list_tables(refresh):\n click.echo(table)", "def get_db_entries(location: str='') -> list:\n db = CarsDb() # pylint: disable=invalid-name\n results = db.get_cars(location)\n db.commit()\n db.close()\n return results" ]
[ "0.67924833", "0.65883785", "0.6407331", "0.62877816", "0.6284882", "0.6264669", "0.61648506", "0.60405076", "0.5935302", "0.59322333", "0.58382916", "0.5751093", "0.5712563", "0.5708815", "0.57086897", "0.5692648", "0.56826633", "0.56777906", "0.56616527", "0.566161", "0.56591594", "0.56433874", "0.564175", "0.56310016", "0.56103873", "0.55933076", "0.5591844", "0.5584448", "0.5565974", "0.55629504" ]
0.7955064
0
Shows info per bus stop
def get_stop_info(stops): api_url = 'http://webservices.nextbus.com/service/publicXMLFeed?command=predictions&a=sf-muni&stopId=' """Stop_dict = {bus_name:'38', minutes: 7, stop_location: 'Geary & Leavenworth'}""" for stop in stops: url = api_url + str(stop) return url
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def showInfo(self, stop_number):\n if self.arrivalTime == '': # At origin terminals, there will only be a departure time listed\n if self.getArrivalTime(stop_number) == '00':\n print \"There is a\", self.getDirection(), self.routeID, \"train departing from\", self.getStop(stop_number), \"now.\"\n elif self.getArrivalTime(stop_number) == '01':\n print \"There is a\", self.getDirection(), self.routeID, \"train departing from\", self.getStop(stop_number), \"in 1 minute.\"\n else:\n print \"There is a\", self.getDirection(), self.routeID, \"train departing from\", self.getStop(stop_number), \"in\", int(self.getArrivalTime(stop_number)), \"minutes.\"\n elif self.getArrivalTime(stop_number) == '01':\n print \"There is a\", self.getDirection(), self.routeID, \"train arriving at\", self.getStop(stop_number), \"in 1 minute.\"\n elif self.getArrivalTime(stop_number) == '00':\n print \"There is a\", self.getDirection(), self.routeID, \"train arriving at\", self.getStop(stop_number), \"now.\"\n else:\n print \"There is a\", self.getDirection(), self.routeID, \"train arriving at\", self.getStop(stop_number), \"in\", int(self.getArrivalTime(stop_number)), \"minutes.\"", "def info_for_model(stop_list, stops, route):\n\n # Need to know where the bus number 1 and 2 are\n # This if and elif were put in due to an error where the bus list for bus 1 would come up empty, but not sure if necessary\n if len(stops[0]) == 0:\n bus_1 = stops[1][len(stops[1]) - 1]\n elif len(stops[0]) != 0:\n bus_1 = stops[0][0]\n bus_2 = stops[1][0]\n\n # Create empty lists to hold the information for each bus\n stops_bus_1 = []\n stops_bus_2 = []\n stops_bus_3 = []\n\n # Ste bus_number to 3, we will start filling the buses from the end, the last bus first\n bus_number = 3\n\n # Populate our lists\n for i in stops[len(stops) - 1]:\n # Get the times for the buses at the given stop\n first_3_buses = get_due_time(str(i), route)\n\n if len(first_3_buses) == 0:\n # print('Something went wrong!')\n continue\n # Add in the delay\n get_delay(first_3_buses)\n\n # Have to check if the bus it at the first stop, in which case, we just say 'Starting stop' for previous_stop\n if i == stop_list[0]:\n previous_stop = 'Starting stop'\n # Else, we get the previous stop\n else:\n previous_stop = stop_list[stop_list.index(i) - 1]\n\n # If the bus is the last one, we will only append to bus_number_3\n if bus_number == 3:\n # If we reach the stop where bus number 2 is, we must append this stop to both bus_number_3 and bus_number2 and\n # decrease the bus_number counter\n if i == bus_2:\n bus_number -= 1\n stops_bus_3.append({'stopid':i, 'delay':first_3_buses[1]['delay'], 'arrival_hour':first_3_buses[1]['arrivaldatetime'][11:13], 'datetime':first_3_buses[1]['arrivaldatetime'], 'previous_stop':previous_stop})\n stops_bus_2.append({'stopid':i, 'delay':first_3_buses[0]['delay'], 'arrival_hour':first_3_buses[0]['arrivaldatetime'][11:13], 'datetime':first_3_buses[0]['arrivaldatetime'], 'previous_stop':previous_stop})\n else:\n stops_bus_3.append({'stopid':i, 'delay':first_3_buses[0]['delay'], 'arrival_hour':first_3_buses[0]['arrivaldatetime'][11:13], 'datetime':first_3_buses[0]['arrivaldatetime'], 'previous_stop':previous_stop})\n\n # Now, we keep adding bus 2 and bus 3\n elif bus_number == 2:\n # If we reach the stop where bus number 1 is, we must append this stop to both bus_number_3 and bus_number2 and\n # bus_number1 and decrease the bus_number counter\n if i == bus_1:\n bus_number -= 1\n stops_bus_3.append({'stopid':i, 
'delay':first_3_buses[2]['delay'], 'arrival_hour':first_3_buses[2]['arrivaldatetime'][11:13], 'datetime':first_3_buses[2]['arrivaldatetime'], 'previous_stop':previous_stop})\n stops_bus_2.append({'stopid':i, 'delay':first_3_buses[1]['delay'], 'arrival_hour':first_3_buses[1]['arrivaldatetime'][11:13], 'datetime':first_3_buses[1]['arrivaldatetime'], 'previous_stop':previous_stop})\n stops_bus_1.append({'stopid':i, 'delay':first_3_buses[0]['delay'], 'arrival_hour':first_3_buses[0]['arrivaldatetime'][11:13], 'datetime':first_3_buses[0]['arrivaldatetime'], 'previous_stop':previous_stop})\n else:\n stops_bus_3.append({'stopid':i, 'delay':first_3_buses[1]['delay'], 'arrival_hour':first_3_buses[1]['arrivaldatetime'][11:13], 'datetime':first_3_buses[1]['arrivaldatetime'], 'previous_stop':previous_stop})\n stops_bus_2.append({'stopid':i, 'delay':first_3_buses[0]['delay'], 'arrival_hour':first_3_buses[0]['arrivaldatetime'][11:13], 'datetime':first_3_buses[0]['arrivaldatetime'], 'previous_stop':previous_stop})\n\n # Here, we are now appending all the buses, until we finally reach the source stop\n elif bus_number == 1:\n stops_bus_3.append({'stopid':i, 'delay':first_3_buses[2]['delay'], 'arrival_hour':first_3_buses[2]['arrivaldatetime'][11:13], 'datetime':first_3_buses[2]['arrivaldatetime'], 'previous_stop':previous_stop})\n stops_bus_2.append({'stopid':i, 'delay':first_3_buses[1]['delay'], 'arrival_hour':first_3_buses[1]['arrivaldatetime'][11:13], 'datetime':first_3_buses[1]['arrivaldatetime'], 'previous_stop':previous_stop})\n stops_bus_1.append({'stopid':i, 'delay':first_3_buses[0]['delay'], 'arrival_hour':first_3_buses[0]['arrivaldatetime'][11:13], 'datetime':first_3_buses[0]['arrivaldatetime'], 'previous_stop':previous_stop})\n joined = [stops_bus_1, stops_bus_2, stops_bus_3]\n return joined", "def get_times( stop_id ):\n\tstop_id = \"BUS\" + stop_id.upper( ).replace( \"BUS\", \"\" )\n\tapi_response = fetch( \n\t\t'http://myride.gocitybus.com/widget/Default1.aspx?pt=30&code=%s' % \n\t\tstop_id )\n\n\tif ( api_response.status_code == 200 ):\n\t\txml_response = parseString( api_response.content )\n\t\tstop = xml_response.getElementsByTagName( 'Stop' )[ 0 ]\n\t\tcurrent_stop = { 'stop_name': stop.getAttribute( 'name' ), 'buses':[]}\n\t\tbuses = stop.getElementsByTagName( 'Bus' )\n\t\tfor bus in buses:\n\t\t\troute_name = text_content( bus.getElementsByTagName( 'RouteName' )[ 0 ])\n\t\t\ttime_left = text_content( bus.getElementsByTagName( 'TimeTillArrival' )[ 0 ])\n\t\t\tcurrent_stop[ 'buses' ].append({ 'stop_code' : stop_id,\n\t\t\t 'route_name' : route_name, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'time_till_arrival' : time_left })\n\t\treturn current_stop\n\treturn {}", "def next_bus(stop_point):\n stop = str(stop_point)\n r = requests.get('https://api-radon.tfl.gov.uk/StopPoint/' + stop_point + '/Arrivals')\n print(r)\n json_result = r.json()\n all_stops = json_result\n my_buses = []\n for i in all_stops:\n i = '{}, {}-min'.format(str(i['destinationName']),str(round(i['timeToStation']/60)))\n my_buses.append(i)\n return my_buses", "def __repr__(self):\n return '<BusStop {}>'.format(self.name)", "def __display_buses_location(self):\n if len(self.__bus_controller.bus_dict) == 0 and len(self.__bus_controller.stations_dict) == 0:\n return [[]] # breaks the run if there are no buses\n data = []\n empty_list = [] # an empty list that has placeholders for later use\n\n for i in range(self.find_table_length()):\n empty_list.append(\" \")\n if len(self.__bus_controller.stations_dict) != 0:\n # makes sure 
that there are people waiting somewhere before we start marking them\n for line, stations in self.__bus_controller.stations_dict.items():\n list = [str(line)] + empty_list\n # creates the first line that has the placeholders and the line number\n for station, people_count in stations.items():\n list[station] = people_count\n # overrides the placeholders with the amount of people waiting at the station\n data.append(list)\n\n relevant_lines = []\n # just shows all the lines that are already in the list and showing passengers\n if len(self.__bus_controller.stations_dict) != 0:\n for list in data:\n relevant_lines.append(list[0])\n # buses override passengers if the collide at the same place\n for line, buses in self.__bus_controller.bus_dict.items():\n # puts an X wherever there's a bus\n if str(line) in str(relevant_lines):\n # if the line is already there it doesn't add another list into data\n for bus in buses:\n data[relevant_lines.index(str(line))][bus.station_num] = \"X\"\n else:\n # if the line isn't there yet it adds another list that contains a placeholder and an X for each bus\n list = [str(line)] + empty_list\n for bus in buses:\n list[bus.station_num] = \"X\"\n data.append(list)\n\n data = sorted(data, key=lambda list: int(list[0]))\n # sorts data by the first Value in the list so it would show the lines sorted.\n return data", "def print_stops_for_route(route_id: str) -> None:\n mbta = MBTA(config.CT_MBTA_API_KEY)\n try:\n stops = mbta.get_stops_for_route(route_id)\n except MBTAEmptyResult:\n print(f\"Route '{route_id}' returned no results\")\n return\n title_text = f\"Stops for '{route_id}'\"\n print(f\"{title_text:=^80}\")\n for stop in stops:\n print(f\"ID: {stop['id']}, NAME: {stop['attributes']['name']}\")\n return", "def plot_bus_load(self):\n stops = {key: 0 for key, _ in self.route.timetable().items()}\n for passenger in self.passengers:\n trip = self.passenger_trip(passenger)\n stops[trip[0][1]] += 1\n stops[trip[1][1]] -= 1\n prev = None\n for i, stop in enumerate(stops):\n if i > 0:\n stops[stop] += stops[prev]\n prev = stop\n fig, ax = plt.subplots()\n ax.step(range(len(stops)), list(stops.values()), where=\"post\")\n ax.set_xticks(range(len(stops)))\n ax.set_xticklabels(list(stops.keys()))\n return fig, ax", "def test_get_stops(self):\n obj = self.client.bus.routes.get(self.random_route.id)\n stops = obj.stops\n self.assertEqual(type(stops), type([]))\n [self.assertEqual(type(i), BusStop) for i in stops]\n self.assertEqual(stops[0].longitude, stops[0].x)\n self.assertEqual(stops[0].latitude, stops[0].y)\n stops[0].wkt\n stops[0].geojson\n stops[0].__repr__()\n stops[0].__str__()\n stops[0].__unicode__()", "def get_bus_details():\n\n\tbus_detail = db.session.query(Bus.bus_name == (Bus.bus_name)).one()\n\n \n\treturn bus_detail", "def show():\n configdb = ConfigDBConnector()\n configdb.connect()\n queue_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'QUEUE')\n port_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'PORT')\n port_drop_info = configdb.get_entry('FLEX_COUNTER_TABLE', PORT_BUFFER_DROP)\n rif_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'RIF')\n queue_wm_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'QUEUE_WATERMARK')\n pg_wm_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'PG_WATERMARK')\n pg_drop_info = configdb.get_entry('FLEX_COUNTER_TABLE', PG_DROP)\n buffer_pool_wm_info = configdb.get_entry('FLEX_COUNTER_TABLE', BUFFER_POOL_WATERMARK)\n acl_info = configdb.get_entry('FLEX_COUNTER_TABLE', ACL)\n tunnel_info = 
configdb.get_entry('FLEX_COUNTER_TABLE', 'TUNNEL')\n trap_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'FLOW_CNT_TRAP')\n route_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'FLOW_CNT_ROUTE')\n\n header = (\"Type\", \"Interval (in ms)\", \"Status\")\n data = []\n if queue_info:\n data.append([\"QUEUE_STAT\", queue_info.get(\"POLL_INTERVAL\", DEFLT_10_SEC), queue_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if port_info:\n data.append([\"PORT_STAT\", port_info.get(\"POLL_INTERVAL\", DEFLT_1_SEC), port_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if port_drop_info:\n data.append([PORT_BUFFER_DROP, port_drop_info.get(\"POLL_INTERVAL\", DEFLT_60_SEC), port_drop_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if rif_info:\n data.append([\"RIF_STAT\", rif_info.get(\"POLL_INTERVAL\", DEFLT_1_SEC), rif_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if queue_wm_info:\n data.append([\"QUEUE_WATERMARK_STAT\", queue_wm_info.get(\"POLL_INTERVAL\", DEFLT_60_SEC), queue_wm_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if pg_wm_info:\n data.append([\"PG_WATERMARK_STAT\", pg_wm_info.get(\"POLL_INTERVAL\", DEFLT_60_SEC), pg_wm_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if pg_drop_info:\n data.append(['PG_DROP_STAT', pg_drop_info.get(\"POLL_INTERVAL\", DEFLT_10_SEC), pg_drop_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if buffer_pool_wm_info:\n data.append([\"BUFFER_POOL_WATERMARK_STAT\", buffer_pool_wm_info.get(\"POLL_INTERVAL\", DEFLT_60_SEC), buffer_pool_wm_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if acl_info:\n data.append([ACL, pg_drop_info.get(\"POLL_INTERVAL\", DEFLT_10_SEC), acl_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if tunnel_info:\n data.append([\"TUNNEL_STAT\", rif_info.get(\"POLL_INTERVAL\", DEFLT_10_SEC), rif_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if trap_info:\n data.append([\"FLOW_CNT_TRAP_STAT\", trap_info.get(\"POLL_INTERVAL\", DEFLT_10_SEC), trap_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if route_info:\n data.append([\"FLOW_CNT_ROUTE_STAT\", route_info.get(\"POLL_INTERVAL\", DEFLT_10_SEC),\n route_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n\n click.echo(tabulate(data, headers=header, tablefmt=\"simple\", missingval=\"\"))", "def bus_details_SD(adjacent_list):\n\n temp = 0\n for x in results:\n if temp != x.get('ServiceNo'):\n temp = x.get('ServiceNo')\n count = 0\n adja_bus_stop = my_dictionary()\n adjacent_list.add(temp, adja_bus_stop)\n adja_bus_stop.add(count, [x.get('BusStopCode'), x.get('Distance')])\n count += 1\n else:\n adja_bus_stop.add(count, [x.get('BusStopCode'), x.get('Distance')])\n count += 1\n return adjacent_list", "def show_buses_for_line(self, line: int) -> str:\n\n if line not in self.__bus_dict.keys():\n return \"-None-\"\n output = \"\"\n buses = self.bus_dict[line]\n for bus in self.bus_dict[line]:\n output += f\"station: {bus.station_num}\"\n return output", "def test_krakow_single_stop(self):\n # read html data from given template (not from web, links change periodically)\n with open(os.path.join(BASE_DIR, 'collector', 'test_resources', 'mpk_krakow_parse_bus_stop_res.html'), 'r') as file:\n html = file.read()\n\n self.assertListEqual(mpk_krakow.parse_bus_stop(html), [\n ('212', 'http://rozklady.mpk.krakow.pl/?lang=PL&rozklad=20171023&linia=213__1__23'),\n ('213', 'http://rozklady.mpk.krakow.pl/?lang=PL&rozklad=20171023&linia=213__2__32'),\n ('213', 'http://rozklady.mpk.krakow.pl/?lang=PL&rozklad=20171023&linia=213__4__32'),\n ('213', 'http://rozklady.mpk.krakow.pl/?lang=PL&rozklad=20171023&linia=213__3__23')\n ])", "def 
show(self, update, context):\n\n # TODO: add show how long till bus\n message = update.message.text.lower().split(\" \")\n user = self.User(update)\n if len(message) == 1:\n output = \"hey looks like you still don't know how to use this command\\n\" \\\n \"don't worry, I'll teach you :)\\n\" \\\n \"here's a list of what you can do:\\n\" \\\n \"/show requests - this will show you your pending requests\\n\" \\\n \"/show lines - this will show you the lines that are available\\n\" \\\n \"/show buses for line {number} - this will show you the locations of the buses in the line you've specified\"\n elif message[1] == \"lines\":\n print(\"showing lines\")\n available_lines = self.bus_controller.show_available_lines()\n if available_lines == \"None\":\n output = \"there are currently no available lines\"\n else:\n output = f\"the currently available lines are: {str(available_lines)}\"\n elif message[1] == \"requests\":\n user = self.__find_matching_user(user)\n if len(user.stations) == 0:\n output = \"You don't have any pending requests\"\n else:\n output = \"Your pending requests:\\n\"\n for station in user.stations:\n output += f\"{station}\\n\"\n output = output[:-1:]\n elif message[1:-1:] == ['buses', 'for', 'line']:\n if not message[4].isnumeric(): # checks that the value is a number\n output = f\"{message[4]} isn't a number i support, therefor I can't help you.\"\n elif not (int(message[4]) > 0 and int(message[4]) <= 999): # checks that the number is within limits\n output = f\"sorry, {message[4]} is out of range, we only have lines within the range 1-999\"\n else: # gets here if the number is legit and everything is good\n line_num = int(message[4])\n if not self.bus_controller.check_line(line_num):\n output = \"there are no available buses for that line\"\n else:\n output = f\"the locations of the buses that are available for that line are: \\n\" \\\n f\"{self.bus_controller.show_buses_for_line(line_num)}\"\n else:\n print(message[1:-1:])\n output = \"couldn't recognise this command, try /show for the list of options you have for this command\"\n self.data_base.log(user, update.message.text, output)\n user.send_message(output)", "def printSchedule(dictBusSchedule):\n\n strPrint = \"\"\n for strStop in dictBusSchedule:\n strPrint = \"\"\n lstTimes = dictBusSchedule[strStop]\n for strTime in lstTimes:\n if strPrint == \"\":\n strPrint = strStop + \": \" + strTime\n else:\n strPrint = strPrint + \":\" + strTime\n\n print(strPrint)", "def get_station_boroughs(self):\\", "def __str__(self):\n return self.get_id() + \": \" + str(self.start) + (\" - \" + str(self.stop) if self.stop else \"\")", "def printStations(self):\n print(\"Bus numero \" + str(self._num) + \" :\")\n for i in range(len(self._stations)) :\n print(self._stations[i])\n print('\\n')", "def test_get(self):\n self.assertEqual(type(self.stop), BusStop)\n self.assertEqual(self.stop.id, self.random_stop.id)\n self.assertEqual(self.stop.name, self.random_stop.name)", "def show_info(self):\n\n print(\"Querying the station...\")\n val = getvalues(self.station, '', fixed_format)\n\n print('Fine Offset station settings:')\n print('%s: %s' % ('local time'.rjust(30),\n time.strftime('%Y.%m.%d %H:%M:%S %Z',\n time.localtime())))\n print('%s: %s' % ('polling mode'.rjust(30), self.station.polling_mode))\n\n slist = {'values':[], 'minmax_values':[], 'settings':[],\n 'display_settings':[], 'alarm_settings':[]}\n for x in sorted(val.keys()):\n if type(val[x]) is dict:\n for y in val[x].keys():\n label = x + '.' 
+ y\n s = fmtparam(label, val[x][y])\n slist = stash(slist, s)\n else:\n s = fmtparam(x, val[x])\n slist = stash(slist, s)\n for k in ('values', 'minmax_values', 'settings',\n 'display_settings', 'alarm_settings'):\n print('')\n for s in slist[k]:\n print(s)", "def show_info(self): \n color= Fore.WHITE\n print(f\"\"\" {color} \nNombre: {self.name} \nRuta: {self.route }\nFecha de salida: {self.departure_date}\"\"\")\n print(\"<\"*8, \">\"*8)\n print(\"El precio por habitacion es:\")\n for key, value in self.prize.items():\n color_value= (Fore.GREEN + str(value))\n color_key= Fore.WHITE + \"Habitacion\" + \" \" + key\n print(f\"\"\" {color_key} : {color_value}$ \"\"\")\n \n print(Fore.WHITE + \"<\"*8, \">\"*8)\n for floor, info in self.floors_info.items():\n piso=(Fore.WHITE + floor)\n print(f\" {piso}:{info} \")\n \n \n print(\"<\"*8, \">\"*8)\n print(\"Capacidad por tipo de habitacion: \")\n for key, value in self.room_capacity.items():\n print(f\"Habitacion {key}: {value} personas \",\"\\t\")\n return \"\"", "def showtrafficitems():\n middleware.trafficObj.showTrafficItems()", "def info(self):\n self.update_info()\n print('Number of electrodes: ' + str(self.n_elecs))\n print('Recording time in seconds: ' + str(self.dur))\n print('Sample Rate in Hz: '+ str(self.sample_rate))\n print('Number of sessions: ' + str(self.n_sessions))\n print('Date created: ' + str(self.date_created))\n print('Meta data: ' + str(self.meta))", "def get_bus_list():\n\n\tbuses = db.session.query(Bus.bus_name).all()\n\n \n\treturn buses", "def displayInfo(self):\n # clear stdout for a smoother display\n # os.system('cls' if os.name=='nt' else 'clear')\n\n #print(\"=========== Status ============\")\n # print(\n # \"speed: \" + str(self.speed) +\n # \"\\nangle: \" + str(self.steering_angle) +\n # \"\\nsign: \" + str(self.detected_sign) +\n # \"\\nlane lines: \" + str(self.lane_lines) +\n # \"\\nintersection line flag: \" + str(self.intersection_line) +\n # \"\\ncurrent state label: \" + str(self.currentStateLabel) +\n # \"\\ncurrent states: \" + str(self.currentState)\n #)", "def info(self):\n import string\n results = self.info_list()\n labels = \"%-8s %-9s %-4s %-8s %-8s %-4s\" % \\\n ('MACHINE','CPU','GHZ','MB TOTAL',\n 'MB FREE','LOAD')\n print labels\n for i in range(len(self.workers)):\n name = string.split(self.workers[i].host,'.')[0]\n res = results[i]\n s = \"%-8s %2dx%-6s %4.1f %8.1f %8.1f %4.2f\" % \\\n (name[-8:], res['cpu_count'],res['cpu_type'][-6:], \\\n res['cpu_speed'],res['mem_total'],res['mem_free'],\\\n res['load_1'])\n print s", "def show_business(bus_name):\n\n business = crud.get_bus_by_name(bus_name)\n\n return render_template('/business_details.html', business=business)", "def print_details(self):\n print(\"[{}]\".format(self.name))\n print(\"ID: \" + str(self.id))\n print(\"name: %s\" % self.name)\n print(\"URL: %s\" % self.url)\n print(\"CPUs: \" + str(self.cpus) + \" cores\")\n print(\"Mem: \" + self.memory_str)\n print(\"Tasks: \" + str(self.tasks_len))\n print(\"Uptime %s\" + self.uptime)\n print(\"Uptime Descriptive %s\" + self.uptime_descriptive)\n print(\" \")", "def __repr__(self):\n return '<Bus {}>'.format(self.number)" ]
[ "0.66487247", "0.6538514", "0.6502286", "0.63745207", "0.6317412", "0.5726826", "0.5719049", "0.5625583", "0.55998963", "0.554344", "0.5520937", "0.5518883", "0.55143493", "0.5511896", "0.5466144", "0.5420231", "0.5420191", "0.538806", "0.5379551", "0.5367331", "0.53392893", "0.5320361", "0.5318981", "0.5304253", "0.5294787", "0.52904814", "0.5267189", "0.526505", "0.52296185", "0.52253443" ]
0.66911435
0
Create a modified image by rotating and repositioning the input image.
def create_modified_image(input_image, rotation_angle, x_offset, y_offset): image = Image.open(input_image) width, height = image.size # Calculate the maximum size needed to fit the rotated image max_size = max(image.size) max_width = max_size if width == max_size else int(max_size * (width / height)) max_height = max_size if height == max_size else int(max_size * (height / width)) # Create a blank canvas with the maximum size modified_image = Image.new('RGB', (max_width, max_height), color=0) # Rotate the input image rotated_image = image.rotate(rotation_angle, expand=True) rotated_image = rotated_image.resize((width*5, height*2)) # Calculate the new position based on the offsets new_x = int((max_width - rotated_image.width) / 2 + x_offset) new_y = int((max_height - rotated_image.height) / 2 + y_offset) # Paste the rotated image onto the blank canvas modified_image.paste(rotated_image, (new_x, new_y)) # Resize the modified image to fit the canvas modified_image = modified_image.resize((width*2, height*2)) return modified_image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rotate(self):\n\n last_center = self.rect.center\n self.image = pg.transform.rotate(self.image_copy,self.angle)\n self.rect = self.image.get_rect()\n self.rect.center = last_center\n self.angle +=self.rotate_by", "def rotate(self, *args, **kwargs):\n return _image.image_rotate(self, *args, **kwargs)", "def rotate(self):\r\n # Rotate the image.\r\n self.image = pg.transform.rotozoom(self.orig_image, -self.angle, 1)\r\n # Rotate the offset vector.\r\n offset_rotated = self.offset.rotate(self.angle)\r\n print(\"offset_rotated:\", offset_rotated)\r\n # Create a new rect with the center of the sprite + the offset.\r\n self.rect = self.image.get_rect(center=self.pos+offset_rotated)", "def rotate(img, angle, resample=False, expand=False, center=None):\r\n \r\n return img.rotate(angle, resample, expand, center)", "def rot_image(original_image, angle):\n # Tämä copypastettu jostain netistä. Vaatii että kuva on neliö.\n assert original_image.get_width() == original_image.get_height(), \\\n \"Can't rotate image - not square. %r\" % original_image\n orig_rect = original_image.get_rect()\n rot_image = pygame.transform.rotate(original_image, angle)\n rot_rect = orig_rect.copy()\n rot_rect.center = rot_image.get_rect().center\n rot_image = rot_image.subsurface(rot_rect).copy()\n return rot_image", "def img_resize_rotate(file):\n orig_file = source_dir + file \n new_file = output_dir + file\n #print(\"source {} output {}\".format(orig_file, new_file))\n \n # Image.open('newauctionsheet.jpg').convert(mode=\"L\").show()\n img = Image.open( orig_file).resize((128,128)).rotate(90)\n new_img = img.convert('RGB').resize((128,128))\n new_img.save(output_dir + \"/\" + file, format=\"JPEG\")", "def rotate(self, image, angle):\n tmpImage = pygame.transform.rotate(image ,angle)\n imageCentreX = tmpImage.get_rect()[0] + tmpImage.get_rect()[2]/2\n imageCentreY = tmpImage.get_rect()[1] + tmpImage.get_rect()[3]/2\n\n targetWidth = tmpImage.get_rect()[2]\n targetHeight = tmpImage.get_rect()[3]\n\n imageOut = pygame.Surface((targetWidth, targetHeight))\n imageOut.fill((255,255,0))\n imageOut.set_colorkey((255,255,0))\n imageOut.blit(tmpImage,(0,0), pygame.Rect( imageCentreX-targetWidth/2,imageCentreY-targetHeight/2, targetWidth, targetHeight ) )\n return imageOut", "def rotate(self, image, angle):\n # Get the image size\n # No that's not an error - NumPy stores image matricies backwards\n image_size = (image.shape[1], image.shape[0])\n image_center = tuple(np.array(image_size) / 2)\n\n # Convert the OpenCV 3x2 rotation matrix to 3x3\n rot_mat = np.vstack(\n [cv2.getRotationMatrix2D(image_center, angle, 1.0), [0, 0, 1]]\n )\n\n rot_mat_notranslate = np.matrix(rot_mat[0:2, 0:2])\n\n # Shorthand for below calcs\n image_w2 = image_size[0] * 0.5\n image_h2 = image_size[1] * 0.5\n\n # Obtain the rotated coordinates of the image corners\n rotated_coords = [\n (np.array([-image_w2, image_h2]) * rot_mat_notranslate).A[0],\n (np.array([image_w2, image_h2]) * rot_mat_notranslate).A[0],\n (np.array([-image_w2, -image_h2]) * rot_mat_notranslate).A[0],\n (np.array([image_w2, -image_h2]) * rot_mat_notranslate).A[0]\n ]\n\n # Find the size of the new image\n x_coords = [pt[0] for pt in rotated_coords]\n x_pos = [x for x in x_coords if x > 0]\n x_neg = [x for x in x_coords if x < 0]\n\n y_coords = [pt[1] for pt in rotated_coords]\n y_pos = [y for y in y_coords if y > 0]\n y_neg = [y for y in y_coords if y < 0]\n\n right_bound = max(x_pos)\n left_bound = min(x_neg)\n top_bound = max(y_pos)\n bot_bound = min(y_neg)\n\n new_w = 
int(abs(right_bound - left_bound))\n new_h = int(abs(top_bound - bot_bound))\n\n # We require a translation matrix to keep the image centred\n trans_mat = np.matrix([\n [1, 0, int(new_w * 0.5 - image_w2)],\n [0, 1, int(new_h * 0.5 - image_h2)],\n [0, 0, 1]\n ])\n\n # Compute the tranform for the combined rotation and translation\n affine_mat = (np.matrix(trans_mat) * np.matrix(rot_mat))[0:2, :]\n\n # Apply the transform\n result = cv2.warpAffine(\n image,\n affine_mat,\n (new_w, new_h),\n flags=cv2.INTER_LINEAR\n )\n\n return result", "def rotate(self):\r\n self.rot = (self.vel.y * -3)\r\n if self.rot < -90:\r\n self.rot = -90\r\n \r\n new_image = pg.transform.rotate(self.bird_sprites[self.sprite_frame], self.rot)\r\n old_center = self.rect.center\r\n self.image = new_image\r\n self.rect = self.image.get_rect()\r\n self.rect.center = old_center\r\n # self.animate()\r", "def rotate_image(image):\n return tf.image.rot90(image)", "def rotateIMG(self):\n self.blit_image = pygame.transform.rotate(self.blit_image, self.blitHeading - 45)\n self.rect = self.blit_image.get_rect()\n self.rect.center = (int(self.pos.x), int(self.pos.y))", "def rotate(image, angle, resize=False, center=None, order=None):\n\n sy, sx, *_ = image.shape # size of image in pixels (note inversion y/x)\n\n try:\n interpolation_method = orders[order]\n except KeyError:\n raise ValueError(f'{order} not a valid interpolation order.')\n\n # Calculate size of new frame if necessary ---------------------------------------------\n\n if resize:\n\n theta = np.deg2rad(angle)\n cos_theta = np.cos(theta)\n sin_theta = np.sin(theta)\n\n # Projection legnths of rotated sides on original x and y\n lxx = abs(sx * cos_theta)\n lxy = abs(sx * sin_theta)\n lyx = abs(sy * sin_theta)\n lyy = abs(sy * cos_theta)\n\n sx_new = lxx + lyx\n sy_new = lxy + lyy\n\n output_size = int(sx_new), int(sy_new)\n\n else:\n\n output_size = sx, sy\n\n # Calculate rotation matrix --------------------------------------------------------\n\n if resize or center is None:\n center = (sx / 2, sy / 2)\n\n M = cv2.getRotationMatrix2D(center, angle, scale=1) # no rescaling considered here\n\n if resize: # Add translation to matrix so that the result fits into the new frame\n tranlsation = (sx_new - sx) / 2, (sy_new - sy) / 2\n M[:, 2] += tranlsation # The last column of the matrix is the translation vector\n\n # Create rotated image ------------------------------------------------------------\n\n image_rotated = cv2.warpAffine(image, M, dsize=output_size, flags=interpolation_method)\n\n return image_rotated", "def rotateImage(self):\n self.cnvImgOrig.rotate(\"./images/origPic.tiff\")\n self.cnvImgTest.rotate(\"./images/testPic.tiff\")", "def rotate_augmentation():\n rand_rotate = np.random.randint(180)\n return lambda image: rotate_with_extension(image, rand_rotate)", "def rotate(img, angle, resample=False, expand=False, center=None):\n\n #if not _is_pil_image(img):\n # raise TypeError('img should be PIL Image. 
Got {}'.format(type(img)))\n\n return img.rotate(angle, resample, expand, center)", "def update(self):\r\n self.__calculate_position()\r\n self.__calculate_angle()\r\n self.image = pygame.transform.rotate(self.origin_image, self.angle)", "def _spin(self):\n center = self.rect.center\n self.dizzy += 12 # rotate 12 degree clockwise\n\n if self.dizzy >= 360:\n self.dizzy = 0\n self.image = self.original # reset the image to its original ones after rotated\n else:\n self.image = pygame.transform.rotate(self.original, self.dizzy)\n\n self.rect = self.image.get_rect()\n self.rect.center = center # make sure the image would not move when spinning", "def Rotate(angle):\n def rotate_img(img, angle=angle):\n img = Ft.rotate(img, angle, resample=BILINEAR)\n return img\n return rotate_img", "def apply_rotation(image):\n\n\t# Load the image into a new BytesIO\n\tsImg = BytesIO(image)\n\tsNewImg = BytesIO(b'')\n\n\t# Create a new Pillow instance from the raw data\n\toImg = Pillow.open(sImg)\n\n\t# Store the image format\n\tsFormat = oImg.format\n\n\t# Get the proper sequence\n\ttry:\n\t\tlSeq = SEQUENCES[oImg._getexif()[ORIENTATION_TAG] - 1]\n\n\t\t# Transpose the image\n\t\tfor i in lSeq:\n\t\t\toImg = oImg.transpose(i)\n\n\t\t# Save the image using the same format as we got it in\n\t\toImg.save(sNewImg, sFormat)\n\n\t\t# Get the raw bytes\n\t\tsRet = sNewImg.getvalue()\n\n\t# If there's no sequence, return the image as is\n\texcept Exception as e:\n\t\tsRet = image\n\n\t# Cleanup\n\toImg.close()\n\tsImg.close()\n\tsNewImg.close()\n\n\t# Return\n\treturn sRet", "def preprocess(self, image):\n if self.rotate == 0:\n return image\n\n angle = self.rotate * -90\n return image.rotate(angle, expand=True).crop((0, 0, self._w, self._h))", "def rotate(self, angle):\n image_center = np.array(self.img.shape[1::-1]) / 2\n rot_mat = cv2.getRotationMatrix2D(tuple(image_center), angle, 1.0)\n\n self.img = cv2.warpAffine(\n self.img, rot_mat, self.img.shape[1::-1], flags=cv2.INTER_LINEAR\n )\n\n self.edits.append(f\"rotate:{angle}\")\n return self", "def rotate(self, angle, resample=NEAREST, expand=0, center=None,\r\n translate=None, fillcolor=None):\r\n angle = angle % 360.0\r\n if fillcolor is None:\r\n fillcolor = (0, 0, 0)\r\n if expand == 0:\r\n # grab the dimensions of the image\r\n h, w = self.size[1], self.size[0]\r\n\r\n # if the center is None, initialize it as the center of\r\n # the image\r\n if center is None:\r\n center = (w // 2, h // 2)\r\n scale = 1.0\r\n # perform the rotation\r\n M = cv2.getRotationMatrix2D(center, angle, scale)\r\n _im = cv2.warpAffine(self._instance, M, (w, h), borderValue=fillcolor)\r\n else:\r\n _im = self.rotate_bound(angle)\r\n if translate is not None:\r\n _im = self.translated(_im, translate[0], translate[0])\r\n return Image(_im)", "def rotated_image(image):\n orientation = parameter('Orientation',90) # in degrees counter-clockwise\n if orientation == None: orienation = 0\n return image.rotate(orientation)", "def rotate_image(image, angle):\r\n\r\n # Get the image size\r\n # No that's not an error - NumPy stores image matricies backwards\r\n image_size = (image.shape[1], image.shape[0])\r\n image_center = tuple(np.array(image_size) / 2)\r\n\r\n # Convert the OpenCV 3x2 rotation matrix to 3x3\r\n rot_mat = np.vstack(\r\n [cv2.getRotationMatrix2D(image_center, angle, 1.0), [0, 0, 1]]\r\n )\r\n\r\n rot_mat_notranslate = np.matrix(rot_mat[0:2, 0:2])\r\n\r\n # Shorthand for below calcs\r\n image_w2 = image_size[0] * 0.5\r\n image_h2 = image_size[1] * 0.5\r\n\r\n # Obtain the 
rotated coordinates of the image corners\r\n rotated_coords = [\r\n (np.array([-image_w2, image_h2]) * rot_mat_notranslate).A[0],\r\n (np.array([ image_w2, image_h2]) * rot_mat_notranslate).A[0],\r\n (np.array([-image_w2, -image_h2]) * rot_mat_notranslate).A[0],\r\n (np.array([ image_w2, -image_h2]) * rot_mat_notranslate).A[0]\r\n ]\r\n\r\n # Find the size of the new image\r\n x_coords = [pt[0] for pt in rotated_coords]\r\n x_pos = [x for x in x_coords if x > 0]\r\n x_neg = [x for x in x_coords if x < 0]\r\n\r\n y_coords = [pt[1] for pt in rotated_coords]\r\n y_pos = [y for y in y_coords if y > 0]\r\n y_neg = [y for y in y_coords if y < 0]\r\n\r\n right_bound = max(x_pos)\r\n left_bound = min(x_neg)\r\n top_bound = max(y_pos)\r\n bot_bound = min(y_neg)\r\n\r\n new_w = int(abs(right_bound - left_bound))\r\n new_h = int(abs(top_bound - bot_bound))\r\n\r\n # We require a translation matrix to keep the image centred\r\n trans_mat = np.matrix([\r\n [1, 0, int(new_w * 0.5 - image_w2)],\r\n [0, 1, int(new_h * 0.5 - image_h2)],\r\n [0, 0, 1]\r\n ])\r\n\r\n # Compute the tranform for the combined rotation and translation\r\n affine_mat = (np.matrix(trans_mat) * np.matrix(rot_mat))[0:2, :]\r\n\r\n # Apply the transform\r\n result = cv2.warpAffine(\r\n image,\r\n affine_mat,\r\n (new_w, new_h),\r\n flags=cv2.INTER_LINEAR\r\n )\r\n\r\n return result", "def rotate(image, rect, angle):\n new_image = pygame.transform.rotate(image, angle) # Rotate the original image without modifying it.\n rect = new_image.get_rect(center=rect.center) # Get a new rect with the center of the old rect.\n return new_image, rect", "def rotate(im: Image) -> Image:\n return im.rotate(random.randint(0, 360))", "def rotate_image(image, angle):\n\n # Get the image size\n # No that's not an error - NumPy stores image matricies backwards\n image_size = (image.shape[1], image.shape[0])\n image_center = tuple(np.array(image_size) / 2)\n\n # Convert the OpenCV 3x2 rotation matrix to 3x3\n rot_mat = np.vstack(\n [cv2.getRotationMatrix2D(image_center, angle, 1.0), [0, 0, 1]]\n )\n\n rot_mat_notranslate = np.matrix(rot_mat[0:2, 0:2])\n\n # Shorthand for below calcs\n image_w2 = image_size[0] * 0.5\n image_h2 = image_size[1] * 0.5\n\n # Obtain the rotated coordinates of the image corners\n rotated_coords = [\n (np.array([-image_w2, image_h2]) * rot_mat_notranslate).A[0],\n (np.array([ image_w2, image_h2]) * rot_mat_notranslate).A[0],\n (np.array([-image_w2, -image_h2]) * rot_mat_notranslate).A[0],\n (np.array([ image_w2, -image_h2]) * rot_mat_notranslate).A[0]\n ]\n\n # Find the size of the new image\n x_coords = [pt[0] for pt in rotated_coords]\n x_pos = [x for x in x_coords if x > 0]\n x_neg = [x for x in x_coords if x < 0]\n\n y_coords = [pt[1] for pt in rotated_coords]\n y_pos = [y for y in y_coords if y > 0]\n y_neg = [y for y in y_coords if y < 0]\n\n right_bound = max(x_pos)\n left_bound = min(x_neg)\n top_bound = max(y_pos)\n bot_bound = min(y_neg)\n\n new_w = int(abs(right_bound - left_bound))\n new_h = int(abs(top_bound - bot_bound))\n\n # We require a translation matrix to keep the image centred\n trans_mat = np.matrix([\n [1, 0, int(new_w * 0.5 - image_w2)],\n [0, 1, int(new_h * 0.5 - image_h2)],\n [0, 0, 1]\n ])\n\n # Compute the tranform for the combined rotation and translation\n affine_mat = (np.matrix(trans_mat) * np.matrix(rot_mat))[0:2, :]\n\n # Apply the transform\n result = cv2.warpAffine(\n image,\n affine_mat,\n (new_w, new_h),\n flags=cv2.INTER_LINEAR\n )\n\n return result", "def _reorient_image(image, 
orientation='RAS'):\n\n orig_ornt = nib.io_orientation(image.affine)\n targ_ornt = nib.orientations.axcodes2ornt(orientation)\n transform = nib.orientations.ornt_transform(orig_ornt, targ_ornt)\n image = image.as_reoriented(transform)\n\n return image", "def rotate_image(image, angle):\n\n # Get the image size\n # No that's not an error - NumPy stores image matrices backwards\n image_size = (image.shape[1], image.shape[0])\n image_center = tuple(np.array(image_size) / 2)\n\n # Convert the OpenCV 3x2 rotation matrix to 3x3\n rot_mat = np.vstack(\n [cv2.getRotationMatrix2D(image_center, angle, 1.0), [0, 0, 1]]\n )\n\n rot_mat_notranslate = np.matrix(rot_mat[0:2, 0:2])\n\n # Shorthand for below calcs\n image_w2 = image_size[0] * 0.5\n image_h2 = image_size[1] * 0.5\n\n # Obtain the rotated coordinates of the image corners\n rotated_coords = [\n (np.array([-image_w2, image_h2]) * rot_mat_notranslate).A[0],\n (np.array([image_w2, image_h2]) * rot_mat_notranslate).A[0],\n (np.array([-image_w2, -image_h2]) * rot_mat_notranslate).A[0],\n (np.array([image_w2, -image_h2]) * rot_mat_notranslate).A[0]\n ]\n\n # Find the size of the new image\n x_coords = [pt[0] for pt in rotated_coords]\n x_pos = [x for x in x_coords if x > 0]\n x_neg = [x for x in x_coords if x < 0]\n\n y_coords = [pt[1] for pt in rotated_coords]\n y_pos = [y for y in y_coords if y > 0]\n y_neg = [y for y in y_coords if y < 0]\n\n right_bound = max(x_pos)\n left_bound = min(x_neg)\n top_bound = max(y_pos)\n bot_bound = min(y_neg)\n\n new_w = int(abs(right_bound - left_bound))\n new_h = int(abs(top_bound - bot_bound))\n\n # We require a translation matrix to keep the image centred\n trans_mat = np.matrix([\n [1, 0, int(new_w * 0.5 - image_w2)],\n [0, 1, int(new_h * 0.5 - image_h2)],\n [0, 0, 1]\n ])\n\n # Compute the tranform for the combined rotation and translation\n affine_mat = (np.matrix(trans_mat) * np.matrix(rot_mat))[0:2, :]\n\n # Apply the transform\n result = cv2.warpAffine(\n image,\n affine_mat,\n (new_w, new_h),\n flags=cv2.INTER_LINEAR\n )\n\n return result", "def get_rotated_image(\n input_image: np.ndarray,\n rotation_request: int # Currently setting to an int as the possible rotations are fixed.\n) -> np.ndarray:\n # grab the dimensions of the image and then determine the\n # center\n (h, w) = input_image.shape[:2]\n (cX, cY) = (w / 2, h / 2)\n\n # grab the rotation matrix (applying the negative of the\n # angle to rotate clockwise), then grab the sine and cosine\n # (i.e., the rotation components of the matrix)\n M = cv2.getRotationMatrix2D((cX, cY), -rotation_request, 1.0)\n cos = np.abs(M[0, 0])\n sin = np.abs(M[0, 1])\n\n # compute the new bounding dimensions of the image\n nW = int((h * sin) + (w * cos))\n nH = int((h * cos) + (w * sin))\n\n # adjust the rotation matrix to take into account translation\n M[0, 2] += (nW / 2) - cX\n M[1, 2] += (nH / 2) - cY\n\n # perform the actual rotation and return the image\n return cv2.warpAffine(input_image, M, (nW, nH))" ]
[ "0.7296847", "0.7029329", "0.69576144", "0.6940915", "0.68389374", "0.6762388", "0.6726973", "0.67229176", "0.6662857", "0.66358876", "0.6580777", "0.6566921", "0.6560993", "0.6520975", "0.65166694", "0.64928305", "0.648112", "0.64629215", "0.64446527", "0.64421993", "0.6439548", "0.6414636", "0.64124805", "0.6401595", "0.63909715", "0.6375219", "0.6365247", "0.6360646", "0.6350575", "0.6343515" ]
0.7490708
0
Select the th that maximizes a metric
def select_best_th(metrics_dict: Dict, metric: str): max_metric_ix = np.argmax(metrics_dict[metric]) return metrics_dict['metrics_ths'][max_metric_ix]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_max_estimated_bandit(self)->Bandit:\n # print(\"mus - \", self.mu)\n # print(\"actions - \", np.argmax(self.mu))\n unique, counts = np.unique(self.mu, return_counts=True)\n lens = counts[np.argmax(unique)] \n if lens>1: # if two actions have same argmax\n # then return arbitrarily from those max ones\n maxs = list(np.array(self.bandits)[self.mu==unique[np.argmax(unique)]])\n return np.random.choice(maxs)\n # otherwise return the max one\n return self.bandits[np.argmax(self.mu)]", "def find_best_threshold(y, y_hat, step_size, score_func, maximize=True):\n best_thres, best_score = 0.0, 0.0 if maximize else 1.0\n for thres in np.arange(0, 1, step_size):\n score = score_for_threshold(y, y_hat, score_func, thres)\n if (maximize and (score > best_score)) or (not maximize and (score < best_score)):\n best_score = score\n best_thres = thres\n\n return best_thres, best_score", "def _get_maxth(self):\n return self.__maxth", "def _get_maxth(self):\n return self.__maxth", "def _get_maxth(self):\n return self.__maxth", "def _get_maxth(self):\n return self.__maxth", "def _get_maxth(self):\n return self.__maxth", "def _get_maxth(self):\n return self.__maxth", "def _metric(self, fpr, tpr, th_):\n def np_divide(a, b):\n \"\"\"Numpy array division.\n\n ignore / 0, div( [-1, 0, 1], 0 ) -> [0, 0, 0]\n \"\"\"\n with np.errstate(divide='ignore', invalid='ignore'):\n c = np.true_divide(a, b)\n c[~np.isfinite(c)] = 0 # -inf inf NaN\n return c\n\n # q = tpr/(fpr+tpr) go through max.\n q = np_divide(tpr, fpr + tpr)\n best_ind = np.argmax(q)\n best_th_ = th_[best_ind]\n return best_th_, best_ind, q", "def _SD_optimal(t):", "def best_metric(self) -> float:\n return self._best_metric", "def is_best(self, metric: float) -> bool:", "def get_max(im, class_name, dets, thresh=0.5):\n inds = np.where(dets[:, -1] >= thresh)[0]\n max_inds = 0\n max_score = 0.0\n if len(inds) == 0:\n # print('Warning: no target detected!')\n return\n elif len(inds) > 1:\n # print('Warning: ' + str(len(inds)) + ' targets detected! 
Choose the highest one')\n for i in inds:\n if(dets[i, -1] > max_score):\n max_inds = i\n max_score = dets[i, -1]\n bbox = dets[max_inds, :4]\n score = dets[max_inds, -1]\n return [max_inds,score]", "def select_max(td, vocab, A, K):\n V, M = A.shape\n\n d = {}\n\n for m in range(M):\n k = 1\n # best features which are not selected yet\n best_feat = [a for a in A[:,m].argsort()[::-1] if not a in d]\n d.update(dict((a,1) for a in best_feat[:int(K/M)]))\n\n best_feat = np.array(d.keys())\n varr = vocab_array(vocab)\n\n return td[best_feat, :], vocab_dict(varr[best_feat])", "def compute_test_best(self, x, metric, target):\n index = self(x, metric)\n if metric.maximising:\n test = index > target\n else:\n test = index < target\n\n return index, test", "def calibrate_threshold(test_graphs):\r\n best_threshold = None\r\n best_result = None\r\n for threhold in range(1, 50):\r\n cur_res = evaluate_argument_mention(test_graphs, threhold)\r\n if (best_result is None) or (cur_res > best_result):\r\n best_result = cur_res\r\n best_threshold = threhold\r\n return (best_threshold, best_result)", "def maximize(self):\n raise NotImplementedError", "def personal_best(scores):\n return max(scores)", "def maximize(self, budget, optimizer):\n\n\t\tpass", "def max_power_candidate_thermal_rule(_m, g, y, s, t):\r\n\r\n return m.p[g, y, s, t] - sum(m.x_c[g, j] for j in m.Y if j <= y) <= 0", "def best_action(q_table: np.ndarray, state: int) -> int:\n return int(np.argmax(q_table[state]))", "def greedy_policy(self):\n # print(self.weights)\n policy = defaultdict(lambda: 0)\n\n for entry, values in self.weights.items():\n policy[entry] = np.argmax(self.weights[entry])\n # print(policy)\n\n return policy", "def get_best_thres(self, data, label, score_func = f1_score):\n pred_prob = self.model.predict(data)\n best_score = 0\n for i_thres in range(0, 100):\n pred_label = [int(i > (i_thres / 100.0)) for i in pred_prob]\n fs = score_func(label, pred_label)\n if best_score < fs:\n best_score = fs\n self.config.thres = i_thres / 100.0\n print ('best score: %0.2f best_thres: %0.2f' % (best_score, self.config.thres))", "def _get_max_sampled_bandit(self)->Bandit:\n estimates = []\n for bandit in self.bandits:\n Qth = np.random.normal(loc =self.mu[bandit.id], scale = self.var[bandit.id])\n f_hat = self.mu[bandit.id]#computing moving_average here \n estimates.append(max(Qth, f_hat))\n return self.bandits[np.argmax(estimates)]", "def extract_max_value(h: np.ndarray):\n return np.argmax(h, axis=1)", "def get_specific_heat() -> float:\n return 1006.0", "def best_B(Ag):\n top = 0\n for i in range(len(Ag)):\n etop = np.min(cf.TD20[int(Ag[i]) - 1])\n top += etop\n return top", "def worst_score(self):\r\n pass", "def get_n_best(self):\n pass", "def FMScore(x,p,d):\n \n if x <= d[p][0.20]:\n return 1\n elif x <= d[p][0.4]:\n return 2\n elif x <= d[p][0.6]: \n return 3\n elif x <= d[p][0.8]:\n return 4\n else:\n return 5" ]
[ "0.6139892", "0.61252534", "0.60230327", "0.60230327", "0.60230327", "0.60230327", "0.60230327", "0.60230327", "0.60004276", "0.5977269", "0.5831864", "0.5795257", "0.5734608", "0.57190204", "0.56623447", "0.5624177", "0.56057686", "0.56052905", "0.5580644", "0.5558987", "0.55456406", "0.5544143", "0.5531362", "0.55027723", "0.55027056", "0.5482435", "0.5468537", "0.5454216", "0.54419273", "0.54377145" ]
0.7147471
0
Creates a prediction for a label, given a th. If score > th > 1
def label_df_with_th(df: pd.DataFrame, th: float, score_col: str): df['y_pred_class'] = df[score_col].apply(lambda score: 1 if score >= th else 0) return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, prediction, label):\n\n # true positive (tp): we predict a label of 1 (positive), and the true label is 1\n self.tp = np.sum(np.logical_and(prediction == 1, label == 1))\n # true negative (tn): we predict a label of 0 (negative), and the true label is 0\n self.tn = np.sum(np.logical_and(prediction == 0, label == 0))\n # false positive (fp): we predict a label of 1 (positive), but the true label is 0\n self.fp = np.sum(np.logical_and(prediction == 1, label == 0))\n # false negative (fn): we predict a label of 0 (negative), but the true label is 1\n self.fn = np.sum(np.logical_and(prediction == 0, label == 1))\n\n self.n = prediction.size", "def get_predict(prediction, threshold):\n\n prediction[prediction < threshold] = 0\n prediction[prediction >= threshold] = 1\n \n return prediction", "def predict(self, example):\n label = \"\"\n pred = -99.0\n for w in self.weights:\n current = np.asarray(example.fvector)\n i = self.weights[w] @ current\n if i > pred:\n pred = i\n label = w\n return label", "def score(df, tmo, label):\n\n\tif str(type(tmo)) != \"<class 'sklearn.ensemble._forest.RandomForestRegressor'>\":\n\t\traise TypeError('Wrong model type!')\n\n\tX_test = df.loc[:, df.columns != label]\n\t\n\t# predict on test data\n\ty_pred = tmo.predict(X_test)\n\tdf['predict'] = y_pred\n\n\treturn df", "def build_predict(tf_prob, threshold=0.5):\n prediction = tf.cast(tf_prob, tf.float64)\n threshold = float(threshold)\n return tf.cast(tf.greater(prediction, threshold), tf.float32)", "def predict_label(self, x, weight=None, cutting=0.5, predict_label=None):\n if predict_label is None:\n predict_label = self.pred_label\n if weight is None: weight = self.weights[-1]\n pred = self.predict(x, weight, cutting)\n pred[np.where(pred == 0)] = predict_label[0]\n pred[np.where(pred == 1)] = predict_label[1]\n return pred", "def get_best_thres(self, data, label, score_func = f1_score):\n pred_prob = self.model.predict(data)\n best_score = 0\n for i_thres in range(0, 100):\n pred_label = [int(i > (i_thres / 100.0)) for i in pred_prob]\n fs = score_func(label, pred_label)\n if best_score < fs:\n best_score = fs\n self.config.thres = i_thres / 100.0\n print ('best score: %0.2f best_thres: %0.2f' % (best_score, self.config.thres))", "def predict_label(self, src): # real signature unknown; restored from __doc__\n pass", "def predict_label(img, net_model, label):\n img1 = cv2.resize(img, (80, 80))\n predict = net_model.predict(img1.reshape(1, 80, 80, 3))\n maxi = predict[0][0]\n curs = 0\n test = 0\n for i, pred in enumerate(predict[0]):\n test += pred\n if pred > maxi:\n maxi = pred\n curs = i\n return label[curs]", "def predict_with_threshold(y_pred_proba, threshold):\n\n y_pred = [1 if x >= threshold else 0 for x in y_pred_proba]\n return pd.Series(data=y_pred, name='y_pred')", "def build_predict_2(tf_prob, threshold=0.5):\n prediction = tf.cast(tf_prob, tf.float64)\n threshold = float(threshold)\n return tf.cast(tf.less_equal(prediction, threshold), tf.float32)", "def get_prediction_from_score(score):\n if(score >= 0.03):\n return 'Positive'\n elif(score <= -0.03):\n return 'Negative'\n else:\n return 'Neutral'", "def thresholding(pred,label,thres):\n \n conf =[]\n \n for i in thres:\n \n pr_th,lab_th = (pred>i),(label>i)\n conf += confusion(pr_th,lab_th)\n \n return np.array(conf).reshape(-1,4)", "def get_label(prob_label, target):\n return target if random.random() <= prob_label else 1 - target", "def _prediction(features, labels, threshold, size_sample, randomised, equal_pos_neg, 
name_kernel, custom_SVM=None):\n if custom_SVM:\n Classifier = custom_SVM\n else:\n Classifier = load_classifier(size_sample, randomised, equal_pos_neg, name_kernel)\n name_file = str(construct_name_file(size_sample, randomised, equal_pos_neg, name_kernel).split(\".json\")[0])\n return name_file, _performance(Classifier, features, labels, threshold)", "def predict(self,X):\n y_pred = np.random.choice(self.labels, size=(X.shape[0],), p=self.thresholds)\n return y_pred", "def binary_predict(probs, threshold = 0.5):\n return (probs >= threshold) * np.ones(len(probs))", "def classify(self, predict_wx, threshold):\n # predict_wx = self.compute_wx(data_instances, self.model_weights.w_, self.model_weights.intercept_)\n\n def predict(x):\n prob = activation.sigmoid(x)\n pred_label = 1 if prob > threshold else 0\n return prob, pred_label\n\n predict_table = predict_wx.mapValues(predict)\n return predict_table", "def predict(self, observation):\n\t\t# TODO - complete this\n\t\tp_max = 0\n\t\tpredict = None\n\t\tfor label in self.possible_labels:\n\t\t\tpossiblity = 1\n\t\t\tlabel_gaussian = self.gaussians.get(label)\n\t\t\tfor i in range(len(observation)):\n\t\t\t\t(mean, std) = label_gaussian[0][i]\n\t\t\t\tvalue = observation[i]\n\t\t\t\tpossiblity *= self.gaussians_calc(value, mean, std)\n\t\t\tif p_max < possiblity:\n\t\t\t\tp_max = possiblity\n\t\t\t\tpredict = label\n\n\t\treturn predict", "def prediction_fn(self, yhat):\n raise NotImplementedError", "def get_onehot_label_threshold(scores, threshold=0.5):\n predicted_onehot_labels = []\n scores = np.ndarray.tolist(scores)\n for score in scores:\n count = 0\n onehot_labels_list = [0] * len(score)\n for index, predict_score in enumerate(score):\n if predict_score >= threshold:\n onehot_labels_list[index] = 1\n count += 1\n if count == 0:\n max_score_index = score.index(max(score))\n onehot_labels_list[max_score_index] = 1\n predicted_onehot_labels.append(onehot_labels_list)\n return predicted_onehot_labels", "def add_prediction(self, truth_label, prediction, doc_id, doc_price=0):\n assert (truth_label == '1' or truth_label == '-1')\n \n if truth_label == prediction:\n if truth_label == '1':\n self.tp += 1\n else:\n self.tn += 1\n else:\n if truth_label == '1':\n self.fn += 1\n else:\n self.fp += 1\n\n if self.is_query_level and prediction != '-1':\n self.query_prediction.add_doc_predicted_relevant(doc_id, truth_label, doc_price)", "def decision_function(self, Xtt):\n # predict decision score on test dataset\n self.logger.info(\n self.__name__ + ' predicts decision scores on {:d} samples.'.format(Xtt.shape[0]))", "def _predict(self, classify: np.array, n_preds=1):\r\n tmp = classify.argsort()[:, :n_preds] # Return the index of the best label classification\r\n preds = copy(tmp) # allow to copy tmp\r\n for index, target in enumerate(self.targets):\r\n preds = np.where(tmp == index, target, preds) # Return the target label corresponding to the index\r\n self.preds = preds", "def predict(test_dataset,test_tX,weights):\n for idx, dataset in enumerate(test_tX):\n test_dataset[idx]['Prediction'] = predict_labels(weights[idx],dataset)\n return test_dataset", "def overlayed_label_and_prediction(img, label, pred, saveto=None):\n # pred = np.argmax(pred, axis=2) # get the most likely class id for each pixel\n assert 2 == label.ndim == pred.ndim, \\\n \"Label and Prediction MUST be of shape 2D arrays with no color channel or batch axis\"\n assert (img.ndim == 3) and (img.shape[-1] == 3), \\\n \"Input image should be of shape [n_rows, n_cols, 3]\"\n assert 
img.shape[:2] == pred.shape == label.shape, \\\n \"Image height and width for img, label, and pred must all match up\"\n\n # Convert chanel axis to one hot encodings (max of three classes for 3 chanels)\n pred = np.eye(3, dtype=np.uint8)[pred]\n label = np.eye(3, dtype=np.uint)[label]\n\n # Extract JUST the road class (class 1)\n # Red for prediction, Blue for label\n road = np.zeros_like(pred)\n road[:,:,0] = pred[:,:,1]*255\n road[:,:,2] = label[:,:,1]*255\n\n # Overlay the input image with the label and prediction\n img = PIL.Image.fromarray(img)\n road = PIL.Image.fromarray(road)\n overlay = PIL.ImageChops.add(img, road, scale=1.5)\n\n if saveto is not None:\n maybe_make_pardir(saveto)\n overlay.save(saveto, \"JPEG\")\n\n return overlay", "def predict(self, X):\r\n \r\n # To speed up, we apply the scoring function to all the instances\r\n # at the same time.\r\n scores = X.dot(self.w)\r\n \r\n # Create the output array.\r\n # At the positions where the score is positive, this will contain\r\n # self.positive class, otherwise self.negative_class.\r\n out = numpy.select([scores>=0.0, scores<0.0], [self.positive_class, \r\n self.negative_class])\r\n return out", "def prediction(self, score: float):\n data = self.process_data()\n paramters = data[\"param\"]\n return {\"pred\": func(score, paramters[0], paramters[1], paramters[2]),\n \"acc\": data[\"acc\"]}", "def predict(self, x):\n if(self.score(x) > 0):\n return 1\n return -1", "def predict(self, x):\n if(self.score(x) > 0):\n return 1\n return -1" ]
[ "0.6739023", "0.6581369", "0.6510297", "0.63756716", "0.63308835", "0.6283337", "0.62540215", "0.6175885", "0.61675656", "0.61640334", "0.615781", "0.6091183", "0.6068421", "0.60517657", "0.6038007", "0.60357845", "0.60301214", "0.5937004", "0.5917283", "0.59122425", "0.58688956", "0.5847", "0.5843765", "0.5839569", "0.5822148", "0.58164495", "0.58132076", "0.57980585", "0.5796976", "0.5796976" ]
0.71840984
0
Creates a new database path unique to the exported subset of ids.
def make_new_dbpath(ibs, id_label, id_list): import wbia tag_hash = ut.hashstr_arr(id_list, hashlen=8, alphabet=ut.ALPHABET_27) base_fmtstr = ( ibs.get_dbname() + '_' + id_label + 's=' + tag_hash.replace('(', '_').replace(')', '_') + '_%d' ) dpath = wbia.get_workdir() new_dbpath = ut.non_existing_path(base_fmtstr, dpath) return new_dbpath
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_id_path(base_path, id_) -> Path:\n\n return base_path / (ID_FMT.format(id=id_))", "def _create_ID_files(self):\n for file, IDs in [(self._trn_IDs_file, self._trn_IDs), (self._val_IDs_file,\n self._val_IDs), (self._tst_IDs_file, self._tst_IDs)]:\n with open(file, 'w') as f:\n f.write('\\n'.join('{}###{}###{}'.format(ID[0], ID[1], ID[2]) for ID in IDs))", "def save_ids(self, year, ids, switch):\n path = os.path.join(self.get_ids_path(switch), '{}.csv'.format(year))\n with open(path, 'a') as file:\n writer = csv.writer(file)\n writer.writerows([(id,) for id in ids])\n return path", "def construct_path(id_val):\n id_val = str(id_val)\n path = id_val[:3] + \"/\" + id_val[3:6] + \"/\" + id_val[6:9] + \"/\"\n path += id_val\n return path", "def _path_generator(f_name):\n return os.path.join(\n os.path.split(os.path.abspath(__file__))[0], f_name + \".\" + \"db\"\n )", "def db2gbk_mkdir(self, path, p_list, update):\n if update is True:\n path = where.dir_archive(path, p_list)\n else:\n path = where.dir_make(path, p_list)\n return path", "def generate_sqlite_db_path():\n tmp_dir = str(tempfile.mkdtemp())\n abspath = os.path.abspath( # noqa: PTH100\n os.path.join( # noqa: PTH118\n tmp_dir,\n \"sqlite_db\"\n + \"\".join(\n [random.choice(string.ascii_letters + string.digits) for _ in range(8)]\n )\n + \".db\",\n )\n )\n return abspath", "def new_db(path: str):\r\n conn = sqlite3.connect(path)\r\n cursor = conn.cursor()\r\n cursor.executescript(\"\"\"\r\n CREATE TABLE \"Ads\" (\r\n\t\"id\"\tINTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,\r\n\t\"Price\"\tREAL,\r\n\t\"Size\"\tREAL,\r\n\t\"DistrictId\"\tINTEGER,\r\n\t\"SeriesId\"\tINTEGER,\r\n\t\"StreetId\"\tINTEGER,\r\n\t\"StrNum\"\tINTEGER,\r\n\t\"Link\"\tTEXT,\r\n\t\"ImportDate\"\tTEXT,\r\n\t\"TypeOfDealId\"\tINTEGER,\r\n\t\"AmenitiesId\"\tINTEGER,\r\n\t\"UploadDate\"\tTEXT,\r\n\t\"Floor\"\tINTEGER,\r\n\t\"BuildingId\"\tINTEGER\r\n);\r\nCREATE TABLE \"Amenities\" (\r\n\t\"AmenitiesId\"\tINTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,\r\n\t\"Name\"\tTEXT NOT NULL UNIQUE\r\n);\r\nCREATE TABLE \"Buildings\" (\r\n\t\"BuildingId\"\tINTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,\r\n\t\"Name\"\tTEXT UNIQUE\r\n);\r\nCREATE TABLE \"Districts\" (\r\n\t\"DistrictId\"\tINTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,\r\n\t\"Name\"\tTEXT UNIQUE\r\n);\r\nCREATE TABLE \"Series\" (\r\n\t\"SeriesId\"\tINTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,\r\n\t\"Name\"\tTEXT UNIQUE\r\n);\r\nCREATE TABLE \"Streets\" (\r\n\t\"StreetId\"\tINTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,\r\n\t\"Name\"\tTEXT UNIQUE\r\n);\r\nCREATE TABLE \"TypeOfDeal\" (\r\n\t\"TypeOfDealId\"\tINTEGER PRIMARY KEY AUTOINCREMENT UNIQUE,\r\n\t\"Name\"\tTEXT UNIQUE\r\n)\r\n \"\"\")\r\n conn.commit()\r\n conn.close()", "def export_db(self, export_location: Path) -> None:\n raise NotImplementedError", "def make_table_path(keys, value, version=None):\n return _make_path(keys, value, '.csv', version)", "def create_temp_folder():\n path_join = os.path.join(tempfile.gettempdir(), id_generator(5))\n os.makedirs(path_join)\n return path_join", "def exportDB(self):\n sourcesession=svc.connect(self.__source,accessMode=coral.access_Update)\n destsession=svc.connect(self.__dest,accessMode = coral.access_Update)\n try:\n dbcp=DBCopy(sourcesession,destsession,1024)\n if self.__all:\n dbcp.copyDB()\n elif self.__inv:\n dbcp.copyInventory()\n elif len(self.__tree) != 0:\n dbcp.copyTrees([self.__tree])\n del sourcesession\n del destsession\n except Exception, e:\n print str(e)\n del sourcesession\n del destsession", "def 
_make_path(self) -> str:\r\n path_ = Path(path.join(conf.instance.output_path, self.path_prefix, self.name))\r\n if self.is_identifier_in_paths:\r\n path_ = path_ / self.identifier\r\n return path_", "def writeGpDbidFile(directory, dbid, logger=None):\n d = GpDbidFile(directory, logger=logger)\n d.dbid = dbid\n d.write_gp_dbid()", "def clone(source, destination):\n\t\treturn \"CREATE DATABASE {0} WITH TEMPLATE {1};\".format(destination, source)", "def update_gpdbid_file(array):\n \n standby_datadir = os.path.normpath(array.standbyMaster.getSegmentDataDirectory())\n\n # MPP-13245, use single mechanism to manage gp_dbid file instead of ad-hoc replace\n writeGpDbidFile(standby_datadir, 1, get_logger_if_verbose())", "def create_download_list(path, save_path):\n\n df = pd.read_csv(path)\n adjust_target_variable_labels(df)\n\n portions = []\n for target_label, df_dx1 in df.groupby('dx1'):\n\n if target_label not in LABEL_MAP.keys():\n continue\n\n portion = PORTION_MAP[target_label]\n msk = np.random.rand(len(df_dx1)) < portion\n portions.append(df_dx1[msk])\n\n selected_df = pd.concat(portions)\n selected_df = selected_df['MR ID']\n\n with open(save_path, 'w'):\n for selected in selected_df['MR ID']:\n pass\n\n # selected_df.to_csv(save_path, index=False)", "def get_save_dir(base_dir, name, id_max=100):\n for uid in range(1, id_max):\n save_dir = os.path.join(base_dir, f'{name}-{uid:02d}')\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n return save_dir\n\n raise RuntimeError('Too many save directories created with the same name. \\\n Delete old save directories or use another name.')", "def make_database(num_files=10):\n for i in range(num_files):\n print('\\n\\n\\nCreating set', str(i), '\\n\\n\\n')\n s_file = 'set' + str(i) + '.hdf5' \n play_dominoes(save_file=s_file)", "def create_data_id(datatype, paths=None, add_uuid=True):\n # Need data or added uuid\n fullpath = [\n datatype,\n ]\n if paths:\n fullpath.extend(list(paths))\n if add_uuid:\n fullpath.append(str(uuid.uuid1()))\n if not fullpath:\n raise Exception(\"No path created\")\n return \"http://earthscope.org/%s/\" % \"/\".join(fullpath)", "def create_all(graph,first_last_fn):\n trip_id = 1\n line_num = 0\n num_trips = 0\n trip_id2model = {}\n #paths = {}\n p = Path(trip_id,graph,line_num=line_num)\n trip_id2model[trip_id] = p.edges\n num_trips += 1\n #paths[trip_id] = p\n while p.next_line != len(graph.lines):#file_length:\n graph.trip_id2line_num[trip_id] = line_num\n line_num = p.next_line\n trip_id = normalize_simple(graph.lines[line_num])[0]\n #trip_id = dg.normalize(lines[line_num])[0]\n p = Path(trip_id,graph,line_num=line_num)\n trip_id2model[trip_id] = p.edges\n num_trips += 1\n # paths[trip_id] = p\n graph.trip_id2line_num[trip_id] = line_num\n graph.num_trips = num_trips\n\n\n with open(first_last_fn,'wb') as output:\n pickle.dump(graph.first_last2trip_ids,output)\n\n with open('pickles/trip_id2model.pickle','wb') as output:\n pickle.dump(trip_id2model,output)\n #return paths", "def main(path_to_cdr_ids, path_to_db):\n from sqlalchemy import create_engine\n import pandas as pd\n\n cdr_ids_to_get = set(open(path_to_cdr_ids).readlines())\n\n cdr_ids_str = ','.join(['\"{}\"'.format(x) for x in cdr_ids_to_get])\n query_fmt = 'select * from cdr_id_to_homology where cdr_id in ({})'.format\n\n sql_con = create_engine('sqlite:///{}'.format(path_to_db))\n\n df = pd.read_sql(query_fmt(cdr_ids_str), sql_con)\n\n df = df.pivot(columns='homology').fillna(False)\n\n df.to_pickle('data/generated/homology_df.pkl')", "def 
get_ida_exported_files():\n create_random_filename()\n dirname = os.path.dirname(idc.get_idb_path())\n file_path = os.path.join(dirname, GLOBAL_FILENAME)\n xml_file_path = file_path + \".xml\"\n bin_file_path = file_path + \".bytes\"\n\n return xml_file_path, bin_file_path", "def write_gp_dbid(self):\n INFO = self.logger.info\n INFO('%s - write_gp_dbid' % self.filepath)\n\n if os.path.exists(self.filepath):\n INFO('found existing file')\n\n os.remove(self.filepath)\n INFO('removed existing file')\n\n self.logger.info('opening new file')\n with open(self.filepath, 'w') as f:\n self.format(f)\n\n INFO('setting read only')\n os.chmod(self.filepath, stat.S_IRUSR) # user read permissions (0400)\n\n INFO('verifying file')\n v = GpDbidFile(self.datadir, do_read=True)\n assert self.dbid == v.dbid\n assert self.standby_dbid == v.standby_dbid", "def export_data(ibs, gid_list, aid_list, nid_list, new_dbpath=None):\n import wbia\n\n imgsetid_list = ut.unique_unordered(ut.flatten(ibs.get_image_imgsetids(gid_list)))\n gsgrid_list = ut.unique_unordered(ut.flatten(ibs.get_image_gsgrids(gid_list)))\n\n # TODO: write SQL query to do this\n am_rowids = ibs._get_all_annotmatch_rowids()\n flags1_list = [aid in set(aid_list) for aid in ibs.get_annotmatch_aid1(am_rowids)]\n flags2_list = [aid in set(aid_list) for aid in ibs.get_annotmatch_aid2(am_rowids)]\n flag_list = ut.and_lists(flags1_list, flags2_list)\n am_rowids = ut.compress(am_rowids, flag_list)\n # am_rowids = ibs.get_valid_aids(ibs.get_valid_aids())\n\n rowid_subsets = {\n const.ANNOTATION_TABLE: aid_list,\n const.NAME_TABLE: nid_list,\n const.IMAGE_TABLE: gid_list,\n const.ANNOTMATCH_TABLE: am_rowids,\n const.GSG_RELATION_TABLE: gsgrid_list,\n const.IMAGESET_TABLE: imgsetid_list,\n }\n ibs_dst = wbia.opendb(dbdir=new_dbpath, allow_newdir=True)\n # Main merge driver\n merge_databases(ibs, ibs_dst, rowid_subsets=rowid_subsets)\n logger.info('Exported to {!r}'.format(new_dbpath))\n return new_dbpath", "def new_database(app):\n app.status.message(\"Opening a folder..\")\n path = app.dialog.directory(\"Select a folder for the new database..\")\n if path == '':\n app.status.message('') \n return\n app.status.cursorToHourglass()\n app.close()\n folder = db.database(path=path, \n status = app.status, \n dialog = app.dialog)\n app.display(folder)\n app.status.hide()\n app.status.cursorToNormal()", "def new_database(self, rel_id=None, name_suffix=\"\"):\n rel = self.framework.model.get_relation(self.relation_name, rel_id)\n\n if name_suffix:\n name_suffix = \"_{}\".format(name_suffix)\n db_name = \"juju_db_{}_{}{}\".format(\n sanitize_name(self.charm.model.name),\n sanitize_name(self.charm.app.name),\n sanitize_name(name_suffix),\n )\n # Cassandra does not allow keyspace names longer than 48 characters\n if len(db_name) > 48:\n raise NameLengthError(\"Database name can not be more than 48 characters\")\n dbs = self._requested_databases(rel)\n dbs.append(db_name)\n if not len(dbs) == len(set(dbs)):\n raise NameDuplicateError(\"Database names are not unique\")\n self._set_requested_databases(rel, dbs)", "def create_new_db():\n global data_base, table\n data_base = asksaveasfilename(title=\"Select file\", filetypes=((\"DATA BASE\", \"*.db\"), (\"all files\", \"*.*\")),\n defaultextension='.db')\n\n if Path(data_base).suffix == '.db':\n create_win_create_table()\n else:\n mistake_db_file()", "def GetIdbDir():\n return os.path.dirname(ida_loader.get_path(ida_loader.PATH_TYPE_IDB)) + os.sep", "def build_path_device(device_id):\n padding_device = 
PAIRS_SHINGLES_DEVICE * 2\n s = padding_zeroes(int(int(device_id) % NUMBER_DEVICES), padding_device)\n res = ''\n for i in range(0, padding_device, 2):\n res += s[i: i+2] + '/'\n return res" ]
[ "0.6192096", "0.58190155", "0.57944864", "0.5495239", "0.5478381", "0.54024965", "0.54021364", "0.53865814", "0.5273472", "0.5242266", "0.5226487", "0.5211098", "0.5210646", "0.51828325", "0.5147821", "0.51385933", "0.5101526", "0.5097332", "0.5086912", "0.508547", "0.5063234", "0.5037976", "0.5035955", "0.5033129", "0.5025619", "0.5020751", "0.50158244", "0.5003124", "0.49728495", "0.4952225" ]
0.7305864
0
PZ_Master1 had annotmatch rowids that did not agree with the current name labeling. Looking at the inconsistencies in the graph interface was too cumbersome, because over 3000 annots were incorrectly grouped together. This function deletes any annotmatch rowid that is not consistent with the current labeling so we can go forward with using the new AnnotInference object
def fix_annotmatch_pzmaster1(): import wbia ibs = wbia.opendb('PZ_Master1') infr = wbia.AnnotInference(ibs=ibs, aids=ibs.get_valid_aids(), verbose=5) infr.initialize_graph() annots = ibs.annots() aid_to_nid = ut.dzip(annots.aids, annots.nids) if False: infr.reset_feedback() infr.ensure_mst() infr.apply_feedback_edges() infr.relabel_using_reviews() infr.start_qt_interface() # Get annotmatch rowids that agree with current labeling if False: annotmatch = ibs.db.get_table_as_pandas('annotmatch') import pandas as pd flags1 = pd.isnull(annotmatch['annotmatch_evidence_decision']) flags2 = annotmatch['annotmatch_tag_text'] == '' bad_part = annotmatch[flags1 & flags2] rowids = bad_part.index.tolist() ibs.delete_annotmatch(rowids) if False: # Delete bidirectional annotmatches annotmatch = ibs.db.get_table_as_pandas('annotmatch') df = annotmatch.set_index(['annot_rowid1', 'annot_rowid2']) # Find entires that have both directions pairs1 = annotmatch[['annot_rowid1', 'annot_rowid2']].values f_edges = {tuple(p) for p in pairs1} b_edges = {tuple(p[::-1]) for p in pairs1} isect_edges = {tuple(sorted(p)) for p in b_edges.intersection(f_edges)} isect_edges1 = list(isect_edges) isect_edges2 = [p[::-1] for p in isect_edges] # cols = ['annotmatch_evidence_decision', 'annotmatch_tag_text'] import pandas as pd custom_ = { (559, 4909): (False, ['photobomb']), (7918, 8041): (False, ['photobomb']), (6634, 6754): (False, ['photobomb']), (3707, 3727): (False, ['photobomb']), (86, 103): (False, ['photobomb']), } extra_ = {} fixme_edges = [] d1 = df.loc[isect_edges1].reset_index(drop=False) d2 = df.loc[isect_edges2].reset_index(drop=False) flags = d1['annotmatch_evidence_decision'] != d2['annotmatch_evidence_decision'] from wbia.tag_funcs import _parse_tags for f, r1, r2 in zip(flags, d1.iterrows(), d2.iterrows()): v1, v2 = r1[1], r2[1] aid1 = v1['annot_rowid1'] aid2 = v1['annot_rowid2'] truth_real = ( ibs.const.EVIDENCE_DECISION.POSITIVE if aid_to_nid[aid1] == aid_to_nid[aid2] else ibs.const.EVIDENCE_DECISION.NEGATIVE ) truth1 = v1['annotmatch_evidence_decision'] truth2 = v2['annotmatch_evidence_decision'] t1 = _parse_tags(v1['annotmatch_tag_text']) t2 = _parse_tags(v2['annotmatch_tag_text']) newtag = ut.union_ordered(t1, t2) if (aid1, aid2) in custom_: continue fixme_flag = False if not pd.isnull(truth1): if truth_real != truth1: fixme_flag = True if not pd.isnull(truth2): if truth_real != truth2: fixme_flag = True if fixme_flag: logger.info('newtag = {!r}'.format(newtag)) logger.info('truth_real = {!r}'.format(truth_real)) logger.info('truth1 = {!r}'.format(truth1)) logger.info('truth2 = {!r}'.format(truth2)) logger.info('aid1 = {!r}'.format(aid1)) logger.info('aid2 = {!r}'.format(aid2)) fixme_edges.append((aid1, aid2)) else: extra_[(aid1, aid2)] = (truth_real, newtag) extra_.update(custom_) new_pairs = extra_.keys() new_truths = ut.take_column(ut.dict_take(extra_, new_pairs), 0) new_tags = ut.take_column(ut.dict_take(extra_, new_pairs), 1) new_tag_texts = [';'.join(t) for t in new_tags] aids1, aids2 = ut.listT(new_pairs) # Delete the old ibs.delete_annotmatch( d1['annotmatch_rowid'].values.tolist() + d2['annotmatch_rowid'].values.tolist() ) # Add the new ams = ibs.add_annotmatch_undirected(aids1, aids2) ibs.set_annotmatch_evidence_decision(ams, new_truths) ibs.set_annotmatch_tag_text(ams, new_tag_texts) if False: import wbia.guitool as gt gt.ensure_qapp() ut.qtensure() from wbia.gui import inspect_gui inspect_gui.show_vsone_tuner(ibs, aid1, aid2) # pairs2 = pairs1.T[::-1].T # idx1, idx2 = 
ut.isect_indices(list(map(tuple, pairs1)), # list(map(tuple, pairs2))) # r_edges = list(set(map(tuple, map(sorted, pairs1[idx1])))) # unique_pairs = list(set(map(tuple, map(sorted, pairs1[idx1])))) # df = annotmatch.set_index(['annot_rowid1', 'annot_rowid2']) x = ut.ddict(list) annotmatch = ibs.db.get_table_as_pandas('annotmatch') import ubelt as ub _iter = annotmatch.iterrows() prog = ub.ProgIter(_iter, length=len(annotmatch)) for k, m in prog: aid1 = m['annot_rowid1'] aid2 = m['annot_rowid2'] if m['annotmatch_evidence_decision'] == ibs.const.EVIDENCE_DECISION.POSITIVE: if aid_to_nid[aid1] == aid_to_nid[aid2]: x['agree1'].append(k) else: x['disagree1'].append(k) elif m['annotmatch_evidence_decision'] == ibs.const.EVIDENCE_DECISION.NEGATIVE: if aid_to_nid[aid1] == aid_to_nid[aid2]: x['disagree2'].append(k) else: x['agree2'].append(k) ub.map_vals(len, x) ut.dict_hist(annotmatch.loc[x['disagree1']]['annotmatch_tag_text']) disagree1 = annotmatch.loc[x['disagree1']] pb_disagree1 = disagree1[disagree1['annotmatch_tag_text'] == 'photobomb'] aids1 = pb_disagree1['annot_rowid1'].values.tolist() aids2 = pb_disagree1['annot_rowid2'].values.tolist() aid_pairs = list(zip(aids1, aids2)) infr = wbia.AnnotInference.from_pairs(aid_pairs, ibs=ibs, verbose=5) if False: feedback = infr.read_wbia_annotmatch_feedback(edges=infr.edges()) infr.external_feedback = feedback infr.apply_feedback_edges() infr.start_qt_interface(loop=False) # Delete these values if False: nonpb_disagree1 = disagree1[disagree1['annotmatch_tag_text'] != 'photobomb'] disagree2 = annotmatch.loc[x['disagree2']] ibs.delete_annotmatch(nonpb_disagree1['annotmatch_rowid']) ibs.delete_annotmatch(disagree2['annotmatch_rowid']) # ut.dict_hist(disagree1['annotmatch_tag_text']) import networkx as nx graph = nx.Graph() graph.add_edges_from(zip(pb_disagree1['annot_rowid1'], pb_disagree1['annot_rowid2'])) list(nx.connected_components(graph)) set(annotmatch.loc[x['disagree2']]['annotmatch_tag_text']) # aid1, aid2 = 2585, 1875 # # pd.unique(annotmatch['annotmatch_evidence_decision']) # from wbia.gui import inspect_gui # inspect_gui.show_vsone_tuner(ibs, aid1, aid2) # from vtool import inspect_matches # aid1, aid2 = 2108, 2040 # pd.unique(annotmatch['annotmatch_tag_text']) # infr.reset_feedback() # infr.relabel_using_reviews()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_04_remove_annotations(self):\n self.addAnnotation(\"annotation1\", self.host.id, \"HOST\")\n self.removeAnnotation(self.added_annotations[-1].annotation.id)\n del self.added_annotations[-1]", "def remerge_subset():\n import wbia\n\n ibs1 = wbia.opendb('PZ_PB_RF_TRAIN')\n ibs2 = wbia.opendb('PZ_Master1')\n\n gids1, gids2 = ibs1.images(), ibs2.images()\n idxs1, idxs2 = ut.isect_indices(gids1.uuids, gids2.uuids)\n isect_gids1, isect_gids2 = gids1.take(idxs1), gids2.take(idxs2)\n\n assert all(\n set.issubset(set(a1), set(a2))\n for a1, a2 in zip(isect_gids1.annot_uuids, isect_gids2.annot_uuids)\n )\n\n annot_uuids = ut.flatten(isect_gids1.annot_uuids)\n # aids1 = ibs1.annots(ibs1.get_annot_aids_from_uuid(annot_uuids), asarray=True)\n # aids2 = ibs2.annots(ibs2.get_annot_aids_from_uuid(annot_uuids), asarray=True)\n aids1 = ibs1.annots(uuids=annot_uuids, asarray=True)\n aids2 = ibs2.annots(uuids=annot_uuids, asarray=True)\n import numpy as np\n\n to_aids2 = dict(zip(aids1, aids2))\n # to_aids1 = dict(zip(aids2, aids1))\n\n # Step 1) Update individual annot properties\n # These annots need updates\n # np.where(aids1.visual_uuids != aids2.visual_uuids)\n # np.where(aids1.semantic_uuids != aids2.semantic_uuids)\n\n annot_unary_props = [\n # 'yaws', 'bboxes', 'thetas', 'qual', 'species', 'unary_tags']\n 'yaws',\n 'bboxes',\n 'thetas',\n 'qual',\n 'species',\n 'case_tags',\n 'multiple',\n 'age_months_est_max',\n 'age_months_est_min', # 'sex_texts'\n ]\n to_change = {}\n for key in annot_unary_props:\n prop1 = getattr(aids1, key)\n prop2 = getattr(aids2, key)\n diff_idxs = set(np.where(prop1 != prop2)[0])\n if diff_idxs:\n diff_prop1 = ut.take(prop1, diff_idxs)\n diff_prop2 = ut.take(prop2, diff_idxs)\n logger.info('key = {!r}'.format(key))\n logger.info('diff_prop1 = {!r}'.format(diff_prop1))\n logger.info('diff_prop2 = {!r}'.format(diff_prop2))\n to_change[key] = diff_idxs\n if to_change:\n changed_idxs = ut.unique(ut.flatten(to_change.values()))\n logger.info('Found %d annots that need updated properties' % len(changed_idxs))\n logger.info('changing unary attributes: {!r}'.format(to_change))\n if False and ut.are_you_sure('apply change'):\n for key, idxs in to_change.items():\n subaids1 = aids1.take(idxs)\n subaids2 = aids2.take(idxs)\n prop1 = getattr(subaids1, key)\n # prop2 = getattr(subaids2, key)\n setattr(subaids2, key, prop1)\n else:\n logger.info('Annot properties are in sync. 
Nothing to change')\n\n # Step 2) Update annotmatch - pairwise relationships\n infr1 = wbia.AnnotInference(aids=aids1.aids, ibs=ibs1, verbose=3, autoinit=False)\n\n # infr2 = wbia.AnnotInference(aids=ibs2.annots().aids, ibs=ibs2, verbose=3)\n aids2 = ibs2.get_valid_aids(is_known=True)\n infr2 = wbia.AnnotInference(aids=aids2, ibs=ibs2, verbose=3)\n infr2.reset_feedback('annotmatch', apply=True)\n\n # map feedback from ibs1 onto ibs2 using ibs2 aids.\n fb1 = infr1.read_wbia_annotmatch_feedback()\n fb1_t = {(to_aids2[u], to_aids2[v]): val for (u, v), val in fb1.items()}\n fb1_df_t = infr2._pandas_feedback_format(fb1_t).drop('am_rowid', axis=1)\n\n # Add transformed feedback into ibs2\n infr2.add_feedback_from(fb1_df_t)\n\n # Now ensure that dummy connectivity exists to preserve origninal names\n # from wbia.algo.graph import nx_utils\n # for (u, v) in infr2.find_mst_edges('name_label'):\n # infr2.draw_aids((u, v))\n # cc1 = infr2.pos_graph.connected_to(u)\n # cc2 = infr2.pos_graph.connected_to(v)\n # logger.info(nx_utils.edges_cross(infr2.graph, cc1, cc2))\n # infr2.neg_redundancy(cc1, cc2)\n # infr2.pos_redundancy(cc2)\n\n infr2.relabel_using_reviews(rectify=True)\n infr2.apply_nondynamic_update()\n\n if False:\n infr2.wbia_delta_info()\n infr2.wbia_name_group_delta_info()\n\n if len(list(infr2.inconsistent_components())) > 0:\n raise NotImplementedError('need to fix inconsistencies first')\n # Make it so it just loops until inconsistencies are resolved\n infr2.prioritize()\n infr2.qt_review_loop()\n else:\n infr2.write_wbia_staging_feedback()\n infr2.write_wbia_annotmatch_feedback()\n infr2.write_wbia_name_assignment()\n\n # if False:\n # # Fix any inconsistency\n # infr2.start_qt_interface(loop=False)\n # test_nodes = [5344, 5430, 5349, 5334, 5383, 2280, 2265, 2234, 5399,\n # 5338, 2654]\n # import networkx as nx\n # nx.is_connected(infr2.graph.subgraph(test_nodes))\n # # infr = wbia.AnnotInference(aids=test_nodes, ibs=ibs2, verbose=5)\n\n # # randomly sample some new labels to verify\n # import wbia.guitool as gt\n # from wbia.gui import inspect_gui\n # gt.ensure_qapp()\n # ut.qtensure()\n # old_groups = ut.group_items(name_delta.index.tolist(), name_delta['old_name'])\n # del old_groups['____']\n\n # new_groups = ut.group_items(name_delta.index.tolist(), name_delta['new_name'])\n\n # from wbia.algo.hots import simulate\n # c = simulate.compare_groups(\n # list(new_groups.values()),\n # list(old_groups.values()),\n # )\n # ut.map_vals(len, c)\n # for aids in c['pred_splits']:\n # old_nids = ibs2.get_annot_nids(aids)\n # new_nids = ut.take_column(infr2.gen_node_attrs('name_label', aids), 1)\n # split_aids = ut.take_column(ut.group_items(aids, new_nids).values(), 0)\n # aid1, aid2 = split_aids[0:2]\n\n # if False:\n # inspect_gui.show_vsone_tuner(ibs2, aid1, aid2)\n # infr2.start_qt_interface(loop=False)\n\n # if False:\n # # import wbia\n # ibs1 = wbia.opendb('PZ_PB_RF_TRAIN')\n # infr1 = wbia.AnnotInference(aids='all', ibs=ibs1, verbose=3)\n # infr1.initialize_graph()\n # # infr1.reset_feedback('staging')\n # infr1.reset_feedback('annotmatch')\n # infr1.apply_feedback_edges()\n # infr1.relabel_using_reviews()\n # infr1.apply_review_inference()\n # infr1.start_qt_interface(loop=False)\n # delta = infr2.match_state_delta()\n # logger.info('delta = %r' % (delta,))\n\n # infr2.ensure_mst()\n # infr2.relabel_using_reviews()\n # infr2.apply_review_inference()\n\n # mst_edges = infr2.find_mst_edges()\n # set(infr2.graph.edges()).intersection(mst_edges)\n\n return\n \"\"\"\n TODO:\n Task 2:\n 
Build AnnotInfr for ibs2 then add all decision from\n ibs1 to the internal feedback dict.\n\n Ensure that all other (esp old name-id related) edges are correctly\n placed, then overrite with new vals (\n make sure implicit vals do not cuase conflicts with new\n explicit vals, but old explicit vals should cause a conflict).\n Then just commit to staging and then commit to annotmatch and\n re-infer the names.\n \"\"\"\n\n # Print some info about the delta\n # def _to_tup(x):\n # return tuple(x) if isinstance(x, list) else x\n # changetype_list = list(zip(\n # delta['old_decision'], delta['new_decision'],\n # map(_to_tup, delta['old_tags']),\n # map(_to_tup, delta['new_tags'])))\n # changetype_hist = ut.dict_hist(changetype_list, ordered=True)\n # logger.info(ut.align(ut.repr4(changetype_hist), ':'))\n\n # import pandas as pd\n # pd.options.display.max_rows = 20\n # pd.options.display.max_columns = 40\n # pd.options.display.width = 160\n # pd.options.display.float_format = lambda x: '%.4f' % (x,)\n\n # a, b = 86, 6265\n # c, d = to_aids1[a], to_aids1[b]\n # inspect_gui.show_vsone_tuner(ibs2, a, b)\n # inspect_gui.show_vsone_tuner(ibs1, to_aids1[a], to_aids1[b])\n # am1 = ibs1.get_annotmatch_rowids_between([to_aids1[a]],\n # [to_aids1[b]])\n # am2 = ibs2.get_annotmatch_rowids_between([a], [b])\n # logger.info(ibs1.db.get_table_csv('annotmatch', rowids=am1))\n # logger.info(ibs2.db.get_table_csv('annotmatch', rowids=am2))\n\n # inspect_gui.show_vsone_tuner(ibs2, 8, 242)\n # inspect_gui.show_vsone_tuner(ibs2, 86, 103)\n # inspect_gui.show_vsone_tuner(ibs2, 86, 6265)", "def clean_duplicate_documents(self):\n title_k = lambda x: x.title\n for k, g in groupby(sorted(self.annotation_documents, key=title_k), title_k):\n g = list(g)\n if len(g) > 1:\n # check first if one is in test set\n to_remove = [x for x in g if x not in self.test]\n if (\n len(to_remove) > 1\n ): # if test is not matched, make subselection based on annotation unit count\n select_k = lambda x: (\n len(x.events) + len(x.sentiment_expressions),\n x.annotator_id != \"gilles\",\n )\n to_remove.sort(key=select_k, reverse=True)\n to_remove = to_remove[1:]\n for docrm in to_remove:\n self.annotation_documents.remove(docrm)\n if docrm in self.dev:\n self.dev.remove(docrm)\n elif docrm in self.test:\n self.test.remove(docrm)\n print(f\"Duplicate doc removed: {docrm}\")", "def reset_annotations(self):\n # FIXME: this state does not make sense\n self.annotation_date_set = False\n self.annotation_comment_set = False\n self.annotation_type_set = False\n self.annotation_spdx_id_set = False", "def deleteLayoutIdAnnotation(*args):\n return _libsbml.deleteLayoutIdAnnotation(*args)", "def removeAnnotation(self,i=0):\n #print \"REMOVE %s\" % i\n map(undraw,self._annotations[i])\n del self._annotations[i]", "def test_inspect_annotations_remove_all(tmp_path):\n matplotlib = pytest.importorskip(\"matplotlib\")\n import matplotlib.pyplot as plt\n\n matplotlib.use(\"Agg\")\n plt.close(\"all\")\n\n bids_root = setup_bids_test_dir(tmp_path)\n bids_path = _bids_path.copy().update(root=bids_root)\n events_tsv_fpath = bids_path.copy().update(suffix=\"events\", extension=\".tsv\").fpath\n\n # Remove all Annotations.\n raw = read_raw_bids(bids_path=bids_path, verbose=\"error\")\n raw.set_annotations(None)\n raw.load_data()\n raw.save(raw.filenames[0], overwrite=True)\n # Delete events.tsv sidecar.\n (bids_path.copy().update(suffix=\"events\", extension=\".tsv\").fpath.unlink())\n\n # Add custom Annotation.\n inspect_dataset(bids_path, find_flat=False)\n 
raw_fig = mne_bids.inspect._global_vars[\"raw_fig\"]\n _add_annotation(raw_fig)\n\n # Close window and save changes.\n key_event = KeyEvent(name=\"Close\", canvas=raw_fig.canvas, key=raw_fig.mne.close_key)\n raw_fig.canvas.callbacks.process(\"key_press_event\", key_event)\n\n fig_dialog = mne_bids.inspect._global_vars[\"dialog_fig\"]\n key_event = KeyEvent(name=\"Save\", canvas=fig_dialog.canvas, key=\"return\")\n fig_dialog.canvas.callbacks.process(\"key_press_event\", key_event)\n\n # events.tsv sidecar should have been created.\n assert events_tsv_fpath.exists()\n\n # Remove the Annotation.\n inspect_dataset(bids_path, find_flat=False)\n raw_fig = mne_bids.inspect._global_vars[\"raw_fig\"]\n data_ax = raw_fig.mne.ax_main\n\n key_event = KeyEvent(name=\"Annotations\", canvas=raw_fig.canvas, key=\"a\")\n raw_fig.canvas.callbacks.process(\"key_press_event\", key_event)\n _fake_click(raw_fig, data_ax, [1.0, 1.0], xform=\"data\", button=3, kind=\"press\")\n\n # Close window and save changes.\n key_event = KeyEvent(name=\"Close\", canvas=raw_fig.canvas, key=raw_fig.mne.close_key)\n raw_fig.canvas.callbacks.process(\"key_press_event\", key_event)\n\n fig_dialog = mne_bids.inspect._global_vars[\"dialog_fig\"]\n key_event = KeyEvent(name=\"Save\", canvas=fig_dialog.canvas, key=\"return\")\n fig_dialog.canvas.callbacks.process(\"key_press_event\", key_event)\n\n # events.tsv sidecar should not exist anymore.\n assert not events_tsv_fpath.exists()", "def infer_data(self):\n ibs = self.ibs\n # The two matching aids\n self.aid_pair = (self.aid1, self.aid2)\n (aid1, aid2) = self.aid_pair\n self.match_text = ibs.get_match_text(self.aid1, self.aid2)\n # The names of the matching annotations\n self.nid1, self.nid2 = ibs.get_annot_name_rowids((aid1, aid2))\n self.name1, self.name2 = ibs.get_annot_names((aid1, aid2))\n self.other_valid_nids = []\n # The other annotations that belong to these two names\n self.gts_list = ibs.get_annot_groundtruth((aid1, aid2))\n self.gt1, self.gt2 = self.gts_list\n # A flat list of all the aids we are looking at\n self.is_split_case = self.nid1 == self.nid2\n self.all_aid_list = ut.unique_ordered([aid1, aid2] + self.gt1 + self.gt2)\n self.all_nid_list_orig = ibs.get_annot_name_rowids(self.all_aid_list)\n self.other_aids = list(set(self.all_aid_list) - {self.aid1, self.aid2})\n\n if self.is_split_case:\n # Split case\n self.nCols = max(2, len(self.other_aids))\n self.nRows = 2 if len(self.other_aids) > 0 else 1\n else:\n # Merge/New Match case\n self.nCols = max(len(self.gt1) + 1, len(self.gt2) + 1)\n self.nRows = 2\n self.nCols = min(self.max_cols, self.nCols)\n\n # Grab not just the exemplars\n\n if ut.VERBOSE or ut.is_developer():\n logger.info(\n '[matchver] __init__ nid1={!r}, nid2={!r} '.format(self.nid1, self.nid2)\n )\n logger.info('[matchver] __init__ self.gts_list=%r ' % (self.gts_list))\n\n if ut.VERBOSE or ut.is_developer():\n logger.info(\n '[matchver] __init__ nid1={!r}, nid2={!r} '.format(self.nid1, self.nid2)\n )\n logger.info('[matchver] __init__ self.gts_list=%r ' % (self.gts_list))", "def drop_matching_records(self, check):\n matches = self._match(check)\n for rec in matches:\n self._drop_bytes(rec)\n del self._records[rec['msg_id']]", "def clear_annotation(self, from_id):\n if from_id == -1:\n self.global_external_references = SFFGlobalExternalReferenceList()\n else:\n segment = self.segments.get_by_id(from_id)\n segment.biological_annotation.external_references = SFFExternalReferenceList()", "def rm_duplicates(self):\n # get uniq representation of 
existing detection documents\n existing = set(ed.uniq_data for ed in self.existing_detections)\n # remove duplicates\n for idx in xrange(len(self.new_detections)-1, -1, -1):\n nd = self.new_detections[idx]\n if nd.uniq_data in existing:\n self.new_detections.pop(idx)", "def _deshard_and_remove_padding(all_inferences, all_indices):\n # PyTree[batch_count * B, H, ...] -> PyTree[batch_count * B * H, ...]\n # batch_count * B * H is the total number of examples including padding\n # examples at the end if they exist.\n all_inferences = jax.tree_map(lambda x: x.reshape((-1,) + x.shape[2:]),\n all_inferences)\n\n # [batch_count * B, H] -> [batch_count * B * H]\n all_indices = all_indices.reshape(-1)\n\n # Remove padding.\n non_pad_idxs = np.where(all_indices >= 0)\n all_indices = all_indices[non_pad_idxs]\n all_inferences = jax.tree_map(lambda x: x[non_pad_idxs], all_inferences)\n return all_inferences, all_indices", "def deleteLayoutAnnotation(*args):\n return _libsbml.deleteLayoutAnnotation(*args)", "def delete_annotation(self, index=None):\n del self.annotations[index]", "def _rectified_relabel(infr, cc_subgraphs):\n # Determine which names can be reused\n from wbia.scripts import name_recitifer\n\n infr.print('grouping names for rectification', 3)\n grouped_oldnames_ = [\n list(nx.get_node_attributes(subgraph, 'name_label').values())\n for count, subgraph in enumerate(cc_subgraphs)\n ]\n # Make sure negatives dont get priority\n grouped_oldnames = [\n [n for n in group if len(group) == 1 or n > 0] for group in grouped_oldnames_\n ]\n infr.print(\n 'begin rectification of %d grouped old names' % (len(grouped_oldnames)), 2\n )\n new_labels = name_recitifer.find_consistent_labeling(\n grouped_oldnames, verbose=infr.verbose >= 3\n )\n infr.print('done rectifying new names', 2)\n new_flags = [\n not isinstance(n, int) and n.startswith('_extra_name') for n in new_labels\n ]\n\n for idx in ut.where(new_flags):\n new_labels[idx] = infr._next_nid()\n\n for idx, label in enumerate(new_labels):\n if label < 0 and len(grouped_oldnames[idx]) > 1:\n # Remove negative ids for grouped items\n new_labels[idx] = infr._next_nid()\n return new_labels", "def test_annotations_mapping():\n\told_pair_to_new_pair = {}\n\tfor search_row in map_rows:\n\t\tOL, OP, NL, NP = search_row[0], search_row[1], search_row[2], search_row[3]\n\t\tif not NL == 'NA':\n\t\t\told_pair_to_new_pair[(OL, OP)] = (NL, NP)\n\n\t# rows where multiple old categories map to a single new category\n\tsurjections = []\n\t# rows where only one old category maps to a single new category\n\tone_to_ones = []\n\tfor name_parent_pair in old_pair_to_new_pair:\n\t\tif len(old_pair_to_new_pair[name_parent_pair]) > 1:\n\t\t\tsurjections.append(name_parent_pair)\n\t\telse:\n\t\t\tone_to_ones.append(name_parent_pair)\n\n\t\n\tis_ann_correct = []\n\tfor OL_OP_pair in surjections + one_to_ones:\n\t\tfor old_cat in oset['categories']:\n\t\t\tif old_cat['name'] == OL_OP_pair[0] and old_cat['supercategory'] == OL_OP_pair[1]:\n\t\t\t\told_cat_id = old_cat['id']\n\t\t\t\tanns_to_check = []\n\t\t\t\tfor ann in oset['annotations']:\n\t\t\t\t\tif ann['category_id'] == old_cat_id:\n\t\t\t\t\t\tanns_to_check.append(ann)\n\t\t\t\t\n\t\t\t\t# now we have all the annotations associated with this particular old category\n\t\t\t\tNL, NP = old_pair_to_new_pair[(OL_OP_pair[0], OL_OP_pair[1])]\n\t\t\t\tfor new_cat in nset['categories']:\n\t\t\t\t\tif new_cat['name'] == NL and new_cat['supercategory'] == NP:\n\t\t\t\t\t\tfor ann in anns_to_check:\n\t\t\t\t\t\t\tann_id = 
ann['id']\n\t\t\t\t\t\t\tnew_ann = new_coco_obj.anns[ann_id]\n\t\t\t\t\t\t\tis_ann_correct += [new_ann['category_id'] == new_cat['id']]\n\n\tassert False not in is_ann_correct", "def DeleteAnnotationsByName(self, name):\n size = len(self._blip_data.content)\n self.__context.builder.DocumentAnnotationDelete(self._blip_data.wave_id,\n self._blip_data.wavelet_id,\n self._blip_data.blip_id,\n 0, size, name)\n for index in range(len(self._blip_data.annotations)):\n annotation = self._blip_data.annotations[index]\n if annotation.name == name:\n del self._blip_data.annotations[index]", "def deleteEvaluationNeedlesFromTable(self):\n #productive #onButton\n profprint()\n self.initTableView()\n for name in self.lastNeedleNames:\n print name\n ID=name.lstrip('manual-seg_')\n print \"delete validation needle with ID: <%s>\"%ID\n try:\n ID=int(ID)\n self.deleteNeedleFromTable(ID)\n except ValueError:\n print \"skipping invalid ID\"", "def remove_recog_label(self, event):\n\t\tc=self.seqframe\n\t\tc.delete('recogseqlabel')\n\t\treturn", "def test_total_new_annotations():\n\told_num_anns = len(oset['annotations'])\n\tnew_num_anns = len(nset['annotations'])\n\tnum_NAs_found = 0\n\n\told_anns = oset['annotations']\n\tfor ann in old_anns:\n\t\tann_id = ann['id']\n\t\tcat_id = ann['category_id']\n\t\tcat = old_coco_obj.cats[cat_id]\n\t\tOL = cat['name']\n\t\tfor search_row in map_rows:\n\t\t\tif OL == search_row[0]:\n\t\t\t\trow = search_row \n\t\t\t\tNL = row[2]\n\n\t\t\t\t# now we have the particular row from the CSV whose old category corresponds to this annotation's category\n\t\t\t\tif NL == 'NA':\n\t\t\t\t\tnum_NAs_found += 1\n\n\tassert old_num_anns - num_NAs_found == new_num_anns", "def test_embedded_marking_removal(self):\n container = stixmarx.new()\n package = container.package\n red_marking = generate_marking_spec(generate_red_marking_struct())\n \n incident = Incident(title=\"Test\")\n package.add_incident(incident)\n \n indicator = Indicator(title=\"Test\")\n incident.related_indicators.append(indicator)\n \n container.add_marking(indicator, red_marking)\n self.assertTrue(container.is_marked(indicator, red_marking))\n\n container.remove_marking(indicator, red_marking)\n self.assertFalse(container.is_marked(indicator, red_marking))", "def merge_nonjunk_into_new_name(self, event=None):\n # Delete all original names\n aid_list = self.all_aid_list\n aid_list_filtered = ut.filterfalse_items(\n aid_list, self.ibs.get_annot_isjunk(aid_list)\n )\n # Rename annotations\n self.ibs.set_annot_names_to_same_new_name(aid_list_filtered)\n self.update_callback()\n self.backend_callback()\n self.show_page()", "def removeAutolabel(call, args=(), kwargs={}, nodeClass='*'):", "def remove_ignored(num_frames, gt_data, pr_data, iou_threshold,\n ignore_categories, vis_threshold):\n ignore_categories = np.unique(ignore_categories)\n gt_data_in_frame = split_into_frames(num_frames, gt_data)\n pr_data_in_frame = split_into_frames(num_frames, pr_data)\n for t in range(num_frames):\n gt_curr = gt_data_in_frame[t]\n pr_curr = pr_data_in_frame[t]\n # Find matching within frame.\n iou_matrix = util.iou_xywh(gt_curr[:, BBOX_COLUMNS],\n pr_curr[:, BBOX_COLUMNS])\n overlap_matrix = (iou_matrix >= iou_threshold)\n match_pairs = util.match_detections(overlap_matrix, iou_matrix)\n # Remove annotations in any ignored category.\n gt_exclude = np.zeros(len(gt_curr), dtype=bool)\n if np.size(ignore_categories):\n gt_exclude |= np.isin(gt_curr[:, CATEGORY_COLUMN], ignore_categories)\n if vis_threshold is not None:\n gt_exclude |= 
~(gt_curr[:, VISIBILITY_COLUMN] >= vis_threshold)\n # Exclude any predictions that match to excluded annotations.\n pr_exclude = np.zeros(len(pr_curr), dtype=bool)\n pr_exclude[match_pairs[:, 1]] = gt_exclude[match_pairs[:, 0]]\n gt_data_in_frame[t] = gt_curr[~gt_exclude]\n pr_data_in_frame[t] = pr_curr[~pr_exclude]\n gt_num_before = len(gt_data)\n pr_num_before = len(pr_data)\n gt_data = np.concatenate(gt_data_in_frame, axis=0)\n pr_data = np.concatenate(pr_data_in_frame, axis=0)\n gt_num_after = len(gt_data)\n pr_num_after = len(pr_data)\n logging.info(\n 'remove ignore instances: predictions %d -> %d, annotations %d -> %d',\n pr_num_before, pr_num_after, gt_num_before, gt_num_after)\n return gt_data, pr_data", "def DeleteAnnotationsInRange(self, r, name):\n self.__context.builder.DocumentAnnotationDelete(self._blip_data.wave_id,\n self._blip_data.wavelet_id,\n self._blip_data.blip_id,\n r.start, r.end,\n name)\n # TODO(davidbyttow): split local annotations.", "def drop_info(batch):\n if 'id' in batch:\n batch.pop('id')\n if 'rng' in batch:\n batch.pop('rng')\n return batch", "def separate_annotations():\n data_root = '/home/ubuntu/datasets/YT-VIS/'\n ann_file = data_root + 'annotations/instances_train_sub.json'\n import json\n with open(ann_file, 'r') as f:\n ann = json.load(f)\n # ann['videos'] = ann['videos'][15]\n # video_id = [0]\n from tqdm import tqdm\n for id in tqdm(range(len(ann['videos']))):\n videos = []\n anns = []\n video = ann['videos'][id]\n video['id'] = 1\n videos.append(video)\n\n i = 1\n for a in ann['annotations']:\n if a['video_id'] == id + 1:\n anno = a\n anno['id'] = i\n anno['video_id'] = 1\n anns.append(anno)\n i += 1\n # anno = ann['annotations'][id]\n # anno['id'] = 1\n # anno['video_id'] = 1\n # anns.append(anno)\n\n file_name = videos[0]['file_names'][0].split('/')[0]\n\n ann_new = dict()\n ann_new['info'] = ann['info']\n ann_new['licenses'] = ann['licenses']\n ann_new['categories'] = ann['categories']\n ann_new['videos'] = videos\n ann_new['annotations'] = anns\n\n with open(data_root + 'train/Annotations/{}/{}_annotations.json'.format(file_name, file_name), 'w') as f:\n json.dump(ann_new, f, ensure_ascii=False)", "def pruneMarks(self):\n self.__prune_marks(self.nodes(data=True))", "def _check_consistency(self, data_id, anno_container):\n if data_id in self.database:\n previous = self.database[data_id]\n if len(previous) > 0:\n # check if the new annotations are the same as the previous\n if not np.all(previous[0].anno_container.annotations ==\n anno_container.annotations):\n msg = ('Conflicting annotations with same ID. Please '\n 'rename the new entry.')\n raise PyannoValueError(msg)", "def _validate_annotations(self):\n for i, (k, v) in enumerate(self._annotations_dict.items()):\n for index, annotation in enumerate(v):\n startOffset = int(annotation['startOffset'])\n endOffset = int(annotation['endOffset'])\n tweet = self._tweets_dict[k]\n annotatedText = annotation['annotatedText']\n\n realOffset = tweet.find(annotatedText)\n if realOffset != startOffset:\n #print(\"Fixing startOffset for {}. (annotated at position {}, but should be at {})\".format(k, startOffset, realOffset))\n\n diff = realOffset - startOffset\n annotation['startOffset'] = \"{}\".format(startOffset+diff)\n annotation['endOffset'] = \"{}\".format(endOffset+diff)" ]
[ "0.6065128", "0.57366157", "0.53676903", "0.53666043", "0.5321259", "0.5258625", "0.5243224", "0.52191055", "0.5205449", "0.519409", "0.5154932", "0.51453507", "0.51413393", "0.5103089", "0.50633407", "0.5005262", "0.49942034", "0.4972329", "0.49611598", "0.4933948", "0.48975125", "0.48863465", "0.4878465", "0.48771816", "0.4844995", "0.48327166", "0.48170355", "0.4800622", "0.47650203", "0.4762677" ]
0.73364335
0
Assumes ibs1 is an updated subset of ibs2. Remerges ibs1 back into ibs2.
def remerge_subset(): import wbia ibs1 = wbia.opendb('PZ_PB_RF_TRAIN') ibs2 = wbia.opendb('PZ_Master1') gids1, gids2 = ibs1.images(), ibs2.images() idxs1, idxs2 = ut.isect_indices(gids1.uuids, gids2.uuids) isect_gids1, isect_gids2 = gids1.take(idxs1), gids2.take(idxs2) assert all( set.issubset(set(a1), set(a2)) for a1, a2 in zip(isect_gids1.annot_uuids, isect_gids2.annot_uuids) ) annot_uuids = ut.flatten(isect_gids1.annot_uuids) # aids1 = ibs1.annots(ibs1.get_annot_aids_from_uuid(annot_uuids), asarray=True) # aids2 = ibs2.annots(ibs2.get_annot_aids_from_uuid(annot_uuids), asarray=True) aids1 = ibs1.annots(uuids=annot_uuids, asarray=True) aids2 = ibs2.annots(uuids=annot_uuids, asarray=True) import numpy as np to_aids2 = dict(zip(aids1, aids2)) # to_aids1 = dict(zip(aids2, aids1)) # Step 1) Update individual annot properties # These annots need updates # np.where(aids1.visual_uuids != aids2.visual_uuids) # np.where(aids1.semantic_uuids != aids2.semantic_uuids) annot_unary_props = [ # 'yaws', 'bboxes', 'thetas', 'qual', 'species', 'unary_tags'] 'yaws', 'bboxes', 'thetas', 'qual', 'species', 'case_tags', 'multiple', 'age_months_est_max', 'age_months_est_min', # 'sex_texts' ] to_change = {} for key in annot_unary_props: prop1 = getattr(aids1, key) prop2 = getattr(aids2, key) diff_idxs = set(np.where(prop1 != prop2)[0]) if diff_idxs: diff_prop1 = ut.take(prop1, diff_idxs) diff_prop2 = ut.take(prop2, diff_idxs) logger.info('key = {!r}'.format(key)) logger.info('diff_prop1 = {!r}'.format(diff_prop1)) logger.info('diff_prop2 = {!r}'.format(diff_prop2)) to_change[key] = diff_idxs if to_change: changed_idxs = ut.unique(ut.flatten(to_change.values())) logger.info('Found %d annots that need updated properties' % len(changed_idxs)) logger.info('changing unary attributes: {!r}'.format(to_change)) if False and ut.are_you_sure('apply change'): for key, idxs in to_change.items(): subaids1 = aids1.take(idxs) subaids2 = aids2.take(idxs) prop1 = getattr(subaids1, key) # prop2 = getattr(subaids2, key) setattr(subaids2, key, prop1) else: logger.info('Annot properties are in sync. Nothing to change') # Step 2) Update annotmatch - pairwise relationships infr1 = wbia.AnnotInference(aids=aids1.aids, ibs=ibs1, verbose=3, autoinit=False) # infr2 = wbia.AnnotInference(aids=ibs2.annots().aids, ibs=ibs2, verbose=3) aids2 = ibs2.get_valid_aids(is_known=True) infr2 = wbia.AnnotInference(aids=aids2, ibs=ibs2, verbose=3) infr2.reset_feedback('annotmatch', apply=True) # map feedback from ibs1 onto ibs2 using ibs2 aids. 
fb1 = infr1.read_wbia_annotmatch_feedback() fb1_t = {(to_aids2[u], to_aids2[v]): val for (u, v), val in fb1.items()} fb1_df_t = infr2._pandas_feedback_format(fb1_t).drop('am_rowid', axis=1) # Add transformed feedback into ibs2 infr2.add_feedback_from(fb1_df_t) # Now ensure that dummy connectivity exists to preserve origninal names # from wbia.algo.graph import nx_utils # for (u, v) in infr2.find_mst_edges('name_label'): # infr2.draw_aids((u, v)) # cc1 = infr2.pos_graph.connected_to(u) # cc2 = infr2.pos_graph.connected_to(v) # logger.info(nx_utils.edges_cross(infr2.graph, cc1, cc2)) # infr2.neg_redundancy(cc1, cc2) # infr2.pos_redundancy(cc2) infr2.relabel_using_reviews(rectify=True) infr2.apply_nondynamic_update() if False: infr2.wbia_delta_info() infr2.wbia_name_group_delta_info() if len(list(infr2.inconsistent_components())) > 0: raise NotImplementedError('need to fix inconsistencies first') # Make it so it just loops until inconsistencies are resolved infr2.prioritize() infr2.qt_review_loop() else: infr2.write_wbia_staging_feedback() infr2.write_wbia_annotmatch_feedback() infr2.write_wbia_name_assignment() # if False: # # Fix any inconsistency # infr2.start_qt_interface(loop=False) # test_nodes = [5344, 5430, 5349, 5334, 5383, 2280, 2265, 2234, 5399, # 5338, 2654] # import networkx as nx # nx.is_connected(infr2.graph.subgraph(test_nodes)) # # infr = wbia.AnnotInference(aids=test_nodes, ibs=ibs2, verbose=5) # # randomly sample some new labels to verify # import wbia.guitool as gt # from wbia.gui import inspect_gui # gt.ensure_qapp() # ut.qtensure() # old_groups = ut.group_items(name_delta.index.tolist(), name_delta['old_name']) # del old_groups['____'] # new_groups = ut.group_items(name_delta.index.tolist(), name_delta['new_name']) # from wbia.algo.hots import simulate # c = simulate.compare_groups( # list(new_groups.values()), # list(old_groups.values()), # ) # ut.map_vals(len, c) # for aids in c['pred_splits']: # old_nids = ibs2.get_annot_nids(aids) # new_nids = ut.take_column(infr2.gen_node_attrs('name_label', aids), 1) # split_aids = ut.take_column(ut.group_items(aids, new_nids).values(), 0) # aid1, aid2 = split_aids[0:2] # if False: # inspect_gui.show_vsone_tuner(ibs2, aid1, aid2) # infr2.start_qt_interface(loop=False) # if False: # # import wbia # ibs1 = wbia.opendb('PZ_PB_RF_TRAIN') # infr1 = wbia.AnnotInference(aids='all', ibs=ibs1, verbose=3) # infr1.initialize_graph() # # infr1.reset_feedback('staging') # infr1.reset_feedback('annotmatch') # infr1.apply_feedback_edges() # infr1.relabel_using_reviews() # infr1.apply_review_inference() # infr1.start_qt_interface(loop=False) # delta = infr2.match_state_delta() # logger.info('delta = %r' % (delta,)) # infr2.ensure_mst() # infr2.relabel_using_reviews() # infr2.apply_review_inference() # mst_edges = infr2.find_mst_edges() # set(infr2.graph.edges()).intersection(mst_edges) return """ TODO: Task 2: Build AnnotInfr for ibs2 then add all decision from ibs1 to the internal feedback dict. Ensure that all other (esp old name-id related) edges are correctly placed, then overrite with new vals ( make sure implicit vals do not cuase conflicts with new explicit vals, but old explicit vals should cause a conflict). Then just commit to staging and then commit to annotmatch and re-infer the names. 
""" # Print some info about the delta # def _to_tup(x): # return tuple(x) if isinstance(x, list) else x # changetype_list = list(zip( # delta['old_decision'], delta['new_decision'], # map(_to_tup, delta['old_tags']), # map(_to_tup, delta['new_tags']))) # changetype_hist = ut.dict_hist(changetype_list, ordered=True) # logger.info(ut.align(ut.repr4(changetype_hist), ':')) # import pandas as pd # pd.options.display.max_rows = 20 # pd.options.display.max_columns = 40 # pd.options.display.width = 160 # pd.options.display.float_format = lambda x: '%.4f' % (x,) # a, b = 86, 6265 # c, d = to_aids1[a], to_aids1[b] # inspect_gui.show_vsone_tuner(ibs2, a, b) # inspect_gui.show_vsone_tuner(ibs1, to_aids1[a], to_aids1[b]) # am1 = ibs1.get_annotmatch_rowids_between([to_aids1[a]], # [to_aids1[b]]) # am2 = ibs2.get_annotmatch_rowids_between([a], [b]) # logger.info(ibs1.db.get_table_csv('annotmatch', rowids=am1)) # logger.info(ibs2.db.get_table_csv('annotmatch', rowids=am2)) # inspect_gui.show_vsone_tuner(ibs2, 8, 242) # inspect_gui.show_vsone_tuner(ibs2, 86, 103) # inspect_gui.show_vsone_tuner(ibs2, 86, 6265)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merged_rep(self,other):\n raise NotImplementedError(\"Abstract method\")", "def merge_roidb(roidbs):\n roidb = roidbs[0]\n for r in roidbs[1:]:\n roidb.extend(r)\n return roidb", "def merge_roidb(roidbs):\n roidb = roidbs[0]\n for r in roidbs[1:]:\n roidb.extend(r)\n return roidb", "def _merge_two(self, obj1, obj2):\r\n for uniq_ident in obj2.keys():\r\n if (uniq_ident not in obj1) \\\r\n or (obj1[uniq_ident]['modified'] \\\r\n < obj2[uniq_ident]['modified']):\r\n obj1[uniq_ident] = obj2[uniq_ident]\r\n\r\n return obj1 # self._dict_to_list(obj1)\r", "def merge(): #Status: WIP\r\n pass", "def merge(self, other: 'Basket') -> None:\n for item in other:\n try:\n existing = self.items.get(ref=item.ref)\n existing.quantity += item.quantity\n existing.save(update_fields=['quantity'])\n except item.DoesNotExist:\n item.basket = self\n item.save(update_fields=['basket'])\n other.delete()\n self._cached_items = None", "def mergeWith(self, others):", "def _merge(self):\n raise NotImplementedError", "def merge_from(self, other):\n assert not self.is_final\n if self.parent is not None:\n assert other.parent is not None\n self.parent.merge_from(other.parent)\n self.isolated_names.update(other.isolated_names)\n self.read.update(other.read)\n self.modified.update(other.modified)\n self.bound.update(other.bound)\n self.deleted.update(other.deleted)\n self.annotations.update(other.annotations)\n self.params.update(other.params)", "def imerge(*iterables):\n return _IMerge(iterables)", "def merge(self, other):\n log.debug('Merging: %s and %s' % (self.serialize(), other.serialize()))\n for k in self.keys():\n for new_item in other[k]:\n if new_item not in self[k]:\n self[k].append(new_item)\n log.debug('Result: %s' % self.serialize())\n return self", "def update(self, other):\n if not other:\n return\n for o in other:\n self.replace(o)", "def merge(a,b):\n c = a.copy()\n c.update(b)\n return c", "def merge_databases(ibs_src, ibs_dst, rowid_subsets=None, localize_images=True):\n # TODO: ensure images are localized\n # otherwise this wont work\n logger.info(\n 'BEGIN MERGE OF {!r} into {!r}'.format(ibs_src.get_dbname(), ibs_dst.get_dbname())\n )\n # ibs_src.run_integrity_checks()\n # ibs_dst.run_integrity_checks()\n ibs_dst.update_annot_visual_uuids(ibs_dst.get_valid_aids())\n ibs_src.update_annot_visual_uuids(ibs_src.get_valid_aids())\n ibs_src.ensure_contributor_rowids()\n ibs_dst.ensure_contributor_rowids()\n ibs_src.fix_invalid_annotmatches()\n ibs_dst.fix_invalid_annotmatches()\n\n # Hack move of the external data\n if rowid_subsets is not None and const.IMAGE_TABLE in rowid_subsets:\n src_gid_list = rowid_subsets[const.IMAGE_TABLE]\n else:\n src_gid_list = ibs_src.get_valid_gids()\n imgpath_list = ibs_src.get_image_paths(src_gid_list)\n dst_imgdir = ibs_dst.get_imgdir()\n if localize_images:\n ut.copy_files_to(imgpath_list, dst_imgdir, overwrite=False, verbose=True)\n ignore_tables = [\n 'lblannot',\n 'lblimage',\n 'image_lblimage_relationship',\n 'annotation_lblannot_relationship',\n 'keys',\n ]\n # ignore_tables += [\n # 'contributors', 'party', 'configs'\n # ]\n # TODO: Fix database merge to allow merging tables with more than one superkey\n # and no primary superkey.\n error_tables = [\n 'imageset_image_relationship',\n 'annotgroup_annotation_relationship',\n 'annotmatch',\n ]\n ignore_tables += error_tables\n ibs_dst.db.merge_databases_new(\n ibs_src.db, ignore_tables=ignore_tables, rowid_subsets=rowid_subsets\n )\n\n # Add ImageSets\n blacklist_set = {\n 'Reviewed Images',\n 'Exemplars',\n 
'*Exemplars',\n 'All Images',\n '*All Images',\n '*Undetected Images',\n '*Ungrouped Images',\n }\n\n imageset_dict = {}\n src_guuids = ibs_src.get_image_uuids(src_gid_list)\n src_texts_list = ibs_src.get_image_imagesettext(src_gid_list)\n\n for src_guuid, src_text_list in zip(src_guuids, src_texts_list):\n current_set = imageset_dict.get(src_guuid, set())\n src_text_set = set(src_text_list) - blacklist_set\n src_text_set_ = set()\n for src_text in src_text_set:\n src_text_ = '{} / {}'.format(\n ibs_src.dbname,\n src_text,\n )\n src_text_set_.add(src_text_)\n src_text_set = src_text_set_ | current_set\n imageset_dict[src_guuid] = src_text_set\n\n # Set all imagesets for merged databases\n dst_guuids = list(imageset_dict.keys())\n dst_gid_list = ibs_dst.get_image_gids_from_uuid(dst_guuids)\n assert None not in dst_gid_list\n dst_text_set_list = [list(imageset_dict[dst_guuid]) for dst_guuid in dst_guuids]\n length_list = map(len, dst_text_set_list)\n zipped = zip(dst_gid_list, length_list)\n dst_gid_list = ut.flatten([[dst_gid] * length for dst_gid, length in zipped])\n dst_text_list = ut.flatten(dst_text_set_list)\n assert len(dst_gid_list) == len(dst_text_list)\n ibs_dst.set_image_imagesettext(dst_gid_list, dst_text_list)\n\n # Add imageset for Import\n src_image_uuids = ibs_src.get_image_uuids(src_gid_list)\n dst_gid_list = ibs_dst.get_image_gids_from_uuid(src_image_uuids)\n assert None not in dst_gid_list\n timestamp = ut.timestamp(format_='printable').split()[1]\n imageset_text = 'Import from {} on {}'.format(\n ibs_src.dbname,\n timestamp,\n )\n ibs_dst.set_image_imagesettext(dst_gid_list, [imageset_text] * len(dst_gid_list))\n\n logger.info(\n 'FINISHED MERGE {!r} into {!r}'.format(ibs_src.get_dbname(), ibs_dst.get_dbname())\n )", "def update(self, other):\n _merge_dicts(self, other)", "def merge(self, other):\n extras = other.difference(self)\n if len(extras) > 0:\n self.update(extras)\n self.reset()\n return True\n return False", "def merge_slices(s1, s2):\n assert s1._shape[1] == s2._shape[1], \"\"\"The arrays must have the same\n number of columns.\"\"\"\n assert s1._sparse == s2._sparse, \"\"\"A sparse and a dense array cannot\n be merged.\"\"\"\n assert s1._reg_shape == s2._reg_shape, \"\"\"The array regular blocks must\n have the same shape.\"\"\"\n\n len_s1 = s1.shape[0]\n len_s2 = s2.shape[0]\n\n # If s1 or s2 is empty, quickly return the other slice.\n if len_s1 == 0:\n return s2\n if len_s2 == 0:\n return s1\n\n reg_shape = s1._reg_shape\n reg_rows = reg_shape[0]\n\n # Compute the start and end of regular row blocks for s1\n top_rows_s1 = s1._top_left_shape[0]\n reg_rows_start_s1 = top_rows_s1 if top_rows_s1 != reg_rows else 0\n reg_rows_end_s1 = len_s1 - (len_s1 - reg_rows_start_s1) % reg_rows\n\n # Compute the start and end of regular row blocks for s2\n top_rows_s2 = s2._top_left_shape[0]\n reg_rows_start_s2 = top_rows_s2 if top_rows_s2 != reg_rows else 0\n reg_rows_end_s2 = len_s2 - (len_s2 - reg_rows_start_s2) % reg_rows\n\n # Get arrays with the regular row blocks for s1 and s2\n reg_s1 = s1[reg_rows_start_s1:reg_rows_end_s1]\n reg_s2 = s2[reg_rows_start_s2:reg_rows_end_s2]\n\n # Add the regular row blocks to the list all_blocks\n all_blocks = []\n if reg_s1.shape[0]:\n all_blocks.extend(reg_s1._blocks)\n if reg_s2.shape[0]:\n all_blocks.extend(reg_s2._blocks)\n\n # If there are remaining rows on the top or bottom of s1 and s2, add them\n # to the list extras. 
These are row blocks with less than reg_rows.\n extras = []\n if reg_rows_start_s1 > 0:\n extras.append(s1[:reg_rows_start_s1])\n if reg_rows_start_s2 > 0:\n extras.append(s1[:reg_rows_start_s2])\n if reg_rows_end_s1 < len_s1:\n extras.append(s1[reg_rows_end_s1:])\n if reg_rows_end_s2 < len_s2:\n extras.append(s2[reg_rows_end_s2:])\n\n # Arrange the rows of the arrays in extras in groups of reg_rows rows,\n # slicing the arrays when necessary. The last group may have less than\n # reg_rows rows.\n groups = []\n current_capacity = 0\n for extra in extras:\n len_extra = extra.shape[0]\n if current_capacity == 0:\n current_capacity = reg_rows\n groups.append([])\n if extra.shape[0] <= current_capacity:\n current_capacity -= extra.shape[0]\n groups[-1].append(extra)\n else:\n groups[-1].append(extra[:current_capacity])\n groups.append([extra[current_capacity:]])\n current_capacity = current_capacity - len_extra + reg_rows\n\n # Merge the row blocks in each group, forming a single row block per group,\n # and add it to the list all blocks.\n for g in groups:\n blocks = []\n for a in g:\n for row_block in a._blocks:\n blocks.append(row_block)\n group_blocks = [object() for _ in range(s1._n_blocks[1])]\n _merge_rows_keeping_cols(blocks, group_blocks)\n all_blocks.append(group_blocks)\n\n # Now all_blocks contains all the rows of s1 and s2 in an appropiate\n # arrangement to create the merged array.\n return Array(blocks=all_blocks, top_left_shape=reg_shape,\n reg_shape=reg_shape, shape=(len_s1 + len_s2, s1.shape[1]),\n sparse=s1._sparse)", "def array_merge(a1, a2, inplace=False, empty_source=False): \n if inplace:\n out = a1\n else:\n out = copy.deepcopy(a1)\n if empty_source:\n for i in range(len(out)):\n out.pop()\n for k in a2:\n out[k] = a2[k]\n return out", "def update_inplace_from(self, other):\n self.__dict__ = other.__dict__.copy()", "def merge(S1, S2, S):\n i = j = 0\n while i + j < len(S):\n if j == len(S2) or (i < len(S1) and S1[i] < S2[j]):\n S[i + j] = S1[i]\n i += 1\n else:\n S[i + j] = S2[j]\n j += 1", "def _merge_mapper(mapper1, mapper2):\n if len(mapper1) > 0:\n if len(mapper2) > 0:\n clusters1 = mapper1['cluster']\n clusters2 = mapper2['cluster']\n clusters = np.unique(np.concatenate((clusters1, clusters2), 0))\n\n mapper1['cluster'] = clusters\n mapper1['links'] += mapper2['links']\n else:\n mapper1 = mapper2\n return mapper1", "def merge(self, other_btree):\n pass", "def canBeMergedWith(self, other):", "def _partial_remap(self):\n \n if not self.has_remap():\n return\n \n del_keys = {}\n keep_ids = set()\n\n for key, val in self.overlap_map.items():\n if key in self.mapping1:\n # find inputs that need to be remapped\n if self.mapping1[key] not in del_keys:\n del_keys[self.mapping1[key]] = set()\n del_keys[self.mapping1[key]].add(key)\n keep_ids.add(self.mapping1[key])\n else:\n keep_ids.add(key)\n new_overlap = {}\n\n # handle new overlaps since mapping could cause merge\n for (key2, val2) in val:\n new_key = key2\n if key2 in self.mapping2:\n new_key = self.mapping2[key2]\n if new_key not in new_overlap:\n new_overlap[new_key] = 0\n new_overlap[new_key] += val2\n \n # update overlap list\n new_overlap_set = set()\n for body, overlap in new_overlap.items():\n new_overlap_set.add((body, overlap)) \n self.overlap_map[key] = new_overlap_set\n \n temp_overlap = self.overlap_map.copy()\n \n # merge rows mapping to same body, remove old body\n for newbody, bodylist in del_keys.items():\n self.overlap_map[newbody] = set()\n for bodyold in bodylist:\n 
self._merge_row(self.overlap_map[newbody], temp_overlap[bodyold])\n if bodyold not in keep_ids:\n del self.overlap_map[bodyold]\n \n self.mapping1 = None\n self.mapping2 = None", "def combine_dict(self, dict2):\n # iterate through smaller data set\n # base_set will be the larger set and is used for updating\n if len(self.content[\"values\"]) > len(dict2[\"values\"]):\n large_set = self.content[\"values\"]\n small_set = dict2[\"values\"]\n base_set = self.content\n else:\n small_set = self.content[\"values\"]\n large_set = dict2[\"values\"]\n base_set = dict2\n\n subset = {}\n for key in small_set.keys():\n # determine wether to compare keys\n if key in large_set:\n updated_l = large_set[key][\"updated_at\"]\n updated_s = small_set[key][\"updated_at\"]\n if updated_l == 'NULL':\n if updated_s != 'NULL':\n # update to not NULL set\n # if both updated_at are NULL, things\n # are ambiguos. We could defer to created_at\n # but for simplicity we will default to\n # the values in the larger set\n subset[key] = small_set[key]\n else:\n if updated_s == 'NULL':\n # update to not NULL set\n subset[key] = large_set[key]\n else:\n if updated_l > updated_s:\n subset[key] = large_set[key]\n else:\n subset[key] =small_set[key]\n else:\n subset[key] = small_set[key]\n base_set[\"values\"].update(subset)\n new_obj = BackupData()\n new_obj.load_from_dict(base_set)\n return new_obj", "def test_backup_merge_with_unmerged(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self._take_n_backups(n=2)\n self.backupset.start = 1\n self.backupset.end = 2\n self.log.info(\"Merging existing incremental backups\")\n status, output, message = self.backup_merge()\n if not status:\n self.fail(message)\n self.log.info(\"Taking more backups\")\n self._take_n_backups(n=2)\n self.backupset.start = 1\n self.backupset.end = 3\n self.log.info(\"Merging new backups into already merged backup\")\n status, output, message = self.backup_merge()\n if not status:\n self.fail(message)\n self.log.info(\"Successfully merged new backups with already merged backup\")", "def hard_blending(im1, im2):\n assert(im1.shape == im2.shape)\n h, w, c = im1.shape\n new_im = im2.copy()\n new_im[:,:(w//2),:] = im1[:,:(w//2),:]\n return new_im", "def _MixCollections(bc1, bc2):\n new = _BinsCollection()\n for i, b1 in enumerate(bc1.GetBins()):\n b2 = bc2.GetBin(i)\n new_bin = binner.Bin()\n new_bin.SetFixedMean(b1.GetFixedMean())\n assert all(b1.GetFixedMean() == b2.GetFixedMean())\n for p in b1.GetPoints():\n new_bin.AddPoint(p)\n for p in b2.GetPoints():\n new_bin.AddPoint(p)\n new.AddBin(new_bin)\n return new", "def merge(self, other):\n if other.n_points != self.n_points:\n raise ValueError(\n 'Deduplicator size mismatch: '\n f'{self.n_points} != {other.n_points}'\n )\n self.data_reduced.extend(other.data_reduced)\n self.data_kd.extend(other.data_kd)", "def merge_tickers(\n ticker1: Ticker,\n ticker2: Ticker,\n update_only: bool = False,\n):\n\n if update_only is True:\n for ticker2_product in ticker2.products:\n if ticker2_product in ticker1.products:\n ticker1.products[ticker2_product].metrics.update(\n ticker2.products[ticker2_product].metrics\n )\n else:\n for ticker2_product in ticker2.products:\n ticker1.products[ticker2_product].metrics.update(\n ticker2.products[ticker2_product].metrics\n )" ]
[ "0.5970242", "0.58449215", "0.58449215", "0.5833292", "0.57845783", "0.5681122", "0.5585994", "0.55577934", "0.5522436", "0.5519949", "0.54871976", "0.5459155", "0.54430294", "0.54322654", "0.5431394", "0.54148954", "0.5397526", "0.53864276", "0.5382178", "0.5351669", "0.53466773", "0.5345576", "0.53433466", "0.53166044", "0.52726215", "0.52600294", "0.52569675", "0.5241593", "0.5240357", "0.5175565" ]
0.696387
0
Finds the aids of annotations in ibs1 that are also in ibs2 ibs1 = wbia.opendb('PZ_Master1') ibs2 = wbia.opendb('PZ_MTEST')
def find_overlap_annots(ibs1, ibs2, method='annots'): if method == 'images': images1, images2 = ibs1.images(), ibs2.images() idxs1, idxs2 = ut.isect_indices(images1.uuids, images2.uuids) isect_images1 = images1.take(idxs1) annot_uuids = ut.flatten(isect_images1.annot_uuids) isect_annots1 = ibs1.annots(uuids=annot_uuids) elif method == 'annots': annots1, annots2 = ibs1.annots(), ibs2.annots() idxs1, idxs2 = ut.isect_indices(annots1.uuids, annots2.uuids) isect_annots1 = annots1.take(idxs1) return isect_annots1.aids
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remerge_subset():\n import wbia\n\n ibs1 = wbia.opendb('PZ_PB_RF_TRAIN')\n ibs2 = wbia.opendb('PZ_Master1')\n\n gids1, gids2 = ibs1.images(), ibs2.images()\n idxs1, idxs2 = ut.isect_indices(gids1.uuids, gids2.uuids)\n isect_gids1, isect_gids2 = gids1.take(idxs1), gids2.take(idxs2)\n\n assert all(\n set.issubset(set(a1), set(a2))\n for a1, a2 in zip(isect_gids1.annot_uuids, isect_gids2.annot_uuids)\n )\n\n annot_uuids = ut.flatten(isect_gids1.annot_uuids)\n # aids1 = ibs1.annots(ibs1.get_annot_aids_from_uuid(annot_uuids), asarray=True)\n # aids2 = ibs2.annots(ibs2.get_annot_aids_from_uuid(annot_uuids), asarray=True)\n aids1 = ibs1.annots(uuids=annot_uuids, asarray=True)\n aids2 = ibs2.annots(uuids=annot_uuids, asarray=True)\n import numpy as np\n\n to_aids2 = dict(zip(aids1, aids2))\n # to_aids1 = dict(zip(aids2, aids1))\n\n # Step 1) Update individual annot properties\n # These annots need updates\n # np.where(aids1.visual_uuids != aids2.visual_uuids)\n # np.where(aids1.semantic_uuids != aids2.semantic_uuids)\n\n annot_unary_props = [\n # 'yaws', 'bboxes', 'thetas', 'qual', 'species', 'unary_tags']\n 'yaws',\n 'bboxes',\n 'thetas',\n 'qual',\n 'species',\n 'case_tags',\n 'multiple',\n 'age_months_est_max',\n 'age_months_est_min', # 'sex_texts'\n ]\n to_change = {}\n for key in annot_unary_props:\n prop1 = getattr(aids1, key)\n prop2 = getattr(aids2, key)\n diff_idxs = set(np.where(prop1 != prop2)[0])\n if diff_idxs:\n diff_prop1 = ut.take(prop1, diff_idxs)\n diff_prop2 = ut.take(prop2, diff_idxs)\n logger.info('key = {!r}'.format(key))\n logger.info('diff_prop1 = {!r}'.format(diff_prop1))\n logger.info('diff_prop2 = {!r}'.format(diff_prop2))\n to_change[key] = diff_idxs\n if to_change:\n changed_idxs = ut.unique(ut.flatten(to_change.values()))\n logger.info('Found %d annots that need updated properties' % len(changed_idxs))\n logger.info('changing unary attributes: {!r}'.format(to_change))\n if False and ut.are_you_sure('apply change'):\n for key, idxs in to_change.items():\n subaids1 = aids1.take(idxs)\n subaids2 = aids2.take(idxs)\n prop1 = getattr(subaids1, key)\n # prop2 = getattr(subaids2, key)\n setattr(subaids2, key, prop1)\n else:\n logger.info('Annot properties are in sync. 
Nothing to change')\n\n # Step 2) Update annotmatch - pairwise relationships\n infr1 = wbia.AnnotInference(aids=aids1.aids, ibs=ibs1, verbose=3, autoinit=False)\n\n # infr2 = wbia.AnnotInference(aids=ibs2.annots().aids, ibs=ibs2, verbose=3)\n aids2 = ibs2.get_valid_aids(is_known=True)\n infr2 = wbia.AnnotInference(aids=aids2, ibs=ibs2, verbose=3)\n infr2.reset_feedback('annotmatch', apply=True)\n\n # map feedback from ibs1 onto ibs2 using ibs2 aids.\n fb1 = infr1.read_wbia_annotmatch_feedback()\n fb1_t = {(to_aids2[u], to_aids2[v]): val for (u, v), val in fb1.items()}\n fb1_df_t = infr2._pandas_feedback_format(fb1_t).drop('am_rowid', axis=1)\n\n # Add transformed feedback into ibs2\n infr2.add_feedback_from(fb1_df_t)\n\n # Now ensure that dummy connectivity exists to preserve origninal names\n # from wbia.algo.graph import nx_utils\n # for (u, v) in infr2.find_mst_edges('name_label'):\n # infr2.draw_aids((u, v))\n # cc1 = infr2.pos_graph.connected_to(u)\n # cc2 = infr2.pos_graph.connected_to(v)\n # logger.info(nx_utils.edges_cross(infr2.graph, cc1, cc2))\n # infr2.neg_redundancy(cc1, cc2)\n # infr2.pos_redundancy(cc2)\n\n infr2.relabel_using_reviews(rectify=True)\n infr2.apply_nondynamic_update()\n\n if False:\n infr2.wbia_delta_info()\n infr2.wbia_name_group_delta_info()\n\n if len(list(infr2.inconsistent_components())) > 0:\n raise NotImplementedError('need to fix inconsistencies first')\n # Make it so it just loops until inconsistencies are resolved\n infr2.prioritize()\n infr2.qt_review_loop()\n else:\n infr2.write_wbia_staging_feedback()\n infr2.write_wbia_annotmatch_feedback()\n infr2.write_wbia_name_assignment()\n\n # if False:\n # # Fix any inconsistency\n # infr2.start_qt_interface(loop=False)\n # test_nodes = [5344, 5430, 5349, 5334, 5383, 2280, 2265, 2234, 5399,\n # 5338, 2654]\n # import networkx as nx\n # nx.is_connected(infr2.graph.subgraph(test_nodes))\n # # infr = wbia.AnnotInference(aids=test_nodes, ibs=ibs2, verbose=5)\n\n # # randomly sample some new labels to verify\n # import wbia.guitool as gt\n # from wbia.gui import inspect_gui\n # gt.ensure_qapp()\n # ut.qtensure()\n # old_groups = ut.group_items(name_delta.index.tolist(), name_delta['old_name'])\n # del old_groups['____']\n\n # new_groups = ut.group_items(name_delta.index.tolist(), name_delta['new_name'])\n\n # from wbia.algo.hots import simulate\n # c = simulate.compare_groups(\n # list(new_groups.values()),\n # list(old_groups.values()),\n # )\n # ut.map_vals(len, c)\n # for aids in c['pred_splits']:\n # old_nids = ibs2.get_annot_nids(aids)\n # new_nids = ut.take_column(infr2.gen_node_attrs('name_label', aids), 1)\n # split_aids = ut.take_column(ut.group_items(aids, new_nids).values(), 0)\n # aid1, aid2 = split_aids[0:2]\n\n # if False:\n # inspect_gui.show_vsone_tuner(ibs2, aid1, aid2)\n # infr2.start_qt_interface(loop=False)\n\n # if False:\n # # import wbia\n # ibs1 = wbia.opendb('PZ_PB_RF_TRAIN')\n # infr1 = wbia.AnnotInference(aids='all', ibs=ibs1, verbose=3)\n # infr1.initialize_graph()\n # # infr1.reset_feedback('staging')\n # infr1.reset_feedback('annotmatch')\n # infr1.apply_feedback_edges()\n # infr1.relabel_using_reviews()\n # infr1.apply_review_inference()\n # infr1.start_qt_interface(loop=False)\n # delta = infr2.match_state_delta()\n # logger.info('delta = %r' % (delta,))\n\n # infr2.ensure_mst()\n # infr2.relabel_using_reviews()\n # infr2.apply_review_inference()\n\n # mst_edges = infr2.find_mst_edges()\n # set(infr2.graph.edges()).intersection(mst_edges)\n\n return\n \"\"\"\n TODO:\n Task 2:\n 
Build AnnotInfr for ibs2 then add all decision from\n ibs1 to the internal feedback dict.\n\n Ensure that all other (esp old name-id related) edges are correctly\n placed, then overrite with new vals (\n make sure implicit vals do not cuase conflicts with new\n explicit vals, but old explicit vals should cause a conflict).\n Then just commit to staging and then commit to annotmatch and\n re-infer the names.\n \"\"\"\n\n # Print some info about the delta\n # def _to_tup(x):\n # return tuple(x) if isinstance(x, list) else x\n # changetype_list = list(zip(\n # delta['old_decision'], delta['new_decision'],\n # map(_to_tup, delta['old_tags']),\n # map(_to_tup, delta['new_tags'])))\n # changetype_hist = ut.dict_hist(changetype_list, ordered=True)\n # logger.info(ut.align(ut.repr4(changetype_hist), ':'))\n\n # import pandas as pd\n # pd.options.display.max_rows = 20\n # pd.options.display.max_columns = 40\n # pd.options.display.width = 160\n # pd.options.display.float_format = lambda x: '%.4f' % (x,)\n\n # a, b = 86, 6265\n # c, d = to_aids1[a], to_aids1[b]\n # inspect_gui.show_vsone_tuner(ibs2, a, b)\n # inspect_gui.show_vsone_tuner(ibs1, to_aids1[a], to_aids1[b])\n # am1 = ibs1.get_annotmatch_rowids_between([to_aids1[a]],\n # [to_aids1[b]])\n # am2 = ibs2.get_annotmatch_rowids_between([a], [b])\n # logger.info(ibs1.db.get_table_csv('annotmatch', rowids=am1))\n # logger.info(ibs2.db.get_table_csv('annotmatch', rowids=am2))\n\n # inspect_gui.show_vsone_tuner(ibs2, 8, 242)\n # inspect_gui.show_vsone_tuner(ibs2, 86, 103)\n # inspect_gui.show_vsone_tuner(ibs2, 86, 6265)", "def overlap(annotations1, annotations2):\n return [val for val in annotations1 if val in annotations2]", "def fix_annotmatch_pzmaster1():\n import wbia\n\n ibs = wbia.opendb('PZ_Master1')\n infr = wbia.AnnotInference(ibs=ibs, aids=ibs.get_valid_aids(), verbose=5)\n infr.initialize_graph()\n annots = ibs.annots()\n aid_to_nid = ut.dzip(annots.aids, annots.nids)\n\n if False:\n infr.reset_feedback()\n infr.ensure_mst()\n infr.apply_feedback_edges()\n infr.relabel_using_reviews()\n infr.start_qt_interface()\n\n # Get annotmatch rowids that agree with current labeling\n if False:\n annotmatch = ibs.db.get_table_as_pandas('annotmatch')\n import pandas as pd\n\n flags1 = pd.isnull(annotmatch['annotmatch_evidence_decision'])\n flags2 = annotmatch['annotmatch_tag_text'] == ''\n bad_part = annotmatch[flags1 & flags2]\n rowids = bad_part.index.tolist()\n ibs.delete_annotmatch(rowids)\n\n if False:\n # Delete bidirectional annotmatches\n annotmatch = ibs.db.get_table_as_pandas('annotmatch')\n df = annotmatch.set_index(['annot_rowid1', 'annot_rowid2'])\n\n # Find entires that have both directions\n pairs1 = annotmatch[['annot_rowid1', 'annot_rowid2']].values\n f_edges = {tuple(p) for p in pairs1}\n b_edges = {tuple(p[::-1]) for p in pairs1}\n isect_edges = {tuple(sorted(p)) for p in b_edges.intersection(f_edges)}\n isect_edges1 = list(isect_edges)\n isect_edges2 = [p[::-1] for p in isect_edges]\n\n # cols = ['annotmatch_evidence_decision', 'annotmatch_tag_text']\n import pandas as pd\n\n custom_ = {\n (559, 4909): (False, ['photobomb']),\n (7918, 8041): (False, ['photobomb']),\n (6634, 6754): (False, ['photobomb']),\n (3707, 3727): (False, ['photobomb']),\n (86, 103): (False, ['photobomb']),\n }\n extra_ = {}\n\n fixme_edges = []\n\n d1 = df.loc[isect_edges1].reset_index(drop=False)\n d2 = df.loc[isect_edges2].reset_index(drop=False)\n flags = d1['annotmatch_evidence_decision'] != d2['annotmatch_evidence_decision']\n from wbia.tag_funcs 
import _parse_tags\n\n for f, r1, r2 in zip(flags, d1.iterrows(), d2.iterrows()):\n v1, v2 = r1[1], r2[1]\n aid1 = v1['annot_rowid1']\n aid2 = v1['annot_rowid2']\n truth_real = (\n ibs.const.EVIDENCE_DECISION.POSITIVE\n if aid_to_nid[aid1] == aid_to_nid[aid2]\n else ibs.const.EVIDENCE_DECISION.NEGATIVE\n )\n truth1 = v1['annotmatch_evidence_decision']\n truth2 = v2['annotmatch_evidence_decision']\n t1 = _parse_tags(v1['annotmatch_tag_text'])\n t2 = _parse_tags(v2['annotmatch_tag_text'])\n newtag = ut.union_ordered(t1, t2)\n if (aid1, aid2) in custom_:\n continue\n fixme_flag = False\n if not pd.isnull(truth1):\n if truth_real != truth1:\n fixme_flag = True\n if not pd.isnull(truth2):\n if truth_real != truth2:\n fixme_flag = True\n if fixme_flag:\n logger.info('newtag = {!r}'.format(newtag))\n logger.info('truth_real = {!r}'.format(truth_real))\n logger.info('truth1 = {!r}'.format(truth1))\n logger.info('truth2 = {!r}'.format(truth2))\n logger.info('aid1 = {!r}'.format(aid1))\n logger.info('aid2 = {!r}'.format(aid2))\n fixme_edges.append((aid1, aid2))\n else:\n extra_[(aid1, aid2)] = (truth_real, newtag)\n\n extra_.update(custom_)\n new_pairs = extra_.keys()\n new_truths = ut.take_column(ut.dict_take(extra_, new_pairs), 0)\n new_tags = ut.take_column(ut.dict_take(extra_, new_pairs), 1)\n new_tag_texts = [';'.join(t) for t in new_tags]\n aids1, aids2 = ut.listT(new_pairs)\n\n # Delete the old\n ibs.delete_annotmatch(\n d1['annotmatch_rowid'].values.tolist()\n + d2['annotmatch_rowid'].values.tolist()\n )\n\n # Add the new\n ams = ibs.add_annotmatch_undirected(aids1, aids2)\n ibs.set_annotmatch_evidence_decision(ams, new_truths)\n ibs.set_annotmatch_tag_text(ams, new_tag_texts)\n\n if False:\n import wbia.guitool as gt\n\n gt.ensure_qapp()\n ut.qtensure()\n from wbia.gui import inspect_gui\n\n inspect_gui.show_vsone_tuner(ibs, aid1, aid2)\n\n # pairs2 = pairs1.T[::-1].T\n # idx1, idx2 = ut.isect_indices(list(map(tuple, pairs1)),\n # list(map(tuple, pairs2)))\n # r_edges = list(set(map(tuple, map(sorted, pairs1[idx1]))))\n # unique_pairs = list(set(map(tuple, map(sorted, pairs1[idx1]))))\n # df = annotmatch.set_index(['annot_rowid1', 'annot_rowid2'])\n\n x = ut.ddict(list)\n annotmatch = ibs.db.get_table_as_pandas('annotmatch')\n import ubelt as ub\n\n _iter = annotmatch.iterrows()\n prog = ub.ProgIter(_iter, length=len(annotmatch))\n for k, m in prog:\n aid1 = m['annot_rowid1']\n aid2 = m['annot_rowid2']\n if m['annotmatch_evidence_decision'] == ibs.const.EVIDENCE_DECISION.POSITIVE:\n if aid_to_nid[aid1] == aid_to_nid[aid2]:\n x['agree1'].append(k)\n else:\n x['disagree1'].append(k)\n elif m['annotmatch_evidence_decision'] == ibs.const.EVIDENCE_DECISION.NEGATIVE:\n if aid_to_nid[aid1] == aid_to_nid[aid2]:\n x['disagree2'].append(k)\n else:\n x['agree2'].append(k)\n\n ub.map_vals(len, x)\n ut.dict_hist(annotmatch.loc[x['disagree1']]['annotmatch_tag_text'])\n\n disagree1 = annotmatch.loc[x['disagree1']]\n pb_disagree1 = disagree1[disagree1['annotmatch_tag_text'] == 'photobomb']\n aids1 = pb_disagree1['annot_rowid1'].values.tolist()\n aids2 = pb_disagree1['annot_rowid2'].values.tolist()\n aid_pairs = list(zip(aids1, aids2))\n infr = wbia.AnnotInference.from_pairs(aid_pairs, ibs=ibs, verbose=5)\n if False:\n feedback = infr.read_wbia_annotmatch_feedback(edges=infr.edges())\n infr.external_feedback = feedback\n infr.apply_feedback_edges()\n infr.start_qt_interface(loop=False)\n\n # Delete these values\n if False:\n nonpb_disagree1 = disagree1[disagree1['annotmatch_tag_text'] != 'photobomb']\n 
disagree2 = annotmatch.loc[x['disagree2']]\n ibs.delete_annotmatch(nonpb_disagree1['annotmatch_rowid'])\n ibs.delete_annotmatch(disagree2['annotmatch_rowid'])\n\n # ut.dict_hist(disagree1['annotmatch_tag_text'])\n import networkx as nx\n\n graph = nx.Graph()\n graph.add_edges_from(zip(pb_disagree1['annot_rowid1'], pb_disagree1['annot_rowid2']))\n list(nx.connected_components(graph))\n\n set(annotmatch.loc[x['disagree2']]['annotmatch_tag_text'])\n\n # aid1, aid2 = 2585, 1875\n # # pd.unique(annotmatch['annotmatch_evidence_decision'])\n # from wbia.gui import inspect_gui\n # inspect_gui.show_vsone_tuner(ibs, aid1, aid2)\n # from vtool import inspect_matches\n\n # aid1, aid2 = 2108, 2040\n\n # pd.unique(annotmatch['annotmatch_tag_text'])\n\n # infr.reset_feedback()\n # infr.relabel_using_reviews()", "def infer_data(self):\n ibs = self.ibs\n # The two matching aids\n self.aid_pair = (self.aid1, self.aid2)\n (aid1, aid2) = self.aid_pair\n self.match_text = ibs.get_match_text(self.aid1, self.aid2)\n # The names of the matching annotations\n self.nid1, self.nid2 = ibs.get_annot_name_rowids((aid1, aid2))\n self.name1, self.name2 = ibs.get_annot_names((aid1, aid2))\n self.other_valid_nids = []\n # The other annotations that belong to these two names\n self.gts_list = ibs.get_annot_groundtruth((aid1, aid2))\n self.gt1, self.gt2 = self.gts_list\n # A flat list of all the aids we are looking at\n self.is_split_case = self.nid1 == self.nid2\n self.all_aid_list = ut.unique_ordered([aid1, aid2] + self.gt1 + self.gt2)\n self.all_nid_list_orig = ibs.get_annot_name_rowids(self.all_aid_list)\n self.other_aids = list(set(self.all_aid_list) - {self.aid1, self.aid2})\n\n if self.is_split_case:\n # Split case\n self.nCols = max(2, len(self.other_aids))\n self.nRows = 2 if len(self.other_aids) > 0 else 1\n else:\n # Merge/New Match case\n self.nCols = max(len(self.gt1) + 1, len(self.gt2) + 1)\n self.nRows = 2\n self.nCols = min(self.max_cols, self.nCols)\n\n # Grab not just the exemplars\n\n if ut.VERBOSE or ut.is_developer():\n logger.info(\n '[matchver] __init__ nid1={!r}, nid2={!r} '.format(self.nid1, self.nid2)\n )\n logger.info('[matchver] __init__ self.gts_list=%r ' % (self.gts_list))\n\n if ut.VERBOSE or ut.is_developer():\n logger.info(\n '[matchver] __init__ nid1={!r}, nid2={!r} '.format(self.nid1, self.nid2)\n )\n logger.info('[matchver] __init__ self.gts_list=%r ' % (self.gts_list))", "def report_exon_overlap(strand1, exons1, strand2, exons2):\n #print(strand1)\n #print(exons1)\n #print(exons2)\n exons1 = convert_2dlst_to_set(exons1)\n first_exon1, last_exon1 = return_first_and_last_exon(exons1)\n exons2 = convert_2dlst_to_set(exons2)\n first_exon2, last_exon2 = return_first_and_last_exon(exons2)\n \n dct_report = dict()\n if not first_exon1 == first_exon2:\n \"\"\" first exon of isoseq and annotated-gene-model are not exactly the same \"\"\"\n if str(first_exon1).split(\".\")[1] == str(first_exon2).split(\".\")[1]:\n \"\"\" if first intron-start boundary is the same \"\"\"\n if int(str(first_exon1).split(\".\")[0]) > int(str(first_exon2).split(\".\")[0]):\n \"\"\" if isoseq first exon is shorter \"\"\"\n if strand1 == \"+\":\n dct_report[5] = \"partial_inside\"\n else:\n dct_report[3] = \"partial_inside\"\n else:\n \"\"\" if isoseq first exon is longer \"\"\"\n if strand1 == \"+\":\n dct_report[5] = \"partial_outside\"\n else:\n dct_report[3] = \"partial_outside\"\n else:\n if strand1 == \"+\":\n dct_report[5] = \"different\"\n else:\n dct_report[3] = \"different\"\n else:\n if strand1 == \"+\":\n 
dct_report[5] = \"same\"\n else:\n dct_report[3] = \"same\"\n\n if not last_exon1 == last_exon2:\n \"\"\" last exon of isoseq and annotated-gene-model are not exactly the same \"\"\"\n if str(last_exon1).split(\".\")[0] == str(last_exon2).split(\".\")[0]:\n \"\"\" if last intron-end boundary is the same \"\"\"\n if int(str(last_exon1).split(\".\")[1]) < int(str(last_exon2).split(\".\")[1]):\n \"\"\" if isoseq first exon is shorter \"\"\"\n if strand1 == \"+\":\n dct_report[3] = \"partial_inside\"\n else:\n dct_report[5] = \"partial_inside\"\n else:\n \"\"\" if isoseq first exon is longer \"\"\"\n if strand1 == \"+\":\n dct_report[3] = \"partial_outside\"\n else:\n dct_report[5] = \"partial_outside\"\n else:\n if strand1 == \"+\":\n dct_report[3] = \"different\"\n else:\n dct_report[5] = \"different\" \n else:\n if strand1 == \"+\":\n dct_report[3] = \"same\"\n else:\n dct_report[5] = \"same\"\n return(dct_report[5], dct_report[3])", "def intersect(a, b): \n # print(len(list(set(a) & set(b))), 'unique and matching names between FPL and Understat')\n return list(set(a) & set(b))", "def main():\n args = get_args()\n FILE = args.FILE\n annotations = args.annotations\n outfile = args.outfile\n \n \n if not os.path.isfile(FILE):\n die('\"{}\" is not a file'.format(FILE))\n if not os.path.isfile(annotations):\n die('\"{}\" is not a file'.format(annotations))\n if os.path.isfile(FILE) and os.path.isfile(annotations):\n reader = csv.DictReader(open(FILE), delimiter = '\\t', fieldnames = (\"qseqid\", \"sseqid\", \"pident\", \"length\", \"mismatch\", \"gapopen\", \"qstart\", \"qend\", \"sstart\", \"send\", \"evalue\", \"bitscore\"))\n reader_a = csv.DictReader(open(annotations), fieldnames = (\"centroid\", \"domain\", \"kingdom\", \"phylum\", \"class\", \"order\", \"genus\", \"species\"))\n reader_b = csv.reader(open(annotations, 'r'))\n anno_dict = {}\n for row in reader_b:\n key1 = row[0]\n anno_dict[key1] = row[1:]\n\n #print(anno_dict)\n \n \"\"\"for dct in map(dict, reader_a):\n genus = (f\"{dct['genus']}\")\n species = (f\"{dct['species']}\")\n if genus == \"\": \n print(\"NA\")\n else:\n print(genus)\n if species == \"\":\n print(\"NA\")\n else:\n print(species)\"\"\"\n for dct in map(dict, reader):\n seq_id = (f\"{dct['sseqid']}\") \n pident = (f\"{dct['pident']}\")\n #print(seq_id)\n for dct_a in map(dict, reader_a):\n genus = (f\"{dct_a['genus']}\")\n species = (f\"{dct_a['species']}\")\n if any(seq_id == key for key in anno_dict): \n \"\"\"print(seq_id)\n print(pident)\n print(genus)\n print(species)\n #find a way to print genus and species of seq_id\n \"\"\"\n \n else:\n warn('Cannot find seq \"{}\" in lookup'.format(seq_id))\n \"\"\"for line_a in reader_a:\n an_id = (line_a['centroid']) \n print('\"{}\" is an_id'.format(an_id)) \n for line in reader:\n seq_id = (line['sseqid'])\n print('\"{}\" is seq_id'.format(seq_id))\n if seq_id == an_id:\n print(\"hi\")\n else:\n warn('Cannot find seq \"{}\" in lookup'.format(seq_id))\n \"\"\"\n #pprint.pprint(dict_list)\n #pprint.pprint(dict_list_a)\n #for key, value in d1.items():\n #if key is 'sseqid':\n #print(value)\n #print(dict_list_a['centroid']) ", "def test_get_sam_ids(self):\r\n map_file = StringIO.StringIO(\"\"\"#SampleID\tCountry\tAgeYears\tFamily\tAgeCat\r\n h208A.1\tMalawi\t0.032854209\th208\tChild\r\n h301A.1\tMalawi\t0.05\th301\tChild\r\n h301B.1\tMalawi\t0.05\th301\tChild\r\n USinfTw20.1\tUSA\t0.083333333\tUSinfTw20\tChild\r\n USinfTw20.2\tUSA\t0.083333333\tUSinfTw20\tChild\r\n USinfTw1.1\tUSA\t0.083333333\tUSinfTw1\tChild\r\n 
h10M\tMalawi\t26\th10\tAdult\r\n h68M\tMalawi\t26\th68\tAdult\r\n TS25\tUSA\t26\tUSts9\tAdult\r\n TS26\tUSA\t26\tUSts9\tAdult\"\"\")\r\n\r\n map_data, map_header, comments = parse_mapping_file(map_file)\r\n colorby = 'Country'\r\n cat = 'USA'\r\n primary_state = 'AgeCat:Child'\r\n ids1, ids2 = get_sam_ids(map_data, map_header, colorby, cat,\r\n primary_state, secondary_state=None)\r\n self.assertEqual(set(ids1),\r\n set(['USinfTw20.1', 'USinfTw20.2', 'USinfTw1.1']))\r\n self.assertEqual(set(ids2), set(['TS25', 'TS26']))", "def one_to_one_compare(comparison_1, comparison_2):\r\n total = comparison_1.shape[0]\r\n interm = 0\r\n one2one = 0\r\n for row in comparison_1.itertuples():\r\n if row.gene_type == \"one_to_one_mapping\":\r\n interm += 1\r\n Q_gene = row.Query_gene\r\n S_gene = row.Sytentic_genes.split(\",\")[0]\r\n # get the index of the query gene in second file using subject gene var\r\n idx = comparison_2[comparison_2.Query_gene.isin([S_gene])].index.tolist()\r\n # check to see if the index is empty\r\n if idx:\r\n if comparison_2.at[idx[0], \"gene_type\"] == \"one_to_one_mapping\":\r\n comp_2_S_gene = comparison_2.at[idx[0], \"Sytentic_genes\"].split(\",\")[0]\r\n if comp_2_S_gene == Q_gene:\r\n one2one += 1\r\n return(total, interm, one2one)", "def test_ids_from_fasta_lines(self):\r\n\r\n fasta_lines = \\\r\n [\">hsa:8355 HIST1H3G; histone cluster 1, H3g ; K11253 histone H3\",\r\n \"atggcccgcaccaagcagactgcacgcaagtccaccggtggcaaagcgccgcgcaagcagctgg\",\r\n \"ccactaaggcggctcggaaaagcgcgccggccaccggcggcgtgaagaaacctcatcgctaccg\",\r\n \"tcccggcaccgtggctctgcgcgagattcgccgctatcagaagtcgactgagctgctgatccgc\",\r\n \"aagttgcctttccaacgcctggtgcgagaaatcgctcaggacttcaagacagatctgcgctttc\",\r\n \"agagttccgcggtgatggccctgcaggaggcctgcgaggcctacttggtggggctctttgagga\",\r\n \"taccaacctgtgtgccatccatgctaagcgagtgactatcatgcccaaggacattcagctcgct\",\r\n \"cgccgcattcgtggggagagagcgtag\",\r\n \">hsa:9081 PRY; PTPN13-like, Y-linked\",\r\n \"atgggagccactgggcttggctttctactttcctggagacaagacaatttgaatggcact\"]\r\n exp_ids = [\"hsa:8355\", \"hsa:9081\"]\r\n obs_ids = ids_from_fasta_lines(fasta_lines)\r\n\r\n self.assertEqual(obs_ids, exp_ids)", "def interactor_finder():\n from tools import prot_id_converter\n\n proteinList = []\n with open(\"../datafiles/known_interactors.txt\",\"r\") as inpProt: # create list of gene names from hand-made text file with known ptp22 interactors\n for protLine in inpProt:\n if protLine != \"\\n\":\n curName = protLine.strip().split(\"\\t\")[0]\n curName = curName[0] + curName[1:].lower()\n proteinList.append(curName)\n inpIdL = prot_id_converter(proteinList, \"10090\", \"genesymbol\", \"uniprotaccession\") # convert to uniprot accessions\n print(inpIdL)\n \n with open(\"../bob/processed/bobprots_all.csv\",\"r\") as targetF: # create list of all uniprot accessions in Bob's dataset (unique razor proteins only)\n targetD = {}\n for targetLine in targetF:\n targetD[targetLine.split(\",\")[0]] = targetLine.split(\",\")[1].strip()\n for inpIdItem in inpIdL:\n for queryI in inpIdItem:\n if queryI in targetD:\n print(targetD[queryI])\n break", "def find_analogies(self, fig1, fig2):\r\n \r\n if len(fig1) == 0 or len(fig2) == 0:\r\n return {}\r\n \r\n analogies = {} # map from fig1_name: (fig2_name, score)\r\n \r\n for obj1, attrs1 in fig1.items():\r\n matches = {}\r\n \r\n for obj2, attrs2 in fig2.items():\r\n score = 0\r\n max_score = 0\r\n \r\n for attr, value1 in attrs1.items():\r\n cur_points = 1\r\n if not attr in attrs2:\r\n # doesn't exist in other object, skip it\r\n continue\r\n \r\n if attr in 
self.kb['attribute_priorities']:\r\n priority_rank = self.kb['attribute_priorities'].index(attr)\r\n if priority_rank < 5:\r\n cur_points += 2.0/(priority_rank + 1)\r\n\r\n\r\n value2 = attrs2[attr]\r\n if self.kb['attributes'][attr]['relative'] != 'no':\r\n cur_points += 1\r\n # this is a relative attribute, so names will be different\r\n # just look at the length of the value\r\n if len(value1) == len(value2):\r\n score += cur_points\r\n elif value1 == value2:\r\n # exact match, increase score\r\n score += cur_points\r\n \r\n max_score += cur_points\r\n \r\n matches[obj2] = score / float(max_score)\r\n \r\n analogies[obj1] = sorted(matches.items(), key=lambda match: match[1], reverse=True)\r\n \r\n ret = {} # map from obj1: obj2\r\n \r\n while len(analogies) > 0:\r\n pprint(analogies)\r\n sorted_analogies = sorted(analogies.items(), key=lambda analogy: analogy[1][0][1], reverse=True)\r\n pprint(sorted_analogies)\r\n \r\n obj1 = sorted_analogies[0][0]\r\n obj2 = sorted_analogies[0][1][0][0]\r\n \r\n ret[obj1] = obj2\r\n del analogies[obj1]\r\n \r\n # remove obj2 from any other analogies findings\r\n for i in analogies.keys():\r\n for j in range(len(analogies[i]) - 1, -1, -1):\r\n if analogies[i][j][0] == obj2:\r\n del analogies[i][j]\r\n if len(analogies[i]) == 0:\r\n del analogies[i]\r\n \r\n \r\n return ret", "def test_compare_taxa_summaries_paired_sample_id_map(self):\r\n exp = ('Taxon\\tS1\\tS2\\nArchaea\\t0.4\\t0.4\\nBacteria\\t0.5\\t'\r\n '0.7\\nEukarya\\t0.4\\t0.5\\n', 'Taxon\\tE1\\tE2\\nArchaea\\t0.5\\t'\r\n '0.6\\nBacteria\\t0.7\\t0.8\\nEukarya\\t0.5\\t0.6\\n',\r\n '# Correlation coefficient: pearson.\\n# The parametric p-value(s) '\r\n 'were calculated using a two-sided test of significance using a '\r\n 't-distribution.\\n# The nonparametric p-value(s) were calculated '\r\n 'using a two-sided permutation test with permutations.\\n# The '\r\n 'confidence interval(s) were constructed at a confidence level of '\r\n '.% using Fisher\\'s z-transformation (see Sokal and Rohlf rd '\r\n 'edition pg. ). 
The confidence interval(s) are two-sided.\\n# '\r\n 'Number of samples that matched between the taxa summary files: '\r\n '\\nCorrelation coefficient\\tParametric p-value\\tNonparametric '\r\n 'p-value\\tCI (lower)\\tCI (upper)\\n.\\t.\\t.\\t.\\t.\\n', None)\r\n obs = compare_taxa_summaries(self.taxa_summary_paired1,\r\n self.taxa_summary_paired4, 'paired', 'pearson',\r\n perform_detailed_comparisons=False,\r\n sample_id_map=self.taxa_summary_paired_samp_id_map1)\r\n self.assertEqual(obs[0], exp[0])\r\n self.assertEqual(obs[1], exp[1])\r\n self.assertEqual(self.remove_nums(obs[2]), exp[2])", "def export_data(ibs, gid_list, aid_list, nid_list, new_dbpath=None):\n import wbia\n\n imgsetid_list = ut.unique_unordered(ut.flatten(ibs.get_image_imgsetids(gid_list)))\n gsgrid_list = ut.unique_unordered(ut.flatten(ibs.get_image_gsgrids(gid_list)))\n\n # TODO: write SQL query to do this\n am_rowids = ibs._get_all_annotmatch_rowids()\n flags1_list = [aid in set(aid_list) for aid in ibs.get_annotmatch_aid1(am_rowids)]\n flags2_list = [aid in set(aid_list) for aid in ibs.get_annotmatch_aid2(am_rowids)]\n flag_list = ut.and_lists(flags1_list, flags2_list)\n am_rowids = ut.compress(am_rowids, flag_list)\n # am_rowids = ibs.get_valid_aids(ibs.get_valid_aids())\n\n rowid_subsets = {\n const.ANNOTATION_TABLE: aid_list,\n const.NAME_TABLE: nid_list,\n const.IMAGE_TABLE: gid_list,\n const.ANNOTMATCH_TABLE: am_rowids,\n const.GSG_RELATION_TABLE: gsgrid_list,\n const.IMAGESET_TABLE: imgsetid_list,\n }\n ibs_dst = wbia.opendb(dbdir=new_dbpath, allow_newdir=True)\n # Main merge driver\n merge_databases(ibs, ibs_dst, rowid_subsets=rowid_subsets)\n logger.info('Exported to {!r}'.format(new_dbpath))\n return new_dbpath", "def makeAMixOf2Annotations(inputAnnotPath1, inputAnnotPath2, outputMixPath):\n # make sure the paths end in a slash\n if inputAnnotPath1[-1] != u'/':\n inputAnnotPath1 = u'{0}/'.format(inputAnnotPath1)\n if inputAnnotPath2[-1] != u'/':\n inputAnnotPath2 = u'{0}/'.format(inputAnnotPath2)\n if outputMixPath[-1] != u'/':\n outputMixPath = u'{0}/'.format(outputMixPath)\n # for each input open\n for inPath in [inputAnnotPath1, inputAnnotPath2]:\n # open the file, read the lines\n with open(u'{0}sample.en'.format(inPath)) as inEnFile:\n enLns = inEnFile.readlines()\n with open(u'{0}sample.fr'.format(inPath)) as inFrFile:\n frLns = inFrFile.readlines()\n with open(u'{0}sampleAnnotation.tsv'.format(inPath)) as inAnnotFile:\n annotLns = inAnnotFile.readlines()\n with open(u'{0}sampleReference.tsv'.format(inPath)) as inRefFile:\n refLns = inRefFile.readlines()\n with open(u'{0}scores.tsv'.format(inPath)) as inScFile:\n scLns = inScFile.readlines()\n with open(u'{0}scoresAndMetaData.tsv'.format(inPath)) as inScMetaFile:\n scMetaLns = inScMetaFile.readlines()\n # choose and index randomly\n dejaVus = set([])\n while len(dejaVus) < int(len(enLns)/2.0):\n randomInd = randint(0, len(enLns)-1)\n while randomInd in dejaVus:\n randomInd = randint(0, len(enLns)-1)\n # add to dejavus\n dejaVus.add(randomInd)\n # dump to output file\n utilsOs.appendLineToFile(enLns[randomInd], u'{0}sample.en'.format(outputMixPath), addNewLine=False)\n utilsOs.appendLineToFile(frLns[randomInd], u'{0}sample.fr'.format(outputMixPath), False)\n utilsOs.appendLineToFile(annotLns[randomInd], u'{0}sampleAnnotation.tsv'.format(outputMixPath), False)\n utilsOs.appendLineToFile(refLns[randomInd], u'{0}sampleReference.tsv'.format(outputMixPath), False)\n utilsOs.appendLineToFile(scLns[randomInd], u'{0}scores.tsv'.format(outputMixPath), False)\n 
utilsOs.appendLineToFile(scMetaLns[randomInd], u'{0}scoresAndMetaData.tsv'.format(outputMixPath), False)", "def test_pyramids(self):\n pyramids = JsonlReader(_pyramids_file_path, Pyramid).read()\n annotations = JsonlReader(_pyramid_annotations_file_path, PyramidAnnotation).read()\n\n instance_id_to_pyramid = {}\n for pyramid in pyramids:\n instance_id_to_pyramid[pyramid.instance_id] = pyramid\n\n instance_id_to_annotations = defaultdict(list)\n for annotation in annotations:\n instance_id_to_annotations[annotation.instance_id].append(annotation)\n\n assert instance_id_to_pyramid.keys() == instance_id_to_annotations.keys()\n for instance_id, pyramid in instance_id_to_pyramid.items():\n assert len(pyramid.summaries) == 4\n assert len(pyramid.summarizer_ids) == 4\n for reference in pyramid.summaries:\n assert len(reference) > 0\n\n scu_ids = set([scu.scu_id for scu in pyramid.scus])\n for annotation in instance_id_to_annotations[instance_id]:\n assert len(annotation.summary) > 0, (instance_id, annotation.summarizer_id)\n for scu in annotation.scus:\n assert scu.scu_id in scu_ids, (scu.scu_id, scu_ids)", "def test_identification_banana_vs_bowl_vs_food_can(self):\n # Getting the dataset\n bowl_ids = ['fa61e604661d4aa66658ecd96794a1cd',\n 'f74bba9a22e044dea3769fcd5f96f4',\n 'd2e1dc9ee02834c71621c7edb823fc53']\n banana_ids = ['f6e6117261dca163713c042b393cc65b',\n 'ba0d56295321002718ddbf38fa69c501',\n '7d78e217e0ba160fe2b248b8bb97d290']\n bowls = []\n for bowl_id in bowl_ids:\n bowls.append(SketchupModel.find_google_id(bowl_id))\n bananas = []\n for banana_id in banana_ids:\n bananas.append(SketchupModel.find_google_id(banana_id))\n # Training\n iden = Identifier()\n iden.add_models(bananas, 'banana')\n iden.add_models(bowls, 'bowl')\n iden.train()\n # Identification\n for i in range(20):\n example = Example.get_random(['banana', 'bowl'])\n pcd_file = example.pcd_file()\n print \"Identification of file {}\".format(example)\n cloud = PointCloud.load_pcd(pcd_file.name)\n iden.identify(cloud)", "def mapsMatch(m1,m2):\n same = True\n f1 = file(m1,'r').readlines()\n f2 = file(m2,'r').readlines()\n for i, row in enumerate(f1):\n row = row.strip().split()\n row2 = f2[i].strip().split()\n if row[0] <> row2[0]:\n\t same = False\n break\n return same", "def match(a1, a2):\n inds_1, inds_2 = [], []\n i, j = 0, 0\n for s in range(0, len(a1)):\n increment = lambda a, x, ind: x+1 if a[ind] != \"-\" else x\n if a1[s] is a2[s] and a1[s] != \"-\":\n inds_1.append(i)\n inds_2.append(j)\n i = increment(a1,i,s)\n j = increment(a2,j,s)\n return inds_1, inds_2", "def test_marking_duplication(self):\n container = stixmarx.new()\n package = container.package\n red_marking = generate_marking_spec(generate_red_marking_struct())\n amber_marking = generate_marking_spec(generate_amber_marking_struct())\n\n incident = Incident(title=\"Test\")\n package.add_incident(incident)\n\n indicator = Indicator(title=\"Test\")\n incident.related_indicators.append(indicator)\n\n container.add_marking(incident, red_marking, descendants=True)\n container.add_global(amber_marking)\n\n self.assertTrue(container.is_marked(indicator, red_marking))\n self.assertTrue(container.is_marked(indicator, amber_marking))\n\n markings = container.get_markings(indicator)\n self.assertEqual(len(markings), 2)", "def interpretBiom(bf, mf, c, OTUIds):\n biom_file = parse_biom_table(bf)\n mapping_file = parse_mapping_file_to_dict(mf)\n mapping_file = mapping_file[0]\n\n category_dict = dict( [ ( key, val [ c ] ) for ( key, val ) in mapping_file.iteritems() ] 
)\n sorted_category_dict = sorted(category_dict.iteritems(), key = operator.itemgetter(1))\n\n print sorted_category_dict\n\n samp_ids = []\n for vals, ids, md in biom_file.iterSamples():\n samp_ids.append(ids)\n \n samples_present = []\n final_list = []\n count = 0\n counter = 0\n\n # This takes in the list of OTU ID's and matches them with ID from\n # the biom file using the getValueByIds. If it isn't 0 then keep track\n # of it (i.e. the OTU is present in that sample) and do an intersection\n # between all of the said OTU's within each sample. Unfortunately, matching\n # is O(n^2) no matter what.\n for j in OTUIds:\n\tfor id in samp_ids:\n\t for k in j:\n\t \tif int(biom_file.getValueByIds(k, id)) != 0:\n count = count + 1\n if count == len(j):\n samples_present.append(id)\n count = 0\n\t if id == \n counter = counter + 1\n final_list.append(counter)\n\t# temporary hack: used the set function to make a unique list \n\t# I should clear the list after each iteration through the OTUIds\n\t# but it somehow clears the entire list even if I append it before.\n final_list.append(set(samples_present))\n\n # this overcomes the temporary hack and converts from set to list\n for i in xrange(len(final_list)):\n\tif i % 2 != 0:\n\t final_list[i] = list(final_list[i])\n\n\n return final_list", "def annotate(input_bedfile, db_bedfile,output_bedfile,f=1E-9, wo=False, verbose=True):\n input_bed = pybedtools.BedTool(input_bedfile)\n db_bed = pybedtools.BedTool(db_bedfile)\n if wo:\n output_bed = input_bed.intersect(db_bed, wo=True, sorted=True, names='db',f=f)\n else:\n output_bed = input_bed.intersect(db_bed, wa=True, wb=True, sorted=True, names='db',f=f)\n output_bed.saveas(output_bedfile)\n if verbose:\n print('annotated to', output_bedfile)", "def eafindicators(npsA, npsB):\n # calcular os indicadores com o eaf conjunto\n lt, ind = libaft.eaf2d(npsA + npsB, ind=True)\n # espalmar lista, ou seja,\n # (m listas) * (n pontos) * (b bits) -> lista de (m * n pontos) * (b bits)\n flat_ind = [point for level in ind for point in level]\n return flat_ind", "def compare_ids(id1_name,id1,id2_name,id2) :\n\tprint(\"*\"*15+\"Comparing {0} and {1} \".format(id1_name,id2_name)+\\\n\t\t\"*\"*15)\n\tprint('{0} and {1} : {3} : {2}'.format(id1_name,id2_name,\\\n\t\t\tset(id1).intersection(id2),len(set(id1).intersection(id2))))\n\tprint('{0} - {1} : {3} : {2}'.format(id1_name,id2_name,\\\n\t\t\tset(id1).difference(id2),len(set(id1).difference(id2))))\n\tprint('{1} - {0} : {3} : {2}'.format(id1_name,id2_name,\\\n\t\t\tset(id2).difference(id1),len(set(id2).difference(id1))))", "def test_compare_taxa_summaries_paired_sample_id_map_partial(self):\r\n # The sample ID map has some mappings that are not complete- i.e. a\r\n # sample from one file has a new sample ID that doesn't match any other\r\n # new sample IDs. In this case, the sample should be ignored.\r\n exp = ('Taxon\\tS1\\tS2\\nArchaea\\t0.4\\t0.4\\nBacteria\\t0.5\\t'\r\n '0.7\\nEukarya\\t0.4\\t0.5\\n', 'Taxon\\tE1\\tE2\\nArchaea\\t0.5'\r\n '\\t0.6\\nBacteria\\t0.7\\t0.8\\nEukarya\\t0.5\\t0.6\\n',\r\n '# Correlation coefficient: pearson.\\n# The parametric p-value(s) '\r\n 'were calculated using a two-sided test of significance using a '\r\n 't-distribution.\\n# The '\r\n 'confidence interval(s) were constructed at a confidence level of '\r\n '95.0% using Fisher\\'s z-transformation (see Sokal and Rohlf 3rd '\r\n 'edition pg. 575). 
The confidence interval(s) are two-sided.\\n# '\r\n 'Number of samples that matched between the taxa summary files: 1'\r\n '\\nCorrelation coefficient\\tParametric p-value\\tNonparametric '\r\n 'p-value\\tCI (lower)\\tCI (upper)\\n1.0000\\t0.0000\\tN/A\\tN/A\\tN/A\\n',\r\n '# Correlation coefficient: pearson.\\n# The parametric p-value(s) '\r\n 'were calculated using a two-sided test of significance using a '\r\n 't-distribution.\\n# The '\r\n 'confidence interval(s) were constructed at a confidence level of '\r\n '95.0% using Fisher\\'s z-transformation (see Sokal and Rohlf 3rd '\r\n 'edition pg. 575). The confidence interval(s) are two-sided.\\n# '\r\n 'Number of samples that matched between the taxa summary files: 1'\r\n '\\nSample ID\\tSample ID\\tCorrelation coefficient\\tParametric '\r\n 'p-value\\tParametric p-value (Bonferroni-corrected)\\t'\r\n 'Nonparametric p-value\\tNonparametric p-value '\r\n '(Bonferroni-corrected)\\tCI (lower)\\tCI (upper)\\nS1\\tE2\\t1.0000\\t'\r\n '0.0000\\t0.0000\\tN/A\\tN/A\\tN/A\\tN/A\\n')\r\n\r\n obs = compare_taxa_summaries(self.taxa_summary_paired1,\r\n self.taxa_summary_paired4, 'paired', 'pearson',\r\n num_permutations=0,\r\n perform_detailed_comparisons=True,\r\n sample_id_map=self.taxa_summary_paired_samp_id_map2)\r\n # We can test exactly because there aren't any stochastic p-values.\r\n self.assertEqual(obs, exp)", "def lines(a, b):\n # Turn split versions of both files into sets to remove duplicates\n # Split Lines used to automatically split at the end of a line. No need for \"\\n\" this way\n a1 = set(a.splitlines())\n b1 = set(b.splitlines())\n\n return a1 & b1", "def get_interactions(list_atoms1, list_atoms2, dist):\n beta_carbons1 = list(filter(lambda x: x.get_id() == \"CB\", list_atoms1))\n beta_carbons2 = list(filter(lambda x: x.get_id() == \"CB\", list_atoms2))\n ns = NeighborSearch(beta_carbons1)\n interactions = []\n\n for atom in beta_carbons2:\n interact = ns.search(atom.get_coord(), dist)\n interactions.extend(\n [tuple(sorted([str(atom.get_parent().resname), str(x.get_parent().resname)])) for x in interact])\n return interactions", "def get_intersecting_dissemination_ids(cross_section, dissemination_areas):\n assert 'DAUID' in cross_section.columns \n dissem_arr = dissemination_areas.loc[dissemination_areas['DAUID'].isin(np.unique(cross_section['DAUID'].values))].DAUID.unique()\n reg_arr = dissemination_areas.loc[dissemination_areas['DAUID'].isin(np.unique(cross_section['DAUID'].values))].CSDUID.unique()\n# code_arr = dissemination_areas.loc[dissemination_areas['DAUID'].isin(np.unique(cross_section['DAUID'].values))].CODEID.unique()\n\n\n return list(dissem_arr), list(reg_arr)", "def overlap(path1, path2):\n DataL1 = BedTool(path1).sort()\n DataL2 = BedTool(path2).sort()\n overlap = DataL1.intersect(DataL2, wao=True)\n Overlap_df = overlap.to_dataframe()\n Strand1 = list(Overlap_df.iloc[:, 5])\n Strand2 = list(Overlap_df.iloc[:, 11])\n p_p, m_m, p_m, m_p, same_strand, opposite_strand, convergent, divergent = orientation(Strand1, Strand2)\n return p_p, m_m, p_m, m_p, same_strand, opposite_strand, convergent, divergent", "def export_annots(ibs, aid_list, new_dbpath=None):\n logger.info('Exporting annotations aid_list={!r}'.format(aid_list))\n if new_dbpath is None:\n new_dbpath = make_new_dbpath(ibs, 'aid', aid_list)\n gid_list = ut.unique(ibs.get_annot_gids(aid_list))\n nid_list = ut.unique(ibs.get_annot_nids(aid_list))\n return export_data(ibs, gid_list, aid_list, nid_list, new_dbpath=new_dbpath)" ]
[ "0.6751747", "0.6087413", "0.601892", "0.58519965", "0.5660577", "0.54375374", "0.54265267", "0.5317513", "0.5268035", "0.5241501", "0.51906496", "0.5165996", "0.5164853", "0.51569146", "0.5151366", "0.51483524", "0.51438683", "0.51413226", "0.5127341", "0.5124664", "0.51017076", "0.5092623", "0.5090039", "0.5073293", "0.50722057", "0.5032198", "0.50299317", "0.50206953", "0.501582", "0.500459" ]
0.6915208
0
Constructs the type with the given alias using the given args and kwargs.
def create_object(self, alias: str, *args: Any, **kwargs: Any) -> Any: object_type = self._type_aliases.get(alias) if object_type is None: raise KeyError(f"There is no type registered for alias {alias}") if not callable(object_type): raise TypeError( f"Asked to call {alias} with args {args} and kwargs {kwargs} but it is not " f"callable, its a {type(alias).__name__}." ) return object_type(*args, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _make_constructor(name, type_, attrs, kwargs):\n d = dict(attrs)\n d['_sumtype_attribs'] = [x for x in attrs]\n t = type(name, (type_,), d)\n t = attr.s(t, repr_ns=type_.__name__, **kwargs)\n return t", "def _reconstruct_alias(alias: _T, parameters: Iterator[TypeVar]) -> _T:\n args = []\n for i in alias.__args__:\n if isinstance(i, TypeVar):\n value: Any = next(parameters)\n elif isinstance(i, _GenericAlias):\n value = _reconstruct_alias(i, parameters)\n elif hasattr(i, \"__parameters__\"):\n prm_tup = tuple(next(parameters) for _ in i.__parameters__)\n value = i[prm_tup]\n else:\n value = i\n args.append(value)\n\n cls = type(alias)\n return cls(alias.__origin__, tuple(args), alias.__unpacked__)", "def create_alias(name, alias=\"\", asname=None):\n alias_node = ast.alias()\n\n alias_node.alias = alias\n alias_node.asname = asname\n alias_node.name = name\n\n return alias_node", "def create_typedef(*args):\n return _ida_hexrays.create_typedef(*args)", "def __call__(self, *args):\n return TypeCall(self, args)", "def build(cls, key: str, **kwargs):\n\n def add(parser):\n return cls.add_arguments(key, parser)\n\n kwargs = {f\"{key}_{cls.alias}_\" + k: v for k, v in kwargs.items()}\n args = argparse.Namespace(**kwargs)\n args = fill_missing_args(args, add)\n return cls(key, args)", "def build(cls, **kwargs):\n return cls(kwargs) #pylint: disable=no-value-for-parameter", "def register_type_abbreviation(name, alias):\n _TYPE_ABBREVIATIONS[name] = alias", "def type_instance(typedef):\n if subclassof(typedef, Type):\n # Type class passed, create no-arg instance\n typedef = typedef()\n return typedef", "def make_anafaze_type(struct_def, name):\n return type(name, (object,), \n dict(\n to_python = partial(unpack, *[struct_def]),\n from_python = partial(pack, *[struct_def]),\n byte_size = calcsize(struct_def),\n )\n )()", "def create_account_alias(self, alias):\r\n params = {'AccountAlias': alias}\r\n return self.get_response('CreateAccountAlias', params)", "def _create_type(self, keyword_or_identifier, **kwargs):\n name = keyword_or_identifier\n if isinstance(name, Identifier):\n return self._idl_type_factory.reference_type(name, **kwargs)\n elif isinstance(name, str):\n return self._idl_type_factory.simple_type(name, **kwargs)\n else:\n assert False", "def build_analysis_link(linktype, **kwargs):\n\n builder_name = 'create_%s'%linktype\n try:\n builder_func = BUILDER_DICT[builder_name]\n except KeyError:\n raise KeyError(\"Could not build an analysis link using a creator function %s\"%builder_name)\n return builder_func(**kwargs)", "def addop(name, fields, args=None, alias=False):\n\n namespace = {\"fields\": fields, \"alias\": alias}\n\n if args is not None:\n namespace[\"args\"] = args\n\n # Dynamically create the \"name\" object\n type(name, (mn_pinky,), namespace)", "def instantiate(name, *args, **kwargs):\n ...", "def make_object(obj, kwargs):\n return obj(**kwargs)", "def T_new(cls, **kwargs):\n # We are using `type.__new__` so that we get a concrete class, not\n # a template.\n return type.__new__(_TemplateMeta, 'TWithArgs', (_TWithArgs,), {\n '__doc__': 'A template parameter with arguments applied.',\n '__slots__': (),\n '_kwargs': kwargs,\n })", "def __init__(\n self,\n name: Optional[str] = None,\n aliases: Iterable[str] = (),\n args: Iterable[Argument] = (),\n ) -> None:\n self.args = Lexicon()\n self.positional_args: List[Argument] = []\n self.flags = Lexicon()\n self.inverse_flags: Dict[str, str] = {} # No need for Lexicon here\n self.name = name\n self.aliases = aliases\n for 
arg in args:\n self.add_arg(arg)", "def __init__(self, type_name, args):\n super().__init__()\n self.type_name = type_name\n self.args = args\n self._projection = None", "def new(cls, **kwargs):\n return cls(**kwargs)", "def mk_typ(self, name, kind):\n # (str, ty.Kind) -> ty.TypeVar\n\n typ = ty.TypeVar(name, kind)\n self.type_param_scopes[0].appendleft((name, typ))\n return typ", "def as_(self, alias):\n return AliasedQuery(self, alias)", "def catalog_alias_create(self, args):\n try:\n if args.id:\n alias = self.server.connect_ermrest_alias(args.id)\n try:\n if alias.retrieve():\n print(\"Catalog alias already exists\")\n return\n except requests.HTTPError as e:\n if e.response.status_code == 404:\n pass\n else:\n raise\n owner = args.owner if args.owner else None\n alias = self.server.create_ermrest_alias(args.id, owner, args.alias_target)\n if not args.quiet:\n print(\"Created new catalog alias %s with the following configuration:\\n\" % alias.alias_id)\n pp(alias.retrieve())\n except HTTPError as e:\n if e.response.status_code == requests.codes.not_found:\n raise ResourceException('Catalog alias not found', e)\n elif e.response.status_code == requests.codes.conflict:\n raise ResourceException(\"Catalog alias already exists\", e)\n else:\n raise", "def named_build(klass, name):\n k = Factory.build(klass)\n k.name = name\n return k", "def create(cls, **props: Any) -> 'ResolverOp':\n real_instance = super().__call__()\n cls._check_kwargs(props)\n for name, value in props.items():\n setattr(real_instance, name, value)\n return real_instance", "def create(cls, **props: Any) -> 'ResolverOp':\n real_instance = super().__call__()\n cls._check_kwargs(props)\n for name, value in props.items():\n setattr(real_instance, name, value)\n return real_instance", "def __init__(self, name: str, python_type: type):\n self.name = name\n self.python_type = python_type", "def of(clazz, **kwargs):\n return clazz(kwargs)", "def construct_from_string(cls, string):\n # Remove fletcher specific naming from the arrow type string.\n if string.startswith(\"fletcher[\"):\n string = string[9:-1]\n\n if string == \"list<item: string>\":\n return cls(pa.list_(pa.string()))\n\n try:\n type_for_alias = pa.type_for_alias(string)\n except (ValueError, KeyError):\n # pandas API expects a TypeError\n raise TypeError(string)\n\n return cls(type_for_alias)", "def namedtuple(*args, **kwargs):\n nt_class = _invisiblenamedtuple(*args, **kwargs)\n # put it on the global scale so it can be tupled correctly\n globals()[nt_class.__name__] = nt_class\n return nt_class" ]
[ "0.6451635", "0.6361811", "0.5905361", "0.5635368", "0.5622488", "0.55912095", "0.55612254", "0.5529319", "0.5490742", "0.5441523", "0.5422917", "0.5418361", "0.5410999", "0.5371319", "0.5332267", "0.53162473", "0.52660453", "0.51575106", "0.51328623", "0.51288486", "0.5125596", "0.5123979", "0.50744647", "0.50714195", "0.50679046", "0.50679046", "0.5042089", "0.50358295", "0.50345206", "0.5034125" ]
0.6978425
0
this starts transcribing for up to (60) seconds
def start_transcribing(): transcribe.main()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transcribe_proc():\n while True:\n # Get result of transcription\n transcribe_result = transcriber.transcribe_stream(\n audio_stream(), sample_rate, sample_width, channels\n )\n\n _LOGGER.debug(\"Transcription result: %s\", transcribe_result)\n\n transcribe_result = transcribe_result or Transcription.empty()\n transcribe_dict = dataclasses.asdict(transcribe_result)\n transcribe_dict[\"timeout\"] = is_timeout\n\n print_json(transcribe_dict)\n transcription_printed.set()", "def subscribe(receiver, updateInterval=10):", "def run(self):\n while True:\n try:\n sleep(influx_settings.write_freq)\n self._redis2influx()\n except InterruptExceptions as ex:\n raise ex", "async def _resubscribe_loop(self) -> None:\n _LOGGER.debug(\"_resubscribe_loop started\")\n while self._subscriptions:\n next_renewal = min(self._subscriptions.values())\n wait_time = next_renewal - time.monotonic() - RESUBSCRIBE_TOLERANCE_SECS\n _LOGGER.debug(\"Resubscribing in %f seconds\", wait_time)\n if wait_time > 0:\n await asyncio.sleep(wait_time)\n\n await self._async_resubscribe_services(notify_errors=True)\n\n _LOGGER.debug(\"_resubscribe_loop ended because of no subscriptions\")", "def __stream_triggered(self):\n # Call this every time period\n thread = Timer(self.stream_time, self.__stream_triggered)\n thread.start()\n self.__threads.append(thread)\n\n if len(self.__spike_buffer) > 2:\n speed = self.__get_speed()\n print(speed)\n self.__stream_send(speed)", "def acquire_data_for_interval_in_seconds(self, duration):\n # create acquisition thread\n data_acq_thread = threading.Thread(target=self.data_acquisition.start_acquisition)\n data_acq_thread.daemon = True\n data_acq_thread.start()\n\n # create subscriber listening thread\n sub_thread = threading.Thread(target=self.application.start_listening)\n sub_thread.daemon = True\n sub_thread.start()\n\n time.sleep(duration)\n\n self.data_acquisition.stop_acquisition()\n print('Acquisition Complete!')", "def subscribe(receiver, updateInterval=None):", "async def inbound_task_call(self):\n from cocotb.triggers import Timer\n await Timer(0, units=\"ps\")", "def start_transmit(self):\n\n # Set publishing rate\n self.r = rospy.Rate(50) # 50Hz\n \n quitting = False\n while not rospy.is_shutdown() and not quitting:\n try:\n # JointState message to publish joint positions\n js_msg = self.build_joint_state_msg()\n \n # PoseStamped messages to publish position and \n # orientation of each joint\n ps_msg = self.build_pose_stamped_msg()\n \n # TODO: TwistStamped messages to publish linear and\n # angular velocities of each joint\n ts_msg = TwistStamped()\n\n # Publish the messages\n self.js_pub.publish(js_msg)\n self.ps_pub.publish(ps_msg)\n\n # TODO: Publish TwistStamped\n # self.ts_pub.publish(ts_msg)\n self.r.sleep()\n self.t += 0.01 # automated tests time var\n \n except KeyboardInterrupt:\n LOG.e(\"KeyboardInterrupt detected\", \"start_transmit\")\n quitting = True\n\n LOG.d(\"Quit command sent to client\", \"main\")\n raise QuitMessageException(\"Quit message received from client\")", "def loop(self,timeout=1):\n self.stream.loop(timeout)", "def start(self):\n while True:\n self.pull_accounts_rolls()\n sleep(PULL_FREQUENCY_SECONDS)", "def _handler_command_start_autosample(self, *args, **kwargs):\n next_state = None\n result = None\n\n # Assure the device is transmitting.\n if not self._param_dict.get(SBE37Parameter.TXREALTIME):\n self._do_cmd_resp('set', SBE37Parameter.TXREALTIME, True, **kwargs)\n \n # Issue start command and switch to autosample if successful.\n 
self._do_cmd_no_resp('startnow', *args, **kwargs)\n \n next_state = SBE37ProtocolState.AUTOSAMPLE \n \n return (next_state, result)", "def start(self):\n\n while True:\n measurement = self.generate_message()\n measurement.save()\n print(\"Storing new measurement\")\n time.sleep(10)", "def _subscribe(self):\n self.subscribed = True\n self.subscribe_date = now()\n self.unsubscribed = False", "def streamTest():\n timer = StoreTimer(store, duration=2.0)\n bottle.response.set_header('Content-Type', 'text/event-stream') #text\n bottle.response.set_header('Cache-Control', 'no-cache')\n # Set client-side auto-reconnect timeout, ms.\n yield 'retry: 1000\\n\\n'\n i = 0\n yield 'id: {0}\\n'.format(i)\n i += 1\n yield 'data: START\\n\\n'\n n = 1\n while not timer.expired:\n yield 'id: {0}\\n'.format(i)\n i += 1\n yield 'data: {0}\\n\\n'.format(n)\n n += 1\n yield \"data: END\\n\\n\"", "def start(self):\n self.end_time = time.time() + self.timeout_time", "def connect(self):\n self.wss.start()\n while not self.wss.conn.connected.is_set():\n time.sleep(1e-4)\n\n for _type in self.types:\n for symbol in self.symbols:\n self.subscribe(_type, symbol)", "def TeleopPeriodic(self):\n Scheduler.GetInstance().Run()\n LiveWindow.Run()", "def enable_subscription():\n client = KConsumer(config=subscriber_config)\n counter = 0\n while 1:\n data = client.consume()\n if data:\n print(\"Received Data\", counter)\n class_label = inference_on_data(data.value)\n publish_response(class_label)", "def run_aqi(self):\r\n while True:\r\n self.get_aqi()\r\n time.sleep(30 - time.time() % 30)", "def loop_forever(self):\n while True:\n if self.get_parameter_value(\"publishing_mode\") == \"continuous\":\n self.publishMeasure()", "def subscribe_to_stream(client, iterator, seconds_running):\n end_time = datetime.now() + timedelta(seconds=seconds_running)\n while True:\n record_response = client.get_records(ShardIterator=iterator)\n # Only run for a certain amount of time.\n # Stop looping if no data returned. This means it's done\n now = datetime.now()\n if end_time < now:\n break\n else:\n # yield data to outside calling iterator\n for record in record_response['Records']:\n data = record[\"Data\"].decode(\"utf-8\")\n timestamp = record[\"ApproximateArrivalTimestamp\"]\n record_id = record[\"SequenceNumber\"]\n tweet_record = [record_id, timestamp, data]\n yield tweet_record\n # Get next iterator for shard from previous request\n iterator = record_response['NextShardIterator']", "def run(config, logging, inq, subscribe_callback, unsubscribe_callback):", "def start(self):\n self.timer.start(500)", "async def publish(self):\n for sock in self.subscribers:\n sock.send_json(self.main_server.state)\n await asyncio.sleep(0)", "def start(self):\n while True:\n LogService.log_info(\"aggregator\", \"Creating statistics\")\n self.create_statistics()\n LogService.log_info(\"aggregator\", \"Cleaning up\")\n self.cleanup_measurements()\n LogService.log_info(\"aggregator\", \"Sleeping for 60 minutes\")\n time.sleep(60*60)", "def slot_timer(self, _sender, _data):\r\n if self.connected:\r\n if time.time() - self._time_last_received > 60:\r\n self.debug(\"### did not receive anything for a long time, disconnecting.\")\r\n self.force_reconnect()\r\n self.connected = False\r\n if time.time() - self._time_last_subscribed > 1800:\r\n # sometimes after running for a few hours it\r\n # will lose some of the subscriptons for no\r\n # obvious reason. I've seen it losing the trades\r\n # and the lag channel channel already, and maybe\r\n # even others. 
Simply subscribing again completely\r\n # fixes this condition. For this reason we renew\r\n # all channel subscriptions once every hour.\r\n self.debug(\"### refreshing channel subscriptions\")\r\n self.channel_subscribe(False)", "def transcribe(config):\n\n long_mode = True\n\n if 'audio_data' not in config:\n raise KeyError(\"`audio_data` not specified for transcription operation.\")\n\n if 'timeout' not in config:\n raise KeyError(\"`timeout` not specified for transcription operation.\")\n\n try:\n if config.pop('audio_duration') < 60: \n long_mode = False\n except KeyError:\n pass\n\n if long_mode:\n print(\"Running in long audio duration mode (audio is >60 seconds duration)...\")\n print(\"Uploading file...\")\n remote_object = gcloud_upload_file(config['audio_data'], config['storage_bucket'])\n file_name = remote_object.rsplit('/', 1)[-1]\n\n config['audio_data'] = \"gs://%s/%s\" % (config['storage_bucket'], file_name)\n storage_bucket = config.pop('storage_bucket')\n\n print(\"Transcribing file...\")\n result = gcloud_transcribe_long(config)\n\n print(\"Transcription successful, cleaning up...\")\n print(\"Deleting uploaded GCS file...\")\n gcloud_delete_file(file_name, storage_bucket)\n else:\n print(\"Transcribing file...\")\n config.pop('timeout')\n config.pop('storage_bucket')\n result = gcloud_transcribe_short(config)\n\n return result", "def track_tx(self) -> None:\n\n start = time()\n self.waiting = True\n while self.waiting:\n sleep(self.timer_tick)\n if time() - start >= self.time_threshold:\n if self.tx_type:\n msg = (\n f\"🕔 * {self.tx_type} * - tx sent from {str(self.sender.address)}\"\n f\" has exceeded threshold of {str(self.time_threshold)} seconds\"\n )\n else:\n msg = (\n f\"🕔 tx sent from {str(self.sender.address)} has exceeded threshold of \"\n f\"{str(self.time_threshold)} seconds\"\n )\n self.alert(msg)\n self.waiting = False\n self.sender = None\n self.tx_type = \"\"", "def timeoutConnection(self):\n self.transport.stopProducing()" ]
[ "0.68453956", "0.60066426", "0.57858366", "0.5739796", "0.57244843", "0.57060385", "0.56948715", "0.5627209", "0.55653596", "0.54652876", "0.54332554", "0.5412271", "0.54015946", "0.53609794", "0.5348195", "0.53372765", "0.53358334", "0.5301576", "0.5300465", "0.52419746", "0.52354187", "0.52280384", "0.51990014", "0.5197897", "0.5178572", "0.51693493", "0.51507705", "0.51507413", "0.51378614", "0.5132074" ]
0.65298873
1
Return function that computes loss only for those targets that are not 1.
def masked_loss_func(loss_function): def masked_loss_fn(predictions, targets): assert targets.ndim == 1 target_mask = T.neq(targets, -1) valid_inds = T.nonzero(target_mask) return loss_function(predictions[valid_inds], targets[valid_inds]) return masked_loss_fn
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loss_fn(self, targets, outputs, model):", "def loss_func(self, logits, targets):\r\n return -np.sum(targets * np.log(logits)) / logits.shape[0]", "def loss_fn(self, pred: Tensor, true: Tensor) -> Tensor:\n pass", "def mask_nan_keep_loss(y_true, y_pred):\n y_pred, y_true, num_notnan = mask_nan(y_true, y_pred)\n loss = K.sum((K.flatten(y_pred) - K.flatten(y_true)) ** 2) / num_notnan\n return tf.where(~tf.math.is_nan(loss), loss, 0)", "def loss_valid(self, mask, y_true, y_pred):\n return self.l1(mask * y_true, mask * y_pred)", "def loss_function(self, targets, outputs):\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=targets, logits=outputs)\n return tf.reduce_mean(cross_entropy)", "def _neg_loss(outputs: torch.Tensor, targets: torch.Tensor):\n pos_inds = targets.eq(1).float()\n neg_inds = targets.lt(1).float()\n\n neg_weights = torch.pow(1 - targets, 4)\n\n loss = 0\n\n pos_loss = torch.log(outputs) * torch.pow(1 - outputs, 2) * pos_inds\n neg_loss = torch.log(1 - outputs) * torch.pow(outputs, 2) * neg_weights * neg_inds\n\n num_pos = pos_inds.float().sum()\n pos_loss = pos_loss.sum()\n neg_loss = neg_loss.sum()\n\n if num_pos == 0:\n loss = loss - neg_loss\n else:\n loss = loss - (pos_loss + neg_loss) / num_pos\n return loss", "def forward(self, pred, target):\n if self.mask:\n target, mask = target\n # todo: loss with mask\n else:\n # todo: loss w/o mask\n pass\n return loss", "def check_loss(self, loss):\r\n if loss in loss_functions:\r\n return loss\r\n else:\r\n raise InvalidNeuralNetwork()", "def _compute_unreduced_loss_impl(self, labels, logits, mask=None):\n raise NotImplementedError('Calling an abstract method.')", "def loss_function(cls, logits, label, targeted):\n\n if targeted:\n adv_loss = - torch.gather(logits, 1, label)\n else:\n adv_loss = torch.gather(logits, 1, label)\n\n return adv_loss.mean()", "def targets_weights_fn(self):\n return common_layers.weights_all", "def loss_function(self, prbs, labels, mask):\n\t\t# Using boolean mask\n\t\tloss = tf.keras.losses.sparse_categorical_crossentropy(labels, prbs)\n\t\tsum_loss = tf.reduce_sum(tf.boolean_mask(loss, mask))\n\n\t\treturn sum_loss", "def f1_loss(y_true, y_pred):\n return 1.0 - f1_score(y_true, y_pred, average='weighted')", "def cls_wrapper(target_labels, predicted_labels):\n # Find which targets contribute to the loss (targets with non-neutral labels)\n contributing_indices = tf.where(tf.not_equal(target_labels, -1))\n\n # Take contributing\n target_labels = tf.gather_nd(target_labels, contributing_indices)\n contributing_prediction = tf.gather_nd(predicted_labels, contributing_indices)\n\n # Compute loss\n res = function(target_labels,\n contributing_prediction)\n\n # Zero batch size case\n return K.switch(tf.size(res) > 0, K.mean(res), tf.constant(0.0))", "def dice_loss(\n inputs: torch.Tensor,\n targets: torch.Tensor,\n num_masks: float,\n ):\n inputs = inputs.sigmoid()\n inputs = inputs.flatten(1)\n numerator = 2 * (inputs * targets).sum(-1)\n denominator = inputs.sum(-1) + targets.sum(-1)\n loss = 1 - (numerator + 1) / (denominator + 1)\n return loss.sum() / num_masks", "def loss_fn(outputs, labels):\n num_examples = outputs.size()[0]\n return -torch.sum(outputs[range(num_examples), labels])/num_examples", "def detector_loss(self, input, target, mask=None, loss_type=\"softmax\"):\n if loss_type == \"l2\":\n loss_func = nn.MSELoss(reduction=\"mean\")\n loss = loss_func(input, target)\n elif loss_type == \"softmax\":\n loss_func_BCE = nn.BCELoss(reduction='none').cuda()\n loss = 
loss_func_BCE(nn.functional.softmax(input, dim=1), target)\n loss = (loss.sum(dim=1) * mask).sum()\n loss = loss / (mask.sum() + 1e-10)\n return loss", "def loss_compact(y, a):\n return -1 * (y * np.log10(a) + (1 - y) * np.log10(1 - a))", "def loss_fn(gr_truth, pred):\n return 100 * dice_loss(pred, gr_truth) + softmax_weighted_loss(pred, gr_truth)", "def do_loss(logits, labels):\n return tf.reduce_sum(tf.square(logits - labels))", "def loss_fn(pred: torch.Tensor, truth: torch.Tensor):\n\n if truth is None or pred is None:\n return None\n\n return CrossEntropyLoss()(pred, truth)", "def _compute_loss(self, predictions, targets, **params):\n pass", "def compute_loss(self,\n pred_seg: Dict[str, torch.Tensor],\n target: torch.Tensor,\n ) -> Dict[str, torch.Tensor]:\n target[target > 0] = 1\n return super().compute_loss(pred_seg, target)", "def loss_fn(y_true,y_pred): \n loss = tf.nn.softmax_cross_entropy_with_logits_v2(y_true,\n y_pred,\n axis=-1,\n )\n loss = tf.reduce_mean(loss,name=\"loss\")\n return loss", "def __call__(self,\n predictions,\n targets,\n ignore_nan_targets=False,\n scope=None,\n **params):\n with tf.name_scope(scope, 'Loss', [predictions, targets, params]):\n if ignore_nan_targets:\n targets = tf.where(tf.is_nan(targets), predictions, targets)\n return self._compute_loss(predictions, targets, **params)", "def __call__(self, preds: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:\n if isinstance(targets, (list, tuple)):\n\n target_a, target_b, lam = targets\n loss = lam * self.criterion(preds, target_a) + (1 - lam) * self.criterion(preds, target_b)\n else:\n loss = self.criterion(preds, targets)\n return loss", "def get_nobackprop_loss(self) -> Dict[str, tt.Tensor]:\n return {k: dy.nobackprop(v) for k, v in self.expr_factors.items()}", "def multi_output_loss(y_true: tf.Tensor, y_pred: tf.Tensor) -> tf.float32:\n tf.print(\"Sum of actual masking: \", tf.reduce_sum(y_true))\n tf.print(\"Sum of predicted masking: \", tf.reduce_sum(y_pred))\n # loss_multiplier = tf.where(tf.greater(y_true, tf.constant(5.)), tf.constant(10.),\n # tf.constant(1.))\n loss = tf.keras.losses.mean_squared_error(y_true,\n y_pred)\n # tf.print(\"Y true: \", y_true)\n # tf.print(\"Loss multiplier: \", loss_multiplier)\n # loss *= tf.cast(loss_multiplier, dtype=tf.float32)\n return tf.reduce_mean(loss)", "def reg_wrapper(targets, predicted_deltas):\n # Extract labels, intentionally squeezed\n target_labels = targets[:, :, 0]\n\n # Extract deltas\n target_deltas = targets[:, :, 1:]\n\n # Find which targets contribute to the loss (targets with positive labels)\n contributing_indices = tf.where(tf.equal(target_labels, 1))\n\n # Take contributing\n target_deltas = tf.gather_nd(target_deltas, contributing_indices)\n contributing_prediction = tf.gather_nd(predicted_deltas, contributing_indices)\n\n # Compute loss\n res = function(target_deltas,\n contributing_prediction)\n # Zero batch size case\n return K.switch(tf.size(res) > 0, K.mean(res), tf.constant(0.0))" ]
[ "0.66698176", "0.6661593", "0.65615445", "0.6476919", "0.6280919", "0.62795126", "0.62525606", "0.6203152", "0.6193807", "0.6179458", "0.6176614", "0.6161819", "0.6148114", "0.6142843", "0.6128714", "0.610796", "0.6101929", "0.6089113", "0.6025568", "0.5995906", "0.59949726", "0.5987635", "0.5986894", "0.598246", "0.5979796", "0.5968267", "0.59612054", "0.5957809", "0.5946844", "0.59367245" ]
0.6711504
0
deep_hash creates a hash from the object
def deep_hash(obj): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_hash(o):\n if isinstance(o, (set, tuple, list)):\n return hash(tuple([make_hash(e) for e in o]))\n elif not isinstance(o, dict) and o.__class__.__module__ == 'builtins':\n return hash(o)\n elif not isinstance(o, dict):\n return make_hash(o.__dict__)\n\n new_o = copy.deepcopy(o)\n for k, v in new_o.items():\n new_o[k] = make_hash(v)\n return hash(tuple(frozenset(sorted(new_o.items()))))", "def make_hash(o):\n\n if isinstance(o, (set, tuple, list)):\n\n return hash( tuple([make_hash(e) for e in o]) )\n\n elif not isinstance(o, dict):\n\n return hash(o)\n\n new_o = copy.deepcopy(o)\n for k, v in new_o.items():\n new_o[k] = make_hash(v)\n\n return hash(tuple(frozenset(sorted(new_o.items()))))", "def hash(obj):\n \n import hashlib\n import pickle\n \n sha = hashlib.sha256()\n sha.update(pickle.dumps(obj))\n \n return sha.hexdigest()", "def hashcode(o):", "def __hash__(self):\n return hash(tuple(sorted(self.__dict__.items())))", "def __hash__(self):\n return hash(tuple(sorted(self.__dict__.items())))", "def __hash__(self):\n return hash(tuple(sorted(self.__dict__.items())))", "def __hash__(self):\n return hash(tuple(sorted(self.__dict__.items())))", "def __hash__(self):\r\n return hash(tuple(sorted(self.__dict__.items())))", "def structural_hash(obj: object) -> bytes:\n hasher = hashlib.blake2b()\n if isinstance(obj, (int, str, float, PurePath)):\n hasher.update(bytes(\"P\" + str(obj), \"utf-8\"))\n elif dataclasses.is_dataclass(obj):\n fields = dataclasses.fields(obj)\n hasher.update(bytes(f\"O{len(fields)}\\x20\", \"utf-8\"))\n for field in sorted(fields, key=lambda x: x.name):\n if not field.metadata.get(\"nohash\"):\n hasher.update(bytes(f\"F{len(field.name)}\\x20{field.name}\", \"utf-8\"))\n hasher.update(structural_hash(getattr(obj, field.name)))\n elif isinstance(obj, (collections.abc.Sequence, collections.abc.Set)):\n hasher.update(bytes(f\"L{len(obj)}\\x20\", \"utf-8\"))\n for member in obj:\n child_hash = structural_hash(member)\n hasher.update(bytes(f\"E{len(child_hash)}\\x20\", \"utf-8\"))\n hasher.update(child_hash)\n elif isinstance(obj, collections.abc.Mapping):\n hasher.update(bytes(f\"M{len(obj)}\\x20\", \"utf-8\"))\n for key, member in obj.items():\n child_hash = structural_hash(member)\n hasher.update(\n bytes(f\"E{len(key)}\\x20{key}\\x20{len(child_hash)}\\x20\", \"utf-8\")\n )\n hasher.update(child_hash)\n elif isinstance(obj, enum.Enum):\n hasher.update(bytes(str(obj), \"utf-8\"))\n elif obj is None:\n hasher.update(b\"N\")\n else:\n raise TypeError(\"Unhashable type\", obj)\n\n return hasher.digest()", "def __hash__(self, reinit=False):\n if not self.hash_value is None and not reinit:\n return self.hash_value\n elif isinstance(self, Leaf):\n self.hash_value = Hash.leaf_hash(self)\n return self.hash_value\n else:\n self.hash_value = Hash.node_hash(self)\n return self.hash_value", "def __hash__(self):\n return hash(self.hash)", "def hash_obj(self, obj):\r\n md5er = hashlib.md5()\r\n update_hash(md5er, obj)\r\n return md5er.hexdigest()", "def __hash__(self):\n hashable = tuple(self.pandas_object.values.tobytes())\n if isinstance(self.pandas_object, pd.DataFrame):\n hashable += tuple(self.pandas_object.columns)\n else:\n hashable += tuple(self.pandas_object.name)\n return hash(hashable)", "def dict_hash(obj, start=''):\n h = hashlib.sha1(to_bytes(start))\n h.update(to_bytes(obj.__class__.__name__))\n if isinstance(obj, dict):\n for key, value in sorted(obj.items()):\n h.update(to_bytes(key))\n h.update(to_bytes(dict_hash(value)))\n elif isinstance(obj, (list, 
tuple)):\n for el in obj:\n h.update(to_bytes(dict_hash(el)))\n else:\n # basic types\n if isinstance(obj, bool):\n value = str(int(obj))\n elif isinstance(obj, (six.integer_types, float)):\n value = str(obj)\n elif isinstance(obj, (six.text_type, bytes)):\n value = obj\n elif obj is None:\n value = b''\n else:\n raise ValueError(\"Unsupported value type: %s\" % obj.__class__)\n h.update(to_bytes(value))\n return h.hexdigest()", "def hash(self):\n\n # deepcopy so that the cached definition property is not modified by the deletes below\n d = deepcopy(self.definition)\n\n # omit version\n if \"podpac_version\" in d:\n del d[\"podpac_version\"]\n\n # omit style in every node\n for k in d:\n if \"style\" in d[k]:\n del d[k][\"style\"]\n\n s = json.dumps(d, separators=(\",\", \":\"), cls=JSONEncoder)\n return hash_alg(s.encode(\"utf-8\")).hexdigest()", "def hash(self):\n\n # deepcopy so that the cached definition property is not modified by the deletes below\n d = deepcopy(self.definition)\n\n # omit version\n if \"podpac_version\" in d:\n del d[\"podpac_version\"]\n\n # omit style in every node\n for k in d:\n if \"style\" in d[k]:\n del d[k][\"style\"]\n\n s = json.dumps(d, separators=(\",\", \":\"), cls=JSONEncoder)\n return hash_alg(s.encode(\"utf-8\")).hexdigest()", "def get_hash(self):\n return freeze_dict(self.get_hash_params())", "def __hash__(self):\n return self.to_hash()", "def internal_hash(self): \n return hash(tuple(sorted(self.hashtriples())))", "def hash(obj, hash_name='md5', coerce_mmap=False):\n if 'numpy' in sys.modules:\n hasher = NumpyHasher(hash_name=hash_name, coerce_mmap=coerce_mmap)\n else:\n hasher = Hasher(hash_name=hash_name)\n return hasher.hash(obj)", "def _makehash():\n return defaultdict(_makehash)", "def object_hash(obj):\n try:\n code = obj.__code__.co_code\n except AttributeError:\n attrlist = [getattr(obj, name) for name in dir(obj)\n if not name.startswith('__')]\n codelist = [attr.__code__.co_code for attr in attrlist\n if hasattr(attr, '__code__')]\n code = b','.join(codelist)\n digest = hashlib.md5(code).hexdigest()\n return digest", "def hash(obj, hash_name='md5', coerce_mmap=False):\r\n if 'numpy' in sys.modules:\r\n hasher = NumpyHasher(hash_name=hash_name, coerce_mmap=coerce_mmap)\r\n else:\r\n hasher = Hasher(hash_name=hash_name)\r\n return hasher.hash(obj)", "def get_hash(self):\n if self.contributes:\n return hash_from_values(self.iter_values())", "def hash(self):\n return self.hash_by_id(self.id)", "def __hash__(self):\n return super().__hash__()", "def __hash__(self):\n return hash(self.base_location) ^ hash(self.fold_path) ^ hash(self.field)", "def __hash__(self):\n return self['id'].__hash__()", "def hashable(obj):\n return bytes(str(obj), \"utf-8\")" ]
[ "0.64543235", "0.6425407", "0.62933004", "0.61203706", "0.6063225", "0.6063225", "0.6063225", "0.6063225", "0.6049848", "0.60369045", "0.5989689", "0.5945062", "0.5930857", "0.5918197", "0.58890283", "0.5871536", "0.5871536", "0.5856316", "0.5843264", "0.58174515", "0.57913715", "0.5776458", "0.57729256", "0.57606184", "0.57369155", "0.5735146", "0.57215804", "0.57091135", "0.57062346", "0.5687651" ]
0.8473571
0
deep_cmp compares two objects deeply
def deep_cmp(obj1, obj2): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_deep_equals(obja, objb, isequal):\n\n objatree = wo.typedtree(obja)\n objbtree = wo.typedtree(objb)\n match = objatree == objbtree\n ok = match == isequal\n\n if ok:\n s = \"pass\"\n else:\n s = \"fail\"\n\n print(f\"{obja} == {objb} is {match} : {s}\")\n return ok", "def _assert_equal(obj1, obj2, expected_type=None, deep_fields=None):\n if obj1 is None and obj2 is None:\n return\n\n deep_fields = deep_fields or {}\n\n if expected_type is not None:\n _assert(\n isinstance(obj1, expected_type),\n \"type mismatch for obj1: expected '{0}' but was '{1}'\".format(\n expected_type, type(obj1)\n ),\n )\n _assert(\n isinstance(obj2, expected_type),\n \"type mismatch for obj2: expected '{0}' but was '{1}'\".format(\n expected_type, type(obj2)\n ),\n )\n _assert(type(obj1) is type(obj2), \"obj1 and obj2 are not the same type.\")\n\n for key in obj1.__dict__.keys():\n _assert(hasattr(obj1, key), \"obj1 does not have an attribute '%s'\" % key)\n _assert(hasattr(obj2, key), \"obj2 does not have an attribute '%s'\" % key)\n\n if key not in deep_fields.keys():\n _assert(\n getattr(obj1, key) == getattr(obj2, key),\n \"%s was not the same (%s, %s)\"\n % (key, getattr(obj1, key), getattr(obj2, key)),\n )\n else:\n nested1 = getattr(obj1, key)\n nested2 = getattr(obj2, key)\n\n if isinstance(nested1, list) and isinstance(nested2, list):\n l1 = sorted(getattr(obj1, key), key=lambda x: str(x))\n l2 = sorted(getattr(obj2, key), key=lambda x: str(x))\n\n _assert(\n len(l1) == len(l2), \"Length of list field %s was different\" % key\n )\n\n for item1, item2 in zip(l1, l2):\n deep_fields[key](item1, item2)\n else:\n deep_fields[key](nested1, nested2)", "def cmp ( self, object1, object2 ):\n return cmp( self.get_raw_value( object1 ),\n self.get_raw_value( object2 ) )", "def cmp ( self, object1, object2 ):\n return cmp( object1[ self.index ], object2[ self.index ] )", "def compare_structs(expected, actual, should_strict_compare=None, path=None):\n if path is None:\n path = []\n differences = []\n\n if isinstance(expected, dict) and isinstance(actual, dict):\n expected_keys = frozenset(list(expected.keys()))\n actual_keys = frozenset(list(actual.keys()))\n\n for key in expected_keys - actual_keys:\n differences.append(f'{_path_to_string(path + [key])}: not found in actual')\n\n if should_strict_compare is not None and should_strict_compare(path):\n for key in actual_keys - expected_keys:\n differences.append(f'{_path_to_string(path + [key])}: only defined in actual')\n\n for key in expected_keys & actual_keys:\n child_differences = compare_structs(expected[key], actual[key], should_strict_compare, path + [key])\n differences.extend(child_differences)\n\n elif expected != actual:\n differences.append('{path}: {a} != {b} (expected != actual)'.format(\n path=_path_to_string(path),\n a=repr(expected),\n b=repr(actual)\n ))\n\n return differences", "def _exact_compare(tree1, tree2):\n attrs = ['name', 'length', 'support']\n for n1, n2 in zip(tree1.postorder(), tree2.postorder()):\n for attr in attrs:\n if getattr(n1, attr, None) != getattr(n2, attr, None):\n return False\n return True", "def test_shallow_deep_object(self):\n # cache params\n cache_key = 'test_shallow_deep_object'\n cache_len = 60\n num_items = 3\n num_sub_items = 20000\n\n # prepare cache data and save\n cache_data = {}\n for n in range(num_items):\n cache_data[n] = self.get_cache_data(num_sub_items)\n multicache.set(cache_key, cache_data, cache_len)\n\n # retrieve data\n retrieved_data = multicache.get(cache_key)\n\n # test\n 
self.assertEqual(cache_data[2].items().sort(),\n retrieved_data[2].items().sort())\n self.assertEqual(cache_data.keys().sort(), retrieved_data.keys().sort())", "def diff(self, other, match=lambda x: True, clean=False):\n result = {}\n\n def _iterativediff(t1, t2, subdir):\n \"\"\"compares two trees and appends new tree nodes to examine to\n the stack\"\"\"\n if t1 is None:\n t1 = {}\n if t2 is None:\n t2 = {}\n\n for e1 in t1:\n realname = subdir + pycompat.fsencode(e1.name)\n\n if e1.type == pygit2.GIT_OBJ_TREE:\n try:\n e2 = t2[e1.name]\n if e2.type != pygit2.GIT_OBJ_TREE:\n e2 = None\n except KeyError:\n e2 = None\n\n stack.append((realname + b'/', e1, e2))\n else:\n n1, fl1 = self.find(realname)\n\n try:\n e2 = t2[e1.name]\n n2, fl2 = other.find(realname)\n except KeyError:\n e2 = None\n n2, fl2 = (None, b'')\n\n if e2 is not None and e2.type == pygit2.GIT_OBJ_TREE:\n stack.append((realname + b'/', None, e2))\n\n if not match(realname):\n continue\n\n if n1 != n2 or fl1 != fl2:\n result[realname] = ((n1, fl1), (n2, fl2))\n elif clean:\n result[realname] = None\n\n for e2 in t2:\n if e2.name in t1:\n continue\n\n realname = subdir + pycompat.fsencode(e2.name)\n\n if e2.type == pygit2.GIT_OBJ_TREE:\n stack.append((realname + b'/', None, e2))\n elif match(realname):\n n2, fl2 = other.find(realname)\n result[realname] = ((None, b''), (n2, fl2))\n\n stack = []\n _iterativediff(self._tree, other._tree, b'')\n while stack:\n subdir, t1, t2 = stack.pop()\n # stack is populated in the function call\n _iterativediff(t1, t2, subdir)\n\n return result", "def deep_hash(obj):\n pass", "def sub_comparison(obj1,obj2,translate):\n return [Difference(f\"{obj1.__class__.__name__} > {meth.__name__}\",result) for (meth,attr) in translate if (result := meth(getattr(obj1,attr),getattr(obj2,attr))) is not None]", "def _iterativediff(t1, t2, subdir):\n if t1 is None:\n t1 = {}\n if t2 is None:\n t2 = {}\n\n for e1 in t1:\n realname = subdir + pycompat.fsencode(e1.name)\n\n if e1.type == pygit2.GIT_OBJ_TREE:\n try:\n e2 = t2[e1.name]\n if e2.type != pygit2.GIT_OBJ_TREE:\n e2 = None\n except KeyError:\n e2 = None\n\n stack.append((realname + b'/', e1, e2))\n else:\n n1, fl1 = self.find(realname)\n\n try:\n e2 = t2[e1.name]\n n2, fl2 = other.find(realname)\n except KeyError:\n e2 = None\n n2, fl2 = (None, b'')\n\n if e2 is not None and e2.type == pygit2.GIT_OBJ_TREE:\n stack.append((realname + b'/', None, e2))\n\n if not match(realname):\n continue\n\n if n1 != n2 or fl1 != fl2:\n result[realname] = ((n1, fl1), (n2, fl2))\n elif clean:\n result[realname] = None\n\n for e2 in t2:\n if e2.name in t1:\n continue\n\n realname = subdir + pycompat.fsencode(e2.name)\n\n if e2.type == pygit2.GIT_OBJ_TREE:\n stack.append((realname + b'/', None, e2))\n elif match(realname):\n n2, fl2 = other.find(realname)\n result[realname] = ((None, b''), (n2, fl2))", "def __cmp__(self, that):\n property_compare = self.CmpProperties(that)\n if property_compare:\n return property_compare\n else:\n return cmp(self.__entity.key(), that.__entity.key())", "def compare(self, other):\n # all redefinitions of __cmp__ method should start with the\n # following lines:\n if self is other:\n return 0\n n1 = self.__class__\n n2 = other.__class__\n c = _old_compare(n1, n2)\n if c:\n return c\n #\n st = self._hashable_content()\n ot = other._hashable_content()\n c = (len(st) > len(ot)) - (len(st) < len(ot))\n if c:\n return c\n for l, r in zip(st, ot):\n l = Basic(*l) if isinstance(l, frozenset) else l\n r = Basic(*r) if isinstance(r, frozenset) else 
r\n if isinstance(l, Basic):\n c = l.compare(r)\n else:\n c = (l > r) - (l < r)\n if c:\n return c\n return 0", "def recursiveEquals(obj1, obj2, **kwargs):\n if isIndexable(obj1) != isIndexable(obj2):\n return False\n if isIndexable(obj1):\n for entry1, entry2 in zip(obj1, obj2):\n if not recursiveEquals(entry1, entry2, **kwargs):\n return False\n return True\n # Do the numeric evaluation\n num1 = expressionToNumber(evaluate(obj1, **kwargs))\n num2 = expressionToNumber(evaluate(obj2, **kwargs))\n return np.isclose(num1, num2)", "def compareTree(t1, t2):\n \n reorderTree(t1)\n reorderTree(t2)\n\n return compareTreeHelper(t1, t2)", "def test_compare() -> None:\n\n obj = SpecificLocation()\n obj2 = SpecificLocation()\n\n assert obj != obj2\n\n obj._id = obj2.id\n\n assert obj == obj2", "def compare(self, other):\n # all redefinitions of __cmp__ method should start with the\n # following three lines:\n if self is other: return 0\n c = cmp(self.__class__, other.__class__)\n if c: return c\n #\n st = self._hashable_content()\n ot = other._hashable_content()\n c = cmp(len(st),len(ot))\n if c: return c\n Basic = self.__class__.Basic\n for l,r in zip(st,ot):\n if isinstance(l, Basic):\n c = l.compare(r)\n else:\n c = cmp(l, r)\n if c: return c\n return 0", "def compare_trees(tree1, tree2):\n for key in tree1.keys():\n print(key)\n assert key in tree2.keys()\n if isinstance(tree1[key], list):\n print(tree1[key])\n assert tree1[key] == tree2[key]\n else:\n print('Calling compare_trees recursively')\n compare_trees(tree1[key], tree2[key])", "def _cmp(pack, other):\n return pack.name < other.name", "def _compare_elements(self, old, new):\n res = None\n # We want to go through the tree post-order\n if isinstance(old, dict):\n res_dict = self.compare_dicts(old, new)\n if (len(res_dict) > 0):\n res = res_dict\n # Now we are on the same level\n # different types, new value is new\n elif (type(old) != type(new)):\n res = {'---': old, '+++': new}\n # recursive arrays\n # we can be sure now, that both new and old are\n # of the same type\n elif (isinstance(old, list)):\n res_arr = self._compare_arrays(old, new)\n if (len(res_arr) > 0):\n res = res_arr\n # the only thing remaining are scalars\n else:\n scalar_diff = self._compare_scalars(old, new)\n if scalar_diff is not None:\n res = scalar_diff\n\n return res", "def __cmp__(self, other):\n _, _ = self, other\n return 0", "def test_comparison_overrides(self):\n\n # adding these methods directly to each class to avoid decoration\n # by the testlib decorators.\n class H1(object):\n def __init__(self, value='abc'):\n self.value = value\n def __nonzero__(self):\n return False\n def __hash__(self):\n return hash(self.value)\n def __eq__(self, other):\n if isinstance(other, type(self)):\n return self.value == other.value\n return False\n class H2(object):\n def __init__(self, value='abc'):\n self.value = value\n def __nonzero__(self):\n return False\n def __hash__(self):\n return hash(self.value)\n def __eq__(self, other):\n if isinstance(other, type(self)):\n return self.value == other.value\n return False\n class H3(object):\n def __init__(self, value='abc'):\n self.value = value\n def __nonzero__(self):\n return False\n def __hash__(self):\n return hash(self.value)\n def __eq__(self, other):\n if isinstance(other, type(self)):\n return self.value == other.value\n return False\n class H6(object):\n def __init__(self, value='abc'):\n self.value = value\n def __nonzero__(self):\n return False\n def __hash__(self):\n return hash(self.value)\n def __eq__(self, 
other):\n if isinstance(other, type(self)):\n return self.value == other.value\n return False\n\n \n mapper(H1, t1, properties={\n 'h2s': relation(H2, backref='h1'),\n 'h3s': relation(H3, secondary=t4, backref='h1s'),\n 'h1s': relation(H1, secondary=t5, backref='parent_h1'),\n 't6a': relation(H6, backref='h1a',\n primaryjoin=t1.c.id==t6.c.ht1a_id),\n 't6b': relation(H6, backref='h1b',\n primaryjoin=t1.c.id==t6.c.ht1b_id),\n })\n mapper(H2, t2)\n mapper(H3, t3)\n mapper(H6, t6)\n\n s = create_session()\n for i in range(3):\n h1 = H1()\n s.save(h1)\n\n h1.h2s.append(H2())\n h1.h3s.extend([H3(), H3()])\n h1.h1s.append(H1())\n\n s.flush()\n self.assertEquals(t1.count().scalar(), 4)\n\n h6 = H6()\n h6.h1a = h1\n h6.h1b = h1\n\n h6 = H6()\n h6.h1a = h1\n h6.h1b = x = H1()\n assert x in s\n\n h6.h1b.h2s.append(H2())\n\n s.flush()\n\n h1.h2s.extend([H2(), H2()])\n s.flush()\n\n h1s = s.query(H1).options(eagerload('h2s')).all()\n self.assertEqual(len(h1s), 5)\n\n self.assert_unordered_result(h1s, H1,\n {'h2s': []},\n {'h2s': []},\n {'h2s': (H2, [{'value': 'abc'},\n {'value': 'abc'},\n {'value': 'abc'}])},\n {'h2s': []},\n {'h2s': (H2, [{'value': 'abc'}])})\n\n h1s = s.query(H1).options(eagerload('h3s')).all()\n\n self.assertEqual(len(h1s), 5)\n h1s = s.query(H1).options(eagerload_all('t6a.h1b'),\n eagerload('h2s'),\n eagerload_all('h3s.h1s')).all()\n self.assertEqual(len(h1s), 5)", "def test_equivalency(self):\n def compare_func(obj, node):\n # same id\n self.assertEqual(obj.id, node.get(\"id\"))\n\n # same html\n self.assertEqual(obj.html.prettify, node.prettify)\n\n # parents have same id (only for non-root elements)\n if not obj == self.document.root:\n self.assertEqual(obj.parent.id, node.parent.get(\"id\"))\n\n # same number of children\n child_nodes = self.get_children_of_node(node)\n self.assertEqual(len(obj.children), len(child_nodes))\n\n # children have same ids\n for (child_obj, child_node) in zip(obj.children, child_nodes):\n self.assertEqual(child_obj.id, child_node.get(\"id\"))\n\n self.recursively_compare_tree_against_html(compare_func)", "def test_cmp(self, tmpdir, treantclass):\n with tmpdir.as_cwd():\n c1 = treantclass('a')\n c2 = treantclass('b')\n c3 = treantclass('c')\n\n assert sorted([c3, c2, c1]) == [c1, c2, c3]\n assert c1 <= c2 < c3\n assert c3 >= c2 > c1", "def __cmp__(self, other):\n return cmp(repr(self), repr(other))", "def with_cmp(attrs):\n def attrs_to_tuple(obj):\n \"\"\"\n Create a tuple of all values of *obj*'s *attrs*.\n \"\"\"\n return tuple(getattr(obj, a) for a in attrs)\n\n def eq(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) == attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def ne(self, other):\n result = eq(self, other)\n if result is NotImplemented:\n return NotImplemented\n else:\n return not result\n\n def lt(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) < attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def le(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) <= attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def gt(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) > attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def ge(self, other):\n if isinstance(other, self.__class__):\n return attrs_to_tuple(self) >= attrs_to_tuple(other)\n else:\n return NotImplemented\n\n def hash_(self):\n return hash(attrs_to_tuple(self))\n\n def wrap(cl):\n cl.__eq__ = eq\n cl.__ne__ = ne\n 
cl.__lt__ = lt\n cl.__le__ = le\n cl.__gt__ = gt\n cl.__ge__ = ge\n cl.__hash__ = hash_\n\n return cl\n return wrap", "def compare_nested_dicts(dict1,dict2):\n\n if sorted(dict1.keys()) != sorted(dict2.keys()):\n return False\n\n for key in dict1:\n if isinstance(dict1[key],dict):\n res = compare_nested_dicts(dict1[key],dict2[key])\n if not res:\n return False\n else:\n continue\n if not isinstance(dict1[key],(six.string_types,list,NoneType)) and not np.allclose(dict1[key],dict2[key]):\n return False\n elif isinstance(dict1[key],(six.string_types,list,NoneType)) and not dict1[key] == dict2[key]:\n return False\n\n return True", "def __cmp__(self, other):\n if not isinstance(other, ComplexField_class):\n return cmp(type(self), type(other))\n return cmp(self._prec, other._prec)", "def pod_equals(x, y):\n return type(x) == type(y) and x.__dict__ == y.__dict__", "def compareObjects(self, item1, item2):\n\t\tkeys1 = item1.keys()\n\t\tkeys2 = item2.keys()\n\t\tkeys1.sort()\n\t\tkeys2.sort()\n\t\tresult=True\n\t\tif keys1 != keys2:\n\t\t\treturn False\n\t\tfor key in keys1:\n\t\t\tif key == 'meta': continue\n\t\t\tkey1 = item1[key]\n\t\t\tkey2 = item2[key]\n\t\t\t# For our purpose, 30 is equal to 30.000\n\t\t\tif key == 'check_interval':\n\t\t\t\tkey1 = int(float(key1))\n\t\t\t\tkey2 = int(float(key2))\n\t\t\tif key1 != key2:\n\t\t\t\tresult = False\n\t\tif result == False: return False\n\t\treturn True" ]
[ "0.6726066", "0.66530275", "0.6527878", "0.64677584", "0.6271268", "0.621182", "0.6145705", "0.61302245", "0.61206347", "0.60844535", "0.60643643", "0.6057797", "0.6032406", "0.6009618", "0.59857124", "0.59731966", "0.5950671", "0.5939408", "0.5894853", "0.58666605", "0.5774725", "0.5773367", "0.5741469", "0.5723668", "0.5698148", "0.568158", "0.5657071", "0.56423825", "0.5630915", "0.5625435" ]
0.9118865
0
Move files in the current working directory that match a pattern.
def batch_mover(pattern, directory=None): if directory is None: directory = Path().cwd() for i in os.scandir(directory): if file_check(pattern, i.name): pass # shutil.move(i.name, yeah we gotta change a lot here
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_files(sim_dir, dest_dir, file_patterns):\n for f in file_patterns:\n for p in glob.glob1(sim_dir, f):\n try:\n shutil.move(os.path.join(sim_dir, p), os.path.join(dest_dir, p))\n except Exception as e:\n print(\n \"error while copy ing file from {} to {}\\n{}\".format(\n sim_dir, dest_dir, e\n )\n )", "def move_files(from_dir, to_dir, keyword):\n \n if not os.path.exists(to_dir):\n os.mkdir(to_dir)\n \n if keyword == None:\n # If keyword is left empty, from_dir is considered a list of files.\n to_move = from_dir\n else:\n to_move = glob.glob(os.path.join(from_dir, '*' + keyword + '*'))\n \n n_moved = 0 \n for f in to_move:\n if os.path.isfile(f):\n shutil.move(f, to_dir)\n n_moved += 1\n \n print \"Moved %i files to %s.\" % (n_moved, to_dir)", "def gmove(pattern, destination):\n for item in glob.glob(pattern):\n if not move(item, destination):\n return False\n return True", "def move(mover, backup, regular_expressions, capture_groups):\r\n find, move = regular_expressions\r\n mover.find_files(find)\r\n mover.move_files(move, capture_groups)\r\n backup.write_to_json()", "def move_backups(self, name, source, destination, regex):\n files = os.listdir(source)\n pattern = re.compile(regex)\n for entry in files:\n match = pattern.match(entry)\n if match is None:\n continue\n if name == match.group(1):\n self.logger.debug('Archiving %s', entry)\n path = os.path.join(source, entry)\n result = self.os_rename(path, os.path.join(destination, entry))\n if result != 0:\n return result\n return 0", "def MovieScan():\r\n for root, dirnames, filenames in os.walk(dlPath):\r\n for extend in movtypes:\r\n for filename in fnmatch.filter(filenames, extend):\r\n matches.append(os.path.join(root, filename))\r\n print(os.path.join(root, filename))\r\n shutil.move(os.path.join(root, filename), os.path.join(moviePath, filename))\r\n print color.GREEN + 'File succesfully moved!' + color.ENDC\r\n print 'Finished Scanning For Movies'", "def MusicScan():\r\n for root, dirnames, filenames in os.walk(dlPath):\r\n for extend in mustypes:\r\n for filename in fnmatch.filter(filenames, extend):\r\n matches.append(os.path.join(root, filename))\r\n print(os.path.join(root, filename))\r\n shutil.move(os.path.join(root, filename), os.path.join(musicPath, filename))\r\n print color.GREEN + 'File succesfully moved!' 
+ color.ENDC\r\n print 'Finished Scanning For Music'", "def findfif2move(self, source, destination, foldername):\n import glob\n import shutil\n\n os.chdir(source)\n mainfolders = os.listdir(u'.')\n\n for fname in mainfolders:\n try:\n if fname[:2] == foldername:\n subjectdir = os.path.join(source, fname)\n os.chdir(subjectdir)\n subfolders = os.listdir(u'.')\n \n # for each subject in the provided subfolders \n for s in subfolders:\n if s[0] == 's':\n sessiondir = os.path.join(subjectdir, s)\n os.chdir(sessiondir)\n file = glob.glob(\"*.fif\") # find files to move\n\n for files in file: \n shutil.copy(os.path.join(sessiondir,files),\n destination + fname[1:])\n except Exception:\n print(\"Something went wrong while copying the data >>>\", fname)\n pass\n os.chdir(source)", "def _move_files(topdatadir, startdate, model_forcing):\n\n curdate = startdate\n subdir = f\"{topdatadir}/cf_{model_forcing}\"\n subdir += f\"_{curdate.year:04d}{curdate.month:02d}\"\n files = glob.glob(f\"{subdir}/*.NC\")\n for filename in files:\n shutil.move(filename, os.path.join(topdatadir, os.path.basename(filename)))\n shutil.rmtree(subdir)", "def move_file():\n # print(\"\\n\".join(os.listdir(filepath)))\n # folders = [os.path.join(filepath, fld) for fld in os.listdir(filepath)]\n # print(filepath + \":\\n \" + \"\\n \".join(folders))\n folders = filter(os.path.isdir, os.listdir(u\".\"))\n # print(\"Sub-folders: \", u\"\\n\".join(folders))\n for folder in folders:\n files = [os.path.join(folder, fn) for fn in os.listdir(folder)]\n files = filter(os.path.isfile, files)\n for fn in files:\n _, filename = os.path.split(fn)\n shutil.move(fn, filename)\n assert 0 == len(os.listdir(folder))", "def move(matches):\n for source in matches:\n target = matches[source]\n os.rename(source, target)", "def move_files(fname_fout, root_dir, dest_dir):\n fname, f_ext = os.path.splitext(fname_fout)\n # Find files which filename of fname_fout\n matches = []\n pattern = fname + '*'\n root_fnames = os.listdir(root_dir)\n for filename in fnmatch.filter(root_fnames, pattern):\n matches.append([filename, os.path.join(root_dir, filename)])\n # Extract new folder name based on fname_fout\n new_folder_name = reshape_fname(fname_fout, ['nairfoil', 'nsetup'])\n dest_dir = os.path.join(dest_dir, new_folder_name)\n # Move files\n for cur_file in matches:\n os.renames(cur_file[1], os.path.join(dest_dir, cur_file[0]))", "def move_to_folder(folder = \"output\"):\n for files in os.listdir(os.getcwd()):\n if files.endswith(\".tcl\") or files.endswith(\".pdb\") or files.endswith(\".fasta\") or files.endswith(\".tpl\"):\n new_file = folder + \"/\" + files\n os.rename(files, new_file)", "def move_files_with_extension(self, extension: str):\n\n while True:\n files_with_extension = self.collect_files_with_extensions(extension)\n print(files_with_extension)\n folders_containing = set(\n [\n os.path.basename(os.path.dirname(file))\n for file in files_with_extension\n ]\n )\n directory = input(\n f\"Files with '{extension}' extension are scattered in your folders:\\n\"\n f\" {', '.join(folders_containing)}\\n\"\n f\"Where do you want to put them?\\n\"\n f\"({', '.join(self.possibilities.keys())})\\n\"\n )\n if directory in self.possibilities:\n self.move_files(files_with_extension, directory)\n break\n else:\n print(\"Invalid Input\")", "def moveFiles(outputDir, files):\n\tfor fn in files:\n\t\tshutil.move(fn, join(outputDir, getFilenameWithoutPath(fn)))", "def move_files(src_dir, dst_dir):\n for f in os.listdir(src_dir):\n try:\n name, season, episode = 
FILENAME_PATTERN.search(f).groups()\n except AttributeError:\n try:\n name, season, episode = FILENAME_PATTERN2.search(f).groups()\n except AttributeError:\n print \"Cannot parse\", f\n pass\n\n name = name.replace('.', ' ').replace('_', ' ').strip().title()\n\n dir_path = os.path.join(dst_dir, name, 'Season %02d' % int(season))\n full_path = os.path.join(dir_path, f)\n source_path = os.path.join(src_dir, f)\n\n if not os.path.exists(dir_path):\n os.makedirs(dir_path, 0777)\n\n if not os.path.exists(full_path):\n shutil.move(source_path, full_path)\n os.symlink(full_path, source_path)", "def move_files(self, files: List[str], directory=\"\"):\n result = []\n for file in files:\n if directory == \"\":\n temp_file = File(file)\n new_directory = self._create_or_define(temp_file)\n origin_folder = \"\"\n else:\n new_directory = directory\n origin_folder = os.path.basename(os.path.dirname(file))\n temp_file = File(os.path.basename(file))\n\n if not file.startswith(new_directory):\n if temp_file.get_extension():\n temp_extension = \".\" + temp_file.get_extension()\n else:\n temp_extension = \"\"\n\n ordinal_number = self.check_same_objects(new_directory, temp_file)\n target_name = temp_file.get_just_name() + temp_extension\n if ordinal_number:\n formatted_ordinal_number = f\" ({ordinal_number - 1})\"\n target_name = (\n temp_file.get_just_name()\n + formatted_ordinal_number\n + temp_extension\n )\n\n if self.underscore_flag:\n target_name = target_name.replace(\" \", \"_\")\n\n new_position = os.path.join(self.directory, new_directory, target_name)\n\n file_position = os.path.join(\n self.directory, origin_folder, str(temp_file)\n )\n if file_position != os.path.join(\n self.directory,\n new_directory,\n temp_file.get_just_name() + temp_extension,\n ):\n result.append(os.path.join(origin_folder, str(temp_file)))\n self.possibilities[new_directory].files.append(temp_file)\n if not self.dry_run:\n os.rename(file_position, new_position)\n else:\n print(f\"{file_position} would be moved to {new_position}\")\n elif self.dry_run:\n print(\n f\"{file_position} won't be move since the location is the same\"\n )\n\n self.log_result(result, directory)", "def move_files(src, dst, filenames):\n for filename in filenames:\n os.rename(os.path.join(src, filename), os.path.join(dst, filename))", "def moveFiles(inputDir, inputFiles):\n\tfor file in inputFiles:\n\t\tlogger.debug('moveFiles: {0}'.format(file))\n\t\tshutil.move(join(inputDir, file), join(inputDir, 'processed', file))\n\n\treturn 0", "def move_files(file: str, destination: str):\n\n try:\n result = _process_files(\"mv\", \"-v\", file, destination)\n except FileNotFoundError:\n print(\"ERROR: '{}' does not exist.\".format(file))\n except FolderNotFoundError:\n print(\n \"ERROR: '{}' destination does not exist.\".format(destination)\n )\n except InsufficientRightsError:\n print(\"ERROR: Insufficient rights to destination '{}'.\".format(\n destination)\n )\n else:\n print(result)", "def moveDirectoryContents(self, source, target, force=False):\n if source.endswith('/') or source.endswith('\\\\'):\n source += '*'\n else:\n source += os.path.sep + '*'\n if force:\n command = 'mv -f %s %s'\n else:\n command = 'mv %s %s'\n self.communicate(command % (source, target))", "def move_file(source, destination):\n #source = client_variables.output_folder\n #destination = client_variables.client_folder\n copyfiles = os.listdir(source)\n ext = (\".xlsx\", \".csv\", \".pdf\", \".png\")\n for copyfile in copyfiles:\n if copyfile.endswith(ext):\n copyfile = source + 
\"/\" + copyfile\n print \"copying\", copyfile\n shutil.move(copyfile, destination)\n elif copyfile.startswith('GetTotalByYearReport'):\n copyfile = source + \"/\" + copyfile\n print \"copying\", copyfile\n shutil.move(copyfile, destination)", "def main():\n os.chdir(\"FilesToSort\")\n files = os.listdir('.')\n for file in files:\n extension_directory = file[file.find('.') + 1:]\n try:\n os.mkdir(extension_directory)\n except FileExistsError:\n pass\n shutil.move(file, extension_directory)", "def add_files(\n self,\n pattern: str,\n ) -> None:\n matches = glob.glob(pattern, recursive=True)\n for match in sorted(matches):\n self.add_file(match)", "def move_to_folder(folder = \"output\"):\n\n for files in os.listdir(os.getcwd()):\n if files.endswith(\".tcl\") or files.endswith(\".pdb\") or files.endswith(\".fasta\"):\n new_file = folder + \"/\" + files\n os.rename(files, new_file)", "def simple_move_files(selected_image_list, out_dir='/command/results/top_images_test_set/'):\n for file_no in range(len(selected_image_list)):\n shutil.move(selected_image_list[file_no], out_dir + selected_image_list[file_no].split('/')[-1])\n return", "def moveFiles(self, fids, pid):\n\n f = self.getFileInfo(fids[0])\n if not f or f.package == pid:\n return False\n if not self.getPackageInfo(pid):\n raise PackageDoesNotExists(pid)\n\n # TODO move real files\n\n self.db.moveFiles(f.package, fids, pid)\n\n return True", "def move_media(items, dest):\n for file in items:\n filename = os.path.basename(file)\n os.rename(file, dest + '\\\\' + filename)", "def move_from_temp_directory(self):", "def move_files(origin=''):\n\tpng_file_list = glob.glob(origin+'*png')\n\tif png_file_list != []:\n\t\tif not os.path.exists(origin+'positions-histograms'):\n\t\t\tos.makedirs(origin+'positions-histograms')\n\t\tfor png in png_file_list:\n\t\t\tshutil.move(str(png), origin+'positions-histograms')" ]
[ "0.7225139", "0.7025052", "0.6768235", "0.6700694", "0.66823375", "0.6493144", "0.6468564", "0.64624935", "0.64362115", "0.62788254", "0.6260499", "0.62399226", "0.6172828", "0.6145879", "0.614499", "0.6129682", "0.6116104", "0.6089378", "0.60550845", "0.6048404", "0.6036013", "0.60319006", "0.6026394", "0.6016185", "0.5949945", "0.5908318", "0.58771896", "0.5855885", "0.580796", "0.5807758" ]
0.7294452
0
Check that the file exists and matched the desired pattern.
def file_check(pattern, file_to_check): if file_to_check.name.__contains__(pattern): yield True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def file_contains_pattern(file, pattern):\r\n if not os.path.isfile(file):\r\n raise NameError('file %s does not exist' % file)\r\n return not utils.system('egrep -q \"' + pattern + '\" ' + file,\r\n ignore_status=True)", "def validate_string_match(self, pattern, file):\r\n try:\r\n file_open = open(file, 'r')\r\n except:\r\n logging.info(\"file not found\")\r\n return -1\r\n file_data = file_open.read()\r\n ret_out = re.match(pattern, file_data)\r\n if ret_out:\r\n return True, ret_out\r\n else:\r\n return False, ret_out", "def validate_string_search(self, pattern, file):\r\n try:\r\n file_open = open(file, 'r')\r\n except:\r\n logging.info(\"file not found\")\r\n return -1\r\n file_data = file_open.read()\r\n ret_out = re.search(pattern, file_data)\r\n if ret_out:\r\n return True, ret_out\r\n else:\r\n return False, ret_out", "def fnmatch(pattern, filename) -> bool:\n return _fnmatch(filename, pattern)", "def search_existing_file(path):\n return os.path.isfile(path)", "def check_filename_pattern_list(self, filename_pattern_list):\n Util.print_standout(\"check file exists or not. %s\" % filename_pattern_list)\n for filename_pattern in filename_pattern_list:\n matched_file_name = self._find_by_name_pattern(self.data_dir, filename_pattern)\n if not filename_pattern or not matched_file_name:\n Util.print_error(\"check filename pattern [%s] is can not match.\" % filename_pattern)\n return False\n return True", "def matches_filename(\n path: str,\n patterns: Sequence[str],\n log_message: str,\n logger: logging.Logger,\n) -> bool:\n if not patterns:\n return False\n basename = os.path.basename(path)\n if basename not in {\".\", \"..\"} and fnmatch(basename, patterns):\n logger.debug(log_message, {\"path\": basename, \"whether\": \"\"})\n return True\n\n absolute_path = os.path.abspath(path)\n match = fnmatch(absolute_path, patterns)\n logger.debug(\n log_message,\n {\"path\": absolute_path, \"whether\": \"\" if match else \"not \"},\n )\n return match", "def validate_string_findall(pattern, file):\r\n try:\r\n file_open = open(file, 'r')\r\n except:\r\n logging.info(\"file not found\")\r\n return -1\r\n file_data = file_open.read()\r\n ret_out = re.findall(pattern, file_data)\r\n if ret_out:\r\n return True, ret_out\r\n else:\r\n return False, ret_out", "def fits_file_exists (filepath):\n return validate_file_path(filepath, FITS_EXTENTS)", "def FileExists(file):\n return os.path.exists(file)", "def is_file_exists(self):\n pass", "def does_file_exist(self, fn):\n if True:\n print(f\"-=- {fn} found.\")\n return True\n else:\n print(f\"-!- {fn} not found. 
Try again\")\n return False", "def create_href_checker(pattern, working_dir):\n file_list = os.listdir(working_dir)\n def check_href(href):\n \"\"\"Return whether a url is vlaid or not\"\"\"\n if bool(pattern.match(href)):\n if os.path.basename(urlparse.urlparse(href).path) not in file_list:\n return True\n return False\n return check_href", "def FileCheck(fn):\n try:\n open(fn, \"r\")\n return 1\n except IOError:\n print(\"Error: File does not exist.\")\n return 0", "def exists_file(f):\n if os.path.exists(f):\n return True\n return False", "def exists(self, path):", "def check_file(filename: str):\n if os.path.isfile(filename):\n return True\n else:\n raise FileExistsError", "def test_script_exists(self):\n get_files=os.listdir(\"../../taxonomy/src_files\")\n self.assertIn(\"validate_match_batch.py\", get_files)", "def match_input_files(file: str) -> bool:\n pattern = r\"^input_20\\d\\d-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])\\.feather\"\n return True if re.match(pattern, file) else False", "def _existFile(f):\n\treturn os.path.isfile(f)", "def check_file_exist(self):\n return False", "def MatchPattern(file_path, pattern):\n try:\n with open(file_path, \"r\") as f:\n prog = re.compile(pattern)\n for line in f:\n result = prog.match(line)\n if result:\n return result.groups()\n except IOError:\n pass\n except Exception:\n pass\n\n return None", "def occurs(pattern, filename, match=grep):\n try:\n # look for the first match -- if one is found, we're done\n match(pattern, filename).next()\n return True\n except StopIteration:\n return False", "def file_exists(file_path):\n\n return Path(file_path).is_file()", "def matchPatterns(path, patterns):\n name = os.path.basename(path)\n for p in patterns:\n if fnmatch.fnmatch(name, p):\n return True\n return False", "def match_input_files(file: str) -> bool:\n pattern = (\n r\"^input_population_20\\d\\d-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])\\.feather\"\n )\n return True if re.match(pattern, file) else False", "def _check_file_exists(self, filepath, should_exist=True):\n _, _, stderr = self.execute_command(CommandBuilder.list(filepath))\n compare = operator.ne if should_exist else operator.eq\n if compare(len(stderr.readlines()), 0):\n msg = \"not found\" if should_exist else \"already exists\"\n raise OSError(f\"{filepath} {msg} on server\")", "def log_file_exists(file_name: str):\n if os.path.isfile(get_complete_file_name(file_name)):\n return True\n return False", "def _check_valid_file(self, file):\n\n try:\n _ = open(f\"{file}\")\n except FileNotFoundError:\n raise ValueError", "def fileCheck(filename):\n if not os.path.isfile(filename):\n print('File: ' + filename + ' not found. Exiting...', file=sys.stderr)\n sys.exit(1)" ]
[ "0.73465383", "0.7302456", "0.72112775", "0.71983016", "0.6769893", "0.67092055", "0.66988623", "0.65889895", "0.6551649", "0.64450276", "0.6444477", "0.64354426", "0.6429209", "0.6409745", "0.6392553", "0.6392284", "0.63780326", "0.636964", "0.6360524", "0.63543516", "0.6349137", "0.6343589", "0.6338822", "0.63304216", "0.63142776", "0.63130945", "0.6307027", "0.6304581", "0.6303551", "0.62976646" ]
0.74560165
0
Player's Steam persona (profile) name.
def persona_name(self) -> str: return self._persona_name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def name(self) -> str:\n try:\n return self.stats[\"Player name\"]\n except KeyError as ke:\n logger.debug(ke, exc_info=True)\n logger.warn(\"unable to get player name\")\n return \"\"", "def get_name(self):\n try:\n return self.profile_data[\"name\"]\n except Exception as e:\n error_msg = (\"Failed to retrieve player name: {}\"\n \"\".format(str(e)))\n raise PlayerDataException(error_msg)", "def get_name(self):\n return self._player_name", "def get_name_from_player(player):\r\n return player.name.lower()", "def get_name(self):\n \n # Return the player's name\n return self._name", "def second_name(self, instance):\r\n return instance.user.profile.second_name", "def get_display_name(member):\n if member.nick is None:\n name = member.name\n else:\n name = member.nick\n if User.objects.get(id=member.id).is_ironman:\n name += ' (IM)'\n return name", "def given_name(self):\n profile = self._json['author-profile']\n return profile.get('preferred-name', {}).get('given-name')", "def get_player_name(self):\n return self._player_name", "def __str__(self):\n profile = {\n 'address': self.address,\n 'bio': self.bio,\n 'website': self.website,\n 'hireable': self.hireable,\n 'travel_radius': self.travel_radius,\n 'phone number': self.phone_number,\n 'camera type': self.camera_type,\n 'type of photography': self.photography_type\n }\n return str(self.user.username)", "def get_name(username):\n print(\"We halo \" + username + \" , piye kabare?\")", "def nice_name(self):\n if self.first_name or self.last_name:\n return \"%s %s\" % (self.first_name, self.last_name)\n else:\n key = \"profile.nice_name\"\n cache_key = \"%s.%s.%s\" % (settings.SITE_CACHE_KEY, key, self.pk) \n cached = cache.get(cache_key)\n if cached is None:\n cached = self.user.username\n cache.set(cache_key, cached)\n return cached", "def person_name(self):\n return self._person_name", "def getName(self):\n\n return self.player", "def name(who):\r\n if who == 0:\r\n return 'Player 0'\r\n elif who == 1:\r\n return 'Player 1'\r\n else:\r\n return 'An unknown player'", "def __str__(self):\n return self.user.username + \"'s Profile\"", "def get_full_name(self):\n return self.username", "def get_full_name(self):\n return self.username", "def get_display_name(record):\n return record[\"InstanceProfileName\"]", "def name(self) -> str:\n return self.profile_device.name", "def get_user_name(self):\n full_name = f'{self.f_name} {self.l_name}'\n return full_name", "def steamUsername() -> Optional[str]:\n\ttry:\n\t\twith open( f'{steamDir()}/config/loginusers.vdf' ) as file:\n\t\t\tusers = Property.parse( file, 'loginusers.vdf' ).as_dict()[ 'users' ]\n\t\t# find the first user in the users dict and take is username\n\t\treturn users[ [ usr for usr in users.keys() ][ 0 ] ][ 'personaname' ]\n\texcept ( FileNotFoundError, TokenSyntaxError ):\n\t\treturn None", "def get_user_fullname(self):\n return self.applicant.userprofile.display_name()", "def get_name() -> str:", "def get_displayname(self):\n return self.full_name or self.user.username", "def full_name(self, obj: User) -> str:\n return obj.get_full_name()", "def default_name(self):\n name = f\"Player {self.UID.split('-')[0]}\"\n return name", "def get_short_name(self):\n return f\"{self.first_name} {self.last_name[:1]}\" if self.first_name else self.username", "def display_name(self) -> str:\n return self.requester.username", "def autoname(self):\n\t\tself.name = self.role_profile" ]
[ "0.7409105", "0.7398977", "0.7245066", "0.71149844", "0.7076989", "0.69657075", "0.69252086", "0.6905714", "0.6882069", "0.6875132", "0.68576926", "0.68524104", "0.6836869", "0.6832816", "0.6792989", "0.6789445", "0.676719", "0.676719", "0.67574185", "0.6729789", "0.66886723", "0.66632986", "0.66366017", "0.6631232", "0.65868604", "0.65629405", "0.65621555", "0.6550477", "0.65435445", "0.6542174" ]
0.74546546
0
Create a team for the context's user. An administrator can also perform the action on a user's behalf.
def create_team_action(request): # Create the team. now = datetime.utcnow() user_id = request.context.user_id user = load_user(request.db, user_id) # Select a round based on the user's badges. round_ids = find_round_ids_with_badges(request.db, user['badges'], now) if len(round_ids) == 0: # The user does not have access to any open round. raise ApiError('not qualified for any open round') if len(round_ids) > 1: # XXX The case where a user has badges for multiple open rounds # is currently handled by picking the first one, which is the # one that has the greatest id. This is unsatisfactory. pass round_id = round_ids[0] round_ = load_round(request.db, round_id, now) if not round_['is_registration_open']: raise ApiError('registration is closed') # Create the team. team_id = create_user_team(request.db, user_id, now) # Create a participation. create_participation(request.db, team_id, round_id, now=now) # Ensure the user gets team credentials. reset_user_principals(request) return {'success': True}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post(self):\n req = team_req.parse_args(strict=True)\n curr_user = api.user.get_user()\n if curr_user[\"teacher\"]:\n raise PicoException(\"Teachers may not create teams\", 403)\n req[\"team_name\"] = req[\"team_name\"].strip()\n if not all(\n [\n c in string.digits + string.ascii_lowercase + \" ()+-,#'&!?\"\n for c in req[\"team_name\"].lower()\n ]\n ):\n raise PicoException(\n \"Team names cannot contain special characters other than \"\n + \"()+-,#'&!?\",\n status_code=400,\n )\n\n if req[\"team_name\"] == curr_user[\"username\"]:\n raise PicoException(\"Invalid team name\", status_code=409)\n\n new_tid = api.team.create_and_join_new_team(\n req[\"team_name\"], req[\"team_password\"], curr_user\n )\n res = jsonify({\"success\": True, \"tid\": new_tid})\n res.status_code = 201\n return res", "def create_team():\n # Get the user's id from access token\n uid = get_jwt_identity()\n\n # If no user id, return error\n if not uid:\n return make_response(\n jsonify({'error': 'Could not verify!'}),\n 401,\n {'WWW-Authentication': 'Basic realm=\"Login required!\"'})\n\n # Try to get user from database\n query = User.query.filter_by(public_id=uid)\n\n try:\n user = query.one()\n\n # If no result found, return error\n except NoResultFound:\n return jsonify({'error': 'No result found!'}), 401\n\n # If some other sqlalchemy error is thrown, return error\n except SQLAlchemyError:\n return jsonify({'error': 'Some problem occurred!'}), 400\n\n # Get team data from request\n data = request.get_json()\n\n # Verify that all required team data was sent\n if not data['name'] or not data['group']:\n return make_response(jsonify({'error': 'Missing data!'}), 400)\n\n # Create team object\n team = Team(\n name=data['name'],\n iso_2=data['iso_2'],\n group=data['group'])\n\n # Try to add team to database\n try:\n db.session.add(team)\n db.session.commit()\n\n # If team name already in database, return error\n except IntegrityError:\n return jsonify({\n 'error': 'Team with name already exists'\n }), 400\n\n # If some other sqlalchemy error is thrown, return error\n except SQLAlchemyError:\n return jsonify({'error': 'Some problem occurred!'}), 400\n\n # Serialze the team object and return json response\n team_schema = TeamSchema()\n output = team_schema.dump(team).data\n\n return jsonify({\n 'success': 'Successfully retrieved team.',\n 'team': output\n }), 200", "def team_add(token_user):\n if not json_param_exists('name') or \\\n not json_param_exists('type'):\n abort(400, \"one or more required parameter is missing\")\n name = request.json['name']\n team_type = TeamType.query.filter_by(name=request.json['type']).first()\n if not team_type:\n abort(400, \"invalid team type\")\n\n if team_type.name == 'other_team':\n if not token_user.has_permission('team.create') and \\\n not token_user.has_permission('team.create.elevated'):\n abort(403, 'team creation is not permitted')\n else: # creating any team other than 'other_team' requires elevated\n if not token_user.has_permission('team.create.elevated'):\n abort(403, 'insufficient permissions to create a team of this type')\n\n team = Team(name=name)\n team.team_type = team_type\n\n try:\n get_db().add(team)\n get_db().commit()\n except IntegrityError:\n abort(409, 'team name is already in use')\n\n return '', 201", "def test_teams_add_user_to_team_v2(self):\n pass", "def test_create_new_team(self):\n default_user = AnotherUserFactory(email_confirmed=True)\n token = Token.objects.get(user=default_user)\n self.client.credentials(\n HTTP_AUTHORIZATION=f'Token 
{token.key}')\n\n data = {\n 'name': 'Griffons',\n 'description': 'Only strong souls can be joined us.'\n }\n response = self.client.post(reverse('api:teams-list'), data)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertTrue(Team.objects.filter(name=data['name']).exists())", "def test_teams_add_user_to_team_v1(self):\n pass", "def add_team(self):\n team = Team(self.context, ResourcePath(\"team\", self.resource_path))\n team._parent_collection = self.parent_collection\n qry = ServiceOperationQuery(self, \"team\", None, team, None, team)\n self.context.add_query(qry)\n\n def _construct_create_team_request(request):\n cur_qry = self.context.current_query\n if cur_qry.id == qry.id:\n request.method = HttpMethod.Put\n request.set_header('Content-Type', \"application/json\")\n request.data = json.dumps(request.data)\n\n self.context.before_execute(_construct_create_team_request, False)\n return team", "def test_create_team(self):\n pass", "def create(self, request):\n serializer = data_serializers.CreateTeamSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n request_data = serializer.save()\n try:\n new_team_entity = self.controller.create_team(request_data=request_data)\n serializer = data_serializers.PresentTeamSerializer(new_team_entity)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except domain_exceptions.TeamHasALeader as e:\n return Response(e.message, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def create_team(request):\n if request.method == 'POST':\n email = request.session.get('email', None)\n team_name = request.POST.get('team_name', None)\n team = Team(name=team_name)\n team.save()\n\n message = \"Team created, please use the cool search feature and assign yourself to the team\"\n messages.add_message(request, messages.INFO, message)\n return redirect('teamsapp:teams')\n else:\n raise Http404('Not allowed')", "def team_user_add(token_user, team_id, user_id):\n team = Team.query.get(team_id)\n if team is None:\n abort(404, 'team not found')\n\n # check for permissions to update the team\n if not (token_user.has_permission('team.update.elevated') or\n (token_user.has_permission('team.update') and\n team.has_member(token_user))):\n abort(403, 'insufficient permissions to add user to team')\n\n # don't allow adding to 'single' teams\n if team.team_type == TeamType.query.filter_by(name='single').first():\n abort(400, 'cannot add a user to a \"single\" team')\n\n user = User.query.get(user_id)\n if user is None:\n abort(400, 'invalid user id')\n\n if team.has_member(user):\n abort(409, 'user already in team')\n\n user.teams.append(team)\n get_db().commit()\n\n return '', 201", "async def add_team_member(team_id: str = Path(..., description=\"ID value of the desired team\"),\n user_id: str = Body(..., description=\"ID value of the desired user\", embed=True),\n db_handler: DBHandler = Depends(database_dependency)):\n try:\n inserted_record = await db_handler.insert_team_member(team_id=team_id, user_id=user_id)\n inserted_record = {\"id_team\": inserted_record[0], \"id_user\": inserted_record[1]}\n except DBHandlerException as e:\n return JSONResponse(status_code=400)\n\n return inserted_record", "def create(self, org, data):\n request = self.request_builder('orgs.teams.create', org=org, body=data)\n return self._post(request)", "def test_create_team_creates_survey(self):\n user = User.create(name='User Foo', email='[email 
protected]')\n user.put()\n\n code = 'trout viper'\n\n team_response = self.testapp.post_json(\n '/api/teams',\n {\n 'name': 'Team Foo',\n 'code': code,\n 'program_id': self.ep_program.uid,\n },\n headers=self.login_headers(user),\n )\n team_dict = json.loads(team_response.body)\n\n survey_result = Survey.get(team_id=team_dict['uid'])\n self.assertEqual(len(survey_result), 1)\n survey = survey_result[0]\n\n return user, team_dict", "def test_teams_create(self):\n pass", "def create_team(self, name, owner_id, members=None):\n # Ensure that the owner exists and all team members exist. Will raise\n # exception if user is unknown.\n self.assert_user_exists(owner_id)\n if not members is None:\n for user_id in set(members):\n if not user_id == owner_id:\n self.assert_user_exists(user_id)\n # Ensure that the given team name is uniqe and does not contain too many\n # characters\n sql = 'SELECT * FROM team WHERE name = ?'\n if name is None or name.strip() == '':\n raise err.ConstraintViolationError('missing team name')\n elif len(name.strip()) > 255:\n raise err.ConstraintViolationError('team name contains more than 255 character')\n elif not self.con.execute(sql, (name.strip(),)).fetchone() is None:\n raise err.ConstraintViolationError('team name \\'{}\\' exists'.format(name.strip()))\n # Get unique identifier for the new team.\n team_id = util.get_unique_identifier()\n # Create the new team and add team members. Ensure that at least the\n # team owner is added as a team member.\n sql = 'INSERT INTO team_member(team_id, user_id) VALUES(?, ?)'\n self.con.execute(\n 'INSERT INTO team(id, name, owner_id) VALUES(?, ?, ?)',\n (team_id, name.strip(), owner_id)\n )\n self.con.execute(sql, (team_id, owner_id))\n member_count = 1\n if not members is None:\n for user_id in set(members):\n if not user_id == owner_id:\n self.con.execute(sql, (team_id, user_id))\n member_count += 1\n self.con.commit()\n # Return team descriptor\n return TeamDescriptor(\n identifier=team_id,\n name=name,\n owner_id=owner_id,\n member_count=member_count\n )", "def post(self, team_id, project_id):\n if not TeamService.is_user_team_manager(team_id, token_auth.current_user()):\n return {\n \"Error\": \"User is not an admin or a manager for the team\",\n \"SubCode\": \"UserPermissionError\",\n }, 401\n\n try:\n role = request.get_json(force=True)[\"role\"]\n except DataError as e:\n current_app.logger.error(f\"Error validating request: {str(e)}\")\n return {\"Error\": str(e), \"SubCode\": \"InvalidData\"}, 400\n\n try:\n if not ProjectAdminService.is_user_action_permitted_on_project(\n token_auth.current_user, project_id\n ):\n raise ValueError()\n TeamService.add_team_project(team_id, project_id, role)\n return (\n {\n \"Success\": \"Team {} assigned to project {} with role {}\".format(\n team_id, project_id, role\n )\n },\n 201,\n )\n except ValueError:\n return {\n \"Error\": \"User is not a manager of the project\",\n \"SubCode\": \"UserPermissionError\",\n }, 403", "async def create_team(new_team: BaseTeam, db_handler: DBHandler = Depends(database_dependency)):\n try:\n inserted_record = await db_handler.insert_team(new_team=new_team)\n inserted_record = init_BaseTeam(inserted_record)\n except DBHandlerException as e:\n return JSONResponse(status_code=400)\n\n return inserted_record", "def perform_create(self, serializer):\n team = get_object_or_404(models.Team, pk=self.kwargs.get('pk'))\n\n return serializer.save(team=team)", "def create_challenge_team(request, challenge_pk):\n\tif request.method == \"POST\":\n\t\tteam_name = 
request.POST[\"team-name\"]\n\t\t\n\t\tnew_team = ChallengeTeam()\n\t\tnew_team.team_name = team_name\n\t\t\n\t\tselected_challenge = Challenge.objects.get(pk = challenge_pk)\n\t\tnew_team.challenge = selected_challenge\n\t\t\n\t\tnew_team.save()\n\t\t\n\t\treturn redirect(\"/challenge/view/\" + str(challenge_pk))\n\t\t\n\telse:\n\t\tselected_challenge = Challenge.objects.get(pk = challenge_pk)\n\t\t\n\t\tcontext = RequestContext(request, {\"challenge_name\" : selected_challenge.name})\n\t\treturn render_to_response(\"encourage/create_team.html\", context)", "def _create(self, cursor, row):\n team = Team(name=row['name'])\n team.id = row['id']\n\n for u in self._get_users(cursor, team):\n team.add_user(u)\n\n return team", "def callback_team_create(\n self, request, uri, headers, status_code=201, read_only=True\n ):\n # Disabling unused-argument because this is a callback with\n # required method signature.\n # pylint: disable=unused-argument,too-many-arguments\n self.assertEqual(\n request.headers['Authorization'],\n 'token {0}'.format(self.OAUTH2_TOKEN)\n )\n json_body = json.loads(request.body)\n for item in ['name', 'permission']:\n self.assertTrue(item in json_body.keys())\n if read_only:\n self.assertEqual(json_body['permission'], 'pull')\n else:\n self.assertEqual(json_body['permission'], 'push')\n return (status_code, headers, json.dumps({'id': 2}))", "def new(request):\n template = loader.get_template('team/new.html')\n\n if request.method == 'POST':\n form = TeamForm(request.user, request.POST)\n if form.is_valid():\n team = form.save(commit=False)\n team.year = datetime.datetime.now().year\n if 'logo_image' in request.FILES:\n team.logo = request.FILES['logo_image']\n if request.POST.get('team_info'):\n team.information = request.POST.get('team_info')\n team.save()\n\n # assign team to all members\n request.user.profile.team = team\n request.user.save()\n if form.cleaned_data['member2'] is not '':\n member2 = User.objects.get(pk=form.cleaned_data['member2'])\n member2.profile.team = team\n member2.save()\n if form.cleaned_data['member3'] is not '':\n member3 = User.objects.get(pk=form.cleaned_data['member3'])\n member3.profile.team = team\n member3.save()\n if form.cleaned_data['member4'] is not '':\n member4 = User.objects.get(pk=form.cleaned_data['member4'])\n member4.profile.team = team\n member4.save()\n\n messages.success(request, _('Your team has been created.'))\n\n else:\n if request.user.profile.team is not None:\n return redirect('/team/my-team')\n form = TeamForm(request.user)\n\n context = {'form': form}\n return CustomHttpResponse.send(template, context, request)", "def create(self, request):\n serializer = data_serializers.TeamLeaderOrEmployeeRequestDataSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n request_data = serializer.save()\n try:\n new_team_entity = self.controller.assign_team_leader(request_data=request_data)\n serializer = data_serializers.TeamLeaderPresenterSerializer(new_team_entity)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except (\n domain_exceptions.TeamDoesNotExist,\n domain_exceptions.TeamHasALeader,\n domain_exceptions.EmployeeDoesNotExist\n )as e:\n return Response(e.message, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def test_create_team_user_group(client):\n group = client.create_team_user_group(TEAM_ID, {\n \"name\": \"Python group\",\n \"is_reviewer\": True,\n \"is_admin\": True,\n \"admin_rights\": 
[\"upload\"]\n })\n assert group.team_id == TEAM_ID\n assert group.group_id == NEW_GROUP_ID\n assert group.name == \"Python group\"\n assert group.permissions['is_admin']\n assert group.permissions['is_reviewer']\n assert group.permissions['admin_rights'] == [\"upload\"]", "def creat_team(self):\n te = Teams()\n per = Persons()\n teamlist = []\n for one in per.find({'role':'leader'},{'team_name'}):\n if one['team_name'] not in teamlist:\n teamlist.append(one['team_name'])\n # print len(teamlist)\n for team in teamlist:\n tmp = {'name': '', 'leader_email': '', 'person_emails': []}\n tmp['name'] = team\n tmp['leader_email'] = per.get_one({'team_name':team,'role':'leader'})['email']\n for one in per.find({'team_name':team},{'email'}):\n tmp['person_emails'].append(one['email'])\n print tmp\n search_t = te.get_one({'name':team})\n if search_t is None:\n te.insert_one(tmp)\n else:\n te.update_one({'name':team,'leader_email':'','person_emails':''},tmp,cover=True)", "def create_superuser(self, email, role, teams, password):\n user = self.create_user(\n email,\n password=password,\n )\n\n for team in teams:\n al = AccessEntry(email=email, team=team)\n al.save()\n\n\n user.is_admin = True\n user.save(using=self._db)\n return user", "def test_cannot_create_new_team(self):\n\n data = {\n 'name': 'Griffons',\n 'description': 'Only strong souls can be joined us.'\n }\n response = self.client.post(reverse('api:teams-list'), data)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_handle_assign_as_team_lead(self):\r\n def facade_retrieve_side_effect(*args, **kwargs):\r\n \"\"\"Return a side effect for the mock facade.\"\"\"\r\n if args[0] == Project:\r\n return Project(\"\", [])\r\n else:\r\n calling_user = User(user)\r\n return calling_user\r\n self.mock_facade.retrieve.side_effect = facade_retrieve_side_effect\r\n team = Team(\"GTID\", \"team-name\", \"display-name\")\r\n team.team_leads.add(user)\r\n self.mock_facade.query.return_value = [team]\r\n self.assertTupleEqual(\r\n self.testcommand.handle(\"project assign ID team-name\",\r\n user),\r\n (\"Project successfully assigned!\", 200))", "def test_teams_add_user_to_team_by_batch_v1(self):\n pass" ]
[ "0.760319", "0.75646865", "0.7414623", "0.693689", "0.6909473", "0.6858494", "0.6830383", "0.6734105", "0.6727118", "0.668748", "0.6665157", "0.66149485", "0.6561478", "0.65193206", "0.64911103", "0.648043", "0.6461089", "0.64321554", "0.64144987", "0.64086944", "0.63389486", "0.6322869", "0.63138044", "0.62904567", "0.6200939", "0.6197344", "0.6194254", "0.6191913", "0.61832976", "0.6176081" ]
0.7725349
0
Evaluate plugin and vcf compatibility
def check_plugin(vcf_reader, plugin): # Always use core plug-in plugins = ['core'] # Collect supplied plugin(s) [plugins.append(item) for item in plugin] # Create set plugins = list(set(plugins)) # Evaluate vcf and plugin compatibility for plugin in plugins: if plugin == "core": from pScout.plugin.plugin_reader import core ret = core(vcf_reader, "pScout/plugin/get_core.ini") if ret is 1: # Not compatible exit() return plugins
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_client_custom_plugin():\n client = ConfigureClients(plugins=[PluginVipCustomisation])\n assert client.plugins == [PluginVipCustomisation]", "def test_check_status_code_returns_true():\n plugin_instance = PluginVipCustomisation()\n assert plugin_instance._check_status_code('replace me with real xml') == True", "def test_check_detail_code_returns_true():\n plugin_instance = PluginVipCustomisation()\n assert plugin_instance._check_detail_code('replace me with real xml') == True", "def check_supported_features(self):", "def junos_cve_query(version):\n pass", "def check(self, runtime):", "def request_plugins(self):", "def get_plugin_interface(self):", "def version_check(self):\n # anchor_matcher --> matcher\n if hasattr(self, \"anchor_matcher\"):\n self.matcher = self.anchor_matcher\n if hasattr(self, \"head_in_features\"):\n self.in_features = self.head_in_features\n if hasattr(self, \"test_topk_candidates\"):\n self.topk_candidates = self.test_topk_candidates\n if hasattr(self, \"test_score_thresh\"):\n self.score_threshold = self.test_score_thresh", "def test(self, plugin):\n plug = plugin_source.load_plugin(plugin)\n plug.test()", "def check_version(ctx, _, value):\n if not value or ctx.resilient_parsing:\n return\n\n click.echo(f\"geocube v{importlib.metadata.version('geocube')}\")\n\n ctx.exit()", "def _check_caffe_version(self, caffe):\n if 'deconv_from_layer' in dir(caffe.classifier.Classifier):\n print \"debug[caffe]: caffe version provides all required functions. Good!\"\n else:\n print \"warning: Function 'deconv_from_layer' is missing in caffe. Probably you are using a wrong caffe version. Some functions will not be available!'\"", "def compare_cow_transport_algorithms():\n # TODO: Your code here\n pass", "def main():\n # 1.get the input value\n parser = optparse.OptionParser()\n parser.add_option('-D', '--data-path', help='data path for check', dest='data_path')\n\n options, args = parser.parse_args()\n if not options.data_path:\n parser.print_help()\n sys.exit(-1)\n data_path = options.data_path\n vendor, region, version, is_level0 = Util.parse_rdf_version(os.path.basename(data_path))\n # 2.check\n if region and version:\n success_components_check_info_dic, failed_components_check_info_dic = check_region_components(region, is_level0, data_path)\n\n if not failed_components_check_info_dic:\n return True\n return False", "def test_plugins():\n assert plugins.template.plugin_test() == True\n assert plugin_test() == True", "def core_plugin(self):\n pass", "def is_scalac_plugin(self):\r\n return self.has_label('scalac_plugin')", "def load():\n return VirtualDatacenterPlugin()", "def check(self, verifier = None):\n for (k, v) in self._versions.items():\n #print(k,v)\n if k == 'KLEE':\n # check KLEE only if we are using KLEE\n if verifier.startswith('klee'):\n vers = self._get_klee_version()\n expected = self._decode(v)\n self._check(k, expected, vers)\n elif k == 'sbt-slicer':\n vers = self._get_slicer_version()\n expected = self._decode(v[:8])\n self._check(k, expected, vers)\n elif k == 'sbt-instrumentation':\n vers = self._get_instr_version()\n expected = self._decode(v[:8])\n self._check(k, expected, vers)", "def server_plugin():", "def nfvi_compute_initialize(config, pool):\n global _compute_plugin\n\n if _compute_plugin is None:\n _compute_plugin = NFVIComputePlugin(config['namespace'], pool)\n if _compute_plugin.ready_to_initialize(config['config_file']):\n _compute_plugin.initialize(config['config_file'])\n return True\n else:\n return False", "def support(self):", 
"def rpc_match():", "def plugh():", "def vrules(self):\n ...", "def __call__ ( self , x , *args ) :\n #\n ## 1) evaluate the function \n val = self.func_eval ( x , *args )\n #\n ## no uncertainties? \n if isinstance ( x , num_types ) : return VE ( val , 0 )\n # ignore small or invalid uncertanties \n elif 0 >= x.cov2() or iszero ( x.cov2() ) : return VE ( val , 0 )\n #\n ## 2) evaluate the derivative\n dfun = self.__derivative\n d = dfun ( float ( x ) , *args ) \n ## 3) calculate the variance \n cov2 = d * d * x.cov2()\n ## 4) get a final result \n return VE ( val , cov2 )", "def test_discoverable(self):\r\n plugins = getPlugins(IProcessor)\r\n lmath = [p for p in plugins if p.name == \"mlore\"]\r\n self.assertEqual(len(lmath), 1, \"Did not find math lore plugin: %r\" % (lmath,))", "def __virtual__():\n if salt.utils.path.which(\"apf\") is None:\n return (False, \"The apf execution module cannot be loaded: apf unavailable.\")\n elif not IPTC_IMPORTED:\n return (\n False,\n \"The apf execution module cannot be loaded: python-iptables is missing.\",\n )\n else:\n return True", "def check():", "def plugin_two():\n return \"two\"" ]
[ "0.5617833", "0.53539044", "0.5324882", "0.5316147", "0.52079403", "0.520496", "0.5125704", "0.5116243", "0.50876707", "0.49684304", "0.49534646", "0.48966265", "0.48424554", "0.4832243", "0.4799134", "0.47725004", "0.47721884", "0.47630158", "0.4759951", "0.4743248", "0.47369474", "0.4727059", "0.46975204", "0.46972167", "0.46944672", "0.4669396", "0.46640497", "0.46531332", "0.46503514", "0.46391124" ]
0.6621885
0
print the details of the given cluster object
def print_cluster_attributes(self, objects): print("\n") print(("ClusterName".ljust(35),":",objects.ClusterName.value())) print(("Repository Disk".ljust(35),":", \ objects.RepositoryDisk.PhysicalVolume[0].VolumeName.value())) print("\nNodes in the cluster :\n-----------------------") for Node in objects.Node.Node : print(("HostName".ljust(35),":",\ Node.HostName.value())) print(("PartitionID".ljust(35),":", \ Node.PartitionID.value())) print()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_cluster(self):\n if self.controller.cluster:\n self.print_object(\n 'cluster', ('id', 'name', 'status'), self.controller.cluster\n )\n else:\n print(\"There is no cluster.\")", "def print_cluster(self):\n print('Cluster', self.number)\n for pattern in self.patterns:\n pattern.print_pattern()", "def show_overview(self) -> None:\n print(f\"\\n\\nCluster overview:\")\n all_clusters = self.get_all_clusters()\n print(f\" - Total of {len(all_clusters)} clusters\")\n if all_clusters:\n cluster_lengths = [len(v) for v in all_clusters.values()]\n print(f\" - Average number of cluster-labels: {round(sum(cluster_lengths) / len(cluster_lengths), 2)}\")", "def detail_cluster(cluster_name, znode):\n\n _cluster_info = dict()\n _cluster_info.update(app.clusters[cluster_name].__dict__)\n _cluster_info.pop(\"auth_data\", None)\n _cluster_info[\"connection\"] = app.managers[cluster_name]._client.state\n resp = Response(json.dumps(_cluster_info),\n status=200,\n mimetype=\"application/json\")\n return resp", "def print_df(cluster_df):\n import sys\n\n cluster_df.to_string(sys.stdout, index=False, header=True)\n # Print an empty line to finish\n print()", "def __str__(self):\n return \"Cluster\"", "def show_clusters(self):\n cluster_ids = [\n self.controller.cluster and self.controller.cluster['id']\n ]\n self.print_list(\n ('id', 'name', 'status'), self.controller.get_clusters(),\n lambda x: cluster_ids.index(x['id'])\n )", "def print_cluster_summary(algo, i):\n assert algo in ['DBSCAN', 'KMeans', 'DBSCAN_topics', 'KMeans_topics']\n \n cluster_df = apps_df.copy()\n cluster_df = cluster_df[cluster_df[algo] == i]\n print('Cluster {} consists out of {} apps.'.format(str(i), str(cluster_df.shape[0])))\n titles = list(cluster_df['title'])\n print('The apps are:\\n {}'.format('\\n\\t'.join(titles)))", "def show_cluster(name: str) -> Cluster:\n environment = EnvironmentProvider().environment\n return environment.clusters[name]", "def collect_cluster_info(output_dir, k8s_cli):\n collect_helper(output_dir, cmd=\"{} cluster-info\".format(k8s_cli),\n file_name=\"cluster_info\", resource_name=\"cluster-info\")", "def __repr__(self):\n\n return \"<Cluster id=%s>\" % (self.id)", "def print_infrastructure(aws_key, aws_secret):\n _, _, _, redshift_client = create_clients(aws_key, aws_secret)\n for k, v in get_cluster_properties(redshift_client):\n print(k, v)", "def print_cluster(self, cluster, value):\n total = 0\n ham = 0\n spam = 0\n for message in cluster:\n if self.spamorham[self.ids[message]] == 'ham':\n ham += 1\n elif self.spamorham[self.ids[message]] == 'spam':\n spam += 1\n else:\n print(\"ERROR!\")\n total += 1\n\n print(\"Total number of messages in the {0} cluster: {1}\\n\"\n \"Percentage of SPAM messages in the {2} cluster: {3}\\n\"\n \"Percentage of HAM messages in the {4} cluster: {5}\".format(value, total, value,\n str((float(spam) / total) * 100), value,\n str((float(ham) / total) * 100)))", "def print_cluster_properties(redshift_client):\n cluster_properties = get_cluster_properties(redshift_client)\n print('HOST: %s' % cluster_properties['Endpoint']['Address'])\n property_keys = [\n 'ClusterIdentifier', 'NodeType', 'ClusterStatus', 'MasterUsername',\n 'DBName', 'Endpoint', 'NumberOfNodes', 'VpcId'\n ]\n property_tuples = [\n (k, v) for k, v in cluster_properties.items() if k in property_keys\n ]\n dfshow = pd.DataFrame(data=property_tuples, columns=['Key', 'Value'])\n print(dfshow)", "def __str__(self):\n return \"Clustering\"", "def show_vsan_cluster(self, cluster_id):\n url = \"clusters/%s\" 
% str(cluster_id)\n resp, body = self.get(url)\n body = json.loads(body)\n self.expected_success(200, resp.status)\n return service_client.ResponseBody(resp, body['cluster'])", "def get_cluster_info(self) -> Dict[str, Any]:\n pass", "def show(self, notebook=notebook_display):\n print(\"\\nCluster Ensemble:\")\n if notebook is True:\n display(self._df)\n elif notebook is False:\n print(self._df)\n self.massrich_parameters()", "def print_tasks_by_cluster_instance(self, cluster_address):\n cluster_address = cluster_address.cast(uCluster_ptr_type)\n task_root = (\n cluster_address['tasksOnCluster']['root']\n )\n\n if task_root == 0x0:\n print(\n 'There is no tasks for cluster at address: {}'.format(cluster_address)\n )\n return\n\n print('{:>4}{:>20}{:>18}{:>25}'.format('ID', 'Task Name', 'Address', 'State'))\n curr = task_root\n task_id = 0\n\n while True:\n print(\n ('{:>4}{:>20}{:>18}{:>25}'.format(task_id, curr['task_']['name'].string(),\n str(curr['task_'].reference_value())[1:],\n str(curr['task_']['state']))\n )\n )\n\n curr = curr['next'].cast(uBaseTaskDL_ptr_type)\n task_id += 1\n if curr == task_root:\n break", "def print_cell_information(obj_ase_cell):\n # print the lattice vectors\n print('a1=',obj_ase_cell.cell[0,:])\n print('a2=',obj_ase_cell.cell[1,:])\n print('a3=',obj_ase_cell.cell[2,:])\n for i,a in enumerate(obj_ase_cell):\n print(i,a.symbol,a.position)", "def describe_cluster_response():\n return {\n \"cluster\": {\n \"status\": \"ACTIVE\",\n \"endpoint\": \"https://endpoint.amazonaws.com\",\n \"name\": EXAMPLE_NAME,\n \"certificateAuthority\": {\n \"data\": \"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpWR1Z6ZEdsdVp5QkVZWFJoRFFwVVpYTjBhVzVuSUVSaGRHRU5DbFJsYzNScGJtY2dSR0YwWVEwS2EzVmlaWEp1WlhSbGN6QWVGdzBLVkdWemRHbHVaeUJFWVhSaERRcFVaWE4wYVc1bklFUmhkR0ZWQkFNVERRcHJkV0psY201bGRHVnpNQUVpTUEwS1ZHVnpkR2x1WnlCRVlYUmhEUXBVWlhOMGFXNW5JRVJoZEdFTkNsUmxjM1JwYm1jZ1JHRjBZY3UvR1FnbmFTcDNZaHBDTWhGVVpYTjBhVzVuSUVSaGRHRXl3clZqeEpWNjNwNFVHRmpZdHdGR1drUldJVkV1VkdWemRHbHVaeUJFWVhSaGJzT0MxSVJiTDhPd0lpMVhiWGg2VkdWemRHbHVaeUJFWVhSaFpXVndTTk9VVUZKNmN5QWJaaFpnWVNkTUV3MEtGMVJsYzNScGJtY2dSR0YwWVFZRFZSMFBBUUVFQkFNQ0FsUmxjM1JwYm1jZ1JHRjBZUUV3RFFvR0NTcElEUXBVWlhOMGFXNW5JRVJoZEdGcEgxc1pPRTNMa3lrMU9DWUNHUloyTEZjM3paOCtHell3WEZSbGMzUnBibWNnUkdGMFlYMUR5NjFNMVlGV1AxWVRIMVJsYzNScGJtY2dSR0YwWVd0aE5oMVphM2dWUDBGaGNSWjdKaW9oZVc4N1JsUmxjM1JwYm1jZ1JHRjBZUVpIVHd4NE9IdzZmZz09DQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t\"\n },\n \"roleArn\": \"arn:aws:iam::111222333444/eksRole\",\n \"resourcesVpcConfig\": {\n \"subnetIds\": [\n \"subnet-00000000000000000\",\n \"subnet-00000000000000001\",\n \"subnet-00000000000000002\"\n ],\n \"vpcId\": \"vpc-00000000000000000\",\n \"securityGroupIds\": [\n \"sg-00000000000000000\"\n ]\n },\n \"version\": \"1.10\",\n \"arn\": \"arn:aws:eks:region:111222333444:cluster/\" + EXAMPLE_NAME,\n \"createdAt\": 1500000000.000\n }\n }", "def __repr__(self):\n\n return \"<TermCluster id=%d term=%s cluster_id=%d>\" % (\n self.termcluster_id, self.term, self.cluster_id)", "def __repr__(self):\n rep = \"alg_cluster.Cluster(\"\n rep += str(self._fips_codes) + \", \"\n rep += str(self._horiz_center) + \", \"\n rep += str(self._vert_center) + \", \"\n rep += str(self._total_population) + \", \"\n rep += str(self._averaged_risk) + \")\"\n return rep", "def __repr__(self):\n rep = \"alg_cluster.Cluster(\"\n rep += str(self._fips_codes) + \", \"\n rep += str(self._horiz_center) + \", \"\n rep += str(self._vert_center) + \", \"\n rep += str(self._total_population) + \", \"\n rep += str(self._averaged_risk) + \")\"\n 
return rep", "def dump(cluster):\n\n clovr = pymongo.Connection().clovr\n clusters = clovr.clusters\n\n\n clusters.save(updateDict(cluster,\n dict(_id=cluster['name'])))", "def cluster_info(self, target_nodes: Optional[\"TargetNodesT\"] = None) -> ResponseT:\n return self.execute_command(\"CLUSTER INFO\", target_nodes=target_nodes)", "def print_tasks_by_cluster_address(self, cluster_address):\n # Iterate through a circular linked list of tasks and print out its\n # name along with address associated to each cluster\n\n # convert hex string to hex number\n try:\n hex_addr = int(cluster_address, 16)\n except:\n print_usage(self.usage_msg)\n return\n\n cluster_address = gdb.Value(hex_addr)\n self.print_tasks_by_cluster_instance(cluster_address)", "def __repr__(self):\n return (\n f'GalaxyCluster {self.unique_id}: '\n f'(ra={self.ra}, dec={self.dec}) at z={self.z}'\n f'\\n> with columns: {self._str_colnames()}'\n f'\\n> {len(self.galcat)} source galaxies'\n )", "def invoke(self, arg, from_tty):\n\n cluster_root = get_cluster_root()\n if not cluster_root:\n print('Cannot get the root of the linked list of clusters')\n return\n curr = cluster_root\n print('{:>20}{:>18}'.format('Name', 'Address'))\n\n while True:\n print(\n ('{:>20}{:>18}'.format(curr['cluster_']['name'].string(),\n str(curr['cluster_'].reference_value())[1:]))\n )\n curr = curr['next'].cast(uClusterDL_ptr_type)\n if curr == cluster_root:\n break", "def describe_cluster_no_status_response():\n return {\n \"cluster\": {\n \"endpoint\": \"https://endpoint.amazonaws.com\",\n \"name\": EXAMPLE_NAME,\n \"certificateAuthority\": {\n \"data\": \"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpWR1Z6ZEdsdVp5QkVZWFJoRFFwVVpYTjBhVzVuSUVSaGRHRU5DbFJsYzNScGJtY2dSR0YwWVEwS2EzVmlaWEp1WlhSbGN6QWVGdzBLVkdWemRHbHVaeUJFWVhSaERRcFVaWE4wYVc1bklFUmhkR0ZWQkFNVERRcHJkV0psY201bGRHVnpNQUVpTUEwS1ZHVnpkR2x1WnlCRVlYUmhEUXBVWlhOMGFXNW5JRVJoZEdFTkNsUmxjM1JwYm1jZ1JHRjBZY3UvR1FnbmFTcDNZaHBDTWhGVVpYTjBhVzVuSUVSaGRHRXl3clZqeEpWNjNwNFVHRmpZdHdGR1drUldJVkV1VkdWemRHbHVaeUJFWVhSaGJzT0MxSVJiTDhPd0lpMVhiWGg2VkdWemRHbHVaeUJFWVhSaFpXVndTTk9VVUZKNmN5QWJaaFpnWVNkTUV3MEtGMVJsYzNScGJtY2dSR0YwWVFZRFZSMFBBUUVFQkFNQ0FsUmxjM1JwYm1jZ1JHRjBZUUV3RFFvR0NTcElEUXBVWlhOMGFXNW5JRVJoZEdGcEgxc1pPRTNMa3lrMU9DWUNHUloyTEZjM3paOCtHell3WEZSbGMzUnBibWNnUkdGMFlYMUR5NjFNMVlGV1AxWVRIMVJsYzNScGJtY2dSR0YwWVd0aE5oMVphM2dWUDBGaGNSWjdKaW9oZVc4N1JsUmxjM1JwYm1jZ1JHRjBZUVpIVHd4NE9IdzZmZz09DQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t\"\n },\n \"roleArn\": \"arn:aws:iam::111222333444/eksRole\",\n \"resourcesVpcConfig\": {\n \"subnetIds\": [\n \"subnet-00000000000000000\",\n \"subnet-00000000000000001\",\n \"subnet-00000000000000002\"\n ],\n \"vpcId\": \"vpc-00000000000000000\",\n \"securityGroupIds\": [\n \"sg-00000000000000000\"\n ]\n },\n \"version\": \"1.10\",\n \"arn\": \"arn:aws:eks:region:111222333444:cluster/\" + EXAMPLE_NAME,\n \"createdAt\": 1500000000.000\n }\n }" ]
[ "0.7697402", "0.68622345", "0.67253536", "0.66951793", "0.6661364", "0.6646771", "0.6497768", "0.6483208", "0.64796126", "0.64762145", "0.6474442", "0.64543897", "0.6430488", "0.6301079", "0.6287944", "0.6264077", "0.6209574", "0.6173595", "0.612323", "0.6118218", "0.61159134", "0.6097176", "0.6012123", "0.6012123", "0.59945494", "0.59679925", "0.59277546", "0.5922493", "0.5897002", "0.5880937" ]
0.81189764
0
Returns a dict, key industry, value set of stock symbols. Parses the US_Large_Cap.txt file, parses the WIKI-datasets-codes.csv file,
def parse_US_Large_Cap(): stocks = set() industries = defaultdict(set) with open('US_Large_Cap.txt') as f: for line in f: industry_match = re.match(r'.*--\s*(.*)', line) if industry_match: ind = industry_match.group(1) stock_match = re.match(r'.*(WIKI/\S*)', line) if stock_match: s = stock_match.group(1) industries[ind].add(s) stocks.add(s) print("Distinct Industries = {}".format(len(industries))) print("Distinct Stocks = {}".format(len(stocks))) quandl_wiki_stocks = set() with open('WIKI-datasets-codes.csv') as f: for line in f: stock_match = re.match(r'.*(WIKI/[^,]*)', line) if stock_match: s = stock_match.group(1) quandl_wiki_stocks.add(s) print("Distinct Quandl Wiki Stocks = {}".format(len(quandl_wiki_stocks))) # remove stocks not in quandl_wiki_stocks for v in industries.values(): v &= quandl_wiki_stocks return industries
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_stock_list():\n print(\"Reading list of stocks.\")\n stocks = {}\n with open(STOCKS_FILE) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n stocks[row['Symbol']] = (row['Name'], row['Sector'])\n return stocks", "def get_data_from_file(file_name):\n stocks = []\n with open(file_name) as fh:\n keys = line2words(fh.readline()) # assigns the first line of the text document as the keys\n for line in fh: # reads the subsequent lines and assigns them as the as the values\n stocks.append(dict(zip(keys, line2words(line))))\n return stocks", "def get_stock_data():\n if not os.path.exists('./catalog/stock_data'):\n os.mkdir('./catalog/stock_data')\n \n inventory_data = {}\n inventory_file = './catalog/stock_data/inventory-bro.txt'\n \n download_data = True\n if os.path.exists(inventory_file):\n # Check that inventory file is no more than 1 day old\n filestat = os.stat(inventory_file)\n tm = datetime.datetime.fromtimestamp(filestat.st_mtime)\n today = datetime.datetime.now()\n dt = today - tm\n if dt.days < 1:\n download_data = False\n \n if download_data:\n # Get inventory data from ftp site\n from ftplib import FTP_TLS\n print 'Downloading inventory-bro.txt ....'\n ftps = FTP_TLS('ftp.appareldownload.com')\n ftps.login('Br0d3r', 'Br0d3r2oll')\n ftps.prot_p()\n #ftps.retrlines('LIST')\n ftps.retrbinary('RETR inventory-bro.txt', open(inventory_file, 'wb').write)\n ftps.quit()\n \n print \"Parse inventory-bro.txt ... \"\n first_row = None\n for row in csv.reader(open(inventory_file, 'rb')):\n itemRef = row[4].lower()\n if itemRef == 'style number':\n # save first row to be used as column header\n first_row = row\n continue\n \n source_attribs = [{'attribute_type': 'source', 'attribute_value': 'broderbros'}]\n \n inventory_data.setdefault(itemRef, [])\n \n color = row[8].lower()\n size = row[10].lower()\n \n # Warehouses starts at column 13\n for i in range(13, len(first_row)):\n wh_name = first_row[i]\n options = [\n {'option_type': 'color', 'option_value': color, 'attributes': []},\n {'option_type': 'size', 'option_value': size, 'attributes': []},\n {'option_type': 'warehouse', 'option_value': wh_name, 'attributes': source_attribs, 'shared': True},\n {'option_type': 'vendor', 'option_value': 'broderbros', 'attributes': source_attribs, 'shared': True},\n ]\n inventory_data[itemRef].append({'options': options, 'inventory': row[i]})\n \n # Pricing data\n pricing_tarfile = \"./catalog/stock_data/bro-AllStyles_R06.tar.gz\"\n download_data = True\n if os.path.exists(pricing_tarfile):\n # Check that file is no more than 1 day old\n filestat = os.stat(pricing_tarfile)\n tm = datetime.datetime.fromtimestamp(filestat.st_mtime)\n today = datetime.datetime.now()\n dt = today - tm\n if dt.days < 1:\n download_data = False\n \n if download_data:\n print 'Downloading items.csv for price data ....'\n br = utils.create_browser(1, 2)\n br.open(\"https://www.broderbros.com/cgi-bin/online/webbro/bro-index.w\")\n try:\n # Fill login form\n br.select_form(name = 'frmLogin')\n frm = br.form\n \n ctrl = frm.find_control('userName')\n ctrl.value = USERNAME\n ctrl = frm.find_control('password')\n ctrl.value = PASSWORD\n \n # Submit login form\n if TESTRUN: print 'Submit Login Form'\n \n br.select_form(name = 'frmLogin')\n br.submit()\n except:\n print \"Login form does not exist, please check URL, downloaded html or site is down\"\n return None\n try:\n tar_url = \"https://www.broderbros.com/cgi-bin/download/webshr/prod-info-view.w?f=bro-AllStyles_R06.tar.gz\"\n br.retrieve(tar_url, 
pricing_tarfile)\n except:\n print \"Error when downloading pricing file\"\n return None\n \n try:\n tar = tarfile.open(pricing_tarfile)\n for member in tar.getmembers():\n member.name = member.name.split('/')[-1] # strip directory from filename\n tar.extractall('catalog/stock_data/bro-AllStyles_R06')\n tar.close()\n except:\n print \"Error when extracting items.csv\"\n return None\n \n f_object = open('./catalog/stock_data/bro-AllStyles_R06/items_R06.csv', 'rb')\n #~ f_object = open('items_R06.csv', 'rb')\n \n print \"Parse items_R06.csv ... \"\n for row in csv.reader(f_object):\n itemRef = row[7].lower()\n if itemRef == 'style code':\n continue\n \n size = row[8].lower()\n color = row[11].lower()\n price = row[18]\n \n item_data = inventory_data.get(itemRef)\n if not item_data:\n continue\n # Find data with same size and color\n for var_dict in item_data:\n options = var_dict['options']\n opt_dict = {}\n for opt in options:\n opt_type = opt['option_type']\n opt_value = opt['option_value']\n if opt_type == 'size':\n opt_dict['size'] = opt_value\n elif opt_type == 'color':\n opt_dict['color'] = opt_value\n if opt_dict['size'] == size and opt_dict['color'] == color:\n var_dict['price'] = [{'price_type': 'retail_price', 'price': price}]\n \n f_object.close()\n \n try:\n shutil.rmtree(\"./catalog/stock_data/bro-AllStyles_R06\")\n #~ os.remove(\"./catalog/stock_data/bro-AllStyles_R06.tar.gz\")\n except:\n pass\n \n return inventory_data", "def read_names_into_dict():\n d = dict()\n with open(\"SP_500_firms.csv\") as csvfile:\n input_file = csv.DictReader(csvfile)\n for row in input_file:\n #print(row)\n d[row['Symbol']] = [row['Name'],row['Sector']]\n return d", "def _load_price_csv2(symbol):\n history = _load_pricehistory(symbol)\n return {k: v[\"open\"] for k, v in history.items()}", "def read_file(file):\n \n dictionary = {}\n csv_fp = csv.reader(file)\n #L[46] = manufacturer, L[63] = year\n #L[4]= city mileage, L[34]=highway mileage\n for line in csv_fp:\n #Skip the headings and the year 2017\n if (not (line[46] == 'make')) and (not (line[63] == '2017')):\n if line[46] in dictionary:\n #Add the city and highway mileage if the year has been made\n if line[63] in dictionary[line[46]]:\n dictionary[line[46]][line[63]][0] += [int(line[4])]\n dictionary[line[46]][line[63]][1] += [int(line[34])]\n #Add the year and data if it was not made previously\n else:\n dictionary[line[46]][line[63]] = [[int(line[4])],\\\n [int(line[34])]]\n #Adds a new manufacturer\n else:\n dictionary[line[46]] = {line[63]:[[int(line[4])],\\\n [int(line[34])]]}\n return dictionary", "def readFile(filename):\n with open(filename) as f:\n name = f.readline().rstrip(\"\\n\")\n d={}\n for line in f:\n line = line.rstrip(\"\\n\")\n (itemName, Quantity, Price)=line.split(\" \")\n d[itemName]=[int(Quantity),int(Price)]\n return name, d", "def get_52_week_high_low_for_stocks(stocks):\n print(\"Fetching stock quotes.\")\n # Build a full list of symbols\n symbols = []\n for key in stocks.keys():\n symbols.append(key)\n\n num_of_batches = int(len(symbols)/BATCH_SIZE) + 1\n\n all_stocks_df = pandas.DataFrame()\n\n #all_stocks_df = pandas.DataFrame()\n\n # Get quotes for all the stocks in batches\n for i in range(0, num_of_batches):\n print(\"Fetching quotes in batch: \" + str(i+1) + \"/\" + str(num_of_batches))\n start = i*BATCH_SIZE\n end = start + BATCH_SIZE\n batch_symbols = symbols[start: end]\n batch_symbols_query = '+'.join(batch_symbols)\n request_url = YAHOO_FINANCE_API + \"?\" + YAHOO_FINANCE_SYMBOL_PARAM + \"=\" + 
batch_symbols_query +\\\n \"&\" + YAHOO_FINANCE_FORMAT_PARAM + \"=\" + YAHOO_FINANCE_SYMBOL_PARAM + YAHOO_FINANCE_52_ASK_PRICE +\\\n YAHOO_FINANCE_BID_PRICE + YAHOO_FINANCE_52_CLOSE_PRICE + YAHOO_FINANCE_52_WEEK_LOW +\\\n YAHOO_FINANCE_52_WEEK_HIGH + YAHOO_FINANCE_52_LOW_CHANGE +\\\n YAHOO_FINANCE_52_HIGH_CHANGE + YAHOO_FINANCE_DIV_YIELD\n r = requests.get(request_url)\n\n # Read the returned CSV as a pandas table\n # Returned format is NAME,ASK,BID,52-wLow,52-wHigh\n df = pandas.read_table(StringIO(r.text), header=None, sep=',')\n all_stocks_df = all_stocks_df.append(df, ignore_index=True)\n\n # Delay to slow down things\n time.sleep(1)\n\n\n # Assign columns\n print(\"Stock quotes have been fetched. Beginning analysis...\")\n all_stocks_df.columns=['symbol', 'ask', 'bid', 'close', '52w-low', '52w-high', '52w-low-change', '52w-high-change', 'div-iteryield']\n\n # Add the percent change columns\n all_stocks_df['52w-%-low-change'] = all_stocks_df['52w-low-change']/all_stocks_df['52w-low']*100\n all_stocks_df['52w-%-high-change'] = all_stocks_df['52w-high-change'] / all_stocks_df['52w-high'] * 100\n\n # Add the names and sectors\n all_stocks_df['name'] = \"\"\n all_stocks_df['sector'] = \"\"\n for index, row in all_stocks_df.iterrows():\n all_stocks_df.loc[index, 'name'] = stocks[row['symbol']][0]\n all_stocks_df.loc[index, 'sector'] = stocks[row['symbol']][1]\n\n\n # Process the received quotes\n sorted_values = all_stocks_df.sort_values('52w-%-low-change')\n\n # Done\n print(\"Analysis completed.\")\n return sorted_values", "def retrieve_company_data(self):\n self.set_stock_sym_append_str('')\n self.set_stock_retrieval_type('all') #'all', watcher\n self.load_stock_symbol_fr_file()", "def get_stocks():\n print(\"fetching remote...\")\n code_dataframes = pd.read_html(\n 'http://kind.krx.co.kr/corpgeneral/corpList.do?method=download&searchType=13', header=0)[0]\n # 우리가 필요한 것은 회사명과 종목코드이기 때문에 필요없는 column들은 제외해준다.\n print(\"parsing and filtering data...\")\n code_dataframes.종목코드 = code_dataframes.종목코드.map('{:06d}'.format)\n # 한글로된 컬럼명을 영어로 바꿔준다.\n code_dataframes = code_dataframes[['회사명', '종목코드']]\n code_dataframes = code_dataframes.rename(\n columns={'회사명': 'name', '종목코드': 'code'})\n codes = code_dataframes['code']\n names = code_dataframes['name']\n stocks = []\n for i in range(len(names)):\n stocks.append({\n 'name': names[i],\n 'code': codes[i]\n })\n return stocks", "def getSymbolMap():\n name = os.path.join(os.path.dirname(__file__), 'nasdaq_nasdaqcom.csv')\n symbols = TickerSymbols(name)\n return symbols.getNameToTicker()", "def get_BM_data(filename):\n\n data = {}\n with open(filename, 'r') as f:\n f.readline() #discard first line\n line = f.readline()\n for k in ('name', 'gender', 'age', 'division',\n 'country', 'time'):\n data[k] = []\n while line != '':\n split = line.split(',')\n data['name'].append(split[0])\n data['gender'].append(split[1])\n data['age'].append(int(split[2]))\n data['division'].append(int(split[3]))\n data['country'].append(split[4]) \n data['time'].append(float(split[5][:-1])) #remove \\n\n line = f.readline()\n return data", "def read_stock(db, openfile):\n pass", "def getBMData(filename):\n\n data = {}\n f = open(filename)\n line = f.readline() \n data['name'], data['gender'], data['age'] = [], [], []\n data['division'], data['country'], data['time'] = [], [], []\n while line != '':\n split = line.split(',')\n data['name'].append(split[0])\n data['gender'].append(split[1])\n data['age'].append(int(split[2]))\n data['division'].append(int(split[3]))\n 
data['country'].append(split[4]) \n data['time'].append(float(split[5][:-1])) #remove \\n\n line = f.readline()\n f.close()\n return data", "def open_file(path):\r\n f = open(path, encoding='utf-8', errors='ignore')\r\n data = f.readlines()\r\n lst_with_data = []\r\n for i in data:\r\n i = i.replace('\"', ' ').replace(\"\\t\", ' ').replace(\"\\n\", \" \").replace(\"'\", ' ').split(' ')\r\n lst_with_data.append(i)\r\n res_lst = [] \r\n for i in lst_with_data:\r\n append_lst = []\r\n for j in i:\r\n if j.isdigit() or j == \"-\":\r\n append_lst.append(j) \r\n if len(append_lst) != 0: \r\n res_lst.append(append_lst) \r\n res_lst = res_lst[1:]\r\n res = [] \r\n for i in res_lst:\r\n if len(i) != len(res_lst[0]):\r\n i = i[1:]\r\n res.append(i) \r\n else:\r\n res.append(i) \r\n ln = len(res[0])\r\n data_by_years = []\r\n for i in range(ln):\r\n data_y = []\r\n for j in res:\r\n data_y.append(j[i])\r\n data_by_years.append(data_y) \r\n dict_by_years = {}\r\n dict_with_total = file_with_total_inform(\"Total_Lviv.csv\")\r\n for i in data_by_years:\r\n dict_by_years[int(i[0])] = causes(i)\r\n dict_by_years[int(i[0])].update({\"Total\": dict_with_total[i[0]]})\r\n res_dict = {}\r\n res_dict[\"Lviv\"] = dict_by_years \r\n return res_dict", "def buildCurrencyDict(filename): \n currencies = {}\n with open(os.path.join(\"input\", filename), \"rt\", encoding=\"utf8\") as f:\n reader = csv.reader(f)\n for line in reader:\n currencies[line[1]] = Currency(line[1], line[0], float(line[2]))\n return currencies", "def __init__(self):\n\n\n f = open(datapath + '/Data/companylist.csv', 'r')\n\n\n for line in f:\n reg = line.split(',')\n if reg[0] != 'Symbol':\n if reg[0] not in self.cnames:\n self.cnames[reg[0]] = [reg[1], reg[2], reg[3], reg[4].strip()]\n else:\n if reg[4].strip() != 'ASX':\n self.cnames[reg[0]] = [reg[1], reg[2], reg[3], reg[4].strip()]", "def extract_data(file_name):\n population_data = {\n \"gTitle\": \"SAARC Countries Population For Year 2004 - 2014\",\n \"xLabels\": [\n \"2004\",\n \"2005\",\n \"2006\",\n \"2007\",\n \"2008\",\n \"2009\",\n \"2010\",\n \"2011\",\n \"2012\",\n \"2013\",\n \"2014\",\n ],\n \"xText\": \"Years\",\n \"yText\": \"Population in millions\",\n \"data\": [],\n }\n temp = {}\n with open(file_name, mode=\"r\") as csv_file:\n csv_reader = csv.DictReader(csv_file)\n\n for row in csv_reader:\n if (\n row[\"Region\"] in saarc_countries\n and row[\"Year\"] in population_data[\"xLabels\"]\n ):\n value = float(row[\"Population\"])\n temp[row[\"Year\"]] = temp.get(row[\"Year\"], 0) + value\n\n for val in population_data[\"xLabels\"]:\n population_data[\"data\"].append(int((temp[val] / 1000)))\n\n return population_data", "def get_market_conditions(filename):\n market_conditions = []\n print(\"Generate conditions\")\n\n #open the csv file to be read\n with open(filename) as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n\n #loop through each row to check with the risk factor and get the stress shift\n for row in readCSV:\n if len(row) > 14:\n risk_factor_csv = row[5]\n stress_shift_csv = row[13]\n\n #check risk factor and get the stress shift\n for rf in riskfactors:\n for key in rf:\n if risk_factor_csv == key and stress_shift_csv != '1':\n #create the output obj with the values to return\n obj = {\"Risk_factor\": rf[key], \"Stress_shift\": stress_shift_csv}\n market_conditions.append(obj)\n return market_conditions", "def csvfileUsage(self):\n with open(self.csv_path, \"rb+\") as file_obj:\n reader = csv.DictReader(file_obj, delimiter=',') # CSV DictReader object\n 
\"\"\" reader.fieldnames returns header , slicing intial 'Month' and\n 'Year' header from list\n \"\"\"\n for com_names in reader.fieldnames[2:]:\n self.company_data[com_names] = {}\n # iterating each row\n for row in reader:\n month, year = self.parse_my(row) # parsing the year and month from row\n # pop the `Month` and `Year` Key to minimize iteration below\n row.pop('Month'), row.pop('Year')\n \"\"\" saving and updating the data at same point of time\n each iteration time, checking the max value and updating \n `Month` `Year` and `Value`\n \"\"\"\n self.prepare_company_data(month, year, row, self.company_data)\n file_obj.close() # close file\n return self.company_data", "def getCityCodeDict():\n \n dictionary = {}\n for input in open(filename1,'r'):\n if input:\n input = input.rstrip() # remove the newline\n input = input.replace('\"','') # replace double quotes with null\n input = input.split(',') # split at the comma \n airport = airlineClasses.Airport() # create new object\n airport.cityCode = input[0] # assign into new object\n airport.city = input[1]\n dictionary[airport.cityCode] = airport # store in dictionary\n return dictionary", "def load_stock_symbol_fr_file(self):\n stock_list = pandas.read_csv(self.retrieval_type_input_file_dict[self.stock_retrieval_type])\n stock_list = list(stock_list['SYMBOL'])\n self.set_full_stocklist_to_retrieve(stock_list)", "def __get(self, ticker_symbol):\n\n # n = name\n # l1 = last trade\n # c1 = change\n # p2 = change percent\n url = \"http://finance.yahoo.com/d/quotes.csv?s=%s&f=nl1c1p2\" % ticker_symbol\n req = Request(url)\n resp = urlopen(req) \n csv_str = resp.read().decode().strip()\n\n elems = csv_str.split(',')\n\n return dict(name=elems[0].strip('\"'), ask_price=elems[1], change=elems[2], changep=elems[3].strip('\"'))", "def getCouponAreaDict(coupon_area_file):\n\tfile_handle = open(coupon_area_file,'rb')\n file_reader = csv.DictReader(file_handle)\n\n coupon_area_dict = {}\n for row in file_reader:\t\n temp_dict = coupon_area_dict.get(row['COUPON_ID_hash'], {'SMALL_AREA_NAME':[],'PREF_NAME':[]})\n\t\ttemp_dict['SMALL_AREA_NAME'].append(row['SMALL_AREA_NAME'])\n\t\ttemp_dict['PREF_NAME'].append(row['PREF_NAME'])\n\t\tcoupon_area_dict['COUPON_ID_hash'] = temp_dict\n\n\t# converting list to set for faster search #\n\tfor key in coupon_area_dict:\n\t\tcoupon_area_dict[key]['SMALL_AREA_NAME'] = set(coupon_area_dict[key]['SMALL_AREA_NAME'])\n\t\tcoupon_area_dict[key]['PREF_NAME'] = set(coupon_area_dict[key]['PREF_NAME'])\n\n file_handle.close()\n return coupon_area_dict", "def instruments():\n instr_dict = {}\n #\n instr_dict['LRISr'] = 2**0\n instr_dict['LRISb'] = 2**1\n instr_dict['Kastb'] = 2**2\n instr_dict['shane_kast_red'] = 2**3\n instr_dict['shane_kast_red_ret'] = 2**3\n instr_dict['DEIMOS'] = 2**4\n instr_dict['NIRSPEC'] = 2**5\n instr_dict['GMOS'] = 2**6\n instr_dict['DBSP'] = 2**7\n #\n return instr_dict", "def getUniChemData(self, inchiKeyList):\n mapD = {\n 1: {\"name\": \"chembl\", \"baseUrl\": \"https://www.ebi.ac.uk/chembl/\", \"entryUrl\": \"https://www.ebi.ac.uk/chembldb/compound/inspect/\"},\n 3: {\"name\": \"pdb\", \"baseUrl\": \"http://www.ebi.ac.uk/pdbe/\", \"entryUrl\": \"http://www.ebi.ac.uk/pdbe-srv/pdbechem/chemicalCompound/show/\"},\n 2: {\"name\": \"drugbank\", \"baseUrl\": \"http://drugbank.ca/\", \"entryUrl\": \"http://www.drugbank.ca/drugs/\"},\n 5: {\"name\": \"pubchem_dotf\", \"baseUrl\": \"http://pubchem.ncbi.nlm.nih.gov/sources/sources.cgi\", \"entryUrl\": \"http://pubchem.ncbi.nlm.nih.gov/substance/\"},\n 
4: {\"name\": \"gtopdb\", \"baseUrl\": \"http://www.guidetopharmacology.org\", \"entryUrl\": \"http://www.guidetopharmacology.org/GRAC/LigandDisplayForward?ligandId=\"},\n 11: {\"name\": \"ibm\", \"baseUrl\": \"http://www-935.ibm.com/services/us/gbs/bao/siip/nih/\", \"entryUrl\": \"http://www-935.ibm.com/services/us/gbs/bao/siip/nih/?sid=\"},\n 6: {\"name\": \"kegg_ligand\", \"baseUrl\": \"http://www.genome.jp/kegg/ligand.html\", \"entryUrl\": \"http://www.genome.jp/dbget-bin/www_bget?\"},\n 9: {\"name\": \"zinc\", \"baseUrl\": \"http://zinc15.docking.org\", \"entryUrl\": \"http://zinc15.docking.org/substances/\"},\n 8: {\"name\": \"nih_ncc\", \"baseUrl\": \"http://nihsmr.evotec.com/evotec/\", \"entryUrl\": \"\"},\n 10: {\"name\": \"emolecules\", \"baseUrl\": \"https://www.emolecules.com/\", \"entryUrl\": \"https://www.emolecules.com/cgi-bin/more?vid=\"},\n 12: {\"name\": \"atlas\", \"baseUrl\": \"http://www.ebi.ac.uk/gxa/home\", \"entryUrl\": \"http://www.ebi.ac.uk/gxa/query?conditionQuery=\"},\n 7: {\"name\": \"chebi\", \"baseUrl\": \"http://www.ebi.ac.uk/chebi/downloadsForward.do\", \"entryUrl\": \"http://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI%3A\"},\n 14: {\n \"name\": \"fdasrs\",\n \"baseUrl\": \"http://fdasis.nlm.nih.gov/srs/srs.jsp\",\n \"entryUrl\": \"http://fdasis.nlm.nih.gov/srs/ProxyServlet?mergeData=true&objectHandle=DBMaint&APPLICATION_NAME=fdasrs&actionHandle=default&nextPage=jsp/srs/ResultScreen.jsp&TXTSUPERLISTID=\",\n },\n 15: {\"name\": \"surechembl\", \"baseUrl\": \"https://www.surechembl.org/search/\", \"entryUrl\": \"https://www.surechembl.org/chemical/\"},\n 21: {\"name\": \"pubchem_tpharma\", \"baseUrl\": \"http://www.thomson-pharma.com/\", \"entryUrl\": \"http://pubchem.ncbi.nlm.nih.gov/substance/\"},\n 22: {\"name\": \"pubchem\", \"baseUrl\": \"http://pubchem.ncbi.nlm.nih.gov\", \"entryUrl\": \"http://pubchem.ncbi.nlm.nih.gov/compound/\"},\n 27: {\"name\": \"recon\", \"baseUrl\": \"https://vmh.uni.lu\", \"entryUrl\": \"https://vmh.uni.lu/\"},\n 28: {\"name\": \"molport\", \"baseUrl\": \"https://www.molport.com/shop/index\", \"entryUrl\": \"https://www.molport.com/shop/molecule-link/\"},\n 31: {\n \"name\": \"bindingdb\",\n \"baseUrl\": \"https://www.bindingdb.org/bind/index.jsp\",\n \"entryUrl\": \"http://www.bindingdb.org/bind/chemsearch/marvin/MolStructure.jsp?monomerid=\",\n },\n 41: {\"name\": \"swisslipids\", \"baseUrl\": \"http://www.swisslipids.org/\", \"entryUrl\": \"http://www.swisslipids.org/\"},\n 29: {\"name\": \"nikkaji\", \"baseUrl\": \"http://jglobal.jst.go.jp/en/\", \"entryUrl\": \"http://jglobal.jst.go.jp/en/redirect?Nikkaji_No=\"},\n 32: {\"name\": \"comptox\", \"baseUrl\": \"https://comptox.epa.gov/dashboard/\", \"entryUrl\": \"https://comptox.epa.gov/dashboard/\"},\n 33: {\"name\": \"lipidmaps\", \"baseUrl\": \"http://www.lipidmaps.org\", \"entryUrl\": \"http://www.lipidmaps.org/data/LMSDRecord.php?LMID=\"},\n 35: {\"name\": \"carotenoiddb\", \"baseUrl\": \"http://carotenoiddb.jp/index.html\", \"entryUrl\": \"http://carotenoiddb.jp/Entries/\"},\n 36: {\"name\": \"metabolights\", \"baseUrl\": \"http://www.ebi.ac.uk/metabolights/\", \"entryUrl\": \"http://www.ebi.ac.uk/metabolights/\"},\n 37: {\"name\": \"brenda\", \"baseUrl\": \"https://www.brenda-enzymes.org/index.php\", \"entryUrl\": \"https://www.brenda-enzymes.org/ligand.php?brenda_ligand_id=\"},\n 17: {\"name\": \"pharmgkb\", \"baseUrl\": \"https://www.pharmgkb.org\", \"entryUrl\": \"https://www.pharmgkb.org/drug/\"},\n 18: {\"name\": \"hmdb\", \"baseUrl\": \"http://www.hmdb.ca\", 
\"entryUrl\": \"http://www.hmdb.ca/metabolites/\"},\n 24: {\n \"name\": \"nmrshiftdb2\",\n \"baseUrl\": \"http://nmrshiftdb.nmr.uni-koeln.de/portal/media-type/html/user/anon/page/default.psml/js_pane/P-Home\",\n \"entryUrl\": \"http://nmrshiftdb.org/molecule/\",\n },\n 25: {\"name\": \"lincs\", \"baseUrl\": \"http://www.lincsproject.org/\", \"entryUrl\": \"http://identifiers.org/lincs.smallmolecule/\"},\n 39: {\"name\": \"chemicalbook\", \"baseUrl\": \"https://www.chemicalbook.com\", \"entryUrl\": \"https://www.chemicalbook.com/ChemicalProductProperty_EN_\"},\n 20: {\"name\": \"selleck\", \"baseUrl\": \"http://www.selleckchem.com\", \"entryUrl\": \"http://www.selleckchem.com/products/\"},\n 23: {\"name\": \"mcule\", \"baseUrl\": \"https://mcule.com\", \"entryUrl\": \"https://mcule.com/\"},\n 26: {\"name\": \"actor\", \"baseUrl\": \"https://actor.epa.gov\", \"entryUrl\": \"http://actor.epa.gov/actor/chemical.xhtml?casrn=\"},\n 34: {\"name\": \"drugcentral\", \"baseUrl\": \"http://drugcentral.org\", \"entryUrl\": \"http://drugcentral.org/drugcard/\"},\n 38: {\"name\": \"rhea\", \"baseUrl\": \"http://www.rhea-db.org\", \"entryUrl\": \"http://www.rhea-db.org/searchresults?q=CHEBI:\"},\n }\n oD = {}\n try:\n for ky in inchiKeyList:\n unc = unichem_client # pylint: disable=no-member\n # unc.set_format(\"json\")\n uDL = unc.get(ky)\n if uDL:\n qD = {}\n for uD in uDL:\n if \"src_id\" in uD and int(uD[\"src_id\"]) in mapD:\n qD[mapD[int(uD[\"src_id\"])][\"name\"]] = uD[\"src_compound_id\"]\n if qD:\n oD[ky] = qD\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n return oD", "def get_codes(path):\n hospital_codes = {}\n with open(path, encoding='utf8') as f:\n for line in f:\n val, key = line.split(\",\")\n hospital_codes[int(key)] = val\n return hospital_codes", "def __load_company_data(self):\n\n for ticker_type, ticker_list in self.tickers.items():\n # yfinance only has sector, industry and country for stocks\n if ticker_type == \"STOCK\":\n for ticker in ticker_list:\n # Only gets fields for tickers with missing data\n # TODO: Should only get field missing for tickers with missing data\n # now it's taking the 4 of them\n if (\n self.__transactions.loc[\n self.__transactions[\"Ticker\"] == ticker,\n [\"Sector\", \"Industry\", \"Country\", \"Region\"],\n ]\n .isnull()\n .values.any()\n ):\n # Get ticker info in list [\"Sector\", \"Industry\", \"Country\", \"Region\"] from isin/ticker\n info_list = get_info_from_ticker(ticker)\n\n # Replace fields in transactions\n self.__transactions.loc[\n self.__transactions.Ticker == ticker,\n [\"Sector\", \"Industry\", \"Country\", \"Region\"],\n ] = info_list\n\n elif ticker_type == \"CRYPTO\":\n for ticker in ticker_list:\n if (\n self.__transactions.loc[\n self.__transactions[\"Ticker\"] == ticker,\n [\"Sector\", \"Industry\", \"Country\", \"Region\"],\n ]\n .isnull()\n .values.any()\n ):\n # Get ticker info in list [\"Sector\", \"Industry\", \"Country\", \"Region\"]\n info_list = [\"Crypto\", \"Crypto\", \"Crypto\", \"Crypto\"]\n\n # Replace fields in transactions\n self.__transactions.loc[\n self.__transactions.Ticker == ticker,\n [\"Sector\", \"Industry\", \"Country\", \"Region\"],\n ] = info_list\n\n else:\n for ticker in ticker_list:\n if (\n self.__transactions.loc[\n self.__transactions[\"Ticker\"] == ticker,\n [\"Sector\", \"Industry\", \"Country\", \"Region\"],\n ]\n .isnull()\n .values.any()\n ):\n # Get ticker info in list [\"Sector\", \"Industry\", \"Country\", \"Region\"]\n info_list = [\"-\", \"-\", \"-\", \"-\"]\n\n 
# Replace fields in transactions\n self.__transactions.loc[\n self.__transactions.Ticker == ticker,\n [\"Sector\", \"Industry\", \"Country\", \"Region\"],\n ] = info_list", "def read_data(input_file, support,\r\n freq_item_list, infreq_item_dict):\r\n\r\n df = pd.read_csv(input_file)\r\n # load the data in a dataframe\r\n support_count = int(support*len(df.index))\r\n first_level = []\r\n for i in df.columns:\r\n # iterate through each column header\r\n first_level.append(tuple((i,)))\r\n if sum(df[i]) >= support_count:\r\n freq_item_list[tuple((i,))] = sum(df[i])\r\n else:\r\n infreq_item_dict[tuple((i,))] = sum(df[i])\r\n\r\n return first_level, df, support_count", "def get_currencies():\n currencies = {}\n\n currencies_utilities.fetch_currencies()\n with open(currencies_csv, mode='rU') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n currencies[row['Code']] = row\n\n return currencies" ]
[ "0.6792169", "0.6174895", "0.6046333", "0.59522957", "0.58431596", "0.57646185", "0.5708081", "0.5615214", "0.5540734", "0.5530439", "0.55191875", "0.54750764", "0.54580003", "0.5432584", "0.54289293", "0.5422499", "0.54044735", "0.53776044", "0.5358801", "0.53523725", "0.5351689", "0.5338284", "0.5323369", "0.531401", "0.5308507", "0.53061193", "0.5287488", "0.52845037", "0.52823627", "0.5282058" ]
0.7746812
0
Adds months to the sourcedate (adjusting the day if necessary)
def add_months(sourcedate, months): month = sourcedate.month - 1 + months year = sourcedate.year + month // 12 month = month % 12 + 1 day = min(sourcedate.day, calendar.monthrange(year, month)[1]) return datetime.date(year, month, day)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_months(sourcedate, months):\n month = sourcedate.month - 1 + months\n year = int(sourcedate.year + month / 12)\n month = month % 12 + 1\n day = min(sourcedate.day, calendar.monthrange(year, month)[1])\n return datetime.date(year, month, day)", "def add_months(sourcedate, months):\r\n\r\n month = sourcedate.month - 1 + months\r\n year = sourcedate.year + month / 12\r\n month = month % 12 + 1\r\n day = min(sourcedate.day, calendar.monthrange(year, month)[1])\r\n end_date = datetime.date(year, month, day)\r\n return str(end_date.day) + \"/\" + str(end_date.month) + \"/\" + str(end_date.year)", "def add_month(cab_data):\n return cab_data.assign(month=lambda x: x.time.dt.month)", "def date_add_months(date, num_months):\n\tyear, month, day = date.timetuple()[:3]\n\n\tmonth += num_months\n\t\n\tif num_months > 0:\n\t\twhile month > 12:\n\t\t\tmonth -= 12\n\t\t\tyear += 1\n\telif num_months < 0:\n\t\twhile month < 1:\n\t\t\tmonth += 12\n\t\t\tyear -= 1\n\n\tmonth_max_days = calendar.monthrange(year, month)[1]\n\tif day > month_max_days:\n\t\tday = month_max_days\n\n\treturn datetime.date(year=year, month=month, day=day)", "def add_months(year, months, offset):\n months = months - 1 # 0 index months coming in\n nextmonths = months + offset\n months_offset = nextmonths % 12 + 1 # 1 index it going out\n years_offset = nextmonths // 12\n return (year + years_offset, months_offset)", "def add_months_to_date(date_or_datetime, offset):\n newyear, newmonth = add_months(date_or_datetime.year, date_or_datetime.month, offset)\n try:\n return date_or_datetime.replace(year=newyear, month=newmonth)\n except ValueError:\n ret = date_or_datetime.replace(year=newyear, month=newmonth, day=1)\n return ret.replace(day=(first_of_next_month(ret) - datetime.timedelta(days=1)).day)", "def _next_month(self):\n self._canvas.place_forget()\n\n year, month = self._date.year, self._date.month\n self._date = self._date + self.timedelta(\n days=calendar.monthrange(year, month)[1] + 1)\n self._date = self.datetime(self._date.year, self._date.month, 1)\n self._build_calendar() # reconstruct calendar", "def _next_month(self):\r\n self._canvas.place_forget()\r\n\r\n year, month = self._date.year, self._date.month\r\n self._date = self._date + self.timedelta(\r\n days=calendar.monthrange(year, month)[1] + 1)\r\n self._date = self.datetime(self._date.year, self._date.month, 1)\r\n self._build_calendar() # reconstruct calendar\r", "def add_months(time, number=1):\n assert isinstance(time, dt.date)\n assert isinstance(number, int)\n number = int(number)\n\n year = time.year\n month = time.month\n day = time.day\n if number >= 0:\n for n in range(number):\n year, month = next_month(year, month)\n else:\n for n in range(abs(number)):\n year, month = previous_month(year, month)\n\n days_in_month = calendar.mdays[month]\n if calendar.isleap(year) and month == 2:\n days_in_month = 29\n\n if day > days_in_month:\n whatis = 'Invalid date: %04i-%02i-%02i.' % (year, month, day)\n day = days_in_month\n willdo = 'Setting to: %04i-%02i-%02i.' 
% (year, month, day)\n message = ' '.join(whatis, willdo)\n warnings.warn(message)\n\n return time.replace(year=year, month=month, day=day)", "def next_month(date):\n\n return date + datetime.timedelta(days=calendar.monthrange(date.year, date.month)[1])", "def __add__(self, other):\n if not isinstance(other, datetime.timedelta):\n raise TypeError(\n \"Unsupported operand type(s) for +: {.__name__} and {.__name__}.\".format(type(self), type(other)))\n delta_years, delta_months = 0, 0\n delta_days = other.days\n total_remaining_days_this_year = NepaliDate.total_days(self.year) - (\n sum(NepaliDate.calendar_data[self.year][:self.month - 1]) + self.day\n )\n from_year, from_month, from_day = self.year, self.month, self.day\n if delta_days > total_remaining_days_this_year:\n delta_days -= total_remaining_days_this_year\n from_year += 1\n from_month = 1\n from_day = 0\n if from_year > MAX_DATE['year']:\n raise OverflowError(\"Resulting date out of range.\")\n delta_years = 0\n if delta_days > NepaliDate.total_days(from_year):\n for year in range(from_year, MAX_DATE['year'] + 1):\n total_days = NepaliDate.total_days(year)\n if delta_days > total_days:\n delta_days -= total_days\n delta_years += 1\n else:\n break\n from_year += delta_years\n if from_year > MAX_DATE['year']:\n raise OverflowError(\"Resulting date out of range.\")\n if from_year == self.year:\n total_remaining_days_this_month = NepaliDate.calendar_data[from_year][from_month - 1] - from_day\n if delta_days > total_remaining_days_this_month:\n delta_days -= total_remaining_days_this_month\n from_month += 1\n from_day = 0\n for month_days in NepaliDate.calendar_data[from_year][from_month - 1:]:\n if delta_days > month_days:\n delta_days -= month_days\n delta_months += 1\n else:\n break\n from_month += delta_months\n from_day += delta_days\n self.year, self.month, self.day = from_year, from_month, from_day\n return self", "def setMonth(self, *args):\n return _libsbml.Date_setMonth(self, *args)", "def test_date_accept_plus_days_with_month_wrap(self):\n spi_search = \"find date 2011-03-31 + 2\"\n inv_search = \"year:2011-04-02\"\n self._compare_searches(inv_search, spi_search)", "def add_month(t: datetime, n: int = 1) -> datetime:\n t2 = t\n for count in range(abs(n)): # pylint: disable=unused-variable\n if n > 0:\n t2 = datetime(year=t2.year, month=t2.month, day=1) + timedelta(days=32)\n else:\n t2 = datetime(year=t2.year, month=t2.month, day=1) - timedelta(days=2)\n try:\n t2 = t.replace(year=t2.year, month=t2.month)\n except Exception:\n last_day = monthrange(t2.year, t2.month)[1]\n t2 = t.replace(year=t2.year, month=t2.month, day=last_day)\n return t2", "def date_diff_months_to_shift(d1_date,d2_date): \n d1 = datetime.strptime(d1_date, '%m/%d/%Y')\n d2 = datetime.strptime(d2_date, '%m/%d/%Y')\n return (d2.year - d1.year) * 12 + d2.month - d1.month-1", "def next_month(dateobj):\n year_delta, old_month = divmod(dateobj.month, 12)\n return datetime.date(dateobj.year + year_delta, old_month + 1, 1)", "def _get_months(self):\n for employee in self:\n if employee.date_of_join:\n try:\n join_date = datetime.strptime(employee.date_of_join, DEFAULT_SERVER_DATE_FORMAT)\n to_date = datetime.now().strftime(DEFAULT_SERVER_DATE_FORMAT)\n current_date = datetime.strptime(to_date, DEFAULT_SERVER_DATE_FORMAT)\n employee.duration_in_months = (current_date.year - join_date.year) * 12 + current_date.month - join_date.month\n except:\n employee.duration_in_months = 0.0\n else:\n employee.duration_in_months = 0.0", "def month_shift(\n d: Union[dt.date, 
dt.datetime, pd.Timestamp],\n months: int = 1\n) -> Union[dt.date, dt.datetime, pd.Timestamp]:\n year = d.year\n month = d.month + months\n if not (1 <= month <= 12):\n year = year + ((month - 1) // 12)\n month = ((month - 1) % 12) + 1\n try:\n return d.replace(year=year, month=month)\n except ValueError:\n # Handle month overflow (e.g clip Feb 31 to 28)\n return month_shift(d=d.replace(day=1), months=months + 1) - dt.timedelta(days=1)", "def EOMONTH(start_date, months):\n return DATE(start_date.year, start_date.month + months + 1, 1) - datetime.timedelta(days=1)", "def test_get_data_months_run_start_date_same_year(self, climate_processor):\n with patch('bloomcast.utils.datetime') as mock_datetime:\n mock_datetime.date.today.return_value = datetime.date(2011, 9, 1)\n mock_datetime.date.side_effect = datetime.date\n data_months = climate_processor._get_data_months()\n assert data_months[0] == datetime.date(2011, 1, 1)\n assert data_months[-1] == datetime.date(2011, 9, 1)", "def parse_monthly_dates(self, x, pattern, ext, rename=None):\n datestring = self.scrub_string(x, pattern, ext, rename=rename)\n return datetime.datetime.strptime(datestring, '%Y%m')", "def fetch_months_to_download(cur_date, year_to_collect):\n year_to_collect = int(year_to_collect) # fail fast\n output_fmt = '%Y%m%d'\n\n if year_to_collect > cur_date.year:\n raise ValueError('Error: Year to collect is greater than current year')\n\n range_end = f'{cur_date.year}-{cur_date.month}-01' if cur_date.year == year_to_collect else f'{year_to_collect + 1}-01-01'\n\n return pd.date_range(\n start=f'{year_to_collect - 1}-12-01', # start at last month of previous year\n end=range_end,\n freq='MS'\n ).strftime(output_fmt)", "def new_month(self, month: int, year: int, bill: Bill) -> None:\n raise NotImplementedError", "def test_get_data_months_run_start_date_prev_year(self, climate_processor):\n with patch('bloomcast.utils.datetime') as mock_datetime:\n mock_datetime.date.today.return_value = datetime.date(2012, 2, 1)\n mock_datetime.date.side_effect = datetime.date\n data_months = climate_processor._get_data_months()\n assert data_months[0] == datetime.date(2011, 1, 1)\n assert data_months[11] == datetime.date(2011, 12, 1)\n assert data_months[-1] == datetime.date(2012, 2, 1)", "def month(self):\n return 0", "def month(self):\n return 0", "def DATEADD(start_date, days=0, months=0, years=0, weeks=0):\n return DATE(start_date.year + years, start_date.month + months,\n start_date.day + days + weeks * 7)", "def EOMONTH(\n start_date: func_xltypes.XlDateTime,\n months: func_xltypes.XlNumber\n) -> func_xltypes.XlNumber:\n delta = relativedelta(months=int(months))\n edate = utils.number_to_datetime(int(start_date)) + delta\n\n if edate <= utils.EXCEL_EPOCH:\n raise xlerrors.NumExcelError(\n f\"Date result before {utils.EXCEL_EPOCH}\")\n\n eomonth = edate + relativedelta(day=31)\n\n return utils.datetime_to_number(eomonth)", "def _get_months(self, cr, uid, context):\n months=[(str(n),str(n)) for n in range(1,13)]\n return months", "def __next_month(self, year, month):\n year, month = (year, month + 1) if month < 12 else (year + 1, 1)\n\n return self.create(year, month)" ]
[ "0.8102769", "0.79254377", "0.67087173", "0.6575526", "0.6172759", "0.5974256", "0.5790029", "0.5745349", "0.56915337", "0.5571453", "0.5530026", "0.55111146", "0.54789144", "0.53980106", "0.5394042", "0.5374351", "0.53717643", "0.5355199", "0.5321254", "0.53072524", "0.5303881", "0.5292516", "0.5281971", "0.5278433", "0.5269655", "0.5269655", "0.52604735", "0.5254444", "0.5238983", "0.5237697" ]
0.80970645
1
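For reference, the add_months document above is runnable on its own once calendar and datetime are imported; a minimal usage sketch (the example dates below are arbitrary):

import calendar
import datetime

def add_months(sourcedate, months):
    # Shift by whole months; clamp the day to the length of the target month,
    # so e.g. Jan 31 + 1 month yields Feb 28 (or Feb 29 in a leap year).
    month = sourcedate.month - 1 + months
    year = sourcedate.year + month // 12
    month = month % 12 + 1
    day = min(sourcedate.day, calendar.monthrange(year, month)[1])
    return datetime.date(year, month, day)

print(add_months(datetime.date(2023, 1, 31), 1))   # 2023-02-28 (day clamped)
print(add_months(datetime.date(2023, 11, 15), 3))  # 2024-02-15 (year rolls over)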
mobility(T; Nd, Na; A, B) = A / (T^(3/2) + B (Nd + Na) / T^(3/2))
def mobility(mat, T, A, B): Nd = mat.p_donor_concentration(T=T) Na = mat.n_acceptor_concentration(T=T) T32 = T ** (3 / 2) return A / (T32 + B * (Nd + Na) / T32)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mu_na(n: float, a: float) -> float:\n return n * n * a * a * a", "def ilerp(a, b, t):\n return (t - a) / (b - a)", "def C_Na_eq():\n global C_Na, C_Mg, C_dNTP\n return C_Na + 120*sqrt(C_Mg - C_dNTP)", "def t(o, r):\n return (r/o)**2", "def calc_mad(a,b):\n comb = a + b\n idx = np.array(range(len(a)))[~np.isnan(comb)]\n a1=a[idx]\n b1=b[idx]\n N = len(a1)\n mad = np.sum(np.abs(a1-b1))/N\n return mad", "def termometroNp(temp):\n\n # Genera un margen de error, utilizando numpy\n error = np.array([ random.normalvariate(0, 0.2) for x in range(999) ])\n temp = temp\n # Diferencia entre la temperatura real y el margen de error\n dif = np.array([ x + temp for x in error ])\n\n return dif", "def noncentral_moment(n, a, b):\n n = _validate_moment_n(n)\n with mp.extradps(5):\n a, b = _validate_a_b(a, b)\n mu = mp.one\n for k in range(n):\n mu *= (a + k)/(a + b + k)\n return mu", "def mutual_info(a,b,c,n):\r\n if a == 0: \r\n return 0\r\n print(a,b,c,n) \r\n return log10((a * n) / ((a + c) * (a + b)))", "def trifactor(a, d, c):\n u, l = zeros_like(d), zeros_like(a)\n u[0] = d[0]\n for k in range(len(a)):\n l[k] = a[k]/u[k]\n u[k+1] = d[k+1] - l[k]*c[k]\n return l, u", "def NMC_diffusivity_PeymanMPM(sto, T):\n\n D_ref = 8 * 10 ** (-15)\n E_D_s = 18550\n arrhenius = np.exp(E_D_s / pybamm.constants.R * (1 / 298.15 - 1 / T))\n\n return D_ref * arrhenius", "def graphite_diffusivity_PeymanMPM(sto, T):\n\n D_ref = 5.0 * 10 ** (-15)\n E_D_s = 42770\n arrhenius = np.exp(E_D_s / pybamm.constants.R * (1 / 298.15 - 1 / T))\n\n return D_ref * arrhenius", "def ER_Theory(N,Kappa) :\n\tMu2 = Kappa - ( 2*Kappa*(1.0 - (Kappa/N))*math.log(N) )**0.5 + (( (Kappa*(1.0 - (Kappa/N)))/math.log(N) )**0.5)*( math.log( (2*math.pi*math.log((N**2)/(2*math.pi))) ) - 0.5772)\n\treturn Mu2", "def Tinker05(self,dc,nu):\n if len(self.bias_par.keys()) == 0:\n a = 0.707\n b = 0.35\n c = 0.8\n else:\n a = self.bias_par['a']\n b = self.bias_par['b']\n c = self.bias_par['c']\n sa = a**0.5\n return 1.+(sa*(a*nu**2) + sa*b*(a*nu**2)**(1.-c) - (a*nu**2)**c/((a*nu**2)**c + \\\n b*(1.-c)*(1.-c/2.)))/(dc*sa)", "def SMT01(self,dc,nu):\n if len(self.bias_par.keys()) == 0:\n a = 0.707\n b = 0.5\n c = 0.6\n else: \n a = self.bias_par['a']\n b = self.bias_par['b']\n c = self.bias_par['c']\n sa = a**0.5\n return 1.+(sa*(a*nu**2.) 
+ sa*b*(a*nu**2.)**(1.-c) - \\\n (a*nu**2.)**c/((a*nu**2.)**c + \\\n b*(1.-c)*(1.-c/2.)))/(dc*sa)", "def make_aiot(A, D, use_log=False):\n magic = 1-exp(-5000/D) #undocumented in penncnv paper\n\n def aiot(d):\n \"\"\"modify the state transition matrix by snp distance.\n \"\"\"\n res = A * (1-exp(-d/D)) / magic\n _adjust(res)\n return ( log(res) if use_log\n else res )\n return aiot", "def tdma(a, b, c, d):\n \n n = len(b)\n x = np.zeros(n)\n \n # elimination:\n \n for k in range(1,n):\n q = a[k]/b[k-1]\n b[k] = b[k] - c[k-1]*q\n d[k] = d[k] - d[k-1]*q\n \n # backsubstitution:\n \n q = d[n-1]/b[n-1]\n x[n-1] = q\n \n for k in range(n-2,-1,-1):\n q = (d[k]-c[k]*q)/b[k]\n x[k] = q\n \n \n return x", "def expected(A, B):\n return 1 / (1 + 10 ** ((B - A) / 150))", "def final_amt(p,r,n,t):\n\n a = p * (1 + r/n) ** (n*t)\n return a #This is what makes the function \"fruitful\"", "def attest_rel (a,b,dimension=None,printit=0,name1='Samp1',name2='Samp2',writemode='a'):\r\n if dimension == None:\r\n a = N.ravel(a)\r\n b = N.ravel(b)\r\n dimension = 0\r\n if len(a)<>len(b):\r\n raise ValueError, 'Unequal length arrays.'\r\n x1 = amean(a,dimension)\r\n x2 = amean(b,dimension)\r\n v1 = avar(a,dimension)\r\n v2 = avar(b,dimension)\r\n n = a.shape[dimension]\r\n df = float(n-1)\r\n d = (a-b).astype('d')\r\n\r\n denom = N.sqrt((n*N.add.reduce(d*d,dimension) - N.add.reduce(d,dimension)**2) /df)\r\n zerodivproblem = N.equal(denom,0)\r\n denom = N.where(zerodivproblem,1,denom) # avoid zero-division in 1st place\r\n t = N.add.reduce(d,dimension) / denom # N-D COMPUTATION HERE!!!!!!\r\n t = N.where(zerodivproblem,1.0,t) # replace NaN/wrong t-values with 1.0\r\n probs = abetai(0.5*df,0.5,float(df)/(df+t*t))\r\n if type(t) == N.ndarray:\r\n probs = N.reshape(probs,t.shape)\r\n if probs.shape == (1,):\r\n probs = probs[0]\r\n\r\n if printit <> 0:\r\n statname = 'Related samples T-test.'\r\n outputpairedstats(printit,writemode,\r\n name1,n,x1,v1,N.minimum.reduce(N.ravel(a)),\r\n N.maximum.reduce(N.ravel(a)),\r\n name2,n,x2,v2,N.minimum.reduce(N.ravel(b)),\r\n N.maximum.reduce(N.ravel(b)),\r\n statname,t,probs)\r\n return\r\n return t, probs", "def theta_rule(a, u, h, t, n, th):\n \n Dt = t[n+1] - t[n]\n num = (1.0 - (1-th)*a*Dt)*u[n] + (1-th)*a*Dt*h(t[n]) + th*a*Dt*h(t[n+1])\n den = 1 + th*a*Dt\n \n return num/den", "def u_exact(t):\n return a * t + b", "def obj_u_opt_N_fixed(u, T, alpha, B):\n x = T.dot(u)\n return alpha.T.dot(x) - x.T.dot(B*x)", "def exact_moments( A, w ):\n\n k = len(w)\n P = A.dot( diag( w ) ).dot( A.T )\n #T = sum( [ w[i] * tensorify( A.T[i], A.T[i], A.T[i] ) for i in xrange( k ) ] )\n T = lambda theta: A.dot( diag( w) ).dot( diag( A.T.dot( theta ) ) ).dot( A.T )\n\n return P, T", "def _etaM(self,x):\n return self._etaM_cool(x) + self._etaM_hot(x)", "def diff_fn(\n mu_i: tf.Tensor,\n ddu_n_i: tf.Tensor,\n ddu_t_i: tf.Tensor,\n ) -> tf.Tensor:\n return mu_i * (4.0 / 3.0 * ddu_n_i + 1.0 / 3.0 * ddu_t_i)", "def obj_u_opt_N_opt(u, T, alpha, B, N, num_tumor_voxels, Td = 2):\n x = T.dot(u)\n alpha_tilde = alpha #np.repeat(N, num_tumor_voxels)*alpha\n B_tilde = B #np.repeat(N, num_tumor_voxels)*B\n #Note that all modalities must have the same number of tumor voxels:\n return alpha_tilde.T.dot(x) - x.T.dot(B_tilde*x) + num_tumor_voxels*(np.sum(N)-1)*(np.log(2)/Td)", "def CustomMathTest(): \n \n def CheckEqual(iterator):\n return len(set(iterator)) <= 1\n \n print(\"\")\n print(\" ..Testing.. 
\")\n print(\"\")\n \n Tests = []\n\n #Setup\n c = [1,2,3,nan,3]\n c2 = ma.masked_array(c,isnan(c))\n #Python has a less-comfortable handling of missing values.\n c3 = [2,3,-1,4,0]\n \n\n print(\"Testing MeanNa...\")\n Expected = [1.0, 2.0, 3.0, 2.25, 3.0]\n Actual = MeanNa(c2)\n print(Expected)\n print(Actual)\n print(CheckEqual(Actual==Expected))\n Tests.append(CheckEqual(Actual==Expected))\n print(\"\")\n \n print(\"Testing Catch...\")\n Expected = [0,1,.5,0]\n Actual = [Catch(.4),Catch(.6),Catch(.4,.3),Catch(.4,.1)]\n print(Expected)\n print(Actual)\n print(Actual==Expected)\n Tests.append((Actual==Expected))\n print(\"\")\n \n print(\"Testing Influence...\")\n Expected = [array([ 0.88888889]), array([ 1.33333333]), array([ 1.]), array([ 1.33333333])]\n Actual = Influence(GetWeight(c2))\n print(Expected)\n print(Actual)\n Out = []\n Flag=False\n for i in range(len(Actual)): #rounding problems require an approximation\n Out.append( (Actual[i]-Expected[i])**2)\n if(sum(Out)<.000000000001):\n Flag=True\n print(Flag)\n Tests.append(Flag) \n print(\"\")\n \n print(\"Testing ReWeight...\")\n Expected = [0.08888888888888889, 0.17777777777777778, 0.26666666666666666, 0.2, 0.26666666666666666]\n Actual = ReWeight(c2)\n print(Expected)\n print(Actual)\n print(CheckEqual(Actual==Expected))\n Tests.append(CheckEqual(Actual==Expected))\n print(\"\")\n \n Votes = array([[1,1,0,0], \n [1,0,0,0],\n [1,1,0,0],\n [1,1,1,0],\n [0,0,1,1],\n [0,0,1,1]])\n \n Votes = ma.masked_array(Votes,isnan(Votes))\n \n print(\"Testing ReverseMatrix...\")\n Expected = array([[0, 0, 1, 1],\n [0, 1, 1, 1],\n [0, 0, 1, 1],\n [0, 0, 0, 1],\n [1, 1, 0, 0],\n [1, 1, 0, 0]])\n Actual = ReverseMatrix(Votes)\n print(Expected)\n print(Actual)\n Flag=False\n if(sum(Expected==Actual)==24):\n Flag=True\n print(Flag)\n Tests.append(Flag)\n print(\"\") \n \n print(\"Testing WeightedPrinComp...\")\n Expected = array([-0.81674714, -0.35969107, -0.81674714, -0.35969107, 1.17643821, 1.17643821])\n Actual = WeightedPrinComp(Votes)[1]\n Out = []\n Flag=False\n for i in range(len(Actual)): #rounding problems require an approximation\n Out.append( (Actual[i]-Expected[i])**2)\n if(sum(Out)<.000000000001):\n Flag=True \n print(Flag)\n Tests.append(Flag) \n print(\"\") \n \n print(\" *** TEST RESULTS ***\")\n print(Tests)\n print(CheckEqual(Tests))\n \n return(CheckEqual(Tests))", "def test_am_wta(Simulator, plt, seed, rng):\n\n D = 64\n vocab = Vocabulary(D, rng=rng)\n vocab.parse('A+B+C+D')\n\n def input_func(t):\n if t < 0.2:\n return vocab.parse('A+0.8*B').v\n elif t < 0.3:\n return np.zeros(D)\n else:\n return vocab.parse('0.8*A+B').v\n\n with nengo.Network('model', seed=seed) as m:\n am = AssociativeMemory(vocab, wta_output=True)\n in_node = nengo.Node(output=input_func, label='input')\n nengo.Connection(in_node, am.input)\n\n in_p = nengo.Probe(in_node)\n out_p = nengo.Probe(am.output, synapse=0.03)\n\n sim = Simulator(m)\n sim.run(0.5)\n t = sim.trange()\n more_a = (t > 0.15) & (t < 0.2)\n more_b = t > 0.45\n\n plt.subplot(2, 1, 1)\n plt.plot(t, nengo.spa.similarity(sim.data[in_p], vocab))\n plt.ylabel(\"Input\")\n plt.ylim(top=1.1)\n plt.legend(vocab.keys, loc='best')\n plt.subplot(2, 1, 2)\n plt.plot(t, nengo.spa.similarity(sim.data[out_p], vocab))\n plt.plot(t[more_a], np.ones(t.shape)[more_a] * 0.8, c='g', lw=2)\n plt.plot(t[more_b], np.ones(t.shape)[more_b] * 0.8, c='g', lw=2)\n plt.ylabel(\"Output\")\n plt.legend(vocab.keys, loc='best')\n\n assert similarity(sim.data[out_p][more_a], vocab.parse(\"A\").v) > 0.79\n assert 
similarity(sim.data[out_p][more_a], vocab.parse(\"B\").v) < 0.19\n assert similarity(sim.data[out_p][more_b], vocab.parse(\"B\").v) > 0.79\n assert similarity(sim.data[out_p][more_b], vocab.parse(\"A\").v) < 0.19", "def compute(dm,do):\n mae = MV.average(MV.absolute(MV.subtract(dm,do)))\n return float(mae)", "def specificity(self):\n return self.tn / (self.tn + self.fp) if self.tn + self.fp > 0 else .0" ]
[ "0.6247191", "0.6061543", "0.5862243", "0.58048445", "0.5770478", "0.57220936", "0.5684574", "0.56817734", "0.5677259", "0.5659736", "0.55972934", "0.55965066", "0.55772066", "0.556147", "0.55419177", "0.552071", "0.5519663", "0.5517092", "0.5505766", "0.5494118", "0.5485629", "0.54780334", "0.5465106", "0.54613125", "0.54530257", "0.54290956", "0.5421405", "0.542124", "0.5412408", "0.5410473" ]
0.74827826
0
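The mobility document above needs a material object exposing the two concentration methods; a minimal sketch with a stub material — the concentration values and the A, B coefficients are arbitrary assumptions, used only to exercise mu = A / (T^(3/2) + B*(Nd+Na)/T^(3/2)):

class StubMaterial:
    # Hypothetical stand-in for the real material class; the constants are
    # assumptions chosen only so the formula can be evaluated.
    def p_donor_concentration(self, T):
        return 1e16

    def n_acceptor_concentration(self, T):
        return 1e15

def mobility(mat, T, A, B):
    Nd = mat.p_donor_concentration(T=T)
    Na = mat.n_acceptor_concentration(T=T)
    T32 = T ** (3 / 2)
    return A / (T32 + B * (Nd + Na) / T32)

print(mobility(StubMaterial(), T=300.0, A=1.0e7, B=1.0e-12))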
Adjust scrollbars according to window- and canvas-size.
def adjustScrolls(self): cwidth = self._canvas.winfo_width() cheight = self._canvas.winfo_height() self._canvas.xview_moveto(0.5*(self.canvwidth-cwidth)/self.canvwidth) self._canvas.yview_moveto(0.5*(self.canvheight-cheight)/self.canvheight) if cwidth < self.canvwidth or cheight < self.canvheight: self.hscroll.grid(padx=1, in_ = self, pady=1, row=1, column=0, rowspan=1, columnspan=1, sticky='news') self.vscroll.grid(padx=1, in_ = self, pady=1, row=0, column=1, rowspan=1, columnspan=1, sticky='news') else: self.hscroll.grid_forget() self.vscroll.grid_forget()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AdjustMyScrollbars(self):\r\n\r\n if self._anchor:\r\n xUnit, yUnit = self.GetScrollPixelsPerUnit()\r\n if xUnit == 0:\r\n xUnit = self.GetCharWidth()\r\n if yUnit == 0:\r\n yUnit = self._lineHeight\r\n\r\n x, y = self._anchor.GetSize(0, 0, self)\r\n y += yUnit + 2 # one more scrollbar unit + 2 pixels\r\n x_pos = self.GetScrollPos(wx.HORIZONTAL)\r\n y_pos = self.GetScrollPos(wx.VERTICAL)\r\n x = self._owner.GetHeaderWindow().GetWidth() + 2\r\n if x < self.GetClientSize().GetWidth():\r\n x_pos = 0\r\n\r\n self.SetScrollbars(xUnit, yUnit, x/xUnit, y/yUnit, x_pos, y_pos)\r\n else:\r\n self.SetScrollbars(0, 0, 0, 0)", "def AdjustMyScrollbars(self):\r\n\r\n if self._anchor:\r\n \r\n x, y = self._anchor.GetSize(0, 0, self)\r\n y += _PIXELS_PER_UNIT + 2 # one more scrollbar unit + 2 pixels\r\n x += _PIXELS_PER_UNIT + 2 # one more scrollbar unit + 2 pixels\r\n x_pos = self.GetScrollPos(wx.HORIZONTAL)\r\n y_pos = self.GetScrollPos(wx.VERTICAL)\r\n self.SetScrollbars(_PIXELS_PER_UNIT, _PIXELS_PER_UNIT, x/_PIXELS_PER_UNIT, y/_PIXELS_PER_UNIT, x_pos, y_pos)\r\n \r\n else:\r\n \r\n self.SetScrollbars(0, 0, 0, 0)", "def configure_canvas(self):\r\n self.window.update_idletasks() # this updates window size\r\n\r\n border = 10\r\n self.canvas.config(\r\n width=self.window.winfo_reqwidth() + border,\r\n height=min(350, self.window.winfo_reqheight() + border,))\r\n self.canvas.configure(scrollregion=(\r\n 0, 0,\r\n self.window.winfo_reqwidth() + border,\r\n self.window.winfo_reqheight() + border))", "def setup_scrollbar(self):\r\n self.container_widgets[\"order_frame\"].grid_propagate(False)\r\n self.container_widgets[\"orders_scrollbar\"].grid(row=0, column=1, sticky='ns')\r\n self.container_widgets[\"order_canvas\"].bind_all(\"<Button-4>\", self.on_mousewheel) # TODO not working\r\n self.container_widgets[\"order_canvas\"].bind_all(\"<Button-5>\", self.on_mousewheel) # TODO not working\r\n self.container_widgets[\"order_canvas\"].config(\r\n yscrollcommand=self.container_widgets[\"orders_scrollbar\"].set)\r\n self.container_widgets[\"order_canvas\"].config(\r\n scrollregion=self.container_widgets[\"order_canvas\"].bbox(\"all\"))\r\n self.container_widgets[\"order_canvas\"].create_window(\r\n (0, 0),\r\n window=self.container_widgets[\"orders_container\"],\r\n anchor='nw')\r\n # TODO change width\r\n self.container_widgets[\"order_canvas\"].config(\r\n width=600 + self.container_widgets[\"orders_scrollbar\"].winfo_width())", "def _configure_interior(event):\n # update the scrollbars to match the size of the inner frame\n size = (self.internal_frame.winfo_reqwidth(), self.internal_frame.winfo_reqheight())\n self.canvas.config(scrollregion=\"0 0 %s %s\" % size)\n if self.internal_frame.winfo_reqwidth() != self.canvas.winfo_width():\n # update the canvas's width to fit the inner frame\n self.canvas.config(width=self.internal_frame.winfo_reqwidth())\n if self.internal_frame.winfo_reqheight() != self.canvas.winfo_height():\n # update the canvas's width to fit the inner frame\n self.canvas.config(height=self.internal_frame.winfo_reqheight())", "def __window_resizeBy(self, xDelta, yDelta):\n pass", "def resize(self):\n h, w = self.win.getmaxyx()\n self.maxh, self.maxw = h, w\n if w == 0 or h == 2:\n return\n self.win.resize(h, w)\n self.lpane.do_resize(h, w)\n self.rpane.do_resize(h, w)\n self.statusbar.resize(h, w)\n self.tabbar.resize(1,w)\n self.regenerate()\n self.display()", "def resize(self, width, height):\n geo = self.geometry\n # Start of menu.\n self.menu_start = self.window.width - 
(geo.menu_width +\\\n geo.horizontal_margin + geo.scroll_bar_width)\n # Update vertical span of the window.\n self.current_view_span = height - self.status_bar.height\n # Call the resize method of all objects in the current window.\n for object in self.object_list:\n object.resize(width, height)\n # Just one call to the adaptive plot height is needed. Therefore the\n # calls need to be here.\n if self.waveforms:\n self.utils.adaptPlotHeight()", "def update_scrollbar(self):\n self.testCanvas.bind('<Configure>', self.on_configure)\n self.testFrame.bind('<Configure>', self.on_configure)", "def setwinsize(self, rows, cols):", "def on_window1_configure_event(self, source=None, event=None):\n\t\tself.scrolledwindow1.set_property('width-request', event.width - self.size_offset[0])\n\t\tself.scrolledwindow1.set_property('height-request', event.height - self.size_offset[1])\n\t\t#self.window1.do_configure_event(source, event)", "def __init__(self,master,**kw):\n Frame.__init__(self,master,**kw)\n \n self.canvas=Canvas(self,scrollregion=(0,0,500,500))#,width=300,height=300,scrollregion=(0,0,500,500))\n self.internal_frame=Frame(self.canvas)\n self.hbar=Scrollbar(self,orient=HORIZONTAL)\n self.vbar=Scrollbar(self,orient=VERTICAL)\n\n interior_id=self.canvas.create_window((0,0),window=self.internal_frame,anchor=\"nw\")\n\n \n self.hbar.pack(side=BOTTOM,fill=X)\n self.hbar.config(command=self.canvas.xview)\n \n \n self.vbar.pack(side=RIGHT,fill=Y)\n self.vbar.config(command=self.canvas.yview)\n \n## self.canvas.config(width=300,height=300)\n self.canvas.config(xscrollcommand=self.hbar.set, yscrollcommand=self.vbar.set)\n self.canvas.bind_all(\"<MouseWheel>\",lambda x:self.on_mouse_wheel(x,self.canvas))\n self.canvas.pack(side=LEFT,expand=True,fill=BOTH)\n\n def _configure_interior(event):\n \"\"\"\n Figures out how big the interior frame needs to be\n \"\"\"\n # update the scrollbars to match the size of the inner frame\n size = (self.internal_frame.winfo_reqwidth(), self.internal_frame.winfo_reqheight())\n self.canvas.config(scrollregion=\"0 0 %s %s\" % size)\n if self.internal_frame.winfo_reqwidth() != self.canvas.winfo_width():\n # update the canvas's width to fit the inner frame\n self.canvas.config(width=self.internal_frame.winfo_reqwidth())\n if self.internal_frame.winfo_reqheight() != self.canvas.winfo_height():\n # update the canvas's width to fit the inner frame\n self.canvas.config(height=self.internal_frame.winfo_reqheight())\n self.internal_frame.bind('<Configure>', _configure_interior)\n\n def _configure_canvas(event):\n \"\"\"\n Figures out how bid the interior canvas needs to be\n \"\"\"\n if self.internal_frame.winfo_reqwidth() != self.canvas.winfo_width():\n## print \"frame\",self.internal_frame.winfo_reqwidth()\n## print \"canvas\",self.canvas.winfo_width()\n # update the inner frame's width to fill the canvas\n## self.canvas.itemconfigure(interior_id, width=self.canvas.winfo_width())\n self.canvas.config(width=self.internal_frame.winfo_reqwidth())\n if self.internal_frame.winfo_reqheight() != self.canvas.winfo_height():\n # update the inner frame's width to fill the canvas\n## self.canvas.itemconfigure(interior_id, width=self.canvas.winfo_width())\n self.canvas.config(height=self.internal_frame.winfo_reqheight())\n self.canvas.bind('<Configure>', _configure_canvas)", "def __window_resizeTo(self, iWidth, iHeight):\n pass", "def Pane_Resized( self, new_sizes ):\r\n if(new_sizes[0] > 200 ):\r\n cb.xtotal = new_sizes[0]-100\r\n self.canvas_one.config(width = new_sizes[0])\r\n 
self.canvas_scale.config(width = new_sizes[0])\r\n else:\r\n cb.xtotal = 200-100\r\n self.canvas_one.config(width = 200)\r\n self.canvas_scale.config(width = 200)\r\n if (len(new_sizes) > 1 ):\r\n self.canvas_two.config(width=new_sizes[1])\r\n self.system.Draw()", "def scroll_window(self):\r\n window = tkinter.Frame(self.root)\r\n scroller = tkinter.Scrollbar(self.root, orient=\"vertical\",\r\n command=self.canvas.yview)\r\n self.canvas.configure(yscrollcommand=scroller.set)\r\n\r\n scroller.pack(side=\"right\", fill=\"y\")\r\n self.canvas.pack(side=\"left\", fill=\"both\", expand=True)\r\n self.canvas.create_window((4, 4), window=window, anchor=\"nw\",\r\n tags=\"self.window\")\r\n return window", "def resize(self):\n\t\tself.win.erase()\n\t\tfor c in self.components:\n\t\t\tc.resize()\n\t\tself.draw(True)", "def updateScrollArea(self):\n iconx = []\n icony = []\n if len(self.icons) > 0:\n for item in self.icons:\n iconx.append(item.x())\n icony.append(item.y())\n self.setMinimumWidth(max(iconx)+75)\n self.setMinimumHeight(max(icony)+75)", "def AdjustDC(self, dc):\r\n \r\n xpix, dummy = self._owner.GetScrollPixelsPerUnit()\r\n x, dummy = self._owner.GetViewStart()\r\n\r\n # account for the horz scrollbar offset\r\n dc.SetDeviceOrigin(-x * xpix, 0)", "def windows_zoomer(self, event):\n if event.delta > 0:\n self.canvas.scale(\"all\", event.x, event.y, 1.1, 1.1)\n elif event.delta < 0:\n self.canvas.scale(\"all\", event.x, event.y, 0.9, 0.9)\n self.canvas.configure(scrollregion=self.canvas.bbox(\"all\"))", "def _update_dimensions(self):\n _, self.width = self.window.getmaxyx()\n self.spacing = self.width // self.total_columns", "def SizeWindows(self):\n self._SizeWindows()", "def updatescroll(self):\n if self.node:\n #self.update_idletasks() # Required, else dimension of content may not have been computed ?\n forgetit, forgetit, x1, forgetit = self.bbox(ALL)\n self.sizetree = self.node.sizetree() + (self.winfo_height() / self.nodeheight) - 1\n self.configure(scrollregion = (0, 0, x1, self.sizetree * self.nodeheight))", "def config_do_not_scale_image_to_fit(self):\n # establish scrollbars\n self.sbarv = tkinter.Scrollbar(self, orient=tkinter.VERTICAL)\n self.sbarh = tkinter.Scrollbar(self, orient=tkinter.HORIZONTAL)\n self.sbarv.config(command=self.yview)\n self.sbarh.config(command=self.xview)\n\n self.config(yscrollcommand=self.sbarv.set)\n self.config(xscrollcommand=self.sbarh.set)\n self.sbarv.grid(row=0, column=1, stick=tkinter.N+tkinter.S)\n self.sbarh.grid(row=1, column=0, sticky=tkinter.E+tkinter.W)", "def update_scroll_region(self):\n self.configure(scrollregion=(-self._radius - self.circ_pad,\n -self._radius - self.circ_pad,\n self._radius + self.circ_pad,\n self._radius + self.circ_pad))", "def __reconfig__(self, event):\r\n x, y = event.width//2, event.height//2\r\n self.canvas.config(scrollregion=(-x, -y, x, y))", "def on_canvas_resize(self, event) -> None:\r\n\r\n self.painter.adjust_to_canvas()\r\n self.painter.draw_board()", "def set_scroll_length(self):\n size = self.driver.get_window_size()\n self.x_cord = int(size['width'] / 2)\n self.start_y = int(size['height'] * 0.9)\n self.end_y = int(size['height'] * 0.1)", "def onFrameConfigure(self, event):\n self.panel_002.config(scrollregion=self.panel_002.bbox(\"all\"))", "def ev_windowsizechanged(self, event: WindowResized) -> None:", "def DoSetSize(self, x, y, width, height, sizeFlags=wx.SIZE_AUTO):\r\n \r\n parent_size = self.GetParent().GetClientSize()\r\n if x + width > parent_size.x:\r\n width = max(0, parent_size.x - 
x)\r\n if y + height > parent_size.y:\r\n height = max(0, parent_size.y - y)\r\n\r\n wx.PyControl.DoSetSize(self, x, y, width, height, sizeFlags)" ]
[ "0.7038642", "0.6943688", "0.69007504", "0.6676013", "0.6641863", "0.6597355", "0.6593239", "0.6341009", "0.63041687", "0.6233112", "0.61331993", "0.6067895", "0.60603666", "0.60564363", "0.6000023", "0.5999915", "0.59684426", "0.5967346", "0.59607744", "0.5949428", "0.59440607", "0.5922338", "0.5912757", "0.5840104", "0.58262473", "0.58021045", "0.5786779", "0.57556045", "0.57442784", "0.57378495" ]
0.7195202
0
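The adjustScrolls document above is a method of a scrolled-canvas widget; a simplified standalone sketch of the same idea (show a scrollbar only while the canvas is larger than its window) — the widget layout and sizes here are assumptions, and a display is required:

import tkinter as tk

root = tk.Tk()
canvwidth, canvheight = 800, 600  # logical canvas size (assumed)
canvas = tk.Canvas(root, width=300, height=200,
                   scrollregion=(0, 0, canvwidth, canvheight))
hscroll = tk.Scrollbar(root, orient=tk.HORIZONTAL, command=canvas.xview)
vscroll = tk.Scrollbar(root, orient=tk.VERTICAL, command=canvas.yview)
canvas.configure(xscrollcommand=hscroll.set, yscrollcommand=vscroll.set)

def adjust_scrolls(event=None):
    # Show the scrollbars only when the visible area is smaller than the canvas.
    cwidth, cheight = canvas.winfo_width(), canvas.winfo_height()
    if cwidth < canvwidth or cheight < canvheight:
        hscroll.grid(row=1, column=0, sticky="we")
        vscroll.grid(row=0, column=1, sticky="ns")
    else:
        hscroll.grid_forget()
        vscroll.grid_forget()

canvas.grid(row=0, column=0, sticky="nsew")
root.rowconfigure(0, weight=1)
root.columnconfigure(0, weight=1)
canvas.bind("<Configure>", adjust_scrolls)
root.mainloop()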
return a blank image object
def _blankimage(): img = TK.PhotoImage(width=1, height=1) img.blank() return img
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_blank_img(self):\n if photos_settings.DEBUG:\n return self.get_placeholder_img()\n\n out = {\n 'blank': True,\n 'width': self.max_width,\n 'height': self.max_height,\n 'url': photos_settings.EMPTY_IMAGE_SITE_PREFIX + 'img/empty/%s.png' % (self.name),\n }\n return out", "def create_empty_image(width=512, height=512):\n blank_img = np.zeros((width, height, 3), np.uint8)\n # Return instance of the class\n return ExtendedImage(blank_img)", "def image(self) -> PIL.Image.Image:\n try:\n data = io.BytesIO(self.data)\n return PIL.Image.open(data)\n except Exception: # Image data is incorrect, fix as a simple transparent image\n return PIL.Image.new('RGBA', Image.MAX_IMAGE_SIZE)", "def image(self):\n return self.any_image(-1)", "def NullImageProto(msg:str = \"\"):\n return NLImage(width=0, height=0, data=msg)", "def topil(self) -> Image.Image:\n if self.width == 0 or self.height == 0:\n return None\n return Image.frombytes(\n \"RGBA\", (self.width, self.height), self.data, \"raw\", \"ARGB\", 0, 1\n )", "def image(self):\n if self.hasImage():\n return self._image.pixmap().toImage()\n return None", "def no_bin(image, *args, **kwargs):\n return image", "def gen_empty_img(w=640, h=480):\n return np.zeros((h, w, 3), np.uint8)", "def _read_empty(self):\n self.image_missing = True\n\n return_img = {}\n return_metadata = {}\n\n try:\n rows, cols = self.grid.subset_shape\n except AttributeError:\n rows, cols = self.grid.shape\n\n for param in self.parameters:\n data = np.full((rows, cols), np.nan)\n return_img[param] = data.flatten()\n return_metadata[param] = {'image_missing': 1}\n\n return return_img, return_metadata", "def make_image(self, path):\n\t\treturn None", "def EmptyBitmap(*args, **kwargs):\n val = _gdi_.new_EmptyBitmap(*args, **kwargs)\n return val", "def image(self):\r\n\r\n if sys.version < '3':\r\n imageio = StringIO.StringIO(self._image_data)\r\n else:\r\n imageio = StringIO.BytesIO(self._image_data)\r\n\r\n try:\r\n source_image = PILImage.open(imageio)\r\n img = PILImage.new('RGBA', source_image.size, (0, 0, 0, 0))\r\n\r\n if source_image.mode == 'L':\r\n alpha = source_image.split()[0]\r\n transparency = source_image.info.get('transparency')\r\n mask = PILImage.eval(alpha, lambda a: 0 if a == transparency else 255)\r\n img.paste(source_image, (0, 0), mask=mask)\r\n else:\r\n img.paste(source_image, (0, 0))\r\n except IOError, e:\r\n raise PILUnavailableError(e.args[0].split()[1])\r\n finally:\r\n imageio.close()\r\n\r\n self.original_width, self.original_height = img.size\r\n\r\n # Crop the image searching for the smallest possible bounding box\r\n # without losing any non-transparent pixel.\r\n # This crop is only used if the crop flag is set in the config.\r\n if self.config['crop']:\r\n img = img.crop(img.split()[-1].getbbox())\r\n return img", "def no_image(cls):\n def eval_fn(p: Posting):\n if p.img_url is None:\n return f\"I couldn't find any images for this posting.\"\n\n return cls(eval_fn)", "def image(self):\n return self._image", "def getimage(self):", "def image(self):\n if self.hasImage():\n return self._pixmapHandle.pixmap().toImage()\n return None", "def blank_image(height, width):\n all_green = create_uniform_image(height, width, [0, 255, 0])\n return all_green", "def blanck_picture(img):\r\n blank_image = np.zeros((img.shape[0],img.shape[1],3), np.uint8)\r\n blank_image[0:, 0:] = 0, 0, 0\r\n return blank_image", "def blank(width, height, channels=3, value=0):\n blank_image = np.full((height, width, channels), value, np.uint8)\n return Image(img=blank_image)", 
"def get_missing_image(self):\n # This class should have a 'name' property so it mimics the Django file\n # field.\n return MissingProductImage()", "def get_image(self) -> Image.Image:\n raw_buffer_data = self.get_raw_frame_buffer_object_data()\n image = Image.frombytes(\n \"RGBA\",\n self.get_pixel_shape(),\n raw_buffer_data,\n \"raw\",\n \"RGBA\",\n 0,\n -1,\n )\n return image", "def obimg():\n # The client might make a call to get a pic for an object which might\n # not have one. Better to return a blank than an error in that case.\n imgdat = B64ENCTRANSPARENT4X4PNG\n try:\n dsType = dbacc.reqarg(\"dt\", \"string\", required=True)\n dsId = dbacc.reqarg(\"di\", \"string\", required=True)\n inst = dbacc.cfbk(dsType, \"dsId\", dsId)\n if inst:\n picfldmap = {\"Point\": \"pic\"}\n imgdat = inst[picfldmap[dsType]]\n imgdat = base64.b64decode(imgdat)\n except ValueError as e:\n return util.serve_value_error(e)\n return util.respond(imgdat, mimetype=\"image/png\")", "def get_image():\n return models.Image.objects.all()[0]", "def get_blank_image(width: int, height: int, n_channels: int, cval=255) -> np.ndarray:\n if n_channels == 0:\n image = np.zeros((height, width)) + 255\n else:\n image = np.zeros((height, width, n_channels)) + cval\n return image.astype(\"uint8\")", "def _clear(self):\n\n self.image = Image.new(\"RGB\", (self._width, self._height), self._color)", "def generate_image(self):\n pass", "def _ensure_empty_image_ok(self):\n if self.ignore_empty:\n return\n\n if len(self) > 1:\n raise RuntimeError(\n \"Cannot write None image at extension %d\" % len(self))\n if 'ndims' in self[0]._info:\n raise RuntimeError(\"Can only write None images to extension zero, \"\n \"which already exists\")", "def get_image ( self, object ):\n return self.image", "def create_one_image(attrs=None):\n attrs = attrs or {}\n\n # Set default attribute\n image_info = {\n 'id': str(uuid.uuid4()),\n 'name': 'image-name' + uuid.uuid4().hex,\n 'owner': 'image-owner' + uuid.uuid4().hex,\n 'container_format': '',\n 'disk_format': '',\n 'min_disk': 0,\n 'min_ram': 0,\n 'is_public': True,\n 'protected': False,\n 'properties': {'Alpha': 'a', 'Beta': 'b', 'Gamma': 'g'},\n 'status': 'status' + uuid.uuid4().hex,\n }\n\n # Overwrite default attributes if there are some attributes set\n image_info.update(attrs)\n\n return image.Image(**image_info)" ]
[ "0.7410479", "0.734866", "0.7067597", "0.69549996", "0.6941064", "0.68790126", "0.6719695", "0.6615003", "0.6612284", "0.66046363", "0.6579427", "0.65710306", "0.6570533", "0.6521228", "0.6519786", "0.6500374", "0.64950687", "0.64875454", "0.64772147", "0.64727896", "0.6462663", "0.64451796", "0.6432417", "0.6422724", "0.63404447", "0.63294715", "0.62986755", "0.62791866", "0.62776905", "0.62618655" ]
0.82468075
0
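The _blankimage document above just builds a 1x1 fully transparent Tk PhotoImage; a minimal sketch (a Tk root — and therefore a display — must exist before a PhotoImage can be created):

import tkinter as TK

def _blankimage():
    img = TK.PhotoImage(width=1, height=1)
    img.blank()  # clear all pixels -> fully transparent image
    return img

root = TK.Tk()
img = _blankimage()
print(img.width(), img.height())  # -> 1 1
root.destroy()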
Create an invisible polygon item on canvas self.cv
def _createpoly(self): return self.cv.create_polygon((0, 0, 0, 0, 0, 0), fill="", outline="")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initiate(self):\n pts = []\n for point in self.points:\n pt = gr.Point(point[0],point[1])\n pts.append(pt)\n\n self.vis = [gr.Polygon(pts)]\n\n self.draw()", "def draw_me(self, canvas):\n return canvas.create_polygon(self.dot, outline=self.poly, fill=self.color)", "def __draw_polygon(self, event, klick):\n global creating_polygon\n curX = self.canvas.canvasx(event.x)\n curY = self.canvas.canvasy(event.y)\n if not klick and len(self.polygon_points) >= 2:\n c_r_x, c_r_y = self.get_canvas_relative_coords((self.polygon_points[-2], self.polygon_points[-1]))\n distanceX = curX - c_r_x\n distanceY = curY - c_r_y\n if pow(pow(distanceX, 2) + pow(distanceY, 2), 1 / 2) <= 15:\n return\n image_relative_x, image_relative_y = self.get_image_relative_coords((curX, curY))\n self.polygon_points.extend((image_relative_x, image_relative_y))\n self.polygon_groundstructure.append(self.canvas.create_rectangle(curX - 2, curY - 2, curX + 2, curY + 2,\n outline='magenta', width=1,\n activewidth=2))\n if not creating_polygon: # start with a new polygon\n creating_polygon = True\n return\n else: # draw a line between the last points\n c_r_x1, c_r_y1 = self.get_canvas_relative_coords((self.polygon_points[-4], self.polygon_points[-3]))\n c_r_x2, c_r_y2 = self.get_canvas_relative_coords((self.polygon_points[-2], self.polygon_points[-1]))\n self.polygon_groundstructure.append(self.canvas.create_line([c_r_x1, c_r_y1, c_r_x2, c_r_y2],\n fill='magenta', width=2))", "def _drawpoly(self, polyitem, coordlist, fill=None,\n outline=None, width=None, top=False):\n cl = []\n for x, y in coordlist:\n cl.append(x * self.xscale)\n cl.append(-y * self.yscale)\n self.cv.coords(polyitem, *cl)\n if fill is not None:\n self.cv.itemconfigure(polyitem, fill=fill)\n if outline is not None:\n self.cv.itemconfigure(polyitem, outline=outline)\n if width is not None:\n self.cv.itemconfigure(polyitem, width=width)\n if top:\n self.cv.tag_raise(polyitem)", "def remove_drawing_poly(self):\n\n self.drawing_poly = QPolygonF()\n self.drawing_points_coords = []\n\n for p in self.drawing_points:\n p.setVisible(False)\n\n for line in self.connecting_line_list:\n line.setVisible(False)\n if self.connecting_line:\n self.connecting_line.setVisible(False)\n self.connecting_line = None\n self.first_draw = True\n if self.set_tooltip:\n self.set_tooltip(\"\")", "def event_click_polygon(self, event):\n\n event_x_pos = self.canvasx(event.x)\n event_y_pos = self.canvasy(event.y)\n drag_lims = self.get_vector_object(self.variables.current_shape_id).image_drag_limits\n if drag_lims:\n canvas_lims = self.image_coords_to_canvas_coords(drag_lims)\n if event_x_pos < canvas_lims[0]:\n event_x_pos = canvas_lims[0]\n elif event_x_pos > canvas_lims[2]:\n event_x_pos = canvas_lims[2]\n if event_y_pos < canvas_lims[1]:\n event_y_pos = canvas_lims[1]\n elif event_y_pos > canvas_lims[3]:\n event_y_pos = canvas_lims[3]\n\n if self.variables.actively_drawing_shape:\n old_coords = self.get_shape_canvas_coords(self.variables.current_shape_id)\n new_coords = list(old_coords) + [event_x_pos, event_y_pos]\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, new_coords)\n # re-initialize shape if we're not actively drawing\n else:\n new_coords = (event.x, event.y, event_x_pos+1, event_y_pos+1)\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, new_coords)\n self.variables.actively_drawing_shape = True", "def addContour(self, coordinates):\r\n\r\n # instantiate a graphics item\r\n contour = gc.GraphicsCollection()\r\n 
# make it polygon type and populate its points\r\n points = [QtCore.QPointF(x, y) for x, y in zip(*coordinates)]\r\n contour.Polygon(QtGui.QPolygonF(points), self.name)\r\n # set its properties\r\n contour.pen.setColor(self.pencolor)\r\n contour.pen.setWidth(self.penwidth)\r\n contour.pen.setCosmetic(True) # no pen thickness change when zoomed\r\n contour.brush.setColor(self.brushcolor)\r\n\r\n # add contour as a GraphicsItem to the scene\r\n # these are the objects which are drawn in the GraphicsView\r\n self.contour_item = PGraphicsItem.GraphicsItem(contour, self.scene)\r\n\r\n # add the contour as item to the scene\r\n self.scene.addItem(self.contour_item)", "def _draw_polygon(self):\n xs, ys = zip(*self._xys) if self._xys else ([], [])\n self._selection_artist.set_data(xs, ys)\n self._update_box()\n # Only show one tool handle at the start and end vertex of the polygon\n # if the polygon is completed or the user is locked on to the start\n # vertex.\n if (self._selection_completed\n or (len(self._xys) > 3\n and self._xys[-1] == self._xys[0])):\n self._polygon_handles.set_data(xs[:-1], ys[:-1])\n else:\n self._polygon_handles.set_data(xs, ys)\n self.update()", "def generatePolygons():", "def decorate_scene():\n make_polygon( (100,100),(120,140),(270,70) )\n make_polygon( (300,10), (300,550), (340,452),(380,300), (330,50))\n make_polygon( (200,450), (100,450), (100,500), (200,500) )\n make_polygon( (130,320), (150,300), (140,280) )\n return", "def create(self):\n self.parent.copyCurrentWinState(self.pltw)\n # add a new vector\n vname = self.pltw.curvelist[self.cpos].name + 'BL'\n (nvec, npt) = np.shape(self.pltw.blklst[self.blkno])\n if self.pltw.pasteVector(self.data[2], self.blkno, vname):\n xname = self.pltw.getVnam(self.blkno, self.xpos)\n xvinfo = vectInfo(self.blkno, self.xpos, xname)\n yvinfo = vectInfo(self.blkno, nvec, vname)\n self.pltw.curvelist.append(curveInfo(vname, xvinfo, yvinfo))\n self.pltw.updatePlot()\n self.pltw.dirty = True\n self.pltw.activecurv = self.cpos\n self.parent.updateUI()\n self.hide()", "def draw_aim(self):\n polygon(screen, self.color, [(self.x, self.y), (self.x + self.r * 1.71 / 2, self.y - self.r / 2),\n (self.x + self.r * 1.71, self.y), (self.x + self.r * 1.71, self.y + self.r),\n (self.x + self.r * 1.71 / 2, self.y + 3 * self.r / 2), (self.x, self.y + self.r)])", "def create_surface(self):\n oval = graphics.Oval(graphics.Point(5,0), graphics.Point(300, -10))\n oval.setFill(\"gray\")\n return oval", "def remove_drawing_rect(self):\n self.drawing_rect = QPolygonF()\n if self.connecting_rect:\n self.connecting_rect.setVisible(False)\n self.connecting_rect = None\n self.first_draw = True", "def draw(self):\n #for (x, y) in self.coords:\n # pyxel.rect(\n # (x + self.x) * 4,\n # (y + self.y) * 4,\n # (x + self.x) * 4 + 3,\n # (y + self.y) * 4 + 3,\n # self.color)", "def addPolygon(self, verts, color=[220,0,0], thickness=1.0, alpha=255,\n linestyle='=', fill=None, selectable=True, movable=False,\n selectThickness=2.0, selectColor=None, closed=True,\n name='QIVPolygon', noadd=False, isCosmetic=False):\n\n # create the polygon object\n polygon = QIVPolygon(verts, color=color, thickness=thickness,\n alpha=alpha, linestyle=linestyle, fill=fill, selectable=selectable,\n movable=movable, closed=closed, view=self, name=name,isCosmetic=isCosmetic)\n\n if (not noadd):\n # add the polygon to the scene\n self.scene.addItem(polygon)\n\n # and add it to our list of items\n self.sceneItems.append(polygon)\n\n return polygon", "def create_new_polygon(self, coords, 
**options):\n\n if 'outline' not in options:\n options['outline'] = self.variables.foreground_color\n if 'width' not in options:\n options['width'] = self.variables.poly_border_width\n if 'fill' not in options:\n options['fill'] = ''\n\n shape_id = self.create_polygon(*coords, **options)\n self.variables.vector_objects[str(shape_id)] = VectorObject(SHAPE_TYPES.POLYGON, options)\n self.variables.shape_ids.append(shape_id)\n self.set_shape_pixel_coords_from_canvas_coords(shape_id, coords)\n self.variables.current_shape_id = shape_id\n return shape_id", "def _finish_polygon(self):\n global undo_stack, choose_polygon\n if len(self.polygon_points) < 6:\n messagebox.showinfo(title='Info', message='Too few points for a polygon')\n return 'too_few_points'\n relative_poly_points = []\n for p in range(0, len(self.polygon_points), 2):\n relative_poly_points.extend(self.get_canvas_relative_coords((self.polygon_points[p],\n self.polygon_points[p + 1])))\n if choose_polygon:\n undo_stack.append('p')\n self.polygons.append(self.canvas.create_polygon(relative_poly_points,\n outline='blue', activewidth=3, width=1,\n fill='magenta', stipple='gray50'))\n self.canvas.tag_bind(self.polygons[-1], '<ButtonPress-1>', self.callback_click_polygon)\n self.canvas.tag_bind(self.polygons[-1], '<ButtonRelease-1>', self.callback_release_polygon)\n self.canvas.tag_bind(self.polygons[-1], '<B1-Motion>', self.callback_move_polygon)\n for p in self.polygon_groundstructure:\n self.canvas.delete(p)\n self.polygon_points_history[self.polygons[-1]] = np.reshape(np.asarray(self.polygon_points),\n (round(len(self.polygon_points) / 2),\n 2))\n self.polygon_points.clear()\n self.polygon_groundstructure.clear()\n self.parent_class.activate_save_bt()", "def draw(self, context):\n # TODO: Add this color to Add-on option\n color = (1.0, 1.0, 0.5, 1.0)\n alpha = 2.0 * math.atan((18.0 / 2.0) / self.lens.value[0])\n dist = 0.5 / (math.tan(alpha / 2.0))\n if self.height.value[0] == 0:\n width = 0.7\n else:\n width = self.width.value[0] / self.height.value[0]\n \n points = dict()\n points['border'] = [None, None, None, None]\n points['center'] = [None]\n \n # Points of face\n points['right_eye'] = [\n mathutils.Vector((0.25, 0.25, self.distance.value[0] - dist)),\n mathutils.Vector((0.3, 0.25, self.distance.value[0] - dist)),\n mathutils.Vector((0.3, 0.0, self.distance.value[0] - dist)),\n mathutils.Vector((0.25, 0.0, self.distance.value[0] - dist)),\n mathutils.Vector((0.25, 0.25, self.distance.value[0] - dist))\n ]\n points['left_eye'] = [\n mathutils.Vector((-0.25, 0.25, self.distance.value[0] - dist)),\n mathutils.Vector((-0.3, 0.25, self.distance.value[0] - dist)),\n mathutils.Vector((-0.3, 0.0, self.distance.value[0] - dist)),\n mathutils.Vector((-0.25, 0.0, self.distance.value[0] - dist)),\n mathutils.Vector((-0.25, 0.25, self.distance.value[0] - dist))\n ]\n \n points['mouth'] = [\n mathutils.Vector((-0.40912365913391113, -0.11777058243751526, self.distance.value[0] - dist)),\n mathutils.Vector((-0.3441678285598755, -0.15873458981513977, self.distance.value[0] - dist)),\n mathutils.Vector((-0.2563667893409729, -0.1998385488986969, self.distance.value[0] - dist)),\n mathutils.Vector((-0.18191590905189514, -0.22385218739509583, self.distance.value[0] - dist)),\n mathutils.Vector((-0.10375960171222687, -0.23957833647727966, self.distance.value[0] - dist)),\n mathutils.Vector((0.0, -0.2464955747127533, self.distance.value[0] - dist)),\n mathutils.Vector((0.10375960171222687, -0.23957833647727966, self.distance.value[0] - dist)),\n 
mathutils.Vector((0.18191590905189514, -0.22385218739509583, self.distance.value[0] - dist)),\n mathutils.Vector((0.2563667893409729, -0.1998385488986969, self.distance.value[0] - dist)),\n mathutils.Vector((0.3441678285598755, -0.15873458981513977, self.distance.value[0] - dist)),\n mathutils.Vector((0.40912365913391113, -0.11777058243751526, self.distance.value[0] - dist))\n ]\n \n # Put border points of camera to basic position\n points['border'][0] = mathutils.Vector((\n -width / 2.0,\n -0.5,\n self.distance.value[0] - dist,\n 1.0\n ))\n points['border'][1] = mathutils.Vector((\n width / 2.0,\n -0.5,\n self.distance.value[0] - dist,\n 1.0\n ))\n points['border'][2] = mathutils.Vector((\n width / 2.0,\n 0.5,\n self.distance.value[0] - dist,\n 1.0\n ))\n points['border'][3] = mathutils.Vector((\n -width / 2.0,\n 0.5,\n self.distance.value[0] - dist,\n 1.0\n ))\n \n # Center of view\n points['center'][0] = mathutils.Vector((\n 0.0,\n 0.0,\n self.distance.value[0],\n 1.0\n ))\n \n # Create transformation (rotation) matrix\n rot_matrix = mathutils.Quaternion(self.rotation.value).to_matrix().to_4x4()\n \n # Transform points in all point groups\n for point_group in points.values():\n for index in range(len(point_group)):\n # Rotate points\n point_group[index] = (rot_matrix * point_group[index]).to_3d()\n # Move points\n point_group[index] += mathutils.Vector(self.location.value)\n\n border = points['border']\n center = points['center']\n\n # Store glColor4f\n col_prev = bgl.Buffer(bgl.GL_FLOAT, [4])\n bgl.glGetFloatv(bgl.GL_COLOR, col_prev)\n\n bgl.glColor4f(color[0], color[1], color[2], color[3])\n\n # Draw username\n coord_2d = location_3d_to_region_2d(\n context.region,\n context.space_data.region_3d,\n center[0])\n\n # When coordinates are not outside window, then draw the name of avatar\n if coord_2d is not None:\n # TODO: add to Add-on options\n font_id, font_size, my_dpi = 0, 12, 72\n blf.size(font_id, font_size, my_dpi)\n blf.position(font_id, coord_2d[0] + 2, coord_2d[1] + 2, 0)\n blf.draw(font_id, str(self.username))\n\n # Get & convert the Perspective Matrix of the current view/region.\n persp_matrix = context.space_data.region_3d.perspective_matrix\n temp_mat = [persp_matrix[j][i] for i in range(4) for j in range(4)]\n persp_buff = bgl.Buffer(bgl.GL_FLOAT, 16, temp_mat)\n \n # Store previous OpenGL settings.\n # Store MatrixMode\n matrix_mode_prev = bgl.Buffer(bgl.GL_INT, [1])\n bgl.glGetIntegerv(bgl.GL_MATRIX_MODE, matrix_mode_prev)\n matrix_mode_prev = matrix_mode_prev[0]\n \n # Store projection matrix\n proj_matrix_prev = bgl.Buffer(bgl.GL_DOUBLE, [16])\n bgl.glGetFloatv(bgl.GL_PROJECTION_MATRIX, proj_matrix_prev)\n \n # Store Line width\n line_width_prev = bgl.Buffer(bgl.GL_FLOAT, [1])\n bgl.glGetFloatv(bgl.GL_LINE_WIDTH, line_width_prev)\n line_width_prev = line_width_prev[0]\n \n # Store GL_BLEND\n blend_prev = bgl.Buffer(bgl.GL_BYTE, [1])\n bgl.glGetFloatv(bgl.GL_BLEND, blend_prev)\n blend_prev = blend_prev[0]\n \n # Store GL_DEPTH_TEST\n depth_test_prev = bgl.Buffer(bgl.GL_BYTE, [1])\n bgl.glGetFloatv(bgl.GL_DEPTH_TEST, depth_test_prev)\n depth_test_prev = depth_test_prev[0]\n \n # Store GL_LINE_STIPPLE\n line_stipple_prev = bgl.Buffer(bgl.GL_BYTE, [1])\n bgl.glGetFloatv(bgl.GL_LINE_STIPPLE, line_stipple_prev)\n line_stipple_prev = line_stipple_prev[0]\n \n # Prepare for 3D drawing\n bgl.glLoadIdentity()\n bgl.glMatrixMode(bgl.GL_PROJECTION)\n bgl.glLoadMatrixf(persp_buff)\n bgl.glEnable(bgl.GL_BLEND)\n bgl.glEnable(bgl.GL_DEPTH_TEST)\n \n # Draw \"Look At\" point\n 
bgl.glLineWidth(1)\n bgl.glBegin(bgl.GL_LINES)\n bgl.glColor4f(color[0], color[1], color[2], color[3])\n \n bgl.glVertex3f(\n self.location.value[0] + 0.1,\n self.location.value[1],\n self.location.value[2]\n )\n bgl.glVertex3f(\n self.location.value[0] - 0.1,\n self.location.value[1],\n self.location.value[2]\n )\n \n bgl.glVertex3f(\n self.location.value[0],\n self.location.value[1] + 0.1,\n self.location.value[2]\n )\n bgl.glVertex3f(\n self.location.value[0],\n self.location.value[1] - 0.1,\n self.location.value[2]\n )\n \n bgl.glVertex3f(\n self.location.value[0],\n self.location.value[1],\n self.location.value[2] + 0.1\n )\n bgl.glVertex3f(\n self.location.value[0],\n self.location.value[1],\n self.location.value[2] - 0.1\n )\n \n bgl.glEnd()\n\n # Draw border of camera\n bgl.glBegin(bgl.GL_LINE_STRIP)\n bgl.glVertex3f(border[0][0], border[0][1], border[0][2])\n bgl.glVertex3f(border[1][0], border[1][1], border[1][2])\n bgl.glVertex3f(border[2][0], border[2][1], border[2][2])\n bgl.glVertex3f(border[3][0], border[3][1], border[3][2])\n bgl.glVertex3f(border[0][0], border[0][1], border[0][2])\n bgl.glEnd()\n \n # Draw left eye\n bgl.glBegin(bgl.GL_LINE_STRIP)\n for point in points['left_eye']:\n bgl.glVertex3f(point[0], point[1], point[2])\n bgl.glEnd()\n\n # Draw right eye\n bgl.glBegin(bgl.GL_LINE_STRIP)\n for point in points['right_eye']:\n bgl.glVertex3f(point[0], point[1], point[2])\n bgl.glEnd()\n \n # Draw mouth\n bgl.glBegin(bgl.GL_LINE_STRIP)\n for point in points['mouth']:\n bgl.glVertex3f(point[0], point[1], point[2])\n bgl.glEnd()\n \n # Draw dashed lines from center of \"camera\" to border of camera \n bgl.glEnable(bgl.GL_LINE_STIPPLE)\n bgl.glBegin(bgl.GL_LINES)\n bgl.glVertex3f(border[0][0], border[0][1], border[0][2])\n bgl.glVertex3f(center[0][0], center[0][1], center[0][2])\n bgl.glVertex3f(border[1][0], border[1][1], border[1][2])\n bgl.glVertex3f(center[0][0], center[0][1], center[0][2])\n bgl.glVertex3f(border[2][0], border[2][1], border[2][2])\n bgl.glVertex3f(center[0][0], center[0][1], center[0][2])\n bgl.glVertex3f(border[3][0], border[3][1], border[3][2])\n bgl.glVertex3f(center[0][0], center[0][1], center[0][2])\n bgl.glEnd()\n \n # Draw dashed line from Look At point and center of camera\n bgl.glBegin(bgl.GL_LINES)\n bgl.glVertex3f(\n self.location.value[0],\n self.location.value[1],\n self.location.value[2]\n )\n bgl.glVertex3f(center[0][0], center[0][1], center[0][2])\n bgl.glEnd()\n bgl.glDisable(bgl.GL_LINE_STIPPLE)\n\n # Restore previous OpenGL settings\n bgl.glLoadIdentity()\n bgl.glMatrixMode(matrix_mode_prev)\n bgl.glLoadMatrixf(proj_matrix_prev)\n bgl.glLineWidth(line_width_prev)\n if not blend_prev:\n bgl.glDisable(bgl.GL_BLEND)\n if not line_stipple_prev:\n bgl.glDisable(bgl.GL_LINE_STIPPLE)\n if not depth_test_prev:\n bgl.glDisable(bgl.GL_DEPTH_TEST)\n\n bgl.glColor4f(col_prev[0], col_prev[1], col_prev[2], col_prev[3])", "def _create_main_shape(self):\n\n a, b = gc( self.size/2,\n self._ZERO_DEGREES - self.angle,\n self._180_DEGREES + self.angle)\n self.wafer_points = zip(a,b)\n self.wafer_polygon = gdspy.Polygon(self.wafer_points, self.WAFER_LAYER)\n self.cell.add(self.wafer_polygon)", "def render(self):\n\n theta = self.angle*math.pi/180.0\n cth = math.cos(theta)\n sth = math.sin(theta)\n pts = []\n cornerpts = []\n\n for vertex in self.points:\n x = vertex[0] + self.pos[0] - self.anchor[0]\n y = vertex[1] + self.pos[1] - self.anchor[1]\n\n xt = x * cth - y * sth\n yt = x * sth + y * cth\n\n x = xt + self.anchor[0]\n y = yt + 
self.anchor[1]\n\n cornerpts.append([x,y])\n pts.append(gr.Point(self.scale * x, self.win.getHeight() - self.scale*y))\n\n self.corners = cornerpts\n self.vis = [gr.Polygon(pts)]", "def draw_polygon(self, *points, color=DEFAULT.color):", "def clean_area(screen,origin,width,height,color):\r\n ox,oy = origin\r\n points = [(ox,oy),(ox+width,oy),(ox+width,oy+height),(ox,oy+height),(ox,oy)]\r\n pygame.draw.polygon(screen, color, points, 0)", "def begin_poly(self):\n self._poly = [self._position]\n self._creatingPoly = True", "def add_poly_to_scene(self, polygon, point_marker_dict=None, curve_marker_dict=None, hole_mode=False):\n if hole_mode:\n poly = self.addPolygon(polygon, QPen(QColor(0, 0, 0, 0)), QBrush(QColor(255, 255, 255)))\n poly.setZValue(1)\n self.poly_list.append(poly)\n self.hole_list.append(poly)\n else:\n poly = self.addPolygon(polygon, QPen(QColor(0, 0, 0, 0)), QBrush(QColor(0, 0, 0, 50)))\n self.poly_list.append(poly)\n self.add_poly_corners(poly, point_marker_dict)\n self.add_poly_edges(poly, curve_marker_dict)\n poly.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsSelectable)\n poly.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsMovable)\n return poly", "def verts(self, xys):\n self._xys = [*xys, xys[0]]\n self._selection_completed = True\n self.set_visible(True)\n if self._draw_box and self._box is None:\n self._add_box()\n self._draw_polygon()", "def toggle_border_mode(self):\n for poly in self.poly_list:\n poly.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, False)\n poly.setBrush(QColor(0, 0, 0, 0))\n\n for point in self.point_marker_list:\n if point.childItems():\n point.childItems()[0].setVisible(True)\n # Enable selection of the edges of the polygon, if the edge has a marker display it\n for edge in self.edge_list:\n edge.childItems()[0].setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, True)\n if edge.childItems()[0].childItems():\n text = edge.childItems()[0].childItems()[0]\n text.setVisible(True)", "def drawPolygon(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n pass", "def draw_overlay(self):\n pass", "def _proc_polygon(self, tokens, filled):\n\n pts = [(p[\"x\"], p[\"y\"]) for p in tokens[\"points\"]]\n component = Polygon(pen=self.pen, points=pts, filled=filled)\n\n return component" ]
[ "0.6772693", "0.67152303", "0.67051923", "0.6295918", "0.62573355", "0.6244688", "0.62337166", "0.6097748", "0.60878134", "0.60610825", "0.6032351", "0.60145247", "0.5891362", "0.5884091", "0.58743083", "0.58682644", "0.58416355", "0.58405304", "0.5811396", "0.58092785", "0.58086634", "0.57212305", "0.57005346", "0.56867087", "0.566262", "0.564579", "0.56369907", "0.56263", "0.5622271", "0.562013" ]
0.69808215
0
Configure polygonitem polyitem according to provided
def _drawpoly(self, polyitem, coordlist, fill=None, outline=None, width=None, top=False):
    cl = []
    for x, y in coordlist:
        cl.append(x * self.xscale)
        cl.append(-y * self.yscale)
    self.cv.coords(polyitem, *cl)
    if fill is not None:
        self.cv.itemconfigure(polyitem, fill=fill)
    if outline is not None:
        self.cv.itemconfigure(polyitem, outline=outline)
    if width is not None:
        self.cv.itemconfigure(polyitem, width=width)
    if top:
        self.cv.tag_raise(polyitem)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_poly_corners(self, poly_item, marker_dict=None):\n poly = poly_item.polygon()\n\n for i in range(poly.size()):\n point = poly.at(i)\n p = self.addEllipse(-4, -4, 8, 8, self.LUBronze, self.LUBronze)\n p.setZValue(2) # Make sure corners always in front of polygon surfaces\n p.setParentItem(poly_item)\n p.__setattr__(\"localIndex\", int(i))\n p.setPos(point.x(), point.y())\n p.setFlag(QGraphicsItem.ItemIsSelectable)\n p.setFlag(QGraphicsItem.ItemIsMovable)\n self.point_coord_list = np.append(self.point_coord_list, [[p.x(), p.y()]], axis=0)\n\n self.potential_edge_splitters.append(p)\n\n # Used to pass markers when loading a g\n if marker_dict:\n if i in marker_dict:\n self.add_marker(p, marker_dict[i])\n text = p.childItems()[0]\n text.setVisible(False)", "def add_poly_to_scene(self, polygon, point_marker_dict=None, curve_marker_dict=None, hole_mode=False):\n if hole_mode:\n poly = self.addPolygon(polygon, QPen(QColor(0, 0, 0, 0)), QBrush(QColor(255, 255, 255)))\n poly.setZValue(1)\n self.poly_list.append(poly)\n self.hole_list.append(poly)\n else:\n poly = self.addPolygon(polygon, QPen(QColor(0, 0, 0, 0)), QBrush(QColor(0, 0, 0, 50)))\n self.poly_list.append(poly)\n self.add_poly_corners(poly, point_marker_dict)\n self.add_poly_edges(poly, curve_marker_dict)\n poly.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsSelectable)\n poly.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsMovable)\n return poly", "def add_poly_edges(self, poly_item, marker_dict=None):\n\n poly = poly_item.polygon()\n\n for i in range(1, poly.size() + 1):\n if i == poly.size():\n p1 = poly.at(i - 1)\n p2 = poly.at(0)\n index = -poly.size()\n\n else:\n p1 = poly.at(i - 1)\n p2 = poly.at(i)\n index = i\n\n line = self.addLine(QLineF(p1, p2))\n line.setZValue(-1)\n display_line = self.addLine(QLineF(p1, p2), QPen(self.LUBronze, 3))\n line.__setattr__(\"localIndex\", index)\n line.setParentItem(poly_item)\n display_line.setParentItem(line)\n self.edge_list.append(line)\n\n # Used to pass markers when loading a g\n if marker_dict:\n if i - 1 in marker_dict:\n self.add_marker(display_line, marker_dict[i - 1])\n display_line.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, False)\n text = display_line.childItems()[0]\n text.setVisible(False)", "def generatePolygons():", "def __init__(self, model, polygon, segments = None, strength = 1,\r\n variables = [], priors=[], snap_distance = 1E-10,\r\n snap = False, influence = None):\r\n\r\n import numpy as np\r\n import copy\r\n import matplotlib.path\r\n import math\r\n \r\n # Append this element to the specified model\r\n self.model = model\r\n model.elementlist.append(self)\r\n \r\n # This element adds water, so it also requires an influence range\r\n if influence is None:\r\n self.influence = self.model.domain_radius*2\r\n else:\r\n self.influence = influence\r\n \r\n # Complexify the polygon, if it isn't already complex\r\n polygon = self.complexify(polygon)\r\n \r\n # Prepare the polygon variable\r\n self.polygon = polygon\r\n \r\n # Is the polygon closed? If not, close it temporarily\r\n self.snap_distance = snap_distance\r\n if np.abs(self.polygon[0]-self.polygon[-1]) > self.snap_distance:\r\n self.polygon = np.asarray(list(self.polygon)+[self.polygon[0]])\r\n \r\n # Also create an array with real coordinates\r\n self.polygon_XY = np.column_stack((\r\n np.real(copy.copy(self.polygon))[:,np.newaxis],\r\n np.imag(copy.copy(self.polygon))[:,np.newaxis] ))\r\n\r\n # Is the polygon counter-clockwise? 
If not, correct it\r\n if self.are_vertices_clockwise(self.polygon_XY):\r\n self.polygon = np.flip(self.polygon)\r\n self.polygon_XY = np.flipud(self.polygon_XY)\r\n \r\n # Do we wish to subdivide the polygon?\r\n # First, check if the user specified a desired segment count\r\n if segments is None:\r\n self.segments = self.polygon.shape[0]-1\r\n else:\r\n self.segments = segments\r\n \r\n if self.segments < self.polygon.shape[0]-1:\r\n raise Exception('Prescribed number of line segments '+str(self.segments)+\" mustn't be smaller than the number of vertices \"+str(polygon.shape[0]-1)+'.')\r\n \r\n # Subdivide the polygon, if desired\r\n if self.segments > self.polygon.shape[0]-1:\r\n self.polygon_XY = self.subdivide_line(self.polygon_XY,self.segments)\r\n self.polygon = self.polygon_XY[:,0] + 1j*self.polygon_XY[:,1]\r\n \r\n # This is a hack: We shrink the polygon by a small amount. This should ensure \r\n # that no issues arise from evaluating points directly on the boundary; \r\n # there might be other ways to solve this issue alternatively\r\n self.polygon_XY = self.shrink_polygon(\r\n polygon = self.polygon_XY,\r\n offset = 1E-10)\r\n self.polygon = self.polygon_XY[:,0] + 1j*self.polygon_XY[:,1]\r\n \r\n # Un-close the polygon again\r\n self.polygon_XY = self.polygon_XY[:-1,:]\r\n self.polygon = self.polygon[:-1]\r\n \r\n # If vertex snapping is enabled, snap all outside vertices onto the domain edge\r\n if snap:\r\n self.snap_to_domain()\r\n \r\n # =====================================================================\r\n # Now some area-sink-specific work\r\n # =====================================================================\r\n \r\n # Get the angles of all segments to the x axis\r\n # required for the local coordinates, Strack 1989, 37.19\r\n self.alpha = np.zeros(self.segments)\r\n for seg in range(self.segments):\r\n if seg == self.segments-1:\r\n nextseg = 0\r\n else:\r\n nextseg = seg+1\r\n \r\n # Get the side vector, then normalize it \r\n temp = self.polygon[nextseg]-self.polygon[seg]\r\n temp /= np.abs(temp)\r\n \r\n self.alpha[seg] = math.asin(np.imag(temp))\r\n \r\n \r\n # Get the central point of the polygon\r\n self.zc = np.mean(self.polygon)\r\n \r\n # Calculate the area of the polygon with the shoelace formula:\r\n self.A = self.get_polygon_area()\r\n \r\n # Calculate the coefficients c0, c1, c2 for all segments\r\n self.L = np.zeros(self.segments)\r\n for seg in range(self.segments):\r\n \r\n if seg == self.segments-1:\r\n nextseg = 0\r\n else:\r\n nextseg = seg+1\r\n \r\n # Save the length of the segment\r\n self.L[seg] = np.abs(self.polygon[nextseg]-self.polygon[seg])\r\n \r\n # Get strength parameters for each vertex\r\n self.strength = strength\r\n \r\n # Extract target variables\r\n self.variables = variables\r\n self.priors = priors\r\n \r\n # # Prepare the matrix block containing the effect of this element onto \r\n # # itself for future use in solving the linear system. The matrix requires\r\n # # subtraction of the A_star variable from its diagonal entries for completion\r\n # self.block = self.matrix_contribution()\r\n \r\n # Check if the prior matches the number of parameters\r\n if len(self.priors) != len(self.variables):\r\n raise Exception('Number of priors must match number of unknown variables. 
Number of priors: '+str(self.priors)+' / Number of unknown variables: '+str(len(self.variables)))\r\n \r\n if len(self.variables) > 0:\r\n # There are some model variables specified\r\n for idx,var in enumerate(self.variables):\r\n self.model.num_params += 1\r\n exec(\"self.model.params += [self.%s]\" % var)\r\n self.model.priors += [self.priors[idx]]\r\n self.model.variables += [var]\r\n if 'name' in list(self.priors[idx].keys()):\r\n self.model.param_names += [self.priors[idx]['name']] \r\n else: \r\n self.model.param_names += ['unknown']", "def generatePolygons(self, *args, **kwargs): \n return 'var PloneMapPolygons = [' + \\\n ''.join([\"{ 'id': '%s', 'path' : %s,'title':'%s'},\" % (object.id, object.polygon, object.Title()) \n for object in self.context.objectValues() \n if hasattr(object, 'polygon') and len(object.polygon) > 0 ])[:-1] \\\n + '];'", "def addPolygon(self, verts, color=[220,0,0], thickness=1.0, alpha=255,\n linestyle='=', fill=None, selectable=True, movable=False,\n selectThickness=2.0, selectColor=None, closed=True,\n name='QIVPolygon', noadd=False, isCosmetic=False):\n\n # create the polygon object\n polygon = QIVPolygon(verts, color=color, thickness=thickness,\n alpha=alpha, linestyle=linestyle, fill=fill, selectable=selectable,\n movable=movable, closed=closed, view=self, name=name,isCosmetic=isCosmetic)\n\n if (not noadd):\n # add the polygon to the scene\n self.scene.addItem(polygon)\n\n # and add it to our list of items\n self.sceneItems.append(polygon)\n\n return polygon", "def Polygon(self, polyline = False):\n\n from geographiclib.polygonarea import PolygonArea\n return PolygonArea(self, polyline)", "def __init__(self, dot=dif, color=BLACK, poly=BLACK):\n super(Polygon, self).__init__(color, poly)\n self.dot = dot", "def __init__(self, vertices, subs=None, **kwargs):\n super(Polygon, self).__init__(vertices, ring=True, **kwargs)\n self._geotype = \"Polygon\"\n if subs is not None:\n self.subs = list(subs)\n else:\n self.subs = []\n return", "def __init__(self, model, polygon, segments = None, k = 0.1,\r\n variables = [], priors=[], snap_distance = 1E-10,\r\n zero_cutoff = 1E-10, snap = True):\r\n\r\n import numpy as np\r\n import copy\r\n import matplotlib.path\r\n \r\n # Append this element to the specified model\r\n self.model = model\r\n model.elementlist.append(self)\r\n model.linear_solver = True\r\n \r\n # Prepare the polygon variable\r\n self.polygon = polygon\r\n \r\n self.polygon = self.complexify(self.polygon)\r\n \r\n self.snap_distance = snap_distance\r\n self.zero_cutoff = zero_cutoff\r\n \r\n # Is the polygon closed? If not, close it temporarily\r\n if np.abs(self.polygon[0]-self.polygon[-1]) > self.snap_distance:\r\n self.polygon = np.asarray(list(self.polygon)+[self.polygon[0]])\r\n \r\n # Also create an array with real coordinates\r\n self.polygon_XY = np.column_stack((\r\n np.real(copy.copy(self.polygon))[:,np.newaxis],\r\n np.imag(copy.copy(self.polygon))[:,np.newaxis] ))\r\n\r\n # Is the polygon counter-clockwise? 
If not, correct it\r\n if self.are_vertices_clockwise(self.polygon_XY):\r\n self.polygon = np.flip(self.polygon)\r\n self.polygon_XY = np.flipud(self.polygon_XY)\r\n \r\n # Do we wish to subdivide the polygon?\r\n # First, check if the user specified a desired segment count\r\n if segments is None:\r\n self.segments = self.polygon.shape[0]-1\r\n else:\r\n self.segments = segments\r\n \r\n if self.segments < self.polygon.shape[0]-1:\r\n raise Exception('Prescribed number of line segments '+str(self.segments)+\" mustn't be smaller than the number of vertices \"+str(polygon.shape[0]-1)+'.')\r\n \r\n # Subdivide the polygon, if desired\r\n if self.segments > self.polygon.shape[0]-1:\r\n self.polygon_XY = self.subdivide_line(self.polygon_XY,self.segments)\r\n self.polygon = self.polygon_XY[:,0] + 1j*self.polygon_XY[:,1]\r\n \r\n # Un-close the polygon again\r\n self.polygon_XY = self.polygon_XY[:-1,:]\r\n self.polygon = self.polygon[:-1]\r\n \r\n # If vertex snapping is enabled, snap all outside vertices onto the domain edge\r\n if snap:\r\n self.snap_to_domain()\r\n\r\n # This is a hack: We shrink the polygon by a small amount. This ensures \r\n # that no issues arise from evaluating points directly on the boundary, \r\n # and allows us to consider inhomogeneities directly bounding each other; \r\n # there might be other ways to solve this issue alternatively\r\n self.polygon_XY = self.shrink_polygon(\r\n polygon = self.polygon_XY,\r\n offset = 1E-10)\r\n self.polygon = self.polygon_XY[:,0] + 1j*self.polygon_XY[:,1]\r\n\r\n # The control points of the inhomogeneity are simply its vertices\r\n # This is required for the linear solver\r\n self.zc = self.polygon\r\n \r\n # Raise an exception if this inhomogeneity intersects any of the previous\r\n # inhomogeneities\r\n for e in self.model.elementlist[:-1]:\r\n if isinstance(e, ElementInhomogeneity):\r\n if any(e.are_points_inside_polygon(self.zc)):\r\n raise Exception('Inhomogeneities may not intersect each other.')\r\n \r\n # Create a path with the edges of the polygon\r\n # We can use this path to find out if evaluation points are inside or \r\n # or outside the inhomogeneity.\r\n self.linepath = matplotlib.path.Path(self.polygon_XY)\r\n \r\n # Get strength parameters for each vertex\r\n self.strength = np.ones(self.segments)\r\n \r\n # Assign the hydraulic conductivity of the inhomogeneity\r\n self.k = k\r\n \r\n # Extract target variables\r\n self.variables = variables\r\n self.priors = priors\r\n \r\n # Prepare the matrix block containing the effect of this element onto \r\n # itself for future use in solving the linear system. The matrix requires\r\n # subtraction of the A_star variable from its diagonal entries for completion\r\n self.block = self.matrix_contribution()\r\n \r\n # Check if the prior matches the number of parameters\r\n if len(self.priors) != len(self.variables):\r\n raise Exception('Number of priors must match number of unknown variables. 
Number of priors: '+str(self.priors)+' / Number of unknown variables: '+str(len(self.variables)))\r\n \r\n if len(self.variables) > 0:\r\n # There are some model variables specified\r\n for idx,var in enumerate(self.variables):\r\n self.model.num_params += 1\r\n exec(\"self.model.params += [self.%s]\" % var)\r\n self.model.priors += [self.priors[idx]]\r\n self.model.variables += [var]\r\n if 'name' in list(self.priors[idx].keys()):\r\n self.model.param_names += [self.priors[idx]['name']] \r\n else: \r\n self.model.param_names += ['unknown']", "def _getshapepoly(self, polygon, compound=False):\n if self._resizemode == \"user\" or compound:\n t11, t12, t21, t22 = self._shapetrafo\n elif self._resizemode == \"auto\":\n l = max(1, self._pensize/5.0)\n t11, t12, t21, t22 = l, 0, 0, l\n elif self._resizemode == \"noresize\":\n return polygon\n return tuple((t11*x + t12*y, t21*x + t22*y) for (x, y) in polygon)", "def __init__(self, pl, matrix, materialnodebysymbol):\n super(BoundPolygons, self).__init__(pl, matrix, materialnodebysymbol)", "def PolygonPatch(polygon, **kwargs):\n return PathPatch(PolygonPath(polygon), **kwargs)", "def _proc_polygon(self, tokens, filled):\n\n pts = [(p[\"x\"], p[\"y\"]) for p in tokens[\"points\"]]\n component = Polygon(pen=self.pen, points=pts, filled=filled)\n\n return component", "def draw_shape_polygon(self, poly, xform, colour):\n pts = [xform.chain(p) for p in poly.points]\n self.canvas.polygon([(p.x, p.y) for p in pts], outline=colour)", "def delete_polygon(self, poly: QGraphicsPolygonItem, delete_from_coord_list=False):\n\n self.poly_list.remove(poly)\n\n if poly in self.hole_list:\n self.hole_list.remove(poly)\n\n for item in poly.childItems():\n if isinstance(item, PyQt5.QtWidgets.QGraphicsLineItem):\n self.edge_list.remove(item)\n if item in self.potential_edge_splitters:\n self.potential_edge_splitters.remove(item)\n\n if delete_from_coord_list:\n for point in self.poly_to_list(poly, \"Global\"):\n self.point_coord_list = np.delete(self.point_coord_list, np.where(\n np.all(self.point_coord_list == [[point.x(), point.y()]], axis=1))[0][0], axis=0)\n\n poly.hide()", "def add_polygon(api_key, hexagon_id, hexagon_shape,\n api_endpoint=(\"https://engine.tygron.com/api/session/event/\"\n \"editorbuilding/add_polygons/?\")):\n multi = geometry.MultiPolygon([hexagon_shape])\n add = geometry.mapping(multi)\n r = requests.post(url=api_endpoint+api_key, json=[hexagon_id, 1, add,\n True])\n return", "def produce_polygon(polygon_ordered_coordinates: List, zoom: int, plot_polygon: bool = False) -> Path:\n polygon_tile_points = []\n for item in polygon_ordered_coordinates:\n polygon_tile_points += [Utility.get_tile(*item, zoom)]\n polygon_tile_points += [polygon_tile_points[0]]\n polygon = Path(polygon_tile_points)\n if plot_polygon:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n patch = patches.PathPatch(polygon, facecolor='orange', lw=2)\n ax.add_patch(patch)\n ax.set_xlim(min(polygon_tile_points, key = lambda item: item[0])[0], max(polygon_tile_points, key = lambda item: item[0])[0])\n ax.set_ylim(min(polygon_tile_points, key = lambda item: item[1])[1], max(polygon_tile_points, key = lambda item: item[1])[1])\n plt.show()\n return polygon", "def _set_poly_roi(self, event, x, y, flags, params):\r\n if event == cv2.EVENT_LBUTTONDOWN:\r\n self.tpoly.append((x, y))\r\n self.ix = x\r\n self.iy = y\r\n self.drawing = True\r\n if event == cv2.EVENT_MOUSEMOVE:\r\n if self.drawing:\r\n self.img = self.current_frame.copy()\r\n _poly = self.tpoly + [(x, y), 
self.tpoly[0]]\r\n for p1, p2 in zip(_poly[:-1], _poly[1:]):\r\n cv2.line(self.img, p1, p2, (0, 0, 255), 3)\r\n if event == cv2.EVENT_LBUTTONDBLCLK:\r\n self.poly = self.tpoly\r\n self.tpoly = []\r\n self.drawing = False", "def polygon(self, pointlist, cls=None, style=None, attrs=None):\n payload = self._meta.make_payload(cls, style, attrs)\n pts_str = ' '.join('%s,%s' % (x, y) for x, y in pointlist)\n self.elements.append(\"\"\"<polygon points=\"%s\" %s/>\"\"\" % (pts_str, payload))\n return self", "def polygon(self, pointlist, cls=None, style=None, attrs=None):\n payload = self._meta.make_payload(cls, style, attrs)\n pts_str = ' '.join('%s,%s' % (x, y) for x, y in pointlist)\n self.elements.append(\"\"\"<polygon points=\"%s\" %s/>\"\"\" % (pts_str, payload))\n return self", "def _draw_polygon(self):\n xs, ys = zip(*self._xys) if self._xys else ([], [])\n self._selection_artist.set_data(xs, ys)\n self._update_box()\n # Only show one tool handle at the start and end vertex of the polygon\n # if the polygon is completed or the user is locked on to the start\n # vertex.\n if (self._selection_completed\n or (len(self._xys) > 3\n and self._xys[-1] == self._xys[0])):\n self._polygon_handles.set_data(xs[:-1], ys[:-1])\n else:\n self._polygon_handles.set_data(xs, ys)\n self.update()", "def convert_polygon_COCO_type(element: list, height: int, width: int) -> Dict:\n rles = maskUtils.frPyObjects(element, height, width)\n p = maskUtils.merge(rles)\n return p", "def define_polygon(cls, polygon):\n \n num_obj = cls()\n num_obj.coord = [np.array(polygon)]\n return num_obj", "def make_polygon(*coords):\n global GEOMETRY_SURF, POLYGONS,col\n if len(coords) < 3:\n print(\"Warning: Invalid polygon passed, ignoring...\")\n return\n start = coords[0]\n prev = coords[0]\n for coord in coords:\n POLYGONS |= {coord}\n line = Boundary(prev[0],prev[1],coord[0],coord[1]) #add segment to WALL list\n prev = coord\n line = Boundary(prev[0], prev[1],start[0],start[1])\n #now draw poly\n pygame.draw.polygon(GEOMETRY_SURF,col[\"SHAPECOL\"], coords)\n return", "def _createpoly(self):\n return self.cv.create_polygon((0, 0, 0, 0, 0, 0), fill=\"\", outline=\"\")", "def polyListComponentConversion(*args, border: bool=True, fromEdge: bool=True, fromFace:\n bool=True, fromUV: bool=True, fromVertex: bool=True,\n fromVertexFace: bool=True, internal: bool=True, toEdge:\n bool=True, toFace: bool=True, toUV: bool=True, toVertex:\n bool=True, toVertexFace: bool=True, uvShell: bool=True,\n vertexFaceAllEdges: bool=True, **kwargs)->List[selectionItem]:\n pass", "def get(self,item: str) -> List[Dict[EnumShapeCategories, Union[str, List]]]:\n polygons = []\n for i,polygon in enumerate(self.dataset[item]):\n label:int = self.attr_mapping[polygon[EnumShapeCategories.Label]]\n color_code = \"#\"+f\"{label:02x}\"*3 # conversion to hexadecimal color (#FFFFFF for white for instance)\n polygons.append({EnumShapeCategories.Label:color_code,EnumShapeCategories.Points:polygon[EnumShapeCategories.Points]})\n return polygons", "def __draw_polygon(self, event, klick):\n global creating_polygon\n curX = self.canvas.canvasx(event.x)\n curY = self.canvas.canvasy(event.y)\n if not klick and len(self.polygon_points) >= 2:\n c_r_x, c_r_y = self.get_canvas_relative_coords((self.polygon_points[-2], self.polygon_points[-1]))\n distanceX = curX - c_r_x\n distanceY = curY - c_r_y\n if pow(pow(distanceX, 2) + pow(distanceY, 2), 1 / 2) <= 15:\n return\n image_relative_x, image_relative_y = self.get_image_relative_coords((curX, curY))\n 
self.polygon_points.extend((image_relative_x, image_relative_y))\n self.polygon_groundstructure.append(self.canvas.create_rectangle(curX - 2, curY - 2, curX + 2, curY + 2,\n outline='magenta', width=1,\n activewidth=2))\n if not creating_polygon: # start with a new polygon\n creating_polygon = True\n return\n else: # draw a line between the last points\n c_r_x1, c_r_y1 = self.get_canvas_relative_coords((self.polygon_points[-4], self.polygon_points[-3]))\n c_r_x2, c_r_y2 = self.get_canvas_relative_coords((self.polygon_points[-2], self.polygon_points[-1]))\n self.polygon_groundstructure.append(self.canvas.create_line([c_r_x1, c_r_y1, c_r_x2, c_r_y2],\n fill='magenta', width=2))" ]
[ "0.70407003", "0.6504912", "0.6365025", "0.625637", "0.6162488", "0.60109514", "0.5975227", "0.59541506", "0.5913706", "0.5852789", "0.5838715", "0.5821715", "0.58086246", "0.57636315", "0.572026", "0.5710127", "0.5707078", "0.5682682", "0.56402135", "0.56137234", "0.5599655", "0.5599655", "0.5567753", "0.5541065", "0.55308545", "0.5480296", "0.54424983", "0.54333085", "0.54178214", "0.54062515" ]
0.66038036
1
Create an invisible line item on canvas self.cv)
def _createline(self):
    return self.cv.create_line(0, 0, 0, 0, fill="", width=2,
                               capstyle = TK.ROUND)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw(self):\n # s1 = ShowPoint(self.cnv, self.p1.xpt, self.p1.ypt)\n # s2 = ShowPoint(self.cnv, self.p2.xpt, self.p2.ypt)\n # s1.draw()\n # # s2.draw()\n self.cnv.create_line(self.p1.xpt, self.p1.ypt, self.p2.xpt, self.p2.ypt)", "def draw(self, img):\n self._erase_last_line(self.img)\n\n idxs = np.argwhere(img[:, self._pos] == 0)\n self.prev_y = (idxs.min(), idxs.max())\n\n cv.line(img, (self._pos, 0), (self._pos, self.h), (0, 0, 0), 1)", "def _draw_line(self, event):\n if not self.obstacle_creation_mode:\n return\n\n if self.previous_coordinates is None:\n self.previous_coordinates = event.x, event.y\n self.new_obstacle.append([event.x, event.y])\n return\n\n x1, y1 = event.x, event.y\n\n if self._is_closing_shape(x1, y1, self.new_obstacle):\n x1, y1 = self.new_obstacle[0]\n else:\n self.new_obstacle.append([x1, y1])\n\n x0, y0 = self.previous_coordinates\n self.canvas.create_line(x0, y0, x1, y1, **self.LINE_OPTIONS)\n self.previous_coordinates = x1, y1", "def __init__(self, ax, useblit=False, **lineprops):\n self.ax = ax\n self.canvas = ax.figure.canvas\n\n self.canvas.mpl_connect('motion_notify_event', self.onmove)\n self.canvas.mpl_connect('draw_event', self.clear)\n\n self.visible = True\n self.horizOn = True\n self.vertOn = True\n self.useblit = useblit\n\n self.lineh = ax.axhline(0, visible=False, **lineprops)\n self.linev = ax.axvline(0, visible=False, **lineprops)\n\n self.background = None\n self.needclear = False", "def stopLineDrawing(self):\n taskMgr.remove(\"drawLineTask\")\n if self.line is not None:\n self.line.reset()\n self.line = None", "def vline(self, x, y, height, color):\n self.rect(x, y, 1, height, color, fill=True)", "def line(self, drawer, canvas):\n start_width = random.randint(\n self._width / 8, self._width / 4)\n start_height = random.randint(\n self._height / 4, self._height * 3 / 4)\n stop_width = random.randint(\n self._width * 3 / 4, self._width * 7 / 8)\n stop_height = random.randint(\n self._height / 4, self._height * 3 / 4)\n drawer.line(\n (start_width,\n start_height,\n stop_width,\n stop_height),\n fill=random.randint(128, 155),\n width=3\n )", "def draw_line(self, frame, rect):\n print(\"x0, y0, x1, y1\", self.x0, self.y0, self.x1, self.y1)\n print(\"cross_x, cross_y\", self.cross_x, self.cross_y)\n left, top, right, bottom = rect\n # 枠内では線を表示しないようにしてやる\n if top<self.y1<bottom and left<self.x1<right:\n return\n # フレームと線の交点\n if (self.x1 >= right or self.x1 <= left or self.y1 <= top or self.y1 >= bottom) and self.cross_x == 0:\n self.cross_x = self.x1\n self.cross_y = self.y1\n return\n draw = ImageDraw.Draw(frame)\n draw.line((self.cross_x, self.cross_y, self.x1, self.y1), fill=(255, 255, 255), width=3)", "def separator_line(parent, dims, flag, packvalue=Tk.TOP, bgcol='#d9d9d9'):\n lincanvas = Tk.Canvas(parent, height=dims[1], width=dims[0])\n lincanvas.config(bg=bgcol)\n try:\n lincanvas.pack(side=packvalue, fill=Tk.BOTH, expand=Tk.YES)\n except:\n return None\n if flag:\n lincanvas.create_line(dims[2], dims[1]/2, dims[0] - dims[2],\n dims[1]/2)\n else:\n lincanvas.create_line(dims[0]/2, dims[2], dims[0]/2,\n dims[1] - dims[2])\n return lincanvas", "def main():\n top = Tk()\n dim = 400\n cnv = Canvas(top, width=dim, height=dim)\n cnv.pack()\n lines = []\n for _ in range(10):\n xrd = randint(6, dim-6)\n yrd = randint(6, dim-6)\n xrd2 = randint(6, dim-6)\n yrd2 = randint(6, dim-6)\n lines.append(ShowLine(cnv, Point(xrd,yrd), Point(xrd2, yrd2)))\n for line in lines:\n line.draw()\n top.mainloop()", "def create(self):\n 
self.parent.copyCurrentWinState(self.pltw)\n # add a new vector\n vname = self.pltw.curvelist[self.cpos].name + 'BL'\n (nvec, npt) = np.shape(self.pltw.blklst[self.blkno])\n if self.pltw.pasteVector(self.data[2], self.blkno, vname):\n xname = self.pltw.getVnam(self.blkno, self.xpos)\n xvinfo = vectInfo(self.blkno, self.xpos, xname)\n yvinfo = vectInfo(self.blkno, nvec, vname)\n self.pltw.curvelist.append(curveInfo(vname, xvinfo, yvinfo))\n self.pltw.updatePlot()\n self.pltw.dirty = True\n self.pltw.activecurv = self.cpos\n self.parent.updateUI()\n self.hide()", "def _defLine(self):\n self._dline=GPath(points = [0,100,GAME_WIDTH,100], linewidth = 1.5,\n linecolor = 'cyan')", "def onmove(self, event):\n if self.ignore(event):\n return\n if not self.canvas.widgetlock.available(self):\n return\n if event.inaxes != self.ax:\n self.linev.set_visible(False)\n self.lineh.set_visible(False)\n\n if self.needclear:\n self.canvas.draw()\n self.needclear = False\n return\n self.needclear = True\n\n self.linev.set_xdata((event.xdata, event.xdata))\n self.linev.set_visible(self.visible and self.vertOn)\n\n self.lineh.set_ydata((event.ydata, event.ydata))\n self.lineh.set_visible(self.visible and self.horizOn)\n\n if self.visible and (self.vertOn or self.horizOn):\n self._update()", "def simple_canvas(self):\n self.canvas = Canvas()\n\n self.box1 = Box()\n self.canvas.add(self.box1)\n self.box1.matrix.translate(100, 50)\n self.box1.width = 40 \n self.box1.height = 40 \n self.box1.request_update()\n\n self.box2 = Box()\n self.canvas.add(self.box2)\n self.box2.matrix.translate(100, 150)\n self.box2.width = 50 \n self.box2.height = 50 \n self.box2.request_update()\n\n self.line = Line()\n self.head = self.line.handles()[0]\n self.tail = self.line.handles()[-1]\n self.tail.pos = 100, 100\n self.canvas.add(self.line)\n\n self.canvas.update_now()\n self.view = GtkView()\n self.view.canvas = self.canvas\n from gi.repository import Gtk\n win = Gtk.Window()\n win.add(self.view)\n self.view.show()\n self.view.update()\n win.show()\n\n self.tool = ConnectHandleTool(self.view)", "def _draw_line(event, x, y, flags, params):\n global img, source_img\n global p1, p2\n if event == cv2.EVENT_LBUTTONDOWN:\n img = source_img.copy()\n p1 = (x, y)\n elif event == cv2.EVENT_LBUTTONUP:\n p2 = (x, y)\n img = source_img.copy()\n text = 'position: %d' % p2[0]\n cv2.putText(img, text, (100, 100),\n cv2.FONT_HERSHEY_SIMPLEX, 3, DrawingShapeUtils.COLOR, \n DrawingShapeUtils.LINE_THICKNESS)\n cv2.line(img, (x, y+100), (x, y-100), DrawingShapeUtils.COLOR,\n DrawingShapeUtils.LINE_THICKNESS)", "def add(self, line):\n self.cull()\n self.lines.append(line)", "def vline(self, x, y, length, color):\n self.fill_rect(x, y, 1, length, color)", "def drawLine(self,start,stop):\n startX = int(self.vert[start][0]*self.scale + self.size/2)\n startY = int(self.vert[start][1]*self.scale + self.size/2)\n endX = int(self.vert[stop][0]*self.scale + self.size/2)\n endY = int(self.vert[stop][1]*self.scale + self.size/2)\n \n self.canvas.create_line(startX,startY,endX,endY,fill='white')", "def draw_desc_diagonal(self, player):\n if player == 1:\n color = GameData.circle_color\n elif player == 2:\n color = GameData.cross_color\n\n pygame.draw.line(self.game_screen, color, (15, 15), (GameData.screen_dim - 15, GameData.screen_dim - 15),\n GameData.win_line_width)", "def set_mode_line():\n global DRAW_MODE, CURRENT_LABEL, SHAPE_SIZE\n global mouse_pos, line_start_pos\n\n if DRAW_MODE==\"line\":\n # draw the line on the mask\n cv.line(source_msk, 
line_start_pos, mouse_pos, CURRENT_LABEL, thickness=SHAPE_SIZE)\n\n line_start_pos = mouse_pos\n DRAW_MODE=\"line\"", "def Toggle_Filter_Line( self ):\r\n if self.filter_line_on == 1:\r\n if( self.filter_line != 0 ):\r\n self.canvas_one.delete( self.filter_line )\r\n self.filter_line = 0\r\n self.filter_line_on = 0\r\n elif self.filter_line_on == 0:\r\n if(self.filter_line != 0 ):\r\n self.canvas_one.delete( self.filter_line )\r\n x = cb.xtotal - self.filter_distance*cb.xtotal/cb.longx + cb.xorigin\r\n self.filter_line = self.canvas_one.create_line( x,0,x,self.ys, fill=self.highlight_color)\r\n self.filter_line_on = 1", "def draw_lines(self):\n # draw x lines\n y = self.step_y\n while y <= self.height:\n x = 0\n while x <= self.width:\n self.canvas.create_line(x, y, x+3.5, y)\n self.canvas.update()\n x += 3.5\n y += self.step_y\n \n # draw y lines\n x = self.step_x\n while x <= self.width:\n y = 0\n while y <= self.height:\n self.canvas.create_line(x, y, x, y+3.5)\n self.canvas.update()\n y += 3.5\n x += self.step_x\n \n self.is_operating = False", "def __init__(self):\n self.circle=visual.Circle(win,radius=.5, edges=32, fillColor='white') \n self.circle2=visual.Circle(win,radius=.1, edges=32, fillColor='white') \n self.linev = visual.Line(win, start=(0,.8), end=(0,-.8), lineWidth=6, lineColor='black') \n self.lineh = visual.Line(win, start=(.8,0), end=(-.8,0), lineWidth=6, lineColor='black') \n \n self.components = [self.circle, self.circle2, self.linev, self.lineh]", "def set_display_from_lines(self):\n y = 1\n maxlin = CA_World.ca_display_size - 1\n limy = len(self.ca_lines) + maxlin\n for i in self.ca_lines:\n x = 1\n if limy >= maxlin:\n if SimEngine.gui_get('init') == \"Right\": # Right\n limx = len(i) + maxlin + 2\n for j in range(len(i) - 2):\n if limx >= maxlin:\n b = bool(i[j])\n self.pixel_tuple_to_patch(\n ((maxlin - len(i) + 2 + x) * 4, (maxlin - len(self.ca_lines) + y) * 4)).set_on_off(b)\n x += 1\n else:\n limx -= 1\n elif SimEngine.gui_get('init') == \"Left\": # Left\n limx = 0\n for j in range(len(i) - 2):\n if limx <= maxlin + 2:\n b = bool(i[j])\n self.pixel_tuple_to_patch(((x - 3) * 4, (maxlin - len(self.ca_lines) + y) * 4)).set_on_off(\n b)\n x += 1\n limx += 1\n else: # Center and Random\n limx = int((len(i) - maxlin) / 2)\n k = 0\n for j in range(len(i)):\n if limx < 0:\n b = bool(i[j])\n self.pixel_tuple_to_patch(((maxlin - len(i) + x - 1 + limx) * 4,\n (maxlin - len(self.ca_lines) + y) * 4)).set_on_off(b)\n else:\n if k < maxlin + 1:\n b = bool(i[j + limx])\n self.pixel_tuple_to_patch((k * 4,\n (maxlin - len(self.ca_lines) + y) * 4)).set_on_off(b)\n x += 1\n k += 1\n y += 1\n else:\n limy -= 1", "def pre_draw(self):", "def draw_line():\n global y1, y2\n canvas.create_line(x1, y1, x2, y2, width=2, fill=color)\n y1 -= 10\n y2 += 10", "def draw_lines(img, lines):\n for l in lines:\n rr, cc, val = line_aa(l[0][1], l[0][0], l[1][1], l[1][0])\n img[rr, cc] = 1\n\n return img", "def drawLine(img, start, end, color = (0,0,255), thickness = 3):\n\tcv2.line(img, start, end, color, thickness)", "def create_line(uniform = True, *args):\n axis = cmds.radioButtonGrp(widgets[\"lineAxisRBG\"], q=True, sl=True)\n length = cmds.floatFieldGrp(widgets[\"lineLenFFG\"], q=True, v1=True)\n density = cmds.floatFieldGrp(widgets[\"lineDenFFG\"], q=True, v1=True)\n\n numCvs = length * density\n if numCvs < 3.0: # curve needs 3 cvs (for 3 dg curve)\n numCvs = 3.0\n\n cvDist = length/numCvs\n\n # make a list of pt dist along some axis\n axisList = []\n for x in range(0,int(numCvs)+1):\n 
axisList.append(x)\n\n pts = []\n\n if axis == 1:\n for y in range(0, int(numCvs)+1):\n pt = [axisList[y]*cvDist, 0, 0]\n pts.append(pt)\n\n if axis == 2:\n for y in range(0, int(numCvs)+1):\n pt = [0, axisList[y]*cvDist, 0]\n pts.append(pt)\n\n if axis == 3:\n for y in range(0, int(numCvs)+1):\n pt = [0, 0, axisList[y]*cvDist]\n pts.append(pt)\t\t\t\n \n line = cmds.curve(name = \"line_01\", d=3, p=pts)\n shp = cmds.listRelatives(line, s=True)[0]\n cmds.rename(shp, \"{0}Shape\".format(line))\n if uniform:\n line = cmds.rebuildCurve(line, rebuildType = 0, spans = 0, keepRange = 0, replaceOriginal=True, end=1, keepControlPoints=0)[0]\n\n cmds.select(line, r=True)", "def event_click_line(self, event):\n event_x_pos = self.canvasx(event.x)\n event_y_pos = self.canvasy(event.y)\n if self.get_vector_object(self.variables.current_shape_id).image_drag_limits:\n drag_lims = self.get_vector_object(self.variables.current_shape_id).image_drag_limits\n canvas_lims = self.image_coords_to_canvas_coords(drag_lims)\n if event_x_pos < canvas_lims[0]:\n event_x_pos = canvas_lims[0]\n elif event_x_pos > canvas_lims[2]:\n event_x_pos = canvas_lims[2]\n if event_y_pos < canvas_lims[1]:\n event_y_pos = canvas_lims[1]\n elif event_y_pos > canvas_lims[3]:\n event_y_pos = canvas_lims[3]\n if self.variables.actively_drawing_shape:\n old_coords = self.get_shape_canvas_coords(self.variables.current_shape_id)\n new_coords = tuple(list(old_coords) + [event_x_pos, event_y_pos])\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, new_coords)\n else:\n new_coords = (event_x_pos, event_y_pos, event_x_pos + 1, event_y_pos + 1)\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, new_coords)\n self.variables.actively_drawing_shape = True" ]
[ "0.6389729", "0.6292108", "0.6281788", "0.618214", "0.60821176", "0.6069137", "0.60624546", "0.60604846", "0.60585237", "0.60570765", "0.6035188", "0.6017805", "0.59448105", "0.59333545", "0.5931279", "0.59004617", "0.5883799", "0.58727956", "0.5841444", "0.5840402", "0.5837919", "0.5836906", "0.5815303", "0.578688", "0.57759076", "0.5757407", "0.5755996", "0.57383084", "0.57338625", "0.5733415" ]
0.74068683
0
Delay subsequent canvas actions for delay ms.
def _delay(self, delay):
    self.cv.after(delay)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delay():\r\n time.sleep(2)", "def _delay(self, delay=None):\n return self.screen.delay(delay)", "def _delay(self, n=None):", "def delay(seconds):\n\n # Perform the delay\n time.sleep(seconds)", "def delay(ms: int, /) -> None:", "def __delay(msecs):\n time.sleep(msecs / 1000)", "def delay(self, seconds):\n\n if self.call is None:\n return\n self.call.delay(seconds)", "async def sleep(cls, delay: float) -> None:", "def setdelay(self):\n delay=self.inputdelay.getEntry()\n cmd=\"setDelay(\"+self.board+','+self.inpedge+','+delay+')'\n self.vb.io.execute(cmd,log=\"out\",applout=\"<>\")", "def winner_delay(self, winner_delay):\n\n self._winner_delay = winner_delay", "def script_delay(now):\n self._listener = None\n self.turn_on()", "def RandomDelay():\r\n sleep(random())", "def delayExec(self, delay_ms):\n if not 0 < delay_ms < 30000:\n raise(ValueError('`delay` [{0}] must be between 0 and 40000 ms'\n ''.format(delay_ms)))\n cmd_string = 'M{0}'.format(delay_ms)\n self.cmd_chain += cmd_string", "def delay(interval):\n time.sleep(interval / 1000.0)", "def _animation_move_tick(self, diff_x, diff_y):\r\n self.root.after(self._MOVE_ANIMATION_DELAY, self.canvas.move(self.item, diff_x, diff_y))\r\n self.canvas.update()", "def refresh(self, delay, one_step=False):\n self.canvas.itemconfig(\"rect\", fill=self._primary_color)\n\n # complete a turn on a temp board\n temp_board = self.blank_board()\n for row in range(self._dim):\n for col in range(self._dim):\n temp_board[row][col] = self.live_or_die(row, col)\n\n # replace real board with new updated board\n self._board = temp_board\n\n\n # refresh UI\n self.model_refresh()\n\n\n if self._continue and not one_step:\n self.after(delay, lambda: self.refresh(delay))", "def shift(self, delay):\n self.go_to(self.time + delay)", "def _delay_update(now):\n _LOGGER.debug(\n \"%s Called delayed (%ssec) update\", self._name, self._delay\n )\n self.schedule_update_ha_state()\n self._timer = None", "def pause(time=1e-6):\n pl.draw()\n pl.gcf().canvas.start_event_loop(time)", "def run_after_delay(delay_ms: float, callback: Callable[[], None]):\n heapq.heappush(\n _sorted_scheduled_events,\n _ScheduledEvent(\n time=pygame.time.get_ticks() + delay_ms, callback=callback\n ),\n )", "def SetStepDelay(self,delay=200): \n self.Bus.Transaction(chr(self.Address)+chr(0x43)+chr(delay))", "def sleep(self):\n time.sleep(0.2)", "def call_later(self, delay, callback):\n self.factory.manager.call_later(delay, callback)", "def _call_later(self, delay, callback):\n self.io_loop.call_later(delay, callback)", "def _delay(self):\n if not self.next_scheduled:\n self.next_scheduled = self.clock_func() + self.interval\n return\n while True:\n current = self.clock_func()\n if current >= self.next_scheduled:\n extratime = current - self.next_scheduled\n self.next_scheduled = current + self.interval - extratime\n return\n delay_amt = self.next_scheduled - current\n #Call for 0, because that might be meaningful to sleep_func.\n if self.allow_negative_sleep or delay_amt >= 0: \n self.sleep_func(self.next_scheduled - current)", "def _ontimer(self, fun, t):\n if t == 0:\n self.cv.after_idle(fun)\n else:\n self.cv.after(t, fun)", "def delay(self, distance, seconds):\n delay = distance/seconds\n return delay", "def do_animations(self):\n self.animate_bloop(700, 160, 50)", "def test_delay(self):\n model = BDF(debug=False)\n node1, c1, t1 = 100, 3, 0.3\n node2, c2, t2 = 101, 4, 0.4\n sid = 42\n card_lines = ['DELAY', sid, node1, c1, t1, node2, c2, t2]\n model.add_card(card_lines, card_lines[0], 
comment='', is_list=True,\n has_none=True)\n model.add_grid(100, [0., 0., 0.])\n model.add_grid(101, [0., 0., 0.])\n model.validate()\n model.cross_reference()\n #print(model.delays[42])\n save_load_deck(model)", "def sleep(self):\n self.sleep_after(1) # Can't be 0, that means 'don't sleep'" ]
[ "0.7070052", "0.6835207", "0.6616521", "0.64635235", "0.6431732", "0.6303699", "0.61440194", "0.61355704", "0.6104973", "0.60513353", "0.6049009", "0.60098773", "0.5986527", "0.5979398", "0.5903434", "0.58919734", "0.5890372", "0.58212", "0.5795427", "0.57597876", "0.5746297", "0.567689", "0.56303334", "0.5619893", "0.5613748", "0.561237", "0.55856997", "0.5579015", "0.55710393", "0.5511297" ]
0.72006464
0
Check if the string color is a legal Tkinter color string.
def _iscolorstring(self, color):
    try:
        rgb = self.cv.winfo_rgb(color)
        ok = True
    except TK.TclError:
        ok = False
    return ok
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_color(s):\n def in_range(i): return 0 <= i <= int('0xFFFFFF', 0)\n\n try:\n if type(s) == int:\n return in_range(s)\n elif type(s) not in (str, bytes):\n return False\n elif s in webcolors.css3_names_to_hex:\n return True\n elif s[0] == '#':\n return in_range(int('0x' + s[1:], 0))\n elif s[0:2] == '0x':\n return in_range(int(s, 0))\n elif len(s) == 6:\n return in_range(int('0x' + s, 0))\n except ValueError:\n return False", "def colorOK(colorStr):\n tkWdg = _getTkWdg()\n\n try:\n tkWdg.winfo_rgb(colorStr)\n except tkinter.TclError:\n return False\n return True", "def test_is_valid_color(self):\n self.assertTrue(is_valid_color('black'))\n self.assertTrue(is_valid_color('#aabb11'))\n self.assertTrue(is_valid_color('rgba(23,45,67, .5)'))\n self.assertFalse(is_valid_color('bl(ack'))", "def is_color(color):\n # check if color is\n # 1) the default empty value\n # 2) auto\n # 3) a color name from the 16 color palette\n # 4) a color index from the 256 color palette\n # 5) an HTML-style color code\n if (color in ['', 'auto'] or\n color in COLORS.keys() or\n (color.isdigit() and int(color) >= 0 and int(color) <= 255) or\n (color.startswith('#') and (len(color) in [4, 7, 9]) and\n all(c in '01234567890abcdefABCDEF' for c in color[1:]))):\n return color\n raise VdtValueError(color)", "def is_valid_color(value):\n if is_str(value):\n return is_hex_string(value)\n elif is_tuple_or_list(value):\n return (is_tuple_or_list(value)\n and is_three_channeled(value)\n and has_valid_channel_values(value))\n else:\n return is_str_or_coll(value)", "def test_is_valid_color_name(self):\n self.assertTrue(is_valid_color_name('black'))\n self.assertTrue(is_valid_color_name('red'))\n self.assertFalse(is_valid_color_name('#aabb11'))\n self.assertFalse(is_valid_color_name('bl(ack'))", "def __isValidColor(self, name):\n try:\n if self.__isHexString(name) and len(name) in [3, 6, 9, 12]:\n return True\n return QColor.isValidColor(name)\n except AttributeError:\n if name.startswith(\"#\"):\n if len(name) not in [4, 7, 10, 13]:\n return False\n hexCheckStr = name[1:]\n return self.__isHexString(hexCheckStr)\n else:\n if self.__isHexString(name) and len(name) in [3, 6, 9, 12]:\n return True\n return name in QColor.colorNames()", "def is_colorstr(arg):\n try:\n assert len(arg) == 6\n for c in arg:\n assert c in COLORMAP\n except AssertionError:\n raise argparse.ArgumentTypeError('%s is not a valid color string' % arg)\n return arg", "def EyeColorTest(str):\n\n\tvalidcolors = ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\n\treturn str in validcolors", "def validColor(color):\n if color not in (RED, GREEN, BLUE, YELLOW, EMPTY):\n return False\n else:\n return True", "def check_color(style):\n for kw in list(cc.keys()):\n m = re.search(kw, style)\n if m:\n return m.group()\n\n # Return 'b' if nothing has found\n return 'b'", "def get_color(cls, string_color: str) -> Union['Color', bool]:\n r = False\n for color in cls:\n # if color == cls.CSI:\n # continue\n if str(color) == string_color:\n return color\n if not r:\n r = str(color).startswith(string_color)\n\n return r", "def test_color__html_str_arg(self):\n # See test_webstyle() for related tests.\n color = pygame.Color(\"#a1B2c3D4\")\n\n self.assertEqual(color.r, 0xA1)\n self.assertEqual(color.g, 0xB2)\n self.assertEqual(color.b, 0xC3)\n self.assertEqual(color.a, 0xD4)", "def _is_color_valid(self, color):\n # make sure it is a tuple\n if type(color).__name__ != 'tuple':\n return False\n # check the length of the tuple\n if len(color) != 3:\n return False\n # 
verify that component colors are between _MIN and _MAX\n for c in color:\n if c < MIN or c > MAX:\n return False\n return True", "def isRGB(color):\n try:\n if color[0:4] != 'rgb(':\n return False\n if color[-1:] != ')':\n return False\n if len(color[4:-1].split(',')) != 3:\n return False\n for i in color[4:-1].split(','):\n if i.replace(' ', '').isdigit() == False:\n return False\n if int(i.replace(' ', '')) < 0 or int(i.replace(' ', '')) > 255:\n return False\n return True\n except TypeError:\n return False", "def valid_color(self, color):\n valid = False\n if (isinstance(color, list) and len(color) == 3):\n valid = True\n for chan in color:\n valid = valid and (0 <= chan <= 15)\n if not valid:\n _LOGGER.warn(\"{0} was not a valid color\".format(color))\n return valid", "def check_dog_color(dog):\n colors = [\"White\", \"Black\", \"Brown\", \"Sable\", \"Gray\", \"Fawn\", \"Cream\"]\n\n if isinstance(dog.color, str):\n if dog.color not in colors:\n raise InvalidColorError(\"Dog color is not in the accepted list of colors\")\n else:\n raise NotStringError(\"Dog color entered is not a string\")", "def test_color__hex_str_arg(self):\n # See test_webstyle() for related tests.\n color = pygame.Color(\"0x1a2B3c4D\")\n\n self.assertEqual(color.r, 0x1A)\n self.assertEqual(color.g, 0x2B)\n self.assertEqual(color.b, 0x3C)\n self.assertEqual(color.a, 0x4D)", "def isColor(self,color):\n return self.color==color", "def is_valid_hair_color(hair_color: str) -> bool:\n return re.match(r'^#[a-f|0-9]{5}', hair_color)", "def isRGB(color):\n if not(isinstance(color, list) or isinstance(color, tuple)):\n raise pgUIException(str(color) + ' is not a valid color',\n code = 20)\n if len(color) != 3:\n raise pgUIException(str(color) + ' color has to have three components',\n code = 21)\n if not(isinstance(color[0], int))\\\n or not(isinstance(color[1], int))\\\n or not(isinstance(color[2], int)):\n raise pgUIException(str(color) + ' color components have to be integers',\n code = 23)\n for c in color:\n if c < 0 or c > 255:\n raise pgUIException(str(color) +\n ' color components are to be in between 0 and 255',\n code = 22)\n return True", "def is_red(self):\n return \"red\" == self.color", "def is_color(self, color: ColorLike) -> bool:\n\n if isinstance(color, Color):\n return self.color == color\n elif isinstance(color, str):\n return str(self.color) == color\n elif isinstance(color, int):\n return int(self.color) == color\n return False", "def is_cue_color(color, tolerance = 100):\n return col_diff(color, CUE_COLOR) <= tolerance", "def test_weirdColorFormatting(self):\n self.assertAssembledEqually(\"\\x031kinda valid\", A.fg.black[\"kinda valid\"])\n self.assertAssembledEqually(\n \"\\x03999,999kinda valid\", A.fg.green[\"9,999kinda valid\"]\n )\n self.assertAssembledEqually(\n \"\\x031,2kinda valid\", A.fg.black[A.bg.blue[\"kinda valid\"]]\n )\n self.assertAssembledEqually(\n \"\\x031,999kinda valid\", A.fg.black[A.bg.green[\"9kinda valid\"]]\n )\n self.assertAssembledEqually(\n \"\\x031,242 is a special number\",\n A.fg.black[A.bg.yellow[\"2 is a special number\"]],\n )\n self.assertAssembledEqually(\"\\x03,02oops\\x03\", A.normal[\",02oops\"])\n self.assertAssembledEqually(\"\\x03wrong\", A.normal[\"wrong\"])\n self.assertAssembledEqually(\"\\x031,hello\", A.fg.black[\"hello\"])\n self.assertAssembledEqually(\"\\x03\\x03\", A.normal)", "def is_hex_color(color, verbose='info'):\n # Set the logger\n set_logger(verbose=verbose)\n\n if not isinstance(color, str):\n logger.info('Hex [%s] should be of type string' 
%(str(color)))\n\n return False\n\n if color.startswith('#'):\n color = color[1:]\n else:\n logger.info('Hex [%s] should start with \"#\"' %(str(color)))\n return False\n\n if len(color) != 6:\n logger.info('Hex [%s] should be of length 7 incl \"#\"' %(str(color)))\n return False\n\n try:\n int(color, 16)\n return True\n except ValueError:\n return False", "def print_illegal_color_format_screen( enteredBGColor, enteredFGColor, convertedBGColor, convertedFGColor ):\n print \"\"\n print \"Error: are the passed in colors valid?\"\n print \" - passed in background-color '\" + enteredBGColor + \"' was converted to '\" + convertedBGColor + \"'.\"\n print \" - passed in foreground-color '\" + enteredFGColor + \"' was converted to '\" + convertedFGColor + \"'.\"\n print \"\"", "def validPlayerColor(color):\n if color not in (RED, GREEN, BLUE, YELLOW):\n return False\n else:\n return True", "def _is_color(cls, obj: Any) -> bool:\n\n return isinstance(obj, Color)", "def test_is_valid_rgb_color(self):\n self.assertTrue(is_valid_rgb_color('rgb(12,23,5)'))\n self.assertTrue(is_valid_rgb_color('rgb(12, 223, 225)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, 1)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, 1.0)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, 0)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, .3)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, .34521)'))\n\n # invalid cases\n self.assertFalse(is_valid_rgb_color('rgb(12, 223, 225, 0.5)'))\n self.assertFalse(is_valid_rgb_color('rgb(12, 223, 225, 5)'))\n self.assertFalse(is_valid_rgb_color('rgb(1234, 223, 225)'))\n self.assertFalse(is_valid_rgb_color('rgba(1234, 223, 225,.5)'))\n self.assertFalse(is_valid_rgb_color('rgba(1234, 223, 225,1.1)'))" ]
[ "0.7850937", "0.7765966", "0.76969033", "0.7676884", "0.75903404", "0.75249046", "0.74149966", "0.7390387", "0.70323974", "0.6862508", "0.6857582", "0.68549395", "0.6794173", "0.6701827", "0.6685319", "0.6660881", "0.654674", "0.6521498", "0.6519961", "0.6518414", "0.6441103", "0.64399606", "0.6426535", "0.638994", "0.63802284", "0.63762516", "0.637306", "0.6370527", "0.6361881", "0.63585913" ]
0.8371807
0
Write txt at pos in canvas with specified font and color. Return text item and xcoord of right bottom corner of text's bounding box.
def _write(self, pos, txt, align, font, pencolor):
    x, y = pos
    x = x * self.xscale
    y = y * self.yscale
    anchor = {"left":"sw", "center":"s", "right":"se" }
    item = self.cv.create_text(x-1, -y, text = txt, anchor = anchor[align],
                               fill = pencolor, font = font)
    x0, y0, x1, y1 = self.cv.bbox(item)
    self.cv.update()
    return item, x1-1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_text(text, pos, color=None, font=_glut.GLUT_BITMAP_TIMES_ROMAN_24, linespace=20):\n if color is None:\n color = _UTILS_COLOR_WHITE\n _gl.glColor3fv(color)\n if isinstance(pos, Point3):\n x = pos.get_x()\n y = pos.get_y()\n z = pos.get_z()\n _gl.glRasterPos3f(x, y, z)\n for char in text:\n if char == \"\\n\":\n y += linespace\n _gl.glRasterPos3f(x, y, z)\n else:\n # noinspection PyBroadException\n try:\n glutBitmapCharacter(font, ord(char))\n except:\n if not _UTILS_ERRS[0]:\n print_gl_error('Actual OpenGL version doest not support glutBitmapCharacter function')\n _UTILS_ERRS[0] = True\n else:\n raise Exception('Point must be Point3 type')", "def draw_text(text, pos, color=COLOR_WHITE, font=GLUT_BITMAP_TIMES_ROMAN_24,\n linespace=20):\n glColor3fv(color)\n if isinstance(pos, Point3):\n x = pos.get_x()\n y = pos.get_y()\n z = pos.get_z()\n glRasterPos3f(x, y, z)\n for char in text:\n if char == \"\\n\":\n y += linespace\n glRasterPos3f(x, y, z)\n else:\n try:\n glutBitmapCharacter(font, ord(char))\n except:\n if not _ERRS[0]:\n printGLError(\n 'la version actual de OpenGL no posee la funcion glutBitmapCharacter')\n _ERRS[0] = True\n else:\n raise Exception(\"el punto debe ser del tipo point3\")", "def draw_text(self, text, position, font_size, font_color):\n font_color = check_color(font_color)\n STtext.text(self.canvas, text, position, font_size, font_color)", "def text_draw(self, x, y, text, style={}):", "def draw_pos_text(self, text):\n fw, fh = self.font.size(text) # fw: font width, fh: font height\n surface = self.font.render(text, True, (0, 255, 0))\n # // makes integer division in python3\n self.screen.blit(surface, ((self.width - fw) // 2, (self.height - fh) // 2))", "def add_text(self, text, color, pos, font):\n text = font.render(text, True, color)\n text_rec = text.get_rect(center=pos)\n self.window.blit(text, text_rec)", "def draw_text(surf, font, text, pos,\n antialiasing=True,\n color=(255, 255, 255),\n anchor=\"northwest\"):\n x, y = pos\n s = font.render(text, antialiasing, color)\n s_rect = s.get_rect()\n\n if \"north\" in anchor:\n s_rect.y = y\n elif \"south\" in anchor:\n s_rect.y = y - s_rect.h\n else:\n s_rect.y = y - s_rect.h/2\n\n if \"west\" in anchor:\n s_rect.x = x\n elif \"east\" in anchor:\n s_rect.x = x - s_rect.w\n else:\n s_rect.x = x - s_rect.w/2\n\n surf.blit(s, s_rect)", "def draw_text(self, i, j, text, col, bg=None):\n txt = self.font.render(text, True, col, bg)\n rect = txt.get_rect()\n rect.center = self.get_rect(i, j).center\n self.screen.blit(txt, rect)", "def text(self, str: str, x: int, y: int, colour: int, /) -> None:", "def draw_text(self, text, position=(0, 0), color='black', font=None,\n font_size=12, rotation=0, **kwargs):\n font = self.font(font_size)\n\n text_image = Image.new('L', self.dimensions, 'black')\n draw_text_image = ImageDraw.Draw(text_image)\n draw_text_image.text(position, text, font=font, fill='white')\n\n alpha = Image.new('L', self.dimensions)\n alpha = ImageChops.lighter(alpha, text_image)\n\n solidcolor = Image.new('RGBA', self.dimensions, color)\n image_mask = Image.eval(text_image, lambda p: 255 * (int(p != 0)))\n self.base_image = Image.composite(solidcolor, self.base_image, image_mask)\n self.base_image.putalpha(alpha)", "def draw_text(self, text, font, color, surface, x, y): #use for narrative in end sequence\n text_obj = font.render(text, True, color)\n text_rect = text_obj.get_rect()\n text_rect.center = (x, y)\n surface.blit(text_obj, text_rect)", "def create_text(text, font, colour, position):\n _text = 
font.render(text, False, colour)\n _text_rect = _text.get_rect()\n _text_rect.center = position # place text centered on given position\n\n return {'surface': _text, 'rect': _text_rect}", "def draw_text(window, text, size, text_pos, color=WHITE, bold=False):\n font = pygame.font.Font(FONT_PATH, size)\n if bold:\n font.set_bold(1)\n text_surface = font.render(text, True, color)\n text_rect = text_surface.get_rect()\n text_rect.midtop = text_pos\n window.blit(text_surface, text_rect)", "def draw_text(\n self, text: str, size: int, color: Tuple[int, int, int], x: int, y: int\n ) -> None:\n # TODO: Select and use a better font\n font = pg.font.Font(pg.font.get_default_font(), size)\n text_surface = font.render(text, True, color)\n text_rect = text_surface.get_rect()\n text_rect.midtop = (x, y)\n self.screen.blit(text_surface, text_rect)", "def render_text(self, text, x_pos, y_pos, colour=0):\n if colour == 0:\n GL.glColor3f(0.0, 0.0, 0.0) # text is black\n elif colour == 1:\n GL.glColor3f(1.0, 0.0, 0.0)\n elif colour == 2:\n GL.glColor3f(0.0, 1.0, 0.0)\n GL.glRasterPos2f(x_pos, y_pos)\n font = GLUT.GLUT_BITMAP_HELVETICA_12\n\n for character in text:\n if character == '\\n':\n y_pos = y_pos - 20\n GL.glRasterPos2f(x_pos, y_pos)\n else:\n GLUT.glutBitmapCharacter(font, ord(character))", "def drawText(text, font, surface, x, y, textcolour):\r\n textobj = font.render(text, 1, textcolour)\r\n textrect = textobj.get_rect()\r\n textrect.topleft = (x, y)\r\n surface.blit(textobj, textrect)", "def create_text_box(self, box_pos, text_font):\n self.textBox = tk.Text(self.top, height=1, width=17,\n font=('Helvetica', text_font))\n self.textBox.grid(row=box_pos[0], column=box_pos[1],\n columnspan=box_pos[2], rowspan=box_pos[3])", "def draw_text(self, text, position, color, centered=False, scale=1.5, thickness=3):\n if centered:\n text_size = opencv.getTextSize(text, opencv.FONT_HERSHEY_SIMPLEX, fontScale=scale, thickness=thickness)[0]\n text_size = Point(-text_size[0]/2.0, text_size[1]/2.0)\n position = (position + text_size)\n position = self._format_point(position)\n opencv.putText(self.img, text, position.tuple(), opencv.FONT_HERSHEY_SIMPLEX, fontScale=scale,\n color=color.bgra(), thickness=thickness)", "def GetToolsPosition(self, dc, item, rect):\r\n \r\n text_width = text_height = 0\r\n horizontal = self._orientation == AUI_TBTOOL_HORIZONTAL\r\n text_bottom = self._text_orientation == AUI_TBTOOL_TEXT_BOTTOM\r\n text_right = self._text_orientation == AUI_TBTOOL_TEXT_RIGHT\r\n bmp_width = item.GetBitmap().GetWidth()\r\n bmp_height = item.GetBitmap().GetHeight()\r\n \r\n if self._agwFlags & AUI_TB_TEXT: \r\n dc.SetFont(self._font)\r\n label_size = GetLabelSize(dc, item.GetLabel(), not horizontal)\r\n text_height = label_size.GetHeight()\r\n text_width = label_size.GetWidth()\r\n \r\n bmp_x = bmp_y = text_x = text_y = 0\r\n\r\n if horizontal and text_bottom:\r\n bmp_x = rect.x + (rect.width/2) - (bmp_width/2)\r\n bmp_y = rect.y + 3\r\n text_x = rect.x + (rect.width/2) - (text_width/2)\r\n text_y = rect.y + ((bmp_y - rect.y) * 2) + bmp_height\r\n \r\n elif horizontal and text_right:\r\n bmp_x = rect.x + 3\r\n bmp_y = rect.y + (rect.height/2) - (bmp_height / 2)\r\n text_x = rect.x + ((bmp_x - rect.x) * 2) + bmp_width\r\n text_y = rect.y + (rect.height/2) - (text_height/2)\r\n \r\n elif not horizontal and text_bottom:\r\n bmp_x = rect.x + (rect.width / 2) - (bmp_width / 2)\r\n bmp_y = rect.y + 3\r\n text_x = rect.x + (rect.width / 2) - (text_width / 2)\r\n text_y = rect.y + ((bmp_y - rect.y) * 2) + 
bmp_height\r\n \r\n bmp_rect = wx.Rect(bmp_x, bmp_y, bmp_width, bmp_height)\r\n text_rect = wx.Rect(text_x, text_y, text_width, text_height)\r\n\r\n return bmp_rect, text_rect", "async def outline_text(draw_surface, coords, draw_text, font):\n draw = partial(draw_surface.text, text=draw_text, font=font,\n fill=\"black\")\n for offset_pair in product(range(-1, 2), repeat=2):\n draw((coords[0]+offset_pair[0], coords[1]+offset_pair[1]))\n draw(coords, fill=\"white\")", "def draw_text(\n self,\n text,\n position,\n font=FONT,\n size=16,\n color=Color.BLACK,\n centered=True,\n ):\n _font = pygame.font.Font(font, size)\n _text = _font.render(text, True, color)\n _rect = _text.get_rect()\n x, y = position\n if centered:\n x -= _rect.width // 2\n y -= _rect.height // 2\n _rect.topleft = x, y\n self.screen.blit(_text, _rect)", "def drawtxt(txt,font,fs,clr,x,y,w,h,tf):\n if tf == True:\n pygame.draw.rect(screen, BLACK, (x,y,w,h))\n if pygame.font:\n font = pygame.font.Font(font,fs)\n text = font.render(txt, False, clr)\n screen.blit(text, (x,y))\n pygame.display.update(x,y,w,h)", "def str(self, pos, text, dx=0, dy=0, **options):\n underride(options, fill='black', font=font, anchor=W)\n x, y = pos\n x += dx\n y += dy\n return self.text([x, y], text, **options)", "def textObject(text, font, color):\n\n textSurface = font.render(text, True, color)\n return textSurface, textSurface.get_rect()", "def drawEditText(self, font, text, x, y, selection=(0,0), caret=-1):\n self.color = Vec4(*font.color)\n name = font.name\n \n char_count = 0 \n ox = x\n baseLetter = self.atlas.getChar(name + str(ord(\"T\")))\n omaxh = baseLetter[3] - baseLetter[4][1]\n\n for line in text.split(\"\\n\"):\n build = []\n maxh = omaxh \n \n for c in line:\n if char_count == caret:\n u,v,w,h,e = self.atlas.getChar(name + str(ord('|')))\n build.append((x-w/2,y+e[1],u,v,w,h))\n char_count += 1 \n \n code = ord(c) \n if code <= 32:\n u,v,w,h,e = self.atlas.getChar(name + str(77))\n x += e[0]\n continue\n u,v,w,h,e = self.atlas.getChar(name + str(code))\n build.append((x,y+e[1],u,v,w,h))\n x += e[0]\n maxh = max(maxh,h-e[1])\n \n else:\n if char_count == caret:\n u,v,w,h,e = self.atlas.getChar(name + str(ord('|')))\n build.append((x-w/2,y+e[1],u,v,w,h))\n char_count += 1 \n \n for x,y,u,v,w,h in build:\n self.rectStreatch((x,y+maxh-h,w,h),(u,v,w,h))\n \n x = ox \n y += maxh", "def draw_text(self, text, x=0, y=0,\n color=None, bg=colors.Off, aa=False,\n font_name=font.default_font, font_scale=1):\n fh = font.fonts[font_name]['height']\n for c in text:\n if c == '\\n':\n y += font_scale * fh\n x = 0\n elif c == '\\r':\n pass # skip it\n else:\n fw = self.draw_char(x, y, c, color, bg, aa, font_name, font_scale)\n x += font_scale * fw\n if x >= self.width:\n break", "def draw_text(display, font_name, text, size, color, x, y):\n font = pg.font.Font(font_name, size)\n text_surface = font.render(text, True, color)\n text_rect = text_surface.get_rect()\n text_rect.midtop = (x, y)\n display.blit(text_surface, text_rect)", "def drawText(txt,x,y,ucoords=1,TeX=0):\n if (type(txt) is types.IntType) or (type(txt) is types.FloatType) or \\\n (type(txt) is types.LongType):\n drawNumber(txt,x,y,ucoords)\n else:\n if TeX: setTeXMode(1)\n drawMessage(txt,x,y,ucoords)", "def write(self, text, x=None, y=None):\n\n # TODO - change this so that the cursor moves.\n if x is None:\n x = self.cursorx\n if y is None:\n y = self.cursory\n\n self._strDirty = True\n startIndex = self._convertTupleIndexsToSingleIndex(x, y)\n for i in range(startIndex, startIndex + 
len(text)):\n cx, cy = self._convertSingleIndexToTupleIndexes(i % self.area)\n if not self.isOnCanvas(cx, cy):\n break\n\n self._chars[cx][cy] = text[i - startIndex]\n self._fginfo[cx][cy] = self._fg\n self._bginfo[cx][cy] = self._bg\n\n self.cursor = self._convertSingleIndexToTupleIndexes((startIndex + len(text)) % self.area)", "def write(self, x, y, text, fg, bg):\n brush = self.get_brush(fg, bg)\n try:\n self.win.addstr(y, x, text, brush)\n except curses.error:\n if x == self.width - 1 and y == self.height - 1:\n pass" ]
[ "0.703068", "0.70164824", "0.6974559", "0.69286704", "0.68471724", "0.6755969", "0.66728175", "0.6666933", "0.65818965", "0.65559834", "0.65518737", "0.65458", "0.64890784", "0.6485086", "0.6478929", "0.64785075", "0.6454026", "0.6442451", "0.6432531", "0.63997805", "0.6373174", "0.6357918", "0.6355584", "0.63406986", "0.6334561", "0.6323662", "0.63230264", "0.6315774", "0.6301588", "0.6280925" ]
0.81335413
0
Bind fun to mouse-button-release event on Myturtle. fun must be a function with two arguments, the coordinates of the point on the canvas where the mouse button is released. num, the number of the mouse button, defaults to 1. If a Myturtle is clicked, first the _onclick event will be performed, then the _onscreenclick event.
def _onrelease(self, item, fun, num=1, add=None):
    if fun is None:
        self.cv.tag_unbind(item, "<Button%s-ButtonRelease>" % num)
    else:
        def eventfun(event):
            x, y = (self.cv.canvasx(event.x)/self.xscale,
                    -self.cv.canvasy(event.y)/self.yscale)
            fun(x, y)
        self.cv.tag_bind(item, "<Button%s-ButtonRelease>" % num,
                         eventfun, add)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _onscreenclick(self, fun, num=1, add=None):\n if fun is None:\n self.cv.unbind(\"<Button-%s>\" % num)\n else:\n def eventfun(event):\n x, y = (self.cv.canvasx(event.x)/self.xscale,\n -self.cv.canvasy(event.y)/self.yscale)\n fun(x, y)\n self.cv.bind(\"<Button-%s>\" % num, eventfun, add)", "def mouse_release_event(self, x: int, y: int, button: int):\n pass", "def on_mouse_release(self, x, y, button):\n pass", "def button_release_event(self, widget, event):\n x, y = event.x, event.y\n\n # x, y = coordinates where the button was released\n self.last_win_x, self.last_win_y = x, y\n\n button = 0\n # prepare button mask as in button_press_event()\n\n data_x, data_y = self.check_cursor_location()\n\n return self.make_ui_callback('button-release', button, data_x, data_y)", "def ev_mousebuttondown(self, event: tcod.event.MouseButtonDown) -> T | None:", "def on_mouse_release(self, x, y, button, key_modifiers):\r\n pass", "def onclick(self, fun, btn=1, add=None):\n self.screen._onclick(self.Myturtle._item, fun, btn, add)\n self._update()", "def _onclick(self, item, fun, num=1, add=None):\n if fun is None:\n self.cv.tag_unbind(item, \"<Button-%s>\" % num)\n else:\n def eventfun(event):\n x, y = (self.cv.canvasx(event.x)/self.xscale,\n -self.cv.canvasy(event.y)/self.yscale)\n fun(x, y)\n self.cv.tag_bind(item, \"<Button-%s>\" % num, eventfun, add)", "def press_button(self, num):\n if num == 1:\n f = self.counter1 % len(self.layout.button1_func)\n self.layout.button1_func[f]()\n self.counter1 += 1\n elif num == 2:\n f = self.counter2 % len(self.layout.button2_func)\n self.layout.button2_func[f]()\n self.counter2 += 1\n elif num == 3:\n f = self.counter3 % len(self.layout.button3_func)\n self.layout.button3_func[f]()\n self.counter3 += 1\n elif num == 4:\n f = self.counter4 % len(self.layout.button4_func)\n self.layout.button4_func[f]()\n self.counter4 += 1", "def button_release(self, event: Any) -> None:\n if event.button == 1:\n self.left_button_down = False\n if event.button == 2:\n self.middle_button_down = False\n if event.button == 3:\n self.right_button_down = False", "def ev_mousebuttondown(self, event: MouseButtonDown) -> None:", "def ev_mousebuttonup(self, event: tcod.event.MouseButtonUp) -> T | None:", "def set_doubleclick_slot(self, fun):\n self.doubleclick_fun = fun", "def on_mouse_press(self, x, y, button):\n\n pass", "def _ondrag(self, item, fun, num=1, add=None):\n if fun is None:\n self.cv.tag_unbind(item, \"<Button%s-Motion>\" % num)\n else:\n def eventfun(event):\n try:\n x, y = (self.cv.canvasx(event.x)/self.xscale,\n -self.cv.canvasy(event.y)/self.yscale)\n fun(x, y)\n except Exception:\n pass\n self.cv.tag_bind(item, \"<Button%s-Motion>\" % num, eventfun, add)", "def set_button(frame, caption, func, number):\n\n b = Button(frame, text=caption, width=15, height=1)\n b.bind(\"<Button-1>\", lambda event, f=number: func(event, f))\n b.pack(side=LEFT)", "def mouse_press_event(self, x: int, y: int, button: int):\n pass", "def check_mouse_release_for_buttons(_x, _y, button_list):\n for button in button_list:\n if button.pressed:\n button.on_release()", "def check_mouse_release_for_buttons(x: float, y: float, button_list: list):\n for button in button_list:\n if button.pressed:\n #sets button pressed to false\n button.on_release()", "def release(button='left', coords=(0, 0)):\n _perform_click_input(button=button, coords=coords, button_down=False, button_up=True)", "def handle_mouse_press(self, event):", "def _onkeyrelease(self, fun, key):\n if fun is None:\n 
self.cv.unbind(\"<KeyRelease-%s>\" % key, None)\n else:\n def eventfun(event):\n fun()\n self.cv.bind(\"<KeyRelease-%s>\" % key, eventfun)", "def triangleBtnHandler(val):\n if val == 1 :\n print(\"Triangle button pressed\")\n else:\n print(\"Triangle button released\")", "def button_release_cb(self, darea, event):\n self.oldx, self.oldy = event.x, event.y\n self.draw_pointer(self.cr, None, None)\n self.queue_draw()\n self.oldx, self.oldy = None, None\n self.emit('end-dnd')\n return True", "def release():\n gui.mouseUp()", "def ev_joybuttondown(self, event: tcod.event.JoystickButton) -> T | None:", "def bind(self):\n self.canvas.bind(\"<ButtonPress-1>\", self.click)", "def add_button_callback(self, button, function, event=BUTTON_DOWN, threaded=True):\n\t\tif event == LCD.BUTTON_DOWN:\n\t\t\tedge = 'falling'\n\t\telif event == LCD.BUTTON_UP:\n\t\t\tedge = 'rising'\n\t\telif event == LCD.BUTTON_EITHER:\n\t\t\tedge = 'both'\n\t\tRPIO.add_interrupt_callback(button, function, edge, RPIO.PUD_UP, threaded, 20)", "def on_canvas_mouse_release(self, event) -> None:\r\n\r\n self.edit_toggle_mode = None", "def _release(self, event):" ]
[ "0.68110013", "0.65954924", "0.6564144", "0.61202985", "0.6027845", "0.60225904", "0.5944487", "0.59349585", "0.5782131", "0.5691157", "0.5666011", "0.56604856", "0.5641726", "0.5623444", "0.56176275", "0.55965006", "0.5556579", "0.55450773", "0.5525885", "0.551433", "0.5465966", "0.5458655", "0.54140496", "0.5404308", "0.5387459", "0.53859615", "0.53851384", "0.5381947", "0.53752977", "0.53636223" ]
0.7111682
0
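For orientation, a minimal usage sketch of the release binding described above, written against the public onrelease wrapper of the standard-library turtle module (the record itself quotes a modified Myturtle source); the handler receives the canvas coordinates where the button was let go.

import turtle

t = turtle.Turtle()
t.shape("turtle")

def dropped(x, y):
    # called with the canvas coordinates where button 1 was released
    print("released at", x, y)

t.onrelease(dropped)   # button number defaults to 1, i.e. the left button
turtle.mainloop()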
Bind fun to mouse-move event (with pressed mouse button) on Myturtle. fun must be a function with two arguments, the coordinates of the actual mouse position on the canvas. num, the number of the mouse button, defaults to 1. Every sequence of mouse-move events on a Myturtle is preceded by a mouse-click event on that Myturtle.
def _ondrag(self, item, fun, num=1, add=None):
    if fun is None:
        self.cv.tag_unbind(item, "<Button%s-Motion>" % num)
    else:
        def eventfun(event):
            try:
                x, y = (self.cv.canvasx(event.x)/self.xscale,
                        -self.cv.canvasy(event.y)/self.yscale)
                fun(x, y)
            except Exception:
                pass
        self.cv.tag_bind(item, "<Button%s-Motion>" % num, eventfun, add)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _onscreenclick(self, fun, num=1, add=None):\n if fun is None:\n self.cv.unbind(\"<Button-%s>\" % num)\n else:\n def eventfun(event):\n x, y = (self.cv.canvasx(event.x)/self.xscale,\n -self.cv.canvasy(event.y)/self.yscale)\n fun(x, y)\n self.cv.bind(\"<Button-%s>\" % num, eventfun, add)", "def ev_MOUSEMOTION(self, event):", "def onclick(self, fun, btn=1, add=None):\n self.screen._onclick(self.Myturtle._item, fun, btn, add)\n self._update()", "def _onclick(self, item, fun, num=1, add=None):\n if fun is None:\n self.cv.tag_unbind(item, \"<Button-%s>\" % num)\n else:\n def eventfun(event):\n x, y = (self.cv.canvasx(event.x)/self.xscale,\n -self.cv.canvasy(event.y)/self.yscale)\n fun(x, y)\n self.cv.tag_bind(item, \"<Button-%s>\" % num, eventfun, add)", "def mouse_position_event(self, x: int, y: int):\n pass", "def handle_mouse_press(self, event):", "def on_mouse_press(self, x, y, button):\n\n pass", "def press_button(self, num):\n if num == 1:\n f = self.counter1 % len(self.layout.button1_func)\n self.layout.button1_func[f]()\n self.counter1 += 1\n elif num == 2:\n f = self.counter2 % len(self.layout.button2_func)\n self.layout.button2_func[f]()\n self.counter2 += 1\n elif num == 3:\n f = self.counter3 % len(self.layout.button3_func)\n self.layout.button3_func[f]()\n self.counter3 += 1\n elif num == 4:\n f = self.counter4 % len(self.layout.button4_func)\n self.layout.button4_func[f]()\n self.counter4 += 1", "def ev_mousebuttondown(self, event: tcod.event.MouseButtonDown) -> T | None:", "def ev_mousemotion(self, event: tcod.event.MouseMotion) -> T | None:", "def mouse_press_event(self, x: int, y: int, button: int):\n pass", "def on_mouse_move(self, win, xpos, ypos):\n old = self.mouse\n self.mouse = (xpos, glfw.get_window_size(win)[1] - ypos)\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_LEFT):\n self.drag(old, self.mouse, glfw.get_window_size(win))\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_RIGHT):\n self.pan(old, self.mouse)", "def on_mouse_move(self, win, xpos, ypos):\n old = self.mouse\n self.mouse = (xpos, glfw.get_window_size(win)[1] - ypos)\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_LEFT):\n self.drag(old, self.mouse, glfw.get_window_size(win))\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_RIGHT):\n self.pan(old, self.mouse)", "def ev_MOUSEDOWN(self, event):", "def set_button(frame, caption, func, number):\n\n b = Button(frame, text=caption, width=15, height=1)\n b.bind(\"<Button-1>\", lambda event, f=number: func(event, f))\n b.pack(side=LEFT)", "def handle_mouse(self, x, y):\n pass", "def on_mouse_move(self, event: PointEvent):\n self.x = event.x\n self.y = event.y\n self.handle_mouse(self.x, self.y)", "def ev_mousemotion(self, event: MouseMotion) -> None:", "def handle_mouse(obj, event):\n if event:\n x = event.globalX()\n y = event.globalY()\n x_w = obj.offset.x()\n y_w = obj.offset.y()\n obj.move(x - x_w, y - y_w)", "def move(point):\n # wrapper just so we don't have to import pymouse separately\n m = PyMouse()\n m.move(*point)", "def mouseDragged():\n if mousePressed:\n mousePressed()", "def mousePosition(self):", "def mouse_move_callback(self, event):\n # TODO drag and drop figuriek\n print(\"moving at \", event.x + self.offset_x, event.y + self.offset_y)", "def on_mouse_motion(self, x, y, delta_x, delta_y):\r\n pass", "def MouseClick(event):\r\n global player\r\n global winner\r\n Window.focus_set()\r\n x = event.x // 100 # convertit une coordonée pixel écran en coord grille de jeu\r\n y = event.y // 100\r\n if ( (x<0) or (x>2) or (y<0) or (y>2) ) : 
return\r\n \r\n print(\"clicked at\", x,y)\r\n hasPlay = Play(x,y,player) # on regarde si le joueur a jouer correctement\r\n if hasPlay:\r\n newPlayer() # dans ce cas là on change de joueur \r\n winner = Victoire()\r\n if (winner or MatchNul()):\r\n Dessine(winner)\r\n Window.update()\r\n Window.after(3000)\r\n ResetGame(winner)\r\n Dessine(winner)\r\n return\r\n Dessine(winner)\r\n if hasPlay: # si le joueur a bien joué, alors c'est au tour de l'ia\r\n Window.update()\r\n Window.after(3000)\r\n thisIsIA()", "def new_move():\r\n\r\n if players[0] == 1:\r\n draw_circle() \r\n players.reverse() \r\n elif players[0] == 0:\r\n if pygame.mouse.get_pressed().count(True)>=1:\r\n draw_X()", "def _onrelease(self, item, fun, num=1, add=None):\n if fun is None:\n self.cv.tag_unbind(item, \"<Button%s-ButtonRelease>\" % num)\n else:\n def eventfun(event):\n x, y = (self.cv.canvasx(event.x)/self.xscale,\n -self.cv.canvasy(event.y)/self.yscale)\n fun(x, y)\n self.cv.tag_bind(item, \"<Button%s-ButtonRelease>\" % num,\n eventfun, add)", "def _get_mouse_button_number(self, event):\n raise NotImplementedError", "def EVT_MOTION_STATE(win, func, motionID):\n win.Connect(-1, -1, dict_motions_pairs[motionID], func)", "def click(xy, offset_xy=(0,0)):\n (x,y) = xy\n (offset_x, offset_y) = offset_xy\n x = x + offset_x\n y = y + offset_y\n move_to((x,y))\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, x, y, 0, 0)\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, x, y, 0, 0)" ]
[ "0.65346754", "0.59252113", "0.58134866", "0.567966", "0.55477583", "0.55414116", "0.5523079", "0.5498528", "0.5462336", "0.5461063", "0.54575276", "0.5379014", "0.5379014", "0.53327185", "0.5305558", "0.5290165", "0.5269398", "0.5261307", "0.5252702", "0.52516973", "0.52459466", "0.52232456", "0.521438", "0.5201761", "0.5197504", "0.51765305", "0.51564485", "0.5149898", "0.51484936", "0.51467836" ]
0.6218892
1
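The drag binding above is what makes a turtle draggable. A small sketch with the public ondrag wrapper of the standard turtle module (not the Myturtle variant quoted here) shows the intended use, temporarily unbinding the handler to avoid re-entrant motion events.

import turtle

t = turtle.Turtle()
t.shape("turtle")

def drag(x, y):
    t.ondrag(None)   # suspend the binding while the turtle moves
    t.goto(x, y)
    t.ondrag(drag)   # re-arm it afterwards

t.ondrag(drag)       # button 1 by default; a click precedes the move events
turtle.mainloop()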
Bind fun to mouse-click event on canvas. fun must be a function with two arguments, the coordinates of the clicked point on the canvas. num, the number of the mouse button, defaults to 1. If a Myturtle is clicked, first the _onclick event will be performed, then the _onscreenclick event.
def _onscreenclick(self, fun, num=1, add=None):
    if fun is None:
        self.cv.unbind("<Button-%s>" % num)
    else:
        def eventfun(event):
            x, y = (self.cv.canvasx(event.x)/self.xscale,
                    -self.cv.canvasy(event.y)/self.yscale)
            fun(x, y)
        self.cv.bind("<Button-%s>" % num, eventfun, add)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _onclick(self, item, fun, num=1, add=None):\n if fun is None:\n self.cv.tag_unbind(item, \"<Button-%s>\" % num)\n else:\n def eventfun(event):\n x, y = (self.cv.canvasx(event.x)/self.xscale,\n -self.cv.canvasy(event.y)/self.yscale)\n fun(x, y)\n self.cv.tag_bind(item, \"<Button-%s>\" % num, eventfun, add)", "def onclick(self, fun, btn=1, add=None):\n self.screen._onclick(self.Myturtle._item, fun, btn, add)\n self._update()", "def onclick(self, fun, btn=1, add=None):\n self._onscreenclick(fun, btn, add)", "def click_fun(event):\r\n print(\"event is\", event.event, event.which)", "def press_button(self, num):\n if num == 1:\n f = self.counter1 % len(self.layout.button1_func)\n self.layout.button1_func[f]()\n self.counter1 += 1\n elif num == 2:\n f = self.counter2 % len(self.layout.button2_func)\n self.layout.button2_func[f]()\n self.counter2 += 1\n elif num == 3:\n f = self.counter3 % len(self.layout.button3_func)\n self.layout.button3_func[f]()\n self.counter3 += 1\n elif num == 4:\n f = self.counter4 % len(self.layout.button4_func)\n self.layout.button4_func[f]()\n self.counter4 += 1", "def set_click_function( self, a_function ):\n self.click_function = a_function", "def set_doubleclick_slot(self, fun):\n self.doubleclick_fun = fun", "def bind(self):\n self.canvas.bind(\"<ButtonPress-1>\", self.click)", "def mouse_click(self,x,y,button,double_click):\n raise NotImplementedError(\"ERROR: Unimplemented function.\")", "def set_button(frame, caption, func, number):\n\n b = Button(frame, text=caption, width=15, height=1)\n b.bind(\"<Button-1>\", lambda event, f=number: func(event, f))\n b.pack(side=LEFT)", "def on_mouse_press(self, x, y, button):\n\n pass", "def _click_function( self, event ):\n if self.click_function is None:\n print( \"ListboxScroll -- click_function not set\" )\n else:\n # sending the selection get, but perhaps should\n # send the event and let click function ....!!!\n # a_key = event.widget.selection_get()\n #rint( a_key )\n # self.click_function( a_key )\n self.click_function( event )", "def onclick(event):\n\t#~ cords = [] #This is an empty list which will store the x and y coordinates of each click on the graph\n\t#It's fine to keep this as a list because we won't be operating on it\n\tglobal ix, iy\n\tix,iy = event.xdata, event.ydata\n\tprint 'x = %.5f, y = %.2e' %(ix,iy) #This will print out the x and y values so you can check that no shifting occured\n\n\tglobal cords\n\tcords.append((ix,iy)) #Stores the x and y click in the array\n\n\treturn", "def on_clicked(self, func):\n cid = self.cnt\n self.observers[cid] = func\n self.cnt += 1\n return cid", "def on_clicked(self, func):\n cid = self.cnt\n self.observers[cid] = func\n self.cnt += 1\n return cid", "def eventBindings(callbackFct, isThread=False,grabInput=False):\n\tprint(\"[PSSM_OpenCV - Click handler] : Let's do this\")\n\tglobal eventCallbackFct\n\tif grabInput:\n\t\tprint('Using an emulator - nothing to be grabbed')\n\teventCallbackFct = callbackFct\n\tcv2.setMouseCallback(\"PSSM_Emulator\", cv2Link)", "def _ondrag(self, item, fun, num=1, add=None):\n if fun is None:\n self.cv.tag_unbind(item, \"<Button%s-Motion>\" % num)\n else:\n def eventfun(event):\n try:\n x, y = (self.cv.canvasx(event.x)/self.xscale,\n -self.cv.canvasy(event.y)/self.yscale)\n fun(x, y)\n except Exception:\n pass\n self.cv.tag_bind(item, \"<Button%s-Motion>\" % num, eventfun, add)", "def mouse_press_event(self, x: int, y: int, button: int):\n pass", "def handle_mouse_press(self, event):", "def on_clicked(self, func):\n return 
self._observers.connect('clicked', lambda event: func(event))", "def _onrelease(self, item, fun, num=1, add=None):\n if fun is None:\n self.cv.tag_unbind(item, \"<Button%s-ButtonRelease>\" % num)\n else:\n def eventfun(event):\n x, y = (self.cv.canvasx(event.x)/self.xscale,\n -self.cv.canvasy(event.y)/self.yscale)\n fun(x, y)\n self.cv.tag_bind(item, \"<Button%s-ButtonRelease>\" % num,\n eventfun, add)", "def click_event(self, event, x, y, flags, param):\n del flags, param\n if self.calibrated:\n if event == cv2.EVENT_LBUTTONUP:\n # Left click\n self.event = {'event': 'Positioning', 'x': x, 'y': y}\n\n elif event == cv2.EVENT_RBUTTONUP:\n # Right click\n self.event = {'event': 'PatchClamp', 'x': x, 'y': y}\n pass", "def Canvas_onclick(event):\n global ix, iy\n ix, iy = event.xdata, event.ydata\n print 'x = %f -> i = %d, y = %f' % (ix,ix/0.5*fig.Fulllength, iy)\n\n global coords\n coords = [ix, iy]\n\n return coords", "def game_click(coord):\n mouseclick(coord[0], coord[1])\n time.sleep(0.5)", "def apply_click(self, pos):\n if self.drawmode & self.DRAW_CIRCLE:\n self.click_bits(pos)\n else:\n self.click_bit(pos)", "def onclick(event, annot, pltObj, pixels, rawdata, **kwargs):\n\tvis = annot.get_visible()\n\tif event.inaxes == pltObj:\n\t\tif not vis:\n\t\t\tupdateAnnot(event.xdata, event.ydata, pixels, annot, rawdata)\n\t\t\tannot.set_visible( True )\n\t\t\tevent.canvas.draw()\n\t\telse:\n\t\t\tannot.set_visible( False )\n\t\t\tevent.canvas.draw()", "def on_pixel_clicked(self, pix_id):\n print(f\"Clicked pixel_id {pix_id}\")", "def on_click(self, x, y):\n mul_x, mul_y = self.multiplier\n off_x, off_y = self.offset\n x -= off_x\n x /= mul_x\n y -= off_y\n y /= mul_y\n for button in self.button_dict.values():\n button.check_click(x, y)", "def on_mouse_click(self, event):\n if not self.is_game_over:\n try:\n # i, j coordinates of the click event\n i = int(round(event.ydata))\n j = int(round(event.xdata))\n\n # Left button\n if event.button == 1 or event.button == 2:\n self.reveal(i, j)\n\n # Right button\n elif event.button == 3:\n self.flag(i, j)\n\n except (TypeError, IndexError):\n pass", "def __on_click(self,event, x, y, p1, p2): \r\n \r\n # global variables of the class with mouse click position\r\n global mouse_click_pos, mouse_click_list \r\n \r\n mouse_click_list = []\r\n if event == cv2.EVENT_LBUTTONDOWN:\r\n \r\n mouse_click_pos = (x,y)\r\n print(mouse_click_pos)\r\n mouse_click_list.append((x, y))" ]
[ "0.6381053", "0.63472396", "0.6090573", "0.58589256", "0.5826599", "0.5661302", "0.5654933", "0.56274605", "0.5607574", "0.55640775", "0.5502268", "0.54359365", "0.53825593", "0.5359921", "0.5359921", "0.53023577", "0.52998567", "0.5286697", "0.52823967", "0.52305996", "0.5229726", "0.5228155", "0.5222723", "0.5211015", "0.51464933", "0.51415503", "0.5123183", "0.51216", "0.511618", "0.51158863" ]
0.72543937
0
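A short sketch of the screen-level click binding, again through the public onscreenclick method of the standard turtle module rather than the internal _onscreenclick hook; the callback gets the clicked point in turtle coordinates.

import turtle

screen = turtle.Screen()
t = turtle.Turtle()

def goto_click(x, y):
    print("clicked at", x, y)
    t.goto(x, y)

screen.onscreenclick(goto_click)   # button defaults to 1; pass btn=3 for right clicks
turtle.mainloop()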
Bind fun to key-release event of key. Canvas must have focus. See method listen.
def _onkeyrelease(self, fun, key):
    if fun is None:
        self.cv.unbind("<KeyRelease-%s>" % key, None)
    else:
        def eventfun(event):
            fun()
        self.cv.bind("<KeyRelease-%s>" % key, eventfun)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _on_key_release(self, event):", "def key_release_event(self, event):\n pass", "def on_release(self, keyname):\n self.keydown = False\n keyname = str(keyname).strip('\\'')\n log.info('KEY RELEASE ' + keyname)\n if keyname in self.controls_keyrelease:\n key_handler = self.controls_keyrelease[keyname]()", "def ev_KEYUP(self, event):", "def _on_key_press(self, event):", "def _onkeypress(self, fun, key=None):\n if fun is None:\n if key is None:\n self.cv.unbind(\"<KeyPress>\", None)\n else:\n self.cv.unbind(\"<KeyPress-%s>\" % key, None)\n else:\n def eventfun(event):\n fun()\n if key is None:\n self.cv.bind(\"<KeyPress>\", eventfun)\n else:\n self.cv.bind(\"<KeyPress-%s>\" % key, eventfun)", "def ev_KEYDOWN(self, event):", "def key_release_event(self, widget, event):\n # get keyname or keycode and translate to ginga standard\n # keyname =\n # keycode =\n keyname = '' # self.transkey(keyname, keycode)\n self.logger.debug(\"key release event, key=%s\" % (keyname))\n return self.make_ui_callback('key-release', keyname)", "def keyReleaseEvent(self, event):\n self.game_engine.input_manager.keyReleaseEvent(event)", "def _on_key_release(self, key):\n if key is self.TRIGGER_KEY:\n print(\"End Recording\")\n self.do_record = False", "def on_key_release(self, key, modifiers):\n player_controller.input_release(key, self.player)", "def on_key_release(self, symbol, modifiers):\n self.gamestatemanager.peek().on_key_release(symbol, modifiers, self.config_data[\"controls\"])", "def debounced_key_release(event):\n # print('Debounced release', repr(event.key))\n key_indicator.set_text('')\n fig.canvas.draw()", "def on_key_release(self, key, modifiers):\n pass # stop animation", "def on_key_event(self, key):\n pass", "def keyReleaseEvent (self, event):\n super(DiagramScene, self).keyReleaseEvent(event)", "def on_key(self, _win, key, _scancode, action, _mods):\n if action == glfw.PRESS or action == glfw.REPEAT:\n if key == glfw.KEY_ESCAPE or key == glfw.KEY_Q:\n glfw.set_window_should_close(self.win, True)\n if key == glfw.KEY_W:\n GL.glPolygonMode(GL.GL_FRONT_AND_BACK, next(self.fill_modes))\n\n self.key_handler(key)", "def keyReleaseEvent(self, ev):\n self.currentKbKey = None\n\n if (ev.key() == self.panKey):\n # disable Pan/Zoom mode\n self.panning = False\n if self.__pointerLeftWidget:\n # we've left the widget - reset the cursor to the standard arrow\n self.setCursor(Qt.ArrowCursor)\n else:\n self.setCursor(self.defaultCursor)\n elif (ev.key() == self.selectAddKey):\n # disable selection add mode\n if self.__pointerLeftWidget:\n # we've left the widget - reset the cursor to the standard arrow\n self.setCursor(Qt.ArrowCursor)\n else:\n self.setCursor(self.defaultCursor)\n elif (ev.key() == self.zoomKey):\n # disable zoom mode\n self.__zooming = False\n else:\n self.keyRelease.emit(self, ev)", "def keyReleaseEvent(self, event):\n # The autorepeat debounces\n if not event.isAutoRepeat():\n if event.key() == Qt.Key_Up or event.key() == Qt.Key_Down or (\n event.key() == Qt.Key_Left) or event.key() == Qt.Key_Right:\n self.notifyObservers(BehavioralStates.RC, (Qt.Key_Slash, \"0\"))\n # this is so the next time we press w we know it's a new key\n elif event.key() == Qt.Key_W:\n self.notifyObservers(BehavioralStates.RC, (Qt.Key_Q, \"0\"))", "def key_handler(self):\n \n self.pressed = waitKey(1) & 255 #wait for keypress for 10 ms\n if self.pressed == 27: #exit program on 'esc'\n print \"exiting...\"\n self.camera.cam.release()\n exit()\n \n for key in self.key_controls.keys():\n if chr(self.pressed) == key:\n 
self.key_controls[key]()", "def ev_keydown(self, event: KeyDown) -> None:", "def key_handler(self, event):\n if event.type == pygame.KEYUP: \n self.done = True", "def on_key_press(self, event):\n\n #print(\"you pressed {}\".format(event.key))\n key_press_handler(event, self.canvas, self.toolbar)", "def debounced_key_press(event):\n # print('Debounced press', repr(event.key))\n key_indicator.set_text(event.key)\n if event.key == ' ':\n throttle_handler()\n fig.canvas.draw()", "def release_bound_key(self, event):\n try:\n if event.key in [key[0] for key in self.key_bindings]:\n self.unpress()\n except TypeError:\n if event.key in self.key_bindings:\n self.unpress()", "def onkey(self, fun, key):\n if fun is None:\n if key in self._keys:\n self._keys.remove(key)\n elif key not in self._keys:\n self._keys.append(key)\n self._onkeyrelease(fun, key)", "def on_press(self, keyname):\n if self.keydown:\n return\n try:\n self.keydown = True\n keyname = str(keyname).strip('\\'')\n log.info('KEY PRESS ' + keyname)\n if keyname == 'Key.esc':\n self.toggle_tracking(False)\n # self.tracking = False\n self.drone.land()\n self.drone.quit()\n\n \n cv2.destroyAllWindows() \n os._exit(0)\n \n if keyname in self.controls_keypress:\n self.controls_keypress[keyname]()\n except AttributeError:\n log.debug(f'special key {keyname} pressed')", "def on_key_release(self, key, callback):\n self._key_release_mappings.setdefault(key, []).append(callback)", "def key_down(key):\n vk = key\n # XXX exception if >= 256\n _key_down(vk)", "def on_key_down(self, key, callback):\n self._key_down_mappings.setdefault(key, []).append(callback)" ]
[ "0.7828458", "0.73670536", "0.7318554", "0.6937562", "0.68964875", "0.6888811", "0.68864244", "0.6816621", "0.6766921", "0.6632363", "0.6632041", "0.6628382", "0.6627461", "0.66099536", "0.6597856", "0.6597017", "0.651941", "0.6456723", "0.642119", "0.6409312", "0.6390656", "0.6363465", "0.63627356", "0.6327647", "0.63162", "0.62750506", "0.6227194", "0.62167096", "0.62116504", "0.6190865" ]
0.80060947
0
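A minimal sketch of the key-release binding via the public onkeyrelease method of the standard turtle module; as the docstring says, the canvas only receives key events after listen() gives it focus.

import turtle

screen = turtle.Screen()

def released():
    print("space released")

screen.onkeyrelease(released, "space")
screen.listen()   # the canvas must have keyboard focus
turtle.mainloop()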
If key is given, bind fun to keypress event of key. Otherwise bind fun to any keypress. Canvas must have focus. See method listen.
def _onkeypress(self, fun, key=None):
    if fun is None:
        if key is None:
            self.cv.unbind("<KeyPress>", None)
        else:
            self.cv.unbind("<KeyPress-%s>" % key, None)
    else:
        def eventfun(event):
            fun()
        if key is None:
            self.cv.bind("<KeyPress>", eventfun)
        else:
            self.cv.bind("<KeyPress-%s>" % key, eventfun)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _onkeyrelease(self, fun, key):\n if fun is None:\n self.cv.unbind(\"<KeyRelease-%s>\" % key, None)\n else:\n def eventfun(event):\n fun()\n self.cv.bind(\"<KeyRelease-%s>\" % key, eventfun)", "def onkey(self, fun, key):\n if fun is None:\n if key in self._keys:\n self._keys.remove(key)\n elif key not in self._keys:\n self._keys.append(key)\n self._onkeyrelease(fun, key)", "def onkeypress(self, fun, key=None):\n if fun is None:\n if key in self._keys:\n self._keys.remove(key)\n elif key is not None and key not in self._keys:\n self._keys.append(key)\n self._onkeypress(fun, key)", "def bind_key(self, key, command):\n self.key_bindings[key] = command", "def on_key_press(self, key, callback):\n self._key_press_mappings.setdefault(key, []).append(callback)", "def on_key(self, _win, key, _scancode, action, _mods):\n if action == glfw.PRESS or action == glfw.REPEAT:\n if key == glfw.KEY_ESCAPE or key == glfw.KEY_Q:\n glfw.set_window_should_close(self.win, True)\n if key == glfw.KEY_W:\n GL.glPolygonMode(GL.GL_FRONT_AND_BACK, next(self.fill_modes))\n\n self.key_handler(key)", "def on_key_event(self, key):\n pass", "def processKey(self, key):\n if self.key_handler is not None:\n self.key_handler(key)", "def key_event(self, key: Any, action: Any):\n pass", "def on_key(self, _win, key, _scancode, action, _mods):\n if action == glfw.PRESS or action == glfw.REPEAT:\n if key == glfw.KEY_ESCAPE or key == glfw.KEY_Q:\n glfw.set_window_should_close(self.win, True)\n if key == glfw.KEY_W:\n GL.glPolygonMode(GL.GL_FRONT_AND_BACK, next(self.fill_modes))\n if key == glfw.KEY_R:\n os.system(\"pkill aplay\")\n os.system(\"aplay T-Rex.wav &\")\n glfw.set_time(0)\n if key == glfw.KEY_N:\n self.normal_mapping = 1 - self.normal_mapping", "def bind_key(self, key):\n self.key_bindings.append(key)", "def on_key(window, key, scancode, action, mods):\n if action != glfw.PRESS:\n return\n \n global controller\n\n if key == glfw.KEY_SPACE:\n controller.fillPolygon = not controller.fillPolygon\n\n elif key == glfw.KEY_ESCAPE:\n glfw.set_window_should_close(window, True)\n\n # Si detecta la tecla [Q] cambia el estado del efecto 1 : zoom\n elif key == glfw.KEY_Z:\n controller.effect1 = not controller.effect1\n\n # Si detecta la tecla [W] cambia el estado del efecto 2 : corte\n elif key == glfw.KEY_C:\n controller.effect2 = not controller.effect2\n\n else:\n print('Unknown key')", "def __acceptKeyDown(self, key):\n self.accept(key, self.__handleKeyDown, [key])", "def bind(self, keysym, func):\n if type(keysym) == list:\n [self.bind(key, func) for key in keysym]\n elif keysym in self.binds:\n self.binds[keysym].append(func)\n else:\n self.binds[keysym] = [func]", "def __master_key_callback(self, window, key, scancode, action, mods):\n self.call_key_callback(window, key, scancode, action, mods, keyboard=self)", "def keypress(self, key): # pragma: no cover\n if key == \"s\":\n self.screenshot()\n\n elif key == \"q\" or key == \"Esc\":\n self.close()\n\n elif key == \"c\":\n self._print_camera()", "def _on_key_press(self, event):", "def key_handler(self):\n \n self.pressed = waitKey(1) & 255 #wait for keypress for 10 ms\n if self.pressed == 27: #exit program on 'esc'\n print \"exiting...\"\n self.camera.cam.release()\n exit()\n \n for key in self.key_controls.keys():\n if chr(self.pressed) == key:\n self.key_controls[key]()", "def _handle_key_event(self, key, modifiers, mapping):\n if key in mapping:\n for callback in mapping[key]:\n callback()", "def register_key_press_event(self, event_name, key, callback):\n if key in 
self._key_cb:\n raise ValueError(\"Key %s is already bound to another event.\" % key)\n self._key_cb[key] = event_name\n self._root.bind(\"<%s>\" % key, callback)", "def process_key(self, key):\n # Global and nonmaskable callbacks are supposed to work\n # even when the screen backlight is off\n #\n # First, querying global callbacks - they're more important than\n # even the current proxy nonmaskable callbacks\n logger.debug(\"Received key: {}\".format(key))\n if key in self.global_keymap:\n callback = self.global_keymap[key]\n self.handle_callback(callback, key, type=\"global\")\n return\n # Now, all the callbacks are either proxy callbacks or backlight-related\n # Saving a reference to current_proxy, in case it changes during the lookup\n current_proxy = self.get_current_proxy()\n if key in current_proxy.nonmaskable_keymap:\n callback = current_proxy.nonmaskable_keymap[key]\n self.handle_callback(callback, key, type=\"nonmaskable\", context_name=current_proxy.context_alias)\n return\n # Checking backlight state, turning it on if necessary\n if callable(self.backlight_cb):\n try:\n # backlight_cb turns on the backligth as a side effect\n backlight_was_off = self.backlight_cb()\n except:\n logger.exception(\"Exception while calling the backlight check callback!\")\n else:\n # If backlight was off, ignore the keypress\n if backlight_was_off is True:\n return\n # Now, all the other callbacks of the proxy:\n # Simple callbacks\n if key in current_proxy.keymap:\n callback = current_proxy.keymap[key]\n self.handle_callback(callback, key, context_name=current_proxy.context_alias)\n #Maskable callbacks\n elif key in current_proxy.maskable_keymap:\n callback = current_proxy.maskable_keymap[key]\n self.handle_callback(callback, key, type=\"maskable\", context_name=current_proxy.context_alias)\n #Keycode streaming\n elif callable(current_proxy.streaming):\n self.handle_callback(current_proxy.streaming, key, pass_key=True, type=\"streaming\", context_name=current_proxy.context_alias)\n else:\n logger.debug(\"Key {} has no handlers - ignored!\".format(key))\n pass #No handler for the key", "def on_key(self, _window, key, _scancode, action, _mods):\n is_press = action == glfw.PRESS or action == glfw.REPEAT\n if is_press and (key == glfw.KEY_ESCAPE or key == glfw.KEY_Q):\n glfw.set_window_should_close(self.window, True)\n\n if action != glfw.REPEAT:\n self.key_handler(key, is_press)", "def bindKeys(self):\r\n self.c.bind(\"<Button-1>\",self.seek)\r\n self.c.bind(\"<MouseWheel>\",self.app.zoom)\r\n self.c.bind(\"<Button-3>\",self.peek)", "def send(self, key: str) -> bool:\n\n def _run_callback() -> None:\n \"\"\"Call callback if `keys.ANY_KEY` is bound\"\"\"\n\n if keys.ANY_KEY in self._bindings:\n method, _ = self._bindings[keys.ANY_KEY]\n method(self, key)\n\n if self.execute_binding(key):\n return True\n\n if key == keys.BACKSPACE and self.cursor > 0:\n left = self.value[: self.cursor - 1]\n right = self.value[self.cursor :]\n self.value = left + right\n\n self.cursor -= 1\n\n _run_callback()\n\n elif key in [keys.LEFT, keys.CTRL_B]:\n self.cursor -= 1\n\n elif key in [keys.RIGHT, keys.CTRL_F]:\n self.cursor += 1\n\n # Ignore unhandled non-printing keys\n elif key == keys.ENTER or key not in string.printable:\n return False\n\n # Add character\n else:\n left = self.value[: self.cursor] + key\n right = self.value[self.cursor :]\n\n self.value = left + right\n self.cursor += len(key)\n _run_callback()\n\n return True", "def keypress(cls, _, key):\n return key", "def on_key_press(self, event):\n\n 
#print(\"you pressed {}\".format(event.key))\n key_press_handler(event, self.canvas, self.toolbar)", "def keypress(key):\n k = PyKeyboard()\n if key == 'enter':\n key = k.return_key\n k.tap_key(key)", "def on_press(key):\n try:\n # gets pressed key char value and searches it from dict with get method.\n mapped_key = key_mappings.get(key.char) # gets value and type tuple or None\n if mapped_key:\n module.pressed_key = mapped_key\n except AttributeError:\n traceback.print_exc()\n except KeyboardInterrupt:\n print(f\"\\n{module.current_time()} Application stopped\")", "def add_key_command(self, key, command):\n\n self.keybindings[key] = command", "def keypress(self, key, state=None):\n\n\t\tself._interface.keypress(key, state)" ]
[ "0.71817255", "0.7139713", "0.694745", "0.6629521", "0.65834564", "0.6567381", "0.6504404", "0.6493854", "0.6422913", "0.6373026", "0.6367208", "0.6248102", "0.6203105", "0.61477613", "0.61441123", "0.6135494", "0.6100688", "0.60639316", "0.6040798", "0.6035595", "0.6017277", "0.6000811", "0.5999378", "0.59959877", "0.5978476", "0.59320456", "0.5909544", "0.587373", "0.58488506", "0.58247584" ]
0.7777757
0
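The key-press binding has the same shape; a sketch with the public onkeypress method of the standard turtle module, bound to one key here (omitting the key argument binds the handler to any key press).

import turtle

screen = turtle.Screen()
t = turtle.Turtle()

def step():
    t.forward(10)

screen.onkeypress(step, "Up")   # key=None would catch every key press instead
screen.listen()                 # give the canvas keyboard focus
turtle.mainloop()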
Install a timer, which calls fun after t milliseconds.
def _ontimer(self, fun, t):
    if t == 0:
        self.cv.after_idle(fun)
    else:
        self.cv.after(t, fun)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ontimer(self, fun, t=0):\n self._ontimer(fun, t)", "def start(self, sec, callFunc, *args, **kwargs):\n self.cancel()\n \n def doit(args=args, kwargs=kwargs):\n self._timerID = None\n callFunc(*args, **kwargs)\n\n self._timerID = self._tkWdg.after(int(0.5 + (1000.0 * sec)), doit)", "def __timeout(self, seconds, func, *args):\n t = threading.Timer(seconds, func, *args)\n self._timer = t\n t.start()", "def timerfunc(func):\n function_timer = functools.partial(__timerfunc, printer=print)(func)\n return function_timer", "def timer(func):\n def timer_wraper(*args, **kwargs):\n start = time.time()\n func(*args, **kwargs)\n end = time.time()\n runtime = (end - start)\n #Minutes, seconds, hours, minutes\n m, s = divmod(runtime, 60)\n h, m = divmod(m, 60)\n print(\" Execution time: %d:%02d:%02d (H:MM:SS)\" % (h, m, s))\n return timer_wraper", "def create_timer(function, time):\n timer = Timer()\n timer.timeout.connect(function)\n timer.start(time)\n timer.speed = time\n return timer", "def __set_interval(self, func, sec):\n\n def func_wrapper():\n self.__set_interval(func, sec)\n func()\n\n t = threading.Timer(sec, func_wrapper)\n t.start()\n return t", "def timer(fun):\n @wraps(fun)\n def wrapper(args):\n \"\"\"Wraps function execution time.\"\"\"\n if args[\"--time\"]:\n import time\n start_time = time.time()\n result = fun(args)\n LOGGER.info(\"Total time:\", time.time() - start_time)\n return result\n\n return fun(args)\n\n return wrapper", "def startTimer (self, usec, callback):\n return self.impl.startTimer(usec, callback)", "def timer(func):\n @wraps(func)\n def wrapper_timer(*args, **kwargs):\n print(f'[Function: {func.__name__!r} start...]')\n tic = time()\n try:\n return func(*args, **kwargs)\n finally:\n toc = time()\n run_time = toc - tic\n print(f'[Function: {func.__name__!r} finished in {run_time:.4f} secs]\\n')\n return wrapper_timer", "def clockit(func):\n def new(*args, **kw):\n t = Timer()\n retval = func(*args, **kw)\n t.stop()\n print(\"{} in {}\".format(func.__name__, t))\n del t\n return retval\n return new", "def _settimer(t,m=''):\n\n FNULL = open(os.devnull, 'w')\n \n selfPath = os.path.abspath(__file__)\n cmd = 'nohup nice python {:s} _run {:0.10f} {:s}'.format(selfPath,t,m)\n \n subprocess.Popen(cmd.split(),stdout=FNULL, stderr=subprocess.STDOUT)\n print _gettxt(t,m=m)", "def timer(func):\n def wrapper(*args, **kwargs):\n start = time.time()\n value = func(*args, **kwargs)\n print(f\"Running time: {time.time() - start}\")\n return value\n return wrapper", "def watch(self, func, seconds=3600):\n func\n time.sleep(seconds)", "def timer(func):\n @functools.wraps(func)\n def wrapper_timer(*args, **kwargs):\n start_time = time.perf_counter() # 1\n value = func(*args, **kwargs)\n end_time = time.perf_counter() # 2\n run_time = end_time - start_time # 3\n print(\"Finished %r in %.2f seconds\"%(func.__name__,run_time))\n return value\n return wrapper_timer", "def Timer(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n start_time = time.perf_counter()\n ans = func(*args, **kwargs)\n end_time = time.perf_counter()\n running_time = end_time-start_time\n logger.debug(\n f'func:{func.__name__} running in {running_time:.4f} sec.')\n return ans\n return wrapper", "def update(self, func):\n if self.current_time == 0:\n func()\n return\n self.current_time -= 1\n hours = self.current_time // 3600\n minutes = self.current_time % 3600 // 60\n seconds = self.current_time % 60\n try:\n self.timer_label.setText('%02d:%02d:%02d' % (hours, minutes, seconds))\n if self.current_time <= 
10:\n self.timer_label.setStyleSheet('color: red')\n Qt.QTimer().singleShot(1000, lambda: self.update(func))\n except RuntimeError:\n return", "def timer(func):\n\n @functools.wraps(func)\n def wrapper_timer(*args, **kwargs):\n start_time = time.perf_counter()\n value = func(*args, **kwargs)\n end_time = time.perf_counter()\n run_time = end_time - start_time\n print(f\"Finished {func.__name__!r} in {run_time:.4f} secs\")\n return value\n\n return wrapper_timer", "def timer(func):\n # Define the wrapper function to return\n def wrapper(*args, **kwargs):\n # When wrapper() is called, get the current time\n t_start = time.time()\n # Call the decorated function and store the result.\n result = func(*args, **kwargs)\n # Get the total time it took to run, and print it.\n t_total = time.time() - t_start\n print('{} took {}s'.format(func.__name__, t_total))\n\n return result\n\n return wrapper", "def time_it(func):\n\n def wrapper(*args, **kwargs):\n start = timer()\n func(*args, **kwargs)\n print(timer() - start)\n\n return wrapper", "def timer(func):\n\n @functools.wraps(func)\n def wrapper_timer(*args, **kwargs):\n start_time = time.perf_counter() # 1\n value = func(*args, **kwargs)\n end_time = time.perf_counter() # 2\n run_time = end_time - start_time # 3\n print(f\"Finished {func.__name__!r} in {run_time:.4f} secs\")\n return value\n\n return wrapper_timer", "def register_event(self, freq, func):\n def wrapper():\n if self.running:\n if func():\n t = threading.Timer(freq, wrapper)\n\n self.events_lock.acquire()\n self.events.append(t)\n t.start()\n self.events_lock.release()\n\n wrapper()", "def timer(func):\n @functools.wraps(func)\n def wrapper_timer(*args, **kwargs):\n start_time = time.perf_counter() # 1\n value = func(*args, **kwargs)\n end_time = time.perf_counter() # 2\n run_time = end_time - start_time # 3\n# print(f\"\\nFinished {func.__name__!r} in {run_time:.4f} secs\")\n print('\\nFinished {!r} in {:.4f} s'.format(func.__name__, run_time))\n return value\n return wrapper_timer", "def timer(func):\n @functools.wraps(func)\n def wrapper_timer(*args, **kwargs):\n start_time = time.clock() # 1\n value = func(*args, **kwargs)\n end_time = time.clock() # 2\n run_time = end_time - start_time # 3\n print(\"Finished {0!r} in {1:.4f} secs\".format(func.__name__, run_time ))\n return value\n return wrapper_timer", "def start_periodic_timer(self, interval, fun, *args, **kwargs):\n entry = timer(interval, fun, *args, **kwargs)\n self._timers.append(entry)\n return entry", "def _runtimer(t,m=''):\n t = float(t) # minutes\n \n if not os.path.exists('/var/tmp/timer'):\n os.makedirs('/var/tmp/timer')\n \n txt,endobj = _gettxt(t,m,ret_end=True)\n \n name = '/var/tmp/timer/' + str(time.mktime(endobj.timetuple()))\n with open(name,'w') as F:\n F.write(txt)\n \n time.sleep(t*60)\n \n if not os.path.exists(name):\n return # It was deleted while sleeping\n else:\n os.remove(name)\n \n # Just mac for now. Will add linux later\n uname = os.uname()[0]\n if uname.lower() == 'darwin':\n cmd = \"\"\"terminal-notifier -message 'Timer Finished: {:0.2f} minutes. 
{:s}' -title 'Timer'\"\"\".format(t,m)\n os.system(cmd)\n os.system('say -v bells \"beep\"')", "def func_with_internal_timer(n):\n start = time.time()\n\n # Do the work of the function here e.g.\n time.sleep(n)\n\n elapsed = time.time() - start\n print(\"{} finished in {:2.3f} seconds\".format(\"func_with_internal_timer\", elapsed))", "def timer_handler():\r\n \r\n global elapsed_time\r\n elapsed_time += 1", "def timer(func):\n @wraps(func)\n def wrap_timer(*args, **kwargs):\n t0 = perf_counter()\n returned = func(*args, **kwargs)\n t1 = perf_counter()\n print(f\"[Time: {t1-t0:.6f} s]\")\n return returned\n return wrap_timer", "def __window_setTimeout(self, f, delay):\n self.__dict__['__timeout'].append(f)" ]
[ "0.7902541", "0.6533716", "0.6487333", "0.64392936", "0.64059657", "0.6389365", "0.6331448", "0.6325191", "0.62742996", "0.6244253", "0.615073", "0.6129014", "0.6125973", "0.6120906", "0.61153", "0.61113065", "0.61081517", "0.6058988", "0.60318214", "0.6030502", "0.60116047", "0.5982823", "0.59795576", "0.5974313", "0.5973388", "0.59538776", "0.5941212", "0.59347355", "0.59172636", "0.5915301" ]
0.6822445
1
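A sketch of the timer hook through the public ontimer method of the standard turtle module; since the underlying Tk after call fires only once, a repeating animation re-arms the timer inside its own callback.

import turtle

screen = turtle.Screen()
t = turtle.Turtle()

def tick():
    t.forward(5)
    screen.ontimer(tick, 100)   # schedule the next step in 100 ms

screen.ontimer(tick, 100)       # first call after 100 ms (t=0 would run when idle)
turtle.mainloop()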
Configure image item to draw image object at position (x, y) on canvas.
def _drawimage(self, item, pos, image):
    x, y = pos
    self.cv.coords(item, (x * self.xscale, -y * self.yscale))
    self.cv.itemconfig(item, image=image)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pos_image(image, x,y):\n image.anchor_x = x\n image.anchor_y = y", "def on_draw_over_image(self):", "def draw_image_on_canvas(self, force_generation=False):\n\n self.canvas_vertex = (self.canvas.canvasx(0), self.canvas.canvasy(0))\n box_coords = (self.canvas_vertex[0], self.canvas_vertex[1],\n self.canvas_vertex[0] + self.frame.width, self.canvas_vertex[1] + self.frame.height)\n\n # some weird bug with canvas being 0 when scrolling back to origin\n if box_coords[0] == -1:\n box_coords = (box_coords[0] + 1, box_coords[1], box_coords[2] + 1, box_coords[3])\n\n if box_coords[1] == -1:\n box_coords = (box_coords[0], box_coords[1] + 1, box_coords[2], box_coords[3] + 1)\n\n self.box_coords = box_coords\n\n image, self.top_left = self.get_image(box_coords, force_generation=force_generation)\n\n if image is not None:\n self.canvas.delete(\"all\")\n\n # this ownership is necessary, or the image does not show up on the canvas\n self.image = ImageTk.PhotoImage(image=image)\n\n self.image_on_canvas = self.canvas.create_image(\n self.top_left[0], self.top_left[1], image=self.image, anchor=\"nw\")", "def draw(self, canvas):\n canvas.delete(\"all\")\n width = canvas.winfo_reqwidth()\n height = canvas.winfo_reqheight()\n\n image = ImageTk.PhotoImage(self.image())\n canvas.create_image(width/2, height/2, image=image)\n canvas.img = image", "def draw_me(self):\r\n\t\tself.image.fill((100, 200, 100))\r\n\t\tif self.active: pg.draw.rect(self.image, (100, 100, 200), self.frame, 3) #if active => draw frame around selected entity width 3\r\n\t\tself.display_surface.blit(self.image, self.rect)", "def Draw(self):\n\t\tGameImage.Draw(self, self.coords)", "def __init__(self, image, rect, x, y, xv, yv):\n self.image = image\n self.rect = rect\n self.rect.topleft = [x, y]\n self.x = x\n self.y = y\n self.xv = xv\n self.yv = yv", "def display_image(self, img, img_pos):\n image = tk.Label(self.top, image=img)\n image.grid(row=img_pos[0], column=img_pos[1],\n columnspan=img_pos[2], rowspan=img_pos[3])", "def draw(self):\n self.screen.blit(self.image, (self.x_pos1, self.y_pos))\n self.screen.blit(self.image, (self.x_pos2, self.y_pos))", "def on_image(self, image):", "def blitme(self):\r\n #draw the image to the screen at the position specifid by self.rect.\r\n self.screen.blit(self.image,self.rect)", "def draw(self):\r\n self.screen.blit(self.image, self.image.get_rect())", "def set_sprite(self, image):\n self.current_sprite = image\n self.draw_alpha()", "def update_current_image(self):\n\n rect = (0, 0, self.variables.canvas_width, self.variables.canvas_height)\n if self.variables.canvas_image_object is not None:\n self.variables.canvas_image_object.update_canvas_display_image_from_canvas_rect(rect)\n self.set_image_from_numpy_array(self.variables.canvas_image_object.display_image)\n self.update()", "def draw_image(self, image, position = (0, 0), anchor= 'topleft'):\n offset = self._calculate_offset(anchor, image._surf.get_size())\n self._surf.blit(image._surf, position + offset)", "def __draw_image(self):\n if self.image_name is not None:\n img = mpimg.imread(self.image_name)\n extent = (0.5, self.xmax+0.5, -0.5, self.ymax-0.5)\n self.ax.imshow(img, extent=extent, origin='lower',\n alpha=self.image_alpha)", "def draw(self, surface):\r\n if self.visible:\r\n surface.blit(self.image, (self.x, self.y))", "def __init__(self, data, (x,y)):\n\t\tGameImage.__init__(self, data)\n\t\tself.coords = (x,y)", "def update_drawer_img(self):\n self.drawer = aggdraw.Draw(self.img)\n 
self.drawer.settransform(self.coordspace_transform)", "def draw(self, color = Color.GREEN):\n self.image[self.x, self.y] = color", "def __init__(self, x, y):\n # Call the parent's constructor\n super().__init__()\n width = 10\n height = 10\n \n # Make a blue wall, of the size specified in the parameters\n self.image = pygame.Surface([width, height])\n self.image.fill(BLUE)\n \n # Make our top-left corner the passed-in location.\n self.rect = self.image.get_rect()\n self.rect.y = y\n self.rect.x = x", "def draw_item(self, icon_character, surface, x, y):\n img_path = os.path.join('images/decor', icon_character)\n character_image = pygame.image.load(img_path).convert_alpha()\n surface.blit(character_image, (x, y))", "def __init__(self, color, x, y):\n \n # Call the parent class (Sprite) constructor\n super().__init__()\n \n # Create the image of the block of appropriate size\n # The width and height are sent as a list for the first parameter.\n self.image = pygame.Surface([block_width, block_height])\n \n # Fill the image with the appropriate color\n self.image.fill(color)\n \n # Fetch the rectangle object that has the dimensions of the image\n self.rect = self.image.get_rect()\n \n # Move the top left of the rectangle to x,y.\n # This is where our block will appear..\n self.rect.x = x\n self.rect.y = y", "def SetItemImage(self, item, image, which=TreeItemIcon_Normal):\r\n\r\n item.SetImage(image, which)\r\n\r\n dc = wx.ClientDC(self)\r\n self.CalculateSize(item, dc)\r\n self.RefreshLine(item)", "def add_image(self, id, arr):\n\t\t## remove original image and add new one\n\t\tif self.cells[id].image:\n\t\t\tself.cells[id].v.removeItem(self.cells[id].image)\n\t\t'''\n\t\timg = UiImageItem()\n\t\tself.cells[id].v.addItem(img)\n\t\timg.setImage(arr)\n\t\t## rotate the imageItem (oddly the image is rotated by default)\n\t\timg.setRotation(-90)\n\t\t'''\n\t\t## use ROI instead of imageItem\n\t\tshape = arr.shape\n\t\timg = ImageROI([0,0], [shape[0],shape[1]], movable=False, rotatable=False, sendBack=False, parent=self.cells[id].v)\n\n\t\timg.set_image(arr)\n\t\tself.cells[id].image = img\n\t\tself.cells[id].v.addItem(img)\n\t\t\n\t\t##set histo's curve color, should be called before set_image\n\t\tbuttons = self.cells[1].button_area.colorWidget.buttons\n\t\timg.set_curve_pen(buttons['color'].currentColor(), buttons['size'].value(), buttons['tense'].value()/100.)\n\n\t\t## rotate the imageItem (oddly the image is rotated by default)\n\t\t## there is a bug with the rotate method\n\t\timg.setRotation(-90)\n\n\t\t## connect images signals to slot in class ImageBar\n\t\timg.sigAddVertexRequested.connect(self.add_point)\n\t\timg.sigLoadImageRequested.connect(self.load_image_requested)\n\t\timg.sigCurveUpdated.connect(self.curve_updated)\n\t\t#img.sigDragTriggered.connect(self.cells[id].pathplot.add_draw_point)\n\t\tself.cells[id].open_action.triggered.connect(img.load_image_clicked)\n\t\t## clear existed points\n\t\tself.clear_point(id=id)\n\t\t## reset button area\n\t\tself.reset_button_area(id)\n\t\t## send signal to inform imagePanel\n\t\tself.sigImageLoaded.emit((id, arr))\n\t\t## update config file\n\t\tself.conf.configs['original_array'][id] = arr\n\n\t\tif id == 0 and id not in self.state['ready']:\n\t\t\tself.sigEnableHeatmapButton.emit(self)\n\n\t\tself.cells[id].v.setMouseEnabled(True, True)\n\t\tself.state['ready'][id] = True\n\n\t\t## just test...\n\t\t#ct = contourSelection.resize_segmentation(arr)\n\t\t#self.cells[id].contour = ct\n\t\t#print ct", "def draw_image(self, image, src_coor, 
src_size, dest_coor, dest_size, angle = 0):\n img = Image_process.update(image, src_coor, src_size, dest_size, angle)\n self.canvas.create_image(dest_coor, image=img)", "def __init__(self, rect, image):\n self.rect = rect\n self.image = image\n self.state = self.S_ACTIVE", "def __init__(self, pos, width):\n\n self.width = width\n self.image = pygame.Surface((self.width,500))\n self.image.fill((random.choice([(0,0,0), (50,50,50), (32,32,32)])))\n self.x = pos[0]\n self.y = pos[1]\n self.rect = self.image.get_rect()\n self.rect.topleft = pos", "def __init__(self, color, x, y):\r\n\r\n # Call the parent class (Sprite) constructor\r\n super().__init__()\r\n\r\n # Create the image of the block of appropriate size\r\n # The width and height are sent as a list for the first parameter.\r\n self.image = pygame.Surface([block_width, block_height])\r\n\r\n # Fill the image with the appropriate color\r\n self.image.fill(color)\r\n\r\n # Fetch the rectangle object that has the dimensions of the image\r\n self.rect = self.image.get_rect()\r\n\r\n # Move the top left of the rectangle to x,y.\r\n # This is where our block will appear..\r\n self.rect.x = x\r\n self.rect.y = y", "def draw(self, surface):\r\n surface.blit(self.image, self.rect)" ]
[ "0.7062693", "0.70012945", "0.6607183", "0.6527935", "0.6488746", "0.6411776", "0.639959", "0.6380511", "0.6360058", "0.6359835", "0.6342156", "0.63302064", "0.63219225", "0.62677747", "0.62642443", "0.6258162", "0.6256595", "0.6230513", "0.62140775", "0.6204366", "0.620224", "0.618804", "0.6169311", "0.6159611", "0.6157931", "0.61505014", "0.6138468", "0.61358625", "0.612417", "0.6116143" ]
0.8295738
0
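The image positioning above is normally reached through image shapes; a sketch with the standard turtle module, where logo.gif is a hypothetical GIF file in the working directory: registering it as a shape and moving the turtle ends up repositioning the underlying canvas image item.

import turtle

screen = turtle.Screen()
screen.register_shape("logo.gif")   # hypothetical image file; must exist on disk

t = turtle.Turtle()
t.shape("logo.gif")   # the turtle is now drawn with the image
t.penup()
t.goto(50, 80)        # moving it repositions the image item on the canvas
turtle.mainloop()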