query (string, 9 to 9.05k chars) | document (string, 10 to 222k chars) | metadata (dict) | negatives (list, length 30) | negative_scores (list, length 30) | document_score (string, 4 to 10 chars) | document_rank (string, 2 classes)
---|---|---|---|---|---|---
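Each row pairs a natural-language query (usually a docstring) with the code snippet it describes (document), 30 mined negative snippets, and retrieval scores for both. In the rows visible in this preview, document_rank appears to be "0" when document_score beats every negative score and "1" otherwise. The sketch below is a minimal, self-contained illustration of that reading; the field values are made up, not taken from a real row.

```python
# Illustrative row shaped like the schema above (values are made up).
row = {
    "query": "save the dataset to records file.",
    "document": "def save_records(self, pad_id): ...",
    "metadata": {"objective": {"self": [], "paired": [],
                               "triplet": [["query", "document", "negatives"]]}},
    "negatives": ["def save_dataset(self): ..."] * 30,
    "negative_scores": ["0.68754226"] + ["0.60"] * 29,
    "document_score": "0.6829742",
    "document_rank": "1",
}

best_negative = max(float(s) for s in row["negative_scores"])
rank = "0" if float(row["document_score"]) > best_negative else "1"
print(rank == row["document_rank"])  # True for this illustrative row
```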
Save the dataset to records files. Remember to call this after the convert_to_ids method.
|
def save_records(self, pad_id):
    train_records_path = os.path.join(self.config.records_dir, "train.tfrecords")
    dev_records_path = os.path.join(self.config.records_dir, "dev.tfrecords")
    test_records_path = os.path.join(self.config.records_dir, "test.tfrecords")
    statistics_file = os.path.join(self.config.records_dir, "statistics.json")
    dict = {}
    dict['train_examples_num'] = len(self.train_set)
    dict['dev_examples_num'] = len(self.dev_set)
    dict['test_examples_num'] = len(self.test_set)
    with open(statistics_file, 'w', encoding='utf8') as p:
        json.dump(dict, p)
    self._save_records(train_records_path, self.train_set, pad_id)
    self._save_records(dev_records_path, self.dev_set, pad_id)
    self._save_records(test_records_path, self.test_set, pad_id)
    self.logger.info("all data have saved to records files.")
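The bookkeeping half of save_records above amounts to writing a small statistics.json alongside the TFRecord files. A self-contained sketch of just that part, with dummy splits and a local records_dir standing in for self.train_set/dev_set/test_set and self.config.records_dir:

```python
import json
import os

# Dummy stand-ins for self.config.records_dir and the three splits.
records_dir = "records"
os.makedirs(records_dir, exist_ok=True)
train_set, dev_set, test_set = [None] * 100, [None] * 10, [None] * 10

stats = {
    "train_examples_num": len(train_set),
    "dev_examples_num": len(dev_set),
    "test_examples_num": len(test_set),
}
with open(os.path.join(records_dir, "statistics.json"), "w", encoding="utf8") as p:
    json.dump(stats, p)
```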
|
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
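The metadata block above declares which columns form a training triplet: the query is the anchor, the document the positive, and each entry of negatives a negative. A minimal sketch of expanding one row into (anchor, positive, negative) tuples according to that spec (the row literal is illustrative):

```python
# Expand one row into training triplets as declared by metadata["objective"]["triplet"].
row = {
    "query": "save the dataset to records file.",
    "document": "def save_records(self, pad_id): ...",
    "negatives": ["def save_dataset(self): ...", "def save2file(self): ..."],
    "metadata": {"objective": {"triplet": [["query", "document", "negatives"]]}},
}

anchor_col, positive_col, negatives_col = row["metadata"]["objective"]["triplet"][0]
triplets = [(row[anchor_col], row[positive_col], neg) for neg in row[negatives_col]]
print(len(triplets))  # one (anchor, positive, negative) tuple per negative
```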
|
[
"def save_dataset(self):\n if self.res_dataset is None:\n return\n if self.write_path is None:\n raise Exception(\"Error: Attempted to save result dataset without ever specifiying a path to write to\")\n\n if self.format == \"arrow\":\n self.res_dataset.save_to_disk(self.write_path)\n elif self.format == \"csv\":\n self.res_dataset.to_csv(self.write_path, index = False)",
"def save2file(self):\n ids_input = []\n labels_input = []\n ids_path = os.path.join(self.path, 'ids')\n if not os.path.exists(ids_path):\n os.makedirs(ids_path)\n labels_path = os.path.join(self.path, 'labels')\n if not os.path.exists(labels_path):\n os.makedirs(labels_path)\n ids_total = len(self.test)\n for i in range(ids_total):\n ids_input = self.test[i][0]\n labels_input = self.test[i][1]\n file_name = \"ids/\" + str(i) + \".bin\"\n file_path = os.path.join(self.path, file_name)\n np.array(ids_input, dtype=np.int32).tofile(file_path)\n file_name = \"labels/\" + str(i) + \".bin\"\n file_path = os.path.join(self.path, file_name)\n np.array(labels_input, dtype=np.int32).tofile(file_path)\n print(\"\\n ****** Success! ******\\n \")",
"def save(self):\n logger.debug('>>--- save --->>')\n\n # Assign entity id to all remaining records.\n logger.info('Assigning entity id to all remaining records.')\n for rec_id in self.left_dataset.index.values:\n self.left_dataset.set_value(rec_id, 'ENTITY_ID',\n MemoryLinkBase.get_next_id())\n\n output = self.linked.append(self.left_dataset)\n output = output.sort_values(['ENTITY_ID'])\n\n dataset = self.project['datasets'][0]\n\n try:\n usecols = dataset['columns'] or self.left_columns\n except KeyError:\n usecols = self.left_columns\n\n self.left_dataset = pd.read_csv(dataset['url'],\n index_col=dataset['index_field'],\n usecols=usecols,\n skipinitialspace=True,\n dtype=self.left_dtypes)\n\n result = pd.concat([self.left_dataset, output['ENTITY_ID']], axis=1,\n join='inner')\n cols = result.columns.tolist()\n cols.insert(0, cols.pop(cols.index('ENTITY_ID')))\n result = result[cols]\n\n self.total_entities = len(output.groupby(['ENTITY_ID']))\n\n logger.info('Total number of entities after de-duplication: %s', self.total_entities)\n # Storing deduplication result. It contains the original records plus the entity id of each record.\n deduped_file_path = self.project['output_root'] + link_config.get(\n 'deduped_data_file', 'deduped_data.csv')\n\n result['ENTITY_ID'] = result['ENTITY_ID'].map(\n lambda x: '{:.0f}'.format(x)\n if pd.notnull(x)\n else np.nan)\n\n result.replace(np.nan, '', regex=True)\n result.to_csv(deduped_file_path, index_label=dataset['index_field'],\n header=True, index=True)\n logger.info('De-duplicated file generated at %s.', deduped_file_path)\n\n # Clean all remaining temp files\n if os.path.exists(self.temp_path):\n shutil.rmtree(self.temp_path)\n\n logger.debug('<<--- save ---<<')\n return generate_linking_summary(self, self.project['output_root'])",
"def __save_datasets(self):\n self.train.to_csv('{}/{}/{}'.format(path_to_train_set, img_format, 'train.csv'))\n self.valid.to_csv('{}/{}/{}'.format(path_to_valid_set, img_format, 'valid.csv'))\n self.test.to_csv('{}/{}/{}'.format(path_to_test_set, img_format, 'test.csv'))",
"def save_seqs_to_file(self):\n if self.blast_type == 'local':\n self.seq_file = os.path.join(self.cwd,\n 'db',\n \"{0}_seqs.fas\".format(self.gene_code))\n queryset = Sequences.objects.all().filter(gene_code=self.gene_code)\n\n my_records = []\n for i in queryset:\n item_id = i.code_id + '|' + i.gene_code\n seq = self.strip_question_marks(i.sequences)\n if seq != '':\n seq_record = SeqRecord(Seq(seq), id=item_id)\n my_records.append(seq_record)\n SeqIO.write(my_records, self.seq_file, \"fasta\")",
"def _save_data(self):\n self.data.to_csv('data/c&le/{}'.format(self.name))",
"def save_dataset(self, dest = \"./Datasets/IsophonicsDataset.ds\"):\n # Serialize the dataset.\n with lzma.open(dest, \"wb\") as dataset_file:\n pickle.dump((self.DATA, self.CHORDS, self.KEYS, self.SAMPLE_RATE, self.NFFT), dataset_file)\n\n print(\"[INFO] The Isophonics Dataset was saved successfully.\")",
"def save_file(self):\n # paginate over deputies and senators getting their fields\n fieldnames = set([])\n congressmen = self.deputies + self.senators\n for data in congressmen:\n fieldnames = fieldnames.union(data.dump().keys())\n\n\n with open(IDENTITY_FILE_UPDATED, 'a') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=list(fieldnames), delimiter=';')\n writer.writeheader()\n\n for data in congressmen:\n writer.writerow(data.dump())",
"def _create_file_dataset(self, records):\r\n\r\n _error_codes = {}\r\n _n_inconsistent = 0\r\n _n_records = len(records)\r\n\r\n _dialog = gtk.FileChooserDialog(_(u\"RTK: Save Data Set to File ...\"),\r\n None,\r\n gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,\r\n (gtk.STOCK_OK, gtk.RESPONSE_ACCEPT,\r\n gtk.STOCK_CANCEL,\r\n gtk.RESPONSE_REJECT))\r\n _dialog.set_action(gtk.FILE_CHOOSER_ACTION_SAVE)\r\n if _dialog.run() == gtk.RESPONSE_ACCEPT:\r\n _filename = _dialog.get_filename()\r\n\r\n _dialog.destroy()\r\n\r\n _file = open(_filename, 'w')\r\n _file.write(\"Data Set Description: \" +\r\n self.txtDescription.get_text() + \"\\n\")\r\n _file.write(\"\\n\")\r\n _file.write(\"Record_ID\\tRequest_Date\\tLeft\\tRight\\tStatus\\tQuantity\\tUnit\\tTBF\\tAssembly_ID\\n\")\r\n\r\n # Write the first record to the open file.\r\n _file.write('0\\t' + str(records[0][9]) + '0\\t' + str(records[0][2]) +\r\n '\\t3\\t1\\t' + str(records[0][0]) + '\\t' +\r\n str(records[0][2]) + '\\t' + str(records[0][10]) + '\\n')\r\n\r\n # Write the remaining records to the open file.\r\n for i in range(1, _n_records):\r\n # Check the consistency of the two adjacent records. Any\r\n # inconsistent records will be logged, but they are always\r\n # added to the dataset.\r\n if self._consistency_check(records[i - 1], records[i]):\r\n _n_inconsistent += 1\r\n\r\n _left = self._interval_left(records[i][10], records[i - 1][10],\r\n records[i][2], records[i - 1][2])\r\n _right = float(records[i][2])\r\n _tbf = _right - _left\r\n _file.write(str(i) + '\\t' + str(records[0][9]) + str(_left) +\r\n '\\t' + str(_right) + '\\t3\\t1\\t' +\r\n str(records[0][0]) + '\\t' + str(_tbf) + '\\t' +\r\n str(records[0][10]) + '\\n')\r\n\r\n return(_error_codes, _n_inconsistent)",
"def save_csv(self, filename): # DONE\n self.data.to_csv(filename)",
"def save_data(self, f): \n if not self.sampling:\n self.convert_to_array()\n np.save(f, self.reads)",
"def save(self, data, outpath):\n data.to_csv(outpath)",
"def save(self, filename):\n with open(filename, \"w\") as f:\n m = {\n \"order\": self.order,\n \"pad\": self.pad,\n \"records\": {str(k): v for k, v in self.records.items()}\n }\n json.dump(m, f)",
"def save(self, path_to_save):\n for item in self.data_array:\n item.save(path_to_save+item.file_name)",
"def save(self):\n\n print('Bye!')\n try:\n with open('records.txt', 'w') as fh:\n fh.write(str(self._initial_money))\n for n in self._records:\n fh.write('\\n'+n)\n except OSError:\n sys.stderr.write('Cannot open file.\\n')",
"def save_datasets(self):\n if self.processed_extension == '.csv':\n # Save to csv\n logger.info(f'Saving sets to csv:')\n \n # TRAIN\n logger.info(f'train: {self.train_path}')\n \n # Concatenate X and y\n train_data = self.train_data[0]\n train_data['TARGET'] = self.train_data[1]\n \n # Save as csv\n train_data.to_csv(self.train_path, index = False)\n \n \n # VAL\n logger.info(f'val: {self.val_path}')\n \n # Concatenate X and y\n val_data = self.val_data[0]\n val_data['TARGET'] = self.val_data[1]\n \n # Save as csv\n val_data.to_csv(self.val_path, index = False)\n \n # TEST\n logger.info(f'test: {self.test_path}')\n \n # Concatenate X and y\n test_data = self.test_data[0]\n test_data['TARGET'] = self.test_data[1]\n \n # Save as csv\n self.test_data.to_csv(self.test_path, index = False)\n \n elif self.processed_extension == '.npz':\n # Convert y to numpy array\n if isinstance(self.train_data[1], pd.Series):\n self.train_data[1] = self.train_data[1].to_numpy()\n if isinstance(self.val_data[1], pd.Series):\n self.val_data[1] = self.val_data[1].to_numpy()\n if isinstance(self.test_data[1], pd.Series):\n self.test_data[1] = self.test_data[1].to_numpy()\n \n # Save to npz (scipy sparse)\n logger.info(f'Saving sets to npz:')\n\n logger.info(f'train: {self.train_path}')\n train_data = [self.train_data[0], np.reshape(self.train_data[1], (-1,1))]\n sparse.save_npz(self.train_path, sparse.hstack(train_data))\n \n logger.info(f'val: {self.val_path}')\n val_data = [self.val_data[0], np.reshape(self.val_data[1], (-1,1))]\n sparse.save_npz(self.val_path, sparse.hstack(val_data))\n\n logger.info(f'test: {self.test_path}')\n test_data = [self.test_data[0], np.reshape(self.test_data[1], (-1,1))]\n sparse.save_npz(self.test_path, sparse.hstack(test_data))\n\n else:\n raise AttributeError(f'Wrong extension: {self.processed_extension}')\n \n self.input_size = self.train_data[0].shape[1]\n logger.info(f'Saved datasets.')",
"def save(self):\n assert self.data is not None\n with open(self._csv_path, mode=\"w\", encoding=\"utf-8\") as spp_file:\n # We don't want to save the index, as it's not especially meaningful, and makes life harder when trying to\n # restore the binary version from the csv (the index column would be imported and then need to be dropped).\n self.data.to_csv(spp_file, index=False)",
"def _save(self, dataset, path, files, copy_files=False):\n raise NotImplementedError('Loader {} does not support saving datasets.'.format(self.type()))",
"def save(self,outPath=None):\n if (not self.canSave): raise StateError(_(\"Insufficient data to write file.\"))\n if not outPath:\n fileInfo = self.fileInfo\n outPath = os.path.join(fileInfo.dir,fileInfo.name)\n out = file(outPath,'wb')\n #--Tes3 Record\n self.tes3.setChanged()\n self.tes3.hedr.setChanged()\n self.tes3.hedr.numRecords = len(self.records) #--numRecords AFTER TES3 record\n self.tes3.getSize()\n self.tes3.dump(out)\n #--Other Records\n for record in self.records:\n record.getSize()\n record.dump(out)\n out.close()",
"def save_data(self, filename, coordinates=True):\n\n if not coordinates:\n savetxt(self.wpath+filename, self.dataset, delimiter=',', fmt='%.18e,%.18e,%d,%s')\n else:\n print self.wpath+filename\n savetxt(self.wpath+filename, self.getDataCoordinates(), delimiter=',', fmt='%.18e,%.18e')",
"def to_file(self, records):\n self._file_manager.make_dir_when_no_dir(self._directory)\n file = os.path.join(self._directory, self._file_name + '.txt')\n record_lines = [rec.to_string() + \"\\n\" for rec in records]\n self._file_manager.write_lines(file, record_lines)",
"def save(datastream):",
"def save(self):\n # TODO: save the file",
"def export_dataset(self):\n raise NotImplementedError",
"def save(self, output, data):",
"def save(self):\n if self.loaded:\n full_file_name = self.resource_manager.get_dataset(self.corpus, self.embeddings.vsm_name)\n logging.info('Saving dataset to [%s]', full_file_name)\n with lzma.open(full_file_name, 'wb') as f:\n pickle.dump(self, f)\n else:\n logging.error('Dataset not loaded, call \"build\" method first!')",
"def save(self):\n\t\t# save self.dfAnalysis\n\t\tcsvPath = self._getSavePath()\n\t\tprint('saving:', csvPath)\n\t\tself.dfAnalysis.to_csv(csvPath)",
"def write_tfrecords(self, records, dataset):\n\n cnt_inc = len(records)\n self.count += cnt_inc\n\n if dataset == 'test':\n self.count_test += cnt_inc\n for s in records:\n self.test.write(s)\n elif dataset == 'train':\n self.count_train += cnt_inc\n for s in records:\n self.train[self.pick_file()].write(s)\n elif dataset == 'valid':\n self.count_eval += cnt_inc\n for s in records:\n self.valid.write(s)\n else:\n raise ValueError(\"invalid dataset: %s\" % dataset)",
"def save(self):\n if self.loaded:\n list_embeddingNames = [self.embeddings.vsm_name, self.synset_embeddings.vsm_name, self.imagined_embeddings.vsm_name]\n full_file_name = self.resource_manager.get_multimodal_dataset(self.corpus, list_embeddingNames)\n logging.info('Saving dataset to [%s]', full_file_name)\n with lzma.open(full_file_name, 'wb') as f:\n pickle.dump(self, f)\n else:\n logging.error('Dataset not loaded, call \"build\" method first!')",
"def save(self):\n\n if (self._save != '0'):\n p = self._save+self._path[-3:-1]+'_'+str(self._qn)+'.dat'\n np.savetxt(p, self._gf)\n else:\n sys.exit(\"Wrong path to save\")"
] |
[
"0.68754226",
"0.67101437",
"0.65302217",
"0.6478312",
"0.63904744",
"0.63881606",
"0.6382442",
"0.6357445",
"0.6306369",
"0.6278397",
"0.62303036",
"0.6225361",
"0.62247103",
"0.62021756",
"0.6199742",
"0.6193617",
"0.61871",
"0.6167681",
"0.61663264",
"0.61569935",
"0.6148366",
"0.6145898",
"0.61226773",
"0.6118035",
"0.6117085",
"0.6103276",
"0.60883915",
"0.60543036",
"0.60488796",
"0.60428363"
] |
0.6829742
|
1
|
>>> p = Primes(10)
>>> len(p)
4
|
def __len__(self):
    return len(self.primes)
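The query above is a doctest, so one plausible (hypothetical) Primes implementation that makes it pass, together with the __len__ shown here, is a simple trial-division collector of primes below the given limit:

```python
# Hypothetical Primes class; only __len__ comes from the snippet above,
# the constructor is an assumption that makes the doctest pass.
class Primes:
    def __init__(self, limit):
        # primes strictly below `limit` by trial division
        self.primes = [n for n in range(2, limit)
                       if all(n % d for d in range(2, int(n ** 0.5) + 1))]

    def __len__(self):
        return len(self.primes)


p = Primes(10)
print(len(p))  # 4 -> the primes below 10 are 2, 3, 5, 7
```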
|
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
|
[
"def n_length_primes(n):\n assert n > 0, \"Cannot generate a list of %d length primes.\" % n\n a = []\n for i in range(10**(n-1), 10**n):\n if is_prime(i):\n a.append(str(i))\n return a",
"def prime_pi(n):\n if n < 2:\n return 0\n\n primes = sieve(n)\n return len(primes)",
"def count_prime():\n nums = []\n for i in range(2, 10000):\n if is_prime(i):\n nums.append(i)\n return nums",
"def numberOfPrimeFactors(n):\n ans = ()\n for prime in primes:\n if prime > n:\n break\n if n % prime == 0:\n ans += (prime,)\n return len(ans)",
"def primes_list(n):\n count = 0\n if n <= 7:\n p_list = [2, 3, 5, 7, 11, 13, 17]\n return p_list[:n]\n else:\n upper_bound = int(n * log(n) + n * log(log(n)))\n return primes(upper_bound)[:n]",
"def test_primes_under_1000000(self):\n self.assertEqual(len(sieve(100)), 25)\n self.assertEqual(len(sieve(1000)), 168)\n self.assertEqual(len(sieve(10000)), 1229)\n self.assertEqual(len(sieve(100000)), 9592)\n self.assertEqual(len(sieve(1000000)), 78498)",
"def count_primes(n):\n i, total = 1, 0\n while i <= n:\n if is_prime(i):\n total += 1\n i += 1\n return total",
"def countPrimesSimple(self, n: int) -> int:\n if n < 2:\n return 0\n\n primes = []\n is_prime = [False, False] + [True for i in range(n-1)]\n\n for x in range(2, n):\n if is_prime[x]:\n primes.append(x)\n for i in range(x*2, n, x):\n is_prime[i] = False\n\n return len(primes)",
"def get_n_primes(n):\n\n primes = [' ']\n num = 2\n while len(primes) < n + 1:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes",
"def primes(count):\n\n # START SOLUTION\n\n primes = []\n num = 2\n\n while count > 0:\n\n if is_prime(num):\n primes.append(num)\n count -= 1\n\n num += 1\n\n return primes",
"def count_prime_args(num):\n nums = []\n for i in range(2, num):\n if is_prime(i):\n nums.append(i)\n return nums",
"def primes(count):\n\n prime_nums = [2]\n prime = 3\n\n for i in range(1, count):\n\n while prime not in [3, 5, 7] and (\n prime % 3 == 0 or prime % 5 == 0 or prime % 7 == 0\n ):\n prime += 2\n\n prime_nums.append(prime)\n prime += 2\n\n return prime_nums",
"def get_primes(n):\n\n return list(primes_sieve(n))",
"def n_primes(n):\n primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59,\n 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127,\n 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193,\n 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269,\n 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349,\n 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431,\n 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,\n 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599,\n 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673,\n 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761,\n 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,\n 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947,\n 953, 967, 971, 977, 983, 991, 997][:n]\n\n if len(primes) < n:\n big_number = 2000\n while 'Not enough primes':\n primes = primes_from_2_to(big_number)[:n]\n if len(primes) == n:\n break\n big_number += 1000\n\n return primes",
"def primes(count):\n\n prime_numbers = [2]\n next_num = 3 \n\n def is_prime(next_num):\n if next_num % 2 == 0:\n return False \n \n for i in range(3, next_num, 2):\n if next_num % i == 0:\n return False \n return True \n\n while count > len(prime_numbers): \n if is_prime(next_num): \n prime_numbers.append(next_num)\n next_num += 1\n\n return prime_numbers",
"def gen_primes():\n\n n = 1\n while True:\n while not isPrime(n):\n n += 1\n\n yield n\n n += 1",
"def primes(n):\n return [i for i in xrange(1, n + 1) if mr_prime(i)]",
"def generate_primes(L):\n # We need to compute the Bound of the factor set.\n i = 0\n list_p = []\n for p in prime_sieve():\n i += 1\n list_p.append(p)\n if i >= L:\n break\n return list_p",
"def test_12():\n assert primes(12) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61]",
"def make_primes(n):\n out_list = []\n for i in range(2, n):\n if is_prime(i):\n out_list.append(i)\n return out_list",
"def primes():\n yield 2\n found = []\n for i in itertools.count(start=3, step=2):\n for p in found:\n if i % p == 0:\n break\n else:\n yield i\n found.append(i)",
"def e_sieve(n):\n sieve = [True] * n\n for i in xrange(3, int(n**0.5)+1, 2):\n if sieve[i]:\n sieve[i*i::2*i] = [False]*((n-i*i-1)/(2*i)+1)\n primes_of_length = dict()\n for i in range(1, len(str(n))):\n primes_of_length[i] = [[]]*10\n for j in range(1, 10):\n primes_of_length[i][j] = list()\n for prime in [2] + [i for i in xrange(3, n, 2) if sieve[i]]:\n primes_of_length[len(str(prime))][int(str(prime)[0])].append(prime)\n return primes_of_length",
"def getPrimes(start, end):\n # This list will contain every 4-digit prime numbers\n primes = []\n\n for i in range(start, end):\n if isPrime(i):\n primes.append(i)\n return primes",
"def get_primes(s):\n primes = bytearray([1] * s)\n for i in range(2, s):\n if primes[i] == 1:\n for j in range(i, s):\n if i * j < s:\n primes[i * j] = 0\n else:\n break\n return primes",
"def test_if_it_outputs_correct_output_for_numbers_greater_than_50(self):\n self.assertEquals(len(prime_numbers(55)), 16)",
"def primesList(n):\n sieve = [True]*n\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[2*i::i] = [False]*(len(sieve[2*i::i]))\n return [2]+[i for i in range(3,n,2) if sieve[i]]",
"def list_primes(n):\n primeList = []\n for i in range(n):\n if is_prime(i):\n primeList.append(i)\n return primeList",
"def primes(n):\n result = []\n i = 2\n while n > 0:\n if isPrime(i):\n result += [i]\n n -= 1\n i += 1\n return result",
"def __init__(self, N=40):\n self._primes = []\n self.find_primes(N)",
"def primes():\n yield 1\n primes = []\n for n in itertools.count(2):\n if not any(n % p == 0 for p in primes):\n # No divisor found among previous primes\n yield n\n primes.append(n)"
] |
[
"0.72399455",
"0.71607894",
"0.6955966",
"0.6808245",
"0.6790493",
"0.67851686",
"0.6727017",
"0.6705161",
"0.6661847",
"0.66584545",
"0.65762335",
"0.6457321",
"0.6454395",
"0.6433077",
"0.6403811",
"0.63543993",
"0.633442",
"0.628941",
"0.62281543",
"0.622775",
"0.622196",
"0.6214861",
"0.62077624",
"0.620315",
"0.6202038",
"0.62009025",
"0.6199705",
"0.61944157",
"0.6186604",
"0.6183098"
] |
0.7829653
|
0
|
clean_up on a place shall remove the link with associated visitors but not delete them. However, since each album needs to be associated with a place, clean_up on a place shall clean up and delete the associated albums.
|
def clean_up(self, graph):
    # Delete albums associated with place
    if len(self.albums) != 0:
        for album in self.albums:
            album.clean_up()
            album.delete(graph)
|
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
|
[
"def clean():\n\n tracks = []\n removed_playlists = 0\n for playlist in PlaylistManager.find():\n\n if len(playlist.tracks) == 0:\n PlaylistManager.remove(playlist.id)\n removed_playlists += 1\n else:\n tracks += playlist.tracks\n\n tracks = list(set(tracks))\n removed_tracks = 0\n for track in TrackManager.find():\n if track.id not in tracks:\n TrackManager.remove(track.id)\n removed_tracks += 1\n\n click.secho(\"Cleanup removed:\", bold=True)\n click.secho(\n tabulate( # type: ignore\n [\n (magenta(\"Tracks:\"), removed_tracks),\n (magenta(\"Playlists:\"), removed_playlists),\n ],\n tablefmt=\"plain\",\n colalign=(\"right\", \"left\"),\n )\n )",
"def tearDown(self) -> None:\n storage.delete(self.place)\n storage.delete(self.user)\n storage.delete(self.city)\n storage.delete(self.state)\n storage.save()",
"def clean_up(self):\n while len(self.__refs_for_deletion): \n attr = self.__refs_for_deletion.pop()\n obj = getattr(self, attr)\n if hasattr(obj, 'clean_up'):\n obj.clean_up()\n delattr(self, attr)",
"def _clean_up(self):",
"def remove_place_triggers(apps, schema_editor):\n Trigger = apps.get_model('goals', 'Trigger')\n for t in Trigger.objects.filter(trigger_type='place'):\n t.delete()",
"def tearDown(self) -> None:\n place = storage.get(Place, self.place_id)\n if place is not None:\n storage.delete(place)\n user = storage.get(User, self.user_id)\n if user is not None:\n storage.delete(user)\n city = storage.get(City, self.city_id)\n if city is not None:\n storage.delete(city)\n state = storage.get(State, self.state_id)\n if state is not None:\n storage.delete(state)\n storage.save()",
"def tearDown(self) -> None:\n place = storage.get(Place, self.place_id)\n if place is not None:\n storage.delete(place)\n user = storage.get(User, self.user_id)\n if user is not None:\n storage.delete(user)\n city = storage.get(City, self.city_id)\n if city is not None:\n storage.delete(city)\n state = storage.get(State, self.state_id)\n if state is not None:\n storage.delete(state)\n storage.save()",
"def tearDown(self):\n Pics.objects.all().delete()\n Category.objects.all().delete()\n Location.objects.all().delete()",
"def spatialitedbs_post_delete(sender, instance, **kwargs):\n instance.url.delete(False)",
"def cleanup(self):\r\n check = []\r\n delete_this = [(API.url_plan, self.plans), (API.url_workout, self.workouts), (API.url_schl, self.schedules), (API.url_link, self.links)]\r\n for delete in delete_this:\r\n while delete[1] != []:\r\n self.delete_field(delete[0], delete[1])\r\n if requests.get(delete[0], headers=self.headers).json()['results'] == []:\r\n check.append(True)\r\n else:\r\n check.append(False) \r\n if False in check:\r\n return False\r\n return True",
"def remove_link():",
"def clean_up(P, c):\n if not P.pairs: return # root case\n P.siblings.pop()\n s,r = P.pairs.pop()\n P.shape_pool.add(s)\n P.resource_pool.add(r)\n if P.r2p:\n pathway_id = P.r2p[P.resources[r]]\n if c and c[pathway_id]:\n c[pathway_id].pop()\n if P.segments[pathway_id]:\n P.segments[pathway_id].pop()",
"def cleanup(self):\n return self.cleanupNonces(), self.cleanupAssociations()",
"def basemap_post_delete(sender, instance, **kwargs):\n instance.url.delete(False)",
"def pre_delete_centroid(sender, instance, **kwargs):\n Link.objects.filter(origin=instance.id).delete()\n Link.objects.filter(destination=instance.id).delete()",
"def cleanup(child):\n children = child.get('children', [])\n for childchild in children:\n cleanup(childchild)\n cleaned = {u'title': child['Title'], u'name': child['id'],\n u'children': children}\n child.clear()\n child.update(cleaned)",
"def unlink(self):\n album_id = self.albums_map[self.artist][self.c_album][1]\n # clear entry in self.albums_map[artist]\n self.albums_map[self.artist].pop(self.c_album)\n # remove Albums recording only if no more references to the album exist\n still_present = False\n for item in self.albums_map[self.artist].values():\n if item[1] == album_id:\n still_present = True\n if not still_present:\n dmla.unlink_album(self.a_album)\n self.modified = True\n self.refresh_screen(self.artists_list.currentIndex(),\n self.albums_list.currentIndex(), modifyoff=False)",
"def clean_place(place, places):\n result = place\n result = result.replace(\" \", \" \")\n for match in places:\n result = result.replace(match, places[match])\n return result",
"def fusion_api_delete_uplink_set(self, name=None, uri=None, api=None, headers=None):\n return self.uplink_set.delete(name, uri, api, headers)",
"def cleanUp(self):\r\n pass",
"def clean_up(self):\n pass",
"def clean_up(self):\n pass",
"def cleanUp():\n pass",
"def clean_up(self):\n\t\tpass",
"async def clean_up(self) -> None:",
"def remove_refs(self):\n\n self.reference = None\n self.url = None",
"def testOnlyPlace(self):\n state = State(name='toto')\n city = City(name='toto', state_id=state.id)\n user = User(email='email', password='password')\n place = Place(name='toto', city_id=city.id, user_id=user.id)\n storage.new(state)\n storage.new(city)\n storage.new(user)\n storage.new(place)\n storage.save()\n response = requests.get(url=self.url)\n json_data = response.json()\n\n for element in json_data:\n self.assertEqual(element['__class__'], 'Place', WRONG_OBJ_TYPE_MSG)\n storage.delete(place)\n storage.delete(user)\n storage.delete(city)\n storage.delete(state)\n storage.save()",
"def test_relation_before_remove():\n assert query_row(db_conf, 'osm_buildings', 50011)['type'] == 'yes'\n assert query_row(db_conf, 'osm_landusages', -50021)['type'] == 'park'",
"def _fix_items(items):\n for _, item in items.iteritems():\n if 'url' in item['fields']:\n del item['fields']['url']\n return items",
"def pre_delete_crossing(sender, instance, **kwargs):\n Link.objects.filter(origin=instance.id).delete()\n Link.objects.filter(destination=instance.id).delete()"
] |
[
"0.56522363",
"0.5632935",
"0.556085",
"0.551513",
"0.54856205",
"0.5455005",
"0.5455005",
"0.5452388",
"0.545235",
"0.544706",
"0.5406526",
"0.5401399",
"0.53751695",
"0.5368347",
"0.53581285",
"0.5356732",
"0.53475654",
"0.53157127",
"0.53075457",
"0.5305903",
"0.5292561",
"0.5292561",
"0.52741766",
"0.524347",
"0.5196666",
"0.5191206",
"0.5162387",
"0.51594293",
"0.51513684",
"0.5145506"
] |
0.7646151
|
0
|
The subsystems which the pulses work on.
|
def onSubSys(self) -> List[int]:
    return self._onSubSys
|
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
|
[
"def subsystems():\n return list(x for x in SystemScope if x is not SystemScope.SYSTEM)",
"def all_present_subsystems(self):\n return _yield_subdir_names(self.subsys_configs)",
"def subsystems(self) -> Optional[Iterable[str]]:\n\n return self.get_project_subsystems()",
"def assign_subsystems(self):\n\n self.driver = MecDriver()\n\n systems = {}\n systems[\"ctrl\"] = self\n systems[\"driver\"] = self.driver\n\n self.logger.debug(\"Systems: {}\".format(systems))\n return systems",
"def test_subsystems(self):\n pass",
"def measurement_systems(self) -> localedata.LocaleDataDict:\n return self._data['measurement_systems']",
"def list_systems():\n return sorted(systems.keys())",
"def test_get_systems(self):\n pass",
"def get_known_systems(cls):\n return cls.coord_systems.keys()",
"def get_system_keys(self) -> list[str]:\n return self.get(\"npc.systems\").keys()",
"def get_all_typesystems(self):\n return list(self._type_systems.keys())",
"def get_devices(self):\n\n \"\"\"\n # Note: This code is no longer required with the latest spt updates.\n # But that said, leaving for now so I don't risk breaking folks!\n if not self._use_lsscsi:\n message = \"Find Number of IOM's\"\n command = \"lsscsi | fgrep enclo | egrep 'HGST|WDC' | wc -l\"\n pdata = self._run_command(command=command, message=message, logger=self._logger, shell=True)\n ioms = (int)(pdata['stdout'].strip())\n if ioms > 1:\n self._use_lsscsi = True\n if not self._use_lsscsi and os.path.exists('/etc/multipath.conf'):\n self._use_lsscsi = True\n \"\"\"\n # Allow above logic or options to override lsscsi vs. spt usage.\n if not self._use_lsscsi or self._force_spt:\n self.get_devices_spt()\n else:\n self.get_devices_lsscsi()\n return",
"def mechanisms(self):\n return list(self)",
"def unit_system(self):\n val = self._stub.List(self._message).unit_system\n return map_unit_system[val]",
"def system(self):\n return self['system']",
"def describe_operating_systems():\n pass",
"def getEcosystems(self):\n return self.__getColumnData(Q_ECOSYSTEMS, 'ecosystem')",
"def subsystem(self) -> str:\n return self.raw.get(\"subsystem\", \"\")",
"def Platforms():\n return platforms",
"def test_get_systems_expanded(self):\n pass",
"def subsystem_count(self):\n return len(self)",
"def spectators(self):\n return self._return_if('_spectators')",
"def residue_subsystem():\n net = residue_network()\n state = (0, 0, 0, 0, 0)\n return Subsystem(net, state)",
"def get_ring_system(self):\n for system in self.owner.ring_systems:\n if system.contains(self):\n return system",
"def platforms(self):\n return self.rpc.call(MsfRpcMethod.ModulePlatforms)",
"def get_platforms(self):\n _log.debug(\"Passing platforms back: {}\".format(\n self._registered_platforms.keys()))\n return self._registered_platforms.values()",
"def components(self):\n return ML600Pump(\"pump\", self), ML600Valve(\"valve\", self)",
"def __init__(self, *subsystems):\n self.subsystems = subsystems",
"def _get_all_spectra(self):\n pass",
"def applications(self):\n return [self.app] + self.mounts.values()"
] |
[
"0.7108462",
"0.6836988",
"0.6729169",
"0.65037864",
"0.6058275",
"0.59678674",
"0.5963102",
"0.5925865",
"0.58703226",
"0.5808296",
"0.57273567",
"0.57026535",
"0.5601382",
"0.5600621",
"0.5598558",
"0.55863845",
"0.5569667",
"0.5562868",
"0.55053097",
"0.5504476",
"0.55015695",
"0.54367936",
"0.53830016",
"0.53750813",
"0.53491837",
"0.53384143",
"0.53375834",
"0.5335662",
"0.5313191",
"0.53072006"
] |
0.6854705
|
1
|
Compare two GatePulsePair objects.
|
def _compareObj(self, other):
    if not isinstance(other, GatePulsePair):
        raise Error.ArgumentError(f"GatePulsePair object can not compare with a {type(other)}.")
    if isinstance(other.cirLine.data, FixedGateOP) and isinstance(self.cirLine.data, RotationGateOP):
        return False
    if isinstance(other.cirLine.data, RotationGateOP) and isinstance(self.cirLine.data, FixedGateOP):
        return False
    if other.cirLine.data.name != self.cirLine.data.name:
        return False
    if other.cirLine.qRegIndexList != self.cirLine.qRegIndexList:
        return False
    if isinstance(self.cirLine.data, RotationGateOP):
        argLen = len(other.cirLine.data.uGateArgumentList)
        for idx in range(argLen):
            verify = abs(other.cirLine.data.uGateArgumentList[idx] -
                         self.cirLine.data.uGateArgumentList[idx])
            if verify > sys.float_info.epsilon:
                return False
    return True
|
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
|
[
"def gateCompare(self,gate1,gate2):\n \n if gate1.getDist()>gate2.getDist():\n return 1\n elif gate1.getDist()==gate2.getDist():\n return 0\n else: #gate1Num<gate2Num\n return -1",
"def __eq__(self, other: 'PairwiseInfo') -> bool:\n\n return (\n self.their_did == other.their_did and\n self.their_verkey == other.their_verkey and\n self.my_did == other.my_did and\n self.my_verkey == other.my_verkey and\n self.metadata == other.metadata)",
"def is_compatible(self, other):\n return self.intervals == other.intervals and\\\n self.nonderived_directions == other.nonderived_directions",
"def __eq__(self, other):\n if not isinstance(other, LabelsBetweenObjectPair):\n return False\n\n return self.to_dict() == other.to_dict()",
"def __eq__(self, other):\n return self.conn == other.conn and self.p1 == other.p1 and self.p2 == other.p2",
"def __eq__(self, other: 'Pair') -> bool:\n return self.names == other.names",
"def __eq__(self, other):\n return sorted(self.points) == sorted(other.points)",
"def __eq__(self, other):\n return self.points == other.points",
"def __eq__(self, other):\n if not isinstance(other, FlashSwapCurrencyPair):\n return False\n\n return self.to_dict() == other.to_dict()",
"def __eq__(self, other):\n if not isinstance(other, HandwrittenSignature):\n return False\n\n return self.__dict__ == other.__dict__",
"def compare(self):\n samples = self.data[-2:]\n if len(samples) != 2:\n return\n\n timestamp_a, data_a = samples[0]\n timestamp_b, data_b = samples[1]\n LOG.debug(\"%s comparing sample from %s to %s\", self, timestamp_a, timestamp_b)\n changes = dict_compare(data_a, data_b)\n for key in changes:\n OUTPUT.info(\"%s:%s: %s -> %s\", self, key, get_value(data_a, key), get_value(data_b, key))",
"def __lt__(self, other: 'Pair') -> bool:\n return self.names < other.names",
"def basic_compare(self, other: \"Molecule\") -> bool:\n return self.inchi_key[:14] == other.inchi_key[:14]",
"def __eq__(self, other):\n return (self.name == other.name) and (self.wavelength_control == other.wavelength_control) \\\n and (self.gonio_angles == other.gonio_angles) and (self.wl_angles == other.wl_angles) \\\n and (self.wavelength_minimum == other.wavelength_minimum) \\\n and (self.wavelength_maximum == other.wavelength_maximum) \\\n and (self.wavelength_bandwidth == other.wavelength_bandwidth)",
"def __cmp__(self, secondPoint):\n return __cmp__(self.value, secondPoint.value)",
"def __eq__(self, other):\n if not isinstance(other, LakeFormationPolicy):\n return False\n\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n return self.point == other.point",
"def __eq__(self, second):\r\n\t\treturn self.x == other.x and self.y == other.y",
"def __le__(self, other: Compound[Scalar]) -> bool:\n return (self._points_set <= other._points_set\n if isinstance(other, Multipoint)\n else NotImplemented)",
"def __eq__(self, other):\n if not isinstance(other, CrossMarginLoan):\n return False\n\n return self.to_dict() == other.to_dict()",
"def __eq__(self, other):\n return self.semitone_interval == other.semitone_interval",
"def __eq__(self, other):\n if not isinstance(other, PoliciesPeripheralsUsbDeviceCommon):\n return False\n\n return self.__dict__ == other.__dict__",
"def __eq__(self, other):\n return (isinstance(other, KGCorrelation) and\n self.nbins == other.nbins and\n self.bin_size == other.bin_size and\n self.min_sep == other.min_sep and\n self.max_sep == other.max_sep and\n self.sep_units == other.sep_units and\n self.coords == other.coords and\n self.bin_type == other.bin_type and\n self.bin_slop == other.bin_slop and\n self.min_rpar == other.min_rpar and\n self.max_rpar == other.max_rpar and\n self.xperiod == other.xperiod and\n self.yperiod == other.yperiod and\n self.zperiod == other.zperiod and\n np.array_equal(self.meanr, other.meanr) and\n np.array_equal(self.meanlogr, other.meanlogr) and\n np.array_equal(self.xi, other.xi) and\n np.array_equal(self.xi_im, other.xi_im) and\n np.array_equal(self.varxi, other.varxi) and\n np.array_equal(self.weight, other.weight) and\n np.array_equal(self.npairs, other.npairs))",
"def __gt__(self, other: Compound[Scalar]) -> bool:\n return (self._points_set > other._points_set\n if isinstance(other, Multipoint)\n else NotImplemented)",
"def __eq__(self,other):\n if isinstance(other, RegularPoly):\n return(self.vert_count == other.vert_count and self.radius == other.radius)\n else:\n raise NotImplementedError('Incorrect data type')",
"def __eq__(self, other):\n return LimitedGoniometer.__eq__(self,other) and \\\n (np.deg2rad(self.chi) == other.chi) and \\\n (np.deg2rad(self.omega) == other.omega)",
"def __eq__(self, other):\n return self.abs2phy.__eq__(other)",
"def __eq__(self, other):\n return self.abs2phy.__eq__(other)",
"def __le__(self, other):\n return self._key <= other._key",
"def _cmp_(self, other):\n if(not isinstance(other, VVHarmonicWeakMaassForms)):\n return False\n eq = (self.multiplier() == other.WR) and (self._weight_rat == other._weight_rat)\n eq = eq and (self.prec == other.prec) and (self._sym_type == other._sym_type)\n eq = eq and (self._is_dual_rep == other._is_dual_rep)\n return eq"
] |
[
"0.6439984",
"0.61721945",
"0.60912037",
"0.60420144",
"0.602644",
"0.59933335",
"0.5925109",
"0.5911374",
"0.58882856",
"0.5884855",
"0.5847428",
"0.5843614",
"0.58279824",
"0.5825291",
"0.5825062",
"0.5806124",
"0.5805809",
"0.57896304",
"0.57886875",
"0.5786566",
"0.5779113",
"0.5707098",
"0.57040477",
"0.56736165",
"0.56538475",
"0.5648659",
"0.56486577",
"0.56486577",
"0.564839",
"0.5633432"
] |
0.7468586
|
0
|
Return the evaluation of the goal function f at point theta.
|
def evaluate_goal(self, theta, old_theta, i, res, iter):
    base_theta = utils.true_param(old_theta)
    v = self.f(i, base_theta, **theta)
    # Store the value in history. This is only stored if iter is below
    # iter_parallel_start, otherwise we store the history after the
    # parallel matches are both finished.
    self.history_eval[self.history_count % 1000] = v
    self.history_theta[self.history_count % 1000] = theta
    self.history_count += 1
    # Todo: Improve method to return values.
    if iter < self.iter_parallel_start:
        return v  # Run match one at a time
    res[i] = v  # Run matches in parallel
|
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
|
[
"def f(self, x, theta):\n raise NotImplementedError(\n \"f has not been implemented for this Experiment\")",
"def evaluate(self, theta=np.array([]) ):\n ### 2018-05-06 - check whether the parameter-set is within\n ### physical bounds. If not, return -np.inf here.\n if not self.parsAreWithinBounds(theta):\n return -np.inf\n\n return self.methPrior(theta)",
"def solve(self, theta: float = 1e-6) -> Tuple[np.ndarray, np.ndarray]:\n self.mdp.ensure_compiled()\n self.theta = theta\n return self._policy_improvement()",
"def solve(self, theta: float = 1e-6) -> Tuple[np.ndarray, np.ndarray]:\n self.mdp.ensure_compiled()\n V = self._calc_value_func(theta)\n policy = self._calc_policy(V)\n\n return policy, V",
"def evaluate_random_function(f, x, y):\n elementary_func = ['prod', 'avg', 'cos_pi', 'sin_pi', 'square', 'root']\n if f[0] == \"x\":\n return x\n elif f[0] == \"y\":\n return y\n\n # Kindof effort instensive way to do this, but it allows for a\n # changeable list of functions with less effort\n else:\n if f[0] == elementary_func[0]:\n first_argument = evaluate_random_function(f[1], x, y)\n second_argument = evaluate_random_function(f[2], x, y)\n return first_argument * second_argument\n elif f[0] == elementary_func[1]:\n first_argument = evaluate_random_function(f[1], x, y)\n second_argument = evaluate_random_function(f[2], x, y)\n return .5*(first_argument + second_argument)\n elif f[0] == elementary_func[2]:\n argument = evaluate_random_function(f[1], x, y)\n ans = math.cos(math.pi * argument)\n return ans\n elif f[0] == elementary_func[3]:\n argument = evaluate_random_function(f[1], x, y)\n ans = math.sin(math.pi * argument)\n return ans\n elif f[0] == elementary_func[4]:\n argument = evaluate_random_function(f[1], x, y)\n return argument**2\n elif f[0] == elementary_func[5]:\n argument = evaluate_random_function(f[1], x, y)\n return math.sqrt(math.fabs(argument))",
"def positive_eval(self, input_tensor: torch.Tensor, theta: float):\n y = self(input_tensor)\n return y, torch.square(y).mean(dim=1) - theta",
"def evaluate_random_function(f, x, y):\n\n # your code goes here",
"def theta():\n pass",
"def evaluate_random_function(f, x, y):\n\n if f[0] == \"prod\":\n return evaluate_random_function(f[1],x,y) * evaluate_random_function(f[2],x,y)\n elif f[0] == \"sin_pi\":\n return sin(evaluate_random_function(f[1],x,y) * pi)\n elif f[0] == \"cos_pi\":\n return cos(evaluate_random_function(f[1],x,y) * pi)\n elif f[0] == \"x\":\n return x\n else:\n return y",
"def g(f, x: float):\n return lambda x: f(x + f(x)) / f(x) - 1",
"def evaluationFunction(self, currentGameState, action):\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n return scoreEvaluationFunction(successorGameState)",
"def evaluationFunction(self, currentGameState, action):\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n return betterEvaluationFunction(successorGameState)",
"def func(x, f, fp):\n\n return np.sqrt((1+fp(x)**2) / (2 * g * f(x)))",
"def approximate_gradient(self, theta, c, iter):\n\n true_theta = utils.true_param(theta)\n\n if self.history_count > 0:\n current_goal, _ = self.average_evaluations(30)\n else:\n current_goal = SPSA_minimization.BAD_GOAL\n\n logging.info(f'{__file__} > current_goal: {current_goal}')\n\n print(f'current optimizer mean goal: {current_goal:0.5f} (low is better, lowest: -1.0, highest: 1.0)')\n # print(f'Sample, optimizer goal = -(engine match score) or -(3.0 pts/4 games) or -0.75')\n\n bernouilli = self.create_bernouilli(theta)\n\n count = 0\n while True:\n logging.info(f'{__file__} Apply bernouilli term to theta, theta={theta}, c={c}, bernouilli={bernouilli}')\n # Calculate two evaluations of f at points M + c * bernouilli and\n # M - c * bernouilli to estimate the gradient. We do not want to\n # use a null gradient, so we loop until the two functions evaluations\n # are different. Another trick is that we use the same seed for the\n # random generator for the two function evaluations, to reduce the\n # variance of the gradient if the evaluations use simulations (like\n # in games).\n state = random.getstate()\n theta1 = utils.linear_combinaison(1.0, theta, c, bernouilli)\n logging.info(f'{__file__} theta1: {theta1}')\n\n # Apply parameter limits\n logging.info(f'{__file__} > Apply limits to theta1 before sending to engine')\n theta1 = utils.apply_limits(theta1)\n logging.info(f'{__file__} theta1 with limits: {theta1}')\n logging.info(f'{__file__} > run 1st match with theta1: {theta1}')\n\n random.setstate(state)\n theta2 = utils.linear_combinaison(1.0, theta, -c, bernouilli)\n logging.info(f'{__file__} theta2: {theta2}')\n\n # Apply parameter limits\n logging.info(f'{__file__} > Apply limits to theta2 before sending to engine')\n theta2 = utils.apply_limits(theta2)\n logging.info(f'{__file__} theta2 with limits: {theta2}')\n logging.info(f'{__file__} > run 2nd match with theta2: {theta2}')\n\n # Run the 2 matches in parallel after iteration 1.\n manager = multiprocessing.Manager()\n res = manager.dict()\n thetas = [theta1, theta2]\n\n if iter < self.iter_parallel_start:\n print('Run match 1 ...')\n true_param = utils.true_param(theta1)\n print('test_engine param:')\n for (name, val), (name1, val1) in zip(true_param.items(), true_theta.items()):\n print(f' {name}: {val[\"value\"]}, ({val[\"value\"] - val1[\"value\"]:+})')\n\n print('base_engine param:')\n for name, val in utils.true_param(theta).items():\n print(f' {name}: {val[\"value\"]}')\n\n t1 = time.perf_counter()\n f1 = self.evaluate_goal(theta1, theta, 0, res, iter)\n logging.info(f'f1 elapse: {time.perf_counter() - t1:0.2f}s')\n print(f'Done match 1!, elapse: {time.perf_counter() - t1:0.2f}sec')\n print(f'goal after match 1: {f1:0.5f}')\n\n # Run match 2\n print('Run match 2 ...')\n true_param = utils.true_param(theta2)\n print('test_engine param:')\n for (name, val), (name1, val1) in zip(true_param.items(), true_theta.items()):\n print(f' {name}: {val[\"value\"]}, ({val[\"value\"] - val1[\"value\"]:+})')\n\n print('base_engine param:')\n for name, val in utils.true_param(theta).items():\n print(f' {name}: {val[\"value\"]}')\n\n t1 = time.perf_counter()\n f2 = self.evaluate_goal(theta2, theta, 1, res, iter)\n logging.info(f'f2 elapse: {time.perf_counter() - t1:0.2f}s')\n print(f'Done match 2!, elapse: {time.perf_counter() - t1:0.2f}sec')\n print(f'goal after match 2: {f2:0.5f}')\n\n print('Done engine match!')\n else:\n print('Run 2 matches in parallel ...')\n t1 = time.perf_counter()\n jobs = []\n for i in range(2):\n print(f'Run 
match {i + 1} ...')\n\n true_param = utils.true_param(thetas[i])\n print('test_engine param:')\n for (name, val), (name1, val1) in zip(true_param.items(), true_theta.items()):\n print(f' {name}: {val[\"value\"]}, ({val[\"value\"] - val1[\"value\"]:+})')\n\n print('base_engine param:')\n for name, val in utils.true_param(theta).items():\n print(f' {name}: {val[\"value\"]}')\n\n p = multiprocessing.Process(target=self.evaluate_goal, args=(thetas[i], theta, i, res, iter))\n jobs.append(p)\n p.start()\n\n for num, proc in enumerate(jobs):\n proc.join()\n\n # If match is done in parallel, update the history count, eval and theta here.\n self.history_eval[self.history_count % 1000] = res.values()[num]\n self.history_theta[self.history_count % 1000] = thetas[num]\n self.history_count += 1\n\n print(f'Done match {num + 1}!, elapse: {time.perf_counter() - t1:0.2f}sec')\n\n logging.info(f'parallel elapse: {time.perf_counter() - t1:0.2f}s')\n\n print('Done engine match!')\n\n f1, f2 = res.values()[0], res.values()[1]\n\n logging.info(f'{__file__} > f1: {f1}, f2: {f2}')\n print(f'optimizer goal after match 1: {f1:0.5f} (low is better)')\n print(f'optimizer goal after match 2: {f2:0.5f} (low is better)')\n\n if f1 != f2:\n break\n\n print('perf is the same in match 1 and 2, launch new matches ...')\n\n count = count + 1\n logging.info(f'{__file__} > f1 and f2 are the same, try the engine match again. num_tries = {count}')\n\n if count >= 100:\n logging.info(f'{__file__} > too many evaluation to find a gradient, function seems flat')\n break\n\n # Update the gradient\n gradient = copy.deepcopy(theta)\n # print(f'Basic gradient after 2 engine matches:')\n for name, value in theta.items():\n gradient[name]['value'] = (f1 - f2) / (2.0 * c * bernouilli[name]['value'])\n # print(f' {name}: {gradient[name][\"value\"]}')\n logging.info(f'{__file__} > {name} gradient: {gradient}')\n\n if (f1 > current_goal) and (f2 > current_goal):\n logging.info(f'{__file__} > function seems not decreasing')\n gradient = utils.linear_combinaison(0.1, gradient)\n\n print('Modify the gradient because the results of engine matches\\n'\n 'did not improve when using the new param. But we will not\\n'\n 're-run the engine matches.')\n\n print('Modified gradient at alpha=0.1:')\n for n, v in gradient.items():\n print(f' {n}: {v[\"value\"]}')\n\n # For the correction factor used in the running average for the gradient,\n # see the paper \"Adam: A Method For Stochastic Optimization, Kingma and Lei Ba\"\n\n beta = 0.9\n correction = 1.0 / 1.0 - pow(beta, self.iter)\n\n gradient = utils.linear_combinaison((1 - beta), gradient, beta, self.previous_gradient)\n gradient = utils.linear_combinaison(correction, gradient)\n\n # print('New gradient after applying correction:')\n # for n, v in gradient.items():\n # print(f' {n}: {v[\"value\"]}')\n\n # Store the current gradient for the next time, to calculate the running average\n self.previous_gradient = gradient\n\n # Store the best the two evals f1 and f2 (or both)\n if (f1 <= current_goal):\n self.best_eval[self.best_count % 1000] = f1\n self.best_theta[self.best_count % 1000] = theta1\n self.best_count += 1\n\n if (f2 <= current_goal):\n self.best_eval[self.best_count % 1000] = f2\n self.best_theta[self.best_count % 1000] = theta2\n self.best_count += 1\n\n logging.info(f'{__file__} > final gradient: {gradient}')\n \n # Return the estimation of the new gradient\n return gradient",
"def cost_function(theta, X, y):\n\n l = None\n #######################################################################\n # TODO: #\n # Compute and return the cost l of a particular choice of #\n # theta. #\n # #\n #######################################################################\n thetaX = np.dot(X, theta)\n g = tanh(thetaX)-y\n l = np.sum(g*g) / X.shape[0]\n \n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n return l",
"def evaluate_policy(self, policy, discount_factor=1.0, max_steps=None, theta=0.00001):\n env = self.get_environment()\n # Start with a random (all 0) value function\n V = np.zeros(env.nS)\n steps = 0\n while max_steps is None or steps < max_steps:\n delta = 0\n # For each state, perform a \"full backup\"\n for s in range(env.nS):\n v = 0\n # Look at the possible next actions\n for a, action_prob in enumerate(policy[s]):\n # For each action, look at the possible next states...\n for prob, next_state, reward, done in env.P[s][a]:\n # Calculate the expected value\n v += action_prob * prob * (reward + discount_factor * V[next_state])\n # How much our value function changed (across any states)\n delta = max(delta, np.abs(v - V[s]))\n V[s] = v\n # print('{} {} {}'.format(steps, delta, v))\n steps += 1\n # print(\"delta: {}, theta: {}\".format(delta, theta))\n # Stop evaluating once our value function change is below a threshold\n if delta < theta:\n break\n\n return np.array(V)",
"def eval_func(f, x, y, t):\n if f[0] == 'x':\n return x\n if f[0] == 'y':\n return y\n if f[0] == 't':\n return y-t\n if f[0] == 'prod':\n return eval_func(f[1],x,y,t)*eval_func(f[2],x,y,t)\n if f[0] == 'avg':\n return (eval_func(f[1],x,y,t)+eval_func(f[2],x,y,t))/2.0\n if f[0] == 'cos_pi':\n return math.cos(math.pi*eval_func(f[1],x,y,t))\n if f[0] == 'sin_pi':\n return math.sin(math.pi*eval_func(f[1],x,y,t))\n if f[0] == 'subtract':\n return eval_func(f[1],x,y,t)-eval_func(f[2],x,y,t)\n if f[0] == 'add':\n return eval_func(f[1],x,y,t)+eval_func(f[2],x,y,t)",
"def _objective_function(self, thetas, X, Y):\n \n # Convert thetas vector to form total_cost can understand\n thetas = self.reshape_thetas(thetas, 'list')\n self.thetas = thetas\n \n # Get cost function value\n fval = self.total_cost(X, Y, thetas)\n \n # Get derivatives using back propagation\n Deltas = self.get_gradients(X, Y)\n dfval = self.reshape_thetas(Deltas, 'vector')\n \n return fval, dfval",
"def evaluate_random_function(f, x, y, t=0):\n if f[0] == \"x\":\n return x\n elif f[0] == \"y\":\n return y\n elif f[0] == \"t\":\n return t\n elif f[0] == \"prod\": \n return evaluate_random_function(f[1], x, y, t)*evaluate_random_function(f[2], x, y, t)\n elif f[0] == \"avg\":\n return 0.5*(evaluate_random_function(f[1], x, y, t)+evaluate_random_function(f[2], x, y, t))\n elif f[0] == \"cos_pi\":\n return cos(pi*evaluate_random_function(f[1], x, y, t))\n elif f[0] == \"sin_pi\":\n return sin(pi*evaluate_random_function(f[1], x, y, t))\n elif f[0] == \"hypot\":\n first = evaluate_random_function(f[1], x, y, t)\n second = evaluate_random_function(f[1], x, y, t)\n tester = first*abs(first) + second*abs(second)\n if tester >= 0:\n return sqrt(first**2 + second**2)/sqrt(2)\n else:\n return -sqrt(first**2 + second**2)/sqrt(2)\n elif f[0] == \"pow\":\n val = evaluate_random_function(f[1], x, y, t)\n if val >= 0:\n return 1-val**val\n else:\n return -(1-abs(val)**abs(val))\n elif f[0] == \"add\":\n first = evaluate_random_function(f[1], x, y, t)\n second = evaluate_random_function(f[1], x, y, t)\n if first + second > 1:\n return (1 - (first+second) + 1)\n elif first + second < -1:\n return (-2 - (first+second))\n else:\n return (first + second)\n elif f[0] == \"cube\":\n return evaluate_random_function(f[1], x, y, t)**3",
"def objective_function(theta, X, y):\n # m number of training instances\n m = X.shape[0]\n jtheta = sum((np.dot(X, theta) - y)**2) / (2.0*m)\n return jtheta",
"def solution_update(self, theta, force=False):\n \n self.x = self.eval(theta, force)\n \n return",
"def reward_func(self, state):\n if abs(state['theta']) < 1:\n return 2.0\n\n elif abs(state['theta']) < 3:\n return 0.0\n\n elif abs(state['theta']) > 30:\n return -100.0\n return -2.0",
"def evaluate(self, state):\n _n = len(state)\n _t = np.ceil(self.t_pct*_n)\n\n # Calculate head and tail values\n tail_0 = self.tail(0, state)\n head_1 = self.head(1, state)\n\n # Calculate R(X, T)\n if (tail_0 > _t and head_1 > _t):\n _r = _n\n else:\n _r = 0\n\n # Evaluate function\n fitness = max(tail_0, head_1) + _r\n self.num_evals += 1\n return fitness",
"def compute_gradients(self, f, args, grad_ys=None):\n if tf.executing_eagerly():\n grad_fn = tf.contrib.eager.gradients_function(f)\n if grad_ys is not None:\n grads = grad_fn(*args, dy=grad_ys)\n else:\n grads = grad_fn(*args)\n else:\n res = f(*args)\n grads = tf.gradients(res, args, grad_ys=grad_ys)\n return self.evaluate(grads)",
"def evaluate_random_function(f, x, y):\n if (f[0]=='x'):\n return X(x,y)\n elif (f[0]=='y'):\n return Y(x,y)\n elif (f[0]=='sin_pi'):\n return sin_pi(evaluate_random_function(f[1],x,y))\n elif (f[0]=='cos_pi'):\n return cos_pi(evaluate_random_function(f[1],x,y))\n elif (f[0]=='times'):\n return times(evaluate_random_function(f[1],x,y),evaluate_random_function(f[2],x,y))\n elif (f[0]=='root'):\n return root(evaluate_random_function(f[1],x,y))\n elif (f[0]=='square'):\n return square(evaluate_random_function(f[1],x,y))",
"def policy_evaluation_on_grid_world() -> ValueFunction:\n return get_policy_evaluation(grid_world, 0.9999, 0.0001)",
"def evaluationFunction(problem, gFunc, hFunc, node):\n #g = getattr(searchAgents, gFunc)\n #h = getattr(searchAgents, hFunc)\n h = hFunc\n #return g(node) + h(node)\n return gFunc + h(node, problem)",
"def execution_rule(f):\n return _ExecutionRuleFunction(f)",
"def call(self, y_true, theta):\n\n @tf.custom_gradient\n def forward(theta):\n diff = self.perturbed(theta) - tf.cast(y_true, dtype=theta.dtype)\n if not self._maximize:\n diff = -diff\n\n def grad(dy):\n if self._batched: # dy has shape (batch_size,) in this case.\n dy = tf.reshape(dy, [tf.shape(dy)[0]] + (diff.shape.rank - 1) * [1])\n return dy * diff\n\n # Computes per-example loss for batched inputs. If the total loss for the\n # batch is the desired output, use `SUM` or `SUM_OVER_BATCH` as reduction.\n if self._batched:\n loss = tf.reduce_sum(\n tf.reshape(diff, [tf.shape(diff)[0], -1]) ** 2, axis=-1)\n else: # Computes loss for unbatched inputs.\n loss = tf.reduce_sum(diff ** 2)\n\n return loss, grad\n\n return forward(theta)",
"def policy_evaluation_on_line_world() -> ValueFunction:\n return get_policy_evaluation(line_world, 0.9999, 0.0001)"
] |
[
"0.695184",
"0.63509303",
"0.61093414",
"0.6083635",
"0.5811482",
"0.5794628",
"0.5793523",
"0.57917213",
"0.5768919",
"0.5766916",
"0.5753904",
"0.5725246",
"0.5663353",
"0.5654156",
"0.5633597",
"0.5614551",
"0.5608629",
"0.55932194",
"0.55601877",
"0.554822",
"0.55387604",
"0.5535864",
"0.5534173",
"0.5529528",
"0.5514182",
"0.5503393",
"0.55009496",
"0.547664",
"0.5467798",
"0.5458384"
] |
0.6660737
|
1
|
Return the average of the n last evaluations of the goal function. This is a fast function which uses the last evaluations already done by the SPSA algorithm to return an approximation of the current goal value (note that we do not call the goal function another time, so the returned value is an upper bound of the true value).
|
def average_evaluations(self, n):
    assert(self.history_count > 0), "not enough evaluations in average_evaluations!"
    n = max(1, min(1000, n))
    n = min(n, self.history_count)
    # print(f'n = {n}')
    # print(f'hist_cnt = {self.history_count}')
    sum_eval = 0.0
    sum_theta = utils.linear_combinaison(0.0, self.theta0)
    for i in range(n):
        j = ((self.history_count - 1) % 1000) - i
        if j < 0:
            j += 1000
        if j >= 1000:
            j -= 1000
        # print(f'i={i}, j={j}, hist_cnt: {self.history_count}, hist_eval[{j}] = {self.history_eval[j]}')
        sum_eval += self.history_eval[j]
        sum_theta = utils.sum(sum_theta, self.history_theta[j])
    # return the average
    alpha = 1.0 / (1.0 * n)
    return (alpha * sum_eval, utils.linear_combinaison(alpha, sum_theta))
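A small, self-contained illustration of the wrap-around indexing in the loop above: with a history capacity of 1000, the last n evaluations sit at indices walking backwards from (history_count - 1) % 1000 and wrapping past 0 (the counts here are illustrative):

```python
# Reproduce the circular-buffer index arithmetic from average_evaluations.
def last_n_indices(history_count, n, capacity=1000):
    n = min(max(1, min(capacity, n)), history_count)
    indices = []
    for i in range(n):
        j = ((history_count - 1) % capacity) - i
        if j < 0:
            j += capacity
        indices.append(j)
    return indices

print(last_n_indices(history_count=1002, n=5))  # [1, 0, 999, 998, 997]
```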
|
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
|
[
"def average_best_evals(self, n):\n\n assert(self.best_count > 0), \"not enough evaluations in average_evaluations!\"\n\n n = max(1, min(1000, n))\n n = min(n, self.best_count)\n\n sum_eval = 0.0\n sum_theta = utils.linear_combinaison(0.0, self.theta0)\n for i in range(n):\n\n j = ((self.best_count - 1) % 1000) - i\n if j < 0:\n j += 1000\n if j >= 1000:\n j -= 1000\n\n sum_eval += self.best_eval[j]\n sum_theta = utils.sum(sum_theta, self.best_theta[j])\n\n # return the average\n alpha = 1.0 / (1.0 * n)\n return (alpha * sum_eval, utils.linear_combinaison(alpha, sum_theta))",
"def moving_average(self, a, n=3):\n ret = np.nancumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n - 1:] / n",
"def moving_average(a, n=3) :\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n - 1:] / n",
"def evaluate_policy(self, n_episodes = 100):\n episode_scores = []\n if self.mode=='debug':print(\"Running {} episodes!\".format(n_episodes))\n for e,episode in enumerate(range(n_episodes)):\n score = self.run_episode()\n episode_scores.append(score)\n if self.mode == 'debug': print(\"Score in {} episode = {}\".format(e,score))\n return np.mean(episode_scores)",
"def compute_avg_reward(env, policy, num_episodes):\n total_return = 0.0\n for _ in range(num_episodes):\n state = env.reset()\n done = False\n episode_return = 0.0\n while not done:\n action = policy(state)\n next_state, reward, done, _ = env.step(action)\n if done:\n reward = -1.0\n episode_return += reward\n state = next_state\n total_return += episode_return\n avg_return = total_return / num_episodes\n return avg_return",
"def moving_average(a, n=5):\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n ret[n-1:] *= 1 / n\n ret[:n-1] *= 1 / np.arange(1, n)\n return ret",
"def expected_return(self, n_step):\r\n value = 0\r\n n_experiences = 50\r\n for i in range(n_experiences):\r\n trajectory = self.domain_exploration(n_step)\r\n value += self.compute_j(trajectory)\r\n return value/n_experiences",
"def average(self, n=0):\n assert n >= 0\n for key in self.value_history:\n values = np.array(self.value_history[key][-n:])\n nums = np.array(self.n_history[key][-n:])\n avg = np.sum(values * nums) / np.sum(nums)\n self.output[key] = avg",
"def calculate(self):\n avg = self.sum / self.n if self.n != 0 else 0\n self.running_avg.append(avg)\n return avg",
"def moving_average(sig, n=100):\n window = deque(maxlen=n) # last n scores\n sig_ma = []\n for i in range(len(sig)):\n window.append(sig[i])\n sig_ma.append(np.mean(window))\n return sig_ma",
"def average(self):\n return (self.current + self.last) / 2.0",
"def average(self, n=0):\n assert n >= 0\n for key in self.val_history:\n values = np.array(self.val_history[key][-n:])\n nums = np.array(self.n_history[key][-n:])\n if values.shape == nums.shape:\n avg = np.sum(values * nums) / np.sum(nums)\n else:\n avg = np.mean(values, axis=0).tolist()\n self.output[key] = avg\n self.ready = True",
"def moving_average(a, n: int = 3) -> np.array:\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n - 1:] / n",
"def averageDistance(nbSteps, nbWalks, func):\n totalDistance = 0\n startPoint = (0, 0)\n for _ in range(nbWalks):\n arrival = None\n while arrival is None:\n arrival = func(startPoint, nbSteps)\n totalDistance += distance(startPoint, arrival)\n return pow(totalDistance/nbWalks, 2)",
"def dishlist_avg_cal(n:list)->float:\r\n all_cal = dishlist_cal(n)\r\n return sum(all_cal)/len(all_cal)",
"def compute_avg_return_and_steps(environment, policy, num_episodes=10):\n total_return = 0.0\n total_steps = 0.0\n for _ in range(num_episodes):\n\n time_step = environment.reset()\n episode_return = 0.0\n episode_steps = 0.0\n\n while not time_step.is_last():\n action_step = policy.action(time_step)\n time_step = environment.step(action_step.action)\n episode_return += time_step.reward\n episode_steps += 1\n total_return += episode_return\n total_steps += episode_steps\n\n average_return = total_return / num_episodes\n average_episode_steps = total_steps / num_episodes\n return average_return.numpy()[0], average_episode_steps",
"def average_value_estimation_scorer(algo, episodes, window_size=1024):\n total_values = []\n for episode in episodes:\n for batch in _make_batches(episode, window_size, algo.n_frames):\n actions = algo.predict(batch.observations)\n values = algo.predict_value(batch.observations, actions)\n total_values += values.tolist()\n # smaller is better, maybe?\n return -np.mean(total_values)",
"def moving_average(a, n=3) :\r\n a = a.ravel()\r\n a = np.concatenate(([a[0]]*(n-1),a)) # repeating first values\r\n ret = np.cumsum(a, dtype = float)\r\n ret[n:] = ret[n:] - ret[:-n]\r\n ret=ret[n - 1:] / n\r\n return ret",
"def make_epsilon_greedy_policy(estimator, nA):\n def policy_fn(sess, observation, epsilon):\n A = np.ones(nA, dtype=float) * epsilon / nA\n q_values = estimator.predict(sess, np.expand_dims(observation, 0))[0]\n print(f'q_values: {q_values}')\n best_action = np.argmax(q_values)\n A[best_action] += (1.0 - epsilon)\n return A\n return policy_fn",
"def get_mean(data, n=-1):\n \n return round((sum(data)/n),1)",
"def moving_average(iterable, n):\n it = iter(iterable)\n d = collections.deque(itertools.islice(it, n-1))\n d.appendleft(0)\n s = sum(d)\n for elem in it:\n s += elem - d.popleft()\n d.append(elem)\n yield s / float(n)",
"def get_mean_n_times_comeback(self):\n return self.mean_n_times_comeback",
"def simple_moving_average(n, data):\n result = []\n for m in range(n-1, len(data)):\n total = sum([data[m-i] for i in range(n)])\n result.append(total/n)\n return result",
"def fitness(self):\r\n history = self.history\r\n return sum(history) / len(history)",
"def running_avg (mylist, N):\n import numpy as np\n \n cumsum = np.cumsum(np.insert(mylist, 0, 0))\n return (cumsum[N:] - cumsum[:-N]) / float(N)",
"def make_averaged(fn, num_samples=1000):\n # BEGIN PROBLEM 7\n def average_function(*args):\n counter = 0\n result = 0\n while(counter<num_samples):\n result_holder = fn(*args)\n result+= result_holder\n counter+=1\n return result/num_samples\n return average_function\n # END PROBLEM 7",
"def make_epsilon_greedy_policy(estimator, nA):\n def policy_fn(sess, observation, epsilon):\n A = np.ones(nA, dtype=float) * epsilon / nA\n q_values = estimator.predict(sess, np.expand_dims(observation, 0))[0]\n best_action = np.argmax(q_values)\n A[best_action] += (1.0 - epsilon)\n return A\n return policy_fn",
"def evaluate(env, agent, n_games=1, greedy=False, t_max=10000):\n rewards = []\n for _ in range(n_games):\n s = env.reset()\n reward = 0\n for _ in range(t_max):\n qvalues = agent.get_qvalues([s])\n action = qvalues.argmax(axis=-1)[0] if greedy else agent.sample_actions(qvalues)[0]\n s, r, done, _ = env.step(action)\n reward += r\n if done:\n break\n\n rewards.append(reward)\n return np.mean(rewards)",
"def dishlist_avg(n:list)->float:\r\n all_prices = dishlist_prices(n)\r\n return sum(all_prices)/len(all_prices)",
"def average_fitness(self):\n return sum([e.fitness for e in self.population]) / len(self.population)"
] |
[
"0.7095657",
"0.64972407",
"0.6409218",
"0.6360709",
"0.6347688",
"0.6334922",
"0.632885",
"0.625927",
"0.62543154",
"0.6250692",
"0.6192058",
"0.61885005",
"0.6188313",
"0.61422193",
"0.6113202",
"0.61050177",
"0.60652506",
"0.60049415",
"0.5967572",
"0.59209144",
"0.59175146",
"0.59164155",
"0.5908086",
"0.58867985",
"0.5883193",
"0.58711296",
"0.58676606",
"0.5864302",
"0.5833162",
"0.58190054"
] |
0.71149397
|
0
|
Return the average of the last n best evaluations of the goal function. This is a fast function that reuses the evaluations already performed by the SPSA algorithm to return an approximation of the current goal value (note that the goal function is not called again, so the returned value is only an upper bound of the true value).
|
def average_best_evals(self, n):
assert(self.best_count > 0), "not enough evaluations in average_evaluations!"
n = max(1, min(1000, n))
n = min(n, self.best_count)
sum_eval = 0.0
sum_theta = utils.linear_combinaison(0.0, self.theta0)
for i in range(n):
j = ((self.best_count - 1) % 1000) - i
if j < 0:
j += 1000
if j >= 1000:
j -= 1000
sum_eval += self.best_eval[j]
sum_theta = utils.sum(sum_theta, self.best_theta[j])
# return the average
alpha = 1.0 / (1.0 * n)
return (alpha * sum_eval, utils.linear_combinaison(alpha, sum_theta))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def average_evaluations(self, n):\n\n assert(self.history_count > 0), \"not enough evaluations in average_evaluations!\"\n\n n = max(1, min(1000, n))\n n = min(n, self.history_count)\n # print(f'n = {n}')\n # print(f'hist_cnt = {self.history_count}')\n\n sum_eval = 0.0\n sum_theta = utils.linear_combinaison(0.0, self.theta0)\n for i in range(n):\n\n j = ((self.history_count - 1) % 1000) - i\n if j < 0:\n j += 1000\n if j >= 1000:\n j -= 1000\n\n # print(f'i={i}, j={j}, hist_cnt: {self.history_count}, hist_eval[{j}] = {self.history_eval[j]}')\n\n sum_eval += self.history_eval[j]\n sum_theta = utils.sum(sum_theta, self.history_theta[j])\n\n # return the average\n alpha = 1.0 / (1.0 * n)\n return (alpha * sum_eval, utils.linear_combinaison(alpha, sum_theta))",
"def evaluate_policy(self, n_episodes = 100):\n episode_scores = []\n if self.mode=='debug':print(\"Running {} episodes!\".format(n_episodes))\n for e,episode in enumerate(range(n_episodes)):\n score = self.run_episode()\n episode_scores.append(score)\n if self.mode == 'debug': print(\"Score in {} episode = {}\".format(e,score))\n return np.mean(episode_scores)",
"def average_value_estimation_scorer(algo, episodes, window_size=1024):\n total_values = []\n for episode in episodes:\n for batch in _make_batches(episode, window_size, algo.n_frames):\n actions = algo.predict(batch.observations)\n values = algo.predict_value(batch.observations, actions)\n total_values += values.tolist()\n # smaller is better, maybe?\n return -np.mean(total_values)",
"def compute_avg_reward(env, policy, num_episodes):\n total_return = 0.0\n for _ in range(num_episodes):\n state = env.reset()\n done = False\n episode_return = 0.0\n while not done:\n action = policy(state)\n next_state, reward, done, _ = env.step(action)\n if done:\n reward = -1.0\n episode_return += reward\n state = next_state\n total_return += episode_return\n avg_return = total_return / num_episodes\n return avg_return",
"def expected_return(self, n_step):\r\n value = 0\r\n n_experiences = 50\r\n for i in range(n_experiences):\r\n trajectory = self.domain_exploration(n_step)\r\n value += self.compute_j(trajectory)\r\n return value/n_experiences",
"def make_epsilon_greedy_policy(estimator, nA):\n def policy_fn(sess, observation, epsilon):\n A = np.ones(nA, dtype=float) * epsilon / nA\n q_values = estimator.predict(sess, np.expand_dims(observation, 0))[0]\n print(f'q_values: {q_values}')\n best_action = np.argmax(q_values)\n A[best_action] += (1.0 - epsilon)\n return A\n return policy_fn",
"def moving_average(self, a, n=3):\n ret = np.nancumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n - 1:] / n",
"def dishlist_avg_cal(n:list)->float:\r\n all_cal = dishlist_cal(n)\r\n return sum(all_cal)/len(all_cal)",
"def averageDistance(nbSteps, nbWalks, func):\n totalDistance = 0\n startPoint = (0, 0)\n for _ in range(nbWalks):\n arrival = None\n while arrival is None:\n arrival = func(startPoint, nbSteps)\n totalDistance += distance(startPoint, arrival)\n return pow(totalDistance/nbWalks, 2)",
"def average_fitness(self):\n return sum([e.fitness for e in self.population]) / len(self.population)",
"def extrapolate_with_worst_case(values: List[float], n: int = 5) -> float:\n n = min(len(values), n)\n return values[-1] + max(v_next - v_prev for v_prev, v_next in zip(values[-n:], values[-n+1:]))",
"def moving_average(sig, n=100):\n window = deque(maxlen=n) # last n scores\n sig_ma = []\n for i in range(len(sig)):\n window.append(sig[i])\n sig_ma.append(np.mean(window))\n return sig_ma",
"def make_epsilon_greedy_policy(estimator, nA):\n def policy_fn(sess, observation, epsilon):\n A = np.ones(nA, dtype=float) * epsilon / nA\n q_values = estimator.predict(sess, np.expand_dims(observation, 0))[0]\n best_action = np.argmax(q_values)\n A[best_action] += (1.0 - epsilon)\n return A\n return policy_fn",
"def compute_optimal_policy(self):\n\n self.theta_history.append(self.theta)\n\n since = time()\n for it in range(self.n_itr):\n print(\"lr: {} | Iteration N: {} \\r\".format(self.lr, it), end=\"\")\n\n self.policy = GaussianPolicy(self.theta, self.sigma)\n\n # Simulate N trajectories\n paths = collect_episodes(\n self.sim, policy=self.policy, horizon=self.T, n_episodes=self.n_episodes)\n\n avg_return = self._compute_performance(paths=paths)\n self.avg_returns.append(avg_return)\n\n # Gradient update\n self.theta += self.update_rule(self.policy.grad_J(\n paths, self.discounts, n_ep=self.n_episodes, T=self.T), lr=self.lr)\n\n # History update\n self.theta_history.append(self.theta)\n\n # print(\"\\nTook {}s\".format(round(time() - since, 2)))\n print(\"lr: {} | Iteration N: {} | Took: {}s\".format(self.lr, self.n_itr, round(time() - since, 2)))",
"def moving_average(a, n=3) :\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n return ret[n - 1:] / n",
"def calculate(self):\n avg = self.sum / self.n if self.n != 0 else 0\n self.running_avg.append(avg)\n return avg",
"def fitness(self):\r\n history = self.history\r\n return sum(history) / len(history)",
"def est(self):\n self.min_key = 'average'\n if self.iteration >= 3:\n # Poll estimated times from different algorithms\n lin_est_time = self.lin_est_no_outlier()\n lin_no_est_time = self.lin_est_outlier()\n average_est_time = self.avg_est_no_outlier()\n average_no_est_time = self.avg_est_outlier()\n\n # Record discrepancies between the estimated delta t's and the\n # actual delta t.\n if self.iteration > 8:\n self.err_rec()\n\n # Review the choice of algorithm after every 15 jobs and switch\n # to a better one if necessary.\n if not self.override:\n if self.iteration % 5 == 0 and self.iteration > 8:\n self.least_err()\n\n # Return the time associated with the algorithm that offers the\n # highest accuracy.\n if self.min_key is 'average':\n est_time = average_est_time\n if self.min_key is 'average_no':\n est_time = average_no_est_time\n elif self.min_key is 'lin':\n est_time = lin_est_time\n elif self.min_key is 'lin_no':\n est_time = lin_no_est_time\n\n est_time = int(round(est_time))\n else:\n est_time = 0\n\n # Bypasses negative estimates occasionally generated by the linear\n # algorithm and huge numbers occasionally generated by the positive\n # exponential algorithm. 3.2e7 is a little over a year.\n if est_time < 0:\n est_time = self.est_time\n if not self.override:\n self.min_key = 'average'\n else:\n self.est_time = est_time\n\n return est_time",
"def evaluate(env, agent, n_games=1, greedy=False, t_max=10000):\n rewards = []\n for _ in range(n_games):\n s = env.reset()\n reward = 0\n for _ in range(t_max):\n qvalues = agent.get_qvalues([s])\n action = qvalues.argmax(axis=-1)[0] if greedy else agent.sample_actions(qvalues)[0]\n s, r, done, _ = env.step(action)\n reward += r\n if done:\n break\n\n rewards.append(reward)\n return np.mean(rewards)",
"def average(self):\n return (self.current + self.last) / 2.0",
"def make_epsilon_greedy_policy(estimator, epsilon, nA):\n def policy_fn(observation):\n A = np.ones(nA, dtype=float) * epsilon / nA\n q_values = estimator.predict(observation)\n# print(q_values)\n best_action = np.argmax(q_values)\n A[best_action] += (1.0 - epsilon)\n return A\n return policy_fn",
"def make_epsilon_greedy_policy(Q, epsilon, nA):\n def policy_fn(observation):\n A = np.ones(nA, dtype=float) * epsilon / nA\n best_action = np.argmax(Q[observation])\n A[best_action] += (1.0 - epsilon)\n return A\n return policy_fn",
"def dishlist_avg(n:list)->float:\r\n all_prices = dishlist_prices(n)\r\n return sum(all_prices)/len(all_prices)",
"def moving_average(a, n=5):\n ret = np.cumsum(a, dtype=float)\n ret[n:] = ret[n:] - ret[:-n]\n ret[n-1:] *= 1 / n\n ret[:n-1] *= 1 / np.arange(1, n)\n return ret",
"def make_epsilon_greedy_policy(estimator, epsilon, nA):\r\n def policy_fn(observation):\r\n A = np.ones(nA, dtype=float) * epsilon / nA\r\n q_values = estimator.predict(observation)\r\n best_action = np.argmax(q_values)\r\n A[best_action] += (1.0 - epsilon)\r\n return A\r\n return policy_fn",
"def algorithm_avg_time(n, score, algorithm, *args, **kwargs):\r\n algorithms = []\r\n for num in range(n):\r\n algorithms.append(algorithm(*args, **kwargs))\r\n\r\n prof = cProfile.Profile()\r\n for num in range(n):\r\n prof.runctx('algorithms[num].run_to_score(score)', globals(), locals())\r\n stats = pstats.Stats()\r\n stats.add(prof)\r\n return(stats)",
"def get_mean(data, n=-1):\n \n return round((sum(data)/n),1)",
"def compute_avg_return_and_steps(environment, policy, num_episodes=10):\n total_return = 0.0\n total_steps = 0.0\n for _ in range(num_episodes):\n\n time_step = environment.reset()\n episode_return = 0.0\n episode_steps = 0.0\n\n while not time_step.is_last():\n action_step = policy.action(time_step)\n time_step = environment.step(action_step.action)\n episode_return += time_step.reward\n episode_steps += 1\n total_return += episode_return\n total_steps += episode_steps\n\n average_return = total_return / num_episodes\n average_episode_steps = total_steps / num_episodes\n return average_return.numpy()[0], average_episode_steps",
"def average(self, n=0):\n assert n >= 0\n for key in self.value_history:\n values = np.array(self.value_history[key][-n:])\n nums = np.array(self.n_history[key][-n:])\n avg = np.sum(values * nums) / np.sum(nums)\n self.output[key] = avg",
"def get_avg(self) -> float:\n if self._cur_elem_count < 1:\n return 0\n self._mtx.acquire()\n avg = self._sum / float(self._cur_elem_count)\n self._mtx.release()\n return avg"
] |
[
"0.6813846",
"0.6276238",
"0.61682856",
"0.61212015",
"0.6074704",
"0.6072416",
"0.6014111",
"0.6009451",
"0.60071987",
"0.5985506",
"0.5965429",
"0.59616864",
"0.59548175",
"0.5951732",
"0.5939928",
"0.5937624",
"0.5937097",
"0.5905596",
"0.5884166",
"0.58767945",
"0.58598846",
"0.5851168",
"0.5839295",
"0.5806022",
"0.5773479",
"0.5770612",
"0.5744604",
"0.57396305",
"0.57181495",
"0.5708941"
] |
0.749127
|
0
|
Generator for model filters in an operator.
|
def _model_filter_in_operator_generator(filter_operator: Operator) -> Generator:
for operator in filter_operator:
if isinstance(operator.unresolved_value, ModelFilter):
yield operator
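
For context, this generator simply flattens an operator tree and yields only the nodes whose unresolved value is a ModelFilter. Below is a hedged, self-contained sketch using stand-in dataclasses (illustrative only, not the real SageMaker JumpStart types):

from dataclasses import dataclass, field
from typing import Any, Iterator, List

@dataclass
class ModelFilter:
    key: str
    value: str

@dataclass
class Operator:
    unresolved_value: Any = None
    children: List["Operator"] = field(default_factory=list)

    def __iter__(self) -> Iterator["Operator"]:
        # depth-first walk: yield this node, then every descendant
        yield self
        for child in self.children:
            yield from child

def model_filter_leaves(filter_operator: Operator) -> Iterator[Operator]:
    # same shape as the generator above: keep only the ModelFilter leaves
    for operator in filter_operator:
        if isinstance(operator.unresolved_value, ModelFilter):
            yield operator

tree = Operator(children=[
    Operator(unresolved_value=ModelFilter("task", "ic")),
    Operator(unresolved_value="not-a-filter"),
])
print([op.unresolved_value.key for op in model_filter_leaves(tree)])  # ['task']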
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_filters(self):",
"def filter(self, filters):",
"def condition_filters(self):\r\n return filters.Filters(self)",
"def _build_filters(self, criteria: Q):\n # Decide the function based on the connector type\n func = and_ if criteria.connector == criteria.AND else or_\n params = []\n for child in criteria.children:\n if isinstance(child, Q):\n # Call the function again with the child\n params.append(self._build_filters(child))\n else:\n # Find the lookup class and the key\n stripped_key, lookup_class = self.provider._extract_lookup(child[0])\n\n # Instantiate the lookup class and get the expression\n lookup = lookup_class(stripped_key, child[1], self.model_cls)\n if criteria.negated:\n params.append(~lookup.as_expression())\n else:\n params.append(lookup.as_expression())\n\n return func(*params)",
"def filters():\n return {\"reform_vlans\": FilterModule.reform_vlans}",
"def filters(self):\n return self.__filters",
"def get_filter_operator(self):\n return self.instrument.get_filter_operator()",
"def test_all_filter_op_fields(self):\n for field in OCIFilterSerializer._opfields:\n field = \"and:\" + field\n filter_param = {field: [\"1\", \"2\"]}\n serializer = OCIFilterSerializer(data=filter_param)\n self.assertTrue(serializer.is_valid())\n for field in OCIFilterSerializer._opfields:\n field = \"or:\" + field\n filter_param = {field: [\"1\", \"2\"]}\n serializer = OCIFilterSerializer(data=filter_param)\n self.assertTrue(serializer.is_valid())",
"def test_operator(self):\n\t\tfor op in self.ops:\n\t\t\tself.filter.set_operator(op)\n\t\t\tself.assertEqual(self.filter.operator.value, op)",
"def OperatorFilter(\n self,\n name: str,\n cutoffFrequency: float,\n order: int = 2,\n operation: SymbolicConstant = NONE,\n halt: Boolean = OFF,\n limit: float = None,\n invariant: SymbolicConstant = NONE,\n ) -> OperatorFilter:\n self.filters[name] = operatorFilter = OperatorFilter(\n name, cutoffFrequency, order, operation, halt, limit, invariant\n )\n return operatorFilter",
"def filter(self, filters:list)->list:\n for item in self.list:\n use_item = True\n for filter in filters:\n filter_key, filter_value, filter_type = filter\n if filter_type == \"<\" and item[filter_key] >= filter_value:\n use_item = False\n break\n elif filter_type == \">\" and item[filter_key] <= filter_value:\n use_item = False\n break\n elif filter_type == \"<=\" and item[filter_key] > filter_value:\n use_item = False\n break\n elif filter_type == \">=\" and item[filter_key] < filter_value:\n use_item = False\n break\n elif filter_type == \"=\" and not item[filter_key] == filter_value:\n use_item = False\n break\n if use_item:\n yield item",
"def filters(self):\n return {\n 'dict_merge': do_merge,\n 'list_merge': do_list_merge,\n 'attrs': do_attrs,\n 'merge_mysql_privs': do_merge_mysql_privs,\n 'role': do_role,\n 'reduce': do_reduce,\n 'dict_join': do_dict_join,\n 'get': do_get,\n 'contains': do_contains,\n 'selectattrs': do_selectattrs,\n 'convert_integer': do_convert_integer,\n 'camel': do_camel\n }",
"def filters(self):\n return self._filters",
"def filters(self):\n\t\treturn self.local_filter",
"def getFilter(self):\n col = self.filtercol.get()\n val = self.filtercolvalue.get()\n op = self.operator.get()\n booleanop = self.booleanop.get()\n return col, val, op, booleanop",
"def items():\n for point in boolfunc.iter_points(inputs):\n # pylint: disable=C0103\n ab = self.restrict(point).pcdata[0]\n cd = other.restrict(point).pcdata[0]\n # a | c, b & d\n yield ((ab | cd) & 2) | ((ab & cd) & 1)",
"def items():\n for point in boolfunc.iter_points(inputs):\n # pylint: disable=C0103\n ab = self.restrict(point).pcdata[0]\n cd = other.restrict(point).pcdata[0]\n # a & c, b | d\n yield ((ab & cd) & 2) | ((ab | cd) & 1)",
"def filter_data(self, data):\n for f in self.filters:\n data = getattr(self, f)(data)\n return data",
"def binary_ops(self, ctx: Context) -> Iterator[AnnotatedExpression]:\n for type, expr_group in ctx.groupby_type():\n if type in (bool, Callable):\n continue\n # TODO: Allow tuple comparisons?\n if TypeAnnotation(type).iterable:\n continue\n\n for commutative_operator in self.commutative_operators:\n for left, right in combinations(expr_group, 2):\n yield AnnotatedExpression(\n ast.BinOp(\n left=left.expr, op=commutative_operator(), right=right.expr\n ),\n TypeAnnotation(type),\n )\n for dependent_operator in self.non_commutative_operators:\n for left, right in permutations(expr_group, 2):\n yield AnnotatedExpression(\n ast.BinOp(\n left=left.expr, op=dependent_operator(), right=right.expr\n ),\n TypeAnnotation(type),\n )",
"def filters(self):\n filters = IterDict()\n for key in self.FILTERS:\n filter = IterDict()\n filter_param = ((self.prefix or '') + '-' + key).strip('-')\n\n for value, display in self.fields[key].choices:\n choice = {}\n choice['value'] = value\n choice['display'] = display\n\n # These are raw values so they must come from data, and be\n # coerced to strings\n choice['active'] = str(value) == self.data.get(filter_param, '')\n\n params = copy.copy(self.data)\n # Filter by this current choice\n params[filter_param] = value\n choice['querystring'] = urllib.urlencode(params)\n # remove this filter\n params[filter_param] = ''\n choice['remove'] = urllib.urlencode(params)\n\n filter[value] = choice\n filters[key] = filter\n return filters",
"def _filter_model(data: list, line: int, col: int, filters: list):\n\n return filter(\n lambda proxies: proxies[line][col] in filters,\n data\n )",
"def process_filters(self, filters, queryset, view):\n return filters",
"def _build_filter(self, **kwargs):\n\n def object_filter(obj):\n for key, value in kwargs.items():\n # we replace dango-like lookup by dots, so attrgetter can do his job\n\n getter = utils.attrgetter(key)\n if hasattr(value, '__call__'):\n # User passed a callable for a custom comparison\n if not value(getter(obj)):\n return False\n else:\n if not getter(obj) == value:\n return False\n return True\n\n return object_filter",
"def fanins(op):\n return {t.op for t in op.inputs}",
"def get_filters() -> List[Tuple[str, Callable]]:\n return [\n ('group_files', group_files),\n ('timesince', timesince),\n ('just_updated', just_updated),\n ('get_category_name', get_category_name),\n ('process_status_display', process_status_display),\n ('compilation_status_display', compilation_status_display),\n ('duration', duration),\n ('tidy_filesize', tidy_filesize),\n ('asdict', asdict),\n ('compilation_log_display', compilation_log_display)\n ]",
"def filter(self, *args, **kwargs):",
"def __getSelectionFilter(self):\n \n selectionPairs = []\n selectionPairs.append(('field','field'))\n selectionPairs.append(('spw','spw'))\n selectionPairs.append(('polarization','correlation'))\n selectionPairs.append(('baseline','antenna'))\n selectionPairs.append(('time','timerange'))\n selectionPairs.append(('scan','scan'))\n selectionPairs.append(('uvdist','uvrange'))\n selectionPairs.append(('scanintent','intent'))\n selectionPairs.append(('observation','observation'))\n return self.__generateFilter(selectionPairs)",
"def _get_filters(self, req):\n filters = {}\n for param in req.str_params:\n if param in SUPPORTED_FILTERS or param.startswith('property-'):\n # map filter name or carry through if property-*\n filter_name = SUPPORTED_FILTERS.get(param, param)\n filters[filter_name] = req.str_params.get(param)\n return filters",
"def _set_runtime_filters(self):\n runtime_filters = []\n if not all(len(filter_tuple) == 3 for filter_tuple in self.filters):\n raise TypeError(\n '%s: filters must be a sequence of tuple with length=3'\n ' got %r instead' % (self.__class__.__name__, self.filters))\n\n for filter_type, filter_operator, filter_value in self.filters:\n if isinstance(filter_type, ValueProvider):\n filter_type = filter_type.get()\n if isinstance(filter_operator, ValueProvider):\n filter_operator = filter_operator.get()\n if isinstance(filter_value, ValueProvider):\n filter_value = filter_value.get()\n runtime_filters.append((filter_type, filter_operator, filter_value))\n\n return runtime_filters or ()",
"def _process_model_like_filter(model, query, filters):\n if query is None:\n return query\n\n for key in sorted(filters):\n column_attr = getattr(model, key)\n if 'property' == type(column_attr).__name__:\n continue\n value = filters[key]\n if not (isinstance(value, (six.string_types, int))):\n continue\n query = query.filter(\n column_attr.op('LIKE')(u'%%%s%%' % value))\n return query"
] |
[
"0.6577749",
"0.6336782",
"0.62646586",
"0.61139977",
"0.58842677",
"0.5834455",
"0.58338654",
"0.5800242",
"0.57979363",
"0.57475597",
"0.5693952",
"0.5657997",
"0.56530845",
"0.5643832",
"0.5632665",
"0.5546424",
"0.5539193",
"0.55261505",
"0.54394835",
"0.53975594",
"0.5394605",
"0.5392298",
"0.53736484",
"0.5373525",
"0.53609115",
"0.5346609",
"0.53382224",
"0.5338082",
"0.5332467",
"0.5317776"
] |
0.841577
|
0
|
Iterate over the operators in the filter and assign each one its resolved value if it is found in the second argument; if it is not found, assign ``UNKNOWN``.
|
def _put_resolved_booleans_into_filter(
filter_operator: Operator, model_filters_to_resolved_values: Dict[ModelFilter, BooleanValues]
) -> None:
for operator in _model_filter_in_operator_generator(filter_operator):
model_filter = operator.unresolved_value
operator.resolved_value = model_filters_to_resolved_values.get(
model_filter, BooleanValues.UNKNOWN
)
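
The essential behaviour is the fallback: a filter with no entry in the resolved-values map is marked UNKNOWN via dict.get with a default. A toy illustration (the enum and the string keys below are made up for the example):

from enum import Enum

class BooleanValues(Enum):
    TRUE = "true"
    FALSE = "false"
    UNKNOWN = "unknown"

resolved = {"task == ic": BooleanValues.TRUE}
for expr in ("task == ic", "framework == pytorch"):
    # missing keys fall back to UNKNOWN instead of raising
    print(expr, "->", resolved.get(expr, BooleanValues.UNKNOWN))
# task == ic -> BooleanValues.TRUE
# framework == pytorch -> BooleanValues.UNKNOWN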
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_operator(self):\n\t\tfor op in self.ops:\n\t\t\tself.filter.set_operator(op)\n\t\t\tself.assertEqual(self.filter.operator.value, op)",
"def _model_filter_in_operator_generator(filter_operator: Operator) -> Generator:\n for operator in filter_operator:\n if isinstance(operator.unresolved_value, ModelFilter):\n yield operator",
"def _populate_model_filters_to_resolved_values(\n manifest_specs_cached_values: Dict[str, Any],\n model_filters_to_resolved_values: Dict[ModelFilter, BooleanValues],\n model_filters: Operator,\n) -> None:\n for model_filter in model_filters:\n if model_filter.key in manifest_specs_cached_values:\n cached_model_value = manifest_specs_cached_values[model_filter.key]\n evaluated_expression: BooleanValues = evaluate_filter_expression(\n model_filter, cached_model_value\n )\n model_filters_to_resolved_values[model_filter] = evaluated_expression",
"def _process_operator(self, expr, operator, func, *args, **kwargs):\n for elt in self.model.xml_element_children(expr):\n self._process_operator(elt, operator, func, *args, **kwargs)\n if isinstance(expr, mathml_apply) and expr.operator().localName == operator:\n func(expr, *args, **kwargs)",
"def _select_operator(self, state, action):\n if self.operators_as_actions:\n # There should be only one possible operator if actions are operators\n possible_operators = set()\n for name, operator in self.domain.operators.items():\n if name.lower() == action.predicate.name.lower():\n assert len(possible_operators) == 0\n possible_operators.add(operator)\n else:\n # Possibly multiple operators per action\n possible_operators = set(self.domain.operators.values())\n\n # Knowledge base: literals in the state + action taken\n kb = set(state.literals) | {action}\n\n selected_operator = None\n assignment = None\n for operator in possible_operators:\n if isinstance(operator.preconds, Literal):\n conds = [operator.preconds]\n else:\n conds = operator.preconds.literals\n # Necessary for binding the operator arguments to the variables\n if self.operators_as_actions:\n conds = [action.predicate(*operator.params)] + conds\n # Check whether action is in the preconditions\n action_literal = None\n for lit in conds: \n if lit.predicate == action.predicate:\n action_literal = lit\n break\n if action_literal is None:\n continue\n # For proving, consider action variable first\n action_variables = action_literal.variables\n variable_sort_fn = lambda v : (not v in action_variables, v)\n assignments = find_satisfying_assignments(kb, conds,\n variable_sort_fn=variable_sort_fn,\n type_to_parent_types=self.domain.type_to_parent_types)\n num_assignments = len(assignments)\n if num_assignments > 0:\n assert num_assignments == 1, \"Nondeterministic envs not supported\"\n selected_operator = operator\n assignment = assignments[0]\n break\n\n return selected_operator, assignment",
"def apply_rule(operator, pattern, replacement):\n new_op = operator.match_first(pattern)\n if new_op is None:\n return None\n return new_op.replace_first(\"generic\", replacement)",
"def applyOperator(self, operator, operand):\n if self.currentTotal == None:\n self.currentTotal = operand\n elif operator == \"=\":\n self.equalsOp(operand)\n elif self.previousOperand:\n self.previousOperand = None\n else:\n self.computeTotal(operator, operand)\n if operator != \"=\":\n self.previousOperator = operator",
"def set_operator(self, operator):\n\n self['dimensionFilterClauses']['operator'] = operator.upper()\n\n return self",
"def filter(self, param, value, op=None):\n if op is None:\n self.params[param] = value\n elif op in ('le', 'lt', 'ge', 'gt', 'like', 'not_like', 'ne'):\n param_key = '{param}__{op}'.format(param=param, op=op.upper())\n self.params[param_key] = value\n else:\n raise TypeError('Invalid operator: %r' % op)\n return self",
"def operator(self, sort):\r\n return None",
"def applyOperator(self, operand1, operand2, operator):\n\n if operator == \"*\":\n return operand1 * operand2\n elif operator == \"/\":\n return operand1 / operand2\n elif operator == \"+\":\n return operand1 + operand2\n else:\n return operand1 - operand2",
"def Rewrite(self, expression, defaults=None, keys=None):\n self._keys = keys or {}\n _, operands = super(FilterScopeRewriter, self).Rewrite(\n expression, defaults=defaults)\n if isinstance(operands, six.string_types):\n operands = set([operands])\n return None, operands",
"def resolve_op(self, op, intermediate_values,\n **_):\n op_name = op.name.lower()\n op_type = op.type\n\n input_names = op.input_names\n input_values = [intermediate_values[key.lower()] for key in input_names]\n\n input_kwargs: Dict[str, Any] = op.input_kwargs\n op_kwargs: Dict[str, Any] = op.op_kwargs\n op_kwargs[\"name\"] = op_name\n\n if op_type == OpType.NONE:\n pass\n\n elif op_type == OpType.IDENTITY:\n pass\n\n # nn.linear\n\n elif op_type == OpType.DENSE:\n _kv_resolve_symbolic(op_kwargs, [\"kernel_init\", \"bias_init\"])\n _kv_resolve_symbolic(op_kwargs, [\"features\"], input_values,\n intermediate_values)\n\n elif op_type == OpType.DENSE_GENERAL:\n _kv_to_int(op_kwargs, [\"axis\", \"batch_dims\"])\n _kv_resolve_symbolic(op_kwargs, [\"kernel_init\", \"bias_init\"])\n _kv_resolve_symbolic(op_kwargs, [\"features\"], input_values,\n intermediate_values)\n\n elif op_type == OpType.CONV:\n _kv_to_int(op_kwargs, [\n \"kernel_size\",\n \"strides\",\n \"input_dilation\",\n \"kernel_dilation\",\n \"padding\",\n ])\n _kv_resolve_symbolic(op_kwargs, [\"kernel_init\", \"bias_init\"])\n _kv_resolve_symbolic(op_kwargs, [\"features\", \"feature_group_count\"],\n input_values, intermediate_values)\n\n # others\n\n elif op_type == OpType.ADD:\n _kv_to_float(op_kwargs, [\"layer_drop_rate\"])\n\n elif op_type == OpType.SCALAR_ADD:\n _kv_to_float(input_kwargs, [\"const\"])\n\n elif op_type == OpType.MUL:\n pass\n\n elif op_type == OpType.SCALAR_MUL:\n _kv_to_float(input_kwargs, [\"const\"])\n\n elif op_type == OpType.DOT_GENERAL:\n _kv_to_int(input_kwargs, [\"dimension_numbers\"])\n\n elif op_type == OpType.EINSUM:\n pass\n\n # nn.attention\n\n elif op_type == OpType.SELF_ATTENTION:\n _kv_resolve_symbolic(op_kwargs, [\"kernel_init\", \"bias_init\"])\n _kv_resolve_symbolic(op_kwargs,\n [\"num_heads\", \"qkv_features\", \"out_features\"],\n input_values, intermediate_values)\n\n # nn.activation\n\n elif op_type in [OpType.RELU, OpType.GELU, OpType.SWISH, OpType.SIGMOID]:\n pass\n\n elif op_type == OpType.SOFTMAX:\n _kv_to_int(input_kwargs, [\"axis\"])\n\n # nn.normalization\n\n elif op_type == OpType.BATCH_NORM:\n _kv_to_int(op_kwargs, [\"axis\"])\n _kv_resolve_symbolic(op_kwargs, [\"scale_init\", \"bias_init\"])\n\n elif op_type == OpType.LAYER_NORM:\n pass\n\n elif op_type == OpType.GROUP_NORM:\n _kv_resolve_symbolic(op_kwargs, [\"num_groups\", \"group_size\"],\n input_values, intermediate_values)\n\n # reshape operators\n\n elif op_type == OpType.RESHAPE:\n _kv_resolve_symbolic(input_kwargs, [\"new_shape\"], input_values,\n intermediate_values)\n _kv_to_int(input_kwargs, [\"new_shape\"])\n\n elif op_type == OpType.FLATTEN:\n pass\n\n elif op_type == OpType.TRANSPOSE:\n _kv_to_int(input_kwargs, [\"axes\"])\n\n # nn.stochastic\n\n elif op_type == OpType.DROPOUT:\n _kv_to_int(op_kwargs, [\"broadcast_dims\"])\n _kv_to_float(op_kwargs, [\"rate\"])\n\n elif op_type == OpType.STOCH_DEPTH:\n _kv_to_float(op_kwargs, [\"layer_drop_rate\"])\n\n # nn.pooling\n\n elif op_type == OpType.AVG_POOL:\n _kv_to_int(input_kwargs, [\"window_shape\", \"strides\"])\n\n elif op_type == OpType.MAX_POOL:\n _kv_to_int(input_kwargs, [\"window_shape\", \"strides\"])\n\n elif op_type == OpType.MEAN:\n _kv_to_int(input_kwargs, [\"axis\"])\n\n # new param\n\n elif op_type == OpType.PARAM:\n _kv_to_int(input_kwargs, [\"shape\"])\n _kv_resolve_symbolic(input_kwargs, [\"shape\", \"init_fn\"], input_values,\n intermediate_values)\n\n else:\n raise ValueError(f\"op_type {op_type} not supported...\")\n\n return new_op(\n op_name,\n 
op_type,\n input_names,\n input_kwargs,\n op_kwargs,\n num_outputs=op.num_outputs)",
"def _to_ops(from_op):\n\n for to_op in OPERATORS:\n if to_op and isinstance(from_op, ast.Not):\n # 'not' can only be removed but not replaced with\n # '+', '-' or '~' b/c that may lead to strange results\n pass\n elif isinstance(from_op, ast.UAdd) and (to_op is None):\n # '+1' => '1' yields equivalent mutations\n pass\n else:\n yield to_op",
"def __setitem__(self, filter, value):\n if isinstance(value, tuple):\n value = list(value)\n\n datastore_types.ValidateProperty(' ', value, read_only=True)\n match = self._CheckFilter(filter, value)\n property = match.group(1)\n operator = match.group(3)\n\n dict.__setitem__(self, filter, value)\n\n if operator in self.INEQUALITY_OPERATORS:\n if self.__inequality_prop is None:\n self.__inequality_prop = property\n else:\n assert self.__inequality_prop == property\n self.__inequality_count += 1\n\n if filter not in self.__filter_order:\n self.__filter_order[filter] = self.__filter_counter\n self.__filter_counter += 1\n\n self.__cached_count = None",
"def all_compare_operators(request: Any) -> Any:\n return request.param",
"def all_compare_operators(request: Any) -> Any:\n return request.param",
"def test_custom_operators():\n grid = UnitGrid([32])\n field = ScalarField.random_normal(grid)\n eq = PDE({\"u\": \"undefined(u)\"})\n\n with pytest.raises(ValueError):\n eq.evolution_rate(field)\n\n def make_op(state):\n return lambda state: state\n\n UnitGrid.register_operator(\"undefined\", make_op)\n\n eq._cache = {} # reset cache\n res = eq.evolution_rate(field)\n np.testing.assert_allclose(field.data, res.data)\n\n del UnitGrid._operators[\"undefined\"] # reset original state",
"def _apply_binary_op_elementwise(\n self: ConcreteStructuredMetricValue, other: ConcreteStructuredMetricValue,\n op: Callable[[float, float], float]) -> ConcreteStructuredMetricValue:\n ...",
"def do_assign(parser, token):\n bits = token.contents.split()\n if len(bits) != 3:\n raise template.TemplateSyntaxError(\"'%s' tag takes two arguments\" % bits[0])\n value = parser.compile_filter(bits[2])\n return AssignNode(bits[1], value)",
"def do_assign(parser, token):\n bits = token.contents.split()\n if len(bits) != 3:\n raise template.TemplateSyntaxError(\"'%s' tag takes two arguments\" % bits[0])\n value = parser.compile_filter(bits[2])\n return AssignNode(bits[1], value)",
"def mutate_bySingleOperator(self, root, operator):\n self.operator = operator\n\n ast.fix_missing_locations(root)\n # traverse the target ast tree and mutate interesting node\n mutated_ast = self.visit(root)\n ast.fix_missing_locations(root)\n\n return mutated_ast",
"def _remove_operator(self, operator):",
"def update(self, other):\n for filter, value in other.items():\n self.__setitem__(filter, value)",
"def _op_inplace(self, op: str, other: t.Any) -> te.Self:\n if hasattr(self.__members__, op):\n if isinstance(other, InspectableSet):\n other = other.__members__\n if getattr(self.__members__, op)(other) is NotImplemented:\n return NotImplemented\n return self\n return NotImplemented",
"def parse(self, data):\n val = data.get(self.name, missing)\n if not isinstance(val, dict):\n return (self.operators['$eq'], self.field.deserialize(val)),\n\n return tuple(\n (\n self.operators[op],\n (self.field.deserialize(val)) if op not in self.list_ops else [\n self.field.deserialize(v) for v in val])\n for (op, val) in val.items() if op in self.operators\n )",
"def _attribute_lookup(\n obj, query_string, query_value, sep=SEP,\n operator_collection=OperatorCollection,\n):\n attribute_list = query_string.split(sep)\n if hasattr(operator_collection, attribute_list[-1]) \\\n and callable(getattr(operator_collection, attribute_list[-1])):\n operator_name = attribute_list.pop()\n else:\n operator_name = operator_collection.default_operator\n operator = getattr(operator_collection, operator_name)\n\n value = _get_attribute(obj, attribute_list)\n\n return operator(value, query_value)",
"def match_value(self, value, op):\n if self.date_value is not None:\n try:\n return op(hxl.datatypes.normalise_date(value), self.date_value)\n except ValueError:\n pass\n\n if self.number_value is not None:\n try:\n return op(hxl.datatypes.normalise_number(value), self.number_value)\n except:\n pass\n\n return self.op(hxl.datatypes.normalise_string(value), self.string_value)",
"def test_alias_with_implicit_filter(self):\n\n # Setup alias with simple filter string\n self.t.config(\"alias.foofilter\", \"project:Home _projects\")\n\n # Setup tasks for projects Home and Work\n self.t(\"add project:Home Home task\")\n self.t(\"add project:Work Work task\")\n\n # Sanity check that _projects command outputs\n # both the \"Home\" and \"Work\" projects\n code, out, err = self.t(\"_projects\")\n self.assertIn(\"Home\", out,\n msg=\"task _projects -> Home\")\n self.assertIn(\"Work\", out,\n msg=\"task _projects -> Work\")\n\n # Check that foo command outputs the \"Home\" project\n code, out, err = self.t(\"foofilter\")\n self.assertIn(\"Home\", out,\n msg=\"task foofilter -> project:Home _projects > Home\")\n self.assertNotIn(\"Work\", out,\n msg=\"task foofilter -> project:Home _projects > Work\")",
"def ops(rule):\n ops_dict = {'>' : operator.gt,\n '<' : operator.lt,\n '>=': operator.ge,\n '<=': operator.le,\n '=' : operator.eq,\n '==' : operator.eq}\n return ops_dict[rule]"
] |
[
"0.60437655",
"0.5642127",
"0.5469392",
"0.5314203",
"0.52503777",
"0.5242413",
"0.50989926",
"0.5098336",
"0.50518554",
"0.5039647",
"0.50250936",
"0.49366522",
"0.4914287",
"0.4895046",
"0.48431635",
"0.48428714",
"0.48428714",
"0.47520688",
"0.47494227",
"0.4746074",
"0.4746074",
"0.47278437",
"0.4720686",
"0.47017017",
"0.46808794",
"0.4679788",
"0.4661639",
"0.4631636",
"0.4615833",
"0.46153212"
] |
0.6927108
|
0
|
Iterate over the model filters; if a filter's key has a cached value, evaluate the filter. The resolved filter values are placed in ``model_filters_to_resolved_values``.
|
def _populate_model_filters_to_resolved_values(
manifest_specs_cached_values: Dict[str, Any],
model_filters_to_resolved_values: Dict[ModelFilter, BooleanValues],
model_filters: Operator,
) -> None:
for model_filter in model_filters:
if model_filter.key in manifest_specs_cached_values:
cached_model_value = manifest_specs_cached_values[model_filter.key]
evaluated_expression: BooleanValues = evaluate_filter_expression(
model_filter, cached_model_value
)
model_filters_to_resolved_values[model_filter] = evaluated_expression
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def process_filters(self, filters, queryset, view):\n return filters",
"def apply_filters(self, filters):\n self._data = self.model.objects.filter(**filters)",
"def _get_filters(self, request, queryset, view): # noqa\n self.opts = queryset.model._meta\n filter_fields = getattr(view, \"filter_fields\", None)\n self.exclude = {}\n self.filters = {}\n\n if filter_fields:\n blacklist = RexList(getattr(view, \"filter_blacklist\", []))\n mapping = self._get_mapping(view)\n\n for fieldname_arg in self.query_params:\n raw_value = self.query_params.get(fieldname_arg)\n if raw_value in [\"''\", '\"\"']:\n raw_value = \"\"\n\n negate = fieldname_arg[-1] == \"!\"\n\n if negate:\n filter_field_name = fieldname_arg[:-1]\n TARGET = self.exclude\n else:\n TARGET = self.filters\n filter_field_name = fieldname_arg\n\n if filter_field_name in self.excluded_query_params:\n continue\n if self.ignore_filter(request, filter_field_name, view):\n continue\n try:\n if filter_field_name in blacklist:\n raise InvalidQueryArgumentError(fieldname_arg)\n parts = None\n if \"__\" in filter_field_name:\n parts = filter_field_name.split(\"__\")\n filter_field_name = parts[0]\n op = parts[-1]\n else:\n op = \"\"\n processor = getattr(\n self,\n \"process_{}\".format(filter_field_name),\n getattr(view, \"drfqs_filter_{}\".format(filter_field_name), None),\n )\n\n if (filter_field_name not in filter_fields) and (not processor):\n self.unknown_arguments.append((fieldname_arg, filter_field_name))\n continue\n # raise InvalidQueryArgumentError(filter_field_name)\n if raw_value is None and not processor:\n continue\n # field is configured in Serializer\n # so we use 'source' attribute\n if filter_field_name in mapping:\n real_field_name = mapping[filter_field_name].source\n # if '.' in real_field_name:\n # real_field_name = real_field_name.split('.')[0]\n # field_name = real_field_name.replace('.', '__')\n else:\n real_field_name = filter_field_name\n\n if processor:\n payload = {\n \"field\": filter_field_name,\n \"request\": request,\n \"param\": fieldname_arg,\n \"negate\": negate,\n \"op\": op,\n \"field_name\": real_field_name,\n \"parts\": parts,\n \"value\": raw_value,\n \"real_field_name\": real_field_name,\n }\n _f, _e = processor(dict(self.filters), dict(self.exclude), **payload)\n self.filters.update(**_f)\n self.exclude.update(**_e)\n else:\n if not raw_value:\n continue\n # field_object = opts.get_field(real_field_name)\n value_type = self.field_type(real_field_name)\n if parts:\n f = \"{}__{}\".format(real_field_name, \"__\".join(parts[1:]))\n else:\n f = filter_field_name\n if op in [\"in\", \"contained_by\"]:\n value = raw_value.split(\",\")\n elif op == \"acontains\":\n value = raw_value.split(\",\")\n f = f.replace(\"__acontains\", \"__contains\")\n elif op == \"isnull\":\n value = parse_bool(raw_value)\n elif value_type == bool:\n value = parse_bool(raw_value)\n else:\n value = raw_value\n TARGET[f] = value\n except ValueError:\n raise InvalidQueryValueError(fieldname_arg, raw_value)\n except QueryFilterException:\n raise\n except Exception as e:\n logger.exception(e)\n raise\n return self.filters, self.exclude",
"def _process_model_like_filter(model, query, filters):\n if query is None:\n return query\n\n for key in sorted(filters):\n column_attr = getattr(model, key)\n if 'property' == type(column_attr).__name__:\n continue\n value = filters[key]\n if not (isinstance(value, (six.string_types, int))):\n continue\n query = query.filter(\n column_attr.op('LIKE')(u'%%%s%%' % value))\n return query",
"def _process_model_like_filter(model, query, filters):\n if query is None:\n return query\n\n if filters:\n for key in sorted(filters):\n column_attr = getattr(model, key)\n if 'property' == type(column_attr).__name__:\n continue\n value = filters[key]\n if not (isinstance(value, (str, int))):\n continue\n query = query.filter(\n column_attr.op('LIKE')(u'%%%s%%' % value))\n return query",
"def filter_by(self, key: str, *args, **kwargs):\n filter_ = self.filters.get(key)\n if not filter_:\n raise ValueError(key)\n return filter_(*args, **kwargs)",
"def _put_resolved_booleans_into_filter(\n filter_operator: Operator, model_filters_to_resolved_values: Dict[ModelFilter, BooleanValues]\n) -> None:\n for operator in _model_filter_in_operator_generator(filter_operator):\n model_filter = operator.unresolved_value\n operator.resolved_value = model_filters_to_resolved_values.get(\n model_filter, BooleanValues.UNKNOWN\n )",
"def filters(self):\n filters = IterDict()\n for key in self.FILTERS:\n filter = IterDict()\n filter_param = ((self.prefix or '') + '-' + key).strip('-')\n\n for value, display in self.fields[key].choices:\n choice = {}\n choice['value'] = value\n choice['display'] = display\n\n # These are raw values so they must come from data, and be\n # coerced to strings\n choice['active'] = str(value) == self.data.get(filter_param, '')\n\n params = copy.copy(self.data)\n # Filter by this current choice\n params[filter_param] = value\n choice['querystring'] = urllib.urlencode(params)\n # remove this filter\n params[filter_param] = ''\n choice['remove'] = urllib.urlencode(params)\n\n filter[value] = choice\n filters[key] = filter\n return filters",
"def get_filter_values(self):\n return [f.get() for f in self._filters[:-1]] # Ignore placeholder",
"def _filter(self, values, asset):\n log.debug(\"Testing trigger filters against asset %s\", asset['id'])\n for filter in self.filters:\n if not filter._apply(values, asset):\n return False\n return True",
"def _set_runtime_filters(self):\n runtime_filters = []\n if not all(len(filter_tuple) == 3 for filter_tuple in self.filters):\n raise TypeError(\n '%s: filters must be a sequence of tuple with length=3'\n ' got %r instead' % (self.__class__.__name__, self.filters))\n\n for filter_type, filter_operator, filter_value in self.filters:\n if isinstance(filter_type, ValueProvider):\n filter_type = filter_type.get()\n if isinstance(filter_operator, ValueProvider):\n filter_operator = filter_operator.get()\n if isinstance(filter_value, ValueProvider):\n filter_value = filter_value.get()\n runtime_filters.append((filter_type, filter_operator, filter_value))\n\n return runtime_filters or ()",
"def evaluate_filter(self, x):\n raise NotImplementedError",
"def filter_data(self, data):\n for f in self.filters:\n data = getattr(self, f)(data)\n return data",
"def apply_filters(self, query, model_class, filters, whitelist=None,\n nested_conditions=None, stack_size_limit=100,\n convert_key_names_func=str, gettext=None):\n return apply_mql_filters(\n query,\n model_class,\n filters=filters,\n nested_conditions=nested_conditions,\n whitelist=whitelist,\n stack_size_limit=stack_size_limit,\n convert_key_names_func=convert_key_names_func,\n gettext=gettext\n )",
"def extract_filters(self):\n self.filters = self.controller.filters\n\n self.extract_core_stats()\n self.extract_abilities()\n # goes through and adds all list-based filters\n for filterType, elements in self.filters.items():\n if type(elements) == list and len(elements) > 0:\n self.extract_filter_list(filterType, elements)",
"def matches_filters(self, entity):\r\n item = dict(entity)\r\n item[self.query.get_meta().pk.column] = entity.key()\r\n return self._matches_filters(item, self.query.where)",
"def _filter_model(data: list, line: int, col: int, filters: list):\n\n return filter(\n lambda proxies: proxies[line][col] in filters,\n data\n )",
"def filter(self, filter_dict):\n pass",
"def evaluate_filters(\n isovar_result,\n filter_thresholds,\n filter_flags=[]):\n filter_values_dict = evaluate_boolean_filters(isovar_result, filter_flags)\n filter_values_dict.update(\n evaluate_threshold_filters(isovar_result, filter_thresholds))\n return filter_values_dict",
"def post_process(klass, results, unsafe_filters):\n logging.info(u'DatastoreModel.post_process() handled unsafe filters:')\n logging.info(u'{}'.format(unsafe_filters))\n all_matching_sets = []\n for k, v in unsafe_filters.items():\n matches = set([e for e in results if getattr(e, k) in v])\n all_matching_sets.append(matches)\n return set.intersection(*all_matching_sets)",
"def _apply_filters(self, metadata):\n if \"keywords\" in self.filters:\n if not metadata.keywords:\n return False\n if not all(keyword in metadata.keywords for keyword in self.filters[\"keywords\"]):\n return False\n if \"features\" in self.filters:\n if not metadata.features:\n return False\n if not all(feature in metadata.features for feature in self.filters[\"features\"]):\n return False\n if \"authors\" in self.filters:\n if not metadata.authors:\n return False\n if not all(author in metadata.authors for author in self.filters[\"authors\"]):\n return False\n if \"version\" in self.filters:\n if not metadata.pylith_version:\n return False\n for verMeta in metadata.pylith_version:\n if not eval(\"{ver} {verMeta}\".format(ver=self.filters[\"version\"], verMeta=verMeta)):\n return False\n return True",
"def update_filters(self):\n\n # Update household filter\n household_filter = [True if agent == 'household' else False for agent \\\n in self.source.data['agent_type']]\n self.household_view.filters[0] = BooleanFilter(household_filter)\n\n # Update neighbourhood filter\n neighbourhood_filter = [True if agent == 'neighbourhood' else False for\\\n agent in self.source.data['agent_type']]\n self.neighbourhood_view.filters[0] = BooleanFilter(\n neighbourhood_filter)\n\n # Update school filter\n school_filter = [True if agent == 'school' else False for agent in \\\n self.source.data['agent_type']]\n self.school_view.filters[0] = BooleanFilter(school_filter)",
"def filters():\n return {\"reform_vlans\": FilterModule.reform_vlans}",
"def apply_filters(filters, items):\n return scom.apply_filters(filters, items)",
"def get_filters(self):",
"def _filter(\n self,\n data: List[dict],\n filters: List[Tuple[str, SupportedFilterType]],\n state_dataclass: StateSchema,\n detail: bool,\n ) -> List[dict]:\n filters = _convert_filters_type(filters, state_dataclass)\n result = []\n for datum in data:\n match = True\n for filter_column, filter_predicate, filter_value in filters:\n filterable_columns = state_dataclass.filterable_columns()\n filter_column = filter_column.lower()\n if filter_column not in filterable_columns:\n raise ValueError(\n f\"The given filter column {filter_column} is not supported. \"\n f\"Supported filter columns: {filterable_columns}\"\n )\n\n if filter_column not in datum:\n match = False\n elif filter_predicate == \"=\":\n if isinstance(filter_value, str) and isinstance(\n datum[filter_column], str\n ):\n # Case insensitive match for string filter values.\n match = datum[filter_column].lower() == filter_value.lower()\n else:\n match = datum[filter_column] == filter_value\n elif filter_predicate == \"!=\":\n if isinstance(filter_value, str) and isinstance(\n datum[filter_column], str\n ):\n match = datum[filter_column].lower() != filter_value.lower()\n else:\n match = datum[filter_column] != filter_value\n else:\n raise ValueError(\n f\"Unsupported filter predicate {filter_predicate} is given. \"\n \"Available predicates: =, !=.\"\n )\n\n if not match:\n break\n\n if match:\n result.append(filter_fields(datum, state_dataclass, detail))\n return result",
"def _filter(self, _model, **kwargs):\n return _model.objects.filter(**kwargs)",
"def test_get_models_filters(fc: fetcher.Fetcher, test_project_name, test_model):\n ml = fc.get_models(project=test_project_name)\n assert all(m.project_name == test_project_name for m in ml)\n\n ml = fc.get_models(model=test_model[\"name\"])\n assert all(m.name == test_model[\"name\"] for m in ml)\n\n ml = fc.get_models(project=test_project_name, model=test_model[\"name\"])\n assert all(\n m.project_name == test_project_name and m.name == test_model[\"name\"] for m in ml\n )",
"def solve_filter_metrics(self):\n\n if 'metrics' in self.filter_request:\n filter_metrics = self.filter_request['metrics']\n metrics_request = {}\n\n temp = filter_metrics.split(',')\n for i in temp:\n metrics_request[i.strip()] = None\n\n for i in range(len(self.list_pack)):\n self.apply_filter_metrics(i, metrics_request.copy())",
"def apply_filters(self, queryset, applicable_filters=None, applicable_exclusions=None):\n for field, options in applicable_filters[\"field_facets\"].items():\n queryset = queryset.facet(field, **options)\n\n for field, options in applicable_filters[\"date_facets\"].items():\n queryset = queryset.date_facet(field, **options)\n\n for field, options in applicable_filters[\"query_facets\"].items():\n queryset = queryset.query_facet(field, **options)\n\n return queryset"
] |
[
"0.60582566",
"0.58838373",
"0.5869402",
"0.5766602",
"0.5689414",
"0.5587206",
"0.55353963",
"0.5509496",
"0.54153544",
"0.5376045",
"0.5346958",
"0.53428364",
"0.53274405",
"0.5317073",
"0.5286032",
"0.5283004",
"0.5261405",
"0.52375436",
"0.52314645",
"0.5215077",
"0.520189",
"0.52006847",
"0.5195232",
"0.51687443",
"0.5125886",
"0.512555",
"0.5118607",
"0.5112352",
"0.5106123",
"0.5097889"
] |
0.747011
|
0
|
Parse the model ID and return a tuple of (framework, task, rest-of-id).
|
def extract_framework_task_model(model_id: str) -> Tuple[str, str, str]:
_id_parts = model_id.split("-")
if len(_id_parts) < 3:
raise ValueError(f"incorrect model ID: {model_id}.")
framework = _id_parts[0]
task = _id_parts[1]
name = "-".join(_id_parts[2:])
return framework, task, name
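
A quick usage illustration (the model IDs below are hypothetical, but follow the <framework>-<task>-<rest-of-id> convention the code assumes):

print(extract_framework_task_model("pytorch-ic-mobilenet-v2"))
# -> ('pytorch', 'ic', 'mobilenet-v2')

print(extract_framework_task_model("huggingface-eqa-distilbert-base-cased"))
# -> ('huggingface', 'eqa', 'distilbert-base-cased')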
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def parse_id(app_object_id_string):\n splitter = re.compile(r'-')\n tokens = splitter.split(app_object_id_string)\n app_string = tokens[0]\n model_string = tokens[1]\n content_id = int(tokens[2])\n content_type = ContentType.objects.get(app_label=app_string, model=model_string)\n object = content_type.model_class().objects.get(id=content_id)\n return object",
"def parse_task(task: str) -> tuple[int, int]:\n parts = task.strip().split()\n return int(parts[0]), int(parts[6])",
"def parseID(uid):\n\n info = uid.split('_')\n if len(info) != 3:\n print('invalid ID')\n return(1)\n plot = info[0]\n height = info[1]\n tree_id = info[2]\n\n return plot, height, tree_id",
"def _get_id_and_model(self, id_or_model):\n if isinstance(id_or_model, self.collection.model):\n model = id_or_model\n elif isinstance(id_or_model, str):\n # Assume we have an ID string\n model = self.collection.get(id_or_model)\n else:\n raise TypeError('Unexpected type {}, expected {} or {}'.format(\n type(id_or_model), str, self.collection.model))\n\n return model.id, model",
"def find_id(self):\n\t\tx , y = self.id.split(':')\n\t\treturn int(x), int(y)",
"def _parse(self, the_id: typing.Union[int, str]) -> int:\n return int(the_id)",
"def get_model_id(thing: Union[\"Model\", t.ModelId, UUID, str]) -> t.ModelId:\n if isinstance(thing, UUID):\n return t.ModelId(thing)\n elif isinstance(thing, Model):\n return thing.id\n return t.ModelId(UUID(thing))",
"def parse_id(id: str) -> Union[WorkflowId, CromwellWorkflowLabel, ExperimentId]:\n workflow_id = WorkflowId.from_string(id)\n if workflow_id is not None:\n return workflow_id\n experiment_id = ExperimentId.from_string(id)\n if experiment_id is not None:\n return experiment_id\n workflow_label = CromwellWorkflowLabel.from_string(id)\n return workflow_label",
"def async_format_model_id(model: str, id_: str) -> str:\n return f\"{async_format_model(model)} {async_format_id(id_)}\"",
"def _get_job_id(self) -> str:\n return self.split_name[2][3:]",
"def getid(data):\n return int(data.split('/')[-1])",
"def process_id_from(self):\r\n return self._tokens[1]",
"def parse_id(string):\n return string.split('/')[-1]",
"def model_id(self):\n date_str = dt.now().strftime(\"%Y-%m-%d_%H:%M\")\n return \"%s_%s_%s_%s\" % (date_str, self.embedding_type, self.class_label, self.drop_irrelevant)",
"def _parse_rec_id(rec_id: str) -> Tuple[int, bool, Optional[int]]:\n\n if m := re.match(ORIGINAL_RECORD_PATTERN, rec_id):\n return [int(m.group(1)), \"A\", None]\n elif m := re.match(DUPE_RECORD_PATTERN, rec_id):\n return [int(m.group(1)), \"B\", m.group(2)]\n else:\n raise Exception(f\"Unable to parse rec_id: {rec_id}\")",
"def _extract_id(self, dirty_id):\n if dirty_id[:1] == \"/\":\n return dirty_id.split(\"/\")[-1]\n else:\n return dirty_id",
"def get_app_id_and_task_uuid_from_executor_id(executor_id: str) -> Tuple[str, str]:\n app_id, task_uuid = executor_id.rsplit(\".\", 1)\n return app_id, task_uuid",
"def parse_res_id(response):\n pass",
"def getTrid(request):\n words = request.replace('\\'').strip().split(',')[2].split(':')\n return int(words[0])",
"def get_task_data(self, task):\n raw = pickle.loads(task)\n if len(raw) == 7:\n task_id, klass_str, _, _, _, _, _ = raw\n elif len(raw) == 6:\n task_id, klass_str, _, _, _, _ = raw\n return task_id, klass_str",
"def process_id_to(self):\r\n return self._tokens[3]",
"def get_id(request, request_type):\n if request_type == \"post\":\n id = request.POST['id']\n else:\n id = request.GET['id']\n id = id.rsplit('_')\n id = int(id[1])\n return id",
"def get_model_id(model_name, workspace, header, user):\n uri = \"https://api.anaplan.com/1/3/workspaces/{}/models/\".format(workspace)\n response = requests.get(uri, headers = header)\n response_json = json.loads(models.text.encode(\"utf-8\"))\n for model in response_json:\n if model[u\"name\"] == unicode(model_name):\n return model[u\"id\"]",
"def src_task_id(self):\n return struct.unpack('<H', self.pkt.payload[4:6])[0]",
"def split_action_id (id):\n assert isinstance(id, basestring)\n split = id.split ('.', 1)\n toolset = split [0]\n name = ''\n if len (split) > 1:\n name = split [1]\n return (toolset, name)",
"def get_short_task_id(task_id: str) -> str:\n return task_id.split(MESOS_TASK_SPACER)[-1]",
"def get_task_id(self):\n if self.task_id:\n return self.task_id\n return (f'{self.task_type}_{self.get_source_system().lower()}'\n f'.{self.get_source_subsystem().lower()}.{self.get_name().upper()}')",
"def get_hosted_model_id(self):\n payload = {}\n payload[\"request_type\"] = \"model_id\"\n payload[\"observation\"] = None\n response = self._realtime_predictor.predict(payload)\n model_id = response[\"model_id\"]\n\n return model_id",
"def _parse_id(self, id):\n try:\n name, pid = id.split(':', 1)\n except ValueError:\n raise ValueError('id %s is not in the form provider:pid' % id)\n if name not in self.providers:\n raise ValueError('no such provider \"%s\"' % name)\n return self.providers[name], tostr(pid)",
"def test_model_identifier_value(self):\n \n model_identifier = get_model()[1]\n \n # Check to make sure the model_name is 'iMac14,2'\n self.assertEqual(model_identifier, 'iMac14,2')"
] |
[
"0.63340414",
"0.6116038",
"0.6081815",
"0.6078587",
"0.6059168",
"0.6045916",
"0.60072386",
"0.59797734",
"0.58408475",
"0.5745599",
"0.57417506",
"0.57395905",
"0.5736701",
"0.57130075",
"0.56183445",
"0.5603926",
"0.5577302",
"0.5551033",
"0.5548943",
"0.55013406",
"0.5496628",
"0.54573363",
"0.54405934",
"0.5425904",
"0.5419722",
"0.5417763",
"0.5411665",
"0.5382536",
"0.5370452",
"0.53490233"
] |
0.79884815
|
0
|
List tasks for JumpStart, and optionally apply filters to result.
|
def list_jumpstart_tasks( # pylint: disable=redefined-builtin
filter: Union[Operator, str] = Constant(BooleanValues.TRUE),
region: str = JUMPSTART_DEFAULT_REGION_NAME,
) -> List[str]:
tasks: Set[str] = set()
for model_id, _ in _generate_jumpstart_model_versions(filter=filter, region=region):
_, task, _ = extract_framework_task_model(model_id)
tasks.add(task)
return sorted(list(tasks))
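
# Hedged usage sketch: list every task, or only the tasks of models matching a filter.
# The string filter syntax ("framework == pytorch") is assumed from the surrounding
# JumpStart filter utilities and is illustrative, not authoritative.
all_tasks = list_jumpstart_tasks()
pytorch_tasks = list_jumpstart_tasks(filter="framework == pytorch")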
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def list_tasks(ctx):\n ctx.run(\"invoke --list\")",
"def list():\n manager = Actions()\n tasks_list = manager.get_tasks_list()\n console_utils.print_tree(manager, tasks_list)",
"def task_list(request):\n ip = get_ip(request)\n tasks = Task.objects.filter(ip=ip).order_by(\"-start_time\")\n # pager\n paginator = Paginator(tasks, 15)\n num_pages = paginator.num_pages\n page_list = paginator.page_range\n page_number = request.GET.get(\"page\", 1)\n page_obj = paginator.get_page(page_number)\n\n current_page = page_obj.number\n display_page_list = []\n if len(page_list) <= MAX_PAGE_NUM:\n for i in page_list:\n display_page_list.append((i, f\"?page={i}\"))\n else:\n if current_page <= num_pages - MAX_PAGE_NUM:\n for i in range(current_page, current_page + 4):\n display_page_list.append((i, f\"?page={i}\"))\n display_page_list.append((\"...\", \"#\"))\n for i in range(1, 0 - 1, -1):\n t = num_pages - i\n display_page_list.append((t, f\"?page={t}\"))\n pass\n else:\n for i in range(num_pages - MAX_PAGE_NUM, num_pages + 1):\n display_page_list.append((i, f\"?page={i}\"))\n\n return render(request, \"ponsol2web/task_list.html\",\n {\"count\": num_pages, \"page_obj\": page_obj, \"page_list\": display_page_list})",
"def describe_import_tasks(filters=None, maxResults=None, nextToken=None):\n pass",
"async def list_tasks():",
"def tasks_list(self, use_json_output, **kwargs):\n tasks = self._get_tasks_list()\n log.info('jobs_id\\tproject id\\tname')\n for t in tasks:\n jobs_id = [job[\"id\"] for segment in t[\"segments\"] for job in segment[\"jobs\"]]\n if use_json_output:\n log.info(json.dumps(t, indent=4))\n else:\n log.info('{jobs_id}\\t{id}\\t{name}'.format(jobs_id=\", \".join(map(str, jobs_id)), **t))",
"def discover_tasks(app):\n\n task_arguments.add_argument(\n \"preload-defaults-from-site\",\n type=str,\n required=False,\n default=\"\",\n choices=preload_defaults_from_site_choices,\n help=\"Select site within environment to load defaults from, argument format is <environment_name>/<site_name>\",\n )\n\n for tasks_base_dir in app.config[\"JINJAMATOR_TASKS_BASE_DIRECTORIES\"]:\n for file_ext in [\"py\", \"j2\"]:\n for tasklet_dir in glob.glob(\n os.path.join(tasks_base_dir, \"**\", f\"*.{file_ext}\"), recursive=True\n ):\n task_dir = os.path.dirname(tasklet_dir)\n append = True\n for dir_chunk in task_dir.replace(tasks_base_dir, \"\").split(\n os.path.sep\n ): # filter out hidden directories\n if dir_chunk.startswith(\".\") or dir_chunk in [\"__pycache__\"]:\n append = False\n break\n\n dir_name = task_dir.replace(tasks_base_dir, \"\")[1:]\n if append and dir_name not in available_tasks_by_path:\n\n task_id = xxhash.xxh64(task_dir).hexdigest()\n\n task_info = {\n \"id\": task_id,\n \"path\": dir_name,\n \"base_dir\": tasks_base_dir,\n \"description\": get_section_from_task_doc(task_dir)\n or \"no description\",\n }\n available_tasks_by_path[dir_name] = task_info\n try:\n task = JinjamatorTask()\n log.debug(app.config[\"JINJAMATOR_FULL_CONFIGURATION\"])\n task._configuration.merge_dict(\n app.config[\"JINJAMATOR_FULL_CONFIGURATION\"]\n )\n\n task.load(\n os.path.join(task_info[\"base_dir\"], task_info[\"path\"])\n )\n with app.app_context():\n data = json.loads(\n jsonify(\n task.get_jsonform_schema()[\"schema\"]\n ).data.decode(\"utf-8\")\n )\n task_models[task_info[\"path\"]] = api.schema_model(task_id, data)\n del task\n\n log.info(f\"registered model for task {task_dir}\")\n\n dynamic_role_name = f\"task_{dir_name}\"\n new_role = JinjamatorRole(name=dynamic_role_name)\n\n with app.app_context():\n db.session.add(new_role)\n try:\n db.session.commit()\n except Exception:\n pass\n\n @ns.route(f\"/{task_info['path']}\", endpoint=task_info[\"path\"])\n class APIJinjamatorTask(Resource):\n @api.doc(\n f\"get_task_{task_info['path'].replace(os.path.sep,'_')}_schema\"\n )\n @api.expect(task_arguments)\n @api.doc(\n params={\n \"Authorization\": {\n \"in\": \"header\",\n \"description\": \"A valid access token\",\n }\n }\n )\n @require_role(\n role=or_(\n User.roles.any(\n JinjamatorRole.name == dynamic_role_name\n ),\n User.roles.any(JinjamatorRole.name == \"tasks_all\"),\n )\n )\n def get(self):\n \"\"\"\n Returns the json-schema or the whole alpacajs configuration data for the task\n \"\"\"\n\n args = task_arguments.parse_args(request)\n schema_type = args.get(\"schema-type\", \"full\")\n try:\n preload_data = json.loads(\n args.get(\"preload-data\", \"{}\")\n )\n except TypeError:\n preload_data = {}\n preload_data = remove_redacted(preload_data)[1]\n environment_site = args.get(\n \"preload-defaults-from-site\"\n )\n relative_task_path = request.endpoint.replace(\n \"api.\", \"\"\n )\n inner_task = JinjamatorTask()\n\n inner_task._configuration.merge_dict(\n app.config[\"JINJAMATOR_FULL_CONFIGURATION\"]\n )\n inner_task.configuration.merge_dict(preload_data)\n\n inner_task.load(relative_task_path)\n\n if environment_site not in [None, \"None\", \"\"]:\n inner_task._configuration[\n \"jinjamator_site_path\"\n ] = site_path_by_name.get(environment_site)\n inner_task._configuration[\n \"jinjamator_site_name\"\n ] = environment_site\n env_name, site_name = environment_site.split(\"/\")\n roles = [\n role[\"name\"]\n for role in g._user.get(\"roles\", [])\n ]\n if (\n 
f\"environment_{env_name}|site_{site_name}\"\n in roles\n or f\"environments_all\" in roles\n or f\"administrator\" in roles\n ):\n inner_task.configuration.merge_yaml(\n \"{}/defaults.yaml\".format(\n site_path_by_name.get(environment_site)\n )\n )\n else:\n abort(\n 403,\n f\"User neither has no role environment_{env_name}|site_{site_name} nor environments_all nor administrator. Access denied.\",\n )\n\n full_schema = inner_task.get_jsonform_schema()\n\n if schema_type in [\"\", \"full\"]:\n response = jsonify(full_schema)\n elif schema_type in [\"schema\"]:\n response = jsonify(full_schema.get(\"schema\", {}))\n elif schema_type in [\"data\"]:\n response = jsonify(full_schema.get(\"data\", {}))\n elif schema_type in [\"options\"]:\n response = jsonify(full_schema.get(\"options\", {}))\n elif schema_type in [\"view\"]:\n response = jsonify(full_schema.get(\"view\", {}))\n del inner_task\n return response\n\n @api.doc(\n f\"create_task_instance_for_{task_info['path'].replace(os.path.sep,'_')}\"\n )\n @api.expect(task_models[task_info[\"path\"]], validate=False)\n @api.doc(\n params={\n \"Authorization\": {\n \"in\": \"header\",\n \"description\": \"A valid access token\",\n }\n }\n )\n @require_role(\n role=or_(\n User.roles.any(\n JinjamatorRole.name == dynamic_role_name\n ),\n User.roles.any(JinjamatorRole.name == \"tasks_all\"),\n )\n )\n def post(self):\n \"\"\"\n Creates an instance of the task and returns the job_id\n \"\"\"\n\n from jinjamator.task.celery import run_jinjamator_task\n from jinjamator.daemon.database import db\n\n relative_task_path = request.endpoint.replace(\n \"api.\", \"\"\n )\n data = request.get_json()\n job_id = str(uuid.uuid4())\n user_id = g._user[\"id\"]\n\n job = run_jinjamator_task.apply_async(\n [\n relative_task_path,\n data,\n data.get(\"output_plugin\", \"console\"),\n user_id,\n ],\n task_id=job_id,\n created_by_user_id=user_id,\n )\n\n db_job = list(\n db.session.query(DB_Job).filter(\n DB_Job.task_id == job.id\n )\n )\n db_job = db_job and db_job[0]\n if not db_job:\n db_job = DB_Job(job.id)\n db_job.status = \"SCHEDULED\"\n db_job.configuration = data\n db_job.jinjamator_task = relative_task_path\n db_job.created_by_user_id = user_id\n db.session.add(db_job)\n db.session.flush()\n db.session.commit()\n\n return jsonify({\"job_id\": job.id})\n\n if task_info[\"description\"]:\n post.__doc__ += task_info[\"description\"]\n get.__doc__ += task_info[\"description\"]\n\n except Exception as e:\n import traceback\n\n log.error(\n f\"unable to register {task_dir}: {e} {traceback.format_exc()}\"\n )",
"def test_list_tasks_filter_name(self):\n rv = TEST_CLIENT.post(\"/tasks/list-tasks\", json={\"filters\": {\"name\": \"task\"}})\n result = rv.json()\n\n expected = util.MOCK_TASK_LIST\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)",
"def see_tasks(self, widget):\n my_task_list = tasklistwindow.TaskListWindow(self.task_list)",
"def task_get_all(context, filters=None, marker=None, limit=None,\n sort_key='created_at', sort_dir='desc', admin_as_user=False):\n filters = filters or {}\n\n session = get_session()\n query = session.query(models.Task)\n\n if not (context.is_admin or admin_as_user) and context.owner is not None:\n query = query.filter(models.Task.owner == context.owner)\n\n _task_soft_delete(context, session=session)\n\n showing_deleted = False\n\n if 'deleted' in filters:\n deleted_filter = filters.pop('deleted')\n query = query.filter_by(deleted=deleted_filter)\n showing_deleted = deleted_filter\n\n for (k, v) in filters.items():\n if v is not None:\n key = k\n if hasattr(models.Task, key):\n query = query.filter(getattr(models.Task, key) == v)\n\n marker_task = None\n if marker is not None:\n marker_task = _task_get(context, marker,\n force_show_deleted=showing_deleted)\n\n sort_keys = ['created_at', 'id']\n if sort_key not in sort_keys:\n sort_keys.insert(0, sort_key)\n\n query = _paginate_query(query, models.Task, limit,\n sort_keys,\n marker=marker_task,\n sort_dir=sort_dir)\n\n task_refs = query.all()\n\n tasks = []\n for task_ref in task_refs:\n tasks.append(_task_format(task_ref, task_info_ref=None))\n\n return tasks",
"def tasks(self):\n args = Namespace(rev=self.rev)\n data = run_query('push_results', args)['data']\n\n tasks = []\n for kwargs in data:\n # Do a bit of data sanitization.\n if any(a not in kwargs for a in ('label', 'duration', 'result', 'classification')):\n continue\n\n if kwargs['duration'] <= 0:\n continue\n\n tasks.append(Task(**kwargs))\n\n return tasks",
"def list_jumpstart_scripts( # pylint: disable=redefined-builtin\n filter: Union[Operator, str] = Constant(BooleanValues.TRUE),\n region: str = JUMPSTART_DEFAULT_REGION_NAME,\n) -> List[str]:\n if (isinstance(filter, Constant) and filter.resolved_value == BooleanValues.TRUE) or (\n isinstance(filter, str) and filter.lower() == BooleanValues.TRUE.lower()\n ):\n return sorted([e.value for e in JumpStartScriptScope])\n\n scripts: Set[str] = set()\n for model_id, version in _generate_jumpstart_model_versions(filter=filter, region=region):\n scripts.add(JumpStartScriptScope.INFERENCE)\n model_specs = accessors.JumpStartModelsAccessor.get_model_specs(\n region=region,\n model_id=model_id,\n version=version,\n )\n if model_specs.training_supported:\n scripts.add(JumpStartScriptScope.TRAINING)\n\n if scripts == {e.value for e in JumpStartScriptScope}:\n break\n return sorted(list(scripts))",
"def show_tasks(self):\n task_ids = [\n t and t['id'] for t in self.controller.selected_tasks\n ]\n\n if self._check_cluster():\n self.print_list(\n ('id', 'status'), self.controller.get_tasks(),\n lambda x: task_ids.index(x['id'])\n )",
"def list(ctx, id, json):\n\n kargs={'host': c.cfg['host'], \"api_version\": c.cfg['api_version'], \"url_path\": \"/tasks\"}\n if id != None:\n return ctx.invoke(show, id=id, json=json)\n\n task = estask.Task(kargs)\n try:\n dict_resp= task.list()\n except Exception as e:\n sys.exit(\"Fail: %s\" %str(e))\n\n if dict_resp == None:\n click.echo(\"Fail: error response\")\n sys.exit(1)\n\n if json:\n print(jsn.dumps(dict_resp, sort_keys=True, indent=4))\n return\n try:\n task.print_list(dict_resp)\n except Exception as e:\n sys.exit(\"Fail: %s\" %str(e))",
"def filter_tasks(tasks, task_list):\n qs = [tasks.filter(name=n) for n in task_list]\n return list(map(lambda o: o[0] if o else None, qs))",
"def tasks(self, tags=None, summary=True, tags_intersect=None):\n return list(self.all_tasks(summary=summary, tags=tags, tags_intersect=tags_intersect))",
"def _execute(self):\n return self.warrior.filter_tasks(self.filter_obj)",
"def start_scans_for_lists_who_are_up_for_scanning() -> Task:\n\n tasks = []\n\n for urllist in UrlList.objects.all().filter():\n # this also gets the lists that are not scanned. The scan date needs to progress, otherwise it will be\n # scanned instantly when the list will be enabled. This also goes for deleted lists.\n if urllist.enable_scans is False or urllist.is_deleted is True:\n urllist.renew_scan_moment()\n continue\n\n if urllist.is_due_for_scanning():\n tasks.append(initialize_scan.si(urllist))\n\n # placed here, as otherwise the list is never due for scanning as the date might be updated to something\n # new in the future.\n urllist.renew_scan_moment()\n\n # using this in create_function_job so a job is created, allowing for tracking this a bit\n return group(tasks)",
"def get_tasks(data: dict) -> dict:\n status_code = http.HTTPStatus.OK\n body = {\"filters\": data}\n try:\n tasks = actions.get_tasks(data)\n body[\"tasks\"] = [task.to_dict() for task in tasks]\n except tskexc.TaskHTTPException as e:\n body = {\"error\": e.message}\n status_code = e.http_status\n return {\"statusCode\": status_code, \"body\": json.dumps(body)}",
"def do_tasks(self, arg):\n args = shlex.split(arg)\n if not args:\n # TODAY\n started = datetime.date.fromtimestamp(0)\n finished = datetime.date.today()\n limit = 10\n else:\n limit = 0\n try:\n started, finished = helpers.parse_date_parameters(args)\n except ValueError, err:\n print(err)\n return\n tasks = self.db.get_profiled_tasks(started, finished, limit)\n def _display_fields(task):\n return [\n task['tid'],\n u'{task}#{project}'.format(\n task=task['tname'], project=task['pname']),\n u'{delta} / {started}'.format(\n delta=helpers.timedelta_to_human(datetime.datetime.now() -\n task['started']),\n started=datetime.datetime.strftime(\n task['started'], '%c').decode('utf8')\n ) if not task['finished'] else '[closed]',\n task['description'].decode('utf8')\n ]\n refined = map(_display_fields, tasks)\n print(tabulate(refined, ['ID', 'Task', 'Activity', 'Description']))",
"def list_tasks(q = None):\n to = {\"p\":{}, \"v\":{}}\n for k, v in to.items():\n pin = HeaterController.pin_ids[k]\n state = subprocess.check_output([\"gpio\", 'read', pin]).strip()\n to[k][\"state\"] = \"on\" if state==\"0\" else \"off\"\n to[k][\"on_id\"] = \"\"\n to[k][\"on_time\"] = \"\"\n to[k][\"off_id\"] = \"\"\n to[k][\"off_time\"] = \"\"\n\n tasks = []\n if q is None:\n output = subprocess.check_output([\"atq\"])\n else:\n output = subprocess.check_output([\"atq\", \"-q\", q])\n for t in output.split(\"\\n\"):\n m = HeaterController.task_parse.match(t.strip())\n if m is not None:\n task_id = m.group(1)\n task_time = datetime.strptime(m.group(2), r'%a %b %d %H:%M:%S %Y').strftime(r'%y%m%d%H%M')\n q_name = m.group(3)\n tasks.append((task_id, task_time, q_name))\n tasks = sorted(tasks, key=lambda x: x[2] + x[1])\n while len(tasks):\n task_id, task_time, q_name = tasks.pop(0)\n output = subprocess.check_output([\"at\", \"-c\", task_id])\n # get last line of the output\n lines = output.strip().split(\"\\n\")\n # find value of -o parameter that specifies operation\n m = HeaterController.cmd_parse.match(lines[-1].strip())\n if m is not None:\n cmd = m.group(1)\n if cmd == r'on':\n to[q_name][\"on_id\"] = task_id\n to[q_name][\"on_time\"] = task_time\n elif cmd == r'off':\n to[q_name][\"off_id\"] = task_id\n to[q_name][\"off_time\"] = task_time\n else:\n assert False, \"Unexpected value of -o parameter: {}\".format(cmd)\n\n return {\"tasks\":to}",
"def show_tasks():\n top_level_tasks = query_with_results(\"select label, description from task where parent = ''\", [])\n for task in top_level_tasks:\n _show_task(task)",
"def get_task_list(self):\n raise NotImplementedError()",
"async def list_tasks(self, *, option: ListApiOptions) -> ListApiResponse:\n try:\n reply = await self._client.get_all_task_info(\n timeout=option.timeout,\n filters=option.filters,\n exclude_driver=option.exclude_driver,\n )\n except DataSourceUnavailable:\n raise DataSourceUnavailable(GCS_QUERY_FAILURE_WARNING)\n\n result = [\n protobuf_to_task_state_dict(message) for message in reply.events_by_task\n ]\n\n num_after_truncation = len(result)\n num_total = num_after_truncation + reply.num_status_task_events_dropped\n\n result = self._filter(result, option.filters, TaskState, option.detail)\n num_filtered = len(result)\n\n result.sort(key=lambda entry: entry[\"task_id\"])\n result = list(islice(result, option.limit))\n return ListApiResponse(\n result=result,\n total=num_total,\n num_after_truncation=num_after_truncation,\n num_filtered=num_filtered,\n )",
"def tasks():",
"def main(to_be_scheduled):\n\n tasks = order_by_ftime(to_be_scheduled)\n print select_activity(tasks)",
"def command(task_id, tail, wip, limit):\n if task_id:\n task = storage.get_by_id(task_id)\n\n if not task:\n click.echo(f\"Task {task_id} not found.\")\n sys.exit(1)\n\n tasks = [task]\n else:\n tasks = storage.all(limit=limit, reverse=tail, wip=wip)\n\n print_header()\n for task in tasks:\n show_task(task)",
"def get_tasks(**filters):\n return db.task.find(filters) if filters else db.task.find()",
"async def list_tasks(fields: Set[str] = None):\n tasks = celery_app.describe_tasks()\n tasks = [TaskOut(**task).dict(include=fields) for task in tasks]\n return tasks",
"def _process_filter(self, task_selection):\n filter_list = []\n def add_filtered_task(seq, f_name):\n \"\"\"add task to list `filter_list` and set task.options from params\n @return list - str: of elements not yet\n \"\"\"\n filter_list.append(f_name)\n # only tasks specified by name can contain parameters\n if f_name in self.tasks:\n # parse task_selection\n the_task = self.tasks[f_name]\n\n # Initialize options for the task\n seq = the_task.init_options(seq)\n\n # if task takes positional parameters set all as pos_arg_val\n if the_task.pos_arg is not None:\n # cehck value is not set yet\n # it could be set directly with api.run_tasks()\n # -> NamespaceTaskLoader.load_tasks()\n if the_task.pos_arg_val is None:\n the_task.pos_arg_val = seq\n seq = []\n return seq\n\n # process...\n seq = task_selection[:]\n # process cmd_opts until nothing left\n while seq:\n f_name = seq.pop(0) # always start with a task/target name\n # select tasks by task-name pattern\n if '*' in f_name:\n for task_name in self._get_wild_tasks(f_name):\n add_filtered_task((), task_name)\n else:\n seq = add_filtered_task(seq, f_name)\n return filter_list"
] |
[
"0.62302595",
"0.5944628",
"0.5892642",
"0.5848125",
"0.58470535",
"0.5805977",
"0.5783318",
"0.5729834",
"0.56717557",
"0.5634618",
"0.56331176",
"0.5580704",
"0.5574483",
"0.5566287",
"0.55040795",
"0.5500361",
"0.5482131",
"0.54775214",
"0.54646075",
"0.546253",
"0.54547966",
"0.5446945",
"0.543273",
"0.5415748",
"0.54090035",
"0.54076576",
"0.5371621",
"0.53624874",
"0.5346444",
"0.5328104"
] |
0.7140575
|
0
|
List frameworks for JumpStart, and optionally apply filters to result.
|
def list_jumpstart_frameworks( # pylint: disable=redefined-builtin
filter: Union[Operator, str] = Constant(BooleanValues.TRUE),
region: str = JUMPSTART_DEFAULT_REGION_NAME,
) -> List[str]:
frameworks: Set[str] = set()
for model_id, _ in _generate_jumpstart_model_versions(filter=filter, region=region):
framework, _, _ = extract_framework_task_model(model_id)
frameworks.add(framework)
return sorted(list(frameworks))
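
# Hedged sketch: the same pattern restricted to a task; "task == ic" is an assumed,
# illustrative filter string.
ic_frameworks = list_jumpstart_frameworks(filter="task == ic")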
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def list_frameworks():\n sys.stdout.write(\n \"\"\"Testable frameworks: %s\n\nNote that membership in this list means the framework can be tested with\nPyMongo, not necessarily that it is officially supported.\n\"\"\"\n % \", \".join(sorted(FRAMEWORKS))\n )",
"def _add_framework_name(frameworks: Namespace):\n for name, framework in frameworks:\n framework.name = str_sanitize(name)",
"def _find_all_parents(framework, frameworks):\n parents = []\n while \"extends\" in framework and framework.extends is not None:\n framework = frameworks[framework.extends]\n parents.append(framework)\n return parents",
"def populateTabs(self):\n frameworks = self.data['frameworks']\n for fw in frameworks:\n frameworkElements = frameworks[fw]\n for element in frameworkElements:\n\n ui = self.framework2gui[fw].get(element)\n\n if isinstance(ui, QComboBox):\n ui.clear()\n ui.setView(QListView())\n for i, deviceName in enumerate(frameworks[fw][element]):\n ui.addItem(deviceName)\n if frameworks[fw]['deviceName'] == deviceName:\n ui.setCurrentIndex(i)\n\n elif isinstance(ui, QLineEdit):\n ui.setText(f'{frameworks[fw][element]}')\n\n elif isinstance(ui, QCheckBox):\n ui.setChecked(frameworks[fw][element])\n\n elif isinstance(ui, QDoubleSpinBox):\n ui.setValue(frameworks[fw][element])\n return True",
"def import_frameworks(self):\n num_attacks = 0\n for framework in os.listdir(config.attacks_path):\n if framework[-3:] == \".py\":\n continue\n\n if \"__\" in framework:\n continue\n\n else:\n self.loaded_frameworks[framework] = []\n for module in os.listdir(f\"{config.attacks_path}/{framework}\"):\n if module == \"__init__.py\" or module[-3:] != \".py\":\n continue\n else:\n attacks_path_split = config.attacks_path.split(\"/\")\n module_directory = attacks_path_split[0]\n attack_path = attacks_path_split[1]\n self.loaded_frameworks[framework].append(\n f\"{module_directory}/{attack_path}/{framework}/{module}\"\n )\n num_attacks += 1\n return num_attacks",
"def selectTabs(self):\n firstFramework = next(iter(self.data['frameworks']))\n framework = self.data.get('framework')\n if not framework:\n framework = firstFramework\n\n tabIndex = self.getTabIndex(self.ui.tab, framework)\n self.ui.tab.setCurrentIndex(tabIndex)\n\n for index in range(0, self.ui.tab.count()):\n isVisible = self.ui.tab.widget(index).objectName() in self.data['frameworks']\n self.ui.tab.setTabVisible(index, isVisible)\n return True",
"def load_framework_definitions(frameworks_file: Union[str, List[str]], config: Namespace) -> Namespace:\n frameworks = _load_and_merge_framework_definitions(frameworks_file, config)\n for tag, defs in frameworks:\n _sanitize_and_add_defaults(defs, config)\n return frameworks",
"def main():\n usage = \"\"\"python %s FRAMEWORK_NAME\n\nTest PyMongo with a variety of greenlet-based monkey-patching frameworks. See\npython %s --help-frameworks.\"\"\" % (\n sys.argv[0],\n sys.argv[0],\n )\n\n try:\n opts, args = getopt.getopt(sys.argv[1:], \"h\", [\"help\", \"help-frameworks\"])\n except getopt.GetoptError as err:\n print(str(err))\n print(usage)\n sys.exit(2)\n\n for option_name, _ in opts:\n if option_name in (\"-h\", \"--help\"):\n print(usage)\n sys.exit()\n elif option_name == \"--help-frameworks\":\n list_frameworks()\n sys.exit()\n else:\n raise AssertionError(\"unhandled option\")\n\n if not args:\n print(usage)\n sys.exit(1)\n\n if args[0] not in FRAMEWORKS:\n print(\"%r is not a testable framework.\\n\" % args[0])\n list_frameworks()\n sys.exit(1)\n\n run(\n args[0], *args[1:] # Framework name.\n ) # Command line args to pytest, like what test to run.",
"def cli_list(ctx):\n\n _list_spiders(ctx)",
"def list_jumpstart_scripts( # pylint: disable=redefined-builtin\n filter: Union[Operator, str] = Constant(BooleanValues.TRUE),\n region: str = JUMPSTART_DEFAULT_REGION_NAME,\n) -> List[str]:\n if (isinstance(filter, Constant) and filter.resolved_value == BooleanValues.TRUE) or (\n isinstance(filter, str) and filter.lower() == BooleanValues.TRUE.lower()\n ):\n return sorted([e.value for e in JumpStartScriptScope])\n\n scripts: Set[str] = set()\n for model_id, version in _generate_jumpstart_model_versions(filter=filter, region=region):\n scripts.add(JumpStartScriptScope.INFERENCE)\n model_specs = accessors.JumpStartModelsAccessor.get_model_specs(\n region=region,\n model_id=model_id,\n version=version,\n )\n if model_specs.training_supported:\n scripts.add(JumpStartScriptScope.TRAINING)\n\n if scripts == {e.value for e in JumpStartScriptScope}:\n break\n return sorted(list(scripts))",
"def get_middlewares(self) -> List:\n\n raise NotImplementedError()",
"def list_analyzers(args: argparse.Namespace):\n first = True\n queue = [tuple(c) + (\"lookout.\",) for c in pkgutil.iter_modules(lookout.__path__)]\n while queue:\n importer, name, ispkg, prefix = queue.pop(0)\n\n if not ispkg or name == \"core\":\n continue\n\n m = importer.find_module(name).load_module(name)\n if getattr(m, \"__meta__\", False):\n queue.extend(tuple(c) + (prefix + name + \".\",)\n for c in pkgutil.iter_modules(m.__path__))\n continue\n\n try:\n cls = m.analyzer_class\n except AttributeError:\n continue\n if first:\n first = False\n else:\n print()\n print(prefix + name)\n print(\"\\t%s\" % cls.version)\n print(\"\\t\" + cls.description)",
"def list(args):\n experiments = sorted(os.listdir('./litmus'))\n print_color(\"Available Litmus Chaos Experiments:\\n\\n\")\n if (f\"{args.platform}\" == \"GKE\"):\n i = 1\n for experiment_file in experiments:\n print_color(f\"\\t{i}. {experiment_file.replace('.yaml', '')}\")\n i += 1\n\n if (f\"{args.platform}\" == \"kind\"):\n kind_supported = [\"pod-delete\",\"container-kill\",\"node-cpu-hog\",\"node-memory-hog\"]\n i = 0\n for i in range(0, len(kind_supported)):\n print_color(f\"\\t{i+1}. {kind_supported[i]}\")\n i += 1\n\n if (f\"{args.platform}\" == \"EKS\"):\n i = 1\n for experiment_file in experiments:\n print_color(f\"\\t{i}. {experiment_file.replace('.yaml', '')}\")\n i += 1",
"def _update_frameworks_with_parent_definitions(frameworks: Namespace):\n for name, framework in frameworks:\n parents = _find_all_parents(framework, frameworks)\n for parent in parents:\n framework |= copy.deepcopy(parent)",
"def spiders(args):\n _projects = lib.get_projects(\n args.target, args.project, username=args.username, password=args.password\n )\n for project in _projects:\n project_spiders = lib.get_spiders(\n args.target, project, username=args.username, password=args.password\n )\n if not args.verbose:\n print(f\"{project}:\")\n if project_spiders:\n print(indent(\"\\n\".join(project_spiders), INDENT_PREFIX))\n else:\n print(INDENT_PREFIX + \"No spiders.\")\n elif project_spiders:\n print(\"\\n\".join(f\"{project} {x}\" for x in project_spiders))",
"def list_models(\n architecture: Optional[str] = typer.Option(None, '-n', '--name', help='Model architecture name'),\n framework: Optional[Framework] = typer.Option(None, '-fw', '--framework', case_sensitive=False,\n help='Framework'),\n engine: Optional[Engine] = typer.Option(None, '-e', '--engine', case_sensitive=False, help='Serving engine'),\n version: Optional[int] = typer.Option(None, '-v', '--version', help='Version'),\n list_all: Optional[bool] = typer.Option(\n False,\n '-a', '--all', is_flag=True,\n help='Display queried models. otherwise, only partial result will be shown.'\n ),\n):\n\n payload = remove_dict_null(\n {'architecture': architecture, 'framework': framework, 'engine': engine, 'version': version}\n )\n with requests.get(f'{app_settings.api_v1_prefix}/model', params=payload) as r:\n model_list = r.json()\n model_view([MLModel.parse_obj(model) for model in model_list], list_all=list_all)",
"def get_skills(lang, spec=None):\n\n if lang.lower() == 'java':\n output = get_java(spec)\n if spec is not None:\n print(f\"Our suggestions for {spec} are: \", end='')\n print(*output[:-1], sep=', ', end='')\n print(f\" and {output[-1]}.\\n\")\n else:\n print(\"Our suggestions for are: \", end='')\n print(*output[:-1], sep=', ', end='')\n print(f\" and {output[-1]}.\\n\")\n elif lang.lower() == 'python':\n output = get_python(spec)\n if spec is not None:\n print(f\"Our suggestions for {spec} are: \", end='')\n print(*output[:-1], sep=', ', end='')\n print(f\" and {output[-1]}.\\n\")\n else:\n print(\"Our suggestions for are: \", end='')\n print(*output[:-1], sep=', ', end='')\n print(f\" and {output[-1]}.\\n\")",
"def supportedSoftwares():\n return [\"any\"]",
"def supportedSoftwares():\n return [\"any\"]",
"def get_framework(tjs_url, framework_uri):\n payload = {'service': 'TJS',\n 'version': '1.0.0',\n 'request': 'DescribeFrameworks',\n 'FrameworkURI': framework_uri}\n y = requests.get(tjs_url, params=payload, verify=False)\n xml = etree.fromstring(y.content)\n xml_temp = etree.tostring(xml[0])\n # Quick&dirty removal of namespace prefix\n root = xml_temp.replace(b'ns0:', b'')\n parser = etree.XMLParser(ns_clean=True, encoding='utf-8')\n framework = etree.fromstring(root, parser=parser)\n\n return framework",
"def report_development(request):\n apps = Application.objects.filter(app_status__name__icontains='Development').order_by('acronym', 'release')\n return render_to_response('application/search_results.html',\n {'object_list': apps,\n 'search_suggestions': _search_suggestions(),\n },\n context_instance=RequestContext(request));",
"def order_frameworks_for_reuse(frameworks):\n return sorted(\n filter(lambda i: i['allowDeclarationReuse'] and i['applicationsCloseAtUTC'], frameworks),\n key=lambda i: datetime.strptime(i['applicationsCloseAtUTC'], DATETIME_FORMAT),\n reverse=True\n )",
"def list():\n\n click.secho('List of libraries in SJSU-Dev2\\n', fg='white', bold=True)\n package_registry = GetListOfSJSUDev2Repos()\n library_list = [f'{x : <20}: {package_registry[x]}'\n for x in package_registry if x.startswith('lib')]\n print('\\n'.join(library_list))",
"def ls(filter=None):",
"def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')",
"def _sanitize_definitions(frameworks: Namespace):\n _add_framework_name(frameworks)\n _remove_frameworks_with_unknown_parent(frameworks)\n _remove_self_reference_extensions(frameworks)",
"def test_marwil_compilation(self):\n config = marwil.DEFAULT_CONFIG.copy()\n config[\"num_workers\"] = 0 # Run locally.\n num_iterations = 2\n\n # Test for all frameworks.\n for _ in framework_iterator(config):\n trainer = marwil.MARWILTrainer(config=config, env=\"CartPole-v0\")\n for i in range(num_iterations):\n trainer.train()\n check_compute_action(trainer, include_prev_action_reward=True)\n trainer.stop()",
"def labs(lab_sources, headers, deps):\n return [compile(src, headers) for src in lab_sources]",
"def _load_and_merge_framework_definitions(frameworks_file: Union[str, List[str]], config) -> Namespace:\n log.info(\"Loading frameworks definitions from %s.\", frameworks_file)\n if not isinstance(frameworks_file, list):\n frameworks_file = [frameworks_file]\n\n definitions_by_tag = Namespace()\n for tag in [default_tag]+config.frameworks.tags:\n definitions_by_file = [config_load(_definition_file(file, tag)) for file in frameworks_file]\n if not config.frameworks.allow_duplicates:\n for d1, d2 in itertools.combinations([set(dir(d)) for d in definitions_by_file], 2):\n if d1.intersection(d2) != set():\n raise ValueError(f\"Duplicate entry '{d1.intersection(d2).pop()}' found.\")\n definitions_by_tag[tag] = Namespace.merge(*definitions_by_file)\n\n return definitions_by_tag",
"def run(framework_name, *args):\n # Monkey-patch.\n FRAMEWORKS[framework_name]()\n\n # Run the tests.\n sys.exit(pytest.main(list(args)))"
] |
[
"0.641015",
"0.54722816",
"0.52246255",
"0.5197344",
"0.51471305",
"0.49741188",
"0.4967614",
"0.49433026",
"0.486602",
"0.47791523",
"0.47461623",
"0.47451043",
"0.47303116",
"0.46275637",
"0.46110302",
"0.45978498",
"0.45566553",
"0.45515397",
"0.45515397",
"0.44752362",
"0.44729772",
"0.44705683",
"0.44188303",
"0.43951872",
"0.43908244",
"0.4389423",
"0.4386495",
"0.4362529",
"0.43587482",
"0.43542314"
] |
0.7124517
|
0
|
List scripts for JumpStart, and optionally apply filters to result.
|
def list_jumpstart_scripts( # pylint: disable=redefined-builtin
filter: Union[Operator, str] = Constant(BooleanValues.TRUE),
region: str = JUMPSTART_DEFAULT_REGION_NAME,
) -> List[str]:
if (isinstance(filter, Constant) and filter.resolved_value == BooleanValues.TRUE) or (
isinstance(filter, str) and filter.lower() == BooleanValues.TRUE.lower()
):
return sorted([e.value for e in JumpStartScriptScope])
scripts: Set[str] = set()
for model_id, version in _generate_jumpstart_model_versions(filter=filter, region=region):
scripts.add(JumpStartScriptScope.INFERENCE)
model_specs = accessors.JumpStartModelsAccessor.get_model_specs(
region=region,
model_id=model_id,
version=version,
)
if model_specs.training_supported:
scripts.add(JumpStartScriptScope.TRAINING)
if scripts == {e.value for e in JumpStartScriptScope}:
break
return sorted(list(scripts))
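
# Hedged sketch: with the default always-true filter the function short-circuits and
# returns every scope (typically ["inference", "training"]) without fetching model specs;
# a concrete filter (string syntax assumed, illustrative only) walks matching models and
# checks training support per model spec.
all_scopes = list_jumpstart_scripts()
pytorch_scopes = list_jumpstart_scripts(filter="framework == pytorch")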
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def listscripts(self):\n code, data, listing = self.__send_command(\n \"LISTSCRIPTS\", withcontent=True)\n if code == \"NO\":\n return None\n ret = []\n active_script = None\n for l in listing.splitlines():\n if self.__size_expr.match(l):\n continue\n m = re.match(br'\"([^\"]+)\"\\s*(.+)', l)\n if m is None:\n ret += [l.strip(b'\"').decode(\"utf-8\")]\n continue\n script = m.group(1).decode(\"utf-8\")\n if self.__active_expr.match(m.group(2)):\n active_script = script\n continue\n ret += [script]\n self.__dprint(ret)\n return (active_script, ret)",
"def do_list():\n\n print_debug('Getting scripts to list...')\n scripts = get_scripts()\n print('\\nFound {} scripts:'.format(len(scripts)))\n curdir = None\n for scriptpath in scripts:\n thisdir, thisscript = os.path.split(scriptpath)\n if thisdir != curdir:\n # Directory change, print it.\n print('\\n {}:'.format(thisdir))\n curdir = thisdir\n # Print the filename with indention.\n print(' {}'.format(thisscript))",
"def list_scripts(self):\n from evennia.scripts.models import ScriptDB\n from django.db.models import Q\n from evennia.utils.evtable import EvTable\n\n if self.args and self.args.isdigit():\n scripts = ScriptDB.objects.filter(\n Q(id=self.args) | Q(db_obj__id=self.args) | Q(db_account__id=self.args)\n )\n else:\n scripts = ScriptDB.objects.filter(\n Q(db_key__icontains=self.args)\n | Q(db_obj__db_key__iexact=self.args)\n | Q(db_account__username__iexact=self.args)\n )\n if not scripts:\n self.msg(\"<No scripts>\")\n return\n\n table = EvTable(\n \"{wdbref{n\",\n \"{wobj{n\",\n \"{wkey{n\",\n \"{wintval{n\",\n \"{wnext{n\",\n \"{wtypeclass{n\",\n align=\"r\",\n border=\"cells\",\n width=78,\n )\n for script in scripts:\n nextrep = script.time_until_next_repeat()\n if nextrep is None:\n nextrep = \"PAUS\" if script.db._paused_time else \"--\"\n else:\n nextrep = \"%ss\" % nextrep\n\n def script_obj_str():\n \"\"\"Prettyprint script key/id\"\"\"\n if script.obj:\n return \"%s(#%s)\" % (crop(script.obj.key, width=10), script.obj.id)\n return \"<Global>\"\n\n table.add_row(\n script.id,\n script_obj_str(),\n script.key,\n script.interval if script.interval > 0 else \"--\",\n nextrep,\n script.typeclass_path.rsplit(\".\", 1)[-1],\n )\n self.msg(\"%s\" % table)",
"def list_jumpstart_tasks( # pylint: disable=redefined-builtin\n filter: Union[Operator, str] = Constant(BooleanValues.TRUE),\n region: str = JUMPSTART_DEFAULT_REGION_NAME,\n) -> List[str]:\n\n tasks: Set[str] = set()\n for model_id, _ in _generate_jumpstart_model_versions(filter=filter, region=region):\n _, task, _ = extract_framework_task_model(model_id)\n tasks.add(task)\n return sorted(list(tasks))",
"def ls(filter=None):",
"def script(self):\n return list(\n itertools.chain.from_iterable(story.script for story in self.stories)\n )",
"def before_controller_hook(*args, **kwargs):\n c.javascripts.append(('pokedex', 'pokedex-suggestions'))",
"async def get_all(request: web.Request) -> web.Response:\n try:\n storage = connect.get_storage_async()\n cf_mgr = ConfigurationManager(storage)\n payload = PayloadBuilder().SELECT(\"name\", \"steps\", \"acl\").payload()\n result = await storage.query_tbl_with_payload('control_script', payload)\n scripts = []\n if 'rows' in result:\n if result['rows']:\n # Get all schedules\n schedule_list = await server.Server.scheduler.get_schedules()\n for row in result['rows']:\n # Add configuration to script\n cat_name = \"{}-automation-script\".format(row['name'])\n get_category = await cf_mgr.get_category_all_items(cat_name)\n row['configuration'] = {}\n if get_category is not None:\n row['configuration'] = {\"categoryName\": cat_name}\n row['configuration'].update(get_category)\n # Add schedule to script\n for sch in schedule_list:\n row['schedule'] = {}\n if sch.name == row['name'] and sch.process_name == \"automation_script\":\n row['schedule'] = {\n 'id': str(sch.schedule_id),\n 'name': sch.name,\n 'processName': sch.process_name,\n 'type': Schedule.Type(int(sch.schedule_type)).name,\n 'repeat': 0,\n 'time': 0,\n 'day': sch.day,\n 'exclusive': sch.exclusive,\n 'enabled': sch.enabled\n }\n break\n scripts.append(row)\n except Exception as ex:\n msg = str(ex)\n _logger.error(ex, \"Get Control script failed.\")\n raise web.HTTPInternalServerError(reason=msg, body=json.dumps({\"message\": msg}))\n else:\n return web.json_response({\"scripts\": scripts})",
"def iter_scripts ( self ):\n for script in self.scripts.values():\n if script.is_visible():\n yield script",
"def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')",
"def do_list(self, args):\n if args.option == 'config':\n print(list_config())\n if args.option == 'queries':\n for k,v in list_queries().items():\n print(k, \":\", json.dumps(v, indent=4))\n if args.option == 'jobs':\n update_jobs(CLI_GLOBALS.ENGAGEMENT)\n for k,v in list_jobs().items():\n print(k, \":\", json.dumps(v, indent=4))\n if args.option == 'results':\n for i in list_results():\n print(i)\n if args.option == 'key':\n for k,v in list_key().items():\n print(k, \":\", json.dumps(v, indent=4))\n if args.option == 'engagement':\n print(list_engagement())",
"def browse_main(self, start_index, list_count):\n params = [\n ('startindex', int(start_index)),\n ('listcount', int(list_count)),\n ]\n\n response = self.get(COMMAND_CPM, 'BrowseMain', params)\n\n if not int(response['listcount']):\n return []\n\n return response_list(response['menulist']['menuitem'])",
"def cloudflare_waf_filter_list_command(client: Client, args: Dict[str, Any]) -> CommandResults:\n zone_id = args.get('zone_id', client.zone_id)\n filter_id = args.get('id')\n expression = args.get('expression')\n ref = args.get('ref')\n description = args.get('description')\n paused = arg_to_boolean(args.get('paused')) # type: ignore\n\n page = arg_to_number(args.get('page'))\n page_size = arg_to_number(args.get('page_size'))\n limit = arg_to_number(args.get('limit'))\n\n validate_pagination_arguments(page, page_size, limit)\n\n command_args = {'zone_id': zone_id, 'filter_id': filter_id,\n 'description': description, 'ref': ref, 'paused': paused, 'expression': expression}\n pagination_args = {'limit': limit, 'page': page, 'page_size': page_size}\n response, output, pagination_message = pagination(\n client.cloudflare_waf_filter_list_request, command_args, pagination_args)\n\n for filter in output:\n filter['zone_id'] = zone_id\n\n readable_output = tableToMarkdown(\n name='Filter list',\n metadata=pagination_message,\n t=output,\n headers=['id', 'expression', 'ref', 'description', 'paused'],\n headerTransform=pascalToSpace\n )\n\n return CommandResults(\n readable_output=readable_output,\n outputs_prefix='CloudflareWAF.Filter',\n outputs_key_field='id',\n outputs=output,\n raw_response=response\n )",
"def list_jumpstart_frameworks( # pylint: disable=redefined-builtin\n filter: Union[Operator, str] = Constant(BooleanValues.TRUE),\n region: str = JUMPSTART_DEFAULT_REGION_NAME,\n) -> List[str]:\n\n frameworks: Set[str] = set()\n for model_id, _ in _generate_jumpstart_model_versions(filter=filter, region=region):\n framework, _, _ = extract_framework_task_model(model_id)\n frameworks.add(framework)\n return sorted(list(frameworks))",
"def get_script ( self, name ):\n return list ( self.find_all_by_name ( name ) )",
"def scan ( self ):\n root = self.root\n try:\n filenames = sorted ( os.listdir ( root ) )\n except OSError as oserr:\n if oserr.errno != errno.ENOENT:\n raise\n else:\n HOOK_CLS = self.HOOK_SCRIPT_CLS\n for fname in filenames:\n if self.filename_filter ( fname ):\n fspath = root + os.sep + fname\n if os.path.isfile ( fspath ):\n script_obj = HOOK_CLS ( fspath, filename=fname )\n self.scripts [script_obj.name] = script_obj",
"def list(self, config_path: str, results_filter: Optional[ObjectType]) -> List[str]:\n ...",
"def get_results_from_script(self, script):\n raise NotImplementedError()",
"def runscript(host, script, list_scripts, multi_host, hosts_filter):\n if list_scripts:\n pprint(menu_generator(cs.get_scripts()))\n if host:\n session = cs.init_session(host)\n response = cs.execute_active_responder_command(\"runscript\", f\"-CloudFile={script}\", session)\n pprint(response)\n if multi_host:\n batch_id = cs.new_batch_job(hosts_string=multi_host)\n response = cs.execute_batch_job(\"runscript\", batch_id, f\"-CloudFile={script}\")\n pprint(response)\n if hosts_filter:\n query_filter = hosts_filter.split(\":\")\n batch_id = cs.new_batch_job(filter_parameter=query_filter[0], filter_value=query_filter[1])\n response = cs.execute_batch_job(\"runscript\", batch_id, f\"-CloudFile={script}\")\n pprint(response)",
"def list_scripts(self):\n glob_exp = self.script_dir + \"/*.sh\"\n return [re.sub(r'.*/(.*)\\.sh', r'\\1', x) for x in glob(glob_exp)]",
"def cli_list(ctx):\n\n _list_spiders(ctx)",
"def main_list(args):\n return list_commands(args.directory)",
"def scan_scripts ( self ):\n for event, hook in self.iter_scripts():\n if hook.event is None:\n hook.event = event",
"def find_all ( self, condition, c_args=(), c_kwargs={}, event=None ):\n if event is None:\n for event_name, script in self.iter_scripts():\n if condition ( script, *c_args, **c_kwargs ):\n yield script\n else:\n for script in self.iter_scripts ( event=event, ignore_missing=True ):\n if condition ( script, *c_args, **c_kwargs ):\n yield script",
"def build_scripts_report(**kwargs):\n # All report functions support kwargs to support a unified interface,\n # even if they don't use them.\n _ = kwargs\n jss_connection = JSSConnection.get()\n all_policies = jss_connection.Policy().retrieve_all(\n subset=[\"general\", \"scripts\"])\n all_configs = jss_connection.ComputerConfiguration().retrieve_all()\n all_scripts = [(script.id, script.name) for script in\n jss_connection.Script()]\n if not all_scripts:\n report = Report(\"Script\", [], \"Script Usage Report\", {})\n else:\n policy_xpath = \"scripts/script\"\n config_xpath = \"scripts/script\"\n report = build_container_report(\n [(all_policies, policy_xpath), (all_configs, config_xpath)],\n all_scripts)\n report.get_result_by_name(\"Used\").description = (\n \"All scripts which are installed by policies or imaging \"\n \"configurations.\")\n report.get_result_by_name(\"Unused\").description = (\n \"All scripts which are not installed by any policies or imaging \"\n \"configurations.\")\n\n report.heading = \"Script Usage Report\"\n\n return report",
"def listScripts(self):\n return self._client.listScripts()",
"def do_seeSkipList(self, args):\n sl.showList()",
"def scripts(self) -> localedata.LocaleDataDict:\n return self._data['scripts']",
"def GetActiveRenderScripts():\n scripts = []\n\n # some very contrived examples\n # this just shows the flexibility\n # the point is rendering can be triggered for\n # any plot under any condition\n\n # after M iterations every N iterations plot...\n #if (warp.top.it >= 10) and ((warp.top.it % 8) == 0):\n # scripts.append('particle v')\n # scripts.append('volume phi')\n\n # every N interations plot ...\n #if ((warp.top.it % 5) == 0):\n # scripts.append('max(v(x,y))')\n # scripts.append('num b vs. v')\n\n\n if ((warp.top.it % 50) == 0):\n scripts.append('scatter bv')\n scripts.append('particle v')\n scripts.append('volume phi')\n scripts.append('max(v(x,y))')\n\n return scripts",
"def scan ( self, prune_empty=True ):\n def get_script_name ( filename ):\n \"\"\"Returns the script name of the given filename.\n\n arguments:\n * filename --\n \"\"\"\n prio, sepa, name = filename.partition ( '-' )\n if name:\n try:\n prio_int = int ( prio, 10 )\n except ValueError:\n return filename\n else:\n return name\n else:\n return filename\n # --- end of get_script_name (...) ---\n\n def create_hookscript (\n fspath, filename, root, HOOK_SCRIPT_CLS=self.HOOK_SCRIPT_CLS\n ):\n \"\"\"Creates a new hook script object.\n\n arguments:\n * fspath -- absolute path to the script file\n * filename -- name of the script file\n * root -- directory of the script file\n * HOOK_SCRIPT_CLS -- hook script class.\n Defaults to elf.HOOK_SCRIPT_CLS.\n \"\"\"\n return HOOK_SCRIPT_CLS ( fspath, filename=filename )\n # --- end of create_hookscript (...) ---\n\n new_scripts = roverlay.fsutil.get_fs_dict (\n self.root,\n create_item = create_hookscript,\n dict_cls = self.SUBDIR_CLS,\n dirname_filter = self.dirname_filter,\n filename_filter = self.filename_filter,\n include_root = False,\n prune_empty = prune_empty,\n file_key = get_script_name,\n toplevel_files = False,\n )\n self.scripts.update ( new_scripts )\n self.scan_scripts()"
] |
[
"0.5531334",
"0.5428492",
"0.52650154",
"0.5204197",
"0.5131874",
"0.510841",
"0.5031889",
"0.4875896",
"0.48689678",
"0.48459044",
"0.48197192",
"0.48173922",
"0.48037595",
"0.47818825",
"0.47726372",
"0.4759749",
"0.47552037",
"0.4720627",
"0.46446505",
"0.46405977",
"0.46376982",
"0.4636826",
"0.46306524",
"0.46067056",
"0.46040225",
"0.4596945",
"0.4587825",
"0.45713976",
"0.45678583",
"0.45621267"
] |
0.73214215
|
0
|
Generate models for JumpStart, and optionally apply filters to result.
|
def _generate_jumpstart_model_versions( # pylint: disable=redefined-builtin
filter: Union[Operator, str] = Constant(BooleanValues.TRUE),
region: str = JUMPSTART_DEFAULT_REGION_NAME,
list_incomplete_models: bool = False,
) -> Generator:
if isinstance(filter, str):
filter = Identity(filter)
models_manifest_list = accessors.JumpStartModelsAccessor._get_manifest(region=region)
manifest_keys = set(models_manifest_list[0].__slots__)
all_keys: Set[str] = set()
model_filters: Set[ModelFilter] = set()
for operator in _model_filter_in_operator_generator(filter):
model_filter = operator.unresolved_value
key = model_filter.key
all_keys.add(key)
model_filters.add(model_filter)
for key in all_keys:
if "." in key:
raise NotImplementedError(f"No support for multiple level metadata indexing ('{key}').")
metadata_filter_keys = all_keys - SPECIAL_SUPPORTED_FILTER_KEYS
required_manifest_keys = manifest_keys.intersection(metadata_filter_keys)
possible_spec_keys = metadata_filter_keys - manifest_keys
unrecognized_keys: Set[str] = set()
is_task_filter = SpecialSupportedFilterKeys.TASK in all_keys
is_framework_filter = SpecialSupportedFilterKeys.FRAMEWORK in all_keys
is_supported_model_filter = SpecialSupportedFilterKeys.SUPPORTED_MODEL in all_keys
for model_manifest in models_manifest_list:
copied_filter = copy.deepcopy(filter)
manifest_specs_cached_values: Dict[str, Union[bool, int, float, str, dict, list]] = {}
model_filters_to_resolved_values: Dict[ModelFilter, BooleanValues] = {}
for val in required_manifest_keys:
manifest_specs_cached_values[val] = getattr(model_manifest, val)
if is_task_filter:
manifest_specs_cached_values[
SpecialSupportedFilterKeys.TASK
] = extract_framework_task_model(model_manifest.model_id)[1]
if is_framework_filter:
manifest_specs_cached_values[
SpecialSupportedFilterKeys.FRAMEWORK
] = extract_framework_task_model(model_manifest.model_id)[0]
if is_supported_model_filter:
manifest_specs_cached_values[SpecialSupportedFilterKeys.SUPPORTED_MODEL] = Version(
model_manifest.min_version
) <= Version(get_sagemaker_version())
_populate_model_filters_to_resolved_values(
manifest_specs_cached_values,
model_filters_to_resolved_values,
model_filters,
)
_put_resolved_booleans_into_filter(copied_filter, model_filters_to_resolved_values)
copied_filter.eval()
if copied_filter.resolved_value in [BooleanValues.TRUE, BooleanValues.FALSE]:
if copied_filter.resolved_value == BooleanValues.TRUE:
yield (model_manifest.model_id, model_manifest.version)
continue
if copied_filter.resolved_value == BooleanValues.UNEVALUATED:
raise RuntimeError(
"Filter expression in unevaluated state after using values from model manifest. "
"Model ID and version that is failing: "
f"{(model_manifest.model_id, model_manifest.version)}."
)
copied_filter_2 = copy.deepcopy(filter)
model_specs = accessors.JumpStartModelsAccessor.get_model_specs(
region=region,
model_id=model_manifest.model_id,
version=model_manifest.version,
)
model_specs_keys = set(model_specs.__slots__)
unrecognized_keys -= model_specs_keys
unrecognized_keys_for_single_spec = possible_spec_keys - model_specs_keys
unrecognized_keys.update(unrecognized_keys_for_single_spec)
for val in possible_spec_keys:
if hasattr(model_specs, val):
manifest_specs_cached_values[val] = getattr(model_specs, val)
_populate_model_filters_to_resolved_values(
manifest_specs_cached_values,
model_filters_to_resolved_values,
model_filters,
)
_put_resolved_booleans_into_filter(copied_filter_2, model_filters_to_resolved_values)
copied_filter_2.eval()
if copied_filter_2.resolved_value != BooleanValues.UNEVALUATED:
if copied_filter_2.resolved_value == BooleanValues.TRUE or (
                copied_filter_2.resolved_value == BooleanValues.UNKNOWN and list_incomplete_models
):
yield (model_manifest.model_id, model_manifest.version)
continue
raise RuntimeError(
"Filter expression in unevaluated state after using values from model specs. "
"Model ID and version that is failing: "
f"{(model_manifest.model_id, model_manifest.version)}."
)
if len(unrecognized_keys) > 0:
raise RuntimeError(f"Unrecognized keys: {str(unrecognized_keys)}")
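
# Hedged sketch of consuming the generator above; the filter string is illustrative and
# its syntax is assumed from the special filter keys handled in this function.
for model_id, version in _generate_jumpstart_model_versions(filter="task == ic"):
    print(model_id, version)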
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def build_models():\n train_models()\n return build_response.sent_ok()",
"def build_model():",
"def printModels(cls, options):\n print \"Generating experiment requests...\"\n\n searchParams = _ClientJobUtils.makeSearchJobParamsDict(options=options)",
"def _prepare_models(self):\n if self.freeze_layers is not None:\n self._set_freeze_layers()\n self._load_weight_if_possible()\n print(self.keras_model.summary())\n self.show_configuration()",
"def create_models( self ):",
"def list_jumpstart_scripts( # pylint: disable=redefined-builtin\n filter: Union[Operator, str] = Constant(BooleanValues.TRUE),\n region: str = JUMPSTART_DEFAULT_REGION_NAME,\n) -> List[str]:\n if (isinstance(filter, Constant) and filter.resolved_value == BooleanValues.TRUE) or (\n isinstance(filter, str) and filter.lower() == BooleanValues.TRUE.lower()\n ):\n return sorted([e.value for e in JumpStartScriptScope])\n\n scripts: Set[str] = set()\n for model_id, version in _generate_jumpstart_model_versions(filter=filter, region=region):\n scripts.add(JumpStartScriptScope.INFERENCE)\n model_specs = accessors.JumpStartModelsAccessor.get_model_specs(\n region=region,\n model_id=model_id,\n version=version,\n )\n if model_specs.training_supported:\n scripts.add(JumpStartScriptScope.TRAINING)\n\n if scripts == {e.value for e in JumpStartScriptScope}:\n break\n return sorted(list(scripts))",
"def generate():\n global transformer_top\n assert transformer_top is not None\n global transformer_bottom\n assert transformer_bottom is not None\n global label_encoders_per_modality\n assert label_encoders_per_modality is not None\n global DEVICE\n assert DEVICE is not None\n global partial_sample_model\n assert partial_sample_model is not None\n\n temperature = float(request.args.get('temperature'))\n pitch = int(request.args.get('pitch'))\n instrument_family_str = str(request.args.get('instrument_family_str'))\n\n class_conditioning_top = class_conditioning_bottom = {\n 'pitch': pitch,\n 'instrument_family_str': instrument_family_str\n }\n class_conditioning_tensors_top = make_conditioning_tensors(\n class_conditioning_top,\n label_encoders_per_modality)\n class_conditioning_tensors_bottom = make_conditioning_tensors(\n class_conditioning_bottom,\n label_encoders_per_modality)\n\n batch_size = 1\n top_code = partial_sample_model(\n model=transformer_top,\n batch_size=batch_size,\n codemap_size=transformer_top.shape,\n temperature=temperature,\n class_conditioning=class_conditioning_tensors_top\n )\n bottom_code = partial_sample_model(\n model=transformer_bottom,\n condition=top_code,\n batch_size=batch_size,\n codemap_size=transformer_bottom.shape,\n temperature=temperature,\n class_conditioning=class_conditioning_tensors_bottom,\n )\n\n class_conditioning_top_map = {\n modality: make_matrix(transformer_top.shape,\n value)\n for modality, value in class_conditioning_top.items()\n }\n class_conditioning_bottom_map = {\n modality: make_matrix(transformer_bottom.shape,\n value)\n for modality, value in class_conditioning_bottom.items()\n }\n\n response = make_response(top_code, bottom_code,\n class_conditioning_top_map,\n class_conditioning_bottom_map)\n return response",
"def pre_pipeline(self, results):\n results[\"img_prefix\"] = self.img_prefix\n results[\"seg_prefix\"] = self.seg_prefix\n results[\"proposal_file\"] = self.proposal_file\n results[\"bbox_fields\"] = []\n results[\"mask_fields\"] = []\n results[\"seg_fields\"] = []\n results[\"site_fields\"] = []\n results[\"label_fields\"] = []",
"def generate_set(models, begin=0, start_pos=[256, 192], group_id=-1, length_multiplier=1, plot_map=True):\n extvar[\"begin\"] = begin\n extvar[\"start_pos\"] = start_pos\n extvar[\"length_multiplier\"] = length_multiplier\n extvar[\"next_from_slider_end\"] = GAN_PARAMS[\"next_from_slider_end\"]\n\n note_group_size = GAN_PARAMS[\"note_group_size\"]\n max_epoch = GAN_PARAMS[\"max_epoch\"]\n good_epoch = GAN_PARAMS[\"good_epoch\"] - 1\n g_multiplier = GAN_PARAMS[\"g_epochs\"]\n c_multiplier = GAN_PARAMS[\"c_epochs\"]\n g_batch = GAN_PARAMS[\"g_batch\"]\n g_input_size = GAN_PARAMS[\"g_input_size\"]\n c_true_batch = GAN_PARAMS[\"c_true_batch\"]\n c_false_batch = GAN_PARAMS[\"c_false_batch\"]\n c_randfalse_batch = GAN_PARAMS[\"c_randfalse_batch\"]\n\n reset_model_weights(models)\n set_extvar(models, extvar)\n gmodel, mapping_layer, classifier_model, mmodel, default_weights = models\n\n for i in range(max_epoch):\n\n gnoise = np.random.random((g_batch, g_input_size))\n glabel = [np.zeros((g_batch, note_group_size * 4)),\n np.ones((g_batch,)), np.ones((g_batch,))]\n ginput = conv_input(gnoise, extvar)\n\n # fit mmodel instead of gmodel\n history = mmodel.fit(ginput, glabel, epochs=g_multiplier,\n validation_split=0.2, verbose=0,\n callbacks=[])\n\n pred_noise = np.random.random((c_false_batch, g_input_size))\n pred_input = conv_input(pred_noise, extvar)\n predicted_maps_data, predicted_maps_mapped, _predclass = mmodel.predict(\n pred_input)\n new_false_maps = predicted_maps_mapped\n new_false_labels = np.zeros(c_false_batch)\n\n # random numbers as negative samples\n # special_train_data.shape[2] == 6\n randfalse_maps = np.random.rand(\n c_randfalse_batch, note_group_size, special_train_data.shape[2])\n randfalse_labels = np.zeros(c_randfalse_batch)\n\n rn = np.random.randint(0, special_train_data.shape[0], (c_true_batch,))\n actual_train_data = np.concatenate(\n (new_false_maps, randfalse_maps, special_train_data[rn]), axis=0)\n actual_train_labels = np.concatenate(\n (new_false_labels, randfalse_labels, special_train_labels[rn]), axis=0)\n\n history2 = classifier_model.fit(actual_train_data, actual_train_labels, epochs=c_multiplier,\n validation_split=0.2, verbose=0,\n callbacks=[])\n\n # calculate the losses\n g_loss = np.mean(history.history['loss'])\n c_loss = np.mean(history2.history['loss'])\n print(\"Group {}, Epoch {}: G loss: {} vs. 
C loss: {}\".format(\n group_id, 1+i, g_loss, c_loss))\n\n # delete the history to free memory\n del history, history2\n\n # make a new set of notes\n res_noise = np.random.random((1, g_input_size))\n res_input = conv_input(res_noise, extvar)\n _resgenerated, res_map, _resclass = mmodel.predict(res_input)\n if plot_map:\n plot_current_map(tf.convert_to_tensor(res_map, dtype=tf.float32))\n\n # early return if found a good solution\n # good is (inside the map boundary)\n if i >= good_epoch:\n current_map = res_map\n if inblock_trueness(current_map[:, :, 0:2]).numpy()[0] == 0 and inblock_trueness(current_map[:, :, 4:6]).numpy()[0] == 0:\n # debugging options to check map integrity\n # print(tf.reduce_mean(current_map));\n # print(\"-----MAPLAYER-----\")\n # print(tf.reduce_mean(mapping_layer(conv_input(tf.convert_to_tensor(_resgenerated, dtype=\"float32\"), extvar))));\n # print(\"-----CMWS-----\")\n # print(tf.reduce_mean(construct_map_with_sliders(tf.convert_to_tensor(_resgenerated, dtype=\"float32\"), extvar=mapping_layer.extvar)));\n break\n\n if plot_map:\n for i in range(3): # from our testing, any random input generates nearly the same map\n plot_noise = np.random.random((1, g_input_size))\n plot_input = conv_input(plot_noise, extvar)\n _plotgenerated, plot_mapped, _plotclass = mmodel.predict(\n plot_input)\n plot_current_map(tf.convert_to_tensor(\n plot_mapped, dtype=tf.float32))\n\n # Don't do this in this version. It's for old versions where models are rebuilt each loop\n # del mmodel, mapping_layer;\n\n return res_map.squeeze()",
"def get_models(self, offset=0, sum_=False):\n \n self.models = {}\n self.predict = pd.DataFrame()\n min_value = min(self.conf[\"w_sizes\"].values())\n \n output_width = int(30/self.conf[\"time_step\"])\n \n \n for name in self.conf[\"w_sizes\"].keys():\n \n size = self.conf[\"w_sizes\"][name]\n self.create_train_test(name=name, f_size=size, offset=offset, output_width=output_width, sum_=sum_)\n model, loss, val_loss = get_model(name, self.trainX, self.trainY)\n \n pred = pd.DataFrame({name: model.predict(self.testX).tolist()},\n index=range(size-min_value, len(self.testY)+(size-min_value)))\n \n pred[name] = pred[name].apply(lambda x: np.array(x))\n \n self.predict = pd.concat([self.predict, pred], axis=1)\n \n self.models[name] = model\n \n del model, pred\n \n self.create_train_test(name=\"CNN\", f_size=min_value, offset=offset, output_width=output_width, sum_=sum_)\n self.predict[\"test\"] = self.testY.tolist()\n self.create_train_test(name=\"MLP\", f_size=min_value, offset=offset, output_width=output_width, sum_=sum_)\n self.predict[\"test_dis\"] = self.testY.tolist()\n \n self.predict.dropna(inplace=True)",
"def run(cls, model):\n label = model.label\n print(\"stage1: {label} model: initializing\".format(label=label))\n\n defs_input = model.define_api() # input, original definitions\n\n print(\"stage1: {label} model: analyzing API\".format(label=label))\n\n # Compute any needed derivatives which are not already in the API\n # and for which we have the defs.\n defs = defs_input.copy() # output, final optimized definitions\n for j, key in enumerate(sorted(defs_input.keys(), key=symutil.sortkey), start=1): # sort for progress readability\n name = symutil.derivatives_to_names_in(key) # key is a Symbol or a Derivative\n expr = defs_input[key]\n\n print(\"stage1: ({iteration:d}/{total:d}) {label} model: processing {name}\".format(iteration=j,\n total=len(defs_input.keys()),\n label=label, name=name))\n\n defs[key] = cls.process(expr, defs, model.simplify)\n\n # Delete identically zero definitions\n zero = sy.S.Zero\n defs = {k: v for k, v in defs.items() if v != zero}\n\n print(\"stage1: {label} model: generating code\".format(label=label))\n\n basename = \"mgs_{label}_impl\".format(label=label) # filename without extension\n name_expr_pairs = cls.make_name_expr_pairs(defs)\n generated_code = codegen(name_expr_pairs,\n language=\"f95\",\n project=\"elmer-mgs-galfenol\",\n prefix=basename)\n\n return [(label, filename, cls.finalize(content))\n for filename, content in generated_code]",
"def main(model,pmap):\n\n addPppParams(model)\n\n# addTransportParams(model,pmap)\n\n #translationSources(model)\n\n #addLipidMetabs(model)\n\n return",
"def find_starting_values(self, *args, **kwargs):\n if self.is_group_model:\n self.approximate_map(*args, **kwargs)\n else:\n self.map(*args, **kwargs)",
"def build(self):\n\t\n\t\tprint 'BUILDING HOMOLOGY MODELS'\n\t\tif self.procedure != 'mutator': self.get_targets()\n\t\tself.get_templates()\n\t\tif self.procedure == 'single': self.build_model_single()\n\t\telif self.procedure == 'multi': self.build_model_multi()\n\t\telif self.procedure == 'mutator': self.build_model_mutator()",
"def main():\n season_2019_preds_for_tipresias_2020 = Q(ml_model__name=\"tipresias_2020\") & Q(\n match__start_date_time__year=2019\n )\n season_2020_preds_for_round_1 = Q(match__start_date_time__year=2020) & (\n Q(match__round_number=1)\n )\n\n prediction_records = Prediction.objects.filter(\n season_2019_preds_for_tipresias_2020 | season_2020_preds_for_round_1\n ).values(*_get_fields_for(Prediction))\n\n prediction_dump = [\n _reshape_record_fields(\"prediction\", record) for record in prediction_records\n ]\n dump_filepath = os.path.join(\n settings.BASE_DIR, APP_NAME, \"fixtures\", f\"{date.today()}-prediction-dump.json\",\n )\n\n with open(dump_filepath, \"w\") as file:\n json.dump(prediction_dump, file, indent=2)",
"def get_requests( self ,search_id:int ,filter_on:str ,from_date:datetime ,upto_date:datetime ,from_page:int=1 ,upto_page:int=20 ,step=20 ,loopback:dict=None ) -> list((str,dict,str,dict)):\n reqs = []\n params = self.request_params\n params[ 'searchid' ] = search_id\n f = 0\n if self._internal_ids:\n f += 1\n params[ f'field{f}' ] = 'internalid'\n params[ f'operator{f}'] = 'anyof'\n params[ f'field{f}a' ] = self._internal_ids\n\n if filter_on:\n f += 1\n# params[ f'join{f}' ] = self._join_to\n params[ f'operator{f}'] = 'onorafter'\n params[ f'field{f}a' ] = from_date.strftime(\"%m/%d/%Y %I:%M %p\"), # Date format is NOT negotiable! Value is ib parent object.\n f += 1\n# params[ f'join{f}' ] = self._join_to\n params[ f'field{f}' ] = filter_on\n params[ f'operator{f}'] = 'before'\n params[ f'field{f}a' ] = upto_date.strftime(\"%m/%d/%Y %I:%M %p\"), # Date format is NOT negotiable! Value is ib parent object.\n\n for page_from in range( from_page ,upto_page ,step ):\n param = params.copy()\n param[ 'from_page'] = page_from\n param[ 'upto_page'] = page_from + step\n\n if loopback:\n ctxback = loopback.copy()\n else:\n ctxback = self.get_loopback() # NOTE: Does make a copy.\n ctxback['from_page'] = param['from_page']\n ctxback['upto_page'] = param['upto_page']\n ctxback['ordinal' ] = page_from // step\n\n reqs.append( (HTTP_GET ,self._request_url ,param ,None ,ctxback) )\n\n return reqs",
"def __init__(self, models, neg_sample_generator, lookup):\n super(ModelSelector, self).__init__(None, neg_sample_generator)\n self.models = models\n self.lookup = lookup",
"def build_model_fn(self):",
"def train():\n \n ## check for request data\n if not request.json:\n print(\"ERROR: API (train): did not receive request data\")\n return jsonify(False)\n\n ## set the test flag\n test = False\n if 'mode' in request.json and request.json['mode'] == 'test':\n test = True\n\n print(\"... training model\")\n model = model_train(test=test)\n print(\"... training complete\")\n\n return(jsonify(True))",
"def startExperiment(parameters):\n\n optimizationPath = str(os.path.join(\n parameters[\"path\"], parameters[\"experimentId\"]))\n json_file = str(os.path.join(optimizationPath,\n parameters[\"experimentId\"] + \".json\"))\n if os.path.isfile(json_file):\n Optimizer = importOptimizer()\n optimizer = Optimizer()\n optimizer.resume_optimization(json_file)\n else:\n # Import dataset class and initialize an instance with the chosen dataset\n dataset_class = importDataset()\n dataset = dataset_class()\n dataset_path = str(os.path.join(\n pathDataset, \"preprocessed_datasets\", parameters[\"dataset\"]))\n dataset.load_custom_dataset_from_folder(dataset_path)\n\n model_class = importModel(parameters[\"model\"][\"name\"])\n model = model_class()\n\n model.hyperparameters.update(parameters[\"model\"][\"parameters\"])\n model.partitioning(parameters[\"partitioning\"])\n\n search_space = {}\n\n for key, value in parameters[\"optimization\"][\"search_spaces\"].items():\n if \"low\" in value:\n if isinstance(value[\"low\"], float) or isinstance(value[\"high\"], float):\n search_space[key] = Real(\n low=value[\"low\"], high=value[\"high\"])\n else:\n search_space[key] = Integer(\n low=value[\"low\"], high=value[\"high\"])\n else:\n search_space[key] = Categorical(value)\n\n metric_parameters = parameters[\"optimize_metrics\"][0][\"parameters\"]\n for key in metric_parameters:\n if metric_parameters[key] == \"use dataset texts\":\n metric_parameters[key] = dataset.get_corpus()\n elif metric_parameters[key] == \"use selected dataset\":\n metric_parameters[key] = dataset\n elif os.path.isdir(str(metric_parameters[key])):\n metricDataset = dataset_class()\n metricDataset.load_custom_dataset_from_folder(\n metric_parameters[key])\n metric_parameters[key] = metricDataset.get_corpus()\n\n metric_class = importMetric(parameters[\"optimize_metrics\"][0][\"name\"])\n metric = metric_class(**metric_parameters)\n\n metrics_to_track = []\n for single_metric in parameters[\"track_metrics\"]:\n metric_class = importMetric(single_metric[\"name\"])\n single_metric_parameters = single_metric[\"parameters\"]\n for key in single_metric_parameters:\n if single_metric_parameters[key] == \"use dataset texts\":\n single_metric_parameters[key] = dataset.get_corpus()\n elif single_metric_parameters[key] == \"use selected dataset\":\n single_metric_parameters[key] = dataset\n new_metric = metric_class(**single_metric_parameters)\n metrics_to_track.append(new_metric)\n\n vocabulary_path = str(os.path.join(\n parameters[\"path\"], parameters[\"experimentId\"], \"models\"))\n\n Path(vocabulary_path).mkdir(parents=True, exist_ok=True)\n\n vocabulary_path = str(os.path.join(vocabulary_path, \"vocabulary.json\"))\n\n file = open(vocabulary_path, \"w\")\n json.dump(dict(corpora.Dictionary(dataset.get_corpus())), file)\n file.close()\n\n Optimizer = importOptimizer()\n optimizer = Optimizer()\n optimizer.optimize(model, dataset, metric, search_space, metrics_to_track, random_state=True,\n initial_point_generator=\"random\",\n surrogate_model=parameters[\"optimization\"][\"surrogate_model\"],\n model_runs=parameters[\"optimization\"][\"model_runs\"],\n n_random_starts=parameters[\"optimization\"][\"n_random_starts\"],\n acq_func=parameters[\"optimization\"][\"acquisition_function\"],\n number_of_call=parameters[\"optimization\"][\"iterations\"],\n save_models=True, save_name=parameters[\"experimentId\"], save_path=optimizationPath)",
"def build_model(cls, args):\n base_architecture(args) \n return StyleGANGeneratorPretrain(args)",
"def doParametersOfInterest(self):\n ''' ref : physicsmodel -> rvf\n self.modelBuilder.out.var(\"MH\").setRange(float(self.mHRange[0]),float(self.mHRange[1]))\n self.modelBuilder.out.var(\"MH\").setConstant(False)\n '''\n\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doVar(\"Fvbf[0,0,1]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu,Fvbf\")\n self.modelBuilder.doVar(\"\")\n self.modelBuilder.factory_('expr::ggH_s_func(\"(@0-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_b_func(\"(1-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_sbi_func(\"sqrt(@0)*(1.-@1)\", mu,Fvbf)')\n\n self.modelBuilder.factory_('expr::vbfH_s_func(\"(@0-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"(1-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_sbi_func(\"sqrt(@0)*(@1)\", mu,Fvbf)')",
"def search_model():\n search_condition = request.stream.read()\n try:\n search_condition = json.loads(search_condition if search_condition else \"{}\")\n except Exception:\n raise ParamValueError(\"Json data parse failed.\")\n\n model_lineage_info = _get_lineage_info(\n lineage_type=\"model\",\n search_condition=search_condition\n )\n\n return jsonify(model_lineage_info)",
"def setup_models(self):\n pass",
"def run_models(\n weather_fn: str,\n weather_header_row: int,\n start_date: str,\n start_time: str,\n duration: int,\n selected_models: Dict,\n params_grass: Dict,\n params_mk5: Dict,\n params_vesta: Dict,\n params_vesta_fhr: Dict,\n ) -> Dict:\n start = dt.datetime.now()\n weather_df = get_weather(weather_fn, weather_header_row)\n weather_df = trim_weather(weather_df, start_date, start_time, duration)\n \n\n MODELS = {\n # 'GRASS_Cheney_98': ros_grass_cheney(weather_df, grass_state, grass_curing),\n 'GRASS_Cheney_98': ros_grass_cheney(weather_df, params_grass),\n 'FOREST_Mk5': ros_forest_mk5(weather_df, params_mk5),\n 'FOREST_Vesta': ros_forest_vesta(weather_df, params_vesta),\n 'FOREST_Vesta_FHR': ros_forest_vesta_fhr(weather_df, params_vesta_fhr),\n 'FOREST_Vesta_KT': ros_forest_vesta_kt(weather_df, params_vesta),\n }\n\n model_outputs = {} # model name as key, dataframes as val\n\n models_run = 0\n for key, val in selected_models.items():\n if val:\n model_outputs[key] = MODELS[key]\n models_run += 1\n\n time_elapsed = dt.datetime.now()-start\n print(f'{models_run} models run in {time_elapsed}')\n return model_outputs",
"def list_jumpstart_models( # pylint: disable=redefined-builtin\n filter: Union[Operator, str] = Constant(BooleanValues.TRUE),\n region: str = JUMPSTART_DEFAULT_REGION_NAME,\n list_incomplete_models: bool = False,\n list_old_models: bool = False,\n list_versions: bool = False,\n) -> List[Union[Tuple[str], Tuple[str, str]]]:\n\n model_id_version_dict: Dict[str, List[str]] = dict()\n for model_id, version in _generate_jumpstart_model_versions(\n filter=filter, region=region, list_incomplete_models=list_incomplete_models\n ):\n if model_id not in model_id_version_dict:\n model_id_version_dict[model_id] = list()\n model_id_version_dict[model_id].append(Version(version))\n\n if not list_versions:\n return sorted(list(model_id_version_dict.keys()))\n\n if not list_old_models:\n model_id_version_dict = {\n model_id: set([max(versions)]) for model_id, versions in model_id_version_dict.items()\n }\n\n model_id_version_set: Set[Tuple[str, str]] = set()\n for model_id in model_id_version_dict:\n for version in model_id_version_dict[model_id]:\n model_id_version_set.add((model_id, str(version)))\n\n return sorted(list(model_id_version_set), key=cmp_to_key(_compare_model_version_tuples))",
"def train(self):\n self.emission_model(self.train_data)\n self.transition_model(self.train_data)",
"def run_models(\n self,\n normal=True,\n interrupt=True,\n run_start=None,\n state_builder=\"acis\",\n hrc=False,\n ):\n if hrc:\n loads = hrc_loads\n else:\n loads = test_loads\n if normal and \"normal\" in loads:\n for load in loads[\"normal\"]:\n self.run_model(\n load_week=load,\n run_start=run_start,\n state_builder=state_builder,\n )\n if interrupt and \"interrupt\" in loads:\n for load in loads[\"interrupt\"]:\n self.run_model(\n load_week=load,\n interrupt=True,\n run_start=run_start,\n state_builder=state_builder,\n )",
"def __build_pyramid(models, features):\n return [__build_model_pyramid(name, model, features) for name, model in models]",
"def build_model(self):\n pass"
] |
[
"0.5248083",
"0.5081667",
"0.48777255",
"0.4869814",
"0.48449266",
"0.48347938",
"0.48122686",
"0.47640938",
"0.47469398",
"0.4737511",
"0.4721504",
"0.47007674",
"0.470074",
"0.46955106",
"0.46910864",
"0.46489784",
"0.4646657",
"0.46447644",
"0.4593294",
"0.45908704",
"0.45884347",
"0.4588416",
"0.45787355",
"0.45783898",
"0.45772347",
"0.45722958",
"0.4570754",
"0.45643216",
"0.45601305",
"0.45469412"
] |
0.64913726
|
0
|
Function which normalizes the data. Rows of D are data points. The original variable D is not modified; new variables are allocated from scratch. When external means and stds are not provided, the means and stds are calculated from the data.
|
def normalize(D, ntype=0, means=None, stds=None):
if (not isinstance(D,np.ndarray)) or (len(D.shape) > 2):
raise AssertionError("Input D must be derivative of numpy.ndarray and have less than 3 dimensions.")
(D,initial_shape) = ensure_column(D)
n_rows = D.shape[0]
if means is None:
means = bn.nanmean(D, axis= 0)
tmp = D - np.tile( means, (n_rows,1) ) # temporary result. Data with
# substracted mean
if stds is None:
if (ntype == 0):
stds = bn.nanstd(tmp,axis=0, ddof=1 ) # one degree of freadom as matlab default
elif (ntype == 1):
stds = bn.nanmax(np.abs(tmp), axis=0)
elif (ntype == 2):
stds = np.sqrt( bn.nansum( np.power(tmp,2) , axis = 0) )
elif (ntype == 3):
stds = np.ones( (D.shape[1],) )
else:
raise ValueError("Normalization type %s is unknown" % ntype)
# result = np.dot( tmp , np.diagflat( 1./stds ) )
result = np.divide( tmp, stds )
result = rev_ensure_column(result,initial_shape)
D = rev_ensure_column(D,initial_shape)
return (result,means,stds)
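For readers who want the z-score branch (ntype == 0) in isolation, the sketch below reproduces the same idea with plain numpy, without the bn (bottleneck) and ensure_column helpers the record relies on. It is an illustrative stand-in, not the record's implementation, and the sample array X is made up.

import numpy as np

def zscore_normalize(D, means=None, stds=None):
    # rows of D are data points; means/stds are computed from D when not supplied
    D = np.asarray(D, dtype=float)
    if means is None:
        means = np.nanmean(D, axis=0)
    centered = D - means
    if stds is None:
        stds = np.nanstd(centered, axis=0, ddof=1)  # ddof=1 matches the MATLAB-style default above
    return centered / stds, means, stds

X = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
Xn, mu, sigma = zscore_normalize(X)  # each column now has mean 0 and unit std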
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def denormalize(D, means, stds=None): \n \n (D,initial_shape) = ensure_column(D) \n \n n_rows = D.shape[0] \n \n if stds is not None:\n result = np.multiply( D, stds ) + np.tile( means, (n_rows,1) )\n else:\n result = D + np.tile( means, (n_rows,1) )\n \n result = rev_ensure_column(result,initial_shape)\n D = rev_ensure_column(D,initial_shape) \n \n return result",
"def unNormalizeData(normalizedData, data_mean, data_std, dimensions_to_ignore):\n T = normalizedData.shape[0]\n D = data_mean.shape[0]\n\n origData = np.zeros((T, D), dtype=np.float32)\n dimensions_to_use = []\n for i in range(D):\n if i in dimensions_to_ignore:\n continue\n dimensions_to_use.append(i)\n dimensions_to_use = np.array(dimensions_to_use)\n\n origData[:, dimensions_to_use] = normalizedData\n\n # potentially ineficient, but only done once per experimentdata_conversions\n stdMat = data_std.reshape((1, D))\n stdMat = np.repeat(stdMat, T, axis=0)\n meanMat = data_mean.reshape((1, D))\n meanMat = np.repeat(meanMat, T, axis=0)\n origData = np.multiply(origData, stdMat) + meanMat\n return origData",
"def normalize(dataset):\n return normalize_standard_deviation(normalize_mean(dataset))",
"def normalizeData(meanAndStd, dataset):\n\n for i in range(len(dataset)):\n for j in range(len(dataset[i])-1):\n mean = meanAndStd[j][\"mean\"]\n std = meanAndStd[j][\"std\"]\n dataset[i][j] = (dataset[i][j] - mean)/std",
"def unnormalize_multivariate_data(normed_data, scaling_values):\n data = np.zeros(normed_data.shape, dtype=normed_data.dtype)\n for i in range(normed_data.shape[-1]):\n data[:, :, :, i] = normed_data[:, :, :, i] * scaling_values.loc[i, \"std\"] + scaling_values.loc[i, \"mean\"]\n return data",
"def unstandardize(da: xr.DataArray, mean: xr.DataArray, std: xr.DataArray):\n return (std * da) + mean",
"def denormalize(x, std, mean):\n out = x * std + mean\n return out.clamp(0, 1)",
"def normalize_data(x):\n mvec = x.mean(0)\n stdvec = x.std(axis=0)\n \n return (x - mvec)/stdvec",
"def normalize_data(x):\n mvec = x.mean(0)\n stdvec = x.std(axis=0)\n \n return (x - mvec)/stdvec",
"def normalize(data):\n data = numpy.asmatrix(data)\n std_devs = numpy.std(data, axis=1)\n std_devs[std_devs == 0] = 1 # prevent div by 0\n return (data - numpy.mean(data, axis=1)) / std_devs",
"def normalize_multivariate_data(data, scaling_values=None):\n normed_data = np.zeros(data.shape, dtype=data.dtype)\n scale_cols = [\"mean\", \"std\"]\n if scaling_values is None:\n scaling_values = pd.DataFrame(np.zeros((data.shape[-1], len(scale_cols)), dtype=np.float32),\n columns=scale_cols)\n for i in range(data.shape[-1]):\n scaling_values.loc[i, [\"mean\", \"std\"]] = [data[:, :, :, i].mean(), data[:, :, :, i].std()]\n normed_data[:, :, :, i] = (data[:, :, :, i] - scaling_values.loc[i, \"mean\"]) / scaling_values.loc[i, \"std\"]\n return normed_data, scaling_values",
"def normalize_multivariate_data(data, scaling_values=None):\n normed_data = np.zeros(data.shape, dtype=data.dtype)\n scale_cols = [\"mean\", \"std\"]\n if scaling_values is None:\n scaling_values = pd.DataFrame(np.zeros((data.shape[-1], len(scale_cols)), dtype=np.float32),\n columns=scale_cols)\n for i in range(data.shape[-1]):\n scaling_values.loc[i, [\"mean\", \"std\"]] = [data[:, :, :, i].mean(), data[:, :, :, i].std()]\n normed_data[:, :, :, i] = (data[:, :, :, i] - scaling_values.loc[i, \"mean\"]) / scaling_values.loc[i, \"std\"]\n return normed_data, scaling_values",
"def _normalize(\n ds: xr.Dataset,\n *,\n dim: Sequence[str],\n kind: str = ADDITIVE,\n) -> xr.Dataset:\n if \"norm\" in ds:\n norm = ds.norm\n else:\n norm = ds.data.mean(dim=dim)\n norm.attrs[\"_group_apply_reshape\"] = True\n\n return xr.Dataset(\n dict(data=apply_correction(ds.data, invert(norm, kind), kind), norm=norm)\n )",
"def unnormalize(tensor, mean, std):\n for t, m, s in zip(tensor, mean, std):\n t.mul_(s).add_(m)\n return tensor",
"def normalize_data(data):\n mean = np.mean(data)\n std = np.std(data)\n return (data - mean) / std",
"def normalize(self, mean=None, std=None):\n if mean is None:\n mean = self.mean\n if std is None:\n std = self.std\n\n new = self.copy()\n new.data = (new.data - mean) / std\n return new",
"def _denormalize_joints(x, mean, std):\n assert x.ndim == 3\n assert x.shape == mean.shape == std.shape\n return x * std + mean",
"def normalize(X):\n\tX = X - np.mean(X,axis=1)[:,np.newaxis]\n\tX = X/np.std(X,axis=0)[np.newaxis,:];\n\tX = X - np.mean(X,axis=0)[np.newaxis,:]\n\treturn X",
"def normalize(x):\n MEAN_VALUES = np.array([104, 117, 123])\n means = theano.shared(MEAN_VALUES.astype(\"float32\"))\n return x[:, ::-1, :, :] - means[np.newaxis, :, np.newaxis, np.newaxis]",
"def denormalize(y, close):\n mean = close[:,-1].reshape(y.shape[0],1).repeat(1,y.shape[1])\n std = torch.std(close, dim=1).reshape(y.shape[0],1).repeat(1,y.shape[1])\n return std*y + mean",
"def normalize(\n ds: xr.Dataset,\n *,\n dim: str,\n kind: str = ADDITIVE,\n) -> xr.Dataset:\n\n if \"norm\" in ds:\n norm = invert(ds.norm, kind)\n else:\n norm = invert(ds.data.mean(dim=dim), kind)\n\n return xr.Dataset(dict(data=apply_correction(ds.data, norm, kind)))",
"def normalize_data(self, data):\n self.find_mean_std(data)\n return (data - self._data_mean) / self._data_std",
"def normalize_standard_deviation(dataset):\n return dataset*(1/np.std(dataset))",
"def normalize(data):\n\n p_means = np.mean(data,axis=0)\n p_vars = np.var(data,axis=0)\n\n # subtract dc component\n data = data-p_means\n\n # contrast normalize \n data = data/np.sqrt(p_vars+10) # plus 10 to account for small variances\n \n return data",
"def demean_normalize(one_d_array):\n\n temp_arr = one_d_array - np.nanmean(one_d_array)\n\n return temp_arr/np.nanstd(temp_arr)",
"def denormalize(tensors):\r\n for c in range(3):\r\n tensors[:, c].mul_(std[c]).add_(mean[c])\r\n return torch.clamp(tensors, 0, 255)",
"def set_normalization(self, dataloader):\n mean = 0\n square = 0\n for (data_in, _) in dataloader:\n mean += data_in.mean()\n square += data_in.pow(2).mean()\n\n mean /= len(dataloader)\n square /= len(dataloader)\n std = np.sqrt(square - mean ** 2)\n\n # The input data should be roughly normally distributed after\n # passing through net_fixed.\n self.scale_in.bias.data.fill_(- mean / std)\n self.scale_in.weight.data.fill_(1 / std)",
"def normalize_data(self):\n self.x_mean, self.x_std = du.get_mean_std(self.x_train)\n self.x_train = du.normalize(self.x_train, self.x_mean, self.x_std)\n if self.x_test is not None and self.y_test is not None:\n self.x_test = du.normalize(self.x_test, self.x_mean, self.x_std)\n self.normalized_data = True",
"def denormalize(tensors):\n if len(tensors.shape) < 4:\n for c in range(3):\n tensors[c, ...].mul_(std[c]).add_(mean[c])\n else:\n for c in range(3):\n tensors[:, c].mul_(std[c]).add_(mean[c])\n return torch.clamp(tensors, 0, 255)",
"def normalize(df, mean=None, std=None):\n if mean is None:\n mean = df.mean(0)\n if std is None:\n std = df.std(0)\n\n # ensure we don't divide by zero in columns with zero std (all entries identical)\n try:\n # if df was a 1d array or pd.Series to begin with, std will be a\n # non-subscriptable float, so we handle that case in except\n std[std == 0] = 1\n except TypeError:\n std = std if std > 0 else 1\n\n # return mean and std to be able to revert normalization later\n return (df - mean) / std, [mean, std]"
] |
[
"0.78407174",
"0.6774901",
"0.67637706",
"0.67298394",
"0.6704539",
"0.6699134",
"0.66663307",
"0.6653064",
"0.6653064",
"0.6491102",
"0.6440225",
"0.6440225",
"0.6421771",
"0.6375681",
"0.63658774",
"0.6330248",
"0.6249732",
"0.6249581",
"0.6240338",
"0.6208789",
"0.62005764",
"0.61950326",
"0.61687845",
"0.6119247",
"0.6114693",
"0.6111194",
"0.6111045",
"0.61089605",
"0.6085293",
"0.6082458"
] |
0.71338135
|
1
|
Denormalizes the data using means and stds. The original variable D is not modified; new variables are allocated from scratch.
|
def denormalize(D, means, stds=None):
(D,initial_shape) = ensure_column(D)
n_rows = D.shape[0]
if stds is not None:
result = np.multiply( D, stds ) + np.tile( means, (n_rows,1) )
else:
result = D + np.tile( means, (n_rows,1) )
result = rev_ensure_column(result,initial_shape)
D = rev_ensure_column(D,initial_shape)
return result
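A quick sanity check of the normalize/denormalize pair is a round trip: normalizing and then denormalizing with the returned means and stds should reproduce the input up to floating-point error. The self-contained numpy sketch below mirrors the ntype == 0 branch and its inverse; the sample array X is made up for the illustration.

import numpy as np

X = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
mu, sigma = X.mean(axis=0), X.std(axis=0, ddof=1)
Xn = (X - mu) / sigma                 # what normalize(..., ntype=0) computes
X_back = Xn * sigma + mu              # what denormalize(Xn, mu, sigma) computes
assert np.allclose(X, X_back)         # round trip recovers the original data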
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def normalize(D, ntype=0, means=None, stds=None):\n \n if (not isinstance(D,np.ndarray)) or (len(D.shape) > 2):\n raise AssertionError(\"Input D must be derivative of numpy.ndarray and have less than 3 dimensions.\")\n \n (D,initial_shape) = ensure_column(D)\n \n n_rows = D.shape[0] \n \n if means is None:\n means = bn.nanmean(D, axis= 0) \n \n tmp = D - np.tile( means, (n_rows,1) ) # temporary result. Data with \n # substracted mean \n \n if stds is None:\n if (ntype == 0): \n stds = bn.nanstd(tmp,axis=0, ddof=1 ) # one degree of freadom as matlab default\n \n elif (ntype == 1):\n stds = bn.nanmax(np.abs(tmp), axis=0)\n \n elif (ntype == 2): \n stds = np.sqrt( bn.nansum( np.power(tmp,2) , axis = 0) ) \n \n elif (ntype == 3): \n stds = np.ones( (D.shape[1],) )\n \n else:\n raise ValueError(\"Normalization type %s is unknown\" % ntype)\n \n # result = np.dot( tmp , np.diagflat( 1./stds ) )\n result = np.divide( tmp, stds ) \n \n result = rev_ensure_column(result,initial_shape)\n D = rev_ensure_column(D,initial_shape) \n \n return (result,means,stds)",
"def normalize(dataset):\n return normalize_standard_deviation(normalize_mean(dataset))",
"def unstandardize(da: xr.DataArray, mean: xr.DataArray, std: xr.DataArray):\n return (std * da) + mean",
"def denormalize(x, std, mean):\n out = x * std + mean\n return out.clamp(0, 1)",
"def normalizeData(meanAndStd, dataset):\n\n for i in range(len(dataset)):\n for j in range(len(dataset[i])-1):\n mean = meanAndStd[j][\"mean\"]\n std = meanAndStd[j][\"std\"]\n dataset[i][j] = (dataset[i][j] - mean)/std",
"def normalize_standard_deviation(dataset):\n return dataset*(1/np.std(dataset))",
"def transform(self, data):\n data -= self.mean\n if 0.0 in self.std:\n self.std = np.where(self.std == 0.0, 1.0, self.std)\n data /= self.std\n return data",
"def normalize(self, mean=None, std=None):\n if mean is None:\n mean = self.mean\n if std is None:\n std = self.std\n\n new = self.copy()\n new.data = (new.data - mean) / std\n return new",
"def normalize_data(x):\n mvec = x.mean(0)\n stdvec = x.std(axis=0)\n \n return (x - mvec)/stdvec",
"def normalize_data(x):\n mvec = x.mean(0)\n stdvec = x.std(axis=0)\n \n return (x - mvec)/stdvec",
"def normalize_data(self, data):\n self.find_mean_std(data)\n return (data - self._data_mean) / self._data_std",
"def unnormalize_multivariate_data(normed_data, scaling_values):\n data = np.zeros(normed_data.shape, dtype=normed_data.dtype)\n for i in range(normed_data.shape[-1]):\n data[:, :, :, i] = normed_data[:, :, :, i] * scaling_values.loc[i, \"std\"] + scaling_values.loc[i, \"mean\"]\n return data",
"def normalize_data(data):\n mean = np.mean(data)\n std = np.std(data)\n return (data - mean) / std",
"def normalize_data(self):\n self.x_mean, self.x_std = du.get_mean_std(self.x_train)\n self.x_train = du.normalize(self.x_train, self.x_mean, self.x_std)\n if self.x_test is not None and self.y_test is not None:\n self.x_test = du.normalize(self.x_test, self.x_mean, self.x_std)\n self.normalized_data = True",
"def normalize(data):\n\n p_means = np.mean(data,axis=0)\n p_vars = np.var(data,axis=0)\n\n # subtract dc component\n data = data-p_means\n\n # contrast normalize \n data = data/np.sqrt(p_vars+10) # plus 10 to account for small variances\n \n return data",
"def normalize(X):\n\tX = X - np.mean(X,axis=1)[:,np.newaxis]\n\tX = X/np.std(X,axis=0)[np.newaxis,:];\n\tX = X - np.mean(X,axis=0)[np.newaxis,:]\n\treturn X",
"def normalize(data):\n data = numpy.asmatrix(data)\n std_devs = numpy.std(data, axis=1)\n std_devs[std_devs == 0] = 1 # prevent div by 0\n return (data - numpy.mean(data, axis=1)) / std_devs",
"def unNormalizeData(normalizedData, data_mean, data_std, dimensions_to_ignore):\n T = normalizedData.shape[0]\n D = data_mean.shape[0]\n\n origData = np.zeros((T, D), dtype=np.float32)\n dimensions_to_use = []\n for i in range(D):\n if i in dimensions_to_ignore:\n continue\n dimensions_to_use.append(i)\n dimensions_to_use = np.array(dimensions_to_use)\n\n origData[:, dimensions_to_use] = normalizedData\n\n # potentially ineficient, but only done once per experimentdata_conversions\n stdMat = data_std.reshape((1, D))\n stdMat = np.repeat(stdMat, T, axis=0)\n meanMat = data_mean.reshape((1, D))\n meanMat = np.repeat(meanMat, T, axis=0)\n origData = np.multiply(origData, stdMat) + meanMat\n return origData",
"def _denormalize_joints(x, mean, std):\n assert x.ndim == 3\n assert x.shape == mean.shape == std.shape\n return x * std + mean",
"def standardize(data):\r\n mean = data.mean(axis=0)\r\n std = data.std(axis=0)\r\n return (data - mean)/std",
"def demean_normalize(one_d_array):\n\n temp_arr = one_d_array - np.nanmean(one_d_array)\n\n return temp_arr/np.nanstd(temp_arr)",
"def denormalize(y, close):\n mean = close[:,-1].reshape(y.shape[0],1).repeat(1,y.shape[1])\n std = torch.std(close, dim=1).reshape(y.shape[0],1).repeat(1,y.shape[1])\n return std*y + mean",
"def normalization_stats(completeData):\n data_mean = np.mean(completeData, axis=0)\n data_std = np.std(completeData, axis=0)\n\n dimensions_to_ignore = []\n dimensions_to_use = []\n\n dimensions_to_ignore.extend(list(np.where(data_std < 1e-4)[0]))\n dimensions_to_use.extend(list(np.where(data_std >= 1e-4)[0]))\n\n data_std[dimensions_to_ignore] = 1.0\n\n return data_mean, data_std, dimensions_to_ignore, dimensions_to_use",
"def standardize(X):\n X_std = X\n mean = X.mean(axis=0)\n std = X.std(axis=0)\n for col in range(np.shape(X)[1]):\n if std[col]:\n X_std[:, col] = (X_std[:, col] - mean[col]) / std[col]\n # X_std = (X - X.mean(axis=0)) / X.std(axis=0)\n return X_std",
"def normalize(values):\n return (values - np.mean(values)) / np.std(values)",
"def unscale_data(self, data):\n return (data + self.mean)*self.std",
"def standardize(x, mean=None, std=None): \n \n mean = mean if mean is not None else x.mean(axis=0)\n std = std if std is not None else x.std(axis=0) \n \n return (x - mean) / std, mean, std",
"def unnormalize_deltas(deltas, mean, std):\n return deltas * std + mean",
"def normalize(data):\n data_range = data.max() - data.min()\n #if data_range == 0.:\n # sys.exit(\"data.max() - data.min() == 0. !\")\n if stddev != 0.:\n data = (data - data.min()) / data_range\n\n return data",
"def standardize_data(Xtrain,Xtest):\n \n ### Import modulates\n import numpy as np\n\n Xmean = np.nanmean(Xtrain,axis=0)\n Xstd = np.nanstd(Xtrain,axis=0)\n Xtest = (Xtest - Xmean)/Xstd\n Xtrain = (Xtrain - Xmean)/Xstd\n \n stdVals = (Xmean,Xstd)\n stdVals = stdVals[:]\n \n return Xtrain,Xtest,stdVals"
] |
[
"0.7247053",
"0.7206705",
"0.71938586",
"0.7177489",
"0.7083183",
"0.70006365",
"0.69111687",
"0.68807256",
"0.6869511",
"0.6869511",
"0.68402255",
"0.68382424",
"0.6825936",
"0.67422646",
"0.6687607",
"0.6647537",
"0.66458845",
"0.66431385",
"0.663447",
"0.661706",
"0.6613688",
"0.65412104",
"0.64802384",
"0.6448669",
"0.6447026",
"0.6446652",
"0.6444238",
"0.64432937",
"0.6412662",
"0.6369026"
] |
0.77888536
|
0
|
This function is the reverse of ensure_column. It restores the original dimensions of the vector.
|
def rev_ensure_column(v,initial_shape):
if initial_shape: # check that the tuple is nonempty
v.shape = initial_shape
return v
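The pair ensure_column / rev_ensure_column reshapes a vector into a (len, 1) column and then back to its original shape. The sketch below is a simplified stand-in (the real ensure_column mutates v.shape in place rather than returning a reshaped view); it only illustrates the shape bookkeeping, under the assumption that the input is a 1-D array or a row/column vector.

import numpy as np

def ensure_column_sketch(v):
    # remember the incoming shape, then view the data as a (len, 1) column
    initial_shape = v.shape
    if len(initial_shape) == 1 or (len(initial_shape) == 2 and initial_shape[0] == 1):
        v = v.reshape(-1, 1)
    return v, initial_shape

v = np.arange(4)                        # shape (4,)
col, shape0 = ensure_column_sketch(v)   # shape (4, 1)
restored = col.reshape(shape0)          # what rev_ensure_column does via v.shape = initial_shape
assert restored.shape == (4,)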
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def orthonormalize_inplace(self):\n Q = np.linalg.qr(self.components.T)[0].T\n self.components[...] = Q",
"def resize(self, *args):\n return _ida_hexrays.qvector_lvar_t_resize(self, *args)",
"def _fix_shape(x, n, axis):\n s = list(x.shape)\n if s[axis] > n:\n index = [slice(None)]*len(s)\n index[axis] = slice(0,n)\n x = x[tuple(index)]\n return x, False\n else:\n index = [slice(None)]*len(s)\n index[axis] = slice(0,s[axis])\n s[axis] = n\n z = zeros(s,x.dtype.char)\n z[tuple(index)] = x\n return z, True",
"def ensure_column(v):\n \n initial_shape = v.shape\n if len(initial_shape) == 1: # one dimensional array\n v.shape = (initial_shape[0],1)\n else:\n if (len(initial_shape) == 2) and (initial_shape[0] == 1): # row vector \n v.shape = (initial_shape[1],1) \n\n return v,initial_shape",
"def cvector(rvector):\n \n if(rvector is None):\n return rvector\n elif(isinstance(rvector,list)):\n return np.array([np.array(rvector)]).T\n elif(rvector.shape==(3,1)):\n return rvector\n elif(rvector.shape==(3,)):\n return np.array([np.array(rvector)]).T\n\n print(rvector.shape)\n print('wooooooops')",
"def _frz(a):\n if a.ndim == 0:\n a.shape = (1,)\n return a",
"def __rmul__(self,nb):\n\t\treturn Vect2D(nb*self._vec)",
"def _rmatvec(self, u: np.ndarray) -> np.ndarray:\n return convolve(self.x.conj()[::-1], u, mode='valid', method=self.method)",
"def of_dim(val, args):\r\n args.reverse()\r\n res = val\r\n for d in args:\r\n res = [res] * d\r\n return res",
"def _fix_dimension(self, rot: tf.Tensor) -> tf.Tensor:\n even_n = [i for i in range(0, self.circuit_model.nqubit * 2, 2)]\n odd_n = [i for i in range(1, self.circuit_model.nqubit * 2, 2)]\n perm = even_n + odd_n\n rot = tf.transpose(rot, perm=perm)\n rot = tf.reshape(rot, shape=(2 ** self.circuit_model.nqubit, 2 ** self.circuit_model.nqubit))\n return rot",
"def squeeze(self):\n remove_axes = []\n for axes_ix, axes_value in enumerate(self.coords):\n if len(axes_value) == 1:\n remove_axes.append(axes_ix)\n\n reverse_remove_axes = remove_axes[::-1]\n for index_ix, index_value in enumerate(reverse_remove_axes):\n self.coords.pop(index_value)\n self.dims.pop(index_value)\n self.values = np.squeeze(self.values)",
"def x_nondim(self, x):\n x[0:4] /= self.r_scale\n return x",
"def _fix_shape(self, value):\n for k, v in self.variables.items():\n if len(v.shape) < len(value.shape):\n a, b = self._broadcast(value, v)\n self.variables[k] = np.zeros(a.shape, dtype=b.dtype) + b",
"def _fix_squeeze(self, inputs, new_attr):\n axes = new_attr.get('axis')\n op = mx.sym.split(inputs[0], axis=axes[0], num_outputs=1, squeeze_axis=1)\n for i in axes[1:]:\n op = mx.sym.split(op, axis=i-1, num_outputs=1, squeeze_axis=1)\n return op",
"def _squeeze_output(out):\r\n out = out.squeeze()\r\n if out.ndim == 0:\r\n out = out[()]\r\n return out",
"def update(self):\n for i in range(self.min_y, self.max_y + 1):\n for j in range(self.min_x, self.max_x + 1):\n try:\n DIMENSIONAL_ARRAY[i-1][j-1] = self.lis[i-self.min_y][j-self.min_x]\n except IndexError:\n pass",
"def resize(self, *args):\n return _ida_hexrays.qvector_ccase_t_resize(self, *args)",
"def remove(self):\n for i in range(self.min_y+1, self.max_y+1):\n for j in range(self.min_x+1, self.max_x+1):\n try:\n DIMENSIONAL_ARRAY[i-1][j-1] = ' '\n except IndexError:\n pass",
"def resize(self, *args):\n return _ida_hexrays.qvector_carg_t_resize(self, *args)",
"def conj_inplace(a):",
"def trim(x):\n # make sure we get a 3D stack not 2D slice\n assert (x.shape) != 3\n if x.shape[-1] > 576:\n newx = x[:,32:-32, 32:-32]\n else:\n newx = x\n return newx[np.newaxis,...]",
"def _norm_along_last_axis(x):\n return np.sqrt(np.sum(np.square(x), axis=x.ndim - 1))",
"def squeeze(x):\r\n view = x.dimshuffle([i for i in range(x.ndim)\r\n if not x.broadcastable[i]])\r\n return view",
"def normalizeRows(x):\n\n ### YOUR CODE HERE\n # using l2 norm to normalize\n x = x / np.sqrt(np.sum(x * x, axis=-1, keepdims=True))\n ### END YOUR CODE\n\n return x",
"def __len__(self):\n return self.flatten_dim(self.shape[0])",
"def relu(x: jnp.DeviceArray) -> jnp.DeviceArray:\n return jnp.clip(x, a_min=0)",
"def combine_last_two_dim(self, x):\n old_shape = list(x.size())\n a, b = old_shape[-2:]\n new_shape = old_shape[:-2] + [a * b if a and b else None]\n ret = x.contiguous().view(new_shape)\n return ret",
"def combine_last_two_dim(self, x):\n old_shape = list(x.size())\n a, b = old_shape[-2:]\n new_shape = old_shape[:-2] + [a * b if a and b else None]\n ret = x.contiguous().view(new_shape)\n return ret",
"def combine_last_two_dim(self, x):\n old_shape = list(x.size())\n a, b = old_shape[-2:]\n new_shape = old_shape[:-2] + [a * b if a and b else None]\n ret = x.contiguous().view(new_shape)\n return ret",
"def _ureduce(a, func, **kwargs):\n a = np.asanyarray(a)\n axis = kwargs.get('axis', None)\n if axis is not None:\n keepdim = list(a.shape)\n nd = a.ndim\n axis = _nx.normalize_axis_tuple(axis, nd)\n\n for ax in axis:\n keepdim[ax] = 1\n\n if len(axis) == 1:\n kwargs['axis'] = axis[0]\n else:\n keep = set(range(nd)) - set(axis)\n nkeep = len(keep)\n # swap axis that should not be reduced to front\n for i, s in enumerate(sorted(keep)):\n a = a.swapaxes(i, s)\n # merge reduced axis\n a = a.reshape(a.shape[:nkeep] + (-1,))\n kwargs['axis'] = -1\n keepdim = tuple(keepdim)\n else:\n keepdim = (1,) * a.ndim\n\n r = func(a, **kwargs)\n return r, keepdim"
] |
[
"0.5942021",
"0.5941977",
"0.5830956",
"0.58093876",
"0.5779417",
"0.5717236",
"0.56827855",
"0.56485176",
"0.56421804",
"0.5637114",
"0.56165284",
"0.5542016",
"0.5524448",
"0.54983765",
"0.54652536",
"0.54402447",
"0.541388",
"0.5400957",
"0.5379518",
"0.53780234",
"0.53077716",
"0.52953035",
"0.5266331",
"0.5251257",
"0.52508724",
"0.5245662",
"0.5222179",
"0.5222179",
"0.5222179",
"0.520774"
] |
0.6151537
|
0
|
Function determines the dimension (length) of a vector. E.g. if the vector has shape (1,len), (len,1) or (len,), it returns len.
|
def vector_len( vector ):
if not isinstance(vector, np.ndarray ):
return len(vector)
else:
shape = vector.shape # shape is a tuple
sl = len(shape)
if sl == 0:
return 0
elif sl == 1:
return shape[0]
else:
non_one_dims = [ s for s in shape if s > 1 ]
non_one_dims_len = len(non_one_dims)
if non_one_dims_len > 1:
raise ValueError("Function vector_len: Not a vector provided, shape : %s", shape)
elif non_one_dims_len == 0:
return 1
else:
return non_one_dims[0]
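The behaviour described in the docstring can be checked directly. The calls below assume vector_len and numpy (imported as np) are in scope; they are illustrative usage only.

import numpy as np

assert vector_len(np.ones((1, 7))) == 7   # row vector
assert vector_len(np.ones((7, 1))) == 7   # column vector
assert vector_len(np.ones(7)) == 7        # flat array
assert vector_len([1, 2, 3]) == 3         # plain list falls back to len()
# vector_len(np.ones((2, 3)))             # would raise ValueError: not a vector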
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_vector_length(v):\r\n v = as_tensor_variable(v)\r\n if v.ndim != 1:\r\n raise TypeError('argument must be symbolic vector')\r\n if v.type.broadcastable[0]:\r\n return 1\r\n if isinstance(v, gof.Constant) and v.type.ndim == 1:\r\n return len(v.data)\r\n if v.owner and isinstance(v.owner.op, theano.tensor.opt.MakeVector):\r\n return len(v.owner.inputs)\r\n if v.owner and isinstance(v.owner.op, Shape):\r\n return v.owner.inputs[0].type.ndim\r\n raise ValueError(\"length not known\")",
"def dimensionality(self):\n if self.vector.shape is ():\n return 0\n if len(self.vector.shape) is 1:\n return 1\n _, dim = self.vector.shape\n return dim",
"def get_vector_length(vector):\n return np.linalg.norm(vector)",
"def dim_from_concatenated_vector(v):\n return int(np.sqrt(v.shape[0] - 1))",
"def dims(x):\n return len(x.shape)",
"def ndims(x):\n return len(x.get_shape())",
"def ndim(x):\n dims = x.get_shape()._dims\n if dims is not None:\n return len(dims)\n return None",
"def veclength(vec):\n vec = np.array(vec, copy=False).reshape(-1, 3)\n return np.sqrt(np.einsum('ij,ij->i', vec, vec))",
"def length(vec):\n\n return math.sqrt(dotproduct(vec, vec))",
"def _getdim(x):\n \n if np.ndim(x) > 1:\n \n dim = x.shape[-1]\n \n else:\n \n dim = 0\n \n return dim",
"def vec_len(x):\r\n \r\n length = math.sqrt(x[0]**2 + x[1]**2)\r\n return length",
"def length(vec):\n return np.linalg.norm(vec)",
"def length_vector(v):\n return sqrt(dot_vectors(v, v))",
"def length(vector):\n a, b, c = vector\n return math.sqrt(a ** 2 + b ** 2 + c ** 2)",
"def length(vec):\n return vec.dot(vec)**.5",
"def length_vector_numpy(vector):\n return np.linalg.norm(vector)",
"def length(a):\n return max(a.shape)",
"def _get_vector_size(self):\n if len(self):\n return len(self.values()[0])\n else:\n return 0",
"def get_dimension_length(self):\n pass",
"def vectorLength(v1, v2=None):\n if v2 is None:\n v2 = v1\n return math.sqrt(dotProduct(v1, v2))",
"def ndim(a):\n if isinstance(a, np.ndarray):\n return a.ndim\n else:\n return K.ndim(a)",
"def tensor_length(data):\n if hasattr(data, \"shape\"):\n return data.shape[0]\n else:\n try:\n length = len(data)\n except TypeError:\n length = 0\n return length",
"def length_func(list_or_tensor):\n if type(list_or_tensor) == list:\n return len(list_or_tensor)\n return list_or_tensor.shape[0]",
"def dims(self):\n return self.v.dims() # TODO: check (empty? etc)\n #return self.t.shape # TODO: check (empty? etc)\n # TODO: convert to tuple? here / in varset?",
"def _call_ndim(vecObj):\n res = vecObj.ndim\n return res",
"def size(v=(0, 0)):\n return _check_two_scalars('size', v)",
"def length(v):\n return math.sqrt(v[0]**2 + v[1]**2)",
"def ndim(tensor):\n raise NotImplementedError",
"def dimension(self):\r\n a = 0\r\n for x in self.faces():\r\n if (len(x) > a):\r\n a = len(x) \r\n return a-1",
"def ndim(self):\n # type: () -> int\n return len(self.shape)"
] |
[
"0.78375614",
"0.76818323",
"0.7277682",
"0.71983206",
"0.7103266",
"0.6978397",
"0.6914042",
"0.68944025",
"0.6851971",
"0.6821197",
"0.6734348",
"0.67316663",
"0.6725683",
"0.66860473",
"0.66572464",
"0.6627746",
"0.6585282",
"0.65730613",
"0.6547987",
"0.6525411",
"0.6513237",
"0.6502853",
"0.6497109",
"0.648924",
"0.6467384",
"0.6382331",
"0.63673997",
"0.63662624",
"0.6317114",
"0.6291945"
] |
0.8467635
|
0
|
View lists all activities in a recipe
|
def view_activities(recipe_id):
if 'name' in session:
recipe = PLAN.get_name_from_id(recipe_id)
activities = PLAN.users[session['name']].view_recipe_activities(recipe)
return render_template('activities.html',
activities=activities,
recipe=recipe,
recipe_id=recipe_id)
return redirect(url_for('log_in'))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def show_activities(self): \n database = Database('data/database.db')\n activities = database.read_activities()\n view = Template(filename=\"view/template.html\", lookup=lookup)\n \n \n return view.render(\n rows = [[item.number, item.name] for item in activities],\n pageTitle = \"Activités\",\n tableTitle = \"Liste de toutes les activités\",\n ths = [\"Numéro\", \"Nom\"]\n )",
"def view_recipe(request, recipe, **_kwargs):\n return render(request, \"deployments/disp_recipe.html\", {\"recipe\": recipe})",
"def show_recipe_results():\n if not g.user:\n flash(\"Please login to view.\",\"warning\")\n return redirect('/login')\n\n data = search_recipes(request)\n recipes = data['results']\n print(recipes)\n \n return render_template('recipes/show.html',recipes=recipes)",
"def recipes_list(request):\n recipes = Recipe.objects.all().order_by('published_date')\n\n # prawidlowy sposob zbierania URLa - object.image.url\n # recipe = recipes[0]\n # print(\"path: \" + recipe.image.url)\n\n paginator = Paginator(recipes, INITIAL_PAGE_SIZE)\n page = paginator.page(1)\n\n context = {\n 'page': page,\n 'display_likes': True,\n }\n\n return render(request, 'recipes_index.html', context)",
"def view_recipes():\n if 'name' in session:\n recipeitem = PLAN.users[session['name']].view_recipes()\n return render_template('recipes.html', recipeitem=recipeitem)\n return redirect(url_for('log_in'))",
"def recipes():\n recipes = mongo.db.recipes.find()\n return render_template(\"recipes/list.html\", recipes=recipes)",
"def list_recipes(environ, start_response):\n return list_entities(environ, start_response, 'list_recipes')",
"def __ui_list_all_activities(self):\n activities_list = self.__activity_service.service_get_list_of_activities()\n if len(activities_list) == 0:\n print(\"The list of activities is empty!\\n\")\n else:\n for activity in activities_list:\n print(activity)\n print(\"\")",
"def show_recipes():\r\n args = request.args.get\r\n\r\n # Read page arguments and set defaults if None.\r\n sort_type = args(str(\"sort\")) or \"views\"\r\n page_args = int(args(\"page\")) if args(\"page\") is not None else 1\r\n order_type = int(args(\"order\")) if args(\"order\") else -1\r\n\r\n # Find all recipes and display based on arguments.\r\n sort = coll_recipes.find().skip((page_args * 8) - 8).limit(8).sort(\r\n [(sort_type, order_type)])\r\n\r\n # Pagination\r\n pages, previous_page, next_page, count, total_recipes, recipe_count = Helpers.pagination(\r\n sort, page_args, coll_recipes)\r\n\r\n return render_template(\r\n \"showrecipes.html\",\r\n recipes=sort,\r\n recipe_count=recipe_count,\r\n total_recipes=total_recipes,\r\n count=count,\r\n pages=pages,\r\n page=page_args,\r\n previous_page=previous_page,\r\n next_page=next_page)",
"def get_activities():\n pass",
"def show_entries():\n db = get_db()\n cur = db.execute('select id, title, ingredients, steps, tags, \\\n url from entries order by id asc')\n entries = cur.fetchall()\n return render_template('show_entries.html', entries=entries)",
"def all_activity(self):\n\t\tself.db = DB()\n\t\tactivity_all = self.db.select_all_from(\"activity\")\n\t\ttmpl = lookup.get_template(\"activity.html\")\n\t\treturn (tmpl.render(activity=activity_all))",
"def activities(self):\r\n return resources.Activities(self)",
"def list(request):\n return EntryView.__index(request)",
"def recipes():\n # pylint: disable=redefined-outer-name\n recipes = list(mongo.db.recipes.find().sort('_id', -1))\n return render_template(\n \"recipes.html\", page_title=\"All Recipes\", recipes=recipes)",
"def showRecipes(category_id):\n recipes = session.query(Recipe).filter_by(\n category_id=category_id).order_by(asc(Recipe.name))\n return render_template('recipes.html', recipes=recipes)",
"def recipes():\n recipes = mongo.db.recipes.find()\n return render_template(\"recipes.html\", recipes=recipes)",
"def index():\n\n recipes = db.session.query(Recipe).order_by(Recipe.id.desc()) \\\n .limit(8).all()\n\n categories = db.session.query(Category).limit(6).all()\n return render_template(\n 'general/index.html', recipes=recipes, categories=categories\n )",
"def recipe(id):\n\n selected_recipe = mongo.db.recipes.find_one({'_id': ObjectId(id)})\n\n # Using create list function to display these sections easier\n display_method = create_list(selected_recipe[\"method\"])\n display_ingredients = create_list(selected_recipe[\"ingredients\"])\n display_equipment = create_list(selected_recipe[\"equipment\"])\n\n show_ad = make_comparison(ad_equipment, display_equipment)\n\n return render_template('view_recipe.html', recipe=selected_recipe,\n title='Recipe', display_method=display_method,\n ad_equipment=ad_equipment,\n display_ingredients=display_ingredients,\n display_equipment=display_equipment,\n show_ad=show_ad)",
"def show_recipe_details(id):\n if not g.user:\n flash(\"Please login to view.\",\"warning\")\n return redirect('/login')\n\n \n recipe = get_recipe(id)\n print(recipe['instructions'])\n \n return render_template(\"recipes/detail.html\", recipe=recipe)",
"def index():\n\n return render_template('index.html', recipes=mongo.db.recipes.find())",
"def index(self):\n\n\t\tself.db = DB()\n\t\tactivityTuple = self.db.select_all_from(\"activity\")[1]\n\t\ttmpl = lookup.get_template(\"index.html\")\n\t\treturn (tmpl.render(activity=activityTuple))",
"def activities(self):\r\n return activities.Activities(self)",
"def get_activity_list(self):\n return self._request_activity_list(self.athlete)",
"def show_fav_recipes():\n if not g.user:\n flash(\"Please login to view.\",\"warning\")\n return redirect('/login')\n \n data = search_recipes(request) \n favorite_list = [l.id for l in g.user.recipes]\n favorites = [f['id'] for f in data['results'] if f['id'] in favorite_list]\n \n\n return render_template(\"favs/show.html\", favorites=favorites)",
"def viewVocab(self): \n mapping = []\n views = registration.getViews(IBrowserRequest)\n for view in views:\n if view.name and self.getRenderableView(view.name):\n mapping.append((view.name, view.name))\n return atapi.DisplayList(mapping)",
"def fridge_recipes(request):\n\n user = request.user\n fridge = Fridge.objects.get_or_create(user=user)[0]\n fridge_ingredients = fridge.ingredients.all()\n ingredient_names = [ingredient.name for ingredient in fridge_ingredients]\n recipes = recipes_containing(ingredient_names, fridge=fridge)\n\n content = {\n 'ingredients': ingredient_names,\n 'recipes': recipes,\n }\n\n return render(request, 'fridge/fridge_recipes.html', content)",
"def view_index(\n request: HttpRequest,\n workflow: Optional[Workflow] = None,\n) -> HttpResponse:\n # Get the views\n views = workflow.views.values(\n 'id',\n 'name',\n 'description_text',\n 'modified')\n\n # Build the table only if there is anything to show (prevent empty table)\n return render(\n request,\n 'table/view_index.html',\n {\n 'query_builder_ops': workflow.get_query_builder_ops_as_str(),\n 'table': ViewTable(views, orderable=False),\n },\n )",
"def activities(self):\r\n return v3.Activities(self)",
"def full_list_of_movies():\n\n movie_list = Movie.query.order_by(Movie.title).all()\n return render_template('movie_list.html', movie_list=movie_list)"
] |
[
"0.7098218",
"0.64416516",
"0.6359117",
"0.6214234",
"0.62102234",
"0.60915476",
"0.60626256",
"0.5996576",
"0.5990855",
"0.5967303",
"0.5947807",
"0.5926748",
"0.5922374",
"0.58748513",
"0.5871159",
"0.58557916",
"0.5843883",
"0.58284855",
"0.5797355",
"0.57198846",
"0.56897914",
"0.56305057",
"0.55806524",
"0.55648345",
"0.5541615",
"0.5540062",
"0.5452418",
"0.54517776",
"0.53793156",
"0.5370392"
] |
0.7231245
|
0
|
Extracts the name from a single sentence of the 'database' input_string. The name is the first word in the string.
|
def extract_name(sentence):
pos = sentence.find(' ')
return sentence[:pos]
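One edge case worth noting: when the sentence contains no space, str.find returns -1, so the slice sentence[:pos] silently drops the last character instead of returning the whole word. A defensive variant (illustrative only, not part of the record) is:

def extract_name_safe(sentence):
    # split on the first run of whitespace; a single-word sentence is returned unchanged
    parts = sentence.split(None, 1)
    return parts[0] if parts else ""

assert extract_name_safe("Alice lives in Paris") == "Alice"
assert extract_name_safe("Alice") == "Alice"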
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def match_name(sentence):\n if \"WIFE\" in sentence:\n return \"WIFE\"\n elif \"MAHAVIR\" in sentence or \"FATHER\" in sentence or \"SINGH\" in sentence: \n return \"MAHAVIR\"\n elif \"TEENAGER\" in sentence:\n return \"TEENAGER\"\n elif \"GIRL\" in sentence or \"WOMAN\" in sentence: \n return \"WOMAN\"\n elif \"GUY\" in sentence or \"MAN\" in sentence or \"BROTHER\" in sentence: \n return \"MAN\"\n elif \"COACH\" in sentence:\n return \"COACH\"\n elif \"COMMENT\" in sentence:\n return \"COMMENTATOR\"\n elif sentence[-2:] == \"ER\" or sentence[-3:] == \"IAN\" or sentence[-2:] == \"OR\" or sentence[-1:] == \"D\":\n return \"MISC\"\n \n return sentence",
"def extract_first_name(s):\n clean_name = re.sub(r'\\s+', r' ', s).split()\n\n for name in clean_name:\n if len(name) > 1:\n return name.title()\n else:\n pass\n\n return None",
"def get_name():\n return input(\"Enter Name: \").capitalize()",
"def getName():\n\n tcflush(sys.stdin, TCIFLUSH)\n name = input(\" You say:\\n \")\n updateNameDatabase(name)\n return name",
"def get_name(tablename):\n\n return tablename[tablename.find(\"_\") + 1:].replace(\"_\", \" \").capitalize()",
"async def get_demon(self, ctx, game: str, name: str):\n\n name = await self.nearest_spelling(ctx, name.lower(), self.names[game])\n if name is not None:\n name = \" \".join([i.capitalize() for i in name.split()])\n return name",
"def simplifyname(string): # {{{2\n global _cachednames\n if _cachednames.has_key(string):\n return _cachednames[string]\n else:\n result = string.lower()\n result = re.sub('^the\\s+', '', result)\n result = re.sub(',\\s+the$', '', result)\n result = re.sub('[^a-z0-9 -]', '', result)\n result = re.sub('\\s+', ' ', result)\n _cachednames[string] = result\n return result",
"def query_business_name():\n print()\n business_name = input(\n 'Please enter full business name or type \"back\" or \"quit\": ')\n print()\n if business_name == \"quit\":\n print(\"Goodbye!\")\n sys.exit()\n if business_name == \"back\":\n return \"back\"\n\n business_object = business_col.find_one({\"name\": business_name})\n if business_object is None:\n print(\"No business found with given name.\")\n\n return business_object",
"def get_db_object_name(name):\n\n # Default output list\n out_list = ['lemma', 'dbo', 'udb_plt']\n\n # Replace the elements of out_list if and only if there exists a\n # replacement for it\n parts = name.split('.')\n for (i, j) in enumerate(range(len(parts) - 1, -1, -1)):\n if parts[j]:\n out_list[(len(out_list) - 1) - i] = parts[j]\n return tuple(out_list)",
"def parse_modelname(string,labellist,ensemblesfolder):\n ## We need to account for two different prefixes now. \n split_ens_temp = ensemble_template.split(\"{f}\")\n template_prefix = split_ens_temp[0]\n\n template_seedind = split_ens_temp[1].split(\"{s}\")[0]\n if string.startswith(template_prefix): ## TODO or other prefix\n frames,seedext = string.split(template_prefix)[-1].split(template_seedind)\n seed=seedext.split(\"results.json\")[0]\n return {\"name\":string,\n \"frames\":int(frames),\n \"seed\":int(seed),\n \"template\":ensemble_template,\n \"outliers\":determine_outliers(labellist,int(seed),int(frames)),\n }",
"def get_initials(fullname):\r\n # TODO your code here\r\n #Ozzie Smith OS\r\n #Bonnie blair BB\r\n #George G\r\n #Daniel Day Lewis DDL\r\n #xs = (fullname)\r\n initial = \"\"\r\n split_name = fullname.split()\r\n print(split_name)\r\n for i in split_name:\r\n initial = initial + i[0].upper()\r\n return initial",
"def parse_name(self, transcript: str) -> None:\n name_match = re.match(\n r\".*(?=Unofficial\\ UNDERGRADUATE\\ ACADEMIC\\ RECORD)\", transcript, RE_OPT\n )\n if not name_match:\n raise ValueError(\"Name not found\")\n self.name = name_match.group(0).strip()",
"def finddocname(string):\r\n for x in doclist:\r\n foundvar = f\"-->Doc name = {x.title()}\"\r\n if x in string:\r\n print(foundvar)\r\n break",
"def get_table_name(query: str) -> str:\n find_table_name_from_query = r'(FROM `)(\\w+.\\w+)(`)'\n search_result = re.search(find_table_name_from_query, query)\n if search_result:\n return search_result.group(2)\n return \"Unrecognized table name\"",
"def process_name(device_index, mp3_filename, record):\n\n text = \"May I please ask your name?\"\n name = process_extract_name_organization_details(device_index, mp3_filename, text, record)\n\n if name is None:\n text = process_name_organization(device_index, mp3_filename, record)\n else:\n text = \"All right, and what company are you with?\"\n text = process_organization(device_index, mp3_filename, record, text, name)\n\n return text",
"def database_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"database_name\")",
"def _extract_db_name_from_db_path(self):\n return os.path.basename(self.db_path).rsplit('.', 1)[0]",
"def safe_column_name(string):\n string = unidecode(string.replace(' ', '_').lower())\n return re.sub(r'[^0-9a-z_]','', string)",
"def format_title(input_str):\n title_mapping = {'PD_whole_tree': 'Phylogenetic Diversity'}\n\n if input_str in title_mapping:\n return title_mapping[input_str]\n else:\n return ' '.join(map(lambda e: e[0].upper() + e[1:],\n input_str.split('_')))",
"def titlecase(input_str):\n return \"\".join([x.title() for x in input_str.split('_')])",
"def main(self, regex_string):\n sql_sen = regex_string[0][0]\n reg = \"\\$\\w+\"\n if re.search(reg, sql_sen, re.I):\n\n p = re.compile(reg)\n match = p.findall(sql_sen)\n return match\n return None",
"def database_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"database_name\")",
"def input_name(inputname):\n\tpath= (os.path.abspath(inputname))\n\treturn (path.split(\"/\")[-1].split(\".\")[0])",
"def searchByName(database):\n firstname=str(input(\"What is his first name :\"))\n usr,find=getByName(database,firstname)\n if find:\n print(usr)",
"def getName(sentence): #Jasper, Suraj\n userWords = sentence.lower()\n userWords = userWords.split()\n \n # ways of introduction:\n # \"Hello, my name is ___\"\n # \"Hi, I'm ____\"\n # \"Howdy, I'm called ____\"\n # Order: Greeting -> pronoun -> Name -> question (optional)\n # eg. \"Hello, I'm Jasper. How are you?\"\n\n if (userWords[0] in greetings): #the added code that stops iam from being added into the name if 2 greeting are added\n userWords.pop(0) #pop and not .remove because\n \n \n if (userWords[0] == \"i\" and len(userWords) > 1):\n if (userWords[1] in [\"m\",\"am\"]):\n userWords.insert(0, \" \".join(userWords[0:2]))\n userWords.pop(2)\n userWords.pop(1)\n \n userName = \"\"\n for userWord in userWords: #iterate throught the user's words\n foundWord = False #sets True when there's a similar word in the other list\n for word in greetings: #iterates and compares the chosen word from the user's list of words to the words list\n if userWord == word and foundWord == False:\n foundWord = True\n if foundWord == False:\n userName = userName + userWord + \" \"\n return userName #this is the found name",
"def get_name(descr: str) -> str:\n return descr.split()[0]",
"def _extract_name(line: str) -> str:\n tokens = line[19:-2].split(\" {\")\n name = tokens[0]\n return name",
"def split_name(fullname):",
"def get_data_name(name):\n if name.find('Data') == 0:\n name = name[4:]\n name_ = ''\n for i, char in enumerate(name):\n if char.isupper() and i > 0:\n name_ += '_'\n name_ += char.lower()\n return name_",
"def normalize(s):\n s = str(s.strip())\n patt = re.compile(r'^\\?\\w+$')\n if patt.match(s):\n name = s[1:]\n return Variable(name)\n return s"
] |
[
"0.56790894",
"0.5624334",
"0.5608065",
"0.5596785",
"0.55544513",
"0.5494415",
"0.54825205",
"0.5446229",
"0.5429508",
"0.53938454",
"0.5371457",
"0.5356211",
"0.5354216",
"0.53345764",
"0.53151315",
"0.5298442",
"0.5283765",
"0.52683014",
"0.5237332",
"0.52276117",
"0.5225467",
"0.5215087",
"0.5202172",
"0.5198761",
"0.5192456",
"0.5185291",
"0.5172108",
"0.51642305",
"0.51552343",
"0.51354706"
] |
0.66267174
|
0
|
Extracts the comma separated connection or game data from a sentence of the 'database' input_string. Starting from a given substring to the end of the string. To retrieve connections, start_str='to'. To retrieve games, start_str='play'
|
def extract_data(sentence, start_str):
pos = sentence.find(start_str)
if pos == -1:
return None
if pos + len(start_str) == len(sentence) - 1:
return []
items = sentence[pos + (len(start_str) + 1):].split(',')
return list(map(lambda x: x.lstrip(), items))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_match_rows(self, string):\n # Strings to build the query\n fieldlist = 'w0.urlid'\n tablelist = ''\n clauselist = ''\n wordids = []\n\n # Split the words by spaces\n words = string.split(' ')\n tablenumber = 0\n\n for word in words:\n # Get the word ID\n wordrow = self.con.execute(\"select rowid from wordlist where word='%s'\"\n % word).fetchone()\n # Builds the query\n if wordrow is not None:\n wordid = wordrow[0]\n wordids.append(wordid)\n if tablenumber > 0:\n tablelist += ','\n clauselist += ' and '\n clauselist += 'w%d.urlid=w%d.urlid and ' % (tablenumber-1, tablenumber)\n fieldlist += ',w%d.location' % tablenumber\n tablelist += 'wordlocation w%d' % tablenumber\n clauselist += 'w%d.wordid=%d' % (tablenumber, wordid)\n tablenumber += 1\n\n if not tablelist:\n print(\"No matches found\")\n return None\n\n # Create the query from the separate parts\n fullquery = 'select %s from %s where %s' % (fieldlist, tablelist, clauselist)\n print(fullquery)\n cur = self.con.execute(fullquery)\n rows = [row for row in cur]\n return rows, wordids",
"def query(self, string):\n rows, wordids = self.get_match_rows(string)\n scores = self.get_scored_list(rows, wordids)\n rankedscores = [(score, url) for (url, score) in scores.items()]\n rankedscores.sort()\n rankedscores.reverse()\n for (score, urlid) in rankedscores[0:10]:\n print('%f\\t%s' % (score, self.get_url_name(urlid)))\n return wordids, [r[1] for r in rankedscores[0:10]]",
"def seperate_data(string_log):\n seperated_data = []\n ending_len = len(STOP_DATA)+1\n starting_len = len(START_DATA)+1\n\n moving_pointer = 0\n chunk_end = 0\n while 1:\n moving_pointer = string_log.find(START_DATA, moving_pointer)\n chunk_end = string_log.find(STOP_DATA, moving_pointer)\n\n if moving_pointer == -1 or chunk_end == -1:\n break\n\n chunk_end += ending_len\n\n seperated_data.append(string_log[moving_pointer+starting_len+1:\n chunk_end-ending_len-1])\n moving_pointer = chunk_end - starting_len\n return seperated_data, moving_pointer",
"def create_data_structure(string_input):\n sentences = string_input.split('.')\n network = {}\n if string_input != '':\n for i in range(0, len(sentences) - 1, 2):\n connections = extract_data(sentences[i], 'to')\n games = extract_data(sentences[i + 1], 'play')\n name = extract_name(sentences[i])\n\n if not name in network:\n network[name] = {'connections': [], 'games': []}\n network[name]['connections'] += connections\n network[name]['games'] += games\n return network",
"def create_data_structure(string_input):\n sentence_list = string_input.split(\".\")\n friend_list = [sentence_list[i].split(\" is connected to \")\n for i in range(len(sentence_list))\n if i % 2 == 0 and sentence_list[i] != \"\"]\n games_list = [sentence_list[i].split(\" likes to play \")\n for i in range(len(sentence_list))\n if i % 2 == 1 and sentence_list[i] != \"\"]\n network = {friend_list[i][0]:\n [friend_list[i][1].split(\", \"), games_list[i][1].split(\", \")]\n for i in range(len(friend_list))}\n # for i in range(len(friend_list)):\n # network[friend_list[i][0]] = [friend_list[i][1].split(\", \"),\n # games_list[i][1].split(\", \")]\n\n return network",
"def _transform_select_string(select_string, logfile):\n\tselected_sources = []\n\ttmp = select_string.split(',')\n\tfor s in tmp:\n\t\tif s in logfile.sources and s not in selected_sources:\n\t\t\tselected_sources.append(s)\n\tif len(selected_sources) == 0:\n\t\tselected_sources = logfile.sources\n\treturn selected_sources",
"def commaStringParse(string):\n dels = []\n cur = \"\"\n length = len(string)\n for c in string:\n # skip spaces outside words\n if c == \" \" and cur == \"\":\n continue\n # new delegation found\n elif c == \",\":\n dels.append(cur)\n cur = \"\"\n # last name in list\n elif string.index(c) == length - 1:\n cur += c\n dels.append(cur)\n else:\n cur += c\n return dels",
"def extract_string(begin, end, string):\n b = string.find(begin) + len(begin)\n e = string.find(end, b)\n\n return string[b:e]",
"def parseConnection(str_in):\n m = re.match(r\"(.+):(in|out)\", str_in)\n return {'name': m.group(1), 'type': m.group(2)}",
"def get_parameters_from_input_string(string):\n parameter_array = []\n start_found = False\n item = str(\"\")\n for i in range(len(string)): \n if start_found == True and string[i] != \",\" and string[i] !=\")\":\n item += string[i]\n elif start_found == True and string[i] == \",\":\n if item not in parameter_array:\n parameter_array.append(item)\n item = str(\"\")\n elif start_found == True and string[i] == \")\":\n start_found = False\n if item not in parameter_array:\n parameter_array.append(item)\n item = str(\"\")\n # Start here and set start_found to True\n elif string[i] == \"(\":\n start_found = True\n return parameter_array",
"def subsentence(sentence, start, end):\r\n \r\n sentence_list=re.split(' ', sentence)\r\n return sentence_list[start:end]",
"def extractseq(geneID, db=dbpaths['cds'], startpos=0, endpos=-1):\n #fname = dbpaths[type]\n geneseq = \"\"\n fobj = open(db, 'rb')\n for line in fobj:\n if line[0] == '>':\n query = re.search( geneID + '[\\s]', line)\n if query:\n thisline = fobj.next()\n\n while thisline[0] != '>':\n geneseq += thisline.strip()\n try:\n thisline = fobj.next()\n except StopIteration:\n break\n else:\n break\n fobj.close()\n return geneseq[startpos:endpos]",
"def __init__(\n self,\n connect_string,\n prefix=\"chattymarkov\",\n separator=\"\\x01\",\n stop_word=\"\\x02\",\n ):\n self.db = database.build_database_connection(connect_string)\n self.separator = separator\n self.stop_word = stop_word\n self.prefix = prefix",
"def __init__(\n self,\n connect_string,\n prefix=\"chattymarkov\",\n separator=\"\\x01\",\n stop_word=\"\\x02\",\n ):\n self.db = database.build_database_connection(connect_string, True)\n self.separator = separator\n self.stop_word = stop_word\n self.prefix = prefix",
"def _parse_ins_string(string):\n istart_markers = set([\"[\", \"(\", \"!\"])\n marker_dict = {\"[\": \"]\", \"(\": \")\", \"!\": \"!\"}\n # iend_markers = set([\"]\",\")\",\"!\"])\n setdum = {\"dum\", \"DUM\"}\n obs_names = []\n slen = len(string)\n idx = 0\n while True:\n if idx >= slen - 1:\n break\n char = string[idx]\n if char in istart_markers:\n # em = iend_markers[istart_markers.index(char)]\n em = marker_dict[char]\n # print(\"\\n\",idx)\n # print(string)\n # print(string[idx+1:])\n # print(string[idx+1:].index(em))\n # print(string[idx+1:].index(em)+idx+1)\n eidx = min(slen, string.find(em, idx + 1))\n obs_name = string[idx + 1 : eidx]\n if obs_name not in setdum:\n obs_names.append(obs_name)\n idx = eidx + 1\n else:\n idx += 1\n return obs_names",
"def _parse_connection_string(self, connection_string):\n self.host = '127.0.0.1'\n self.port = 3306\n self.db = None\n self.user = None\n self.pwd = None\n for part in connection_string.split(';'):\n part = part.strip()\n if part != '':\n k, v = part.split('=')\n k = k.lower()\n if k == 'server':\n self.host = v.strip()\n elif k == 'port':\n self.port = int(v.strip())\n elif k == 'database':\n self.db = v.strip()\n elif k == 'uid':\n self.user = v.strip()\n elif k == 'pwd':\n self.pwd = v.strip()",
"def stringSlicer(string, pos, enditerand):\n pos += 2\n stringOut = \"\"\n while True:\n if enditerand == 5 and string[pos] == '}':\n stringOut = stringOut[:len(stringOut) - 2]\n return stringOut, pos\n elif string[pos] == ',':\n return stringOut, pos\n else:\n stringOut += string[pos]\n pos += 1",
"def next_occurrence(substring, string, start=0, sensitive=True):\n if start in range(0, len(string)):\n ls = SE.occurrences(substring, string[start:], sensitive)\n if ls:\n return [ls[0][0] + start, ls[0][1] + start]\n return []",
"def main(self, regex_string):\n sql_sen = regex_string[0][0]\n reg = \"\\$\\w+\"\n if re.search(reg, sql_sen, re.I):\n\n p = re.compile(reg)\n match = p.findall(sql_sen)\n return match\n return None",
"def lldp_neighbour(self, output_str):\n\n date_time = get_date_time()\n # connections = []\n # top = 212\n # # bottom = -33\n # trimmed_str = output_str[top:]\n # for line in trimmed_str.split('\\n'):\n # line_content = line.split()\n # local_port = line_content[1] + line_content[2]\n # remote_device = line_content[0].split('.')[0]\n # remote_port = line_content[8] + line_content[9]\n # connections.append([self.device, local_port.strip(), remote_device.strip(), remote_port.strip(),\n # date_time])\n #\n # connections_df = pd.DataFrame(connections,\n # columns=['local_device', 'local_port', 'remote_device', 'remote_port',\n # 'date_time'])\n\n connections = []\n top = 289\n bottom = -33\n trimmed_str = output_str[top:bottom]\n for line in trimmed_str.split('\\n'):\n line_content = line.split()\n local_port = line_content[1] + line_content[2]\n remote_device = line_content[0].split('.')[0]\n remote_port = line_content[9] + line_content[10]\n connections.append([self.device, local_port.strip(), remote_device.strip(), remote_port.strip(),\n date_time])\n\n connections_df = pd.DataFrame(connections,\n columns=['local_device', 'local_port', 'remote_device', 'remote_port',\n 'date_time'])\n return connections_df",
"def seq_query():\n query_type = input(\n '1.Specific fragment\\n'\n '2.Specific Organism\\n'\n '3.Specific gene\\n'\n '4.All\\n'\n '5.All cds\\n'\n )\n organize = input('Organize output?(y/n)\\n')\n if query_type not in ['1', '2', '3', '4', '5']:\n raise ValueError('wrong input!\\n')\n con = sqlite3.connect('./data/DB')\n cur = con.cursor()\n if query_type == '1':\n organism = input('Organism:\\n')\n gene = input('Gene:\\n')\n frag_type = input('Fragment type(gene, cds, rRNA, tRNA, exon, intron, spacer):\\n')\n cur.execute(\n 'SELECT Taxon, Organism, Name, Type, Strand, Sequence FROM main WHERE Name LIKE ? AND Type = ? AND Organism=?',\n ('%' + gene + '%', frag_type, organism))\n result = cur.fetchall()\n elif query_type == '2':\n organism = input('Organism:\\n')\n frag_type = input('Fragment type(gene, cds, rRNA, tRNA, exon, intron, spacer, whole, fragments):\\n')\n if frag_type == 'fragments':\n cur.execute(\n 'SELECT Taxon, Organism, Name, Type, Strand, Sequence, Head FROM main WHERE Organism = ? ORDER BY Head',\n (organism,))\n else:\n cur.execute(\n 'SELECT Taxon, Organism, Name, Type, Strand, Sequence, Head FROM main WHERE Organism LIKE ? AND Type = ? ORDER BY Head',\n ('%' + organism + '%', frag_type))\n result = cur.fetchall()\n elif query_type == '3':\n gene = input('Gene:\\n')\n frag_type = input('Fragment type(gene, cds, rRNA, tRNA, exon, intron, spacer):\\n')\n cur.execute(\n 'SELECT Taxon, Organism, Name, Type, Strand, Sequence FROM main WHERE Name LIKE ? AND Type = ? ORDER BY Taxon',\n ('%' + gene + '%', frag_type))\n result = cur.fetchall()\n elif query_type == '4':\n cur.execute('SELECT Taxon, Organism, Name, Type, Strand, Sequence, Head FROM main ORDER BY Taxon')\n result = cur.fetchall()\n elif query_type == '5':\n cur.execute(\n 'SELECT Taxon, Organism, Name, Type, Strand, Sequence, Head FROM main WHERE type = \"cds\" ORDER BY Taxon')\n result = cur.fetchall()\n\n query_result = []\n for i in result:\n title = '{0}|{1}|{2}|{3}'.format(i[0], i[1], i[2], i[3])\n sequence = MutableSeq(i[5])\n gene = i[2]\n if i[4] == '-1':\n sequence.seq = sequence.reverse_complement()\n record = [title, gene, sequence]\n query_result.append(record)\n\n if organize == 'y':\n if not exists('output'):\n makedirs('output')\n for i in query_result:\n file_name = 'output/{0}.fasta'.format(i[1].replace('/', ''))\n with open(file_name, 'a') as output_file:\n output_file.write('>{0}\\n{1}\\n'.format(i[0], i[2]))\n else:\n output = input('Enter output filename:\\n')\n with open('{0}.fasta'.format(output), 'w') as output_file:\n for i in query_result:\n output_file.write('>{0}\\n{1}\\n'.format(i[0], i[2]))\n\n cur.close()\n con.close()\n print('Done.\\n')",
"def extract(string, start_marker, end_marker):\n start_loc = string.find(start_marker)\n end_loc = string.find(end_marker)\n if start_loc == -1 or end_loc == -1:\n return \"\"\n return string[start_loc+len(start_marker):end_loc]",
"def reformulate_query(s):\n words = tokenize(s)\n tags = [tag for _, tag in pos_tag(words)]\n\n if tags[-1] == '.':\n words.pop()\n\n # what/who questions\n if tags[0] in set(['WP', 'WDT']):\n if tags[1] in set(['VBZ', 'VBD', 'VBP']):\n if tags[-1] is not 'IN':\n exact_query = '{0}\\s*{1}\\s*{2}'.format(' '.join(words[2:]),\n '(?:\\(.*\\))?', words[1])\n inexact_query = '{0} {1}'.format(' '.join(words[2:]), words[1])\n return exact_query, inexact_query\n return s, s",
"def part2(input_string):\n length = len(input_string[0])\n for i in range(length):\n modified_input = [line[:i] + line[i+1:] for line in input_string]\n for line in modified_input:\n if modified_input.count(line) == 2:\n return line",
"def valid_sql_in_clause_str(input_str):\n\n if not input_str:\n return False\n\n if re.search(r\"^(\\s)*'(.+)'(\\s)*((\\s)*(,)(\\s)*('(.+)'))*$\", input_str):\n return True\n \n return False",
"def get_chrom_start_end_from_string(s):\n try:\n chrom, s_e = s.split('__substr__')\n start, end = s_e.split('_')\n return chrom, int(start), int(end)\n except Exception:\n raise ValueError(\"String %s must be of format '{chrom}__substr__{start}_{end}'\" % s)",
"def getbook():\n #Opens the database file to read the lines\n with open(\"database.txt\", \"r\") as f:\n #Iterates through each line and splits the line into individual strings\n for line in f:\n s=line.strip()\n string=s.split(\":\")\n return(string)",
"def from_string(string):\n return Sentence(string.split(\" \"))",
"def parse_connection_string(self, constring):\r\n try:\r\n host, port, db = constring.split(\":\")\r\n port = port if host == \"unix\" else int(port)\r\n db = int(db)\r\n return host, port, db\r\n except (ValueError, TypeError):\r\n raise ImproperlyConfigured(\"Incorrect format '%s'\" % (constring))",
"def parse_digtxt(querystr,resultset):\n response = pydig.query(querystr, 'txt')\n for elem in response[0].split():\n if 'include:' in elem:\n resultset = parse_digtxt(elem[8:], resultset)\n else:\n if 'ip4' in elem:\n if elem[4:] not in resultset:\n resultset[elem[4:]] = \"GCP\"\n if 'ip6' in elem:\n if elem[4:] not in resultset:\n resultset[elem[4:]] = \"GCP\"\n return resultset"
] |
[
"0.5334885",
"0.5186011",
"0.5090417",
"0.50633615",
"0.50150484",
"0.4872764",
"0.48694447",
"0.48500234",
"0.484397",
"0.48406842",
"0.47947022",
"0.47689897",
"0.4748765",
"0.47441426",
"0.473664",
"0.46660376",
"0.46588197",
"0.46465313",
"0.46278796",
"0.45753732",
"0.45597258",
"0.45427942",
"0.45322603",
"0.45303854",
"0.45173997",
"0.4505211",
"0.44957006",
"0.44205183",
"0.44190827",
"0.44149742"
] |
0.58856046
|
0
|
Returns a list of all the games a user likes
|
def get_games_liked(network, user):
if not user in network:
return None
if not 'games' in network[user]:
return []
return network[user]['games']
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_user_likes(self, data_base):\n cursor = data_base.cursor(dictionary=True)\n cursor.execute(f\"SELECT user_id FROM user_like WHERE post_id = {self.id}\")\n user_likes = tuple(map(lambda x: str(x['user_id']), cursor.fetchall()))\n if not user_likes:\n return []\n cursor.execute(f\"SELECT username FROM user WHERE id IN ({', '.join(user_likes)})\")\n users = cursor.fetchall()\n cursor.close()\n return list(map(lambda x: x['username'], users))",
"def get_likes_list(self, username):\n api = self.api\n api.searchUsername(username) \n result = api.LastJson\n username_id = result['user']['pk'] #Gets the user ID\n user_posts = api.getUserFeed(username_id) # gets the user feed\n result = api.LastJson\n media_id = result['items'][0]['id'] #gets the most recent post\n api.getMediaLikers(media_id) #gets users who liked\n users = api.LastJson('users')\n for user in users: #appends the users to the list\n users_list.append({'pk':user['pk'], 'username':user['username']})",
"def get_all_likes(obj):\n\t\tobj_type = ContentType.objects.get_for_model(obj)\n\t\treturn User.objects.filter(\n\t\t\tlikes_content_type=obj_type, likes_object_id=obj.id)",
"def get_user_games_playing(user):\n if not user: return []\n playing = db.Query(GamePlayer).filter('user =', user)\n return [p.game for p in playing]",
"def get_games_liked(network, user):\n if user not in network or network[user][1] == []:\n return None\n return network[user][1]",
"def users_by_game(network, game):\n result = []\n for user in network:\n if game in get_games_liked(network, user):\n result.append(user)\n if result == []:\n return None\n return result",
"def get_meals_user_liked(username):\n meals_user_liked = []\n user_liked = Rating.objects.filter(member__username=username, like=True)\n for ratting in user_liked:\n meals_user_liked.append(ratting.meal)\n return meals_user_liked",
"def get_user_games(self, request):\n return games_ctrl.get_user_games(request.user_name)",
"def get_likes(self):\n source, edge = self.id, \"likes\"\n return User.graph().get_connections(source, edge, limit=100000)[\"data\"]",
"def getLikedOkCupidUsers(self):\n\t\tself.logger.info(\"Get all liked OkCupid users\")\n\t\tusers = self.session.query(Models.Okcupid).filter(Models.Okcupid.liked==True).all()\n\t\treturn users",
"def get_user_games_moderating(user):\n if not user: return []\n moderating = db.Query(GameModerator).filter('user =', user)\n return [m.game for m in moderating]",
"def get_users_from_likes(self, type, owner_id, item_id):\n likes = self.vk.likes.getList(type=type, owner_id=owner_id, item_id=item_id, count=1)\n likes = self.vk.likes.getList(type=type, owner_id=owner_id, item_id=item_id, count=likes['count'])\n return likes['items']",
"async def list(self, ctx, user: discord.Member=None):\n\n author = ctx.message.author\n\n if not user:\n user = author\n\n game_list = get_library()\n\n if check_key(user.id) and game_list.get(user.id).get(\"games\", False):\n user_game_list = get_library(user.id)\n\n message = pagify(\", \".join(sorted(user_game_list)), [', '])\n\n await self.bot.say(\"Please check your DM for the full list of games, {}.\".format(author.mention))\n await self.bot.send_message(author, \"{}'s games:\".format(user.mention))\n\n for page in message:\n await self.bot.send_message(author, (box(page)))\n else:\n await self.bot.say(\"{}, you do not have any games. Add one using `{p}game add <game_name>` and/or link your Steam profile with `{p}game steamlink <steam_id>`.\".format(user.mention, p=ctx.prefix))",
"def likes():\n click.echo(chalk.blue(\"For whom you want to view likes for\"))\n friend_name = input().strip().lower()\n FRIENDS_FILE_PATH = get_friends_file_path(friend_name)\n\n if os.path.isfile(FRIENDS_FILE_PATH):\n with open(FRIENDS_FILE_PATH) as fin:\n contents = yaml.load(fin)\n entries = contents[\"entries\"]\n likes = []\n for entry in entries:\n if \"likes\" in entry:\n likes.extend(entry[\"likes\"])\n click.echo(\"Likes:\")\n for i, n in enumerate(likes):\n click.echo(str(i) + \": \" + n)\n else:\n click.echo(\n chalk.red(\n 'The Likes file path for this module does not exist. Please type \"yoda people like\" to create a new one'\n )\n )",
"def get_user_games(self, req):\n return models.BattleShip.getUserGames(req.user_name)",
"def users_likes(user_id):\n\n if not g.user:\n flash(\"Access unauthorized.\", \"danger\")\n return redirect(\"/\")\n\n users_blocking = [block.user_blocking_id for block in Blocks.query.all() if block.user_being_blocked_id == g.user.id]\n # likes = Message.query.filter(Message.user_id.notin_(users_blocking)).all()\n user = User.query.get_or_404(user_id)\n likes = [message for message in user.likes if message.user_id not in users_blocking]\n return render_template('users/likes.html', user=user, likes=likes)",
"def get_user_games(self, request):\n user = User.query(User.name == request.user_name).get()\n if not user:\n raise endpoints.NotFoundException(\n 'A User with that name does not exist!')\n games = Game.query(Game.user == user.key).filter(Game.game_over == False)\n return GameForms(items=[game.to_form('Active games for this user are...!') for game in games])",
"def get_user_games(self, request):\n user = User.query(User.name == request.user_name).get()\n if not user:\n raise endpoints.NotFoundException(\n 'A User with that name does not exist!')\n games = Game.query(Game.user == user.key)\n games = games.filter(Game.game_over == False)\n if games.count() > 0:\n return GameForms(items=[game.to_form(\"{}'s active games.\".format(\n request.user_name)) for game in games])\n else:\n raise endpoints.NotFoundException('This user has no active games!')",
"def get_all_userleagues(self):\n self.logger.info(\"Returning all userleagues\")\n q = UserLeague.query.all()\n self.logger.debug(q)\n return q",
"def like_cafes():\n if CURR_USER_KEY in session:\n cafe_id = request.args.get(\"cafe_id\")\n user = g.user\n print(user.id)\n print(user.likes)\n for like in user.likes:\n if like.cafe_id == int(cafe_id or 0):\n print(\"get True\")\n return jsonify({\"likes\": True})\n\n return jsonify({\"likes\": False})\n\n return jsonify({\"error\": \"Not logged in\"})",
"def likes(self):\r\n return Likes(self)",
"def list_games(self, user_id: UUID) -> Iterable[UUID]:\n return (game_id for game_id, game in self.games.items() if user_id in game.players)",
"def ListLikes(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def get_hikes():\n\n return Hike.query.all()",
"def show_likes(user_id):\n\n\n user = User.query.get_or_404(user_id)\n\n return render_template('users/likes.html', user=user)",
"def get_recommended_games(user):\n games = list(Game.objects.all())\n games.sort(key = lambda x: abs(x.level-user.level))\n return games[0:5]",
"def get_user_liked(user, status):\n return models.Favorite.objects.filter(user=user, status=status).exists()",
"def likes(self):\n return self.get_queryset().filter(vote__gt=0)",
"def get_user_rankings(self, request):\n difficulty = validateGameDifficultyValue(request, True)\n users = User.query().fetch()\n items = []\n total_games = 0\n wins = 0\n for user in users:\n scores = Score.query()\n # Filter by game difficulty and only wons that resulted in a win\n scores = scores.filter(Score.user == user.key, \\\n Score.difficulty == difficulty)\n scores.fetch()\n\n for score in scores:\n total_games += 1\n if score.won:\n wins += 1\n if total_games > 0:\n items.append(\n RankingForm(user_name=user.name,\n difficulty=getattr(GameDifficulty, difficulty),\n win_percentage=round(float(wins)/total_games*100, 2),\n wins=wins)\n )\n total_games = 0\n wins = 0\n return RankingForms(items=items)",
"def get_user_games(self, request):\n user = User.query(User.name == request.user_name).get()\n if not user:\n raise endpoints.NotFoundException(\n 'A user with that name does not exist!')\n games = Game.query(Game.user == user.key, Game.game_over == False)\n return GameForms(items=[game.to_form(message=\"Game In Progress\")\n for game in games])"
] |
[
"0.7015423",
"0.69208395",
"0.67245775",
"0.67051995",
"0.6687749",
"0.65598106",
"0.64529777",
"0.64155376",
"0.6378932",
"0.63228387",
"0.62585735",
"0.6232062",
"0.6226145",
"0.61290556",
"0.6088099",
"0.6082818",
"0.6048625",
"0.5938895",
"0.5935001",
"0.5928655",
"0.5897898",
"0.5862954",
"0.58426225",
"0.5841192",
"0.5838119",
"0.58294386",
"0.5798031",
"0.57747316",
"0.57302946",
"0.5727461"
] |
0.76072174
|
0
|
Adds a connection from user_A to user_B.
|
def add_connection(network, user_A, user_B):
if user_A not in network or user_B not in network:
return False
if not user_B in network[user_A]['connections']:
network[user_A]['connections'].append(user_B)
return network
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def view_addConnection(self, user, tagA, tagB):\r\n eTagA, iTagA = tagA.split('/', 2)\r\n eTagB, iTagB = tagB.split('/', 2)\r\n\r\n ifA = user.getEndpoint(eTagA).getInterface(iTagA)\r\n ifB = user.getEndpoint(eTagB).getInterface(iTagB)\r\n\r\n if ifA.clsName != ifB.clsName:\r\n raise InvalidRequest('Can not connect two interfaces with '\r\n 'different message/service type.')\r\n\r\n if not Types.connectable(ifA.iType, ifB.iType):\r\n raise InvalidRequest('Can not connect an interface of type {0} '\r\n 'and an interface of type '\r\n '{1}.'.format(Types.decode(ifA.iType),\r\n Types.decode(ifB.iType)))\r\n\r\n key = int(md5(tagA).hexdigest(), 16) ^ int(md5(tagB).hexdigest(), 16)\r\n\r\n if key in user.connections:\r\n raise InvalidRequest('Can not add the same connection twice.')\r\n\r\n connection = user.realm.createConnection(ifA.obj, ifB.obj)\r\n user.connections[key] = connection\r\n connection.notifyOnDeath(user.connectionDied)\r\n\r\n # TODO: Return some info about success/failure of request\r",
"def add_connection(network, user_A, user_B):\n if user_A not in network or user_B not in network:\n return False\n if user_B not in network[user_A][0]:\n network[user_A][0].append(user_B)\n return network[user_A][0]",
"def addConnection(tagA, tagB): #@NoSelf",
"def add_connections(self, user_data):\n self.__check_opts()\n self.user_data = user_data\n for ssid in Config.ssids:\n self.__delete_existing_connection(ssid)\n self.__add_connection(ssid)\n for ssid in Config.del_ssids:\n self.__delete_existing_connection(ssid)",
"def add_connection(self, connection):\n self.connections[connection.id] = connection",
"def add_connection(self, connection):\n self.connections.append(connection)",
"def add_connection(self, n1: Node, n2: Node):\n if n2.node_id in n1.get_connections_ids() or n1.node_id in n2.get_connections_ids():\n return\n n1.add_child(n2)\n n2.add_child(n1)",
"def add_connection(self, switch_name, port1, port2, bidir=False):\n raise NotImplementedError()",
"def __init__(self, connectionA, connectionB):\r\n assert connectionA != connectionB\r\n\r\n self._connectionA = connectionA\r\n connectionA.registerUser(self, connectionB.getID())\r\n\r\n self._connectionB = connectionB\r\n connectionB.registerUser(self, connectionA.getID())\r\n\r\n self._cbs = set()",
"def connect(self, channel, a, b):\n a.sender.channels.append(channel)\n channel.receivers.append(b)",
"def connect(self, source, target):\r\n connection = (self.coalesce_node(source), self.coalesce_node(target))\r\n self.connections.add(connection)",
"def add_connections(self, connections):\r\n\r\n for node1, node2 in connections:\r\n self.add(node1, node2)",
"def add_conn(self, a1, a2):\n if self.use_pconn:\n raise ValueError(\"Can not add bonds to systems with pconn - well, we can fix this ;) \")\n self.conn[a1].append(a2)\n self.conn[a2].append(a1)\n d,v,imgi = self.get_distvec(a1,a2)\n self.pconn[a1].append(images[imgi])\n d,v,imgi = self.get_distvec(a2,a1)\n self.pconn[a2].append(images[imgi])\n logger.warning('pconn may not be properly updated!!!')\n return",
"def _add_connections(self, connections):\r\n\r\n for node1, node2 in connections:\r\n self._add(node1, node2)",
"def add_connection(\n self, port1: ryvencore.NodePort.NodeOutput, port2: ryvencore.NodePort.NodeInput\n ) -> ryvencore.Connection.DataConnection:\n ryven_connection = self.script.flow.connect_nodes(port1, port2)\n if not ryven_connection:\n return\n\n # Add connection in compas graph\n node1 = port1.node\n node2 = port2.node\n edge_key = (node1.GLOBAL_ID, node2.GLOBAL_ID)\n if not self.has_edge(*edge_key):\n self.add_edge(*edge_key, {\"connections\": []})\n connections = self.edge_attribute(edge_key, \"connections\")\n connections.append({\"port1\": self.get_port_info(port1), \"port2\": self.get_port_info(port2)})\n self.edge_attribute(edge_key, \"connections\", connections)\n\n return ryven_connection",
"def add_connections(self, connections):\r\n\r\n for node1, node2, w in connections:\r\n self.add(node1, node2, w)",
"def add_connection(self, connection, from_scratch=False):\n if from_scratch:\n self.children = set()\n self.children.add(connection)\n connection.add_state_listener(self.state_listener)",
"def register_connection(self, source, target, name):\n logger.info(f'Registered {name} connection: {source} -> {target}')\n self.connections.setdefault(target.id, set()).add((source, name))",
"def view_removeConnection(self, user, tagA, tagB):\r\n key = int(md5(tagA).hexdigest(), 16) ^ int(md5(tagB).hexdigest(), 16)\r\n\r\n try:\r\n connection = user.connections.pop(key)\r\n except KeyError:\r\n raise InvalidRequest('Can not disconnect two unconnected '\r\n 'interfaces.')\r\n\r\n connection.dontNotifyOnDeath(user.connectionDied)\r\n connection.destroy()\r\n\r\n # TODO: Return some info about success/failure of request\r",
"def add(self, host, auth, conn):\n self.conns[(host, auth)] = conn",
"def connect_user(self, user):\n\t\tis_user_added = False\n\t\tif not user in self.users.all():\n\t\t\tself.users.add(user)\n\t\t\tself.save()\n\t\t\tis_user_added = True\n\t\telif user in self.users.all():\n\t\t\tis_user_added = True\n\t\treturn is_user_added",
"def addConnection(catalog, graph, origin, destination, weight):\n edge = gr.getEdge(catalog[graph], origin, destination)\n if edge is None:\n gr.addEdge(catalog[graph], origin, destination, weight)\n #gr.addEdge(catalog[graph], destination, origin, weight)\n return catalog",
"def add_incoming_connection(intersection, road):\n intersection.add_incoming_connection(road)",
"def _add_connection(self, con):\n # get connectors by the above specified labels\n start = self.connector_by_label(con[0])\n end = self.connector_by_label(con[1])\n if start.parent_type == 'box' and end.parent_type == 'box':\n # make sure, that not two inputs or two outputs are connected\n if start.connector_type == end.connector_type:\n raise ConnectorError(f\"Connection {con} connects \"\n f\"input to input or output to output.\")\n # make sure, that inputs are always first\n # and outputs are always second\n elif (start.connector_type == 'output'\n or end.connector_type == 'input'):\n start, end = end, start\n # make sure, that a switch does not connect to itself\n elif start.parent_type == 'switch' and end.parent_type == 'switch':\n if start.switch == end.switch:\n raise ConnectorError(f\"Connection {con} connects \"\n f\"a switch to itself.\")\n\n # create connection\n connection = ArduinoSwitchControlConnection(start, end)\n\n # add connection to attributes\n self.connections.append(connection)",
"def createConnection(self, interfaceA, interfaceB):\r\n return self._network.createConnection(interfaceA, interfaceB)",
"def add_outgoing_connection(intersection, road):\n return intersection.add_outgoing_connection(road)",
"def add_edge(self, nodes, X, U, V):\n assert(nodes[0] in self.nodes)\n assert(nodes[1] in self.nodes)\n\n if nodes[0] != nodes[1]:\n\n self.edges[nodes] = Graph.new_path([X, U, V])\n self.nodes[nodes[0]].linked_to.append(nodes[1])\n self.join_connex_groups(self.connex_elements[nodes[0]],\n self.connex_elements[nodes[1]])",
"def _threadsafe_add_connection(self, con):\n self._connections.append(con)",
"def add_connection(intersection, angle, distance, in_ln, out_ln, speed_limit, name):\n intersection.add_connection(angle, distance, in_ln, out_ln, speed_limit, name)",
"def userJoin(self, __userID):\n\n\t\tif (__userID not in self.connectedUsers):\n\t\t\tself.connectedUsers.append(__userID)"
] |
[
"0.7987027",
"0.7670441",
"0.71820414",
"0.6541292",
"0.6537126",
"0.6513064",
"0.63350296",
"0.62619996",
"0.62432957",
"0.6166243",
"0.6136505",
"0.60668856",
"0.6046037",
"0.5969228",
"0.58736104",
"0.58450896",
"0.5791936",
"0.5787016",
"0.57681024",
"0.5718977",
"0.57006156",
"0.568804",
"0.5682361",
"0.5651152",
"0.56147724",
"0.5544444",
"0.5543871",
"0.5535348",
"0.54728854",
"0.5431403"
] |
0.79342914
|
1
|
Creates a new user profile and adds that user to the network, along with any game preferences specified in games. Users have no connections to begin with.
|
def add_new_user(network, user, games):
if not user in network:
network[user] = {'connections': [], 'games': games}
return network
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add_new_user(network, user, games):\n if user not in network:\n network[user] = [[], games]\n return network",
"def create_user_profile(sender, instance, created, **kwargs):\n if created:\n # create new Stellar account\n stellar.api.create_account(user=instance)",
"def createProfile(self):\n if self.profile:\n return\n from soc.modules.gsoc.models.profile import GSoCProfile\n user = self.createUser()\n properties = {'link_id': user.link_id, 'student_info': None, 'user': user,\n 'parent': user, 'scope': self.program, 'status': 'active'}\n self.profile = seeder_logic.seed(GSoCProfile, properties)",
"def create_user_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)",
"def create_user_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)",
"def create_user_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)",
"def createUserProfile(user):\n MyProfile.objects.get_or_create(user=user)",
"def create_new_profile():\n client_nickname = input('Enter client profile name: ')\n client_username = input('Enter client username: ')\n client_hostname = input('Enter client hostname: ')\n client_port = '-p' + input('Enter client port: ')\n new_profile = SshUsers(client_nickname, client_username, client_hostname, client_port)\n return add_user_to_db(new_profile)",
"def create_profile(self, user):\n salt = sha.new(str(random.random())).hexdigest()[:5]\n activation_key = sha.new(salt+user.username).hexdigest()\n# prepend \"key_\" to the key_name, because key_names can't start with numbers\n registrationprofile = RegistrationProfile(user=user, activation_key=activation_key)\n db = DB_Session()\n db.add(registrationprofile)\n db.flush()\n db.refresh(registrationprofile)\n db.commit()\n db.close()\n return registrationprofile",
"def create_user_profile(sender, instance, created, **kwargs):\n if created:\n user_profile = UserProfile.objects.create(user=instance)",
"def create_user_profile(instance, created, **_):\n if created:\n Profile.objects.create(user=instance)",
"def create_profile_for_new_user(sender, created, instance, **kwargs):\n if created:\n profile = self.get_model('profile')(user=instance)\n profile.save()",
"def create_profile(self, user):\r\n salt = sha.new(str(random.random())).hexdigest()[:5]\r\n activation_key = sha.new(salt+user.username).hexdigest()\r\n return self.create(user=user,\r\n activation_key=activation_key)",
"def create_profile(self, user):\n salt = sha.new(str(random.random())).hexdigest()[:5]\n activation_key = sha.new(salt+user.username).hexdigest()\n return self.create(user=user,\n activation_key=activation_key)",
"def create_profile(self, user, *args, **kwargs):\n salt = hashlib.sha1(str(random.random())).hexdigest()[:5]\n activation_key = hashlib.sha1(salt + user.username).hexdigest()\n return self.create(user=user, activation_key=activation_key, **kwargs)",
"def create_user_profile(sender, instance, created, **kwargs):\n\n if created:\n user_profile = UserProfile.objects.create(user=instance)",
"def create_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)",
"def add_user(platform, sender):\n profile_info = get_user_profile(platform, sender)\n user = User(platform_id=platform.id,\n platform_user_ident=sender,\n last_seen=datetime.datetime.now(),\n **profile_info).add()\n DatabaseManager.commit()\n\n statsd.gauge('users', User.count(), tags=[config.ENV_TAG])\n track(TrackingInfo.Event(sender, '%s.User' % platform.type_enum.value,\n 'Add', profile_info['first_name']))\n return user",
"def create_user():\n new_user = User(id=login_session['gplus_id'],\n name=login_session['username'],\n email=login_session['email'],\n picture=login_session['picture'])\n session.add(new_user)\n session.flush()\n session.commit()\n user = session.query(User).filter_by(email=login_session['email']).one()\n return user.id",
"def create_profile_for_new_users(sender, instance, created, **kwargs):\n if not created:\n return\n\n profile = Profile.objects.filter(user=instance).first()\n if profile is None:\n profile = Profile(user=instance)\n profile.save()",
"def create_profile(sender, instance, created, **kwargs):\n if created: \n profile, new = UserProfile.objects.get_or_create(user=instance)",
"def add_user_to_db(new_profile):\n try:\n params = (new_profile.client_nickname,\n new_profile.client_username,\n new_profile.client_hostname,\n new_profile.client_port)\n client_db.execute(\"INSERT INTO clients VALUES (?, ?, ?, ?)\", params)\n client_detail_list.commit()\n client_detail_list.close()\n except:\n print('User already exists, try deleting the profile first.')",
"def create_profile(username):\n user = User.objects.create(username=username)\n return Profile.objects.create(user=user)",
"def create_player_profile(sender, **kwargs):\n if kwargs.get('created') is True:\n PlayerProfile.objects.create(user=kwargs.get('instance'))",
"def create_profile(sender, instance, created, **kwargs):\n if created:\n profile, created = UserProfile.objects.get_or_create(user=instance)",
"def update_user_profile(sender, instance, created, **kwargs):\n if created:\n GameplanUser.objects.create(user=instance)\n instance.gameplanuser.save()",
"def create_user_profile(IamUserArn=None, SshUsername=None, SshPublicKey=None, AllowSelfManagement=None):\n pass",
"def manage_user_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)\n else:\n instance.profile.save()",
"def create_profile(sender, **kwargs):\n user = kwargs[\"instance\"]\n if kwargs[\"created\"]:\n user_profile = Profile(user=user)\n user_profile.save()",
"def create_user(self, request):\n if User.query(User.name == request.user_name).get():\n raise endpoints.ConflictException(\n 'A User with that name already exists!')\n #By adding wins, it added it to the create_user input #api page.\n wins = defaults['wins']\n user = User(name=request.user_name, email=request.email, wins = wins)\n #user.put() sends the user info that is ndb\n user.put()\n\n for key,val in sorted(craft.items()):\n outmessage =(\"{} : Can be make with {}\".format(key, val))\n return StringMessage(message='User {} created!'.format(\n outmessage))\n #This just returns a message for response at bottom of API\n #screen."
] |
[
"0.7117497",
"0.70152885",
"0.69539124",
"0.69374615",
"0.69374615",
"0.69374615",
"0.6920384",
"0.69119585",
"0.68634856",
"0.6826175",
"0.6809397",
"0.6758873",
"0.67379594",
"0.67251223",
"0.6718418",
"0.6712079",
"0.66611475",
"0.6638409",
"0.6633247",
"0.6613752",
"0.661286",
"0.6601974",
"0.65833056",
"0.65692276",
"0.65532815",
"0.6539918",
"0.6520316",
"0.65196466",
"0.65040475",
"0.648856"
] |
0.7374734
|
0
|
Finds all the secondary connections (i.e. connections of connections) of a given user. Secondary connections can include the user himself/herself and a user's primary connection that is a secondary connection as well.
|
def get_secondary_connections(network, user):
if user not in network:
return None
if network[user]['connections'] != []:
result = []
for conn in get_connections(network, user):
for conn_2 in get_connections(network, conn):
if conn_2 not in result:
result.append(conn_2)
return result
return []
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_secondary_connections(network, user):\n if user not in network:\n return None\n if network[user][0] == []:\n return []\n return [person\n for group in\n [network[connection][0] for connection in network[user][0]]\n for person in group]",
"def get_connections(network, user):\n if not user in network:\n return None\n if not 'connections' in network[user]:\n return []\n return network[user]['connections']",
"def user_connections(self):\r\n return users.UserConnections(self)",
"def get_connections(network, user):\n if user not in network or network[user][0] == []:\n return None\n return network[user][0]",
"def getConnectionsBetweenSuperPeers(self):\r\n raise NotImplementedError()",
"def connections(self, recurse = True):\n \n return NeuroObject.connections(self, recurse) + [self.root] + self.arborizations(False) + self.gapJunctions(False) + self.innervations(False) + self.synapses(False)",
"def _get_connections(self) -> _ConnectionsMap:\n seen: Dict[int, Any] = {}\n for parent in self.target.ancestors:\n if not isinstance(parent, NodeInstance):\n continue\n if parent is self.target.root:\n break\n if self.operation_host:\n self._get_connection(self.operation_host, parent, seen)\n self._get_connection(self.target.root, parent, seen)\n # get the rest of the default connections\n self._get_connection(self.target.root, None, seen)\n\n # reverse so nearest relationships replace less specific ones that have matching names\n connections = _ConnectionsMap( # the list() is for Python 3.7\n (rel.name, rel) for rel in reversed(list(seen.values()))\n )\n return connections",
"def get_conections(user_id, limit):\n url_conect = \"https://bio.torre.co/api/people/{}/connections?limit={}\".format(user_id, limit)\n request = get(url_conect)\n if request.status_code != 200:\n abort(404, 'Not Found')\n return request.json()",
"def get_outgoing_connections(self, comp):\n return self.connections.get(comp.id, [])",
"def sitecurclntconnections(self) :\n\t\ttry :\n\t\t\treturn self._sitecurclntconnections\n\t\texcept Exception as e:\n\t\t\traise e",
"def connections2Neo(db, user, renderedTwits, friends=True):\n started = datetime.now()\n right_now = started.isoformat()\n \n users2Neo(db, renderedTwits)\n \n match = (\"MATCH (t:twitter_user {{screen_name: '{}'}}),\" +\n \" (f:twitter_user {{screen_name: d.screen_name}})\").format(user)\n\n if friends:\n merge = \"MERGE (t)-[:FOLLOWS]->(f)\"\n update = \"SET {}.friends_last_scraped = '{}'\".format('t'+user, right_now)\n else:\n merge = \"MERGE (t)<-[:FOLLOWS]-(f)\"\n update = \"SET {}.followers_last_scraped = '{}'\".format('t'+user, right_now)\n \n query = '\\n'.join(['UNWIND $data AS d', match, merge])\n \n data = [{'screen_name': twit.get('screen_name', False)}\n for twit in renderedTwits if twit.get('screen_name', False)]\n\n userNode = nodeRef(user, 'twitter_user', {'screen_name': user})\n update_query = '\\n'.join([mergeNode(userNode, match=True), update])\n\n neo_tx(db, update_query)\n neo_tx(db, query, data=data)\n\n how_long = (datetime.now() - started).seconds\n logging.info(\n '*** PUSHED %d CONNECTIONS FOR %s TO NEO IN %ds ***' %\n (len(renderedTwits), user, how_long))",
"def get_connections(self):\n return self.connections",
"def get_connections(self):\n return self.connections",
"def connections_outgoing(self):\n return self.client.call('GET', self.name + 'connections/outgoing')",
"def add_connections(self, user_data):\n self.__check_opts()\n self.user_data = user_data\n for ssid in Config.ssids:\n self.__delete_existing_connection(ssid)\n self.__add_connection(ssid)\n for ssid in Config.del_ssids:\n self.__delete_existing_connection(ssid)",
"def list_connections(self, show_passthrough=True):\n return self._exprmapper.list_connections(show_passthrough)",
"def list_connections(self):\n return self.network.list_connections()",
"def all_connections(self):\n for i in _xrange(self.num_patterns):\n for c in self._available_connections[i]:\n yield c\n for c in self._in_use_connections[i]:\n yield c",
"def sitecursrvrconnections(self) :\n\t\ttry :\n\t\t\treturn self._sitecursrvrconnections\n\t\texcept Exception as e:\n\t\t\traise e",
"def get_connections(self):\n return self._connections",
"def get_connection(self, user_id):\n logging.debug('ConnectionsClient/get_connection()')\n url = '/pod/v1/connection/user/{0}/info'.format(user_id)\n return self.bot_client.execute_rest_call('GET', url)",
"def list_conns(self):\n\t\tres = []\n\t\tself.AL.acquire()\n\t\tfor ls in self.ls.keys():\n\t\t\tinfo = self.ls[ls]\n\t\t\tres.append(\n\t\t\t\t(\n\t\t\t\t\t\"Relay\", \"LOCAL\", info[\"local\"], info[\"peer\"],\n\t\t\t\t\tinfo[\"port\"], info[\"got\"], None,\n\t\t\t\t\t)\n\t\t\t\t)\n\t\tfor s in self.s2i.keys():\n\t\t\tinfo = self.s2i[s]\n\t\t\tif info[\"creator\"] == self.cid:\n\t\t\t\tfai = \"LOCAL\"\n\t\t\t\ttai = info[\"peer\"]\n\t\t\telse:\n\t\t\t\tfai = info[\"creator\"]\n\t\t\t\ttai = info[\"peer\"]\n\t\t\tres.append(\n\t\t\t\t(\n\t\t\t\t\t\"Conn\", fai, info[\"local\"], tai, info[\"port\"],\n\t\t\t\t\tinfo[\"recv\"], info[\"send\"]\n\t\t\t\t\t)\n\t\t\t\t)\n\t\tself.AL.release()\n\t\treturn res",
"def list_connections(self, show_passthrough=True):\n excludes = set([name for name, data in self._exprgraph.nodes(data=True)\n if data['expr'].refs_parent()])\n if show_passthrough:\n return [(u, v) for u, v in self._exprgraph.edges() if not (u in excludes or v in excludes)]\n else:\n return [(u, v) for u, v in self._exprgraph.edges()\n if '.' in u and '.' in v and not (u in excludes or v in excludes)]",
"def list_ipsec_site_connections(self, retrieve_all=True, **_params):\r\n return self.list('ipsec_site_connections',\r\n self.ipsec_site_connections_path,\r\n retrieve_all,\r\n **_params)",
"def list_ipsec_site_connections(self, params = None):\n return self._get(self.ipsec_site_connections_path, params=params)",
"def c2p_connection(self, u, v):\n qnode = namedtuple('Node', 'asn path_type')\n\n q = list()\n q.append(qnode(u, -1))\n\n visited = defaultdict(int)\n\n while q:\n node = q.pop()\n visited[node.asn] = 1\n\n if self.has_node(node.asn):\n for neighbor in nx.all_neighbors(self, node.asn):\n if visited[neighbor] != 1:\n edge_data = self.get_edge_data(node.asn, neighbor)\n\n # c2p - p2c link\n if edge_data[\"relationship\"] == -1:\n if node.path_type == -1:\n path_type = 0 if node.asn == edge_data[\"as2\"] else 1\n else:\n path_type = node.path_type\n # c2p\n if (node.asn == edge_data[\"as2\"] and path_type == 0)\\\n or (node.asn == edge_data[\"as1\"] and path_type == 1):\n if neighbor == v:\n return True, -1 if path_type == 0 else 1\n q.append(qnode(neighbor, path_type))\n return False, None",
"def inner_recurse(friends_to_check, current_connections, current_degree):\n\n # base cases to the recursion\n\n if current_degree > max_degree:\n return\n\n if len(current_connections) == 0:\n return\n\n if len(friends_to_check) == 0:\n return\n\n # list of connections to NOT check on next recursion\n connections_to_ignore = set()\n\n # list of people from previous 'degree'\n new_friends_to_check = set()\n\n # unused connections to pass to next level\n new_connections = set()\n\n # loop through all connections and record friendship along with current degree\n for connection in current_connections:\n friend1_id, friend2_id = connection\n friend1 = User.query.get(friend1_id)\n friend2 = User.query.get(friend2_id)\n for friend_to_check in friends_to_check:\n # if the first person listed is one of our recorded friends,\n # then the second person is a 'friend', making this person a\n #connection of whatever degree we're currently in\n if friend_to_check == friend1_id:\n friends_lst.add((friend2.username, friend1.username, current_degree))\n new_friends_to_check.add(friend2_id)\n # already recorded this connection so can skip in the future\n connections_to_ignore.add(connection)\n\n #same as above but need to check both since either could be match\n elif friend_to_check == friend2_id:\n friends_lst.add((friend1.username, friend2.username, current_degree))\n new_friends_to_check.add(friend1_id)\n\n # already recorded this connection so can skip in the future\n connections_to_ignore.add(connection)\n\n for connection in current_connections:\n if connection not in connections_to_ignore:\n new_connections.add(connection)\n\n inner_recurse(new_friends_to_check, new_connections, current_degree + 1)",
"def learn_connectome(self):\n episode_nodes = [node for node in self.container.nodes if node.is_episode]\n if len(episode_nodes) < 2:\n return\n connections_counter = {}\n for node in episode_nodes:\n self._collect_episode_callout_stats(node, connections_counter)\n\n pair_list = [(key, connections_counter[key]) for key in connections_counter]\n pair_list.sort(key=lambda item: item[1], reverse=True)\n top_count = pair_list[0][1]\n if top_count < 4:\n return\n # make connections for the top half of pairs\n for pair, cnt in pair_list:\n if cnt > top_count // 2:\n self._make_connection_for_pair(pair)",
"def get_users_connection(self):\n return self.m_connection.users",
"def get_connections(self, id, connection_name, **args):\n return self.request(id + \"/\" + connection_name, args)"
] |
[
"0.8156165",
"0.67806035",
"0.6443198",
"0.6302404",
"0.6001595",
"0.58823705",
"0.56933975",
"0.5661499",
"0.5622436",
"0.56224114",
"0.56027144",
"0.55988115",
"0.55988115",
"0.5553347",
"0.5547893",
"0.5535188",
"0.54834986",
"0.54733616",
"0.5465031",
"0.54358125",
"0.5396651",
"0.53785145",
"0.5323563",
"0.5322699",
"0.52809757",
"0.52715063",
"0.52429175",
"0.5209564",
"0.518709",
"0.5185563"
] |
0.8376694
|
0
|
Finds the number of people that user_A and user_B have in common.
|
def count_common_connections(network, user_A, user_B):
if user_A not in network or user_B not in network:
return False
common_connections = 0
for conn in network[user_A]['connections']:
if conn in network[user_B]['connections']:
common_connections += 1
return common_connections
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def count_common_connections(network, user_A, user_B):\n count = 0\n if user_A not in network or user_B not in network:\n return False\n for person in network[user_A][0]:\n if person in network[user_B][0]:\n count += 1\n return count",
"def common_count(self, node_1, node_2):\n return int(len(set(nx.neighbors(self.graph, node_1)).intersection(set(nx.neighbors(self.graph, node_2)))))",
"def count_all(a, b):\n return len([1 for w in b if w == a])",
"def calculate_pairwise_user_similarity(self, user1_preferences, user2_preferences):\r\n\r\n shared_items = set(user1_preferences.indices) & set(user2_preferences.indices)\r\n\r\n all_items = set(user1_preferences.indices) | set(user2_preferences.indices)\r\n\r\n num_agreements = sum(1 for x in shared_items if abs(user1_preferences[0, x] - user2_preferences[0, x]) <= 2)\r\n\r\n return (num_agreements / len(all_items) if len(all_items) > 0 else 0)",
"def friend_overlap(users):\n ###TODO-- Completed\n\n #Creating a list of tuples to store the values for number of shared accounts by each of the user\n overlap_tuples = []\n\n #Trying for all the combination if user's without repetition\n for outer_idx,_ in enumerate(users):\n for inner_idx,_ in enumerate(users):\n if (inner_idx != len(users)-1) and (outer_idx < inner_idx+1):\n #Creating a SET of friends for 2 users and finding the INTERSECTION i.e. Common friends between these users\n overlap_tuples.append(tuple((users[outer_idx]['screen_name'],users[inner_idx+1]['screen_name'],\n len(list(set(users[outer_idx]['friends']) & set(users[inner_idx+1]['friends']))))))\n\n #Sort based on first KEY as N i.e. number of shared account in descending order,\n # for ties break using screen_name of user one, further on screen_name of user two\n return sorted(overlap_tuples, key=lambda x:[-x[2], x[0], x[1]])\n\n #for perm in combinations(screen_names,2):\n # overlap_tuples.append(tuple(perm[0],perm[1],len(list(set(user[perm[0]]['friends']) & set(perm[1]['friends'])))))\n #print(len(list(set(users[0]['friends']) & set(users[1]['friends']))))",
"def ismember(A,B):\n return [np.sum(a==B) for a in A]",
"def common_friends(self, user):\n\n self_friend_ids = set(self.friends.keys()) if self.friends else set()\n other_friend_ids = set(user.fb_profile.friends.keys()) if user.fb_profile.friends else set()\n\n common_friend_ids = self_friend_ids.intersection(other_friend_ids)\n\n return common_friend_ids",
"def get_jurisdiction_common_members(a: List[int], b: List[int]) -> Set[int]:\n a_set = set(a)\n b_set = set(b)\n\n if a_set & b_set:\n return a_set & b_set\n else:\n return set()",
"def euclidean_distance(user1: User, user2: User) -> float:\r\n common_animes = set.intersection(set(user1.neighbor_anime.keys()),\r\n set(user2.neighbor_anime.keys()))\r\n return sqrt(sum(pow(anime.neighbor_users[user1] - anime.neighbor_users[user2], 2)\r\n for anime in common_animes))",
"def numcheck(list1, list2):\r\n set1 = set(list1)\r\n set2 = set(list2)\r\n #set3 contains all items common to set1 and set2\r\n set3 = set1.intersection(set2)\r\n # return number of matching items\r\n return len(set3)",
"def common_likes(self, user):\n\n self_like_ids = set(self.likes.keys()) if self.likes else set()\n other_like_ids = set(user.fb_profile.likes.keys()) if user.fb_profile.likes else set()\n\n common_like_ids = self_like_ids.intersection(other_like_ids)\n\n return common_like_ids",
"def count(a, b):\r\n\r\n length = len(a)\r\n count = 0\r\n for i in range(length):\r\n if a[i] in b:\r\n index = b.find(a[i])\r\n b = b[:index] + b[(index + 1):]\r\n count += 1\r\n return count",
"def count_task2_group(answers):\n return len(set.intersection(*answers))",
"def count_common(self, other, downsample=False):\n if not isinstance(other, MinHash):\n raise TypeError(\"Must be a MinHash!\")\n return self._methodcall(lib.kmerminhash_count_common, other._get_objptr(), downsample)",
"def costFunction(a, b):\n pairs = [('G', 'C'), ('C', 'G'), ('A', 'U'), ('U', 'A')]\n\n if UNCOMMON:\n pairs.append([('G', 'U'), ('U', 'G')])\n\n if (a, b) in pairs:\n return 1\n return 0",
"def countMatches(g1, g2):\n if g1 is None or g2 is None or len(g1) == 0 or len(g1[0]) == 0: # sanity check\n return 0\n count = 0\n for i in range(len(g1)):\n for j in range(len(g1[0])):\n if g1[i][j] == g2[i][j] == 1 and search_grid(g1, g2, i, j):\n count = count + 1\n return count",
"def intersection_size(comm_1, comm_2):\n if comm_1 == MPI.COMM_NULL or comm_2 == MPI.COMM_NULL:\n return None\n group_1 = comm_1.Get_group()\n group_2 = comm_2.Get_group()\n inter_group = MPI.Group.Intersect(group_1, group_2)\n return inter_group.Get_size()",
"def get_common_friends(user, friends, friends_of_friends, data):\n common_friends_list = {}\n friends_set = set(friends)\n for friend_of_friend in list(set(friends_of_friends)):\n if int(friend_of_friend) != user and friend_of_friend not in friends:\n friend_of_friend_list = get_friends(friend_of_friend, data)\n score = len(list(friends_set.intersection(friend_of_friend_list)))\n if score in common_friends_list:\n common_friends_list[score].append(friend_of_friend)\n else:\n common_friends_list[score] = [friend_of_friend]\n return common_friends_list",
"def user_vs_vehicle_comparison(allotment, rates, vehicle_rate):\n\tnumber_of_users = 0\n\tfor user in allotment:\t\t\n\t\tif rates[user] >= vehicle_rate:\n\t\t\tnumber_of_users += 1\n\n\treturn number_of_users",
"def entity_relatedness(self, a, b):\n occ_a = self.occurrences(a)\n occ_b = self.occurrences(b)\n occ_common = occ_a.intersection(occ_b)\n\n try:\n logmax = max(len(occ_a), len(occ_b))\n logmin = min(len(occ_a), len(occ_b))\n logint = len(occ_common)\n return (logmax - logint) / (self.LOGW - logmin)\n except ValueError:\n return 0.0",
"def _compute_user_similarity(self, user1, user2):\n return self._compute_pearson(user1, user2)",
"def matching_score(self,set1, set2):\n set_set1=set(set1)\n set_set2=set(set2)\n '''print(\" set_set12\")\n print(set_set1)\n print(set_set2)'''\n return len(set_set1.intersection(set_set2)) ** 2 / (float(len(set1)) * len(set2))\n #return len(set_set1.intersection(set_set2)) / len(set_set1.union(set_set2))",
"def commonCharacterCount(s1, s2):\n return sum(min(s1.count(x),s2.count(x)) for x in set(s1))",
"def count_friends(users):\n all_friends=[]\n for u_dict in users:\n for items in u_dict['friends']:\n all_friends.append(items)\n count = Counter()\n for frnd in all_friends:\n count[frnd]+=1\n return count",
"def people_count(self):\n return len(self.__users)",
"def solution(A, B):\n if 1 not in A and 1 not in B:\n return 1\n combinations = [\n A[:each] + [B[each]] + A[each + 1 :] for each in range(len(A))\n ]\n combinations.append(A)\n score_list = [\n smallest_does_not_occur(sorted(each)) for each in combinations\n ]\n return max(score_list)",
"def friend_overlap(users):\n list_overlap = []\n list_common = []\n m=0\n for i in range(0,len(users)):\n \tfor j in range(i+1,len(users)):\n \t\ts1 = set.intersection(set(users[i].get('friends')), set(users[j].get('friends')))\n \t\tlist_common.append(s1)\n for i in range(0,len(users)):\n for j in range(i+1,len(users)):\n list_overlap.append((users[i]['screen_name'],users[j]['screen_name'],len(list_common[m])))\n m = m + 1\n return sorted(list_overlap, key=lambda x: (x[2]), reverse=True)",
"def hypergraph_common_edges(u, v, hypergraph):\n total = 0\n for e in hypergraph.edges():\n if u in e.elements and v in e.elements:\n total += 1\n return total",
"def people(self):\n count = self.db.query(\n 'select count(id) as people_count from \\\n (select id from staff union all select id from fellows)')\n return count.all()[0]['people_count']",
"def commonality(left_struc, right_struc):\n assert type(left_struc) is type(right_struc), (left_struc, right_struc)\n assert left_struc and right_struc, (left_struc, right_struc)\n if type(left_struc) is dict:\n (overlap, left, right) = compute_keysets(left_struc, right_struc)\n com = float(len(overlap))\n tot = len(overlap.union(left, right))\n else:\n assert type(left_struc) in (list, tuple), left_struc\n com = 0.0\n for elem in left_struc:\n if elem in right_struc:\n com += 1\n tot = max(len(left_struc), len(right_struc))\n\n return com / tot"
] |
[
"0.81366605",
"0.64713997",
"0.6242102",
"0.6077465",
"0.607496",
"0.58812374",
"0.58194506",
"0.5770734",
"0.576868",
"0.5764771",
"0.5734131",
"0.5663875",
"0.5648639",
"0.56372166",
"0.56187195",
"0.5590245",
"0.55224013",
"0.5519934",
"0.5516126",
"0.5513296",
"0.55109286",
"0.55034953",
"0.5494345",
"0.5489183",
"0.54816103",
"0.5478798",
"0.5473318",
"0.5469579",
"0.5463354",
"0.5451894"
] |
0.7500884
|
1
|
Finds a connection path from user_A to user_B. It has to be an existing path, but it DOES NOT have to be the shortest path.
|
def find_path_to_friend(network, user_A, user_B, path=None):
if path is None:
path = []
if user_A in network and user_B in network:
path.append(user_A)
current_connections = get_connections(network, user_A)
if user_B in current_connections:
return [user_A, user_B]
for u in current_connections:
if u not in path:
next_path = find_path_to_friend(network, u, user_B, path)
if next_path:
return [user_A] + next_path
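
# Minimal usage sketch (assumptions: connections live under
# network[user]['connections'], and the small get_connections helper below is
# supplied only to make the example self-contained).
def get_connections(network, user):
    return network[user]['connections']

network = {
    'alice': {'connections': ['bob']},
    'bob': {'connections': ['carol']},
    'carol': {'connections': []},
}
print(find_path_to_friend(network, 'alice', 'carol'))  # ['alice', 'bob', 'carol']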
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def create_path(network, user_A, user_B, path=[]):\n path = path + [user_A] # all paths include starting node\n if user_A == user_B: # id the last node is user_B a valid path exists\n return path # base case\n for node in network[user_A][0]:\n if node not in path: # otherwise path is an infinite loop\n path = create_path(network, node, user_B, path)\n if path: # after the recursion hits the base case\n return path\n return None",
"def find_path_to_friend(network, user_A, user_B):\n if user_A not in network or user_B not in network:\n return None\n # if both users exist there may be a path\n return create_path(network, user_A, user_B)",
"def user_path(a, b):\n tx = cypher_transaction()\n\n # Limit the number of relationships in the path?\n # p = shortestPath((a)-[*..15]-(b))\n query = \"\"\"\n MATCH\n (a:user {username:{username_a}}),\n (b:user {username:{username_b}}),\n p = shortestPath((a)-[]->(b))\n RETURN LENGTH(p), p\n \"\"\"\n params = {\n 'username_a': a['username'],\n 'username_b': b['username']\n }\n tx.append(query, parameters=params)\n results = _first(tx.commit())\n paths = []\n for record in results:\n length, path = record.values\n m = \"There are {0} hops from {1} to {2}:\\n\"\n print(m.format(length, a['name'], b['name']))\n for rel in path.relationships:\n print(\" ({0})-[:{1}]->({2})\".format(\n rel.start_node['name'],\n rel.type,\n rel.end_node['name']\n ))\n paths.append(path)\n return paths",
"def add_connection(network, user_A, user_B):\n if user_A not in network or user_B not in network:\n return False\n if user_B not in network[user_A][0]:\n network[user_A][0].append(user_B)\n return network[user_A][0]",
"def _find_path(self, node1, node2, path=[]):\r\n\r\n path = path + [node1]\r\n if node1 == node2:\r\n return path\r\n if node1 not in self._graph:\r\n return None\r\n for node in self._graph[node1]:\r\n if node not in path:\r\n new_path = self._find_path(node, node2, path)\r\n if new_path:\r\n return new_path\r\n return None",
"def _path(from_object, to_object):\n\n if from_object._root != to_object._root:\n raise ValueError(\"No connecting path found between \" +\n str(from_object) + \" and \" + str(to_object))\n\n other_path = []\n obj = to_object\n while obj._parent is not None:\n other_path.append(obj)\n obj = obj._parent\n other_path.append(obj)\n object_set = set(other_path)\n from_path = []\n obj = from_object\n while obj not in object_set:\n from_path.append(obj)\n obj = obj._parent\n index = len(from_path)\n i = other_path.index(obj)\n while i >= 0:\n from_path.append(other_path[i])\n i -= 1\n return index, from_path",
"def get_all_social_paths(self, user_id):\n visited = {} # Note that this is a dictionary, not a set\n # !!!! IMPLEMENT ME\n # graphs=Graph()\n # for i in self.users:\n # graphs.add_vertex(i)\n \n # for i in self.users:\n # for x in self.friendships[i]:\n # graphs.add_edge(i,x)\n\n # for i in graphs.vertices:\n # if graphs.bfs(i,user_id):\n # visited[i]=graphs.bfs(i,user_id)\n queue=Queue()\n queue.enqueue([user_id])\n while queue.size()>0:\n path=queue.dequeue()\n current_user = path[-1]\n if current_user not in visited:\n visited[current_user]=path\n for ID in self.friendships[current_user]:\n new_path=list(path)\n new_path.append(ID)\n queue.enqueue(new_path)\n return visited",
"def path(most_important_up, most_important_down, total_distance, to_source2, to_source1):\n\n if total_distance == min(total_distance, to_source2[0], to_source1[0]):\n return source_to_source(most_important_up, most_important_down), total_distance\n elif to_source2[0] == min(total_distance, to_source2[0], to_source1[0]):\n return most_important_to_source(to_source2[1]), to_source2[0]\n else:\n return most_important_to_source(to_source1[1], up=False), to_source1[0]",
"def get_all_social_paths(self, user_id):\n if len(self.friendships) > 0:\n visited = {}\n q = Queue()\n q.enqueue([user_id])\n\n while q.size() > 0:\n curr_path = q.dequeue()\n curr_vertex = curr_path[-1]\n\n if curr_vertex not in visited:\n visited[curr_vertex] = curr_path\n\n for friend in self.friendships[curr_vertex]:\n path_copy = curr_path[:]\n path_copy.append(friend)\n q.enqueue(path_copy)\n\n return visited\n\n else:\n print(\"There are currently no friendship paths in the network\")",
"def path(self, first, second):\r\n if not((0 <= first < self.size) and (0 <= second < self.size)):\r\n raise ValueError(\"Cannot find distances for nodes not in the graph\")\r\n if first == second:\r\n return 0\r\n dist_tracker = self._perform_dijkstra(first, second)\r\n first_dist = dist_tracker.get_min_distance(first)\r\n second_dist = dist_tracker.get_min_distance(second)\r\n if first_dist == float('inf') or second_dist == float('inf'):\r\n return []\r\n furthest = first if first_dist > second_dist else second\r\n potential_path = dist_tracker.get_min_path(furthest)\r\n if first in potential_path and second in potential_path:\r\n return potential_path\r\n return []",
"def get_connections(network, user):\n if user not in network or network[user][0] == []:\n return None\n return network[user][0]",
"def get_all_social_paths(self, user_id):\n visited = {} # Note that this is a dictionary, not a set\n # !!!! IMPLEMENT ME\n # BFTs starting at user_id, return first path to every reachable person\n q = [[user_id]]\n while q:\n path = q.pop(0)\n person = path[-1]\n # add the person and the path to the person\n for friend in self.friendships[person]:\n if friend not in visited and friend != user_id:\n q.append(path + [friend])\n visited[friend] = path + [friend]\n\n return visited",
"def add_connection(network, user_A, user_B):\n if user_A not in network or user_B not in network:\n return False\n if not user_B in network[user_A]['connections']:\n network[user_A]['connections'].append(user_B)\n return network",
"def find_candidate_paths(G, Alice, Bob):\r\n min_path_length = nx.shortest_path_length(G, source=Alice, target=Bob, weight='weight')\r\n candidate_paths_gen = nx.all_simple_paths(G, source=Alice, target=Bob)\r\n\r\n candidate_paths = [tuple(candidate_paths)\r\n for candidate_paths in candidate_paths_gen]\r\n return candidate_paths, min_path_length",
"def find_path(self, start_vertex, end_vertex, path=[]):\n\n graph = self.__graph_dict\n path = path + [start_vertex]\n if start_vertex == end_vertex:\n return path\n\n if start_vertex not in graph:\n return None\n\n for vertex in graph[start_vertex]:\n if vertex not in path:\n extended_path = self.find_path(vertex, end_vertex,path)\n if extended_path:\n return extended_path\n return None",
"def find_path(sources, goals, connections):\n visited = set()\n expanded = set()\n queue = deque()\n\n for s in sources:\n queue.appendleft([s])\n\n while queue:\n path = queue.pop()\n head = path[-1]\n visited.add(head)\n\n neighbours = [o for (i, o) in connections if i == head]\n for neighbour in neighbours:\n if neighbour in goals:\n return path + [neighbour]\n elif neighbour not in visited:\n queue.appendleft(path + [neighbour])\n\n return []",
"def compute_user_pathway(user_jobs, job_graph, debug=False):\r\n pathway = []\r\n for i, job in enumerate(user_jobs):\r\n if i == 0:\r\n continue\r\n cluster, _ = job_graph.assign_job_to_jobgraph_state(job)\r\n pathway.append(cluster)\r\n return pathway",
"def compute_pathways(users, job_graph, debug, min_likelihood_thr=0.2):\r\n start_time = time.time()\r\n __print_msg('Computing career pathways...', debug)\r\n user_pathways = {}\r\n tot_users = len(users)\r\n i = 0\r\n for user, user_jobs in users.items():\r\n user_pathway = compute_user_pathway(user_jobs, job_graph)\r\n recommended_pathway = recommend_pathway(user_jobs, job_graph, user_pathway[-1], min_likelihood_thr)\r\n user_pathways[user] = (user_pathway, recommended_pathway)\r\n i += 1\r\n if i % 1000 == 0:\r\n __print_msg('Num users processed: {}/{}'.format(i, tot_users), debug)\r\n end_time = time.time()\r\n __print_msg('Execution time: {} seconds'.format(end_time - start_time), debug)\r\n return user_pathways",
"def findRoute(self, x1, y1, x2, y2):\r\n\r\n\t\t# Check to see if the start and end node are the same\r\n\t\tif x1 == x2 and y1 == y2:\r\n\t\t\treturn [(x1, y1)]\r\n\r\n\t\troot_node = DijkstraNode(x1, y1, None, 0)\r\n\t\troot_node.neighbours = self.getNeighbours(x1, y1)\r\n\r\n\t\t# Create a dictionary to store all of the nodes\r\n\t\tall_nodes = {(x1, y1): root_node}\r\n\t\t# If no starting place is found return nothing\r\n\t\tif len(root_node.neighbours) == 0:\r\n\t\t\treturn []\r\n\t\tcurrent_node = root_node\r\n\t\twhile (x2, y2) not in all_nodes:\r\n\r\n\t\t\t# If the algorithm hasn't found the target node and cannot explore further then return empty path\r\n\t\t\tif current_node is None:\r\n\t\t\t\treturn []\r\n\r\n\t\t\tcurrent_node.neighbours = self.getNeighbours(current_node.x, current_node.y)\r\n\r\n\t\t\t# The distance from the root node through the current node to the neighbour\r\n\t\t\tcurrent_neighbour_dist = current_node.dist + 1\r\n\r\n\t\t\tfor neighbour in current_node.neighbours:\r\n\t\t\t\tif neighbour in all_nodes:\r\n\t\t\t\t\tneighbour_node = all_nodes[neighbour]\r\n\t\t\t\t\tif current_neighbour_dist < neighbour_node.dist:\r\n\t\t\t\t\t\t# The new best path is through the current node\r\n\t\t\t\t\t\tneighbour_node.parent = current_node\r\n\t\t\t\t\t\tneighbour_node.dist = current_neighbour_dist\r\n\t\t\t\telse:\r\n\t\t\t\t\t# Add a new node if it doesn't exist within the currently explored nodes\r\n\t\t\t\t\tall_nodes[neighbour] = DijkstraNode(neighbour[0], neighbour[1], current_node, current_neighbour_dist)\r\n\r\n\t\t\t# Mark the current node as being explored as you have checked all the neighbours\r\n\t\t\tcurrent_node.explored = True\r\n\r\n\t\t\t# Gets a list of all of the unexplored nodes to check for the next node to explore\r\n\t\t\tunexplored_nodes = [node for _, node in all_nodes.items() if not node.explored]\r\n\r\n\t\t\tif len(unexplored_nodes) > 0:\r\n\t\t\t\t# Go to the next node with the smallest distance that hasn't been explored\r\n\t\t\t\tcurrent_node = min(unexplored_nodes, key=lambda node: node.dist)\r\n\t\t\telse:\r\n\t\t\t\tcurrent_node = None\r\n\r\n\t\t# Make your way back from the target node\r\n\t\tcurrent_node = all_nodes[(x2, y2)]\r\n\t\t# Initialise a list to hold the path going from the target to the root\r\n\t\treversed_path = []\r\n\t\t# This will end when the root node tries to travel to a None node\r\n\t\twhile current_node is not None:\r\n\t\t\t# Add the current node to the list\r\n\t\t\treversed_path.append((current_node.x, current_node.y))\r\n\t\t\t# Travel to the parent node\r\n\t\t\tcurrent_node = current_node.parent\r\n\t\t\t# current_node will be None at the root because the parent of the root node is 'None'\r\n\r\n\t\t# Return the list in the correct order\r\n\t\treturn list(reversed(reversed_path))",
"def Option2_routing(self, S, D, L):\n if self.has_path(S, D): \n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w') \n Opt_path = Shortest_path\n path_cost_with_weighted_sum = self.calculate_path_cost_with_weighted_sum(Shortest_path, 'c1', 'c2')\n return path_cost_with_weighted_sum, Opt_path\n\n while len(Shortest_path) != 0:\n path_cost = self.additive_path_cost(Shortest_path, 'w') \n #self.logger.info('Path cost - %d', path_cost)\n if path_cost <= L:\n \"\"\"go to path cost with weighted sum\"\"\"\n path_cost_with_weighted_sum = self.calculate_path_cost_with_weighted_sum(Shortest_path, 'c1', 'c2')\n self.G = self.rm_edge_constraint(path_cost_with_weighted_sum) # remove all links where the concave link is greater than PathConcave_cost \n Opt_path = Shortest_path\n if self.has_path(S, D):\n Shortest_path = nx.dijkstra_path(self.G, S, D, weight='w')\n else:\n Shortest_path = [] \n else:\n break \n else:\n self.logger.info('No path from %s to %s', S, D)\n Opt_path = []\n path_cost_with_weighted_sum = 0\n return path_cost_with_weighted_sum, Opt_path",
"def bi_dijkstra(graph, source1, source2):\n\n graph_up, graph_down, H_up, H_down = framework(graph, source1, source2)\n\n found_source2_after = [np.Inf, None]\n found_source1_after = [np.Inf, None]\n min_down = None\n while not H_up.is_empty() or H_down.is_empty():\n\n min_up = H_up.remove_min()\n\n # if newly extracted node from H_up is equal to destination,end process,\n # taking into account also previously stored distances and nodes of the \n # destination found in some adjacent lists\n if min_up == graph.Dictionary[source2]:\n\n return one_way_path(min_up, min_up.distance, found_source2_after, found_source1_after)\n \n for passage, vertice, weight in min_up.adj_list:\n\n bi_relax(H_up, min_up, graph_up.Dictionary[vertice], weight, passage, graph_up)\n \n # if destination is in the adjacent list, take that into account storing distance and node\n if vertice == source2:\n found_source2_after = [graph_up.Dictionary[vertice].distance, copy.deepcopy(graph_up.Dictionary[vertice])]\n\n if min_down is not None:\n # if the newly extracted node from H_up is equal to the previously extracted node in H_down, end process\n # taking into account also previously stored distances and nodes of the destination found in some adjacent lists\n if min_up == min_down:\n\n return path(min_up, min_down, min_up.distance + min_down.distance, found_source2_after, found_source1_after)\n\n min_down = H_down.remove_min()\n\n # if newly extracted node from H_down is equal to destination, end process\n # taking into account also previously stored distances and nodes of the \n # destination found in some adjacent lists\n if min_down == graph.Dictionary[source1]:\n\n return one_way_path(min_down, min_down.distance, found_source2_after, found_source1_after)\n \n for passage, vertice, weight in min_down.ancestors:\n\n bi_relax(H_down, min_down, graph_down.Dictionary[vertice], weight, passage, graph_down)\n \n # if destination is in the adjacent list, take that into account storing distance and node\n if vertice == source1:\n found_source1_after = [graph_down.Dictionary[vertice].distance, copy.deepcopy(graph_down.Dictionary[vertice])]\n\n # if the extracted nodes from H_up and H_down, end process taking into account also \n # previously stored distances and nodes of the destination found in some adjacent lists\n if min_up == min_down:\n\n return path(min_up, min_down, min_up.distance + min_down.distance, found_source2_after, found_source1_after)",
"def one_way_path(most_important, total_distance, to_source2, to_source1):\n\n if total_distance == min(total_distance, to_source2[0], to_source1[0]):\n return most_important_to_source(most_important), total_distance\n elif to_source2[0] == min(total_distance, to_source2[0], to_source1[0]):\n return most_important_to_source(to_source2[1]), to_source2[0]\n else:\n return most_important_to_source(to_source1[1], up=False), to_source1[0]",
"def paths(self, source, target):\n assert source in self.node_map\n assert target in self.node_map\n if has_path(self.G2, source, target):\n return nx.all_simple_paths(self.G2, source=source, target=target)\n return None",
"def find_path(self, start, end, path=[]):\n path = path+[start]\n if start == end:\n return path\n for node in self.graph[start]:\n if node not in path:\n newpath = self.find_path(node, end, path)\n if newpath:\n return newpath\n return None",
"def getAllSocialPaths(self, userID):\n visited = {}\n # use a queue\n q = []\n q.append([userID])\n # add userID as its own key and value to visited\n visited[userID] = [userID]\n\n while len(q) > 0:\n path = q.pop(0)\n curr_friend = path[-1]\n\n # for all the userID keys inside self.friendships\n for friend in self.friendships[curr_friend]:\n # add neighbor as a key, if not visited, in visited with an empty list as value\n if friend not in visited:\n visited[friend] = list()\n # break out of loop if already in visited\n else: \n continue\n \n # create a new list that holds the path from userID to friend\n friend_path = list(path)\n # add the friend onto the end of the list\n friend_path.append(friend)\n # also add path to the queue\n q.append(friend_path) \n # add path as the value to the friend\n visited[friend].extend(friend_path)\n \n return visited",
"def calc_path_2_ORCIDs(path=curr,node1=None,node2=None):\n\n with open(path + '/' + 'ORCID_graph.pkl', 'rb') as f:\n G = pickle.load(f)\n\n if (node1 is None) or (node2 is None):\n with open(path + '/' + 'centrality.csv', 'rb') as f:\n centrality = csv.reader(f, delimiter='\\t')\n rn = 0\n for row in centrality:\n if rn == 0:\n tmp1 = row\n rn += 1\n elif rn == 1:\n tmp2 = row\n rn += 1\n else:\n break\n if node1 is None:\n node1 = tmp1[0]\n if node2 is None:\n node2 = tmp2[0]\n\n try:\n short_path = nx.algorithms.shortest_paths.generic.shortest_path(G, source=node1,target=node2)\n except:\n return []\n\n return short_path",
"def connected( self, u, v ):\n try:\n self.shortestPath(u, v)\n return True\n except nx.NetworkXNoPath:\n return False",
"def findPath(self, source, destination, vertices, edges):\n connectedNodes = [vertices[index] for index, edge in enumerate(edges[source['index']]) if edge == 1]\n for node in connectedNodes:\n childConnectedNodes = [vertices[index] for index, edge in enumerate(edges[node['index']]) if edge == 1]\n for childNode in childConnectedNodes:\n if childNode['value'] == destination['value']:\n return node\n return None",
"def find_vertex_path(self, vertex_id1, vertex_id2, as_network_object):\n v1 = self.graph.vertex(vertex_id1)\n v2 = self.graph.vertex(vertex_id2)\n vertices, edges = graph_tool.topology.shortest_path(self.graph, v1, v2, weights=self.edge_weights)\n if v1 == v2:\n vertices = [v1, v1]\n if not as_network_object:\n vertices = [self.graph.vertex_index[vertex] for vertex in vertices]\n edges = [self.graph.edge_index[edge] for edge in edges]\n\n return vertices, edges",
"def dfs(self, starting_vertex, destination_vertex):\n visited = set()\n paths = [[starting_vertex]]\n \"\"\"\n While the length of possible paths is not zero. \n Store the current path and remove it from possible \n paths. Return the last path if it's the destination. \n If the path hasn't been visited yet add it to the \n visited list and loop over it's edges creating paths \n to check later. \n \"\"\"\n while len(paths) > 0:\n path = paths.pop(-1)\n vertex = path[-1]\n if vertex == destination_vertex:\n return path\n if vertex not in visited:\n visited.add(vertex)\n for key in self.get_neighbors(vertex):\n newPath = path + [key]\n paths.append(newPath)"
] |
[
"0.7669256",
"0.74481094",
"0.7430099",
"0.6393886",
"0.6260245",
"0.62537074",
"0.6090832",
"0.60771996",
"0.6049806",
"0.5955868",
"0.59534496",
"0.5941864",
"0.5919209",
"0.5871891",
"0.5821266",
"0.5804118",
"0.5768927",
"0.57547647",
"0.57235235",
"0.5713709",
"0.5698482",
"0.5696851",
"0.56866187",
"0.56802994",
"0.5668874",
"0.5630995",
"0.5628979",
"0.5624369",
"0.5610346",
"0.5578562"
] |
0.8000221
|
0
|
Filters users by a liked game.
|
def users_by_game(network, game):
result = []
for user in network:
if game in get_games_liked(network, user):
result.append(user)
if result == []:
return None
return result
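
# Minimal usage sketch (assumptions: liked games live under network[user]['games'],
# and the small get_games_liked helper below exists only so the example runs).
def get_games_liked(network, user):
    return network[user].get('games', [])

network = {
    'alice': {'games': ['Go', 'Chess']},
    'bob': {'games': ['Chess']},
}
print(users_by_game(network, 'Chess'))  # ['alice', 'bob']
print(users_by_game(network, 'Poker'))  # None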
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_games_liked(network, user):\n if not user in network:\n return None\n if not 'games' in network[user]:\n return []\n return network[user]['games']",
"def get_games_liked(network, user):\n if user not in network or network[user][1] == []:\n return None\n return network[user][1]",
"def get_meals_user_liked(username):\n meals_user_liked = []\n user_liked = Rating.objects.filter(member__username=username, like=True)\n for ratting in user_liked:\n meals_user_liked.append(ratting.meal)\n return meals_user_liked",
"def liked_by(self, user):\n return Likes.objects.filter(recipe=self, chef=user).exists()",
"def games_for_user(self, user):\n return super(GamesManager, self).get_queryset().filter(\n Q(first_player_id=user.id) | Q(second_player_id=user.id))",
"def likes(self):\n return self.get_queryset().filter(vote__gt=0)",
"def users_likes(user_id):\n\n if not g.user:\n flash(\"Access unauthorized.\", \"danger\")\n return redirect(\"/\")\n\n users_blocking = [block.user_blocking_id for block in Blocks.query.all() if block.user_being_blocked_id == g.user.id]\n # likes = Message.query.filter(Message.user_id.notin_(users_blocking)).all()\n user = User.query.get_or_404(user_id)\n likes = [message for message in user.likes if message.user_id not in users_blocking]\n return render_template('users/likes.html', user=user, likes=likes)",
"def get_user_liked(user, status):\n return models.Favorite.objects.filter(user=user, status=status).exists()",
"def get_user_has_liked(self, instance):\n request = self.context.get(\"request\")\n return instance.likes.filter(pk=request.user.pk).exists()",
"def get_user_has_liked(self, instance):\n request = self.context.get(\"request\")\n return instance.likes.filter(pk=request.user.pk).exists()",
"def user_playing(self, user):\n if not user:\n return False\n query = db.Query(GamePlayer)\n query.filter('game =', self)\n query.filter('user =', user)\n return query.get()",
"def get_all_likes(obj):\n\t\tobj_type = ContentType.objects.get_for_model(obj)\n\t\treturn User.objects.filter(\n\t\t\tlikes_content_type=obj_type, likes_object_id=obj.id)",
"def __user_interested_post_filter(self, avg_user_liked_post_weight):\n query = Post.objects.extra(select={'is_top': \"weight = \" + str(avg_user_liked_post_weight)})\n resultant_obj = query.extra(order_by = ['-is_top'])\n return resultant_obj",
"def user_moderating(self, user):\n if not user: return False\n query = db.Query(GameModerator)\n query.filter('game =', self)\n query.filter('user =', user)\n return query.get()",
"def getLikedOkCupidUsers(self):\n\t\tself.logger.info(\"Get all liked OkCupid users\")\n\t\tusers = self.session.query(Models.Okcupid).filter(Models.Okcupid.liked==True).all()\n\t\treturn users",
"def test_filter_users_like(app, add_ten_users):\n with app.app_context():\n add_ten_users()\n users = User.query\n users = apply_filter(users, User,\n {'column': 'username', 'type': 'like',\n 'value': '%name_1%'})\n result = users.all()\n assert len(result) == 1",
"def get_likes_list(self, username):\n api = self.api\n api.searchUsername(username) \n result = api.LastJson\n username_id = result['user']['pk'] #Gets the user ID\n user_posts = api.getUserFeed(username_id) # gets the user feed\n result = api.LastJson\n media_id = result['items'][0]['id'] #gets the most recent post\n api.getMediaLikers(media_id) #gets users who liked\n users = api.LastJson('users')\n for user in users: #appends the users to the list\n users_list.append({'pk':user['pk'], 'username':user['username']})",
"def filter_query_with_visible_players(queryset, user):\n if is_in_group(user, 'coach'):\n return queryset.filter(team__coach=user.id)\n\n elif is_in_group(user, 'admin'):\n return queryset",
"def like_cafes():\n if CURR_USER_KEY in session:\n cafe_id = request.args.get(\"cafe_id\")\n user = g.user\n print(user.id)\n print(user.likes)\n for like in user.likes:\n if like.cafe_id == int(cafe_id or 0):\n print(\"get True\")\n return jsonify({\"likes\": True})\n\n return jsonify({\"likes\": False})\n\n return jsonify({\"error\": \"Not logged in\"})",
"def is_liked(obj, user) ->bool:\n\tif not user.is_authenticated:\n\t\treturn False\n\tobj_type = ContentType.objects.get_for_model(obj):\n\tlikes = Like.objects.filter(\n\t\tcontent_type = obj_type, object_id=obj.id, user=user)\n\treturn likes.exists()\n\n\tdef get_all_likes(obj):\n\t\t\"\"\"\n\t\t\tGets all users, who liked object\n\t\t\"\"\"\n\t\tobj_type = ContentType.objects.get_for_model(obj)\n\t\treturn User.objects.filter(\n\t\t\tlikes_content_type=obj_type, likes_object_id=obj.id)",
"def get_user_games_moderating(user):\n if not user: return []\n moderating = db.Query(GameModerator).filter('user =', user)\n return [m.game for m in moderating]",
"def get_user_likes(self, data_base):\n cursor = data_base.cursor(dictionary=True)\n cursor.execute(f\"SELECT user_id FROM user_like WHERE post_id = {self.id}\")\n user_likes = tuple(map(lambda x: str(x['user_id']), cursor.fetchall()))\n if not user_likes:\n return []\n cursor.execute(f\"SELECT username FROM user WHERE id IN ({', '.join(user_likes)})\")\n users = cursor.fetchall()\n cursor.close()\n return list(map(lambda x: x['username'], users))",
"def is_liked(value, user: User):\n return value.is_liked(user)",
"def get_users_from_likes(self, type, owner_id, item_id):\n likes = self.vk.likes.getList(type=type, owner_id=owner_id, item_id=item_id, count=1)\n likes = self.vk.likes.getList(type=type, owner_id=owner_id, item_id=item_id, count=likes['count'])\n return likes['items']",
"def get_recommended_games(user):\n games = list(Game.objects.all())\n games.sort(key = lambda x: abs(x.level-user.level))\n return games[0:5]",
"def get_user_games_playing(user):\n if not user: return []\n playing = db.Query(GamePlayer).filter('user =', user)\n return [p.game for p in playing]",
"def get_user_games(self, request):\n user = User.query(User.name == request.user_name).get()\n if not user:\n raise endpoints.NotFoundException(\n 'A User with that name does not exist!')\n games = Game.query(Game.user == user.key).filter(Game.game_over == False)\n return GameForms(items=[game.to_form('Active games for this user are...!') for game in games])",
"def get_meals_user_disliked(username):\n meals_user_disliked = []\n user_disliked = Rating.objects.filter(member__username=username, like=False)\n for ratting in user_disliked:\n meals_user_disliked.append(ratting.meal)\n return meals_user_disliked",
"def recommend_for_user(self, R, user, n=10, filter_previously_seen=False,\n return_scores=True, **kwargs):",
"def get_all_game_players_but_indicated(self, user):\n return GamePlayer.objects.filter(Q(game=self) & ~Q(player=user))"
] |
[
"0.6602238",
"0.6027898",
"0.5924964",
"0.59089816",
"0.5883662",
"0.5883414",
"0.5834347",
"0.58041096",
"0.5760458",
"0.5760458",
"0.574534",
"0.5715337",
"0.57011193",
"0.5692843",
"0.5640665",
"0.5617214",
"0.5617123",
"0.55867946",
"0.5584217",
"0.5572285",
"0.5558541",
"0.5557116",
"0.55471957",
"0.5528562",
"0.5518737",
"0.549537",
"0.5493351",
"0.5459377",
"0.5444466",
"0.54368764"
] |
0.6763938
|
0
|
Removes a user from the network, including from every other user's connections.
|
def delete_user(network, user):
if user in network:
del network[user]
for u in network:
connections = get_connections(network, u)
if user in connections:
i = connections.index(user)
del connections[i]
return network
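
# Minimal usage sketch (assumptions: connections live under
# network[user]['connections']; get_connections is a stand-in helper so the
# example is self-contained).
def get_connections(network, user):
    return network[user]['connections']

network = {
    'alice': {'connections': ['bob', 'carol']},
    'bob': {'connections': ['alice']},
    'carol': {'connections': ['alice', 'bob']},
}
delete_user(network, 'bob')
print(network)  # {'alice': {'connections': ['carol']}, 'carol': {'connections': ['alice']}}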
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def remove(self, user):\n self.packet.send_room([\"rp\", user.get_int_id(self.rooms),\n user.data.id], user.room)\n self.rooms[user.room][\"users\"].remove(user)",
"def remove_user(self, u: \"Node\") -> None:\n\n if u in self.users_:\n self.users_[u] -= 1\n if self.users_[u] == 0:\n del self.users_[u]",
"def del_user(self, username):\n pass",
"def del_user(self, name):\n del self.users[irc.strings.IRCFoldedCase(modules.trim_nick(name))]",
"def remove_user(self, username):\n del self.user_table[username]",
"def view_remove_user(self, user, username):\r\n user.realm._checker.removeUser(username)",
"def remove_user(username):\n db, c = config.start_db()\n c.execute(\n 'DELETE FROM users WHERE username=?',\n (username,)\n )\n config.end_db(db)",
"def sipserver_user_remove(self, user: str) -> None:\n self.remove_endpoint_from_sipserver(endpoint=user)",
"def delete_user(self):\n User.user_list.remove(self)",
"def delete_user(self):\n User.user_list.remove(self)",
"def delete_user(self):\n User.user_list.remove(self)",
"async def del_user(conn: LDAPConnection, user: dict, mailman: Client) -> None:\n await conn.delete(user[\"dn\"])\n uid = user[\"attributes\"][\"uid\"][0]\n rmtree(user[\"attributes\"][\"homeDirectory\"][0])\n rmtree(f\"/webtree/{uid[:1]}/{uid}\")\n mailing_list = mailman.get_list(\"announce-redbrick\")\n mailing_list.unsubscribe(f\"{uid}@redbrick.dcu.ie\")",
"def remove_user(users, curr_username, user_role, request_ip):\n #TODO: error checking\n log_connector.add_log('DELETE USER', \"Removed {} user(s)\".format(len(users)), curr_username, user_role, request_ip)\n user_connector.remove_user(users)",
"def delete_user(self, user):\n self.delete(user)",
"def disconnect_user(self, user):\n\t\tis_user_removed = False\n\t\tif user in self.users.all():\n\t\t\tself.users.remove(user)\n\t\t\tself.save()\n\t\t\tis_user_removed = True\n\t\treturn is_user_removed",
"def delete_user(self, user):\n # noinspection PyUnresolvedReferences\n self.delete(user)",
"def delete_user(self):\n\n User.user_list.remove(self)",
"def remove(self, user):\r\n url = '{0}/{1}'.format(self.get_url(), user)\r\n\r\n return http.Request('DELETE', url), parsers.parse_empty",
"def remove_user(self, username):\n params = {\n \"f\" : 'json',\n \"username\" : username\n }\n uURL = self._url + \"/users/remove\"\n return self._con.post(path=uURL, postdata=params)",
"def remove_user(self, workspace, params={}, **options):\n path = \"/workspaces/%s/removeUser\" % (workspace)\n return self.client.post(path, params, **options)",
"def remove_user(self):\n self.currentuser = None\n self.carlocked = False",
"def remove_user(user):\n # user.confirmed = False\n # user = get_user_by_phone(phone_num)\n db.session.delete(user)\n db.session.commit()\n\n return user\n # DELETE FROM users WHERE user.phone_num == phone)",
"def delete_user(self, user):\n try:\n with dbm.open(self.dbm_path, 'c', 0o600) as db:\n del db[user.name]\n except KeyError as k:\n pass",
"def fusion_api_remove_user(self, name=None, uri=None, api=None, headers=None):\n return self.user.delete(name, uri, api, headers)",
"def del_user(self, server, username, quiet=False):\n self._op_user(\"del\", server, {\"username\": username}, quiet)",
"def remove_user(self, user_id):\n if user_id in self:\n user = self[user_id]\n del self[user_id]\n return user",
"def remove(self, user_id):\n pass",
"def remove_user(self, username):\n u = self.get_user(username)\n self.s.query(User).filter(User.username == username).delete()\n if u is not None:\n self.s.commit()\n return True # Deleted\n return None # User not found",
"def remove(self, user):\n if user != self.head:\n user.group = None\n user.save()\n self.players.remove(user)",
"def remove_user(username):\n\n if not g.user:\n flash(\"Access unauthorized.\", \"danger\")\n return redirect(\"/\")\n\n do_logout(username)\n\n db.session.delete(g.user)\n db.session.commit()\n\n return redirect(\"/register\")"
] |
[
"0.77904415",
"0.7550601",
"0.7473682",
"0.74685127",
"0.7452049",
"0.7438654",
"0.731242",
"0.7298207",
"0.722409",
"0.722409",
"0.722409",
"0.7219383",
"0.7191735",
"0.7148766",
"0.70995563",
"0.70881975",
"0.7084012",
"0.7063465",
"0.70371556",
"0.7036402",
"0.70335364",
"0.7014078",
"0.7013299",
"0.70122784",
"0.6975458",
"0.69601357",
"0.6957532",
"0.6937985",
"0.6919107",
"0.69060016"
] |
0.80548996
|
0
|
Checks the type and dimensionality of an input signal. If the signal is the first input signal, its dimension is stored as self.dim, so this method has to be called before calling any functions that use self.dim.
|
def __check_signal(self, signal):
if not(isinstance(signal, np.ndarray)):
raise TypeError()
if len(signal.shape) != 1:
raise TypeError()
if not(hasattr(self, 'dim')):
self.dim = signal.shape[0]
else:
if signal.shape[0] != self.dim:
raise TypeError()
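
# Minimal runnable sketch (assumption: in the original code this is a method on a
# class; a bare namespace object stands in for `self` here so the checks can be tried).
import numpy as np
from types import SimpleNamespace

state = SimpleNamespace()
__check_signal(state, np.zeros(3))  # first signal: sets state.dim = 3
__check_signal(state, np.zeros(3))  # same dimensionality: passes silently
try:
    __check_signal(state, np.zeros(4))  # dimensionality mismatch
except TypeError:
    print('mismatched signal rejected')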
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __check_signal(self, signal: np.ndarray):\n # if signal is a list, convert it to nparray\n if isinstance(signal, list):\n signal = np.array(signal)\n # if signal is still not nparray, raise error\n if not (isinstance(signal, np.ndarray)):\n print(\"1\") \n # unknown type of input signal\n raise TypeError()\n # if signal is not a vector of shape(nFeature, )\n if len(signal.shape) != 1:\n print(\"2\") \n # input signal has to be a vector\n raise TypeError()\n # set self.dim\n self.dim = signal.shape[0]\n # if self still doesn't has the attribute 'dim', set it.\n if not (hasattr(self, 'dim')):\n self.dim = signal.shape[0]\n else:\n # if dim of signal doesn't match self.dim, raise error\n if signal.shape[0] != self.dim:\n print(\"3\")\n raise TypeError()\n return signal",
"def _validate_dimensionality(self):\r\n\r\n if self.time.ndim != 1:\r\n raise ValueError(\"time array must be one-dimensional\")\r\n npoints = self.data.shape[-1]\r\n if npoints != len(self.time):\r\n raise ValueError(\"mismatch of time and data dimensions\")",
"def check_input_dimension(self, data):\n if len(data[0]) != self.input_dimension:\n raise ValueError(\"Received {} features, expected {}.\".format(self.input_dimension, len(data[0])))",
"def check_input_shape(self, op, block):\n\n ipt_name = op.input(op.input_names[0])\n ipt_shape = block.var(ipt_name).shape\n for i in ipt_shape:\n if i < 0:\n warning_msg = (\n f\"Input {ipt_name}(shape={ipt_shape}) has unkown dimension shapes. \"\n f\"Specifying static values may improve performance\"\n )\n warnings.warn(warning_msg)",
"def _check_sizes(self, space):\n my_dimension = self.get_total_dimension()\n other_dimension = space.get_total_dimension()\n if my_dimension != other_dimension:\n if isinstance(space, Conv2DSpace):\n if my_dimension * space.shape[0] !=\\\n other_dimension:\n raise ValueError(str(self)+\" with total dimension \" +\n str(my_dimension) +\n \" can't format a batch into \" +\n str(space) + \"because its total dimension\\\n is \" +\n str(other_dimension))",
"def _check_dimensions(self) -> None:\n dims = (self.y_dim, self.x_dim)\n da = self._obj[self.vars[0]] if isinstance(self._obj, xr.Dataset) else self._obj\n extra_dims = [dim for dim in da.dims if dim not in dims]\n if len(extra_dims) == 1:\n dims = tuple(extra_dims) + dims\n self.set_attrs(dim0=extra_dims[0])\n elif len(extra_dims) == 0:\n self._obj.coords[GEO_MAP_COORD].attrs.pop(\"dim0\", None)\n elif len(extra_dims) > 1:\n raise ValueError(\"Only 2D and 3D data arrays supported.\")\n if isinstance(self._obj, xr.Dataset):\n check = np.all([self._obj[name].dims == dims for name in self.vars])\n else:\n check = self._obj.dims == dims\n if check == False:\n raise ValueError(\n f\"Invalid dimension order ({da.dims}). \"\n f\"You can use `obj.transpose({dims}) to reorder your dimensions.\"\n )",
"def __DimSiz_restriction_correct_ndarray_number(self):\n\n strTestName = 'The size of a dimension of a Numpy array higher or equal to a number (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimHE('parameter1', 3, 1) # Size of dimension 1 must be higher than 3'\n\n RxCSObject.parameter1 = np.random.randn(3, 3)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def required_input_dim(space: gym.Space, **kwargs) -> int:",
"def get_input_dim(self) -> int:\n raise NotImplementedError",
"def __DimSiz_restriction_incorrect_ndarray_number(self):\n\n strTestName = 'The size of a dimension of a Numpy array lower or equal to a number (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimLE('parameter1', 3, 1) # Size of dimension 1 must be higher than 3'\n\n RxCSObject.parameter1 = np.random.randn(3, 4)\n\n self.__parametersCheck_error(RxCSObject, DimSizError, strTestName)",
"def input_dim(self):\n if hasattr(self, \"_input_dim\"):\n return self._input_dim\n return self.__input_dim",
"def set_io_dims(self, tup):\n # every layer except the first one won't have this one set\n # except it was inforced by the user\n if not hasattr(self, 'input_dims') or self.input_dims[0] is None:\n self.input_dims = tup\n # by default the shapes don't change\n if not hasattr(self, 'output_dims') or self.output_dims[0] is None:\n self.output_dims = tup",
"def check_dims(self, data):\n if np.ndim(data) != 2:\n raise ValueError('Input data must be a two dimensional numpy array. '\n 'Data received has shape (%g, %g).' % data.shape)",
"def check_param(self):\n if scipy.ndim(self.param['initial_heading'].shape) > 1:\n raise(ValueError, 'initial_heading must have ndim=1')\n\n equal_shape_list = ['x_start_position','y_start_position','flight_speed','release_time']\n for item in equal_shape_list:\n if self.param[item].shape != self.param['initial_heading'].shape:\n raise(ValueError, '{0}.shape must equal initial_heading.shape'.format(item))",
"def get_input_dimension(self):\n return self.in_dim",
"def _check_shape(self, obj, expected_shape):\n if self.shape != expected_shape:\n raise ValueError(\n '%s expects internal signal %s to be %s, but it is %s' % (\n obj, self.name, Shaped(expected_shape).describe_shape(),\n self.describe_shape()))",
"def check_input(times,signal,**kwargs):\n #-- check if the input are arrays and have the same 1D shape\n is_array0 = isinstance(times,np.ndarray)\n is_array1 = isinstance(signal,np.ndarray)\n if not is_array0: print(termtools.red('ERROR: time input is not an array'))\n if not is_array1: print(termtools.red('ERROR: signal input is not an array'))\n if not is_array0 or not is_array1:\n times = np.asarray(times)\n signal = np.asarray(signal)\n print(termtools.green(\"---> FIXED: inputs are arrays\"))\n print(termtools.green(\"OK: inputs are arrays\"))\n onedim = (len(times.shape)==1) & (len(signal.shape)==1)\n same_shape = times.shape==signal.shape\n if not onedim or not same_shape:\n print(termtools.red('ERROR: input is not 1D or not of same length'))\n return False\n print(termtools.green(\"OK: inputs are 1D and have same length\"))\n #-- check if the signal constains nans or infs:\n isnan0 = np.sum(np.isnan(times))\n isnan1 = np.sum(np.isnan(signal))\n isinf0 = np.sum(np.isinf(times))\n isinf1 = np.sum(np.isinf(signal))\n if isnan0: print(termtools.red('ERROR: time array contains nans'))\n if isnan1: print(termtools.red('ERROR: signal array contains nans'))\n if isinf0: print(termtools.red('ERROR: time array contains infs'))\n if isinf1: print(termtools.red('ERROR: signal array contains infs'))\n if not isnan0 and not isnan1 and not isinf0 and not isinf1:\n print(termtools.green('OK: no infs or nans'))\n else:\n keep = -np.isnan(times) & -np.isnan(signal) & -np.isinf(times) & -np.isinf(signal)\n times,signal = times[keep],signal[keep]\n print(termtools.green('---> FIXED: infs and nans removed'))\n #-- check if the timeseries is sorted\n is_sorted = np.all(np.diff(times)>0)\n if not is_sorted:\n print(termtools.red('ERROR: time array is not sorted'))\n sa = np.argsort(times)\n times,signal = times[sa],signal[sa]\n print(termtools.green('---> FIXED: time array is sorted'))\n else:\n print(termtools.green(\"OK: time array is sorted\"))\n print(termtools.green(\"No inconsistencies found or inconsistencies are fixed\"))\n \n #-- check keyword arguments:\n fnyq = getNyquist(times,nyq_stat=np.min)\n print(\"Default Nyquist frequency: {}\".format(fnyq))\n if 'nyq_stat' in kwargs:\n fnyq = getNyquist(times,nyq_stat=kwargs['nyq_stat'])\n print(\"Nyquist value manually set to {}\".format(fnyq))\n if 'fn' in kwargs and kwargs['fn']>fnyq:\n print(termtools.red(\"Final frequency 'fn' is larger than the Nyquist frequency\"))\n return times,signal",
"def check_shape(self):\r\n if np.array(self.img).shape != (1536, 2048, 3):\r\n raise BadShape",
"def dimension_check():\n print(\"### DIMENSION CHECK ###\")\n print(X.shape,\n y.shape,\n X_train.shape,\n y_train.shape,\n X_test.shape,\n y_test.shape,\n weights.shape)\n print(\"### END ###\")",
"def _verify_space(self) -> None:\n\n for dimension in self.space.values():\n\n if dimension.type != \"fidelity\" and dimension.prior_name not in [\n \"uniform\",\n \"reciprocal\",\n \"int_uniform\",\n \"int_reciprocal\",\n \"choices\",\n ]:\n raise ValueError(\n \"TPE now only supports uniform, loguniform, uniform discrete \"\n f\"and choices as prior: {dimension.prior_name}\"\n )\n\n shape = dimension.shape\n if shape and len(shape) != 1:\n raise ValueError(\"TPE now only supports 1D shape.\")",
"def _check_dataset(self, dataset):\n if not isinstance(dataset, Dataset):\n raise ValueError('wrong training_set or validation_set are not instances of the nn.Dataset class')\n\n if dataset.inputs.shape[1] != self.arch[0]:\n raise ValueError('dataset inputs shape is inconsistent with number of network input nodes.')\n\n if dataset.targets.shape[1] != self.arch[-1]:\n raise ValueError('dataset targets shape is inconsistent with number of network output nodes.')",
"def give_dim(self):\n smesh = self.get_smesh()\n dim = None\n if smesh.NbNodes():\n if smesh.NbVolumes():\n dim = 3\n elif smesh.NbFaces():\n dim = 2\n elif smesh.NbEdges():\n dim =1\n elif self.has_geom():\n import GEOM\n stype = self.give_geom().get_shape_type()\n if stype in [GEOM.COMPSOLID, GEOM.SOLID]:\n dim = 3\n elif stype in [GEOM.SHELL, GEOM.FACE]:\n dim = 2\n elif stype in [GEOM.WIRE, GEOM.EDGE, GEOM.VERTEX]:\n dim = 1\n return dim",
"def _autocheck_dimensions(self):\n # W dimensions check list\n assert len(self.W.shape) == 2, f\"W shape should be (N, N) but is {self.W.shape}.\"\n assert self.W.shape[0] == self.W.shape[1], f\"W shape should be (N, N) but is {self.W.shape}.\"\n\n # Win dimensions check list\n assert len(self.Win.shape) == 2, f\"Win shape should be (N, input) but is {self.Win.shape}.\"\n err = f\"Win shape should be ({self.W.shape[1]}, input) but is {self.Win.shape}.\"\n assert self.Win.shape[0] == self.W.shape[0], err\n\n # Wout dimensions check list\n assert len(self.Wout.shape) == 2, f\"Wout shape should be (output, nb_states) but is {self.Wout.shape}.\"\n nb_states = self.Win.shape[1] + self.W.shape[0] + 1 if self.use_raw_inp else self.W.shape[0] + 1\n err = f\"Wout shape should be (output, {nb_states}) but is {self.Wout.shape}.\"\n assert self.Wout.shape[1] == nb_states, err\n\n # Wfb dimensions check list\n if self.Wfb is not None:\n assert len(self.Wfb.shape) == 2, f\"Wfb shape should be (input, output) but is {self.Wfb.shape}.\"\n err = f\"Wfb shape should be ({self.Win.shape[0]}, {self.Wout.shape[0]}) but is {self.Wfb.shape}.\"\n assert (self.Win.shape[0],self.Wout.shape[0]) == self.Wfb.shape, err",
"def check_data_shape(self, data_shape):\n if not len(data_shape) == 2:\n raise ValueError('data_shape should have length 2')\n if not data_shape[0] == 1:\n raise ValueError('This iterator expects inputs to have 1 channels.')",
"def _check_shape(input_shape):\n msg = ('Input to FlattenAxis must be 5D with dimensions: '\n '(n_observations, n_spatial_0, n_spatial_1, n_temporal, '\n 'n_features), but received shape: {}'.format(input_shape))\n assert len(input_shape) == 5, msg",
"def input_shape(self) ->torch.Size:\n pass",
"def _dim_received(self):\n self._call_subscribers(on_level=-1)",
"def test_convolve_input_dim_check(self, case, fn, x_shape, y_shape):\n x = torch.rand(*x_shape, dtype=self.dtype, device=self.device)\n y = torch.rand(*y_shape, dtype=self.dtype, device=self.device)\n\n message = [\n \"The operands must be the same dimension\",\n \"Leading dimensions of x and y are not broadcastable\",\n ][case]\n with self.assertRaisesRegex(ValueError, message):\n fn(x, y)",
"def __DimSiz_restriction_correct_ndarray_number_pedantic(self):\n\n strTestName = 'The size of a dimension of a Numpy array higher or equal to a number [pedantic] (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimHE('parameter1', 3, 'rows', pedantic=1)\n\n RxCSObject.parameter1 = np.random.randn(3, 3)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def check_dimensions(d_real, d_fake, d_real_logits, d_fake_logits):\n def _check_pair(a, b):\n if a != b:\n raise ValueError(\"Shape mismatch: %s vs %s.\" % (a, b))\n if len(a) != 2 or len(b) != 2:\n raise ValueError(\"Rank: expected 2, got %s and %s\" % (len(a), len(b)))\n\n if (d_real is not None) and (d_fake is not None):\n _check_pair(d_real.shape.as_list(), d_fake.shape.as_list())\n if (d_real_logits is not None) and (d_fake_logits is not None):\n _check_pair(d_real_logits.shape.as_list(), d_fake_logits.shape.as_list())\n if (d_real is not None) and (d_real_logits is not None):\n _check_pair(d_real.shape.as_list(), d_real_logits.shape.as_list())"
] |
[
"0.7807708",
"0.66550505",
"0.6484145",
"0.6049809",
"0.59849834",
"0.59090304",
"0.589519",
"0.584538",
"0.5836356",
"0.5833826",
"0.5738557",
"0.5730873",
"0.56899583",
"0.5685632",
"0.5644865",
"0.5634468",
"0.55924934",
"0.55602676",
"0.554988",
"0.55450547",
"0.55286795",
"0.5522566",
"0.5515246",
"0.55069524",
"0.5491555",
"0.5484054",
"0.546603",
"0.54451704",
"0.5437311",
"0.5407329"
] |
0.8263599
|
0
|
Generates corr space data and saves it to disk
|
def gen_data(self, do_print=True, force_gen_inputs=False):
    if do_print:
        print()
        print('Generating corr space data, id = %s' % self.id)
    self.post_init(force_gen_inputs=force_gen_inputs)
    self.run()
    self.post_run()
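
# Hedged usage sketch (assumption: `CorrSpaceRun` is a hypothetical stand-in for
# whatever class owns gen_data; only the call pattern is illustrated).
# run = CorrSpaceRun(id='corr_001')
# run.gen_data(do_print=True, force_gen_inputs=False)  # post_init -> run -> post_run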
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def make_data_raw (mdp,do_makedata,filename):\n #\n fin = open(filename,'r')\n for line in fin:\n lsp = line.split(' ')\n if len(lsp) > 1: # skip empty lines\n if lsp[0] == \"for\": # indicates when to get correlator\n lsp.pop(0)\n update_params(mdp,lsp)\n ## -- do_makedata tells it to go ahead with generating a new data output file\n ## -- otherwise, just saves parameters to metadata\n if do_makedata:\n try:\n # open correlator file\n mdp.corr_file = open(mdp.input_path + '/' + mdp.input_fname,'r')\n except IOError:\n print \"Could not open file \",mdp.input_fname\n continue\n print mdp.input_fname,',',mdp.tag\n if not mdp.flag_out_open:\n try:\n if mdp.flag_overwrite:\n ## -- open save file for read+write\n try:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0) # go to beginning\n mdp.save_file.truncate() # delete whatever was there before\n except IOError:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'w')\n mdp.save_file.close()\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.flag_out_open = True\n # write first header\n #corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",\\\n # int(mdp.corr_num.strip(\"[]\").split(',')[0]))\n #uf.write_header(mdp.save_file,corr_key,mdp.corr_len)\n #uf.write_section(mdp.save_file,mdp.key)\n for num,key in zip(mdp.corr_num,mdp.key):\n corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n uf.write_header(mdp.save_file,corr_key,mdp.corr_len)\n uf.write_section(mdp.save_file,key)\n mdp.flag_overwrite= False\n else:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0,2) # seek the end of file\n mdp.flag_out_open = True\n # write another header\n #corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",\\\n # int(mdp.corr_num.strip(\"[]\").split(',')[0]))\n #uf.write_header(mdp.save_file,corr_key,mdp.corr_len)\n #uf.write_section(mdp.save_file,mdp.key)\n for num,key in zip(mdp.corr_num,mdp.key):\n corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n uf.write_header(mdp.save_file,corr_key,mdp.corr_len)\n uf.write_section(mdp.save_file,key)\n #except (IOError):\n # pass\n except (AttributeError):\n print \"Attempted to open invalid output file\"\n ##endif ! flag_out_open\n save_data(mdp)\n mdp.corr_file.close()\n ##endif do_makedata\n ##else \"for\" not found in control file\n else:\n update_params(mdp,lsp)\n ##endif lsp[0]==for\n ##endif len(lsp) > 1\n try:\n mdp.save_file.close()\n mdp.flag_out_open = False\n except (IOError,AttributeError):\n pass\n fin.close()\n return",
"def make_data_raw_fast(mdp,do_makedata,filename):\n #\n fin = open(filename,'r')\n for line in fin:\n lsp = line.split(' ')\n if len(lsp) > 1: # skip empty lines\n if lsp[0] == \"for\": # indicates when to get correlator\n lsp.pop(0)\n update_params(mdp,lsp)\n ## -- do_makedata tells it to go ahead with generating a new data output file\n ## -- otherwise, just saves parameters to metadata\n if do_makedata:\n try:\n # open correlator file\n mdp.corr_file = open(mdp.input_path + '/' + mdp.input_fname,'r')\n except IOError:\n print \"Could not open file \",mdp.input_fname\n continue\n print mdp.input_fname\n if not mdp.flag_out_open:\n try:\n if mdp.flag_overwrite:\n ## -- open save file for read+write\n try:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0) # go to beginning\n mdp.save_file.truncate() # delete whatever was there before\n except IOError:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'w')\n mdp.save_file.close()\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n mdp.flag_overwrite= False\n else:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0,2) # seek the end of file\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n #except (IOError):\n # pass\n except (AttributeError):\n print \"Attempted to open invalid output file\"\n ##endif ! flag_out_open\n save_data_fast(mdp)\n mdp.corr_file.close()\n ##endif do_makedata\n ##else \"for\" not found in control file\n else:\n update_params(mdp,lsp)\n ##endif lsp[0]==for\n ##endif len(lsp) > 1\n try:\n mdp.save_file.close()\n mdp.flag_out_open = False\n except (IOError,AttributeError):\n pass\n fin.close()\n return",
"def generate():",
"def save_data (mdp):\n for num,key in zip(mdp.corr_num,mdp.key):\n lnum = find_corr(mdp,int(num)) # get the line number of the correlator, if possible\n if lnum > -1:\n cdat = extract_data(mdp,lnum) # found the correlator, save to array\n try: # write it to file\n ## -- organizing is too slow, just write to end of file\n mdp.save_file.seek(0,2) # seek the end of file\n mdp.save_file.write( key + ' ' + \\\n ' '.join('{:e}'.format(cdat[x]) for x in range(0,mdp.corr_len))+'\\n')\n #lsec = uf.find_data_section(mdp.save_file,key)\n #mdp.save_file.write( key + ' ' + ' '.\\\n #uf.ins_line(mdp.save_file, key + ' ' + ' '.\\\n # join('{:e}'.format(cdat[x]) for x in range(0,mdp.corr_len))\\\n # , lsec[1]+1\\\n # )\n #write_fname(mdp,lsec[0])\n except IndexError:\n print \"-- In file\",mdp.corr_file.name\n print \"Could not extract data from file\"\n else:\n print \"-- In file\",mdp.corr_file.name\n print \"Failed to find correlator #\",num",
"def make_phys():\n for rn in dcm_dict.keys():\n # PPG\n if not dcm_dict[rn]['ppg_file'] == 'File missing':\n # Files\n ppg_tsv = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-cardiac.tsv.gz')\n ppg_json = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-cardiac.json')\n # TSV\n gzip_file(dcm_dict[rn]['ppg_file'],ppg_tsv)\n # JSON\n data = OrderedDict()\n data['SamplingFrequency'] = 100.0\n data['StartTime'] = -30.0\n data['Columns'] = 'cardiac'\n with open(ppg_json, 'w') as ff:\n json.dump(data, ff,sort_keys=False, indent=4)\n # Respiration\n if not dcm_dict[rn]['resp_file'] == 'File missing':\n # Files\n resp_tsv = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-respiratory.tsv.gz')\n resp_json = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-respiratory.json')\n # TSV\n gzip_file(dcm_dict[rn]['resp_file'],resp_tsv)\n # JSON\n data = OrderedDict()\n data['SamplingFrequency'] = 25.0\n data['StartTime'] = -30.0\n data['Columns'] = 'respiratory'\n with open(resp_json, 'w') as ff:\n json.dump(data, ff,sort_keys=False, indent=4)\n # ECG\n # What to do if they have PPG and ECG?\n if not dcm_dict[rn]['ecg_file'] == 'File missing':\n # Files\n ecg_tsv = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-cardiac.tsv.gz')\n ecg_json = os.path.join(out_dir,subject+'_'+dcm_dict[rn]['out_name']+'_physio-cardiac.json')\n # TSV\n gzip_file(dcm_dict[rn]['resp_file'],resp_tsv)\n # JSON\n data = OrderedDict()\n data['SamplingFrequency'] = 1000.0\n data['StartTime'] = -30.0\n data['Columns'] = 'cardiac'\n with open(resp_json, 'w') as ff:\n json.dump(data, ff,sort_keys=False, indent=4)",
"def generate(self):\n self._open_file()\n # copied from GenerateCSPEC.py\n self._write_header_and_defaults()\n self._write_source()\n self._write_sample()\n\n self._write_all_components()\n self._write_mantle_module()\n self._write_segment()\n self._write_all_ids()\n self._write_footer()\n self._close_file()",
"def save_data_fast(mdp):\n htSize = np.power(2,14)\n corrTable,keyList = uf.build_corr_table(mdp.corr_file,htSize)\n for num,key in zip(mdp.corr_num,mdp.key):\n try:\n pos = uf.search_corr_table(keyList[int(num)],corrTable,mdp.corr_file) # get the position in file\n except IndexError:\n ## -- correlator not present in file, skip\n continue\n cdat = extract_data_fast(mdp,pos) # found the correlator, save to array\n try: # write it to file\n ## -- organizing is too slow, just write to end of file\n mdp.save_file.seek(0,2) # seek the end of file\n mdp.save_file.write( key + mdp.tag + ' ' + \\\n ' '.join('{:e}'.format(cdat[x]) for x in range(0,mdp.corr_len))+'\\n')\n except IndexError:\n print \"-- In file\",mdp.corr_file.name\n print \"Could not extract data from file with (pos,key) = \",num,key,pos,keyList[int(num)]\n #else:\n # print \"-- In file\",mdp.corr_file.name\n # print \"Failed to find correlator #\",num",
"def generate(self):",
"def _generate_data(self, codec='deflate'):\n _logger.info('generating fake data')\n (desc, path) = mkstemp()\n os.close(desc)\n os.remove(path)\n try:\n call([\n 'node', osp.join(DPATH, os.pardir, os.pardir, 'scripts', 'random'),\n self.path, str(self.n_records), path\n ])\n yield path\n finally:\n if osp.exists(path):\n os.remove(path)",
"def write_central(ds):\n ### make central directory:\n try:\n os.mkdir(GOAL_DIR_CENTRAL)\n except OSError:\n print (\"Creation of the directory failed or already exists\") \n target = GOAL_DIR_CENTRAL + \"data.csv\"\n \n ### write central:\n r_list = list(range(len(ds.index)))\n random.Random(RANDOM_SEED).shuffle(r_list)\n total_df = ds.loc[r_list]\n total_df.to_csv(target, index=False)",
"def generate(self):\n super().generate()\n records = random.random((self._dimension, self._dimension, self.num_samples))\n record_labels = [0] * self.num_samples\n prev_out_spec =\"\"\n count = 0\n for i in range(0, int(self.num_files)):\n if i % self.comm_size == self.my_rank:\n progress(i+1, self.num_files, \"Generating NPZ Data\")\n out_path_spec = \"{}_{}_of_{}.npz\".format(self._file_prefix, i, self.num_files)\n if count == 0:\n prev_out_spec = out_path_spec\n if self.compression != Compression.ZIP:\n np.savez(out_path_spec, x=records, y=record_labels)\n else:\n np.savez_compressed(out_path_spec, x=records, y=record_labels)\n count += 1\n else:\n copyfile(prev_out_spec, out_path_spec)",
"def generateDataset(self):\n if self.outdir[-1] != \"/\": \n self.outdir += \"/\"\n self.outdir += \"dataset_trackml\"\n i = 1\n while os.path.exists(self.outdir):\n self.outdir.replace(\"_\"+str(i-1), \"\")\n self.outdir += (\"_\"+str(i))\n i += 1\n cmd = \"mkdir -p \"+ self.outdir\n os.system(cmd)\n\n cont = pc.particleController()\n cont.generateEvents(self.numevents, self.hpe, self.detectors)\n\n self.generateHits(cont)\n self.generateTruths(cont)\n self.generateSolution(cont)",
"def save_clusters(db, savepath, time_req):\n # try:\n # all_id = np.unique(db['pred_labels']) # Includes noise tag\n # except:\n print('fallo de unique')\n import pdb\n pdb.set_trace()\n all_id = []\n num_id = [0 for _ in range(50)]\n for line in db['pred_labels']:\n if all_id.count(line)==0:\n all_id.append(line)\n num_id[line+1]+=1\n pdb.set_trace()\n\n\n for iddty in all_id:\n data = db.loc[db['pred_labels'] == iddty]\n if len(data) >= time_req and iddty != -1:\n id_path = join(savepath, 'id_' + str(iddty))\n os.makedirs(id_path, exist_ok=True)\n\n data_vector, size = load_embeddings(data)\n centroid = np.mean(data_vector, axis=0)\n std = np.std(data_vector, axis=0)\n cov = np.cov(np.array(data_vector).T)\n\n print(all_id)\n print(cov)\n pdb.set_trace()\n\n #inv_cov = np.linalg.inv(cov)\n export(path=join(id_path, 'centroid'),\n data=centroid)\n export(path=join(id_path, 'std'),\n data=std)\n export(path=join(id_path, 'covmat'),\n data=cov)\n #export(path=join(id_path, 'inv_covmat'), data=inv_cov)\n\n imgs = data['img'].values\n for img_path in imgs:\n img = PIL.Image.open(img_path)\n img_name = img_path.split('/')[-1]\n img.save(join(id_path, img_name))",
"def write_data():",
"def writeRawFCD():\n global vehId, vehIdDict\n vehIdDict = {}\n vehId = 0\n day = 0\n\n def getVehId(orgId):\n \"\"\"creates new vehicle id's which consists only numerics\"\"\"\n global vehId, vehIdDict\n value = vehIdDict.get(orgId, vehId)\n if value is vehId:\n vehIdDict[orgId] = vehId\n vehId = (vehId + 1) % 65500\n return value\n\n outputFile = open(path.FQrawFCD, 'w')\n\n for period, quota, vtypeDictR, taxiSum in generatePeriodQuotaSets():\n day += 86400\n # reset dict so that every taxi (even if the vehicle is chosen several\n # times) gets its own id\n vehIdDict = {}\n # dataset=0\n sortedKeys = vtypeDictR.keys()\n sortedKeys.sort()\n for timestep in sortedKeys:\n taxiList = vtypeDictR[timestep]\n for tup in taxiList: # all elements in this timestep\n # calc timestep ->for every period /quota set a new day\n time = timestep + day\n time = calcTime.getDateFromDepart(time)\n # dataset+=1\n # print ouptut\n # veh_id date (time to simDate+time) x (remove and\n # set comma new)\n outputFile.write(str(getVehId(tup[0])) + '\\t' + time + '\\t' + tup[3][0:2] + '.' + tup[3][2:7] + tup[3][8:] +\n # y (remove and set comma new)\n # status speed form m/s in km/h\n '\\t' + tup[4][0:2] + '.' + tup[4][2:7] + tup[4][8:] + '\\t' + \"90\" + '\\t' + str(int(round(tup[2] * 3.6))) + '\\n')\n # print dataset, time\n print(vehId)\n outputFile.close()",
"def Generar_Claves():\n salida=Keypp()\n savekey(salida)\n savecomp(salida)",
"def generate_data(self):\n self.remove_hdf5_file()\n hdf5_handler = self.create_hdf5_file()\n self.populate_hdf5_file(hdf5_handler, self.dataset)",
"def generate(self):\n pass",
"def generate(self):\n pass",
"def generate(self):\n pass",
"def reduce_and_save():\n ### Get the signature information\n sig_info = pd.read_csv(join(FILE_PATH, \"GSE92742_Broad_LINCS_sig_info.txt\"), sep=\"\\t\")\n ### Columns are:\n ### Index([u'sig_id', u'pert_id', u'pert_iname', u'pert_type', u'cell_id',\n ### u'pert_dose', u'pert_dose_unit', u'pert_idose', u'pert_time',\n ### u'pert_time_unit', u'pert_itime', u'distil_id'],\n ### dtype='object')\n\n ### Filter for signature ids for small molecule pertubagens\n small_mol_sigs = sig_info['sig_id'][sig_info['pert_type'] == \"trt_cp\"]\n ### Results in 205034 signatures\n\n ### Read in the gene info\n gene_info = pd.read_csv(join(FILE_PATH, \"GSE92742_Broad_LINCS_gene_info.txt\"), sep='\\t')\n ### Index([u'pr_gene_id', u'pr_gene_symbol', u'pr_gene_title', u'pr_is_lm',\n ### u'pr_is_bing'],\n ### dtype='object')\n\n landmark_gene_ids = gene_info['pr_gene_id'][gene_info['pr_is_lm'] == 1] #Filters for directly measured transcripts\n ### Results in the 978 landmark pr_gene_ids\n\n ### LOAD in the main file filtering the columns so that only the small molecules signatures are loaded and the\n ### rows such that only the landmark genes are loaded into their custom gctoo container type\n relevent_sigs_gctoo = parse(join(FILE_PATH, \"GSE92742_Broad_LINCS_Level5_COMPZ.MODZ_n473647x12328.gctx\"),\n cid=small_mol_sigs, rid=landmark_gene_ids)\n # print small_mol_sigs.data_df.shape\n ### Should write an intermediate file with dimensions (978, 205034)\n write_gctx.write(relevent_sigs_gctoo, join(FILE_PATH, \"lm_sm_aggz\"))",
"def create_output_database():\n\n# Do not alter the hdf5 file if it already exists\n if os.path.exists(database_path):\n print(\"DATABASE STATUS:\")\n print(\"\\t\" + database_path + \" already exists and is ready to store the results of computations\")\n return None\n# Create hdf5 file. The flag \"-w\" means \"create file, fail if exists\" \n else:\n computations_database = h5py.File(database_path, \"w-\")\n\n# Create initial data datasets and write initial data into them \n for initial_condition in initial_conditions:\n for k in range (6,17):\n dataset_initial_path = initial_condition + \"/k = \" + str(k) + \" initial_data\"\n computations_database[dataset_initial_path] = initial_data(initial_condition, k)\n# Create data groups for storing the results of computations \n for flux in fluxes: \n group_path = initial_condition + \"/\" + flux\n computations_database.create_group(group_path)\n\n# Write the appropriate attributes that are needed for particular computations, \n# i.e. create the appropriate environment for each computational method \n computations_database[group_path].attrs[\"a\"] = 3.0\n computations_database[group_path].attrs[\"T\"] = 9.0\n if flux == \"Lax_Wendroff_Fourth_Order\": \n computations_database[group_path].attrs[\"CFL\"] = 0.2\n elif flux in [\"Fromm_CFL_0.5\", \"Fromm_van_Leer_CFL_0.5\"]:\n computations_database[group_path].attrs[\"CFL\"] = 0.5\n else:\n computations_database[group_path].attrs[\"CFL\"] = 0.9\n \n computations_database.close() \n print(\"DATABASE STATUS:\")\n print(\"\\t\" + database_path + \" has been created and is ready to store the results of computations\")",
"def generate_final_data(model_names):\n\n for model_name in model_names:\n print(\"Creating fina data for \" + model_name[0])\n\n final_data = {}\n brush_data = common.load_json(\"../steps/\" + model_name[0] + \"/brush_data.json\")\n diff_data = common.load_json(\"../steps/\" + model_name[0] + \"/diff_plot_data.json\")\n distance_data = common.load_json(\"../steps/\" + model_name[0] + \"/distance_data.json\")\n\n final_data[0] = {\n \"step_number\" : 0,\n \"valid\" : brush_data['0'][\"valid\"],\n \"brush_data\" : sanitize_brush_data(brush_data['0']),\n \"diff_data\" : null_diff_data(),\n \"distance_data\" : null_distance_data()\n }\n\n for step_idx in range(1, len(brush_data)):\n print(str(step_idx) + \" \",)\n final_data[step_idx] = {}\n final_data[step_idx][\"step_number\"] = step_idx\n final_data[step_idx][\"valid\"] = brush_data[str(step_idx)][\"valid\"]\n final_data[step_idx][\"brush_data\"] = sanitize_brush_data(brush_data[str(step_idx)])\n final_data[step_idx][\"diff_data\"] = get_diff_data_step(diff_data, step_idx - 1)\n final_data[step_idx][\"distance_data\"] = get_distance_data_step(distance_data, str(step_idx))\n\n common.save_json(final_data, \"../final_data/\" + model_name[0] + \"/final_data.json\", compressed=False)",
"def save_data_fast_bc(mdp,bcsign):\n htSize = np.power(2,14)\n corrTable,keyList = uf.build_corr_table(mdp.corr_file,htSize)\n for num,key in zip(mdp.corr_num,mdp.key):\n try:\n pos = uf.search_corr_table(keyList[int(num)],corrTable,mdp.corr_file) # get the position in file\n except IndexError:\n ## -- correlator not present in file, skip\n continue\n cdat = extract_data_fast(mdp,pos) # found the correlator, save to array\n if bcsign:\n #print \"correcting sign\"\n cdat = [-x for x in cdat]\n try: # write it to file\n ## -- organizing is too slow, just write to end of file\n mdp.save_file.seek(0,2) # seek the end of file\n mdp.save_file.write( key + mdp.tag + ' ' + \\\n ' '.join('{:e}'.format(cdat[x]) for x in range(0,mdp.corr_len))+'\\n')\n except IndexError:\n print \"-- In file\",mdp.corr_file.name\n print \"Could not extract data from file with (pos,key) = \",num,key,pos,keyList[int(num)]\n #else:\n # print \"-- In file\",mdp.corr_file.name\n # print \"Failed to find correlator #\",num",
"def write_rcm(self,rcm_filename):\n \n if(self.buildingsAdded != True):\n self.run_nc.add_building_output_locations({}, 0, 0,0) #Set building locations # 0 in NETCDF file\n \n if(self.keyPointsAdded != True):\n self.run_nc.add_key_points_output_locations([], 0, 0, 0, 0) #Set key points to 0 in netcdf file\n \n self.ricom.write_rcm(rcm_filename)\n #self.run_nc.close()",
"def save(self, output, data):",
"def save(self, path=\"\"):\n path = path + \"model_\" + str(self.name) + \".txt\"\n if os.path.isfile(path):\n os.remove(path)\n f = open(path, \"w+\")\n for ident in self.networks:\n f.write(ident + \"_\" + self.networks[ident].descriptor.codify_components() + \"_\" + str(self.networks[ident].taking.size) + \",\" + self.networks[ident].taking.type + \"_\" + str(self.networks[ident].producing.size) + \",\" + self.networks[ident].producing.type + \"_\" +\n str(self.networks[ident].depth) + \"_\" + \",\".join(self.reachable[ident]) + \"_\" + \",\".join(self.comps_below[ident]) + \"\\n\")\n f.write(\"\\n\")\n\n for ident in self.inputs:\n f.write(ident + \"_\" + str(self.inputs[ident].producing.size) + \"_\" + self.inputs[ident].producing.type + \"_\" + str(self.inputs[ident].depth) + \"\\n\")\n f.write(\"\\n\")\n\n for ident in self.outputs:\n f.write(ident + \"_\" + str(self.outputs[ident].taking.size) + \"_\" + self.outputs[ident].taking.type + \"_\" + str(self.outputs[ident].depth) + \"_\" + \",\".join(self.comps_below[ident]) + \"\\n\")\n f.write(\"\\n\")\n\n for con in self.connections:\n f.write(self.connections[con].codify() + \"\\n\")\n #f.write(\"\\n\")\n\n f.close()\n\n return path",
"def generatePredictorDataForComic(self, comicId):\n self.__pdir = Predictor.directory\n self.__predictorData = PredictorData(None)\n self.saveComic(comicId)",
"def main():\n if len(sys.argv) < 3:\n message = \"\"\"\n Usage: python generate_dataset.py <dataset_name> <number of files> <size of each file in bytes>\n \"\"\"\n print(message)\n sys.exit(0)\n dataset_name = sys.argv[1]\n file_number = int(sys.argv[2])\n file_size = int(sys.argv[3])\n\n if not os.path.exists(dataset_name):\n os.makedirs(dataset_name)\n\n for i in range(file_number):\n tmp_file = open('./' + dataset_name + '/' + dataset_name + '.file' + str(i), 'w+')\n tmp_file.write(os.urandom(file_size))\n tmp_file.close()",
"def save_data(self) -> None:\n # Construct a grid in physical space\n rvals = np.logspace(start=-3,\n stop=2.5,\n num=21,\n endpoint=True)\n # Compute C, D, K1 and F on that grid\n Cvals = np.array([self.compute_C(r, Suppression.RAW) for r in rvals])\n Dvals = np.array([self.compute_D(r, Suppression.RAW) for r in rvals])\n K1vals = np.array([self.compute_K1(r, Suppression.RAW) for r in rvals])\n Fvals = np.array([self.compute_F(r, Suppression.RAW) for r in rvals])\n # Save them to file\n df = pd.DataFrame([rvals, Cvals[:, 0], Dvals[:, 0], K1vals[:, 0], Fvals[:, 0],\n Cvals[:, 1], Dvals[:, 1], K1vals[:, 1], Fvals[:, 1]]).transpose()\n df.columns = ['r', 'C(r)', 'D(r)', 'K1(r)', 'F(r)', 'dC(r)', 'dD(r)', 'dK1(r)', 'dF(r)']\n df.to_csv(self.file_path(self.filename + '.csv'), index=False)"
] |
[
"0.65893036",
"0.6341853",
"0.62557477",
"0.6097091",
"0.6018437",
"0.60105604",
"0.5890326",
"0.58047855",
"0.57793343",
"0.57107365",
"0.5645555",
"0.5641321",
"0.560955",
"0.55532336",
"0.54753006",
"0.54725295",
"0.5466314",
"0.54604185",
"0.54604185",
"0.54604185",
"0.54458964",
"0.54447335",
"0.54398865",
"0.54330444",
"0.5423734",
"0.5393131",
"0.5392827",
"0.53898466",
"0.53895384",
"0.5385543"
] |
0.66838986
|
0
|
Returns the Euclidean distance. p1 is a list of features, p2 is also a list of features; p1 and p2 should be the same size. The distance is the square root of the sum of the squared differences for each feature.
|
import math

def distance(p1, p2):
    """Return the Euclidean distance between two equal-length feature lists."""
    sum_all = 0
    for i, v in enumerate(p1):
        # squared difference for this feature
        diff_squared = (v - p2[i]) ** 2
        sum_all += diff_squared
    return math.sqrt(sum_all)
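
# A minimal usage sketch, assuming the function above (and the math import)
# is in scope; the 3-4-5 right triangle gives a distance of exactly 5.0.
assert distance([0, 0], [3, 4]) == 5.0
assert distance([1.0, 2.0, 3.0], [1.0, 2.0, 3.0]) == 0.0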
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def compute_feature_distances(features1: np.ndarray, \r\n features2: np.ndarray) -> np.ndarray:\r\n #broadcasting trick\r\n a = features1[:, np.newaxis, :]\r\n b = features2[np.newaxis, :, :]\r\n \r\n return np.linalg.norm( (a-b), axis=-1)",
"def GetDist(feature_1, feature_2):\n return np.linalg.norm(feature_1 - feature_2)",
"def feature_distance(feat1, feat2, eps=1e-7, sqrt=True):\n diff = torch.pow((feat1 - feat2), 2).sum(-1)\n if sqrt:\n diff = (diff + eps).sqrt()\n return diff",
"def euclidean_distance(p1, p2):\n distance = 0\n for i in range(len(p1)-1):\n distance += (p1[i]-p2[i])**(2)\n return sqrt(distance)",
"def euclidean_distance(x1: np.ndarray, x2: np.ndarray) -> float:\n return np.sqrt(np.square(x1 - x2).sum())",
"def distance (p1,p2):\n return np.sqrt(np.sum(np.power(p2-p1,2)))",
"def calcEuclideanDistance(d1, d2):\n #initiate empty list\n result = []\n #for each index in the list, each position in both list minus each other\n #and to the power of two. Add this in the result list\n for idx in range(len(d1)):\n result.append((d1[idx]-d2[idx])**2)\n\n #Return the square of the sum of all values in the result list\n return math.sqrt(sum(result))",
"def get_euclidean_distance(p1, p2):\n return np.sqrt(np.power((p2[0] - p1[0]), 2) + np.power((p2[1] - p1[1]), 2))",
"def euclidean_distance(s1,s2): \n tmpsum = 0\n \n for index,value in enumerate(s1):\n tmpsum += (s1[index]-s2[index])**2\n \n return math.sqrt(tmpsum)",
"def euclidean_distance(a: Tuple[float, ...], b: Tuple[float, ...]) -> float:\n assert len(a) == len(b)\n return sqrt(sum(pow(x[0] - x[1], 2) for x in zip(a, b)))",
"def distance(f1, f2):\n\n return np.sum((np.sum([f1, -f2], axis=0))**2, axis=1)",
"def distance(p1, p2):\n return np.linalg.norm(np.array(p1) - np.array(p2))",
"def euclidean_distance(point1, point2):\n\n return math.sqrt(sum([(x - y) ** 2 for x, y in zip(point1, point2)]))",
"def distance(p1, p2):\n return np.linalg.norm(p2-p1)",
"def euclidean_distance(x1, x2):\n return np.sqrt(np.sum(np.square(np.subtract(x1, x2))))",
"def distance(self,pt1,pt2):\n #productive #frequent\n if frequent: profprint()\n d = ( ( float(pt1[0]) - float(pt2[0]) )**2 + ( float(pt1[1]) - float(pt2[1]) )**2 + ( float(pt1[2]) - float(pt2[2]) )**2 )**0.5\n return d",
"def distance(p1, p2):\n return math.sqrt((math.pow((p2[0] - p1[0]), 2) + math.pow((p2[1] - p1[1]), 2)))",
"def euclidean_distance(p1, p2):\n dist = np.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)\n return dist",
"def distance(p1, p2):\n\treturn sqrt((p1[1]-p2[1])**2 + (p1[0]-p2[0])**2)",
"def euclidean(p1, p2):\n return p1.distance(p2)",
"def compute_dist(p_1, p_2):\n return sqrt((p_2[0] - p_1[0])**2 + (p_2[1] - p_1[1])**2 +\n (p_2[2] - p_1[2])**2)",
"def distance(p1, p2):\n\n return sqrt(((p2[0] - p1[0])**2) + ((p2[1] - p1[1])**2))",
"def distance(p1, p2):\n return math.sqrt((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2 + (p1[2]-p2[2])**2)",
"def distance(p1, p2):\n return sqrt((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2)",
"def euclidean_distance(list1, list2):\n # Make sure we're working with lists\n # Sorry, no other iterables are permitted\n assert isinstance(list1, list)\n assert isinstance(list2, list)\n\n dist = 0\n\n # 'zip' is a Python builtin, documented at\n # <http://www.python.org/doc/lib/built-in-funcs.html>\n for item1, item2 in zip(list1, list2):\n dist += (item2 - item1)**2\n return math.sqrt(dist)",
"def euclidean_distance(data1, data2):\n #Convert data into numpy array\n array1 = np.array(data1)\n array2 = np.array(data2)\n \n #Create distance array\n dist_array = np.sqrt(np.sum((array2-array1)**2, axis=1))\n \n #Reshape array before return results\n return np.reshape(dist_array, [len(dist_array),1])",
"def __get_distance(point1: np.ndarray, point2: np.ndarray) -> float:\n return np.sqrt(np.sum(np.square(point1 - point2)))",
"def euclidean_distance(x1, x2):\n return np.sqrt(np.sum(np.power(x1 - x2, 2)))",
"def distance(self, pt1, pt2):\r\n # productive #frequent\r\n if frequent: profprint()\r\n d = ((float(pt1[0]) - float(pt2[0])) ** 2 + (float(pt1[1]) - float(pt2[1])) ** 2 + (float(pt1[2]) - float(pt2[2])) ** 2) ** 0.5\r\n return d",
"def euclidean_distance(point1, point2):\n return np.linalg.norm(np.array(point1) - np.array(point2))"
] |
[
"0.7802887",
"0.7571842",
"0.7497374",
"0.7418175",
"0.73836523",
"0.73509413",
"0.7349909",
"0.72473276",
"0.7206549",
"0.7182788",
"0.7123703",
"0.7121786",
"0.7101655",
"0.70998794",
"0.70989585",
"0.70877427",
"0.70846736",
"0.7084224",
"0.70823973",
"0.70755225",
"0.7070989",
"0.7053456",
"0.70511174",
"0.705073",
"0.70479465",
"0.7044049",
"0.7019589",
"0.7017948",
"0.70041096",
"0.6997811"
] |
0.76441604
|
1
|
Returns the nth 0-based term of the Fibonacci sequence
|
from math import sqrt

def fibonacci_term(n):
    return int(((1+sqrt(5))**n-(1-sqrt(5))**n)/(2**n*sqrt(5)))
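
# A minimal sanity sketch, assuming fibonacci_term() above is in scope.  The
# closed form is Binet's formula; since it truncates a float, comparing a few
# terms against a simple iterative reference is a reasonable check.
def _fib_iter(n):  # hypothetical helper, only for this comparison
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a

for k in range(10):
    print(k, fibonacci_term(k), _fib_iter(k))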
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def fibonacci(n):",
"def fibonacci(n):\n\tfib_seq = []\n\tnth_term = 0\n\t\n\tfor i in range(0,n+1):\n\t\tif i == 0:\n\t\t\tfib_seq.append(0)\n\t\tif i == 1:\n\t\t\tfib_seq.append(1)\n\t\tif i > 1:\n\t\t\tnth_term = fib_seq[-1] + fib_seq[-2]\n\t\t\tfib_seq.append(nth_term)\n\t\n\tprint(fib_seq)\n\tprint(fib_seq[n])\n\treturn(fib_seq[n])",
"def fast_fibonacci(n):\n return _fast_fibonacci(n)[0]",
"def fib(index):\n return round((GR**index)/R5)",
"def nthFibonacci(n):\n\n # Run some basic error checking\n try:\n n = int(n)\n except: # if this fails not a number inputed\n sys.stderr.write('Incorrect data input\\n')\n return None\n if n < 0:\n sys.stderr.write('Only positive integers allowed\\n')\n return None\n \n # since the error checking slows down the recursion we run it as a seperate function\n [Fnm,Fn] = fastrecursivefibonacci(n)\n return Fnm",
"def fibi(n):\n a, b = 0, 1\n for i in range(n):\n # fibonacci series is next no. is sum of previous two number.\n temp = a\n a = b\n # now nth fibonacci no. is sum of previous two number.\n b = temp+b\n # returning a because a changing each places\n return a",
"def fibonacci(n):\n if n == 0:\n return 0\n elif n == 1:\n return 1\n else:\n nth = fibonacci(n-1) + fibonacci(n-2)\n return nth",
"def fibonacci_number(n):\r\n l = [0, 1] \r\n for i in range(n - 1):\r\n l = [*l, l[-1] + l[-2]]\r\n return l[n - 1]",
"def next_fib(f):\n for f in fib:\n i = fib.index(f)\n return f+fib[i-1]",
"def fib(n): #Describe \"n\" as a variable in fib sequence\n while n == 0:\n return 0 #establish that 0 position is equal to 0\n if n == 1:\n return 1\n else:\n return fib(n-1) + fib(n-2)",
"def fibonacci_iterative(nth_nmb: int) -> int:\n old, new = 0, 1\n if nth_nmb in (0, 1):\n return nth_nmb\n for __ in range(nth_nmb - 1):\n old, new = new, old + new\n return new",
"def fib(i):\n if i < 2: return 1\n return fib(i-1) + fib(i-2)",
"def fibi(n):\n if n == 0: return 0\n if n == 1: return 1\n f_n2, f_n1 = 1, 1\n for i in range(3, n+1):\n f_n2, f_n1 = f_n1, f_n2+f_n1\n return f_n1",
"def find_fib(n):\n # fibo = 2.078087 * math.log(n) + 1.672276\n return 0 # fibo",
"def fib_formula(n):\n if n <= 1:\n return n\n else:\n return (fib_formula(n - 1) + fib_formula(n - 2))",
"def fib(n:int) -> int:\n if n<= 2:\n return 1\n else:\n return fibonacci.fib(n-1) + fibonacci.fib(n-2)",
"def fibonacci(n):\n fibval = sum_series(n, 0, 1)\n print(fibval)\n return fibval",
"def fibonacci():\n return sum_series(a=0, b=1)",
"def fibonacci(n):\n sequence = [0, 1]\n for i in range(n + 1):\n value = add(sequence[-2], sequence[-1])\n sequence.append(value)\n return sequence[n]",
"def fib(n):\n if n == 0: return 0\n if n == 1: return 1\n return fib(n-1) + fib(n-2)",
"def fib(n):\n if n < 2:\n return n\n else:\n return fib(n-1) + fib(n-2)",
"def fib(n):\n print(\"fib({})\".format(n))\n if(n <= 2):\n return 1\n else:\n return fib(n-1) + fib(n-2)",
"def fib(n):\n fib = [0, 1]\n if n > 2:\n for i in range(n):\n fib.append(fib[-1] + fib[-2])\n return fib[n-1]\n else:\n return fib[n-1]",
"def fibi(n: int) -> int:\n if n == 0:\n return 0\n if n == 1:\n return 1\n f_n2, f_n1 = 1, 1\n for _ in range(3, n+1):\n f_n2, f_n1 = f_n1, f_n2+f_n1\n return f_n1",
"def fib(n):\n if n in (0, 1): return n\n return fib(n-1) + fib(n-2)",
"def fib(n):\n if n == 0 or n == 1:\n return n\n else:\n return fib(n-2) + fib(n-1)",
"def fibonacci(n):\n\n ## Auxiliary functions for working in our polynomial ring.\n def poly_sqr((a, b)):\n a2 = a*a\n return 2*a*b + a2, a2 + b*b\n def poly_mul((a, b), (c, d)):\n ac = a*c\n return a*d + b*c + ac, ac + b*d\n\n ## Do the job. For negative indices, we take powers of t^{-1}.\n if n < 0: return power((1, -1), -n, (0, 1), poly_sqr, poly_mul)\n else: return power((1, 0), n, (0, 1), poly_sqr, poly_mul)",
"def fib(n: int) -> int:\n if n == 0: return 0\n if n == 1: return 1\n return fib(n-1) + fib(n-2)",
"def fibonacci0(n):\n assert n == int(n) and n > 0\n if n in [1, 2]:\n return 1\n return fibonacci0(n-1) + fibonacci0(n-2)",
"def fib(n): \n if n == 0:\n return 0\n elif n == 1:\n return 1\n\n else:\n return fib(n-1) + fib(n-2)"
] |
[
"0.77835625",
"0.7699114",
"0.7652416",
"0.7619707",
"0.75898635",
"0.7513049",
"0.7501769",
"0.74895394",
"0.7479242",
"0.74768573",
"0.7446154",
"0.7437935",
"0.74273765",
"0.74261755",
"0.7385632",
"0.73672014",
"0.73632944",
"0.7333317",
"0.7329111",
"0.7326546",
"0.7319511",
"0.73146063",
"0.7296556",
"0.7295377",
"0.7289664",
"0.72841454",
"0.7276115",
"0.7273146",
"0.72719735",
"0.7269306"
] |
0.79072845
|
0
|
Returns the Fibonacci sequence up to max
|
def fibonacci_sequence(max):
    # Collect Fibonacci terms strictly below `max` (the parameter shadows the
    # built-in); relies on a fibonacci_term() helper like the one above.
    term = fibonacci_term(0)
    f = []
    i = 1
    while term < max:
        f.append(term)
        term = fibonacci_term(i)
        i += 1
    return f
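
# A minimal usage sketch, assuming the two functions above are in scope; with
# an exact fibonacci_term(), the Fibonacci numbers strictly below 10 are
# [0, 1, 1, 2, 3, 5, 8].
print(fibonacci_sequence(10))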
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def fibonacci(max_num):\n\n\t# base case\n\tif max_num <= 1:\n\t\treturn 1\n\telse:\n\t\t# moving toward base case\n\t\t# calling itself recursively\n\t\treturn fibonacci(max_num - 1) + fibonacci(max_num - 2)",
"def fibonacci(n):",
"def generateFibonacci(upper_bound):\n fibs = [0]\n latest = 1\n while latest < upper_bound:\n fibs.append(latest) \n latest = fibs[-1] + fibs[-2]\n return fibs",
"def fibonacci():\n yield 0\n element = yield 1\n previous = element\n while element < 1e100:\n current = yield element\n element = previous + current\n if current > 1:\n previous = current\n\n return element",
"def fib_iterative(n: int) -> int:\n print(n)\n return 0",
"def fibonacci_iterative(nth_nmb: int) -> int:\n old, new = 0, 1\n if nth_nmb in (0, 1):\n return nth_nmb\n for __ in range(nth_nmb - 1):\n old, new = new, old + new\n return new",
"def last_fib_digit(n):\n\n # global seq\n seq = []\n seq.append(1)\n seq.append(1)\n\n if n <= 2:\n return(1)\n\n for i in range(2, n):\n seq.append(last_digit(seq[i-1] + seq[i-2]))\n\n return seq[n-1]",
"def find_fib(n):\n # fibo = 2.078087 * math.log(n) + 1.672276\n return 0 # fibo",
"def fib(n):\n n = int(n)\n if n <= 1:\n return 1\n\n return fib(n-1) + fib(n-2)",
"def fibonacci():\n return sum_series(a=0, b=1)",
"def fast_fibonacci(n):\n return _fast_fibonacci(n)[0]",
"def fib(n): #Describe \"n\" as a variable in fib sequence\n while n == 0:\n return 0 #establish that 0 position is equal to 0\n if n == 1:\n return 1\n else:\n return fib(n-1) + fib(n-2)",
"def fibonacci(n):\n sequence = [0, 1]\n for i in range(n + 1):\n value = add(sequence[-2], sequence[-1])\n sequence.append(value)\n return sequence[n]",
"def fib(limit):\n a, b = 0, 1\n while a <= limit:\n yield a\n a, b = b, a + b",
"def fibonacci(n):\n fibval = sum_series(n, 0, 1)\n print(fibval)\n return fibval",
"def problem2(limit):\n index = 6\n total = 2\n while True:\n fib_n = fib(index)\n if fib_n <= limit:\n total += fib_n\n index += 3\n else:\n break\n return total",
"def fib(n: int) -> int:\n if n == 0: return 0\n if n == 1: return 1\n return fib(n-1) + fib(n-2)",
"def fib(n):\n if n == 0: return 0\n if n == 1: return 1\n return fib(n-1) + fib(n-2)",
"def fibonacci_number(n):\r\n l = [0, 1] \r\n for i in range(n - 1):\r\n l = [*l, l[-1] + l[-2]]\r\n return l[n - 1]",
"def optimized_fibonacci(f):\n a = 0\n b = 1\n if f < 2:\n return f\n else:\n for i in range(1, f):\n c = a + b\n a = b\n b = c\n return b",
"def fib(i):\n if i < 2: return 1\n return fib(i-1) + fib(i-2)",
"def fibonacci(a):\n fib = [1,1]\n x = 0\n i = 1\n while x < a:\n x = fib [i] + fib[i-1]\n i += 1\n fib.append(x)\n return i, fib",
"def fib(n):\n a, b = 1, 1\n while n:\n a, b = b, a + b\n n -= 1\n return a",
"def fib(n):\n if n in (0, 1): return n\n return fib(n-1) + fib(n-2)",
"def fib(n:int) -> int:\n if n<= 2:\n return 1\n else:\n return fibonacci.fib(n-1) + fibonacci.fib(n-2)",
"def fib(n):\n if n == 1:\n return 1\n else:\n return n + fib(n-1)",
"def fib(index):\n return round((GR**index)/R5)",
"def get_fibonacci_last_digit_fast(n):\n fibonacci = [0 for i in range(n + 1)]\n fibonacci[1] = 1\n\n for i in range(2, n + 1):\n fibonacci[i] = (fibonacci[i - 1] + fibonacci[i - 2]) % 10\n\n return fibonacci[n]",
"def next_fib(f):\n for f in fib:\n i = fib.index(f)\n return f+fib[i-1]",
"def fibonacci(n):\r\n\r\n if n in past_fib:\r\n return past_fib[n]\r\n \r\n if n == 0 or n == 1:\r\n past_fib[n] = 1\r\n return 1\r\n\r\n total = fibonacci(n-1) + fibonacci(n-2)\r\n past_fib[n] = total\r\n return total"
] |
[
"0.8281956",
"0.7737896",
"0.75432396",
"0.72496516",
"0.7240206",
"0.714092",
"0.71191853",
"0.7100903",
"0.7076584",
"0.70763195",
"0.70746773",
"0.70539343",
"0.70536774",
"0.7052248",
"0.70504606",
"0.7048068",
"0.70352656",
"0.70310515",
"0.7012211",
"0.70013833",
"0.69907594",
"0.6987098",
"0.6973158",
"0.69725484",
"0.69700664",
"0.6957725",
"0.6956548",
"0.69514316",
"0.6949765",
"0.69481796"
] |
0.88220775
|
0
|
Returns the nth pentagonal number
|
def pentagonal(n):
return (n * ((3 * n) - 1)) / 2
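
# A minimal usage sketch, assuming pentagonal() above is in scope.  The first
# five pentagonal numbers are 1, 5, 12, 22, 35; note the true division above
# returns them as floats in Python 3.
print([pentagonal(n) for n in range(1, 6)])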
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def pentagonal(n: int) -> int:\n return int(n * (3 * n - 1) / 2)",
"def pentagon(n) -> int:\n\n return (n * (3 * n - 1)) // 2",
"def pentagonal_index(P):\n return (1 + sqrt(1 + 24 * P)) / 6",
"def heptagonal(n: int) -> int:\n return int(n * (5 * n - 3) / 2)",
"def octagonal(n: int) -> int:\n return int(n * (3 * n - 2))",
"def hexagonal(n: int) -> int:\n return int(n * (2 * n - 1))",
"def hexagonal_number(n):\n return n * (2 * n - 1)",
"def is_pentagonal(n):\r\n if ((1+(24*n+1)**0.5) / 6)%1 == 0:\r\n return True\r\n return False",
"def is_pentagonal(n: int) -> bool:\r\n root = (1 + 24 * n) ** 0.5\r\n return ((1 + root) / 6) % 1 == 0",
"def is_pentagonal(n):\n if (1+(24*n+1)**0.5) % 6 == 0:\n return True\n return False",
"def is_pentagonal(P):\n return sqrt(1 + 24 * P) % 6 == 5",
"def solution():\n pentagonals = []\n n = 1\n while True:\n newp = (n*(3*n-1))/2\n for p in pentagonals:\n diff = abs(newp-p)\n if ispentagonal(diff) and ispentagonal(newp+p):\n return diff\n pentagonals.append(newp)\n n += 1",
"def isPentagonal(n):\n test = (sqrt(1+24*n)+1)/6\n return test == (int) (test)",
"def octant(p):\n x = p[0]\n y = p[1]\n z = p[2]\n if z > 0:\n if y > 0:\n if x > 0:\n return 1\n else:\n return 2\n else:\n if x > 0:\n return 4\n else:\n return 3\n else:\n if y > 0:\n if x > 0:\n return 5\n else:\n return 6\n else:\n if x > 0:\n return 8\n else:\n return 7",
"def polygonal_number(s, n):\n return (n*n*(s-2)-n*(s-4))/2",
"def n(self):\n return self._nx * self._ny",
"def collatz(n):\n if n%2==0: return n/2\n else: return 3*n+1",
"def is_pentagonal_number(n):\n _, x = quadratic.solve(3, -1, -2 * n)\n return is_number(x) and x.is_integer()",
"def nw(n):\n return 4*n*n + 1",
"def __call__(self, n):\n perimeter = 2 * math.pi\n return Point(math.cos(n / perimeter), math.sin(n / perimeter))",
"def rc2p(row, col, N):\n\n # print('row:{} col:{}'.format(row,col))\n return row * (N + 1) + col",
"def i_min(Pd):\n return int(pentagonal_index(2 * Pd))",
"def solution(limit: int = 5000) -> int:\r\n pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]\r\n for i, pentagonal_i in enumerate(pentagonal_nums):\r\n for j in range(i, len(pentagonal_nums)):\r\n pentagonal_j = pentagonal_nums[j]\r\n a = pentagonal_i + pentagonal_j\r\n b = pentagonal_j - pentagonal_i\r\n if is_pentagonal(a) and is_pentagonal(b):\r\n return b\r\n\r\n return -1",
"def triangle_number(n):\n return n * (n + 1) / 2",
"def pentakis(self):\n return self.nlegomena(5)",
"def get_pent_idx(pent):\n pidx = 0\n for i in range(pent.shape[0]):\n for j in range(pent.shape[1]):\n if pent[i][j] != 0:\n pidx = pent[i][j]\n break\n if pidx != 0:\n break\n if pidx == 0:\n return -1\n return pidx - 1",
"def triangle(n: int) -> int:\n return int(n * (n + 1) / 2)",
"def get_cells(self):\r\n return \\\r\n (self.nx-1 if self.nx>1 else 1)* \\\r\n (self.ny-1 if self.ny>1 else 1)* \\\r\n (self.nz-1 if self.nz>1 else 1)",
"def ne(n):\n return 4*n*n - 2*n + 1",
"def pythagorean_triples(n):\n pass"
] |
[
"0.88123727",
"0.83205825",
"0.8142224",
"0.7177567",
"0.7158477",
"0.6992907",
"0.6940794",
"0.69250464",
"0.6844195",
"0.6839999",
"0.65696996",
"0.6385111",
"0.62820965",
"0.62305856",
"0.61976534",
"0.6148501",
"0.60797244",
"0.6061159",
"0.6033475",
"0.60114115",
"0.5987284",
"0.59227717",
"0.5909272",
"0.5879085",
"0.5878826",
"0.58617705",
"0.58026445",
"0.5798154",
"0.5793869",
"0.5793515"
] |
0.86722
|
1
|
Slice string `series` from index `start` for `length` characters and convert the resulting string into a list of ints
|
def _get_slice(series, start, length):
return [ int(s) for s in series[start:start+length] ]
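
# A minimal usage sketch, assuming _get_slice() above is in scope: take the
# 4 characters of "3141592653" starting at index 2 and convert them to ints.
print(_get_slice("3141592653", 2, 4))  # [4, 1, 5, 9]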
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def slices(series: str, length: int) -> List[str]:\n if length > len(series) or length <= 0:\n raise ValueError(f\"Error Length: {length}\")\n\n return [series[i:i + length] for i in range(len(series) - length + 1)]",
"def slices(digits, count):\n\n last = len(digits)-count+1\n\n if count == 0:\n raise ValueError('Zero-length slices are not allowed.')\n\n elif last <= 0:\n raise ValueError('Slice is longer than the string itself.')\n\n else:\n series = []\n\n for start in range(last):\n substring = digits[start:start+count]\n series.append([int(x) for x in substring])\n\n return series",
"def slices(series, num):\n\tif num > len(series) or num < 1:\n\t\traise ValueError\n\tif not series.isdigit():\n\t\traise TypeError(\"Input string must consist only of digits!\")\n\treturn [[int(x) for x in series[index:index+num]] for index in xrange(len(series) + 1 - num)]",
"def slices(digits, length):\n if len(digits) < length or length < 1:\n raise ValueError(\"Slice length %d is too long\" % length)\n\n digit_array = [int(c) for c in digits]\n for i in range(len(digits) - length + 1):\n yield digit_array[i:i+length]",
"def split_len(seq, length):\n return [seq[i:i+length] for i in range(0, len(seq), length)]",
"def slices(numstr, count):\n if count > len(numstr) or not count:\n raise ValueError(\"Slice can't be smaller than input!\")\n return [ map(int, list(numstr[i:i+count]))\n for i in xrange(len(numstr)-count+1) ]",
"def get_positions(start_idx, end_idx, length):\n return list(range(-start_idx, 0)) + [0] * (end_idx - start_idx + 1) + \\\n list(range(1, length - end_idx))",
"def get_positions(start_idx, end_idx, length):\n return list(range(-start_idx, 0)) + [0]*(end_idx - start_idx + 1) + \\\n list(range(1, length-end_idx))",
"def convert_to_ints(command, start, end):\n return [raw_bytes_to_int(command[x:x + BYTES_IN_INT]) for x in range(start, end, BYTES_IN_INT)]",
"def slices(series, length):\n if length == 0:\n raise ValueError(\"Slice length may not be 0\")\n num_slices = len(series)-length+1\n if num_slices <= 0:\n raise ValueError(\"Slice length may not be longer than series\")\n return [ _get_slice(series, i, length) for i in range(0, num_slices) ]",
"def slicer(seq, start=None, stop=None, step=None):\n return seq[start:stop:step]",
"def get_data_range(self, start_position, length):\n pass",
"def remove_interval(s, start, stop):\n #s[:start] will get the string from start of string to 'start'->value stored in start\n #s[stop:] will get the string from 'stop'->value stored in the stop to end of the string\n temp_list = s[:start] + s[stop+1:]\n return temp_list",
"def slices(digits, size):\n if not 0 <= size <= len(digits):\n raise ValueError\n elif digits == '':\n return [[1]]\n\n slice_list = []\n\n for i in range(len(digits) - size + 1):\n slice_list.append([int(d) for d in digits[i:i+size]])\n return slice_list",
"def split_str_into_len(s, l=2):\r\n return [s[i:i+l] for i in range(0, len(s), l)]",
"def run_length_decode(integers):\n\n x = []\n for index, val in enumerate(integers[::2]):\n x += [val] * integers[1::2][index]\n return x",
"def _translate_range(self, len_, start, end):\n start = int(start)\n end = int(end)\n if start < 0:\n start += len_\n start = max(0, min(start, len_))\n if end < 0:\n end += len_\n end = max(-1, min(end, len_ - 1))\n return start, end",
"def split_seq(seq,size):\n return [seq[i:i+size] for i in range(0, len(seq), size)]",
"def _splitCount(s: str, count: int)->list:\n return [''.join(x) for x in zip(*[list(s[z::count]) for z in range(count)])]",
"def _slice(self, slc):\n char_indexes = self._char_indexes\n slice_indexes = char_indexes[slc]\n # If it's the end of the string, we need to append final color codes.\n if not slice_indexes:\n # if we find no characters it may be because we are just outside\n # of the interval, using an open-ended slice. We must replay all\n # of the escape characters until/after this point.\n if char_indexes:\n if slc.start is None and slc.stop is None:\n # a [:] slice of only escape characters\n return ANSIString(self._raw_string[slc])\n if slc.start is None:\n # this is a [:x] slice\n return ANSIString(self._raw_string[: char_indexes[0]])\n if slc.stop is None:\n # a [x:] slice\n return ANSIString(self._raw_string[char_indexes[-1] + 1 :])\n return ANSIString(\"\")\n try:\n string = self[slc.start or 0]._raw_string\n except IndexError:\n return ANSIString(\"\")\n last_mark = slice_indexes[0]\n # Check between the slice intervals for escape sequences.\n i = None\n for i in slice_indexes[1:]:\n for index in range(last_mark, i):\n if index in self._code_indexes:\n string += self._raw_string[index]\n last_mark = i\n try:\n string += self._raw_string[i]\n except IndexError:\n # raw_string not long enough\n pass\n if i is not None:\n append_tail = self._get_interleving(char_indexes.index(i) + 1)\n else:\n append_tail = \"\"\n return ANSIString(string + append_tail, decoded=True)",
"def int_to_indices(value: int, length: int, radix_bits: int) -> Iterable[int]:\n mask = (1 << radix_bits) - 1\n return ((value >> (i * radix_bits)) & mask for i in reversed(range(length)))",
"def _get_slice_len(s, axlen):\n if s.start is None:\n start = 0\n else:\n start = s.start\n if s.stop is None:\n stop = axlen\n else:\n stop = np.min([s.stop, axlen])\n if s.step is None:\n step = 1\n else:\n step = s.step\n\n return ((stop - 1 - start) // step) + 1",
"def string_to_int(string, length, vocab):\n\n # make lower to standardize\n string = string.split(' ')\n\n if len(string) > length:\n string = string[:length]\n\n rep = list(map(lambda x: vocab.get(x, vocab['<UNK>']), string))\n\n if len(string) < length:\n rep += [vocab['<PAD>']] * (length - len(string))\n\n # print (rep)\n return rep",
"def convert_to_bins(ts, start_time, num_segments, segment_length=None, time_period=None):\n if time_period is None:\n time_period = num_segments * segment_length\n\n return (((np.asarray(ts) - start_time) % time_period / time_period) * num_segments).astype(int)",
"def parse_range(seq: str) -> list[int]:\n seq = seq.split(\",\")\n acc = []\n for i in seq:\n m = re.match(r\" *(?P<start>\\d+) *(- *(?P<end>\\d+))? *\", i)\n\n if not m:\n continue\n\n a = [m.group(\"start\"), m.group(\"end\")]\n a = [int(x) for x in a if x]\n\n if len(a) > 1:\n a = range(int(a[0]), int(a[1] + 1))\n\n acc.append(a)\n\n return list(\n set([x for x in list(itertools.chain.from_iterable(acc)) if x])\n )",
"def split_string(line, nth):\n return [int(line[i:i+nth]) for i in range(0, len(line), nth)]",
"def limit(st,length):\n return st[:length]",
"def _conv_slice_to_list(slice_obj, start_def=0, stop_def=100, step_def=1):\n if slice_obj.start is None:\n start = start_def\n else:\n start = slice_obj.start\n if slice_obj.stop is None:\n stop = stop_def\n else:\n stop = slice_obj.stop\n if slice_obj.step is None:\n step = step_def\n else:\n step = slice_obj.step\n return list(range(start, stop, step))",
"def __parse_line(moves: str, start: int) -> List[str]:\n\n return [moves[start + j] for j in range(SIZE)]",
"def _parse_interval_str(cls, s):\n\n start, stop = s.split(':')\n if start == '':\n start = 0\n else:\n start = int(start)\n if stop == '':\n stop = None\n else:\n stop = int(stop)\n return slice(start, stop)"
] |
[
"0.699702",
"0.6602383",
"0.6324749",
"0.6079446",
"0.604875",
"0.5986106",
"0.5644645",
"0.5591515",
"0.5581331",
"0.5538795",
"0.55022365",
"0.5466425",
"0.54612",
"0.5392008",
"0.53775644",
"0.53083634",
"0.529448",
"0.5261992",
"0.52554864",
"0.523289",
"0.52198005",
"0.5218701",
"0.5215791",
"0.5182758",
"0.5179784",
"0.5169486",
"0.5156996",
"0.514828",
"0.5113412",
"0.5108801"
] |
0.7710995
|
0
|
Generate website group edges CSV.
|
def generate_website_group_edges(website_group_json, dst):
with open(website_group_json) as f_h:
with gremlin_writer(GremlinEdgeCSV, dst, attributes=[]) as writer:
for data in json_lines_file(f_h):
root_id = data["id"]
websites = data["websites"]
for website in websites:
writer.add(
_id=get_id(root_id, website, {}),
_from=root_id,
to=website,
label=WEBISTE_GROUP_EDGE_LABEL,
attribute_map={}
)
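
# A minimal input sketch; gremlin_writer, GremlinEdgeCSV, json_lines_file,
# get_id and WEBISTE_GROUP_EDGE_LABEL are assumed to come from the surrounding
# project.  Each line of website_group_json is read as a JSON object such as
#     {"id": "group-1", "websites": ["site-a", "site-b"]}
# and every (group, website) pair becomes one edge row in the output CSV.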
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def writeEdges(self, fileName, format):\n edges = self.edgeIndex.values()\n if format == 'simple':\n f = open(fileName,'w')\n for edge in edges:\n f.write(\"%s -- %s\\n\" % (edge.startVertex.vertexNumber, edge.endVertex.vertexNumber))\n f.close()\n elif format == 'dot':\n f = open(fileName,'w')\n f.write(\"graph G { \\n\")\n for edge in edges:\n f.write(\"%s -- %s;\\n\" % (edge.startVertex.vertexNumber, edge.endVertex.vertexNumber))\n f.write(\"} \\n\")\n f.close()",
"def makeGraph(self):\n self.floorGraph = graph.Graph()\n file = open(\"edges.csv\")\n edges = file.readlines()\n for edge in edges:\n params = edge.split(\",\")\n self.floorGraph.addEdge(params[0],params[1],float(params[2]))\n self.floorGraph.addEdge(params[1],params[0],float(params[2]))",
"def export_groups(self):\n print('=== Exporting all group data...')\n\n for group in self.client.tenant.groups:\n print('- Exporting group:', group.name)\n\n json = {\n 'id': self.get_id(group),\n 'href': group.href,\n 'name': group.name,\n 'description': group.description,\n 'status': group.status,\n 'createdAt': group.created_at.isoformat(),\n 'modifiedAt': group.modified_at.isoformat(),\n 'customData': self.get_custom_data(group),\n 'directory': {\n 'id': self.get_id(group.directory),\n 'href': group.directory.href,\n 'name': group.directory.name,\n 'description': group.directory.description,\n 'status': group.directory.status,\n 'createdAt': group.directory.created_at.isoformat(),\n 'modifiedAt': group.directory.modified_at.isoformat(),\n },\n 'accounts': [],\n }\n\n for account in group.accounts:\n json['accounts'].append({\n 'id': self.get_id(account),\n 'href': account.href,\n 'username': account.username,\n 'email': account.email,\n 'fullName': account.full_name,\n 'givenName': account.given_name,\n 'middleName': account.middle_name,\n 'surname': account.surname,\n 'status': account.status,\n 'createdAt': account.created_at.isoformat(),\n 'modifiedAt': account.modified_at.isoformat(),\n })\n\n tenant = self.get_id(self.client.tenant)\n self.write('%s/%s/groups/%s' % (self.location, tenant, json['id']), json)\n\n print('=== Done!\\n')",
"def make_edge_instance_row(edges):\n\theader = \"parent|source|target|source_port|target_port|message_type|properties|metadata\\n\"\n\tcontent = \"\"\n\n\tfor id, e in edges.items():\n\t\tcontent += \"{}|{}|{}|{}|{}|{}|{}|{}\\n\".format(e.parent.id, e.src_device.id, e.dst_device.id, e.src_port.name, e.dst_port.name, e.message_type.id, json.dumps(e.properties), e.metadata)\n\n\treturn header + content",
"def write_edgelist(H, path, delimiter=\" \", encoding=\"utf-8\"):\n with open(path, \"wb\") as file:\n for line in generate_edgelist(H, delimiter):\n line += \"\\n\"\n file.write(line.encode(encoding))",
"def generate_edgelist(H, delimiter=\" \"):\n for id in H.edges:\n e = H.edges.members(id)\n yield delimiter.join(map(str, e))",
"def export(fileprefix, hedges):\n with open(fileprefix + '.txt', 'w') as f:\n for h in hedges:\n s = \"\"\n for node in h[0]: #each node in the tail\n s += str(node) + \"|\"\n s = s[:-1]\n s += '\\t'\n for node in h[1]: #each node in the head\n s += str(node) + \"|\"\n s = s[:-1]\n s += '\\t'\n s += '1' + '\\n' #assigns weight for the hedge, currently always set to 1\n f.write(s)",
"def _write_driver_graph_urls(self):\n try:\n driver_jobid = os.environ['LSB_JOBID']\n except KeyError:\n pass\n else:\n driver_rtm_url = construct_rtm_url(driver_jobid)\n driver_host = socket.gethostname()\n logger.info(f\"Driver LSB_JOBID is: {driver_jobid}\")\n logger.info(f\"Driver host is: {driver_host}\")\n logger.info(f\"Driver RTM graphs: {driver_rtm_url}\")\n\n start_timestamp = get_job_submit_time()\n ganglia_url = construct_ganglia_link(driver_host, start_timestamp)\n\n hostgraph_url_path = 'graph-links.txt'\n with open(hostgraph_url_path, 'a') as f:\n header = f\"=== Client RTM/Ganglia graphs ({socket.gethostname()}) ===\"\n f.write(header + \"\\n\")\n f.write(\"=\"*len(header) + \"\\n\")\n f.write(f\" {driver_rtm_url}\\n\")\n f.write(f\" {ganglia_url}\\n\\n\")",
"def generate_dataset_csv(request):\n\n response = csv_export(request,Dataset)\n return response",
"def export_graph(cls, graph, filename):\n edges = {}\n for node in graph.values():\n for neighbour, dist in node.distances.items():\n if (node.id, neighbour) in edges or (neighbour, node.id) in edges:\n continue\n edges[(node.id, neighbour)] = dist\n\n file_string = '{}\\n'.format(len(graph))\n for edge, dist in edges.items():\n file_string = file_string + '{} {} {}\\n'.format(edge[0], edge[1], dist)\n file_string = file_string[:-1] # Strip the last \\n\n\n with open(filename, 'w') as file:\n file.write(file_string)",
"def dump_gazettes_as_csv(self):\n # TODO: dump_gazettes_as_csv\n pass",
"def write_edges(\n edges: Mapping[str, Any],\n filename: str,\n jsonlines: bool = False,\n gzipflag: bool = False,\n yaml: bool = False,\n):\n pass",
"def store_grouped_data(data,path):\n i = 0\n for name, group in data:\n l = len(group)\n print name, \", \", l\n if l > 999:\n group.to_csv(path + \"//clean.events\"+ str(i), index=False)\n i += 1",
"def save_posts_data(self, edges, username, user_id) -> None:\n with open(f'database/{self.username}_{self.name_file}/posts.csv', 'a', newline='', encoding='utf-8') as csvfile:\n writer = csv.writer(csvfile)\n for edge in edges:\n row = list()\n row.append(user_id)\n row.append(username)\n row.append(edge['node']['__typename'])\n row.append(edge['node']['taken_at_timestamp'])\n row.append(edge['node']['edge_media_to_comment']['count'])\n row.append(edge['node']['edge_media_preview_like']['count'])\n edges_caption = edge['node']['edge_media_to_caption']['edges']\n caption = str()\n for edge_caption in edges_caption:\n caption += edge_caption['node']['text']\n row.append(caption)\n writer.writerow(row)",
"def writeEDGE(self):\n\t\tpass",
"def coord_file(pybel_group, dihed, nonH, energy, name):\n csv_name = \"coords\" + name + \".csv\"\n #Open file, create writer\n f = open(csv_name, \"w\")\n wr = csv.writer(f)\n #Generate coords and write them\n for py_molec in pybel_group:\n wr.writerow(vector(py_molec, dihed, nonH, energy))\n f.close()\n return csv_name",
"def file(self):\n result = []\n completePath = CompletePath(self.path, self.filename) \n with open(completePath.path(), 'w', newline='') as csvfile:\n fieldnames = ['Activity', 'Points']\n writer = csv.DictWriter(csvfile, fieldnames = fieldnames)\n writer.writeheader()\n for i in range ( len( self.groupPriority.rows() ) ):\n tmp = self.groupPriority.rows()[i]\n self.log.info ( \"FinalCSV\", \"file\",\"data {0},{1}\".format( tmp.activity(), tmp.points() ) )\n writer.writerow({'Activity': tmp.activity(), 'Points': tmp.points()})\n self.log.info(\"FinalCSV\", \"file\", \"Elaborated file: {0}\".format ( completePath.path() ) )",
"def setup_csv(self) -> None:\n csvData = ['Followers', 'Time']\n\n # Create our CSV file header\n with open(self.graphfile, 'w') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerow(csvData)\n csvFile.close()",
"def write_csv(elongation, file_name):\n e = elongation\n\n with open(file_name, 'w') as f:\n f.write(f\"\"\"\\\nBreak Load, {e.break_load()}\nBreak Strength, {e.break_strength()}\nBreak Elongation, {e.break_elongation()}\nYield Load, {e.yield_load()}\nYield Strength, {e.yield_strength()}\nYield Elongation, {e.yield_elongation()}\nGauge Length, {e.gauge_length}\nSample Width, {e.sample_width}\nSample Thickness, {e.sample_thickness}\n\nPoints\n %, N\"\"\")\n for x, y in zip(e.xs, e.ys):\n f.write(f'\\n{x:>8.4f}, {y:>8.4f}')",
"def export_events_gtf(self, edge):\n strand = self.gene.strand\n for event in self.positive_ids:\n full_event = '{}:{}'.format(self.etype, event)\n e_vals = full_event.replace('-', ':').split(':')\n\n line1 = self.gtf_string.format(int(e_vals[2]), int(e_vals[3]) + edge, strand, full_event, full_event,\n 'alternative1')\n yield line1, self.etype\n\n line3 = self.gtf_string.format(e_vals[2], int(e_vals[3]) + edge, strand, full_event, full_event,\n 'alternative2')\n yield line3, self.etype",
"def write_edges_shp(self,shpname,extra_fields=[]): \n base_dtype = [('edge_id1',np.int32),\n ('length',np.float64),\n ('depth_mean',np.float64)]\n \n side_depths_mean = self.edge_depths()\n \n try:\n side_depths_max = self.side_depths_max()\n extra_fields.append( ('depth_max',np.float64, lambda e: side_depths_max[e]) )\n except:\n pass\n \n \n for efi in range(len(extra_fields)):\n fname,ftype,ffunc = extra_fields[efi]\n if ftype == int:\n ftype = np.int32\n base_dtype.append( (fname,ftype) )\n \n edges = self.edges_as_nodes_cells_mark()\n vertices = self.nodes['x']\n \n edge_data = np.zeros(len(edges), dtype=base_dtype)\n edge_geoms = [None]*len(edges)\n \n for edge_id in range(edges.shape[0]):\n if edge_id % 500 == 0:\n print(\"%0.2g%%\"%(100.*edge_id/edges.shape[0]))\n \n nodes = vertices[edges[edge_id,:2]]\n g = geometry.LineString(nodes)\n edge_geoms[edge_id] = g\n edge_data[edge_id]['length'] = g.length\n edge_data[edge_id]['edge_id1'] = edge_id + 1\n edge_data[edge_id]['depth_mean'] = side_depths_mean[edge_id]\n\n for fname,ftype,ffunc in extra_fields:\n edge_data[edge_id][fname] = ffunc(edge_id)\n \n wkb2shp.wkb2shp(shpname,input_wkbs=edge_geoms,fields=edge_data,\n overwrite=True)",
"def edge_table(self, G, myfile):\r\n layout = \"{0}{1:>6}{2:>6}{3:>6}\"\r\n header = layout.format(\"Neighbor\", \"\\t\", \"Edge Weight\", \"\\n\")\r\n myfile.write(header)\r\n for pre, node in list(G.edges):\r\n data = layout.format((pre, node), '\\t', \"{0:.2f}\".format(G.edges[pre, node]['weight']), \"\\n\")\r\n myfile.write(data)\r\n return myfile",
"def download_group_data(driver, group_str):\n filename = FILEPATH + 'finviz.csv'\n remove_leftover_files()\n\n file_exists = False\n while not file_exists:\n try:\n driver.get(groups_url.format(group_str))\n except TimeoutException:\n pass\n\n # this only seems to happen without premium account login\n # try:\n # # if ad pops up, close it\n # ad_close_button = driver.find_element_by_id('close')\n # ad_close_button.click()\n # except NoSuchElementException:\n # pass\n\n try:\n # can't use url, need to click link\n # driver.get(groups_dl_url.format(group_str))\n driver.find_element_by_link_text('export').click()\n # this only seems to happen without premium account login\n # try:\n # # if ad pops up, close it\n # ad_close_button = driver.find_element_by_id('close')\n # ad_close_button.click()\n # except NoSuchElementException:\n # pass\n except TimeoutException:\n pass\n\n # make sure file is there\n file_exists = os.path.exists(filename)\n if file_exists: break\n time.sleep(3.46)\n\n\n latest_market_date = get_last_open_trading_day()\n dst_filename = FILEPATH + latest_market_date + '_finviz_' + group_str + '.csv'\n os.rename(filename, dst_filename)\n clean_group_data(dst_filename)",
"def generateEdgeFile(adj_filename):\n # Parse the adjacency file\n edges = []\n edge_id = 0\n min_v = None\n max_v = None\n min_depth = 0\n depth = 0\n num_nodes = 0\n with open(adj_filename, 'r') as adj_f:\n r = csv.reader(adj_f, delimiter='\\t')\n for row_idx, row in enumerate(r):\n if not num_nodes:\n num_nodes = len(row)\n else:\n assert len(row) == num_nodes\n for col_idx, v in enumerate(row):\n if v:\n v = float(v)\n if not min_v:\n min_v = v\n max_v = v\n if (v < min_v):\n min_v = v\n if v > max_v:\n max_v = v\n start = 'Node' + str(row_idx + 1)\n end = 'Node' + str(col_idx + 1)\n depth += 1 \n edges.append([edge_id, start, end, v, v, depth, ''])\n edge_id += 1\n max_depth = depth\n\n # Create temporary edge CSV file\n temp_edge_filename = 'temp_edges.csv'\n with open(temp_edge_filename, 'w') as edge_f:\n w = csv.writer(edge_f, delimiter='\\t')\n header_row = ['Id', 'Node1', 'Node2', 'Property1', 'Property2', 'Property3', 'Property4']\n meta_row_min = ['MIN_VAL', 'NA', 'NA', str(min_v), str(min_v), str(min_depth), 'NA']\n meta_row_max = ['MAX_VAL', 'NA', 'NA', str(max_v), str(max_v), str(max_depth), 'NA']\n meta_row_use = ['USE_AS', 'S', 'E', 'C', 'W', 'D', 'L']\n w.writerow(header_row)\n w.writerow(meta_row_min)\n w.writerow(meta_row_max)\n w.writerow(meta_row_use)\n for edge in edges: \n w.writerow(edge)\n return temp_edge_filename",
"def export_events_gtf(self, edge):\n strand = self.gene.strand\n for event in self.positive_ids:\n full_event = '{}:{}'.format(self.etype, event)\n e_vals = full_event.replace('-', ':').split(':')\n\n line1 = self.gtf_string.format(int(e_vals[2]) - edge, e_vals[3], strand, full_event, full_event,\n 'alternative1')\n yield line1, self.etype\n\n line3 = self.gtf_string.format(int(e_vals[2]) - edge, e_vals[2], strand, full_event, full_event,\n 'alternative2')\n yield line3, self.etype",
"def save_csv(self, filename: str, type='n', **args):\n if type == 'n':\n df = self.export_nodes()\n else:\n df = self.export_edges()\n df.to_csv(filename, index=False)",
"def export_events_gtf(self, edge):\n\n strand = self.gene.strand\n for event in self.positive_ids:\n full_event = '{}:{}'.format(self.etype, event)\n\n e_vals = full_event.replace('-', ':').split(':')\n\n line1 = self.gtf_string.format(int(e_vals[2]) - edge, e_vals[2], strand,\n full_event, full_event, 'alternative2')\n yield line1, self.etype\n\n line2 = self.gtf_string.format(e_vals[3], int(e_vals[3]) + edge, strand,\n full_event, full_event, 'alternative2')\n yield line2, self.etype\n\n line3 = self.gtf_string.format(e_vals[2], e_vals[3], strand, full_event,\n full_event, 'alternative1')\n yield line3, self.etype",
"def save_friend_nodes(self):\n print \"Exporting to file tsv ...\"\n count_edge = 0\n count_node = 0\n with open('../data/yelp.tsv','w') as f:\n for user in self.df['user_id']:\n for friends in self.df['friends']:\n count_node += 1\n for friend in friends:\n f.write(\"%s\\t%s\\n\" % (user, friend))\n count_edge += 1\n print \"Graph Summary:\", count_node, \"nodes,\", count_edge, \"edges.\"",
"def generate_gene_edges(genbank):\n genome_key = genbank.id\n genome_id = _genome_vert_name + '/' + genome_key\n for (idx, feature) in enumerate(genbank.features):\n # Skip the 'source' feature, which describes the entire genome\n if feature.type == 'source' or 'locus_tag' not in feature.qualifiers:\n continue\n # Generate the edge from gene to genome\n gene_key = feature.qualifiers['locus_tag'][0]\n gene_id = _gene_vert_name + '/' + gene_key\n edge_key = gene_key + '-' + genome_key\n yield {'_from': gene_id, '_to': genome_id, '_key': edge_key}",
"def edges(self):\n return self.generate_edges()"
] |
[
"0.5825317",
"0.5617242",
"0.557936",
"0.5554491",
"0.55313265",
"0.54741925",
"0.5381994",
"0.5376619",
"0.5346174",
"0.53068507",
"0.5273619",
"0.5252207",
"0.5225125",
"0.52054346",
"0.518958",
"0.5181892",
"0.51808137",
"0.51801807",
"0.51615274",
"0.51400363",
"0.51153094",
"0.5106988",
"0.50893724",
"0.5088972",
"0.5086601",
"0.506989",
"0.5053901",
"0.5040551",
"0.5035053",
"0.5031202"
] |
0.77869326
|
0
|
Given an HMM model, calculate the one-timestep update to the posterior.
|
def one_step_update(model, posterior_tm1, Y_t):
prediction = model.transmat_ @ posterior_tm1
likelihood = np.exp(model._compute_log_likelihood(Y_t))
posterior_t = prediction * likelihood
return posterior_t
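A minimal self-contained sketch of the same filtering step, assuming an hmmlearn-style model that exposes transmat_ and _compute_log_likelihood exactly as in the snippet above; the added import and the final normalization (which keeps the posterior a proper probability vector) are assumptions, not part of the original record.
import numpy as np

def one_step_update_normalized(model, posterior_tm1, Y_t):
    # Predict: propagate the previous posterior through the transition model
    # (same matrix convention as the snippet above).
    prediction = model.transmat_ @ posterior_tm1
    # Correct: weight each state by the likelihood of the new observation.
    likelihood = np.exp(model._compute_log_likelihood(Y_t)).ravel()
    posterior_t = prediction * likelihood
    # Normalize so the filtered posterior sums to one.
    return posterior_t / posterior_t.sum()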
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def moment_update(model, model_ema, m):\n for p1, p2 in zip(model.parameters(), model_ema.parameters()):\n p2.data.mul_(m).add_(1-m, p1.detach().data)",
"def moment_update(model, model_ema, m):\n for p1, p2 in zip(model.parameters(), model_ema.parameters()):\n p2.data.mul_(m).add_(1-m, p1.detach().data)",
"def moment_update(model, model_ema, m):\r\n for p1, p2 in zip(model.parameters(), model_ema.parameters()):\r\n p2.data.mul_(m).add_(1 - m, p1.detach().data)\r\n # p2.data.mul_(m).add_(1 - m, p1.data)",
"def momentum_update(model_q, model_k, m=0.999):\n for p1, p2 in zip(model_q.parameters(), model_k.parameters()):\n p2.data.mul_(m).add_(1 - m, p1.detach().data)",
"def _update_step(self, *, observations: types.ObservationsTorch) -> None:",
"def update(self, obs):\n #######################################\n # Step 1 - prediction for birth targets\n born = [deepcopy(comp) for comp in self.birthgmm]\n # The original paper would do a spawning iteration as part of Step 1.\n spawned = [] # not implemented\n\n #######################################\n # Step 2 - prediction for existing targets\n updated = [GmphdComponent(self.survival * comp.weight, dot(self.f, comp.loc),\n self.q + dot(dot(self.f, comp.cov), self.f.T), comp.id)\n for comp in self.gmm]\n\n predicted = born + spawned + updated\n\n #######################################\n # Step 3 - construction of PHD update components\n # These two are the mean and covariance of the expected observation\n nu = [dot(self.h, comp.loc) for comp in predicted]\n s = [self.r + dot(dot(self.h, comp.cov), self.h.T) for comp in predicted]\n # Not sure about any physical interpretation of these two...\n k = [dot(dot(comp.cov, self.h.T), linalg.inv(s[index]))\n for index, comp in enumerate(predicted)]\n pkk = [dot(eye(len(k[index])) - dot(k[index], self.h), comp.cov)\n for index, comp in enumerate(predicted)]\n\n #######################################\n # Step 4 - update using observations\n # The 'predicted' components are kept, with a decay\n newgmm = [GmphdComponent(comp.weight * (1.0 - self.detection), comp.loc, comp.cov, comp.id)\n for comp in predicted]\n\n # then more components are added caused by each obsn's interaction with existing component\n for anobs in obs:\n anobs = array(anobs)\n newgmmpartial = []\n for j, comp in enumerate(predicted):\n newgmmpartial.append(GmphdComponent(\n self.detection * comp.weight * dmvnorm(nu[j], s[j], anobs),\n comp.loc + dot(k[j], anobs - nu[j]), pkk[j]))\n\n # The Kappa thing (clutter and reweight)\n weightsum = simplesum(newcomp.weight for newcomp in newgmmpartial)\n reweighter = 1.0 / (self.clutter + weightsum)\n for newcomp in newgmmpartial:\n newcomp.weight *= reweighter\n\n newgmm.extend(newgmmpartial)\n\n self.gmm = newgmm",
"def MH_step(log_like, log_prior, model_func, prop_params, curr_params,\\\n curr_like, curr_prior, max_like, maxL_params):\n # proposed model:\n prop_model = model_func(prop_params)\n prop_like = log_like(prop_model)\n prop_prior = log_prior(prop_params)\n\n # posterior:\n post_old = curr_like + curr_prior\n post_new = prop_like + prop_prior\n \n # acceptance testing:\n a = np.exp(post_new - post_old)\n draw = np.random.uniform(0, 1)\n \n if (a > draw) and (a < np.inf):\n accept = True\n curr_params = prop_params\n #print(curr_like, max_like)\n if prop_like > max_like:\n max_like = prop_like\n maxL_params = curr_params\n else:\n accept = False\n curr_params = curr_params\n \n return(accept, curr_params, maxL_params, max_like)",
"def on_step(self, t, is_update): \n if t >= self.t_learn_start + self.t_start:\n if is_update:\n self.has_updated = True\n \n if t % self.t_save == 0 and self.has_updated:\n # Save model.\n self.save_model(t, self.saver)\n\n # Increment iteration count.\n self.t_add_op.eval(session=self.sess)",
"def postprocess_step(self, t, y):\n pass",
"def evaluate(env, model, num_env, iter_step):\n episode_rewards = []\n episode_reward = np.zeros((num_env))\n obs = env.reset()\n for _ in tqdm(range(iter_step)):\n action, _states = model.predict(obs)\n obs, reward, done, info = env.step(action)\n episode_reward += reward\n for i in range(num_env):\n if done[i]:\n episode_rewards.append(episode_reward[i])\n episode_reward[i] = 0\n return episode_rewards",
"def on_epoch_end(self):\n self.current_params = self.model.posterior_mean(self.params)\n self.current_epoch += 1\n self.parameter_values += [self.current_params]\n self.epochs += [self.current_epoch]",
"def evaluate(env, model):\n episode_rewards = []\n for _ in range(10):\n reward_sum = 0\n done = False\n obs = env.reset()\n while not done:\n action, _states = model.predict(obs)\n obs, reward, done, info = env.step(action)\n reward_sum += reward\n episode_rewards.append(reward_sum)\n return np.mean(episode_rewards)",
"def worker(model, max_steps=1000):\n train_data = []\n # https://gym.openai.com/envs/Breakout-v0/\n env = gym.make('Breakout-v0')\n obs = env.reset()\n obs = filter_obs(obs, obs_shape=(84, 84))\n\n ep_reward = 0\n for _ in range(max_steps):\n act, val = model.gen_actions_and_values([obs])\n act, val = act[0], val[0]\n\n next_obs, rew, d, _ = env.step(act)\n next_obs = filter_obs(next_obs, obs_shape=(84, 84))\n train_data.append([obs, act, rew, val, next_obs])\n obs = next_obs\n ep_reward += rew\n if d:\n break\n\n train_data = np.asarray(train_data)\n\n ep_reward = np.sum(train_data[:, 2])\n # Calculate GAEs and replace values with the new values.\n train_data[:, 3] = calculate_gaes(train_data[:, 2], train_data[:, 3])\n\n return train_data, ep_reward",
"def leapfrog(params, momentum, log_prob_func, steps=10, step_size=0.1, jitter=0.01, normalizing_const=1., softabs_const=1e6, explicit_binding_const=100, fixed_point_threshold=1e-20, fixed_point_max_iterations=6, jitter_max_tries=10, inv_mass=None, ham_func=None, sampler=Sampler.HMC, integrator=Integrator.IMPLICIT, metric=Metric.HESSIAN, store_on_GPU = True, debug=False, pass_grad = None):\n\n params = params.clone(); momentum = momentum.clone()\n # TodO detach graph when storing ret_params for memory saving\n if sampler == Sampler.HMC and integrator != Integrator.SPLITTING and integrator != Integrator.SPLITTING_RAND and integrator != Integrator.SPLITTING_KMID:\n def params_grad(p):\n p = p.detach().requires_grad_()\n log_prob = log_prob_func(p)\n # log_prob.backward()\n p = collect_gradients(log_prob, p, pass_grad)\n # print(p.grad.std())\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n return p.grad\n ret_params = []\n ret_momenta = []\n momentum += 0.5 * step_size * params_grad(params)\n for n in range(steps):\n if inv_mass is None:\n params = params + step_size * momentum #/normalizing_const\n else:\n #Assum G is diag here so 1/Mass = G inverse\n if type(inv_mass) is list:\n i = 0\n for block in inv_mass:\n it = block[0].shape[0]\n params[i:it+i] = params[i:it+i] + step_size * torch.matmul(block,momentum[i:it+i].view(-1,1)).view(-1) #/normalizing_const\n i += it\n elif len(inv_mass.shape) == 2:\n params = params + step_size * torch.matmul(inv_mass,momentum.view(-1,1)).view(-1) #/normalizing_const\n else:\n params = params + step_size * inv_mass * momentum #/normalizing_const\n p_grad = params_grad(params)\n momentum += step_size * p_grad\n ret_params.append(params.clone())\n ret_momenta.append(momentum.clone())\n # only need last for Hamiltoninian check (see p.14) https://arxiv.org/pdf/1206.1901.pdf\n ret_momenta[-1] = ret_momenta[-1] - 0.5 * step_size * p_grad.clone()\n # import pdb; pdb.set_trace()\n return ret_params, ret_momenta\n elif sampler == Sampler.RMHMC and (integrator == Integrator.IMPLICIT or integrator == Integrator.S3):\n if integrator is not Integrator.S3:\n ham_func = None\n # Else we are doing semi sep and need auxiliary for Riemann version.\n if pass_grad is not None:\n raise RuntimeError('Passing user-determined gradients not implemented for RMHMC')\n\n def fixed_point_momentum(params, momentum):\n momentum_old = momentum.clone()\n # print('s')\n for i in range(fixed_point_max_iterations):\n momentum_prev = momentum.clone()\n params = params.detach().requires_grad_()\n ham = hamiltonian(params, momentum, log_prob_func, jitter=jitter, softabs_const=softabs_const, normalizing_const=normalizing_const, ham_func=ham_func, sampler=sampler, integrator=integrator, metric=metric)\n params = collect_gradients(ham, params)\n\n # draw the jitter on the diagonal of Fisher again (probably a better place to do this)\n tries = 0\n while util.has_nan_or_inf(params.grad):\n params = params.detach().requires_grad_()\n ham = hamiltonian(params, momentum, log_prob_func, jitter=jitter, softabs_const=softabs_const, normalizing_const=normalizing_const, ham_func=ham_func, sampler=sampler, integrator=integrator, metric=metric)\n params = collect_gradients(ham, params)\n tries += 1\n if tries > jitter_max_tries:\n print('Warning: reached jitter_max_tries {}'.format(jitter_max_tries))\n # import pdb; pdb.set_trace()\n raise util.LogProbError()\n # import pdb; pdb.set_trace()\n # break\n\n momentum = momentum_old - 0.5 * step_size * params.grad\n momenta_diff = 
torch.max((momentum_prev-momentum)**2)\n if momenta_diff < fixed_point_threshold:\n break\n if debug == 1:\n print('Converged (momentum), iterations: {}, momenta_diff: {}'.format(i, momenta_diff))\n return momentum\n\n def fixed_point_params(params, momentum):\n params_old = params.clone()\n momentum = momentum.detach().requires_grad_()\n ham = hamiltonian(params, momentum, log_prob_func, jitter=jitter, softabs_const=softabs_const, normalizing_const=normalizing_const, ham_func=ham_func, sampler=sampler, integrator=integrator, metric=metric)\n momentum = collect_gradients(ham,momentum)\n momentum_grad_old = momentum.grad.clone()\n for i in range(fixed_point_max_iterations):\n params_prev = params.clone()\n momentum = momentum.detach().requires_grad_()\n ham = hamiltonian(params, momentum, log_prob_func, jitter=jitter, softabs_const=softabs_const, normalizing_const=normalizing_const, ham_func=ham_func, sampler=sampler, integrator=integrator, metric=metric)\n momentum = collect_gradients(ham,momentum)#collect_gradients(ham, params)\n params = params_old + 0.5 * step_size * momentum.grad + 0.5 * step_size * momentum_grad_old\n params_diff = torch.max((params_prev-params)**2)\n if params_diff < fixed_point_threshold:\n break\n if debug == 1:\n print('Converged (params), iterations: {}, params_diff: {}'.format(i, params_diff))\n return params\n ret_params = []\n ret_momenta = []\n for n in range(steps):\n # import pdb; pdb.set_trace()\n momentum = fixed_point_momentum(params, momentum)\n params = fixed_point_params(params, momentum)\n\n params = params.detach().requires_grad_()\n ham = hamiltonian(params, momentum, log_prob_func, jitter=jitter, softabs_const=softabs_const, normalizing_const=normalizing_const, ham_func=ham_func, sampler=sampler, integrator=integrator, metric=metric)\n params = collect_gradients(ham, params)\n\n # draw the jitter on the diagonal of Fisher again (probably a better place to do this)\n tries = 0\n while util.has_nan_or_inf(params.grad):\n params = params.detach().requires_grad_()\n ham = hamiltonian(params, momentum, log_prob_func, jitter=jitter, softabs_const=softabs_const, normalizing_const=normalizing_const, ham_func=ham_func, sampler=sampler, integrator=integrator, metric=metric)\n params = collect_gradients(ham, params)\n tries += 1\n if tries > jitter_max_tries:\n print('Warning: reached jitter_max_tries {}'.format(jitter_max_tries))\n raise util.LogProbError()\n # break\n momentum -= 0.5 * step_size * params.grad\n\n ret_params.append(params)\n ret_momenta.append(momentum)\n return ret_params, ret_momenta\n\n elif sampler == Sampler.RMHMC and integrator == Integrator.EXPLICIT:\n if pass_grad is not None:\n raise RuntimeError('Passing user-determined gradients not implemented for RMHMC')\n\n #During leapfrog define integrator as implict when passing into riemannian_hamiltonian\n leapfrog_hamiltonian_flag = Integrator.IMPLICIT\n def hamAB_grad_params(params,momentum):\n params = params.detach().requires_grad_()\n ham = hamiltonian(params, momentum.detach(), log_prob_func, jitter=jitter, normalizing_const=normalizing_const, softabs_const=softabs_const, explicit_binding_const=explicit_binding_const, sampler=sampler, integrator=leapfrog_hamiltonian_flag, metric=metric)\n params = collect_gradients(ham, params)\n\n # draw the jitter on the diagonal of Fisher again (probably a better place to do this)\n tries = 0\n while util.has_nan_or_inf(params.grad):\n # import pdb; pdb.set_trace()\n params = params.detach().requires_grad_()\n ham = hamiltonian(params, 
momentum.detach(), log_prob_func, jitter=jitter, normalizing_const=normalizing_const, softabs_const=softabs_const, explicit_binding_const=explicit_binding_const, sampler=sampler, integrator=leapfrog_hamiltonian_flag, metric=metric)\n params = collect_gradients(ham, params)\n tries += 1\n if tries > jitter_max_tries:\n print('Warning: reached jitter_max_tries {}'.format(jitter_max_tries))\n raise util.LogProbError()\n # import pdb; pdb.set_trace()\n # break\n\n return params.grad\n def hamAB_grad_momentum(params,momentum):\n momentum = momentum.detach().requires_grad_()\n params = params.detach().requires_grad_()\n # Can't detach p as we still need grad to do derivatives\n ham = hamiltonian(params, momentum, log_prob_func, jitter=jitter, normalizing_const=normalizing_const, softabs_const=softabs_const, explicit_binding_const=explicit_binding_const, sampler=sampler, integrator=leapfrog_hamiltonian_flag, metric=metric)\n # import pdb; pdb.set_trace()\n momentum = collect_gradients(ham,momentum)\n return momentum.grad\n ret_params = []\n ret_momenta = []\n params_copy = params.clone()\n momentum_copy = momentum.clone()\n for n in range(steps):\n # \\phi_{H_A}\n momentum = momentum - 0.5 * step_size * hamAB_grad_params(params,momentum_copy)\n params_copy = params_copy + 0.5 * step_size * hamAB_grad_momentum(params,momentum_copy)\n # \\phi_{H_B}\n params = params + 0.5 * step_size * hamAB_grad_momentum(params_copy,momentum)\n momentum_copy = momentum_copy - 0.5 * step_size * hamAB_grad_params(params_copy,momentum)\n # \\phi_{H_C}\n c = torch.cos(torch.FloatTensor([2* explicit_binding_const * step_size])).to(params.device)\n s = torch.sin(torch.FloatTensor([2* explicit_binding_const * step_size])).to(params.device)\n # params_add = params + params_copy\n # params_sub = params - params_copy\n # momentum_add = momentum + momentum_copy\n # momentum_sub = momentum - momentum_copy\n # ### CHECK IF THE VALUES ON THE RIGHT NEED TO BE THE OLD OR UPDATED ones\n # ### INSTINCT IS THAT USING UPDATED ONES IS BETTER\n # params = 0.5 * ((params_add) + c*(params_sub) + s*(momentum_sub))\n # momentum = 0.5 * ((momentum_add) - s*(params_sub) + c*(momentum_sub))\n # params_copy = 0.5 * ((params_add) - c*(params_sub) - s*(momentum_sub))\n # momentum_copy = 0.5 * ((momentum_add) + s*(params_sub) - c*(momentum_sub))\n params = 0.5 * ((params+params_copy) + c*(params-params_copy) + s*(momentum-momentum_copy))\n momentum = 0.5 * ((momentum+momentum_copy) - s*(params-params_copy) + c*(momentum-momentum_copy))\n params_copy = 0.5 * ((params+params_copy) - c*(params-params_copy) - s*(momentum-momentum_copy))\n momentum_copy = 0.5 * ((momentum+momentum_copy) + s*(params-params_copy) - c*(momentum-momentum_copy))\n\n\n # \\phi_{H_B}\n params = params + 0.5 * step_size * hamAB_grad_momentum(params_copy,momentum)\n momentum_copy = momentum_copy - 0.5 * step_size * hamAB_grad_params(params_copy,momentum)\n # \\phi_{H_A}\n momentum = momentum - 0.5 * step_size * hamAB_grad_params(params,momentum_copy)\n params_copy = params_copy + 0.5 * step_size * hamAB_grad_momentum(params,momentum_copy)\n\n ret_params.append(params.clone())\n ret_momenta.append(momentum.clone())\n return [ret_params,params_copy], [ret_momenta, momentum_copy]\n\n # PAGE 35 MCMC Using Hamiltonian dynamics (Neal 2011)\n elif sampler == Sampler.HMC and (integrator == Integrator.SPLITTING or integrator == Integrator.SPLITTING_RAND or Integrator.SPLITTING_KMID):\n if type(log_prob_func) is not list:\n raise RuntimeError('For splitting log_prob_func must be list of 
functions')\n if pass_grad is not None:\n raise RuntimeError('Passing user-determined gradients not implemented for splitting')\n\n def params_grad(p,log_prob_func):\n # OLD:\n # p = p.detach().requires_grad_()\n # log_prob = log_prob_func(p)\n # # log_prob.backward()\n # p = collect_gradients(log_prob, p)\n # grad = p.grad\n # # For removing GPU memory for large data sets.\n # del p, log_prob\n # torch.cuda.empty_cache()\n\n p = p.detach().requires_grad_()\n log_prob = log_prob_func(p)\n # Need to check memory issues in collect_gradients\n grad = torch.autograd.grad(log_prob,p)[0]\n # For removing GPU memory for large data sets.\n del p, log_prob, log_prob_func\n torch.cuda.empty_cache()\n return grad\n\n params = params.detach() # Detach as we do not need to remember graph until we pass into log_prob\n ret_params = []\n ret_momenta = []\n if integrator == Integrator.SPLITTING:\n M = len(log_prob_func)\n K_div = (M - 1) * 2\n if M == 1:\n raise RuntimeError('For symmetric splitting log_prob_func must be list of functions greater than length 1')\n for n in range(steps):\n # Symmetric loop to ensure reversible\n for m in range(M):\n # print('p ',n)\n grad = params_grad(params,log_prob_func[m])\n with torch.no_grad():\n momentum += 0.5 * step_size * grad\n del grad\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n if m < M-1:\n # print('q ',n)\n if inv_mass is None:\n params += (step_size/K_div) * momentum #/normalizing_const\n else:\n if type(inv_mass) is list:\n pass\n #Assum G is diag here so 1/Mass = G inverse\n elif len(inv_mass.shape) == 2:\n params += (step_size/K_div) * torch.matmul(inv_mass,momentum.view(-1,1)).view(-1) #/normalizing_const\n else:\n params += (step_size/K_div) * inv_mass * momentum #/normalizing_const\n for m in reversed(range(M)):\n # print('p ', n )\n grad = params_grad(params,log_prob_func[m])\n with torch.no_grad():\n momentum += 0.5 * step_size * grad\n del grad\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n if m > 0:\n # print('q ', n-1)\n if inv_mass is None:\n params += (step_size/K_div) * momentum #/normalizing_const\n else:\n if type(inv_mass) is list:\n pass\n #Assum G is diag here so 1/Mass = G inverse\n elif len(inv_mass.shape) == 2:\n params += (step_size/K_div) * torch.matmul(inv_mass,momentum.view(-1,1)).view(-1) #/normalizing_const\n else:\n params += (step_size/K_div) * inv_mass * momentum #/normalizing_const\n\n if store_on_GPU:\n ret_params.append(params.clone())\n ret_momenta.append(momentum.clone())\n else:\n ret_params.append(params.clone().cpu())\n ret_momenta.append(momentum.clone().cpu())\n elif integrator == Integrator.SPLITTING_RAND:\n M = len(log_prob_func)\n idx = torch.randperm(M)\n for n in range(steps):\n # \"Labelling of subsets is randomised for each iteration\"\n # idx = torch.randperm(M)\n for m in range(M):\n # print('p ',n)\n momentum += 0.5 * step_size * params_grad(params, log_prob_func[idx[m]])\n # print('q ',n)\n if inv_mass is None:\n params += (step_size/M) * momentum #/normalizing_const\n else:\n if type(inv_mass) is list:\n pass\n #Assum G is diag here so 1/Mass = G inverse\n elif len(inv_mass.shape) == 2:\n params += (step_size/M) * torch.matmul(inv_mass,momentum.view(-1,1)).view(-1) #/normalizing_const\n else:\n params += (step_size/M) * inv_mass * momentum #/normalizing_const\n momentum += 0.5 * step_size * params_grad(params,log_prob_func[idx[m]])\n\n ret_params.append(params.clone())\n ret_momenta.append(momentum.clone())\n # import pdb; pdb.set_trace()\n\n\n elif integrator == 
Integrator.SPLITTING_KMID:\n M = len(log_prob_func)\n if M == 1:\n raise RuntimeError('For symmetric splitting log_prob_func must be list of functions greater than length 1')\n for n in range(steps):\n # Symmetric loop to ensure reversible\n for m in range(M):\n # print('p ',n)\n momentum += 0.5 * step_size * params_grad(params,log_prob_func[m])\n\n if inv_mass is None:\n params = params + (step_size) * momentum #/normalizing_const\n else:\n if type(inv_mass) is list:\n pass\n #Assum G is diag here so 1/Mass = G inverse\n elif len(inv_mass.shape) == 2:\n params = params + (step_size) * torch.matmul(inv_mass,momentum.view(-1,1)).view(-1) #/normalizing_const\n else:\n params = params + (step_size) * inv_mass * momentum #/normalizing_const\n\n for m in reversed(range(M)):\n # print('p ', n )\n momentum += 0.5 * step_size * params_grad(params,log_prob_func[m])\n\n ret_params.append(params.clone())\n ret_momenta.append(momentum.clone())\n\n return ret_params, ret_momenta\n\n else:\n raise NotImplementedError()",
"def horde_step(self, observation):",
"def _step(self, t, y, h):\n # We must use solvers / implicit form\n f_pn1 = lambda a_n1: (y + h*self.v + (h**2 / 2.0) * \\\n ((1.0 - 2.*self.beta)*self.a + 2.*self.beta*a_n1))\n f_vn1 = lambda a_n1: (self.v + h*((1.0-self.gamma)*self.a + self.gamma*a_n1))\n def f_an1(a_n1):\n f_n1 = self.f(t+h,f_pn1(a_n1),f_vn1(a_n1))\n f_n = self.f(t,y,self.v,)\n return a_n1 - ((1.0+self.alpha)*f_n1 - self.alpha*f_n)\n\n a = self.solver(f_an1, self.a)\n y = f_pn1(a) # Calculate and store new variables. \n self.v = f_vn1(a)\n self.a = a\n return t+h, y",
"def _momentum_update(self):\n for param_ol, param_tgt in zip(self.online_net.parameters(),\n self.target_net.parameters()):\n param_tgt.data = param_tgt.data * self.momentum + \\\n param_ol.data * (1. - self.momentum)",
"def _rollout_an_episode(self):\n self._steps = 0\n me_id = self._learning_agent_id # short name\n oppo_id = self._oppo_agent_id # short name\n logger.log('episode begins with the task: {}'.format(str(self.task)))\n\n # passing me and oppo hyperparams to the arena interface\n assert self.task.hyperparam is not None\n logger.log('pulling oppo hyperparam of model key {}'.format(\n self.task.model_key2))\n oppo_hyperparam = self._model_pool_apis.pull_attr(attr='hyperparam',\n key=self.task.model_key2)\n logger.log('Done pulling oppo hyperparam')\n oppo_inter_kwargs = ({} if oppo_hyperparam is None\n else oppo_hyperparam.__dict__)\n inter_kwargs = ([self.task.hyperparam.__dict__]\n + [oppo_inter_kwargs] * (self.n_agents - 1))\n\n # agent, env reset\n obs = self.env.reset(inter_kwargs=inter_kwargs)\n for agt, ob in zip(self.agents, obs):\n agt.reset(ob)\n self._update_agents_model(self.task) # for agent Neural Net parameters\n\n me_reward_sum = 0.0\n self.time_beg = time.time()\n self._update_hyperparam(self.task)\n self._changed_task = False\n while True:\n self._steps += 1\n # predictions for each agent\n predictions = self._parallel.run((self._agent_pred, ob, i)\n for i, ob in enumerate(obs))\n me_prediction = predictions[me_id]\n me_action, extra_vars = me_prediction[me_id], me_prediction[oppo_id:]\n # predicted actions for each agent\n actions = [me_action] + [other_action\n for other_action in predictions[oppo_id:]]\n # book-keep obs in previous step\n last_obs = obs\n\n # agent-env interaction\n obs, reward, done, info = self.env.step(actions)\n\n me_rwd_scalar = self._reward_shape(reward[me_id])\n me_reward_sum += me_rwd_scalar\n\n if self._enable_push:\n # put the interested data (obs, rwd, act, ... for each agent) into the\n # _data_queue, which is watched in another Thread (the _push_data_to_learner()\n # method) that the data are dequeued and sent to remote Learner\n if self._data_queue.full():\n logger.log(\"Actor's queue is full.\", level=logger.WARN)\n rwd_to_push = (me_rwd_scalar if self.rwd_shape\n else np.asarray(reward[me_id], np.float32))\n if self.use_oppo_obs:\n if isinstance(extra_vars, tuple):\n extra_vars += (self.agents[self._oppo_agent_id]._last_state,)\n else:\n extra_vars.append(self.agents[self._oppo_agent_id]._last_state)\n data_tuple = (last_obs, tuple(actions), rwd_to_push, info, done, extra_vars)\n self._data_queue.put(data_tuple)\n logger.log('successfully put one tuple.', level=logger.DEBUG)\n\n if self._steps % self._log_interval_steps == 0:\n logger.log('_rollout_an_episode,', 'steps: {},'.format(self._steps),\n 'data qsize: {}'.format(self._data_queue.qsize()))\n\n if done:\n # an episode ends\n if self._replay_dir:\n self._save_replay()\n self.log_kvs(me_reward_sum, info)\n if self._changed_task:\n return None, info\n else:\n return self.log_outcome(info), info\n\n if self._update_model_freq and self._steps % self._update_model_freq == 0:\n # time to update the model for each agent\n if (self._enable_push and\n self._model_pool_apis.pull_attr(\n 'freezetime', self.task.model_key1) is not None):\n # Current task (learning period) finishes, start a new task or continue\n self._finish_task(self.task, None) # notify early abort\n last_task = self.task\n self.task = self._request_task() # try to continue\n if not is_inherit(last_task.model_key1, self.task.model_key1):\n self.log_kvs(me_reward_sum, info)\n return None, info\n if last_task.model_key2 != self.task.model_key2:\n self._changed_task = True\n self._update_agents_model(self.task)",
"def update_h(self):\n def calculate_nll(h, model, sample):\n model.h = h\n nll = -model.calculate_expected_log_likelihood(sample)\n return nll\n\n def calculate_nll_partial_h(h, model, sample):\n model.h = h\n partial_h = np.zeros((model.num_clones,))\n model.calculate_expected_log_likelihood_partial_h(sample, partial_h)\n return -partial_h\n\n h_before = self.model.h\n elbo_before = self.model.calculate_expected_log_likelihood(np.ones((self.model.num_segments,), dtype=int))\n\n sample = self._create_sample()\n\n result = scipy.optimize.minimize(\n calculate_nll,\n self.model.h,\n method='L-BFGS-B',\n jac=calculate_nll_partial_h,\n bounds=[(1e-8, 10.)] * self.model.num_clones,\n args=(self.model, sample),\n )\n\n if not result.success:\n\n # Check the gradiant if optimization failed\n if result.message == 'ABNORMAL_TERMINATION_IN_LNSRCH':\n analytic_fprime = calculate_nll_partial_h(result.x, self.model, sample)\n numerical_fprime = statsmodels.tools.numdiff.approx_fprime(result.x, calculate_nll, args=(self.model, sample))\n\n if not np.allclose(analytic_fprime, numerical_fprime, atol=2.):\n raise ValueError('gradiant error, analytic: {}, numerical: {}\\n'.format(analytic_fprime, numerical_fprime))\n\n else:\n raise ValueError('optimization failed\\n{}'.format(result)) \n\n self.model.h = result.x\n elbo_after = self.model.calculate_expected_log_likelihood(np.ones((self.model.num_segments,), dtype=int))\n\n if elbo_after < elbo_before:\n print ('[{}] h rejected, elbo before: {}, after: {}'.format(_gettime(), elbo_before, elbo_after))\n self.model.h = h_before\n\n else:\n self.model.h = result.x",
"def _apply_smooth_update(self):\n self.print(\"SGD with Momentum: Applying smooth update...\", line_above=True)\n\n raw_update = self.get_h5_data(self.raw_update_path)\n update = self.get_h5_data(self.smooth_update_path)\n\n if np.sum(np.isnan(update)) > 1:\n raise Exception(\n \"NaNs were found in the smoothed update.\"\n \"Check the raw update and smoothing process.\"\n )\n\n max_upd = np.max(np.abs(update))\n print(f\"Max smooth model update: {max_upd}\")\n\n update_scaling_fac_alpha = self.alpha / max_upd\n\n self.print(\n f\"Recaling based on alpha: {update_scaling_fac_alpha},\"\n f\"New maximum update is: {max_upd * update_scaling_fac_alpha}\"\n )\n\n update *= update_scaling_fac_alpha\n\n # normalise theta and apply update\n theta_0 = self.get_h5_data(self._get_path_for_iteration(0, self.model_path))\n\n # Update parameters\n if max(self.roughness_decay_smoothing_length) > 0.0:\n theta_prev = self.get_h5_data(self.smoothed_model_path)\n\n # If relative perturbations are smoothed, make model physical\n if self.roughness_decay_type == \"relative_perturbation\":\n theta_prev = (theta_prev + 1) * theta_0\n else:\n theta_prev = self.get_h5_data(self.model_path)\n\n # Normalize the model and prevent division by zero in the outer core.\n theta_prev[theta_0 != 0] = theta_prev[theta_0 != 0] / theta_0[theta_0 != 0] - 1\n\n # Make sure that the model is only updated where theta is non_zero\n theta_new = np.zeros_like(theta_0)\n theta_new[theta_0 != 0] = (\n theta_prev[theta_0 != 0]\n - update[theta_0 != 0]\n - (1 - self.beta) * self.perturbation_decay * theta_prev[theta_0 != 0]\n )\n\n # Remove normalization from updated model and write physical model\n theta_physical = (theta_new + 1) * theta_0\n shutil.copy(\n self.model_path,\n self.tmp_model_path,\n )\n self.set_h5_data(\n self.tmp_model_path,\n theta_physical,\n )",
"def _update_model(self, new_model):\n super()._update_model(new_model)\n\n if 'e' in self.tr_params:\n if self.state_no_train_de is None:\n for i in range(self.n_emissions - self.nr_no_train_de):\n self.B[i] = (1 - self.learning_rate) * new_model['B'][\n i\n ] + self.learning_rate * self.B[i]\n else:\n for i in range(self.n_d_emissions):\n if i < self.n_d_emissions - self.nr_no_train_de:\n self.B[i] = (1 - self.learning_rate) * new_model['B'][\n i\n ] + self.learning_rate * self.B[i]\n else:\n self.B[i][: -self.state_no_train_de, :] = (\n (1 - self.learning_rate)\n * new_model['B'][i][: -self.state_no_train_de, :]\n + self.learning_rate *\n self.B[i][: -self.state_no_train_de, :]\n )\n\n for i in range(self.n_emissions):\n normalise(new_model['B'][i], axis=1)",
"def _on_step(self) -> None:\n self._n_calls += 1\n # Account for multiple environments\n # each call to step() corresponds to n_envs transitions\n if self._n_calls % max(self.target_update_interval // self.n_envs, 1) == 0:\n polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau)\n # Copy running stats, see GH issue #996\n polyak_update(self.batch_norm_stats, self.batch_norm_stats_target, 1.0)\n\n self.exploration_rate = self.exploration_schedule(self._current_progress_remaining)\n self.logger.record(\"rollout/exploration_rate\", self.exploration_rate)",
"def postprocess_step(self, t, y):\n return",
"def __em(self, x):\n _, log_resp = self._e_step(x)\n\n pi, mu, var = self._m_step(x, log_resp)\n\n self.__update_pi(pi)\n self.__update_mu(mu)\n self.__update_var(var)",
"def e_step(self):\n # update VMF probabilities (Equation (3))\n logP = np.dot(self.features, self.mu.T)*self.kappa + np.log(self.pi).reshape(1,-1) # n by k\n logP_norm = logP - logsumexp(logP, axis=1).reshape(-1,1)\n self.p = np.exp(logP_norm)\n self.mllk = np.mean(logsumexp(logP, axis=1))",
"def step(self):\n\n self.compute_lr()\n\n self.optimizer.param_groups[self.param_group]['lr'] = self.lr\n self.optimizer.param_groups[self.param_group]['momentum'] = self.momentum",
"def run(self):\n\t\tep_rewards = [0.0]\n\t\tavg_rewards = []\n\t\tobs = self.env.reset()\n\t\tstep_counter = 0\n\n\t\tself.mylogger.info('Task: {}, epochs: {}, batch size: {}'.format(self.env.unwrapped.spec.id, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t self.epochs,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t self.batch_size\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t ))\n\n\t\tfor epoch in range(self.epochs):\n\t\t\tfor step in range(self.batch_size):\n\t\t\t\tstep_counter += 1\n\n\t\t\t\tself.observations[step] = obs.copy()\n\t\t\t\tself.actions[step], self.values[step] = self.model.action_value(obs[None, :])\n\t\t\t\tobs, self.rewards[step], self.dones[step], _ = self.env.step(self.actions[step])\n\t\t\t\tep_rewards[-1] += self.rewards[step]\n\n\t\t\t\tif step_counter % self.log_step == 0:\n\t\t\t\t\tlog_msg = 'global_step: {}, obs: {}, act: {}, reward: {}'.format(step_counter,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t obs, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t self.actions[step], \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t self.rewards[step]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t )\n\t\t\t\t\tself.mylogger.info(log_msg)\n\t\t\t\t\tself.mylogger.info(\"prev episode reward: {}\".format(ep_rewards[-2]))\n\n\t\t\t\tif self.dones[step]:\n\t\t\t\t\twith self.summary_writer.as_default():\n\t\t\t\t\t\ttf.summary.scalar('episode reward', ep_rewards[-1], step=step_counter)\n\t\t\t\t\tep_rewards.append(0.0)\n\t\t\t\t\tobs = self.env.reset()\n\n\t\t\t_, next_value = self.model.action_value(obs[None, :])\n\t\t\treturns, advs = self._returns_advantages(self.rewards, self.dones, self.values, next_value)\n\t\t\t# A trick to input actions and advantages through same API.\n\t\t\tacts_and_advs = np.concatenate([self.actions[:, None], advs[:, None]], axis=-1)\n\n\t\t\t# update weights \n\t\t\tlosses = self.model.train_on_batch(self.observations, [acts_and_advs, returns])\n\n\t\t\twith self.summary_writer.as_default():\n\t\t\t\ttf.summary.scalar('policy loss', losses[1], step=step_counter)\n\t\t\t\ttf.summary.scalar('value loss', losses[2], step=step_counter)",
"def multistep_deis_second_order_update(\n self,\n model_output_list: List[torch.FloatTensor],\n timestep_list: List[int],\n prev_timestep: int,\n sample: torch.FloatTensor,\n ) -> torch.FloatTensor:\n t, s0, s1 = prev_timestep, timestep_list[-1], timestep_list[-2]\n m0, m1 = model_output_list[-1], model_output_list[-2]\n alpha_t, alpha_s0, alpha_s1 = self.alpha_t[t], self.alpha_t[s0], self.alpha_t[s1]\n sigma_t, sigma_s0, sigma_s1 = self.sigma_t[t], self.sigma_t[s0], self.sigma_t[s1]\n\n rho_t, rho_s0, rho_s1 = sigma_t / alpha_t, sigma_s0 / alpha_s0, sigma_s1 / alpha_s1\n\n if self.config.algorithm_type == \"deis\":\n\n def ind_fn(t, b, c):\n # Integrate[(log(t) - log(c)) / (log(b) - log(c)), {t}]\n return t * (-np.log(c) + np.log(t) - 1) / (np.log(b) - np.log(c))\n\n coef1 = ind_fn(rho_t, rho_s0, rho_s1) - ind_fn(rho_s0, rho_s0, rho_s1)\n coef2 = ind_fn(rho_t, rho_s1, rho_s0) - ind_fn(rho_s0, rho_s1, rho_s0)\n\n x_t = alpha_t * (sample / alpha_s0 + coef1 * m0 + coef2 * m1)\n return x_t\n else:\n raise NotImplementedError(\"only support log-rho multistep deis now\")",
"def update_mp(self, obs, pool):\n #######################################\n # Step 1 - prediction for birth targets\n born = [deepcopy(comp) for comp in self.birthgmm]\n # The original paper would do a spawning iteration as part of Step 1.\n spawned = [] # not implemented\n\n #######################################\n # Step 2 - prediction for existing targets\n updated = [GmphdComponent(self.survival * comp.weight, dot(self.f, comp.loc),\n self.q + dot(dot(self.f, comp.cov), self.f.T), comp.id)\n for comp in self.gmm]\n\n predicted = born + spawned + updated\n\n #######################################\n # Step 3 - construction of PHD update components\n # These two are the mean and covariance of the expected observation\n nu = [dot(self.h, comp.loc) for comp in predicted]\n s = [self.r + dot(dot(self.h, comp.cov), self.h.T) for comp in predicted]\n # Not sure about any physical interpretation of these two...\n k = [dot(dot(comp.cov, self.h.T), linalg.inv(s[index]))\n for index, comp in enumerate(predicted)]\n pkk = [dot(eye(len(k[index])) - dot(k[index], self.h), comp.cov)\n for index, comp in enumerate(predicted)]\n\n #######################################\n # Step 4 - update using observations\n # The 'predicted' components are kept, with a decay\n newgmm = [GmphdComponent(comp.weight * (1.0 - self.detection), comp.loc, comp.cov, comp.id)\n for comp in predicted]\n\n # then more components are added caused by each obsn's interaction with existing component\n result = pool.map_async(partial(self.update_obs_mp, predicted=predicted, nu=nu, s=s, pkk=pkk, k=k), obs)\n result = result.get()\n for newgmmpartial in result:\n newgmm.extend(newgmmpartial)\n\n self.gmm = newgmm",
"def perform_step(self) -> None:\n self.n_it = self.n_it + 1\n self.update_learning_rate()\n observed_gradient = self.get_observed_gradient(self.theta)\n latent_gradient = self.compute_latent_gradient(observed_gradient)\n g_omega = self.gplvm_model.predict_wishart_embedding(self.omega)[0]\n g_inv_omega = np.linalg.inv(g_omega)\n mu = np.dot(g_inv_omega, latent_gradient[0, :])\n epsilon_derivative = 1e-4\n for k in range(self.dim_latent):\n increment = np.copy(self.omega)\n increment[0, k] = increment[0, k] + epsilon_derivative\n g_derivative =\\\n (self.gplvm_model.predict_wishart_embedding(increment)[0] -\n g_omega) / epsilon_derivative\n tmp_mu = np.dot(g_inv_omega, np.dot(g_derivative, g_inv_omega))\n mu = mu - 2.0 * tmp_mu[:, k]\n mu = mu + g_inv_omega[:, k] * np.trace(np.dot(g_inv_omega,\n g_derivative))\n g_inv_sqrt_omega = sqrtm(g_inv_omega)\n eta = np.random.normal(0.0, np.sqrt(self.epsilon), self.dim_latent)\n self.omega = self.omega + self.epsilon / 2.0 * mu +\\\n np.dot(g_inv_sqrt_omega, eta)\n self.theta = self.gplvm_model.predict(self.omega)[0]\n return"
] |
[
"0.6641234",
"0.6641234",
"0.65344256",
"0.5842853",
"0.57025343",
"0.5628273",
"0.5624245",
"0.55662006",
"0.55565214",
"0.5556335",
"0.5555525",
"0.55499953",
"0.55407476",
"0.5503146",
"0.54973006",
"0.5490032",
"0.54801166",
"0.5471939",
"0.54597974",
"0.5448272",
"0.539526",
"0.53933954",
"0.5390296",
"0.53747016",
"0.53705895",
"0.53696936",
"0.5359849",
"0.5357687",
"0.5341679",
"0.5288205"
] |
0.69332415
|
0
|
If the field is a GeometryField then encode it; otherwise call the parent's method
|
def handle_field(self, obj, field):
value = field._get_val_from_obj(obj)
if isinstance(field, GeometryField):
self._current[field.name] = value
else:
super(Serializer, self).handle_field(obj, field)
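For context, a hedged sketch of how such an override typically sits inside a subclass of Django's Python serializer; the imports, the class name, and the use of value_from_object (the non-deprecated counterpart of _get_val_from_obj) are assumptions, not part of the record above.
from django.contrib.gis.db.models import GeometryField
from django.core.serializers.python import Serializer as PythonSerializer

class GeoSerializer(PythonSerializer):
    # Keep geometry values intact so a downstream encoder can handle them;
    # every other field goes through the stock handler.
    def handle_field(self, obj, field):
        value = field.value_from_object(obj)
        if isinstance(field, GeometryField):
            self._current[field.name] = value
        else:
            super().handle_field(obj, field)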
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def default(self, o): \n if isinstance(o, GEOSGeometry):\n dictval = json.loads(o.geojson)\n #raise Exception(o.ewkt)\n dictval['__GEOSGeometry__'] = ['__init__', [o.ewkt]] #json class hint; see http://json-rpc.org/wiki/specification\n return dictval\n else:\n super(DjangoGEOJSONEncoder, self).default(o)",
"def get_fields(self):\n fields = super(GeoModelSerializer, self).get_fields()\n # Set the geometry field name when it's undeclared.\n if not self.Meta.geom_field:\n for name, field in fields.items():\n if isinstance(field, GeometryField):\n self.Meta.geom_field = name\n break\n return fields",
"def encode(self, value):\n raise NotImplementedError()",
"def encode_geometry(geom: BasePolygon) -> str:\n encoded_geom = geobuf.encode(mapping(geom)).hex()\n\n # if the geometry is so complex is still goes over the limit, incrementally attempting to simplify it\n if sys.getsizeof(encoded_geom) > LAMBDA_ASYNC_PAYLOAD_LIMIT_BYTES:\n encoded_geom = geobuf.encode(\n mapping(geom.simplify(0.005, preserve_topology=False))\n ).hex()\n\n if sys.getsizeof(encoded_geom) > LAMBDA_ASYNC_PAYLOAD_LIMIT_BYTES:\n encoded_geom = geobuf.encode(\n mapping(geom.simplify(0.01, preserve_topology=False))\n ).hex()\n\n return encoded_geom",
"def _encode(self, data):\n raise NotImplementedError(\"_encode needs to be implemented in {} subclass\".format(type(self).__name__))",
"def encode(self, value):\r\n pass",
"def _encode_field(self, field_type, field_data, subcontent=None):\n self.logger.debug(\n '_encode_field(): pytype %s values %s',\n type(field_data).__name__, repr(field_data)\n )\n\n field_encoded = None\n\n # nested\n if field_type == 'a' and subcontent:\n field_encoded = self._encode_str(\n self._encode_wire(field_data, subcontent).read()\n )\n # bytes\n elif field_type == 'a':\n field_encoded = self._encode_str(field_data)\n\n # strings\n elif field_type == 'U':\n field_encoded = self._encode_str(field_data.encode('utf-8'))\n\n # vint family (signed, unsigned and boolean)\n elif field_type in 'Ttzb':\n if field_type == 't':\n field_data = self._vint_signedto2sc(field_data)\n elif field_type == 'z':\n field_data = self._vint_zigzagify(field_data)\n elif field_type == 'b':\n field_data = int(field_data)\n field_encoded = self._encode_vint(field_data)\n\n # fixed numerical value\n elif field_type in 'iIqQfd':\n field_encoded = struct.pack(\n '<{0}'.format(field_type), field_data\n )\n\n return field_encoded",
"def serialize_field(value):\r\n if isinstance(value, basestring):\r\n return value\r\n\r\n return json.dumps(value, cls=EdxJSONEncoder)",
"def _make_serializable(self, field):\n if isinstance(field, datetime):\n return str(field)\n elif isinstance(field, Decimal):\n return float(field)\n else:\n return field",
"def encode(self): # pragma: no cover\n pass",
"def encode(self):\n \n assert False, \"Not implemented.\"",
"def geometry():\n return Geometry()",
"def encode(self,b):\n raise NotImplementedError('subclasses must override encode()!')",
"def serialize(self):\n if not self.fields:\n raise ValueError('Point must have at least one field.\\n')\n\n def value_repr(val):\n \"\"\"Encodes a field value.\"\"\"\n if isinstance(val, bool):\n return str(val)\n if isinstance(val, basestring):\n return '\\\"%s\\\"' % val.replace('\"', '\\\"')\n return repr(val)\n\n def measure_repr(measurement):\n \"\"\"Encodes a measurement name.\"\"\"\n return measurement.replace(',', r'\\,').replace(' ', r'\\ ')\n\n def label_repr(label):\n \"\"\"Encodes a tag name, tag value, or field name.\"\"\"\n return label.replace(',', r'\\,').replace(' ', r'\\ ').replace('=', r'\\=')\n\n # Tag set must be prefixed with, and joined by a comma.\n tags = ''.join([\n ',%s=%s' % (label_repr(key), label_repr(val))\n for key, val in self.tags.iteritems()\n ])\n # Field set is only joined by a comma.\n fields = ','.join([\n '%s=%s' % (label_repr(key), value_repr(val))\n for key, val in self.fields.iteritems()\n ])\n return '%s%s %s%s' % (\n measure_repr(self.measurement),\n tags,\n fields,\n ' %d' % self.time if self.time else ''\n )",
"def encode(self, text):",
"def handle(self, geometry, fields=None, tags=None, id=None):\n pass",
"def geom_type(self): # -> str:\n ...",
"def serialize(self):",
"def convertor(geometry, method=\"wgs2gcj\"):\n if geometry['type'] == 'Point':\n coords = geometry['coordinates']\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n elif geometry['type'] == 'LineString' or geometry['type'] == 'MutliPoint':\n coordinates = geometry['coordinates']\n for coords in coordinates:\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n elif geometry['type'] == 'Polygon' or geometry['type'] == 'MultiLineString':\n coordinates = geometry['coordinates']\n for rings in coordinates:\n for coords in rings:\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n elif geometry['type'] == 'MultiPolygon':\n coordinates = geometry['coordinates']\n for rings in coordinates:\n for lines in rings:\n for coords in lines:\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n return geometry",
"def handle_field(self, obj, field):\n self.indent(3)\n internal_type = field.get_internal_type()\n attrs = {\n \"id\": field.name,\n \"resname\": field.name,\n \"restype\": \"x-%s\" % internal_type,\n \"translate\": \"no\",\n }\n if internal_type in (\"CharField\", \"TextField\"):\n attrs[\"translate\"] = \"yes\"\n\n if internal_type == \"CharField\":\n attrs[\"size-unit\"] = \"char\"\n attrs[\"maxwidth\"] = str(field.max_length)\n\n self.xml.startElement(\"trans-unit\", attrs)\n self.indent(4)\n self.xml.startElement(\"source\", {})\n # Get a \"string version\" of the object's data.\n if getattr(obj, field.name) is not None:\n self.xml.characters(field.value_to_string(obj))\n else:\n self.xml.addQuickElement(\"None\")\n\n self.xml.endElement(\"source\")\n self.indent(3)\n self.xml.endElement(\"trans-unit\")",
"def encode(self, obj):\n s = super(CustomEncoder, self).encode(obj)\n # If uncompressed, postprocess for formatting\n if len(s.splitlines()) > 1:\n s = self.postprocess(s)\n return s",
"def encode(self, decoded):",
"def serialize(self, value):\n # (Any) -> json\n # this is called when writing to elasticsearch",
"def json_serialize(self):\n raise NotImplementedError('json_serialize must be overriden')",
"def dumps(obj, big_endian=True):\n geom_type = obj['type']\n\n exporter = __dumps_registry.get(geom_type)\n if exporter is None:\n __unsupported_geom_type(geom_type)\n\n return exporter(obj, big_endian)",
"def encode(self, value):\r\n return value",
"def geo_transform(self):\n pass",
"def serialize(self):\n raise NotImplemented()",
"def _encode_structure(self):\n pass",
"def _encode_structure(self):\n pass"
] |
[
"0.6092368",
"0.58623064",
"0.5737154",
"0.57359904",
"0.5712789",
"0.56236666",
"0.56137514",
"0.5575033",
"0.55569685",
"0.5485647",
"0.5467278",
"0.5435098",
"0.53939056",
"0.5381318",
"0.53652793",
"0.53468394",
"0.534481",
"0.53239167",
"0.5288633",
"0.5278437",
"0.5228041",
"0.5226254",
"0.5204008",
"0.5191733",
"0.5174946",
"0.5157615",
"0.51547384",
"0.5152623",
"0.5150064",
"0.5150064"
] |
0.70307666
|
0
|
Overload the default method to process any GEOSGeometry objects; otherwise call the original method
|
def default(self, o):
if isinstance(o, GEOSGeometry):
dictval = json.loads(o.geojson)
#raise Exception(o.ewkt)
dictval['__GEOSGeometry__'] = ['__init__', [o.ewkt]] #json class hint; see http://json-rpc.org/wiki/specification
return dictval
else:
super(DjangoGEOJSONEncoder, self).default(o)
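A brief usage sketch, assuming the encoder above is available alongside GeoDjango's GEOSGeometry; the Point value and key names are illustrative only. Since default() is only invoked for values json cannot serialize natively, the example places a geometry where that hook fires.
import json
from django.contrib.gis.geos import Point

# The custom encoder turns the GEOSGeometry into the class-hinted dict shown
# in default() instead of raising a TypeError.
payload = {"name": "depot", "location": Point(4.35, 50.85, srid=4326)}
encoded = json.dumps(payload, cls=DjangoGEOJSONEncoder)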
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def simplify(self, tolerance, preserve_topology=...): # -> BaseGeometry:\n ...",
"def get_geospatial(self):\n self.unimpl_base_class()",
"def get_default_geometry(self):",
"def convertor(geometry, method=\"wgs2gcj\"):\n if geometry['type'] == 'Point':\n coords = geometry['coordinates']\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n elif geometry['type'] == 'LineString' or geometry['type'] == 'MutliPoint':\n coordinates = geometry['coordinates']\n for coords in coordinates:\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n elif geometry['type'] == 'Polygon' or geometry['type'] == 'MultiLineString':\n coordinates = geometry['coordinates']\n for rings in coordinates:\n for coords in rings:\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n elif geometry['type'] == 'MultiPolygon':\n coordinates = geometry['coordinates']\n for rings in coordinates:\n for lines in rings:\n for coords in lines:\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n return geometry",
"def geos_geom_from_py(ob, create_func=...): # -> tuple[Any | Unknown, Unknown]:\n ...",
"def spatial(self):",
"def __getGeometry(self, geom):\n if \"POLYGON\" in self.geometryType:\n rings = geom['rings']\n polygon = getMultiGeometry(rings)\n polyGeom = arcpy.Polygon(polygon, self.sr)\n return polyGeom\n elif \"POLYLINE\" in self.geometryType:\n paths = geom['paths']\n polyline = getMultiGeometry(paths)\n lineGeom = arcpy.Polyline(polyline, self.sr)\n return lineGeom\n elif \"POINT\" in self.geometryType:\n try:\n point = arcpy.Point(float(geom['x']), float(geom['y']))\n except:\n raise NullGeometryError(\"Point geometry is invalid or null\")\n pointGeom = arcpy.Geometry(\"point\", point, self.sr)\n return pointGeom",
"def clear_geometries(self):",
"def get_geometry(self, selection_name):",
"def geometry():\n return Geometry()",
"def __init__(self, geom_input, srs=None):\n str_instance = isinstance(geom_input, str)\n\n # If HEX, unpack input to a binary buffer.\n if str_instance and hex_regex.match(geom_input):\n geom_input = memoryview(bytes.fromhex(geom_input))\n str_instance = False\n\n # Constructing the geometry,\n if str_instance:\n wkt_m = wkt_regex.match(geom_input)\n json_m = json_regex.match(geom_input)\n if wkt_m:\n if wkt_m[\"srid\"]:\n # If there's EWKT, set the SRS w/value of the SRID.\n srs = int(wkt_m[\"srid\"])\n if wkt_m[\"type\"].upper() == \"LINEARRING\":\n # OGR_G_CreateFromWkt doesn't work with LINEARRING WKT.\n # See https://trac.osgeo.org/gdal/ticket/1992.\n g = capi.create_geom(OGRGeomType(wkt_m[\"type\"]).num)\n capi.import_wkt(g, byref(c_char_p(wkt_m[\"wkt\"].encode())))\n else:\n g = capi.from_wkt(\n byref(c_char_p(wkt_m[\"wkt\"].encode())), None, byref(c_void_p())\n )\n elif json_m:\n g = self._from_json(geom_input.encode())\n else:\n # Seeing if the input is a valid short-hand string\n # (e.g., 'Point', 'POLYGON').\n OGRGeomType(geom_input)\n g = capi.create_geom(OGRGeomType(geom_input).num)\n elif isinstance(geom_input, memoryview):\n # WKB was passed in\n g = self._from_wkb(geom_input)\n elif isinstance(geom_input, OGRGeomType):\n # OGRGeomType was passed in, an empty geometry will be created.\n g = capi.create_geom(geom_input.num)\n elif isinstance(geom_input, self.ptr_type):\n # OGR pointer (c_void_p) was the input.\n g = geom_input\n else:\n raise GDALException(\n \"Invalid input type for OGR Geometry construction: %s\"\n % type(geom_input)\n )\n\n # Now checking the Geometry pointer before finishing initialization\n # by setting the pointer for the object.\n if not g:\n raise GDALException(\n \"Cannot create OGR Geometry from input: %s\" % geom_input\n )\n self.ptr = g\n\n # Assigning the SpatialReference object to the geometry, if valid.\n if srs:\n self.srs = srs\n\n # Setting the class depending upon the OGR Geometry Type\n self.__class__ = GEO_CLASSES[self.geom_type.num]",
"def _multigeometry(self, ogr_geometry):\n\n geo_type = ogr_geometry.GetGeometryType()\n\n if geo_type == ogr.wkbPolygon:\n return ogr.ForceToMultiPolygon(ogr_geometry)\n elif geo_type == ogr.wkbPoint:\n return ogr.ForceToMultiPoint(ogr_geometry)\n elif geo_type in [ogr.wkbLineString, ogr.wkbLinearRing]:\n return ogr.ForceToMultiLineString(ogr_geometry)\n else:\n return ogr_geometry",
"def handle(self, geometry, fields=None, tags=None, id=None):\n pass",
"def centroid(self): # -> BaseGeometry:\n ...",
"def _update_vertices(self):\n raise NotImplementedError(\"_update_vertices must be defined\"\n \"for every ShapeBase subclass\")",
"def envelope(self): # -> BaseGeometry:\n ...",
"def set_geometry(self, selection_name, geometry):",
"def __load_geo(self):\n pass\n # process any splines? and turn them into arcs\n # http://www.mathopenref.com/constcirclecenter.html\n # find max dist between points\n # double it\n # select two segments\n # draw normal lines\n # find intersections, that is the center",
"def union(self, other): # -> BaseGeometry:\n ...",
"def _prepare_with_copy(geometry):\n geometry = pygeos.apply(geometry, lambda x: x) # makes a copy\n pygeos.prepare(geometry)\n return geometry",
"def intersection(self, other): # -> BaseGeometry:\n ...",
"def geo_transform(self):\n pass",
"def normalizeGeometry(geom):\n\t# Convert string GEOSGeometry object to python dict\n\tgeom = json.loads(geom)\n\n\t# Normalize longitude to range [-180, 180) using saw tooth function\n\tc = geom['coordinates'][0]\n\tgeom['coordinates'][0] = (c+180 - ( math.floor( (c+180)/360 ) )*360) - 180\n\n\t# Normalize latitude to range [-90, 90) using saw tooth function\n\tc = geom['coordinates'][1]\n\tgeom['coordinates'][1] = (c+90 - ( math.floor( (c+90)/180 ) )*180) - 90\n\n\t# Encode and return GEOSGeometry object\n\treturn GEOSGeometry(json.dumps(geom))",
"def ground_contact_geoms(self):\n raise NotImplementedError",
"def get_geometries ( self, object_class_table, spatial_column, select_column, select_id ) :\n stmt = 'select sdo_util.to_wktgeometry(' + str(spatial_column) + ') from ' + str(object_class_table) + ' where ' + str(select_column) + ' = ' + str(select_id)\n self.oracle_cursor.execute( stmt )\n resultset = self.oracle_cursor.fetchall()\n return resultset",
"def geometry(self, geometry: Point) -> None:\n if geometry.type != 'Point':\n raise Exception('geometry must be a valid ArcGIS Point Geometry object')\n else:\n self._geom = geometry",
"def _update_proxy(self, change):\n if change['type'] == 'container':\n #: Only update what's needed\n self.proxy.update_points(change)\n else:\n super(MapPolygon, self)._update_proxy(change)",
"def add_objects_from_layer(self, layer):\n\n objects = layer.get_allowed_geometry()\n\n typ_plural = layer.path[1]\n typ_sofi = gs.plural_to_sofi[typ_plural]\n\n for obj in objects:\n\n # !! REFACTOR TO CALL PROGRAMATICALLY -> ELIMINATE CONDITIONALS !!\n\n if typ_plural in gs.point_elements:\n\n self.add_node(obj, typ_sofi, layer)\n\n if typ_plural in gs.line_elements:\n\n self.add_line_element(obj, typ_sofi, layer)\n\n if typ_plural in gs.spring_elements:\n\n self.add_spring_sn(obj, typ_sofi, layer) \n\n if typ_plural in gs.area_elements:\n\n self.add_area_element(obj, typ_sofi, layer) \n\n return self",
"def plot_geometry ( ogr_geom_in, exterior_color, interior_color ) :\n if ogr_geom_in.GetGeometryName() == 'MULTIPOINT' or ogr_geom_in.GetGeometryName() == 'MULTILINESTRING' or ogr_geom_in.GetGeometryName() == 'MULTIPOLYGON' :\n for i in range(ogr_geom_in.GetGeometryCount()):\n plot_geometry ( ogr_geom_in.GetGeometryRef( i ), exterior_color, interior_color )\n if ogr_geom_in.GetGeometryName() == 'POINT' :\n x = []\n y = []\n x.append(ogr_geom_in.GetX())\n y.append(ogr_geom_in.GetY())\n pylab.plot(x,y,'o',color='y')\n if ogr_geom_in.GetGeometryName() == 'LINESTRING' :\n x = []\n y = []\n for i in range(ogr_geom_in.GetPointCount()) :\n x.append(ogr_geom_in.GetX(i))\n y.append(ogr_geom_in.GetY(i))\n pylab.plot(x,y,'-',color='g')\n if ogr_geom_in.GetGeometryName() == 'POLYGON' :\n polygon = ogr_geom_in\n ring_index = 0\n for nr_ring in range ( polygon.GetGeometryCount() ):\n ring = polygon.GetGeometryRef( nr_ring )\n x =[ring.GetX(i) for i in range(ring.GetPointCount()) ]\n y =[ring.GetY(i) for i in range(ring.GetPointCount()) ]\n if ring_index == 0 :\n pylab.plot(x,y,'-',color=str(exterior_color), linewidth=2.0, hold=True)\n else :\n pylab.plot(x,y,'-',color=str(interior_color), linewidth=2.0, hold=True)\n ring_index = ring_index + 1",
"def _setup(self):\n super()._setup()\n self.create_geometry()"
] |
[
"0.6038567",
"0.60086095",
"0.5834033",
"0.58121526",
"0.5745491",
"0.56554115",
"0.5609684",
"0.55950236",
"0.5489419",
"0.5473454",
"0.54334366",
"0.5390387",
"0.5352398",
"0.5352128",
"0.5349648",
"0.53448266",
"0.53440094",
"0.5336835",
"0.53026235",
"0.5299472",
"0.5273278",
"0.52030677",
"0.518125",
"0.51809454",
"0.5098641",
"0.5088739",
"0.50880593",
"0.50784034",
"0.50562507",
"0.50277156"
] |
0.63417476
|
0
|
Convert to a string that the GEOSGeometry class constructor can accept. The default decoder would pass our geo dict object to the constructor, which would result in a TypeError; using the hook below we force it into an EWKT format. This is accomplished with a class hint, as per the JSON-RPC specification.
|
def GEOJsonToEWKT(dict):
if '__GEOSGeometry__' in dict: # using class hint catch a GEOSGeometry definition
return dict['__GEOSGeometry__'][1][0]
return dict
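A short round-trip sketch assuming the hook above is passed as the object_hook to json.loads on output produced with the class-hint convention; the JSON literal below is illustrative.
import json
from django.contrib.gis.geos import GEOSGeometry

encoded = '{"location": {"__GEOSGeometry__": ["__init__", ["SRID=4326;POINT (4.35 50.85)"]]}}'
# The hook replaces the hinted dict with its EWKT string, which the
# GEOSGeometry constructor accepts directly.
decoded = json.loads(encoded, object_hook=GEOJsonToEWKT)
geom = GEOSGeometry(decoded["location"])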
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def default(self, o): \n if isinstance(o, GEOSGeometry):\n dictval = json.loads(o.geojson)\n #raise Exception(o.ewkt)\n dictval['__GEOSGeometry__'] = ['__init__', [o.ewkt]] #json class hint; see http://json-rpc.org/wiki/specification\n return dictval\n else:\n super(DjangoGEOJSONEncoder, self).default(o)",
"def Deserializer(stream_or_string, **options):\n def GEOJsonToEWKT(dict):\n \"\"\" \n Convert to a string that GEOSGeometry class constructor can accept. \n \n The default decoder would pass our geo dict object to the constructor which \n would result in a TypeError; using the below hook we are forcing it into a \n ewkt format. This is accomplished with a class hint as per JSON-RPC \n \"\"\" \n if '__GEOSGeometry__' in dict: # using class hint catch a GEOSGeometry definition \n return dict['__GEOSGeometry__'][1][0]\n \n return dict\n if isinstance(stream_or_string, basestring):\n stream = StringIO(stream_or_string)\n else:\n stream = stream_or_string\n for obj in PythonDeserializer(simplejson.load(stream, object_hook=GEOJsonToEWKT), **options):\n yield obj",
"def __init__(self, geom_input, srs=None):\n str_instance = isinstance(geom_input, str)\n\n # If HEX, unpack input to a binary buffer.\n if str_instance and hex_regex.match(geom_input):\n geom_input = memoryview(bytes.fromhex(geom_input))\n str_instance = False\n\n # Constructing the geometry,\n if str_instance:\n wkt_m = wkt_regex.match(geom_input)\n json_m = json_regex.match(geom_input)\n if wkt_m:\n if wkt_m[\"srid\"]:\n # If there's EWKT, set the SRS w/value of the SRID.\n srs = int(wkt_m[\"srid\"])\n if wkt_m[\"type\"].upper() == \"LINEARRING\":\n # OGR_G_CreateFromWkt doesn't work with LINEARRING WKT.\n # See https://trac.osgeo.org/gdal/ticket/1992.\n g = capi.create_geom(OGRGeomType(wkt_m[\"type\"]).num)\n capi.import_wkt(g, byref(c_char_p(wkt_m[\"wkt\"].encode())))\n else:\n g = capi.from_wkt(\n byref(c_char_p(wkt_m[\"wkt\"].encode())), None, byref(c_void_p())\n )\n elif json_m:\n g = self._from_json(geom_input.encode())\n else:\n # Seeing if the input is a valid short-hand string\n # (e.g., 'Point', 'POLYGON').\n OGRGeomType(geom_input)\n g = capi.create_geom(OGRGeomType(geom_input).num)\n elif isinstance(geom_input, memoryview):\n # WKB was passed in\n g = self._from_wkb(geom_input)\n elif isinstance(geom_input, OGRGeomType):\n # OGRGeomType was passed in, an empty geometry will be created.\n g = capi.create_geom(geom_input.num)\n elif isinstance(geom_input, self.ptr_type):\n # OGR pointer (c_void_p) was the input.\n g = geom_input\n else:\n raise GDALException(\n \"Invalid input type for OGR Geometry construction: %s\"\n % type(geom_input)\n )\n\n # Now checking the Geometry pointer before finishing initialization\n # by setting the pointer for the object.\n if not g:\n raise GDALException(\n \"Cannot create OGR Geometry from input: %s\" % geom_input\n )\n self.ptr = g\n\n # Assigning the SpatialReference object to the geometry, if valid.\n if srs:\n self.srs = srs\n\n # Setting the class depending upon the OGR Geometry Type\n self.__class__ = GEO_CLASSES[self.geom_type.num]",
"def geos_geom_from_py(ob, create_func=...): # -> tuple[Any | Unknown, Unknown]:\n ...",
"def test_serialization():\n bb_1 = t2.TBoundingBox(0.4, 0.3, 0.1, top=None) # type:ignore forcing some None/null values\n bb_2 = t2.TBoundingBox(0.4, 0.3, 0.1, top=0.2) # type: ignore\n p1 = t2.TPoint(x=0.1, y=0.1)\n p2 = t2.TPoint(x=0.3, y=None) # type:ignore\n geo = t2.TGeometry(bounding_box=bb_1, polygon=[p1, p2])\n geo_s = t2.TGeometrySchema()\n s: str = geo_s.dumps(geo)\n assert not \"null\" in s\n geo = t2.TGeometry(bounding_box=bb_2, polygon=[p1, p2])\n s: str = geo_s.dumps(geo)\n assert not \"null\" in s",
"def _get_geometry(self, val):\n g = OGRGeometry(val)\n return json.loads(g.json)",
"def as_ewkt(self) -> ir.StringValue:\n return ops.GeoAsEWKT(self).to_expr()",
"def normalizeGeometry(geom):\n\t# Convert string GEOSGeometry object to python dict\n\tgeom = json.loads(geom)\n\n\t# Normalize longitude to range [-180, 180) using saw tooth function\n\tc = geom['coordinates'][0]\n\tgeom['coordinates'][0] = (c+180 - ( math.floor( (c+180)/360 ) )*360) - 180\n\n\t# Normalize latitude to range [-90, 90) using saw tooth function\n\tc = geom['coordinates'][1]\n\tgeom['coordinates'][1] = (c+90 - ( math.floor( (c+90)/180 ) )*180) - 90\n\n\t# Encode and return GEOSGeometry object\n\treturn GEOSGeometry(json.dumps(geom))",
"def getquoted(self):\n if self.is_geometry:\n # Psycopg will figure out whether to use E'\\\\000' or '\\000'.\n return b\"%s(%s)\" % (\n b\"ST_GeogFromWKB\" if self.geography else b\"ST_GeomFromEWKB\",\n sql.quote(self.ewkb).encode(),\n )\n else:\n # For rasters, add explicit type cast to WKB string.\n return b\"'%s'::raster\" % self.ewkb.hex().encode()",
"def convertor(geometry, method=\"wgs2gcj\"):\n if geometry['type'] == 'Point':\n coords = geometry['coordinates']\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n elif geometry['type'] == 'LineString' or geometry['type'] == 'MutliPoint':\n coordinates = geometry['coordinates']\n for coords in coordinates:\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n elif geometry['type'] == 'Polygon' or geometry['type'] == 'MultiLineString':\n coordinates = geometry['coordinates']\n for rings in coordinates:\n for coords in rings:\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n elif geometry['type'] == 'MultiPolygon':\n coordinates = geometry['coordinates']\n for rings in coordinates:\n for lines in rings:\n for coords in lines:\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n return geometry",
"def Stringify():\n\n def s(obj):\n return json.dumps(obj, default=_decimal_default_proc)\n\n return \"Stringify\" >> beam.Map(s)",
"def wkt(self): # -> str:\n ...",
"def decode_geometry(geom: str) -> BasePolygon:\n return shape(geobuf.decode(bytes.fromhex(geom))).buffer(0)",
"def __init__(self, obj, geography=False):\n self.is_geometry = isinstance(obj, (GEOSGeometry, PostGISAdapter))\n\n # Getting the WKB (in string form, to allow easy pickling of\n # the adaptor) and the SRID from the geometry or raster.\n if self.is_geometry:\n self.ewkb = bytes(obj.ewkb)\n else:\n self.ewkb = to_pgraster(obj)\n\n self.srid = obj.srid\n self.geography = geography",
"def geometry_type(self) -> ir.StringValue:\n return ops.GeoGeometryType(self).to_expr()",
"def create_ogr_geom(geom) -> ogr.Geometry:\n if isinstance(geom, ogr.Geometry):\n return geom\n\n # Converte os tipos para diferentes situações (python 2.7).\n # if isinstance(geom, str):\n # geom = str(geom)\n # elif isinstance(geom, unicode):\n # geom = str(geom)\n try:\n ogr_geom = ogr.CreateGeometryFromWkb(geom)\n except RuntimeError:\n ogr_geom = ogr.CreateGeometryFromWkt(geom)\n if not ogr_geom:\n ogr_geom = ogr.CreateGeometryFromWkt(geom)\n return ogr_geom",
"def to_wkt(self):\n return _property_op(arctern.ST_AsText, self)",
"def wkb_hex(self): # -> str:\n ...",
"def getDeserializer():",
"def test_jsonify_encode(self):\n\n Point = namedtuple('Point', ['x', 'y'], False)\n\n class Foo(Base):\n _table = ClassReadonlyProperty('foos')\n _primary_key = ClassReadonlyProperty('foo_id')\n\n foo_id = IDField('_id')\n str_field = StringField(default='this is default')\n date_field = DateField()\n int_field = IntField()\n bool_field = BoolField()\n list_field = ListField()\n tuple_field = TupleField(np=Point)\n\n foo = Foo.create({\n 'int_field': 100,\n 'list_field': [1, 2, 3],\n })\n\n _foo = foo.to_jsonify()\n self.assertEqual('Foo', _foo['__class__'])\n self.assertEqual(_foo['foo_id'], foo.foo_id,)\n self.assertEqual(_foo['str_field'], 'this is default')\n self.assertEqual(_foo['int_field'], 100)\n self.assertEqual(_foo['list_field'], [1, 2, 3])\n self.assertNotIn('tuple_field', _foo)\n self.assertNotIn('date_field', _foo)\n\n Point = namedtuple('Point', ['x', 'y'], False)\n\n class Foo(Base):\n _table = ClassReadonlyProperty('foos')\n _primary_key = ClassReadonlyProperty('foo_id')\n\n foo_id = IDField('_id')\n list_field = ListField()\n tuple_field = TupleField(np=Point, default=lambda: Point(x=1, y=2))\n\n foo = Foo.create({})\n _foo = foo.to_jsonify()\n self.assertEqual(_foo['tuple_field'], {'x': 1, 'y': 2})\n self.assertEqual(_foo['list_field'], [])",
"def get_geojson_feature(id, raw_bbox_string, properties_dict):\n coords = raw_bbox_string.split()\n \n # Tesseract uses ints, but allow floats\n for i, val in enumerate(coords):\n coords[i] = float(val)\n # bbox order = # x0 y0 x1 y1\n \n bbox_json_obj = geojson.Polygon([[\n (coords[0], coords[1]), \n (coords[0], coords[3]), \n (coords[2], coords[3]), \n (coords[2], coords[1]),\n (coords[0], coords[1])\n ]])\n return geojson.Feature(id, bbox_json_obj, properties=properties_dict)",
"def convert_list_to_wkt(self, geom):\n if geom['type'] == \"Polygon\":\n coords = [f\"{coord[0]} {coord[1]}\" for coord in geom['coordinates'][0]]\n return f\"POLYGON (( {', '.join(coords)} ))\"\n else:\n raise Exception(f\"Unknown type of Geometry in GeoJSON of {geom['type']}\")",
"def dumps(obj, big_endian=True):\n geom_type = obj['type']\n\n exporter = __dumps_registry.get(geom_type)\n if exporter is None:\n __unsupported_geom_type(geom_type)\n\n return exporter(obj, big_endian)",
"def as_geom(data: dict) -> dict:\n geom = geom_from_geojson(data)\n validate_geom(geom)\n return geom",
"def _check_geom(geom):\n if isinstance(geom, BaseGeometry):\n return geom\n elif isinstance(geom, str): # assume it's a wkt\n return loads(geom)\n elif isinstance(geom, list) and len(geom) == 2: # coordinates\n return Point(geom)",
"def getWKT(self):\n logger.debug(\"Entering in ocentricWKT.getWkt\")\n\n # building WKT string\n wkt = OcentricWKT.GEODCRS % (\n self.getGeoGcsName(), self.getDatumName(), self.getSpheroidName(), self.getRadius(), self.getInverseFlattening(),\n self.getRadius(), self.getAuthorityName(), self.getAuthorityCode()\n )\n\n logger.debug(\"Exiting from ocentricWKT.getWkt\")\n return wkt",
"def from_dict(cls, dikt) -> 'JWKS':\n return util.deserialize_model(dikt, cls)",
"def _json_serializer(obj):\n if isinstance(obj, (datetime, date)):\n return obj.isoformat(sep=\" \")\n\n if isinstance(obj, (types.TracebackType, TracebackFrameProxy)):\n return \"<Traceback object>\"\n\n return saferepr(obj)",
"def get_json_string(self, **kwargs):\n ...",
"def as_text(self) -> ir.StringValue:\n return ops.GeoAsText(self).to_expr()"
] |
[
"0.7065414",
"0.6355173",
"0.59645593",
"0.57913524",
"0.57722545",
"0.57579434",
"0.5652088",
"0.5635648",
"0.5624472",
"0.54958695",
"0.5458331",
"0.54087853",
"0.53846693",
"0.53395563",
"0.52791077",
"0.525524",
"0.52512026",
"0.5229241",
"0.5206178",
"0.51639766",
"0.51562965",
"0.5152284",
"0.5105496",
"0.5104916",
"0.5092703",
"0.5085317",
"0.50593644",
"0.5056716",
"0.49964955",
"0.49929014"
] |
0.67895186
|
1
|
The desktop type. You can call `eds_get_desktop_types` to query desktop type.
|
def desktop_type(self) -> pulumi.Input[str]:
return pulumi.get(self, "desktop_type")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def desktop_type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"desktop_type\")",
"def desktop_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"desktop_type\")",
"def desktop_name(self) -> Optional[str]:\n return None",
"def device_type(self) -> str:\n if self.android_feature_phone():\n return 'smartphone'\n\n dt = self.all_details.get('device', {}).get('type', '')\n if dt:\n return dt\n\n aat = self.android_device_type()\n if aat:\n return aat\n\n if self.windows_tablet():\n return 'tablet'\n\n if self.is_television():\n return 'tv'\n\n if self.is_desktop():\n return 'desktop'\n\n if self.opera_tablet():\n return 'tablet'\n\n return ''",
"def get_desktop_size(self):\n\n _ptr = ffi.new('SDL_DisplayMode *')\n check_int_err(lib.SDL_GetDesktopDisplayMode(self._index, _ptr))\n return (_ptr.w, _ptr.h)",
"def device_type(self):\n return Context.devtype2str[self.device_typeid]",
"def device_type(self):\n return Context.devtype2str[self.device_typeid]",
"def platform_type(self):\n return self._platform_type",
"def device_type(self):\n return self._meta['device_type']",
"def getDesktopSize(self):\n return convToUnits(self.desktopBytes, divby=1000)",
"def device_type(self):\n return self._device_type",
"def device_type(self):\n # type: () -> string_types\n return self._device_type",
"def startup_type(self) -> str:\n return pulumi.get(self, \"startup_type\")",
"def startup_type(self) -> str:\n return pulumi.get(self, \"startup_type\")",
"def get_desktop():\n l=get_pids(('kwin','ksmserver',))\n if l: kde=l[0]\n else: kde=None\n l=get_pids(('gnome-session',))\n if l: gnome=l[0]\n else: gnome=None\n if kde:\n if not gnome or kde<gnome: return 1\n else: return 0\n if gnome: return 0\n else: return -1",
"def device_type(self) -> str:\n return self.profile_device.device_type",
"def get_type(self):\n return self.get_udev_property('ID_FS_TYPE')",
"def type(self):\n return self._device.type_name",
"def get_device_type() -> str:\n return DefaultDeviceType._default_device_type",
"def GetDeviceTypeName(self):\n if self._device_type_name is None:\n self._device_type_name = self.LsbReleaseValue(\n key='DEVICETYPE', default='CHROMEBOOK')\n return self._device_type_name",
"def is_use_desktop(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsUseDesktop', self.handle))",
"def get_disk_type(self):\n\t\treturn call_sdk_function('PrlVmDevHd_GetDiskType', self.handle)",
"def fstype(self):\n return self._properties.get('fstype')",
"def device_class(self):\n return self._device_type",
"def platform(self):\n return self.random.choice([\n 'Laptop', \n 'Desktop', \n 'Workstation', \n 'Server', \n 'Virtual Machine', \n 'Container', \n 'Micro-Service', \n 'Droplet', \n 'SaaS'\n ])",
"def type(self) -> str:\n return self._device_info[\"Type\"]",
"def disktype(self):\n # easy enough\n return self._disktype",
"def machine_type(self):\n\n return self._machine_type",
"def get_eprom_type():\n return command(\"S\")",
"def os_type(self):\n\n return self._os_type"
] |
[
"0.8786367",
"0.87387115",
"0.68791276",
"0.66055137",
"0.65639985",
"0.62983614",
"0.62983614",
"0.627414",
"0.6271547",
"0.6242804",
"0.6240439",
"0.62114114",
"0.6184054",
"0.6184054",
"0.61598814",
"0.61366844",
"0.61350536",
"0.61319685",
"0.6104484",
"0.6085211",
"0.6038525",
"0.60043037",
"0.5959247",
"0.5904089",
"0.5902894",
"0.589503",
"0.58930075",
"0.5891804",
"0.58893657",
"0.58546793"
] |
0.88508695
|
0
|
The root disk size in GiB.
|
def root_disk_size_gib(self) -> pulumi.Input[int]:
return pulumi.get(self, "root_disk_size_gib")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def root_disk_size_gib(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"root_disk_size_gib\")",
"def root_disk_size_gib(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"root_disk_size_gib\")",
"def disk_size_gb(self) -> pulumi.Output[Optional[float]]:\n return pulumi.get(self, \"disk_size_gb\")",
"def disk_size_gb(self) -> str:\n return pulumi.get(self, \"disk_size_gb\")",
"def disk_size_gb(self) -> str:\n return pulumi.get(self, \"disk_size_gb\")",
"def disk_size_gb(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"disk_size_gb\")",
"def disk_size_gb(self) -> Optional[int]:\n return pulumi.get(self, \"disk_size_gb\")",
"def disk_size_gb(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"disk_size_gb\")",
"def size_gigabytes(self) -> float:\n return pulumi.get(self, \"size_gigabytes\")",
"def data_disk_size_gb(self) -> str:\n return pulumi.get(self, \"data_disk_size_gb\")",
"def disk_size_bytes(self) -> float:\n return pulumi.get(self, \"disk_size_bytes\")",
"def disk_size_gb(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"disk_size_gb\")",
"def disk_size_gb(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"disk_size_gb\")",
"def os_disk_size_gb(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"os_disk_size_gb\")",
"def get_size_gib_in_disk(host, device_path, dbapi):\n size_gib = 0\n disks = dbapi.idisk_get_by_ihost(host.uuid)\n for disk in disks:\n if disk.device_path == device_path or disk.device_node == device_path:\n size_gib = disk.size_mib / 1024\n return size_gib",
"def min_disk_size(self) -> int:\n return pulumi.get(self, \"min_disk_size\")",
"def tmp_size(disk):\n tmp_gib = max(5, math.sqrt(disk))\n return int(tmp_gib) * GiB",
"def fs_size_total(self):\n return self._fs_size_total",
"def disk_size(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"disk_size\")",
"def state(self):\n decimals = 2\n size_mb = round(self._size/1e6, decimals)\n return size_mb",
"def size_gb(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"size_gb\")",
"def min_disk_size(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"min_disk_size\")",
"def get_gsize(self):\n gsize_file = Genome(self.genome).get_fasize()\n gsize = 0\n with open(gsize_file, 'rt') as fi:\n for a in fi:\n c, n = a.strip().split('\\t')\n gsize += int(n)\n return gsize",
"def size_in_gb(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"size_in_gb\")",
"def size_in_gb(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"size_in_gb\")",
"def size_in_gb(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"size_in_gb\")",
"def available_space(self):\n # From http://stackoverflow.com/a/787832/732596\n s = os.statvfs(self.path)\n return (s.f_bavail * s.f_frsize) / 1024**2",
"def size_gb(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"size_gb\")",
"def size_gb(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"size_gb\")",
"def get_disk_size(path='/'):\n if path is None:\n path = '/'\n try:\n st = os.statvfs(path)\n except OSError, e:\n display_error(\n 'Error getting disk space in %s: %s', path, str(e))\n return 0\n total = (st.f_blocks * st.f_frsize) / 1024\n return int(total)"
] |
[
"0.9248658",
"0.8900354",
"0.79580075",
"0.7942863",
"0.7942863",
"0.7890328",
"0.77936786",
"0.7761898",
"0.77536315",
"0.77416444",
"0.7678155",
"0.7634557",
"0.7634557",
"0.7580981",
"0.73675495",
"0.73544204",
"0.7239178",
"0.72334707",
"0.71683353",
"0.71169215",
"0.7106825",
"0.7056321",
"0.703612",
"0.6974035",
"0.6974035",
"0.6974035",
"0.69658893",
"0.6951855",
"0.6951855",
"0.6942203"
] |
0.91740936
|
1
|
The name of the bundle.
|
def bundle_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "bundle_name")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def bundle_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"bundle_name\")",
"def bundle_id(self) -> str:\n return pulumi.get(self, \"bundle_id\")",
"def bundle_id(self) -> str:\n return pulumi.get(self, \"bundle_id\")",
"def bundle_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"bundle_id\")",
"def get_name(self):\n return self._assets[0].get_name()",
"def name(self):\n\n return self.manifest[\"name\"]",
"def bundle_id(self):\n return self._bundle_id",
"def bundle_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"bundle_id\")",
"def name(self):\n return _version._NAME # pylint: disable=protected-access",
"def name_python_package(self) -> str:\n return f'ba{self.name_compact}'",
"def name(self):\n\t\treturn self.asset.name",
"def bundle_path(self, app):\n return (\n self.platform_path / self.output_format / safe_formal_name(app.formal_name)\n )",
"def name(self):\n return self.application_tree['name']",
"def bundle_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"bundle_id\")",
"def bundle_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"bundle_id\")",
"def bundle_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"bundle_id\")",
"def get_package_name(self):\n return self.name + '-' + self.version",
"def _bundle(self):\n # Default to DEFAULT_BUNDLE_NAME\n bundle_path = os.path.join(self.working_dir, DEFAULT_BUNDLE_NAME)\n return self.config['app'].get('bundle', bundle_path)",
"def get_name():\n return config.APP_NAME",
"def get_package_name(self):\n return self.name + '-' + self.version + '-' + self.release",
"def package_name(self) -> str:\n return pulumi.get(self, \"package_name\")",
"def name(self):\r\n return self.component.get(\"Name\", \"\")",
"def name(self):\r\n return self.component.get(\"Name\", \"\")",
"def get_name(self):\n return COMPONENT_LIST[self.index][0]",
"def name() -> str:\n pass",
"def module_name(self):\n return self.name()",
"def name(self) -> str:\n return self.dev.label",
"def package_name(self):",
"def name(self):\n return self.raw_resource[\"name\"]",
"def module_name(self):\n return self.name"
] |
[
"0.86618936",
"0.73552686",
"0.73552686",
"0.714117",
"0.7098918",
"0.6944878",
"0.69287366",
"0.68272054",
"0.66933715",
"0.664791",
"0.6634069",
"0.6630414",
"0.662722",
"0.66147816",
"0.66147816",
"0.66147816",
"0.6612741",
"0.6596214",
"0.65880525",
"0.6536391",
"0.65362847",
"0.6523235",
"0.6523235",
"0.65200627",
"0.646093",
"0.6458509",
"0.6453419",
"0.64529246",
"0.645162",
"0.6443184"
] |
0.8517897
|
1
|
Provides an ECD Bundle resource.
|
def __init__(__self__,
resource_name: str,
args: BundleArgs,
opts: Optional[pulumi.ResourceOptions] = None):
...
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n bundle_name: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n desktop_type: Optional[pulumi.Input[str]] = None,\n image_id: Optional[pulumi.Input[str]] = None,\n language: Optional[pulumi.Input[str]] = None,\n root_disk_performance_level: Optional[pulumi.Input[str]] = None,\n root_disk_size_gib: Optional[pulumi.Input[int]] = None,\n user_disk_performance_level: Optional[pulumi.Input[str]] = None,\n user_disk_size_gibs: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None) -> 'Bundle':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _BundleState.__new__(_BundleState)\n\n __props__.__dict__[\"bundle_name\"] = bundle_name\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"desktop_type\"] = desktop_type\n __props__.__dict__[\"image_id\"] = image_id\n __props__.__dict__[\"language\"] = language\n __props__.__dict__[\"root_disk_performance_level\"] = root_disk_performance_level\n __props__.__dict__[\"root_disk_size_gib\"] = root_disk_size_gib\n __props__.__dict__[\"user_disk_performance_level\"] = user_disk_performance_level\n __props__.__dict__[\"user_disk_size_gibs\"] = user_disk_size_gibs\n return Bundle(resource_name, opts=opts, __props__=__props__)",
"def bundle(self):\n return self._bundle",
"def get_resource(self):\n from rowgenerators import parse_app_url # Here, to break an import cycle\n\n self._resource = self._downloader.download(self.inner)\n\n\n ru = parse_app_url(self._resource.sys_path,\n downloader=self.downloader,\n scheme_extension=self.scheme_extension,\n **self.frag_dict)\n\n\n return ru",
"def test_get_software_asset_bundle_expanded(self):\n pass",
"def getResource(self):\n pass;",
"def get_bundle():\n if should_save_generator_bundle():\n return None\n bundle_file = get_bundle_file()\n if bundle_file is None:\n return None\n return sequence_generator_bundle.read_bundle_file(bundle_file)",
"def get_resource(resource_path):\n\n return pkg_resources.resource_string(\n cloudify_agent.__name__,\n os.path.join('resources', resource_path)\n )",
"def _resource(path): # pragma: NO COVER\n data = pkg_resources.resource_string(__name__, path)\n return data.decode(\"utf8\")",
"def loadAsset(self, *args):\n\n asset = OL.loadAssemblyReference(self.name)\n return asset",
"def _bundle(self):\n # Default to DEFAULT_BUNDLE_NAME\n bundle_path = os.path.join(self.working_dir, DEFAULT_BUNDLE_NAME)\n return self.config['app'].get('bundle', bundle_path)",
"def bundle_id(self) -> str:\n return pulumi.get(self, \"bundle_id\")",
"def bundle_id(self) -> str:\n return pulumi.get(self, \"bundle_id\")",
"def register_bundle(self, cls):\n return self.register_entity('bundle', cls)",
"def getResource(self):\n return self.serviceClass.app.resource()",
"def create_bundle(self):\n self._highest_bundle_id += 1\n bundle = Bundle(document=self, bundle_id=str(self._highest_bundle_id))\n self.bundles.append(bundle)\n bundle.number = len(self.bundles)\n return bundle",
"def get_bundle(conf, asset_type, bundle_name):\n \n content_type = 'application/javascript'\n content = []\n \n if asset_type == 'css':\n content_type = 'text/css'\n \n for asset in conf[asset_type][bundle_name]:\n content.append(open(os.path.join(conf['srcDir'], asset_type, asset)).read())\n \n content = ''.join(content)\n \n return '200 OK', content_type, content",
"def get_bundle_file():\n if FLAGS.bundle_file is None:\n return None\n else:\n return os.path.expanduser(FLAGS.bundle_file)",
"def load_resource(self, resource_path):\n resource_content = pkg_resources.resource_string(__name__, resource_path)\n return resource_content.decode(\"utf8\")",
"def parse_bundle_for_file(fhir_bundle_path):\n\n with open(fhir_bundle_path, 'r', encoding='UTF-8') as f:\n bundle = bu.Bundle(json.load(f))\n return bundle",
"def resource(self):\n return self.properties.get('resource',\n Entity(self.context, ResourcePath(\"resource\", self.resource_path)))",
"def load_resource(self, path):\n data = pkg_resources.resource_string(__name__, path)\n return data.decode(\"utf8\")",
"def bundle_id(self):\n return self._bundle_id",
"def load_resource(resource_path): # pragma: NO COVER\n resource_content = pkg_resources.resource_string(__name__, resource_path)\n return resource_content.decode(\"utf8\")",
"def test_create_software_asset_bundle_from_system_module(self):\n pass",
"def bundle(self, app):\r\n assert(isinstance(app, BundleCreate.App))\r\n\r\n bundledir = os.path.join(self.outdir, '%s-bundle' % app.basename)\r\n self.context.log.info('creating %s' % os.path.relpath(bundledir, get_buildroot()))\r\n\r\n safe_mkdir(bundledir, clean=True)\r\n\r\n classpath = OrderedSet()\r\n if not self.deployjar:\r\n libdir = os.path.join(bundledir, 'libs')\r\n os.mkdir(libdir)\r\n\r\n # Add internal dependencies to the bundle.\r\n def add_jars(target):\r\n target_jars = self.context.products.get('jars').get(target)\r\n if target_jars is not None:\r\n for basedir, jars in target_jars.items():\r\n for internaljar in jars:\r\n os.symlink(os.path.join(basedir, internaljar),\r\n os.path.join(libdir, internaljar))\r\n classpath.add(internaljar)\r\n app.binary.walk(add_jars, lambda t: t.is_internal)\r\n\r\n # Add external dependencies to the bundle.\r\n for basedir, externaljar in self.list_jar_dependencies(app.binary):\r\n path = os.path.join(basedir, externaljar)\r\n os.symlink(path, os.path.join(libdir, externaljar))\r\n classpath.add(externaljar)\r\n\r\n for basedir, jars in self.context.products.get('jars').get(app.binary).items():\r\n if len(jars) != 1:\r\n raise TaskError('Expected 1 mapped binary for %s but found: %s' % (app.binary, jars))\r\n\r\n binary = jars[0]\r\n binary_jar = os.path.join(basedir, binary)\r\n bundle_jar = os.path.join(bundledir, binary)\r\n if not classpath:\r\n os.symlink(binary_jar, bundle_jar)\r\n else:\r\n with open_zip(binary_jar, 'r') as src:\r\n with open_zip(bundle_jar, 'w', compression=ZIP_DEFLATED) as dest:\r\n for item in src.infolist():\r\n buf = src.read(item.filename)\r\n if Manifest.PATH == item.filename:\r\n manifest = Manifest(buf)\r\n manifest.addentry(Manifest.CLASS_PATH,\r\n ' '.join(os.path.join('libs', jar) for jar in classpath))\r\n buf = manifest.contents()\r\n dest.writestr(item, buf)\r\n\r\n for bundle in app.bundles:\r\n for path, relpath in bundle.filemap.items():\r\n bundlepath = os.path.join(bundledir, relpath)\r\n safe_mkdir(os.path.dirname(bundlepath))\r\n os.symlink(path, bundlepath)\r\n\r\n return bundledir",
"def create_resource():\n return wsgi.Resource(Controller(), serializer=ImageSerialize())",
"def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n app_name: Optional[pulumi.Input[str]] = None,\n bundle_id: Optional[pulumi.Input[str]] = None,\n encoded_icon: Optional[pulumi.Input[str]] = None,\n industry_id: Optional[pulumi.Input[str]] = None,\n package_name: Optional[pulumi.Input[str]] = None,\n product_id: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...",
"def get_resource(self):\n raise errors.Unimplemented()",
"def extract_resource(self):\n pe = pefile.PE(self.path)\n if pe.DIRECTORY_ENTRY_RESOURCE.entries[0].name.string.decode('utf-8') != \"PYTHONSCRIPT\":\n raise InvalidPy2ExeFile(\"No PYTHONSCRIPT resource\")\n else:\n r = pe.DIRECTORY_ENTRY_RESOURCE.entries[0].directory.entries[0].directory.entries[0]\n offset = r.data.struct.OffsetToData\n size = r.data.struct.Size\n return pe.get_memory_mapped_image()[offset:offset+size]",
"def get_bundle(bundle_uuid):\n assert isinstance(bundle_uuid, UUID)\n try:\n data = api_request('get', api_url('bundles', str(bundle_uuid)))\n except NotFound:\n raise BundleNotFound(f\"Bundle {bundle_uuid} does not exist.\") # lint-amnesty, pylint: disable=raise-missing-from\n return _bundle_from_response(data)"
] |
[
"0.6647277",
"0.6139058",
"0.58623874",
"0.5751916",
"0.5688191",
"0.56277573",
"0.560747",
"0.56042796",
"0.5582597",
"0.5573237",
"0.55176234",
"0.55176234",
"0.5489475",
"0.54803205",
"0.54769784",
"0.546301",
"0.54373896",
"0.54065865",
"0.54052466",
"0.5404825",
"0.540409",
"0.5403813",
"0.53592503",
"0.53554183",
"0.5347145",
"0.5343591",
"0.53336734",
"0.53086466",
"0.5294898",
"0.5293506"
] |
0.6560403
|
1
|
Get an existing Bundle resource's state with the given name, id, and optional extra properties used to qualify the lookup.
|
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
bundle_name: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
desktop_type: Optional[pulumi.Input[str]] = None,
image_id: Optional[pulumi.Input[str]] = None,
language: Optional[pulumi.Input[str]] = None,
root_disk_performance_level: Optional[pulumi.Input[str]] = None,
root_disk_size_gib: Optional[pulumi.Input[int]] = None,
user_disk_performance_level: Optional[pulumi.Input[str]] = None,
user_disk_size_gibs: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None) -> 'Bundle':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _BundleState.__new__(_BundleState)
__props__.__dict__["bundle_name"] = bundle_name
__props__.__dict__["description"] = description
__props__.__dict__["desktop_type"] = desktop_type
__props__.__dict__["image_id"] = image_id
__props__.__dict__["language"] = language
__props__.__dict__["root_disk_performance_level"] = root_disk_performance_level
__props__.__dict__["root_disk_size_gib"] = root_disk_size_gib
__props__.__dict__["user_disk_performance_level"] = user_disk_performance_level
__props__.__dict__["user_disk_size_gibs"] = user_disk_size_gibs
return Bundle(resource_name, opts=opts, __props__=__props__)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_state_by_id(state_id):\n my_state = storage.get('State', state_id)\n if my_state is None:\n abort(404)\n return jsonify(my_state.to_dict())",
"def get_state_by_id(state_id):\n state = storage.get(State, state_id)\n if not state:\n abort(404)\n return jsonify(state.to_dict()), 200",
"def get_state_by_id(state_id):\n for key, value in storage.all(\"State\").items():\n if state_id == value.id:\n return jsonify(value.to_dict())\n abort(404)",
"def state_by_id(state_id):\n state = storage.get(State, state_id)\n if state is None:\n abort(404)\n return jsonify(state.to_dict())",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n add_on: Optional[pulumi.Input[pulumi.InputType['InstanceAddOnArgs']]] = None,\n arn: Optional[pulumi.Input[str]] = None,\n availability_zone: Optional[pulumi.Input[str]] = None,\n blueprint_id: Optional[pulumi.Input[str]] = None,\n bundle_id: Optional[pulumi.Input[str]] = None,\n cpu_count: Optional[pulumi.Input[int]] = None,\n created_at: Optional[pulumi.Input[str]] = None,\n ip_address_type: Optional[pulumi.Input[str]] = None,\n ipv6_address: Optional[pulumi.Input[str]] = None,\n ipv6_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n is_static_ip: Optional[pulumi.Input[bool]] = None,\n key_pair_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n private_ip_address: Optional[pulumi.Input[str]] = None,\n public_ip_address: Optional[pulumi.Input[str]] = None,\n ram_size: Optional[pulumi.Input[float]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n user_data: Optional[pulumi.Input[str]] = None,\n username: Optional[pulumi.Input[str]] = None) -> 'Instance':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InstanceState.__new__(_InstanceState)\n\n __props__.__dict__[\"add_on\"] = add_on\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"availability_zone\"] = availability_zone\n __props__.__dict__[\"blueprint_id\"] = blueprint_id\n __props__.__dict__[\"bundle_id\"] = bundle_id\n __props__.__dict__[\"cpu_count\"] = cpu_count\n __props__.__dict__[\"created_at\"] = created_at\n __props__.__dict__[\"ip_address_type\"] = ip_address_type\n __props__.__dict__[\"ipv6_address\"] = ipv6_address\n __props__.__dict__[\"ipv6_addresses\"] = ipv6_addresses\n __props__.__dict__[\"is_static_ip\"] = is_static_ip\n __props__.__dict__[\"key_pair_name\"] = key_pair_name\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"private_ip_address\"] = private_ip_address\n __props__.__dict__[\"public_ip_address\"] = public_ip_address\n __props__.__dict__[\"ram_size\"] = ram_size\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"user_data\"] = user_data\n __props__.__dict__[\"username\"] = username\n return Instance(resource_name, opts=opts, __props__=__props__)",
"def a_state(id):\n state = storage.get(State, id)\n if state is not None:\n return jsonify(state.to_dict())\n abort(404)",
"def state_by_id(state_id):\n states_values = storage.all(\"State\").values()\n for obj in states_values:\n if obj.id == state_id:\n return jsonify(obj.to_dict())\n abort(404)",
"def get_state(state_id):\n try:\n state = jsonify(storage.get(State, state_id).to_dict())\n return state\n except:\n abort(404)",
"def get_state(state_id):\n state = storage.get(State, state_id)\n if state is None:\n abort(404)\n return jsonify(state.to_dict())",
"def lookup(job_id: str) -> JobState:\n job = JobState(job_id)\n job.update()\n return job",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n force: Optional[pulumi.Input[bool]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n state: Optional[pulumi.Input[str]] = None) -> 'InstanceState':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InstanceStateState.__new__(_InstanceStateState)\n\n __props__.__dict__[\"force\"] = force\n __props__.__dict__[\"instance_id\"] = instance_id\n __props__.__dict__[\"state\"] = state\n return InstanceState(resource_name, opts=opts, __props__=__props__)",
"def get_one_state(state_id):\n state = storage.get('State', state_id)\n if state is None:\n abort(404)\n if request.method == 'DELETE':\n storage.delete(state)\n storage.save()\n return jsonify({}), 200\n elif request.method == 'PUT':\n try:\n res_dict = request.get_json()\n res_dict['id'] = state.id\n res_dict['created_at'] = state.created_at\n state.__init__(**res_dict)\n state.save()\n return jsonify(state.to_dict()), 200\n except:\n abort(400, description='Not a JSON')\n return jsonify(state.to_dict())",
"def get_state(state_id):\n state = storage.get(\"State\", state_id)\n if state:\n return jsonify(state.to_dict())\n abort(404)",
"def get_state_by_id(state_id):\r\n response = Response(json.dumps(json_error(ResponsesREST.INVALID_INPUT.value)),\r\n status=ResponsesREST.INVALID_INPUT.value, mimetype=\"application/json\")\r\n if validator_id.is_valid({\"id\": state_id}):\r\n state_get = State()\r\n state_get.id_state = state_id\r\n result = state_get.get_state()\r\n if result in (ResponsesREST.NOT_FOUND.value, ResponsesREST.SERVER_ERROR.value):\r\n response = Response(json.dumps(json_error(result)),\r\n status=result, mimetype=\"application/json\")\r\n else:\r\n response = Response(json.dumps(result.json_state()),\r\n status=ResponsesREST.SUCCESSFUL.value,\r\n mimetype=\"application/json\")\r\n return response",
"def state_id(state_id):\n state = storage.get(State, state_id)\n if state is None:\n abort(404)\n else:\n return jsonify(state.to_dict())",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n accept_language: Optional[pulumi.Input[str]] = None,\n arn: Optional[pulumi.Input[str]] = None,\n created_time: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n distributor: Optional[pulumi.Input[str]] = None,\n has_default_path: Optional[pulumi.Input[bool]] = None,\n name: Optional[pulumi.Input[str]] = None,\n owner: Optional[pulumi.Input[str]] = None,\n provisioning_artifact_parameters: Optional[pulumi.Input[pulumi.InputType['ProductProvisioningArtifactParametersArgs']]] = None,\n status: Optional[pulumi.Input[str]] = None,\n support_description: Optional[pulumi.Input[str]] = None,\n support_email: Optional[pulumi.Input[str]] = None,\n support_url: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n type: Optional[pulumi.Input[str]] = None) -> 'Product':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ProductState.__new__(_ProductState)\n\n __props__.__dict__[\"accept_language\"] = accept_language\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"created_time\"] = created_time\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"distributor\"] = distributor\n __props__.__dict__[\"has_default_path\"] = has_default_path\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"owner\"] = owner\n __props__.__dict__[\"provisioning_artifact_parameters\"] = provisioning_artifact_parameters\n __props__.__dict__[\"status\"] = status\n __props__.__dict__[\"support_description\"] = support_description\n __props__.__dict__[\"support_email\"] = support_email\n __props__.__dict__[\"support_url\"] = support_url\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"type\"] = type\n return Product(resource_name, opts=opts, __props__=__props__)",
"def get(self, request, state_id, format=None):\n try:\n state = State.objects.get(id=state_id)\n except ObjectDoesNotExist:\n raise NotFound(detail=\"State not found\")\n\n return Response(StateSerializer(state).data)",
"def a_states_id(state_id):\n i = storage.get(\"State\", state_id)\n if i:\n return jsonify(i.to_dict())\n else:\n return (jsonify({\"error\": \"Not found\"}), 404)",
"def get_state(state_id):\n try:\n ''' Check that state_id exists '''\n query = State.select().where(State.id == state_id)\n if not query.exists():\n raise LookupError('state_id')\n\n state = State.get(State.id == state_id)\n return state.to_dict(), 200\n except LookupError as e:\n abort(404)\n except Exception as e:\n abort(500)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n arn: Optional[pulumi.Input[str]] = None,\n auth_mode: Optional[pulumi.Input[str]] = None,\n default_s3_location: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n engine_security_group_id: Optional[pulumi.Input[str]] = None,\n idp_auth_url: Optional[pulumi.Input[str]] = None,\n idp_relay_state_parameter_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n service_role: Optional[pulumi.Input[str]] = None,\n subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n url: Optional[pulumi.Input[str]] = None,\n user_role: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n workspace_security_group_id: Optional[pulumi.Input[str]] = None) -> 'Studio':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _StudioState.__new__(_StudioState)\n\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"auth_mode\"] = auth_mode\n __props__.__dict__[\"default_s3_location\"] = default_s3_location\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"engine_security_group_id\"] = engine_security_group_id\n __props__.__dict__[\"idp_auth_url\"] = idp_auth_url\n __props__.__dict__[\"idp_relay_state_parameter_name\"] = idp_relay_state_parameter_name\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"service_role\"] = service_role\n __props__.__dict__[\"subnet_ids\"] = subnet_ids\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"url\"] = url\n __props__.__dict__[\"user_role\"] = user_role\n __props__.__dict__[\"vpc_id\"] = vpc_id\n __props__.__dict__[\"workspace_security_group_id\"] = workspace_security_group_id\n return Studio(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'FhirStore':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = FhirStoreArgs.__new__(FhirStoreArgs)\n\n __props__.__dict__[\"complex_data_type_reference_parsing\"] = None\n __props__.__dict__[\"dataset_id\"] = None\n __props__.__dict__[\"default_search_handling_strict\"] = None\n __props__.__dict__[\"disable_referential_integrity\"] = None\n __props__.__dict__[\"disable_resource_versioning\"] = None\n __props__.__dict__[\"enable_update_create\"] = None\n __props__.__dict__[\"fhir_store_id\"] = None\n __props__.__dict__[\"labels\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"notification_config\"] = None\n __props__.__dict__[\"notification_configs\"] = None\n __props__.__dict__[\"project\"] = None\n __props__.__dict__[\"stream_configs\"] = None\n __props__.__dict__[\"validation_config\"] = None\n __props__.__dict__[\"version\"] = None\n return FhirStore(resource_name, opts=opts, __props__=__props__)",
"def statesById(state_id):\n obj = storage.get(State, state_id)\n if obj:\n return jsonify(obj.to_dict())\n return jsonify({\"error\": \"Not found\"}), 404",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Product':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = dict()\n\n __props__[\"approval_required\"] = None\n __props__[\"description\"] = None\n __props__[\"display_name\"] = None\n __props__[\"name\"] = None\n __props__[\"state\"] = None\n __props__[\"subscription_required\"] = None\n __props__[\"subscriptions_limit\"] = None\n __props__[\"terms\"] = None\n __props__[\"type\"] = None\n return Product(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n app_name: Optional[pulumi.Input[str]] = None,\n bundle_id: Optional[pulumi.Input[str]] = None,\n encoded_icon: Optional[pulumi.Input[str]] = None,\n industry_id: Optional[pulumi.Input[str]] = None,\n package_name: Optional[pulumi.Input[str]] = None,\n product_id: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None) -> 'App':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _AppState.__new__(_AppState)\n\n __props__.__dict__[\"app_name\"] = app_name\n __props__.__dict__[\"bundle_id\"] = bundle_id\n __props__.__dict__[\"encoded_icon\"] = encoded_icon\n __props__.__dict__[\"industry_id\"] = industry_id\n __props__.__dict__[\"package_name\"] = package_name\n __props__.__dict__[\"product_id\"] = product_id\n __props__.__dict__[\"type\"] = type\n return App(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n api_management_id: Optional[pulumi.Input[str]] = None,\n display_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None) -> 'Tag':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _TagState.__new__(_TagState)\n\n __props__.__dict__[\"api_management_id\"] = api_management_id\n __props__.__dict__[\"display_name\"] = display_name\n __props__.__dict__[\"name\"] = name\n return Tag(resource_name, opts=opts, __props__=__props__)",
"def get_by_id(cls, name):\n\t\treturn super(Locality, cls).get_by_id(cls.normalized_name(name))",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n minimal_action: Optional[pulumi.Input[str]] = None,\n most_disruptive_allowed_action: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n preserved_state: Optional[pulumi.Input[pulumi.InputType['RegionPerInstanceConfigPreservedStateArgs']]] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n region_instance_group_manager: Optional[pulumi.Input[str]] = None,\n remove_instance_state_on_destroy: Optional[pulumi.Input[bool]] = None) -> 'RegionPerInstanceConfig':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _RegionPerInstanceConfigState.__new__(_RegionPerInstanceConfigState)\n\n __props__.__dict__[\"minimal_action\"] = minimal_action\n __props__.__dict__[\"most_disruptive_allowed_action\"] = most_disruptive_allowed_action\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"preserved_state\"] = preserved_state\n __props__.__dict__[\"project\"] = project\n __props__.__dict__[\"region\"] = region\n __props__.__dict__[\"region_instance_group_manager\"] = region_instance_group_manager\n __props__.__dict__[\"remove_instance_state_on_destroy\"] = remove_instance_state_on_destroy\n return RegionPerInstanceConfig(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n arn: Optional[pulumi.Input[str]] = None,\n auto_scaling_configuration_arn: Optional[pulumi.Input[str]] = None,\n encryption_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceEncryptionConfigurationArgs']]] = None,\n health_check_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceHealthCheckConfigurationArgs']]] = None,\n instance_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceInstanceConfigurationArgs']]] = None,\n network_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceNetworkConfigurationArgs']]] = None,\n observability_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceObservabilityConfigurationArgs']]] = None,\n service_id: Optional[pulumi.Input[str]] = None,\n service_name: Optional[pulumi.Input[str]] = None,\n service_url: Optional[pulumi.Input[str]] = None,\n source_configuration: Optional[pulumi.Input[pulumi.InputType['ServiceSourceConfigurationArgs']]] = None,\n status: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Service':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ServiceState.__new__(_ServiceState)\n\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"auto_scaling_configuration_arn\"] = auto_scaling_configuration_arn\n __props__.__dict__[\"encryption_configuration\"] = encryption_configuration\n __props__.__dict__[\"health_check_configuration\"] = health_check_configuration\n __props__.__dict__[\"instance_configuration\"] = instance_configuration\n __props__.__dict__[\"network_configuration\"] = network_configuration\n __props__.__dict__[\"observability_configuration\"] = observability_configuration\n __props__.__dict__[\"service_id\"] = service_id\n __props__.__dict__[\"service_name\"] = service_name\n __props__.__dict__[\"service_url\"] = service_url\n __props__.__dict__[\"source_configuration\"] = source_configuration\n __props__.__dict__[\"status\"] = status\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n return Service(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Instance':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = InstanceArgs.__new__(InstanceArgs)\n\n __props__.__dict__[\"build\"] = None\n __props__.__dict__[\"config\"] = None\n __props__.__dict__[\"create_time\"] = None\n __props__.__dict__[\"instance_id\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"project\"] = None\n __props__.__dict__[\"state\"] = None\n __props__.__dict__[\"state_message\"] = None\n __props__.__dict__[\"update_time\"] = None\n return Instance(resource_name, opts=opts, __props__=__props__)",
"def view_state_id(state_id):\n states_obj = storage.all(\"State\")\n if request.method == 'GET':\n for state in states_obj.values():\n if state.id == state_id:\n id_found = state.to_dict()\n return jsonify(id_found)\n abort(404)\n\n if request.method == 'DELETE':\n for state in states_obj.values():\n if state.id == state_id:\n storage.delete(state)\n storage.save()\n return make_response(jsonify({}), 200)\n abort(404)\n\n if request.method == 'PUT':\n key = \"State.\" + state_id\n states = storage.all(\"State\")\n instance = states.get(key)\n if instance is None:\n abort(404)\n else:\n if not request.json:\n abort(400, \"Not a JSON\")\n req_var = request.get_json()\n for key, value in req_var.items():\n setattr(instance, key, value)\n storage.save()\n return make_response(jsonify(instance.to_dict()), 200)"
] |
[
"0.6044276",
"0.6008436",
"0.59655076",
"0.5911224",
"0.5793162",
"0.57901233",
"0.57784617",
"0.5725814",
"0.5669625",
"0.56525624",
"0.5647222",
"0.5586973",
"0.55866736",
"0.55588764",
"0.55441326",
"0.54835856",
"0.54622126",
"0.5458629",
"0.54441386",
"0.53769153",
"0.53766656",
"0.5351254",
"0.52566135",
"0.5241709",
"0.5192261",
"0.5187818",
"0.51781553",
"0.5152682",
"0.5141168",
"0.5121419"
] |
0.66141945
|
0
|
The name of the bundle.
|
def bundle_name(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "bundle_name")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def bundle_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"bundle_name\")",
"def bundle_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"bundle_name\")",
"def bundle_id(self) -> str:\n return pulumi.get(self, \"bundle_id\")",
"def bundle_id(self) -> str:\n return pulumi.get(self, \"bundle_id\")",
"def bundle_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"bundle_id\")",
"def get_name(self):\n return self._assets[0].get_name()",
"def name(self):\n\n return self.manifest[\"name\"]",
"def bundle_id(self):\n return self._bundle_id",
"def bundle_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"bundle_id\")",
"def name(self):\n return _version._NAME # pylint: disable=protected-access",
"def name_python_package(self) -> str:\n return f'ba{self.name_compact}'",
"def name(self):\n\t\treturn self.asset.name",
"def bundle_path(self, app):\n return (\n self.platform_path / self.output_format / safe_formal_name(app.formal_name)\n )",
"def name(self):\n return self.application_tree['name']",
"def bundle_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"bundle_id\")",
"def bundle_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"bundle_id\")",
"def bundle_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"bundle_id\")",
"def get_package_name(self):\n return self.name + '-' + self.version",
"def _bundle(self):\n # Default to DEFAULT_BUNDLE_NAME\n bundle_path = os.path.join(self.working_dir, DEFAULT_BUNDLE_NAME)\n return self.config['app'].get('bundle', bundle_path)",
"def get_name():\n return config.APP_NAME",
"def get_package_name(self):\n return self.name + '-' + self.version + '-' + self.release",
"def package_name(self) -> str:\n return pulumi.get(self, \"package_name\")",
"def name(self):\r\n return self.component.get(\"Name\", \"\")",
"def name(self):\r\n return self.component.get(\"Name\", \"\")",
"def get_name(self):\n return COMPONENT_LIST[self.index][0]",
"def name() -> str:\n pass",
"def module_name(self):\n return self.name()",
"def name(self) -> str:\n return self.dev.label",
"def package_name(self):",
"def name(self):\n return self.raw_resource[\"name\"]"
] |
[
"0.8517897",
"0.8517897",
"0.73552686",
"0.73552686",
"0.714117",
"0.7098918",
"0.6944878",
"0.69287366",
"0.68272054",
"0.66933715",
"0.664791",
"0.6634069",
"0.6630414",
"0.662722",
"0.66147816",
"0.66147816",
"0.66147816",
"0.6612741",
"0.6596214",
"0.65880525",
"0.6536391",
"0.65362847",
"0.6523235",
"0.6523235",
"0.65200627",
"0.646093",
"0.6458509",
"0.6453419",
"0.64529246",
"0.645162"
] |
0.86618936
|
0
|
The desktop type. You can call `eds_get_desktop_types` to query desktop type.
|
def desktop_type(self) -> pulumi.Output[str]:
return pulumi.get(self, "desktop_type")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def desktop_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"desktop_type\")",
"def desktop_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"desktop_type\")",
"def desktop_name(self) -> Optional[str]:\n return None",
"def device_type(self) -> str:\n if self.android_feature_phone():\n return 'smartphone'\n\n dt = self.all_details.get('device', {}).get('type', '')\n if dt:\n return dt\n\n aat = self.android_device_type()\n if aat:\n return aat\n\n if self.windows_tablet():\n return 'tablet'\n\n if self.is_television():\n return 'tv'\n\n if self.is_desktop():\n return 'desktop'\n\n if self.opera_tablet():\n return 'tablet'\n\n return ''",
"def get_desktop_size(self):\n\n _ptr = ffi.new('SDL_DisplayMode *')\n check_int_err(lib.SDL_GetDesktopDisplayMode(self._index, _ptr))\n return (_ptr.w, _ptr.h)",
"def device_type(self):\n return Context.devtype2str[self.device_typeid]",
"def device_type(self):\n return Context.devtype2str[self.device_typeid]",
"def platform_type(self):\n return self._platform_type",
"def device_type(self):\n return self._meta['device_type']",
"def getDesktopSize(self):\n return convToUnits(self.desktopBytes, divby=1000)",
"def device_type(self):\n return self._device_type",
"def device_type(self):\n # type: () -> string_types\n return self._device_type",
"def startup_type(self) -> str:\n return pulumi.get(self, \"startup_type\")",
"def startup_type(self) -> str:\n return pulumi.get(self, \"startup_type\")",
"def get_desktop():\n l=get_pids(('kwin','ksmserver',))\n if l: kde=l[0]\n else: kde=None\n l=get_pids(('gnome-session',))\n if l: gnome=l[0]\n else: gnome=None\n if kde:\n if not gnome or kde<gnome: return 1\n else: return 0\n if gnome: return 0\n else: return -1",
"def device_type(self) -> str:\n return self.profile_device.device_type",
"def get_type(self):\n return self.get_udev_property('ID_FS_TYPE')",
"def type(self):\n return self._device.type_name",
"def get_device_type() -> str:\n return DefaultDeviceType._default_device_type",
"def GetDeviceTypeName(self):\n if self._device_type_name is None:\n self._device_type_name = self.LsbReleaseValue(\n key='DEVICETYPE', default='CHROMEBOOK')\n return self._device_type_name",
"def is_use_desktop(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsUseDesktop', self.handle))",
"def get_disk_type(self):\n\t\treturn call_sdk_function('PrlVmDevHd_GetDiskType', self.handle)",
"def fstype(self):\n return self._properties.get('fstype')",
"def device_class(self):\n return self._device_type",
"def platform(self):\n return self.random.choice([\n 'Laptop', \n 'Desktop', \n 'Workstation', \n 'Server', \n 'Virtual Machine', \n 'Container', \n 'Micro-Service', \n 'Droplet', \n 'SaaS'\n ])",
"def type(self) -> str:\n return self._device_info[\"Type\"]",
"def disktype(self):\n # easy enough\n return self._disktype",
"def machine_type(self):\n\n return self._machine_type",
"def get_eprom_type():\n return command(\"S\")",
"def os_type(self):\n\n return self._os_type"
] |
[
"0.88508695",
"0.87387115",
"0.68791276",
"0.66055137",
"0.65639985",
"0.62983614",
"0.62983614",
"0.627414",
"0.6271547",
"0.6242804",
"0.6240439",
"0.62114114",
"0.6184054",
"0.6184054",
"0.61598814",
"0.61366844",
"0.61350536",
"0.61319685",
"0.6104484",
"0.6085211",
"0.6038525",
"0.60043037",
"0.5959247",
"0.5904089",
"0.5902894",
"0.589503",
"0.58930075",
"0.5891804",
"0.58893657",
"0.58546793"
] |
0.8786367
|
1
|
The ID of the image.
|
def image_id(self) -> pulumi.Output[str]:
return pulumi.get(self, "image_id")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def image_id(self):\n return self._image_id",
"def image_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"image_id\")",
"def image_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"image_id\")",
"def __get_image_id(self):\n return self.__get_multi_images_ids(1)",
"def image_id(cls):\n return str(uuid.uuid4())",
"def image_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"image_id\")",
"def image_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"image_id\")",
"def base_image_id(self):\n return self._base_image_id",
"def image_id_at(self, i):\n return i",
"def test_image_id(self):\n result = self.test_client.image_id\n\n assert result == \"1238012\"",
"def get_image_id(filename):\n del filename\n global GLOBAL_IMG_ID\n GLOBAL_IMG_ID += 1\n return GLOBAL_IMG_ID",
"def image_reference(self, image_id):\n\n info = self.image_info[image_id]\n if info[\"source\"] == \"openimage\":\n return info[\"id\"]\n else:\n super(self.__class__, self).image_reference(image_id)",
"def get_image_id(image):\n if not is_valid_image(image):\n return False\n\n return AVAILABLE_IMAGES[image]['imageid']",
"def _getNewImgId(self):\n\n newImgId = COCO_PLUS.IMG_ID\n COCO_PLUS.IMG_ID += 1\n\n return newImgId",
"def id(self):\n\t\treturn self.__id",
"def id(self):\n return self.metadata[\"id\"]",
"def ID(self):\n return self._id",
"def id(self):\n return self.__id",
"def ec2_image_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"ec2_image_id\")",
"def avatar_id(self):\n return self._avatar_id",
"def image_reference(self, image_id):\n info = self.image_info[image_id]\n if info['source'] == self.dataset_name:\n return info['id']\n else:\n super.image_reference(image_id)",
"def id(self):\n return self.get_data(\"id\")",
"def id(self):\n return self.raw_resource[\"id\"]",
"def id(self):\n return self._id",
"def id(self):\n return self._id",
"def id(self):\n return self._id",
"def id(self):\n return self._id",
"def id(self):\n return self._id",
"def id(self):\n return self._id",
"def id(self):\n return self._id"
] |
[
"0.8816168",
"0.84175587",
"0.84175587",
"0.83684385",
"0.82905245",
"0.8163188",
"0.8163188",
"0.7813689",
"0.7636593",
"0.7486018",
"0.74152106",
"0.7291737",
"0.72484857",
"0.7207797",
"0.71587074",
"0.7150392",
"0.71293414",
"0.70939773",
"0.7091085",
"0.70805043",
"0.7073985",
"0.7061627",
"0.70601445",
"0.69923586",
"0.69923586",
"0.69923586",
"0.69923586",
"0.69923586",
"0.69923586",
"0.69923586"
] |
0.85654783
|
1
|
The root disk size in GiB.
|
def root_disk_size_gib(self) -> pulumi.Output[int]:
return pulumi.get(self, "root_disk_size_gib")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def root_disk_size_gib(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"root_disk_size_gib\")",
"def root_disk_size_gib(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"root_disk_size_gib\")",
"def disk_size_gb(self) -> pulumi.Output[Optional[float]]:\n return pulumi.get(self, \"disk_size_gb\")",
"def disk_size_gb(self) -> str:\n return pulumi.get(self, \"disk_size_gb\")",
"def disk_size_gb(self) -> str:\n return pulumi.get(self, \"disk_size_gb\")",
"def disk_size_gb(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"disk_size_gb\")",
"def disk_size_gb(self) -> Optional[int]:\n return pulumi.get(self, \"disk_size_gb\")",
"def disk_size_gb(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"disk_size_gb\")",
"def size_gigabytes(self) -> float:\n return pulumi.get(self, \"size_gigabytes\")",
"def data_disk_size_gb(self) -> str:\n return pulumi.get(self, \"data_disk_size_gb\")",
"def disk_size_bytes(self) -> float:\n return pulumi.get(self, \"disk_size_bytes\")",
"def disk_size_gb(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"disk_size_gb\")",
"def disk_size_gb(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"disk_size_gb\")",
"def os_disk_size_gb(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"os_disk_size_gb\")",
"def get_size_gib_in_disk(host, device_path, dbapi):\n size_gib = 0\n disks = dbapi.idisk_get_by_ihost(host.uuid)\n for disk in disks:\n if disk.device_path == device_path or disk.device_node == device_path:\n size_gib = disk.size_mib / 1024\n return size_gib",
"def min_disk_size(self) -> int:\n return pulumi.get(self, \"min_disk_size\")",
"def tmp_size(disk):\n tmp_gib = max(5, math.sqrt(disk))\n return int(tmp_gib) * GiB",
"def fs_size_total(self):\n return self._fs_size_total",
"def disk_size(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"disk_size\")",
"def state(self):\n decimals = 2\n size_mb = round(self._size/1e6, decimals)\n return size_mb",
"def size_gb(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"size_gb\")",
"def min_disk_size(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"min_disk_size\")",
"def get_gsize(self):\n gsize_file = Genome(self.genome).get_fasize()\n gsize = 0\n with open(gsize_file, 'rt') as fi:\n for a in fi:\n c, n = a.strip().split('\\t')\n gsize += int(n)\n return gsize",
"def size_in_gb(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"size_in_gb\")",
"def size_in_gb(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"size_in_gb\")",
"def size_in_gb(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"size_in_gb\")",
"def available_space(self):\n # From http://stackoverflow.com/a/787832/732596\n s = os.statvfs(self.path)\n return (s.f_bavail * s.f_frsize) / 1024**2",
"def size_gb(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"size_gb\")",
"def size_gb(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"size_gb\")",
"def get_disk_size(path='/'):\n if path is None:\n path = '/'\n try:\n st = os.statvfs(path)\n except OSError, e:\n display_error(\n 'Error getting disk space in %s: %s', path, str(e))\n return 0\n total = (st.f_blocks * st.f_frsize) / 1024\n return int(total)"
] |
[
"0.91740936",
"0.8900354",
"0.79580075",
"0.7942863",
"0.7942863",
"0.7890328",
"0.77936786",
"0.7761898",
"0.77536315",
"0.77416444",
"0.7678155",
"0.7634557",
"0.7634557",
"0.7580981",
"0.73675495",
"0.73544204",
"0.7239178",
"0.72334707",
"0.71683353",
"0.71169215",
"0.7106825",
"0.7056321",
"0.703612",
"0.6974035",
"0.6974035",
"0.6974035",
"0.69658893",
"0.6951855",
"0.6951855",
"0.6942203"
] |
0.9248658
|
0
|
Testing the conversion of rsr from wavelength to wavenumber
|
def test_convert2wavenumber(self):
newrsr, info = utils.convert2wavenumber(TEST_RSR)
unit = info['unit']
self.assertEqual(unit, 'cm-1')
self.assertTrue(newrsr['20']['det-1'].has_key('wavenumber'))
self.assertFalse(newrsr['20']['det-1'].has_key('wavelength'))
wvn_res = RESULT_RSR['20']['det-1']['wavenumber']
wvn = newrsr['20']['det-1']['wavenumber']
self.assertTrue(np.allclose(wvn_res, wvn))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def wavelength_to_wavenumber(wavelength):\n return 1. / wavelength",
"def test_wk2yr(self):\n result = round(TimeUnit(34, 'wk', 'yr').doconvert(), 2)\n self.assertEqual(result, round(0.652055, 2))",
"def calculate_1rm_brzycki(w: float, r: int):\n\n if r == 0:\n return 0\n\n if w == 0:\n w = 1\n\n return round(w*36/(37 - r), 2)",
"def wavelength(self):\n return self.getparam(\"WAVELENGTH\")",
"def wavelength(self):\n return self.getparam(\"WAVELENGTH\")",
"def test_custom_wsr(self):\n def _wsr_gen(num_bits):\n nonlocal gen_wsr\n gen_wsr = [1]*num_bits\n return gen_wsr\n\n gen_wsr = None\n generator = Generator(backend=FakeValencia(), wsr_generator=_wsr_gen)\n job = generator.sample(num_raw_bits=100)\n self.assertEqual(job.initial_wsr, gen_wsr)",
"def convertToSpectroGram(self):",
"def GetWavelengths (self) :\n\t\treturn self.run(\"GetWavelengths\")",
"def wavenumber(energy):\r\n return 2 * np.pi / wavelength(energy)",
"def calculate_desired_noise_rms(clean_rms, snr):\n a = float(snr) / 20\n noise_rms = clean_rms / (10 ** a)\n return noise_rms",
"def srwf(xi):\n\treturn np.sqrt(wienergain(xi)) # SRWF gain function.",
"def wavelength(self):\n return self.get(self._names[\"wavelength\"])",
"def wavelength(energy):\r\n return 2 * np.pi * PLANCK_CONSTANT * SPEED_OF_LIGHT / energy",
"def test_RV():\n\n spec = IGRINSSpectrum(file=file)\n\n assert spec.uncertainty is not None\n assert hasattr(spec, \"barycentric_correct\")\n\n correction_velocity = spec.estimate_barycorr()\n\n assert isinstance(spec.RA, astropy.units.quantity.Quantity)\n assert isinstance(spec.DEC, astropy.units.quantity.Quantity)\n assert correction_velocity is not None\n assert isinstance(correction_velocity, astropy.units.quantity.Quantity)\n\n new_spec = spec.barycentric_correct()\n assert new_spec is not None\n assert isinstance(new_spec, Spectrum1D)",
"def wavelength_ex(hdulist):\n wave = hdulist[1].data['loglam']\n wave = 10**wave\n\n return wave",
"def read_R4W(self):\n self.write(':FETC?')\n sleep(0.1)\n msg = self.read()\n #print ('read_R4W msg:', msg)\n v = msg.split(',')[0].rstrip('NOHM4W').strip()\n if v[-1] == 'R':\n return float(v[:-1])\n else:\n return float(v)",
"def wavelength(self):\n return wavelength(energy)",
"def test_str_sound_intensity(self):\n xknx = XKNX()\n sensor = Sensor(\n xknx,\n \"TestSensor\",\n group_address_state=\"1/2/3\",\n value_type=\"sound_intensity\",\n )\n sensor.sensor_value.payload = DPTArray(\n (\n 0xC4,\n 0xF2,\n 0x56,\n 0xE6,\n )\n )\n\n self.assertEqual(sensor.resolve_state(), -1938.715576171875)\n self.assertEqual(sensor.unit_of_measurement(), \"W/m²\")\n self.assertEqual(sensor.ha_device_class(), None)",
"def get_wl_band(radar_frequency):\n return 0 if (30 < radar_frequency < 40) else 1",
"def wavelength(energy):\n return 2 * PI * PLANCK_CONSTANT * SPEED_OF_LIGHT / energy",
"def test_str_rain_amount(self):\n xknx = XKNX()\n sensor = Sensor(\n xknx, \"TestSensor\", group_address_state=\"1/2/3\", value_type=\"rain_amount\"\n )\n sensor.sensor_value.payload = DPTArray(\n (\n 0xE0,\n 0xD0,\n )\n )\n\n self.assertEqual(sensor.resolve_state(), -75366.4)\n self.assertEqual(sensor.unit_of_measurement(), \"l/m²\")\n self.assertEqual(sensor.ha_device_class(), None)",
"def convert_wave(wavelength: np.array, start_unit: str,\n end_unit: str) -> np.array:\n start = u.Unit(start_unit)\n end = u.Unit(end_unit)\n\n if start == end:\n log.debug(f'Start and end units are same: {start_unit}')\n return wavelength\n\n log.debug(f'Converting wavelength from {start_unit} to {end_unit}')\n\n try:\n out_wave = (wavelength * start).to(end,\n equivalencies=u.spectral()).value\n except u.core.UnitConversionError:\n log.debug(f'Units not convertible: {start_unit} -> {end_unit}')\n raise ValueError('Inconvertible units') from None\n\n return out_wave",
"def test_equivalent_width():\n\n spec = IGRINSSpectrum(file=file)\n mu = np.median(spec.wavelength.value)\n equivalent_width = spec.measure_ew(mu)\n\n assert equivalent_width is not None\n assert type(equivalent_width) is not int\n assert type(equivalent_width) is astropy.units.quantity.Quantity\n new_unit = equivalent_width.to(spec.wavelength.unit)\n assert new_unit.unit == spec.wavelength.unit",
"def set_wavelength(self, wavelength):\n print('Setting Santec wavelength to %.4f nm' % wavelength)\n\n # We need to select which of the 4 lasers to select depending on\n # the desired wavelength\n\n if 1530.0 < wavelength < 1630.000001:\n self.santec1.write(\"SW 4\")\n self.santec4.write(\"WA %.4f\" % wavelength)\n if self.active_module != 4:\n self.active_module = 4\n time.sleep(5.00)\n else:\n time.sleep(0.01)\n\n elif 1440.0 < wavelength < 1530.1:\n self.santec1.write(\"SW 3\")\n self.santec3.write(\"WA %.4f\" % wavelength)\n if self.active_module != 3:\n self.active_module = 3\n time.sleep(5.00)\n else:\n time.sleep(0.01)\n\n elif 1355 < wavelength < 1440.1:\n self.santec1.write(\"SW 2\")\n self.santec2.write(\"WA %.4f\" % wavelength)\n if self.active_module != 2:\n self.active_module = 2\n time.sleep(5.00)\n else:\n time.sleep(0.01)\n\n elif 1259.999999 < wavelength < 1355.1:\n self.santec1.write(\"SW 1\")\n self.santec1.write(\"WA %.4f\" % wavelength)\n if self.active_module != 1:\n self.active_module = 1\n time.sleep(5.00)\n else:\n time.sleep(0.01)\n\n else:\n print(\"Wavelength out of range. No change will be made\")",
"def _convert_rh2w(self):\n sat_vapor = 6.11 * (10.0 ** ((7.5 * self.data['Temperature_C']) /\n (237.7 + self.data['Temperature_C'])))\n\n sat_w = 621.97 * (sat_vapor / (self.data['Pressure'] - sat_vapor))\n\n self.data['Mixing_Ratio'] = (\n self.data['Relative_Humidity'] / 100.0) * sat_w",
"def convertToWindDirection(wb):\n if wb >= 0 and wb < 11.25:\n return \"N\"\n elif wb >= 11.25 and wb < 33.75:\n return \"NNE\"\n elif wb >= 33.75 and wb < 56.25:\n return \"NE\"\n elif wb >= 56.25 and wb < 78.75:\n return \"ENE\"\n elif wb >= 78.75 and wb < 101.25:\n return \"E\"\n elif wb >= 101.25 and wb < 123.75:\n return \"ESE\"\n elif wb >= 123.75 and wb < 146.25:\n return \"SE\"\n elif wb >= 146.25 and wb < 168.75:\n return \"SSE\"\n elif wb >= 168.75 and wb < 191.25:\n return \"S\"\n elif wb >= 191.25 and wb < 213.75:\n return \"SSW\"\n elif wb >= 213.75 and wb < 236.25:\n return \"SW\"\n elif wb >= 236.25 and wb < 258.75:\n return \"WSW\"\n elif wb >= 258.75 and wb < 281.25:\n return \"W\"\n elif wb >= 281.25 and wb < 303.75:\n return \"WNW\"\n elif wb >= 303.75 and wb < 326.25:\n return \"NW\"\n elif wb >= 326.25 and wb < 348.75:\n return \"NNW\"\n elif wb >= 348.75 and wb < 360:\n return \"N\"\n else:\n return \"NA\"",
"def wavelength_solution(file_name):\n file_data = read_file(file_name)\n header_data = file_data[0]\n image_data = file_data[1]\n\n range_begin = header_data['CRVAL3']\n pixel_begin = header_data['CRPIX3']\n step_size = header_data['CD3_3']\n steps = len(image_data)\n range_end = range_begin + steps * step_size\n return {'begin': range_begin, 'end': range_end, 'steps': steps}",
"def calculateSNR(self):\n pass",
"def _get_R(self, net_r_amp):\n return np.abs(net_r_amp)**2",
"def wavelength(refractive_index, omega):\n return 2 * np.pi * cgs.c / (refractive_index * omega)"
] |
[
"0.6189273",
"0.5634813",
"0.5630745",
"0.5557721",
"0.5557721",
"0.5554414",
"0.5544192",
"0.554243",
"0.5521899",
"0.5503673",
"0.5499144",
"0.5481467",
"0.54787725",
"0.5475458",
"0.5467655",
"0.54647005",
"0.54603744",
"0.54594994",
"0.54490954",
"0.5412325",
"0.5409588",
"0.54082555",
"0.53998643",
"0.53914714",
"0.53838617",
"0.5382368",
"0.53616244",
"0.5341851",
"0.5333964",
"0.53333104"
] |
0.7865288
|
0
|
Perform the request via PhantomJS.
|
def phantomjs_get(url):
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
dcap = dict(DesiredCapabilities.PHANTOMJS)
dcap["phantomjs.page.settings.userAgent"] = config.USER_AGENT
dcap["phantomjs.page.settings.loadImages"] = False
driver = webdriver.PhantomJS(desired_capabilities=dcap, executable_path=config.PHANTOMJS_BIN)
logger.debug("PhantomJS get: %s", url)
driver.get(url)
time.sleep(10) # to follow redirects
response = driver.page_source
driver.quit()
return response
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def setUp(self):\n self.driver = webdriver.PhantomJS()\n self.driver.get(self.get_server_url())",
"def do_get(self, url):\n self.driver.get(url)",
"def start_phantomjs(self):\n driver = webdriver.PhantomJS()\n return(driver)",
"def call(self):\n\n self.url = self._prepare_url()\n status_code, response = self._do_request(self.url)\n return self._process_response(status_code, response)",
"def interrogate_homepage(doc):\n\n socket.setdefaulttimeout(30)\n\n doc['browser'] = {}\n\n #empty page\n empty = u'<html><head></head><body></body></html>'\n\n #set the path to our compiled phantomjs\n phantomjs = '/phantom_bin/bin/phantomjs'\n #set server args to ignore certificate errors\n serv_arg = ['--ignore-ssl-errors=true']\n ua = ('Mozilla/4.0 (compatible; MSIE 6.01; Windows NT 6.0)')\n\n\n driver = webdriver.PhantomJS(phantomjs, \n service_args=serv_arg, \n desired_capabilities={\n 'phantomjs.page.settings.userAgent' : ua })\n\n #driver.set_page_load_timeout(10)\n\n try:\n #going to add a little sleep here, just to make sure phantomjs has finished loading up...\n time.sleep(1)\n driver.get(doc['url'])\n #add the page source to doc\n src = driver.page_source\n\n #lets check if the page is 'blank', this usually means there is no website\n if src == empty:\n print('[*] Recieved an empty page for url %s ' % (doc['url']))\n #first we are going to see if we hit it over ssl, if so try over http\n if 'https' in doc['url']:\n newurl = doc['url'].replace('https', 'http')\n\n #if it doesn't have https, so assume http, and there was some ssl stuff returned try https page\n if 'https' not in doc['url'] and doc.has_key('ssl'):\n newurl = doc['url'].replace('http', 'https')\n\n print('[*] Trying url %s' % newurl)\n driver.get(newurl)\n src = driver.page_source\n\n if src != empty:\n doc['url'] = newurl\n\n doc['browser']['src'] = src\n\n log = json.loads(driver.get_log('har')[0]['message'])\n \n #lets get every url we requested\n tmp = []\n urls = []\n for entry in log['log']['entries']:\n tmp.append(entry['request']['url'])\n\n #quick dedup\n urls = list(set(tmp))\n \n doc['browser']['urls'] = urls\n\n #final check to see if our page is empty\n if doc['browser']['src'] == empty:\n doc['browser'].pop('src')\n\n return doc\n\n except:\n print('[*] Something went wrong browsing %s falling back to requests' % doc['url'])\n\n try:\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.812.0 Safari/535.1'}\n res = requests.get(doc['url'], headers=headers, verify=False)\n doc['browser']['src'] = res.content\n return doc\n\n except:\n print('[*] Failed to get home page with requests for %s , giving up' % doc['url'])\n doc.pop('browser')\n return doc",
"def GET_test(self, request):\n request.setHeader('Access-Control-Allow-Origin', settings.PDFCD_HEADER_VALUE)\n ip = apiutils.getIP(request)\n out.info('Test called (%s)\\n' % (ip))\n request.setResponseCode(*pdapi.getResponse(pdapi.OK))\n return \"SUCCESS\\n\"",
"def setup_phantomjs():\n try:\n # Setup capabilities for the PhantomJS browser\n phantomjs_capabilities = DesiredCapabilities.PHANTOMJS\n # Some basic creds to use against an HTTP Basic Auth prompt\n phantomjs_capabilities['phantomjs.page.settings.userName'] = 'none'\n phantomjs_capabilities['phantomjs.page.settings.password'] = 'none'\n # Flags to ignore SSL problems and get screenshots\n service_args = []\n service_args.append('--ignore-ssl-errors=true')\n service_args.append('--web-security=no')\n service_args.append('--ssl-protocol=any')\n # Create the PhantomJS browser and set the window size\n browser = webdriver.PhantomJS(desired_capabilities=phantomjs_capabilities,service_args=service_args)\n browser.set_window_size(1920,1080)\n except Exception as error:\n click.secho(\"[!] Bad news: PhantomJS failed to load (not installed?), so activities \\\nrequiring a web browser will be skipped.\",fg=\"red\")\n click.secho(\"L.. Details: {}\".format(error),fg=\"red\")\n browser = None\n return browser",
"def _do_request(self, url, method='GET', body=None):\n response, content = self.request(url, method=method, body=body, headers=self.headers)\n if int(response['status']) != 200:\n raise GPAPIError(response['status'], 'ERROR IN REQUEST')\n json = simplejson.loads(content)\n return json",
"def start_requests(self):\n\n yield SeleniumRequest(\n url='https://www.iayt.org/search/newsearch.asp',\n callback=self.get_iframe_document_src,\n wait_time=5\n )",
"def step_impl(context):\r\n context.browser.get('https://opensource-demo.orangehrmlive.com/')\r\n time.sleep(10)",
"def test_doGet(self) -> None:\n\n status_code = apicall.doGet(URL, self._browserheader)\n print(\"in do get:\", status_code)\n assert status_code == API_SUCCESS",
"def get(self, url):\n self.browser.get(url)",
"def request_html_page(self):\n try:\n response = requests.get('http://www.indeed.com/jobs?', params=self.payload)\n except:\n print \"got error for \", self.payload\n self.page = response.content",
"def get(self, url: str):\n\n self.driver.get(url)",
"def do_GET(self):\n self.log.debug('do_GET called')\n self.HeadGet('GET')",
"def go(self, url):\n self.driver.get(url)",
"def get_page(self):\n self.browser.get(self.url)",
"def start(self):\n self.get(self.url)",
"def test_get(self):\n return self.doRequest(self.url, method=\"GET\", body=self.input)",
"def _page_call(self, url, request) -> Dict:\n response = self._post(url, request)\n raise_on_error(response)\n return response.json()",
"def i_am_on_the_zoo_website():\n driver.get(\"http://www.thetestroom.com/webapp/\")",
"def request_page(self, url, action=None):\n if url.startswith(self.url):\n self.driver.get(url)\n else:\n self.driver.get(self.url + url)\n self.default_wait.until(EC.invisibility_of_element_located((By.XPATH, \"//div[@class='loading-bar']\")))\n if action:\n action(self.driver)\n return self.driver.page_source",
"def test_http_request(self):\n\n response = requests.get(self.live_server_url)\n assert response.status_code == 200",
"def evaluate_in_page(self, js_string: str) -> Awaitable[Any]:",
"def step_impl(context, query):\n url = context.base_url+query\n print('url:',url,'\\n')\n with closing(requests.get(url)) as response:\n context.response = response\n context.response_json = response.json()",
"def _call(self, method, url, params):\n if not url.startswith('http'):\n url = self.root + url\n headers = self._auth_headers()\n headers['Content-Type'] = 'application/json'\n\n r = self._session.request(method, url,\n headers=headers,\n proxies=self.proxies,\n params=params,\n timeout=self.requests_timeout)\n r.raise_for_status() # Check for error\n return r.json()",
"def send_ajax_request(self, url):\n return self.client.get(\n url,\n # emulate sending requests using XMLHttpRequest\n HTTP_X_REQUESTED_WITH='XMLHttpRequest'\n )",
"def test_simpleapp():\n class Handler(RequestHandler):\n def get(self):\n self.write('Hello')\n\n app = Application([url('/hello', Handler)])\n\n with Tester(app) as tester:\n response = yield tester.http_client.fetch(tester.url_for('/hello'))\n assert 'Hello' == text_body(response)",
"def page_request(url, data_payload, session, proxies=None):\n\n # Send the request payload\n if proxies:\n resp = session.post(url, data=data_payload, proxies=proxies, verify=False)\n else:\n resp = session.post(url, data=data_payload)\n\n # Return the JSON values\n return json.loads(resp.text)",
"def _send_request(self):\n url = self.config['url']\n agent = Agent(reactor)\n response = (yield agent.request(\n 'GET',\n url.encode(\"ASCII\"),\n ))\n\n d = defer.Deferred()\n response.deliverBody(ReceiveBody(d))\n defer.returnValue((yield d))"
] |
[
"0.65761065",
"0.6351941",
"0.63481647",
"0.59856844",
"0.59129536",
"0.5856444",
"0.5794455",
"0.57042354",
"0.5664509",
"0.55692554",
"0.5561167",
"0.5527076",
"0.5525101",
"0.55229294",
"0.55212134",
"0.55170685",
"0.5462537",
"0.54617757",
"0.54610646",
"0.5457927",
"0.5444056",
"0.53711253",
"0.5357319",
"0.5329997",
"0.5329865",
"0.53284883",
"0.53178036",
"0.5308861",
"0.53086466",
"0.5306385"
] |
0.650528
|
1
|
Will block access if the resource points to a draft blog post, and the user is different from the author. Any other variation is just passed on to be handled by regular permission checks.
|
def check_permission(self, action, username, resource, perm):
if not resource:
return
if resource.realm == 'blog' and resource.id:
the_post = BlogPost(self.env, resource.id, resource.version)
for category in the_post.category_list:
if category in self.draft and the_post.author != username:
# Block all access regardless
return False
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_non_owner_authenticated_user_read_given_blogpost(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n app = AppFactory.create()\r\n user = UserFactory.create()\r\n blogpost = BlogpostFactory.create(app=app)\r\n\r\n assert self.mock_authenticated.id != app.owner.id\r\n assert_not_raises(Exception, getattr(require, 'blogpost').read, blogpost)",
"def test_non_owner_authenticated_user_update_blogpost(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n admin = UserFactory.create()\r\n app = AppFactory.create()\r\n blogpost = BlogpostFactory.create(app=app)\r\n\r\n assert self.mock_admin.id != blogpost.owner.id\r\n assert_raises(Forbidden, getattr(require, 'blogpost').update, blogpost)",
"def test_non_owner_authenticated_user_read_given_blogpost_hidden_app(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n app = AppFactory.create(hidden=1)\r\n user = UserFactory.create()\r\n blogpost = BlogpostFactory.create(app=app)\r\n\r\n assert self.mock_authenticated.id != app.owner.id\r\n assert_raises(Forbidden, getattr(require, 'blogpost').read, blogpost)",
"def post_permissions(request, post):\n user = request.user\n is_editable = has_ownership = False\n\n if user.is_authenticated():\n\n if user == post.author :\n has_ownership = is_editable = True\n elif user.is_moderator or user.is_staff:\n is_editable = True\n\n post.is_editable = is_editable\n post.has_ownership = has_ownership\n\n return post",
"def test_owner_read_given_blogpost(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n owner = UserFactory.create_batch(2)[1]\r\n app = AppFactory.create(owner=owner)\r\n blogpost = BlogpostFactory.create(app=app)\r\n\r\n assert self.mock_authenticated.id == app.owner.id\r\n assert_not_raises(Exception, getattr(require, 'blogpost').read, blogpost)",
"def test_func(self):\r\n post = self.get_object()\r\n if self.request.user == post.author:\r\n return True\r\n return False",
"def test_func(self):\r\n post = self.get_object()\r\n if self.request.user == post.author:\r\n return True\r\n return False",
"def test_func(self):\r\n post = self.get_object()\r\n if self.request.user == post.author:\r\n return True\r\n return False",
"def test_non_owner_authenticated_user_read_blogposts_for_given_hidden_app(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n app = AppFactory.create(hidden=1)\r\n user = UserFactory.create()\r\n\r\n assert self.mock_authenticated.id != app.owner.id\r\n assert_raises(Forbidden, getattr(require, 'blogpost').read, app_id=app.id)",
"def get_object(self, queryset=None):\n obj = super(PostUpdateView, self).get_object()\n if not obj.blog == self.request.user.blog:\n if not self.request.user.is_staff:\n raise Http404\n return obj",
"def test_func(self):\n post = self.get_object()\n if self.request.user == post.author:\n return True\n return False",
"def pybb_editable_by(post, user): # pragma: no cover\n warnings.warn(\"pybb_editable_by filter is deprecated and will be removed in later releases. \"\n \"Use pybb_may_edit_post(user, post) filter instead\",\n DeprecationWarning)\n return perms.may_edit_post(user, post)",
"def test_non_owner_authenticated_user_create_given_blogpost(self):\r\n\r\n with self.flask_app.app_context():\r\n admin = UserFactory.create()\r\n app = AppFactory.create()\r\n blogpost = BlogpostFactory.build(app=app, owner=admin)\r\n\r\n assert self.mock_admin.id != app.owner.id\r\n assert_raises(Forbidden, getattr(require, 'blogpost').create, blogpost)",
"def validate_blog_post(self, req, postname, version, fields):\n for category in _parse_categories(fields['categories']):\n if category in self.draft:\n if req.authname == 'anonymous':\n return [(None, 'You need to be logged in to save as draft.')]\n elif req.authname != fields['author']:\n return [(None, \"Cannot save draft for an author that isn't you.\")]\n return []",
"def test_owner_read_given_blogpost_hidden_app(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n owner = UserFactory.create_batch(2)[1]\r\n app = AppFactory.create(owner=owner, hidden=1)\r\n blogpost = BlogpostFactory.create(app=app)\r\n\r\n assert self.mock_authenticated.id == app.owner.id\r\n assert_not_raises(Exception, getattr(require, 'blogpost').read, blogpost)",
"def get_object(self, queryset=None):\n obj = super(PostDeleteView, self).get_object()\n if not obj.blog == self.request.user.blog:\n if not self.request.user.is_staff:\n raise Http404\n return obj",
"def test_anonymous_user_read_given_blogpost(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n app = AppFactory.create()\r\n blogpost = BlogpostFactory.create(app=app)\r\n\r\n assert_not_raises(Exception, getattr(require, 'blogpost').read, blogpost)",
"def test_non_owner_authenticated_user_read_blogposts_for_given_app(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n app = AppFactory.create()\r\n user = UserFactory.create()\r\n\r\n assert self.mock_authenticated.id != app.owner.id\r\n assert_not_raises(Exception, getattr(require, 'blogpost').read, app_id=app.id)",
"def test_owner_create_blogpost_as_other_user(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n another_user = UserFactory.create()\r\n app = AppFactory.create()\r\n blogpost = BlogpostFactory.build(app_id=app.id,\r\n owner=another_user)\r\n\r\n assert self.mock_authenticated.id == app.owner.id\r\n assert_raises(Forbidden, getattr(require, 'blogpost').create, blogpost)",
"def test_func(self):\n post = self.get_object()\n\n return self.request.user == post.author",
"def test_func(self):\n post = self.get_object()\n\n return self.request.user == post.author",
"def is_post_authorised(self, params):\n if 'id' not in params or not params['id']:\n return True\n else:\n return self.is_get_authorised(params['id'])",
"def check_permission(self, user, revision, review):\n # Document is not even under review\n if not revision.is_under_review():\n raise Http404()\n\n # User is not a member of the distribution list\n if review is None:\n raise Http404()",
"def test_post_update_unauthorized(self):\n url = reverse('post-detail', kwargs={'pk': self.post.id})\n user_url = reverse('user-detail', kwargs={'pk': self.superuser.id})\n title = 'Random New Title'\n body = 'Random New Body'\n response = self.client.put(url, {'title': title, 'body': body, 'user': user_url}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def test_get_update_blog_post_anonymous_user(self):\n\n test_blog = Post.objects.get(title=\"test1\")\n url = reverse('blogs:updated', kwargs={'slug': test_blog.slug})\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, 403)",
"def test_user_can_change_not_author(self):\n self.assertFalse(self.story.user_can_change(self.user2))",
"def test_non_owner_authenticated_user_delete_blogpost(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n blogpost = BlogpostFactory.create()\r\n\r\n assert self.mock_authenticated.id != blogpost.owner.id\r\n assert not self.mock_authenticated.admin\r\n assert_raises(Forbidden, getattr(require, 'blogpost').delete, blogpost)",
"def test_non_owner_authenticated_user_create_blogpost_for_given_app(self):\r\n\r\n with self.flask_app.app_context():\r\n owner = UserFactory.create_batch(2)[1]\r\n app = AppFactory.create(owner=owner)\r\n\r\n assert self.mock_admin.id != app.owner.id\r\n assert_raises(Forbidden, getattr(require, 'blogpost').create, app_id=app.id)",
"def testPostAccessDenied(self):\n self.runPost(None, data=self.post_data)\n self.response_401()\n for user in (self.guest, self.norole, self.unrelated_owner):\n self.runPost(user, data=self.post_data)\n self.response_403()",
"def owns_post(self, post):\n return self.id == post.author.id"
] |
[
"0.6910113",
"0.667111",
"0.661465",
"0.65971345",
"0.65062755",
"0.641898",
"0.641898",
"0.641898",
"0.6400173",
"0.6387623",
"0.63546264",
"0.6342159",
"0.63315797",
"0.6272541",
"0.6268477",
"0.6256566",
"0.622883",
"0.62074184",
"0.6192771",
"0.6173709",
"0.6173709",
"0.614082",
"0.61286986",
"0.6126252",
"0.61214584",
"0.61087483",
"0.6090772",
"0.6054021",
"0.60120213",
"0.60048395"
] |
0.7453113
|
0
|
If the post is a draft, just do some rudimentary checking to make sure the author does not shoot him/herself in the foot.
|
def validate_blog_post(self, req, postname, version, fields):
for category in _parse_categories(fields['categories']):
if category in self.draft:
if req.authname == 'anonymous':
return [(None, 'You need to be logged in to save as draft.')]
elif req.authname != fields['author']:
return [(None, "Cannot save draft for an author that isn't you.")]
return []
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def isDraft(self): #$NON-NLS-1$\r",
"def test_home_view_with_a_draft_post(self):\n category = create_category('Category 1')\n author = create_author('Author 1')\n create_post(category=category, author=author, name='Draft Post', content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent sollicitudin.', status='Draft')\n create_question(question_text=\"Past question.\", days=-30)\n response = self.client.get(reverse('blog.home'))\n self.assertContains(response, \"No posts are available.\")\n self.assertQuerysetEqual(response.context['posts'], [])",
"def test_show_post_view_with_a_draft_post(self):\n category = create_category('Category 1')\n author = create_author('Author 1')\n draft_post = create_post(category=category, author=author, name='Draft Post', content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent sollicitudin.', status='Draft')\n url = reverse('blog.post', args=(draft_post.id,))\n create_question(question_text=\"Past question.\", days=-30)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 404)",
"def owns_post(self, post):\n return self.id == post.author.id",
"def sees_post(self, post, context_home_or_profile):\n if owns_post(self, post):\n return True\n if context_home_or_profile and post.author not in self.followings:\n return False\n if post.is_public:\n return True\n else:\n for circle in post.circles:\n circle = get_in_circle_cache(circle.id)\n if check_member(circle, self):\n return True\n return False",
"def _filter_post(post):\n\n return True",
"def test_home_view_with_draft_post_and_published_post(self):\n category = create_category('Category 1')\n author = create_author('Author 1')\n create_post(category=category, author=author, name='Published Post',\n content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent sollicitudin.',\n status='Published')\n create_post(category=category, author=author, name='Draft Post',\n content='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Praesent sollicitudin.',\n status='Draft')\n create_question(question_text=\"Past question.\", days=-30)\n response = self.client.get(reverse('blog.home'))\n self.assertQuerysetEqual(\n response.context['posts'],\n ['<Post: Published Post>']\n )",
"def _is_post(self, frontmatter):\n is_post = frontmatter.get('blog', False)\n if type(is_post) != bool:\n raise AbortError(\n _('Invalid blog frontmatter (expects True or False): '\n '{blog_value}').format(blog_value=is_post))\n return is_post",
"def test_func(self):\r\n post = self.get_object()\r\n if self.request.user == post.author:\r\n return True\r\n return False",
"def test_func(self):\r\n post = self.get_object()\r\n if self.request.user == post.author:\r\n return True\r\n return False",
"def test_func(self):\r\n post = self.get_object()\r\n if self.request.user == post.author:\r\n return True\r\n return False",
"def test_func(self):\n post = self.get_object()\n if self.request.user == post.author:\n return True\n return False",
"def check_can_post(self, user):\n if not user.is_active or user.is_anonymous():\n return False\n\n membership = self.check_membership(user)\n\n if ((self.type != 'O' or self.post_membership_required or self.post_admin_required) and\n (membership is None or\n membership.is_banned() or\n membership.is_left())):\n return False\n elif self.post_admin_required and membership.role not in ['O', 'A']:\n return False\n elif (self.post_condition == 'K' and\n user.profile.karma < self.post_karma_threshold):\n return False\n else:\n return True",
"def is_new_post(self, post):\n return self.last_post != post['id']",
"def test_func(self):\n post = self.get_object()\n\n return self.request.user == post.author",
"def test_func(self):\n post = self.get_object()\n\n return self.request.user == post.author",
"def _initialize_drafts(self):\n drafts = memcache.get('user_drafts:' + self.email)\n if drafts is not None:\n self._drafts = drafts\n ##logging.info('HIT: %s -> %s', self.email, self._drafts)\n return False\n # We're looking for the Issue key id. The ancestry of comments goes:\n # Issue -> PatchSet -> Patch -> Comment.\n issue_ids = set(comment.key().parent().parent().parent().id()\n for comment in gql(Comment,\n 'WHERE author = :1 AND draft = TRUE',\n self.user))\n self._drafts = list(issue_ids)\n ##logging.info('INITIALIZED: %s -> %s', self.email, self._drafts)\n return True",
"def post_permissions(request, post):\n user = request.user\n is_editable = has_ownership = False\n\n if user.is_authenticated():\n\n if user == post.author :\n has_ownership = is_editable = True\n elif user.is_moderator or user.is_staff:\n is_editable = True\n\n post.is_editable = is_editable\n post.has_ownership = has_ownership\n\n return post",
"def test_draft_story_must_be_visible_to_owner(self):\n self.assertEqual(self.ds.is_visible_for(self.au), False)\n\n \"\"\" Draft story must not be visible for another. \"\"\"\n self.assertEqual(self.ds.is_visible_for(self.u2), False)\n\n \"\"\" Draft story must be visible for story owner. \"\"\"\n self.assertEqual(self.ds.is_visible_for(self.u1), True)\n\n \"\"\" Draft story must not be visible for a blocked user. \"\"\"\n self.assertEqual(self.ds.is_visible_for(self.u3), False)",
"def has_unpublished_feedbackdraft(self):\n last_feedbackset = self.cached_data.last_feedbackset\n return (last_feedbackset.grading_published_datetime is None\n and last_feedbackset.grading_points is not None)",
"def check_repost(post, user):\n if not user.is_authenticated():\n return 'not_auth' # no user to repost as\n\n if user.pk == post.author.pk:\n return 'own_post' # don't repost own post\n\n existing_repost = Post.objects.filter(author=user, repost_original=post).exists()\n if existing_repost:\n # don't repost more than once\n return 'already_reposted_as'\n\n return 'ok'",
"def assertAuthorsInPosts(context, authors, posts):\n cross_check(context, authors, posts)",
"def is_post_valid(cls, community_id, comment_op: dict):\n\n assert community_id, 'no community_id'\n community = cls._get_name(community_id)\n account_id = Accounts.get_id(comment_op['author'])\n role = cls.get_user_role(community_id, account_id)\n type_id = int(community[5])\n\n # TODO: check `nsfw` tag requirement #267\n # TODO: (1.5) check that beneficiaries are valid\n\n if type_id == TYPE_JOURNAL:\n if not comment_op['parent_author']:\n return role >= Role.member\n elif type_id == TYPE_COUNCIL:\n return role >= Role.member\n return role >= Role.guest # or at least not muted",
"def test_post_creation_unauthenticated(self):\n url = reverse('post-list', args=[self.topic1.url_name])\n payload = {\n 'author': self.user1.id,\n 'title': 'Creating a post while being unauthenticated',\n 'content': 'Rich content 5',\n }\n response = self.client.post(url, payload)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n new_post = Post.objects.filter(\n author=self.user1,\n title=payload.get('title'),\n content=payload.get('content'),\n topic=self.topic1\n )\n self.assertFalse(new_post.exists())",
"def _inline_draft(request):\n # TODO(guido): turn asserts marked with XXX into errors\n # Don't use @login_required, since the JS doesn't understand redirects.\n if not request.user:\n # Don't log this, spammers have started abusing this.\n return HttpTextResponse('Not logged in')\n snapshot = request.POST.get('snapshot')\n assert snapshot in ('old', 'new'), repr(snapshot)\n left = (snapshot == 'old')\n side = request.POST.get('side')\n assert side in ('a', 'b'), repr(side) # Display left (a) or right (b)\n issue_id = int(request.POST['issue'])\n issue = models.Issue.get_by_id(issue_id)\n assert issue # XXX\n patchset_id = int(request.POST.get('patchset') or\n request.POST[side == 'a' and 'ps_left' or 'ps_right'])\n patchset = models.PatchSet.get_by_id(int(patchset_id), parent=issue.key)\n assert patchset # XXX\n patch_id = int(request.POST.get('patch') or\n request.POST[side == 'a' and 'patch_left' or 'patch_right'])\n patch = models.Patch.get_by_id(int(patch_id), parent=patchset.key)\n assert patch # XXX\n text = request.POST.get('text')\n lineno = int(request.POST['lineno'])\n message_id = request.POST.get('message_id')\n comment = _add_or_update_comment(user=request.user, issue=issue, patch=patch,\n lineno=lineno, left=left,\n text=text, message_id=message_id)\n issue.calculate_draft_count_by_user()\n issue_fut = issue.put_async()\n\n query = models.Comment.query(\n models.Comment.patch_key == patch.key, models.Comment.lineno == lineno,\n models.Comment.left == left).order(models.Comment.date)\n comments = list(c for c in query if not c.draft or c.author == request.user)\n if comment is not None and comment.author is None:\n # Show anonymous draft even though we don't save it\n comments.append(comment)\n issue_fut.get_result()\n if not comments:\n return HttpTextResponse(' ')\n for c in comments:\n c.complete()\n return render_to_response('inline_comment.html',\n {'user': request.user,\n 'patch': patch,\n 'patchset': patchset,\n 'issue': issue,\n 'comments': comments,\n 'lineno': lineno,\n 'snapshot': snapshot,\n 'side': side,\n },\n context_instance=RequestContext(request))",
"def update_draft(self, version, request):\n for metadata in version.revision.easypublishermetadata_set.all():\n if request.user.has_perm(\"easypublisher.can_approve_for_publication\"): \n metadata.status = 'published'\n # save all other drafts for this object as declined, because we\n # chose to save a different one\n for other in EasyPublisherMetaData.objects.filter(\n revision__version__object_id=version.object_id, \n revision__version__content_type=version.content_type):\n other.status = 'declined'\n other.save()\n else:\n metadata.status = 'updated'\n metadata.save()",
"def post(self):\n subject = self.request.get('subject')\n content = self.request.get('post_content')\n post_id = self.request.get('post_id')\n post = Posts.get_by_id(int(post_id))\n user = self.get_active_user()\n user_id = int(user.key().id())\n\n if post and user and subject and content:\n if post.submitter_id == user_id:\n self.render_editpage(user, post_id, subject, content)\n else:\n self.render_improper_access()\n else:\n self.error(500)",
"def test_missing_author(self):\n\t\tps = PushshiftAPI()\n\t\tpost, = ps.search_submissions(limit=1, ids=['t3_otfrw'])\n\t\tre = RedditElement(post)\n\t\tself.assertEqual(re.author, 'Deleted')",
"def test_not_author_delete_post(self):\n self.client.login(username=\"Bill\", password=\"newpass1234\")\n response = self.client.post('/posts/1/delete/', {\"next\": \"\"})\n self.assertNotEqual(list(Post.objects.filter(id=1)), [])",
"def add_post(request, topic_id, post_id = False):\n\t\n\ttopic = Topic.objects.values('is_locked').get(id=topic_id)\n\tif topic['is_locked']:\n\t\treturn render_to_response('pages/bug.html', {'bug': _('Topic is closed')}, context_instance=RequestContext(request))\n\n\t# check who made the last post.\n\tlastpost = Post.objects.order_by('-post_date').filter(post_topic=topic_id)[:1]\n\tis_staff = request.user.is_staff\n\t# if the last poster is the current one (login) and he isn't staff then we don't let him post after his post\n\tif str(lastpost[0].post_author) == str(request.user) and not is_staff:\n\t\treturn render_to_response('pages/bug.html', {'bug': _('You can\\'t post after your post')}, context_instance=RequestContext(request))\n\t\n\tlastpost = Post.objects.filter(post_topic=topic_id).order_by('-id')[:10]\n\tif request.POST:\n\t\tpage_data = request.POST.copy()\n\t\tpage_data['post_author'] = str(request.user)\n\t\ttags = findall( r'(?xs)\\[code\\](.*?)\\[/code\\]''', page_data['post_text'])\n\t\tfor i in tags:\n\t\t\tpage_data['post_text'] = page_data['post_text'].replace(u'[code]'+i+u'[/code]', u'[code]'+base64.encodestring(i)+u'[/code]')\n\t\tpage_data['post_text'] = html2safehtml(page_data['post_text'] ,valid_tags=settings.VALID_TAGS)\n\t\ttags = findall( r'(?xs)\\[code\\](.*?)\\[/code\\]''', page_data['post_text'])\n\t\tfor i in tags:\n\t\t\tpage_data['post_text'] = page_data['post_text'].replace(u'[code]'+i+u'[/code]', u'[code]'+base64.decodestring(i)+u'[/code]')\n\t\t\n\t\tpage_data['post_ip'] = request.META['REMOTE_ADDR']\n\t\tpage_data['post_topic'] = topic_id\n\t\tpage_data['post_date'] = datetime.now()\n\t\tform = AddPostForm(page_data)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\n\t\t\ttopic = Topic.objects.get(id=topic_id)\n\t\t\tposts = Post.objects.filter(post_topic=topic_id).count()\n\t\t\t\n\t\t\tpmax = posts/10\n\t\t\tpmaxten = posts%10\n\t\t\tif pmaxten != 0:\n\t\t\t\tpmax = pmax+1\n\t\t\t\ttopic.topic_last_pagination_page = pmax\n\t\t\telif pmax > 0:\n\t\t\t\ttopic.topic_last_pagination_page = pmax\n\t\t\telse:\n\t\t\t\tpmax = 1\n\t\t\t\ttopic.topic_last_pagination_page = 1\n\t\t\ttopic.topic_posts = posts\n\t\t\ttopic.topic_lastpost = str(request.user)+'<br />' + str(datetime.today())[:-10]\n\t\t\ttopic.save()\n\t\t\t\n\t\t\tforum = Forum.objects.get(id=topic.topic_forum.id)\n\t\t\tforum.forum_posts = forum.forum_posts +1\n\t\t\t\n\t\t\tforum.forum_lastpost = str(request.user)+' (' + str(datetime.today())[:-10] + ')<br /><a href=\"/forum/topic/' + str(pmax) + '/' + str(topic.id) + '/\">' + topic.topic_name + '</a>'\n\t\t\tforum.save()\n\t\t\t\n\t\t\tmail_admins('Post Dodany', \"Dodano Post: http://www.\" + settings.SITE_KEY + \"/forum/topic/\" + str(pmax) + \"/\" + topic_id +\"/\", fail_silently=True)\n\t\t\treturn HttpResponseRedirect(\"/forum/topic/\" + str(pmax) + \"/\" + topic_id +\"/\")\n\t\telse:\n\t\t\treturn render_to_response(\n\t\t\t\t'myghtyboard/add_post.html',\n\t\t\t\t{'lastpost': lastpost, 'perms': list_perms(request), 'form':form},\n\t\t\t\tcontext_instance=RequestContext(request))\n\telse:\n\t\tif post_id:\n\t\t\tquote = Post.objects.get(id=post_id)\n\t\t\tquote_text = '<blockquote><b>' + quote.post_author + _(' wrote') + ':</b><br /><cite>' + quote.post_text + '</cite></blockquote>\\n\\n'\n\t\telse:\n\t\t\tquote_text = ''\n\treturn render_to_response(\n\t\t'myghtyboard/add_post.html',\n\t\t{'quote_text': quote_text, 'lastpost': lastpost, 'perms': list_perms(request)},\n\t\tcontext_instance=RequestContext(request))"
] |
[
"0.65785074",
"0.6397195",
"0.63286626",
"0.62827975",
"0.6240328",
"0.60869026",
"0.60302997",
"0.5999535",
"0.5980869",
"0.5980869",
"0.5980869",
"0.5914506",
"0.5834592",
"0.5820645",
"0.5819241",
"0.5819241",
"0.57507384",
"0.5684251",
"0.5664793",
"0.5656092",
"0.5635812",
"0.5633849",
"0.5631553",
"0.5616024",
"0.56003827",
"0.55588555",
"0.55528194",
"0.554725",
"0.5543588",
"0.5539669"
] |
0.6586996
|
0
|
Write a string to the temp file.
|
def write_string(s):
time.sleep(0.2)
with open(str(tmp_file), "w") as f:
f.write(s)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _write_to_file(self, string):\n with open(self.p.base_dir + '/' + self.p.filename, 'w') as f:\n f.write(string)",
"def write(self, string):\n self.__file.write(string)",
"def write_file(name_file, string):\n with open(name_file, 'w') as file:\n file.write(string)",
"def safe_write_file(self, fn, text):\n fd, tmpfn = mkstemp(dir=self.temp_dir)\n with open(fd, 'wt') as f:\n f.write(text)\n # https://stackoverflow.com/a/2333979\n f.flush()\n os.fsync(f.fileno())\n os.rename(tmpfn, fn)",
"def WriteStringToFile(string, filepath):\n with open(filepath, 'w') as file_handle:\n file_handle.write(string)",
"def write_file(filename, string):\n import sys\n #ugly fix, hopefully we can find a better one\n if sys.version_info[0] >= 3:\n with open(filename, 'w', encoding=\"utf-8\") as f:\n f.write(string)\n else:\n with open(filename, 'w') as f:\n f.write(string.encode(\"utf-8\"))",
"def writeFile(string_to_write: str, outfile: str):\n with open(outfile, \"w\") as f:\n f.write(string_to_write)",
"def save_to_file(self, string):\n with open(self.output_path, \"w\") as text_file:\n text_file.write(string)\n print \"Saved to file \" + self.output_path",
"def write(self, filename, text):\r\n self._call(\"-rm\", filename)\r\n with temporary_file() as fp:\r\n fp.write(text)\r\n fp.flush()\r\n return self._call('-copyFromLocal', fp.name, filename)",
"def string_to_tempfile(s: str):\n tmp = NamedTemporaryFile()\n tmp.write(bytes(s, encoding=\"utf-8\"))\n tmp.seek(0)\n return tmp",
"def write_file(text):\n\n\ttempfile.tempdir = UPLOAD_FOLDER\n\ttemp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.txt')\n\n\ttext = text.encode('utf8')\n\n\twith open(temp_file.name, 'w') as temp:\n\t\ttemp.write(text)\n\n\tpathparts = (temp.name).split('/')\n\tpath = \"/\".join(pathparts[5:])\n\n\t#returns the temporary file path\n\treturn path",
"def _temp_file(self, val):\n fd, fn = tempfile.mkstemp()\n fp = os.fdopen(fd, \"wb\")\n if val:\n if not isinstance(val, bytes):\n fp.write(val.encode(\"utf-8\", \"surrogateescape\"))\n else:\n fp.write(val)\n fp.close()\n return fn",
"def string_to_file(path_to_file, string_to_write):\n\t\twith open(path_to_file, 'w+') as f:\n\t\t\tf.write(string_to_write)",
"def write(self, str: str, /) -> None:",
"def makeTestFile(text):\n f = tempfile.NamedTemporaryFile()\n f.write(text)\n f.flush()\n return f",
"def save_string(cls, name, string):\n fd = open(cls.dirpath + name + '.txt', 'wb')\n fd.write(string)\n fd.close()",
"def _write(self, s):\n self.fh.write(s)\n self.fh.flush()",
"def save(string, file):\n\n save_file = open(file, 'w')\n save_file.write(string)\n save_file.close()",
"def append_string_to_textfile(filename, string):\n filepath = root + filename\n with open(filepath, 'a+') as file:\n file.write(string + \"\\n\")",
"def write_to_filepath(tmp_str, path='./temp_model111.py', create_dir=True):\n if create_dir:\n # create dir if not exists\n directory = os.path.dirname(path)\n if not os.path.exists(directory):\n os.makedirs(directory)\n with open(path, 'w') as f:\n f.write(tmp_str)\n f.close()",
"def writeFile( str_, *args ):\n filePath = path.join( *args )\n with open( filePath, 'w' ) as fd:\n fd.write(str_)",
"def writeText(outputText, fileName):\n with open(fileName,\"w\") as fileObject:\n fileObject.write(outputText)",
"def save_tmp_file(self, data):\n with open(self.tmp_file, 'wb') as f:\n f.write(data)",
"def write_file(filename=\"\", text=\"\"):\n with open(filename, mode=\"w\", encoding=\"utf-8\") as m:\n return m.write(text)",
"def writeFile(self, name, text):\n\t\ttry:\n\t\t\tf = open(name, 'w')\n\t\t\tf.write (text)\n\t\t\tf.close()\n\t\texcept IOError:\n\t\t\tprint \"Error writing file %s\" % name",
"def test_file(tmpdir):\n file_path = tmpdir / 'test.txt'\n file_path = file_path.write_binary(b'This is some test data!')\n return file_path",
"def strToFile(text, filename):\n output = open(filename,\"w\")\n output.write(text)\n output.close()",
"def strToFile(text, filename):\n output = open(filename,\"w\")\n output.write(text)\n output.close()",
"def write_file(filename=\"\", text=\"\"):\n with open(filename, 'w', encoding='utf-8') as f:\n return f.write(text)",
"def write_file(path_str, content, encoding=\"utf8\"):\n try:\n logging.info(\"Writing value \\\"%s\\\" to file \\\"%s\\\"\", content, path_str)\n with open(path_str, 'w', encoding=encoding) as file_handle:\n file_handle.write(content)\n except IOError as io_err:\n logging.info(\"I/O error(%s): %s\", io_err.errno, io_err.strerror)"
] |
[
"0.7564076",
"0.74288267",
"0.73325247",
"0.70655453",
"0.70291775",
"0.6984853",
"0.6932403",
"0.6910737",
"0.68788034",
"0.68242836",
"0.67888033",
"0.6777633",
"0.6777468",
"0.6729787",
"0.6676116",
"0.66373664",
"0.6525719",
"0.6495248",
"0.63843197",
"0.6327576",
"0.6311675",
"0.63046026",
"0.6274792",
"0.6267465",
"0.62500954",
"0.6249232",
"0.62242067",
"0.62242067",
"0.62100047",
"0.6185755"
] |
0.8023115
|
0
|
Test that shell_command succeeds when the command is known to work.
|
def test_shell_good_command():
out, err = shell_command("ls .")
assert err is None
assert "test" in out
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_shell_bad_command():\n out, err = shell_command(\"ls adasdasdas\")\n assert out is None\n assert \"adasdasdas\" in err",
"def test_process_host_commands(self):\n\n command = [\"df\", \"-h\"]\n output = run(verification.process_host_commands(command))\n self.assertTrue(\"```\\nThat command is not available.```\" not in output)\n\n command = [\"ls\", \"-la\"]\n output = run(verification.process_host_commands(command))\n self.assertEqual(\"```\\nThat command is not available.```\", output)",
"def test_example(self, _, cmd):\n out = subprocess.run(cmd, shell=True)\n self.assertFalse(out.returncode)",
"def test_capture_shell_output():\n\n subprocess.call('ls')\n\n assert False",
"def test_popen(self):\n self.executor.command(['grep', 'foo']).popen()",
"def validate_Exec_Shell(result, _dummy_command, _dummy_regex=None):\n return result is not None",
"def run_command(cmd):\n p = subprocess.Popen(cmd.split(' '), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)\n data = p.communicate()\n return p.returncode == 0",
"def test_command(self):\n\n expected = \"PyFunceble has been written by Fun Ilrys.\"\n actual = Command(\"echo '%s'\" % expected).execute()\n\n self.assertEqual(expected + \"\\n\", actual)",
"def test_system_command(self):\n process = Popen(['ubus'],stdout=PIPE)\n stdout, _ = process.communicate()\n self.assertEqual(process.returncode,0)\n self.assertIn(\"This isn't the real ubus. It's a simulator\",stdout.__str__())",
"def _check_for_cmd(command):\n slab_logger.log(15, 'Checking if %s is installed' % command)\n # Note: Using type git here to establish if posix system has a binary\n # called git instead of which git b/c which often doesn't return\n # proper 0 or 1 exit status' and type does. Which blah on many\n # systems returns 0, which is bad.\n if os.name == \"posix\":\n returncode, myinfo = run_this('type %s' % command)\n return(returncode, myinfo)\n elif os.name == \"nt\":\n # test windows for git\n pass",
"def test_command(self):\n output, _error = self.executor.command(['echo', 'hello']).batch()\n self.assertEqual(output, 'hello\\n')",
"def test_ssh_cmd(self):\n self.assertEqual(general.ssh_command('user','example.com',('ls','-l')).command_line,\n ['ssh','[email protected]','ls','-l'])",
"def test_z_remote_command(self):\n\t\ttheResult = False\n\t\ttry:\n\t\t\timport subprocess\n\t\t\ttheOutputtext = subprocess.check_output([\"which\", \"check_nrpe\"])\n\t\t\tif (str(\"/check_nrpe\") in str(theOutputtext)):\n\t\t\t\ttheResult = True\n\t\texcept Exception:\n\t\t\ttheResult = False\n\t\t\ttry:\n\t\t\t\ttheOutputtext = subprocess.check_output([\"which\", \"ssh\"])\n\t\t\t\tif (str(\"/ssh\") in str(theOutputtext)):\n\t\t\t\t\ttheResult = True\n\t\t\texcept Exception:\n\t\t\t\ttheResult = False\n\t\tassert theResult",
"def is_command_valid(command):\n if not command:\n return False\n\n try:\n # call command silentyly\n with open(devnull, 'wb') as no_out:\n subprocess.call(command, stdout=no_out, stderr=no_out)\n except OSError:\n return False\n else:\n return True",
"def test_check_system_cmd_line(self):\n\n intro = \"Checking your system, this may take a few seconds...\"\n\n cmd = ['pydroid', 'check']\n p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n out = p.communicate()[0]\n self.assertIn(intro, out)\n self.assertTrue('Success' in out or 'Fix' in out)",
"def shell_command(split_cmd, cwd=HERE):\n print(colored(f'Kör \"{\" \".join(split_cmd)}\"', 'blue', attrs=['bold']))\n try:\n sp.run(split_cmd, cwd=cwd, check=True)\n return True\n except sp.CalledProcessError:\n return False",
"def test_cmd(host, cmd, timeout=None):\n ps = host.popen(cmd)\n rs = monitor_ps(ps, timeout)\n if rs is None:\n ps.kill()\n return False\n return rs == 0",
"def _switch(command):\n _LOGGER.info('Running command: %s', command)\n\n success = (subprocess.call(command, shell=True) == 0)\n\n if not success:\n _LOGGER.error('Command failed: %s', command)\n\n return success",
"def sh(cmd):\r\n return check_call(cmd, shell=True)",
"def check_command(self, cmd):\n which = \"which \" + cmd + self.echo_cmd_result\n self.connector.sendline(which)\n i = self.connector.expect(['\\r\\n0\\r\\n', '\\r\\n1\\r\\n', '\\r\\n2\\r\\n'])\n if i == 0:\n debug_log(\"command[%s] found!\", cmd)\n return True\n else:\n warn_log(\"command[%s] not found!\", cmd)\n return False",
"def check_for(command):\n if shutil.which(command) is None:\n print(colored(\"{} not available on system\".format(command),\"red\"))\n sys.exit(1)",
"def test_run_and_check_result(self):\n # Run a successful command.\n result = build_cmake_project.run_and_check_result('echo hello world')\n self.assertTrue(result)\n\n # Run a failure command.\n try:\n result = build_cmake_project.run_and_check_result('unexistent --command')\n except subprocess.CalledProcessError:\n self.fail('Exception thrown when running unexistent command.')\n self.assertFalse(result)",
"def sh(cmd):\n print 'CMD:', cmd\n return check_call(cmd, shell=True)",
"def test_subprocess_fails_with_no_command(self):\n with self.assertRaises(ValueError):\n LazySubprocessTester([])",
"def call_command(command: List[str], shell: bool = False, **kwargs) -> bool:\n log_command(command)\n exit_code = subprocess.call(_format_command(command, shell), shell=shell, **kwargs)\n logger.debug(\"Command exit code: {}\".format(exit_code))\n\n return not bool(exit_code)",
"def shell_command(command, shell=True):\n p = subprocess.Popen(command, stdout=subprocess.PIPE, shell=shell)\n result = p.communicate()[0]\n if result == \"command not known\":\n LOGGER.info(\"command not known \" + err)\n\n return result.strip()",
"def test_sungrow_pipe():\n script_path = path.join(SCRIPT_DIR, COMMAND)\n if platform.system() == 'Windows':\n args = [sys.executable, script_path]\n else:\n args = [COMMAND]\n env = os.environ.copy()\n env['PATH'] = SCRIPT_DIR\n env['PYTHONPATH'] = PARDIR\n ## make script executable to run, then revert\n exec_mask = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH\n mode = os.stat(script_path).st_mode\n os.chmod(script_path, mode | exec_mask)\n try:\n popen = subprocess.Popen(args,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n env=env)\n finally:\n os.chmod(script_path, mode)\n stdout, stderr = popen.communicate()\n return_code = popen.returncode\n ## no promises about exit code on Windows\n if platform.system() != 'Windows':\n assert return_code == EXIT_SYNTAX_ERROR, \\\n '{0} != {1}'.format(return_code, EXIT_SYNTAX_ERROR)\n assert stdout == '', '{0} != \"\"'.format(stdout)\n assert stderr.startswith(USAGE_MESSAGE_HEAD), \\\n '{0!r} does not start with {1!r}'.format(stderr, USAGE_MESSAGE_HEAD)",
"def test_command(self):\r\n with self.assertRaises(SystemExit) as ex:\r\n with self.assertRaisesRegexp(CommandError, 'This script requires.*'):\r\n call_command('git_export', 'blah', 'blah', 'blah',\r\n stderr=StringIO.StringIO())\r\n self.assertEqual(ex.exception.code, 1)\r\n\r\n with self.assertRaises(SystemExit) as ex:\r\n with self.assertRaisesRegexp(CommandError, 'This script requires.*'):\r\n call_command('git_export', stderr=StringIO.StringIO())\r\n self.assertEqual(ex.exception.code, 1)\r\n\r\n # Send bad url to get course not exported\r\n with self.assertRaises(SystemExit) as ex:\r\n with self.assertRaisesRegexp(CommandError, GitExportError.URL_BAD):\r\n call_command('git_export', 'foo/bar/baz', 'silly',\r\n stderr=StringIO.StringIO())\r\n self.assertEqual(ex.exception.code, 1)\r\n # Send bad course_id to get course not exported\r\n with self.assertRaises(SystemExit) as ex:\r\n with self.assertRaisesRegexp(CommandError, GitExportError.BAD_COURSE):\r\n call_command('git_export', 'foo/bar:baz', 'silly',\r\n stderr=StringIO.StringIO())\r\n self.assertEqual(ex.exception.code, 1)",
"def test_unknown_command(self):\n\n self.assertRaises(commands.CommandNotFoundError,\n self.commands.run_command,\n '<unknown_command>', '')",
"def try_cmd(cmd, stdout=None, stderr=None):\n print \"\\n\\n %s \\n\\n\" %cmd\n try:\n retval = sp.check_call(cmd, shell=True, stdout=stdout, stderr=stderr)\n except sp.CalledProcessError:\n print(\"The command:\\n %s \\ndid not work, quitting...\" %cmd)\n sys.exit(0)"
] |
[
"0.8150244",
"0.74268186",
"0.7383249",
"0.7294423",
"0.701333",
"0.6992854",
"0.69897044",
"0.6959436",
"0.6927863",
"0.6918448",
"0.69177306",
"0.6879265",
"0.6870562",
"0.6854691",
"0.68386334",
"0.6801076",
"0.6761413",
"0.67489773",
"0.67297786",
"0.6681002",
"0.6648594",
"0.6640959",
"0.6615072",
"0.66075927",
"0.6600797",
"0.6585231",
"0.6559971",
"0.6548687",
"0.65209883",
"0.65150785"
] |
0.86112136
|
0
|
Test that shell command works when the command is known to work.
|
def test_shell_bad_command():
out, err = shell_command("ls adasdasdas")
assert out is None
assert "adasdasdas" in err
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_shell_good_command():\n out, err = shell_command(\"ls .\")\n assert err is None\n assert \"test\" in out",
"def test_process_host_commands(self):\n\n command = [\"df\", \"-h\"]\n output = run(verification.process_host_commands(command))\n self.assertTrue(\"```\\nThat command is not available.```\" not in output)\n\n command = [\"ls\", \"-la\"]\n output = run(verification.process_host_commands(command))\n self.assertEqual(\"```\\nThat command is not available.```\", output)",
"def test_example(self, _, cmd):\n out = subprocess.run(cmd, shell=True)\n self.assertFalse(out.returncode)",
"def test_capture_shell_output():\n\n subprocess.call('ls')\n\n assert False",
"def test_popen(self):\n self.executor.command(['grep', 'foo']).popen()",
"def validate_Exec_Shell(result, _dummy_command, _dummy_regex=None):\n return result is not None",
"def run_command(cmd):\n p = subprocess.Popen(cmd.split(' '), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)\n data = p.communicate()\n return p.returncode == 0",
"def test_command(self):\n\n expected = \"PyFunceble has been written by Fun Ilrys.\"\n actual = Command(\"echo '%s'\" % expected).execute()\n\n self.assertEqual(expected + \"\\n\", actual)",
"def test_system_command(self):\n process = Popen(['ubus'],stdout=PIPE)\n stdout, _ = process.communicate()\n self.assertEqual(process.returncode,0)\n self.assertIn(\"This isn't the real ubus. It's a simulator\",stdout.__str__())",
"def _check_for_cmd(command):\n slab_logger.log(15, 'Checking if %s is installed' % command)\n # Note: Using type git here to establish if posix system has a binary\n # called git instead of which git b/c which often doesn't return\n # proper 0 or 1 exit status' and type does. Which blah on many\n # systems returns 0, which is bad.\n if os.name == \"posix\":\n returncode, myinfo = run_this('type %s' % command)\n return(returncode, myinfo)\n elif os.name == \"nt\":\n # test windows for git\n pass",
"def test_command(self):\n output, _error = self.executor.command(['echo', 'hello']).batch()\n self.assertEqual(output, 'hello\\n')",
"def test_ssh_cmd(self):\n self.assertEqual(general.ssh_command('user','example.com',('ls','-l')).command_line,\n ['ssh','[email protected]','ls','-l'])",
"def test_z_remote_command(self):\n\t\ttheResult = False\n\t\ttry:\n\t\t\timport subprocess\n\t\t\ttheOutputtext = subprocess.check_output([\"which\", \"check_nrpe\"])\n\t\t\tif (str(\"/check_nrpe\") in str(theOutputtext)):\n\t\t\t\ttheResult = True\n\t\texcept Exception:\n\t\t\ttheResult = False\n\t\t\ttry:\n\t\t\t\ttheOutputtext = subprocess.check_output([\"which\", \"ssh\"])\n\t\t\t\tif (str(\"/ssh\") in str(theOutputtext)):\n\t\t\t\t\ttheResult = True\n\t\t\texcept Exception:\n\t\t\t\ttheResult = False\n\t\tassert theResult",
"def is_command_valid(command):\n if not command:\n return False\n\n try:\n # call command silentyly\n with open(devnull, 'wb') as no_out:\n subprocess.call(command, stdout=no_out, stderr=no_out)\n except OSError:\n return False\n else:\n return True",
"def test_check_system_cmd_line(self):\n\n intro = \"Checking your system, this may take a few seconds...\"\n\n cmd = ['pydroid', 'check']\n p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n out = p.communicate()[0]\n self.assertIn(intro, out)\n self.assertTrue('Success' in out or 'Fix' in out)",
"def shell_command(split_cmd, cwd=HERE):\n print(colored(f'Kör \"{\" \".join(split_cmd)}\"', 'blue', attrs=['bold']))\n try:\n sp.run(split_cmd, cwd=cwd, check=True)\n return True\n except sp.CalledProcessError:\n return False",
"def test_cmd(host, cmd, timeout=None):\n ps = host.popen(cmd)\n rs = monitor_ps(ps, timeout)\n if rs is None:\n ps.kill()\n return False\n return rs == 0",
"def _switch(command):\n _LOGGER.info('Running command: %s', command)\n\n success = (subprocess.call(command, shell=True) == 0)\n\n if not success:\n _LOGGER.error('Command failed: %s', command)\n\n return success",
"def sh(cmd):\r\n return check_call(cmd, shell=True)",
"def check_command(self, cmd):\n which = \"which \" + cmd + self.echo_cmd_result\n self.connector.sendline(which)\n i = self.connector.expect(['\\r\\n0\\r\\n', '\\r\\n1\\r\\n', '\\r\\n2\\r\\n'])\n if i == 0:\n debug_log(\"command[%s] found!\", cmd)\n return True\n else:\n warn_log(\"command[%s] not found!\", cmd)\n return False",
"def check_for(command):\n if shutil.which(command) is None:\n print(colored(\"{} not available on system\".format(command),\"red\"))\n sys.exit(1)",
"def test_run_and_check_result(self):\n # Run a successful command.\n result = build_cmake_project.run_and_check_result('echo hello world')\n self.assertTrue(result)\n\n # Run a failure command.\n try:\n result = build_cmake_project.run_and_check_result('unexistent --command')\n except subprocess.CalledProcessError:\n self.fail('Exception thrown when running unexistent command.')\n self.assertFalse(result)",
"def sh(cmd):\n print 'CMD:', cmd\n return check_call(cmd, shell=True)",
"def test_subprocess_fails_with_no_command(self):\n with self.assertRaises(ValueError):\n LazySubprocessTester([])",
"def call_command(command: List[str], shell: bool = False, **kwargs) -> bool:\n log_command(command)\n exit_code = subprocess.call(_format_command(command, shell), shell=shell, **kwargs)\n logger.debug(\"Command exit code: {}\".format(exit_code))\n\n return not bool(exit_code)",
"def shell_command(command, shell=True):\n p = subprocess.Popen(command, stdout=subprocess.PIPE, shell=shell)\n result = p.communicate()[0]\n if result == \"command not known\":\n LOGGER.info(\"command not known \" + err)\n\n return result.strip()",
"def test_sungrow_pipe():\n script_path = path.join(SCRIPT_DIR, COMMAND)\n if platform.system() == 'Windows':\n args = [sys.executable, script_path]\n else:\n args = [COMMAND]\n env = os.environ.copy()\n env['PATH'] = SCRIPT_DIR\n env['PYTHONPATH'] = PARDIR\n ## make script executable to run, then revert\n exec_mask = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH\n mode = os.stat(script_path).st_mode\n os.chmod(script_path, mode | exec_mask)\n try:\n popen = subprocess.Popen(args,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n env=env)\n finally:\n os.chmod(script_path, mode)\n stdout, stderr = popen.communicate()\n return_code = popen.returncode\n ## no promises about exit code on Windows\n if platform.system() != 'Windows':\n assert return_code == EXIT_SYNTAX_ERROR, \\\n '{0} != {1}'.format(return_code, EXIT_SYNTAX_ERROR)\n assert stdout == '', '{0} != \"\"'.format(stdout)\n assert stderr.startswith(USAGE_MESSAGE_HEAD), \\\n '{0!r} does not start with {1!r}'.format(stderr, USAGE_MESSAGE_HEAD)",
"def test_command(self):\r\n with self.assertRaises(SystemExit) as ex:\r\n with self.assertRaisesRegexp(CommandError, 'This script requires.*'):\r\n call_command('git_export', 'blah', 'blah', 'blah',\r\n stderr=StringIO.StringIO())\r\n self.assertEqual(ex.exception.code, 1)\r\n\r\n with self.assertRaises(SystemExit) as ex:\r\n with self.assertRaisesRegexp(CommandError, 'This script requires.*'):\r\n call_command('git_export', stderr=StringIO.StringIO())\r\n self.assertEqual(ex.exception.code, 1)\r\n\r\n # Send bad url to get course not exported\r\n with self.assertRaises(SystemExit) as ex:\r\n with self.assertRaisesRegexp(CommandError, GitExportError.URL_BAD):\r\n call_command('git_export', 'foo/bar/baz', 'silly',\r\n stderr=StringIO.StringIO())\r\n self.assertEqual(ex.exception.code, 1)\r\n # Send bad course_id to get course not exported\r\n with self.assertRaises(SystemExit) as ex:\r\n with self.assertRaisesRegexp(CommandError, GitExportError.BAD_COURSE):\r\n call_command('git_export', 'foo/bar:baz', 'silly',\r\n stderr=StringIO.StringIO())\r\n self.assertEqual(ex.exception.code, 1)",
"def test_unknown_command(self):\n\n self.assertRaises(commands.CommandNotFoundError,\n self.commands.run_command,\n '<unknown_command>', '')",
"def try_cmd(cmd, stdout=None, stderr=None):\n print \"\\n\\n %s \\n\\n\" %cmd\n try:\n retval = sp.check_call(cmd, shell=True, stdout=stdout, stderr=stderr)\n except sp.CalledProcessError:\n print(\"The command:\\n %s \\ndid not work, quitting...\" %cmd)\n sys.exit(0)"
] |
[
"0.86112136",
"0.74268186",
"0.7383249",
"0.7294423",
"0.701333",
"0.6992854",
"0.69897044",
"0.6959436",
"0.6927863",
"0.6918448",
"0.69177306",
"0.6879265",
"0.6870562",
"0.6854691",
"0.68386334",
"0.6801076",
"0.6761413",
"0.67489773",
"0.67297786",
"0.6681002",
"0.6648594",
"0.6640959",
"0.6615072",
"0.66075927",
"0.6600797",
"0.6585231",
"0.6559971",
"0.6548687",
"0.65209883",
"0.65150785"
] |
0.8150244
|
1
|
This function will run the command for number_to_run number of times. For each run, it will capture the run time for the unit test and will move on to the next test if it takes too long to run this one.
|
def run_commands(command, number_to_run, temp_file):
global g_max_runtime_secs
global g_finished_this_unit_test
temp_string = command.split()
testname = temp_string[-1]
temp_string = testname.split('/')
full_command = command + ' > ' + temp_file
g_finished_this_unit_test = False
for run_index in range(0, number_to_run):
if g_finished_this_unit_test:
break
child = subprocess.Popen(full_command, shell=True)
while child.poll() is None:
time.sleep(20)
# subprocess.call(full_command, shell=True) # run the command,
with open(temp_file, 'r') as thefile: # go into tempfile and grab test run info
for each_line in thefile:
temp_string = each_line.split()
if len(temp_string) > 0:
if temp_string[0] == 'PASS':
test_time = temp_string[2]
try:
runtime = float(test_time[:-1])
print("Unit test run time is {0}".format(runtime))
if runtime > g_max_runtime_secs:
g_finished_this_unit_test = True
                        except ValueError:
                            print("Cannot convert run time. It is {0}\n".format(test_time))
break
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def run_test(_freq, cmd):\n for count in range(_freq):\n os.system(cmd.replace(\"result\", \"result\" + str(count + 1)))",
"def main(argv):\n global g_test_root_dir\n global g_temp_filename\n\n if len(argv) < 2:\n print(\"invoke this script as python collectUnitTestRunTime.py 10 'python run.py_path/run.py --wipe \"\n \"--test dir_to_test/test1,python run.py_path/run.py --wipe --test dir_to_test2/test2,...' True\\n\")\n sys.exit(1)\n else: # we may be in business\n repeat_number = int(argv[1]) # number of times to run a unit test\n command_lists = argv[2] # list of unit tests to run\n\n for command in command_lists.split(','): # for each command in the list\n # run command repeat_number of times and collect results into result_dict\n run_commands(command, repeat_number, g_temp_filename)",
"def run_trials(self, num=0):\n if num == 'all':\n self.trials_to_run = len(self.trials)\n else:\n self.trials_to_run = num\n self.vision_egg.go()",
"def run_multiple_test_cycles(self):\n # Perform as many cycles as required\n while self.args.repetitions >= 0:\n self.run_one_test_cycle()\n self.args.repetitions -= 1",
"def runner_scenario_x_times(repetitions, scenario_names, feature_files, out):\n if scenario_names is not None:\n to_test = scenario_names\n elif feature_files is not None:\n to_test = feature_files\n else:\n to_test = \"testsuite\"\n msg = (\"\\nRunning \" + str(repetitions) + \" times test(s):\\n \" \n + str(to_test) + \"\\n\")\n print(msg)\n if out:\n out_name = os.path.splitext(out)[0]\n ext = os.path.splitext(out)[1]\n for i in range(repetitions):\n print(\"Iteration number: \" + str(i+1))\n if out:\n out = out_name + \"-\" + str(i) + ext\n p = Process(target=worker_scenario, \n args=(scenario_names, feature_files, out))\n p.start()\n p.join()",
"def test_numbers(number):\n print(\"\\nRunning test_numbers with {}\".format(number))",
"def run_trials(environ, total):\n run_times = []\n\n for i in range(0, total):\n environ.run()\n run_times.append(environ.total_time)\n\n return run_times",
"def run_tests(self):\n with self.report.timer.record(\"run\"):\n self.result.report.extend(self._run_tests())",
"def test_time(cmd, samples=16, warmup=4):\n # do testing\n print()\n avg_time = 0\n for s in range(samples + warmup):\n # report progress\n progress = s / (samples + warmup)\n print(CSI_UP + CSI_CLEARLN + \"Testing [{}%]\".format(floor(progress * 100)))\n\n output = shell(cmd) # run command\n tables = csv_mt.read_string(output, parse_float=True) # parse its output\n time = tables[\"statistics\"][\"time_us\"][0] # get its timing data\n\n # skip a few runs to let the system \"warm up\"\n if s >= warmup:\n avg_time += time / samples # compute average execution time\n\n # log the average time for this test case\n return avg_time",
"def run_one(num):\n start = time.time()\n if not config.get('radosbench'):\n benchcontext = {}\n else:\n benchcontext = copy.copy(config.get('radosbench'))\n iterations = 0\n while time.time() - start < int(config.get('time', 600)):\n log.info(\"Starting iteration %s of segment %s\"%(iterations, num))\n benchcontext['pool'] = str(num) + \"-\" + str(iterations)\n with radosbench.task(ctx, benchcontext):\n time.sleep()\n iterations += 1",
"def run_one_test_cycle(self):\n logging.info(\n \"{0} operations remaining: {1}\".format(\n self.args.pm_operation, self.args.repetitions\n )\n )\n\n self.check_last_cycle_duration()\n if self.args.repetitions > 0:\n self.run_pm_command()\n else:\n self.summary()",
"def run_simulation(self, num_games=10):\n for _ in range(num_games):\n self.result.append(self.single_game())",
"def test_multiple_commands_at_same_time(self):",
"def run_tests():\n os.environ['WORKDIR'] = CONFIG['workdir']\n os.environ['REPORTDIR'] = CONFIG['reportFolder']\n stdout = subprocess.DEVNULL\n if CONFIG['verbose']:\n stdout = None\n # cycle throught version\n total = 0\n valid = 0\n start = time.time()\n for version in utils.get_dirs(CONFIG['versionsFolder']):\n os.environ['VERSION'] = version\n utils.copy_dir(os.path.join(CONFIG['versionsFolder'], version), CONFIG['workdir']\\\n , CONFIG['clearWorkdir'])\n # cycle throught use case\n for usecase in utils.get_dirs(CONFIG['testsFolder']):\n os.environ['TESTDIR'] = usecase\n if not CONFIG['quiet']:\n print('UseCase test: {}'.format(usecase))\n log_msg('info', 'UseCase test: {}'.format(usecase))\n try:\n folder = os.path.join(CONFIG['testsFolder'], usecase)\n with open(os.path.join(folder, CONFIG['useConfig'])) as usefp:\n jconfig = json.load(usefp)\n # clear workdir if desired\n if 'clearWorkdir' in jconfig.keys() and jconfig['clearWorkdir']:\n utils.copy_dir(os.path.join(CONFIG['versionsFolder'], version)\\\n , CONFIG['workdir'], CONFIG['clearWorkdir'])\n # print('clearing')\n # raise\n cmd = ['py', os.path.join(folder, jconfig['entrypoint'])]\n total += 1\n if jconfig['runType'] == 'single':\n subprocess.run(cmd, stdout=stdout, stderr=subprocess.PIPE, check=True)\n else:\n for step in range(jconfig['numRuns']):\n if not CONFIG['quiet']:\n print('\\r >Step {}/{} '.format(step+1, jconfig['numRuns'])\\\n , end='', flush=True)\n log_msg('info', 'Step {}/{}'.format(step+1, jconfig['numRuns']))\n subprocess.run(cmd, stdout=stdout, stderr=subprocess.PIPE, check=True)\n if step+1 != jconfig['numRuns']:\n time.sleep(jconfig['interval'])\n except subprocess.CalledProcessError as excp:\n if not CONFIG['quiet']:\n print('Error msg:{}'\\\n .format(excp.stderr.decode().replace('\\r', '').replace('\\n', '|')))\n log_msg('error', excp.stderr.decode())\n else:\n valid += 1\n if not CONFIG['quiet']:\n print('{}.....Passed'.format(usecase))\n log_msg('info', '{} Passed'.format(usecase))\n\n elapse = time.time()-start\n log_msg('info', 'Ran {} tests in {:.3f}s with {} passed'.format(total, elapse, valid))\n print('-'*20)\n print('Ran {} tests in {:.3f}s with {} passed.'.format(total, elapse, valid))\n return total-valid",
"def run_tests():\n def print_result(result, correct):\n if result == correct:\n print(\" OK!\")\n else:\n print(f\" Failed ({result} != {correct})!\")\n for n, test in enumerate(_tests, start=1):\n print(f\"Running test {n}...\")\n nums = line2ints(test[\"in\"])\n try:\n correct = test[\"part1\"]\n except KeyError:\n pass\n else:\n print(\" Testing part 1...\", end=\"\")\n result = part1(nums, steps=test.get(\"phases1\", 100))\n print_result(result, correct)\n try:\n correct = test[\"part2\"]\n except KeyError:\n pass\n else:\n print(\" Testing part 2...\", end=\"\")\n result = part2(nums, steps=test.get(\"phases2\", 100))\n print_result(result, correct)",
"def test_after_jam_step_two(self):\n for test_suite_class in self.jam_step_2_test_suite_list:\n test_suite = test_suite_class(self)\n results = test_suite.run()\n self.test_results += results",
"def run(self, args):\n start_time = time.time()\n self._printer.write_update('Collecting tests ...')\n running_all_tests = False\n\n try:\n paths, all_test_names, running_all_tests = self._collect_tests(\n args)\n except IOError:\n # This is raised if --test-list doesn't exist\n return test_run_results.RunDetails(\n exit_code=exit_codes.NO_TESTS_EXIT_STATUS)\n\n test_names = self._finder.split_into_chunks(all_test_names)\n if self._options.order == 'natural':\n test_names.sort(key=self._port.test_key)\n elif self._options.order == 'random':\n test_names.sort()\n random.Random(self._options.seed).shuffle(test_names)\n elif self._options.order == 'none':\n # Restore the test order to user specified order.\n # base.tests() may change the order as it returns tests in the\n # real, external/wpt, virtual order.\n if paths:\n test_names = self._restore_order(paths, test_names)\n\n if not self._options.no_expectations:\n self._printer.write_update('Parsing expectations ...')\n self._expectations = test_expectations.TestExpectations(self._port)\n\n tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)\n\n self._printer.print_found(\n len(all_test_names), len(test_names), len(tests_to_run),\n self._options.repeat_each, self._options.iterations)\n\n # Check to make sure we're not skipping every test.\n if not tests_to_run:\n msg = 'No tests to run.'\n if self._options.zero_tests_executed_ok:\n _log.info(msg)\n # Keep executing to produce valid (but empty) results.\n else:\n _log.critical(msg)\n code = exit_codes.NO_TESTS_EXIT_STATUS\n return test_run_results.RunDetails(exit_code=code)\n\n exit_code = self._set_up_run(tests_to_run)\n if exit_code:\n return test_run_results.RunDetails(exit_code=exit_code)\n\n if self._options.num_retries is None:\n # If --test-list is passed, or if no test narrowing is specified,\n # default to 3 retries. Otherwise [e.g. 
if tests are being passed by\n # name], default to 0 retries.\n if self._options.test_list or len(paths) < len(test_names):\n self._options.num_retries = 3\n else:\n self._options.num_retries = 0\n\n should_retry_failures = self._options.num_retries > 0\n\n try:\n self._register_termination_handler()\n self._start_servers(tests_to_run)\n if self._options.watch:\n run_results = self._run_test_loop(tests_to_run, tests_to_skip)\n else:\n run_results = self._run_test_once(tests_to_run, tests_to_skip,\n should_retry_failures)\n initial_results, all_retry_results = run_results\n finally:\n _log.info(\"Finally stop servers and clean up\")\n self._stop_servers()\n self._clean_up_run()\n\n if self._options.no_expectations:\n return test_run_results.RunDetails(0, [], [], initial_results,\n all_retry_results)\n\n # Some crash logs can take a long time to be written out so look\n # for new logs after the test run finishes.\n self._printer.write_update('Looking for new crash logs ...')\n self._look_for_new_crash_logs(initial_results, start_time)\n for retry_attempt_results in all_retry_results:\n self._look_for_new_crash_logs(retry_attempt_results, start_time)\n\n self._printer.write_update('Summarizing results ...')\n summarized_full_results = test_run_results.summarize_results(\n self._port, self._options, self._expectations, initial_results,\n all_retry_results)\n summarized_failing_results = test_run_results.summarize_results(\n self._port,\n self._options,\n self._expectations,\n initial_results,\n all_retry_results,\n only_include_failing=True)\n run_histories = test_run_results.test_run_histories(\n self._options, self._expectations, initial_results,\n all_retry_results)\n\n exit_code = summarized_failing_results['num_regressions']\n if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:\n _log.warning('num regressions (%d) exceeds max exit status (%d)',\n exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)\n exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS\n\n if not self._options.dry_run:\n self._write_json_files(summarized_full_results,\n summarized_failing_results, initial_results,\n running_all_tests, run_histories)\n\n self._copy_results_html_file(self._artifacts_directory,\n 'results.html')\n if (initial_results.interrupt_reason is\n test_run_results.InterruptReason.EXTERNAL_SIGNAL):\n exit_code = exit_codes.INTERRUPTED_EXIT_STATUS\n else:\n if initial_results.interrupted:\n exit_code = exit_codes.EARLY_EXIT_STATUS\n if (self._options.show_results\n and (exit_code or initial_results.total_failures)):\n self._port.show_results_html_file(\n self._filesystem.join(self._artifacts_directory,\n 'results.html'))\n self._printer.print_results(time.time() - start_time,\n initial_results)\n\n return test_run_results.RunDetails(exit_code, summarized_full_results,\n summarized_failing_results,\n initial_results, all_retry_results)",
"def run_tests():\n passed_tests = 0\n failed_tests = 0\n for case in TEST_CASES:\n start_date, end_date = [datetime.strptime(x, \"%d/%m/%Y\") for x in case[0]]\n experiment = Experiment(start_date, end_date)\n if experiment.duration() == case[1]:\n result = \"passed\"\n passed_tests += 1\n else:\n result = \"failed\"\n failed_tests += 1\n print(f\"\"\"{\"-\".join(case[0])}, {case[1]} days: Test {result}\"\"\")\n\n print(\n f\"All tests completed\\n\"\n f\"Number of tests passed: {passed_tests}\\n\"\n f\"Number of tests failed: {failed_tests}\"\n )",
"def set_runs_per_restart(self, num):\n raise NotImplementedError()",
"def test_unlimited_run_slow(self):\n cmds = ['/bin/sleep 0',\n '/bin/sleep 1',\n '/bin/sleep 2',]\n\n q = QueueCommands(cmds, 3)\n start = time.time()\n q.run()\n end = time.time()-start\n # we should only take the length of the longest sleep\n self.assertTrue( end > 1.8 and end < 2.2,\n \"took %s seconds, exected ~2\" % (end,))",
"def run(self):\n times = []\n for attempt in range(self.attempts):\n with self._generate_data() as tpath:\n for dname in self.commands:\n dpath = osp.join(self._scripts_dpath, dname)\n for fname in os.listdir(dpath):\n if self.libs and not osp.splitext(fname)[0] in self.libs:\n _logger.info('skipped %s %s', dname, fname)\n else:\n fpath = osp.join(dpath, fname)\n process = Popen([fpath, tpath], stdout=PIPE)\n process.wait()\n if process.returncode:\n _logger.warn('error running %s %s', dname, fname)\n else:\n times.append({\n 'attempt': attempt,\n 'schema': self.name,\n 'command': dname,\n 'library': osp.splitext(fname)[0],\n 'n_records': self.n_records,\n 'ms_per_record': float(process.stdout.read())\n })\n _logger.info('finished %s %s', dname, fname)\n return times",
"def test_multiple_games(self, iteration=10):\n # TODO: multithread?\n for i in range(iteration):\n self.test_one_game()",
"def test_concurrent_test_runs(self):\n num_passing_tests = 20\n num_failing_tests = 20\n num_error_tests = 20\n total_num_tests = num_passing_tests + num_failing_tests + num_error_tests\n\n times = [0] + [i for i in range(2 * total_num_tests)\n ] + [2 * total_num_tests - 1]\n result = self._make_result(times)\n threads = []\n names = []\n result.startTestRun()\n for i in range(num_passing_tests):\n name = 'passing_concurrent_test_%s' % i\n names.append(name)\n test_name = '__main__.MockTest.%s' % name\n # xml_reporter uses id(test) as the test identifier.\n # In a real testing scenario, all the test instances are created before\n # running them. So all ids will be unique.\n # We must do the same here: create test instance beforehand.\n test = MockTest(test_name)\n threads.append(threading.Thread(\n target=self._simulate_passing_test, args=(test, result)))\n for i in range(num_failing_tests):\n name = 'failing_concurrent_test_%s' % i\n names.append(name)\n test_name = '__main__.MockTest.%s' % name\n test = MockTest(test_name)\n threads.append(threading.Thread(\n target=self._simulate_failing_test, args=(test, result)))\n for i in range(num_error_tests):\n name = 'error_concurrent_test_%s' % i\n names.append(name)\n test_name = '__main__.MockTest.%s' % name\n test = MockTest(test_name)\n threads.append(threading.Thread(\n target=self._simulate_error_test, args=(test, result)))\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n\n result.stopTestRun()\n result.printErrors()\n tests_not_in_xml = []\n for tn in names:\n if tn not in self.xml_stream.getvalue():\n tests_not_in_xml.append(tn)\n msg = ('Expected xml_stream to contain all test %s results, but %s tests '\n 'are missing. List of missing tests: %s' % (\n total_num_tests, len(tests_not_in_xml), tests_not_in_xml))\n self.assertEqual([], tests_not_in_xml, msg)",
"def try_run(ctx, cmd, n):\n\n for _ in range(n):\n res = ctx.run(cmd, warn=True)\n if res.exited is None or res.exited > 0:\n print(\n color_message(\n f\"Failed to run \\\"{cmd}\\\" - retrying\",\n \"orange\",\n )\n )\n continue\n return True\n return False",
"def test_run(self):\n files = [\n (\"AS1-1.phy_r8s.txt\", \"AS1-1.phy_r8s.txt_2.5.txt\"),\n (\"AS1-3.phy_r8s.txt\", \"AS1-3.phy_r8s.txt_2.5.txt\"),\n (\"AS1-4.phy_r8s.txt\", \"AS1-4.phy_r8s.txt_2.5.txt\"),\n ]\n for file_pair in files:\n input_file = file_pair[0]\n expected_file = file_pair[1]\n infile = self.test_data_path + input_file\n outfile = self.test_data_path + expected_file\n divnum = 2.5\n result = run(infile, divnum)\n\n with open(outfile) as handle:\n expected_result = handle.read()\n self.assertEqual(expected_result, result)",
"def run_simulation(self, number_runs = 1):\n for i in range(0, number_runs):\n self.ques = [self.start for i in range(0, self.numQueues)]\n run = self.__single_sim_results()\n run_results = pd.DataFrame({'simulation':i,\n 'num_items': len(run),\n 'wait_count': len(run[run['wait_time']>datetime.timedelta(seconds=0)]),\n 'avg_wait_time': run.wait_time.mean(),\n 'close_time': max(run['appt_end_time'])}, index=[i])\n self.results = pd.concat([self.results, run_results], ignore_index=True)\n self.results['last_appt_to_close_minutes'] = (self.results['close_time']-self.end).dt.total_seconds().div(60)\n return",
"def run_test_cases(self):\n line = (\n '{reindeer} can fly {speed} km/s for {time} seconds'\n ', but then must rest for {rest} seconds.'\n )\n inputs = (\n line.format(reindeer='Comet', speed=14, time=10, rest=127),\n line.format(reindeer='Dancer', speed=16, time=11, rest=162),\n line.format(reindeer='Vixen', speed=18, time=12, rest=207),\n line.format(reindeer='Prancer', speed=20, time=13, rest=264),\n )\n test_cases = (\n solver.TestCase('\\n'.join(inputs[:1]), 2660, 2503),\n solver.TestCase('\\n'.join(inputs[:2]), 2660, 1564),\n solver.TestCase('\\n'.join(inputs[:3]), 2660, 1101),\n solver.TestCase('\\n'.join(inputs), 2660, 994),\n solver.TestCase('\\n'.join(inputs[1:]), 2640, 1201),\n solver.TestCase('\\n'.join(inputs[2:]), 2592, 1517),\n solver.TestCase('\\n'.join(inputs[3:]), 2540, 2503),\n )\n for test_case in test_cases:\n self._run_test_case(test_case)",
"def run_tests(tests_to_run: List[Dict[str, Union[str, List[str]]]],\n chromium_absolute_build_path: str, num_threads: int,\n perf_counters: List[str]):\n test_data = []\n for test in tests_to_run:\n executable_path = os.path.join(chromium_absolute_build_path,\n test['executable'])\n test_data.extend(\n gtest_executable_utils.run_test_suite(test, executable_path,\n perf_counters, num_threads))\n return test_data",
"def run_all_tests(self):\n for index in range(len(self.__test_set_list)):\n self.run_test(index)",
"def run_tests(remit, sourcelist):\n for source in sourcelist:\n # - move into source's directory\n os.chdir(source)\n # - build worklist of commands\n commands = list()\n commands += test_matrix(remit, source)\n commands += extra_tests(remit, source)\n commands = remove_blacklist(remit, source, commands)\n # - run the commands\n for i, command in enumerate(commands):\n print('[test %s: %s of %d] %s'\n % (source,\n str(i+1).rjust(len(str(len(commands)))),\n len(commands),\n ' '.join(command)))\n subprocess.call(command)\n # - move out of source's directory\n os.chdir('..')"
] |
[
"0.6575637",
"0.6521473",
"0.64306206",
"0.6430121",
"0.64166987",
"0.63451195",
"0.61504436",
"0.6135496",
"0.6086589",
"0.6084007",
"0.60143",
"0.6000346",
"0.59813243",
"0.59571207",
"0.58898485",
"0.5881376",
"0.58780885",
"0.5875602",
"0.5844247",
"0.5824281",
"0.58155006",
"0.5810804",
"0.581074",
"0.5802843",
"0.5793986",
"0.57916236",
"0.5782353",
"0.5775135",
"0.5766671",
"0.5756062"
] |
0.7836011
|
0
|
Main program. Take user input, parse it, and call other functions to execute the commands and find long-running unit tests; store the dataset and parameter settings and move on to the next unit test if applicable.
|
def main(argv):
global g_test_root_dir
global g_temp_filename
if len(argv) < 2:
print("invoke this script as python collectUnitTestRunTime.py 10 'python run.py_path/run.py --wipe "
"--test dir_to_test/test1,python run.py_path/run.py --wipe --test dir_to_test2/test2,...' True\n")
sys.exit(1)
else: # we may be in business
repeat_number = int(argv[1]) # number of times to run a unit test
command_lists = argv[2] # list of unit tests to run
for command in command_lists.split(','): # for each command in the list
# run command repeat_number of times and collect results into result_dict
run_commands(command, repeat_number, g_temp_filename)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def run(self):\n if self.verbose:\n print(f'Running {self.name} tests...')\n\n # try running setup if there is one\n if self.setup:\n self.__process_setup()\n\n final_report = [None] * len(self.tests)\n\n for test_in, test_out in sorted(self.tests.items()):\n # increment total num of tests\n self.total += 1\n\n if self.verbose:\n print(f'#{self.total}')\n\n # evaluate test input w/ setup vars, if any\n try:\n inp = eval(test_in, self.vars)\n except Exception as err:\n print(f'Issue during evaluation of test input: {err}')\n final_report[self.total - 1] = 'input eval error'\n if self.verbose:\n print(f'Test input was: {test_in}')\n print('Vars from execution: {}'.format({k : v for k, v in self.vars.items() if k != '__builtins__'}))\n continue\n\n \n # checking if function input has more than one arg\n if type(inp) in (list, tuple):\n try:\n student_out = self.student_function(*inp)\n except Exception as err:\n print(f'Issue while running student code: {err}')\n final_report[self.total - 1] = f'student code error: {err}; input: {inp}; func_name: {self.name}'\n if self.verbose:\n print(f'Function being run was: {self.name}')\n print(f'Inputs were: {inp}')\n continue\n else:\n try:\n student_out = self.student_function(inp)\n except Exception as err:\n print(f'Issue while running student code: {err}')\n final_report[self.total - 1] = f'student code error: {err}; input: {inp}; func_name: {self.name}'\n if self.verbose:\n print(f'Function being run was: {self.name}')\n print(f'Input was: {inp}')\n continue\n\n # ans alias for ease of answer checking\n self.vars['ans'] = student_out\n\n if self.schema:\n format_vals = eval(test_out, self.vars)\n results, maybe_failed_schema = self.__process_schema(format_vals)\n if all(results):\n self.correct += 1\n final_report[self.total - 1] = 'PASSED'\n else:\n # failed at least one of the tests\n failed_str = \" and \".join([\", \".join(maybe_failed_schema[:-1]),maybe_failed_schema[-1]] if len(maybe_failed_schema) > 2 else maybe_failed_schema)\n final_report[self.total - 1] = f'FAILED; failed following assertion(s): {failed_str}'\n else:\n expected_ans = eval(test_out, self.vars)\n if student_out == expected_ans:\n self.correct += 1\n final_report[self.total - 1] = 'PASSED'\n else:\n # failed the only test\n final_report[self.total - 1] = f'FAILED; got {repr(student_out)} but expected {repr(expected_ans)}'\n\n # run callback function, if there is one\n if self.callback:\n if self.verbose:\n print('Running callback...')\n print('call back is:', self.callback)\n\n # once done, put the final report on the queue\n self.queue.put((self.student_username, self.name, f'{self.correct}/{self.total}', final_report))",
"def main():\n\n # Parse arguments. The parser will raise an exception if required arguments are not present.\n parser = argparse.ArgumentParser()\n\n subparsers = parser.add_subparsers(dest='command')\n\n # Arguments for the runtest command.\n cmd_runtest = subparsers.add_parser('runtest')\n runtest_required_named = cmd_runtest.add_argument_group('named arguments')\n runtest_required_named.add_argument('-c', '--corpus',\n help='Corpus root directory containing all speakers.',\n metavar='corpus',\n required=True)\n runtest_required_named.add_argument('-o', '--csvout',\n help='CSV output file.',\n metavar='csvout',\n required=True)\n runtest_required_named.add_argument('-i', '--impl',\n help='Test runner implementation: fast, medium or slow.',\n metavar='impl',\n required=False,\n default='fastest')\n\n # Arguments for the analyse command.\n cmd_analyse = subparsers.add_parser('analyse')\n analyse_required_named = cmd_analyse.add_argument_group('named arguments')\n analyse_required_named.add_argument('-r', '--results',\n help='Input CSV results file.',\n metavar='results',\n required=True)\n analyse_required_named.add_argument('-t', '--th_user',\n help='User-defined threshold.',\n metavar='th_user',\n required=False,\n type=float,\n default=5.79)\n\n # Parse the arguments.\n args = parser.parse_args()\n\n # Dispatch to the correct command.\n if args.command == 'runtest':\n do_runtest(args)\n elif args.command == 'analyse':\n do_analyse(args)\n else:\n raise ValueError('Unknown command {}'.format(args.command))",
"def main():\n argument_parser = argparse.ArgumentParser(add_help=True)\n argument_parser.add_argument(\"directory\", type=str,\n help=\"Directory to detect test smells.\")\n args = argument_parser.parse_args()\n \n if len(sys.argv) < 1:\n \n argument_parser.print_help()\n \n else:\n \n if os.path.exists(args.directory) or os.path.isdir(args.directory):\n\n #Stage 1: project level rule checking\n files = python_parser.get_python_files(os.path.abspath(args.directory))\n results_list = project_rule_runner(files)\n \n #Stage 2: test case level rule checking\n #test_case_pairs_list is a list of test cases paired with their file of origin\n filtered_files = python_parser.filter_python_files(files)\n test_case_pairs_list = python_parser.get_test_case_asts(filtered_files)\n \n for test_case_pair in test_case_pairs_list:\n results_list = results_list + test_case_rule_runner(test_case_pair)\n \n #Stage 3: test method level rule checking\n test_method_list = list()\n \n for test_case_pair in test_case_pairs_list:\n test_method_list = test_method_list + python_parser.get_test_asts(test_case_pair)\n \n for test_method in test_method_list: \n results_list = results_list + test_method_rule_runner(test_method)\n \n #Output formatting\n format_output(results_list)\n \n else:\n print(\"Invalid path given.\")",
"def main():\n parser = argparse.ArgumentParser(description=\"\"\"Tester for YT Data API and different inputs\"\"\")\n parser.add_argument('-a', '--analytics', help='Performs a basic analytics lookup for the user\\'s channel entered')\n parser.add_argument('-c', '--comments', help='Performs a lookup of comments for the video id entered')\n args = parser.parse_args()\n\n if args.analytics:\n analytics = args.analytics\n analyt(analytics)\n\n if args.comments:\n comments = args.comments\n get_comments(comments)",
"def main(cli_args=None):\n # build an arg parser\n parser = get_arg_parser()\n\n # run the parser on cli args\n args = parser.parse_args(cli_args)\n\n print(f\"Running script with arguments: {args}\")\n test_input(args.raw_training_data)\n test_input(args.raw_testing_data)\n test_output(args.train_output)\n test_output(args.test_output)",
"def main():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"api_base_url\", type=str,\n help=\"base url for all tests\")\n parser.add_argument(\"test_file_name\", type=str,\n help=\"name of file containing JSON array of tests\")\n parser.add_argument(\"-f\", \"--format\", default=\"json\", type=str,\n help=\"output format - must be either json or text\")\n\n args = parser.parse_args()\n\n try:\n\n run_tests_from_file(args.api_base_url, args.test_file_name, \n args.format)\n\n except KeyError as e:\n print(\"Required key '%s' not found. Check tests file.\" % str(e.args[0]))\n exit(1)\n\n except FileNotFoundError:\n print(\"Cannot open file '%s'. File not found.\" % args.test_file_name)\n exit(1)\n\n except ValueError:\n print(\"Cannot decode JSON from file '%s'.\" % args.test_file_name)\n exit(1)",
"def main():\n log.configureLogger(log=log.testlog, fileLog=log.TESTLOGFILE, format=log.TESTFORMAT, truncate=True)\n log.configureLogger(log=log.log, fileLog=log.LOGFILE)\n try:\n opts, args = getopt.getopt(sys.argv[1:], \"h\", [\"help\", \"file=\", \"verbosity=\",\n \"settings=\"]) \n # process options\n testfile = None\n inifile = None\n\n verbosity = 2 \n for o, a in opts:\n if o in (\"-h\", \"--help\"):\n print __doc__\n sys.exit(0)\n if o in (\"--file\",):\n testfile = a\n logger.debug(\"starting %s\" % testfile)\n if o in (\"--settings\",):\n inifile = a\n if o in (\"--verbosity\",):\n verbosity = a\n\n if testfile is None:\n sys.stderr.write(\"Error, missing testfile\")\n print __doc__\n sys.exit(2)\n\n suite = regressionTest(testfile, inifile)\n tester = TESTER(suite, verbosity)\n # TODO: runTest gives a return value\n tester.runTest()\n tester.generateFailures()\n\n sys.exit(0)\n\t\n except getopt.error, msg:\n sys.stderr.write(\"%s\" % msg)\n sys.stderr.write(\"for help use --help\")\n sys.exit(2)",
"def main():\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(dest='mode')\n\n # Add sub-parser for feature extraction\n parser_extract = subparsers.add_parser('extract')\n parser_extract.add_argument('dataset',\n choices=['training', 'validation', 'test'],\n )\n\n # Add sub-parser for training\n subparsers.add_parser('train')\n\n # Add sub-parser for inference\n parser_predict = subparsers.add_parser('predict')\n parser_predict.add_argument('dataset',\n nargs='?',\n choices=['validation', 'test'],\n default='test',\n )\n\n # Add sub-parser for evaluation\n parser_evaluate = subparsers.add_parser('evaluate')\n parser_evaluate.add_argument('task',\n nargs='?',\n choices=['tagging', 'sed', 'all'],\n default='all',\n )\n parser_evaluate.add_argument('dataset',\n nargs='?',\n choices=['validation', 'test'],\n default='test',\n )\n parser_evaluate.add_argument('--thresholds', action='store_true')\n\n args = parser.parse_args()\n\n if args.mode == 'extract':\n extract(cfg.to_dataset(args.dataset))\n elif args.mode == 'train':\n train()\n elif args.mode == 'predict':\n predict(cfg.to_dataset(args.dataset))\n elif args.mode == 'evaluate':\n eval_all = args.task == 'all'\n dataset = cfg.to_dataset(args.dataset)\n if args.task == 'tagging' or eval_all:\n evaluate_audio_tagging(dataset, args.thresholds)\n if args.task == 'sed' or eval_all:\n evaluate_sed(dataset)",
"def main():\n vunit = vunit_pkg.VUnit.from_argv()\n vunit = map_sources(vunit)\n run_tests(vunit)",
"def main():\n # ============================================================================\n\n working_folder = os.path.dirname(os.path.realpath(__file__))\n os.chdir(working_folder)\n\n description_string =\\\n \"A set of python tests to fetch data from Open Weather Map\"\n epilog_string = (\"These tests should be run regularly to ensure that the \"\n \"API server is healthy\")\n\n parser = argparse.ArgumentParser(\n prog='OWM-API-test',\n description=description_string,\n epilog=epilog_string)\n\n parser.add_argument(\n \"--tb\",\n action='store',\n default='line',\n nargs='?',\n choices=['auto', 'long', 'short', 'no', 'line', 'native'],\n help='Set the traceback level for pytest',\n dest='traceback')\n\n parser.add_argument(\n \"-v\", \"--verbose\",\n action='store_true',\n help='Increase output verbosity',\n dest='verbose')\n\n parser.add_argument(\n \"-q\", \"--quiet\",\n action='store_true',\n help='Reduce output verbosity')\n\n args = parser.parse_args()\n\n # ============================================================================\n # construct pytest commands\n pytest_command = []\n if args.verbose:\n # we double increase verbosity to make it actually verbose\n pytest_command.extend([\"-v\", \"-v\"])\n # this only overwrites traceback argument if left at default\n if args.traceback == \"line\":\n args.traceback = \"long\"\n\n if args.quiet: # this overwrites and tb argument given\n args.traceback = \"no\"\n\n pytest_command.extend([\"--tb\", args.traceback])\n\n # ============================================================================\n\n printing_functions.test_declaration(\"Running Tests...\")\n run_local_tests(pytest_command)",
"def main(*arguments):\n\n args = parse_args(arguments)\n\n if args.test_suite is not None:\n test_suite = report_manager.load_test_suite_conf(args.test_suite)\n for i, test in enumerate(test_suite):\n args = parse_args(test)\n process_args_and_run(args, test_suite_iter=i)\n else:\n process_args_and_run(args)",
"def main():\n\n# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n# ~~~~ Reads config file ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n print('\\n\\nInterpreting command line options\\n'+'~'*72+'\\n')\n\n parser = ArgumentParser()\n subparser = parser.add_subparsers(\\\n help='run_selafin commands to do', dest='command')\n\n subparser = chop_parser(subparser)\n subparser = scan_parser(subparser)\n subparser = spec_parser(subparser)\n subparser = alter_parser(subparser)\n subparser = merge_parser(subparser)\n subparser = diff_parser(subparser)\n subparser = calcs_parser(subparser, 'calcs', '???')\n subparser = calcs_parser(subparser, 'crunch', '???')\n subparser = calcs_parser(subparser, 'transf', '???')\n subparser = sample_parser(subparser)\n subparser = subdivide_parser(subparser)\n subparser = tesselate_parser(subparser)\n\n options = parser.parse_args()\n\n# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n# ~~~~ Reads code name ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n if options.command == 'scan':\n scan(options)\n elif options.command == 'spec':\n spec(options)\n elif options.command == 'chop':\n chop(options)\n elif options.command == 'alter':\n alter(options)\n elif options.command == 'merge':\n merge(options)\n elif options.command == 'diff':\n diff(options)\n elif options.command == 'sample':\n sample(options)\n elif options.command in ['calcs', 'crunch', 'transf']:\n calcs(options, options.command)\n elif options.command == 'subdivide':\n subdivide(options)\n elif options.command == 'tessellate':\n tesselate(options)\n else:\n raise TelemacException(\\\n '\\nDo not know what to do with '\n 'this code name: {}'.format(options.command))\n\n# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n# ~~~~ Jenkins' success message ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n print('\\n\\nMy work is done\\n\\n')\n\n sys.exit(0)",
"def main():\n parser = argparse.ArgumentParser(\n description='Runs test for C++ implementation of M*')\n parser.add_argument('test_file', help='File describing test cases')\n parser.add_argument('output_file', help='Name of output file')\n parser.add_argument('num_processors', type=int, action='store',\n help='Number of processes to run on each node. ' +\n 'The local host running the primary server will ' +\n 'run one fewer worker processes')\n parser.add_argument('-i', action='store', type=float, default=1.0,\n help='Set inflation factor for the heuristic, ' +\n 'defaults to 1', metavar='INF', dest='inflation')\n parser.add_argument('-t', action='store', type=int, default=120,\n help='Set time limit for planning. Defaults to 2 ' +\n 'minutes', dest='time_limit')\n parser.add_argument('--hosts', action='store',\n default=('python', 'cobra', 'viper', 'anaconda'),\n help='Hostnames/IPs to use as processing nodes.',\n nargs='*', metavar='HOSTNAME')\n\n args = parser.parse_args()\n\n run_cpp_mstar_trial(args.test_file, args.output_file,\n inflation=args.inflation, time_limit=args.time_limit,\n hosts=args.hosts, num_processors=args.num_processors)",
"def run(cls): \n tests_to_run = cls.config.TESTS # A list of 5-tuple elements specifying the tests to run. See the\n # 'Test Setup' section in config.py.template for more info.\n test_group_name = \"Alchemist Tests\" # A short string identifier for this test run.\n output_dir = cls.config.OUTPUT_DIR # The output file where we write results.\n \n try:\n os.makedirs(output_dir,0o777)\n except:\n pass\n num_tests_to_run = len(tests_to_run)\n\n print(OUTPUT_DIVIDER_STRING)\n if num_tests_to_run == 1:\n print(\"Running %d test in %s\" % (num_tests_to_run, test_group_name))\n else:\n print(\"Running %d tests in %s\" % (num_tests_to_run, test_group_name))\n failed_tests = []\n\n cls.before_run_tests()\n \n spark_settings = []\n for i in cls.config.SPARK_SETTINGS:\n spark_settings.append(i.to_array()[0])\n \n output_settings = []\n for i in cls.config.OUTPUT_SETTINGS:\n output_settings.append(i.to_array()[0])\n \n main_class = \"altest.AlTest\"\n\n for meta_data, opt_sets in tests_to_run:\n print(OUTPUT_DIVIDER_STRING + '\\n')\n# print(\"Running test command: '%s' ... \" % main_class)\n \n meta = {}\n meta_pairs = [i.to_tuple() for i in meta_data]\n for mp in meta_pairs:\n meta[mp[0].replace('-', '_')] = mp[1].replace('0x20', ' ')\n \n meta_settings = []\n for i in meta_data:\n meta_settings.append(i.to_array()[0])\n \n# stdout_filename = \"%s/%s.out\" % (output_dir, meta['short_name'])\n# stderr_filename = \"%s/%s.err\" % (output_dir, meta['short_name'])\n# \n# out_file = open(output_dir + \"/\" + meta['short_name'] + \".out\", 'w')\n\n # Run a test for all combinations of the OptionSets given, then capture\n # and print the output.\n opt_set_arrays = [i.to_array() for i in opt_sets]\n for opt_list in itertools.product(*opt_set_arrays):\n\n cmd = cls.get_spark_submit_cmd(spark_settings, main_class, output_settings, meta_settings, opt_list)\n# print(\"\\nSetting env var SPARK_SUBMIT_OPTS: %s\" % java_opts_str)\n# test_env[\"SPARK_SUBMIT_OPTS\"] = java_opts_str\n print(\"Running command:\")\n print(\"%s\\n\" % cmd)\n Popen(cmd, shell=True, env=test_env).wait()\n\n try:\n src = output_dir + meta['short_name'] + '_latest/'\n src_files = os.listdir(src)\n src_file = src_files[0][:-4]\n new_dir = output_dir + src_file\n os.makedirs(new_dir)\n for file_name in src_files:\n full_file_name = os.path.join(src, file_name)\n if (os.path.isfile(full_file_name)):\n shutil.copy(full_file_name, new_dir)\n except:\n pass\n \n# result_string = cls.process_output(config, meta['short_name'], opt_list,\n# stdout_filename, stderr_filename)\n# print(OUTPUT_DIVIDER_STRING)\n# print(\"\\nResult: \" + result_string)\n# print(OUTPUT_DIVIDER_STRING)\n# if \"FAILED\" in result_string:\n# failed_tests.append(meta['short_name'])\n# \n# \n# out_file.write(result_string + \"\\n\")\n# out_file.flush()\n\n if num_tests_to_run == 1:\n print(\"Finished running %d test in %s.\" % (num_tests_to_run, test_group_name))\n else:\n print(\"Finished running %d tests in %s.\" % (num_tests_to_run, test_group_name))\n# print(\"\\nNumber of failed tests: %d, failed tests: %s\" %\n# (len(failed_tests), \",\".join(failed_tests)))\n print(OUTPUT_DIVIDER_STRING)",
"def main():\n print()\n print(\"Un-comment and re-comment calls in MAIN one by one as you work.\")\n print()\n\n # run_test_sum_until_prime_input()\n # run_test_next_prime()\n # run_test_sum_to_next_prime()\n # run_test_prime_gap()\n # run_test_wait_for_sum_of_cubes()",
"def main():\n\n print(\"=\" * 80)\n print(\"DATA STRUCTURE TESTS\")\n test_module(structs.tests)\n test_module(structs.regularization)\n\n print(\"=\" * 80)\n print(\"END-TO-END TESTS\")\n test_module(globals())",
"def main():\n\n ############################ variable settings #################################\n parser = argparse.ArgumentParser(description='Run Subtask C of GermEval 2017 Using Pre-Trained Language Model.')\n parser.add_argument('--seed', type=int, default=42, help='Random seed.')\n parser.add_argument('--lang_model', type=str, default='bert-base-german-dbmdz-uncased', help='The pre-trained language model.')\n parser.add_argument('--epochs', type=int, default=4, help='Number of epochs for training.')\n parser.add_argument('--lr', type=float, default=5e-5, help='The learning rate.')\n parser.add_argument('--max_len', type=int, default=256, help='The maximum sequence length of the input text.')\n parser.add_argument('--batch_size', type=int, default=32, help='Your train set batch size.')\n parser.add_argument('--df_path', type=str, default='./data/', help='The data directory.') \n parser.add_argument('--train_data', type=str, default='train_df_cat.tsv', help='The filename of the input train data.')\n parser.add_argument('--dev_data', type=str, default='dev_df_cat.tsv', help='The filename of the input development data.')\n parser.add_argument('--test_data1', type=str, default='test_syn_df_cat.tsv', help='The filename of the first input test data (synchronic).')\n parser.add_argument('--test_data2', type=str, default='test_dia_df_cat.tsv', help='The filename of the second input test data (diachronic).')\n parser.add_argument('--output_path', type=str, default='./output/subtaskC/', help='The output directory of the model and predictions.')\n parser.add_argument(\"--train\", default=True, action=\"store_true\", help=\"Flag for training.\")\n parser.add_argument(\"--save_prediction\", default=False, action=\"store_true\", help=\"Flag for saving predictions.\")\n parser.add_argument(\"--save_cr\", default=False, action=\"store_true\", help=\"Flag for saving confusion matrix.\")\n parser.add_argument(\"--exclude_general\", default=False, action=\"store_true\", help=\"Flag for excluding category Allgemein.\")\n parser.add_argument(\"--exclude_neutral\", default=False, action=\"store_true\", help=\"Flag for excluding neutral polarity.\")\n parser.add_argument(\"--exclude_general_neutral\", default=False, action=\"store_true\", help=\"Flag for excluding category Allgemein:neutral.\")\n args = parser.parse_args()\n ################################################################################\n set_all_seeds(args.seed)\n device, n_gpu = initialize_device_settings(use_cuda=True)\n \n # Load data\n train_df = pd.read_csv(args.df_path + args.train_data, delimiter = '\\t')\n dev_df = pd.read_csv(args.df_path + args.dev_data, delimiter = '\\t')\n test_syn_df = pd.read_csv(args.df_path + args.test_data1, delimiter = '\\t')\n test_dia_df = pd.read_csv(args.df_path + args.test_data2, delimiter = '\\t')\n \n # Create a tokenizer\n lower_case = False\n if args.lang_model[-7:] == \"uncased\":\n lower_case = True\n\n if args.lang_model[:4] == \"bert\":\n model_class = \"BERT\"\n tokenizer = BertTokenizer.from_pretrained(args.lang_model, do_lower_case=lower_case, max_length=args.max_len)\n \n if args.lang_model[:10] == \"distilbert\":\n model_class = \"DistilBERT\"\n tokenizer = DistilBertTokenizer.from_pretrained(args.lang_model, do_lower_case=lower_case, max_length=args.max_len)\n \n\n # get training features\n cats = train_df.columns[5:]\n end = \"full\"\n # exclude categories if required\n if (args.exclude_general):\n cats = [i for i in list(cats) if \"Allgemein\" not in i]\n end = \"excl_gen\"\n if 
(args.exclude_neutral):\n cats = [i for i in list(cats) if \"neutral\" not in i]\n end = \"excl_neu\"\n if (args.exclude_general_neutral):\n cats = [i for i in list(cats) if \"Allgemein:neutral\" not in i]\n end = \"excl_genneu\"\n \n num_labels = len(list(cats))\n\n # create one hot labels\n train_df['one_hot_labels'] = list(train_df[list(cats)].values)\n dev_df['one_hot_labels'] = list(dev_df[list(cats)].values)\n test_syn_df['one_hot_labels'] = list(test_syn_df[list(cats)].values)\n test_dia_df['one_hot_labels'] = list(test_dia_df[list(cats)].values)\n\n # retrieve sentences and labels\n df = pd.concat([train_df, dev_df])\n sentences = df.text.values\n labels = list(df.one_hot_labels.values) \n\n sentences_syn = test_syn_df.text.values\n labels_syn = list(test_syn_df.one_hot_labels.values)\n\n sentences_dia = test_dia_df.text.values\n labels_dia = list(test_dia_df.one_hot_labels.values)\n \n print(\"number of categories:\", len(list(cats)))\n\n # Tokenize all of the sentences and map the tokens to their word IDs. \n input_ids = [tokenizer.encode(sent, add_special_tokens=True, truncation=True, \n max_length=args.max_len) for sent in sentences]\n input_ids = pad_sequences(input_ids, maxlen=args.max_len, dtype=\"long\", \n value=0.0, truncating=\"post\", padding=\"post\")\n # Create attention masks\n attention_masks = [[int(token_id > 0) for token_id in sent] for sent in input_ids]\n \n # synchronic test data\n input_ids_syn = [tokenizer.encode(sent, add_special_tokens=True, truncation=True) for sent in sentences_syn]\n input_ids_syn = pad_sequences(input_ids_syn, maxlen=args.max_len, dtype=\"long\", \n value=0.0, truncating=\"post\", padding=\"post\")\n attention_masks_syn = [[int(token_id > 0) for token_id in sent] for sent in input_ids_syn]\n \n # diachronic test data\n input_ids_dia = [tokenizer.encode(sent, add_special_tokens=True, truncation=True) for sent in sentences_dia]\n input_ids_dia = pad_sequences(input_ids_dia, maxlen=args.max_len, dtype=\"long\", \n value=0.0, truncating=\"post\", padding=\"post\")\n attention_masks_dia = [[int(token_id > 0) for token_id in sent] for sent in input_ids_dia]\n\n # split train, dev\n train_inputs, train_labels, dev_inputs, dev_labels, train_masks, dev_masks = split_train_dev(\n train_df, dev_df, attention_masks, input_ids, labels)\n \n # transform to torch tensor\n train_inputs = torch.tensor(train_inputs)\n dev_inputs = torch.tensor(dev_inputs)\n\n train_labels = torch.tensor(train_labels)\n dev_labels = torch.tensor(dev_labels)\n\n train_masks = torch.tensor(train_masks)\n dev_masks = torch.tensor(dev_masks)\n\n test_syn_inputs = torch.tensor(input_ids_syn)\n test_syn_masks = torch.tensor(attention_masks_syn)\n test_syn_labels = torch.tensor(labels_syn)\n\n test_dia_inputs = torch.tensor(input_ids_dia)\n test_dia_masks = torch.tensor(attention_masks_dia)\n test_dia_labels = torch.tensor(labels_dia)\n\n # Create the DataLoader\n train_dataloader = create_dataloader(train_inputs, train_masks, \n train_labels, args.batch_size, train = True)\n\n dev_dataloader = create_dataloader(dev_inputs, dev_masks, \n dev_labels, args.batch_size, train = False)\n\n test_syn_dataloader = create_dataloader(test_syn_inputs, test_syn_masks, \n test_syn_labels, args.batch_size, \n train = False)\n\n test_dia_dataloader = create_dataloader(test_dia_inputs, test_dia_masks, \n test_dia_labels, args.batch_size, \n train = False)\n\n # Create model\n if args.train:\n if model_class == \"BERT\":\n config = BertConfig.from_pretrained(args.lang_model, 
num_labels=num_labels) \n config.hidden_dropout_prob = 0.1 \n model = BertForSequenceClassification.from_pretrained(\n args.lang_model,\n num_labels = num_labels,\n output_attentions = False,\n output_hidden_states = False\n )\n\n if model_class == \"DistilBERT\":\n config = DistilBertConfig.from_pretrained(args.lang_model, num_labels=num_labels) \n config.hidden_dropout_prob = 0.1 \n model = DistilBertForSequenceClassification.from_pretrained(\n args.lang_model,\n num_labels = num_labels,\n output_attentions = False,\n output_hidden_states = False\n )\n model.cuda()\n\n\n # Create an optimizer\n param_optimizer = list(model.named_parameters())\n no_decay = ['bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],\n 'weight_decay_rate': 0.01},\n {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],\n 'weight_decay_rate': 0.0}\n ]\n optimizer = AdamW(\n optimizer_grouped_parameters,\n lr=args.lr,\n eps = 1e-8\n )\n # Total number of training steps = number of batches * number of epochs\n total_steps = len(train_dataloader) * args.epochs\n # Create the learning rate scheduler\n scheduler = get_linear_schedule_with_warmup(\n optimizer,\n num_warmup_steps=0,\n num_training_steps=total_steps\n )\n \n # train model\n # Main Loop\n print(\"=================== Train ================\")\n print(\"##### Language Model:\", args.lang_model, \",\", \"learning rate:\", args.lr)\n print()\n\n track_time = time.time()\n # trange is a tqdm wrapper around the normal python range\n for epoch in trange(args.epochs, desc=\"Epoch\"):\n print(\"Epoch: %4i\"%epoch, dt.datetime.now())\n\n model, optimizer, scheduler, tr_loss = train_multilabel(\n train_dataloader=train_dataloader, \n model=model, \n device=device, \n optimizer=optimizer, \n scheduler=scheduler, \n num_labels=num_labels\n )\n # EVALUATION: TRAIN SET\n pred_bools_train, true_bools_train, f1_train = eval_multilabel(\n train_dataloader, model=model, device=device)\n print(\"TRAIN: micro F1 %.3f\"%(f1_train))\n \n # EVALUATION: DEV SET\n pred_bools_dev, true_bools_dev, f1_dev = eval_multilabel(\n dev_dataloader, model=model, device=device)\n print(\"EVAL: micro F1 %.3f\"%(f1_dev))\n \n\n print(\" Training and validation took in total: {:}\".format(format_time(time.time()-track_time)))\n\n # EVALUATION: TEST SYN SET\n pred_bools_syn, true_bools_syn, f1_test_syn = eval_multilabel(\n test_syn_dataloader, model=model, device=device)\n print(\"TEST SYN: micro F1 %.4f\"%(f1_test_syn))\n\n # classification report\n clf_report_syn = classification_report(true_bools_syn, pred_bools_syn, target_names=cats, digits=3)\n print(clf_report_syn)\n\n\n # EVALUATION: TEST DIA SET\n pred_bools_dia, true_bools_dia, f1_test_dia = eval_multilabel(\n test_dia_dataloader, model=model, device=device\n )\n print(\"TEST DIA: micro F1 %.4f\"%(f1_test_dia))\n\n # classification report\n clf_report_dia = classification_report(true_bools_dia, pred_bools_dia, target_names=cats, digits=3)\n print(clf_report_dia)\n \n if args.save_cr:\n pickle.dump(clf_report_syn, open(args.output_path+'clf_report_'+args.lang_model+'_test_syn_'+str(num_labels)+end+'.txt','wb'))\n pickle.dump(clf_report_dia, open(args.output_path+'clf_report_'+args.lang_model+'_test_dia_'+str(num_labels)+end+'.txt','wb'))\n\n\n if args.save_prediction:\n test_syn_df[\"category_pred\"] = pred_bools_syn\n test_dia_df[\"category_pred\"] = pred_bools_dia\n 
test_syn_df.category_pred.to_csv(args.output_path+args.lang_model+'_test_syn_'+str(num_labels)+end+\".tsv\", \n sep=\"\\t\", index = False, header = True, encoding = \"utf-8-sig\")\n test_dia_df.category_pred.to_csv(args.output_path+args.lang_model+'_test_dia_'+str(num_labels)+end+\".tsv\", \n sep=\"\\t\", index = False, header = True, encoding = \"utf-8-sig\")",
"def main():\n run_test_summary1a()\n run_test_summary1c()\n run_test_summary1c()",
"def main():\n test_runner = TestRunner(\n FLAGS.workspace, FLAGS.bench_home, imagenet_dir=FLAGS.train_data_dir)\n test_runner.run_tests(FLAGS.test_list.split(','))",
"def main(aArgs, aTransmitToTaskHandler):\n\n global TASK_HANDLER_HOST\n global TASK_HANDLER_PORT\n\n # if the option --printList is passed, only generate the file\n # reftestList.txt\n if hasattr(aArgs, \"printList\"):\n printReftestList()\n exit(0)\n\n # if the option --printNotes is passed, only generate the\n # testsuiteNotes html page.\n if hasattr(aArgs, \"printNotes\"):\n printNotes()\n exit(0)\n\n # if the option --printListOfTests is passed, only generate a ListOfTests\n # from a file containing test URIs\n if hasattr(aArgs, \"printListOfTests\"):\n if not aArgs.printListOfTests:\n print(\"No input file!\", file=sys.stderr)\n exit(0)\n printListOfTests(aArgs.printListOfTests)\n exit(0)\n\n # create the date directory\n now = datetime.utcnow();\n directory = MATHJAX_WEB_PATH + \"results/\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n # create the subdirectory\n if aArgs.output and re.match(\"^([0-9]|[a-z]|[A-Z]|-|/){1,50}/$\",\n aArgs.output):\n directory += aArgs.output\n else:\n directory += now.strftime(\"%Y-%m-%d/%H-%M-%S/\")\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n # execute testing instances for all the config files\n configFileList = aArgs.config.split(\",\")\n\n for configFile in configFileList:\n\n configFile = configFile\n\n if (not os.path.isfile(configFile)):\n print(\"Warning: config file \" + configFile + \" not found!\",\n file=sys.stderr)\n continue\n\n # Load configuration file\n config = ConfigParser.ConfigParser()\n config.readfp(open(configFile))\n\n # framework section\n section = \"framework\"\n useGrid = getBooleanOption(config, section, \"useGrid\")\n host = config.get(section, \"host\")\n # host == \"default\" is handled below\n port = config.getint(section, \"port\")\n if (port == -1):\n port = SELENIUM_SERVER_PORT\n mathJaxPath = config.get(section, \"mathJaxPath\")\n if (mathJaxPath == \"default\"):\n mathJaxPath = DEFAULT_MATHJAX_PATH\n mathJaxTestPath = config.get(section, \"mathJaxTestPath\")\n if (mathJaxTestPath == \"default\"):\n mathJaxTestPath = MATHJAX_TEST_LOCAL_URI + \"testsuite/\"\n timeOut = config.getint(section, \"timeOut\")\n if (timeOut == -1):\n timeOut = DEFAULT_TIMEOUT\n timeOut = timeOut * 1000 # convert in ms\n useWebDriver = getBooleanOption(config, section, \"useWebDriver\")\n fullScreenMode = getBooleanOption(config, section, \"fullScreenMode\")\n formatOutput = getBooleanOption(config, section, \"formatOutput\")\n compressOutput = getBooleanOption(config, section, \"compressOutput\")\n\n # platform section\n section = \"platform\"\n operatingSystem = config.get(section, \"operatingSystem\")\n if (operatingSystem == \"default\"):\n operatingSystem = OS_LIST[0]\n if (host == \"default\"):\n host = HOST_LIST[HOST_LIST_OS.index(OS_LIST.index(operatingSystem))]\n browserList = config.get(section, \"browser\").split()\n browserVersionList = config.get(section, \"browserVersion\").split()\n browserModeList = config.get(section, \"browserMode\").split()\n browserPath = config.get(section, \"browserPath\")\n fontList = config.get(section, \"font\").split()\n outputJaxList = config.get(section, \"outputJax\").split()\n \n # testsuite section\n section = \"testsuite\"\n runSlowTests = getBooleanOption(config, section, \"runSlowTests\")\n runSkipTests = getBooleanOption(config, section, \"runSkipTests\")\n listOfTests = config.get(section, \"listOfTests\")\n startID = config.get(section, \"startID\")\n if (startID == \"default\"):\n startID = \"\"\n \n # When more than one browser is specified, 
browserPath is ignored.\n if (len(browserList) > 1 and browserPath != \"default\"):\n print(\"Warning: browserPath ignored\", file=sys.stderr)\n browserPath = \"default\"\n\n for browser in browserList:\n\n if (browser == \"default\"):\n browser = BROWSER_LIST[0]\n\n for font in fontList:\n\n if (font == \"default\"):\n font = FONT_LIST[0]\n\n for outputJax in outputJaxList:\n\n if (outputJax == \"default\"):\n outputJax = OUTPUT_JAX_LIST[0]\n\n for browserVersion in browserVersionList:\n\n # browserModeList is only relevant for MSIE\n if not(browser == \"MSIE\"):\n browserModeList2 = [\"default\"]\n else:\n browserModeList2 = browserModeList\n \n for browserMode in browserModeList2:\n \n # Create a Selenium instance\n selenium = \\\n seleniumMathJax.seleniumMathJax(useWebDriver,\n useGrid,\n host,\n port,\n mathJaxPath,\n mathJaxTestPath,\n operatingSystem,\n browser,\n browserVersion,\n browserMode,\n browserPath,\n font,\n outputJax,\n timeOut,\n fullScreenMode)\n \n if aTransmitToTaskHandler:\n taskHandler = [TASK_HANDLER_HOST,\n TASK_HANDLER_PORT,\n str(os.getpid())]\n else:\n taskHandler = None\n\n # Create the test suite\n suite = reftest.reftestSuite(taskHandler,\n runSlowTests,\n runSkipTests,\n listOfTests,\n startID)\n # use the specified file name\n if hasattr(aArgs, \"filename\"):\n filename = aArgs.filename\n else:\n filename = getOutputFileName(directory,\n selenium,\n suite)\n runTestingInstance(directory, selenium, suite,\n formatOutput, compressOutput,\n filename)\n # end browserMode\n # end browserVersion\n #end outputJax\n # end for font\n # end browser",
"def __main() :\n launchTests()",
"def main():\n configuration = {'resource-folder': 'resources',\n 'build-folder': 'build',\n 'log-folder': 'logfiles',\n 'use-preloaded': False,\n 'addi-metrics': 'addi-metrics.json',\n 'jenkins': {'dependency-filename': 'dependencies.txt',\n 'server': 'http://is.dbc.dk',\n 'repository-project': 'opensearch-3rd-party-dependencies'},\n 'log-zip-file':'logs.zip'}\n configuration.update(cli())\n setup_logger(configuration['verbose'])\n run_performance_test(configuration)",
"def MainUserInterface():\n print(\"---Shiva Govindaraju COEN 242 Assignment 1 --- Top K Words---\")\n\n runOption = int(input(\"Would you like to run the Generic Test or a Specific Test? (0 - Generic; 1 - Specific): \"))\n \n if runOption == 0:\n print(runGenericTests())\n\n elif runOption == 1:\n print(\"Running Specific Test.\")\n \n # Request for Input\n filename = input(\"Which file would you like to run on?: \")\n k = int(input(\"Set the number of words to output (k): \"))\n shards = int(input(\"How many shards should be used? (Recommended [1, 5000]): \"))\n \n # Ensure Shard Count is within exepected values\n while (shards > 5000 or shards < 1):\n nsh = input(\"That shard-count is higher than recommended. Please input shard-count [1,5000]: \")\n shards = int(nsh)\n \n #print(\"Current Total Memory: {} MB\".format(psutil.virtual_memory()[0] / 2.**20))\n #print(\"Current Available Memory: {} MB\".format(psutil.virtual_memory()[1] / 2.**20))\n #print(\"Current CPU Resource Percentage: {}\".format(psutil.cpu_percent(interval=None, percpu=True)))\n print(\"Testing TopKWords w/ Shards on {} using {} shards.\".format(filename, shards))\n print(\"Starting Test...\")\n \n # Execute the program\n \n start = time()\n print(findTopKViaSharding(filename, k, shards))\n end = time()\n print(\"Test Complete.\")\n \n print(\"Total Time: {}\".format(end - start))\n #print(\"Total CPU Usage Percentage: {}\".format(psutil.cpu_percent(interval=None, percpu=True)))\n \n elif runOption == 2:\n # This option is for testing whether Sharding worked correctly or not\n # It is not part of the code that is needed for submission, nor will it run without files I have not included in the submission\n print(\"Sharding Test -- Not for general use\")\n print(\"Confirm Sharding on Ulysses: {}\".format(confirmSharding(\"ulysses.txt\", 100)))\n deleteShards()\n #print(\"Confirm Sharding on Indonesia: {}\".format(confirmSharding(\"data_1gb.csv\", 5000)))\n print(\"Confirm Sharding on Lorum: {}\".format(confirmSharding(\"simpleTest.txt\",10)))\n deleteShards()\n \n else:\n # Basic functionality tests from before I cleaned up the UI for giving the program inputs.\n \n print(\"OG Testing of basic functionality\")\n print(\"Testing on Ulysses text\")\n #print(findFLength(\"4300-0.txt\"))\n start = time()\n print(kMostFrequentWords(\"ulysses.txt\", 15))\n end = time()\n print(\"Total Time: {}\".format(end - start))\n\n print(\"Testing on Ulysses with Shards\")\n print(\"Top 15 Words -- 10 Shards\")\n start = time()\n print(findTopKViaSharding(\"ulysses.txt\", 15, 10))\n end = time()\n print(\"Total Time: {}\".format(end - start))\n\n print(\"Testing TopKWords w/ Shards on Surya's 400MB\")\n print(\"Top 15 Words -- 1024 Shards\")\n start = time()\n print(findTopKViaSharding(\"dataset-400MB.txt\", 15, 1024))\n end = time()\n print(\"Total Time: {}\".format(end - start))",
"def main():\n\n argparser = ArgumentParser()\n argparser.add_argument('--datapath', '-D', type=str, help='Relative path to cwd of a local data file')\n argparser.add_argument('--attack_model', '-AM', type=str, default='ANY', choices=['RandomForest', 'LogReg', 'LinearSVC', 'SVC', 'KNN', 'ANY'])\n argparser.add_argument('--runconfig', '-RC', default='runconfig_mia.json', type=str, help='Path relative to cwd of runconfig file')\n argparser.add_argument('--outdir', '-O', default='outputs/test', type=str, help='Path relative to cwd for storing output files')\n args = argparser.parse_args()\n\n # Load runconfig\n with open(path.join(cwd, args.runconfig)) as f:\n runconfig = json.load(f)\n print('Runconfig:')\n print(runconfig)\n\n # Load data\n RawDF, metadata = load_local_data_as_df(path.join(cwd, args.datapath))\n dname = args.datapath.split('/')[-1]\n RawDF['ID'] = [f'ID{i}' for i in arange(len(RawDF))]\n RawDF = RawDF.set_index('ID')\n\n print(f'Loaded data {dname}:')\n print(RawDF.info())\n\n # Randomly select nt target records T = (t_1, ..., t_(nt))\n targetIDs = choice(list(RawDF.index), size=runconfig['nTargets'], replace=False).tolist()\n Targets = RawDF.loc[targetIDs, :]\n\n # Drop targets from sample population\n RawDFdropT = RawDF.drop(targetIDs)\n\n # Add a crafted outlier target to the evaluation set\n targetCraft = craft_outlier(RawDF, runconfig['sizeTargetCraft'])\n targetIDs.extend(list(set(targetCraft.index)))\n Targets = Targets.append(targetCraft)\n\n # Sample adversary's background knowledge RawA\n rawAidx = choice(list(RawDFdropT.index), size=runconfig['sizeRawA'], replace=False).tolist()\n\n # Sample k independent target test sets\n rawTindices = [choice(list(RawDFdropT.index), size=runconfig['sizeRawT'], replace=False).tolist() for nr in range(runconfig['nIter'])]\n\n # List of candidate generative models to evaluate\n gmList = []\n for gm, paramsList in runconfig['generativeModels'].items():\n if gm == 'IndependentHistogram':\n for params in paramsList:\n gmList.append(IndependentHistogram(*params))\n elif gm == 'BayesianNet':\n for params in paramsList:\n gmList.append(BayesianNet(*params))\n elif gm == 'PrivBayes':\n for params in paramsList:\n gmList.append(PrivBayes(*params))\n elif gm == 'CTGAN':\n for params in paramsList:\n gmList.append(CTGAN(metadata, *params))\n elif gm == 'PateGan':\n for params in paramsList:\n gmList.append(PateGan(metadata, *params))\n else:\n raise ValueError(f'Unknown GM {gm}')\n\n for GenModel in gmList:\n print(f'----- {GenModel.__name__} -----')\n\n FeatureList = [NaiveFeatureSet(GenModel.datatype), HistogramFeatureSet(GenModel.datatype, metadata), CorrelationsFeatureSet(GenModel.datatype, metadata), EnsembleFeatureSet(GenModel.datatype, metadata)]\n\n prior = {LABEL_IN: runconfig['prior']['IN'], LABEL_OUT: runconfig['prior']['OUT']}\n\n if args.attack_model == 'RandomForest':\n AttacksList = [MIAttackClassifierRandomForest(metadata, prior, F) for F in FeatureList]\n elif args.attack_model == 'LogReg':\n AttacksList = [MIAttackClassifierLogReg(metadata, prior, F) for F in FeatureList]\n elif args.attack_model == 'LinearSVC':\n AttacksList = [MIAttackClassifierLinearSVC(metadata, prior, F) for F in FeatureList]\n elif args.attack_model == 'SVC':\n AttacksList = [MIAttackClassifierSVC(metadata, prior, F) for F in FeatureList]\n elif args.attack_model == 'KNN':\n AttacksList = [MIAttackClassifierKNN(metadata, prior, F) for F in FeatureList]\n elif args.attack_model == 'ANY':\n AttacksList = []\n for F in FeatureList:\n 
AttacksList.extend([MIAttackClassifierRandomForest(metadata, prior, F),\n MIAttackClassifierLogReg(metadata, prior, F),\n MIAttackClassifierKNN(metadata, prior, F)])\n else:\n raise ValueError(f'Unknown AM {args.attack_model}')\n\n # Run privacy evaluation under MIA adversary\n results = evaluate_mia(GenModel, AttacksList, RawDFdropT, Targets, targetIDs, rawAidx, rawTindices,\n runconfig['sizeRawT'], runconfig['sizeSynT'], runconfig['nSynT'],\n runconfig['nSynA'], runconfig['nShadows'], metadata)\n\n outfile = f\"{dname}{GenModel.__name__}MIA\"\n\n with open(path.join(f'{args.outdir}', f'{outfile}.json'), 'w') as f:\n json.dump(results, f, indent=2, default=json_numpy_serialzer)",
"def main():\n tester = Tester()\n # parse args, load configuration and create all required objects.\n tester.setup_experiment()\n # GO!\n tester.run_experiment()",
"def main(args):\n\n if 'log' in args and args['log'] is not None:\n logging.basicConfig(level=LOGGING_LEVELS.get(args['log'].lower(), logging.NOTSET))\n\n test_structure = read_test_file(args['test'])\n tests = build_testsets(args['url'], test_structure)\n\n # Override configs from command line if config set\n for t in tests:\n if 'print_bodies' in args and args['print_bodies'] is not None:\n t.config.print_bodies = safe_to_bool(args['print_bodies'])\n\n if 'interactive' in args and args['interactive'] is not None:\n t.config.interactive = safe_to_bool(args['interactive'])\n\n # Execute all testsets\n failures = execute_testsets(tests)\n\n sys.exit(failures)",
"def main():\n run_test_all()",
"def main():\n\n parser = argparse.ArgumentParser(prog=\"run_test.py\",\n formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument('id', help=\"Id of a test\")\n args = parser.parse_args()\n\n configure_logger()\n\n test_info = TESTS.get(args.id, None)\n if not test_info:\n test_info.log.error(f'{args.id} does not exist')\n exit(ErrorCode.CRITICAL)\n os.environ['DISPLAY'] = \":0.0\"\n\n test = Test(args.id, test_info)\n result = test.run()\n\n test.log.info('#' * 80)\n if not result:\n test.log.error('TEST FAILED')\n else:\n test.log.info('TEST PASSED')\n test.log.info('#' * 80)\n exit(not result)",
"def main():\n num_of_tests = int(input())\n\n # iterate over test cases\n for test_case in range(1, num_of_tests + 1):\n result = handle_case()\n printable_result = handle_result(result)\n print(\"Case #{}: {}\".format(test_case, printable_result))",
"def run_main(): # pragma: no cover\n RunTestsCLI.run()"
] |
[
"0.7044788",
"0.7024273",
"0.70204896",
"0.700891",
"0.69523466",
"0.69330347",
"0.6905141",
"0.68196094",
"0.67372173",
"0.67198765",
"0.6711246",
"0.67071337",
"0.6679333",
"0.6668226",
"0.6664324",
"0.66402525",
"0.6635689",
"0.6629262",
"0.66186917",
"0.66109586",
"0.6596056",
"0.65895003",
"0.6586959",
"0.6582618",
"0.65640944",
"0.65615183",
"0.65317184",
"0.65283036",
"0.6524932",
"0.652235"
] |
0.74495375
|
0
|
Crypt key property getter.
|
def crypt_key(self):
    return self._crypt_key
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_key(self):\r\n return self.__encryption_key",
"def _GetKeyString(self):\n return self.__key_string",
"def __GetKeyString(self):\n return self._GetKeyString()",
"def _GetKeyString(self):\n return self.__key_string",
"def __GetKeyString(self):\n return self._GetKeyString()",
"def get_key(self):\n return self.key",
"def get_key(self):\n return self.key",
"def _GetKeyString(self):",
"def _GetKeyString(self):",
"def encryption_key(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"encryption_key\")",
"def getKey(self):\n return self.key",
"def get(self):\n return str(self.physical_key)",
"def getKey(self):\n return self.key",
"def getKey(self):\n\t\treturn self.key",
"def key(self):\n return self._key.decode('utf-8')",
"def getKey(self):\n return self.__key",
"def _get_encryption_key(self, **options):\n\n return self._public_key",
"def encryption_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"encryption_key\")",
"def encryption_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"encryption_key\")",
"def encryption_key(self) -> typing.Optional[aws_cdk.aws_kms.IKey]:\n return self._values.get('encryption_key')",
"def get_key(self):\n\n return self._key",
"def key_encryption_key(self) -> Optional[pulumi.Input['KeyVaultAndKeyReferenceArgs']]:\n return pulumi.get(self, \"key_encryption_key\")",
"def get_key(self):\n return self._determine_key()",
"def get_key(self, key_value):\n # Storing the correct key value back to the self.key attributes.\n self.key=key_value\n self.cryptor=Fernet(self.key)",
"def key(self):\n return self._key if self._key else self.factory().key",
"def key(key):\n return key",
"def key(self) -> Key:\n return self._key",
"def get_keypair(self):\n return self.__keypair",
"def private_key(self):",
"def encryption_key(self) -> typing.Optional[aws_cdk.aws_kms.IKey]:\n ..."
] |
[
"0.770979",
"0.73459613",
"0.72082657",
"0.7200954",
"0.7153404",
"0.70125467",
"0.70125467",
"0.696998",
"0.696998",
"0.68880767",
"0.6879895",
"0.68580955",
"0.68320686",
"0.6807505",
"0.6768181",
"0.67640454",
"0.6733351",
"0.6709191",
"0.6709191",
"0.67053545",
"0.6701271",
"0.6696467",
"0.66898876",
"0.6680302",
"0.6633087",
"0.6614328",
"0.65956926",
"0.6579019",
"0.6578393",
"0.6566374"
] |
0.7816463
|
0
|
Prunes dataframe to rows whose longitudes are multiples of 60 degrees and whose years are multiples of 10. This includes only lat/lon grid locations for which we have USNO data for all eight types of twilight events.
|
def get_grid_data(df):
    bools = (df['Longitude'] % 60 == 0) & (df['Year'] % 10 == 0)
    return df[bools]
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def filter_tracks_domain(df, minlon=90, maxlon=180, minlat=-40, maxlat=0):\n\n domain = sbox(minlon, minlat, maxlon, maxlat, ccw=False)\n tracks = df.groupby('num')\n tempfilter = tracks.filter(lambda x: len(x) > 1)\n filterdf = tempfilter.groupby('num').filter(\n lambda x: LineString(zip(x['lon'], x['lat'])).intersects(domain))\n return filterdf",
"def clean_weather_df(weather_df):\n col = weather_df.columns\n drop_col = list(col[7::2])\n clean_num = weather_df[weather_df['LATITUDE'].str.contains(\n \"LATITUDE\") == False]\n num_weather = clean_num.drop(drop_col, axis=1)\n just_num = num_weather.drop(['NAME', 'STATION'], axis=1)\n all_weatherdf = just_num.apply(pd.to_numeric)\n all_weatherdf['name'] = num_weather['NAME']\n return all_weatherdf",
"def clean_data(df):\n \n any_location_id_missing = (df.PULocationID > 263) | (df.DOLocationID > 263)\n df = df.drop(df.index[any_location_id_missing])\n \n df = df[df.tpep_dropoff_datetime > df.tpep_pickup_datetime]\n\n df.PULocationID.replace([104, 105], 103)\n \n return df",
"def filter_dataframe(df, start_date_dt, end_date_dt):\n\n dff = df \n # df[\n # (df[\"timestamp\"].dt.date >= dt.date(start_date_dt.year, start_date_dt.month, start_date_dt.day))\n # & (df[\"timestamp\"].dt.date <= dt.date(end_date_dt.year, end_date_dt.month, end_date_dt.day))\n # ]\n # if (lat_min != -90) or (lat_max != 90):\n # dff = dff[\n # (dff[\"lat\"] >= lat_min)\n # & (dff[\"lat\"] <= lat_max)\n # ]\n # if (lon_min != -90) or (lon_max != 90):\n # dff = dff[\n # (dff[\"lon\"] >= lon_min)\n # & (dff[\"lon\"] <= lon_max)\n # ]\n\n return dff",
"def filter(df: pd.DataFrame = pd.DataFrame()):\n if df.empty:\n df = read()\n print('Filtering data...')\n df = df.dropna()\n df2 = pd.DataFrame()\n df2['Longitude'] = df['Longitude']\n df2['Latitude'] = df['Latitude']\n df2['Month'] = df['Date'].dt.strftime('%m').astype(int)\n df2['Day'] = df['Date'].dt.strftime('%d').astype(int)\n df2['Day_of_Week'] = df['Day_of_Week']\n df2['Time'] = np.array([t.timestamp() for t in df['Time']]) - df['Time'].min().timestamp()\n df2['Weather_Conditions'] = df['Weather_Conditions']\n return pd.get_dummies(df2)",
"def apply_lon_filter(data, lon_bounds):\n \n # Convert to common bounds (0, 360)\n lon_min = adjust_lon_range(lon_bounds[0], radians=False, start=0.0)\n lon_max = adjust_lon_range(lon_bounds[1], radians=False, start=0.0)\n lon_axis = adjust_lon_range(data.getLongitude()[:], radians=False, start=0.0)\n\n # Make required values zero\n ntimes, nlats, nlons = data.shape\n lon_axis_tiled = numpy.tile(lon_axis, (ntimes, nlats, 1))\n \n new_data = numpy.where(lon_axis_tiled < lon_min, 0.0, data)\n \n return numpy.where(lon_axis_tiled > lon_max, 0.0, new_data)",
"def crop_amide_one(df):\n df = df[(df[df.columns[0]] < 1706) & (df[df.columns[0]] > 1599)]\n df.reset_index(drop=True, inplace=True)\n return df",
"def clean_data(data):\n data.dropna(inplace=True)\n for feature in data:\n if ((feature != 'lat') and (feature != 'long') and (feature != 'date')):\n data.drop(data[(data[feature] < 0)].index, inplace=True)\n data.drop(data[(data['price'] == 0)].index, inplace=True)\n data.drop(data[(data['bedrooms'] == 0) & (data['bathrooms'] == 0.0)].index, inplace=True)\n return data",
"def clean_up_raw(df_raw):\n # exclude_subset = ['well', 'tile', 'cell', 'intensity', 'blob'] # causes issues with later joins, maybe a pandas bug\n import lasagna.utils\n df_raw[CYCLE] = df_raw[CYCLE].astype(int)\n df_raw = df_raw.sort_values([WELL, TILE, CELL, BLOB, CYCLE, CHANNEL])\n return df_raw",
"def clean_data(gps_df):\n gps_df = gps_df[gps_df['Validity'] == 'A'].copy() # checking if data is valid\n gps_df.drop_duplicates(subset=['Lat', 'Long'], inplace=True) # dropping duplicate values at when vehicle stops\n gps_df.dropna(inplace=True) # dropping null values\n gps_df = gps_df[gps_df['Quality'] < '5'].copy() # checking if quality is high enough to consider\n gps_df.reset_index(drop=True, inplace=True)\n\n return gps_df",
"def coarse_dataframe(geodf, side_square):\n\n # initialise the categories\n\n geodf['category'] = -1\n\n # do calculations on the first date, then extrapolate to the rest\n data_df = geodf[geodf['date'] == np.unique(geodf['date'])[0]]\n\n data_df = data_df.sort_values(by=['longitude', 'latitude'])\n\n n_grids = int(math.sqrt(data_df.shape[0]))\n\n category = 0\n\n for n in range(data_df.shape[0]):\n\n # only process lat,long point that do not have a category\n if data_df['category'].iloc[n] == -1:\n\n # get the side_square^2 nearest indexes to the point.\n indexes = []\n for i in range(side_square):\n for j in range(side_square):\n\n if n + n_grids * i + j < n_grids * n_grids and data_df['category'].iloc[n + n_grids * i + j] == -1:\n indexes.append(n + n_grids * i + j)\n\n # assing them all to the same categorty\n data_df['category'].iloc[indexes] = str(category)\n\n # get the geometry points of that catery\n cat_geometry = data_df[data_df['category'] == str(category)]['geometry']\n\n # get indexes of each point belonging to the category\n indexes_all = []\n for point in cat_geometry:\n indexes_all.append(geodf[geodf['geometry'] == point].index.tolist())\n\n indexes_all_flat = [item for sublist in indexes_all for item in sublist]\n\n geodf['category'].iloc[indexes_all_flat] = str(category)\n\n category = category + 1\n\n geodf['category'] = (geodf['category'].astype(str)).str.cat(geodf['date'], sep=\"_\")\n\n geodf = geodf.dissolve(by=['category', 'date'], aggfunc='mean')\n\n # re-assing the date because we are losing it\n geodf['date'] = [i[1] for i in geodf.index]\n\n geodf['category'] = [i[0] for i in geodf.index]\n\n return geodf",
"def clean_data(df, start = 1995, stop = 2018):\n country_iso3_code = pd.read_html('https://unstats.un.org/unsd/methodology/m49/')\n country_iso3_code = country_iso3_code[0]['ISO-alpha3 code']\n df = df.loc[df.country_iso3_code.isin(country_iso3_code)]\n df = df.set_index(['indicator', 'country_iso3_code', 'country', 'year']).unstack(level = 0)\n df.columns = df.columns.get_level_values(1)\n df = df.rename(columns = {'NY.GDP.PCAP.KD.ZG': 'pc_GDP_growth',\n 'NY.GDP.PCAP.PP.CD': 'pc_GDP_PPP'})\n df = df.reset_index()\n df = df.loc[(df.year >= (start - 1)) & (df.year <= stop)]\n df = df.dropna()\n return df",
"def _remove_duplicates_(self):\n t = self.table_orig\n mask = []\n t_obs = np.unique(t['jdobs'])\n for t_ in t_obs:\n if np.sum(t['jdobs'] == t_) == 1:\n mask.append(True)\n else:\n mags = t['magpsf'][t['jdobs'] == t_]\n if len(np.unique(mags)) == 1:\n mask.append(True)\n for k in range(len(mags) - 1):\n mask.append(False)\n elif np.sum(np.unique(mags) < 90) == 1:\n done = False\n for m_ in mags:\n if m_ < 90. and not done:\n mask.append(True)\n done = True\n else:\n mask.append(False)\n else:\n mags_ = np.unique(mags)\n mags_ = np.array(mags_[mags_ < 90])\n\n done = [False for k in range(len(mags_))]\n for m_ in mags:\n if m_ < 90.:\n k = np.where(mags_ == m_)[0][0]\n if not done[k]:\n mask.append(True)\n done[k] = True\n else:\n mask.append(False)\n\n self.table = t[np.array(mask)]",
"def clean_df(dfin, top=10):\n\n dfin['crop'] = dfin['crop'].astype('str')\n dfin['crop'] = dfin.crop.str.lower()\n\n dfin[\"created_on\"] = dfin[\"created_on\"].astype(\"datetime64\")\n dfin['latitude'] = np.round(dfin.latitude.apply(pd.to_numeric),2)\n dfin['longitude'] = np.round(dfin.longitude.apply(pd.to_numeric),2)\n dfin['query_type'] = dfin['query_type'].astype('str')\n dfin['query_type'] = dfin.query_type.apply(str.lower)\n\n dfin['hits'] = 1\n\n dfin = dfin[pd.notnull(dfin.kcc_answer_raw)]\n dfin = dfin[pd.notnull(dfin['query_text_raw'])]\n\n dfin['query_text_raw'] = dfin.query_text_raw.str.lower()\n dfin['kcc_answer_raw'] = dfin.kcc_answer_raw.str.lower()\n\n dfin['state_name'] = dfin.state_name.str.lower()\n dfin['district_name'] = dfin.district_name.str.lower()\n\n dfin['crop_full'] = dfin.crop\n dfin['crop'] = [i.split()[0] if len(i.split())>1 else i for i in dfin.crop]\n dfin.dropna(how='all',inplace=True)\n\n #topcrop = dfin.crop.value_counts().head(top).index.tolist()\n topcrop = ['paddy', 'wheat', 'cotton', 'chillies', 'onion', 'brinjal', 'sugarcane', 'tomato', 'bengal', 'groundnut', 'soybean', 'potato','maize']\n dfin = dfin[dfin.crop.isin(topcrop)]\n print(dfin.crop.unique())\n\n dfin = dfin[['crop','created_on','latitude','longitude','query_type','query_text_raw','kcc_answer_raw','state_name','district_name','crop_full']]\n return dfin",
"def is_in_dublin(self, df):\n Dublin = (53.346300, -6.263100)\n searchgrid = self.get_searchgrid(Dublin, distance=30)\n\n df.loc[df['latitude'] > searchgrid['North'],\n ['longitude', 'latitude']] = np.nan\n df.loc[df['latitude'] < searchgrid['South'],\n ['longitude', 'latitude']] = np.nan\n df.loc[df['longitude'] > searchgrid['East'],\n ['longitude', 'latitude']] = np.nan\n df.loc[df['longitude'] < searchgrid['West'],\n ['longitude', 'latitude']] = np.nan\n\n return(df)",
"def clean_station_data(station_df):\n # TODO implement data preparation here\n # Fix the datetime field\n\n # Cast to numeric fields where necessary\n\n # Interpolate missing data",
"def remove_data(ds, nh_lim, sh_lim, time_max, lat_name='lat', time_name='time'):\n return xr.where((ds[lat_name] < nh_lim) &\n (ds[lat_name] > sh_lim) &\n (ds[time_name] < pd.to_datetime([time_max]).values),\n np.nan,\n ds)",
"def clean_dataframe(self, df_in , what = ''): \n \n if what == 'era5fb': # cleaning the era5 feedback only \n df = df_in[np.isfinite(df_in['obsvalue@body'])]\n try: \n df = df.loc[ df['vertco_type@body'] != 2 ] \n except:\n pass\n df = df.reindex()\n df = df[np.isfinite(df['vertco_reference_1@body'])]\n #print('check lengths: ' , len(df_in) , len(df) )\n new_ind = np.array ( range(len(df))) \n df['index'] =new_ind\n df = df.set_index('index')\n \n else: \n ### check if can be optimized ???\n df = df_in.loc[ df_in['z_coordinate_type'] != 2 ] # case where the levels are given in terms of geopotential only (pressure not available)\n \n df = df.loc[ (df['observation_value'] != -99999.0) \n & (df['observation_value'] != -999.0) \n & (df['observation_value'] != -9999) \n & (df['observation_value'] != -9999.0) \n & (df['observation_value'] != -999.9) \n & (df['observation_value'] != -8888 )\n & (df['observation_value'] != -8888.0 )\n \n #& (df['z_coordinate_type'] != 2) \n & (df['z_coordinate'] != -99999.0) \n & (df['z_coordinate'] != -9999.0 )\n & (df['z_coordinate'] != 999 )\n & (df['z_coordinate'] != 999.0 )\n \n \n ] #cleaning the values \n #clean = clean.loc[ (clean['z_coordinate_type'] != 2)] #cleaning the values\n #clean = clean.loc[ (clean['z_coordinate'] != -99999.0 )] #cleaning the values\n \n df = df[np.isfinite(df['observation_value'])] # excluding nan values \n df = df[np.isfinite(df['z_coordinate'])]\n \n return df",
"def _clear_rows(self) -> None:\n df = self.hotels_df.dropna()\n\n # Delete rows with non-float values in coordinates\n df = df[df[\"Latitude\"].apply(self.is_float)]\n df = df[df[\"Longitude\"].apply(self.is_float)]\n\n # Convert to Float\n df[\"Latitude\"] = df[\"Latitude\"].astype(float)\n df[\"Longitude\"] = df[\"Longitude\"].astype(float)\n\n # Delete rows with wrong values in coordinates\n df = df[df[\"Latitude\"].apply(lambda x: abs(x) <= 90)]\n df = df[df[\"Longitude\"].apply(lambda x: abs(x) <= 180)]\n\n self.hotels_df = df",
"def drop_years(dataframe, start, end):\n tmp = dataframe\n tmp = tmp[(start <= tmp['year'].astype(int)) & (tmp['year'].astype(int) <= end)]\n\n return tmp",
"def cull_missing(df, colname, missingdays):\n df2 = df[[\"binyear\", colname]]\n nancounts = df2.groupby(\"binyear\").agg(lambda x: x.isnull().sum())\n # cull anything with more than 3 days NaN\n df2 = nancounts[nancounts[colname] > missingdays]\n years = []\n if not df2.empty:\n years = list(df2.index.values)\n resdf = df[~df[\"binyear\"].isin(years)]\n minyear = resdf[\"binyear\"].min()\n # Prevent scary cullyears listing\n return resdf, list(filter(lambda x: x > minyear, years))",
"def filterEvents(intervals_dates,list_infected,distance):\n d=distance\n list_gpsevents=[]\n for z in range(len(intervals_dates)-1):\n print(\"Interval: \",intervals_dates[z], \"y\", intervals_dates[z+1])\n infected,uninfected=getTrazaTimestamp(intervals_dates[z],intervals_dates[z+1],GPSrecords,list_infected)\n events_gps = nearest_neighbor(infected, uninfected, d)\n events_gps = events_gps.drop(['geometry','closest_stop_geom'], axis=1)\n print(len(events_gps))\n if(len(events_gps)!=0):\n list_gpsevents.append(events_gps.reset_index(drop=True))\n else:\n events_gps=pd.DataFrame()\n list_gpsevents.append(events_gps)\n #GPSevents=pd.concat(list_gpsevents).reset_index(drop=True)\n #return GPSevents\n return list_gpsevents",
"def filter_tracks(df, start_year=1980, end_year=2010, zeta=0, age=36):\n tracks = df.groupby('num')\n filterdf = tracks.filter(lambda x: (x['datetime'].dt.year.min() >= start_year) &\n (x['datetime'].dt.year.max() <= end_year) &\n (x['age'].max() >= age) &\n (np.abs(x['vorticity'].min()) > zeta))\n return filterdf",
"def removeDays(minutes=4*60):\n global masterdf\n masterdf['data'] = masterdf.index.date\n days = []\n for day, group in masterdf.groupby(masterdf.data):\n if len(group) < minutes: # len is number of minutes\n continue\n days.append(day)\n masterdf = masterdf.loc[masterdf.data.isin(days)]\n masterdf.drop('data', axis=1, inplace=True)",
"def correct_lon(ds):\n ds = ds.copy()\n x = ds['x'].data\n ds['x'].data = np.where(x < 0 , 360 + x, x)\n\n lon = ds['lon'].data\n ds['lon'].data = np.where(lon < 0 , 360 + lon, lon)\n \n ds = ds.sortby('x')\n return ds",
"def _geofilter(frame):\r\n try:\r\n import geopandas as gpd\r\n\r\n # Remove rows with no latitude and longitude\r\n try:\r\n\r\n filresults = frame[(frame['ActionGeo_Lat'].notnull()\r\n ) | (frame['ActionGeo_Long'].notnull()\r\n )]\r\n except:\r\n\r\n filresults = frame[(frame['actiongeolat'].notnull()\r\n ) | (frame['actiongeolong'].notnull()\r\n )]\r\n gdf = gpd.GeoDataFrame(filresults.assign(geometry=_parallelize_dataframe(filresults)),\r\n crs={'init': 'epsg:4326'})\r\n gdf.columns = list(map(lambda x: (x.replace('_', \"\")).lower(), gdf.columns))\r\n\r\n final = gdf[gdf.geometry.notnull()]\r\n\r\n return final\r\n\r\n\r\n except BaseException as err: # pragma: no cover\r\n if str(err) == \"import of 'geopandas' halted; None in sys.modules\":\r\n raise ImportError(\"geopandas is not installed. gdeltPyR needs\"\r\n \" geopandas to export as shapefile. Visit http://\"\r\n \"geopandas.org/install.html for instructions.\")\r\n else:\r\n raise ValueError(\"geopandas is installed but the geospatial \"\r\n \"functions failed for some other reason. Review\"\r\n \" the stack trace to see where the failure \"\r\n \"occurred.\")",
"def _point_in_mbr(self, df):\n if df.empty:\n return df\n df = df[(df[\"lat\"] >= self._min_lat) &\n (df[\"lat\"] <= self._max_lat) &\n (df[\"lon\"] >= self._min_lon) &\n (df[\"lon\"] <= self._max_lon)\n ]\n return df",
"def clean_train_test(train):\n\n train[\"Month\"] = train.Date.apply(lambda x: x.month)\n train[\"Year\"] = train.Date.apply(lambda x: x.year)\n train[\"Day\"] = train.Date.apply(lambda x: x.day)\n\n # Doesn't actually seem to help\n #train[\"Latitude_int\"] = train.Latitude.apply(int)\n #train[\"Longitude_int\"] = train.Longitude.apply(int)\n\n c2d = [\"Id\", \"Address\", \"AddressNumberAndStreet\", \"WnvPresent\",\n \"NumMosquitos\"]\n\n for column in c2d:\n if column in train.columns:\n train.drop(column, axis=1, inplace=True)\n\n return train",
"def _truncate_games_df(df, season, season_type):\n return df[(df['season'] != season) | (df['type'] != season_type)]",
"def cleaned(df):\n\n cdf = df.copy()\n lcdf = len(cdf)\n\n danglings = []\n ld = len(danglings)\n\n print('cleaning data frame')\n iteration_times = 1\n while True:\n for index, row in cdf.iterrows():\n if index in danglings:\n cdf = cdf.drop(index)\n elif not (cdf['from'] == row['to']).any():\n danglings.append(index)\n cdf = cdf.drop(index)\n\n if not index % 77:\n print(f'{del_line}{index / lcdf * 100:2.1f}% #{iteration_times}', end='')\n iteration_times += 1\n\n # iterate until `danglings` does not change\n if len(danglings) == ld:\n break\n else:\n ld = len(danglings)\n\n print(f'{del_line}data cleaned with {iteration_times} iterations')\n\n return cdf, np.array(danglings)"
] |
[
"0.6048604",
"0.5930983",
"0.5897412",
"0.5653762",
"0.5554248",
"0.54679614",
"0.543293",
"0.5403124",
"0.5379485",
"0.5308922",
"0.5299097",
"0.5277261",
"0.5236994",
"0.51776224",
"0.515241",
"0.5146878",
"0.5102294",
"0.50780743",
"0.50694305",
"0.50475067",
"0.50459665",
"0.50196826",
"0.5011387",
"0.49942413",
"0.49647427",
"0.49633697",
"0.49579957",
"0.4952161",
"0.49488297",
"0.49255717"
] |
0.7148291
|
0
|
Get descriptor version from index.
|
def descriptorVersion(self) -> int:
    return self._coreIndex.getDescriptorVersion()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_version(self):\r\n\r\n return self.versions[0].number",
"def get_version(self):\n return self.cur_config['version']['name']",
"def get_index(self):\n\t\treturn call_sdk_function('PrlVmDev_GetIndex', self.handle)",
"def __getitem__(self, index: int) -> FaceDescriptor:\n _coreDescriptor = self._faceEngine.createDescriptor(self._coreIndex.getDescriptorVersion())\n error, descriptor = self._coreIndex.descriptorByIndex(index, _coreDescriptor)\n assertError(error)\n\n return FaceDescriptor(descriptor)",
"def currentVersionIndex(self):\n logger.debug(\"Func: currentVersionIndex/getter\")\n\n \"\"\"Returns current Version index at cursor position\"\"\"\n return self._currentVersionIndex",
"def get_version(self):\n pass",
"def get_version(self):\n return self.__make_api_call('get/version')",
"def get_index(self):\n\t\treturn call_sdk_function('PrlBootDev_GetIndex', self.handle)",
"def get(self):\n return self._version",
"def _get_version(self):",
"def read_version(ctl):\n\tr = ctl.bus_read_struct_coherent(tm.status_addr, 'BB')\n\treturn r",
"def index(self):\n return self._data.get('index')",
"def version(self):\n _, body = self.request('/', 'GET')\n return body.get('version', None)",
"def getVersion(self):\n return self.get('Version', type=\"numeric\")",
"def version(self):\r\n return self.definition_id",
"def get_latest_version(self, did, has_version=None):\n with self.session as session:\n query = session.query(IndexRecord)\n query = query.filter(IndexRecord.did == did)\n\n try:\n record = query.one()\n baseid = record.baseid\n except NoResultFound:\n baseid = did\n except MultipleResultsFound:\n raise MultipleRecordsFound('multiple records found')\n\n query = session.query(IndexRecord)\n query = query.filter(IndexRecord.baseid == baseid) \\\n .order_by(IndexRecord.created_date.desc())\n if has_version:\n query = query.filter(IndexRecord.version.isnot(None))\n record = query.first()\n if (not record):\n raise NoRecordFound('no record found')\n\n return record.to_document_dict()",
"def index(self):\n return self.container['index']",
"def identifier_version(self):\n return next(self.graph[self.identifier_bound:owl.versionIRI])",
"def get_version(self):\n return self.version",
"def _get_index(self):\n\n return self.index\n\n # to avoid having differences bigger than 2pi",
"def get_version(self):\n return self._version",
"def get_version(self):\n return self._version",
"def GetVersion(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def get_version_info(self):\n return self._jadeRpc('get_version_info')",
"def get_index(self):\n return self.index",
"def get_index(self):\n return self.index",
"def index(self):\n return self._index",
"def version(self):\n return self._get(\"version\")",
"def get_version(self):\n return self.http_call(\"get\", url=f\"{self.base_url}/version\").json()",
"def getIndex(self):\n return self.index"
] |
[
"0.62470573",
"0.6164937",
"0.6150249",
"0.61311305",
"0.61171246",
"0.61062837",
"0.60857606",
"0.6019658",
"0.5965834",
"0.595268",
"0.5926031",
"0.588588",
"0.5883278",
"0.587447",
"0.5865104",
"0.58440655",
"0.5817548",
"0.5810021",
"0.57959163",
"0.5795582",
"0.57735914",
"0.57735914",
"0.5771934",
"0.57693654",
"0.5748772",
"0.5748772",
"0.5747225",
"0.57432234",
"0.5731969",
"0.57315356"
] |
0.77219415
|
0
|
Get descriptor by index from internal storage.
|
def __getitem__(self, index: int) -> FaceDescriptor:
    _coreDescriptor = self._faceEngine.createDescriptor(self._coreIndex.getDescriptorVersion())
    error, descriptor = self._coreIndex.descriptorByIndex(index, _coreDescriptor)
    assertError(error)
    return FaceDescriptor(descriptor)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_at_index(self, index: int) -> object:\n return self.data[index]",
"def get(self, index):\n raise NotImplementedError() # pragma: no cover",
"def get_by_index(self, index):\n # makes it easier for callers to just pass in a header value\n index = int(index) if index else 0\n return self.by_index.get(index)",
"def __getitem__(self, index):\n if self.hdf5_cache_mode == \"all\":\n return self.getitem_cache[index]\n return self.get_item(index)",
"def get_by_index(self, index):\n if index >= len(self._datasets):\n raise DataInvalidIndex('A dataset with index {} does not exist'.format(index))\n\n return self._datasets[index]",
"def __getitem__(self, index: int) -> object:\n return self.get_at_index(index)",
"def __getitem__(self, index):\n return getattr(self, self.__slots__[index])",
"def get(self, index):\n self.__validate_index(index)\n return self.__list[index]",
"def __getitem__(self, index):\n return self.dataset[index]",
"def __getitem__( self, index ) :\n\n return( self.__entries[index] )",
"def __getitem__(self, index):\r\n\r\n if self._instance is not _unset and index not in self:\r\n self._instance[index]\r\n return self._contents[index]",
"def get(self, idx):\n if idx in self._objects:\n return self._objects[idx]\n else:\n warning(\"%s not found\" % idx)\n return None",
"def __getitem__(self, index):\n if self._constructed is False:\n self._not_constructed_error(index)\n\n try:\n obj = self._data.get(index, _NotFound)\n except TypeError:\n try:\n index = self._processUnhashableIndex(index)\n except TypeError:\n # This index is really unhashable. Set a flag so that\n # we can re-raise the original exception (not this one)\n index = TypeError\n if index is TypeError:\n raise\n if index.__class__ is _IndexedComponent_slice:\n return index\n # The index could have contained constant but nonhashable\n # objects (e.g., scalar immutable Params).\n # _processUnhashableIndex will evaluate those constants, so\n # if it made any changes to the index, we need to re-check\n # the _data dict for membership.\n try:\n obj = self._data.get(index, _NotFound)\n except TypeError:\n obj = _NotFound\n\n if obj is _NotFound:\n # Not good: we have to defer this import to now\n # due to circular imports (expr imports _VarData\n # imports indexed_component, but we need expr\n # here\n from pyomo.core.expr import current as EXPR\n if index.__class__ is EXPR.GetItemExpression:\n return index\n validated_index = self._validate_index(index)\n if validated_index is not index:\n index = validated_index\n # _processUnhashableIndex could have found a slice, or\n # _validate could have found an Ellipsis and returned a\n # slicer\n if index.__class__ is _IndexedComponent_slice:\n return index\n obj = self._data.get(index, _NotFound)\n #\n # Call the _getitem_when_not_present helper to retrieve/return\n # the default value\n #\n if obj is _NotFound:\n return self._getitem_when_not_present(index)\n\n return obj",
"def get(self, tag, index):\n raise NotImplementedError",
"def __getitem__(self, index):\n item = self.data[index]\n return item",
"def get_descriptor(self, uuid):\n return tuple(self.get_many_descriptors(uuid))[0]",
"def __getitem__(self, index):\n\n if self._data_indices is not None:\n index = self._data_indices[index]\n data = self._dataset[index]\n return data",
"def get_symbol_by_index(self, index):\n return self[self._index[index]]",
"def findByIndex(self, obj_index):\n return self.registry.findByIndex(obj_index)",
"def get_item(self, index: int) -> _T:\n return self.index_to_item[index]",
"def get_indexed_item_from_file(index, file_name):\n\n list_in_file = read_list_bin(file_name)\n return get_indexed_item_from_list(index, list_in_file)",
"def __getitem__(self, index):\n return self.components[index]",
"def __getitem__(self, index):\n return self.data[index]",
"def __getitem__(self, index):\n return self.data[index]",
"def __getitem__(self, index):\n if self.valid_index(index):\n return self._data[index]\n else:\n return IndexError",
"def __getitem__(self,idx):\n try:\n return self._cache[idx]\n except:\n pass\n\n try:\n # return full data entry as list\n out = self._data[idx]\n self._cache[idx] = out\n return out\n except:\n try:\n # return data entry with specified key word\n out = self._data[idx[0]][self._header[self._alias[idx[1]]]]\n self._cache[idx] = out\n return out\n except:\n pass",
"def getItem(enum, index):\r\n return enum[list(enum.__members__)[index]]",
"def __getitem__(self, idx):\n assert(isinstance(idx, int))\n nidx = self._normalize_idx(idx)\n if nidx >= len(self.data):\n raise IndexError\n return self.data[nidx]",
"def get_id(self, index):\n return self.__keys[index]",
"def __getitem__(self, index):\n if index >= self.size:\n raise KeyError\n else:\n return self._get_item(index)"
] |
[
"0.67557156",
"0.6576555",
"0.6531874",
"0.65297633",
"0.64009917",
"0.6336537",
"0.63330805",
"0.6243469",
"0.6238573",
"0.6216206",
"0.61165506",
"0.61148983",
"0.611092",
"0.610756",
"0.6094632",
"0.6077617",
"0.60752577",
"0.6068823",
"0.6039814",
"0.6013402",
"0.60041183",
"0.60022736",
"0.5990971",
"0.5990971",
"0.59864366",
"0.5953196",
"0.59444165",
"0.5921046",
"0.59125495",
"0.5911456"
] |
0.70444727
|
0
|
Returns a chunk of guess dataset corresponding to the same pixels of the main dataset.
|
def _read_guess_chunk(self):
    curr_pixels = self._get_pixels_in_current_batch()
    self._guess = self._h5_guess[curr_pixels, :]
    if self.verbose and self.mpi_rank == 0:
        print('Guess of shape: {}'.format(self._guess.shape))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get_guess_chunk(self):\n if self._start_pos < self.max_pos:\n self._current_sho_spec_slice = slice(self.sho_spec_inds_per_forc * self._current_forc,\n self.sho_spec_inds_per_forc * (self._current_forc + 1))\n self._end_pos = int(min(self.h5_projected_loops.shape[0], self._start_pos + self.max_pos))\n self.data = self.h5_projected_loops[self._start_pos:self._end_pos, self._current_sho_spec_slice]\n elif self._current_forc < self._num_forcs - 1:\n # Resest for next FORC\n self._current_forc += 1\n\n self._current_sho_spec_slice = slice(self.sho_spec_inds_per_forc * self._current_forc,\n self.sho_spec_inds_per_forc * (self._current_forc + 1))\n self._current_met_spec_slice = slice(self.metrics_spec_inds_per_forc * self._current_forc,\n self.metrics_spec_inds_per_forc * (self._current_forc + 1))\n self._get_dc_offset()\n\n self._start_pos = 0\n self._end_pos = int(min(self.h5_projected_loops.shape[0], self._start_pos + self.max_pos))\n self.data = self.h5_projected_loops[self._start_pos:self._end_pos, self._current_sho_spec_slice]\n\n else:\n self.data = None\n\n guess = self.h5_guess[self._start_pos:self._end_pos,\n self._current_met_spec_slice].reshape([-1, 1])\n self.guess = flatten_compound_to_real(guess)[:, :-1]",
"def _scan_and_sample_dataset(self, dives):\n roots = [os.path.join(self.p.data_root, n) for n in dives]\n ret = []\n for root in roots:\n h5_files = glob.glob(os.path.join(root, '*.h5'))\n for h5 in h5_files:\n try:\n fgroup = FrameGroup(h5, self.meta)\n except (AssertionError, KeyError, OSError) as e:\n if type(e) == AssertionError:\n print_warn('Unmatched time: {}'.format(h5))\n else:\n print_warn('Corrupted h5: {}'.format(h5))\n continue\n num_samples = int(self.p.downsample * fgroup.num_frames)\n indices = np.random.choice(\n fgroup.num_frames, size=num_samples, replace=False)\n ret.extend([(h5, int(idx)) for idx in indices])\n random.shuffle(ret)\n return ret",
"def _create_guess_datasets(self):\n self.h5_guess = create_empty_dataset(self.h5_loop_metrics, loop_fit32, 'Guess')\n write_simple_attrs(self._h5_group, {'guess method': 'pycroscopy statistical'})\n\n # This is necessary comparing against new runs to avoid re-computation + resuming partial computation\n write_simple_attrs(self.h5_guess, self._parms_dict)\n write_simple_attrs(self.h5_guess, {'Loop_fit_method': \"pycroscopy statistical\", 'last_pixel': 0})\n\n self.h5_main.file.flush()",
"def _write_results_chunk(self):\n if self._is_guess:\n targ_dset = self._h5_guess\n source_dset = self._guess\n else:\n targ_dset = self._h5_fit\n source_dset = self._fit\n\n curr_pixels = self._get_pixels_in_current_batch()\n\n if self.verbose and self.mpi_rank == 0:\n print('Writing data of shape: {} and dtype: {} to position range: '\n '{} in HDF5 dataset:{}'.format(source_dset.shape,\n source_dset.dtype,\n [curr_pixels[0],curr_pixels[-1]],\n targ_dset))\n targ_dset[curr_pixels, :] = source_dset",
"def get_identical_patches(imgs, patch_size):\n ih, iw = imgs[0].shape[:2]\n tp = patch_size\n ix = np.random.randint(0, iw - patch_size)\n iy = np.random.randint(0, ih - patch_size)\n imgs = []\n for i in range(len(imgs)):\n imgs.append(imgs[i][iy:iy + tp, ix:ix + tp, :])\n return imgs",
"def _get_next_minibatch(self):\n images = np.zeros((self._batch_size, 3, self._crop_h, self._crop_w), dtype=np.float32)\n masks = np.zeros((self._batch_size, 1, self._crop_h, self._crop_w), dtype=np.float32)\n\n shuffled_batch = np.arange(self._batch_size)\n np.random.shuffle(shuffled_batch)\n for batch_index in shuffled_batch:\n blob_queue = self._blob_queue.get()\n images[batch_index, :, :, :] = blob_queue[0]\n masks[batch_index, :, :, :] = blob_queue[1]\n\n return [images, masks]",
"def get_a_similar_img(self, imgID, db = \"train\"):\n if db==\"train\":\n the_landmark_id = self.get_landmark_id(imgID)\n subset = self.train_data.loc[self.train_data.landmark_id == the_landmark_id, \"id\"].values \n return self.choose_an_imgID(subset, imgID)\n \n return None",
"def split_data(dataset, ratio = 0.9):\n cutoff_row = int(dataset.shape[0] * ratio)\n return (dataset[:cutoff_row], dataset[cutoff_row:])",
"def __random_pickup(self, guess):\n already_clustered = guess.sum(axis=0)\n while True:\n p1 = random.randint(0, guess.shape[1] - 1)\n p2 = random.randint(0, guess.shape[2] - 1)\n if not already_clustered[p1, p2]:\n return (p1, p2)",
"def __getitem__(self, idx):\n\n if self.test_mode:\n return self.prepare_test_img(idx)\n \n while True:\n data_ori, data_aug = self.prepare_train_img(idx)\n if data_ori is None:\n idx = self._rand_another(idx)\n continue\n \n # Duplicate data\n return data_ori, data_aug",
"def __call__(self, results):\n h, w, c = results['img'].shape\n n_holes = np.random.randint(self.n_holes[0], self.n_holes[1] + 1)\n for _ in range(n_holes):\n x1 = np.random.randint(0, w)\n y1 = np.random.randint(0, h)\n index = np.random.randint(0, len(self.candidates))\n if not self.with_ratio:\n cutout_w, cutout_h = self.candidates[index]\n else:\n cutout_w = int(self.candidates[index][0] * w)\n cutout_h = int(self.candidates[index][1] * h)\n\n x2 = np.clip(x1 + cutout_w, 0, w)\n y2 = np.clip(y1 + cutout_h, 0, h)\n results['img'][y1:y2, x1:x2, :] = self.fill_in\n\n return results",
"def split_data(x, y, ratio, seed=1):\n # set seed\n np.random.seed(seed)\n # ***************************************************\n # INSERT YOUR CODE HERE\n # split the data based on the given ratio: TODO\n # ***************************************************\n ind = np.random.permutation(y.shape[0])\n threshold = int(y.shape[0]*ratio)\n return y[ind[:threshold]], x[ind[:threshold]], y[ind[threshold:]], x[ind[threshold:]]",
"def split_data(x, y, ratio, seed=1):\n # set seed\n np.random.seed(seed)\n packed = np.vstack([y,x]).T\n np.random.shuffle(packed)\n N = y.shape[0]\n eightyN = int(ratio*N)\n xTrain = packed[0:eightyN,1]\n yTrain = packed[0:eightyN,0]\n xTest = packed[eightyN:N, 1]\n yTest = packed[eightyN:N,0]\n # ***************************************************\n # INSERT YOUR CODE HERE\n # split the data based on the given ratio: TODO\n # ***************************************************\n return xTrain, yTrain, xTest, yTest",
"def get_partial_data(x, keep=200):\n range_x = x.size(1)\n print(\"rangex\", range_x)\n\n range_p = range_x - keep - 50\n n = random.randint(25, range_p)\n return x[:, n:n + keep]",
"def load_dataset(image_home, mask_home, patient_list, \n size = 512, \n downsample = 0.5, \n overlap = 1.5, \n verbose=False):\n\n image_list = np.concatenate([sorted(glob.glob(f'{image_home}/{p}/*')) for p in patient_list])\n mask_list = np.concatenate([sorted(glob.glob(f'{mask_home}/{p}/*')) for p in patient_list])\n\n if verbose:\n for i, (im, m) in enumerate(zip(image_list, mask_list)):\n print(i, im, m)\n\n x = []\n y = [] \n\n for im, m in zip(image_list, mask_list):\n image = cv2.imread(im)[:,:,::-1]\n mask = cv2.imread(m, -1)\n mask = squash_labels(mask)\n \n image = cv2.resize(image, dsize=(0,0), fx=downsample, fy=downsample)\n mask = cv2.resize(mask, dsize=(0,0), fx=downsample, fy=downsample,\n interpolation=cv2.INTER_NEAREST)\n\n # assert (image.shape == mask.shape).all()\n split_x , split_y = split(image, mask, int(size * downsample), overlap)\n\n x.append(split_x)\n y.append(split_y)\n\n\n x = np.concatenate(x, axis=0)\n y = np.concatenate(y, axis=0)\n y = np.eye(N=y.shape[0], M=4)[y]\n\n shuffle = np.arange(x.shape[0]).astype(np.int)\n np.random.shuffle(shuffle)\n x = x[shuffle, :]\n y = y[shuffle, :]\n\n x = (x / 255.).astype(np.float32)\n\n print('split_datasets returning x:', x.shape, x.dtype, x.min(), x.max())\n print('split_datasets returning y:', y.shape, y.dtype)\n return x, y",
"def _set_guess(self, h5_guess):\n '''\n Get the Spectroscopic and Position datasets from `self.h5_main`\n '''\n self._sho_spec_inds = self.h5_main.h5_spec_inds\n self._sho_spec_vals = self.h5_main.h5_spec_vals\n self._sho_pos_inds = self.h5_main.h5_pos_inds\n\n '''\n Find the Spectroscopic index for the DC_Offset\n '''\n fit_ind = np.argwhere(get_attr(self._sho_spec_vals, 'labels') == self._fit_dim_name).squeeze()\n self._fit_spec_index = fit_ind\n self._fit_offset_index = 1 + fit_ind\n\n '''\n Get the group and projection datasets\n '''\n self._h5_group = h5_guess.parent\n self.h5_projected_loops = self._h5_group['Projected_Loops']\n self.h5_loop_metrics = self._h5_group['Loop_Metrics']\n self._met_spec_inds = self._h5_group['Loop_Metrics_Indices']\n\n self.h5_guess = h5_guess",
"def stack_red_detect(self):\n self.redundancy_pool.clear()\n\n for nslice in np.arange(self.nz-1):\n self._red_detect_(nslice, thresh = 1.0)\n\n # OK, let's check the the size of the pool and remove them one by one.\n dist_3d = np.zeros((0, 4)) # create an empty array to save z, y, x, f\n\n\n for sl_key, sl_value in self.redundancy_pool.items():\n z_start = sl_value.z_marker # where does the z_marker starts\n z_list = np.array(sl_value.list) # convert it into a 2d array\n z_key = 's_' + format(z_start, '03d')\n zframe_0 = self.z_dense[z_key]\n z_identifier = int(sl_key[3:]) - z_start*1000 # which cell?\n\n pz = self.z_step*np.inner(z_list[:,0], z_list[:,1])/z_list[:,1].sum() # weighted average estimation\n py, px = zframe_0[z_identifier, 0:2] # The x-y coordinates\n pf = zframe_0[z_identifier, 4] # the fluorescence\n\n\n new_entry = np.array([[pz, py, px, pf]])\n dist_3d = np.concatenate((dist_3d, new_entry), axis = 0)\n\n ord_z = np.argsort(dist_3d[:,0], axis = 0)\n # sort in the order of Z.\n\n\n self.dist_3d = dist_3d[ord_z, :]\n\n return dist_3d",
"def split_data_by_image(self, test_fraction=0.5):\n image_id = BaseModel.get_image_id(self.inputs)\n test_idx = np.random.random(image_id.max()+1) <= test_fraction\n\n # Low image count edge case (mostly just for testing purposes)\n if True not in test_idx:\n test_idx[0] = True\n elif False not in test_idx:\n test_idx[0] = False\n \n test_idx = test_idx[image_id]\n if BaseModel.is_laue(self.inputs):\n train, test = self.split_laue_data_by_mask(test_idx)\n else:\n train, test = self.split_mono_data_by_mask(test_idx)\n\n #return self.get_tf_dataset(train), self.get_tf_dataset(test)\n return train, test",
"def get_image_list(im, dic):\n lst = list(im.getdata())\n tiles = []\n for i in range(len(lst)):\n #print find_similar(lst[i], dic)[random.randrange(10)][1]\n tiles.append(find_similar(lst[i], dic)[random.randrange(10)][1])\n return tiles",
"def _get_data_chunk(self):\n if self._start_pos < self.max_pos:\n self._current_sho_spec_slice = slice(self.sho_spec_inds_per_forc * self._current_forc,\n self.sho_spec_inds_per_forc * (self._current_forc + 1))\n self._end_pos = int(min(self.h5_main.shape[0], self._start_pos + self.max_pos))\n self.data = self.h5_main[self._start_pos:self._end_pos, self._current_sho_spec_slice]\n elif self._current_forc < self._num_forcs - 1:\n # Resest for next FORC\n self._current_forc += 1\n\n self._current_sho_spec_slice = slice(self.sho_spec_inds_per_forc * self._current_forc,\n self.sho_spec_inds_per_forc * (self._current_forc + 1))\n self._current_met_spec_slice = slice(self.metrics_spec_inds_per_forc * self._current_forc,\n self.metrics_spec_inds_per_forc * (self._current_forc + 1))\n self._get_dc_offset()\n\n self._start_pos = 0\n self._end_pos = int(min(self.h5_main.shape[0], self._start_pos + self.max_pos))\n self.data = self.h5_main[self._start_pos:self._end_pos, self._current_sho_spec_slice]\n\n else:\n self.data = None\n\n return",
"def split_dataset(x_test, y_test, dev_ratio):\n test_size = len(x_test)\n print(test_size)\n dev_size = (int)(test_size * dev_ratio)\n print(dev_size)\n x_dev = x_test[:dev_size]\n x_test = x_test[dev_size:]\n y_dev = y_test[:dev_size]\n y_test = y_test[dev_size:]\n return x_test, y_test",
"def _get_data_for_tests():\n X = np.random.randn(100, input_dim)\n Y = np.random.randn(100, output_dim)\n X_new = np.random.randn(100, input_dim)\n return X, X_new, Y",
"def _unique_beams(self):\n bmap, mask = self.single_pointing_telescope._unique_beams()\n block_bmap = linalg.block_diag(*[bmap+i*self.single_pointing_telescope.nfeed for i, _ in enumerate(self.pointings)])\n block_mask = linalg.block_diag(*[mask for _ in self.pointings])\n\n return block_bmap, block_mask",
"def next_example(self, K):\n if self._ptr == len(self.em_data):\n self._ptr = 0\n\n i = self._ptr\n\n # select slice\n em_img = self.em_data[i]\n seg_img = self.seg_data[i]\n\n # crop region\n dx = np.random.randint(0,1024-572)\n dy = np.random.randint(0,1024-572)\n\n em_img = em_img[dx:dx+572, dy: dy+572]\n seg_img = seg_img[dx:dx+572, dy: dy+572]\n l = (572-388)//2\n u = 388+l\n seg_img = seg_img[l:u, l:u]\n\n # add channel dimension\n em_img = np.expand_dims(em_img, axis=-1)\n seg_img = np.expand_dims(seg_img, axis=-1)\n\n # create mask_list\n mask_list = self.create_mask_list(seg_img, K)\n\n # preprocess\n em_img = self.preprocess(em_img)\n\n # augment\n imgs = augment.augment_example([em_img]+mask_list)\n em_img = imgs[0]\n mask_list = imgs[1:]\n\n # Get seed points\n seed_list = [np.mean(np.where(mask[:,:,0]),axis=1).astype(int) for mask in mask_list]\n \n # increment ptr\n self._ptr += 1\n\n return em_img, mask_list, seed_list",
"def _prepare_sets(self):\n\n ds_images, ds_labels = self._load_images_labels()\n\n ds_images_2 = ds_images.take(self.val_count)\n ds_labels_2 = ds_labels.take(self.val_count)\n ds_images_1 = ds_images.skip(self.val_count)\n ds_labels_1 = ds_labels.skip(self.val_count)\n\n ds_1 = (ds_images_1, ds_labels_1)\n ds_2 = (ds_images_2, ds_labels_2)\n\n return ds_1, ds_2",
"def check_dataset(*, low_path: str, high_path: str, count: int = 1):\n with open(high_path, \"rb\") as s_file:\n src_data: np.array = np.load(s_file)\n\n with open(low_path, \"rb\") as s_file:\n res_data: np.array = np.load(s_file)\n\n assert src_data.shape == res_data.shape\n n, m = res_data.shape\n core_size = int(np.sqrt(m / LAYERS))\n assert core_size ** 2 * LAYERS == m\n k = core_size * 4\n\n for _ in range(count):\n img = np.zeros(\n (core_size, k, LAYERS), dtype=res_data.dtype\n )\n i = random.randint(0, n)\n res_row = res_data[i]\n src_row = src_data[i]\n\n mask = create_percent_diff(src_row, res_row)\n restored_src = apply_diff(res_row, mask)\n for l_i, layer_mask in enumerate(np.reshape(mask, (LAYERS, core_size, core_size))): # noqa\n print(f\"layer {l_i} mask:\")\n for row in layer_mask:\n print(\",\".join(map(\"{: >3}\".format, row)))\n\n nopy_restore_area(\n img[:, 0:core_size, :], src_row, core_size, LAYERS\n )\n nopy_restore_area(\n img[:, core_size:core_size * 2, :], res_row, core_size, LAYERS\n )\n nopy_restore_area(\n img[:, core_size * 2:core_size * 3, :], mask, core_size, LAYERS\n )\n nopy_restore_area(\n img[:, core_size * 3:k, :], restored_src, core_size, LAYERS\n )\n plt.imshow(Image.fromarray(img))\n plt.show(block=True)",
"def subsampleData(self, count):\n size = 0\n for block in self.blocks: size += len(block[1])\n subset = numpy.random.permutation(size)[:count]\n subset.sort()\n\n pos = 0\n index = 0\n ret = Dataset()\n for block in self.blocks:\n while subset[index]<(pos+len(block[1])):\n loc = subset[index] - pos\n ret.add(block[0][loc,:], block[1][loc])\n index += 1\n if index==subset.shape[0]: return ret\n pos += len(block[1])\n \n return ret",
"def test_data():\n global _MEAN # pylint: disable=global-statement\n _np.random.seed(1)\n view = _skdc10.view.OfficialImageClassificationTask()\n permutation = _np.random.permutation(range(10000))\n if _MEAN is None:\n _MEAN = view.train.x.reshape((50000 * 32 * 32, 3)).mean(axis=0)\n return ((view.test.x[:10000, :][permutation, :] - _MEAN).\n transpose((0, 3, 1, 2)).astype('float32'),\n view.test.y[:10000][permutation].reshape((10000, 1)).astype('float32'))",
"def split_data(self, data, ratio=0.7, shuffle=True, seed=0):\n if shuffle:\n random.seed(seed) # fix to default seed 0\n random.shuffle(data)\n\n size = int(len(data) * ratio)\n data_1 = data[:size]\n data_2 = data[size:]\n\n return data_1, data_2",
"def _create_guess_datasets(self):\n raise NotImplementedError('Please override the _create_guess_datasets '\n 'specific to your model')"
] |
[
"0.6148216",
"0.5663909",
"0.5594921",
"0.53717375",
"0.5326499",
"0.5301302",
"0.5206093",
"0.51859504",
"0.51695466",
"0.51634556",
"0.512264",
"0.511285",
"0.50646293",
"0.5054581",
"0.50458497",
"0.50453305",
"0.503205",
"0.5013712",
"0.50062114",
"0.50025904",
"0.49989948",
"0.49806303",
"0.49759102",
"0.49701333",
"0.4966671",
"0.4952692",
"0.49526116",
"0.4937598",
"0.4936838",
"0.49335524"
] |
0.6047562
|
1
|
Writes the guess or fit results into appropriate HDF5 datasets.
|
def _write_results_chunk(self):
if self._is_guess:
targ_dset = self._h5_guess
source_dset = self._guess
else:
targ_dset = self._h5_fit
source_dset = self._fit
curr_pixels = self._get_pixels_in_current_batch()
if self.verbose and self.mpi_rank == 0:
print('Writing data of shape: {} and dtype: {} to position range: '
'{} in HDF5 dataset:{}'.format(source_dset.shape,
source_dset.dtype,
[curr_pixels[0],curr_pixels[-1]],
targ_dset))
targ_dset[curr_pixels, :] = source_dset
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def save_as_hdf5(self, filename):",
"def _create_guess_datasets(self):\n self.h5_guess = create_empty_dataset(self.h5_loop_metrics, loop_fit32, 'Guess')\n write_simple_attrs(self._h5_group, {'guess method': 'pycroscopy statistical'})\n\n # This is necessary comparing against new runs to avoid re-computation + resuming partial computation\n write_simple_attrs(self.h5_guess, self._parms_dict)\n write_simple_attrs(self.h5_guess, {'Loop_fit_method': \"pycroscopy statistical\", 'last_pixel': 0})\n\n self.h5_main.file.flush()",
"def SaveResultsToH5(self):\n\n try: \n wildcard = \"HDF5 files (*.hdf5)|*.hdf5\"\n dialog = wx.FileDialog(None, \"Save as .hdf5\", wildcard=wildcard,\n style=wx.SAVE|wx.OVERWRITE_PROMPT)\n\n if dialog.ShowModal() == wx.ID_OK:\n filepath = dialog.GetPath()\n self.page1.filename = dialog.GetFilename()\n dir = dialog.GetDirectory()\n \n self.common.path = dir\n self.common.filename = self.page1.filename\n\n wx.BeginBusyCursor() \n self.stk.write_results_h5(filepath, self.data_struct, self.anlz) \n wx.EndBusyCursor() \n\n except:\n\n wx.EndBusyCursor()\n wx.MessageBox(\"Could not save HDF5 file.\")\n \n dialog.Destroy()\n self.refresh_widgets()\n \n return",
"def write(self,data): \n \n if not os.path.exists(self.output_dir):\n os.makedirs(self.output_dir)\n\n # We will store these in a separate file and link them to the level2s\n fname = data.filename.split('/')[-1]\n units = {'A':'K','x0':'degrees','y0':'degrees','sigx':'degrees','sigy':'degrees','sigy_scale':'none','B':'K','phi':'radians'}\n\n outfile = '{}/{}_{}'.format(self.output_dir,self.prefix,fname)\n\n print ('WRITING: ',outfile)\n output = h5py.File(outfile,'a')\n\n # Set permissions and group\n os.chmod(outfile,0o664)\n shutil.chown(outfile, group='comap')\n\n ##\n ## Narrow channel fits\n ##\n\n for valerr in ['Values','Errors','Chi2']:\n if f'Gauss_Narrow_{valerr}' in output:\n del output[f'Gauss_Narrow_{valerr}']\n gauss_fits = output.create_group(f'Gauss_Narrow_{valerr}')\n gauss_fits.attrs['FitFunc'] = self.model.__name__\n gauss_fits.attrs['source_el'] = self.source_positions['mean_el']\n gauss_fits.attrs['source_az'] = self.source_positions['mean_az']\n\n dnames = self.map_parameters\n dsets = [self.map_fits[valerr][...,iparam] for iparam in range(self.map_fits[valerr].shape[-1])]\n\n for (dname, dset) in zip(dnames, dsets):\n if dname in output:\n del output[dname]\n print(dname,dset.shape,units[dname])\n gauss_dset = gauss_fits.create_dataset(dname, data=dset)\n gauss_dset.attrs['Unit'] = units[dname]\n \n\n output.attrs['SourceFittingVersion'] = __version__\n output.attrs['source'] = self.getSource(data)\n output.close()\n self.linkfile(data)",
"def resultInHDF5(self, iStep):\n filePath = os.path.expanduser('~/LBMResults')\n resultFile = filePath + '/SimulationResults.h5'\n dataFile = tb.open_file(resultFile, 'a')\n #output the densities of fluids\n for i in sp.arange(self.typesFluids):\n dataFile.create_array('/FluidMacro', 'FluidDensityType%gin%g' % (i, iStep), \\\n self.fluidsDensity[i])\n dataFile.create_array('/FluidVelocity', 'FluidVelocityXAt%g' % iStep, \\\n self.physicalVX)\n dataFile.create_array('/FluidVelocity', 'FluidVelocityYAt%g' % iStep, \\\n self.physicalVY)\n dataFile.close()",
"def _create_fit_datasets(self):\n\n if self.h5_guess is None:\n warn('Need to guess before fitting!')\n return\n\n self.h5_fit = create_empty_dataset(self.h5_guess, loop_fit32, 'Fit')\n write_simple_attrs(self._h5_group, {'fit method': 'pycroscopy functional'})\n\n # This is necessary comparing against new runs to avoid re-computation + resuming partial computation\n write_simple_attrs(self.h5_fit, self._parms_dict)\n write_simple_attrs(self.h5_fit, {'Loop_fit_method': \"pycroscopy functional\", 'last_pixel': 0})",
"def write(self,data): \n if not os.path.exists(self.output_dir):\n os.makedirs(self.output_dir)\n\n # We will store these in a separate file and link them to the level2s\n fname = data.filename.split('/')[-1]\n \n if os.path.exists(self.outfile):\n output = h5py.File(self.outfile,'a')\n else:\n output = h5py.File(self.outfile,'w')\n\n # Set permissions and group\n if self.set_permissions:\n try:\n os.chmod(self.outfile,0o664)\n shutil.chown(self.outfile, group=self.permissions_group)\n except PermissionError:\n self.logger(f'{fname}:{self.name}: Warning, couldnt set the file permissions.')\n\n # Store datasets in root\n data_out = {'tod':self.all_tod,\n 'weights':self.all_weights,\n 'mask':self.all_mask,\n 'cal_factors':self.all_cal_factors,\n 'frequency':self.all_frequency,\n 'auto_rms':self.all_auto}\n\n for dname, dset in data_out.items():\n if dname in output:\n del output[dname]\n output.create_dataset(dname, data=dset)\n\n output.attrs['version'] = __level3_version__\n output['cal_factors'].attrs['source'] = self.cal_source\n output['cal_factors'].attrs['calibrator_obsid'] = self.nearest_calibrator\n\n output.close()\n \n if self.level3 in data.keys():\n del data[self.level3]\n data[self.level3] = h5py.ExternalLink(self.outfile,'/')",
"def write_data_files(self):\n \n logging.info('\\n Start writing data files \\n')\n \n for i, (data_file, label_file) in enumerate(self.files):\n data_file, label_file = Path(data_file), Path(label_file)\n logging.info('Writing .hdf5 file for : [{}]'.format(str(data_file)))\n \n file_name = self.save_data_folder / '{}.hdf5'.format(label_file.name[:-4])\n if file_name.exists():\n continue\n \n with h5py.File(str(file_name), 'w') as writer:\n self.serialize_samples(\n writer, data_file, label_file)",
"def save_to_hd5(out_file, x_train, y_train, x_val, y_val, x_test, y_test):\n data = h5py.File(out_file, \"w\")\n train_data = data.create_group(\"train_data\")\n train_data.create_dataset(\"x_train\", data=x_train)\n train_data.create_dataset(\"y_train\", data=y_train)\n if x_val is not None:\n val_data = data.create_group(\"val_data\")\n val_data.create_dataset(\"x_val\", data=x_val)\n val_data.create_dataset(\"y_val\", data=y_val)\n if x_test is not None:\n test_data = data.create_group(\"test_data\")\n test_data.create_dataset(\"x_test\", data=x_test)\n test_data.create_dataset(\"y_test\", data=y_test)\n data.close()",
"def write_hdf5( self, iteration ) :\n # Before opening the file, select the particles that\n # need to be written for each species\n # (This allows to know the number of particles to be written,\n # which is needed when setting up the file)\n select_array_dict = {}\n selected_nlocals_dict = {}\n selected_nglobal_dict = {}\n # Loop over the different species, select the particles and fill\n # select_array_dict, selected_nlocals_dict, selected_nglobal_dict\n for species_name in sorted(self.species_dict.keys()):\n # Select the particles that will be written\n species = self.species_dict[species_name]\n select_array_dict[species_name] = self.apply_selection( species )\n # Get their total number\n n = select_array_dict[species_name].sum()\n if self.comm_world is not None :\n # In MPI mode: gather and broadcast an array containing\n # the number of particles on each process\n selected_nlocals_dict[species_name] = mpiallgather( n )\n selected_nglobal_dict[species_name] = \\\n sum(selected_nlocals_dict[species_name])\n else:\n # Single-proc output\n selected_nlocals_dict[species_name] = None\n selected_nglobal_dict[species_name] = n\n\n # Find the file name\n filename = \"data%08d.h5\" %iteration\n fullpath = os.path.join( self.write_dir, \"hdf5\", filename )\n\n # Create the file and setup its attributes\n # (can be done by one proc or in parallel)\n self.create_file_empty_particles( fullpath, self.top.it,\n self.top.time, self.top.dt, selected_nglobal_dict )\n\n # Open the file again (possibly in parallel)\n f = self.open_file( fullpath, parallel_open=self.lparallel_output )\n # (f is None if this processor does not participate in writing data)\n\n # Loop over the different species and write the requested quantities\n for species_name in sorted(self.species_dict.keys()) :\n\n # Get the HDF5 species group\n if f is not None:\n species_path = \"/data/%d/particles/%s\"%(iteration,species_name)\n species_grp = f[species_path]\n else:\n species_grp = None\n\n # Get the relevant species object and selection array\n species = self.species_dict[species_name]\n select_array = select_array_dict[species_name]\n n_rank = selected_nlocals_dict[species_name]\n\n # Write the datasets for each particle datatype\n self.write_particles( species_grp, species, n_rank, select_array )\n\n # Close the file\n if f is not None:\n f.close()",
"def WriteHDF5(self, filename=None, external_fields=None):\n\n # DO NOT WRITE IF POINTS DO NOT EXIST - THIS IS TO PREVENT ACCIDENTAL WRITING OF\n # POTENTIALLU EMPTY MESH OBJECT\n if self.points is None:\n warn(\"Nothing to write\")\n return\n\n Dict = deepcopy(self.__dict__)\n\n if external_fields is not None:\n if isinstance(external_fields,dict):\n Dict.update(external_fields)\n elif isinstance(external_fields,tuple):\n for counter, fields in enumerate(external_fields):\n Dict['results_'+str(counter)] = fields\n else:\n raise AssertionError(\"Fields should be either tuple or a dict\")\n\n if filename is None:\n pwd = os.path.dirname(os.path.realpath(__file__))\n filename = pwd+'/output.mat'\n\n for key in list(Dict.keys()):\n if Dict[str(key)] is None:\n del Dict[str(key)]\n\n savemat(filename, Dict, do_compression=True)",
"def export_to_hdf5(cls, h5_file, model, eids):\n #comments = []\n pids = []\n nodes = []\n x = []\n g0 = []\n offt = []\n unused_bit = []\n pa = []\n pb = []\n wa = []\n wb = []\n nan = np.full(3, np.nan)\n encoding = model._encoding\n for eid in eids:\n element = model.elements[eid]\n #comments.append(element.comment)\n pids.append(element.pid)\n nodes.append(element.nodes)\n if element.g0 is None:\n x.append(element.x)\n g0.append(-1)\n else:\n x.append(nan)\n g0.append(element.g0)\n\n offti = element.offt\n if isinstance(offti, integer_types):\n offti = str(offti)\n offt.append(offti.encode(encoding))\n pa.append(element.pa)\n pb.append(element.pb)\n wa.append(element.wa)\n wb.append(element.wb)\n #h5_file.create_dataset('_comment', data=comments)\n h5_file.create_dataset('eid', data=eids)\n h5_file.create_dataset('nodes', data=nodes)\n h5_file.create_dataset('pid', data=pids)\n #print('x =', x)\n #print('g0 =', g0)\n h5_file.create_dataset('x', data=x)\n h5_file.create_dataset('g0', data=g0)\n h5_file.create_dataset('offt', data=offt)\n\n h5_file.create_dataset('pa', data=pa)\n h5_file.create_dataset('pb', data=pb)\n\n h5_file.create_dataset('wa', data=wa)\n h5_file.create_dataset('wb', data=wb)",
"def write_results(location, classes, results):\n check_out(location)\n # these are tuples consisting of list of values, mean of the values and std the values\n [accuracies, precisions, recalls, f1scores] = results\n\n file_name = []\n for class_ in classes:\n if class_.endswith('.hdf5'):\n # file_name.append(re.findall('\\d/(.*).hdf5$', class_)[0])\n file_name.append(re.findall('([%.\\w-]*).hdf5$', class_)[0])\n\n with open(os.path.join(location, '-'.join(file_name) + '.results'), 'w') as out_file:\n for class_id, class_ in enumerate(classes):\n out_file.write(file_name[class_id].split('_')[0] + '\\n')\n # out_file.write(infos)\n # out_file.write(' '.join([str(header_item) for header_item in headers[class_id]]))\n out_file.write('\\n\\n')\n\n out_file.write(\"Mean accuracy is {:.2f} with a std of ({:.2f}).\\n\".format(accuracies[1], accuracies[2]))\n out_file.write('\\n'.join([str(accuracy) for accuracy in accuracies[0]]) + '\\n')\n\n if len(classes) == 2:\n out_file.write(\"\\nMean precision is {:.2f} with a std of ({:.2f}).\\n\".format(precisions[1], precisions[2]))\n out_file.write('\\n'.join([str(precision) for precision in precisions[0]]) + '\\n')\n\n out_file.write(\"\\nMean recall is {:.2f} with a std of ({:.2f}).\\n\".format(recalls[1], recalls[2]))\n out_file.write('\\n'.join([str(recall) for recall in recalls[0]]) + '\\n')\n\n out_file.write(\"\\nMean f1 score is {:.2f} with a std of ({:.2f}).\\n\".format(f1scores[1], f1scores[2]))\n out_file.write('\\n'.join([str(f1score) for f1score in f1scores[0]]) + '\\n')",
"def export_data(self):\n folder = os.path.dirname(self.filename[0])\n filename_ext = os.path.basename(self.filename[0])\n filename = os.path.splitext(filename_ext)[0] #get filename without extension\n\n path = folder + \"/\" + filename + \"_fit_results.txt\"\n if not os.path.exists(path):\n file = open(path, \"w+\")\n else:\n file = open(path, \"a+\")\n\n for i in range(len(self.data_list)):\n file.write(self.data_list[i] + \"\\n\\n\")\n\n self.data_list = []\n file.close()",
"def write_results_dat(self, output_path):\n\n def fstr(nb):\n data = '%E' % nb\n if data == 'NAN':\n nb, power = 0,0\n else:\n nb, power = data.split('E')\n nb = float(nb) /10\n power = int(power) + 1\n return '%.5fE%+03i' %(nb,power)\n\n line = '%s %s %s %i %i %i %i %s %s %s %s %s %i\\n' % (fstr(self.axsec), fstr(self.xerru), \n fstr(self.xerrc), self.nevents, self.nw, self.maxit, self.nunwgt,\n fstr(self.luminosity), fstr(self.wgt), fstr(self.xsec), fstr(self.maxwgt),\n fstr(self.th_maxwgt), self.th_nunwgt) \n fsock = open(output_path,'w') \n fsock.writelines(line)\n for i in range(len(self.ysec_iter)):\n line = '%s %s %s %s %s %s\\n' % (i+1, self.ysec_iter[i], self.yerr_iter[i], \n self.eff_iter[i], self.maxwgt_iter[i], self.yasec_iter[i]) \n fsock.writelines(line)",
"def save_hdf5(self, analysis_name, analysis_metadata, list_results, results_names, file_name, file_settings, results_dir):\n\n # First thing is to create a separate folder within the results directory for this analysis\n hdf5_dir = os.path.join(results_dir, 'HDF5')\n print(\"Analysis Results will be saved in folder: \", hdf5_dir)\n if not os.path.exists(hdf5_dir):\n os.mkdir(hdf5_dir) # If not, create the directory to store results\n\n hdf5_file = file_name + '.hdf5'\n # Check whether the file already exists\n if os.path.isfile(os.path.join(hdf5_dir, hdf5_file)): # Overwrite it\n print(\"HDF5 file already exists. Adding analysis\")\n with h5py.File(os.path.join(hdf5_dir, hdf5_file), 'r+') as f:\n\n # Here we have 2 options: either we are adding another analysis type or we are adding the same type\n # but with different settings (such as at a different surface)\n file_keys = list(f.keys())\n if analysis_name in file_keys:\n print(\"Analysis type already exists\")\n analysis_group = f[analysis_name]\n # we need to know how many analyses of the same type already exist\n subgroup_keys = list(analysis_group.keys())\n subgroup_number = len(subgroup_keys) # if [0, 1] already exist we call it '2'\n subgroup = analysis_group.create_group(str(subgroup_number))\n # Save results datasets\n for array, array_name in zip(list_results, results_names + ['WAVELENGTHS']):\n data = subgroup.create_dataset(array_name, data=array)\n # Save analysis metadata\n subgroup.attrs['E2E Python Git Hash'] = sha\n subgroup.attrs['Analysis Type'] = analysis_name\n date_created = datetime.datetime.now().strftime(\"%c\")\n subgroup.attrs['Date'] = date_created\n subgroup.attrs['At Surface #'] = file_settings['surface']\n # Add whatever extra metadata we have:\n for key, value in analysis_metadata.items():\n subgroup.attrs[key] = value\n\n else: # It's a new analysis type\n\n # (2) Create a Group for this analysis\n analysis_group = f.create_group(analysis_name)\n # (3) Create a Sub-Group so that we can have the same analysis at multiple surfaces / wavelength ranges\n subgroup = analysis_group.create_group('0')\n # Save results datasets\n for array, array_name in zip(list_results, results_names + ['WAVELENGTHS']):\n data = subgroup.create_dataset(array_name, data=array)\n # Save analysis metadata\n subgroup.attrs['E2E Python Git Hash'] = sha\n subgroup.attrs['Analysis Type'] = analysis_name\n date_created = datetime.datetime.now().strftime(\"%c\")\n subgroup.attrs['Date'] = date_created\n subgroup.attrs['At Surface #'] = file_settings['surface']\n # Add whatever extra metadata we have:\n for key, value in analysis_metadata.items():\n subgroup.attrs[key] = value\n\n else: # File does not exist, we create it now\n print(\"Creating HDF5 file: \", hdf5_file)\n with h5py.File(os.path.join(hdf5_dir, hdf5_file), 'w') as f:\n\n # (1) Save Zemax Metadata\n zemax_metadata = f.create_group('Zemax Metadata')\n zemax_metadata.attrs['(1) Zemax File'] = file_name\n zemax_metadata.attrs['(2) System Mode'] = file_settings['system']\n zemax_metadata.attrs['(3) Spaxel Scale'] = file_settings['scale']\n zemax_metadata.attrs['(4) IFU'] = file_settings['ifu']\n zemax_metadata.attrs['(5) Grating'] = file_settings['grating']\n AO = file_settings['AO_mode'] if 'AO_mode' in list(file_settings.keys()) else 'NA'\n zemax_metadata.attrs['(6) AO Mode'] = AO\n\n # (2) Create a Group for this analysis\n analysis_group = f.create_group(analysis_name)\n\n # (3) Create a Sub-Group so that we can have the same analysis at multiple surfaces / wavelength 
ranges\n subgroup = analysis_group.create_group('0')\n # Save results datasets\n for array, array_name in zip(list_results, results_names + ['WAVELENGTHS']):\n data = subgroup.create_dataset(array_name, data=array)\n # Save analysis metadata\n subgroup.attrs['E2E Python Git Hash'] = sha\n subgroup.attrs['Analysis Type'] = analysis_name\n date_created = datetime.datetime.now().strftime(\"%c\")\n subgroup.attrs['Date'] = date_created\n subgroup.attrs['At Surface #'] = file_settings['surface']\n # Add whatever extra metadata we have:\n for key, value in analysis_metadata.items():\n subgroup.attrs[key] = value\n\n return",
"def save_hdf5(self, analysis_name, analysis_metadata, list_results, results_names, file_name, file_settings, results_dir):\n\n # First thing is to create a separate folder within the results directory for this analysis\n hdf5_dir = os.path.join(results_dir, 'HDF5')\n print(\"Analysis Results will be saved in folder: \", hdf5_dir)\n if not os.path.exists(hdf5_dir):\n os.mkdir(hdf5_dir) # If not, create the directory to store results\n\n hdf5_file = file_name + '.hdf5'\n # Check whether the file already exists\n if os.path.isfile(os.path.join(hdf5_dir, hdf5_file)): # Overwrite it\n print(\"HDF5 file already exists. Adding analysis\")\n with h5py.File(os.path.join(hdf5_dir, hdf5_file), 'r+') as f:\n\n # Here we have 2 options: either we are adding another analysis type or we are adding the same type\n # but with different settings (such as at a different surface)\n file_keys = list(f.keys())\n if analysis_name in file_keys:\n print(\"Analysis type already exists\")\n analysis_group = f[analysis_name]\n # we need to know how many analyses of the same type already exist\n subgroup_keys = list(analysis_group.keys())\n subgroup_number = len(subgroup_keys) # if [0, 1] already exist we call it '2'\n subgroup = analysis_group.create_group(str(subgroup_number))\n # Save results datasets\n for array, array_name in zip(list_results, results_names + ['WAVELENGTHS']):\n data = subgroup.create_dataset(array_name, data=array)\n # Save analysis metadata\n subgroup.attrs['E2E Python Git Hash'] = sha\n subgroup.attrs['Analysis Type'] = analysis_name\n date_created = datetime.datetime.now().strftime(\"%c\")\n subgroup.attrs['Date'] = date_created\n subgroup.attrs['At Surface #'] = file_settings['surface']\n # Add whatever extra metadata we have:\n for key, value in analysis_metadata.items():\n subgroup.attrs[key] = value\n\n else: # It's a new analysis type\n\n # (2) Create a Group for this analysis\n analysis_group = f.create_group(analysis_name)\n # (3) Create a Sub-Group so that we can have the same analysis at multiple surfaces / wavelength ranges\n subgroup = analysis_group.create_group('0')\n # Save results datasets\n for array, array_name in zip(list_results, results_names + ['WAVELENGTHS']):\n data = subgroup.create_dataset(array_name, data=array)\n # Save analysis metadata\n subgroup.attrs['E2E Python Git Hash'] = sha\n subgroup.attrs['Analysis Type'] = analysis_name\n date_created = datetime.datetime.now().strftime(\"%c\")\n subgroup.attrs['Date'] = date_created\n subgroup.attrs['At Surface #'] = file_settings['surface']\n # Add whatever extra metadata we have:\n for key, value in analysis_metadata.items():\n subgroup.attrs[key] = value\n\n else: # File does not exist, we create it now\n print(\"Creating HDF5 file: \", hdf5_file)\n with h5py.File(os.path.join(hdf5_dir, hdf5_file), 'w') as f:\n\n # (1) Save Zemax Metadata\n zemax_metadata = f.create_group('Zemax Metadata')\n zemax_metadata.attrs['(1) Zemax File'] = file_name\n zemax_metadata.attrs['(2) System Mode'] = file_settings['system']\n zemax_metadata.attrs['(3) Spaxel Scale'] = file_settings['scale']\n zemax_metadata.attrs['(4) IFU'] = file_settings['ifu']\n zemax_metadata.attrs['(5) Grating'] = file_settings['grating']\n AO = file_settings['AO_mode'] if 'AO_mode' in list(file_settings.keys()) else 'NA'\n zemax_metadata.attrs['(6) AO Mode'] = AO\n\n # (2) Create a Group for this analysis\n analysis_group = f.create_group(analysis_name)\n\n # (3) Create a Sub-Group so that we can have the same analysis at multiple surfaces / wavelength 
ranges\n subgroup = analysis_group.create_group('0')\n # Save results datasets\n for array, array_name in zip(list_results, results_names + ['WAVELENGTHS']):\n data = subgroup.create_dataset(array_name, data=array)\n # Save analysis metadata\n subgroup.attrs['E2E Python Git Hash'] = sha\n subgroup.attrs['Analysis Type'] = analysis_name\n date_created = datetime.datetime.now().strftime(\"%c\")\n subgroup.attrs['Date'] = date_created\n subgroup.attrs['At Surface #'] = file_settings['surface']\n # Add whatever extra metadata we have:\n for key, value in analysis_metadata.items():\n subgroup.attrs[key] = value\n\n return",
"def _write_dataset(self):\n if self.output_file:\n if self.dryrun:\n sys.stdout.write(f\"{self.output_file.resolve()}\\n{self.parameter_study}\\n\")\n else:\n self.output_file.parent.mkdir(parents=True, exist_ok=True)\n self._conditionally_write_dataset(self.output_file, self.parameter_study)\n else:\n for parameter_set_file, parameter_set in self.parameter_study.groupby(_set_coordinate_key):\n parameter_set_file = pathlib.Path(parameter_set_file)\n # If no output file template is provided, print to stdout\n if not self.provided_output_file_template:\n sys.stdout.write(f\"{parameter_set_file.name}\\n{parameter_set}\")\n sys.stdout.write(\"\\n\")\n # If overwrite is specified or if file doesn't exist\n elif self.overwrite or not parameter_set_file.is_file():\n # If dry run is specified, print the files that would have been written to stdout\n if self.dryrun:\n sys.stdout.write(f\"{parameter_set_file.resolve()}:\\n{parameter_set}\")\n sys.stdout.write(\"\\n\")\n else:\n self._conditionally_write_dataset(parameter_set_file, parameter_set)",
"def generate_data(self):\n self.remove_hdf5_file()\n hdf5_handler = self.create_hdf5_file()\n self.populate_hdf5_file(hdf5_handler, self.dataset)",
"def run(self):\n\n # Setup hdf5 file and datasets\n self.vw_f = h5py.File(self.name,'w')\n self.vw,self.vwts = [],[]\n for i in range(self.n_cams):\n x,y = self.resolution[i]\n vw = self.vw_f.create_dataset('mov{}'.format(i), (self.hdf_resize, y, x), maxshape=(None, y, x), dtype='uint8', compression='lzf') \n vwts = self.vw_f.create_dataset('ts{}'.format(i), (self.hdf_resize,2), maxshape=(None,2), dtype=np.float64, compression='lzf')\n self.vw.append(vw)\n self.vwts.append(vwts)\n \n # Counters and buffers\n _sav_idx = [0]*self.n_cams # index within hdf5 dataset\n _buf_idx = [0]*self.n_cams # index of in-memory buffer that is periodicially dumped to hdf5 dataset\n _saving_buf,_saving_ts_buf = [],[]\n for i in range(self.n_cams):\n x,y = self.resolution[i]\n sb = np.empty((self.buffer_size,y,x), dtype=np.uint8)\n stb = np.empty((self.buffer_size,2), dtype=np.float64)\n _saving_buf.append(sb)\n _saving_ts_buf.append(stb)\n\n cams_running = [True for i in range(self.n_cams)]\n # Main loop\n while any(cams_running):\n # For all datasets: if there's not enough room to dump another buffer's worth into dataset, extend it\n # Then read new frames, and save/query as desired\n for di in range(self.n_cams):\n if not cams_running[di]:\n continue\n \n if self.vw[di].shape[0]-_sav_idx[di] <= self.buffer_size:\n assert self.vw[di].shape[0] == self.vwts[di].shape[0], 'Frame and timestamp dataset lengths are mismatched.'\n self.vw[di].resize((self.vw[di].shape[0]+self.hdf_resize, self.vw[di].shape[1], self.vw[di].shape[2]))\n self.vwts[di].resize((self.vwts[di].shape[0]+self.hdf_resize,self.vwts[di].shape[1]))\n \n # Get new frames from buffer, breaking out if empty and kill flag has been raised\n ts=temp=bsave=None\n try:\n ts,temp,bsave = self.frame_buffer[di].get(block=False)\n except Queue.Empty:\n if self.kill_flag.value:\n cams_running[di] = False\n continue\n\n if self.kill_flag.value==True:\n logging.info('Final flush for camera {}: {} frames remain.'.format(di, self.frame_buffer[di].qsize()))\n \n if di==self.query_idx and self.query_flag.value:\n self.query_queue[:] = temp.copy()\n self.query_queue_ts.value = ts[1]\n self.query_flag.value = False\n \n if bsave: # flag that this frame was added to queue during a saving period\n\n # add new data to in-memory buffer\n x,y = self.resolution[di]\n _saving_buf[di][_buf_idx[di]] = temp.reshape([y,x])\n _saving_ts_buf[di][_buf_idx[di]] = ts\n _buf_idx[di] += 1\n # if necessary, flush out buffer to hdf dataset\n if (self.flushing.value and _buf_idx[di]>=self.min_flush) or _buf_idx[di] >= self.buffer_size:\n if _buf_idx[di] >= self.buffer_size:\n logging.warning('Dumping camera b/c reached max buffer (buffer={}, current idx={})'.format(self.buffer_size, _buf_idx[di]))\n self.vw[di][_sav_idx[di]:_sav_idx[di]+_buf_idx[di],:,:] = _saving_buf[di][:_buf_idx[di]]\n self.vwts[di][_sav_idx[di]:_sav_idx[di]+_buf_idx[di],:] = _saving_ts_buf[di][:_buf_idx[di]]\n _sav_idx[di] += _buf_idx[di]\n _buf_idx[di] = 0\n\n # final flush:\n for di in range(self.n_cams):\n self.vw[di][_sav_idx[di]:_sav_idx[di]+_buf_idx[di],:,:] = _saving_buf[di][:_buf_idx[di]]\n self.vwts[di][_sav_idx[di]:_sav_idx[di]+_buf_idx[di]] = _saving_ts_buf[di][:_buf_idx[di]]\n _sav_idx[di] += _buf_idx[di]\n # cut off all unused allocated space \n self.vw[di].resize([_sav_idx[di],self.vw[di].shape[1],self.vw[di].shape[2]])\n self.vwts[di].resize([_sav_idx[di],2])\n\n self.vw_f.close()\n self.saving_complete.value = True",
"def write_calculations(params, hdf5_data):\n\n if params.rho is not None:\n dset = require_dataset(hdf5_data, structure.H5_ENV_VOLUME, (1,), dtype=settings.NEMOH_FLOAT)\n dset[0] = float(params.rho)\n set_hdf5_attributes(dset, structure.H5_ENV_VOLUME_ATTR)\n\n if params.g is not None:\n dset = require_dataset(hdf5_data, structure.H5_ENV_GRAVITY, (1,), dtype=settings.NEMOH_FLOAT)\n dset[0] = float(params.g)\n set_hdf5_attributes(dset, structure.H5_ENV_GRAVITY_ATTR)\n\n if params.depth is not None:\n dset = require_dataset(hdf5_data, structure.H5_ENV_DEPTH, (1,), dtype=settings.NEMOH_FLOAT)\n dset[0] = float(params.depth)\n set_hdf5_attributes(dset, structure.H5_ENV_DEPTH_ATTR)\n\n if (params.xeff is not None) and (params.yeff is not None):\n dset = require_dataset(hdf5_data, structure.H5_ENV_WAVE_POINT, (2,), dtype=settings.NEMOH_FLOAT)\n dset[0] = float(params.xeff)\n dset[1] = float(params.yeff)\n set_hdf5_attributes(dset, structure.H5_ENV_WAVE_POINT_ATTR)\n\n if params.floating_bodies is not None:\n num_bodies = len(params.floating_bodies)\n i = 0\n for fb in params.floating_bodies:\n i += 1\n body = structure.H5_BODIES + structure.H5_BODY_BASE + str(i) + '/'\n mesh_x = []\n with open(fb.mesh_file, 'r') as mesh_file:\n for line in mesh_file:\n mesh_x.append(line)\n\n num_points = int(float(fb.points))\n num_panels = int(float(fb.panels))\n dset = require_dataset(hdf5_data, body + structure.H5_BODY_NUM_POINTS, (1, ), dtype=settings.NEMOH_INT)\n dset[0] = num_points\n set_hdf5_attributes(dset, structure.H5_BODY_NUM_POINTS_ATTR)\n\n dset = require_dataset(hdf5_data, body + structure.H5_BODY_NUM_PANELS, (1, ), dtype=settings.NEMOH_INT)\n dset[0] = num_panels\n set_hdf5_attributes(dset, structure.H5_BODY_NUM_PANELS_ATTR)\n\n mesh_idx = 0\n dset = require_dataset(hdf5_data, body + structure.H5_BODY_MESH, (num_points+num_panels+1, 4),\n dtype=settings.NEMOH_FLOAT)\n mesh_x2 = mesh_x[mesh_idx].split()\n set_hdf5_attributes(dset, structure.H5_BODY_MESH_ATTR)\n\n dset[0, 0] = int(float(mesh_x2[0]))\n dset[0, 1] = int(float(mesh_x2[1]))\n\n for j in range(1, num_points+num_panels+1):\n mesh_idx += 1\n mesh_x2 = mesh_x[mesh_idx].split()\n dset[j, :] = [float(x) for x in mesh_x2[:4]]\n\n if j == num_points:\n mesh_idx += 1\n\n num = int(float(fb.degrees_of_freedom))\n dset = require_dataset(hdf5_data, body + structure.H5_FREEDOM_DEGREE, (num, 7), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_FREEDOM_DEGREE_ATTR)\n\n x1 = [fb.surge, fb.sway, fb.heave, fb.roll_about_cdg, fb.pitch_about_cdg, fb.yaw_about_cdg]\n for j in range(len(x1)):\n if x1[j]:\n x2 = x1[j].split()\n dset[j, :] = np.array([float(x) for x in x2[:7]])\n\n num = int(float(fb.resulting_generalised_forces))\n dset = require_dataset(hdf5_data, body + structure.H5_GENERALISED_FORCES, (num, 7),\n dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_GENERALISED_FORCES_ATTR)\n x1 = [fb.force_in_x_direction, fb.force_in_y_direction, fb.force_in_z_direction,\n fb.moment_cdg_force_in_x_direction, fb.moment_cdg_force_in_y_direction,\n fb.moment_cdg_force_in_z_direction]\n for j in range(len(x1)):\n if x1[j]:\n x2 = x1[j].split()\n dset[j, :] = [float(x) for x in x2[:7]]\n\n if params.wave_frequencies is not None:\n dset = require_dataset(hdf5_data, structure.H5_NUM_WAVE_FREQUENCIES, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_NUM_WAVE_FREQUENCIES_ATTR)\n dset[0] = int(float(params.wave_frequencies))\n\n if params.min_wave_frequencies is not None:\n dset = 
require_dataset(hdf5_data, structure.H5_MIN_WAVE_FREQUENCIES, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_MIN_WAVE_FREQUENCIES_ATTR)\n dset[0] = float(params.min_wave_frequencies)\n\n if params.max_wave_frequencies is not None:\n dset = require_dataset(hdf5_data, structure.H5_MAX_WAVE_FREQUENCIES, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_MAX_WAVE_FREQUENCIES_ATTR)\n dset[0] = float(params.max_wave_frequencies)\n\n if params.wave_directions is not None:\n dset = require_dataset(hdf5_data, structure.H5_NUM_WAVE_DIRECTIONS, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_NUM_WAVE_DIRECTIONS_ATTR)\n dset[0] = int(params.wave_directions)\n\n if params.min_wave_directions is not None:\n dset = require_dataset(hdf5_data, structure.H5_MIN_WAVE_DIRECTIONS, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_MIN_WAVE_DIRECTIONS_ATTR)\n dset[0] = float(params.min_wave_directions)\n\n if params.max_wave_direction is not None:\n dset = require_dataset(hdf5_data, structure.H5_MAX_WAVE_DIRECTIONS, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_MAX_WAVE_DIRECTIONS_ATTR)\n dset[0] = float(params.max_wave_direction)\n\n x1 = ['1 0.1 10.', '0', '181. 0. 180.', '1 2 1000. 2.']\n idx = 0\n x2 = x1[idx].split()\n\n dset = require_dataset(hdf5_data, structure.H5_COMPUTE_IRF, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_COMPUTE_IRF_ATTR)\n dset[0] = int(x2[0])\n\n dset = require_dataset(hdf5_data, structure.H5_IRF_TIME_STEP, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_IRF_TIME_STEP_ATTR)\n dset[0] = float(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_IRF_DURATION, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_IRF_DURATION_ATTR)\n dset[0] = float(x2[2])\n\n idx += 1\n x2 = x1[idx].split()\n dset = require_dataset(hdf5_data, structure.H5_SHOW_PRESSURE, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_SHOW_PRESSURE_ATTR)\n dset[0] = int(x2[0])\n\n idx += 1\n x2 = x1[idx].split()\n dset = require_dataset(hdf5_data, structure.H5_KOCHIN_NUMBER, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_KOCHIN_NUMBER_ATTR)\n dset[0] = float(x2[0])\n dset = require_dataset(hdf5_data, structure.H5_KOCHIN_MIN, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_KOCHIN_MIN_ATTR)\n dset[0] = float(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_KOCHIN_MAX, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_KOCHIN_MAX_ATTR)\n dset[0] = float(x2[2])\n\n idx += 1\n x2 = x1[idx].split()\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_POINTS_X, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_POINTS_X_ATTR)\n dset[0] = int(x2[0])\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_POINTS_Y, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_POINTS_Y_ATTR)\n dset[0] = int(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_DIMENSION_X, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_DIMENSION_X_ATTR)\n dset[0] = float(x2[2])\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_DIMENSION_Y, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_DIMENSION_Y_ATTR)\n dset[0] = float(x2[3])\n\n if params.indiq_solver 
is not None:\n dset = require_dataset(hdf5_data, structure.H5_SOLVER_TYPE, (1,), dtype=settings.NEMOH_INT)\n dset[0] = int(float(params.indiq_solver))\n set_hdf5_attributes(dset, structure.H5_SOLVER_TYPE_ATTR)\n\n if params.ires is not None:\n dset = require_dataset(hdf5_data, structure.H5_SOLVER_GMRES_RESTART, (1,), dtype=settings.NEMOH_INT)\n dset[0] = int(float(params.ires))\n set_hdf5_attributes(dset, structure.H5_SOLVER_GMRES_RESTART_ATTR)\n\n if params.tol_gmres is not None:\n dset = require_dataset(hdf5_data, structure.H5_SOLVER_GMRES_STOPPING, (1,), dtype=settings.NEMOH_FLOAT)\n dset[0] = float(params.tol_gmres)\n set_hdf5_attributes(dset, structure.H5_SOLVER_GMRES_STOPPING_ATTR)\n\n if params.max_iterations is not None:\n dset = require_dataset(hdf5_data, structure.H5_SOLVER_GMRES_MAX_ITERATIONS, (1,), dtype=settings.NEMOH_INT)\n\n dset[0] = int(float(params.max_iterations))\n set_hdf5_attributes(dset, structure.H5_SOLVER_GMRES_MAX_ITERATIONS_ATTR)",
"def export_to_hdf5(cls, h5_file, model, loads):\n #encoding = model._encoding\n #comments = []\n sid = []\n node = []\n cid = []\n mag = []\n xyz = []\n for load in loads:\n #comments.append(loads.comment)\n sid.append(load.sid)\n node.append(load.node)\n cid.append(load.cid)\n mag.append(load.mag)\n xyz.append(load.xyz)\n\n #h5_file.create_dataset('_comment', data=comments)\n h5_file.create_dataset('sid', data=sid)\n h5_file.create_dataset('node', data=node)\n h5_file.create_dataset('cid', data=cid)\n h5_file.create_dataset('mag', data=mag)\n h5_file.create_dataset('xyz', data=xyz)",
"def save_results(self, name):\n # metrics in npz format\n # filename = join(self.out_dir, '%s_metrics.npz' % name)\n # np.savez(filename, tel_r=self.tel_r, cable_length=self.cable_length,\n # cable_length_2=self.cable_length_2, uv_hist=self.uv_hist)\n\n if self.cable_length: # Empty dict() evaluate to False\n # ASCII CSV table of radius vs cable length\n filename = join(self.out_dir, '%s_cables.txt' % name)\n data = np.array([[k, v] for k, v in\n self.cable_length.iteritems()])\n data = np.sort(data, axis=0)\n np.savetxt(filename, data, fmt=b'%.10f %.10f')\n\n if self.cable_length_2: # Empty dict() evaluate to False\n # ASCII CSV table of radius vs cable length\n filename = join(self.out_dir, '%s_cables_2.txt' % name)\n data = np.array([[k, v] for k, v in\n self.cable_length_2.iteritems()])\n data = np.sort(data, axis=0)\n np.savetxt(filename, data, fmt=b'%.10f %.10f')\n\n if self.cable_length_3: # Empty dict() evaluates to False\n filename = join(self.out_dir, '%s_cables.txt' % name)\n data = np.array([[k, v] for k, v in\n self.cable_length_3.iteritems()])\n data = np.sort(data, axis=0)\n np.savetxt(filename, data, fmt=b'%.10f %.10f')\n\n # Save a pickle with the PSF comparison info.\n if self.psf:\n filename = join(self.out_dir, '%s_psf.p' % name)\n pickle.dump(self.psf, open(filename, 'wb'))\n\n # Save a pickle of uv hist data.\n if self.uv_hist:\n filename = join(self.out_dir, '%s_uv_hist.p' % name)\n pickle.dump(self.uv_hist, open(filename, 'wb'))",
"def onestatfile():\n with hp.File('StatsFile.h5', 'w') as onefile:\n alldata = np.empty((600, 4, 3, 500), dtype=np.float32)\n for j in range(600):\n for i in range(3):\n msd, vol, rms, asp = getstats(i, j+1)\n alldata[j, 0, i, :] = msd\n alldata[j, 1, i, :] = vol\n alldata[j, 2, i, :] = rms\n alldata[j, 3, i, :] = asp\n onefile.create_dataset('Stats', data=alldata, chunks=(1, 4, 3, 500),\n compression='gzip', compression_opts=9)",
"def save_results(PATH, data, filename):\n with open(PATH + '/' + filename + \".txt\",\"w\") as file:\n file.write(\"Results of heuristic models with mean and standard deviation.\\n\")\n for result in data:\n write_result(file, result)\n file.close()\n print('results saved in:'+ PATH + '/' + filename + \".txt\")",
"def _write_h5_out(self, fout, save_hybrid_meta=True):\n\n with Outputs(fout, mode='a') as out:\n if 'meta' in out.datasets and save_hybrid_meta:\n hybrid_meta = to_records_array(self.hybrid_meta)\n out['meta'] = hybrid_meta\n\n for dset, data in self.profiles.items():\n out[dset] = data",
"def write(data: orm.Data, filename: str) -> None:\n save(to_bands_inspect(data), hdf5_file=filename)",
"def save_h5_file(self, result_dict, loc_id):\n if self.storage_type == 's3':\n file_path = '{0}/{1}_combined.h5'.format(\n self.output_path,\n loc_id\n )\n hf_result = h5py.File(file_path, 'w')\n for k, v in result_dict.items():\n hf_result.create_dataset(k, data=v)\n hf_result.close()\n else:\n file_path = os.path.join(self.output_path, '{0}_combined.h5'.format(loc_id))\n hf_result = h5py.File(file_path, 'w')\n for k, v in result_dict.items():\n hf_result.create_dataset(k, data=v)\n hf_result.close()\n return file_path",
"def save_hdf5(self, filename):\n filename += '.h5'\n try:\n hf = h5py.File(filename, 'w')\n hf.create_dataset('Array', data=self.flat_array)\n hf.close()\n except TypeError as err:\n if isinstance(self.mess_inst, MessagesGUI):\n self.mess_inst.message('TypeError [{}] when attempting to save HDF5'.format(err))\n else:\n print('TypeError [{}] when attempting to save HDF5'.format(err))",
"def create_output_database():\n\n# Do not alter the hdf5 file if it already exists\n if os.path.exists(database_path):\n print(\"DATABASE STATUS:\")\n print(\"\\t\" + database_path + \" already exists and is ready to store the results of computations\")\n return None\n# Create hdf5 file. The flag \"-w\" means \"create file, fail if exists\" \n else:\n computations_database = h5py.File(database_path, \"w-\")\n\n# Create initial data datasets and write initial data into them \n for initial_condition in initial_conditions:\n for k in range (6,17):\n dataset_initial_path = initial_condition + \"/k = \" + str(k) + \" initial_data\"\n computations_database[dataset_initial_path] = initial_data(initial_condition, k)\n# Create data groups for storing the results of computations \n for flux in fluxes: \n group_path = initial_condition + \"/\" + flux\n computations_database.create_group(group_path)\n\n# Write the appropriate attributes that are needed for particular computations, \n# i.e. create the appropriate environment for each computational method \n computations_database[group_path].attrs[\"a\"] = 3.0\n computations_database[group_path].attrs[\"T\"] = 9.0\n if flux == \"Lax_Wendroff_Fourth_Order\": \n computations_database[group_path].attrs[\"CFL\"] = 0.2\n elif flux in [\"Fromm_CFL_0.5\", \"Fromm_van_Leer_CFL_0.5\"]:\n computations_database[group_path].attrs[\"CFL\"] = 0.5\n else:\n computations_database[group_path].attrs[\"CFL\"] = 0.9\n \n computations_database.close() \n print(\"DATABASE STATUS:\")\n print(\"\\t\" + database_path + \" has been created and is ready to store the results of computations\")"
] |
[
"0.69929975",
"0.69636846",
"0.6793143",
"0.6706263",
"0.6678321",
"0.6622494",
"0.661845",
"0.6577388",
"0.65469503",
"0.64344764",
"0.6309686",
"0.62802446",
"0.6271253",
"0.626097",
"0.62531996",
"0.62441474",
"0.62441474",
"0.6225183",
"0.6224221",
"0.6205013",
"0.6176995",
"0.6158527",
"0.6148892",
"0.61478114",
"0.61375153",
"0.61135924",
"0.60773724",
"0.6063653",
"0.60551083",
"0.6045544"
] |
0.70214975
|
0
|
Model specific call that will create the (empty) fit dataset, and link the fit dataset to the spectroscopic datasets.
|
def _create_fit_datasets(self):
raise NotImplementedError('Please override the _create_fit_datasets '
'specific to your model')
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _create_fit_datasets(self):\n\n if self.h5_guess is None:\n warn('Need to guess before fitting!')\n return\n\n self.h5_fit = create_empty_dataset(self.h5_guess, loop_fit32, 'Fit')\n write_simple_attrs(self._h5_group, {'fit method': 'pycroscopy functional'})\n\n # This is necessary comparing against new runs to avoid re-computation + resuming partial computation\n write_simple_attrs(self.h5_fit, self._parms_dict)\n write_simple_attrs(self.h5_fit, {'Loop_fit_method': \"pycroscopy functional\", 'last_pixel': 0})",
"def __call__(self, models, x, y, z=None, xbinsize=None, ybinsize=None, err=None, bkg=None, bkg_scale=1, **kwargs):\n\n tie_list = []\n try:\n n_inputs = models[0].n_inputs\n except TypeError:\n n_inputs = models.n_inputs\n\n self._data = Dataset(n_inputs, x, y, z, xbinsize, ybinsize, err, bkg, bkg_scale)\n\n if self._data.ndata > 1:\n\n if len(models) == 1:\n self._fitmodel = ConvertedModel([models.copy() for _ in xrange(self._data.ndata)], tie_list)\n # Copy the model so each data set has the same model!\n elif len(models) == self._data.ndata:\n self._fitmodel = ConvertedModel(models, tie_list)\n else:\n raise Exception(\"Don't know how to handle multiple models \"\n \"unless there is one foreach dataset\")\n else:\n if len(models) > 1:\n self._data.make_simfit(len(models))\n self._fitmodel = ConvertedModel(models, tie_list)\n else:\n self._fitmodel = ConvertedModel(models)\n\n self._fitter = Fit(self._data.data, self._fitmodel.sherpa_model, self._stat_method, self._opt_method, self._est_method, **kwargs)\n self.fit_info = self._fitter.fit()\n\n return self._fitmodel.get_astropy_model()",
"def fit(model, data, test_ids, exp_name, datasets):\n if model.model_type == 'torch':\n size = len(data[0])\n else:\n size = data[0].shape[0]\n \n train_ids = [i for i in range(size) if i not in test_ids]\n scaler = pka_scaler(data[1][train_ids])\n if model.data_type == 'descriptors':\n desc_scaler = StandardScaler()\n desc_scaler.fit(data[0][train_ids])\n data[0] = desc_scaler.transform(data[0])\n \n trained_model = train(model, train_ids, data, scaler, datasets)\n results = test(model, trained_model, test_ids, data, scaler)\n model.experiments.append({'name':exp_name,'model':trained_model, 'results':results, 'scaler':scaler})\n return results",
"def fit():\n pass",
"def fit_from_model_data(self, model_data: np.ndarray) -> f.FitDataset:\r\n return f.FitDataset(dataset=self.dataset, model_data=model_data)",
"def run_fit(self, optimize_opts=None):\n fit_range = self.config[\"fit\"].get(\"fit_range\")\n model = self.config[\"fit\"][\"model\"]\n\n for obs in self.extraction.spectrum_observations:\n if fit_range is not None:\n obs.mask_fit = obs.counts.energy_mask(fit_range[0], fit_range[1])\n obs.model = model\n\n self.fit = Fit(self.extraction.spectrum_observations)\n self.fit_result = self.fit.run(optimize_opts=optimize_opts)\n\n model = self.config[\"fit\"][\"model\"]\n modelname = model.__class__.__name__\n\n model.parameters.covariance = self.fit_result.parameters.covariance\n\n filename = make_path(self.config[\"outdir\"]) / \"fit_result_{}.yaml\".format(\n modelname\n )\n\n self.write(filename=filename)\n\n obs_stacker = SpectrumDatasetOnOffStacker(self.extraction.spectrum_observations)\n obs_stacker.run()\n\n datasets_fp = obs_stacker.stacked_obs\n datasets_fp.model = model\n self.flux_point_estimator = FluxPointsEstimator(\n e_edges=self.config[\"fp_binning\"], datasets=datasets_fp\n )\n fp = self.flux_point_estimator.run()\n fp.table[\"is_ul\"] = fp.table[\"ts\"] < 4\n self.flux_points = fp",
"def fit(self, dataset, val_dataset=None, logging_path='', silent=True, verbose=True):\r\n save_prefix = 'ssd_{}_{}_{}'.format(self.img_size, self.backbone, dataset.dataset_type)\r\n\r\n # convert dataset to compatible format\r\n dataset = self.__prepare_dataset(dataset)\r\n\r\n # set save dir for checkpoint saving\r\n self.__create_model(dataset.classes)\r\n if verbose:\r\n print(\"Saving models as: {}\".format(save_prefix))\r\n\r\n checkpoints_folder = os.path.join(self.temp_path, '{}_checkpoints'.format(save_prefix))\r\n if self.checkpoint_after_iter != 0 and not os.path.exists(checkpoints_folder):\r\n # user set checkpoint_after_iter so checkpoints must be created\r\n # create checkpoint dir\r\n os.makedirs(checkpoints_folder, exist_ok=True)\r\n\r\n start_epoch = 0\r\n if self.checkpoint_load_iter > 0:\r\n # user set checkpoint_load_iter, so load a checkpoint\r\n checkpoint_name = self.checkpoint_str_format.format(self.checkpoint_load_iter)\r\n checkpoint_path = os.path.join(checkpoints_folder, checkpoint_name)\r\n try:\r\n self._model.load_parameters(checkpoint_path)\r\n start_epoch = self.checkpoint_load_iter + 1\r\n except FileNotFoundError as e:\r\n e.strerror = 'No such file or directory {}'.format(checkpoint_path)\r\n\r\n # set device\r\n # NOTE: multi-gpu a little bugged\r\n if 'cuda' in self.device:\r\n if mx.context.num_gpus() > 0:\r\n if self.device == 'cuda':\r\n ctx = [mx.gpu(0)]\r\n else:\r\n ctx = [mx.gpu(int(self.device.split(':')[1]))]\r\n else:\r\n ctx = [mx.cpu()]\r\n else:\r\n ctx = [mx.cpu()]\r\n\r\n with warnings.catch_warnings(record=True):\r\n warnings.simplefilter(\"always\")\r\n self._model.initialize()\r\n self._model.collect_params().reset_ctx(ctx[0])\r\n if verbose:\r\n print(\"Network:\")\r\n print(self._model)\r\n\r\n # get data loader\r\n with autograd.train_mode():\r\n _, _, anchors = self._model(mx.nd.zeros((1, 3, self.img_size, self.img_size), ctx[0]))\r\n anchors = anchors.as_in_context(mx.cpu())\r\n\r\n # transform dataset & get loader\r\n train_transform = presets.ssd.SSDDefaultTrainTransform(self.img_size, self.img_size, anchors)\r\n dataset = dataset.transform(train_transform)\r\n\r\n batchify_fn = Tuple(Stack(), Stack(), Stack())\r\n train_loader = gluon.data.DataLoader(\r\n dataset, self.batch_size, shuffle=True, batchify_fn=batchify_fn,\r\n last_batch='rollover', num_workers=self.num_workers\r\n )\r\n\r\n trainer = gluon.Trainer(self._model.collect_params(),\r\n 'sgd', {'learning_rate': self.lr,\r\n 'wd': self.weight_decay,\r\n 'momentum': self.momentum},\r\n update_on_kvstore=None)\r\n mbox_loss = SSDMultiBoxLoss()\r\n ce_metric = mx.metric.Loss('cross_entropy_loss')\r\n smoothl1_metric = mx.metric.Loss('smoothl1_loss')\r\n\r\n self._model.collect_params().reset_ctx(ctx)\r\n self._model.hybridize(static_alloc=True, static_shape=True)\r\n\r\n # start training\r\n training_dict = {\"cross_entropy_loss\": [], \"smoothl1_loss\": [], \"val_map\": []}\r\n n_iters = 0\r\n for epoch in range(start_epoch, self.epochs):\r\n autograd.set_training(True)\r\n cur_lr = self.__get_lr_at(epoch)\r\n trainer.set_learning_rate(cur_lr)\r\n\r\n self._model.hybridize(static_alloc=True, static_shape=True)\r\n\r\n tic = time.time()\r\n # TODO: epoch + 1\r\n print('[Epoch {}/{} lr={}]'.format(epoch, self.epochs, trainer.learning_rate))\r\n ce_metric.reset()\r\n smoothl1_metric.reset()\r\n\r\n for i, batch in enumerate(train_loader):\r\n n_iters += 1\r\n data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)\r\n cls_targets = 
gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)\r\n box_targets = gluon.utils.split_and_load(batch[2], ctx_list=ctx, batch_axis=0)\r\n\r\n with autograd.record():\r\n cls_preds = []\r\n box_preds = []\r\n for x in data:\r\n cls_pred, box_pred, _ = self._model(x)\r\n cls_preds.append(cls_pred)\r\n box_preds.append(box_pred)\r\n sum_loss, cls_loss, box_loss = mbox_loss(\r\n cls_preds, box_preds, cls_targets, box_targets)\r\n autograd.backward(sum_loss)\r\n\r\n trainer.step(1)\r\n\r\n ce_metric.update(0, [l * self.batch_size for l in cls_loss])\r\n smoothl1_metric.update(0, [l * self.batch_size for l in box_loss])\r\n if n_iters % self.log_after == self.log_after - 1:\r\n name1, loss1 = ce_metric.get()\r\n name2, loss2 = smoothl1_metric.get()\r\n # TODO: epoch + 1\r\n print('[Epoch {}][Batch {}] {}={:.3f}, {}={:.3f}'.format(\r\n epoch, i, name1, loss1, name2, loss2\r\n ))\r\n toc = time.time()\r\n\r\n # perform evaluation during training\r\n if epoch % self.val_after == self.val_after - 1 and val_dataset is not None:\r\n if verbose:\r\n print(\"Model evaluation at epoch {}\".format(epoch))\r\n eval_dict = self.eval(val_dataset)\r\n training_dict[\"val_map\"].append(eval_dict[\"map\"])\r\n\r\n # checkpoint saving\r\n if self.checkpoint_after_iter > 0 and epoch % self.checkpoint_after_iter == self.checkpoint_after_iter - 1:\r\n if verbose:\r\n print('Saving model at epoch {}'.format(epoch))\r\n checkpoint_name = self.checkpoint_str_format.format(epoch)\r\n checkpoint_filepath = os.path.join(checkpoints_folder, checkpoint_name)\r\n self._model.save_parameters(checkpoint_filepath)\r\n\r\n name1, loss1 = ce_metric.get()\r\n name2, loss2 = smoothl1_metric.get()\r\n training_dict[\"cross_entropy_loss\"].append(loss1)\r\n training_dict[\"smoothl1_loss\"].append(loss2)\r\n # TODO: epoch + 1\r\n print('[Epoch {}] Training cost: {:.3f}, {}={:.3f}, {}={:.3f}'.format(\r\n epoch, toc - tic, name1, loss1, name2, loss2\r\n ))\r\n\r\n return training_dict",
"def _fit(self, dataset):\n raise NotImplementedError()",
"def makeFit(self):\n if not self.fitModel.params:\n return\n cs = self.spectrum\n self.worker.make_model_curve(cs, allData=csi.allLoadedItems)\n\n dfparams = cs.fitParams\n lcfRes = dfparams['lcf_result']\n self.fitR.setText('R={0:.5g}'.format(lcfRes['R']))\n self.updateFitResults()\n self.fitReady.emit()",
"def fit(model, hists, fitmethod, eps=1.0e-7):\n\n RooAbsReal.defaultIntegratorConfig().setEpsAbs(eps)\n RooAbsReal.defaultIntegratorConfig().setEpsRel(eps)\n modfuncs = model.model_functions()\n\n datahist = [RooDataHist(\n 'scan{0}Beam{1}RestDataHist'.format(c, i),\n 'scan{0}Beam{1}RestDataHist'.format(c, i),\n RooArgList(model.xvar(), model.yvar()),\n hists[j]\n ) for j, (i,c) in enumerate(ic)]\n sample = RooCategory('sample', 'sample')\n for (i,c) in ic:\n sample.defineType('{0}_ScanData_Beam{1}Rest'.format(c, i))\n combdata = RooDataHist(\n 'combdata', 'combined data',\n RooArgList(model.xvar(), model.yvar()),\n RooFit.Index(sample),\n RooFit.Import('X_ScanData_Beam1Rest', datahist[0]),\n RooFit.Import('Y_ScanData_Beam1Rest', datahist[1]),\n RooFit.Import('X_ScanData_Beam2Rest', datahist[2]),\n RooFit.Import('Y_ScanData_Beam2Rest', datahist[3])\n )\n simpdf = RooSimultaneous('simpdf', 'simultaneous pdf', sample)\n for j, (i,c) in enumerate(ic):\n simpdf.addPdf(modfuncs[j], '{0}_ScanData_Beam{1}Rest'.format(c, i))\n\n result = fitmethod(simpdf, combdata)\n return result, modfuncs, datahist",
"def fitted_model(model_data, sequential_model):\n x_train, y_train, x_val, y_val, x_test, _ = model_data\n compile_model(sequential_model)\n fitted_model = fit_model(sequential_model, 64, 1, False, x_train, y_train, x_val, y_val, x_test)\n return fitted_model",
"def fit_training_data(self):\n self.model.fit(self.X_train)",
"def _prepare_fit(self, model, data, data_val,\n generator=False, delay=False,\n *args, **kwargs):\n\n data, data_val, data_hash, size_gen = self._prepare_message(model,\n data,\n data_val,\n kwargs,\n generator)\n\n f = self.backend.fit\n if delay:\n f = self.backend.fit.delay\n res = f(self.backend_name,\n self.backend_version,\n copy.deepcopy(self.model_dict),\n data, data_hash, data_val,\n size_gen=size_gen,\n generator=generator,\n *args, **kwargs)\n return self._handle_results(res, delay)",
"def fit(self, data):\n if data is None:\n self.train_self()\n else:\n # not needed this week\n pass",
"def fit(model, data, test_ids, exp_name, train_ids=None):\n if model.model_type == 'torch':\n size = len(data[0])\n else:\n size = data[0].shape[0]\n \n if train_ids == None:\n train_ids = [i for i in range(size) if i not in test_ids]\n scaler = pka_scaler(data[1][train_ids])\n \n if model.data_type == 'descriptors':\n desc_scaler = StandardScaler()\n scaling_data = data[0][train_ids]\n desc_scaler.fit(scaling_data)\n data[0] = desc_scaler.transform(data[0])\n else:\n scaling_data = None\n \n trained_model = train(model, train_ids, data, scaler)\n results = test(model, trained_model, test_ids, data, scaler)\n model.experiments[exp_name] = {'model':trained_model, 'results':results, 'scaler':scaler, 'desc scaling data':scaling_data}\n return results",
"def fit(self) -> None:\n\n levels = self.levels\n TSs = GetAggregateTS(self.data).aggregate(levels)\n models = {}\n residuals = {}\n fcsts = {}\n for bm in self.baseModels:\n model_name = bm.model_name\n if model_name is None: # only residuals and fcsts are provided\n models[bm.level] = None\n residuals[bm.level] = bm.residuals\n fcsts[bm.level] = bm.fcsts\n else:\n m = BASE_MODELS[model_name](\n data=TSs[bm.level],\n params=bm.model_params,\n )\n m.fit()\n models[bm.level] = m\n self.models = models\n self.info_fcsts = fcsts\n self.info_residuals = residuals",
"def save_fit(self):\n if self.fit is None:\n self.fit_status.setText('Fit not available for saving')\n return\n self.read_parameters()\n group = NXprocess()\n group['model'] = self.composite_model\n group['data'] = self.data\n for m in self.models:\n group[m['name']] = self.get_model(m['model'])\n parameters = NXparameters(attrs={'model': m['class']})\n for name in m['parameters']:\n p = self.fit.params[name]\n name = name.replace(m['model'].prefix, '')\n parameters[name] = NXfield(p.value, error=p.stderr,\n initial_value=p.init_value,\n min=str(p.min), max=str(p.max),\n vary=p.vary, expr=p.expr)\n group[m['name']].insert(parameters)\n group['program'] = 'lmfit'\n group['program'].attrs['version'] = lmfit_version\n group['title'] = 'Fit Results'\n group['fit'] = self.get_model(fit=True)\n fit = NXparameters()\n fit.nfev = self.fit.result.nfev\n fit.chisq = self.fit.result.chisqr\n fit.redchi = self.fit.result.redchi\n fit.message = self.fit.result.message\n group['statistics'] = fit\n group.note = NXnote(\n self.fit.result.message,\n f'Chi^2 = {self.fit.result.chisqr}\\n'\n f'Reduced Chi^2 = {self.fit.result.redchi}\\n'\n f'No. of Function Evaluations = {self.fit.result.nfev}\\n'\n f'No. of Variables = {self.fit.result.nvarys}\\n'\n f'No. of Data Points = {self.fit.result.ndata}\\n'\n f'No. of Degrees of Freedom = {self.fit.result.nfree}\\n'\n f'{self.fit.fit_report()}')\n self.write_group(group)",
"def fit(self):\n self.output_data = np.array([])\n self.mapper_data = np.array\n return self",
"def test_fit_prep():\n args = get_layer('fit', 'manual', 'temporal', False, True, window=2, step_size=3)\n run_layer(*args)",
"def _create_guess_datasets(self):\n raise NotImplementedError('Please override the _create_guess_datasets '\n 'specific to your model')",
"def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)",
"def fit(self, X, Y):\n if self.model is None:\n print(\"%s.fit: implement me\" % (self.__class__.__name__))",
"def initialize(self):\n self.write_model(path=PATH.GRAD, suffix='new')\n\n if PAR.RANDOM_OVER_IT or optimize.iter == 1:\n self.get_random_frequencies()\n\n print('Generating synthetics')\n system.run('solver', 'eval_func',\n hosts='all',\n path=PATH.GRAD)\n\n self.write_misfit(path=PATH.GRAD, suffix='new')",
"def run(self):\n self.set_pipeline()\n self.pipeline.fit(self.X, self.y)",
"def _train(self,\n Xs: Array,\n Ys: Array,\n metric: Callable = None,\n **kwargs):\n self.model.fit(Xs, Ys, **kwargs)\n return None",
"def fit(self) -> None:\n start_time = time.time()\n # ---- START -----\n train_df = self.processed_train_df[self.processed_train_df[self.filter_col_name]].dropna()\n train_features = train_df[self.feature_list]\n for label, model in zip(self.label_list, self.models):\n model.fit(train_features, train_df[label])\n # ---- END -----\n end_time = time.time()\n print(\"Finished fitting : elasped time : \" + str(end_time - start_time))",
"def __init__(self, **kwargs):\n\n # Identify the mode to start the model in\n if \"x\" in kwargs and \"y\" in kwargs:\n x = kwargs.get(\"x\")\n y = kwargs.get(\"y\")\n if \"model_name\" not in kwargs:\n self.__mode = \"train\"\n else:\n self.__mode = \"retrain\"\n elif \"model_name\" in kwargs:\n self.__mode = \"test\"\n else:\n raise NameError(\"Cannot infer mode from arguments.\")\n\n print(\"Initializing model in %s mode.\" % self.__mode)\n\n if self.mode == \"train\":\n # Infer input type from type(x)\n if type(x[0]) == np.bytes_:\n print(\"Input type is 'binary mols'.\")\n self.__input_type = \"mols\" # binary RDKit mols\n else:\n print(\"Input type is 'molecular descriptors'.\")\n self.__input_type = \"descriptors\" # other molecular descriptors\n\n # If scaling is required\n if kwargs.get(\"scaling\", False) is True:\n # Normalize the input\n print(\"Applying scaling on input.\")\n self.__scaler = StandardScaler()\n x = self.__scaler.fit_transform(x)\n else:\n self.__scaler = None\n\n # If PCA is required\n if kwargs.get(\"pca\", False) is True:\n print(\"Applying PCA on input.\")\n self.__pca = PCA(\n n_components=x.shape[1]\n ) # n_components=n_features for now\n x = self.__pca.fit_transform(x)\n else:\n self.__pca = None\n\n self.__maxlen = (\n kwargs.get(\"dataset_info\")[\"maxlen\"] + 10\n ) # Extend maxlen to avoid breaks in training\n self.__charset = kwargs.get(\"dataset_info\")[\"charset\"]\n self.__dataset_name = kwargs.get(\"dataset_info\")[\"name\"]\n self.__lstm_dim = kwargs.get(\"lstm_dim\", 256)\n self.__h_activation = kwargs.get(\"h_activation\", \"relu\")\n self.__bn = kwargs.get(\"bn\", True)\n self.__bn_momentum = kwargs.get(\"bn_momentum\", 0.9)\n self.__noise_std = kwargs.get(\"noise_std\", 0.01)\n self.__td_dense_dim = kwargs.get(\n \"td_dense_dim\", 0\n ) # >0 squeezes RNN connections with Dense sandwiches\n self.__batch_size = kwargs.get(\"batch_size\", 256)\n self.__dec_layers = kwargs.get(\"dec_layers\", 2)\n\n if self.input_type == \"descriptors\":\n self.__codelayer_dim = x.shape[1] # features\n if \"codelayer_dim\" in kwargs:\n print(\n \"Ignoring requested codelayer_dim because it is inferred from the cardinality of the descriptors.\"\n )\n else:\n self.__codelayer_dim = kwargs.get(\"codelayer_dim\", 128)\n \n # Create the left/right-padding vectorizers\n self.__smilesvec1 = SmilesVectorizer(\n canonical=False,\n augment=True,\n maxlength=self.maxlen,\n charset=self.charset,\n binary=True,\n )\n\n self.__smilesvec2 = SmilesVectorizer(\n canonical=False,\n augment=True,\n maxlength=self.maxlen,\n charset=self.charset,\n binary=True,\n leftpad=False,\n )\n\n # self.train_gen.next() #This line is needed to set train_gen.dims (to be fixed in HetSmilesGenerator)\n self.__input_shape = self.smilesvec1.dims\n self.__dec_dims = list(self.smilesvec1.dims)\n self.__dec_dims[0] = self.dec_dims[0] - 1\n self.__dec_input_shape = self.dec_dims\n self.__output_len = self.smilesvec1.dims[0] - 1\n self.__output_dims = self.smilesvec1.dims[-1]\n\n # Build all sub-models as untrained models\n if self.input_type == \"mols\":\n self.__build_mol_to_latent_model()\n else:\n self.__mol_to_latent_model = None\n\n self.__build_latent_to_states_model()\n self.__build_batch_model()\n\n # Build data generators\n self.__build_generators(x, y)\n\n # Retrain or Test mode\n else:\n self.__model_name = kwargs.get(\"model_name\")\n\n # Load the model\n self.__load(self.model_name)\n \n if self.mode == \"retrain\":\n # If scaling is required\n if self.scaler is not None:\n 
print(\"Applying scaling on input.\")\n x = self.scaler.transform(x)\n\n # If PCA is required\n if self.pca is not None:\n print(\"Applying PCA on input.\")\n x = self.pca.transform(x)\n \n # Build data generators\n self.__build_generators(x, y)\n\n # Build full model out of the sub-models\n self.__build_model()\n\n # Show the resulting full model\n print(self.model.summary())",
"def init_training(self):\n\n if not os.path.exists(self._model_root_path):\n os.makedirs(self._model_root_path)\n\n # Only initialize once!\n if self._model is None:\n self._model = TrainableAimbotModel(self._config, self._fov,\n os.path.join(self._model_root_path, 'aimbot_model.tf'))\n\n if not os.path.isfile(self._train_data_tfrecord_path) and not os.path.isfile(self._test_data_tfrecord_path):\n # Only create if not existing\n images_labels = _get_annotations_and_images(self._image_path)\n images_labels_train, images_labels_test = train_test_split(images_labels, shuffle=True, test_size=0.20)\n\n self._model.create_tfrecords(self._train_data_tfrecord_path, images_labels_train)\n self._model.create_tfrecords(self._test_data_tfrecord_path, images_labels_test)\n\n self._train_data_set = self._model.create_dataset(self._train_data_tfrecord_path, augment=True, shuffle=True)\n self._test_data_set = self._model.create_dataset(self._train_data_tfrecord_path)",
"def initialize(self):\n LOG.info(\"Initializing Model.\")\n self.model = self.convert(df=self.training_df)\n if self.bootstraps is not None:\n LOG.info(\"Bootstrapping Data.\")\n self.bootstrap_data()",
"def training(self):\n self.model.fit(self.train_x, self.train_y)"
] |
[
"0.7104951",
"0.7070305",
"0.65044504",
"0.64612836",
"0.6461269",
"0.64220417",
"0.63549966",
"0.6304513",
"0.6270343",
"0.6267603",
"0.6158856",
"0.61479485",
"0.61465675",
"0.61364293",
"0.6110159",
"0.61055976",
"0.6104337",
"0.61033475",
"0.608298",
"0.60610783",
"0.60402405",
"0.60213846",
"0.6019746",
"0.6006722",
"0.59872466",
"0.59829724",
"0.5964538",
"0.5958162",
"0.595571",
"0.59213805"
] |
0.7163324
|
0
|
Gets existing Guess, Fit, and status datasets from the HDF5 group. All other domain-specific datasets should be loaded in the classes that extend this class.
|
def _get_existing_datasets(self):
    # The Guess dataset is expected to exist by the time this is called
    self._h5_guess = USIDataset(self.h5_results_grp['Guess'])
    try:
        self._h5_status_dset = self.h5_results_grp[self._status_dset_name]
    except KeyError:
        warn('status dataset not created yet')
        self._h5_status_dset = None
    try:
        self._h5_fit = self.h5_results_grp['Fit']
        self._h5_fit = USIDataset(self._h5_fit)
    except KeyError:
        # No Fit dataset yet; create it now if a fit (not just a guess) was requested
        self._h5_fit = None
        if not self._is_guess:
            self._create_fit_datasets()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _generate_datasets(self):\n datasets = list()\n for fname in sorted(os.listdir(self.base_dir)):\n if not self._filename_re.match(fname):\n continue\n\n file_path = os.path.join(self.base_dir, fname)\n try:\n fh = self._open_hdf5(file_path)\n\n except (IOError, OSError) as e:\n warnings.warn('Cannot access {}; skipped'.format(file_path))\n print(e)\n continue\n\n for key in fh:\n if self._groupname_re.match(key.lstrip('/')):\n datasets.append(ObjectTableWrapper(fh, key, self._schema))\n continue\n\n warn_msg = 'incorrect group name \"{}\" in {}; skipped this group'\n warnings.warn(warn_msg.format(os.path.basename(file_path), key))\n\n return datasets",
"def load_data(self, directory, group):\n \n em_images = h5py.File(os.path.join(directory, \"image.h5\"), 'r')\n segmentations = h5py.File(os.path.join(directory, \"human_labels_split.h5\"), 'r')\n\n if group == 'train':\n return em_images['main'][:192], segmentations['main'][:192]\n elif group == 'dev':\n return em_images['main'][192:], segmentations['main'][192:]\n \"\"\"\n em_images = h5py.File(os.path.join(directory, \"voronoi_boundary.h5\"), 'r')\n segmentations = h5py.File(os.path.join(directory, \"voronoi_segmentation.h5\"), 'r')\n\n if group == 'train':\n return em_images['main'][:16], segmentations['main'][:16]\n elif group == 'dev':\n return em_images['main'][16:], segmentations['main'][16:]\n \"\"\"",
"def get_datasets(h5group, prefix=''):\n for key in h5group.keys():\n h5obj = h5group[key]\n path = '{}/{}'.format(prefix, key)\n attrs = {att:val for att, val in h5obj.attrs.items()}\n\n if isinstance(h5obj, h5py.Dataset): \n \n # get metadata\n units = attrs[\"units\"] if 'units' in attrs else None\n spec = attrs[\"datatype\"] if 'datatype' in attrs else None\n \n # special handling for the nested waveform dataset\n if \"waveform/values/cumulative_length\" in path:\n nwfs = h5obj.shape[0]\n \n # must fix datatype AFTER this initial iteration\n yield (path, \"waveform\", nwfs, None, units, spec) \n elif \"waveform\" in path:\n pass\n \n # handle normal 'array<1>{real}' datasets\n else:\n yield (path, key, h5obj.shape[0], h5obj.dtype, units, spec) \n \n # test for group (go down)\n elif isinstance(h5obj, h5py.Group): \n yield from get_datasets(h5obj, path)",
"def get_data(data_dir, hdf5):\r\n\r\n # Get the filenames of the lists containing image paths and labels.\r\n train_file, val_file = build_dataset_index(data_dir)\r\n\r\n # Check if (creating and) loading from hdf5 database is desired.\r\n if hdf5:\r\n # Create folder to store dataset.\r\n if not os.path.exists('hdf5'):\r\n os.makedirs('hdf5')\r\n # Check if hdf5 databases already exist and create them if not.\r\n if not os.path.exists('hdf5/tiny-imagenet_train.h5'):\r\n from tflearn.data_utils import build_hdf5_image_dataset\r\n print ' Creating hdf5 train dataset.'\r\n build_hdf5_image_dataset(train_file, image_shape=(64, 64), mode='file', output_path='hdf5/tiny-imagenet_train.h5', categorical_labels=True, normalize=True)\r\n\r\n if not os.path.exists('hdf5/tiny-imagenet_val.h5'):\r\n from tflearn.data_utils import build_hdf5_image_dataset\r\n print ' Creating hdf5 val dataset.'\r\n build_hdf5_image_dataset(val_file, image_shape=(64, 64), mode='file', output_path='hdf5/tiny-imagenet_val.h5', categorical_labels=True, normalize=True)\r\n\r\n # Load training data from hdf5 dataset.\r\n h5f = h5py.File('hdf5/tiny-imagenet_train.h5', 'r')\r\n X = h5f['X']\r\n Y = h5f['Y']\r\n\r\n # Load validation data.\r\n h5f = h5py.File('hdf5/tiny-imagenet_val.h5', 'r')\r\n X_test = h5f['X']\r\n Y_test = h5f['Y'] \r\n\r\n # Load images directly from disk when they are required.\r\n else:\r\n from tflearn.data_utils import image_preloader\r\n X, Y = image_preloader(train_file, image_shape=(64, 64), mode='file', categorical_labels=True, normalize=True, filter_channel=True)\r\n X_test, Y_test = image_preloader(val_file, image_shape=(64, 64), mode='file', categorical_labels=True, normalize=True, filter_channel=True)\r\n\r\n # Randomly shuffle the dataset.\r\n X, Y = shuffle(X, Y)\r\n\r\n return X, Y, X_test, Y_test",
"def _create_guess_datasets(self):\n self.h5_guess = create_empty_dataset(self.h5_loop_metrics, loop_fit32, 'Guess')\n write_simple_attrs(self._h5_group, {'guess method': 'pycroscopy statistical'})\n\n # This is necessary comparing against new runs to avoid re-computation + resuming partial computation\n write_simple_attrs(self.h5_guess, self._parms_dict)\n write_simple_attrs(self.h5_guess, {'Loop_fit_method': \"pycroscopy statistical\", 'last_pixel': 0})\n\n self.h5_main.file.flush()",
"def _read_h5_dataset(self):\n dev = self.getParentObj()\n top = dev.getFileDescriptor()\n for attr in self._attr_list:\n data = top.get(attr)\n if data is None:\n msg = \"Unable to open object (Object %s doesn't exist)\" % attr\n raise TaurusException(msg)\n top = data\n return data",
"def load_datasets(in_h5_path, partition='train'):\n\tif partition == 'train':\n\t\tx_train = HDF5Matrix(datapath=in_h5_path, dataset=\"train/X_train\")\n\t\ty_train = HDF5Matrix(datapath=in_h5_path, dataset=\"train/y_train\")\n\t\treturn x_train, y_train\n\telif partition == 'test':\n\t\tx_test = HDF5Matrix(datapath=in_h5_path, dataset=\"test/X_test\")\n\t\ty_test = HDF5Matrix(datapath=in_h5_path, dataset=\"test/y_test\")\n\t\treturn x_test, y_test\n\telse:\n\t\tprint(\"Invalid 'partition' parameter: Valid values: ['train', 'test']\")",
"def _create_guess_datasets(self):\n raise NotImplementedError('Please override the _create_guess_datasets '\n 'specific to your model')",
"def get_data_heuristics(rootdir, img_path, datasetnames, heuristicnames):\n datasets = {}\n\n print('Loading: ' + str(len(datasetnames)) + ' datasets')\n for dataset in tqdm(datasetnames):\n time.sleep(0.1)\n\n # Ground truth images (mask image of expert)\n images_gt = load_scans(rootdir + dataset + '/crop_gt')\n images_gt = sitk.GetArrayFromImage(images_gt)\n\n # Heuristic model images (predictions of models)\n images_models = {}\n for model in heuristicnames:\n image = load_data_pickle(img_path, dataset=dataset, filename=model)\n images_models.update({model: image})\n\n # Save images in datasets dictionary\n datasets.update({dataset: {'gt':images_gt, 'models':images_models}})\n\n print(\"dataset created\")\n return datasets",
"def read_hdf5_group(filename, gname, vars_name=None):\n fid = h5py.File(filename, 'r')\n gid = fid.get(gname)\n if vars_name is None: vars_name = list(gid.keys())\n\n data = {}\n for var_name in vars_name:\n try:\n dset = gid.get(var_name)\n shape = dset.shape\n data[var_name] = np.zeros(shape)\n dset.read_direct(data[var_name])\n except:\n pass\n fid.close()\n print('Read from ', ''.join((filename,'/',gname)))\n print('Variables names = ')\n print('\\n'.join(vars_name))\n\n return data, vars_name",
"def _scan_and_sample_dataset(self, dives):\n roots = [os.path.join(self.p.data_root, n) for n in dives]\n ret = []\n for root in roots:\n h5_files = glob.glob(os.path.join(root, '*.h5'))\n for h5 in h5_files:\n try:\n fgroup = FrameGroup(h5, self.meta)\n except (AssertionError, KeyError, OSError) as e:\n if type(e) == AssertionError:\n print_warn('Unmatched time: {}'.format(h5))\n else:\n print_warn('Corrupted h5: {}'.format(h5))\n continue\n num_samples = int(self.p.downsample * fgroup.num_frames)\n indices = np.random.choice(\n fgroup.num_frames, size=num_samples, replace=False)\n ret.extend([(h5, int(idx)) for idx in indices])\n random.shuffle(ret)\n return ret",
"def get_datasets(load_key=None, maven=False):\n ds_names = {}\n if load_key == 'R2349': \n ds_names['batsrus_mf_lr'] = model_dir+'R2349/batsrus_3d_multi_fluid_lowres.h5'\n ds_names['batsrus_multi_species'] = model_dir+'R2349/batsrus_3d_multi_species.h5'\n ds_names['batsrus_electron_pressure'] = model_dir+'R2349/batsrus_3d_pe.h5'\n ds_names['heliosares'] ='/Volumes/triton/Data/ModelChallenge/R2349/heliosares_multi.h5'\n #ds_names['rhybrid'] ='/Volumes/triton/Data/ModelChallenge/R2349/rhybrid.h5'\n \n ds_types = {'batsrus1':[key for key in ds_names.keys() if 'multi_fluid' in key],\n 'batsrus2':[key for key in ds_names.keys() if 'multi_species' in key],\n 'batsrus3':[key for key in ds_names.keys() if 'electron_pressure' in key],\n 'batsrus4':[key for key in ds_names.keys() if 'mf_lr' in key],\n 'heliosares':[key for key in ds_names.keys() if 'helio' in key],\n 'rhybrid_helio':[key for key in ds_names.keys() if 'rhybrid' in key ]}\n if maven or True:\n ds_names['maven']=orbit_dir+'orbit_2349.csv'\n #ds_names['maven'] = orbit_dir+'orbit_plume_2349.csv'\n ds_types['maven']=['maven']\n elif load_key == 'batsrus_mf_lowres':\n ds_names['batsrus_mf_lr'] = model_dir+'R2349/batsrus_3d_multi_fluid_lowres.h5'\n ds_types = {'batsrus_mf_lr' : ['batsrus_mf_lr']}\n\n\n elif load_key == 'helio_multi':\n ds_names['t00550'] = model_dir+'R2349/Heliosares_Multi/t00550.h5'\n ds_names['t00560'] = model_dir+'R2349/Heliosares_Multi/t00560.h5'\n ds_names['t00570'] = model_dir+'R2349/Heliosares_Multi/t00570.h5'\n ds_names['t00580'] = model_dir+'R2349/Heliosares_Multi/t00580.h5'\n ds_names['t00590'] = model_dir+'R2349/Heliosares_Multi/t00590.h5'\n ds_names['t00600'] = model_dir+'R2349/Heliosares_Multi/t00600.h5'\n ds_names['t00610'] = model_dir+'R2349/Heliosares_Multi/t00610.h5'\n ds_names['t00620'] = model_dir+'R2349/Heliosares_Multi/t00620.h5'\n ds_names['t00630'] = model_dir+'R2349/Heliosares_Multi/t00630.h5'\n ds_names['t00640'] = model_dir+'R2349/Heliosares_Multi/t00640.h5'\n ds_names['t00650'] = model_dir+'R2349/Heliosares_Multi/t00650.h5'\n\n ds_types = {'heliosares':[key for key in ds_names.keys()]}\n if maven:\n #ds_names['maven'] = orbit_dir+'orbit_2349.csv'\n ds_types['maven']=['maven']\n elif load_key == 'SDC_BATS':\n ds_names['LS180_SSL000_max'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL000_max.h5'\n ds_names['LS270_SSL000_max'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL000_max.h5'\n ds_names['LS090_SSL000_max'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL000_max.h5'\n ds_names['LS180_SSL270_max'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL270_max.h5'\n ds_names['LS270_SSL270_max'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL270_max.h5'\n ds_names['LS090_SSL270_max'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL270_max.h5'\n ds_names['LS180_SSL180_max'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL180_max.h5'\n ds_names['LS270_SSL180_max'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL180_max.h5'\n ds_names['LS090_SSL180_max'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL180_max.h5'\n ds_names['LS180_SSL000_min'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL000_min.h5'\n ds_names['LS270_SSL000_min'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL000_min.h5'\n ds_names['LS090_SSL000_min'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL000_min.h5'\n ds_names['LS180_SSL270_min'] = model_dir+'SDC_Archive/BATSRUS/LS180_SSL270_min.h5'\n ds_names['LS270_SSL270_min'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL270_min.h5'\n ds_names['LS090_SSL270_min'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL270_min.h5'\n ds_names['LS180_SSL180_min'] = 
model_dir+'SDC_Archive/BATSRUS/LS180_SSL180_min.h5'\n ds_names['LS270_SSL180_min'] = model_dir+'SDC_Archive/BATSRUS/LS270_SSL180_min.h5'\n ds_names['LS090_SSL180_min'] = model_dir+'SDC_Archive/BATSRUS/LS090_SSL180_min.h5'\n\n ds_types = {'batsrus':[key for key in ds_names.keys()]}\n\n elif load_key == 'SDC_G1':\n #BATSRUS\n ds_names['bats_min_LS270_SSL0'] = \\\n model_dir+'SDC_Archive/BATSRUS/'+'3d__ful_4_n00060000_PERmin-SSLONG0.h5'\n ds_names['bats_min_LS270_SSL180'] = \\\n model_dir+'SDC_Archive/BATSRUS/'+'3d__ful_4_n00060000_PERmin-SSLONG180.h5'\n ds_names['bats_min_LS270_SSL270'] = \\\n model_dir+'SDC_Archive/BATSRUS/'+'3d__ful_4_n00060000_PERmin-SSLONG270.h5' \n \n #HELIOSARES\n #ds_names['helio_1'] = \\\n # model_dir+'SDC_Archive/HELIOSARES/Hybrid/'+'helio_1.h5'\n \n #ds_names['helio_2'] = \\\n # model_dir+'SDC_Archive/HELIOSARES/Hybrid/'+'helio_2.h5'\n \n \n ds_types = {'batsrus1':[key for key in ds_names.keys() if 'bats' in key],\n 'heliosares':[key for key in ds_names.keys() if 'helio' in key]}\n if maven:\n pass\n #ds_names['maven'] = orbit_dir+'orbit_2349.csv'\n #ds_types['maven']=['maven']\n\n elif load_key == 'rhybrid_res':\n ds_names = {'rhybrid240':'/Volumes/triton/Data/ModelChallenge/R2349/rhybrid.h5',\n 'rhybrid120':'/Volumes/triton/Data/ModelChallenge/R2349/HYB/state00030000.h5'}\n ds_types = {'rhybrid1':['rhybrid240'], 'rhybrid2':['rhybrid120']}\n elif load_key == 'batsrus_tseries':\n ds_names = {'batsrus_mf':'/Volumes/triton/Data/ModelChallenge/R2349/BATSRUS/10km_mf/3d__ful_4_n00040000.h5',\n 'batsrus_ms':'/Volumes/triton/Data/ModelChallenge/R2349/BATSRUS/10km_ms/3d__mhd_6_n0050000.h5'}\n ds_types = {'batsrus_mf':['batsrus_mf'], 'batsrus_ms':['batsrus_ms']}\n\n elif load_key == 'maven':\n ds_names, ds_types = {},{}\n ds_names['maven'] = orbit_dir+'orbit_2349.csv'\n ds_types['maven']=['maven']\n elif load_key == 'exo_2349':\n keys = ['2349_1RM_225km','2349_1RM_450km', '2349_2RM_450km',\n '2349_2RM_900km','2349_4RM_900km'] \n ds_names = {k:exo_dir+'/'+k+'/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n elif load_key == 'exo_comparisonA':\n keys = ['2349_1RM_225km', '2349_2RM_450km',\n '2349_1.5RM_338km'] \n ds_names = {k:exo_dir+'/ComparisonA/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n elif load_key == 'exo_comparisonB':\n keys = ['2349_1RM_225km', 'T0_1RM_225km', 'T1_1RM_225km', \"T2_1RM_225km\"] \n ds_names = {k:exo_dir+'/ComparisonB/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n\n elif load_key == 'exo_t1':\n keys = ['T1_1RM_112km', 'T1_1RM_225km', #'T1_1RM_450km',\n 'T1_2RM_225km', 'T1_2RM_450km', #'T1_2RM_900km',\n 'T1_4RM_900km']\n\n ds_names = {k:exo_dir+'/'+k+'/'+k+'.h5' for k in keys}\n ds_types = {k:[k] for k in keys}\n\n else:\n print('No datasets selected')\n \n\n return (ds_names, ds_types)",
"def get_data(self):\n\n if not self.checked:\n self.check_cache()\n h5f = h5py.File(self.data_filename, 'r')\n train_lbl = h5f['train_lbl'][:]\n train_img = h5f['train_img'][:]\n val_lbl = h5f['val_lbl'][:]\n val_img = h5f['val_img'][:]\n h5f.close()\n return train_img, train_lbl, val_img, val_lbl",
"def get_data(self, grp, class_type=\"NXdata\"):\n coll = [grp[name] for name in grp\n if isinstance(grp[name], h5py.Dataset) and\n self.get_attr(grp[name], \"NX_class\") == class_type]\n return coll",
"def datasets(self):\n pass",
"def from_hdf5(cls, group_or_filename):\n if isinstance(group_or_filename, h5py.Group):\n group = group_or_filename\n else:\n h5file = h5py.File(str(group_or_filename), 'r')\n\n # Make sure version matches\n if 'version' in h5file.attrs:\n major, minor = h5file.attrs['version']\n # For now all versions of HDF5 data can be read\n else:\n raise IOError(\n 'HDF5 data does not indicate a version. Your installation of '\n 'the OpenMC Python API expects version {}.x data.'\n .format(HDF5_VERSION_MAJOR))\n\n group = list(h5file.values())[0]\n\n name = group.name[1:]\n atomic_number = group.attrs['Z']\n mass_number = group.attrs['A']\n metastable = group.attrs['metastable']\n atomic_weight_ratio = group.attrs['atomic_weight_ratio']\n\n data = cls(name, atomic_number, mass_number, metastable, \n atomic_weight_ratio)\n\n # Read energy grid\n data.energy = group['energy'][()]\n\n # Read reaction data\n rxs_group = group['reactions']\n for name, obj in sorted(rxs_group.items()):\n if name.startswith('reaction_'):\n rx = ProtonReaction.from_hdf5(obj, data.energy)\n data.reactions[rx.mt] = rx\n\n return data",
"def generate_data(self):\n self.remove_hdf5_file()\n hdf5_handler = self.create_hdf5_file()\n self.populate_hdf5_file(hdf5_handler, self.dataset)",
"def get_dataloader(data_folder, model_name, data_name, size=\"default\"):\n training_set = None\n validation_set = None\n\n if model_name == \"Howe_Patterson\":\n if data_name == \"combined\":\n partition = []\n for data_fold in data_folder:\n partition.append(load_obj(os.path.join(data_fold, 'data_partition.pkl')))\n\n elif size == \"small\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_small.pkl'))\n elif size == \"tiny\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_tiny.pkl'))\n else:\n partition = load_obj(os.path.join(data_folder, 'data_partition.pkl'))\n\n\n if data_name == \"SHHS\":\n training_set = Dataset_full_SHHS(partition['train'], data_folder)\n validation_set = Dataset_full_SHHS(partition['validation'], data_folder)\n elif data_name == \"snooze\":\n training_set = Dataset_full(partition['train'], data_folder)\n validation_set = Dataset_full(partition['validation'], data_folder)\n elif data_name == \"philips\":\n training_set = Dataset_Philips_full(partition['train'], data_folder)\n validation_set = Dataset_Philips_full(partition['validation'], data_folder)\n elif data_name == \"HMC\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"combined\":\n training_set = ConcatDataset(\n Dataset_full(partition[0]['train'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['train'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n validation_set = ConcatDataset(\n Dataset_full(partition[0]['validation'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['validation'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n else:\n print(\"{} wrong data for dataloader\".format(data_name))\n exit()\n elif model_name == \"Deep_Sleep\":\n if data_name == \"combined\":\n partition = []\n for data_fold in data_folder:\n partition.append(load_obj(os.path.join(data_fold, 'data_partition.pkl')))\n\n elif size == \"small\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_small.pkl'))\n elif size == \"tiny\":\n partition = load_obj(os.path.join(data_folder, 'data_partition_tiny.pkl'))\n else:\n partition = load_obj(os.path.join(data_folder, 'data_partition.pkl'))\n\n if data_name == \"SHHS\":\n training_set = Dataset_full_SHHS(partition['train'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n validation_set = Dataset_full_SHHS(partition['validation'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n elif data_name == \"snooze\":\n training_set = Dataset_full(partition['train'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n validation_set = Dataset_full(partition['validation'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n elif data_name == \"philips\":\n training_set = Dataset_Philips_full(partition['train'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n validation_set = Dataset_Philips_full(partition['validation'], data_folder, downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False)\n elif data_name == \"HMC\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"combined\":\n # TODO combined dataset 
https://discuss.pytorch.org/t/train-simultaneously-on-two-datasets/649/17\n training_set = ConcatDataset(\n Dataset_full(partition[0]['train'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['train'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n validation_set = ConcatDataset(\n Dataset_full(partition[0]['validation'], data_folder[0], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False),\n Dataset_full_SHHS(partition[1]['validation'], data_folder[1], downsample_ratio=4,\n pre_allocation=2 ** 22, down_sample_annotation=False))\n else:\n print(\"{} wrong data for dataloader\".format(data_name))\n exit()\n elif model_name == \"ConvNet_IID\":\n if data_name == \"combined\":\n partition = []\n for data_fold in data_folder:\n partition.append(load_obj(os.path.join(data_fold, 'data_partition_IID_windows.pkl')))\n else:\n partition = load_obj(os.path.join(data_folder, 'data_partition_IID_windows.pkl'))\n if data_name == \"SHHS\":\n training_set = Dataset_IID_window_SHHS(partition['train'], data_folder)\n validation_set = Dataset_IID_window_SHHS(partition['validation'], data_folder)\n elif data_name == \"snooze\":\n training_set = Dataset_IID_window(partition['train'], data_folder)\n validation_set = Dataset_IID_window(partition['validation'], data_folder)\n elif data_name == \"philips\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"HMC\":\n print(\"{} not implemented data\".format(data_name))\n exit()\n elif data_name == \"combined\":\n training_set = ConcatDataset(\n Dataset_IID_window(partition[0]['train'], data_folder[0]),\n Dataset_IID_window_SHHS(partition[1]['train'], data_folder[1]))\n validation_set = ConcatDataset(\n Dataset_IID_window(partition[0]['validation'], data_folder[0]),\n Dataset_IID_window_SHHS(partition[1]['validation'], data_folder[1]))\n else:\n print(\"{} wrong data for dataloader\".format(data_name))\n exit()\n\n else:\n print(\"{} wrong model for dataloader\".format(model_name))\n exit()\n\n return training_set, validation_set",
"def _create_fit_datasets(self):\n\n if self.h5_guess is None:\n warn('Need to guess before fitting!')\n return\n\n self.h5_fit = create_empty_dataset(self.h5_guess, loop_fit32, 'Fit')\n write_simple_attrs(self._h5_group, {'fit method': 'pycroscopy functional'})\n\n # This is necessary comparing against new runs to avoid re-computation + resuming partial computation\n write_simple_attrs(self.h5_fit, self._parms_dict)\n write_simple_attrs(self.h5_fit, {'Loop_fit_method': \"pycroscopy functional\", 'last_pixel': 0})",
"def get_test_dataset_SetLoader(self, set_name='train'):\n set_loader = self.load_hdf5_file_SetLoader(set_name)\n\n set_data = self.dataset[set_name]\n set_fields = self.data_fields[set_name]\n\n return set_loader, set_data, set_fields",
"def read_hdf5(path_to_file):\n\n print(\"\\nReading HDF5 file: \", path_to_file)\n file = h5py.File(path_to_file, 'r')\n\n # List the groups\n groups = list(file.keys())\n print(\"Groups available: \", groups)\n\n # Read Zemax Metadata\n zemax_metadata = {}\n print(\"\\nZemax Metadata:\")\n for key in file['Zemax Metadata'].attrs.keys():\n print('{} : {}'.format(key, file['Zemax Metadata'].attrs[key]))\n zemax_metadata[key] = file['Zemax Metadata'].attrs[key]\n\n # Read the analysis groups\n for group_name in groups:\n if group_name != 'Zemax Metadata':\n analysis_group = file[group_name]\n print('\\nAnalysis: ', group_name)\n # For each Analysis Group we loop over subgroups\n for subgroup_key in analysis_group.keys():\n subgroup = analysis_group[subgroup_key]\n print('Subgroup #', subgroup_key)\n # List the metadata of the subgroup\n for att_key in subgroup.attrs.keys():\n print(' {} : {}'.format(att_key, subgroup.attrs[att_key]))\n\n file.close()\n\n return zemax_metadata",
"def load_data(self):\n for set_name in self.image_dir_path:\n if self.verbose:\n print('\\n> Loading data files for the set: ' + set_name)\n\n # image dir\n image_dir = os.path.join(self.data_path, self.image_dir_path[set_name])\n\n # annotation file path\n annot_filepath = os.path.join(self.data_path, self.annotation_path[set_name])\n\n if 'test' in set_name:\n yield load_data_test(set_name, image_dir, annot_filepath, self.verbose)\n else:\n yield self.load_data_trainval(set_name, image_dir, annot_filepath)",
"def read_metadata(self, data_set):\n # checks if there is an open file in the dataset as load_data does\n # reading of metadata before reading the complete dataset\n if not hasattr(self, '_h5_base_group'):\n self._open_file(data_set)\n if 'metadata' in data_set._h5_base_group.keys():\n metadata_group = data_set._h5_base_group['metadata']\n self.read_dict_from_hdf5(data_set.metadata, metadata_group)\n return data_set",
"def read_hdf5(filename, **extras):\n groups = {'sampling': {}, 'obs': {}}\n res = {}\n with h5py.File(filename, \"r\") as hf:\n # loop over the groups\n for group, d in groups.items():\n # read the arrays in that group into the dictionary for that group\n for k, v in hf[group].items():\n d[k] = np.array(v)\n # unserialize the attributes and put them in the dictionary\n for k, v in hf[group].attrs.items():\n try:\n d[k] = json.loads(v)\n except:\n d[k] = v\n # do top-level attributes.\n for k, v in hf.attrs.items():\n try:\n res[k] = json.loads(v)\n except:\n res[k] = v\n res.update(groups['sampling'])\n res['obs'] = groups['obs']\n try:\n res['obs']['filters'] = load_filters([str(f) for f in res['obs']['filters']])\n except:\n pass\n try:\n res['rstate'] = pickle.loads(res['rstate'])\n except:\n pass\n try:\n mp = [names_to_functions(p.copy()) for p in res['model_params']]\n res['model_params'] = mp\n except:\n pass\n\n return res",
"def _setup_h5(self, data_gen_parms):\n\n '''\n Build the group structure down to the channel group\n '''\n # Set up the basic group structure\n root_grp = VirtualGroup('')\n root_parms = dict()\n root_parms['translator'] = 'FAKEBEPS'\n root_parms['data_type'] = data_gen_parms['data_type']\n root_grp.attrs = root_parms\n\n meas_grp = VirtualGroup('Measurement_')\n chan_grp = VirtualGroup('Channel_')\n\n meas_grp.attrs.update(data_gen_parms)\n\n # Create the Position and Spectroscopic datasets for the Raw Data\n ds_pos_inds, ds_pos_vals, ds_spec_inds, ds_spec_vals = self._build_ancillary_datasets()\n\n raw_chunking = calc_chunks([self.n_pixels,\n self.n_spec_bins],\n np.complex64(0).itemsize,\n unit_chunks=[1, self.n_bins])\n\n ds_raw_data = VirtualDataset('Raw_Data', data=None,\n maxshape=[self.n_pixels, self.n_spec_bins],\n dtype=np.complex64,\n compression='gzip',\n chunking=raw_chunking,\n parent=meas_grp)\n\n chan_grp.add_children([ds_pos_inds, ds_pos_vals, ds_spec_inds, ds_spec_vals,\n ds_raw_data])\n meas_grp.add_children([chan_grp])\n root_grp.add_children([meas_grp])\n\n hdf = HDFwriter(self.h5_path)\n hdf.delete()\n h5_refs = hdf.write(root_grp)\n\n # Delete the MicroDatasets to save memory\n del ds_raw_data, ds_spec_inds, ds_spec_vals, ds_pos_inds, ds_pos_vals\n\n # Get the file and Raw_Data objects\n h5_raw = get_h5_obj_refs(['Raw_Data'], h5_refs)[0]\n h5_chan_grp = h5_raw.parent\n\n # Get the Position and Spectroscopic dataset objects\n h5_pos_inds = get_h5_obj_refs(['Position_Indices'], h5_refs)[0]\n h5_pos_vals = get_h5_obj_refs(['Position_Values'], h5_refs)[0]\n h5_spec_inds = get_h5_obj_refs(['Spectroscopic_Indices'], h5_refs)[0]\n h5_spec_vals = get_h5_obj_refs(['Spectroscopic_Values'], h5_refs)[0]\n\n # Link the Position and Spectroscopic datasets as attributes of Raw_Data\n link_as_main(h5_raw, h5_pos_inds, h5_pos_vals, h5_spec_inds, h5_spec_vals)\n\n '''\n Build the SHO Group\n '''\n sho_grp = VirtualGroup('Raw_Data-SHO_Fit_', parent=h5_chan_grp.name)\n\n # Build the Spectroscopic datasets for the SHO Guess and Fit\n sho_spec_starts = np.where(h5_spec_inds[h5_spec_inds.attrs['Frequency']].squeeze() == 0)[0]\n sho_spec_labs = get_attr(h5_spec_inds, 'labels')\n ds_sho_spec_inds, ds_sho_spec_vals = build_reduced_spec_dsets(h5_spec_inds,\n h5_spec_vals,\n keep_dim=sho_spec_labs != 'Frequency',\n step_starts=sho_spec_starts)\n\n sho_chunking = calc_chunks([self.n_pixels,\n self.n_sho_bins],\n sho32.itemsize,\n unit_chunks=[1, 1])\n ds_sho_fit = VirtualDataset('Fit', data=None,\n maxshape=[self.n_pixels, self.n_sho_bins],\n dtype=sho32,\n compression='gzip',\n chunking=sho_chunking,\n parent=sho_grp)\n ds_sho_guess = VirtualDataset('Guess', data=None,\n maxshape=[self.n_pixels, self.n_sho_bins],\n dtype=sho32,\n compression='gzip',\n chunking=sho_chunking,\n parent=sho_grp)\n\n sho_grp.add_children([ds_sho_fit, ds_sho_guess, ds_sho_spec_inds, ds_sho_spec_vals])\n\n # Write the SHO group and datasets to the file and delete the MicroDataset objects\n h5_sho_refs = hdf.write(sho_grp)\n del ds_sho_fit, ds_sho_guess, ds_sho_spec_inds, ds_sho_spec_vals\n\n # Get the dataset handles for the fit and guess\n h5_sho_fit = get_h5_obj_refs(['Fit'], h5_sho_refs)[0]\n h5_sho_guess = get_h5_obj_refs(['Guess'], h5_sho_refs)[0]\n\n # Get the dataset handles for the SHO Spectroscopic datasets\n h5_sho_spec_inds = get_h5_obj_refs(['Spectroscopic_Indices'], h5_sho_refs)[0]\n h5_sho_spec_vals = get_h5_obj_refs(['Spectroscopic_Values'], h5_sho_refs)[0]\n\n # Link the Position and Spectroscopic 
datasets as attributes of the SHO Fit and Guess\n link_as_main(h5_sho_fit, h5_pos_inds, h5_pos_vals, h5_sho_spec_inds, h5_sho_spec_vals)\n link_as_main(h5_sho_guess, h5_pos_inds, h5_pos_vals, h5_sho_spec_inds, h5_sho_spec_vals)\n\n '''\n Build the loop group\n '''\n loop_grp = VirtualGroup('Fit-Loop_Fit_', parent=h5_sho_fit.parent.name)\n\n # Build the Spectroscopic datasets for the loops\n loop_spec_starts = np.where(h5_sho_spec_inds[h5_sho_spec_inds.attrs['DC_Offset']].squeeze() == 0)[0]\n loop_spec_labs = get_attr(h5_sho_spec_inds, 'labels')\n ds_loop_spec_inds, ds_loop_spec_vals = build_reduced_spec_dsets(h5_sho_spec_inds,\n h5_sho_spec_vals,\n keep_dim=loop_spec_labs != 'DC_Offset',\n step_starts=loop_spec_starts)\n\n # Create the loop fit and guess MicroDatasets\n loop_chunking = calc_chunks([self.n_pixels, self.n_loops],\n loop_fit32.itemsize,\n unit_chunks=[1, 1])\n ds_loop_fit = VirtualDataset('Fit', data=None,\n maxshape=[self.n_pixels, self.n_loops],\n dtype=loop_fit32,\n compression='gzip',\n chunking=loop_chunking,\n parent=loop_grp)\n\n ds_loop_guess = VirtualDataset('Guess', data=None,\n maxshape=[self.n_pixels, self.n_loops],\n dtype=loop_fit32,\n compression='gzip',\n chunking=loop_chunking,\n parent=loop_grp)\n\n # Add the datasets to the loop group then write it to the file\n loop_grp.add_children([ds_loop_fit, ds_loop_guess, ds_loop_spec_inds, ds_loop_spec_vals])\n h5_loop_refs = hdf.write(loop_grp)\n\n # Delete the MicroDatasets\n del ds_loop_spec_vals, ds_loop_spec_inds, ds_loop_guess, ds_loop_fit\n\n # Get the handles to the datasets\n h5_loop_fit = get_h5_obj_refs(['Fit'], h5_loop_refs)[0]\n h5_loop_guess = get_h5_obj_refs(['Guess'], h5_loop_refs)[0]\n h5_loop_spec_inds = get_h5_obj_refs(['Spectroscopic_Indices'], h5_loop_refs)[0]\n h5_loop_spec_vals = get_h5_obj_refs(['Spectroscopic_Values'], h5_loop_refs)[0]\n\n # Link the Position and Spectroscopic datasets to the Loop Guess and Fit\n link_as_main(h5_loop_fit, h5_pos_inds, h5_pos_vals, h5_loop_spec_inds, h5_loop_spec_vals)\n link_as_main(h5_loop_guess, h5_pos_inds, h5_pos_vals, h5_loop_spec_inds, h5_loop_spec_vals)\n\n self.h5_raw = USIDataset(h5_raw)\n self.h5_sho_guess = USIDataset(h5_sho_guess)\n self.h5_sho_fit = USIDataset(h5_sho_fit)\n self.h5_loop_guess = USIDataset(h5_loop_guess)\n self.h5_loop_fit = USIDataset(h5_loop_fit)\n self.h5_spec_vals = h5_spec_vals\n self.h5_spec_inds = h5_spec_inds\n self.h5_sho_spec_inds = h5_sho_spec_inds\n self.h5_sho_spec_vals = h5_sho_spec_vals\n self.h5_loop_spec_inds = h5_loop_spec_inds\n self.h5_loop_spec_vals = h5_loop_spec_vals\n self.h5_file = h5_raw.file\n\n return",
"def provide_data(tfds_name,\n batch_size,\n patch_size,\n split='train',\n color_labeled=0,\n num_parallel_calls=None,\n shuffle=True,\n domains=('Black_Hair', 'Blond_Hair', 'Brown_Hair'),\n download=True,\n data_dir=None):\n\n if tfds_name.startswith('cycle_gan'):\n ds = provide_custom_datasets(batch_size,\n None,\n shuffle,\n 1,\n patch_size,\n tfds_name,\n with_labels=True)\n\n images = [d['images'] for d in ds]\n labels = [d['labels'] for d in ds]\n\n else:\n if tfds_name.startswith('celeb_a'):\n ds = provide_domained_dataset(tfds_name, batch_size, patch_size,\n split=split,\n num_parallel_calls=num_parallel_calls,\n shuffle=shuffle,\n domains=domains,\n download=download,\n data_dir=data_dir)\n\n else:\n ds = provide_categorized_dataset(tfds_name, batch_size, patch_size,\n split=split,\n color_labeled=color_labeled,\n num_parallel_calls=num_parallel_calls,\n shuffle=shuffle,\n download=download,\n data_dir=data_dir)\n\n next_batch = tf.compat.v1.data.make_one_shot_iterator(ds).get_next()\n domains = next_batch.keys()\n images = [next_batch[domain]['images'] for domain in domains]\n labels = [next_batch[domain]['labels'] for domain in domains]\n\n return images, labels",
"def read_dataset_v1():\n path = load_config()\n T = feather.read_dataframe(path['data_dir'] / 'T_dat.feather')\n E = feather.read_dataframe(path['data_dir'] / 'E_dat.feather')\n M = feather.read_dataframe(path['data_dir'] / 'Meta.feather')\n data = sio.loadmat(path['data_dir'] / 'highvar_genes.mat', squeeze_me=True)\n return T, E, M, data",
"def dataset(directory, subset, num_folds, fold, holdout):\n local_file = learn.datasets.base.maybe_download('omniglot.mat', directory, _DOWNLOAD_URL)\n data = scipy.io.loadmat(local_file)\n\n images = data[_SUBSET_TO_GROUP[subset]].astype(np.float32)\n images = images.transpose([1, 0]).reshape([-1] + IMAGE_SHAPE)\n\n if subset == 'train':\n images = images[:-_VALIDATION_SIZE]\n elif subset == 'validate':\n images = images[-_VALIDATION_SIZE:]\n\n images = get_folds(images, num_folds, fold, holdout)\n return slim.dataset.Dataset(\n images, None, None, images.shape[0], _ITEMS_TO_DESCRIPTIONS,\n data_shape=IMAGE_SHAPE)",
"def dataset(self):\n for d in dirlist(os.path.join(self.datadir)):\n for f in imlist(d):\n yield ImageDetection(filename=f).category(filebase(d))",
"def roi_data_from_hdf(self, h5file, roi_wildcard, data_type, run = [], postFix = ['mcf','sgtf'],combined = False, prf = False):\n\t\t\n\t\tif combined == False:\n\t\t\tthis_run_group_name = os.path.split(self.runFile(stage = 'processed/mri', run = run, postFix = postFix))[1]\n\t\telse:\n\t\t\tthis_run_group_name = os.path.split(self.runFile(stage = 'processed/mri/', extension = '_combined'))[1]\n\n\t\tif prf == True:\n\t\t\tthis_run_group_name = 'prf'\t\n\t\n\t\ttry:\n\t\t\tthisRunGroup = h5file.get_node(where = '/', name = this_run_group_name, classname='Group')\n\t\t\t# self.logger.info('group ' + self.runFile(stage = 'processed/mri', run = run, postFix = postFix) + ' opened')\n\t\t\troi_names = []\n\t\t\tfor roi_name in h5file.iterNodes(where = '/' + this_run_group_name, classname = 'Group'):\n\t\t\t\tif len(roi_name._v_name.split('.')) == 2:\n\t\t\t\t\themi, area = roi_name._v_name.split('.')\n\t\t\t\t\tif roi_wildcard == area:\n\t\t\t\t\t\troi_names.append(roi_name._v_name)\n\t\t\t\t#if len(roi_name._v_name.split('.')) == 3:\n\t\t\t\t#\themi, area, do_nothing = roi_name._v_name.split('.')\n\t\t\t\t#\tif roi_wildcard == area:\n\t\t\t\t#\t\troi_names.append(roi_name._v_name)\n\t\t\tif len(roi_names) == 0:\n\t\t\t\tself.logger.info('No rois corresponding to ' + roi_wildcard + ' in group ' + this_run_group_name)\n\t\t\t\treturn None\n\t\texcept NoSuchNodeError:\n\t\t\t# import actual data\n\t\t\tself.logger.info('No group ' + this_run_group_name + ' in this file')\n\t\t\treturn None\n\t\t\n\t\tall_roi_data = []\n\t\tfor roi_name in roi_names:\n\t\t\tthisRoi = h5file.get_node(where = '/' + this_run_group_name, name = roi_name, classname='Group')\n\t\t\tall_roi_data.append( eval('thisRoi.' + data_type + '.read()') )\n\t\tall_roi_data_np = np.hstack(all_roi_data).T\n\t\treturn all_roi_data_np"
] |
[
"0.6769013",
"0.6504869",
"0.62971133",
"0.62946093",
"0.62839943",
"0.6193727",
"0.6161359",
"0.6161155",
"0.6072767",
"0.6044694",
"0.60155284",
"0.5996884",
"0.59594387",
"0.5885074",
"0.5856235",
"0.57856464",
"0.57595074",
"0.57548445",
"0.57152677",
"0.5700575",
"0.5699335",
"0.5688054",
"0.5686966",
"0.5685959",
"0.5662731",
"0.5656396",
"0.56555736",
"0.5653341",
"0.56261307",
"0.5614577"
] |
0.7772891
|
0
|
Function to compute ZCA whitening matrix (aka Mahalanobis whitening).
|
def zca_whitening_matrix(X):
    # X: [M x N] data matrix with M variables (rows) and N observations (columns).
    # Covariance matrix [row-wise variables]: Sigma = (X - mu) * (X - mu)' / (N - 1)
    sigma = np.cov(X, rowvar=True)  # [M x M]
    # Singular Value Decomposition: sigma = U * np.diag(S) * V
    U, S, V = np.linalg.svd(sigma)
    # U: [M x M] eigenvectors of sigma.
    # S: [M] eigenvalues of sigma.
    # V: [M x M] transpose of U (sigma is symmetric).
    # Whitening constant: prevents division by zero.
    epsilon = 1e-5
    # ZCA whitening matrix: U * Lambda^(-1/2) * U'
    ZCAMatrix = np.dot(U, np.dot(np.diag(1.0 / np.sqrt(S + epsilon)), U.T))  # [M x M]
    return ZCAMatrix
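
A minimal usage sketch, added for illustration (the random data matrix X and the name W are assumptions, not part of the original record): apply the returned matrix to centered data arranged with variables in rows; the covariance of the result is then approximately the identity.

# Hypothetical usage (illustration only): whiten 5 variables observed 1000 times.
import numpy as np
X = np.random.randn(5, 1000)                        # variables in rows, observations in columns
W = zca_whitening_matrix(X)                         # [5 x 5] ZCA whitening matrix
X_white = W.dot(X - X.mean(axis=1, keepdims=True))  # whitened data: np.cov(X_white) ~ identity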
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _whiten_embeddings_zca(self, xs, lambd = 1e-8):\n print('ZCA-Whitening')\n Cov_s = np.cov(xs.T)\n d = Cov_s.shape[0]\n\n W_s = scipy.linalg.sqrtm(Cov_s + lambd*np.eye(d))\n xs = xs@W_s#.T\n return xs",
"def _whiten_embeddings_zca(self, lambd = 1e-8):\n print('ZCA-Whitening')\n Cov_s = np.cov(self.xs.T)\n Cov_t = np.cov(self.xt.T)\n d = Cov_s.shape[0]\n\n W_s = scipy.linalg.sqrtm(Cov_s + lambd*np.eye(d))\n W_t = scipy.linalg.sqrtm(Cov_t + lambd*np.eye(d))\n\n self.xs = self.xs@W_s#.T\n self.xt = self.xt@W_t#.T",
"def zca_whiten(X):\n cov = np.cov(X.T)\n U, Sigma, V = np.linalg.svd(cov)\n D = np.diag(np.sqrt(1/Sigma)) # square root inverse of singular value matrix\n W = U @ D @ V # rotation matrix\n centered = X - X.mean(axis=0)\n X_white = np.einsum(\"ij, ...j -> ...i\", W, centered)\n return X_white",
"def ZCA_whitening(imageVector):\n\n sigma = np.dot(inputs, inputs.T)/inputs.shape[1] #Correlation matrix\n U,S,V = np.linalg.svd(sigma) #Singular Value Decomposition\n epsilon = 0.1 #Whitening constant, it prevents division by zero\n ZCAMatrix = np.dot(np.dot(U, np.diag(1.0/np.sqrt(np.diag(S) + epsilon))), U.T) #ZCA Whitening matrix\n return np.dot(ZCAMatrix, inputs) #Data whitening",
"def zca_whiten(self, data):\n # input tensor is [batch_size, num_samples, dim_x]\n num = compat.get_dim_int(data, 1)\n\n data = tf.cast(data, tf.float64)\n\n # center the samples\n mean = tf.reduce_mean(data, axis=1, keepdims=True)\n centered = data - tf.tile(mean, [1, num, 1])\n\n # whiten\n # compute the current covariance\n diff_square = tf.matmul(centered[:, :, :, None],\n centered[:, :, None, :])\n # sigma: [batch_size, dim_x, dim_x]\n sigma = tf.reduce_mean(diff_square, axis=1)\n # get the whitening matrix\n # s: [batch_size, dim_x], u: [batch_size, dim_x, dim_x]\n s, u, _ = tf.linalg.svd(sigma, full_matrices=True)\n s_inv = 1. / tf.sqrt(s + 1e-5)\n s_inv = tf.linalg.diag(s_inv)\n # w: [batch_size, dim_x, dim_x]\n w = tf.matmul(u, tf.matmul(s_inv, u, transpose_b=True))\n\n whitened = tf.matmul(centered, w)\n\n return whitened",
"def PCA_ZCA_whiten_transform(X, symmetric = True):\n X = np.asarray(X, dtype='float32')\n #center\n X = X - np.mean(X, axis=1).reshape((-1,1))\n C = np.dot(X.T, X) / X.shape[0]\n EigVal, EigVec = np.linalg.eigh(C)\n mval = np.max(np.real(EigVal))\n max_ratio = 1e4\n tol = mval / max_ratio\n #ngd = np.nonzero(np.real(EigVal) > mval/max_ratio)\n if symmetric:\n ngd = np.nonzero(np.real(EigVal) > tol)[0]\n P = (np.real(EigVal[ngd])**(-0.5)).reshape((-1,1)) * EigVec[:, ngd].T # [reduced dim]*[dim]\n P = np.dot(EigVec[:, ngd], P)\n else:\n EigVal[EigVal <= tol] = 1.\n P = EigVal.reshape((-1,1)) * (EigVec.T)\n X = np.dot(X, P.T)\n return X, P.T",
"def pca_whiten(X):\n cov = np.cov(X.T)\n Sigma, U = np.linalv.eig(cov)\n D = np.diag(np.sqrt(1/Sigma)) # square root inverse of singular value matrix\n W = D @ U.T # rotation matrix\n centered = X - X.mean(axis=0)\n X_white = np.einsum(\"ij, ...j -> ...i\", W, centered)\n return X_white",
"def whiten(data):\n\n from numpy.linalg import eig\n\n eps = 0.01\n \n # covariance matrix\n Sigma = np.cov(data)\n\n # eigenvalue decomposition\n V,U = eig(Sigma)\n \n W = U.dot(np.diag((V+eps)**(-0.5)).dot(U.transpose()))\n\n return W",
"def whiten(X):\n X = asmatrix(X - mean(asmatrix(X),axis=1))\n C = X * X.T / X.shape[1]\n d, V = eigh(C)\n d[d<0] = 0 # In case d returns very small negative eigenvalues\n return (V / sqrt(d+spacing(1))) * V.T * X",
"def __call__(self,Zin):\r\n self.Z = np.zeros((self.Nc,))\r\n for ii in range(0,self.Nc):\r\n self.Z[ii] = np.dot(self.W[:,ii],Zin[self.ind[ii,:]])\r\n \r\n return self.Z",
"def whiten(self, blocks=20000, eps=0.0001):\r\n cov = np.zeros([self.datasize, self.datasize])\r\n nblocks = int(np.ceil(self.nstims / blocks))\r\n for ind in range(nblocks):\r\n X = self.data[blocks*ind:blocks*(ind+1)]\r\n cov += X.T.dot(X)\r\n eigvals, eigvecs = np.linalg.eigh(cov)\r\n if np.any(np.isnan(eigvals)):\r\n print('Warning: some nan eigenvalues found, replacing with small numbers.')\r\n eigvals[np.isnan(eigvals)] = 0.9 * eps**2\r\n if np.any(eigvals < 0):\r\n print('Warning: some negative eigenvalues of covariance matrix found. Replacing with small numbers.')\r\n eigvals[eigvals < 0] = 0.9 * eps**2\r\n\r\n idx = np.argsort(eigvals)\r\n svals = np.sqrt(eigvals[idx][::-1])\r\n eigvecs = eigvecs[idx][::-1]\r\n\r\n # do ZCA whitening\r\n wm = np.diag(1./np.maximum(svals, eps))\r\n self.zca_matrix = eigvecs.T.dot(wm).dot(eigvecs)\r\n self.data = self.data.dot(self.zca_matrix)",
"def Wv(z, k, c_M=0):\n k *= 0.6763 # 1/Mpc\n c = 299792458/1000 # km/s\n A = np.zeros((len(k), len(z)))\n A[:] = -(1 - c/(Hcal(z)*chi(z)) + alpha_M(z, c_M)/2)*f(z)*H(z)/(1+z)\n Wtransp = np.transpose(A)*1/k**2*1/c\n W = np.transpose(Wtransp)\n return W",
"def _zchi2_one(Tb, weights, flux, wflux, zcoeff, solve_matrices_algorithm=\"PCA\"):\n\n M = Tb.T.dot(np.multiply(weights[:,None], Tb))\n y = Tb.T.dot(wflux)\n\n try:\n zcoeff[:] = solve_matrices(M, y, solve_algorithm=solve_matrices_algorithm, use_gpu=False)\n except np.linalg.LinAlgError:\n return 9e99\n except NotImplementedError:\n return 9e99\n\n model = Tb.dot(zcoeff)\n\n zchi2 = np.dot( (flux - model)**2, weights )\n\n return zchi2",
"def form_matrix_yt(w):\r\n M = np.zeros((len(w),len(w)))\r\n for i in range(len(w)):\r\n for j in range(len(w)):\r\n M[i,j] = YoungTableaux(w[i],w[j]).CMNR()\r\n return M",
"def whiten(M):\r\n sigma = util.cov(M)\r\n U,S,V = np.linalg.svd(sigma)\r\n S_1_2 = S**(-0.5)\r\n S = np.diag(S_1_2.T)\r\n Aw = np.dot(V, np.dot(S, V.T))\r\n return np.dot(M, Aw)",
"def _whiten_wls(mat, weights):\n\n if weights.shape[0] != mat.shape[0]:\n raise ValueError(\n \"The number of weights must be the same as the number of observations\"\n )\n if mat.ndim == 1:\n return mat * np.sqrt(weights)\n elif mat.ndim == 2:\n # return np.column_stack([x[:,0], np.sqrt(weights)[:, None]*x[:,1:]])\n return np.sqrt(weights)[:, None] * mat",
"def mahalanobis(x=None, data=None, cov=None):\n x_minus_mu = x - np.mean(data)\n #print(\"x_minum_mu\",x_minus_mu)\n if not cov:\n cov = np.cov(data.values.T)\n inv_covmat = sp.linalg.inv(cov)\n #print(\"inv_covmat\",inv_covmat)\n left_term = np.dot(x_minus_mu, inv_covmat)\n #print(\"lt\",left_term)\n mahal = np.dot(left_term, x_minus_mu.T)\n #print(\"mahal\",mahal)\n return mahal.diagonal()",
"def Wk(z, zp, k, c_M=0, c_B=0):\n c = 299792458/1000 # km/s\n A = np.zeros((len(k), len(z), len(zp)))\n chiz = np.copy(A); np.transpose(chiz, (0,2,1))[:] = chi(z)\n chifraction = (chiz - chi(zp))*chi(zp)/chiz\n A[:] = omega_matter(zp)*H(zp)/(1 + zp)**2*G_light(zp, c_M, c_B)\n W2 = 3/2*A*chifraction\n Wtransp = np.transpose(W2)#/k**2 # If k is included, multiply by h\n W = np.transpose(Wtransp)\n W /= c # Unit correction for Wk to be unitless\n return W",
"def calc_zchi2_batch(spectra, tdata, weights, flux, wflux, nz, nbasis, solve_matrices_algorithm=\"PCA\", use_gpu=False, fullprecision=True):\n zchi2 = np.zeros(nz)\n if (weights.sum() == 0):\n zchi2[:] = 9e99\n zcoeff = np.zeros((nz, nbasis))\n return (zchi2, zcoeff)\n if (use_gpu):\n global cp\n import cupy as cp\n #On the GPU, all operations are batch operations for all templates\n #in parallel.\n\n #1) batch_dot_product_sparse will compute dot products of all\n #spectra with all templates in batch and return a 3D array of\n #size (nz x ncols x nbasis).\n Tbs = batch_dot_product_sparse(spectra, tdata, nz, use_gpu)\n if (cp_memcheck):\n #Free memory on consumer grade GPUs with low resources\n mpool = cp.get_default_memory_pool()\n mpool.free_all_blocks()\n\n #2) On the GPU, M and y are computed for all templates at once\n #CUPY swapaxes is the equivalent of the transpose in CPU mode\n #and the @ matrix multiplication operator performs a dot\n #product for each template.\n\n ###!!! NOTE - there are 3 different options for calculating the\n ### M and y arrays -\n ### A) Straight CUPY, which works well on perlmutter with a\n ### runtime of 6.2s on 1 GPU and 2.0s on 4 GPUs, but is\n ### unusably slow on Volta generation GPUs (16.8s for only\n ### 10 targets on a 1660 Super).\n ### B) calc_M_y_batch, the custom CUDA kernel, which is the\n ### fastest at 2.9s on 1 GPU and 0.7s on 4 GPUs (and 0.7s\n ### for 10 targets on a 1660 Super) but is the most difficult\n ### from a maintenance perspective\n ### C) Use the calc_batch_dot_product_3d3d_gpu kernel to offload\n ### only the matrix multiplication for M (and transpose of\n ### Tbs) but use CUPY for everything else. This strikes a\n ### middle ground that is very maintainable but removes the\n ### bottleneck of the CUPY Volta issue. 5.7s on 1 GPU and\n ### 1.8s on 4 GPUs on Perlmutter; 1.6s for 10 targets on\n ### 1660 Super.\n ###!!! NOTE - uncomment the 2 lines below to run (A)\n #all_M = Tbs.swapaxes(-2, -1) @ (weights[None, :, None] * Tbs)\n #all_y = (Tbs.swapaxes(-2, -1) @ wflux)\n ###!!! NOTE - uncomment the below line to run (B)\n #(all_M, all_y) = calc_M_y_batch(Tbs, weights, wflux, nz, nbasis)\n ###!!! NOTE - uncomment the 2 lines below to run (C)\n all_M = calc_batch_dot_product_3d3d_gpu(Tbs, (weights[None, :, None] * Tbs), transpose_a=True, fullprecision=fullprecision)\n all_y = (Tbs.swapaxes(-2, -1) @ wflux)\n ###!!! NOTE - uncomment the 2 lines below to run an alternative\n ### version of (C) that does the transpose on the CPU - this seems\n ### to needlessly waste time though\n #all_M = calc_batch_dot_product_3d3d_gpu(cp.ascontiguousarray(Tbs.swapaxes(-2, -1)), (weights[None, :, None] * Tbs))\n #all_y = (Tbs.swapaxes(-2, -1) @ wflux)\n\n #3) Use new helper method solve_matrices to use appropriate method\n #for this template to solve for zcoeff in batch for all_M and all_y.\n #There is no Error thrown by cupy's version of linalg.solve so just\n #need to catch NotImplementedError.\n try:\n zcoeff = solve_matrices(all_M, all_y, solve_algorithm=solve_matrices_algorithm, use_gpu=True)\n except NotImplementedError:\n zchi2[:] = 9e99\n zcoeff = np.zeros((nz, nbasis))\n return (zchi2, zcoeff)\n\n #4) calc_batch_dot_product_3d2d will compute the dot product\n #of Tbs and zcoeff for all templates in parallel.\n #It is the same as model[i,:,:] = Tbs[i,:,:].dot(zcoeff[i,:])\n model = calc_batch_dot_product_3d2d(Tbs, zcoeff, use_gpu)\n\n #5) On the GPU, (flux-model)*(flux-model) is faster than\n #(flux-model)**2. 
The @ matrix multiplication operator performs\n #a dot product for each template. get() copies the data back\n #from the GPU to the numpy array allocated for zchi2.\n zchi2[:] = (((flux - model)*(flux-model)) @ weights).get()\n #Copy data from GPU to numpy arrays\n zcoeff = zcoeff.get()\n\n if (cp_memcheck):\n #Free memory on consumer grade GPUs with low resources\n del Tbs\n del all_M\n del all_y\n del model\n mpool = cp.get_default_memory_pool()\n mpool.free_all_blocks()\n else:\n zcoeff = np.zeros((nz, nbasis))\n #On the CPU, the templates are looped over and all operations\n #are performed on one template at a time.\n\n for i in range(nz):\n #1) dot_product_sparse_one will compute dot products of all\n #spectra with ONE template and return a 2D array of size\n #(ncols x nbasis)\n Tb = dot_product_sparse_one(spectra, tdata, i)\n\n #2) On the CPU, M and y are computed for each template\n M = Tb.T.dot(np.multiply(weights[:,None], Tb))\n y = Tb.T.dot(wflux)\n\n #3) Use new helper method solve_matrices to use appropriate method\n #for this template to solve for zcoeff for each M, y.\n #Catch LinAlgError and NotImplementedError\n try:\n zcoeff[i,:] = solve_matrices(M, y, solve_algorithm=solve_matrices_algorithm, use_gpu=False)\n except np.linalg.LinAlgError:\n zchi2[i] = 9e99\n continue\n except NotImplementedError:\n zchi2[i] = 9e99\n continue\n\n #4) Calculate dot products individually for each template\n model = Tb.dot(zcoeff[i,:])\n\n #5) Calculate this zchi2 element individually for each template\n zchi2[i] = np.dot( (flux - model)**2, weights )\n return (zchi2, zcoeff)",
"def alpha_M(z, c_M = 0):\n I = CLASS(z_i)\n return I.alpha_M(z, c_M)",
"def zca(x, xtest, bias=0.1):\n covariance = np.dot(x.T, x) / x.shape[0]\n covariance += bias * np.eye(x.shape[1])\n U, S, _ = np.linalg.svd(covariance)\n pc = U @ np.diag(1. / np.sqrt(S)) @ U.T\n X = x @ pc\n Xtest = xtest @ pc\n return X, Xtest",
"def computeB(linsys_setup):\n datamaps, ninvs, beams, freqs, power_2d, precond_2d, clumaps, g_nu, \\\n map_prop = linsys_setup\n nx, ny, pixScaleX, pixScaleY = map_prop\n nFreq = len(g_nu); nCluster = len(clumaps[0])\n ksz = False\n if len(clumaps)==2: ksz = True\n \n def computeCMBY(d0):\n \"\"\"\n For CMB, y = S^1/2 A N^-1 d, where S is CMB signal covariance matrix (Cl's)\n \"\"\"\n # N.B. Reshaping operations required to go between 2D pixel arrays and \n # 1D vector (for linear system)\n d2 = 0\n for freq in range(nFreq):\n d1 = d0[freq].data.copy().reshape((ny,nx))\n d1 *= ninvs[freq]\n a_l = fft.fft(d1,axes=[-2,-1])\n a_l *= beams[freq]*precond_2d\n d1 = numpy.real(fft.ifft(a_l,axes=[-2,-1],normalize=True))\n d1 = numpy.reshape(d1,(nx*ny))\n d2 += d1\n return d2\n \n def computeClusterY(d0):\n \"\"\"\n For cluster, y = F^T A^T N^-1 d, where F is TSZ spatial template for cluster.\n \"\"\"\n d2 = numpy.zeros(nCluster)\n for ic in range(nCluster):\n for freq in range(nFreq):\n d1 = d0[freq].data.copy().reshape((ny, nx))\n d2[ic] += numpy.sum(d1 * ninvs[freq] * clumaps[0][ic][freq] * g_nu[freq])\n return d2\n \n def computeClusterKSZY(d0):\n \"\"\"\n For cluster, y = K^T A^T N^-1 d, where K is KSZ spatial template for cluster.\n \"\"\"\n d2 = numpy.zeros(nCluster)\n for ic in range(nCluster):\n for freq in range(nFreq):\n d1 = d0[freq].data.copy().reshape((ny, nx))\n d2[ic] += numpy.sum(d1 * ninvs[freq] * clumaps[1][ic][freq])\n return d2\n \n def computeMonopoleY(d0):\n \"\"\"\n Overall monopole amplitude.\n \"\"\"\n d2 = 0\n for freq in range(nFreq):\n d1 = d0[freq].data.copy().reshape((ny, nx))\n d2 += numpy.sum(d1 * ninvs[freq])\n return(d2)\n \n \n # CMB realisation; convolve white noise map with beam and multiply by \n # signal covmat S^1/2 in harmonic space\n b0 = numpy.random.randn(ny,nx)\n a_l = numpy.fft.fft2(b0, b0.shape)\n a_l *= precond_2d * power_2d**(-0.5)\n b0 = numpy.fft.irfft2(a_l, b0.shape)\n \n # Calculate per-band noise realisation.\n # Multiply by pixel-space N^1/2, convolve with beam, and sum over \n # cluster pixels to get RHS\n b1 = 0; b4 = 0\n b2 = numpy.zeros(nCluster)\n if ksz: b3 = numpy.zeros(nCluster)\n \n for freq in range(nFreq):\n _b = numpy.random.randn(ny,nx) * ninvs[freq]**0.5\n a_l = numpy.fft.fft2(_b) * beams[freq] * precond_2d\n b1 += numpy.fft.irfft2(a_l, _b.shape)\n b4 += numpy.sum(_b)\n for ic in range(nCluster):\n b2[ic] += numpy.sum( _b * g_nu[freq] * clumaps[0][ic][freq] )\n if ksz: b3[ic] += numpy.sum( _b * clumaps[1][ic][freq] )\n\n b0 = numpy.reshape(b0,(nx*ny))\n b1 = numpy.reshape(b1,(nx*ny))\n \n\n # Compute CMB and cluster data parts of b\n b_CMB = computeCMBY(datamaps) + b0 + b1\n b_mono = computeMonopoleY(datamaps) + b4\n b_tsz = computeClusterY(datamaps) + b2\n if ksz: b_ksz = computeClusterKSZY(datamaps) + b3\n \n # Return total b vector (Ncmbpix + 1 + (1|2)*Ncluster elements in vector)\n b = numpy.append(b_CMB, b_mono)\n b = numpy.append(b, b_tsz)\n if ksz: b = numpy.append(b, b_ksz)\n return b",
"def zernike_coeff(filename=None,zernike_max_order=20):\n hdu = pf.open(filename)\n nn = len(hdu)\n data = []\n colnames = ['x','y','M20','M22','M31','M33']\n sigma = 1.08/0.27\n for hdui in hdu[1:]:\n img = hdui.data[0][4:].reshape(npix,npix)\n img = rebin(img,(40,40))\n M20,M22,M31,M33=complexMoments(data=img,sigma=sigma)\n x=hdui.header['ccdXcen']\n y=hdui.header['ccdYcen']\n data.append([x,y,M20,M22,M31,M33])\n data=np.array(data)\n betaAll=[]\n #betaErrAll=[]\n R2adjAll=[]\n beta,betaErr,R2_adj = zernikeFit(data[:,0].real,data[:,1].real,data[:,2].real,max_order=zernike_max_order)\n betaAll.append(beta)\n #betaErrAll.append(betaErr)\n R2adjAll.append(R2_adj)\n for i in range(3,6):\n beta,betaErr,R2_adj = zernikeFit(data[:,0].real,data[:,1].real,data[:,i].real,max_order=zernike_max_order)\n betaAll.append(beta)\n #betaErrAll.append(betaErr)\n R2adjAll.append(R2_adj)\n beta,betaErr,R2_adj = zernikeFit(data[:,0].real,data[:,1].real,data[:,i].imag,max_order=zernike_max_order)\n betaAll.append(beta)\n #betaErrAll.append(betaErr)\n R2adjAll.append(R2_adj)\n betaAll = np.array(betaAll)\n #betaErrAll = np.array(betaErrAll)\n R2adjAll = np.array(R2adjAll)\n x=hdu[0].header['x']\n y=hdu[0].header['y']\n z=hdu[0].header['z']\n phi = hdu[0].header['phi']\n theta=hdu[0].header['theta']\n s_fwhm=hdu[0].header['s_fwhm']\n e1=hdu[0].header['e1']\n e2=hdu[0].header['e2']\n return x,y,z,theta,phi,s_fwhm,e1,e2,betaAll,R2adjAll",
"def cayley_menger_mat(x2, y2, z2, xb2, yb2, zb2):\n one = np.ones_like(x2)\n zero = np.zeros_like(x2)\n mat = np.array([[zero, x2, y2, z2, one], \n [x2, zero, zb2, yb2, one], \n [y2, zb2, zero, xb2, one], \n [z2, yb2, xb2, zero, one], \n [one, one, one, one, zero]\n ]).T\n return mat",
"def get_nXS_cld( layer_z, grid_wn, dict_atmprof_funcZ ) :\n\n\n CCC = 2.* f_factor\n\n matrixWZ_wn, matrixWZ_z = np.meshgrid( grid_wn, layer_z, indexing='ij' )\n\n matrixWZ_rho_atm = dict_atmprof_funcZ['rho']( matrixWZ_z ) \n\n # ice\n matrixWZ_icecld = dict_atmprof_funcZ['icecld']( matrixWZ_z )\n matrixWZ_rho_cld_ice = matrixWZ_rho_atm * matrixWZ_icecld\n matrixWZ_nXS = 3. * CCC * matrixWZ_rho_cld_ice / ( 4. * f_cloud_Deff_ice ) / RHO_H2O\n\n # liquid\n matrixWZ_wtrcld = dict_atmprof_funcZ['wtrcld']( matrixWZ_z )\n matrixWZ_rho_cld_liquid = matrixWZ_rho_atm * matrixWZ_wtrcld\n matrixWZ_nXS += 3. * CCC * matrixWZ_rho_cld_liquid / ( 4. * f_cloud_Deff_liquid ) / RHO_H2O\n\n return matrixWZ_nXS",
"def Wk2(z, zp, k, c_M=0, c_B=0):\n c = 299792458/1000 # km/s\n A = np.zeros((len(z), len(zp)))\n chiz = np.copy(A); np.transpose(chiz)[:] = chi(z)\n chifraction = (chiz - chi(zp))*chi(zp)/chiz\n A[:] = omega_matter(zp)*H(zp)/(1 + zp)**2*G_light(zp, c_M, c_B)\n W = 3/2*A*chifraction\n W /= c # Unit correction for Wk to be unitless\n return W",
"def calc_cmatrix(self):\n tw = self.twiss_df\n res = self._results_df\n\n LOG.debug(\"Calculating CMatrix.\")\n with timeit(lambda t:\n LOG.debug(\" CMatrix calculated in {:f}s\".format(t))):\n\n j = np.array([[0., 1.],\n [-1., 0.]])\n rs = np.reshape(tw.as_matrix(columns=[\"R11\", \"R12\",\n \"R21\", \"R22\"]),\n (len(tw), 2, 2))\n cs = np.einsum(\"ij,kjn,no->kio\",\n -j, np.transpose(rs, axes=(0, 2, 1)), j)\n cs = np.einsum(\"k,kij->kij\", (1 / np.sqrt(1 + np.linalg.det(rs))), cs)\n\n g11a = 1 / np.sqrt(tw.loc[:, \"BETX\"])\n g12a = np.zeros(len(tw))\n g21a = tw.loc[:, \"ALFX\"] / np.sqrt(tw.loc[:, \"BETX\"])\n g22a = np.sqrt(tw.loc[:, \"BETX\"])\n gas = np.reshape(np.array([g11a, g12a,\n g21a, g22a]).T,\n (len(tw), 2, 2))\n\n ig11b = np.sqrt(tw.loc[:, \"BETY\"])\n ig12b = np.zeros(len(tw))\n ig21b = -tw.loc[:, \"ALFY\"] / np.sqrt(tw.loc[:, \"BETY\"])\n ig22b = 1. / np.sqrt(tw.loc[:, \"BETY\"])\n igbs = np.reshape(np.array([ig11b, ig12b,\n ig21b, ig22b]).T,\n (len(tw), 2, 2))\n cs = np.einsum(\"kij,kjl,kln->kin\", gas, cs, igbs)\n gammas = np.sqrt(1 - np.linalg.det(cs))\n\n res.loc[:, \"GAMMA_C\"] = gammas\n\n res.loc[:, \"F1001_C\"] = ((cs[:, 0, 0] + cs[:, 1, 1]) * 1j +\n (cs[:, 0, 1] - cs[:, 1, 0])) / 4 / gammas\n res.loc[:, \"F1010_C\"] = ((cs[:, 0, 0] - cs[:, 1, 1]) * 1j +\n (-cs[:, 0, 1]) - cs[:, 1, 0]) / 4 / gammas\n\n res.loc[:, \"C11\"] = cs[:, 0, 0]\n res.loc[:, \"C12\"] = cs[:, 0, 1]\n res.loc[:, \"C21\"] = cs[:, 1, 0]\n res.loc[:, \"C22\"] = cs[:, 1, 1]\n\n LOG.debug(\" Average coupling amplitude |F1001|: {:g}\".format(np.mean(\n np.abs(res.loc[:, \"F1001_C\"]))))\n LOG.debug(\" Average coupling amplitude |F1010|: {:g}\".format(np.mean(\n np.abs(res.loc[:, \"F1010_C\"]))))\n LOG.debug(\" Average gamma: {:g}\".format(np.mean(\n np.abs(res.loc[:, \"GAMMA_C\"]))))\n\n self._log_added('GAMMA_C', 'F1001_C', 'F1010_C', 'C11', 'C12', 'C21', 'C22')",
"def sample_new_columns(X, Z, Y, alpha, ep, lamb, p, K, N, T, i, max_newK):\n \n # Sample new columns in Zi\n \n # compute m_{-i}\n m = (Z.sum(axis=0) - Z[i,:])\n empty_columns = [k for k in range(K) if m[k] == 0]\n # if no dish k is chosen by other customers, costomer i also\n # doesn't choose dish k \n Z[i,empty_columns] = 0\n\n # compute probabilities for all possible new ks\n newK_probs = []\n e = np.dot(Z[i,range(K)], Y[range(K),:])\n # create indices of ones in Xi and zeros in Xi\n one_inds = [t for t in range(T) if X[i,t] == 1]\n zero_inds = [t for t in range(T) if t not in one_inds]\n # eta = (1 - lambda) ** np.dot(Z[i,1:K], Y[1:K,t]) in Eq(15)\n eta_one = np.power((1 - lamb), e[one_inds])\n eta_zero = np.power((1 - lamb), e[zero_inds]) \n\n for newk in range(1, max_newK + 1):\n lhood_XiT = 0\n lhood_XiT = np.sum(np.log(1 - (1 - ep) * eta_one * ((1 - lamb * p) ** newk)))\n lhood_XiT = lhood_XiT + np.sum(np.log((1 - ep) * eta_zero * ((1 - lamb * p) ** newk)))\n prob_newKi = lhood_XiT - alpha / N + (K + newk) * log(alpha / N) - gammaln(K + newk + 1)\n newK_probs.append(prob_newKi)\n\n # sample new k\n pdf = map(lambda x: np.exp(x - max(newK_probs)), newK_probs)\n normalized_pdf = map(lambda x: x / np.sum(pdf), pdf)\n cdf = pdf[0]\n cdfr = np.random.uniform()\n newK = 0\n ii = 0\n\n while cdf < cdfr:\n ii = ii + 1\n cdf = cdf + pdf[ii]\n newK = newK + 1\n\n if newK > 0:\n # construct new Z\n new_Z = np.hstack((Z, np.zeros((N, newK))))\n new_Z[i, xrange(-newK, 0)] = 1\n Z = new_Z\n\n # construct new Y\n new_Y = np.vstack((Y, np.zeros((newK, T))))\n\n # sample new Y_j,t: new values of Y are drawn from their\n # posterior dist given Z. \n # I do not understand why Eq(12) can be the following. I just\n # translated Wood's code (sampZ.mat). \n e_newY = np.tile(e, (newK+1, 1)) + np.tile(np.arange(newK+1).transpose(), (1, T))\n newY_probs = np.power(((1 - ep) * (1 - lamb)), e_newY)\n newY_probs[:,one_inds] = 1 - newprobs[:,one_inds]\n\n e_newY_prior = (n_choose_kv(newK) * np.power(p, np.arange(newK+1)) \\\n * np.power(1 - p, (newK - np.arange(newK+1)))).transpose()\n newY_prior = np.tile(e_newY_prior, (1, T))\n newY_probs = newY_probs * newY_prior \n\n newY_probs = newY_probs / np.tile(np.sum(newY_probs), (newK+1, 1))\n newY_probs = np.cumsum(newY_probs)\n\n for j in range(T):\n rand = np.random.uniform()\n bigger_than_rand = [k for k in range(K) if rand < newY_probs[k,j]]\n m = min(bigger_than_rand)\n new_Y[xrange(-m, 0), j] = 1\n\n Y = new_Y\n\n return Z, Y",
"def _makeWaMatrix_(self, wa, nRow, nCol):\n\t\t#print nRow, nCol\n\t\t#print wa\n\t\t#print\n\t\twaMatrix = [[0 for j in xrange(nCol)] for i in xrange(nRow)]\n\t\tfor a in wa: \n\t\t\tfor i in a[0]:\n\t\t\t\tfor j in a[1]:\n\t\t\t\t\twaMatrix[i][j] = 1\n\t\treturn waMatrix",
"def _iczt_one_dimension(fd_data, zs_power, n_freqs):\r\n\r\n # Compute the ICZT\r\n iczt_data = np.sum(fd_data[:, None] * zs_power, axis=0) / n_freqs\r\n\r\n return iczt_data"
] |
[
"0.7298965",
"0.72373927",
"0.7098",
"0.69169414",
"0.674374",
"0.66822344",
"0.6380025",
"0.62321043",
"0.61145115",
"0.59687454",
"0.59334385",
"0.591302",
"0.57762146",
"0.5614465",
"0.5599952",
"0.5513964",
"0.5494949",
"0.5427968",
"0.5414677",
"0.5395066",
"0.5314387",
"0.53117996",
"0.5309728",
"0.52867186",
"0.5284466",
"0.5237732",
"0.52311796",
"0.5221083",
"0.5208261",
"0.5197172"
] |
0.7799107
|
0
|
Test that the build will fail when the "MATLAB_RELEASE" argument is different from the one contained in VersionInfo.xml ("latest")
|
def test_mismatching_releases_raises_error(self):
# The failure message that we expect to see
expected_fail_regex = (
f"Provided release (.*) does not match release found in VersionInfo.xml"
)
with self.assertRaisesRegex(
docker.errors.BuildError,
expected_fail_regex,
):
# Build the Docker image using the default value for MATLAB_RELEASE,
# which does not match with the one in mocks/matlab-install/VersionInfo.xml
self.client.images.build(
path=self.dockerfile_dirpath,
forcerm=True,
buildargs={"MATLAB_RELEASE": self.old_matlab_release},
)
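
# For orientation only: a minimal sketch (not taken from the repository under
# test) of the kind of release check the build above is expected to perform,
# comparing the MATLAB_RELEASE build argument against VersionInfo.xml. The
# 'release' element name and the file location are assumptions.
import sys
import xml.etree.ElementTree as ET

def check_release(provided_release, versioninfo_path="VersionInfo.xml"):
    # Assumed XML layout: a top-level <release> element holding e.g. "latest"/"r2021a".
    found = ET.parse(versioninfo_path).getroot().findtext("release", default="")
    if provided_release.lower() != found.lower():
        sys.exit(f"Provided release ({provided_release}) does not match "
                 "release found in VersionInfo.xml")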
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_mismatching_releases_displays_err_msg(self):\n\n # The failure message that we expect to see\n expected_fail_msg = (\n f\"Provided release ({self.old_matlab_release}) does not match \"\n \"release found in VersionInfo.xml\"\n )\n\n build_msg = utils.get_build_output(\n docker_api_client=self.client.api,\n dockerfile_dirpath=self.dockerfile_dirpath,\n release=self.old_matlab_release,\n )\n\n self.assertTrue(\n any([expected_fail_msg in line for line in build_msg]),\n f\"The error message '{expected_fail_msg}' was not displayed\",\n )",
"def test_check_version_non_release(self):\n with self.assertLogs(\"dakara_feeder.version\", \"DEBUG\") as logger:\n with patch.multiple(\n \"dakara_feeder.version\", __version__=\"0.1.0-dev\", __date__=\"1970-01-01\"\n ):\n check_version()\n\n # assert effect on logs\n self.assertListEqual(\n logger.output,\n [\n \"INFO:dakara_feeder.version:\" \"Dakara feeder 0.1.0-dev (1970-01-01)\",\n \"WARNING:dakara_feeder.version:\"\n \"You are running a dev version, use it at your own risks!\",\n ],\n )",
"def test_check_version_release(self):\n with self.assertLogs(\"dakara_feeder.version\", \"DEBUG\") as logger:\n with patch.multiple(\n \"dakara_feeder.version\", __version__=\"0.0.0\", __date__=\"1970-01-01\"\n ):\n check_version()\n\n # assert effect on logs\n self.assertListEqual(\n logger.output,\n [\"INFO:dakara_feeder.version:\" \"Dakara feeder 0.0.0 (1970-01-01)\"],\n )",
"def test_release_version():\n pkg_vars = {}\n with open(VERSION_FILE) as f:\n exec(f.read(), pkg_vars) # nosec\n project_version = pkg_vars[\"__version__\"]\n assert (\n RELEASE_TAG == f\"v{project_version}\"\n ), \"RELEASE_TAG does not match the project version\"",
"def test_version():\n with open(\"pyproject.toml\") as f:\n tomllines = f.read().splitlines()\n tomlversion = set([l for l in tomllines if \"version =\" in l])\n initversion = set([f'version = \"{mei2volpiano.__version__}\"'])\n # set is there to catch any duplicate/additional entries\n assert initversion == tomlversion",
"def test_release_tag_for_invalid_version(self) -> None:\n with self.assertRaisesRegexp(ValueError, \"Unable to parse version foo.bar.ba\"):\n release_tag()",
"def test_version_missing(self):\r\n self.assertIsNone(self._version_test(self.no_version))",
"def test_buildrequire_invalid_module(pkg_util, scenario, repo, koji):\n\n repo.bump()\n\n expected_error = \"Cannot find any module builds\"\n with pytest.raises(ErrorReturnCode) as excinfo:\n # Override 'baked' (_err=sys.stderr) stderr redirect:\n # Here we are fine with what plain sh.Command gives us\n # (otherwise ErrorReturnCode.stderr is incomplete).\n builds = pkg_util.run(\"--optional\", \"rebuild_strategy=all\", _err=None)\n try:\n for build in builds:\n print(\"Canceling module-build {}...\".format(build.id))\n pkg_util.cancel(build)\n except ErrorReturnCode:\n # Do nothing, this is just a clean-up of accidentally started builds\n # in case that the test-case fails\n pass\n assert expected_error in excinfo.value.stderr.decode(\"utf-8\")",
"def test_versioning_unknown_version(workflow_runner):\n with pytest.raises(WDL.Error.SyntaxError):\n workflow_runner(\"test_versioning_unknown_version.wdl\")",
"def test_version(self) -> None:\n with open(\"pyproject.toml\") as f:\n for line in f:\n if \"version\" in line:\n version = line.split()[-1].replace('\"', \"\")\n break\n self.assertEqual(__version__, version)",
"async def test_release_bad_version(doof, repo_info, event_loop, command):\n command_words = command.split() + ['a.b.c']\n await doof.run_command(\n manager='mitodl_user',\n channel_id=repo_info.channel_id,\n words=command_words,\n loop=event_loop,\n )\n assert doof.said(\n 'having trouble figuring out what that means',\n )",
"def test_release_tag_for_empty(self) -> None:\n with self.assertRaisesRegexp(ValueError, \"Unable to parse version \"):\n release_tag()",
"def test_release_tag(self) -> None:\n self.assertEqual(\"v3.14.15\", release_tag())",
"def test_changelog_missing() -> None:\n collection = RulesCollection()\n collection.register(GalaxyRule())\n bad_runner = Runner(\"examples/no_changelog/galaxy.yml\", rules=collection)\n result = bad_runner.run()\n assert len(result) == 1\n for item in result:\n assert item.tag == \"galaxy[no-changelog]\"",
"def test_version_check_does_not_exist(self):\n output = self.run_command(\"selfupdate --check selfupdate_test_does_not_exist\", exitcode=0)\n self.assertIn(\"Target: ywangd:selfupdate_test_does_not_exist\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)\n self.assertNotIn(\"Already at latest version\", output)\n self.assertNotIn(\"New version available\", output)\n self.assertIn(\"Error: \", output)",
"def test_version() -> None:\n assertion.assert_(Version, nanoqm.__version__)",
"def test_install_error_message(self):\n\n fail_msg = \"Failure message\"\n\n fail_file = Path(self.dockerfile_dirpath) / \"matlab-install\" / \"FAIL\"\n\n with open(str(fail_file), \"w\") as ff:\n ff.write(fail_msg + \"\\n\")\n self.addCleanup(utils.remove_file, fail_file)\n\n build_msg = utils.get_build_output(\n docker_api_client=self.client.api,\n dockerfile_dirpath=self.dockerfile_dirpath,\n release=\"latest\",\n )\n\n self.assertTrue(any([fail_msg in msg for msg in build_msg]))",
"def test_dev_version_if_tagged_not_last_commit(self, mock_git_info): # pylint: disable=invalid-name, unused-argument\n # Test `patch` part\n self.get_dev_version('patch')\n self.assertEqual(self.project.version, '1.2.4.dev')\n # Test `minor` part\n self.get_dev_version('minor')\n self.assertEqual(self.project.version, '1.3.0.dev')\n # Test `major` part\n self.get_dev_version('major')\n self.assertEqual(self.project.version, '2.0.0.dev')\n # Test incorrect part\n self.project.set_property('semver_git_tag_increment_part', 'incorrect')\n with self.assertRaises(BuildFailedException) as context:\n set_version_from_git_tag(self.project, self.logger)\n err_msg = str(context.exception)\n self.assertTrue(\n (\"Incorrect value for `semver_git_tag_increment_part` property. \"\n \"Has to be in (`major`, `minor`, `patch`), \"\n \"but `incorrect` passed.\") in err_msg)",
"def test_version_check_outdated(self):\n output = self.run_command(\"selfupdate --check bennr01:selfupdate_test_outdated\", exitcode=0)\n self.assertIn(\"Target: bennr01:selfupdate_test_outdated\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)\n self.assertIn(\"Already at latest version\", output)\n self.assertNotIn(\"New version available\", output)\n self.assertNotIn(\"Error: \", output)",
"def test_wrong_architecture(tmp_path, host_python, build_python, get_resource):\n\n crossenv = make_crossenv(tmp_path, host_python, build_python,\n '--cc=/usr/bin/gcc')\n for line in crossenv.creation_log.splitlines():\n if re.match(r'WARNING:.*architecture', line):\n return\n assert False, \"Crossenv did not detect wrong architecture\"",
"def test_os_release(self):\n self.assertEqual(self.settings.OS_RELEASE, platform.release())",
"def check_version():\n reset_flag = False\n try:\n data = du.read_yml(du.DEFAULT)\n if (\n data[\"version\"].split(\".\")[0] != __version__.split(\".\")[0]\n ): # If Version if different from \"1.x.y\" remove data:\n reset_flag = True\n except (KeyError, FileNotFoundError, TypeError):\n reset_flag = True\n\n if reset_flag:\n print(\"Your configuration file version is older than 1.0.0\")\n print(\n \"Your .Experiment file will be removed, please run daf.init to generate an up-to-date file\"\n )\n if os.path.isfile(du.DEFAULT):\n os.remove(du.DEFAULT)\n sys.exit(0)",
"def test_version():\n assert pywren.__version__ is not None",
"def test_dev_version_if_dirty(self, mock_git_info): # pylint: disable=invalid-name, unused-argument\n # Test `patch` part\n self.get_dev_version('patch')\n self.assertEqual(self.project.version, '1.2.4.dev')\n # Test `minor` part\n self.get_dev_version('minor')\n self.assertEqual(self.project.version, '1.3.0.dev')\n # Test `major` part\n self.get_dev_version('major')\n self.assertEqual(self.project.version, '2.0.0.dev')\n # Test incorrect part\n self.project.set_property('semver_git_tag_increment_part', 'incorrect')\n with self.assertRaises(BuildFailedException) as context:\n set_version_from_git_tag(self.project, self.logger)\n err_msg = str(context.exception)\n self.assertTrue(\n (\"Incorrect value for `semver_git_tag_increment_part` property. \"\n \"Has to be in (`major`, `minor`, `patch`), \"\n \"but `incorrect` passed.\") in err_msg)",
"def test_no_version(self):\r\n errstring = \"unknown version\"\r\n with self.assertRaisesRegexp(ValueError, errstring):\r\n convert_between_versions(self.no_version, self.result_dir)",
"def test_release(self):\n runCommand(\n [\"git\", \"checkout\", \"-b\", \"release-16.11111-9001\"], cwd=self.repo.path\n )\n\n somefile = self.repo.child(\"somefile\")\n somefile.setContent(b\"change\")\n\n runCommand([\"git\", \"add\", somefile.path, somefile.path], cwd=self.repo.path)\n runCommand([\"git\", \"commit\", \"-m\", \"some file\"], cwd=self.repo.path)\n\n logs = []\n\n with self.assertRaises(SystemExit) as e:\n CheckNewsfragmentScript(logs.append).main([self.repo.path])\n\n self.assertEqual(e.exception.args, (0,))\n self.assertEqual(logs[-1], \"Release branch with no newsfragments, all good.\")",
"def check_pyversion() -> None:\n pyversion = float(str(sys.version_info[0]) + '.' + str(sys.version_info[1]))\n if not pyversion >= 3.6:\n text = f'''FAIL: You are using python {pyversion}. This pipeline was built with python 3.7.\nFAIL: use 3.6 <= python version < 3.8\nFAIL: exiting cmh_test.py'''\n print(ColorText(text).fail())\n exit()\n if not pyversion < 3.8:\n print(ColorText(\"FAIL: python 3.8 has issues with the ipyparallel engine returns.\").fail())\n print(ColorText(\"FAIL: use 3.6 <= python version < 3.8\").fail())\n print(ColorText(\"FAIL: exiting cmh_test.py\").fail())\n exit()",
"def is_release():\n return VERSION[-1]",
"def test_bad_registry_version(self):\n # Setup test\n infilename = os.path.join(_SAMPLE_FILES_DIR, \"reg_good_simple.xml\")\n filename = os.path.join(_TMP_DIR, \"reg_bad_version.xml\")\n out_source_name = \"physics_types_bad_ver\"\n out_source = os.path.join(_TMP_DIR, out_source_name + '.F90')\n out_meta = os.path.join(_TMP_DIR, out_source_name + '.meta')\n remove_files([out_source, out_meta])\n tree, root = read_xml_file(infilename)\n # Write bad version number\n root.set('version', '1.1')\n # Change output filename\n for obj in root:\n oname = obj.get('name')\n if (obj.tag == 'file') and (oname == 'physics_types_simple'):\n obj.set('name', out_source_name)\n break\n # End if\n # End for\n tree.write(filename)\n\n # Run test\n with self.assertRaises(ValueError) as verr:\n _ = gen_registry(filename, 'fv', {}, _TMP_DIR, 2,\n _SRC_MOD_DIR, _CAM_ROOT,\n loglevel=logging.ERROR,\n error_on_no_validate=True)\n # Check exception message\n xml_loc = os.path.join(_TMP_DIR, \"reg_bad_version.xml\")\n emsg = (\"Invalid registry file, {}\".format(xml_loc))\n self.assertEqual(emsg.format(out_source_name),\n str(verr.exception).split('\\n')[0])\n # Make sure no output files were created\n self.assertFalse(os.path.exists(out_meta))\n self.assertFalse(os.path.exists(out_source))",
"def test_version():\n assert __version__"
] |
[
"0.7061311",
"0.6778055",
"0.65040183",
"0.6406685",
"0.6374489",
"0.6338314",
"0.6337696",
"0.6325358",
"0.6279139",
"0.6252388",
"0.6159534",
"0.61333746",
"0.6117964",
"0.609705",
"0.60959524",
"0.6092544",
"0.6088889",
"0.6075766",
"0.60548264",
"0.6053249",
"0.60388136",
"0.6009721",
"0.5999489",
"0.5983258",
"0.5982522",
"0.5968524",
"0.59514344",
"0.5944362",
"0.59425354",
"0.5940228"
] |
0.7735341
|
0
|
Test that the error message is displayed when the "MATLAB_RELEASE" argument is different from the one contained in VersionInfo.xml ("latest")
|
def test_mismatching_releases_displays_err_msg(self):
# The failure message that we expect to see
expected_fail_msg = (
f"Provided release ({self.old_matlab_release}) does not match "
"release found in VersionInfo.xml"
)
build_msg = utils.get_build_output(
docker_api_client=self.client.api,
dockerfile_dirpath=self.dockerfile_dirpath,
release=self.old_matlab_release,
)
self.assertTrue(
any([expected_fail_msg in line for line in build_msg]),
f"The error message '{expected_fail_msg}' was not displayed",
)
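
# Hedged illustration of what a helper such as utils.get_build_output might do
# (its real implementation is project-specific and not shown in this row): the
# docker low-level API can stream decoded build-log chunks, and a failing build
# surfaces its error text in that stream rather than raising, which is why the
# test greps the collected lines.
import docker  # docker-py

def get_build_output_sketch(docker_api_client, dockerfile_dirpath, release):
    # Hypothetical stand-in: collect the 'stream' field of each decoded chunk.
    chunks = docker_api_client.build(
        path=dockerfile_dirpath,
        buildargs={"MATLAB_RELEASE": release},
        rm=True,
        decode=True,
    )
    return [chunk["stream"] for chunk in chunks if "stream" in chunk]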
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_mismatching_releases_raises_error(self):\n\n # The failure message that we expect to see\n expected_fail_regex = (\n f\"Provided release (.*) does not match release found in VersionInfo.xml\"\n )\n\n with self.assertRaisesRegex(\n docker.errors.BuildError,\n expected_fail_regex,\n ):\n # Build the Docker image using the default value for MATLAB_RELEASE,\n # which does not match with the one in mocks/matlab-install/VersionInfo.xml\n self.client.images.build(\n path=self.dockerfile_dirpath,\n forcerm=True,\n buildargs={\"MATLAB_RELEASE\": self.old_matlab_release},\n )",
"def test_install_error_message(self):\n\n fail_msg = \"Failure message\"\n\n fail_file = Path(self.dockerfile_dirpath) / \"matlab-install\" / \"FAIL\"\n\n with open(str(fail_file), \"w\") as ff:\n ff.write(fail_msg + \"\\n\")\n self.addCleanup(utils.remove_file, fail_file)\n\n build_msg = utils.get_build_output(\n docker_api_client=self.client.api,\n dockerfile_dirpath=self.dockerfile_dirpath,\n release=\"latest\",\n )\n\n self.assertTrue(any([fail_msg in msg for msg in build_msg]))",
"def test_check_version_non_release(self):\n with self.assertLogs(\"dakara_feeder.version\", \"DEBUG\") as logger:\n with patch.multiple(\n \"dakara_feeder.version\", __version__=\"0.1.0-dev\", __date__=\"1970-01-01\"\n ):\n check_version()\n\n # assert effect on logs\n self.assertListEqual(\n logger.output,\n [\n \"INFO:dakara_feeder.version:\" \"Dakara feeder 0.1.0-dev (1970-01-01)\",\n \"WARNING:dakara_feeder.version:\"\n \"You are running a dev version, use it at your own risks!\",\n ],\n )",
"def test_check_version_release(self):\n with self.assertLogs(\"dakara_feeder.version\", \"DEBUG\") as logger:\n with patch.multiple(\n \"dakara_feeder.version\", __version__=\"0.0.0\", __date__=\"1970-01-01\"\n ):\n check_version()\n\n # assert effect on logs\n self.assertListEqual(\n logger.output,\n [\"INFO:dakara_feeder.version:\" \"Dakara feeder 0.0.0 (1970-01-01)\"],\n )",
"def test_version() -> None:\n assertion.assert_(Version, nanoqm.__version__)",
"def test_version():\n with open(\"pyproject.toml\") as f:\n tomllines = f.read().splitlines()\n tomlversion = set([l for l in tomllines if \"version =\" in l])\n initversion = set([f'version = \"{mei2volpiano.__version__}\"'])\n # set is there to catch any duplicate/additional entries\n assert initversion == tomlversion",
"def test_version_missing(self):\r\n self.assertIsNone(self._version_test(self.no_version))",
"def test_versioning_unknown_version(workflow_runner):\n with pytest.raises(WDL.Error.SyntaxError):\n workflow_runner(\"test_versioning_unknown_version.wdl\")",
"def test_version(self) -> None:\n with open(\"pyproject.toml\") as f:\n for line in f:\n if \"version\" in line:\n version = line.split()[-1].replace('\"', \"\")\n break\n self.assertEqual(__version__, version)",
"def test_release_tag_for_invalid_version(self) -> None:\n with self.assertRaisesRegexp(ValueError, \"Unable to parse version foo.bar.ba\"):\n release_tag()",
"def test_release_tag(self) -> None:\n self.assertEqual(\"v3.14.15\", release_tag())",
"def test_version(self):\n version_instance = get_version('kolibri', __file__)\n self.assertIn(version_instance.major_version, kolibri.__version__)",
"def test_no_version(self):\r\n errstring = \"unknown version\"\r\n with self.assertRaisesRegexp(ValueError, errstring):\r\n convert_between_versions(self.no_version, self.result_dir)",
"def test_version():\n assert __version__",
"def test_version(self):\n result = check_output([b\"flocker-reportstate\"] + [b\"--version\"])\n self.assertEqual(result, b\"%s\\n\" % (__version__,))",
"def test_not_show_py_ver_incompatible_results(self):\n store = fake_compatibility_store.CompatibilityStore()\n store.save_compatibility_statuses([\n compatibility_store.CompatibilityResult(\n packages=[PACKAGE_1],\n python_major_version=3,\n status=compatibility_store.Status.SUCCESS\n ),\n compatibility_store.CompatibilityResult(\n packages=[PACKAGE_3],\n python_major_version=3,\n status=compatibility_store.Status.INSTALL_ERROR\n ),\n compatibility_store.CompatibilityResult(\n packages=[PACKAGE_1, PACKAGE_3],\n python_major_version=3,\n status=compatibility_store.Status.INSTALL_ERROR,\n details=\"Installation failure\"\n ),\n ])\n patch = mock.patch(\n 'compatibility_lib.configs.PKG_PY_VERSION_NOT_SUPPORTED', {\n 2: ['package4'],\n 3: ['package3'],\n })\n\n with patch:\n grid = grid_builder.GridBuilder(store)\n html_grid = grid.build_grid([PACKAGE_1, PACKAGE_2])\n\n self.assertNotIn(\"Installation failure\", html_grid)",
"def test_get_version(self):\n pass",
"def test_version():\n assert pywren.__version__ is not None",
"def test_matplotlib_suported_version(self):\r\n min_acceptable_version = (1, 1, 0)\r\n max_acceptable_version = (1, 3, 1)\r\n try:\r\n from matplotlib import __version__ as matplotlib_lib_version\r\n version = matplotlib_lib_version.split('.')\r\n if version[-1].endswith('rc'):\r\n version[-1] = version[-1][:-2]\r\n version = tuple(map(int, version))\r\n pass_test = (version >= min_acceptable_version and\r\n version <= max_acceptable_version)\r\n version_string = str(matplotlib_lib_version)\r\n except ImportError:\r\n pass_test = False\r\n version_string = \"Not installed\"\r\n self.assertTrue(pass_test,\r\n \"Unsupported matplotlib version. Must be >= %s and <= %s , but running %s.\"\r\n % ('.'.join(map(str, min_acceptable_version)),\r\n '.'.join(map(str, max_acceptable_version)), version_string))",
"def test_release_version():\n pkg_vars = {}\n with open(VERSION_FILE) as f:\n exec(f.read(), pkg_vars) # nosec\n project_version = pkg_vars[\"__version__\"]\n assert (\n RELEASE_TAG == f\"v{project_version}\"\n ), \"RELEASE_TAG does not match the project version\"",
"def test_versionString(self):\n self.assertIn(\"%d.%d.%d\" % nevow.__version_info__, nevow.__version__)",
"def test_version(self):\n v = version('/no/such/executable')\n self.assertEqual(v, '0.0.1.dev0')\n v = version('false')\n self.assertEqual(v, '0.0.1.dev0')\n v = version('echo')\n self.assertEqual(v, 'describe .devrev-list --count HEAD')",
"def testGetVersion(self):\n helper = pylint.PylintHelper()\n\n helper._GetVersion()",
"def test_version(self):\n pass",
"def test_version_time_error_bad_version(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('version time bad_version \"%s\"'\n % self._test_date)\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"def test_release_tag_for_empty(self) -> None:\n with self.assertRaisesRegexp(ValueError, \"Unable to parse version \"):\n release_tag()",
"def check_version(ctx, _, value):\n if not value or ctx.resilient_parsing:\n return\n\n click.echo(f\"geocube v{importlib.metadata.version('geocube')}\")\n\n ctx.exit()",
"def test_version_check_does_not_exist(self):\n output = self.run_command(\"selfupdate --check selfupdate_test_does_not_exist\", exitcode=0)\n self.assertIn(\"Target: ywangd:selfupdate_test_does_not_exist\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)\n self.assertNotIn(\"Already at latest version\", output)\n self.assertNotIn(\"New version available\", output)\n self.assertIn(\"Error: \", output)",
"def test_main_first_arg_version(capsys):\n with pytest.raises(SystemExit):\n uflash.main(argv=['--version'])\n\n stdout, stderr = capsys.readouterr()\n expected = uflash.get_version()\n # On python 2 --version prints to stderr. On python 3 to stdout.\n # https://bugs.python.org/issue18920\n assert (expected in stdout) or (expected in stderr)",
"def test__get_program_version():\n version = util._get_program_version(\"midgard\")\n assert isinstance(version, str) and re.search(\"[0-9]\", version)"
] |
[
"0.7205926",
"0.6710082",
"0.6615529",
"0.64460653",
"0.6420502",
"0.64166665",
"0.64018804",
"0.6322818",
"0.6317216",
"0.625823",
"0.6214125",
"0.6150578",
"0.6125207",
"0.60518926",
"0.604974",
"0.60494673",
"0.60443074",
"0.60435677",
"0.60417646",
"0.6041761",
"0.6023344",
"0.601578",
"0.60054815",
"0.6003893",
"0.59969854",
"0.5985975",
"0.59642446",
"0.59610945",
"0.5955718",
"0.5928705"
] |
0.7735132
|
0
|
Test that the failure message is displayed if a failure occurs during the installation of matlab
|
def test_install_error_message(self):
fail_msg = "Failure message"
fail_file = Path(self.dockerfile_dirpath) / "matlab-install" / "FAIL"
with open(str(fail_file), "w") as ff:
ff.write(fail_msg + "\n")
self.addCleanup(utils.remove_file, fail_file)
build_msg = utils.get_build_output(
docker_api_client=self.client.api,
dockerfile_dirpath=self.dockerfile_dirpath,
release="latest",
)
self.assertTrue(any([fail_msg in msg for msg in build_msg]))
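
# A rough sketch, under stated assumptions, of how the mocked installer could
# surface the FAIL marker in the build log; the real mock under
# mocks/matlab-install is not shown in this row, so names here are hypothetical.
import pathlib
import sys

def mock_install(install_dir="matlab-install"):
    # If a FAIL file exists, echo its message into the build output and exit
    # non-zero so the Docker build log contains the failure message.
    fail_file = pathlib.Path(install_dir) / "FAIL"
    if fail_file.exists():
        print(fail_file.read_text().rstrip())
        sys.exit(1)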
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_verify_installation(self):\n self.installer._pretty_print = Mock()\n self.installer._run_command = Mock()\n stdout = Mock()\n stderr = Mock()\n self.installer._run_command.return_value = (stdout, stderr)\n stderr.read().splitlines.return_value = []\n self.assertEqual(\"Success\", self.installer.verify_installation())\n stderr.read().splitlines.return_value = [\"error\"]\n self.assertEqual(\"Fail\", self.installer.verify_installation())",
"def test_self_failure(self):\n store = fake_compatibility_store.CompatibilityStore()\n store.save_compatibility_statuses([\n compatibility_store.CompatibilityResult(\n packages=[PACKAGE_1],\n python_major_version=3,\n status=compatibility_store.Status.INSTALL_ERROR,\n details=\"Installation failure\"\n ),\n compatibility_store.CompatibilityResult(\n packages=[PACKAGE_2],\n python_major_version=3,\n status=compatibility_store.Status.SUCCESS\n ),\n ])\n grid = grid_builder.GridBuilder(store)\n html_grid = grid.build_grid([PACKAGE_1, PACKAGE_2])\n self.assertIn(\"Installation failure\", html_grid)",
"def test_matlab_is_on_path(self):\n self.assertTrue(self.host.run(\"which matlab\").succeeded)",
"def loading_failures():\n\n import simtk.openmm as mm\n print(mm.Platform.getPluginLoadFailures())",
"def test_pairwise_failure(self):\n store = fake_compatibility_store.CompatibilityStore()\n store.save_compatibility_statuses([\n compatibility_store.CompatibilityResult(\n packages=[PACKAGE_1],\n python_major_version=3,\n status=compatibility_store.Status.SUCCESS\n ),\n compatibility_store.CompatibilityResult(\n packages=[PACKAGE_2],\n python_major_version=3,\n status=compatibility_store.Status.SUCCESS\n ),\n compatibility_store.CompatibilityResult(\n packages=[PACKAGE_1, PACKAGE_2],\n python_major_version=3,\n status=compatibility_store.Status.INSTALL_ERROR,\n details=\"Installation failure\"\n ),\n ])\n grid = grid_builder.GridBuilder(store)\n html_grid = grid.build_grid([PACKAGE_1, PACKAGE_2])\n self.assertIn(\"Installation failure\", html_grid)",
"def test_valid_python_raise_kb_exception(self):\n \n data_file = testutils.DataFile(\"integration_module_valid_kraise\")\n\n rtn = self.run_cmd(\"pm install --force --single module --install_name test_kraise --name %s --auto\" % str(data_file))\n assert(rtn.return_code == 0)\n\n rtn = self.run_cmd(\"test_kraise test\")\n\n assert(rtn.return_code == 245)",
"def failure_cmd(self) -> str:\n return \"{} --enable=all -f -q {}\".format(\n self.conf.get_executable(), constants.ROOT_PATH + \"/data/cppcheck-152/trial-fail.cpp\"\n )",
"def test_DDSim_runIt_fail_env(self):\n self.ddsim.platform = \"Windows\"\n self.ddsim.applicationLog = self.logFileName\n res = self.ddsim.runIt()\n self.assertEqual( res['Message'], \"missing ddsiming.sh\" )",
"def test_install_subprocess_error_should_fail(self, *args):\n manifest = self.generate_mock_manifest(cfg={\n EXTCFG_SECTION.INSTALL: {\n EXTCFG_OPTION.EXEC_EXT_CMD: ['command'],\n }\n })\n ext_manager = PkgInstExtrasManager(manifest)\n with pytest.raises(exceptions.InstExtrasManagerError):\n ext_manager.handle_install_extras()",
"def test_mismatching_releases_displays_err_msg(self):\n\n # The failure message that we expect to see\n expected_fail_msg = (\n f\"Provided release ({self.old_matlab_release}) does not match \"\n \"release found in VersionInfo.xml\"\n )\n\n build_msg = utils.get_build_output(\n docker_api_client=self.client.api,\n dockerfile_dirpath=self.dockerfile_dirpath,\n release=self.old_matlab_release,\n )\n\n self.assertTrue(\n any([expected_fail_msg in line for line in build_msg]),\n f\"The error message '{expected_fail_msg}' was not displayed\",\n )",
"def test_launch_failures_hw(self):\n self.test_launch_failures()",
"def test_failure():\n with pytest.raises(ModuleNotFoundError):\n import torch # noqa: F401\n\n with pytest.raises(ModuleNotFoundError):\n import tensorflow # noqa: F401\n\n with pytest.raises(ModuleNotFoundError):\n import horovod # noqa: F401\n\n with pytest.raises(ModuleNotFoundError):\n from ray import tune # noqa: F401",
"def tools_install_problems(host, guest):\n bad = []\n vm_address = domain_address(host, guest, timeout=5)\n msi_versions = get_msi_versions(vm_address)\n for msi in msi_versions:\n print 'INFO:', msi, 'is installed'\n for msi in EXPECTED_MSIS:\n if msi not in msi_versions:\n bad.append(msi+' MSI not installed')\n services = get_running_services(vm_address)\n for service in services:\n print 'TOOLS: service', service, 'running'\n for service in EXPECTED_SERVICES:\n if service not in services:\n bad.append(service+' not running')\n if bad == []:\n return\n message = ('tools not considered installed at this point because '+\n ' and '.join(bad))\n print 'INSTALL_TOOLS:', message\n return message",
"def test_valid_python_raise_exception(self):\n \n data_file = testutils.DataFile(\"integration_module_valid_raise\")\n\n rtn = self.run_cmd(\"pm install --force --single module --install_name test_raise --name %s --auto\" % str(data_file))\n assert(rtn.return_code == 0)\n\n rtn = self.run_cmd(\"test_raise test\")\n\n assert(rtn.return_code == 246)",
"def test_entrypoint_called_mocked_matlab(self):\n self.assertEqual(\n self.host.file(\"matlab.log\").content_string.rstrip(), f\"MATLAB {CUSTOM_CMD}\"\n )",
"def test_buildx_plugin_not_installed(mock_tools):\n mock_tools.subprocess.check_output.side_effect = [\n VALID_DOCKER_VERSION,\n VALID_DOCKER_INFO,\n subprocess.CalledProcessError(\n returncode=1,\n cmd=\"docker buildx version\",\n ),\n ]\n\n with pytest.raises(\n BriefcaseCommandError,\n match=\"Docker is installed and available for use but the buildx plugin\\nis not installed\",\n ):\n Docker.verify(mock_tools)",
"def _check_for_errors(self, status):\r\n\r\n # Case when test suite name is misspelled or file doesn't exist\r\n if status == 252:\r\n sys.stderr.write('Test suite \"{}\" was not found in path {}\\n'.format(self.name, self.path))\r\n print 'Return code is {}'.format(status)",
"def test_matlab_install_dir_absent(self):\n directories = (\"/\", \"/tmp\")\n for dirname in directories:\n with self.subTest(dirname=dirname):\n self.assertNotIn(\"matlab-install\", self.host.file(dirname).listdir())",
"def test_install(self):\n pass",
"def test_load_fails_no_shell(install_mockery, mock_fetch, mock_archive, mock_packages):\n install(\"mpileaks\")\n\n out = load(\"mpileaks\", fail_on_error=False)\n assert \"To set up shell support\" in out",
"def test_install(self):\n self.installer._run_command = Mock()\n self.installer._pretty_print = Mock()\n self.installer.verify_installation = Mock()\n stdout = Mock()\n stderr = Mock()\n self.installer._run_command.return_value = (stdout, stderr)\n stdout.channel.recv_exit_status.return_value = 0\n self.installer.verify_installation.return_value = \"Success\"\n self.installer.install()\n # self.installer.verify_installation.return_value = \"Fail\"\n # with self.assertRaises(Exception):\n # self.installer.install()\n self.installer.verify_installation.return_value = \"Success\"\n stdout.channel.recv_exit_status.return_value = -1\n with self.assertRaises(Exception):\n self.installer.install()\n self.installer._run_command.side_effect = SSHException\n with self.assertRaises(SSHException):\n self.installer.install()",
"def test_failed():\n assert False",
"def _test_local_install():\n if os.getcwd() == os.sep.join(\n os.path.abspath(__file__).split(os.sep)[:-2]):\n import warnings\n warnings.warn('Running the tests from the install directory may '\n 'trigger some failures')",
"def test_warningsAreErrors(self):\n output = StringIO()\n self.patch(sys, \"stdout\", output)\n self.createFakeSphinxProject()\n with self.sphinxDir.child(\"index.rst\").open(\"a\") as f:\n f.write(b\"\\n.. _malformed-link-target\\n\")\n exception = self.assertRaises(\n SystemExit, self.builder.main, [self.sphinxDir.parent().path]\n )\n self.assertEqual(exception.code, 1)\n self.assertIn(\"malformed hyperlink target\", output.getvalue())\n self.verifyBuilt()",
"def test_valid_python_wrong_sub(self):\n \n data_file = testutils.DataFile(\"integration_module_valid_subs\")\n\n rtn = self.run_cmd(\"pm install --force --single module --install_name test_sub --name %s --auto\" % str(data_file))\n assert(rtn.return_code == 0)\n\n rtn = self.run_cmd(\"test_sub nope\")\n\n assert(rtn.return_code == 241)",
"def test_error(self) -> None:\n context: Dict[str, ArtifactDescriptor] = dict()\n cmd = ModuleCommand(\n package_id='error', \n command_id='error',\n arguments=[],\n packages=None\n )\n controller = FakeWorkflowController()\n self.backend.execute_async(\n task=TaskHandle(\n task_id='000',\n project_id=self.PROJECT_ID,\n controller=controller\n ),\n command=cmd,\n artifacts=context\n )\n time.sleep(2)\n self.assertEqual(controller.task_id, '000')\n self.assertEqual(controller.state, 'ERROR')\n self.assertEqual(len(controller.outputs.stdout), 0)\n self.assertNotEqual(len(controller.outputs.stderr), 0)",
"def show_hkl_checksum_warning() -> None:\n if \"PYTEST_CURRENT_TEST\" in os.environ:\n print('DBG> Running inside a pytest -> not showing error message.')\n return\n info = QMessageBox()\n info.setIcon(QMessageBox.Warning)\n info.setText('The \"_shelx_hkl_checksum\" is not\\nconsistent with the .hkl file content!')\n info.show()\n info.exec()",
"def assert_cmd_success_script(self, cmd_string):\n ret = compmake_main([self.root, '--nosysexit', '-c', cmd_string])\n self.assertEqual(ret, 0)",
"def test_managed_install(visualstudio, tmp_path):\n assert not visualstudio.managed_install",
"def test_verification_failed(self):\n pass"
] |
[
"0.68971574",
"0.68589246",
"0.64004433",
"0.6262197",
"0.6260561",
"0.6079343",
"0.602237",
"0.5998593",
"0.599344",
"0.59405375",
"0.59382325",
"0.59276253",
"0.5877427",
"0.587091",
"0.5852264",
"0.58366877",
"0.58251274",
"0.582421",
"0.57860076",
"0.57816166",
"0.57772183",
"0.5773325",
"0.5762959",
"0.5721709",
"0.57160485",
"0.56869984",
"0.56845677",
"0.5683678",
"0.56783324",
"0.5669612"
] |
0.7641776
|
0
|
Takes a cooler and a set of regions (chromosomes, chromosomal arms, regions). For each region, it fetches a sparse cooler matrix. Returns (as a generator) a sequence of (matrix, bins) pairs
|
def fetchCooler(c, regions, coolerFetch = lambda coo, ext:coo.matrix(balance=True, sparse=True).fetch(ext),
mask=True, force=False, ):
regions = [list(i) for i in regions]
resolution = c.binsize
for i in regions:
if i[1] == None:
i[1] = 0
if i[2] == None:
i[2] = c.chromsizes[i[0]]
for a in regions:
if str(a[0]) not in c.chromnames:
raise ValueError("Chromosome {0} from regions not found in cooler".format(a))
if (a[1] % resolution) != 0:
raise ValueError("Start of an region should be a multiple fo resolution")
# bins = c.bins()[:]
# # managing masks
# if mask is False:
# bins["mask"] = 1
# elif mask is None:
# assert "mask" in bins.columns
# elif mask is True:
# pass
# elif callable(mask):
# pass
# else:
# bins["mask"] = mask
for region in regions:
matrix = coolerFetch(c, region)
try: # setting matrix nans to zeros.
matrix.data = np.nan_to_num(matrix.data, copy=False)
except TypeError: #workaround for old numpy versions
matrix.data = np.nan_to_num(matrix.data)
# st,end = c.extent(region)
# subbins = bins[st:end].copy()
if mask is True:
newmask = np.array((matrix.sum(axis=0) > 0 ))[0]
# if callable(mask):
# new_mask = mask(matrix)
# subbins["mask"] = newmask
assert len(newmask) == matrix.shape[0]
yield matrix, newmask
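
# Short usage sketch for the generator above; the cooler path and region tuples
# are illustrative, not taken from the original source.
import cooler

c = cooler.Cooler("example.cool")  # hypothetical file
regions = [(name, None, None) for name in c.chromnames]  # whole chromosomes
for matrix, mask in fetchCooler(c, regions):
    # matrix is a sparse balanced matrix; mask flags bins with nonzero coverage
    print(matrix.shape, int(mask.sum()))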
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def generate_map(nrows, ncols, nrooms, max_col_size, max_row_size):\n arr = np.zeros((nrows, ncols), dtype=np.int8)\n\n for i in range(nrooms):\n rand_row_start = np.random.randint(nrows)\n rand_col_start = np.random.randint(ncols)\n\n rand_row_size = np.random.randint(max_row_size / 2, max_row_size)\n rand_col_size = np.random.randint(max_col_size / 2, max_col_size)\n\n arr[rand_row_start:rand_row_start + rand_row_size, rand_col_start:rand_col_start + rand_col_size] = 1\n\n labels = measure.label(arr)\n regions = measure.regionprops(labels)\n\n centroids = list()\n for region in regions:\n centroids.append(region.centroid)\n\n num_centroids = len(centroids)\n\n # get distances between every pair of centroids\n dists = scipy.spatial.distance.cdist(centroids, centroids)\n\n # get a distance that is greater than all current distances\n max_dist = np.max(dists) + 1\n\n # make sure upper triangle is at least max_dist so that when picking closest\n # pairs, we won't choose a diagonal element or a duplicate connection\n dists = dists + np.triu(np.ones((num_centroids, num_centroids))) * max_dist\n\n for i in range(num_centroids - 1):\n min_dist_idx = np.argmin(dists)\n min_dist_idx = np.unravel_index(min_dist_idx, dists.shape)\n\n # create a hallway between regionprops\n centroid1 = np.array(centroids[min_dist_idx[0]], dtype=np.int)\n centroid2 = np.array(centroids[min_dist_idx[1]], dtype=np.int)\n\n [row_centroid_1, row_centroid_2] = sorted([centroid1, centroid2], key=lambda x: x[0])\n [col_centroid_1, col_centroid_2] = sorted([centroid1, centroid2], key=lambda x: x[1])\n\n arr[row_centroid_1[0]:row_centroid_2[0] + 1, row_centroid_1[1]] = 1\n arr[row_centroid_2[0], col_centroid_1[1]:col_centroid_2[1] + 1] = 1\n\n dists[:, min_dist_idx[1]] += max_dist\n\n return arr",
"def heatmap_tiles_generator_diag(clr, chroms, pad_size, tile_size, band_to_cover):\n\n for chrom in chroms:\n chr_start, chr_stop = clr.extent(chrom)\n for tilei, tilej in square_matrix_tiling(chr_start,\n chr_stop,\n tile_size,\n pad_size):\n # check if a given tile intersects with \n # with the diagonal band of interest ...\n diag_from = tilej[0] - tilei[1]\n diag_to = tilej[1] - tilei[0]\n #\n band_from = 0\n band_to = band_to_cover\n # we are using this >2*padding trick to exclude\n # tiles from the lower triangle from calculations ...\n if (min(band_to,diag_to) - max(band_from,diag_from)) > 2*pad_size:\n yield chrom, tilei, tilej",
"def _build_cooccur(self):\n\n vocab_size = len(self._vocabulary)\n\n # Collect cooccurrences internally as a sparse matrix for passable\n # indexing speed; we'll convert into a list later\n cooccurrences = sparse.lil_matrix((vocab_size, vocab_size),\n dtype=np.float64)\n\n for i, list_ in enumerate(self._lists):\n if i % 1000 == 0:\n self._logger.info(\"Building cooccurrence matrix: on line %i\", i)\n\n token_ids = [self._vocabulary[word][0] for word in list_ if word in self._vocabulary]\n\n for center_i, center_id in enumerate(token_ids):\n # Collect all word IDs in left window of center word\n context_ids = token_ids[:]\n del context_ids[center_i]\n contexts_len = len(context_ids)\n\n for left_i, left_id in enumerate(context_ids):\n # Build co-occurrence matrix symmetrically (pretend we\n # are calculating right contexts as well)\n cooccurrences[center_id, left_id] += 0.5\n cooccurrences[left_id, center_id] += 0.5\n\n # Now yield our tuple sequence (dig into the LiL-matrix internals to\n # quickly iterate through all nonzero cells)\n for i, (row, data) in enumerate(zip(cooccurrences.rows,\n cooccurrences.data)):\n if self._min_count is not None and self._vocabulary[self.id2word[i]][1] < self._min_count:\n continue\n\n for data_idx, j in enumerate(row):\n if self._min_count is not None and self._vocabulary[self.id2word[j]][1] < self._min_count:\n continue\n\n yield i, j, data[data_idx]",
"def generate_random_sparse_array(nrows, ncols, numdense):\n i = np.random.randint(0, nrows, numdense)\n j = np.random.randint(0, ncols, numdense)\n data = np.random.randint(1,6, numdense)\n ij = np.vstack((i,j))\n return coo_matrix((data, ij), shape=(nrows, ncols))",
"def compress_csr(self):\n _, unique, indices = np.unique(\n self.m*self.rows + self.cols,\n return_index=True, return_inverse=True)\n self.rows = self.rows[unique]\n self.cols = self.cols[unique]\n self.vals = np.bincount(indices, weights=self.vals)",
"def form_common_kmer_matrix(all_CEs):\n A = np.zeros((len(all_CEs), len(all_CEs)), dtype=np.float64)\n chunk_size = 70000\n # Can precompute all the indicies\n indicies = []\n for i in xrange(len(all_CEs)):\n for j in xrange(len(all_CEs)):\n indicies.append((i, j))\n for sub_indicies in chunks(indicies, chunk_size):\n input_args = ((all_CEs[sub_indicies[i][0]], all_CEs[sub_indicies[i][1]]) for i in xrange(len(sub_indicies)))\n pool = Pool(processes=multiprocessing.cpu_count())\n res = pool.imap(form_common_kmer_matrix_helper, input_args, chunksize=np.floor(len(indicies)/float(multiprocessing.cpu_count())))\n # pool.close()\n # pool.join()\n # pool.terminate()\n for (i, j), val in zip(sub_indicies, res):\n A[i, j] = val[0] #res[i][0] # Replace i with i+last_index where last_index was the number of times the xranges executed before going into the pool\n A[j, i] = val[1] #res[i][1]\n print((i,j))\n print(val)\n\n pool.terminate()\n return A",
"def conc_iter(self, nbits: int):\n i = 0\n while(i < 2**nbits):\n yield self.bv2conc(int2bv(i, nbits))\n i += 1",
"def full_chromosomes(reader):\n for line in reader.header.get_lines(\"contig\"):\n if line.id in CHROMS:\n name = line.id\n length = line.length or 1_000_000_000\n yield \"{}:{}-{}\".format(name, 1, length)",
"def my_combi(data, dimensions, projection_dimension_size, nbr_step = 50, number_of_clusters = 4, threshold = 1e-5):\n from itertools import combinations\n for projection in combinations(dimensions, projection_dimension_size):\n yield data[:,projection], projection, nbr_step, number_of_clusters, threshold",
"def set_representation_by_buckets(X):\n\n # Investigate columns ranges\n ranges_max = X.max(axis=0).toarray().flatten()\n ranges_min = X.min(axis=0).toarray().flatten()\n spreads = [mx - mn for mx, mn in itertools.izip(ranges_max, ranges_min)]\n\n bucket_count = min(max(spreads), 5)\n\n # Iterate each column and creae evenly spread buckets\n buckets = []\n for col in X.T:\n col = np.array(col.toarray().reshape(-1))\n col.sort()\n col = col[np.where(col>0)[0][0]:]\n col_buckets = [a[0] for a in np.array_split(col, min(bucket_count, len(col)))]\n if len(col_buckets) < bucket_count:\n col_buckets += [col_buckets[-1]]*-(len(col_buckets)-bucket_count)\n buckets.append(col_buckets)\n\n # Create new matrix row by row\n feature_dict = {}\n\n X_tr = np.zeros(shape=(X.shape[0],bucket_count*X.shape[1]), dtype=\"int32\")\n for i in range(X.shape[0]):\n row = []\n for col_idx, col in enumerate(X[i,:].toarray().reshape(-1)):\n # Faster than iterating X[i,:].T, iterating scipy sparse is slow\n for b,x in enumerate(buckets[col_idx]):\n assert(x <= ranges_max[col_idx])\n if(col >= x):\n f = str(col_idx)+\">= bucket_\"+str(b)\n if f not in feature_dict:\n feature_dict[f] = len(feature_dict)\n row.append(feature_dict[f])\n\n X_tr[i, row] = 1\n\n return scipy.sparse.csr_matrix(X_tr)",
"def sample_cc(self, nsamples=1, weighted=True):\n weights = self.areas / np.sum(self.areas) if weighted else None\n for index in np.random.choice(a=len(self.geometries), size=nsamples, p=weights):\n yield self.geometries[index]",
"def prepare_each(self, model, wngrid):\n\n self._total_cia = len(self.ciaPairs)\n self._nlayers = model.nLayers\n self._ngrid = wngrid.shape[0]\n self.info('Computing CIA ')\n\n sigma_cia = np.zeros(shape=(model.nLayers, wngrid.shape[0]))\n\n chemistry = model.chemistry\n\n for pairName in self.ciaPairs:\n cia = self._cia_cache[pairName]\n sigma_cia[...] = 0.0\n\n cia_factor = chemistry.get_gas_mix_profile(cia.pairOne) * \\\n chemistry.get_gas_mix_profile(cia.pairTwo)\n\n for idx_layer, temperature in enumerate(model.temperatureProfile):\n _cia_xsec = cia.cia(temperature, wngrid)\n sigma_cia[idx_layer] += _cia_xsec*cia_factor[idx_layer]\n self.sigma_xsec = sigma_cia\n yield pairName, sigma_cia",
"def miserables(metadata: bool = False) -> Union[sparse.csr_matrix, Bunch]:\n row = np.array(\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 3, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,\n 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12,\n 16, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 18, 18, 18, 18, 18, 19, 19, 19, 19,\n 20, 20, 20, 21, 21, 22, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25,\n 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27,\n 27, 27, 27, 27, 27, 28, 28, 29, 29, 29, 29, 29, 30, 34, 34, 34, 34, 35, 35, 35, 36, 36, 37, 39,\n 39, 41, 41, 41, 41, 41, 41, 41, 41, 41, 46, 47, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48,\n 48, 48, 48, 48, 48, 48, 49, 49, 49, 49, 49, 51, 51, 51, 51, 54, 55, 55, 55, 55, 55, 55, 55, 55,\n 55, 57, 57, 57, 57, 57, 57, 57, 57, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 59, 59, 59, 59, 59,\n 59, 59, 60, 60, 60, 60, 60, 60, 61, 61, 61, 61, 61, 62, 62, 62, 62, 62, 63, 63, 63, 63, 64, 64,\n 64, 65, 65, 66, 68, 68, 68, 68, 69, 69, 69, 70, 70, 71, 73])\n col = np.array(\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 3, 11, 11, 11, 12, 13, 14,\n 15, 23, 24, 25, 26, 27, 28, 29, 31, 32, 33, 34, 35, 36, 37, 38, 43,\n 44, 48, 49, 51, 55, 58, 64, 68, 69, 70, 71, 72, 23, 17, 18, 19, 20,\n 21, 22, 23, 26, 55, 18, 19, 20, 21, 22, 23, 19, 20, 21, 22, 23, 20,\n 21, 22, 23, 21, 22, 23, 22, 23, 23, 24, 25, 27, 29, 30, 31, 25, 26,\n 27, 41, 42, 50, 68, 69, 70, 26, 27, 39, 40, 41, 42, 48, 55, 68, 69,\n 70, 71, 75, 27, 43, 49, 51, 54, 55, 72, 28, 29, 31, 33, 43, 48, 58,\n 68, 69, 70, 71, 72, 44, 45, 34, 35, 36, 37, 38, 31, 35, 36, 37, 38,\n 36, 37, 38, 37, 38, 38, 52, 55, 42, 55, 57, 62, 68, 69, 70, 71, 75,\n 47, 48, 55, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 68, 69, 71, 73,\n 74, 75, 76, 50, 51, 54, 55, 56, 52, 53, 54, 55, 55, 56, 57, 58, 59,\n 61, 62, 63, 64, 65, 58, 59, 61, 62, 63, 64, 65, 67, 59, 60, 61, 62,\n 63, 64, 65, 66, 70, 76, 60, 61, 62, 63, 64, 65, 66, 61, 62, 63, 64,\n 65, 66, 62, 63, 64, 65, 66, 63, 64, 65, 66, 76, 64, 65, 66, 76, 65,\n 66, 76, 66, 76, 76, 69, 70, 71, 75, 70, 71, 75, 71, 75, 75, 74])\n data = np.array(\n [1, 8, 10, 1, 1, 1, 1, 2, 1, 5, 6, 3, 3, 1, 1, 1, 1,\n 1, 9, 7, 12, 31, 17, 8, 2, 3, 1, 2, 3, 3, 2, 2, 2, 3,\n 1, 1, 2, 2, 19, 4, 1, 1, 1, 1, 1, 1, 2, 4, 4, 4, 3,\n 3, 3, 3, 1, 1, 4, 4, 3, 3, 3, 3, 4, 3, 3, 3, 3, 4,\n 3, 3, 3, 5, 4, 4, 4, 4, 4, 2, 1, 5, 1, 1, 2, 13, 4,\n 1, 2, 1, 1, 1, 1, 1, 1, 5, 1, 1, 3, 2, 1, 2, 5, 6,\n 4, 1, 3, 1, 1, 3, 2, 1, 21, 2, 1, 1, 1, 1, 1, 1, 6,\n 1, 2, 1, 1, 1, 3, 2, 2, 2, 1, 1, 1, 2, 3, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 1, 1, 2, 5, 1, 1, 1, 1, 1, 1, 1,\n 1, 2, 4, 1, 7, 6, 1, 2, 7, 5, 5, 3, 1, 1, 1, 1, 2,\n 2, 1, 1, 1, 9, 1, 12, 1, 1, 1, 2, 6, 1, 1, 1, 7, 5,\n 1, 9, 1, 5, 2, 1, 2, 1, 2, 2, 1, 1, 3, 15, 4, 6, 17,\n 4, 10, 5, 3, 1, 1, 2, 5, 13, 5, 9, 5, 1, 2, 3, 2, 2,\n 2, 1, 6, 3, 6, 5, 1, 6, 12, 5, 2, 1, 4, 5, 1, 1, 7,\n 3, 1, 2, 1, 1, 6, 4, 2, 3, 4, 2, 3, 2, 1, 1, 3])\n adjacency = sparse.csr_matrix((data, (row, col)), shape=(77, 77))\n adjacency = adjacency + adjacency.T\n\n if metadata:\n names = ['Myriel', 'Napoleon', 'Mlle Baptistine', 'Mme Magloire', 'Countess de Lo', 'Geborand',\n 'Champtercier', 'Cravatte', 'Count', 'Old man', 'Labarre', 'Valjean', 'Marguerite', 'Mme Der',\n 'Isabeau', 'Gervais', 'Tholomyes', 'Listolier', 'Fameuil', 'Blacheville', 'Favourite', 'Dahlia',\n 'Zephine', 'Fantine', 'Mme Thenardier', 'Thenardier', 'Cosette', 'Javert', 'Fauchelevent',\n 'Bamatabois', 'Perpetue', 'Simplice', 
'Scaufflaire', 'Woman1', 'Judge', 'Champmathieu', 'Brevet',\n 'Chenildieu', 'Cochepaille', 'Pontmercy', 'Boulatruelle', 'Eponine', 'Anzelma', 'Woman2',\n 'Mother Innocent', 'Gribier', 'Jondrette', 'Mme Burgon', 'Gavroche', 'Gillenormand', 'Magnon',\n 'Mlle Gillenormand', 'Mme Pontmercy', 'Mlle Vaubois', 'Lt Gillenormand', 'Marius', 'Baroness',\n 'Mabeuf', 'Enjolras', 'Combeferre', 'Prouvaire', 'Feuilly', 'Courfeyrac', 'Bahorel', 'Bossuet',\n 'Joly', 'Grantaire', 'MotherPlutarch', 'Gueulemer', 'Babet', 'Claquesous', 'Montparnasse',\n 'Toussaint', 'Child1', 'Child2', 'Brujon', 'Mme Hucheloup']\n x = np.array(\n [0.53, 0.98, 0.41, 0.4, 1., 0.92, 0.84, 0.74, 0.78, 1., 0.51, 0.09, -0., 0.29, 0.37,\n 0.41, -0.35, -0.46, -0.42, -0.46, -0.41, -0.37, -0.36, -0.2, -0.06, -0.04, -0.01, -0.02, 0.33,\n 0.17, -0.29, -0.1, 0.58, 0.29, 0.29, 0.26, 0.29, 0.37, 0.35, 0.04, -0.01, -0.18, -0.09,\n 0.2, 0.51, 0.7, -0.95, -0.7, -0.37, -0.08, -0.18, -0.05, 0.04, -0.12, -0.06, -0.13, -0.24, -0.48,\n -0.25, -0.33, -0.43, -0.39, -0.33, -0.42, -0.31, -0.38, -0.48, -0.74, -0.08, -0.1, -0.02, -0.1,\n 0.14, -0.76, -0.75, -0.18, -0.58])\n y = np.array(\n [-0.23, -0.42, -0.14, -0.18, -0.31, -0.52, -0.6, -0.65, -0.38, -0.19, 0.39, 0.03, 0.44, -0.44,\n 0.51, -0.36, 0.27, 0.37, 0.4, 0.32, 0.32, 0.36, 0.4, 0.2, 0.07, 0.14, -0.05, 0.06, 0.06,\n 0.24, -0.26, -0.1, 0.24, -0.04, 0.17, 0.23, 0.31, 0.21, 0.27, -0.36, 0.69, 0.11, 0.38, -0.09,\n 0.05, 0.12, 0.82, 0.44, 0.06, -0.2, -0.4, -0.28, -0.68, -0.79, -0.4, -0.07, -0.51, -0.17, -0.03,\n -0.09, -0.14, -0.04, -0.04, -0.07, -0.06, -0.11, -0.06, -0.35, 0.24, 0.19, 0.22, 0.29, -0.2,\n 0.06, 0.14, 0.3, -0.1])\n graph = Bunch()\n graph.adjacency = adjacency\n graph.names = np.array(names)\n graph.position = np.vstack((x, y)).T\n graph.name = 'miserables'\n return graph\n else:\n return adjacency",
"def get_random_smoothing_matrix2(kernel_widths, counts, num_regions=5):\n s_matrix = []\n regions = []\n scount_idxs = np.argsort(counts.numpy())\n region_start, this_region = None, None\n idx_to_region_mapping = {}\n for _ii, idx in enumerate(scount_idxs):\n if region_start is None:\n assert _ii==0\n region_start = counts[idx]\n this_region = []\n if counts[idx]/region_start > 2:\n regions.append(this_region)\n region_start = counts[idx]\n this_region = []\n this_region.append(idx)\n idx_to_region_mapping[idx] = len(regions)\n regions.append(this_region)\n \n num_obs = len(counts)\n for ni in range(num_obs):\n rid = idx_to_region_mapping[ni]\n for width in kernel_widths:\n for wi in range(num_regions):\n s_row = np.zeros([num_obs])\n _w = min(len(regions[rid]), width-1)\n idxs = [ni] + np.random.choice(regions[rid], _w, replace=False).tolist()\n for idx in idxs:\n s_row[idx] = 1./(_w+1)\n s_matrix.append(s_row)\n S = np.stack(s_matrix, axis=0).astype(np.float32)\n \n assert np.alltrue(S.sum(axis=-1)>0) \n return torch.from_numpy(S)",
"def _10x10_grid_clusters_close():\n return [mn(mean=np.array([i * 5, j * 5]), cov=np.array([[1.0, 0.0],\n [0.0, 1.0]]))\n for i in range(10)\n for j in range(10)]",
"def by_arm(self, min_gap_size: Union[int, float] = 1e5, min_arm_bins: int = 50):\n # ENH:\n # - Accept GArray of actual centromere regions as input\n # -> find largest gap (any size) within cmere region, split there\n # - Cache centromere locations once found\n self.data.chromosome = self.data.chromosome.astype(str)\n for chrom, subtable in self.data.groupby(\"chromosome\", sort=False):\n margin = max(min_arm_bins, int(round(0.1 * len(subtable))))\n if len(subtable) > 2 * margin + 1:\n # Found a candidate centromere\n gaps = (\n subtable.start.values[margin + 1 : -margin]\n - subtable.end.values[margin : -margin - 1]\n )\n cmere_idx = gaps.argmax() + margin + 1\n cmere_size = gaps[cmere_idx - margin - 1]\n else:\n cmere_idx = 0\n cmere_size = 0\n if cmere_idx and cmere_size >= min_gap_size:\n logging.debug(\n \"%s centromere at %d of %d bins (size %s)\",\n chrom,\n cmere_idx,\n len(subtable),\n cmere_size,\n )\n p_arm = subtable.index[:cmere_idx]\n yield chrom, self.as_dataframe(subtable.loc[p_arm, :])\n q_arm = subtable.index[cmere_idx:]\n yield chrom, self.as_dataframe(subtable.loc[q_arm, :])\n else:\n # No centromere found -- emit the whole chromosome\n if cmere_idx:\n logging.debug(\n \"%s: Ignoring centromere at %d of %d bins (size %s)\",\n chrom,\n cmere_idx,\n len(subtable),\n cmere_size,\n )\n else:\n logging.debug(\"%s: Skipping centromere search, too small\", chrom)\n yield chrom, self.as_dataframe(subtable)",
"def scampi(infile, sequence):\n aa2topo = {\n 'I': [1, 0, 0, 0],\n 'M': [0, 1, 0, 0],\n 'O': [0, 0, 1, 0]\n }\n result = []\n with open(infile, 'r') as fh:\n for line in fh:\n if not line.startswith('>'):\n for aa in line.strip():\n result.append(aa2topo[aa])\n\n return np.array([result])",
"def cbindMatrices(hm, args):\n hm2 = heatmapper.heatmapper()\n\n # Make a dict of region name:row associations\n hm.read_matrix_file(args.matrixFile[0])\n d = dict({x: dict() for x in hm.parameters[\"group_labels\"]})\n for idx, group in enumerate(hm.parameters[\"group_labels\"]):\n s = hm.parameters[\"group_boundaries\"][idx]\n e = hm.parameters[\"group_boundaries\"][idx + 1]\n for idx2, reg in enumerate(hm.matrix.regions[s:e]):\n d[group][reg[2]] = idx2 + s\n\n # Iterate through the other matrices\n for idx in range(1, len(args.matrixFile)):\n hm2.read_matrix_file(args.matrixFile[idx])\n # Add the sample labels\n hm.parameters['sample_labels'].extend(hm2.parameters['sample_labels'])\n # Add the sample boundaries\n lens = [x + hm.parameters['sample_boundaries'][-1] for x in hm2.parameters['sample_boundaries']][1:]\n hm.parameters['sample_boundaries'].extend(lens)\n\n # Add on additional NA initialized columns\n ncol = hm.matrix.matrix.shape[1]\n hm.matrix.matrix = np.hstack((hm.matrix.matrix, np.empty(hm2.matrix.matrix.shape)))\n hm.matrix.matrix[:, ncol:] = np.NAN\n\n # Update the values\n for idx2, group in enumerate(hm2.parameters[\"group_labels\"]):\n if group not in d:\n continue\n s = hm2.parameters[\"group_boundaries\"][idx2]\n e = hm2.parameters[\"group_boundaries\"][idx2 + 1]\n for idx3, reg in enumerate(hm2.matrix.regions[s:e]):\n if reg[2] not in d[group]:\n continue\n hm.matrix.matrix[d[group][reg[2]], ncol:] = hm2.matrix.matrix[s + idx3, :]\n\n # Append the special params\n for s in hm.special_params:\n hm.parameters[s].extend(hm2.parameters[s])\n\n # Update the sample parameters\n hm.matrix.sample_labels = hm.parameters['sample_labels']\n hm.matrix.sample_boundaries = hm.parameters['sample_boundaries']",
"def czgenerator(cols, db):\n\n\tcfh = []\n\tif not cols:\n\t\tcols = czcols(db)\n\tfor col in cols:\n\t\tcolfile = db + \"/\" + col + \".gz\"\n\t\tcfh.append(gzip.GzipFile(colfile))\n\tfor line in cfh[0]:\n\t\tv = [line.strip()] + [c.readline().strip() for c in cfh[1:]]\n\t\tyield v",
"def index_iterator((x_min, x_max, y_min, y_max)):\n for row in xrange(y_min, y_max):\n for col in xrange(x_min, x_max):\n yield (row, col)",
"def painters(metadata: bool = False) -> Union[sparse.csr_matrix, Bunch]:\n row = np.array(\n [0, 0, 1, 1, 2, 3, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5, 5,\n 6, 6, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 9, 9,\n 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 12, 12, 12, 12, 13, 13])\n col = np.array(\n [3, 10, 3, 12, 9, 0, 1, 7, 11, 12, 2, 5, 9, 2, 4, 8, 9,\n 0, 13, 1, 2, 3, 8, 11, 12, 0, 1, 4, 5, 7, 10, 11, 2, 4,\n 0, 3, 8, 11, 12, 0, 1, 3, 10, 12, 1, 3, 4, 7, 6, 8])\n adjacency = sparse.csr_matrix((np.ones(len(row), dtype=bool), (row, col)), shape=(14, 14))\n\n if metadata:\n names = np.array(\n ['Pablo Picasso', 'Claude Monet', 'Michel Angelo', 'Edouard Manet', 'Peter Paul Rubens', 'Rembrandt',\n 'Gustav Klimt', 'Edgar Degas', 'Vincent van Gogh', 'Leonardo da Vinci', 'Henri Matisse', 'Paul Cezanne',\n 'Pierre-Auguste Renoir', 'Egon Schiele'])\n x = np.array(\n [0.24, -0.47, -0.3, -0.31, -0.08, 0.12, 0.78, -0.36, 0.11,\n -0.06, -0.02, -0.12, -0.24, 0.73])\n y = np.array(\n [0.53, 0.19, -0.71, 0.44, -0.48, -0.65, 0.69, -0.11, 0.01,\n -1., 0.49, 0.28, 0.06, 0.27])\n graph = Bunch()\n graph.adjacency = adjacency\n graph.names = names\n graph.position = np.stack((x, y)).T\n graph.name = 'painters'\n return graph\n else:\n return adjacency",
"def gen_Jij(atom_list,jmats):\n N_atoms = len(atom_list)\n jij_values = []\n jij_columns = []\n jij_rowIndex = []\n zeroval = np.zeros((3,3))\n \n # Counts total number of interactions: needed for row indexing\n num_inters = 0\n # Scan through atom_list\n \n nbrs_ints = [] \n for i in range(N_atoms):\n nbrs_ints = atom_list[i].interactions\n nbrs_ints.sort()\n\n # Now we have a sorted list of (nbr,intr) tuples from lowest neighbor to highest neighbor\n # Scan through interactions\n if len(nbrs_ints)>0:\n for j in range(len(nbrs_ints)):\n nbr = nbrs_ints[j][0]\n intr = nbrs_ints[j][1]\n \n #Get an interaction matrix\n curr_mat = jmats[intr].tolist()\n curr_mat = np.array(curr_mat, dtype=np.float64)\n \n # Values = current matrix\n # Columns = the current neighbor\n # RowIndex = total number of interactions \n jij_values.append(curr_mat)\n jij_columns.append(nbr)\n if j == 0:\n jij_rowIndex.append(num_inters)\n \n # Increase number of total interactions\n num_inters = num_inters + 1\n else:\n jij_values.append(zeroval)\n jij_columns.append(0)\n jij_rowIndex.append(num_inters)\n num_inters = num_inters + 1\n # Add dummy index to rowIndex\n jij_rowIndex.append(len(jij_values))\n\n # Convert to numpy arrays\n jij_values = np.array(jij_values)\n jij_columns = np.array(jij_columns)\n jij_rowIndex = np.array(jij_rowIndex)\n \n print jij_values\n print jij_values.shape[1:]\n print N_atoms + 1\n print len(jij_rowIndex)\n print jij_columns\n print jij_rowIndex\n \n # Create Sparse Array\n jij = bsr_matrix( (jij_values,jij_columns,jij_rowIndex), shape=(3*N_atoms,3*N_atoms) ).todense()\n\n return jij",
"def compress_csc(self):\n _, unique, indices = np.unique(\n self.rows + self.n*self.cols,\n return_index=True, return_inverse=True)\n self.rows = self.rows[unique]\n self.cols = self.cols[unique]\n self.vals = np.bincount(indices, weights=self.vals)",
"def demand_matrices(self) -> list:\n for sample in range(self.__n_samples):\n try:\n yield self.demand_matrix(sample)\n except:\n continue",
"def gen_random_matrix_(region_sizes, result, densities=None):\n if densities is None:\n densities = [0.01, 0.02, 0.0001, 0.0005]\n ab, ba, aa, bb = gen_random_matrix(*region_sizes, *densities)\n mc = MatrixConnectivity(ab=ab, ba=ba, aa=aa, bb=bb)\n mc.create_connections()\n reverse_graph = reverse(mc.graph)\n args_dict = mc.compute_stats()\n result[\"full_matrix_stats\"] = print_args_dict(args_dict, out=False)\n to_write = [mc.num_a, mc.num_b]\n\n return mc, reverse_graph, to_write, args_dict",
"def generateMines(num_rows, num_cols, num_mines):\n arr = np.random.permutation(num_rows * num_cols)\n return arr[:num_mines]",
"def get_stain_matrix(I):",
"def candidate_map(self):\n candidates = [[set(range(1, 10)) for _dummy in range(9)] for _dummy in range(9)]\n vertex_value_unknown = [[True for _dummy in range(9)] for _dummy in range(9)]\n for (line, row) in [(ln, rw) for ln in range(9) for rw in range(9)]:\n if self.grid[line][row] in range(1, 10):\n candidates[line][row] = set([self.grid[line][row]])\n vertex_value_unknown[line][row] = False\n for i in range(9):\n if i != row:\n candidates[line][i].discard(self.grid[line][row])\n if i != line:\n candidates[i][row].discard(self.grid[line][row])\n if line - line%3 + i//3 != line or row - row%3 + i%3 != row:\n candidates[line - line%3 + i//3][row - row%3 + i%3].discard(self.grid[line][row])\n # Further reduce candidate map\n reduce_cadidate_map_further = True\n while reduce_cadidate_map_further:\n reduce_cadidate_map_further = False\n total_number_of_candidates = sum([len(candidates[ln][rw]) for ln in range(9) for rw in range(9)])\n for number in range(1, 10):\n for i in range(9):\n # Check for single possible vertex for *number* in candidate map line *i*\n seen_in_j = []\n for j in range(9):\n if number in candidates[i][j]:\n seen_in_j.append(j)\n if len(seen_in_j) == 1 and vertex_value_unknown[i][seen_in_j[0]]:\n candidates[i][seen_in_j[0]] = set([number])\n vertex_value_unknown[i][seen_in_j[0]] = False\n # Discard other candidates for *number* in corresponding row and subsquare\n for j in range(9):\n if j != i:\n candidates[j][seen_in_j[0]].discard(number)\n if i - i%3 + j//3 != i:\n candidates[i - i%3 + j//3][seen_in_j[0] - seen_in_j[0]%3 + j%3].discard(number)\n # otherwise add check wheter all candidates for *number* are in the same subsquare\n elif 1 < len(seen_in_j) < 4:\n subsquares = set()\n for j in seen_in_j:\n subsquares.add(3*(i//3) + j//3)\n if len(subsquares) == 1:\n subsquare = subsquares.pop()\n for j in range(9):\n if 3*(subsquare//3) + j//3 != i:\n candidates[3*(subsquare//3) + j//3][3*(subsquare%3) + j%3].discard(number)\n # Check for single possible vertex for *number* in candidate map row *i*\n seen_in_j = []\n for j in range(9):\n if number in candidates[j][i]:\n seen_in_j.append(j)\n if len(seen_in_j) == 1 and vertex_value_unknown[seen_in_j[0]][i]:\n candidates[seen_in_j[0]][i] = set([number])\n vertex_value_unknown[seen_in_j[0]][i] = False\n # Discard other candidates for *number* in corresponding line and subsquare\n for j in range(9):\n if j != i:\n candidates[seen_in_j[0]][j].discard(number)\n if i - i%3 + j%3 != i:\n candidates[seen_in_j[0] - seen_in_j[0]%3 + j//3][i - i%3 + j%3].discard(number)\n # otherwise add check wheter all candidates for *number* are in the same subsquare\n elif 1 < len(seen_in_j) < 4:\n subsquares = set()\n for j in seen_in_j:\n subsquares.add(3*(j//3) + i//3)\n if len(subsquares) == 1:\n subsquare = subsquares.pop()\n for j in range(9):\n if 3*(subsquare%3) + j%3 != i:\n candidates[3*(subsquare//3) + j//3][3*(subsquare%3) + j%3].discard(number)\n # Check for single possible vertex for *number* in candidate map subsquare *i*\n seen_in_j = []\n for j in range(9):\n if number in candidates[3*(i//3) + j//3][3*(i%3) + j%3]:\n seen_in_j.append(j)\n if len(seen_in_j) == 1 and vertex_value_unknown[3*(i//3) + seen_in_j[0]//3][3*(i%3) + seen_in_j[0]%3]:\n candidates[3*(i//3) + seen_in_j[0]//3][3*(i%3) + seen_in_j[0]%3] = set([number])\n vertex_value_unknown[3*(i//3) + seen_in_j[0]//3][3*(i%3) + seen_in_j[0]%3] = False\n # Discard other candidates for *number* in corresponding line and row\n for j in range(9):\n if j not in [3*(i%3), 3*(i%3) + 
1, 3*(i%3) + 2]:\n candidates[3*(i//3) + seen_in_j[0]//3][j].discard(number)\n if j not in [3*(i//3), 3*(i//3) + 1, 3*(i//3) + 2]:\n candidates[j][3*(i%3) + seen_in_j[0]%3].discard(number)\n # otherwise add check wheter all candidates for *number* are in the same line/row\n elif 1 < len(seen_in_j) < 4:\n lines = set()\n rows = set()\n for j in seen_in_j:\n lines.add(3*(i//3) + j//3)\n rows.add(3*(i%3) + j%3)\n if len(lines) == 1:\n line = lines.pop()\n for row in [rw for rw in range(9) if rw not in [3*(i%3), 3*(i%3) + 1, 3*(i%3) + 2]]:\n candidates[line][row].discard(number)\n elif len(rows) == 1:\n row = rows.pop()\n for line in [ln for ln in range(9) if ln not in [3*(i//3), 3*(i//3) + 1, 3*(i//3) + 2]]:\n candidates[line][row].discard(number)\n if sum([len(candidates[ln][rw]) for ln in range(9) for rw in range(9)]) < total_number_of_candidates:\n reduce_cadidate_map_further = True\n return candidates",
"def generateNumbersArr(mines, num_rows, num_cols):\n minesIn2D = [convertFrom1Dto2D(mine, num_cols) for mine in mines] \n minesSet = set(minesIn2D) #set of tuples\n\n #initialize a 2D array\n #newArr = new Array(rows).fill(0).map(() => new Array(cols).fill(0));\n newArr = np.zeros(shape=(num_rows, num_cols))\n for i in range(num_rows):\n for j in range(num_cols):\n if ((i, j) in minesSet):\n newArr[i][j]=9\n else:\n newArr[i][j]=numNeighbors(minesSet, i, j, num_cols, num_rows)\n return newArr",
"def sample_from_ibp(K, alpha, sigma, c):\n pp = poissonparams(K, alpha, sigma, c)\n new_nodes = np.random.poisson(pp)\n Ncols = new_nodes.sum()\n node_count = np.zeros(Ncols)\n\n # used to build sparse matrix, entries of each Zij=1\n colidx = [] \n rowidx = []\n rightmost_node = 0\n\n # for each clique\n for n in range(K):\n # revisit each previously seen node\n for k in range(rightmost_node):\n prob_repeat = (node_count[k] - sigma) / (n + c)\n r = np.random.rand()\n if r < prob_repeat:\n rowidx.append(n)\n colidx.append(k)\n node_count[k] += 1\n\n for k in range(rightmost_node, rightmost_node + new_nodes[n]):\n rowidx.append(n)\n colidx.append(k)\n node_count[k] += 1\n \n rightmost_node += new_nodes[n]\n\n # build sparse matrix\n data = np.ones(len(rowidx), int)\n shape = (K, Ncols)\n Z = csr_matrix((data, (rowidx, colidx)), shape)\n\n return Z"
] |
[
"0.6095622",
"0.5488364",
"0.54629165",
"0.5376488",
"0.5363729",
"0.52927715",
"0.52869534",
"0.5282201",
"0.5278",
"0.5261104",
"0.52289116",
"0.5223765",
"0.52106524",
"0.5200011",
"0.518992",
"0.51824594",
"0.518002",
"0.5161271",
"0.5128736",
"0.51250774",
"0.51108515",
"0.51067626",
"0.50911665",
"0.509106",
"0.50890696",
"0.5086174",
"0.5083149",
"0.50804406",
"0.50794417",
"0.50439394"
] |
0.7412908
|
0
|
Takes a cooler, a set of regions (chromosomes, chromosomal arms, or smaller regions), and a dataframe. For each region, it converts the (chrom1, pos1) columns of the dataframe (specified in the "columns" argument) to a column named "ind1", which is the bin index of that position relative to the start of the region.
|
def chunkDataFrame(c, regions, positionDataFrame,
columns=[("chrom1","pos1"), ("chrom2","pos2")], force=False, ):
originalRegions = [tuple(i) for i in regions]
regions = [list(i) for i in regions]
resolution = c.binsize
positionDataFrame = positionDataFrame.copy()
for i in regions:
        if i[1] is None:
            i[1] = 0
        if i[2] is None:
            i[2] = c.chromsizes[i[0]]
columns = list(columns)
    firstChr = columns[0][0]  # the first column specifying a chromosome
for a in columns: # check that all columns are in the dataframe
for b in a:
if b not in positionDataFrame:
raise ValueError("Column {0} not found in dataframe".format(b))
for a in columns[1:]: # check that all pairs are cis
if not (positionDataFrame[a[0]] == positionDataFrame[firstChr]).all():
raise ValueError("Only cis pileups are currently supported")
if not force: # check that dataframe has chromosomes that are in cooler ('not' added by Sameer - 3/18)
for a in positionDataFrame[firstChr].astype(str).unique():
if a not in c.chromnames:
raise ValueError("Chrom. {0} from Dataframe notin cooler (force=True to override)".format(a))
for a in regions:
if str(a[0]) not in c.chromnames:
raise ValueError("Chromosome {0} from regions not found in cooler".format(a))
if (a[1] % resolution) != 0:
raise ValueError("Start of an region should be a multiple fo resolution")
for a in columns:
positionDataFrame[a[0]] = positionDataFrame[a[0]].astype(str)
grouped = positionDataFrame.groupby(firstChr)
bins = c.bins()[:]
result = {}
for orig_region, region in zip(originalRegions, regions):
frame = grouped.get_group(region[0])
        framemask = np.ones(len(frame), dtype=bool)
for column in columns: # Selects for parts of the dataframe that are in the region. First selects chr1 then chr2.
pos = frame[column[1]].values
framemask[pos < region[1]] = False
framemask[pos >= region[2]] = False
frame = frame.iloc[framemask]
for j,column in enumerate(columns): # Creates associated indices for the matrices.
            indColumn = (frame[column[1]] - region[1]) // c.binsize
frame["ind{0}".format(j+1)] = indColumn
result[orig_region] = frame
return result
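
A minimal usage sketch for the function above (the cooler path, region, and position values are hypothetical; assumes the cooler and pandas packages are available):

import cooler
import pandas as pd

c = cooler.Cooler("example.cool")                   # hypothetical .cool file
regions = [("chr1", 0, 50_000_000)]                 # start must be a multiple of c.binsize
positions = pd.DataFrame({
    "chrom1": ["chr1", "chr1"], "pos1": [1_000_000, 2_000_000],
    "chrom2": ["chr1", "chr1"], "pos2": [1_500_000, 2_500_000],
})

chunks = chunkDataFrame(c, regions, positions)
frame = chunks[("chr1", 0, 50_000_000)]             # per-region dataframe with added "ind1"/"ind2" bin-index columns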
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def geocode(df, col):\r\n pass",
"def bin_electrodes_by_region(self, elec_column1='stein.region', elec_column2='ind.region',\n x_coord_column='ind.x', roi_dict=None):\n if self.elec_info is None:\n print('{}: please load data before trying to bin electrode locations'.format(self.subject))\n return\n\n # smoosh the columns together, with the first column taking precedence\n regions = self.elec_info[elec_column1].fillna(self.elec_info[elec_column2]).fillna(value='')\n\n # if no dictionary is providing, use this\n if roi_dict is None:\n roi_dict = {'Hipp': ['Left CA1', 'Left CA2', 'Left CA3', 'Left DG', 'Left Sub', 'Right CA1', 'Right CA2',\n 'Right CA3', 'Right DG', 'Right Sub'],\n 'MTL': ['Left PRC', 'Right PRC', 'Right EC', 'Right PHC', 'Left EC', 'Left PHC'],\n 'Frontal': ['parsopercularis', 'parsorbitalis', 'parstriangularis', 'caudalmiddlefrontal',\n 'rostralmiddlefrontal', 'superiorfrontal'],\n 'Temporal': ['superiortemporal', 'middletemporal', 'inferiortemporal'],\n 'Parietal': ['inferiorparietal', 'supramarginal', 'superiorparietal', 'precuneus'],\n 'Occipital': ['lateraloccipital', 'lingual', 'cuneus', 'pericalcarine']}\n\n # get ROI for each electrode. THIS GETS THE FIRST, IF IT IS IN MULTIPLE SOMEHOW\n elec_region_list = [''] * len(regions)\n for e, elec_region in enumerate(regions):\n for roi in roi_dict.keys():\n if elec_region in roi_dict[roi]:\n elec_region_list[e] = roi\n continue\n\n # get hemisphere\n elec_hemi_list = np.array(['right'] * len(regions))\n elec_hemi_list[self.elec_info[x_coord_column] < 0] = 'left'\n\n # make new DF\n region_df = self.elec_info[['label']].copy()\n region_df['region'] = elec_region_list\n region_df['hemi'] = elec_hemi_list\n\n return region_df",
"def get_coverage(regions1, fnames1, sample2nanopolish1):\n pos2count1 = {(ref, pos): [sum([1 for a in pysam.AlignmentFile(fn[:-10]).fetch(ref, pos-1, pos) if pass_filters(a)]) for fn in fnames1] \n for ref, pos, mt in regions1}\n # get number of resquiggled reads from tombo\n tombo1 = [\"guppy3.0.3.hac/%s/workspace/batch0.fast5.bam\"%fn.split(\"/\")[-2] for fn in fnames1]\n tombo_p2c1 = {(ref, pos): [sum([1 for a in pysam.AlignmentFile(fn).fetch(ref, pos-1, pos) if pass_filters(a)]) for fn in tombo1] \n for ref, pos, mt in regions1}\n # combine\n names1 = [\"%s %s\"%(n, fn.split(\"/\")[-2]) for n in (\"coverage\", \"nanopolish\", \"tombo\") for fn in fnames1]\n df4c1 = pd.DataFrame([[ref, pos, *cov, *[len(sample2nanopolish1[i][(ref, pos)]) for i, fn in enumerate(fnames1)], *tombo_p2c1[(ref, pos)]] \n for (ref, pos), cov in pos2count1.items()], columns=[\"chrom\", \"pos\", *names1])\n return df4c1",
"def encode_rgi(rgi_df, genome_ids):\n rgi_encoded = pd.DataFrame(index=genome_ids,\n columns=rgi_df['Best_Hit_ARO'].unique()).fillna(0)\n # print(rgi_encoded)\n for genome_id, rgi_data in rgi_df.iterrows():\n rgi_encoded.loc[rgi_data['Sample'], rgi_data['Best_Hit_ARO']] += 1\n\n return rgi_encoded",
"def process_roi_coord(coor, roi_names):\n\n coor.rename(columns={'Unnamed: 0': 'ROI'}, inplace=True)\n idx = []\n for i in range(0, len(roi_names)): # Reordering according to ROInames\n for k in range(0, len(coor['ROI'])):\n if roi_names[i] == coor['ROI'][k]:\n idx.append(k)\n coor = coor.loc[idx, :]\n # From the new index created we reorganize the table by index.\n\n return coor",
"def get_loci_colnames(df):\n if 'node1_locus' in df.columns:\n return 'node1_locus', 'node2_locus'\n elif 'gene A' in df.columns:\n return 'gene A', 'gene B'",
"def bed_encoding(bed_df, reference):\n\n fasta = Fasta(reference, as_raw=True)\n seq_list = list()\n for _, i in bed_df.iterrows():\n print(f\"region:{i[0]}:{i[1]}-{i[2]}\")\n seq_list.append(one_hot_encoding(fasta[i[0]][i[1]:i[2]]))\n result = np.stack(seq_list)\n return result",
"def get_coords(self, df_primers):\n primer_list = []\n names_dup = []\n names = []\n exons = []\n dirs = []\n start_coords = []\n end_coords = []\n chroms = []\n seq_position = 0\n list_position = 0\n primer_seqs = pd.DataFrame([])\n csv = '%s.csv' % self.excel_file[:-5]\n csv = csv.replace(\" \", \"\")\n\n # (1) Gets sequences, exons and directions, splits the sequences into F+R and combines into series and then csv.\n for row_index, row in df_primers.iterrows():\n primer_list.append(str(row['Primer_seq']))\n names_dup.append(str(row['Gene']) + '_' + str(row['Exon']) + str(row['Direction']))\n exons.append(str(row['Exon']))\n dirs.append(str(row['Direction']))\n for item in names_dup:\n if item not in names:\n names.append(item)\n\n forwards = primer_list[::2]\n reverses = primer_list[1::2]\n\n while list_position < len(forwards):\n ser = pd.Series([names[list_position], forwards[list_position], reverses[list_position]])\n primer_seqs = primer_seqs.append(ser, ignore_index=True)\n list_position += 1\n\n primer_seqs.to_csv(csv, header=None, index=None, sep='\\t')\n\n # (2) Runs virtual PCR on generated csv.\n bedfile = self.run_pcr(csv)\n tool = BedTool(bedfile)\n\n # (3) Uses results to calculate start and end position of each primer (results give PCR product). Adds to df.\n for row in tool:\n chroms.append(row.chrom)\n start_coords.append(row.start)\n end_coords.append(row.start + len(primer_list[seq_position]))\n chroms.append(row.chrom)\n end_coords.append(row.end)\n start_coords.append(row.end - len(primer_list[seq_position + 1]))\n seq_position += 1\n\n df_coords = pd.DataFrame([])\n df_coords.insert(0, 'chrom', chroms)\n df_coords.insert(1, 'start', start_coords)\n df_coords.insert(2, 'end', end_coords)\n df_coords.insert(3, 'name', names)\n\n # (4) Generates a bed file from df_coords (not currently used in application).\n bed = os.path.splitext(bedfile)[0]\n df_coords.to_csv('%s.csv' % bed, header=None, index=None, sep='\\t') # cannot directly convert to bed.\n csv_file = BedTool('%s.csv' % bed)\n csv_file.saveas('%s.bed' % bed)\n\n df_coords.insert(4, 'Exon', exons) # not need in bed file so added after.\n df_coords.insert(5, 'Direction', dirs)\n\n # Removes unnecessary files and moves BED file into shared folder. (add /tests for unit testing)\n os.system(\"rm /home/cuser/PycharmProjects/django_apps/mysite/%s.csv\" % bed)\n os.system(\"mv /home/cuser/PycharmProjects/django_apps/mysite/%s.bed /media/sf_sarah_share/bedfiles\" %\n bed)\n os.system(\"rm /home/cuser/PycharmProjects/django_apps/mysite/%s\" % csv)\n\n return df_coords",
"def geneExonicRegions(self, df):\n scaffold = df.iloc[0].scaffold\n strand = df.iloc[0].strand\n gene_type = df.iloc[0].gene_type\n gene_id = df.iloc[0].gene_id\n gene_name = df.iloc[0].gene_name\n start = df.start.min()\n end = df.end.max()\n bp = [False] * (end - start + 1)\n for i in range(df.shape[0]):\n s = df.iloc[i]['start'] - start\n e = df.iloc[i]['end'] - start + 1\n bp[s:e] = [True] * (e - s)\n regions = list(range(start, end + 1))\n groups = []\n\n for i, j in groupby(bp):\n groups.append((i, len(list(j))))\n e_start = 0\n\n for i in groups:\n e_end = e_start + i[1]\n if i[0]:\n record = Record(scaffold=scaffold, start=regions[e_start],\n end=regions[e_end - 1], gene_type=gene_type, gene_id=gene_id,\n gene_name=gene_name, strand=strand)\n yield record\n e_start += i[1]",
"def get_inter_cds_regions(annotations):\n # Determine locations of inter-CDS regions for each chromosome\n inter_cds_regions = {}\n\n for chr_id, chromosome in annotations.items():\n # get chromosome dimensions (the first feature represents the\n # chromosome itself)\n ch_end = int(chromosome.features[0].location.end)\n\n # filter out everything except for genes\n genes = [x for x in chromosome.features if x.type == 'gene']\n\n # order by position along chromosome\n genes.sort(key=lambda x: x.location.start)\n\n # add range before first gene\n start = 0\n\n # keep track of strand of polycistronic transcriptional unit\n strand = None\n\n inter_cds_regions[chr_id] = {\n -1: [],\n +1: []\n }\n\n # iterate through genes and store the ranges between them;\n # for TriTrypDB files, the gene boundaries are generally the same\n # as the CDS boundaries.\n for gene in genes:\n # Determine location for the region up to start of the gene\n end = int(gene.location.start)\n\n # Skip over snoRNAs, etc. that are nested inside of other genes\n # For example: TcCLB TcChr22-2 179,000:180,000\n if end <= start:\n continue\n\n # Add CDS to relevant list based on strand\n if strand is None:\n # Left-most gene\n inter_cds_regions[chr_id][gene.location.strand].append((start, end))\n elif strand != gene.location.strand:\n # Add ORFs in both directions at transcription switch sites (TSSs)\n inter_cds_regions[chr_id][+1].append((start, end))\n inter_cds_regions[chr_id][-1].append((start, end))\n else:\n # Within PTU; look for ORFs on same strand\n inter_cds_regions[chr_id][strand].append((start, end))\n\n # update start counter and strand\n start = int(gene.location.end)\n strand = gene.location.strand\n\n # add region after last gene\n inter_cds_regions[chr_id][strand].append((start, ch_end))\n\n return inter_cds_regions",
"def zillow_df(frame, region_list, region_column_str, columns_list):\n frame = frame[frame[region_column_str].isin(region_list)]\n frame = frame[columns_list]\n frame[region_column_str] = frame[region_column_str].astype(str)\n return frame",
"def column_to_index(df_data, columns):\n df_data = df_data.set_index(columns, inplace=True)\n return df_data",
"def _regions(self, voronoi_diagram, unique_id, ids, crs):\n # generate DataFrame of results\n regions = pd.DataFrame()\n regions[unique_id] = ids # add unique id\n regions[\"region\"] = voronoi_diagram.point_region # add region id for each point\n\n # add vertices of each polygon\n vertices = []\n for region in regions.region:\n vertices.append(voronoi_diagram.regions[region])\n regions[\"vertices\"] = vertices\n\n # convert vertices to Polygons\n polygons = []\n for region in tqdm(regions.vertices, desc=\"Vertices to Polygons\"):\n if -1 not in region:\n polygons.append(Polygon(voronoi_diagram.vertices[region]))\n else:\n polygons.append(None)\n # save polygons as geometry column\n regions[\"geometry\"] = polygons\n\n # generate GeoDataFrame\n regions_gdf = gpd.GeoDataFrame(regions.dropna(), geometry=\"geometry\")\n regions_gdf = regions_gdf.loc[\n regions_gdf[\"geometry\"].length < 1000000\n ] # delete errors\n regions_gdf = regions_gdf.loc[\n regions_gdf[unique_id] != -1\n ] # delete hull-based cells\n regions_gdf.crs = crs\n return regions_gdf",
"def get_gtf_region_position_info(region_gtf_pr):\n ## Check if region has no features\n if region_gtf_pr.empty:\n return dict()\n\n gene_info_dict = dict()\n for name, group in region_gtf_pr.df.groupby(\"transcript_id\"):\n for row in group.itertuples():\n\n ## Add Transcript Info\n if row.Feature == \"transcript\":\n\n if row.gene_id not in gene_info_dict:\n gene_info_dict[row.gene_id] = dict()\n if row.transcript_id not in gene_info_dict[row.gene_id]:\n gene_info_dict[row.gene_id][row.transcript_id] = {\"exons\": []}\n\n gene_info_dict[row.gene_id][row.transcript_id][\"chrom\"] = row.Chromosome\n gene_info_dict[row.gene_id][row.transcript_id][\"start\"] = row.Start\n gene_info_dict[row.gene_id][row.transcript_id][\"end\"] = row.End\n gene_info_dict[row.gene_id][row.transcript_id][\n \"gene_symbol\"\n ] = row.gene_name\n gene_info_dict[row.gene_id][row.transcript_id][\n \"biotype\"\n ] = row.gene_type\n gene_info_dict[row.gene_id][row.transcript_id][\"strand\"] = row.Strand\n\n ## Add exon feature info\n elif row.Feature == \"exon\":\n\n if row.gene_id not in gene_info_dict:\n gene_info_dict[row.gene_id] = dict()\n if row.transcript_id not in gene_info_dict[row.gene_id]:\n gene_info_dict[row.gene_id][row.transcript_id] = {\"exons\": []}\n\n gene_info_dict[row.gene_id][row.transcript_id][\"exons\"].append(\n {\"start\": row.Start, \"end\": row.End, \"exon_number\": row.exon_number}\n )\n\n return gene_info_dict",
"def gene_coords_by_range(probes, chrom, start, end, ignore=params.IGNORE_GENE_NAMES):\n ignore += params.ANTITARGET_ALIASES\n # Tabulate the genes in the selected region\n genes = collections.OrderedDict()\n for row in probes.in_range(chrom, start, end):\n name = str(row.gene)\n if name in genes:\n genes[name][1] = row.end\n elif name not in ignore:\n genes[name] = [row.start, row.end]\n # Reorganize the data structure\n return {\n chrom: [(gstart, gend, name) for name, (gstart, gend) in list(genes.items())]\n }",
"def getAreas(df):\n\n df_plu = df[df[\"strand\"]==\"+\"]\n df_min = df[df[\"strand\"]==\"-\"]\n df_plu_FA = FivePrimeArea(df_plu)\n df_min_FA = FivePrimeArea(df_min)\n df_plu_LA = ThreePrimeArea(df_plu)[[\"name\",\"LA_start\",\"LA_end\",\"LA_length\"]]\n df_min_LA = ThreePrimeArea(df_min)[[\"name\",\"LA_start\",\"LA_end\",\"LA_length\"]]\n df_plu = pd.merge(df_plu_FA,df_plu_LA,on=\"name\")\n df_min = pd.merge(df_min_FA,df_min_LA,on=\"name\")\n df = pd.concat([df_plu,df_min])\n return df",
"def get_data(f, zoom_level, start_pos_1, end_pos_1, start_pos_2, end_pos_2):\n \n c = cooler.Cooler(f[str(zoom_level)])\n \n (chroms, chrom_sizes, chrom_cum_lengths) = get_chromosome_names_cumul_lengths(c)\n \n i0 = abs_coord_2_bin(c, start_pos_1, chroms, chrom_cum_lengths, chrom_sizes)\n i1 = abs_coord_2_bin(c, end_pos_1, chroms, chrom_cum_lengths, chrom_sizes)\n j0 = abs_coord_2_bin(c, start_pos_2, chroms, chrom_cum_lengths, chrom_sizes)\n j1 = abs_coord_2_bin(c, end_pos_2, chroms, chrom_cum_lengths, chrom_sizes)\n \n pixels = c.matrix(as_pixels=True, balance=False, max_chunk=np.inf)[i0:i1+1, j0:j1+1]\n \n if not len(pixels):\n return pd.DataFrame(columns=['genome_start1', 'genome_start2', 'balanced'])\n \n bins = c.bins()[['chrom', 'start', 'end', 'weight']]\n pixels = annotate(pixels, bins)\n\n pixels['genome_start1'] = chrom_cum_lengths[pixels['chrom1']] + pixels['start1']\n pixels['genome_start2'] = chrom_cum_lengths[pixels['chrom2']] + pixels['start2']\n pixels['balanced'] = (\n pixels['count'] * pixels['weight1'] * pixels['weight2']\n )\n \n return pixels[['genome_start1', 'genome_start2', 'balanced']]",
"def check_regs(region_df, chr_name=None, start_name=None, stop_name=None,\n strand_name=None, sample_name=None):\n\n if sample_name is None:\n region_df.index = np.repeat(default_id_sample, len(region_df))\n else:\n region_df = search_column(region_df, id_sample_aliases,\n id_sample_types, 'id_sample', sample_name)\n region_df = region_df.set_index(\"id_sample\", drop=True)\n region_df = region_df.sort_index()\n\n region_df = search_column(region_df, chr_aliases, chr_types, 'chr', chr_name)\n region_df = search_column(region_df, start_aliases, start_types, 'start', start_name)\n region_df = search_column(region_df, stop_aliases, stop_types, 'stop', stop_name)\n region_df = search_column(region_df, strand_aliases, strand_types, 'strand', strand_name)\n return region_df",
"def iat_df(self, df):\n result = self.iat(**df).reset_coords(drop=True).to_dataframe()\n if isinstance(df, pd.DataFrame):\n result.index = df.index\n return result",
"def add_loc_cols(df):\r\n\r\n\tdf['STATE'] = [int(i[1:3]) for i in df.gisjoin]\r\n\tdf['COUNTY'] = [int(i[4:7]) for i in df.gisjoin]\r\n\tdf['TRACT'] = [int(i[7:-4]) for i in df.gisjoin]\r\n\tdf['BLOCK'] = [int(i[-4:]) for i in df.gisjoin]\r\n\r\n\tif df.STATE[0] > 9:\r\n\t\traise Exception(\"Warning! Code might be incorrect for states with fips code > 9\")\r\n\r\n\treturn df",
"def Indexes(self, latitudes, longitudes):\n res = self._transform.TransformPoints(\n np.column_stack((longitudes, latitudes)))\n res = list(zip(*res))\n x, y = np.array(res[0]), np.array(res[1])\n idx_col = self._inv_txf[0] + self._inv_txf[1] * x + self._inv_txf[2] * y\n idx_row = self._inv_txf[3] + self._inv_txf[4] * x + self._inv_txf[5] * y\n return idx_row.astype(int), idx_col.astype(int)",
"def select_regions(data, region_col, regions, combine_subregions=True):",
"def call_cells(df_reads):\n cols = [WELL, TILE, CELL]\n s = (df_reads\n .drop_duplicates([WELL, TILE, BLOB])\n .groupby(cols)[BARCODE]\n .value_counts()\n .rename('count')\n .sort_values(ascending=False)\n .reset_index()\n .groupby(cols)\n )\n\n return (df_reads\n .join(s.nth(0)[BARCODE].rename(BARCODE_0), on=cols)\n .join(s.nth(0)['count'].rename(BARCODE_COUNT_0).fillna(0), on=cols)\n .join(s.nth(1)[BARCODE].rename(BARCODE_1), on=cols)\n .join(s.nth(1)['count'].rename(BARCODE_COUNT_1).fillna(0), on=cols)\n .join(s['count'].sum() .rename(BARCODE_COUNT), on=cols)\n .drop_duplicates(cols)\n [[WELL, TILE, CELL, BARCODE_0, BARCODE_COUNT_0, BARCODE_1, BARCODE_COUNT_1]]\n )",
"def build_gene_indexes(df):\n\tgeneDict = OrderedDict()\n\n\tgeneCount = 0\n\tpreviousGeneIndex = 0\n\n\tcurrent_id=\"\"\n\tcurrent_gene=\"\"\n\n\tfor i in range(len(df)):\n\n\t\tif df.loc[i,'feature'] == 'gene':\n\t\t\ttrdict = parse_entry(df.loc[i,'transcript_id'])\n\n\t\t\tcurGeneID = trdict['gene_id'][0]\n\t\t\n\t\t\tif geneCount != 0:\n\t\t\t\tnewGeneIndex = i\n\t\t\t\tgeneDict[current_id] = [previousGeneIndex,newGeneIndex]\n\t\t\t\tpreviousGeneIndex = i\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\t\tgeneCount += 1\n\n\t\t\telse:\n\t\t\t\tnewgeneIndex = 0\n\t\t\t\tgeneCount +=1\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\tif i == (len(df)-1):\n\t\t\tnewGeneIndex = i+1\n\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\tgeneDict[current_id] = [previousGeneIndex,newGeneIndex]\n\treturn geneDict",
"def common_coordinate(df):\n \n if 'BP' in df and 'CHR' in df:\n df['COORDINATE'] = df['CHR'].astype(str) + ':' + df['BP'].astype(str)\n \n # elif use regex to find chr and bp \n else: \n df = re_add_col(df, 'ID', 'COORDINATE', r'\\d+:\\d+')\n \n re_add_col(df, 'COORDINATE', 'CHR', r'(\\d+):')\n df = df.astype({'CHR': 'int8'})\n \n re_add_col(df, 'COORDINATE', 'BP', r'\\d+:(\\d+)')\n df = df.astype({'BP': 'int32'}) \n \n df = df.loc[:, df.columns != 'ID'] \n \n return df",
"def chrDIC(df):\n chr_names=df['chr'].unique().tolist()\n d_chr = d_gtf_chr = {chrom : df[df[\"chr\"]==chrom] for chrom in chr_names}\n return d_chr",
"def tabix_command_from_chromosome_regions(regions_df,\n path_to_tabix,\n path_to_bgzip,\n path_to_bcftools,\n remove_SVs=False,\n local_dir=None,\n tmp_dir=None,\n http=False):\n # Make sure there's only one chromosome in the regions_df\n seen_chromosomes = list(regions_df['chrom'].unique())\n assert len(seen_chromosomes) == 1\n chrom = seen_chromosomes.pop()\n\n # Create a temporary bed file with this chromosome's regions\n # It will be used in the tabix command, as the -R parameter\n chrom_bedfile = temp_filepath(f'chr_{chrom}.bed', tmp_dir=tmp_dir)\n regions_df.to_csv(chrom_bedfile, sep='\\t', header=False, index=False)\n\n # Define the destination VCF filename for this chromosome\n dest_file = temp_filepath(f'chr_{chrom}.vcf.gz', tmp_dir=tmp_dir)\n\n chrom_1kg_url = thousand_genomes_chromosome_url(\n chromosome=chrom,\n local_dir=local_dir,\n http=http\n )\n # Generate the tabix command to download 1kG genotypes for these regions\n tabix_command = f'{path_to_tabix} -fh -R {chrom_bedfile} {chrom_1kg_url} | '\n if remove_SVs:\n tabix_command += f\"{path_to_bcftools} filter -e 'INFO/VT == \\\"SV\\\"' | \"\n tabix_command += f'{path_to_bgzip} > {dest_file}'\n\n chrom_index_file = basename(chrom_1kg_url) + '.tbi'\n\n return {'cmd': tabix_command,\n 'chromosome': chrom,\n 'dest_file': dest_file,\n 'chrom_bedfile': chrom_bedfile,\n 'chrom_index_file': chrom_index_file}",
"def getIndividual2ColIndex(cls, header, col_name2index, sampleStartingColumn=9):\n\t\tsys.stderr.write(\"Finding all individuals ...\")\n\t\tno_of_cols = len(header)\n\t\tindividual_name2col_index = {}\t#individual's column name -> an opened file handler to store genetic data\n\t\tcounter = 0\n\t\tfor i in xrange(sampleStartingColumn, no_of_cols):\n\t\t\tindividualName = header[i]\n\t\t\tcol_index = col_name2index.get(individualName)\n\t\t\tif not individualName:\t#ignore empty column\n\t\t\t\tcontinue\n\t\t\tif individualName[:-4]=='.bam':\n\t\t\t\tindividualCode = individualName[:-4]\t#get rid of .bam\n\t\t\telse:\n\t\t\t\tindividualCode = individualName\n\t\t\tindividual_name2col_index[individualCode] = col_index\n\t\t\tcounter += 1\n\t\tsys.stderr.write(\"%s individuals added. Done.\\n\"%(counter))\n\t\treturn individual_name2col_index",
"def geo2cell(geofile, posfile):",
"def reindexed_dataframe(self):\n tmp = self.metric.copy()\n tmp.index = tmp.index.map(self.match_regions())\n #give the same index order as the geojson\n out = tmp.reindex(index = self.regions_names())\n return out\n #index_name = out.index.name\n\n #return out.reset_index().dropna().set_index(index_name)[self.metric.name]"
] |
[
"0.60151386",
"0.5834247",
"0.57282376",
"0.5675862",
"0.56448144",
"0.56053734",
"0.5441846",
"0.54264975",
"0.5393006",
"0.5374605",
"0.5335748",
"0.52952147",
"0.52914923",
"0.5289396",
"0.52858377",
"0.52417886",
"0.52283543",
"0.5179012",
"0.51709",
"0.5138947",
"0.5138759",
"0.51337165",
"0.5128648",
"0.51172787",
"0.51164895",
"0.50936395",
"0.50723",
"0.5062633",
"0.50615275",
"0.50573784"
] |
0.66535616
|
0
|
A function to check for correct field types between the from and to fields.
|
def checkField(fromFieldType, toFieldType, delimiter):
if fromFieldType == "String":
if not toFieldType == "String":
arcpy.AddError("Copy To Field must be of type text when Read From Field is of type text.")
else:
if not toFieldType == "String":
if delimiter != "":
arcpy.AddError("Copy To Field must be of type text when Read From Field is of type numeric or date and you are using a delimiter.")
if delimiter == "":
if fromFieldType == "SmallInteger":
if not toFieldType in ["Integer", "SmallInteger", "Float", "Double"]:
if toFieldType == "Date":
arcpy.AddError("Copy To Field must be of type text.")
if fromFieldType == "Integer":
if toFieldType in ["SmallInteger", "Integer", "Float", "Double", "Date"]:
arcpy.AddError("Copy To Field must be of type text.")
else:
if fromFieldType in ["Float", "Double" , "Date"]:
if toFieldType in ["Integer", "SmallInteger", "Float", "Double" , "Date"]:
arcpy.AddError("Copy To Field must be of type text.")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def source_desti_field_chk(base_field, base_field_type, field_name):\n kwargs = {}\n if base_field != '':\n if base_field_type == '1':\n kwargs[field_name + '__exact'] = base_field\n if base_field_type == '2':\n kwargs[field_name + '__startswith'] = base_field\n if base_field_type == '3':\n kwargs[field_name + '__contains'] = base_field\n if base_field_type == '4':\n kwargs[field_name + '__endswith'] = base_field\n return kwargs",
"def _validate_fields(self, change_fields):\n pass",
"def test_model_field_types(self):\n self.assertTrue(isinstance(self.UserInfo.have_siblings, str))\n self.assertTrue(isinstance(self.UserInfo.known_env_exposures, str))\n self.assertTrue(isinstance(self.UserInfo.known_genetic_mutations, str))\n self.assertTrue(isinstance(self.UserInfo.age, int))",
"def get_check_types():",
"def _assert_input_object_types_equal(self, type1, type2):\n self.assertEqual(type1.name, type2.name)\n self.assertEqual(type1.description, type2.description)\n self.assertEqual(\n set(type1.fields.iterkeys()), set(type2.fields.iterkeys()))\n for name, t in type1.fields.iteritems():\n self.assertEqual(t.type_str(), type2.fields[name].type_str())",
"def CheckType(self, *args, **kwargs):\n pass",
"def allow_fieldtype_change(self, old_type: str, new_type: str) -> bool:\n\n\t\tdef in_field_group(group):\n\t\t\treturn (old_type in group) and (new_type in group)\n\n\t\treturn any(map(in_field_group, ALLOWED_FIELDTYPE_CHANGE))",
"def __check_args_type(self):\n if not isinstance(self.__min_range, (float, int)):\n error_msg = \"min_range must of type int or float, but given: \"\n error_msg += str(type(self.__min_range))\n raise TypeError(error_msg)\n elif not isinstance(self.__max_range, (float, int)):\n error_msg = \"max_range must of type int or float, but given: \"\n error_msg += str(type(self.__max_range))\n raise TypeError(error_msg)\n\n if isinstance(self.__min_range, bool):\n error_msg = \"min_range must of type int or float, but given: \"\n error_msg += str(type(self.__min_range))\n raise TypeError(error_msg)\n elif isinstance(self.__max_range, bool):\n error_msg = \"max_range must of type int or float, but given: \"\n error_msg += str(type(self.__max_range))\n raise TypeError(error_msg)",
"def _ValidateType(self, local_field_names, require_guid):\n # Make sure the typename is non-empty.\n if not self.typename:\n self.AddFinding(findings_lib.MissingTypenameError(self))\n elif not isinstance(self.typename, str):\n self.AddFinding(\n findings_lib.IllegalKeyTypeError(self.typename, self.file_context))\n elif not ENTITY_TYPE_NAME_REGEX.match(self.typename):\n self.AddFinding(\n findings_lib.InvalidTypenameError(self.typename, self.file_context))\n\n # Check for correct GUID format.\n if self.guid is not None and not ENTITY_TYPE_GUID_PATTERN.match(self.guid):\n self.AddFinding(findings_lib.InvalidTypeGuidError(self))\n if require_guid and self.guid is None:\n self.AddFinding(findings_lib.MissingTypeGuidError(self))\n\n # Passthrough types cannot be inherited, so make sure they are not defined\n # as abstract.\n if self.allow_undefined_fields and self.is_abstract:\n self.AddFinding(findings_lib.AbstractPassthroughTypeError(self))\n # Make sure the type description is non-empty.\n if not self.description:\n self.AddFinding(findings_lib.MissingEntityTypeDescriptionWarning(self))\n\n # Check for duplicate local fields.\n # this check is case insensitive to catch dupes earlier in the event that\n # we stop explicitly rejecting upper case characters\n check_fields = set()\n for field in local_field_names:\n field_lower = field.lower()\n if field_lower in check_fields:\n self.AddFinding(findings_lib.DuplicateFieldError(self, field))\n continue\n check_fields.add(field_lower)\n\n # TODO(berkoben): Add more checks to validate fields in isolation\n # (in case we don't have a field set to check against)\n # (i.e. check for chirality, formatting. Could use actual Field objects)\n\n # Check formatting of field name\n if len(field.split('/')) > 2:\n self.AddFinding(findings_lib.UnrecognizedFieldFormatError(self, field))\n\n # Check for duplicate parent names.\n parent_names_check = set()\n for parent_name in self.unqualified_parent_names:\n if parent_name in parent_names_check:\n self.AddFinding(findings_lib.DuplicateParentError(self, parent_name))\n continue\n parent_names_check.add(parent_name)\n\n # Check formatting of parent name\n if len(parent_name.split('/')) > 2:\n self.AddFinding(\n findings_lib.UnrecognizedParentFormatError(self, parent_name))\n\n # Enforce that the inherited_fields_expanded field is not set\n if self.inherited_fields_expanded:\n self.AddFinding(findings_lib.InheritedFieldsSetError(self))",
"def check_field_type(field_class):\n if field_class == 'TextField':\n field_type = 'Text field'\n elif field_class == 'NumericField':\n field_type = 'Numeric field'\n elif field_class == 'DateField':\n field_type = 'Date field'\n elif field_class == 'DateTimeField':\n field_type = 'Date & time field'\n elif field_class == 'TimeField':\n field_type = 'Time field'\n elif field_class == 'LookupField':\n field_type = 'Select box field'\n elif field_class == 'MultipleLookupField':\n field_type = 'Multiple select field'\n\n return field_type",
"def check_type(self):\n return True",
"def _check_types(self):\n if isinstance(self.unique_id, (int, str)): # should unique_id be a float?\n self.unique_id = str(self.unique_id)\n else:\n raise TypeError(f'unique_id incorrect type: {type(self.unique_id)}')\n try:\n self.ra = float(self.ra)\n except TypeError:\n print(f'ra incorrect type: {type(self.ra)}')\n try:\n self.dec = float(self.dec)\n except TypeError:\n print(f'dec incorrect type: {type(self.dec)}')\n try:\n self.z = float(self.z)\n except TypeError:\n print(f'z incorrect type: {type(self.z)}')\n if not isinstance(self.galcat, GCData):\n raise TypeError(f'galcat incorrect type: {type(self.galcat)}')\n if not -360. <= self.ra <= 360.:\n raise ValueError(f'ra={self.ra} not in valid bounds: [-360, 360]')\n if not -90. <= self.dec <= 90.:\n raise ValueError(f'dec={self.dec} not in valid bounds: [-90, 90]')\n if self.z < 0.:\n raise ValueError(f'z={self.z} must be greater than 0')",
"def test_load_from_msg_type_check(self):\n for msg in self.cases.keys():\n\n cr = CloudRecord()\n cr.load_from_msg(msg)\n\n for key in cr._int_fields:\n value = cr._record_content[key]\n # Check the value we are going to be passing to MySQL\n # is an integer or None. MySQL 5.6.x rejects the value\n # otherwise, whereas 5.1.x interprets it as integer 0.\n valid_value = isinstance(value, int) or value is None\n # Use 'repr' to show quote marks if value is a string.\n self.assertTrue(valid_value, 'Integer %s with value: %s\\n%s' %\n (key, repr(value), msg))\n\n for key in cr._float_fields:\n value = cr._record_content[key]\n # Check the value we are going to be passing to MySQL\n # is a float or None. MySQL 5.6.x rejects the value\n # otherwise, whereas 5.1.x interprets it as 0.00.\n valid_value = isinstance(value, float) or value is None\n # Use 'repr' to show quote marks if value is a string.\n self.assertTrue(valid_value, 'Decimal %s with value: %s\\n%s' %\n (key, repr(value), msg))\n\n for key in cr._datetime_fields:\n value = cr._record_content[key]\n # Check the value we are going to be passing to MySQL\n # is a datetime or None. MySQL 5.6.x rejects the value\n # otherwise, whereas 5.1.x interprets it as a zero timestamp.\n valid_value = isinstance(value, datetime) or value is None\n # Use 'repr' to show quote marks if value is a string.\n self.assertTrue(valid_value, 'Datetime %s with value: %s\\n%s' %\n (key, repr(value), msg))",
"def coerce_types(self):\n self._coerce_source_matchfield_as_integer()\n self._coerce_target_sortorder_as_integer()\n return self",
"def test_field_types(self):\n\n for mb_model in self.mb_model_list:\n mb_fields = mb_model._meta.fields\n db_cols = connection.introspection.get_table_description(\n self.cursor, mb_model._meta.db_table)\n db_relations = connection.introspection.get_relations(\n self.cursor, mb_model._meta.db_table)\n\n for i in range(0, len(mb_model._meta.fields)):\n expected_field_type = None\n if db_relations.get(i):\n expected_field_type = u'ForeignKey'\n else:\n expected_field_type = get_field_type(db_cols[i].type_code)\n\n self.assertEqual(\n mb_fields[i].get_internal_type(),\n expected_field_type\n )",
"def validate_fields(cls, message_type: str, attachment_data: dict) -> None:",
"def field_type_converter(self, old_type):\n\n if old_type == 'String':\n new_type = 'Text'\n elif old_type == 'Integer':\n new_type = 'Short'\n elif old_type == 'Date':\n new_type = 'Date'\n elif old_type == 'GlobalID':\n new_type = 'GUID'\n else:\n new_type = 'Double'\n return new_type",
"def _validate_type(self, tp: str, name: str = None):\n if tp is None:\n return None, None\n\n fields = None\n if tp.startswith('{'):\n # Submodel defined in JSON\n fields = parse_json_model(tp, modelname=name)\n if not fields:\n return None, None\n return snake_to_camel(name), fields\n\n normal_type = get_type_from_str(tp)\n if normal_type != \"None\":\n tp = normal_type\n\n return tp, fields",
"def test_types(self):\n field_types = (\n ('clip_id', int), ('created_at', datetime.datetime),\n ('description', str), ('filename', str),\n ('format', smscsv.MediaFormat), ('media_id', int), ('title', str)\n )\n for item in self.items:\n for name, type_ in field_types:\n self.assertIsInstance(getattr(item, name), type_)",
"def _verify_dict_field(self, _dict, name, types):\n if type(types) != list:\n types = [types]\n if str in types and unicode not in types:\n types.append(unicode)\n if unicode in types and str not in types:\n types.append(str)\n self.assertTrue(name in _dict, msg=\"Missing field '%s'\" % name)\n self.assertTrue(type(_dict[name]) in types,\n msg=\"Erroneous type of the field '%s': \"\n \"found %s, expected any of %s\" % (\n name, str(type(_dict[name])), \",\".join([str(x) for x in types])))",
"def check_type_compat(input_a, input_b):\n return return_family_type(input_a) is return_family_type(input_b)",
"def check_flow_by_fields(flowby_df, flowbyfields):\n for k, v in flowbyfields.items():\n try:\n vLog.debug(\"fba activity %s data type is %s\",\n k, str(flowby_df[k].values.dtype))\n vLog.debug(\"standard %s data type is %s\", k, str(v[0]['dtype']))\n except:\n vLog.debug(\"Failed to find field %s in fba\", k)",
"def _check_value(value, field):\n if not value:\n return False\n\n if field.get('date', False):\n # Get date format\n date_format = field.get('date_format', False) or json_pattern_part.get('date_format', False) or self.master_json_pattern.get('date_format', False)\n if date_format:\n value = datetime.strptime(value, date_format)\n\n if field.get('name'):\n field_name = field.get('name')\n # Get the type of the column and cast if necessary\n field_type = model_obj._columns[field_name]._type\n if field_type == 'integer':\n try:\n value = int(value)\n except (TypeError, ValueError), e:\n logger.warning(\"Cannot convert value of integer field to int : %s for field %s\" % (value, field_name))\n logger.warning(e)\n logger.warn(\"Cannot convert value of integer field to int : %s for field %s\" % (value, field_name))\n elif field_type == 'float':\n try:\n value = float(value)\n except (TypeError, ValueError), e:\n logger.warning(\"Cannot convert value of float field to float : %s for field %s\" % (value, field_name))\n logger.warning(e)\n logger.warn(\"Cannot convert value of float field to float : %s for field %s\" % (value, field_name))\n return value",
"def check_type_field(self,df,node):\n errors = []\n if not 'type' in df:\n error = \"{} TSV does not have 'type' header!\".format(node)\n print(error)\n errors.append(error)\n else:\n if not list(set(df.type))[0]==node:\n error = \"{} TSV does not have correct 'type' field.\".format(node)\n print(error)\n errors.append(error)\n return errors",
"def _check_type(self, new_value):\n raise NotImplementedError",
"def check_types(begin, end):\n try:\n begin.get_midpoint()\n end.get_midpoint()\n except AttributeError:\n return False\n\n return isinstance(begin.get_midpoint(), type(end.get_midpoint()))",
"def _validate_from_and_to_time(from_time, to_time):\n # Validate From Time.\n from_datetime = datetime.datetime.strptime(from_time, '%Y-%m-%d %H:%M:%S')\n datetime_today = datetime.datetime.today()\n from_datetime_limit = datetime_today - datetime.timedelta(days=7)\n if from_datetime < from_datetime_limit:\n raise ValueError(\"The from time may not be earlier than '{from_datetime_limit}'.\".format(\n from_datetime_limit=from_datetime_limit\n ))\n if from_datetime > datetime_today:\n raise ValueError(\"The from time may not be in the future.\")\n # Validate To Time.\n to_datetime = datetime.datetime.strptime(to_time, '%Y-%m-%d %H:%M:%S')\n if to_datetime > datetime_today:\n raise ValueError(\"The to time may not be in the future.\")\n if to_datetime <= from_datetime:\n raise ValueError(\"The to time must after the from time.\")",
"def convert_data_types(fields, src_db='mysql', dest_db='postgres'):\n\n data_type_map = {\n 'mysql': {\n 'postgres': {\n 'date': 'date',\n 'tinyint': 'smallint',\n 'smallint': 'smallint',\n 'mediumint': 'integer',\n 'int': 'bigint',\n 'bigint': 'numeric',\n 'float': 'real',\n 'double': 'double precision',\n 'tinytext': 'varchar',\n 'mediumtext': 'varchar',\n 'longtext': 'varchar',\n 'varchar': 'varchar',\n 'text': 'varchar',\n 'char': 'char',\n 'binary': 'bytea',\n 'varbinary': 'bytea',\n 'tinyblob': 'bytea',\n 'blob': 'bytea',\n 'mediumblob': 'bytea',\n 'longblob': 'bytea',\n 'datetime': 'timestamp',\n 'time': 'time',\n 'decimal': 'decimal',\n 'json': 'jsonb'\n }\n }\n }\n\n for elem in fields:\n elem['data_type'] = data_type_map[src_db][dest_db][elem['data_type']]\n\n if elem['data_type'] == 'decimal':\n elem['data_type'] += f'''{int(elem['numeric_precision']), int(elem['numeric_scale'])}'''\n\n fields = {e['column_name']: e['data_type'] for e in fields}\n\n return fields",
"def _can_cast(from_dtype, to_dtype):\n if cudf.utils.utils.is_na_like(from_dtype):\n return True\n if isinstance(from_dtype, type):\n from_dtype = cudf.dtype(from_dtype)\n if isinstance(to_dtype, type):\n to_dtype = cudf.dtype(to_dtype)\n\n # TODO : Add precision & scale checking for\n # decimal types in future\n\n if isinstance(from_dtype, cudf.core.dtypes.DecimalDtype):\n if isinstance(to_dtype, cudf.core.dtypes.DecimalDtype):\n return True\n elif isinstance(to_dtype, np.dtype):\n if to_dtype.kind in {\"i\", \"f\", \"u\", \"U\", \"O\"}:\n return True\n else:\n return False\n elif isinstance(from_dtype, np.dtype):\n if isinstance(to_dtype, np.dtype):\n return np.can_cast(from_dtype, to_dtype)\n elif isinstance(to_dtype, cudf.core.dtypes.DecimalDtype):\n if from_dtype.kind in {\"i\", \"f\", \"u\", \"U\", \"O\"}:\n return True\n else:\n return False\n elif isinstance(to_dtype, cudf.core.types.CategoricalDtype):\n return True\n else:\n return False\n elif isinstance(from_dtype, cudf.core.dtypes.ListDtype):\n # TODO: Add level based checks too once casting of\n # list columns is supported\n if isinstance(to_dtype, cudf.core.dtypes.ListDtype):\n return np.can_cast(from_dtype.leaf_type, to_dtype.leaf_type)\n else:\n return False\n elif isinstance(from_dtype, cudf.core.dtypes.CategoricalDtype):\n if isinstance(to_dtype, cudf.core.dtypes.CategoricalDtype):\n return True\n elif isinstance(to_dtype, np.dtype):\n return np.can_cast(from_dtype._categories.dtype, to_dtype)\n else:\n return False\n else:\n return np.can_cast(from_dtype, to_dtype)",
"def _invalid_fldtype(row, grid):\n field_type = grid.GetCellValue(row=row, col=1)\n if field_type.strip() == '':\n return False, ''\n if field_type not in [mg.FLDTYPE_NUMERIC_LBL, \n mg.FLDTYPE_STRING_LBL, mg.FLDTYPE_DATE_LBL]:\n msg = _('%s is not a valid field type') % field_type\n return True, msg\n return False, ''"
] |
[
"0.60380894",
"0.60211766",
"0.5991003",
"0.5971663",
"0.59447753",
"0.5879657",
"0.58585453",
"0.5791437",
"0.5766009",
"0.57439035",
"0.5743668",
"0.57058144",
"0.5589982",
"0.5586574",
"0.5582527",
"0.5561692",
"0.5555909",
"0.55478966",
"0.5512225",
"0.54971504",
"0.54956496",
"0.5494064",
"0.548706",
"0.54775435",
"0.54459935",
"0.5436174",
"0.5422789",
"0.54226327",
"0.5421416",
"0.54205865"
] |
0.7540543
|
0
|
Plot a finite element mesh as a wireframe using edge connectivity.
|
def plot_wireframe(ax, cmesh, color='k', show=False):
coors = cmesh.coors
dim = cmesh.dim
edges = cmesh.get_conn(1, 0)
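    # Edge (dim 1) -> vertex (dim 0) connectivity; each edge lists its two endpoint vertices.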
ax = _get_axes(ax, dim)
for edge_vertices in edges.indices.reshape((edges.num, 2)):
cc = coors[edge_vertices]
if dim == 3:
ax.plot(cc[:, 0], cc[:, 1], cc[:, 2], color)
else:
ax.plot(cc[:, 0], cc[:, 1], color)
if show:
plt.show()
return ax
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def plot_fenics_mesh(mesh, new_fig=True):\n if(new_fig):\n plt.figure()\n\n plot(mesh)\n #plt.title(\"FEniCS mesh\")\n plt.show(block=False)\n\n pass",
"def test_plot_mesh(self):\n plt.close('all')\n\n #\n # Initialize\n #\n fig, ax = plt.subplots(3,3)\n plot = Plot()\n #\n # Define mesh\n # \n mesh = Mesh.newmesh(grid_size=(2,2))\n mesh.refine() \n mesh.root_node().children[1,1].mark(1)\n mesh.refine(1)\n \n # Plot simple mesh\n ax[0,0] = plot.mesh(ax[0,0], mesh)\n \n #\n # Flag a few cells\n # \n mesh.unmark(nodes=True)\n mesh.root_node().children[0,0].mark(2)\n mesh.root_node().children[1,0].mark(1)\n mesh.root_node().children[1,1].children['SW'].mark(3)\n mesh.root_node().children[1,1].children['NE'].mark(3)\n \n # Color flagged cells\n ax[0,1] = plot.mesh(ax[0,1], mesh, color_marked=[1,2,3], nested=True)\n \n # Plot vertex numbers\n ax[0,2] = plot.mesh(ax[0,2], mesh, vertex_numbers=True)\n \n # Plot edge numbers\n ax[1,0] = plot.mesh(ax[1,0], mesh, edge_numbers=True)\n \n # Plot cell numbers nested off\n mesh.refine(2)\n ax[1,1] = plot.mesh(ax[1,1], mesh, cell_numbers=True)\n \n # Plot cell numbers nested on\n ax[1,2] = plot.mesh(ax[1,2], mesh, cell_numbers=True, nested=True)\n\n # Plot dofs\n element = QuadFE(2,'Q1')\n ax[2,0] = plot.mesh(ax[2,0], mesh, element=element, dofs=True)\n \n # Assign dofs in a nested way\n ax[2,1] = plot.mesh(ax[2,1], mesh, element=element, dofs=True, \\\n nested=True)\n \n # Display only dofs of flagged nodes \n ax[2,2] = plot.mesh(ax[2,2], mesh, element=element, dofs=True, \\\n node_flag=3, nested=True, show_axis=True)",
"def plot_wireframe(Tfull):\n from mpl_toolkits.mplot3d import axes3d\n N = Tfull.shape[0]\n x = y = np.linspace(0, 1, N)\n X, Y = np.meshgrid(x,y)\n # Construct and return a function suitable for interactive demo\n def plot(elev=25, azim=50):\n fig = plt.figure(1, figsize=(14, 8))\n plt.clf()\n ax = fig.add_subplot(111, projection='3d')\n ax.plot_wireframe(X, Y, Tfull)\n ax.view_init(elev=elev, azim=azim)\n plt.axis('scaled')\n plt.xlabel('x (m)')\n plt.ylabel('y (m)')\n plt.title('T(x,y) on %dx%d grid' % (N,N))\n plot()\n return plot",
"def plotMesh(verts,tris):\n x = verts[:,0]\n y = verts[:,1]\n\n plt.figure()\n plt.gca().set_aspect('equal')\n plt.triplot(x, y, tris, 'k-')\n plt.title('Unstructured Mesh')\n plt.xlabel('distance (m)')\n plt.ylabel('distance (m)')",
"def plot(mesh):\n from scipy.spatial import delaunay_plot_2d\n fig = delaunay_plot_2d(SimpleMesh(mesh))\n ax = fig.gca()\n ax.set_aspect(\"equal\")\n return fig, ax",
"def plot_multigroup_flux(mesh, state, edges = False) :\n if mesh.dimension() == 1 :\n # get the mesh points\n x = mesh_axes(mesh)\n # plot the map\n plt.plot(x, f)\n \n elif mesh.dimension() == 2 :\n\n # Get the mesh axes and then make a grid of them for plotting.\n x, y = mesh_axes(mesh)\n X, Y = np.meshgrid(x, y)\n edgec = 'none'\n if edges :\n edgec = 'k'\n plt.pcolor(X, Y, f, cmap=colormap, edgecolors=edgec)\n \n else :\n print \"not ready for 3d\"\n return\n # show the plot\n plt.show()",
"def plot_mesh(corners):\r\n triangle = tri.Triangulation(corners[:, 0], corners[:, 1])\r\n\r\n refiner = tri.UniformTriRefiner(triangle)\r\n trimesh = refiner.refine_triangulation(subdiv=4)\r\n \r\n plt.figure(figsize=(6, 4))\r\n for i, mesh in enumerate((triangle, trimesh)):\r\n plt.subplot(1, 2, i+1)\r\n plt.triplot(mesh)\r\n plt.axis('off')\r\n plt.axis('equal')",
"def draw_f():\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n x_matrix = np.arange(-10, 11, 0.1)\n y_matrix = np.arange(-10, 11, 0.1)\n x_matrix, y_matrix = np.meshgrid(x_matrix, y_matrix)\n # print(x_matrix)\n u_matrix = x_matrix.copy()\n for i in range(x_matrix.shape[0]):\n for j in range(x_matrix.shape[0]):\n u_matrix[i][j] = f(x_matrix[i][j], y_matrix[i][j])\n surf = ax.plot_surface(x_matrix, y_matrix, u_matrix)\n\n plt.show()\n return surf",
"def plot_surface(self):\n X, Y = np.meshgrid(self.x, self.y)\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.plot_surface(X=X, Y=Y, Z=self.z)\n plt.show()",
"def DisplayMesh():\r\n \r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, Vertices, Triangles = CreateMatrixVTK(VTKString)\r\n \r\n fig = plt.figure()\r\n ax1 = fig.add_subplot(111,projection = '3d')\r\n ax1.plot_trisurf(Vertices[:,0],Vertices[:,1],Vertices[:,2],triangles= Triangles[:,1:])\r\n ax1.set_zlabel('z')\r\n ax1.set_ylabel('y')\r\n ax1.set_xlabel('x')\r\n plt.show()",
"def plot_wireframe(self, line_width=None, index_row=0, index_col=0, show=True, plotter=None, title='', font_size=10,\n title_location=\"upper_edge\", font_color='black', camera=None):\n if not plotter:\n plotter = pv.Plotter()\n plotter.subplot(index_column=index_col, index_row=index_row)\n plotter.add_text(title, position=title_location, font_size=font_size, color=font_color)\n if camera is not None:\n plotter.set_position(camera[0])\n plotter.set_focus(camera[1])\n plotter.set_viewup(camera[2])\n plotter.add_mesh(self.pv_mesh, style='wireframe', line_width=line_width, show_scalar_bar=False, color=\"white\")\n if show:\n plotter.show()\n return plotter",
"def Draw1D(mesh, coefs, keep=False, n_p=2, figsize=(20,4)):\n if n_p <= 2:\n n_p = 2\n \n eps = 1e-6 \n \n x_v = [p[0] for p in mesh.ngmesh.Points()]\n x_s = []\n f_s = {}\n\n miny = 1e99\n for f, name in coefs:\n f_s[name] = []\n \n x_s.append(nan)\n for f,name in coefs:\n f_s[name].append(nan)\n \n for el in mesh.ngmesh.Elements1D():\n left = mesh.ngmesh.Points()[el.points[0]][0]\n right = mesh.ngmesh.Points()[el.points[1]][0]\n for l in range(n_p):\n y = left + eps + (l / (n_p-1)) * (right - eps -left) \n x_s.append(y)\n for f,name in coefs:\n ff = f(mesh(y))\n miny = min(miny,ff)\n f_s[name].append(ff)\n \n x_s.append(nan)\n for f,name in coefs:\n f_s[name].append(nan)\n\n \n # plt.clf()\n # display.display(plt.gcf())\n plt.figure(figsize=figsize)\n for f,name in coefs:\n plt.plot(x_s,f_s[name],label=name)\n plt.plot(x_v,[miny for v in x_v],'|',label='vertices')\n plt.xlabel(\"x\")\n plt.legend()\n plt.show()\n if keep:\n display.clear_output(wait=True)",
"def plot_mesh_function(mesh, f, title=\"\", colormap = \"hot\", edges = False, mybounds = [], myticks = []) :\n if mesh.dimension() == 1 :\n # get the mesh points\n x = mesh_axes(mesh)\n # plot the map\n plt.plot(x, f)\n \n elif mesh.dimension() == 2 :\n\n # Get the mesh axes and then make a grid of them for plotting.\n x, y = mesh_axes(mesh)\n X, Y = np.meshgrid(x, y)\n # Reshape the function\n f = f.reshape(mesh.number_cells_x(), mesh.number_cells_y())\n if edges :\n plt.pcolor(X, Y, f, cmap=colormap, edgecolors='k')\n else :\n plt.pcolor(X, Y, f, cmap=colormap)\n plt.axis(\"scaled\") \n plt.xlabel(\"x [cm]\")\n plt.ylabel(\"y [cm]\")\n if len(myticks) :\n cbar = plt.colorbar(boundaries=mybounds,ticks=myticks)\n else : \n cbar = plt.colorbar()\n else :\n print \"not ready for 3d\"\n return\n plt.title(title)\n # show the plot\n plt.show()",
"def render_wireframe(self, **kwds):\n proj = self.projection()\n if self.ambient_dim()==3:\n return proj.render_wireframe_3d(**kwds)\n if self.ambient_dim()==2:\n return proj.render_outline_2d(**kwds)\n raise ValueError, \"render_wireframe is only defined for 2 and 3 dimensional polyhedra.\"",
"def plot_cube(ax: Axes, x: ArrayLike, y: ArrayLike, f_low: callable, f_upp: callable, **kwargs):\n # lower\n xm, ym = np.meshgrid(x, y)\n zm = f_low(xm, ym)\n ax.plot_surface(xm, ym, zm, **kwargs)\n\n # upper\n zm = f_upp(xm, ym)\n ax.plot_surface(xm, ym, zm, **kwargs)\n\n # north\n xm, ym = np.array([x, x]), y[0]*np.ones([2, len(y)])\n zm = np.array([f_low(x, y[0]), f_upp(x, y[0])])\n ax.plot_surface(xm, ym, zm, **kwargs)\n\n # south\n xm, ym = np.array([x, x]), y[-1]*np.ones([2, len(y)])\n zm = np.array([f_low(x, y[-1]), f_upp(x, y[-1])])\n ax.plot_surface(xm, ym, zm, **kwargs)\n\n # east\n xm, ym = x[0]*np.ones([2, len(x)]), np.array([y, y])\n zm = np.array([f_low(x[0], y), f_upp(x[0], y)])\n ax.plot_surface(xm, ym, zm, **kwargs)\n\n # west\n xm, ym = x[-1]*np.ones([2, len(x)]), np.array([y, y])\n zm = np.array([f_low(x[-1], y), f_upp(x[-1], y)])\n ax.plot_surface(xm, ym, zm, **kwargs)",
"def plot3d(self):\n plot_rupture_wire3d(self)",
"def mplot_mesh(meshtriang: df.Mesh) -> Tuple[plt.Figure, Any]:\n fig, ax = plt.subplots(1)\n ax.triplot(meshtriang, 'ko-', lw=1)\n return fig, ax",
"def plot(self, plotEdges=False, emphaseEdges=[], col=('b', 'k', 'r'), lims=None, ort=False):\n ax = a3.Axes3D(plt.figure())\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n ax.dist = 30\n ax.azim = -140\n if lims is None:\n lims = [0, 0, 0]\n lims[0] = [min(v.x for v in self.vertices),\n max(v.x for v in self.vertices)]\n lims[1] = [min(v.y for v in self.vertices),\n max(v.y for v in self.vertices)]\n lims[2] = [min(v.z for v in self.vertices),\n max(v.z for v in self.vertices)]\n if ort:\n ma = max(lims[i][1] for i in range(3))\n mi = min(lims[i][0] for i in range(3))\n lims = [[mi, ma]] * 3\n ax.set_xlim(lims[0])\n ax.set_ylim(lims[1])\n ax.set_zlim(lims[2])\n for f in self.faces:\n face = a3.art3d.Poly3DCollection([[v.coords()\n for v in f.vertices]])\n ax.add_collection3d(face)\n face.set_facecolor(col[0])\n face.set_edgecolor(col[1])\n if plotEdges or len(emphaseEdges)>0:\n for e in self.edges:\n edge = a3.art3d.Poly3DCollection([[e.nvt.coords(),\n e.pvt.coords()]])\n ax.add_collection3d(edge)\n if e in emphaseEdges:\n edge.set_edgecolor(col[2])\n else:\n edge.set_edgecolor(col[1])\n plt.show()",
"def visualise(self) -> None:\n nx_graph = nx.DiGraph()\n\n for v in self._vertices:\n if not v.predicate:\n name = v.name.split(\"/\")[-1]\n nx_graph.add_node(name, name=name, pred=v.predicate)\n\n for v in self._vertices:\n if not v.predicate:\n v_name = v.name.split(\"/\")[-1]\n # Neighbors are predicates\n for pred in self.get_neighbors(v):\n pred_name = pred.name.split(\"/\")[-1]\n for obj in self.get_neighbors(pred):\n obj_name = obj.name.split(\"/\")[-1]\n nx_graph.add_edge(v_name, obj_name, name=pred_name)\n\n plt.figure(figsize=(10, 10))\n _pos = nx.circular_layout(nx_graph)\n nx.draw_networkx_nodes(nx_graph, pos=_pos)\n nx.draw_networkx_edges(nx_graph, pos=_pos)\n nx.draw_networkx_labels(nx_graph, pos=_pos)\n names = nx.get_edge_attributes(nx_graph, \"name\")\n nx.draw_networkx_edge_labels(nx_graph, pos=_pos, edge_labels=names)",
"def surfaceRender(nodal_mesh, focus, ax=None):\n\t# If no axes were passed, generate new set of axes\n\tif not ax:\n\t\tfig = mplt.figure()\n\t\tax = fig.add_subplot(111, projection='3d')\n\n\t# Sort the mesh by first 3 columns\n\tnodal_mesh = nodal_mesh[nodal_mesh[:, 0].argsort()]\n\tnodal_mesh = nodal_mesh[nodal_mesh[:, 1].argsort(kind='mergesort')]\n\tnodal_mesh = nodal_mesh[nodal_mesh[:, 2].argsort(kind='mergesort')]\n\t\n\t# Set up number of divisions and calculate e for each division (as a ratio)\n\tnum_div = 20\n\te = [i/num_div for i in range(num_div + 1)]\n\t# Convert angular values from degrees to radians\n\trads = math.pi/180\n\tnodal_mesh[:, 1:3] *= rads\n\t# Store the shapes and sizes of the mesh values\n\tm = nodal_mesh.shape[0]\n\tsize_nodal_nu = np.where(nodal_mesh[:, 2] == 0)[0].size\n\tsize_nodal_phi = m/size_nodal_nu\n\t# Get the mu and theta values from the mesh\n\tnodal_nu = nodal_mesh[:size_nodal_nu, 1]\n\tnodal_phi = nodal_mesh[::size_nodal_nu, 2]\n\t# Convert apex node from prolate to cartesian, then plot with scatter\n\tif min(nodal_nu) == 0:\n\t\tx, y, z = mathhelper.prolate2cart(nodal_mesh[0, 0], nodal_mesh[0, 1], nodal_mesh[0, 2], focus)\n\t\tax.scatter(z, y, -x)\n\t\tstart_nu = 1\n\telse:\n\t\tstart_nu = 0\n\t# Plot circumferential element boundaries\n\tfor i in range(start_nu, size_nodal_nu):\n\t\tfor j in range(int(size_nodal_phi)):\n\t\t\t# Define nodal values for interpolation\n\t\t\tif j == size_nodal_phi-1:\n\t\t\t\tind0 = i\n\t\t\t\tp0 = 2*math.pi\n\t\t\telse:\n\t\t\t\tind0 = (j+1)*size_nodal_nu + i\n\t\t\t\tp0 = nodal_phi[j+1]\n\t\t\tind1 = (j)*size_nodal_nu + i\n\t\t\tp1 = nodal_phi[j]\n\t\t\t# Get mu and dM/dm1\n\t\t\tm0 = nodal_mesh[ind0, 0]\n\t\t\tdm0 = nodal_mesh[ind0, 3]\n\t\t\tm1 = nodal_mesh[ind1, 0]\n\t\t\tdm1 = nodal_mesh[ind1, 3]\n\t\t\t# Convert to cartesian\n\t\t\tn0x, n0y, n0z = mathhelper.prolate2cart(nodal_mesh[ind0, 0], nodal_mesh[ind0, 1], nodal_mesh[ind0, 2], focus)\n\t\t\t# Plot the node\n\t\t\tax.scatter(n0z, n0y, -n0x)\n\t\t\t# Plot the arc segments\n\t\t\tfor k in range(2, len(e)):\n\t\t\t\t# Determine starting point to use\n\t\t\t\tif k == 2:\n\t\t\t\t\tpt_x, pt_y, pt_z = n0x, n0y, n0z\n\t\t\t\telse:\n\t\t\t\t\tpt_x, pt_y, pt_z = x_here, y_here, z_here\n\t\t\t\t# Get lambda\n\t\t\t\thm0 = 1 - 3*(e[k]**2) + 2*(e[k]**3)\n\t\t\t\thdm0 = e[k]*(e[k] - 1)**2\n\t\t\t\thm1 = (e[k]**2)*(3 - 2*e[k])\n\t\t\t\thdm1 = (e[k]**2)*(e[k] - 1)\n\t\t\t\tm = hm0 * m0 + hdm0 * dm0 + hm1 * m1 + hdm1 * dm1\n\t\t\t\t# Get theta\n\t\t\t\tp_here = p0 - e[k]*(p0 - p1)\n\t\t\t\t# Convert to cartesian\n\t\t\t\tx_here, y_here, z_here = mathhelper.prolate2cart(m, nodal_nu[i], p_here, focus)\n\t\t\t\t# Create vectors\n\t\t\t\tx = np.append(pt_x, x_here)\n\t\t\t\ty = np.append(pt_y, y_here)\n\t\t\t\tz = np.append(pt_z, z_here)\n\t\t\t\t# Plot segments\n\t\t\t\tax.plot(z, y, -x, 'k-.')\n\t# Plot longitudinal element boundaries\n\tfor i in range(int(size_nodal_phi)):\n\t\tfor j in range(size_nodal_nu-1):\n\t\t\t# Define nodal values needeed for interpolation\n\t\t\tind0 = i*size_nodal_nu + j\n\t\t\tind1 = ind0 + 1\n\t\t\tn0 = nodal_nu[j]\n\t\t\tn1 = nodal_nu[j+1]\n\t\t\t# Get lambda and dL/de2\n\t\t\tm0 = nodal_mesh[ind0, 0]\n\t\t\tdm0 = nodal_mesh[ind0, 4]\n\t\t\tm1 = nodal_mesh[ind1, 0]\n\t\t\tdm1 = nodal_mesh[ind1, 4]\n\t\t\t# Convert nodal points to cartesian\n\t\t\tn0x, n0y, n0z = mathhelper.prolate2cart(nodal_mesh[ind0, 0], nodal_mesh[ind0, 1], nodal_mesh[ind0, 2], focus)\n\t\t\t# Plot arc\n\t\t\tfor k in range(2, len(e)):\n\t\t\t\t# Determine point to 
use\n\t\t\t\tif k == 2:\n\t\t\t\t\tpt_x, pt_y, pt_z = n0x, n0y, n0z\n\t\t\t\telse:\n\t\t\t\t\tpt_x, pt_y, pt_z = x_here, y_here, z_here\n\t\t\t\t# Get lambda\n\t\t\t\thm0 = 1 - 3*(e[k]**2) + 2*(e[k]**3)\n\t\t\t\thdm0 = e[k]*(e[k] - 1)**2\n\t\t\t\thm1 = (e[k]**2)*(3 - 2*e[k])\n\t\t\t\thdm1 = (e[k]**2)*(e[k] - 1)\n\t\t\t\tm = hm0 * m0 + hdm0 * dm0 + hm1 * m1 + hdm1 * dm1\n\t\t\t\t# Get nu\n\t\t\t\tn_here = n0 + e[k]*(n1-n0)\n\t\t\t\t# Convert to cartesian\n\t\t\t\tx_here, y_here, z_here = mathhelper.prolate2cart(m, n_here, nodal_phi[i], focus)\n\t\t\t\t# Append the vectors for plotting\n\t\t\t\tx = np.append(pt_x, x_here)\n\t\t\t\ty = np.append(pt_y, y_here)\n\t\t\t\tz = np.append(pt_z, z_here)\n\t\t\t\t# Plot the segment\n\t\t\t\tax.plot(z, y, -x, 'k-.')\n\t\t\t\t\n\treturn(ax)",
"def plotWholeRoom(mesh):\r\n fig = plt.figure()\r\n ax = fig.gca(projection='3d')\r\n X = np.arange(0, mesh.xLength+mesh.meshsize, mesh.meshsize)\r\n Y = np.arange(0, mesh.yLength+mesh.meshsize, mesh.meshsize)\r\n X, Y = np.meshgrid(X,Y)\r\n numberOfXNodes = mesh.x_res#round(mesh.xLength/mesh.meshsize)+1\r\n numberOfYNodes = mesh.y_res#round(mesh.yLength/mesh.meshsize)+1\r\n Z = np.array([[mesh.grid[i,j].funcVal for i in range(numberOfYNodes)] for j in range(numberOfXNodes)])\r\n if mesh.y_res==2:\r\n print()\r\n surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm,\r\n linewidth=0, antialiased=False)\r\n # add vmin=4, vmax=41, to define lower and upper value for the color-scheme\r\n # set limits for z-axis\r\n ax.set_zlim(np.amin(Z)-mesh.meshsize, np.amax(Z)+mesh.meshsize)\r\n # don't know what these two lines are for\r\n # x.zaxis.set_major_locator(LinearLocator(10))\r\n # ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))\r\n # don't know what these two lines are for\r\n fig.colorbar(surf, shrink=0.5, aspect=5)\r\n plt.show() \r\n return fig",
"def visualize_graph(edges_lst):\n G = nx.Graph()\n for edge in edges_lst:\n start = edge[0]\n end = edge[1]\n weight = edge[2]\n G.add_edge(start, end, weight=weight)\n pos = nx.planar_layout(G)\n nx.draw_networkx(G, pos)\n labels = nx.get_edge_attributes(G, 'weight')\n nx.draw_networkx_edge_labels(G, pos, edge_labels=labels)\n plt.show()",
"def plotSurface(X):\n from mpl_toolkits.mplot3d import Axes3D\n from mpl_toolkits.mplot3d import proj3d\n f=plt.figure()\n ax=f.add_subplot(111,projection='3d')\n xi=np.arange(10,14,0.05)\n yi=np.arange(12,16,0.05)\n z = matplotlib.mlab.griddata(X[:,0], X[:,1], X[:,2], xi, yi, interp='nn')\n x, y = np.meshgrid(xi, yi)\n ax.plot_surface(x, y, z)\n return f",
"def test_surf():\n def f(x, y):\n sin, cos = numpy.sin, numpy.cos\n return sin(x + y) + sin(2 * x - y) + cos(3 * x + 4 * y)\n\n x, y = numpy.mgrid[-7.:7.05:0.1, -5.:5.05:0.05]\n s = surf(x, y, f)\n mlab.show()\n #cs = contour_surf(x, y, f, contour_z=0)\n return",
"def mesh_vizu(ax, f, col, xlim, ylim, scope=0):\n x, y = np.mgrid[xlim[0]:xlim[1]:0.2, ylim[0]:ylim[1]:0.2]\n z = np.zeros(x.shape)\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n z[i, j] = f(np.array([[x[i, j], y[i, j]]]))\n cs = ax.contour(x, y, z, colors=col, levels=[0.5])\n h,_ = cs.legend_elements()\n return cs, h",
"def plot_vertices(self, f=None, index_row=0, index_col=0, show=True, plotter=None, cmap='jet', title='',\n title_location=\"upper_edge\", font_size=10, font_color='black', camera=None):\n\n if not plotter:\n plotter = pv.Plotter()\n plotter.subplot(index_column=index_col, index_row=index_row)\n plotter.add_text(title, position=title_location, font_size=font_size, color=font_color)\n if camera is not None:\n plotter.set_position(camera[0])\n plotter.set_focus(camera[1])\n plotter.set_viewup(camera[2])\n plotter.add_mesh(self.vertices, scalars=f, cmap=cmap, render_points_as_spheres=True)\n if show:\n plotter.show()\n return plotter",
"def __plot_delaunay(self, ax=None) -> None:\n for simplex in self.hull.simplices:\n ax.plot(self.points[simplex, 0], self.points[simplex, 1], \"r-\")\n\n tri = Delaunay(self.points)\n ax.triplot(self.points[:, 0], self.points[:, 1], tri.simplices.copy(), lw=1)",
"def qp(F, V):\n import matplotlib.pyplot\n from mpl_toolkits.mplot3d import Axes3D\n #\n # Plot the surface\n fig = matplotlib.pyplot.figure()\n axs = fig.add_subplot(1,1,1, projection=\"3d\")\n axs.plot_trisurf(V[:,0], V[:,1], V[:,2], triangles=F)\n #\n # Label the axes and set them equal\n axs.set_xlabel(\"x\")\n axs.set_ylabel(\"y\")\n axs.set_zlabel(\"z\")\n axs.axis(\"equal\")\n #\n # And show the figure\n matplotlib.pyplot.show()\n return fig",
"def PlotMeshNumbering(self, figure=None, show_plot=True):\n\n self.__do_essential_memebers_exist__()\n\n import matplotlib.pyplot as plt\n import matplotlib as mpl\n\n if self.element_type == \"tri\":\n\n if figure is None:\n figure = plt.figure()\n plt.triplot(self.points[:,0],self.points[:,1], self.elements[:,:3])\n plt.tricontourf(self.points[:,0], self.points[:,1], self.elements[:,:3], np.ones(self.points.shape[0]), 100,alpha=0.3)\n\n for i in range(0,self.elements.shape[0]):\n coord = self.points[self.elements[i,:],:]\n x_avg = np.sum(coord[:,0])/self.elements.shape[1]\n y_avg = np.sum(coord[:,1])/self.elements.shape[1]\n plt.text(x_avg,y_avg,str(i),backgroundcolor='#F88379',ha='center')\n\n for i in range(0,self.points.shape[0]):\n plt.text(self.points[i,0],self.points[i,1],str(i),backgroundcolor='#0087BD',ha='center')\n\n plt.axis('equal')\n if show_plot:\n plt.show()\n\n elif self.element_type == \"quad\":\n\n if figure is None:\n figure = plt.figure()\n point_radius = 3.\n\n C = self.InferPolynomialDegree() - 1\n\n edge_elements = self.GetElementsEdgeNumberingQuad()\n reference_edges = NodeArrangementQuad(C)[0]\n reference_edges = np.concatenate((reference_edges,reference_edges[:,1,None]),axis=1)\n reference_edges = np.delete(reference_edges,1,1)\n\n self.GetEdgesQuad()\n x_edges = np.zeros((C+2,self.all_edges.shape[0]))\n y_edges = np.zeros((C+2,self.all_edges.shape[0]))\n\n BasesOneD = np.eye(2,2)\n for iedge in range(self.all_edges.shape[0]):\n ielem = edge_elements[iedge,0]\n edge = self.elements[ielem,reference_edges[edge_elements[iedge,1],:]]\n x_edges[:,iedge], y_edges[:,iedge] = self.points[edge,:].T\n\n\n plt.plot(x_edges,y_edges,'-k')\n\n for i in range(self.elements.shape[0]):\n coord = self.points[self.elements[i,:],:]\n x_avg = np.sum(coord[:,0])/self.elements.shape[1]\n y_avg = np.sum(coord[:,1])/self.elements.shape[1]\n plt.text(x_avg,y_avg,str(i),backgroundcolor='#F88379',ha='center')\n\n for i in range(0,self.points.shape[0]):\n plt.text(self.points[i,0],self.points[i,1],str(i),backgroundcolor='#0087BD',ha='center')\n\n plt.axis('equal')\n if show_plot:\n plt.show()\n\n elif self.element_type == \"tet\" or self.element_type == \"hex\":\n\n import matplotlib as mpl\n import os\n os.environ['ETS_TOOLKIT'] = 'qt4'\n from mayavi import mlab\n\n if figure is None:\n figure = mlab.figure(bgcolor=(1,1,1),fgcolor=(1,1,1),size=(800,600))\n view = mlab.view()\n figure.scene.disable_render = True\n\n color = mpl.colors.hex2color('#F88379')\n\n linewidth = 3.\n # trimesh_h = mlab.triangular_mesh(self.points[:,0],\n # self.points[:,1], self.points[:,2], self.faces[:,:3],\n # line_width=linewidth,tube_radius=linewidth,color=(0,0.6,0.4),\n # representation='wireframe') # representation='surface'\n\n # # CHANGE LIGHTING OPTION\n # trimesh_h.actor.property.interpolation = 'phong'\n # trimesh_h.actor.property.specular = 0.1\n # trimesh_h.actor.property.specular_power = 5\n\n # PLOTTING EDGES\n from Florence.PostProcessing import PostProcess\n tmesh = PostProcess(3,3).Tessellate(self, np.zeros_like(self.points), interpolation_degree=0,\n plot_points=True, plot_edges=True, plot_surfaces=False)\n\n x_edges = tmesh.x_edges\n y_edges = tmesh.y_edges\n z_edges = tmesh.z_edges\n connections = tmesh.connections\n\n src = mlab.pipeline.scalar_scatter(x_edges.T.copy().flatten(), y_edges.T.copy().flatten(), z_edges.T.copy().flatten())\n src.mlab_source.dataset.lines = connections\n h_edges = mlab.pipeline.surface(src, color = (0,0.6,0.4), line_width=linewidth)\n # AVOID WARNINGS\n # lines = 
mlab.pipeline.stripper(src)\n # h_edges = mlab.pipeline.surface(lines, color = (0,0.6,0.4), line_width=linewidth)\n\n # ELEMENT NUMBERING\n # for i in range(0,self.elements.shape[0]):\n # coord = self.points[self.elements[i,:],:]\n # x_avg = np.sum(coord[:,0])/self.elements.shape[1]\n # y_avg = np.sum(coord[:,1])/self.elements.shape[1]\n # z_avg = np.sum(coord[:,2])/self.elements.shape[1]\n\n # # mlab.text3d(x_avg,y_avg,z_avg,str(i),color=color)\n # mlab.text3d(x_avg,y_avg,z_avg,str(i),color=(0,0,0.),scale=2)\n\n # POINT NUMBERING\n for i in range(self.elements.shape[0]):\n for j in range(self.elements.shape[1]):\n text_obj = mlab.text3d(self.points[self.elements[i,j],0],\n self.points[self.elements[i,j],1],self.points[self.elements[i,j],2],str(self.elements[i,j]),\n color=(0,0,0.),scale=0.05)\n\n\n figure.scene.disable_render = False\n\n if show_plot:\n # mlab.view(*view)\n mlab.show()",
"def toggle_wireframe(self):\n self.view['wireframe'] = not self.view['wireframe']\n self.update_flags()"
] |
[
"0.6795343",
"0.66574126",
"0.66412956",
"0.64610934",
"0.6281666",
"0.61296725",
"0.60942197",
"0.6073105",
"0.60276896",
"0.5883175",
"0.58822703",
"0.5879725",
"0.5844625",
"0.5747496",
"0.5702003",
"0.565129",
"0.5606482",
"0.5584321",
"0.5578948",
"0.557612",
"0.55558884",
"0.5493991",
"0.54859924",
"0.548444",
"0.54389054",
"0.54184747",
"0.5382252",
"0.5366345",
"0.5362142",
"0.5350392"
] |
0.70331657
|
0
|
Label mesh topology entities using cell-local ids.
|
def label_local_entities(ax, cmesh, edim, color='b', fontsize=10, show=False):
coors = cmesh.get_centroids(edim)
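    # Centroids of the edim-dimensional entities that will be labelled.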
dim = cmesh.dim
centres = cmesh.get_centroids(dim)
conn = cmesh.get_conn(dim, edim)
off = conn.offsets
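    # off[ii]:off[ii+1] slices the entities incident to cell ii in conn.indices.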
ax = _get_axes(ax, dim)
eps = 0.1
oeps = 1.0 - eps
    for ii in range(conn.num):
for ic, ie in enumerate(conn.indices[off[ii]:off[ii+1]]):
# Shift labels towards the cell centre.
cc = oeps * coors[ie] + eps * centres[ii]
if dim == 3:
ax.text(cc[0], cc[1], cc[2], ic,
color=color, fontsize=fontsize)
else:
ax.text(cc[0], cc[1], ic,
color=color, fontsize=fontsize)
if show:
plt.show()
return ax
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def compute_localizations_labels(depc, loc_id_list, config=None):\n from os.path import exists, join\n\n logger.info('[ibs] Process Localization Labels')\n logger.info('config = {!r}'.format(config))\n # Get controller\n ibs = depc.controller\n\n if config['labeler_algo'] in ['pipeline', 'cnn']:\n gid_list_, gid_list, chip_list = get_localization_chips(\n ibs,\n loc_id_list,\n target_size=(128, 128),\n axis_aligned=config['labeler_axis_aligned'],\n )\n result_list = ibs.generate_chip_label_list(chip_list, **config)\n elif config['labeler_algo'] in ['azure']:\n raise NotImplementedError('Azure is not implemented for images')\n elif config['labeler_algo'] in ['densenet']:\n from wbia.algo.detect import densenet\n\n target_size = (\n densenet.INPUT_SIZE,\n densenet.INPUT_SIZE,\n )\n gid_list_, gid_list, chip_list = get_localization_chips(\n ibs,\n loc_id_list,\n target_size=target_size,\n axis_aligned=config['labeler_axis_aligned'],\n )\n config = dict(config)\n config['classifier_weight_filepath'] = config['labeler_weight_filepath']\n nonce = ut.random_nonce()[:16]\n cache_path = join(ibs.cachedir, 'localization_labels_{}'.format(nonce))\n assert not exists(cache_path)\n ut.ensuredir(cache_path)\n chip_filepath_list = []\n for index, chip in enumerate(chip_list):\n chip_filepath = join(cache_path, 'chip_%08d.png' % (index,))\n cv2.imwrite(chip_filepath, chip)\n assert exists(chip_filepath)\n chip_filepath_list.append(chip_filepath)\n result_gen = densenet.test_dict(chip_filepath_list, return_dict=True, **config)\n result_list = list(result_gen)\n ut.delete(cache_path)\n\n assert len(gid_list) == len(result_list)\n\n # Release chips\n chip_list = None\n\n # Group the results\n group_dict = {}\n for gid, result in zip(gid_list, result_list):\n if gid not in group_dict:\n group_dict[gid] = []\n group_dict[gid].append(result)\n\n # Return the results\n for gid in gid_list_:\n result_list = group_dict.get(gid, None)\n if result_list is None:\n ret_tuple = (\n np.array([]),\n np.array([]),\n np.array([]),\n np.array([]),\n np.array([]),\n [],\n )\n else:\n zipped_list = list(zip(*result_list))\n ret_tuple = (\n np.array(zipped_list[0]),\n np.array(zipped_list[1]),\n np.array(zipped_list[2]),\n np.array(zipped_list[3]),\n np.array(zipped_list[4]),\n list(zipped_list[5]),\n )\n yield ret_tuple",
"def labelComponents26(cube):\n x,y,z = np.where(cube);\n label = np.zeros(cube.shape, dtype = 'uint8');\n ncomp = 0;\n for xp,yp,zp in zip(x,y,z):\n if label[xp,yp,zp] == 0:\n ncomp += 1;\n label = labelNeighbours26(cube, label, xp,yp,zp, ncomp);\n return ncomp, label",
"def label_map_gen(df_main):\n # Function to flatten a list of list\n flatten = lambda l: [item for sublist in l for item in sublist]\n labels = list(set(flatten([l.split(' ') for l in df_main['tags'].values])))\n\n # Create list of labels\n label_map = {l: i for i, l in enumerate(labels)}\n return label_map",
"def label(gt_dataset, volume_dim, voxel_dim, labeling_params):\n labeled_volumes = dict()\n labeled_cells = dict()\n #Use global density and reduce the size of gt_dataset here\n global_density = labeling_params[\"global_density\"]\n gt_dataset = {k: v for k,v in gt_dataset.items() if random_sample() < global_density}\n #Label in the order specified in the configuration\n layers = sorted(labeling_params.keys())\n #Remove global_density\n layers.remove(\"global_density\")\n for layer in layers:\n print \"Labeling {}\".format(layer)\n fluorophore = labeling_params[layer]['fluorophore']\n volume, cells = brainbow(gt_dataset, volume_dim, voxel_dim, **labeling_params[layer])\n if fluorophore in labeled_volumes:\n labeled_volumes[fluorophore] += volume\n labeled_cells[fluorophore] |= cells\n else:\n labeled_volumes[fluorophore] = volume\n labeled_cells[fluorophore] = cells\n return labeled_volumes, labeled_cells",
"def assignLabels(self):\n clusters = np.arange(0, len(self.V))[self.V < self.V1] #indexes self.V, volumes_sorted, and oldOrder\n self.clusterV = self.volumes_sorted[clusters]\n clusters = self.oldOrder[clusters] #indexes volumes\n self.clusters = self.nonBI[clusters] #indexes self.vor and self.data\n self.easyLabel = np.zeros(len(self.data))\n self.easyLabel[self.clusters] = 1\n print('Out of ' + str(len(self.data)) + ' particles, ' + str(len(self.clusters)) + ' (' + str(round(len(self.clusters)*100/len(self.data), 3)) +' %) are labelled as cluster particles.')",
"def label_global_entities(ax, cmesh, edim, color='b', fontsize=10, show=False):\n coors = cmesh.get_centroids(edim)\n dim = cmesh.dim\n\n ax = _get_axes(ax, dim)\n\n for ii, cc in enumerate(coors):\n if dim == 3:\n ax.text(cc[0], cc[1], cc[2], ii,\n color=color, fontsize=fontsize)\n\n else:\n ax.text(cc[0], cc[1], ii,\n color=color, fontsize=fontsize)\n\n if show:\n plt.show()\n\n return ax",
"def propagate_labels_simple(regions,labels):\n rlabels,_ = label(regions)\n cors = correspondences(rlabels,labels,False)\n outputs = zeros(amax(rlabels)+1,'i')\n for o,i in cors.T: outputs[o] = i\n outputs[0] = 0\n return outputs[rlabels]",
"def fix_label_names():\n\n assert trace.cpu.trace_done\n binary_addr = memorymanager.BinaryAddr(0)\n while binary_addr < len(classifications):\n c = classifications[binary_addr]\n if c is not None:\n dummy = [str(x) for x in c.as_string_list(binary_addr, None)]\n binary_addr += c.length()\n else:\n binary_addr += 1",
"def label_joints():\n side_dict = {'C': 0,\n 'L': 1,\n 'R': 2}\n for jnt in mc.ls(type='joint'):\n mc.setAttr('{}.side'.format(jnt), side_dict[jnt.split('_')[0]])\n mc.setAttr('{}.type'.format(jnt), 18)\n mc.setAttr('{}.otherType'.format(jnt), jnt.split('_')[1], type=\"string\")",
"def element_labels(hosts, *loci):\n num_loci = int(len(loci) / 2) # number of loci\n elements = []\n for i in hosts:\n for j in range(num_loci):\n locus_abbrev, allele_range = loci[j*2:(j+1)*2]\n elements.append( [\"%s%s%d\" % (i,locus_abbrev,k) for k in allele_range] )\n return elements",
"def labelTable(self,table):\n \n for sslice,_, lFields in self._lLabellingInstruction:\n for field in lFields:\n if field is not None:\n try:\n for cell in np.nditer(table.getNPArray()[sslice],['refs_ok'],op_dtypes=np.dtype(object)):\n cell[()].addField(field.cloneMe())\n except: pass",
"def label_to_grid_label(self, bbox, padw, padh, img_width, img_height):\r\n grid_label = np.zeros((self.grid_S, self.grid_S, self.dim_len))\r\n grid_size = 1.0 / self.grid_S\r\n\r\n for label in bbox:\r\n label = label.split(\" \")\r\n label = [float(x.strip()) for x in label] #[class, cx, cy, w, h]\r\n\r\n if padw != 0:\r\n label[1] = (label[1] * img_width + padw) / img_height\r\n label[2] = (label[2] * img_width) / img_height\r\n \r\n elif padh != 0:\r\n label[3] = (label[3] * img_height + padh) / img_width\r\n label[4] = (label[4] * img_height) / img_width\r\n \r\n grid_x = int(label[1] // grid_size)\r\n grid_y = int(label[2] // grid_size)\r\n\r\n # center coordinate in the grid [1, gx, gy, w, h, 1, gx, gy, w, h, class[]]\r\n gx = (label[1] - grid_x * grid_size) / grid_size\r\n gy = (label[2] - grid_y * grid_size) / grid_size\r\n\r\n grid_label[grid_x, grid_y, 0 : 5 * self.grid_B] = np.array([1, gx, gy, label[3], label[4]] * self.grid_B)\r\n grid_label[grid_x, grid_y, 5 * self.grid_B + int(label[0])] = 1\r\n \r\n return grid_label",
"def label(filenames, train_path='../data/train_molecules_30.mat'):\n unlabeled = [scipy.io.loadmat(fname) for fname in filenames]\n unlabeled_X = np.vstack([data['X'] for data in unlabeled])\n X, Y = load_data(train_path, shape=(-1, 30, 30, 30))\n\n num_unlabeled = unlabeled_X.shape[0]\n unlabeled_Y = np.zeros(num_unlabeled) - 1\n unlabeled_Y = unlabeled_Y.reshape((-1, 1))\n Y = Y.reshape((-1, 1))\n Y_all = np.vstack((Y, unlabeled_Y))\n\n X_all = np.vstack((X, unlabeled_X))\n X_all = X_all.reshape((-1, 27000))\n\n label_prop_model = LabelSpreading()\n label_prop_model.fit(X_all, Y_all)\n Y_all = label_prop_model.transduction_\n unlabeled_Y = Y_all[num_unlabeled:]\n return (unlabeled_X, unlabeled_Y), (X_all, Y_all)",
"def add_local_label(runtime_addr, name, start_addr, end_addr, move_id=None):\n\n label = labelmanager.labels[runtime_addr]\n label.add_local_label(name, start_addr, end_addr, move_id)",
"def label_2d_latent(model,data_loader,embeddings=False):\n model.eval()\n plt.style.use('seaborn')\n fig, ax = plt.subplots(dpi=100)\n for batch_idx, (data,target) in enumerate(data_loader):\n data = data.float()\n z, recon_batch, mu, logvar = model(data.view(-1,numpy.prod(data.shape[-2:])))\n z = z.data.cpu().numpy()\n plt.scatter(z[:,0],z[:,1],s=10,c=target,cmap='cool',alpha=0.5)\n if embeddings:\n for i,img in enumerate(data):\n imagebox = OffsetImage(data[i,0], zoom=0.4)\n ab = AnnotationBbox(imagebox, (z[i,0], z[i,1]),frameon=False)\n ax.add_artist(ab)\n plt.xlabel('Latent variable 1')\n plt.ylabel('Latent variable 2')\n plt.tight_layout()",
"def _compute_labels(self, element, data, mapping):\n lidx = element.nodes.get_dimension(self.label_index)\n if element.vdims:\n edges = Dataset(element)[element[element.vdims[0].name]>0]\n nodes = list(np.unique([edges.dimension_values(i) for i in range(2)]))\n nodes = element.nodes.select(**{element.nodes.kdims[2].name: nodes})\n else:\n nodes = element\n\n value_dim = element.vdims[0]\n labels = [lidx.pprint_value(v) for v in nodes.dimension_values(lidx)]\n if self.show_values:\n value_labels = []\n for i, node in enumerate(element._sankey['nodes']):\n value = value_dim.pprint_value(node['value'])\n label = '%s - %s' % (labels[i], value)\n if value_dim.unit:\n label += ' %s' % value_dim.unit\n value_labels.append(label)\n labels = value_labels\n\n ys = nodes.dimension_values(1)\n nodes = element._sankey['nodes']\n offset = (nodes[0]['x1']-nodes[0]['x0'])/4.\n if self.label_position == 'right':\n xs = np.array([node['x1'] for node in nodes])+offset\n else:\n xs = np.array([node['x0'] for node in nodes])-offset\n data['text_1'] = dict(x=xs, y=ys, text=[str(l) for l in labels])\n align = 'left' if self.label_position == 'right' else 'right'\n mapping['text_1'] = dict(text='text', x='x', y='y', text_baseline='middle', text_align=align)",
"def labelNeighbours26(data, label, x0,y0,z0, index):\n shape = label.shape;\n for xp in range(max(0,-1+x0),min(2+x0, shape[0])):\n for yp in range(max(0,-1+y0),min(2+y0, shape[1])):\n for zp in range(max(0,-1+z0),min(2+z0, shape[2])):\n if data[xp,yp,zp] and label[xp,yp,zp] == 0:\n label[xp,yp,zp] = index;\n label = labelNeighbours26(data, label, xp,yp,zp, index);\n return label;",
"def nodules_connection(label_data, label_header):\n\n\n las_labels = measure.label(label_data,\n neighbors=8,\n background=0,\n return_num=True)\n\n las_labels_nzero = np.nonzero(las_labels[0])\n [xdif, ydif, zdif] = [np.amax(las_labels_nzero[0])-np.amin(las_labels_nzero[0]),\n np.amax(las_labels_nzero[1])-np.amin(las_labels_nzero[1]),\n np.amax(las_labels_nzero[2])-np.amin(las_labels_nzero[2])]\n\n # conversion pixels to mm\n dims = label_header['pixdim']\n if label_header['xyzt_units'] == 10:\n #dimensions in mm\n print('xyzt_units=10')\n xdif=dims[1]*xdif\n ydif=dims[2]*ydif\n zdif=dims[3]*zdif\n\n\n return las_labels,[xdif,ydif,zdif]",
"def set_mpls_tbl_labels(self):\n\n # for all switches\n for sw_name, controller in self.controllers.items():\n pass\n # TODO PART 1 TASK 2\n # 1) for all the hosts connected to this switch add the FEC_tbl entry\n # 2) for all switches connected to this switch add the 2 mplt_tbl entries",
"def label_id(self):\n return int(self.instance_id // 1000)",
"def label_hemispheres( x, template, templateLR, reg_iterations=[200,50,2,0] ):\n reg = ants.registration(\n ants.rank_intensity(x),\n ants.rank_intensity(template),\n 'SyN',\n aff_metric='GC',\n syn_metric='CC',\n syn_sampling=2,\n reg_iterations=reg_iterations,\n random_seed = 1 )\n return( ants.apply_transforms( x, templateLR, reg['fwdtransforms'],\n interpolator='genericLabel') )",
"def _update_label(self, outer_pos, inner_pos, new_label):\n r, c = outer_pos\n ir, ic = inner_pos\n self.inner_boards[r][c][ir][ic][\"text\"] = new_label",
"def to_local(self, id_tensor):\n ...",
"def import_labels():\n\n dict_labels = df.set_index('id').to_dict()['breed']\n unique_labels = sorted(list(set(dict_labels.values())))\n for index, label in dict_labels.items():\n dict_labels[index] = unique_labels.index(label)\n return dict_labels, unique_labels",
"def cells_connected_components(self,edge_mask,cell_mask=None,randomize=True):\n # further constrain to edges which are not on the boundary\n edge_mask=edge_mask & np.all( self.edges['cells']>=0,axis=1)\n\n cell_pairs = self.edges['cells'][edge_mask]\n\n # use scipy graph algorithms to find the connections\n from scipy import sparse\n\n graph=sparse.csr_matrix( (np.ones(len(cell_pairs)), \n (cell_pairs[:,0],cell_pairs[:,1])),\n shape=(self.Ncells(),self.Ncells()) )\n\n n_comps,labels=sparse.csgraph.connected_components(graph,directed=False)\n\n if cell_mask is None:\n cell_mask=slice(None)\n\n unique_labels=np.unique( labels[cell_mask] ) \n labels[~cell_mask]=-1 # mark dry cells as -1\n\n # create an array which takes the original label, maps it to small, sequential\n # label.\n\n if not randomize:\n new_labels=np.arange(len(unique_labels)) # -1 will be handled separately\n else:\n new_labels=np.argsort(np.random.random(len(unique_labels)))\n\n mapper=np.zeros( 1+unique_labels.max() ) - 1 # map original labels to compressed labels\n mapper[unique_labels]=new_labels\n labels=mapper[labels]\n labels[~cell_mask] = -1\n labels=np.ma.array(labels,mask=~cell_mask)\n return labels",
"def label_image(image):\n \n #Label the blobs using ndimage\n labeled_blobs, n_features = ndimage.label(b_image)\n \n #calculate the center of mass of each labelled feature\n centers = ndimage.center_of_mass(b_image, labeled_blobs, np.arange(n_features) + 1)\n \n return labeled_blobs, n_features, centers",
"def merge_entities_on_identifiers(self) -> None:\n if self.forward_map:\n next_local_id = max(list(self.forward_map.keys())) + 1\n else:\n next_local_id = 1\n backward_keys = set(self.backward_map.keys())\n for kb in self.kbs:\n for p in kb.pathways:\n for e in p.entities:\n if e.xrefs:\n xref_overlap = set(e.xrefs) & backward_keys\n if xref_overlap:\n local_id = self.backward_map[xref_overlap.pop()]\n e.lid = local_id\n elif len(e.xrefs) == 1:\n self.forward_map[next_local_id] = [e.xrefs[0]]\n self.backward_map[e.xrefs[0]] = next_local_id\n e.lid = next_local_id\n next_local_id += 1\n else:\n print(e.xrefs)\n raise UnboundLocalError(\"Unknown identifiers\")\n\n kb.dump_pickle(kb.loc)\n self.save_id_dict()",
"def _get_labels(self, ind):\n pass",
"def compute_cell_location_fast(seg_img: np.ndarray, all_labels: np.ndarray) \\\n -> (nx.graph, np.ndarray):\n g = nx.Graph()\n centers = cell_center_fast(seg_img, all_labels) # was 6\n\n # Compute vertices\n for i in all_labels:\n if i != 0:\n g.add_node(i)\n\n # Compute edges\n for i in all_labels:\n if i != 0:\n for j in all_labels:\n if j != 0:\n if i != j:\n pos1 = centers[i]\n pos2 = centers[j]\n distance = np.sqrt((pos1[0]-pos2[0])**2 +\n (pos1[1]-pos2[1])**2 + (pos1[2]-pos2[2])**2)\n\n g.add_edge(i, j, weight=distance)\n return g, centers",
"def __labelFullCoreTiledLoop(self, stmt, outer_loop_inames):\n\n # there is no outer tiled loops\n if not outer_loop_inames:\n return\n\n # find the starting loop nest that iterates the full rectangular tiles\n s = stmt\n while True:\n if not isinstance(s, ast.ForStmt):\n break\n if s.start_label and s.start_label.startswith(\"start full core tiles: \"):\n break\n id, lb_exp, ub_exp, st_exp, lbody = self.ast_util.getForLoopInfo(s)\n if id.name == outer_loop_inames[0]:\n s.start_label = \"start full core tiles: \"\n s.end_label = \"end full core tiles: \"\n for i, iname in enumerate(outer_loop_inames):\n if i:\n s.start_label += \",\"\n s.end_label += \",\"\n s.start_label += \"%s\" % iname\n s.end_label += \"%s\" % iname\n break\n if len(s.stmt.stmts) != 1:\n break\n s = s.stmt.stmts[0]"
] |
[
"0.5615183",
"0.56146234",
"0.5562032",
"0.5527917",
"0.55126536",
"0.54222244",
"0.5398315",
"0.53490674",
"0.5333232",
"0.52936053",
"0.52397186",
"0.5204311",
"0.5195951",
"0.518437",
"0.51465446",
"0.51452196",
"0.51276094",
"0.50848764",
"0.50652826",
"0.50405645",
"0.5036166",
"0.5027009",
"0.5020971",
"0.49954823",
"0.49937347",
"0.49930832",
"0.4989487",
"0.4958923",
"0.49542618",
"0.49538085"
] |
0.6390221
|
0
|
Run a worker, call the worker's train_process() method.
|
def run(self, worker, evaluator=None):
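    # No-op in the base class; concrete runners are expected to start the worker's training here.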
pass
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def run_worker(self):\n # TODO(xiejw): To allow execution framework to add train hooks.\n return self._start_distributed_training()",
"def worker(self, gpu_id: int):\n if self.seed is not None:\n make_deterministic(self.seed)\n self.current_rank = self.rank\n if self.distributed:\n if self.multiprocessing:\n # For multiprocessing distributed training, rank needs to be the\n # global rank among all the processes\n self.current_rank = self.rank * self.ngpus_per_node + gpu_id\n dist.init_process_group(\n backend=self.dist_backend,\n init_method=self.dist_url,\n world_size=self.world_size,\n rank=self.current_rank\n )\n # set up process logger\n self.logger = logging.getLogger(\"worker_rank_{}\".format(self.current_rank))\n self.logger.propagate = False\n handler = QueueHandler(self.logger_queue)\n self.logger.addHandler(handler)\n self.logger.setLevel(logging.INFO)\n\n # only write in master process\n if self.current_rank == 0:\n self.tb_writer = self.tb_writer_constructor()\n\n self.logger.info(\n \"Use GPU: %d for training, current rank: %d\",\n gpu_id,\n self.current_rank\n )\n # get dataset\n train_dataset = get_dataset(\n self.global_cfg[\"dataset\"][\"name\"],\n self.global_cfg[\"dataset\"][\"root\"],\n split=\"train\"\n )\n val_dataset = get_dataset(\n self.global_cfg[\"dataset\"][\"name\"],\n self.global_cfg[\"dataset\"][\"root\"],\n split=\"val\"\n )\n # create model\n self.model = get_model(\n model_name=self.global_cfg[\"model\"][\"name\"],\n num_classes=self.global_cfg[\"dataset\"][\"n_classes\"]\n )\n\n self.device = torch.device(\"cuda:{}\".format(gpu_id))\n self.model.to(self.device)\n\n batch_size = self.global_cfg[\"training\"][\"batch_size\"]\n n_workers = self.global_cfg[\"training\"][\"num_workers\"]\n if self.distributed:\n batch_size = int(batch_size / self.ngpus_per_node)\n n_workers = int((n_workers + self.ngpus_per_node - 1) / self.ngpus_per_node)\n if self.global_cfg[\"training\"][\"sync_bn\"]:\n self.model = SyncBatchNorm.convert_sync_batchnorm(self.model)\n self.model = DistributedDataParallel(self.model, device_ids=[gpu_id])\n self.logger.info(\"batch_size: {}, workers: {}\".format(batch_size, n_workers))\n\n # define loss function (criterion) and optimizer\n self.loss_fn = CrossEntropyLoss().to(self.device)\n\n optimizer_cls = get_optimizer(self.global_cfg[\"training\"][\"optimizer\"])\n optimizer_params = copy.deepcopy(self.global_cfg[\"training\"][\"optimizer\"])\n optimizer_params.pop(\"name\")\n self.optimizer: Optimizer = optimizer_cls(self.model.parameters(), **optimizer_params)\n self.logger.info(\"Loaded optimizer:\\n%s\", self.optimizer)\n\n # scheduler\n self.scheduler = get_scheduler(self.optimizer, self.global_cfg[\"training\"][\"lr_schedule\"])\n\n if self.distributed:\n train_sampler = DistributedSampler(\n train_dataset,\n shuffle=True,\n drop_last=True\n )\n val_sampler = DistributedSampler(\n val_dataset,\n shuffle=False\n )\n else:\n train_sampler = RandomSampler(train_dataset)\n val_sampler = SequentialSampler(val_dataset)\n\n train_loader = DataLoader(\n train_dataset,\n batch_size=batch_size,\n num_workers=n_workers,\n pin_memory=True,\n sampler=train_sampler\n )\n\n self.val_loader = DataLoader(\n val_dataset,\n batch_size=batch_size,\n num_workers=n_workers,\n pin_memory=True,\n sampler=val_sampler\n )\n self.logger.info(\n \"Load dataset done\\nTraining: %d imgs, %d batchs\\nEval: %d imgs, %d batchs\",\n len(train_dataset),\n len(train_loader),\n len(val_dataset),\n len(self.val_loader)\n )\n iter_generator = make_iter_dataloader(train_loader)\n\n while self.iter < self.global_cfg[\"training\"][\"train_iters\"]:\n img, label = next(iter_generator)\n self.train_iter(img, 
label)\n\n def is_val():\n p1 = self.iter != 0\n p2 = (self.iter + 1) % self.global_cfg[\"training\"][\"val_interval\"] == 0\n p3 = self.iter == self.global_cfg[\"training\"][\"train_iters\"] - 1\n return (p1 and p2) or p3\n\n # have a validation\n if is_val():\n self.validate()\n # end one iteration\n self.iter += 1",
"def train_workers(self):\n args = dict(actor=self.actor,\n critic=self.critic,\n gamma=self.gamma,\n lamda=self.lamda or self.gamma / 1.005,\n device=self.device,\n optimizers=[self.actor_optimizer, self.critic_optimizer])\n workers = [Worker(i, self.action_size, self.state_size, **args)\n for i in range(self.n_workers)\n ]\n\n print(f'Worker count: {len(workers)}')\n\n for worker in workers:\n worker.start()\n\n while len(constants.scores) < self.n_steps:\n time.sleep(400) # save checkpoint every 400 ms\n\n print(f'\\nCurrent scores: {constants.scores}')\n\n self.save(constants.episode)\n print(f'\\nCheckpoint saved at episode: {constants.episode}\\n')",
"def main() -> None:\n worker = Worker()\n worker.do_work()",
"def train(self, log_in_tensorboard=True):\n if log_in_tensorboard or self.config.save_model:\n os.makedirs(self.config.results_path, exist_ok=True)\n\n # Manage GPUs\n if 0 < self.num_gpus:\n num_gpus_per_worker = self.num_gpus / (\n self.config.train_on_gpu\n + self.config.num_workers * self.config.selfplay_on_gpu\n + log_in_tensorboard * self.config.selfplay_on_gpu\n + self.config.use_last_model_value * self.config.reanalyse_on_gpu\n )\n if 1 < num_gpus_per_worker:\n num_gpus_per_worker = math.floor(num_gpus_per_worker)\n else:\n num_gpus_per_worker = 0\n\n # Initialize workers\n self.training_worker = trainer.Trainer.options(\n num_cpus=0, num_gpus=num_gpus_per_worker if self.config.train_on_gpu else 0,\n ).remote(self.checkpoint, self.config)\n\n self.shared_storage_worker = shared_storage.SharedStorage.remote(\n self.checkpoint, self.config,\n )\n self.shared_storage_worker.set_info.remote(\"terminate\", False)\n\n self.replay_buffer_worker = replay_buffer.ReplayBuffer.remote(\n self.checkpoint, self.replay_buffer, self.config\n )\n\n if self.config.use_last_model_value:\n self.reanalyse_worker = replay_buffer.Reanalyse.options(\n num_cpus=0,\n num_gpus=num_gpus_per_worker if self.config.reanalyse_on_gpu else 0,\n ).remote(self.checkpoint, self.config)\n\n self.self_play_workers = [\n self_play.SelfPlay.options(\n num_cpus=0,\n num_gpus=num_gpus_per_worker if self.config.selfplay_on_gpu else 0,\n ).remote(\n self.checkpoint, self.Game, self.config, self.config.seed + seed,\n )\n for seed in range(self.config.num_workers)\n ]\n\n # Launch workers\n [\n self_play_worker.continuous_self_play.remote(\n self.shared_storage_worker, self.replay_buffer_worker\n )\n for self_play_worker in self.self_play_workers\n ]\n self.training_worker.continuous_update_weights.remote(\n self.replay_buffer_worker, self.shared_storage_worker\n )\n if self.config.use_last_model_value:\n self.reanalyse_worker.reanalyse.remote(\n self.replay_buffer_worker, self.shared_storage_worker\n )\n\n if log_in_tensorboard:\n self.logging_loop(\n num_gpus_per_worker if self.config.selfplay_on_gpu else 0,\n )",
"def worker(self, **options):\n pass",
"def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)",
"def worker(ctx_obj):\n execute(start_worker_command(settings=ctx_obj['settings']))",
"def _parallel_train_worker(network, trainer, training_examples,\n iterations, results_queue,\n return_results_on_queue=True, unsupervised=False):\n trainer.train(network, training_examples, iterations, unsupervised)\n if return_results_on_queue:\n # return this trained network back to the main process by placing it \n # on the queue\n results_queue.put(network)",
"def worker_duty():\n\n while True:\n batch = queue.get()\n if batch is None:\n break\n examples, labels, alphas = batch\n for example, label, alpha in batch:\n self._train_one_example(example, label, alpha)",
"def worker(self, worker):\n\n self._worker = worker",
"def start_work(self):\n self.worker_thread = WorkerThread(self.feedback_log, self.job_list) # only created when processing begins. May be recreated\n self.worker_thread.daemon = True\n self.worker_thread.start()",
"def run_training(self, schema_params, export_model=False, output_model_dir=None):\n # Log distributed execution context, which includes cluster configuration\n logger.info(f\"Commencing {self.effect_name} training\")\n logger.info(f\"Execution context : {self.execution_context}\")\n\n # Create partition_index_list\n partition_index_list = self._get_partition_list()\n logger.info(f\"This worker on work on the following list of partitions : {partition_index_list}\")\n\n # Sequentially train model on partitions\n for partition_index in partition_index_list:\n logger.info(f\"Commencing {self.effect_name} training for partition index : {partition_index}\")\n\n # Resolve partitioned data directory from raw path params from user\n checkpoint_path = self._anchor_directory(\n self.model.checkpoint_path,\n partition_index)\n training_data_dir = self._anchor_directory(self.model.training_data_dir,\n partition_index)\n validation_data_dir = self._anchor_directory(self.model.validation_data_dir,\n partition_index) if self.model.validation_data_dir else None\n\n if is_empty_directory(training_data_dir):\n logger.info(f\"{training_data_dir} is empty, no dataset to train on.\")\n continue\n # Train model\n self.execution_context[constants.PARTITION_INDEX] = partition_index\n self.model.train(training_data_dir=training_data_dir,\n validation_data_dir=validation_data_dir,\n metadata_file=self.model.metadata_file,\n checkpoint_path=checkpoint_path,\n execution_context=self._prepare_training_context(partition_index),\n schema_params=schema_params)\n\n # Chief should export model\n is_chief = self.execution_context[constants.IS_CHIEF]\n if export_model and is_chief:\n logger.info(f\"Exporting model to directory : {output_model_dir}\")\n self.model.export(output_model_dir=output_model_dir)",
"def run():\n total_time_start = time.time()\n events = create_events()\n worker_to_trainer_message_queue = mp.Queue()\n trainer_to_worker_data_manager = mp.Manager().list()\n workers = start_workers(worker_to_trainer_message_queue, events, trainer_to_worker_data_manager)\n trainer = mp.Process(target=trainer_func, args=(worker_to_trainer_message_queue, events, trainer_to_worker_data_manager))\n trainer.start()\n trainer.join()\n terminate_workers(workers)\n print(\"Total test elapsed time: \" + str.format('{0:.6f}', (time.time() - total_time_start)*1000) + \"ms\")",
"async def train(self, weights, worker_epochs, batch_size):\n try:\n self.model.set_weights(weights)\n self.model.fit(self.x_train_partition, self.y_train_partition,\n epochs=worker_epochs, batch_size=batch_size,\n validation_split=0.1, verbose=2)\n new_weights = self.model.get_weights()\n gradients = subtract(weights, new_weights)\n gradients = divide(gradients, self.N)\n return Response(Status.OK, gradients)\n except Exception as e:\n print(f'Exception when executing train(weights={weights}, worker_epochs={worker_epochs}, batch_size={batch_size}):', e)\n return Response(Status.ERROR, e)",
"def create_and_run_worker(self):\n\n # Run processing on QThread worker - prevents GUI lock up\n # Create processing object, map control data\n processing_hub = ProcessingHub(control=self.control)\n\n # Create worker thread, connect signals to methods in this class and start, which calls worker.run()\n self.worker = ProcessingWorker(processing_hub, parent=self)\n self.worker.signal_screening_output_to_gui.connect(self.set_screening_output_to_gui)\n self.worker.signal_error.connect(self.error)\n self.worker.start()",
"def run(self):\n for worker in self.simulation_workers:\n worker.start()",
"def training_worker(graph_manager, checkpoint_dir, use_pretrained_model, framework):\n # initialize graph\n task_parameters = TaskParameters()\n task_parameters.__dict__['checkpoint_save_dir'] = checkpoint_dir\n task_parameters.__dict__['checkpoint_save_secs'] = 20\n task_parameters.__dict__['experiment_path'] = INTERMEDIATE_FOLDER\n\n if framework.lower() == \"mxnet\":\n task_parameters.framework_type = Frameworks.mxnet\n if hasattr(graph_manager, 'agent_params'):\n for network_parameters in graph_manager.agent_params.network_wrappers.values():\n network_parameters.framework = Frameworks.mxnet\n elif hasattr(graph_manager, 'agents_params'):\n for ap in graph_manager.agents_params:\n for network_parameters in ap.network_wrappers.values():\n network_parameters.framework = Frameworks.mxnet\n\n if use_pretrained_model:\n task_parameters.__dict__['checkpoint_restore_dir'] = PRETRAINED_MODEL_DIR\n\n graph_manager.create_graph(task_parameters)\n\n # save randomly initialized graph\n graph_manager.save_checkpoint()\n\n # training loop\n steps = 0\n graph_manager.setup_memory_backend()\n\n # To handle SIGTERM\n door_man = DoorMan()\n\n try:\n while (steps < graph_manager.improve_steps.num_steps):\n graph_manager.phase = core_types.RunPhase.TRAIN\n graph_manager.fetch_from_worker(graph_manager.agent_params.algorithm.num_consecutive_playing_steps)\n graph_manager.phase = core_types.RunPhase.UNDEFINED\n\n if graph_manager.should_train():\n steps += graph_manager.agent_params.algorithm.num_consecutive_playing_steps.num_steps\n\n graph_manager.phase = core_types.RunPhase.TRAIN\n graph_manager.train()\n graph_manager.phase = core_types.RunPhase.UNDEFINED\n\n if graph_manager.agent_params.algorithm.distributed_coach_synchronization_type == DistributedCoachSynchronizationType.SYNC:\n graph_manager.save_checkpoint()\n else:\n graph_manager.occasionally_save_checkpoint()\n\n if door_man.terminate_now:\n \"Received SIGTERM. Checkpointing before exiting.\"\n graph_manager.save_checkpoint()\n break\n\n except Exception as e:\n raise RuntimeError(\"An error occured while training: %s\" % e)\n finally:\n print(\"Terminating training worker\")\n graph_manager.data_store.upload_finished_file()",
"def _worker(self, args):\n pass",
"def run_training_loop():\n logging.info(\"Starting the training loop.\")\n\n trainer = trainer_class(\n output_dir=output_dir,\n train_env=train_env,\n eval_env=eval_env,\n trajectory_dump_dir=trajectory_dump_dir,\n )\n trainer.training_loop(n_epochs=n_epochs)",
"def train_main(cls):\n launcher = cls()\n launcher.launch()",
"def do(self):\n super().do()\n logger.info(\"TrainPipeStep started...\")\n records = self._get_current_step_records()\n logger.debug(\"load pipestep records: {}\".format(records))\n self.num_models = len(records)\n self.num_epochs = self.num_models * TrainerConfig.epochs\n self.update_status(Status.running)\n self.master = create_master()\n self._train_multi_models(records)\n self.master.join()\n ReportServer().output_step_all_records(step_name=self.task.step_name)\n self.master.close()\n ReportServer().backup_output_path()\n self.update_status(Status.finished)",
"def train(self):\n raise NotImplementedError",
"def train(self, ):\n raise NotImplementedError",
"def train(self, request):\n model = request.get(\"model\")\n if not model:\n raise MONAILabelException(\n MONAILabelError.INVALID_INPUT,\n \"Model is not provided for Training Task\",\n )\n\n task = self._trainers.get(model)\n if not task:\n raise MONAILabelException(\n MONAILabelError.INVALID_INPUT,\n f\"Train Task is not Initialized. There is no model '{model}' available\",\n )\n\n request = copy.deepcopy(request)\n result = task(request, self.datastore())\n\n # Run all scoring methods\n if self._auto_update_scoring:\n self.async_scoring(None)\n return result",
"def run(self):\n self.initialize()\n\n self.engine = setup_db_connection(driver=\"Fake\")\n self.logger = multiprocessing.get_logger()\n self.logger.handlers[0] = setup_logging()\n\n self.logger.debug(\"\\n\\n\")\n self.logger.debug(f'Spawning Worker')\n self.logger.debug(\"\\n\\n\")\n\n self.time_start_process = time.time()\n self.time_start_cycle = time.time()\n\n # -------------------------------\n # Start Processing Data\n\n\n data_unprocessed = self.get_data_from_queue()\n\n df = pd.DataFrame()\n\n df = self.process_data(data_unprocessed)\n\n if not df.empty:\n self.insert_data_into_database(df)\n\n # -------------------------------\n\n self.check_status(\"COMPLETED\")\n return",
"def retrain(self):\n thread = Thread(target=self.trainer.train_classifier)\n thread.start()",
"def run(self, worker_num=1):\n worker_proc = Process(target=self._start_worker, args=(worker_num,))\n worker_proc.start()\n\n beat_proc = Process(target=self._start_beat, args=())\n beat_proc.start()\n\n beat_proc.join()\n worker_proc.join()",
"def train(self):\n pass",
"def train(self):\n pass"
] |
[
"0.81170535",
"0.6662402",
"0.6560574",
"0.6424407",
"0.64064884",
"0.62709504",
"0.6250227",
"0.6231598",
"0.6201631",
"0.61577517",
"0.6096804",
"0.6034648",
"0.5959863",
"0.5958695",
"0.5943286",
"0.59378487",
"0.5897218",
"0.5874826",
"0.587004",
"0.58641726",
"0.58599484",
"0.5844402",
"0.5840169",
"0.583914",
"0.58220416",
"0.58123004",
"0.5810865",
"0.58068544",
"0.5789195",
"0.5789195"
] |
0.66718954
|
1
|
function to generate the error profile based on true labels and predicted labels for a classification problem
|
def error_profile(y_true: Dict[str, List[np.ndarray]], y_pred: Dict[str, List[np.ndarray]], model_type: str) -> None:
num_folds = len(y_pred["train"])
acc = {"train": [], "test": []}
test_predictions = np.array([])
test_labels = np.array([])
for k in range(num_folds):
y_train_true = y_true["train"][k]
y_train_pred = y_pred["train"][k]
y_test_true = y_true["test"][k]
y_test_pred = y_pred["test"][k]
# Accuracies
train_acc = np.sum(np.equal(y_train_true, y_train_pred)) / np.shape(y_train_true)[0]
test_acc = np.sum(np.equal(y_test_true, y_test_pred)) / np.shape(y_test_true)[0]
acc["train"].append(train_acc)
acc["test"].append(test_acc)
test_labels = np.concatenate((test_labels, y_test_true))
test_predictions = np.concatenate((test_predictions, y_test_pred))
pd.DataFrame(acc).plot().set_title("Accuracies for " + model_type)
plt.xlabel("Cross validation fold")
plt.ylabel("Accuracy (max = 1)")
plt.xticks(list(range(num_folds)))
plt.tight_layout()
plt.savefig("Figures/" + model_type + "_acc")
classes = np.unique(test_labels)
# Confusion matrix
# we only care for the confusion matrix of the testing set
conf_mat = confusion_matrix(test_labels, test_predictions)
fig, ax = plt.subplots(1, 2, sharey="all", figsize=(16, 9))
sn.heatmap(
conf_mat,
cmap="Oranges",
annot=True,
xticklabels=classes,
yticklabels=classes,
ax=ax[0],
)
ax[0].set_title("Confusion matrix")
conf_mat2 = np.array(conf_mat)
np.fill_diagonal(conf_mat2, -1)
sn.heatmap(
conf_mat2,
cmap="Oranges",
annot=True,
xticklabels=classes,
yticklabels=classes,
ax=ax[1],
)
ax[1].set_title("Confusion matrix (ignoring diagonal)")
fig.suptitle("Confusion matrices for " + model_type)
plt.savefig("Figures/" + model_type + "cfx_mat")
# Evaluate metrics for each class
metrics = {}
total = np.sum(conf_mat)
for class_num in range(np.shape(conf_mat)[0]):
class_metrics = {}
tp = conf_mat[class_num, class_num]
fn = np.sum(conf_mat[class_num, :]) - tp
fp = np.sum(conf_mat[:, class_num]) - tp
tn = total - tp - fn - fp
class_metrics["sens"] = tp / (tp + fn) # specificity (recall)
class_metrics["spes"] = tn / (tn + fp) # sensitivity
class_metrics["ppv"] = tp / (tp + fp) # positive predictive value (precision)
class_metrics["npv"] = tn / (tn + fn) # negative predictive value
class_metrics["F1"] = (2 * tp) / (2 * tp + fn + fp) # F1 score
class_metrics["auc"] = roc_auc_score( # Area under ROC
test_labels == classes[class_num], test_predictions == classes[class_num]
)
metrics[classes[class_num]] = class_metrics
print("-" * 100)
print("## Error profile for " + model_type)
print("Cross validated accuracy = {}%".format(np.mean(acc["test"]) * 100))
print(pd.DataFrame(metrics).to_markdown())
print("-" * 100)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def label_errors(preds, labels):\n num_correct = num_correct_fun(preds, labels)\n return (1.0 - num_correct / preds.size(0)) * 100.0",
"def predict_labels(model):\n test_datagen = ImageDataGenerator(featurewise_center=True,\n featurewise_std_normalization=True\n #rescale=1. / 255,\n #samplewise_center=True,\n #samplewise_std_normalization=True\n )\n test_datagen.fit(test_data)\n # datagen.fit(val_data)\n # create generator for train data\n test_generator = test_datagen.flow(\n test_data,\n batch_size=batch_size,\n shuffle=False)\n pred_prob=model.predict_generator(test_generator,test_data.shape[0])\n pred_prob=pred_prob[:,0]\n def pre_class(x):\n \tif x<0.5:\n return 0\n else:\n return 1\n #def true_label(id):\n #\tif 'f0' in id:\n #\t return 0\n # elif 'f1' in id: \n # return 1\n #\telse:\n #\t pass\n #pred_true=map(true_label,test_id)\n #pred_true=np.array(pred_true)\n #print roc_auc_score(val_target, pred_prob)\n #prediction=map(pre_class,pred_prob)\n #print confusion_matrix(val_target,prediction)\n with open(\"prediction.csv\", \"w\") as f: \n\tp_writer = csv.writer(f, delimiter=',', lineterminator='\\n')\n for id,label in zip(test_id,pred_prob):\n\t p_writer.writerow([id, label])\n\t\n #base_path = \"PZ/test/test/\"\n\n #with open(\"prediction.csv\", \"w\") as f:\n # p_writer = csv.writer(f, delimiter=',', lineterminator='\\n')\n # for _, _, imgs in os.walk(base_path):\n # for im in imgs:\n # pic_id = im.split(\".\")[0]\n #img = cv2.imread(base_path+im)\n #img = cv2.resize(img, (img_width, img_height), cv2.INTER_LINEAR)\n #img = img.transpose((2,0,1))\n #img = np.expand_dims(img,axis=0)\n #img = load_img(base_path + im)\n #img = imresize(img, size=(img_height, img_width))\n #test_x = img_to_array(img).reshape(3, img_height, img_width)\n #test_x = test_x.reshape((1,) + test_x.shape)\n #test_datagen.fit(img)\n #test_generator = test_datagen.flow(img,\n # batch_size=1,\n # shuffle=False)\n #prediction = model.predict_generator(test_generator, 1)\n #p_writer.writerow([pic_id, prediction])",
"def eval_metrics(y, pred):\n classification_error = np.sum(pred != y) / float(y.shape[0])\n return classification_error",
"def categorical_error(pred, label):\n pred_label = pred.argmax(1)\n return (pred_label != label.flat).mean()",
"def train_error(self):\n self.prediction = self.predict()\n pred = self.prediction.reshape(-1)\n self.error = np.sum(pred != self.label) / self.train_data.shape[0]\n return(self.error)",
"def compute_error(y_true, y_pred):\r\n\r\n # INSERT YOUR CODE HERE\r\n \r\n n = len(y_true)\r\n err = [y_true[i] != y_pred[i] for i in range(n)]\r\n return sum(err) / n\r\n \r\n raise Exception('Function not yet implemented!')",
"def eval_error_metric(predt, dtrain: xgb.DMatrix):\n label = dtrain.get_label()\n r = np.zeros(predt.shape)\n gt = predt > 0.5\n if predt.size == 0:\n return \"CustomErr\", 0\n r[gt] = 1 - label[gt]\n le = predt <= 0.5\n r[le] = label[le]\n return 'CustomErr', np.sum(r)",
"def _finalize_labels_and_prediction(self):\n y_pred = torch.cat(self.y_pred, dim=0)\n y_true = torch.cat(self.y_true, dim=0)\n\n if (self.mean is not None) and (self.std is not None):\n # To compensate for the imbalance between labels during training,\n # we normalize the ground truth labels with training mean and std.\n # We need to undo that for evaluation.\n y_pred = y_pred * self.std + self.mean\n\n return y_pred, y_true",
"def _finalize_labels_and_prediction(self):\n y_pred = torch.cat(self.y_pred, dim=0)\n y_true = torch.cat(self.y_true, dim=0)\n\n if (self.mean is not None) and (self.std is not None):\n # To compensate for the imbalance between labels during training,\n # we normalize the ground truth labels with training mean and std.\n # We need to undo that for evaluation.\n y_pred = y_pred * self.std + self.mean\n\n return y_pred, y_true",
"def eval_all(cls_prob, dtrain):\n #determine the top k predictions\n labels = dtrain.get_label()\n top_k = cls_prob.argsort(axis = 1)[:,::-1][:,:5]\n# top_k = cls_prob.argsort(axis = 1)[:,:k:-1]\n #convert true values and compared with predictions to check for equality\n labels = labels[:, None]\n return 'error', 1-ndcg(top_k, labels)/len(labels)",
"def prediction_processing(predictions, labels, threshold, step_nb):\n new_labels = []\n new_predictions = []\n number_sequences = step_nb//50\n\n for k in range(len(labels)//number_sequences):\n total_prediction = 0\n isLabelTrue = labels[number_sequences*k]\n for i in range(number_sequences):\n total_prediction += (1/predictions[number_sequences*k+i])\n if not(isLabelTrue == (labels[number_sequences*k+i])):\n logger.error('Problem.')\n if total_prediction > threshold:\n total_prediction = False\n else:\n total_prediction = True\n new_labels.append(isLabelTrue)\n new_predictions.append(total_prediction)\n\n recall_1 = recall_score(new_labels, new_predictions)\n recall_0 = recall_score(new_labels, new_predictions, pos_label=0)\n precision_1 = precision_score(new_labels, new_predictions)\n precision_0 = precision_score(new_labels, new_predictions, pos_label=0)\n return((recall_1, recall_0, precision_1, precision_0), new_predictions, new_labels)",
"def evaluate_classifications(self):\n test_labels = open('./digitdata/testlabels', 'r')\n self.init_confusion_matrix()\n i = 0\n class_stats = {0:[0,0], 1:[0,0], 2:[0,0], 3:[0,0], 4:[0,0], 5:[0,0], 6:[0,0], 7:[0,0], 8:[0,0], 9:[0,0]}\n total_correct = 0\n num_labels = 1000\n for label in test_labels:\n int_label = int(label)\n if int_label == self.solutions[i]:\n class_stats[int_label][0] += 1\n self.confusion_matrix[int_label][self.solutions[i]] += 1\n else:\n self.confusion_matrix[int_label][self.solutions[i]] += 1\n class_stats[int_label][1] += 1\n i += 1\n for k in class_stats:\n print \"Class \" + str(k) + \": \" + str(float(class_stats[k][0])/class_stats[k][1])\n total_correct += float(class_stats[k][0])\n print \"Overall Accuracy: \" + str(total_correct/num_labels) \n for l in range(0,10):\n for w in range(0,10):\n self.confusion_matrix[l][w] = float(self.confusion_matrix[l][w]) / class_stats[l][1]\n \n s = [[str(e) for e in row] for row in self.confusion_matrix]\n lens = [len(max(col, key=len)) for col in zip(*s)]\n fmt = '\\t'.join('{{:{}}}'.format(x) for x in lens)\n table = [fmt.format(*row) for row in s]\n print '\\n'.join(table)\n #self.print_confusion_matrix() ",
"def output_error(y_predict, y_true):\n\treturn metrics.precision_recall_fscore_support(y_true, y_predict), np.sum(y_predict != y_true) / float(y_predict.shape[0])",
"def classify(data, labels, (train_idx, test_idx), classifier=None):\r\n\r\n assert classifier is not None, \"Why would you pass not classifier?\"\r\n\r\n # Data scaling based on training set\r\n scaler = SupervisedStdScaler() #SupervisedRobustScaler() # # \r\n scaler.fit(data[train_idx,:], labels[train_idx], label=-1)\r\n #scaler.fit(data[train_idx,:], labels[train_idx])\r\n data_train = scaler.transform(data[train_idx,:])\r\n data_test = scaler.transform(data[test_idx,:])\r\n try:\r\n classifier.fit(data_train, labels[train_idx])\r\n \r\n \r\n confMat = confusion_matrix(labels[test_idx],\r\n classifier.predict(data_test))\r\n if confMat.shape == (1,1):\r\n if all(labels[test_idx] == -1):\r\n confMat = np.array([[confMat[0], 0], [0, 0]], dtype=confMat.dtype)\r\n else:\r\n confMat = np.array([[0, 0], [0, confMat[0]]], dtype=confMat.dtype)\r\n confMatRate = confMat / np.tile(np.sum(confMat, axis=1).astype('float'), (2,1)).transpose()\r\n totalErr = (confMat[0, 1] + confMat[1, 0]) / float(confMat.sum())\r\n #if type(classifier) not in [type(None), DummyClassifier]:\r\n if hasattr(classifier,'param_grid'): \r\n #isinstance(classifier, GridSearchCV) or \\\r\n # isinstance(classifier, RandomizedSearchCV):\r\n fitted_model = classifier.best_estimator_\r\n else:\r\n fitted_model = copy.copy(classifier) \r\n return confMatRate, totalErr, fitted_model\r\n except np.linalg.linalg.LinAlgError as e:\r\n # sahil added statement to raise the error instead of returning nun values\r\n print e.message\r\n raise e\r\n # return np.array([[np.nan, np.nan], [np.nan, np.nan]]), np.nan, None\r",
"def error(clf, X, y, ntrials=100, test_size=0.2) :\n\n train_error = 0\n test_error = 0\n ### ========== TODO : START ========== ###\n # compute cross-validation error over ntrials\n # hint: use train_test_split (be careful of the parameters)\n for i in range(0,ntrials, 1):\n #get the value of the error for each division\n #train on the test data for the clf\n #test also on the data\n #X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.33, random_state = 42)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size= test_size, random_state=i)\n #now find the error\n #first train the model\n #then predict\n #check the accuracy\n clf.fit(X_train,y_train)\n y_pred = clf.predict(X_train)\n #now find the error for the train_error\n train_err = 1 - metrics.accuracy_score(y_train, y_pred, normalize=True)\n train_error += train_err\n\n y_pred = clf.predict(X_test)\n test_err = 1 - metrics.accuracy_score(y_test, y_pred, normalize=True)\n test_error += test_err\n\n\n #get the average\n train_error = float(train_error)/((1-test_size)*len(X))\n test_error = float(test_error)/((test_size)*len(X))\n ### ========== TODO : END ========== ###\n\n return train_error, test_error",
"def evaluate(true_labels, predicted_labels):\n accuracy = np.round(metrics.accuracy_score(true_labels, predicted_labels), \n 2)\n precision = np.round(metrics.precision_score(true_labels, predicted_labels, \n average='weighted'), 2)\n recall = np.round(metrics.recall_score(true_labels, predicted_labels,\n average='weighted'), 2)\n f1 = np.round(metrics.f1_score(true_labels, predicted_labels, \n average='weighted'), 2)\n \n return accuracy, precision, recall, f1",
"def error(y_pred, y_true):\n m = len(y_true) # number of samples\n error = np.sum( (y_pred - y_true)**2 )/m\n return error",
"def error_rate(predictions, labels):\n return 100.0 - (100*(np.sum(predictions == labels)/float(predictions.shape[0]*predictions.shape[1])))",
"def evaluate(self, true_values, predicted_values):\n if self.classification_type == \"classification\":\n cross_entropy = self.cross_entropy(true_values, predicted_values)\n #self.percent_accuracy(true_values,predicted_values)\n return cross_entropy\n elif self.classification_type == \"regression\":\n MSE = self.mean_squared_error(true_values, predicted_values)\n self.mean_absolute_error(true_values, predicted_values)\n\n return MSE",
"def use(self, dataset):\n\n outputs = np.zeros((len(dataset), self.n_classes + 1))\n errors = np.zeros((len(dataset), 2))\n\n ## PUT CODE HERE ##\n # row[0] is input.csv image (array), row[1] actual target class for that image\n for ind, row in enumerate(dataset):\n # fill 2nd element with loss\n errors[ind, 1] = self.fprop(row[0], row[1])\n # predicted class\n outputs[ind, 0] = np.argmax(self.hs[-1])\n # 0/1 classification error\n errors[ind, 0] = (outputs[ind, 0] != row[1])\n # print \"errors: \", errors[ind, ]\n # add output probs\n np.copyto(outputs[ind, 1:], self.hs[-1])\n # print \"outputs: \", outputs[ind,]\n # time.sleep(5)\n\n return outputs, errors",
"def calc_metrics(pred, labels):\n pred_flat = np.argmax(pred, axis = 1).flatten()\n labels_flat = labels.flatten()\n \n flat_accuracy = np.sum(pred_flat == labels_flat) / len(labels_flat)\n \n # sklearn takes first parameter as the true label\n precision = precision_score(labels_flat, pred_flat)\n recall = recall_score(labels_flat, pred_flat)\n \n return flat_accuracy, precision, recall",
"def max_error(y_true, y_pred):\n ...",
"def compute_batch_metrics(y_true, y_pred, num_labels = 4): \n \n # Declarating list to store results\n acc = []\n pre = []\n rec = []\n det = []\n rmse = []\n \n for batch in np.arange(y_true.shape[0]):\n \n # Declarating list to store individual results\n batch_acc = []\n batch_pre = []\n batch_rec = []\n batch_det = []\n batch_rmse = []\n \n for label in np.arange(num_labels):\n \n # Computing and storing metrics for each class\n batch_acc.append(accuracy_score(y_true[batch, label, :], y_pred[batch, label, :]))\n batch_pre.append(precision_score(y_true[batch, label, :], y_pred[batch, label, :], zero_division = 1))\n batch_rec.append(recall_score(y_true[batch, label, :], y_pred[batch, label, :], zero_division = 1))\n batch_det.append(detection_rate(y_true[batch, label, :], y_pred[batch, label, :]))\n batch_rmse.append(sqrt(mse(y_true[batch, label, :], y_pred[batch, label, :])))\n \n # Storing mean results of the instance\n acc.append(np.mean(batch_acc))\n pre.append(np.mean(batch_pre))\n rec.append(np.mean(batch_rec))\n det.append(np.mean(batch_det))\n rmse.append(np.mean(batch_rmse))\n \n # Returning mean of all results\n return np.mean(acc), np.mean(pre), np.mean(rec), np.mean(det), np.mean(rmse)",
"def label_accuracy_score(label_trues, label_preds, n_class):\n hist = np.zeros((n_class, n_class))\n # 一个batch里面可能有多个数据\n # 通过迭代器将一个个数据进行计算\n for lt, lp in zip(label_trues, label_preds):\n # numpy.ndarray.flatten将numpy对象拉成1维\n hist += _fast_hist(lt.flatten(), lp.flatten(), n_class)\n\n # np.diag(a)假如a是一个二维矩阵,那么会输出矩阵的对角线元素\n # np.sum()可以计算出所有元素的和。如果axis=1,则表示按行相加\n \"\"\"\n acc是准确率 = 预测正确的像素点个数/总的像素点个数\n acc_cls是预测的每一类别的准确率(比如第0行是预测的类别为0的准确率),然后求平均\n iu是召回率Recall,公式上面给出了\n mean_iu就是对iu求了一个平均\n freq是每一类被预测到的频率\n fwavacc是频率乘以召回率,我也不知道这个指标代表什么\n \"\"\"\n acc = np.diag(hist).sum() / hist.sum()\n acc_cls = np.diag(hist) / hist.sum(axis=1)\n # nanmean会自动忽略nan的元素求平均\n acc_cls = np.nanmean(acc_cls)\n iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))\n mean_iu = np.nanmean(iu)\n freq = hist.sum(axis=1) / hist.sum()\n fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()\n return acc, acc_cls, mean_iu, fwavacc",
"def error_rate(predictions, labels):\n return 100.0 - (\n 100.0 *\n np.sum(np.argmax(predictions, 1) == labels) /\n predictions.shape[0])",
"def evaluate(self, sess, examples_raw, examples = None):\n #token_cm = ConfusionMatrix(labels=LBLS)\n\n def accuracy_score(Y_pred, Y_true):\n '''\n returns: array of accuracy scores of size n_attributes or batch_sze depending on axis\n '''\n acc_array = np.array([])\n for pred, true in zip(Y_pred, Y_true):\n pred = np.array(pred)\n accuracy = np.array(np.argmax(pred, axis=1))==np.array(true)\n acc_array = np.append(acc_array,np.mean(accuracy))\n return np.mean(acc_array)\n \n def perplexity(Y_pred, Y_true):\n PP = np.array([])\n for pred, true in zip(Y_pred, Y_true):\n pred = np.array(pred)\n sentence_length = pred.shape[0]\n #print(pred.shape)\n true = np.array(true)\n probs = pred[np.arange(0,true.shape[0]), true]\n #print(probs.shape)\n #print(probs)\n #exit()\n probs_inv = 1.0/probs\n probs_inv = np.log(probs_inv)\n prob_inv_sum = np.sum(probs_inv)/sentence_length\n PP = np.append(PP, np.exp(prob_inv_sum))\n return np.mean(PP)\n\n def bleu_score(Y_pred=None, Y_true=None):\n return 0\n\n def test_accuracy(Y_pred,Y_true):\n acc = np.mean(accuracy_score(Y_pred, Y_true))\n PP = perplexity(Y_pred, Y_true)\n bleu = bleu_score()\n #f1_w = np.mean(f1_score(Y_pred,Y_true,average=\"weighted\")) \n #f1_m = np.mean(f1_score(Y_pred,Y_true,average=\"macro\")) \n return acc,PP,bleu\n\n acc_array = []\n sentences, class_labels, predictions, attr = zip(*self.output(sess, examples_raw, examples))\n return test_accuracy(predictions,class_labels)",
"def output_error(y_predict, y_true):\n return metrics.precision_recall_fscore_support(y_true, y_predict), np.sum(y_predict != y_true) / float(y_predict.shape[0])",
"def precision(y_true, y_pred, average, labels):\n y_true, y_pred = check_metric_args(y_true, y_pred, average, labels)\n\n # At this point, you can be sure that y_true and y_pred are one hot encoded.\n result = None\n m = len(y_true)\n n = len(labels)\n\n #call get_confusion_matrix function and put the result in confusion_matrix\n confusion_matrix = get_confusion_matrix(y_true, y_pred, labels)\n\n #compute the result if using micro-averages\n if average == \"micro\":\n numerator = np.trace(confusion_matrix)\n denominator = np.sum(confusion_matrix)\n result = numerator/denominator\n\n #compute the precision independently for each class and then take the average \n elif average == \"macro\":\n diag = np.diag(confusion_matrix)\n row_sums = np.sum(confusion_matrix, axis=1)\n row_sums_adjusted = np.array([1 if val == 0 else val for val in row_sums])\n result = np.mean(diag/row_sums_adjusted)\n\n else:\n diag = np.diag(confusion_matrix)\n row_sums = np.sum(confusion_matrix, axis=1)\n row_sums_adjusted = np.array([1 if val == 0 else val for val in row_sums])\n result = diag/row_sums_adjusted\n\n return result",
"def error_rate(predictions, labels):\n return 100.0 - (\n 100.0 *\n numpy.sum(numpy.argmax(predictions, 1) == labels) /\n predictions.shape[0])",
"def evaluate_model(model, X_test, Y_test, category_names):\n# Print out Precision , recall F1_score and support for each column using classification_report function\n y_pred_test = model.predict(X_test)\n print(classification_report(Y_test, y_pred_test, target_names=category_names))"
] |
[
"0.69387436",
"0.6698274",
"0.65620875",
"0.6545873",
"0.64661515",
"0.64296305",
"0.64051944",
"0.64016044",
"0.64016044",
"0.6384025",
"0.6381803",
"0.6358461",
"0.63415956",
"0.63344145",
"0.6308464",
"0.6304838",
"0.6284326",
"0.6279838",
"0.6240312",
"0.6237167",
"0.6231046",
"0.62239003",
"0.62203115",
"0.61930656",
"0.61914873",
"0.61790824",
"0.61716235",
"0.616544",
"0.6156009",
"0.6147575"
] |
0.7280616
|
0
|
clip(arr,thresh=3.5) Simple sigmaclipping algorithm. Returns avg,std of clipped array.
|
def clip(arr,thresh=3.5):
a = arr.copy()
avg,std = a.mean(),a.std()
while 1:
size = a.size
a = a[abs(a-avg)<thresh*std]
avg,std = a.mean(),a.std()
if size==a.size:
break
return avg,std
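
A quick usage sketch, assuming a NumPy float array; the outliers below are synthetic.

import numpy as np

rng = np.random.default_rng(0)
data = np.concatenate([rng.normal(0.0, 1.0, 1000), [50.0, -60.0, 75.0]])  # clean sample plus gross outliers
avg, std = clip(data, thresh=3.5)
print(avg, std)  # after clipping, the mean and std should sit near 0 and 1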
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def clip(arr,thresh=3.5):\n a = numpy.array(arr)\n\n avg,std = a.mean(),a.std()\n while 1:\n avg,std,size = a.mean(),a.std(),a.size\n a = a[abs(a-avg)<thresh*std]\n if size==a.size:\n break\n return avg,std",
"def sigma_clip(arr,sigma=3):\n\n cliparr = range(len(arr)) # initialize\n arr = n.append(arr,[1]) # append superfluous item to trigger loop\n while len(cliparr) != len(arr):\n arr = arr[cliparr]\n mean = arr.mean()\n std = arr.std()\n cliparr = n.where((arr < mean + sigma*std) & (arr > mean - sigma*std) & (arr != 0) )[0]\n# print 'Clipping %d from array of length %d' % (len(arr) - len(cliparr), len(arr))\n return mean - sigma*std, mean + sigma*std",
"def sigma_clipping(self, low_thresh=3, high_thresh=3,\n func='mean', dev_func='std', **kwd):\n\n # Remove in 3.0\n _ = kwd.pop('use_astropy', True)\n\n self.data_arr.mask = sigma_clip(self.data_arr.data,\n sigma_lower=low_thresh,\n sigma_upper=high_thresh,\n axis=kwd.get('axis', 0),\n copy=kwd.get('copy', False),\n maxiters=kwd.get('maxiters', 1),\n cenfunc=func,\n stdfunc=dev_func,\n masked=True,\n **kwd).mask",
"def clipstats(yarr, thresh, iter):\n mean = yarr.mean()\n std = yarr.std()\n for i in range(iter):\n mask = (abs(yarr - mean) < thresh * std)\n if mask.sum() <= 1:\n return yarr.mean(), yarr.std()\n mean = yarr[mask].mean()\n std = yarr[mask].std()\n\n return mean, std",
"def clip2(data, robust=True):\n \n for j in xrange(params.sc_passes):\n mask = data.mask*1\n \n for i in range(data.shape[1]):\n i0 = max([0, i-params.sc_bp_window_f/2])\n i1 = min([i+params.sc_bp_window_f/2, data.shape[1]-1])\n try:\n assert(robust)\n mn, st = robust.mean(data[:,i0:i1+1]), robust.std(data[:,i0:i1+1])\n except:\n mn, st = np.ma.mean(data[:,i0:i1+1]), np.ma.std(data[:,i0:i1+1])\n bad = np.where(np.abs(data[:,i]-1) > params.sigma*st)[0]\n mask[bad,i] |= True\n \n data.mask = mask*1\n return data.mask",
"def clip_outliers(df, std_threshold: float = 3):\n df_std = df.std(axis=0, skipna=True)\n df_mean = df.mean(axis=0, skipna=True)\n\n lower = df_mean - (df_std * std_threshold)\n upper = df_mean + (df_std * std_threshold)\n df2 = df.clip(lower=lower, upper=upper, axis=1)\n\n return df2",
"def _clip_feature(self, feature):\n\n w = self.clip_factor\n for ic in range(self.data_shape[0]):\n if len(feature[ic]) > 0:\n minv = self.feature_mean[ic] - w * self.feature_std[ic]\n maxv = self.feature_mean[ic] + w * self.feature_std[ic]\n if minv != maxv:\n feature[ic] = np.clip(feature[ic], minv, maxv)\n #feature[ic] = self._mad_based_outliers(feature[ic],minv,maxv)\n return feature",
"def clip_signal(signal, clipping_thresh=1000, clipped_value=215):\n index_factor = rate / CHUNK\n while index_factor * np.argmax(signal) >= clipping_thresh:\n signal[np.argmax(signal)] = 0\n return signal",
"def sigma_clip(data, max_sigma):\n mn = np.mean(data)\n std = np.std(data)\n diff = data - mn\n sigmas = diff / std\n mask = np.abs(sigmas) < max_sigma\n return mask",
"def quartiled_mean(arr, clip=25):\n if clip >= 50:\n return None\n arr = np.array(arr)\n arr_len = arr.size\n left_index = int((clip) / 100.0 * arr_len)\n right_index = int((100.0 - clip) / 100.0 * arr_len)\n arr = np.sort(arr)\n arr = arr[left_index:right_index + 1]\n # print(\"Out of {}, only middle {} [{}, {}] are considered\".\n # format(arr_len, arr.size, left_index, right_index))\n return arr.sum() / arr.size",
"def clip(data,clip):\n data[data > clip] = clip\n data[data < -clip] = -clip\n return data",
"def blurthresh(arrayin,thresh=0.1e0,blur=8):\r\n arrayout = np.array(arrayin,dtype=np.float64)\r\n arrayout = ndimage.gaussian_filter(arrayout,blur)\r\n thresh2 = np.max(np.abs(arrayout))*thresh\r\n arrayout = np.array(1.0 * (np.abs(arrayout) > thresh2),dtype=arrayin.dtype) \r\n return arrayout",
"def make_clipping_av(ts, window):\n try:\n ts = core.to_np_array(ts)\n except ValueError:\n raise ValueError('make_clipping_av expects ts to be array-like')\n\n if not core.is_one_dimensional(ts):\n raise ValueError('make_clipping_av expects ts to be one-dimensional')\n\n if not isinstance(window, int):\n raise ValueError('make_clipping_av expects window to be an integer')\n\n av = np.zeros(len(ts) - window + 1)\n\n max_val, min_val = np.max(ts), np.min(ts)\n for i in range(len(av)):\n num_clip = 0.0\n for j in range(window):\n if ts[i + j] == max_val or ts[i + j] == min_val:\n num_clip += 1\n av[i] = num_clip\n\n min_val = np.min(av)\n av -= min_val\n\n max_val = np.max(av)\n if max_val == 0:\n av = np.zeros(len(av))\n else:\n av = 1 - av / max_val\n\n return av",
"def outlier(arr, as_nan=True, thresh=0.05, show=False, report=False):\n if len(arr) < 3:\n return arr\n if show:\n plt.subplot(1,2,1) # Plot part 1 first\n plt.plot(np.random.random(len(arr)), thing1, 'o', color='blue',\n markeredgecolor='none', alpha=0.4)\n plt.title('With outliers')\n \n med_res = [(np.median(arr)-i)**2 for i in arr] \n med_res_ix = [u for u in med_res] # Create index\n arr_copy = [u for u in arr] # The copy will be edited first\n stds = []\n med_res.sort(reverse=True) # Largest to smallest\n # print(med_res[:10])\n numPts = max([int(len(arr)*thresh), 2])\n # print('Testing largest %i residuals' %numPts)\n \n # Pretend to remove 10% of points\n for i in range(numPts): #for i in range(int(len(arr)*.1)): #\n stds.append(np.std(arr_copy))\n rm_ix = med_res_ix.index(med_res[i])\n try:\n rm = arr[rm_ix]\n except:\n print('tried to remove ix %i but arr is len %i'\n %(rm_ix, len(arr)))\n try: \n arr_copy.pop(arr_copy.index(rm))\n except:\n print('tried to remove %f but not in arr_copy' %rm)\n \n # Find the greatest d(std)\n dstd = np.diff(stds)\n dstd = [abs(i) for i in dstd]\n rm_to = list(dstd).index(max(dstd))+1 # len(diff) = len(arr)-1\n\n #print('Mean d(std): %.3f, removing all above %.3f (%i pts)'\n # %(np.mean(dstd), dstd[rm_to-1], rm_to))\n \n for i in range(rm_to):\n arr[med_res_ix.index(med_res[i])] = np.nan\n \n if show: # Show\n plt.subplot(1,2,2)\n plt.plot(np.random.random(len(arr)), arr, 'o',\n color='red', markeredgecolor='none', alpha=0.4)\n plt.title('Without outliers')\n plt.show()\n if as_nan:\n return arr\n return [i for i in arr if not pd.isnull(i)] # Else just eliminate it.",
"def meanStdCut(array, cut=None):\n\n array = np.array(array)\n\n if cut == None: return array.mean(), array.std()\n\n array = array[np.abs(array - array.mean()) < cut*array.std()]\n return array.mean(), array.std()",
"def blurthresh_mask(arrayin,thresh=0.1e0,blur=8):\r\n arrayout = np.array(arrayin,dtype=np.float64)\r\n arrayout = ndimage.gaussian_filter(arrayout,blur)\r\n thresh2 = np.max(np.abs(arrayout))*thresh\r\n arrayout = np.array(1.0 * (np.abs(arrayout) > thresh2),dtype=np.bool) \r\n return arrayout",
"def clip(a, a_min, a_max):\n return _make.clip(a, a_min, a_max)",
"def isigclip( valarray, sigclip, igood=[], maxiter=10, thisiter=0 ) :\n if not type(valarray)==np.ndarray :\n valarray = np.array( valarray )\n if not len(igood) : igood = range(len(valarray))\n \n Ngood = len(igood)\n mnval = np.mean( valarray[igood] )\n sigma = np.std( valarray[igood] )\n igood = np.where( (np.abs(valarray-mnval)<(sigclip*sigma)) )[0]\n\n # import pdb; pdb.set_trace()\n if len(igood) == Ngood : return( igood )\n if thisiter>=maxiter : \n print(\"WARNING : Stopping after %i recursions\"%maxiter)\n return( igood )\n thisiter+=1\n igood = isigclip( valarray, sigclip, igood=igood, maxiter=maxiter, thisiter=thisiter )\n return( igood )",
"def _thresh_clip(self, xmin, ymin, zmin, xmax, ymax, zmax):\n\n for p in self.points:\n if p.y > ymax or p.y < ymin:\n print p, 1\n self.raster = False\n break\n elif p.x > xmax or p.x < xmin:\n print p, 2\n self.raster = False\n break\n elif p.z > zmax or p.z < zmin:\n print p, 3\n self.raster = False\n break",
"def make_clipper(lims):\n lims = np.array(lims)\n\n low, high = lims[..., 0], lims[..., 1]\n if lims.shape[-1] != 2:\n raise ValueError(\"Trailing shape must be (2,)\")\n elif not np.all(low <= high):\n raise ValueError(\"Upper values must meet or exceed lower values.\")\n\n def clipper(x):\n x = np.where(x < low, low, x)\n x = np.where(x > high, high, x)\n return x\n\n return clipper",
"def _scale_array(arr, clip=True):\n if clip:\n scaled = np.clip(arr, 0, 255)\n else:\n scale_range = (max([arr.min(), 0]), min([arr.max(), 255]))\n scaled = _min_max_scale(arr, new_range=scale_range)\n\n return scaled",
"def np_clip_(x, min=None, max=None):\n return np.clip(x, min, max, out=x)",
"def sigma_clip(x, nsigma=3):\n\n m = np.ones(len(x)) == 1\n newx = x*1\n oldm = np.array([False])\n i = 0\n while sum(oldm) != sum(m):\n oldm = m*1\n sigma = np.std(newx)\n m &= np.abs(np.median(newx) - x)/sigma < nsigma\n # m &= m\n newx = x[m]\n i += 1\n return x[m], m",
"def imsave_clip(outfile,img):\n img = img_as_float(img).clip(0.,1.)\n io.imsave(outfile,img)",
"def std(X,trimming=0):\n \n if trimming==0:\n s = np.power(np.var(X,axis=0),.5)\n s = np.array(s).reshape(-1)\n else: \n var = sps.trim_mean(np.square(X - sps.trim_mean(X,trimming,0)),\n trimming,0)\n s = np.sqrt(var) \n return s",
"def clip(wavelength, spectra, threshold, substitute=None):\n\n if substitute == None: # remove threshold violations\n mask = np.any(spectra > threshold, axis=1)\n spectra = spectra[~mask, :]\n wavelength = wavelength[~mask]\n else: # substitute threshold violations with a value\n spectra[spectra > threshold] = substitute\n return wavelength, spectra\n\n return wavelength, spectra",
"def clip_output(original, warped, mode, cval, clip):\n if not clip:\n return\n\n min_val = np.nanmin(original)\n max_val = np.nanmax(original)\n nan_cval = np.isnan(cval)\n if mode == 'constant':\n if nan_cval:\n preserve_cval = True\n else:\n preserve_cval = min_val <= cval <= max_val\n else:\n preserve_cval = False\n\n if preserve_cval:\n if nan_cval:\n cval_mask = np.isnan(warped)\n else:\n cval_mask = warped == cval\n else:\n cval_mask = None\n\n np.clip(warped, min_val, max_val, out=warped)\n if cval_mask is not None:\n warped[cval_mask] = cval",
"def testStatsStdevclip(self):\n image2 = self.image.Factory(self.image, True)\n\n stats = afwMath.makeStatistics(image2, afwMath.STDEVCLIP | afwMath.NPOINT | afwMath.SUM)\n self.assertEqual(stats.getValue(afwMath.STDEVCLIP), 0)\n #\n # Check we get the correct sum even when clipping\n #\n self.assertEqual(stats.getValue(afwMath.NPOINT)*\n afwMath.makeStatistics(image2, afwMath.MEAN).getValue(),\n stats.getValue(afwMath.SUM))",
"def clip_filters(W, threshold=0.5, pad=3):\n W_clipped = []\n for w in W:\n L, A = w.shape\n entropy = np.log2(4) + np.sum(w * np.log2(w + 1e-7), axis=1)\n index = np.where(entropy > threshold)[0]\n if index.any():\n start = np.maximum(np.min(index) - pad, 0)\n end = np.minimum(np.max(index) + pad + 1, L)\n W_clipped.append(w[start:end, :])\n else:\n W_clipped.append(w)\n\n return W_clipped",
"def remove_outlier(data, Nstd=2, mask= None): #---- remove extreme data\r\n M = data.shape[0]; \r\n if mask is None:\r\n mask = np.ones((M,M)); # if mask not existed\r\n for k in range(0,M): mask[k,k]= 0; # create one and remove diagnol\r\n N = np.sum(mask); # total effective data number \r\n sumx= np.sum(data* mask);\r\n mean= sumx/ N; # new mean\r\n sum_square = np.sum(((data-mean)*mask)**2); #\r\n std = np.sqrt( sum_square/ (N-1) ); # new standard deviation\r\n #--- ---\r\n larger = data > (mean+ Nstd*std); # data too large\r\n smaller= data < (mean- Nstd*std); # data too small\r\n maskh = mask.copy();\r\n maskh[larger] = 0; maskh[smaller]= 0; # remove outlier data\r\n return maskh, mean"
] |
[
"0.87744415",
"0.74298495",
"0.7050653",
"0.70448977",
"0.64035314",
"0.6359961",
"0.6326048",
"0.6302444",
"0.6283845",
"0.61879015",
"0.6141052",
"0.6097245",
"0.6068267",
"0.60678166",
"0.60551995",
"0.6035484",
"0.60212284",
"0.6006312",
"0.5957043",
"0.5899439",
"0.5819309",
"0.5818485",
"0.57765675",
"0.57686394",
"0.57654953",
"0.5759062",
"0.5742052",
"0.5735133",
"0.5722029",
"0.5717297"
] |
0.888268
|
0
|
id_slits(flat_data,small) Identifies slits and starboxes. There are issues with correctly classifying short edge slits (which mimic star boxes) and dealing with slits that have substantial dips.
|
def id_slits(flat_data,findstars=True):
y_axis = flat_data.shape[0]
data = flat_data.mean(axis=1)
d = data.copy()
"""
The slits tend to be demarcated by when the sorted data begins to
grow at an accelerating rate; the first derivative tends to be an
acceptable proxy, though. The edges are masked for bad pixels/star
boxes.
"""
srt = scipy.sort(d)
brk = signal.convolve(srt,[-1.,1.],mode='same')
    pix = brk[brk.size//10:brk.size*9//10].argmin()+brk.size//10
lowvals = srt[pix]
d[d<lowvals] = 0.
d[d>0.] = 1.
"""
This needs to be tweaked to properly account for slits at the top and
bottom of the mask.
"""
edges = signal.convolve(d,[-1.,1.],mode='same')
left = scipy.where(edges<0)[0]
right = scipy.where(edges>0)[0]
slits = []
for i in range(left.size):
slits.append([left[i],right[i]-1])
if findstars is False:
return slits
"""
The star boxes are identified by locating where the slit amplitudes
begin to spike. The current criterion is that a slit amplitude is
more than one sigma greater than the previous slit.
"""
amps = []
for l,r in slits:
amps.append(scipy.median(data[l:r]))
amps = scipy.asarray(amps)
args = amps.argsort()
amps.sort()
indx = amps.size-1
    for i in range(amps.size//2,amps.size):
std = amps[:i].std()
if amps[i]>amps[i-1]+std:
indx = i
break
starindx = args[indx:]
starindx.sort()
stars = []
for i in starindx:
stars.append(slits[i])
for i in starindx[::-1]:
del slits[i]
return slits,stars
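
A hedged usage sketch; flat_field.npy is a placeholder for a reduced 2-D flat-field frame (rows along the spatial axis), and scipy plus scipy.signal (as signal) are assumed to be imported where id_slits is defined.

import numpy as np

flat_data = np.load("flat_field.npy")  # placeholder path for a 2-D flat-field image
slits, stars = id_slits(flat_data)  # row ranges classified as slits and star boxes
print("found %d slits and %d star boxes" % (len(slits), len(stars)))
slit_ranges = id_slits(flat_data, findstars=False)  # skip the star-box classification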
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def id_slits(arc,find_stars=True,chilimit=2.5,SATURATED=57000.,useLines=True):\n\n arc = arc.copy()\n \"\"\" Attempt to avoid saturated lines \"\"\"\n w = arc.shape[1]\n tmp = arc.copy()\n tmp[tmp>SATURATED] = 0.\n tmpSorted = scipy.sort(tmp,axis=1)\n flux = tmpSorted[:,w*0.97:w*0.98].mean(axis=1)\n minflux = scipy.median(flux)/4.\n del tmp\n\n if find_stars:\n starbox = []\n slit = []\n\n if useLines==False:\n flux = scipy.sort(arc,1)[:,w*4/5]\n minflux = scipy.median(flux[flux.size/3:flux.size*2/3])/2.\n mask = scipy.where(flux>minflux,1.,0.)\n inSlit = False\n tmp = []\n meds = []\n for i in range(mask.size):\n if inSlit:\n if mask[i]==0:\n inSlit = False\n end = i-1\n if end-start>8:\n tmp.append([start+1,end-1])\n slit = arc[start+3:end-3,100:-100].mean(0)\n meds.append(slit.max())\n elif mask[i]==1:\n start = i\n inSlit = True\n if inSlit:\n end = i\n if end-start>8:\n tmp.append([start+1,end-1])\n slit = arc[start+3:end-3,100:-100].mean(0)\n meds.append(slit.max())\n meds = numpy.array(meds)\n if find_stars:\n slit = []\n starbox = []\n m,s = Clip(meds,nsig=3.,locut=0.,hicut=0.75)\n for i in range(len(tmp)):\n if meds[i]<m+s*5:\n slit.append(tmp[i])\n else:\n starbox.append(tmp[i])\n return slit,starbox\n return tmp\n\n m,s = clip(tmpSorted[arc.shape[0]/2,:w*0.05],2.)\n\n inSlit = False\n i = 0\n while i<arc.shape[0]:\n# lines = findlines(arc[i])\n if useLines:\n lines = findlines(arc[i])\n else:\n med = scipy.median(arc[i])\n if med>m+5*s:\n lines = [0]*10\n else:\n lines = [0]\n if len(lines)<9 and inSlit==False:\n i += 1\n continue\n elif len(lines)>9 and inSlit==False:\n inSlit = True\n start = i\n i += 1\n continue\n\n bestchi = 1e29\n if len(lines)>9:\n #bestchi = 1e29\n x = scipy.arange(arc[i].size)\n smooth = ndimage.gaussian_filter(arc[i],1.)\n model = interpolate.splrep(x,smooth,k=3,s=0)\n comp = ndimage.gaussian_filter(arc[i-1],1.)\n usedpix = arc[i-1][10:-10]>scipy.median(arc[i-1])\n for o in range(30):\n offset = float(o-15.)/5.\n row = interpolate.splev(x[10:-10]+offset,model)\n chi = (comp[10:-10]-row)**2/(abs(comp[10:-10]))\n chi = chi[usedpix]\n chi.sort()\n chi = chi[:-chi.size/100] # Reject the five highest points\n if chilimit>6. and i>600 and o>6 and 1==2:\n import pylab\n pylab.plot(row)\n pylab.plot(comp[10:-10])\n pylab.figure()\n pylab.plot((row-comp[10:-10])**2/(abs(comp[10:-10])+16.))\n pylab.show()\n if chi.sum()/chi.size<bestchi:\n bestchi = chi.sum()/chi.size\n\n if inSlit is True and (bestchi>chilimit or len(lines)<9):\n \"\"\" The row is at the top edge of the slit. \"\"\"\n inSlit = False\n end = i\n\n i += 1\n if end-start<3:\n continue\n \"\"\"\n Conservatively shrink the edges. A better approach\n might be to use the flatfield data and set the edge\n to where the flux is, say, 1 sigma below the nominal\n level for the slit.\n \"\"\"\n# if start!=0:\n# start += 2\n# end -= 2\n\n \"\"\" Check if the slit is a starbox (if requested) \"\"\"\n if find_stars:\n mid = (start+end)/2\n peaks = findlines(arc[mid],False)\n is_star = check_star(peaks,arc[mid])\n else: is_star = False\n\n \"\"\" Skip small slits \"\"\"\n if not is_star and end-start<11:\n continue\n elif is_star and end-start<9:\n continue\n\n \"\"\"\n Conservatively shrink the edges. 
A better approach\n might be to use the flatfield data and set the edge\n to where the flux is, say, 1 sigma below the nominal\n level for the slit.\n \"\"\"\n if is_star:\n starbox.append([start,end])\n else:\n while flux[start+1]-flux[start]>3.*flux[start]**0.5:\n start += 1\n while flux[end-1]-flux[end]>3.*flux[end]**0.5:\n end -= 1\n if flux[start:end].mean()<minflux:\n continue\n slit.append([start,end])\n\n elif i+1==arc.shape[0] and end<start:\n \"\"\" The top of the mask is also the top of a slit. \"\"\"\n end = i+1\n if find_stars:\n mid = (start+end)/2\n peaks = findlines(arc[mid],False)\n is_star = check_star(peaks,arc[mid])\n else: is_star = False\n\n if not is_star and end-start<11:\n continue\n elif is_star and end-start<9:\n continue\n\n if is_star:\n starbox.append([start+2,end])\n else:\n while flux[start+1]-flux[start]>3.*flux[start]**0.5:\n start += 1\n if flux[start:end].mean()<minflux:\n continue\n slit.append([start,end])\n break\n else:\n \"\"\" In the middle of the slit, nothing to do.... \"\"\"\n i += 1\n\n if find_stars:\n return slit,starbox\n return slit",
"def get_SLIC_mask(self, class_mask, compactness_val=10.0, numSegments_val=100, sigma_val=5, slic_zero_mode=True):\n\n # get the superpixels mask with the number of segments as set for the current label\n slic_superpixels = slic(img_as_float(self.image), compactness=compactness_val, \\\n n_segments=numSegments_val, sigma=sigma_val, \\\n convert2lab=True, slic_zero=slic_zero_mode) #n_segments= numSegments, sigma= sigmaVal,\n all_slic_contours = self._find_SLIC_boundaries(slic_superpixels).astype(np.uint8)\n slic_superpixels[class_mask==0]=0\n slic_superpixels[slic_superpixels>0]=255\n #kernel = np.ones((5,5),np.uint8)\n #slic_superpixels = cv2.morphologyEx(slic_superpixels.astype(np.uint8), cv2.MORPH_CLOSE, kernel)#, iterations=3)\n slic_contours = self._find_SLIC_boundaries(slic_superpixels).astype(np.uint8)\n\n return all_slic_contours,slic_contours, slic_superpixels",
"def skel(tool_seg):\n return skimage.morphology.skeletonize_3d(tool_seg.astype(bool))",
"def syed_dilation(data, vessel):",
"def test_scl_only(self):\n task = SentinelHubInputTask(\n bands_feature=None,\n additional_data=[(FeatureType.DATA, 'SCL')],\n size=self.size,\n maxcc=self.maxcc,\n time_difference=self.time_difference,\n data_collection=DataCollection.SENTINEL2_L2A,\n max_threads=self.max_threads\n )\n\n eopatch = task.execute(bbox=self.bbox, time_interval=self.time_interval)\n scl = eopatch[(FeatureType.DATA, 'SCL')]\n\n width, height = self.size\n self.assertTrue(scl.shape == (4, height, width, 1))",
"def test_subset_reconstruction_integer(self, wires, snapshots):\n circuit = hadamard_circuit(wires)\n bits, recipes = circuit()\n shadow = ClassicalShadow(bits, recipes)\n\n state = shadow.global_snapshots(snapshots=snapshots)\n assert state.shape == (snapshots, 2**wires, 2**wires)",
"def reformatSagittalView4Needle(self, ID):\r\n # productive #onButton #report\r\n profprint()\r\n for i in range(2): # workaround update problem\r\n modelNode = slicer.util.getNode('vtkMRMLModelNode' + str(ID))\r\n polyData = modelNode.GetPolyData()\r\n nb = polyData.GetNumberOfPoints()\r\n base = [0, 0, 0]\r\n tip = [0, 0, 0]\r\n polyData.GetPoint(nb - 1, tip)\r\n polyData.GetPoint(0, base)\r\n a, b, c = tip[0] - base[0], tip[1] - base[1], tip[2] - base[2]\r\n # print a,b,c\r\n sYellow = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeYellow\")\r\n if sYellow == None :\r\n sYellow = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode2\")\r\n reformatLogic = slicer.vtkSlicerReformatLogic()\r\n sYellow.SetSliceVisible(1)\r\n reformatLogic.SetSliceNormal(sYellow, 1, -a / b, 0)\r\n m = sYellow.GetSliceToRAS()\r\n m.SetElement(0, 3, base[0])\r\n m.SetElement(1, 3, base[1])\r\n m.SetElement(2, 3, base[2])\r\n sYellow.Modified()",
"def dispersed_pixel(x0, y0, width, height, lams, flxs, order, wmin, wmax,\n sens_waves, sens_resp, seg_wcs, grism_wcs, ID, naxis,\n oversample_factor=2, extrapolate_sed=False, xoffset=0,\n yoffset=0):\n\n # Setup the transforms we need from the input WCS objects\n sky_to_imgxy = grism_wcs.get_transform('world', 'detector')\n imgxy_to_grismxy = grism_wcs.get_transform('detector', 'grism_detector')\n\n # Setup function for retrieving flux values at each dispersed wavelength\n if len(lams) > 1:\n # If we have direct image flux values from more than one filter (lambda),\n # we have the option to extrapolate the fluxes outside the\n # wavelength range of the direct images\n if extrapolate_sed is False:\n flux = interp1d(lams, flxs, fill_value=0., bounds_error=False)\n else:\n flux = interp1d(lams, flxs, fill_value=\"extrapolate\", bounds_error=False)\n else:\n # If we only have flux from one lambda, just use that\n # single flux value at all wavelengths\n def flux(x):\n return flxs[0]\n\n # Get x/y positions in the grism image corresponding to wmin and wmax:\n # Start with RA/Dec of the input pixel position in segmentation map,\n # then convert to x/y in the direct image frame corresponding\n # to the grism image,\n # then finally convert to x/y in the grism image frame\n x0_sky, y0_sky = seg_wcs(x0, y0)\n x0_xy, y0_xy, _, _ = sky_to_imgxy(x0_sky, y0_sky, 1, order)\n xwmin, ywmin = imgxy_to_grismxy(x0_xy + xoffset, y0_xy + yoffset, wmin, order)\n xwmax, ywmax = imgxy_to_grismxy(x0_xy + xoffset, y0_xy + yoffset, wmax, order)\n dxw = xwmax - xwmin\n dyw = ywmax - ywmin\n\n # Compute the delta-wave per pixel\n dw = np.abs((wmax - wmin) / (dyw - dxw))\n\n # Use a natural wavelength scale or the wavelength scale of the input SED/spectrum,\n # whichever is smaller, divided by oversampling requested\n input_dlam = np.median(lams[1:] - lams[:-1])\n if input_dlam < dw:\n dlam = input_dlam / oversample_factor\n else:\n # this value gets used when we only have 1 direct image wavelength\n dlam = dw / oversample_factor\n\n # Create list of wavelengths on which to compute dispersed pixels\n lambdas = np.arange(wmin, wmax + dlam, dlam)\n n_lam = len(lambdas)\n\n # Compute lists of x/y positions in the grism image for\n # the set of desired wavelengths:\n # As above, first get RA/Dec of segmentation map pixel positions,\n # then convert to x/y in image frame of grism image,\n # then convert to x/y in grism frame.\n x0_sky, y0_sky = seg_wcs([x0] * n_lam, [y0] * n_lam)\n x0_xy, y0_xy, _, _ = sky_to_imgxy(x0_sky, y0_sky, lambdas, [order] * n_lam)\n x0s, y0s = imgxy_to_grismxy(x0_xy + xoffset, y0_xy + yoffset, lambdas, [order] * n_lam)\n\n # If none of the dispersed pixel indexes are within the image frame,\n # return a null result without wasting time doing other computations\n if x0s.min() >= naxis[0] or x0s.max() < 0 or y0s.min() >= naxis[1] or y0s.max() < 0:\n return None\n\n # Compute arrays of dispersed pixel locations and areas\n padding = 1\n xs, ys, areas, index = get_clipped_pixels(\n x0s, y0s,\n padding,\n naxis[0], naxis[1],\n width, height\n )\n lams = np.take(lambdas, index)\n\n # If results give no dispersed pixels, return null result\n if xs.size <= 1:\n return None\n\n # compute 1D sensitivity array corresponding to list of wavelengths\n sens, no_cal = create_1d_sens(lams, sens_waves, sens_resp)\n\n # Compute countrates for dispersed pixels. 
Note that dispersed pixel\n # values are naturally in units of physical fluxes, so we divide out\n # the sensitivity (flux calibration) values to convert to units of\n # countrate (DN/s).\n counts = flux(lams) * areas / sens\n counts[no_cal] = 0. # set to zero where no flux cal info available\n\n return xs, ys, areas, lams, counts, ID",
"def spat_flexure_shift(sciimg, slits, debug=False, maxlag=20):\n # Mask -- Includes short slits and those excluded by the user (e.g. ['rdx']['slitspatnum'])\n slitmask = slits.slit_img(initial=True, exclude_flag=slits.bitmask.exclude_for_flexure)\n\n _sciimg = sciimg if slitmask.shape == sciimg.shape \\\n else arc.resize_mask2arc(slitmask.shape, sciimg) \n onslits = slitmask > -1\n corr_slits = onslits.astype(float).flatten()\n\n # Compute\n mean_sci, med_sci, stddev_sci = stats.sigma_clipped_stats(_sciimg[onslits])\n thresh = med_sci + 5.0*stddev_sci\n corr_sci = np.fmin(_sciimg.flatten(), thresh)\n\n lags, xcorr = utils.cross_correlate(corr_sci, corr_slits, maxlag)\n xcorr_denom = np.sqrt(np.sum(corr_sci*corr_sci)*np.sum(corr_slits*corr_slits))\n xcorr_norm = xcorr / xcorr_denom\n # TODO -- Generate a QA plot\n tampl_true, tampl, pix_max, twid, centerr, ww, arc_cont, nsig \\\n = arc.detect_lines(xcorr_norm, sigdetect=3.0, fit_frac_fwhm=1.5, fwhm=5.0,\n cont_frac_fwhm=1.0, cont_samp=30, nfind=1, debug=debug)\n # No peak? -- e.g. data fills the entire detector\n if len(tampl) == 0:\n msgs.warn('No peak found in spatial flexure. Assuming there is none..')\n if debug:\n embed(header='68 of flexure')\n return 0.\n\n # Find the peak\n xcorr_max = np.interp(pix_max, np.arange(lags.shape[0]), xcorr_norm)\n lag_max = np.interp(pix_max, np.arange(lags.shape[0]), lags)\n msgs.info('Spatial flexure measured: {}'.format(lag_max[0]))\n\n if debug:\n plt.figure(figsize=(14, 6))\n plt.plot(lags, xcorr_norm, color='black', drawstyle='steps-mid', lw=3, label='x-corr', linewidth=1.0)\n plt.plot(lag_max[0], xcorr_max[0], 'g+', markersize=6.0, label='peak')\n plt.title('Best shift = {:5.3f}'.format(lag_max[0]) + ', corr_max = {:5.3f}'.format(xcorr_max[0]))\n plt.legend()\n plt.show()\n\n #tslits_shift = trace_slits.shift_slits(tslits_dict, lag_max)\n # Now translate the tilts\n\n #slitmask_shift = pixels.tslits2mask(tslits_shift)\n #slitmask_shift = slits.slit_img(flexure=lag_max[0])\n if debug:\n # Now translate the slits in the tslits_dict\n all_left_flexure, all_right_flexure, mask = slits.select_edges(flexure=lag_max[0])\n gpm = mask == 0\n viewer, ch = ginga.show_image(_sciimg)\n ginga.show_slits(viewer, ch, left_flexure[:,gpm], right_flexure)[:,gpm]#, slits.id) #, args.det)\n embed(header='83 of flexure.py')\n #ginga.show_slits(viewer, ch, tslits_shift['slit_left'], tslits_shift['slit_righ'])\n #ginga.show_slits(viewer, ch, tslits_dict['slit_left'], tslits_dict['slit_righ'])\n\n return lag_max[0]",
"def reformatSagittalView4Needle(self,ID):\n #productive #onButton #report\n profprint()\n for i in range(2): #workaround update problem\n modelNode = slicer.util.getNode('vtkMRMLModelNode'+str(ID))\n polyData = modelNode.GetPolyData()\n nb = polyData.GetNumberOfPoints()\n base = [0,0,0]\n tip = [0,0,0]\n polyData.GetPoint(nb-1,tip)\n polyData.GetPoint(0,base)\n a,b,c = tip[0]-base[0],tip[1]-base[1],tip[2]-base[2]\n #print a,b,c\n sYellow = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeYellow\")\n if sYellow ==None :\n sYellow = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode2\")\n reformatLogic = slicer.vtkSlicerReformatLogic()\n sYellow.SetSliceVisible(1)\n reformatLogic.SetSliceNormal(sYellow,1,-a/b,0)\n m= sYellow.GetSliceToRAS()\n m.SetElement(0,3,base[0])\n m.SetElement(1,3,base[1])\n m.SetElement(2,3,base[2])\n sYellow.Modified()",
"def parse_sod():\n index = loadmat('SOD/DBidx.mat', squeeze_me=True, struct_as_record=False)\n ret_images = []\n ret_masks = []\n\n for image_id in index['SFprefix'].flatten():\n image_path = list(Path('BSDS300/images/').rglob(str(image_id) + '.jpg'))[0]\n raw_image = imread(image_path)\n\n sessions = \\\n loadmat('SOD/SO' + str(image_id) + '.mat', squeeze_me=True, struct_as_record=False)[\n 'SES']\n for sess in sessions:\n # TODO: use other sessions\n # sess_id = sess.session\n if isinstance(sess.obj, np.ndarray):\n # The most salient object has imp=1.\n salient_obj = next((o for o in sess.obj if o.IMP == 1), None)\n if salient_obj is None:\n continue\n else:\n salient_obj = sess.obj\n\n boundary = salient_obj.BND\n if boundary.dtype == np.object:\n # TODO: allow disconnected area\n boundary = boundary[0]\n mask = np.zeros(sess.ImSize.tolist(), dtype=np.bool)\n rr, cc = polygon(boundary[:, 0], boundary[:, 1], sess.ImSize.tolist())\n mask[rr, cc] = 1\n\n ret_images.append(raw_image)\n ret_masks.append(mask)\n break\n return ret_images, ret_masks",
"def set_sparse_signals(self):\n\t\n\t\tparams_dSs = [self.mu_dSs, self.sigma_dSs]\n\t\tparams_Ss0 = [self.mu_Ss0, self.sigma_Ss0]\n\t\tself.dSs, self.idxs = sparse_vector([self.Nn, self.Kk], \n\t\t\t\t\t\t\t\t\t\t\t\tparams_dSs,\tseed=self.seed_dSs)\n\t\t\n\t\t# Replace components with conflicting background odor \n\t\tif self.Kk_split is not None and self.Kk_split != 0:\n\t\t\tassert 0 <= self.Kk_split <= self.Kk, \\\n\t\t\t\t\"Splitting sparse signal into two levels requires Kk_split\" \\\n\t\t\t\t\" to be non-negative and less than or equal to Kk.\"\n\t\t\tassert self.mu_dSs_2 is not None \\\n\t\t\t\tand self.sigma_dSs_2 is not None, \\\n\t\t\t\t\"Splitting sparse signal into two levels requires that\" \\\n\t\t\t\t\" mu_dSs_2 and sigma_dSs_2 are set.\"\n\n\t\t\tsp.random.seed(self.seed_dSs)\n\t\t\tself.idxs_2 = sp.random.choice(self.idxs[0], self.Kk_split, \n\t\t\t\t\t\t\t\t\t\t\treplace=False)\n\t\t\tfor idx_2 in self.idxs_2:\n\t\t\t\tself.dSs[idx_2] = sp.random.normal(self.mu_dSs_2, \n\t\t\t\t\t\t\t\t\t\t\t\t\tself.sigma_dSs_2)\n\t\telse:\n\t\t\tself.idxs_2 = []\n\t\t\tself.Kk_split = 0\n\t\t\t\n\t\t# Ss0 is the ideal (learned) background stimulus without noise\n\t\tself.Ss0, self.Ss0_noisy = sparse_vector_bkgrnd([self.Nn, self.Kk], \n\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.idxs, params_Ss0,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tseed=self.seed_Ss0)\n\t\t\n\t\tself.Ss = self.dSs + self.Ss0_noisy",
"def iat_stpt_smlRefBld(action_raw, stptLmt, ob_this_raw):\n HTSP_RAW_IDX = 6; \n CLSP_RAW_IDX = 7;\n htStpt = ob_this_raw[HTSP_RAW_IDX];\n clStpt = ob_this_raw[CLSP_RAW_IDX];\n res_htStpt = max(min(htStpt + action_raw[0], stptLmt[1]), stptLmt[0]);\n res_clStpt = max(min(clStpt + action_raw[1], stptLmt[1]), stptLmt[0]);\n if res_clStpt < res_htStpt:\n return ((htStpt, clStpt),(0.0, 0.0));\n else:\n return ((res_htStpt, res_clStpt),\n (res_htStpt - htStpt, res_clStpt - clStpt));",
"def sils_cut(T,f,c,d,h, conshdlr):\n Ts = range(1,T+1)\n\n model = sils(T,f,c,d,h)\n y,x,I = model.data\n\n # relax integer variables\n for t in Ts:\n model.chgVarType(y[t], \"C\")\n model.addVar(vtype=\"B\", name=\"fake\") # for making the problem MIP\n\n # compute D[i,j] = sum_{t=i}^j d[t]\n D = {}\n for t in Ts:\n s = 0\n for j in range(t,T+1):\n s += d[j]\n D[t,j] = s\n\n #include the lot sizing constraint handler\n model.includeConshdlr(conshdlr, \"SILS\", \"Constraint handler for single item lot sizing\",\n sepapriority = 0, enfopriority = -1, chckpriority = -1, sepafreq = -1, propfreq = -1,\n eagerfreq = -1, maxprerounds = 0, delaysepa = False, delayprop = False, needscons = False,\n presoltiming = SCIP_PRESOLTIMING.FAST, proptiming = SCIP_PROPTIMING.BEFORELP)\n conshdlr.data = D,Ts\n\n model.data = y,x,I\n return model",
"def _exit_slits(self, hdr):\n # Does not exist separately in OpenMIMS, part of ApSecondaryNano and AnalysisParam\n d = {}\n # Each detector exit slit has:\n # - a position (0, 1, 2)\n # - a size (normal, large, xl)\n # The exit slits widths (and heights) are a 3x5 matrix where\n # coordinate (size, pos) returns actual width (height). positions 4\n # and 5 are 0 (for future expansion?) Size XL not stored in same part\n # of header, and only in analysis version >= 5, so we return a list of\n # length 5 with 0s here. Slits 0, 1, 2 are called slit 1, slit 2,\n # slit 3, so add labels to avoid confusion.\n\n d['exit slit'], d['exit slit size'] = \\\n unpack(self._bo + '2i', hdr.read(8))\n d['exit slit label'] = _exit_slit_labels.get(d['exit slit'], str(d['exit slit']))\n d['exit slit size label'] = _exit_slit_size_labels.get(d['exit slit size'], str(d['exit slit size']))\n\n w0 = tuple(unpack(self._bo + '5i', hdr.read(20)))\n w1 = tuple(unpack(self._bo + '5i', hdr.read(20)))\n w2 = (0, 0, 0, 0, 0)\n d['exit slit widths'] = (w0, w1, w2)\n h0 = tuple(unpack(self._bo + '5i', hdr.read(20)))\n h1 = tuple(unpack(self._bo + '5i', hdr.read(20)))\n h2 = (0, 0, 0, 0, 0)\n d['exit slit heights'] = (h0, h1, h2)\n return d",
"def test_nirspec_slit_transformations(verbose=False, siaf=None):\n if siaf is None:\n siaf = Siaf(instrument)\n else:\n siaf = copy.deepcopy(siaf)\n\n threshold = 0.010 # arc-seconds\n pixel_threshold = 10 * threshold\n\n labels = ['X', 'Y']\n from_frame = 'sci'\n to_frames = 'det tel'.split()\n x_sci = np.linspace(-10, 10, 3)\n y_sci = np.linspace(10, -10, 3)\n\n\n # for aper_name in 'NRS_S1600A1_SLIT NRS_S200B1_SLIT NRS_FIELD1_MSA4 NRS1_FULL'.split():\n for aper_name in siaf.apertures.keys():\n skip = False\n aperture = siaf[aper_name]\n\n if (aperture.AperType not in ['SLIT']) or ('MIMF' in aper_name) or (\n not hasattr(aperture, '_parent_aperture')):\n skip = True\n\n if skip is False:\n parent_aperture = siaf[aperture._parent_aperture.AperName]\n if verbose:\n print(\n 'testing {} {} parent {}'.format(siaf.instrument, aper_name, parent_aperture.AperName))\n\n # verify that correct reference point can be retrieved\n v2ref, v3ref = aperture.reference_point('tel')\n assert np.abs(v2ref - aperture.V2Ref) < threshold\n assert np.abs(v3ref - aperture.V3Ref) < threshold\n\n # verify that we get the same tel to sci transform whether using slit or parent\n # aperture name\n xsciref, ysciref = aperture.reference_point('sci')\n xscidref, yscidref = parent_aperture.tel_to_sci(v2ref, v3ref)\n xsciaref, ysciaref = aperture.tel_to_sci(v2ref, v3ref)\n error = np.sqrt((xsciref - xscidref) ** 2 + (ysciref - yscidref) ** 2)\n if verbose:\n print(\n '{} {}: Error in reference point {:02.6f} pixels. (parent aperture is {})'.format(siaf.instrument, aper_name,\n error, parent_aperture.AperName))\n assert error < pixel_threshold\n\n # verify that corners can be retrieved and check 1st vertice\n ixc, iyc = aperture.corners('idl')\n assert np.abs(ixc[0] - aperture.XIdlVert1) < pixel_threshold\n assert np.abs(iyc[0] - aperture.YIdlVert1) < pixel_threshold\n\n # verify that we get the same tel to det transform whether using slit or parent\n # aperture name\n v2c, v3c = aperture.corners('tel')\n xc, yc = aperture.corners('det')\n xdc, ydc = parent_aperture.tel_to_det(v2c, v3c)\n xac, yac = aperture.tel_to_det(v2c, v3c)\n xic, yic = aperture.idl_to_det(ixc, iyc)\n error = np.max(np.abs(\n np.concatenate((xc - xdc, yc - ydc, xc - xac, yc - yac, xc - xic, yc - yic))))\n if verbose:\n print(\n '{} {}: Max error in corners {:02.6f} pixels.'.format(siaf.instrument, aper_name,\n error))\n assert error < pixel_threshold\n\n #testing roundtrip error\n for to_frame in to_frames:\n forward_transform = getattr(aperture, '{}_to_{}'.format(from_frame, to_frame))\n backward_transform = getattr(aperture, '{}_to_{}'.format(to_frame, from_frame))\n\n x_out, y_out = backward_transform(*forward_transform(x_sci, y_sci))\n x_mean_error = np.mean(np.abs(x_sci - x_out))\n y_mean_error = np.mean(np.abs(y_sci - y_out))\n for i, error in enumerate([x_mean_error, y_mean_error]):\n if verbose:\n print('{} {}: Error in {}<->{} {}-transform is {:02.6f})'.format(\n siaf.instrument, aper_name, from_frame, to_frame, labels[i], error))\n assert error < pixel_threshold",
"def testSierpinski(self):\n G = self.twistedLadder(4) # Complete graph on four vertices\n for i in range(3):\n G = self.truncate(G)\n self.check(G,3)",
"def get_positions_by_slits(slits):\r\n xy = []\r\n for i, slit in enumerate(slits):\r\n index = canvas.slits.ids.index(slit)\r\n xy.append([canvas.slits.x[index], canvas.slits.y[index]])\r\n return np.array(xy)",
"def find_flats(aperture, side='blue'):\r\n \r\n # find dome flat images\r\n domeflats = iraf.hselect('%s????.fits' % side, '$I', 'TURRET == \"APERTURE\" & APERTURE == \"%s\" & LAMPS == \"0000000\" & AIRMASS < 1.01 & IMGTYPE == \"flat\"' % aperture, Stdout=1)\r\n # find internal flat (incandescent lamp) images\r\n intflats = iraf.hselect('%s????.fits' % side, '$I', 'TURRET == \"LAMPS\" & APERTURE == \"%s\" & LAMPS == \"0000001\" & AIRMASS < 1.01' % aperture, Stdout=1)\r\n # dome flats are prefered over internal flats\r\n flats = []\r\n if (len(intflats) > 0) & (len(domeflats) == 0):\r\n flats = intflats\r\n print \"Using %d internal flats for the %s arcsec slit.\" % (len(intflats), aperture)\r\n if len(domeflats) > 3:\r\n flats = domeflats\r\n print \"Using %d dome flats for the %s arcsec slit.\" % (len(domeflats), aperture)\r\n\r\n return flats",
"def SIDimensions(kg=0, m=0, s=0, k=0, a=0, mol=0, cd=0):\n return (kg, m, s, k, a, mol, cd)",
"def XsamsSolids(Solids):\n if not Solids:\n return\n yield \"<Solids>\"\n for Solid in makeiter(Solids):\n cont, ret = checkXML(Solid)\n if cont:\n yield ret\n continue\n G = lambda name: GetValue(name, Solid=Solid)\n makePrimaryType(\"Solid\", \"Solid\", G, extraAttr={\"stateID\":\"S%s-%s\" % (NODEID, G(\"SolidStateID\"))})\n if hasattr(Solid, \"Layers\"):\n for Layer in makeiter(Solid.Layers):\n GL = lambda name: GetValue(name, Layer=Layer)\n yield \"<Layer>\"\n yield \"<MaterialName>%s</MaterialName>\" % GL(\"SolidLayerName\")\n if hasattr(Solid, \"Components\"):\n makePrimaryType(\"MaterialComposition\", \"SolidLayerComponent\")\n for Component in makeiter(Layer.Components):\n GLC = lambda name: GetValue(name, Component=Component)\n yield \"<ChemicalElement>\"\n yield \"<NuclearCharge>%s</NuclearCharge>\" % GLC(\"SolidLayerComponentNuclearCharge\")\n yield \"<ElementSymbol>%s</ElementSymbol>\" % GLC(\"SolidLayerComponentElementSymbol\")\n yield \"</ChemicalElement>\"\n yield \"<StochiometricValue>%s</StochiometricValue>\" % GLC(\"SolidLayerComponentStochiometricValue\")\n yield \"<Percentage>%s</Percentage>\" % GLC(\"SolidLayerComponentPercentage\")\n yield \"</MaterialComposition>\"\n makeDataType(\"MaterialThickness\", \"SolidLayerThickness\", GL)\n yield \"<MaterialTopology>%s</MaterialThickness>\" % GL(\"SolidLayerTopology\")\n makeDataType(\"MaterialTemperature\", \"SolidLayerTemperature\", GL)\n yield \"<Comments>%s</Comments>\" % GL(\"SolidLayerComment\")\n yield \"</Layer>\"\n yield \"</Solid>\"\n yield \"</Solids>\"",
"def stippled_countless2d(data):\n sections = []\n\n # This loop splits the 2D array apart into four arrays that are\n # all the result of striding by 2 and offset by (0,0), (0,1), (1,0),\n # and (1,1) representing the A, B, C, and D positions from Figure 1.\n factor = (2, 2)\n for offset in np.ndindex(factor):\n part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))]\n sections.append(part)\n\n a, b, c, d = sections\n\n ab_ac = a * ((a == b) | (a == c)) # PICK(A,B) || PICK(A,C) w/ optimization\n ab_ac |= b * (b == c) # PICK(B,C)\n nonzero = a + (a == 0) * (b + (b == 0) * c)\n return ab_ac + (ab_ac == 0) * (d +\n (d == 0) * nonzero) # AB || AC || BC || D",
"def _is_skull_stripped(imgs):\n\n def _check_img(img):\n data = np.abs(nb.load(img).get_fdata(dtype=np.float32))\n sidevals = (\n data[0, :, :].sum()\n + data[-1, :, :].sum()\n + data[:, 0, :].sum()\n + data[:, -1, :].sum()\n + data[:, :, 0].sum()\n + data[:, :, -1].sum()\n )\n return sidevals < 10\n\n return all(_check_img(img) for img in imgs)",
"def sld(self, sf):\n raise NotImplementedError()",
"def CreateSlitObject(self, my_maze):\n # # The x and y position describe the point, where the middle (in x direction) of the top edge (y direction)\n # of the lower wall of the slit is...\n\n if self.shape == 'LongT':\n pass\n # self.slitpoints[i]\n if self.shape == 'SPT':\n if self.size == 'L' and self.solver == 'ant':\n slitLength = 4.1\n # this is the left (inside), bottom Slit\n self.slitpoints[0] = np.array([[self.slits[0], 0],\n [self.slits[0], slitLength],\n [self.slits[0] + self.wallthick, slitLength],\n [self.slits[0] + self.wallthick, 0]]\n )\n # this is the left (inside), upper Slit\n self.slitpoints[1] = np.array([[self.slits[0] - 0.05, slitLength + self.exit_size],\n [self.slits[0] + 0.1, self.arena_height],\n [self.slits[0] + self.wallthick + 0.1, self.arena_height],\n [self.slits[0] + self.wallthick - 0.05, slitLength + self.exit_size]]\n )\n\n # this is the right (outside), lower Slit\n self.slitpoints[2] = np.array([[self.slits[1], 0],\n [self.slits[1] + 0.1, slitLength],\n [self.slits[1] + self.wallthick + 0.1, slitLength],\n [self.slits[1] + self.wallthick, 0]]\n )\n # this is the right (outside), upper Slit\n self.slitpoints[3] = np.array([[self.slits[1] + 0.2, slitLength + self.exit_size],\n [self.slits[1] + 0.2, self.arena_height],\n [self.slits[1] + self.wallthick + 0.2, self.arena_height],\n [self.slits[1] + self.wallthick + 0.2, slitLength + self.exit_size]]\n )\n\n # elif size == 'M' or size == 'XL'\n else:\n slitLength = (self.arena_height - self.exit_size) / 2\n # this is the left (inside), bottom Slit\n self.slitpoints[0] = np.array([[self.slits[0], 0],\n [self.slits[0], slitLength],\n [self.slits[0] + self.wallthick, slitLength],\n [self.slits[0] + self.wallthick, 0]]\n )\n # this is the left (inside), upper Slit\n self.slitpoints[1] = np.array([[self.slits[0], slitLength + self.exit_size],\n [self.slits[0], self.arena_height],\n [self.slits[0] + self.wallthick, self.arena_height],\n [self.slits[0] + self.wallthick, slitLength + self.exit_size]]\n )\n\n # this is the right (outside), lower Slit\n self.slitpoints[2] = np.array([[self.slits[1], 0],\n [self.slits[1], slitLength],\n [self.slits[1] + self.wallthick, slitLength],\n [self.slits[1] + self.wallthick, 0]]\n )\n # this is the right (outside), upper Slit\n self.slitpoints[3] = np.array([[self.slits[1], slitLength + self.exit_size],\n [self.slits[1], self.arena_height],\n [self.slits[1] + self.wallthick, self.arena_height],\n [self.slits[1] + self.wallthick, slitLength + self.exit_size]]\n )\n\n # slit_up\n my_maze.CreatePolygonFixture(vertices=self.slitpoints[0].tolist())\n my_maze.CreatePolygonFixture(vertices=self.slitpoints[2].tolist())\n\n # slit_down\n my_maze.CreatePolygonFixture(vertices=self.slitpoints[1].tolist())\n my_maze.CreatePolygonFixture(vertices=self.slitpoints[3].tolist())\n\n # this is for all the 'normal SPT Mazes', that have no manufacturing mistakes \n else:\n self.slitpoints = np.empty((len(self.slits) * 2, 4, 2), float)\n for i, slit in enumerate(self.slits):\n # this is the lower Slit\n self.slitpoints[2 * i] = np.array([[slit, 0],\n [slit, (self.arena_height - self.exit_size) / 2],\n [slit + self.wallthick, (self.arena_height - self.exit_size) / 2],\n [slit + self.wallthick, 0]]\n )\n\n my_maze.CreatePolygonFixture(vertices=self.slitpoints[2 * i].tolist())\n\n # this is the upper Slit\n self.slitpoints[2 * i + 1] = np.array([[slit, (self.arena_height + self.exit_size) / 2],\n [slit, self.arena_height],\n [slit + self.wallthick, self.arena_height],\n [slit + self.wallthick,\n 
(self.arena_height + self.exit_size) / 2]]\n )\n\n my_maze.CreatePolygonFixture(vertices=self.slitpoints[2 * i + 1].tolist())\n\n # I dont want to have the vertical line at the first exit\n self.slitTree = BoxIt(np.array([[0, 0],\n [0, self.arena_height],\n [self.slits[-1], self.arena_height],\n [self.slits[-1], 0]]),\n 0.1, without='right')\n\n for slit_points in self.slitpoints:\n self.slitTree = np.vstack((self.slitTree, BoxIt(slit_points, 0.01)))\n self.slitTree = cKDTree(self.slitTree)",
"def test_guider_start_flatsOn(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['flats'])\n self._guider_start(7, 20, 0, 0)",
"def per_shoebox_whitelist_iterator(self, sidx):\n Z = self.refl_table\n SOFF = Z[\"spots_offset\"]\n SSIZ = Z[\"spots_size\"]\n slow_size = 254\n panel_size = 254 * 254\n for idxpx in self.spots_pixels[SOFF[sidx]:SOFF[sidx]+SSIZ[sidx]]:\n ipanel = idxpx//panel_size; panelpx = idxpx%panel_size\n islow = panelpx//slow_size; ifast = panelpx%slow_size\n yield ipanel, islow, ifast",
"def make_flats(side='blue',overwrite=False):\r\n\r\n iraf.unlearn('flatcombine')\r\n iraf.flatcombine.ccdtype = \"\"\r\n iraf.flatcombine.process = \"no\"\r\n iraf.flatcombine.subsets = \"no\"\r\n iraf.flatcombine.rdnoise = \"RON\"\r\n iraf.flatcombine.gain = \"GAIN\"\r\n for aperture in ['0.5', '1.0', '1.5', '2.0']:\r\n flats = find_flats(aperture, side=side)\r\n if len(flats) > 0:\r\n if overwrite:\r\n iraf.delete('flat_%s_%s.fits' % (side, aperture), verify='no')\r\n iraf.delete('temp.fits' , verify='no')\r\n iraf.delete('tempsmooth.fits', verify='no')\r\n iraf.delete('norm_temp.fits', verify='no')\r\n # normalize the flat\r\n if side == 'blue': \r\n if len(flats) < 3:\r\n iraf.flatcombine(','.join(flats), output='temp', reject='pclip')\r\n if len(flats) >= 3:\r\n iraf.flatcombine(','.join(flats), output='temp', reject='avsigclip') \r\n iraf.twodspec.longslit.dispaxis = 2\r\n # iraf.unlearn('response')\r\n # iraf.response.function = 'legendre'\r\n iraf.response.order = 100\r\n # iraf.response.high_rej = 5\r\n # iraf.response.low_rej = 2\r\n # iraf.response.niterate = 10\r\n # iraf.response('temp[0]', 'temp[0]',\r\n # 'flat_%s_%s.fits' % (side, aperture), interactive=\"no\")\r\n # iraf.response('norm_temp[0]', 'norm_temp[0]', \r\n # 'flat_%s_%s.fits' % (side, aperture), interactive=\"no\")\r\n # os.rename('temp.fits', 'raw_flat_%s_%s.fits' % (side, aperture))\r\n iraf.imfilter.boxcar('temp', 'tempsmooth', xwindow='1', ywindow='500')\r\n iraf.imarith('temp', '/', 'tempsmooth', 'norm_temp.fits')\r\n iraf.response('norm_temp[0]', 'norm_temp[0]', \r\n 'flat_%s_%s.fits' % (side, aperture), interactive=\"no\")\r\n os.rename('norm_temp.fits', 'raw_flat_%s_%s.fits' % (side, aperture))\r\n else:\r\n if len(flats) < 3:\r\n iraf.flatcombine(','.join(flats), output='temp', reject='pclip')\r\n if len(flats) >= 3:\r\n iraf.flatcombine(','.join(flats), output='temp', reject='avsigclip') \r\n iraf.twodspec.longslit.dispaxis = 1\r\n iraf.unlearn('response')\r\n iraf.response.function = \"spline3\" \r\n iraf.response.order = 100\r\n iraf.response.high_rej = 3\r\n iraf.response.low_rej = 3\r\n iraf.response.niterate = 3\r\n iraf.response('temp[0]', 'temp[0]',\r\n 'flat_%s_%s.fits' % (side, aperture), interactive=\"no\")\r\n # iraf.response('norm_temp[0]', 'norm_temp[0]', \r\n # 'flat_%s_%s.fits' % (side, aperture), interactive=\"no\")\r\n os.rename('temp.fits', 'raw_flat_%s_%s.fits' % (side, aperture))\r\n # iraf.unlearn('response')\r\n # iraf.response.function = \"spline3\"\r\n # iraf.response.order = 100\r\n # iraf.response.niterate = 3\r\n # iraf.response.low_rej = 3\r\n # iraf.response.high_rej = 3\r\n # if side == 'blue':\r\n # iraf.twodspec.longslit.dispaxis = 2\r\n # else:\r\n # iraf.twodspec.longslit.dispaxis = 1\r\n \r\n\r\n # measure flat-field error from sigma images\r\n iraf.unlearn('imcombine')\r\n iraf.imcombine.reject = 'avsigclip'\r\n iraf.imcombine(','.join(flats), output='flat', sigma='sigma', scale='mode')\r\n iraf.imarith('sigma', '/', 'flat', 'frac')\r\n s = iraf.imstat('frac.fits', fields=\"mean\", nclip=20, Stdout=1, format=\"no\")\r\n print 'Flat field error: ', np.float(s[0])\r\n iraf.delete('flat.fits', verify=\"no\")\r\n iraf.delete('sigma.fits', verify=\"no\")\r\n iraf.delete('frac.fits', verify=\"no\")\r\n else:\r\n print \"No dome or internal flats for the %s arcsec slit.\" % aperture",
"def __calculateDDIstart(self, partedscans, partedspws):\n \n # Example of partedspws:\n # create 2 subMss with spw=0,1,2 and spw=3\n # partedSPWs = {0:['0','1','2'],1:['3']}\n #\n # create 3 subMSs with spw=0,1,2 spw=3 and spw=4,5\n # partedSPWs = {0:['0','1','2'],1:['3'],2:['4','5']}\n \n hasscans = True\n if len(partedscans) == 0:\n scans = ''\n hasscans = False\n\n # It needs to take the correlation selection into account\n corr_sel = self._arg['correlation']\n ddistartList = []\n \n # scan+spw separation axis \n if hasscans:\n count = 0\n for k,spws in partedspws.iteritems():\n for ks,scans in partedscans.iteritems():\n if self._msTool is None:\n self._msTool = mstool()\n self._msTool.open(self._arg['vis'],nomodify=False)\n else:\n self._msTool.reset()\n \n try:\n # The dictionary with selected indices\n seldict = self._msTool.msseltoindex(vis=self._arg['vis'],scan=scans,spw=spws,polarization=corr_sel)\n except:\n self._msTool.close()\n continue\n \n # Get the selected DD IDs\n ddis = seldict['dd'].tolist()\n ddsize = ddis.__len__()\n if count == 0:\n ddistart = 0\n \n # Create a ddistart list\n ddistartList.append(ddistart)\n ddistart = ddistart + ddsize\n count = count + 1\n \n # spw separation axis \n else:\n count = 0\n for k,spws in partedspws.iteritems():\n if self._msTool is None:\n self._msTool = mstool()\n self._msTool.open(self._arg['vis'],nomodify=False)\n else:\n self._msTool.reset()\n \n try:\n # The dictionary with selected indices\n seldict = self._msTool.msseltoindex(vis=self._arg['vis'],scan=scans,spw=spws, polarization=corr_sel)\n except:\n self._msTool.reset()\n continue\n \n # Get the selected DD IDs\n ddis = seldict['dd'].tolist()\n ddsize = ddis.__len__()\n if count == 0:\n ddistart = 0\n \n # Create a ddistart list\n ddistartList.append(ddistart)\n ddistart = ddistart + ddsize\n count = count + 1\n \n return ddistartList",
"def __do_split_haghverdi16(self, Dseg, tips):\n # sort distance from first tip point\n # then the sequence of distances Dseg[tips[0]][idcs] increases\n idcs = np.argsort(Dseg[tips[0]])\n # consider now the sequence of distances from the other\n # two tip points, which only increase when being close to `tips[0]`\n # where they become correlated\n # at the point where this happens, we define a branching point\n if True:\n imax = self.kendall_tau_split(Dseg[tips[1]][idcs],\n Dseg[tips[2]][idcs])\n if False:\n # if we were in euclidian space, the following should work\n # as well, but here, it doesn't because the scales in Dseg are\n # highly different, one would need to write the following equation\n # in terms of an ordering, such as exploited by the kendall\n # correlation method above\n imax = np.argmin(Dseg[tips[0]][idcs]\n + Dseg[tips[1]][idcs]\n + Dseg[tips[2]][idcs])\n # init list to store new segments\n ssegs = []\n # first new segment: all points until, but excluding the branching point\n # increasing the following slightly from imax is a more conservative choice\n # as the criterion based on normalized distances, which follows below,\n # is less stable\n ibranch = imax + 2 # this used to be imax + 1!\n # ibranch = int(0.95 * imax)\n return idcs[:ibranch]\n # ssegs.append(idcs[:ibranch])\n # TODO get rid of the following heuristics\n # define nomalized distances to tip points for the rest of the data\n # dist1 = Dseg[tips[1], idcs[ibranch:]] / Dseg[tips[1], idcs[ibranch-1]]\n # dist2 = Dseg[tips[2], idcs[ibranch:]] / Dseg[tips[2], idcs[ibranch-1]]\n # assign points according to whether being closer to tip cell 1 or 2\n # ssegs.append(idcs[ibranch:][dist1 <= dist2])\n # ssegs.append(idcs[ibranch:][dist1 > dist2])\n # return ssegs"
] |
[
"0.5802957",
"0.5298557",
"0.5084957",
"0.5063652",
"0.4890329",
"0.48611894",
"0.48507932",
"0.48137918",
"0.4810392",
"0.47764036",
"0.47670275",
"0.47526032",
"0.47370464",
"0.47212443",
"0.47186974",
"0.47151244",
"0.47017494",
"0.4694448",
"0.4681186",
"0.46714354",
"0.4667527",
"0.46503773",
"0.46459025",
"0.46277082",
"0.4613488",
"0.4609772",
"0.45957693",
"0.45952868",
"0.456014",
"0.45302284"
] |
0.73491573
|
0
|
Called when new line received from connection
|
def _on_read(self, line):
# Some game logic (or magic)
line = line.strip()
logger.info("RCV> %s", line)
if not line:
self.stream.close()
return
self.stream.write("echo: %s\n" % line)
# Wait for further input on this connection
self.wait()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def lineReceived(self,line):\n print \"data received:\",line,self.delimiter",
"def lineReceived(self, line):\n pass",
"def newLineEvent(self, line):\n self.newLine_callback(line)",
"def _on_connect(self, stream_reader, stream_writer):\n # Sometimes the remote side doesn't send the newline for the first\n # prompt. This causes our prompt matching to fail. Here we inject a\n # newline to normalize these cases. This keeps our prompt processing\n # simple.\n super().data_received(b\"\\n\")\n self._session._session_connected(stream_reader, stream_writer)",
"def handle_line(self, line):\n LOG.debug(\"Received line of input from client %s: %s\", self.addr, line)",
"def lineReceived(self, line):\n self.sendLine('reply '+line)",
"def lineReceived(self, line):\n log.msg('>> %s' % line)\n # Twisted's classes are old-style, so no super(), oh my...\n irc.IRCClient.lineReceived(self, line)",
"def lineReceived(self, line):\n if line and line.isdigit():\n self._expectedLength = int(line)\n self._buffer = []\n self._bufferLength = 0\n self.setRawMode()\n else:\n self.keepAliveReceived()",
"def connection_handler(self):\n\t\tline = yield self.read_line()\n\t\tyield self.sendall(line + \"\\r\\n\")",
"def handle(self):\n line = b\"\"\n try:\n while True:\n raw = self.request.recv(1024)\n if not raw:\n return\n raw = bytearray(raw)\n while True:\n splitter = raw.find(b\"\\r\")\n if splitter > -1:\n line = raw[1:splitter]\n raw = raw[splitter + 1 :]\n else:\n break\n\n self.handle_line(line.decode())\n except Exception as exc:\n _LOGGER.error(\n \"TCP: Handle: last line %s gave error: %s\", line.decode(), str(exc)\n )\n return",
"def test_process_packet_message_with_new_line(self):\n pkt = {'type': 'message',\n 'data': '\\n',\n 'endpoint': ''}\n self.ns.process_packet(pkt)\n assert not self.environ['socketio'].error.called",
"def outReceived(self, data):\n log.msg('got %r' % data)\n lines = (self._lineBuffer + data).split(b'\\n')\n self._lineBuffer = lines.pop(-1)\n self._linesReceived.extend(lines)\n # XXX - not strictly correct.\n # We really want onOutReceived to fire after the first 'cftp>' prompt\n # has been received. (See use in OurServerCmdLineClientTests.setUp)\n if self.onOutReceived is not None:\n d, self.onOutReceived = self.onOutReceived, None\n d.callback(data)\n self.buffer += data\n self._checkForCommand()",
"def lineReceived(self, line):\n self.irc_logger.info(line)\n\n parts = line.split(' ')\n command = parts[1]\n\n # Don't fire if we haven't booted the event manager yet\n if self.event_manager:\n self.event_manager.fire(\"irc.raw\", command, line)\n\n # Call Twisted handler\n super(CardinalBot, self).lineReceived(line)",
"def on_line(self, stream_name, line):\n now = datetime.datetime.utcnow()\n delay = now - self.last_msg\n self.last_msg = now\n record = IOLogRecord(delay.total_seconds(), stream_name, line)\n self.on_new_record(record)",
"def lineReceived(self, line):\n if not line:\n self.transport.write(\n '{0}{1}'.format(self._uidServer.getCertificate(\n '\\n'.join(self._buff)), self.delimiter))\n self.transport.loseConnection()\n else:\n self._buff.append(line)",
"def lineReceived(self, line):\n rbuffer = StringIO()\n po=sys.stdout\n sys.stdout = rbuffer\n err=False\n if not hasattr(self,\"dc\"):\n self.dc={\"self\":self.server}\n print \"dc:\", self.dc\n try: exec(line,self.dc)\n except Exception as e: err=e\n except KeyboardInterrupt : pass\n # remove backeffect on dictionary\n if self.dc.has_key('__builtins__'): \n del self.dc['__builtins__']\n # update data context\n # remember to restore the original stdout!\n sys.stdout = po\n print '>u> '+line\n if err: out = self.pre_e+str(e)+self.post_e\n else: out = rbuffer.getvalue()\n if out!=\"\": print '>s> ' + out",
"def lineReceived(self,data,originator):\r\n\r\n self.log('Signal','Received Line: %s...' % data[0:10],'lineReceived')\r\n \r\n #print \"*** network to node: \" + repr(data)\r\n if data == 'NodeJoined':\r\n self.addNetworkedNode(originator)\r\n elif data == 'NodeLeft':\r\n self.removeNetworkedNode(originator)\r\n elif originator in self.nodes:\r\n\r\n self.log('Signal','%s sent result' % originator,'lineReceived')\r\n \r\n #self.update(data)\r\n\r\n data = data.split(':',1)\r\n\r\n if data[0] == 'Passwords':\r\n\r\n self.log('Signal','A networked node \"%s\" returned these passwords: %s' % (originator,repr(data[1])),'lineReceived')\r\n\r\n self.addResult(data[1])\r\n elif data[0] == 'Bench':\r\n self.log('Signal','A networked node \"%s\" returned these benches: %s' % (originator,repr(data[1])),'lineReceived')\r\n\r\n self.addResult(data[1])\r\n else:\r\n self.log('S Warning','Unknown source sent: %s' % repr(data),'lineReceived')",
"def lineReceived(self, line):\r\n def _cbError(why, msg):\r\n err(why, msg)\r\n reactor.stop() #@UndefinedVariable\r\n\r\n def _cbConnectionSuccess(view):\r\n self._user = view\r\n\r\n if isinstance(self._user, dict):\r\n self._privilege = 'console'\r\n else:\r\n self._privilege = 'admin'\r\n\r\n self.terminal.write('Connection to Master Established.')\r\n self.showPrompt()\r\n\r\n if self._mode == 'Username':\r\n self._mode = 'Password'\r\n self._username = line\r\n self.terminal.write('Password: ')\r\n elif self._mode == 'Password':\r\n self._mode = 'Terminal'\r\n self._password = line\r\n cred = UsernamePassword(self._username,\r\n sha256(self._password).hexdigest())\r\n d = self._factory.login(cred)\r\n d.addCallback(lambda p: p.callRemote(\"getUserView\", True))\r\n d.addCallback(_cbConnectionSuccess)\r\n d.addErrback(_cbError, \"Username/password login failed\")\r\n else:\r\n self.parseInputLine(line)",
"def enter(self):\n if self.pos < self.line_length():\n # If the position is not at the end of the line split the line\n self.buffer.split_line(self.line, self.pos)\n else:\n self.buffer.insert_line(\"\", self.line + 1)\n \n self.line += 1\n self.pos = 0\n self.has_changes = True",
"def eof_received(self):\n logger.debug(\"EOF from client, closing.\")\n self.connection_lost(None)",
"def writeline(self, line):\n self.sendall((six.text_type(line) + u'\\r\\n').encode(self.encoding))",
"def dataReceived(self, data):",
"def handle_line(self, line):\n _LOGGER.debug(\"TCP: Handle Line: Income raw string: %s\", line)\n try:\n event = SIAEvent(line)\n _LOGGER.debug(\"TCP: Handle Line: event: %s\", str(event))\n if not event.valid_message:\n _LOGGER.error(\n \"TCP: Handle Line: CRC mismatch, received: %s, calculated: %s\",\n event.msg_crc,\n event.calc_crc,\n )\n raise Exception(\"CRC mismatch\")\n if event.account not in HASS_PLATFORM.data[DOMAIN]:\n _LOGGER.error(\n \"TCP: Handle Line: Not supported account %s\", event.account\n )\n raise Exception(\n \"TCP: Handle Line: Not supported account {}\".format(event.account)\n )\n response = HASS_PLATFORM.data[DOMAIN][event.account].process_event(event)\n except Exception as exc:\n _LOGGER.error(\"TCP: Handle Line: error: %s\", str(exc))\n timestamp = datetime.fromtimestamp(time.time()).strftime(\n \"_%H:%M:%S,%m-%d-%Y\"\n )\n response = '\"NAK\"0000L0R0A0[]' + timestamp\n\n header = (\"%04x\" % len(response)).upper()\n response = \"\\n{}{}{}\\r\".format(\n AlarmTCPHandler.crc_calc(response), header, response\n )\n byte_response = str.encode(response)\n self.request.sendall(byte_response)",
"def lineReceived(self, line):\n if not self._uid:\n if not definition.validateSuffix(line):\n raise ValueError('Received address suffix is not valid.')\n \n self._uid = line\n \n self.transport.write('{0}{1}{1}'.format(\n dumpCertReq(createCertReq(self._key,\n 'Process-{0}'.format(line))),\n self.delimiter))\n else:\n self._buff.append(line)",
"def send_line(self, line):\n self.ser.write(line + \"\\r\")",
"def readline(self):\n while(True):\n rxcount = self.in_waiting \n if rxcount > 0: \n for pos, i in enumerate(self.buffer):\n # look for the \\n\n if i == 10: \n line=''\n linebuf = self.buffer[:pos]\n self.buffer = self.buffer[pos+1:]\n for c in linebuf:\n line += chr(c)\n return line",
"def sendline(self, l):\n self.send(l + b'\\n')",
"def on_new_line(self, line, is_full_line):\n try:\n if is_full_line:\n self._parse_v_option(line)\n self._parse_general_info(line)\n self._parse_header(line)\n except ParsingDone:\n pass # line has been fully parsed by one of above parse-methods\n return super(W, self).on_new_line(line, is_full_line)",
"def connectionMade(self):\n print \"connection received from\", self.addr",
"def test_newlinesAtEnd(self):\n self.client.lines = []\n self.client.msg(\"foo\", \"bar\\n\")\n self.assertEqual(self.client.lines, [\"PRIVMSG foo :bar\"])"
] |
[
"0.7500021",
"0.7399633",
"0.7342613",
"0.7329971",
"0.7170145",
"0.70820045",
"0.70148516",
"0.6992787",
"0.6955204",
"0.6838713",
"0.68079877",
"0.6682475",
"0.6677752",
"0.6536084",
"0.64785165",
"0.64569104",
"0.6385242",
"0.63659185",
"0.63574743",
"0.62926215",
"0.62211716",
"0.6181728",
"0.6162857",
"0.61565137",
"0.61424434",
"0.61313987",
"0.6114885",
"0.6080545",
"0.6079522",
"0.60653514"
] |
0.7630632
|
0
|
Fit the model to data matrix X and targets Y.
|
def fit(self, X, Y):
...
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def fit(self, X, y):\n self.model_x = X\n self.model_y = y",
"def fit ( self, X: np.ndarray, y: np.ndarray ):\n \n self.X = X\n self.y = y",
"def fit(self, X, y):\n self.X_train = X\n self.y_train = y",
"def fit(self, X, y):\n self.X_data = X\n self.y = y",
"def fit(self, X, y):\n Xs = self.scaler.fit_transform(X)\n self.model.fit(Xs, y)",
"def fit(self, X, y):\n self.model = self._initialize_model(X, y)\n self.model.optimize()",
"def fit(self, X,y):\n pass",
"def fit(self, X, Y, **fit_params):\n ...",
"def fit(self, X, Y, **fit_params):\n ...",
"def fit(self, X, y=None):\n # train on a training dataset\n self.logger.info(\n self.__name__ + ' is trained on {:d} samples with {:d} features.'.format(X.shape[0], X.shape[1]))\n pass",
"def fit(\n self,\n X: Optional[np.ndarray],\n y: Optional[Union[np.ndarray, np.array, pd.Series]],\n ):\n self.epochs_trained = 0\n # if design_matrix/targets None assume dataloaders were initialized elsewhere\n if X is not None:\n if type(y) is pd.Series:\n self.initialize_dataloaders(X, np.array(y))\n else:\n assert type(y) in (np.ndarray, np.array)\n self.initialize_dataloaders(X, y)\n self.network.load_state_dict(self.network_initial_state_dict)\n self.optimizer.load_state_dict(self.optimizer_initial_state_dict)\n if self.scheduler:\n self.scheduler.load_state_dict(self.scheduler_initial_state_dict)\n self._train()",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y=...):\n ...",
"def fit(self, X, y):",
"def fit(self, X, y):",
"def fit(self, X, y):",
"def fit(self, X, Y):\n if self.model is None:\n print(\"%s.fit: implement me\" % (self.__class__.__name__))",
"def fit(self, X, Y, **kwargs):\n raise NotImplementedError",
"def fit(self, X, y=None):\n \n if not self.variables:\n self.X = X.copy()\n self.variables = [x for x in X.columns]\n else:\n self.X = X[self.variables].copy()\n \n self.input_shape_ = X.shape \n \n return self",
"def fit(self,X,y):\n\n d = X.shape[1]\n # 1. sketch the data\n self.B,a = self._sketch(X,method=self.fd_mode)\n #H = B.T@B + (self.alpha+a)*np.eye(d)\n #self.H = H\n self.H_inv = self._get_inv() #np.linalg.pinv(H)\n self.coef_ = self.H_inv@(X.T@y) #np.linalg.solve(H, X.T@y)\n self.is_fitted = True",
"def fit(\n self,\n X: np.ndarray,\n Y: np.ndarray,\n val_X: Optional[np.ndarray] = None,\n val_Y: Optional[np.ndarray] = None,\n epochs: int = 300,\n patience: int = 25,\n verbose: int = 1\n ):\n\n #Data Checking\n assert X.shape[2] == self.n_features\n assert X.shape[0] == Y.shape[0]\n assert X.shape[1] == Y.shape[1]\n assert X.shape[1] == 80\n\n\n #Train model\n if val_X is None or val_Y is None:\n es = EarlyStopping(\n monitor=\"loss\",\n patience=patience,\n verbose=verbose,\n restore_best_weights=True\n )\n\n self.model.fit(\n X,\n Y,\n epochs=epochs,\n verbose=verbose,\n callbacks=[es]\n )\n else:\n assert val_X.shape[2] == self.n_features\n assert val_X.shape[0] == val_Y.shape[0]\n assert val_X.shape[1] == val_Y.shape[1]\n assert val_X.shape[1] == 80\n\n es = EarlyStopping(\n monitor=\"val_loss\",\n patience=patience,\n verbose=verbose,\n restore_best_weights=True\n )\n\n self.model.fit(\n X,\n Y,\n epochs=epochs,\n verbose=verbose,\n validation_data=(val_X, val_Y),\n callbacks=[es]\n )",
"def fit(self, X=None, y=None):\n if self.seed:\n random.seed(self.seed)\n np.random.seed(self.seed)\n tf.random.set_seed(self.seed)\n\n # pylint: disable=assignment-from-no-return\n self.model = self.create_model()\n\n if not self.model:\n raise RuntimeError(\"Model was not created.\")\n\n self.model.compile(optimizer=self.optimizer,\n loss=self.loss,\n metrics=self.metrics)\n\n self.history = self.model.fit([X[:, i] for i in range(X.shape[1])],\n y,\n epochs=self.epochs)"
] |
[
"0.76865864",
"0.7495343",
"0.74461746",
"0.7412673",
"0.7352247",
"0.7292747",
"0.7271479",
"0.7256889",
"0.7256889",
"0.7241033",
"0.72407746",
"0.72048783",
"0.72048783",
"0.72048783",
"0.72048783",
"0.72048783",
"0.72048783",
"0.72048783",
"0.72048783",
"0.72048783",
"0.72048783",
"0.7199535",
"0.7199535",
"0.7199535",
"0.71853614",
"0.71727",
"0.71625483",
"0.7149676",
"0.71475214",
"0.7141439"
] |
0.75660306
|
1
|
Evaluate the decision_function of the models in the chain.
|
def decision_function(self, X):
...
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def evaluate(self, prediction_fn):\n pass",
"def decision_function(self, X):\n return self.predict(X, output=\"score\")",
"def decision_function(self, Xtt):\n # predict decision score on test dataset\n self.logger.info(\n self.__name__ + ' predicts decision scores on {:d} samples.'.format(Xtt.shape[0]))",
"def decision_function(self, X):\n # check whether model has been fit\n check_is_fitted(self, ['training_pseudo_label_', 'train_scores_',\n 'X_train_norm_', 'n_features_'])\n\n # check input array\n X = check_array(X)\n if self.n_features_ != X.shape[1]:\n raise ValueError(\"Number of features of the model must \"\n \"match the input. Model n_features is {0} and \"\n \"input n_features is {1}.\"\n \"\".format(self.n_features_, X.shape[1]))\n\n # get decision scores and return\n decision_scores = self._get_decision_scores(X)\n return decision_scores",
"def decision_function(self, X):\n X = _validate_X(X)\n return self.best_estimator_.decision_function(X)",
"def decision_function(self, X):\n self._check_is_fitted('decision_function')\n return self.best_estimator_.decision_function(X)",
"def decision_function(self, X):\n check_is_fitted(self, ['model_', 'history_'])\n X = check_array(X)\n\n if self.preprocessing:\n X_norm = self.scaler_.transform(X)\n else:\n X_norm = np.copy(X)\n\n # Predict on X and return the reconstruction errors\n pred_scores = self.model_.predict(X_norm)\n return pairwise_distances_no_broadcast(X_norm, pred_scores)",
"def decision_function(self, X):\n y = self.__cls.decision_function(X)\n return y",
"def evaluate():\n model.eval()\n with torch.no_grad():\n loss, n = 0, 0\n for xb, yb in valid_dl:\n n += len(xb)\n loss += loss_func(model(xb), yb) * len(xb)\n\n return loss/n",
"def evaluate(self, test_set, predicted_values, certainty):\r\n\r\n if self.classification_type == \"classification\":\r\n self.classification_evaluation(test_set, predicted_values, certainty)\r\n elif self.classification_type == \"regression\":\r\n self.regression_evaluation(test_set, predicted_values)",
"def compute(self, *args, **kwargs):\n for node in self.evaluation_sequence:\n node.evaluate()",
"def evaluate(model, test_files):\n print(\"Running predictions.\")\n models = load_model(model)\n predictions = predict(models, test_files)\n\n # # write predictions to file\n # write_predictions(\"evaluate_out.json\",predictions)\n evaluate_individual(predictions, test_files, models)\n evaluate_overall(predictions)",
"def evaluate(self, predictor_model) -> Any:\n raise NotImplementedError()",
"def predict(self,function,args):\r\n param = self.model._get_params()\r\n fs = []\r\n for p in self.chain:\r\n self.model._set_params(p)\r\n fs.append(function(*args))\r\n self.model._set_params(param)# reset model to starting state\r\n return fs",
"def decision(self, xs, explore=False, **kwargs):\n pass",
"def __call__(self, predictor_model) -> None:\n self.save_result(self.evaluate(predictor_model))",
"def decision_function(self, X):\n self._check_is_fitted()\n # X = _check_X(X, n_features=self.means_.shape[1]) TODO\n X = _check_X(X)\n if self.use_weights:\n ret = self._estimate_weighted_log_prob(X)\n else:\n ret = self._estimate_log_prob(X)\n return ret",
"def decision_function(self, obs=None, env=None):\n if self.n_exec_decision % 10000 == 0:\n print(f\"Predicting observation number {self.n_exec_decision}\")\n self.n_exec_decision += 1\n \n # Periscope bounding box\n x, y, z = ((80, 124), (12, 160), (None))\n \n # Predictions and actions\n prediction = self.graph.predict(\n np.array([obs[x[0]:x[1], y[0]:y[1], :]])\n )\n \n # index of the highest scored action by our graph\n action = np.argmax(prediction)\n \n return action, prediction",
"def eval_logic(self, checkDict):\n result = True\n #gets individual evaluations from children\n passList = []\n for child in self.children:\n myVal = child.eval_comparison(checkDict)\n passList.append(child.eval_comparison(checkDict))\n\n #if only one child returns the only boolean available\n if(len(passList) == 1):\n result = passList[0]\n\n #TODO: Combine following cases possibly\n #print(passList)\n #gets resutl if only 2 simple logics\n elif(len(passList) == 2 and len(self.operators) == 1):\n\n result = self.operators[0](passList[0], passList[1])\n else:\n #combines all children logic using the operators\n firstCheck = True\n opIndex = 0\n for i in range(0,len(passList)):\n if(firstCheck):\n firstCheck = False\n result = self.operators[opIndex](passList[0], passList[1])\n i+=1\n else:\n result = self.operators[opIndex](result,passList[i])\n opIndex += 1\n \"\"\"\n print('----------------------')\n print(result)\n \"\"\"\n return result",
"def compute(self) -> None:\n \n self.model.eval()\n \n with torch.no_grad():\n for (input, target, _) in self.loader:\n\n # self.model = self.model.train(False) # TEST @lacoupe\n output, _ = self.model(input)\n \n output = (output >= 0.5)\n \n for out, tar in zip(output, target):\n \n tar = bool(tar)\n \n if out and tar:\n self.confusion['true_positive'] += 1\n elif not out and not tar:\n self.confusion['true_negative'] += 1\n elif out and not tar:\n self.confusion['false_positive'] += 1\n elif not out and tar:\n self.confusion['false_negative'] += 1\n \n self.accuracy = (self.confusion['true_positive'] + self.confusion['true_negative']) \\\n / sum(list(self.confusion.values()))\n \n if (self.confusion['true_positive'] + self.confusion['false_positive']) == 0.:\n self.precision = 0.\n else:\n self.precision = self.confusion['true_positive'] \\\n / (self.confusion['true_positive'] + self.confusion['false_positive'])\n \n if (self.confusion['true_positive'] + self.confusion['false_negative']) == 0.:\n self.recall = 0.\n else:\n self.recall = self.confusion['true_positive'] \\\n / (self.confusion['true_positive'] + self.confusion['false_negative'])\n \n if (self.precision + self.recall) == 0.:\n self.f1_score = 0.\n else:\n self.f1_score = 2 * self.precision * self.recall / (self.precision + self.recall)",
"def evaluate(self):\n\n\t\t## We should be evaluating on dev dataset as well, so commenting x_test\n\t\t#self.model_score = self.model.evaluate(self.x_test, self.y_test_oh, batch_size=2048)\n\t\tself.model_score = self.model.evaluate(self.x_dev, self.y_dev_oh, batch_size=2048)\n\t\tprint(\"%s score = %f\\n\" %(self.modelName, self.model_score[1]))\n\n\t\t##Saving atucal vs predicted predictions\n\t\t##np.argmax returns the index where it see's 1 in the row\n\t\t#y_pred = np.argmax(self.model.predict(self.x_test, batch_size=2048), axis=1)\n\t\ty_pred = np.argmax(self.model.predict(self.x_dev, batch_size=2048), axis=1)\n\n\t\t## vstack will stack them in 2 rows, so we use Trasnpose to get them in column stack\n\t\t#output_predict = np.vstack((np.argmax(self.y_test_oh, axis=1), y_pred)).T\n\t\toutput_predict = np.vstack((np.argmax(self.y_dev_oh, axis=1), y_pred)).T\n\t\toutputFile = self.resultDir + \"/outputPredict.csv\" \n\t\tnp.savetxt(outputFile, output_predict, fmt=\"%5.0f\", delimiter=\",\")\n\n\t\t##Error Analysis of the prediction\n\t\terrorAnalysis(outputFile)\n\n\t\treturn self.model_score",
"def evaluate(model, eval_data, num_labels): \n # Turn on the evaluation state to ignore dropouts\n model.eval()\n results = [predict(model, x) for x, y in eval_data]\n f1_score, accuracy = get_metrics(np.array([y for x, y in eval_data]), results, num_labels)\n return f1_score, accuracy",
"def __call__(self, X, fit, func, ask=None, args=()):\r\n self.evaluations_just_done = 0\r\n if not self.maxevals or self.lam_reeval == 0:\r\n return 1.0\r\n res = self.reeval(X, fit, func, ask, args)\r\n if not len(res):\r\n return 1.0\r\n self.update_measure()\r\n return self.treat()",
"def _staged_decision_function(self, X):\n X = check_array(X, dtype=DTYPE, order=\"C\", accept_sparse='csr')\n score = self._init_decision_function(X)\n for i in range(self.estimators_.shape[0]):\n predict_stage(self.estimators_, i, X, self.learning_rate, score)\n yield score.copy()",
"def evaluate(self):\n pass",
"def evaluate(self):\n pass",
"def evaluate(self):\n\n\t\tself.model_score = self.model.evaluate(self.x_test, self.y_test, batch_size=2048)\n\t\tprint(\"%s score = %f\\n\" %(self.modelName, self.model_score[1]))\n\t\treturn self.model_score",
"def evaluate(self) :\n pass",
"def evaluate(self):\n predictions = self.model.predict(self.test[0])\n accuracy = accuracy_score(self.test[1], predictions)\n print(\"Accuracy:\", str(accuracy * 100) + \"%\")\n self.plot_results(predictions)",
"def evaluate_model(model_name, y_true, y_pred):\n\n # Calculate performance metrics\n rmse_eval = evaluate_rmse(y_true, y_pred)\n mae_eval = evaluate_mae(y_true, y_pred) \n r2_eval = evaluate_r2(y_true, y_pred)\n\n # Print results\n print_evaluation(model_name, mae_eval, rmse_eval, r2_eval)"
] |
[
"0.6772991",
"0.62682056",
"0.62380344",
"0.6168039",
"0.61620486",
"0.6074819",
"0.5991949",
"0.5981098",
"0.5978402",
"0.59382457",
"0.59114414",
"0.59068817",
"0.5877776",
"0.58687377",
"0.58312726",
"0.5826906",
"0.5823408",
"0.5768491",
"0.5758743",
"0.57502604",
"0.5746639",
"0.57414055",
"0.57110035",
"0.5699699",
"0.5697886",
"0.5697886",
"0.56827354",
"0.56677496",
"0.5666765",
"0.56638014"
] |
0.6659339
|
1
|
return the number of flat features from a pytorch variable
|
def num_flat_features(self, x):
return int(np.prod(x.size()[1:]))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def num_flat_features(self, x):\n size = x.size()[1:] # all dimensions except the batch dimension\n num_features = 1\n for s in size:\n num_features *= s\n return num_features",
"def num_flat_features(x):\n\n size = x.size()[1:] # All dimensions except batch dimension\n num_features = 1\n for s in size:\n num_features *= s\n\n return num_features",
"def num_flat_features(self, x):\n\n size = x.size()[1:] # All dimensions except batch dimension\n num_features = 1\n for s in size:\n num_features *= s\n\n return num_features",
"def get_num_features(self):\r\n \r\n return len(self[0]['x'])",
"def get_n_features(self):\n # +1 due to dummy bit\n return self.model.n_latent_features + 1",
"def n_train(self):\n return self.factors[0].shape[0]",
"def nr_features(self):\n if self.is_predict_only:\n return clib.xlinear_get_int_attr(self.model_chain, \"nr_features\")\n else:\n return self.model_chain[0].nr_features",
"def num_features(self):\n if self.x is None:\n return 0\n return 1 if self.x.dim() == 1 else self.x.size(1)",
"def flops_count(model):\n count = 0\n for n, m in model.named_modules():\n if isinstance(m, nn.Linear):\n count += m.in_features * m.out_features + m.bias.numel()\n return count",
"def _len_feature(tf_feature):\n assert(tf_feature)\n attrs = ['bytes_list', 'float_list', 'int64_list']\n for attr in attrs:\n if hasattr(tf_feature, attr):\n feature_vec = getattr(tf_feature, attr).value\n res = len(feature_vec)\n if res > 0:\n return res\n return 0",
"def __len__(self):\n return 1 + len(self.features)",
"def n_features(self):\n return self.components.shape[-1]",
"def num_feature(self):\n if self.handle is None:\n raise AttributeError('Model not loaded yet')\n out = ctypes.c_size_t()\n _check_call(_LIB.TreeliteQueryNumFeature(self.handle, ctypes.byref(out)))\n return out.value",
"def num_node_features(self):\n return self[0].num_node_features",
"def _len_feature_list(tf_feature_list):\n return len(tf_feature_list.feature)",
"def __len__(self):\n return len(self.features)",
"def getNrFeatures(self):\n return self.featureNames.size",
"def num_features(self) -> Dict[NodeType, int]:\n return self.num_node_features",
"def _n_features_out(self):\n return self.components_.shape[0]",
"def n_good_features_(self):\n return np.sum(self.important_features_)",
"def get_num_features(self, ndim: int) -> int:\n nb_features = 0\n for feature_group in self.features_group_list:\n nb_features += feature_group.num_features(ndim)\n return nb_features",
"def feature_len(self):\n return len(self.coord)",
"def nvar(self):\n return self.h.shape[0]",
"def num_training_examples(self):",
"def features_size(self) -> int:\n return len(self.data[0].features) if len(self.data) > 0 and self.data[0].features is not None else None",
"def dimension(self):\r\n a = 0\r\n for x in self.faces():\r\n if (len(x) > a):\r\n a = len(x) \r\n return a-1",
"def num_node_features(self) -> int:\n data, _, _ = self[0]\n if hasattr(data, 'num_node_features'):\n return data.num_node_features\n raise AttributeError(f\"'{data.__class__.__name__}' object has no \"\n f\"attribute 'num_node_features'\")",
"def feature_dim(self):\n raise NotImplementedError",
"def nfactors(self):\n return self.L.nnz",
"def acts_count(model):\n count = 0\n for n, m in model.named_modules():\n if isinstance(m, nn.Linear):\n count += m.out_features\n return count"
] |
[
"0.7284407",
"0.7197461",
"0.71856856",
"0.7124822",
"0.69571304",
"0.6939893",
"0.6765308",
"0.6756191",
"0.6736225",
"0.65332246",
"0.6499784",
"0.6494851",
"0.64596397",
"0.64460313",
"0.64324874",
"0.63832456",
"0.6366171",
"0.63653505",
"0.6354612",
"0.63422126",
"0.631895",
"0.63017166",
"0.628764",
"0.62857836",
"0.62771666",
"0.6258154",
"0.62542725",
"0.62448335",
"0.6244771",
"0.62411666"
] |
0.7314911
|
0
|
Process text using one or more providors registered with MLRegistry
|
def process_text(text, providers, logging_enabled=False):
registry = MLRegistry()
if not registry.validate_providers(providers):
raise ValueError(
"One or more providers are not valid {}".format(providers)
)
data = []
for provider in providers:
current_provider = registry.get_class(provider)(logging_enabled=logging_enabled)
processed_text = current_provider.process(text)
if processed_text is not None and not isinstance(processed_text, Exception):
data.append(processed_text)
return data
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def process_text(self, text, language):",
"def apply(self, text):",
"def _process(hass, text):\n intents = hass.data.get(DOMAIN, {})\n\n for intent_type, matchers in intents.items():\n for matcher in matchers:\n match = matcher.match(text)\n\n if not match:\n continue\n\n response = yield from hass.helpers.intent.async_handle(\n DOMAIN, intent_type,\n {key: {'value': value} for key, value\n in match.groupdict().items()}, text)\n return response",
"def proc_text(self, text):\n\n lemmas = []\n tokens = []\n doc = self(text)\n for tokObj in doc:\n if self._remove_punct and tokObj.is_punct:\n continue\n lemma = tokObj.lemma_\n text = tokObj.text\n if self._keep_only_alpha_num and not is_alpha_num(text):\n continue\n tok1 = text.lower()\n tok2 = lemma.lower()\n if tok1 in self._stopwords or tok2 in self._stopwords:\n continue\n\n if self._lower_case:\n text = text.lower()\n lemma = lemma.lower()\n\n lemmas.append(lemma)\n tokens.append(text)\n\n return ' '.join(lemmas), ' '.join(tokens)",
"def run(self, text, libName, ppt):\n return self.external.run(text, libName, ppt)",
"def handle(text, mic, profile, wxbot=None):\n logger = logging.getLogger(__name__)\n # get config\n if SLUG not in profile or \\\n 'age' not in profile[SLUG]:\n mic.say('性别检测插件配置有误,插件使用失败', cache=True)\n return\n age = profile[SLUG]['age']\n try:\n gen = guess()\n age = guess(model_dir='/home/.dingdang/myplugins/plugincode/22801',class_type='age')#使用绝对路径路径\n logger.debug(\"genda report: \", gen)\n if gen=='M':\n mic.say('帅哥你好!', cache=True)\n print('prediction:',age)\n else:\n mic.say('美女你好!', cache=True)\n print('prediction:',age)\n except Exception, e:\n logger.error(e)",
"def process_text(self):\n prp1 = preprocessor.Preprocess()\n processed_text = prp1.clean_data(self.text)\n self.vec1 = self.vec.transform(pd.Series(processed_text))",
"def process(self, fulltext=None):\r\n raise NotImplementedError(\"Please implement this in your importer\")",
"def process(self, message, **kwargs):\n if self.classifier is None:\n self.train()\n\n if message.get(\"text\") is not None:\n sid = SentimentIntensityAnalyzer()\n res = sid.polarity_scores(message.get(\"text\"))\n key, value = max(res.items(), key=lambda x: x[1])\n\n if key == \"pos\":\n key = \"Positive\"\n elif key == \"neg\":\n key = \"Negative\"\n else:\n key = \"Neutral\"\n\n custom_tokens = self.remove_noise(word_tokenize(message.get(\"text\")))\n t = self.classifier.prob_classify(dict([token, True] for token in custom_tokens))\n\n sentiment = 'Positive' if t.prob('Positive') > t.prob('Negative') else 'Negative'\n confidence = max(t.prob('Positive'), t.prob('Negative'))\n\n found, entry = self.manager.getMovieName(message.get(\"text\"))\n movie = str(entry['original_title'].item())\n \n genre_entry, aux_found_genre = self.manager.fuzzy_find_genre(message.get(\"text\"), with_ratio=True)[0]\n genre = genre_entry\n \n\n if len(message.get(\"text\")) > 20:\n entity = self.convert_to_rasa(sentiment, confidence, name=\"our_sentiment_extractor\")\n else:\n entity = self.convert_to_rasa(key, value, name=\"builtin_sentiment_extractor\")\n\n message.set(\"sentiment\", [entity], add_to_output=True)\n\n entity = self.convert_movie_to_rasa(movie, found)\n message.set(\"movies\", [entity], add_to_output=True)\n\n if message.get(\"text\").strip() == \"no\":\n found_genre = False\n else:\n found_genre = True if aux_found_genre > 80 else False\n\n entity = self.convert_movie_to_rasa(genre, found_genre, entity=\"genres_detected\")\n print(entity)\n message.set(\"genres\", [entity], add_to_output=True)",
"def analyse_text(custom_text, classifier, Resource, threshold, language='en'):\n return [(bytes(custom_text, 'utf-8'),\n _minimal_analysis(bytes(custom_text, 'utf-8'), classifier, Resource, threshold, language))]",
"def process(self, text: str = None, text_key: Any = None) -> Dict:\n if not text and not text_key:\n raise TypeError(\" user must provide text or tuple_text_key \")\n\n self.set_parameters(text, text_key)\n if not self.__pipe_mode:\n word_processed = self.single_document_processing()\n else:\n word_processed = self.multiple_document_processing()\n return word_processed",
"def process(self, processors) -> MultiLineString:",
"def evaluate(text, articles, no_preprocess=False):\n if not _trained:\n print(\"No classifier initialized. Make sure to do so first\")\n raise Exception\n\n if not no_preprocess:\n text = body_reader.get_words_in(text)\n\n if _classifier == \"euclid\":\n return euclidean.evaluate(articles, text)\n elif _classifier == \"bayes\":\n return bayes.evaluate(articles, text)\n elif _classifier == \"rocchio\":\n return rocchio.evaluate(articles, text)",
"def processText(self, text):\n\n acronyms = self.acronymExtractor.get_acronyms(text)\n for acronym, expansions in acronyms.items():\n X_train, y_train, labelToExpansion = self._getChoices(acronym)\n\n for expander_type in self.acronymExpanders:\n expander = self._createExpander(expander_type)\n\n # check if this is a suitable problem for predictive expanders\n if(isinstance(expander, PredictiveExpander)):\n if(len(X_train) == 0):\n # no point using prediction, no training data\n # move to next expander\n continue\n if(len(labelToExpansion) == 1):\n # no point using prediction, all same class\n # predict as the only present class\n expansion = AcronymExpansion(\n expansion=labelToExpansion[0],\n expander=expander.getType,\n confidence=min_confidence)\n expansions.append(expansion)\n continue\n\n X_transformed = expander.transform(X_train)\n\n expander.fit(X_transformed, y_train)\n\n X_test = expander.transform(\n [ExpansionChoice(article_id=None, article_text=text)])\n\n results, confidences = expander.predict(X_test, acronym)\n result = results[0]\n confidence = confidences[0]\n\n if(isinstance(expander, PredictiveExpander)):\n # always predicts, no need to check for None\n expansions.append(AcronymExpansion(expansion=labelToExpansion[\n result],\n expander=expander.getType(),\n confidence=confidence))\n else:\n # expansion from non-predictive may sometimes be None\n if(result):\n expansions.append(\n AcronymExpansion(expansion=result,\n expander=expander.getType(),\n confidence=confidence))\n\n acronyms[acronym] = self.expansionChooser(expansions)\n\n return acronyms",
"def processText(text):\n print(type(text))\n for line in text:\n print(line)\n return text",
"def _preprocess(self):\n self.data['sentences'] = self.data['text'].apply(self._tokenize_sent)\n self.data['nouns'] = self.data['sentences'].apply(self._get_nouns)\n # self._get_frequent_features()\n # self._compactness_pruning()\n # self._redundancy_pruning()\n # self._get_features()\n self._extract_opinions()",
"def __call__(self, text):\n return extract_references(text, self.preprocessor, self.model)",
"def init_detector(config):\n\n crf_list = config[\"detection\"][\"crf_ner_list\"].split(\",\")\n crf_model_list = [load(crf) for crf in crf_list]\n\n crf_ner_classic = None\n if \"crf_ner_classic\" in config[\"detection\"]:\n crf_ner_classic_list = config[\"detection\"][\n \"crf_ner_classic\"].split(\",\")\n crf_ner_classic = [load(crf) for crf in crf_ner_classic_list]\n\n # search for mail list\n corp_mail_list = []\n if config[\"detection\"][\"corp_mail_list\"]:\n with open(config[\"detection\"][\"corp_mail_list\"], \"r\") as f_in:\n for line in f_in:\n line = line.rstrip(\"\\n\")\n corp_mail_list.append(line)\n\n # build the system here\n nlp = None\n if \"nlp_model\" in config[\"detection\"]:\n nlp = spacy.load(config[\"detection\"][\"nlp_model\"])\n\n custom_word_list = []\n\n if \"custom_word_list\" in config:\n with open(config[\"custom_word_list\"], \"r\") as f_in:\n custom_word_list = [line.rstrip(\"\\n\") for line in f_in]\n\n # configuration of the proximity regexp\n regexp_config_dict = OrderedDict()\n if \"proximity_regexp_config\" in config:\n for key in config[\"proximity_regexp_config\"]:\n regexp_config_dict[key] = OrderedDict()\n regexp_config_dict[key][\"left_span_len\"] = int(\n config[\"proximity_regexp_config\"][key][\"left_span_len\"])\n\n regexp_config_dict[key][\"right_span_len\"] = int(\n config[\"proximity_regexp_config\"][key][\"right_span_len\"])\n\n with open(config[\n \"proximity_regexp_config\"][key][\"word_file\"], \"r\") as f_in:\n word_list = [normalize_text_proximity(\n line.rstrip(\"\\n\").strip()) for line in f_in]\n\n regexp_config_dict[key][\"word_list\"] = word_list\n\n low_priority_list = None\n if \"low_priority_list\" in config:\n low_priority_list = config[\"low_priority_list\"]\n\n my_detector = Detector(nlp,\n crf_model_list,\n load(config[\n \"detection\"][\"personal_email_detection\"]),\n crf_ner_classic,\n corp_mail_list=corp_mail_list,\n custom_word_list=custom_word_list,\n regexp_config_dict=regexp_config_dict,\n signature_max_distance=config[\"signature_max_distance\"],\n low_priority_list=low_priority_list)\n\n return my_detector",
"def predict_cli(text, config_filepath):\n load_classifier(config_filepath)\n print(classifier.predict(text))",
"def process_single_text(self, text, punctuation_remover, digit_remover, word_prepro_func, stopwords):\n data = {}\n sents = sent_tokenize(text.lower())\n words = []\n for sent in sents:\n # remove punctuation content\n sent = self.handle_punctuation(sent, punctuation_remover)\n words.extend(word_tokenize(sent))\n # words = text_to_word_sequence(text, filters=filt, lower=True, split=' ')\n # words = [w.lower() for w in self.nltk_tokenizer.tokenize(text)]\n\n # remove stopwords and numbers\n # words = [w.translate(digit_remover) for w in words if w not in stopwords and w.isalpha()]\n # remove empty \"words\"\n words = [w for w in words if w]\n if self.remove_digits:\n words = [w for w in [w.translate(digit_remover) for w in words] if w]\n if self.filter_stopwords:\n words = [w for w in words if w not in stopwords]\n data[\"words\"] = words\n if self.config.extract_pos:\n # pos tagging\n data[\"pos\"] = nltk.pos_tag(words)\n # stemming / lemmatization\n if self.config.prepro is not None:\n data[\"words\"] = [word_prepro_func((w, p)) for (w, p) in zip(words, data[\"pos\"])]\n # if not data[\"words\"]:\n # # warning(\"Text preprocessed to an empty list:\\n{}\".format(text))\n # return None\n return data",
"def _mechanic_text_processor_factory(text_pattern, attribute_name):\n def _mechanic_text_processor(card, text_mechanics):\n present = text_pattern in text_mechanics\n if present:\n card[attribute_name] = 1\n text_mechanics = text_mechanics.replace(text_pattern, '')\n return text_mechanics\n return _mechanic_text_processor",
"def process(self, tweet):\n\n #identify the applicable event keywords for this text\n text = self.cleanup_data(tweet.text)\n tokens = [str(t.lower()).translate(None, string.punctuation) for t in tweet.text.split()]\n applicable_tokens = []\n for phrase in self.match_event_tree.root.keywords:\n if phrase in \" \".join(tokens):\n applicable_tokens.append(phrase)\n\n self.match_event_tree.propogate_tweet(applicable_tokens, tweet)",
"def preprocess(self,text):\n return preprocess.get_tokens(text)",
"def __call__(self, tokenized_text):\n raise NotImplementedError()",
"def make_text_predictions(self, text, preprocessor):\n text = preprocessor.preprocess(text)\n y_pred = self.sgd_pipeline.predict_proba([text])\n return y_pred",
"def process_strings(self):\n for string in self.input:\n matcher = self.choose_algorithm()\n matcher.find_match(string, self.case_insensitive)\n self.__results = matcher.results\n\n if self.counter:\n self.__results = matcher.counts\n\n if self.__results:\n self.output(string)",
"def _proc(dat):\n def lemma(text):\n lemmatizer = WordNetLemmatizer()\n w_tokenizer = WhitespaceTokenizer()\n return [lemmatizer.lemmatize(w) for w in w_tokenizer.tokenize(text)]\n\n dat['text_lemmatized'] = dat['clean_comments'].apply(lemma)\n dat['text_lemmatized'] = dat['text_lemmatized'].apply(' '.join)",
"def postprocess(self, text):\r\n return text",
"def any_text_preprocessing(name):\n return hp.choice('%s' % name, [\n [tfidf(name + '.tfidf')],\n ])",
"def fit(self, texts):\n print('Processing text and fitting k-means...')\n texts = preprocess_text(texts)\n self.pipeline.fit(texts)\n\n print('Done.')\n return self"
] |
[
"0.6415086",
"0.5930939",
"0.5904197",
"0.5764646",
"0.57569325",
"0.5670612",
"0.5665805",
"0.5619918",
"0.561568",
"0.56010884",
"0.55854166",
"0.5555991",
"0.5553827",
"0.55413115",
"0.5493264",
"0.5479778",
"0.5477325",
"0.5474064",
"0.5462044",
"0.5457138",
"0.5439645",
"0.5437614",
"0.54181796",
"0.54032207",
"0.54027456",
"0.5401722",
"0.5369328",
"0.5362686",
"0.53532076",
"0.5329832"
] |
0.67856145
|
0
|
Configures the session to execute this target. This should only modify session.config, but gets access to the entire session to retrieve information about it.
|
def configure(self, session):
raise NotImplementedError
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def configure(self, conf):\n return",
"def configure(self, config: dict):\n self.config.update(config)",
"def do_config(self, args):\n self.config_command.cmdloop(\"Enter to config mode\")",
"def configure(self, config: ConfigParams):\n self.__mode = config.get_as_string_with_default('mode', self.__mode)\n self.__min_timeout = config.get_as_integer_with_default('min_timeout', self.__min_timeout)\n self.__max_timeout = config.get_as_integer_with_default('max_timeout', self.__max_timeout)",
"def _set_session(self):\n self.__session = sessionmaker(bind=self.__engine)()",
"def configure(self):\n\n pass",
"def configure_step(self):\n\n pass",
"def configure(self):\n pass",
"def configure(self):\n pass",
"def setConfiguration(self, config):\n raise NotImplementedError",
"def apply_user_configuration(self, config):\n self.logDisplay.set_logging_level(config['log'].get('logging_level', fallback='Verbose'))\n\n # MIDI\n self.winchMidiInputCombo.select_item(config['midi'].get('winch_midi_input', fallback='<no selection>'))\n self.midiOutputCombo.select_item(config['midi'].get('midi_output', fallback='<no selection>'))\n\n # OSC\n oscdef = config['osc']\n self.oscListenerConfig.set_OSC_port(oscdef.get('listener_addr', fallback='localhost'),\n oscdef.getint('listener_port', fallback=3751))\n\n self.oscSenderConfig.set_OSC_port(oscdef.get('sender_addr', fallback='localhost'),\n oscdef.getint('sender_port', fallback=3752))\n\n # DMX\n self.dmxSelect.select_item(config['dmx'].get('dmx_output_serial_port', fallback='<no selection>'))\n\n # winches\n for i, winchSelect in enumerate(self.winchSelects):\n key = \"winch_%d_output_serial_port\" % (i+1)\n winchSelect.select_item(config['winches'].get(key, fallback = '<no selection>'))\n return",
"def configure(self):\r\n pass",
"def execute_agent(self, configuration):\n pass",
"def configure(self):\n configurations = config.Configurations()\n self.credentials = configurations.credentials\n self.config = configurations.config",
"def _configure(self):\n pass",
"def configure_step(self):\n pass",
"def use_automatic_session_management(self):\n self._session_management = AUTOMATIC",
"def use_automatic_session_management(self):\n self._session_management = AUTOMATIC",
"def config(self):\n\t\tsys.stderr.write(\"Base class method called: config() This souldn't happen.\\n\")",
"def configure(self, options, conf):\n pass",
"def configure( self, csSection, submitPool ):\n\n VMDirector.configure( self, csSection, submitPool )\n self.reloadConfiguration( csSection, submitPool )",
"def configure(self, config: ConfigParams):\n parameters = config.get_section(\"parameters\")\n if len(parameters) > 0:\n self.__parameters = parameters",
"def config(self, config_dict):\r\n self._cfg.config = config_dict",
"def configure(self, session, config_data):\n r = self.emane_config.configure_emane(session, config_data)\n\n # extra logic to start slave Emane object after nemid has been\n # configured from the master\n config_type = config_data.type\n if config_type == ConfigFlags.UPDATE.value and self.session.master is False:\n # instantiation was previously delayed by self.setup()\n # returning Emane.NOT_READY\n self.session.instantiate()\n\n return r",
"def perform_config(self):\n\n super(PidfileApp, self).perform_config()\n\n if ('general' in self.cfg and 'pidfile' in self.cfg['general']):\n # Not set by commandline, but set in configuration\n pidfile = to_str_or_bust(self.cfg['general']['pidfile'])\n if pidfile and (pidfile != self._default_pidfilename):\n log.debug(\n _(\"Setting pidfile to %r by configuration.\"), pidfile)\n self._pidfilename = pidfile",
"def configure(self, *args):\n raise NotImplementedError(self, \"configure\")",
"def config(self, config):\n self._config = config",
"def configure(self, options, conf):",
"def configure(self, *args, **kwargs):\n raise NotImplementedError()",
"def configure(self, *args, **kwargs):\n raise NotImplementedError()"
] |
[
"0.60705197",
"0.5951739",
"0.59411156",
"0.58812964",
"0.58621",
"0.5860319",
"0.5845918",
"0.5825204",
"0.5825204",
"0.5813379",
"0.58120495",
"0.57986385",
"0.579658",
"0.57687813",
"0.5756923",
"0.57400143",
"0.5701752",
"0.5701752",
"0.56825835",
"0.56699854",
"0.56186354",
"0.56067115",
"0.56033015",
"0.55871546",
"0.55841374",
"0.5578294",
"0.5577832",
"0.5568598",
"0.5560248",
"0.5560248"
] |
0.7415877
|
0
|
co_filename of code objects created at runtime from the source that this Target describes, assuming no path mapping.
|
def co_filename(self):
assert (
self.filename is not None
), "co_filename requires Target created from filename"
return self.filename.strpath
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def source(self):\n return some.dap.source(py.path.local(self.co_filename))",
"def inferTargetName(self):\n target_name = None\n\n for filename in self.files:\n if DUMMY_CPP_FILE_NAME in filename:\n continue\n\n if \"/\" in filename or \"$\" in filename:\n return\n\n name, _ = os.path.splitext(filename)\n\n if target_name is None:\n target_name = name\n elif target_name != name:\n return\n\n if target_name is None:\n return\n\n self.name = target_name",
"def getsourcefile(object):\r\n filename = getfile(object)\r\n if string.lower(filename[-4:]) in ('.pyc', '.pyo'):\r\n filename = filename[:-4] + '.py'\r\n for suffix, mode, kind in imp.get_suffixes():\r\n if 'b' in mode and string.lower(filename[-len(suffix):]) == suffix:\r\n # Looks like a binary file. We want to only return a text file.\r\n return None\r\n if os.path.exists(filename):\r\n return filename\r\n # only return a non-existent filename if the module has a PEP 302 loader\r\n if hasattr(getmodule(object, filename), '__loader__'):\r\n return filename\r\n # or it is in the linecache\r\n if filename in linecache.cache:\r\n return filename",
"def info(target):\n co = target.__code__\n return (co.co_name, co.co_filename, co.co_firstlineno)",
"def get_name(self):\n\t\treturn self.source.get_name()",
"def name(self):\n return self._path or '__main__'",
"def extract_code_objects(pe):\n script_res = _get_scripts_resource(pe)\n dump = _resource_dump(pe, script_res)\n return _get_co_from_dump(dump)",
"def source_file_name_feature(self):\n return \"_\".join((C_FILE_NAME, self.file_image_name.value))",
"def getSource():",
"def get_source_prefix(self):\n return ''",
"def name(self):\r\n return conf.lib.clang_getCString(conf.lib.clang_getFileName(self))",
"def __GetWrapperFileName(cls, src):\n return FileUtils.GetBinPathForFile(src).replace('.i', '.swig.cc')",
"def get_source_unicode(obj):\n return inspect.getsource(obj)",
"def _getfilename(self):\n pass",
"def source_code(self):\n return str(self.source)",
"def ofile_name(self):\n return self.ofile",
"def __GetLibFileName(cls, src, name):\n bin_path = FileUtils.GetBinPathForFile(src)\n return os.path.join(os.path.dirname(bin_path), '_%s.so' % name)",
"def source_code(obj):\n print(inspect.getsource(obj))",
"def getsource(object):\r\n lines, lnum = getsourcelines(object)\r\n return string.join(lines, '')",
"def exe_filename(self):",
"def getImageName(self):\n return [os.path.basename(name) for name in self.meta['sources']]",
"def getsource(object):\n lines, lnum = getsourcelines(object)\n return string.join(lines, '')",
"def scriptpath(self, code) -> str:\n return ''",
"def source(self) -> str | Path:\n ...",
"def src_name(self) -> str:\n return self._src_name",
"def DependentAssembly(self) -> str:",
"def source(self):\n return self._source_code",
"def source_file_path(self) -> str:\n return self._source_file_path",
"def createSourceName(self, protocol, pfn):\n return pfn",
"def get_source_file(self):\n return self.get_attribute(\"source_file\")"
] |
[
"0.6200904",
"0.6148831",
"0.60650134",
"0.6062886",
"0.6043761",
"0.6038777",
"0.5963403",
"0.5944892",
"0.592063",
"0.59055865",
"0.5901258",
"0.58915967",
"0.58757555",
"0.5857606",
"0.5812992",
"0.57950807",
"0.57573414",
"0.57501376",
"0.57398427",
"0.57251495",
"0.56827784",
"0.56529194",
"0.56512",
"0.5620955",
"0.5619694",
"0.55797195",
"0.5548839",
"0.55166227",
"0.54981744",
"0.5479351"
] |
0.74443585
|
0
|
Same as self.filename.lines, if it is valid e.g. for objects.
|
def lines(self):
assert (
self.filename is not None
), "lines() requires Target created from filename"
return self.filename.lines
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __read_lines__(self):\r\n fd = open(self.input_file, \"r\")\r\n lines = fd.readlines()\r\n fd.close()\r\n return lines",
"def _get_file_lines(self):\n\n # check if the PO/POT file is readable\n if self.__name is None or not os.access(self.__name, os.R_OK):\n print_error_message(\"PO file does not exist or is not readable\")\n sys.exit( )\n\n # read the PO file\n pofile = file(self.__name, 'r')\n lines = pofile.readlines( )\n pofile.close( )\n\n return lines",
"def check_Lines(self):\n\n pass",
"def notparsablelines(self):\n return self._notparsable",
"def lines(self) -> TextLines:\n if self._lines is None:\n self._lines = tuple((self._string or \"\").splitlines())\n return self._lines",
"def read_lines(self):\n with open(self.pdb_file) as infile:\n self.lines = infile.readlines()",
"def _fetch_data_lines(self) -> list:\n with open(self.full_path, \"rb\") as file:\n all_lines = file.readlines()\n return self._screen_invalid_lines(all_lines)",
"def get_lines(self):\n return self.split('\\n')",
"def lines(self):\n return self.lines",
"def lines(self):\n return self._lines",
"def get_lines(self):\n return self._lines",
"def readlines(self):\n return list(self.iterlines())",
"def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None):\n source = None\n if loader is not None and hasattr(loader, \"get_source\"):\n source = loader.get_source(module_name)\n if source is not None:\n source = source.splitlines()\n if source is None:\n try:\n f = open(filename)\n try:\n source = f.readlines()\n finally:\n f.close()\n except (OSError, IOError):\n pass\n if source is None:\n return None, [], None, []\n\n encoding = 'ascii'\n for line in source[:2]:\n # File coding may be specified. Match pattern from PEP-263\n # (http://www.python.org/dev/peps/pep-0263/)\n match = re.search(r'coding[:=]\\s*([-\\w.]+)', line)\n if match:\n encoding = match.group(1)\n break\n source = [unicode(sline, encoding, 'replace') for sline in source]\n\n lower_bound = max(0, lineno - context_lines)\n upper_bound = lineno + context_lines\n\n pre_context = [line.strip('\\n') for line in source[lower_bound:lineno]]\n context_line = source[lineno].strip('\\n')\n post_context = [line.strip('\\n') for line in source[lineno+1:upper_bound]]\n\n return lower_bound, pre_context, context_line, post_context",
"def get_lines( self ):\n return self._line_info",
"def get_lines( self ):\n return self._line_info",
"def _data_lines(self):\n self._load()\n return self._data.setdefault(\"lines\", [])",
"def readlines(self):\n return [line for line in self]",
"def get_lines(self):\n\n return self.lines",
"def lines(self):\n try:\n return self._lines\n except:\n self._lines = [list(x) for x in self.line_generator()]\n return self._lines",
"def get_lines(obj):\n if type(obj) == StringIO:\n lines = obj.getvalue().split('\\n')\n elif type(obj) == str:\n with open(obj, 'r') as f:\n lines = f.readlines()\n lines = [l.strip() for l in lines]\n return lines",
"def __init__(self, filepath):\n self.filepath = filepath\n with open(filepath) as f:\n self.lines = f.readlines()\n #self.lines = open(self.filepath).readlines()",
"def test_file_readlines(self):\n FileWriter(self.multiline_path).write(self.multiline_string)\n line_list = FileReader(self.multiline_path).readlines()\n self.assertEqual(line_list, self.multiline_list)",
"def lines(self, lines):\n if self.local_vars_configuration.client_side_validation and lines is None: # noqa: E501\n raise ValueError(\"Invalid value for `lines`, must not be `None`\") # noqa: E501\n\n self._lines = lines",
"def is_line(self): \n return False",
"def readlines(self) -> list[bytes] | None:",
"def get_lines_from_source(self):\n extension = self.get_doc_file_extension()\n if extension in ('txt', ''):\n return tuple(line.decode('utf-8') for line in self.doc_file.readlines())\n elif extension == 'docx':\n docx_document = Docx(BytesIO(self.doc_file.read()))\n return tuple(paragrah.text for paragrah in docx_document.paragraphs)\n elif extension == 'pdf':\n raise NotImplementedError()\n else:\n raise ValueError(\"file_format not supported\")",
"def readlines(self):\n lines = []\n while True:\n line = self.readline()\n if line is None:\n return lines\n lines.append(line)",
"def is_line(self):\n return True",
"def is_line(self):\n return True",
"def isValid(self):\n return self.file_name != \"\" and self.line_number != 0"
] |
[
"0.6747216",
"0.6679091",
"0.66733927",
"0.6654729",
"0.6469234",
"0.6444568",
"0.6334337",
"0.6252677",
"0.6252224",
"0.6234618",
"0.6214764",
"0.6200841",
"0.61941946",
"0.6185534",
"0.6185534",
"0.61712074",
"0.6145789",
"0.6127659",
"0.61066437",
"0.6075771",
"0.59868294",
"0.5927086",
"0.5903732",
"0.59026724",
"0.5893178",
"0.5830887",
"0.58099097",
"0.5796532",
"0.5796532",
"0.57839143"
] |
0.6826756
|
0
|
Construct a new bakery.
|
def bakery(cls, size=200, _size_alert=None):
return Bakery(cls, util.LRUCache(size, size_alert=_size_alert))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def new(\n cls,\n baseplate: Baseplate,\n exchange: kombu.Exchange,\n connection: kombu.Connection,\n queue_name: str,\n routing_keys: Sequence[str],\n handler_fn: Handler,\n error_handler_fn: Optional[ErrorHandler] = None,\n health_check_fn: Optional[HealthcheckCallback] = None,\n serializer: Optional[KombuSerializer] = None,\n worker_kwargs: Optional[Dict[str, Any]] = None,\n retry_mode: RetryMode = RetryMode.REQUEUE,\n retry_limit: Optional[int] = None,\n ) -> \"KombuQueueConsumerFactory\":\n queues = []\n for routing_key in routing_keys:\n queues.append(kombu.Queue(name=queue_name, exchange=exchange, routing_key=routing_key))\n return cls(\n baseplate=baseplate,\n name=queue_name,\n connection=connection,\n queues=queues,\n handler_fn=handler_fn,\n error_handler_fn=error_handler_fn,\n health_check_fn=health_check_fn,\n serializer=serializer,\n worker_kwargs=worker_kwargs,\n retry_mode=retry_mode,\n retry_limit=retry_limit,\n )",
"def create_pumper():\n return _Kalamazoo()",
"def build(cls, **kwargs):\n return cls(kwargs) #pylint: disable=no-value-for-parameter",
"def make_bb_object(name, data):\n global BLACKBOARD, TRACE_LEVEL\n bb_obj = BB_object(name, data)\n if TRACE_LEVEL > 2:\n print \"\\tCreating {0} object: {1}\".format( type(data), bb_obj )\n BLACKBOARD[name] = bb_obj\n signal_creation_event(bb_obj)\n return bb_obj",
"def make_celery():\n\n celery_instance = Celery(\"celeryapp\",\n broker='amqp://guest@localhost//',\n backend='amqp://',\n include=['celeryapp.tasks'])\n celery_instance.config_from_object(settings.config)\n\n # # modify the TaskBase class\n # TaskBase = celery_instance.Task\n # class ContextTask(TaskBase):\n # abstract = True\n # def __call__(self, *args, **kwargs):\n # with app.app_context():\n # return TaskBase.__call__(self, *args, **kwargs)\n # celery_instance.Task = ContextTask\n return celery_instance",
"def _create_builder(self, tmp_dir):\n return cifuzz.InternalGithubBuilder(self.PROJECT_NAME,\n self.PROJECT_REPO_NAME, tmp_dir,\n self.SANITIZER, self.COMMIT_SHA,\n self.PR_REF)",
"def __init__(self, length, breadth, height, producingAPI):\n\n\t\tself.length = length\n\t\tself.breadth = breadth\n\t\tself.height = height\n\n\t\tself.producingAPI = producingAPI",
"def make(self):\n pass",
"def create(cls, _):\n return cls",
"def __init__(self) -> None:\n self._queue: Queue[Dict[str, int]] = Queue() # pylint:disable=E1136\n self._pilothouse = Pilothouse(self._queue)",
"def getBuilder():",
"def __init__(self, params):\r\n _params = {'max_e_value': 1e-30,\r\n 'min_pct_id': 0.90,\r\n 'num_fragments': 3,\r\n 'taxonomy_depth': 4}\r\n _params.update(params)\r\n\r\n try:\r\n id_to_taxonomy_fp = params['id_to_taxonomy_fp']\r\n except KeyError:\r\n raise ValueError(\r\n \"id_to_taxonomy_filepath must be provided to %s\" %\r\n self.Name)\r\n\r\n # Create the blast database if it hasn't been provided\r\n if 'blast_db' not in params or params['blast_db'] is None:\r\n try:\r\n reference_seqs_fp = params['reference_seqs_fp']\r\n except KeyError:\r\n raise ValueError(\r\n \"refseqs_fp or blast_db must be provided to %s\" %\r\n self.Name)\r\n blast_db, self._db_files_to_remove = \\\r\n build_blast_db_from_fasta_path(reference_seqs_fp)\r\n else:\r\n blast_db = params['blast_db']\r\n self._db_files_to_remove = []\r\n\r\n self._taxon_assigner = BlastTaxonAssigner(\r\n {'blast_db': blast_db,\r\n 'id_to_taxonomy_filepath': id_to_taxonomy_fp,\r\n 'Max E value': _params['max_e_value'],\r\n 'Min percent identity': _params['min_pct_id']\r\n })\r\n\r\n ChimeraChecker.__init__(self, _params)",
"def __init__(self, config, **kwargs):\n\n self.config = dict(config)\n self.worker = self.config['worker']\n self.beanstalk = beanstalkc.Connection(**config['queue']['connection'])\n self.beanstalk.watch(config['worker']['queue'])\n self.beanstalk.ignore('default')\n self.name = kwargs.get('name', str(uuid.uuid4()))\n self.logger = logging.getLogger('worker.Worker.%s' % self.name)",
"def create_builder():\r\n stream = Stream()\r\n return stream.fork()",
"def _create(cls, builder_name, bucket=None, properties=None,\n buildbot_changes=None, tags=None, critical=None):\n if not isinstance(buildbot_changes, (types.NoneType, list)):\n raise ValueError('buildbot_changes must be a list')\n\n return cls(\n bucket=bucket,\n builder_name=builder_name,\n properties=properties,\n buildbot_changes=buildbot_changes,\n tags=tags,\n critical=bool(critical) if critical is not None else (True),\n )",
"def __init__(self, timeout=2):\n self.discover_bulbs(timeout) # Discovering bulbs",
"def create(self):\n\t\tself.creating += 1\n\t\ttry:\n\t\t\tself.adopt(self.factory())\n\t\tfinally:\n\t\t\tself.creating -= 1",
"def __init__(self, goal):\n self._name = goal.get('name', '')\n self._description = goal.get('description', '')\n self._build_type = goal.get('buildType', 'minSizeRel')\n self._build_vars = goal.get('buildVars', {})\n self._build_goal = goal.get('buildGoal', self._name)\n self._artifacts = goal.get('artifacts', [])\n self._builds = {}\n for b in goal['builds']:\n vars = b.get('buildVars', self._build_vars)\n type = b.get('buildType', self._build_type)\n build_goal = b.get('buildGoal', self._build_goal)\n description = b.get('description', '')\n arch = b['arch']\n script = b.get('script', None)\n artifacts = b.get('artifacts', self._artifacts)\n self._builds[arch] = BuildSpec(goal=build_goal,\n type=type,\n vars=vars,\n description=description,\n arch=arch,\n script=script,\n artifacts=artifacts)",
"def make_celery(app_name: str = __name__) -> Celery:\n return Celery(app_name)",
"def create_berzerk(self, berzerk_delay):\n self._type = Genre.BERZERK\n self._team = 0\n self._berzerk = berzerk_delay",
"def __init__(self):\n self.bag = {}",
"def __init__(self, thoonk, feed):\n Queue.__init__(self, thoonk, feed)\n\n self.feed_publishes = 'feed.publishes:%s' % feed\n self.feed_published = 'feed.published:%s' % feed\n self.feed_cancelled = 'feed.cancelled:%s' % feed\n self.feed_retried = 'feed.retried:%s' % feed\n self.feed_finishes = 'feed.finishes:%s' % feed\n self.feed_claimed = 'feed.claimed:%s' % feed\n self.feed_stalled = 'feed.stalled:%s' % feed\n self.feed_running = 'feed.running:%s' % feed\n \n self.job_finish = 'job.finish:%s' % feed",
"def build(_):",
"def new(cls, **kwargs):\n return cls(**kwargs)",
"def make(self, **kwargs):\n raise NotImplementedError",
"def __enter__(self):\n self.__init__()\n return self",
"def __init__(self):\n # BASE_DIR:///artifice/scraper/\n self.BASE_DIR = os.path.dirname(loc)\n\n # prototypes\n self._eth0 = '0.0.0.0'\n self._exposed_port = 8080\n self._db_name = 'site.db'\n self._redis_pword = 'password'\n self._redis_host = 'localhost'\n self._redis_port = 6379\n self._celery_broker_uname = 'michael'\n self._celery_broker_pword = 'michael123'\n self._celery_broker_host = 'localhost'\n self._celery_broker_virtual_host = 'michael_vhost'\n\n # flask\n self.TESTING = False\n self.URL_PREFIX = ''\n self.FLASK_PORT = self._exposed_port\n self.FLASK_HOST = '0.0.0.0'\n self.FLASK_DEBUG = False\n self.FLASK_USE_RELOADER = False\n self.FLASK_THREADED = True\n\n # logging\n self.LOG_FILE = 'flask.log'\n self.LOG_LEVEL = 'INFO'\n self.CELERY_LOG_LEVEL = 'ERROR'\n self.CELERY_LOG_FILE = 'celery.log'\n self.STDOUT = True\n\n # database\n self.DROP_TABLES = True\n self.SQLALCHEMY_TRACK_MODIFICATIONS = False\n self.SQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(\n os.path.join(self.BASE_DIR, self._db_name))\n\n # redis\n self.REDIS_URL = 'redis://{}:@{}:{}/0'.format(\n self._redis_pword,\n self._redis_host,\n self._redis_port)\n self.REDIS_HIT_COUNTER = 'HIT_COUNTER'\n\n # defaults\n self.ARGS_DEFAULT_LIMIT = 10\n self.ARGS_DEFAULT_STATUS = ['READY', 'TASKED', 'DONE']\n\n self.SUPERVISOR_ENABLED = True\n self.SUPERVISOR_DEBUG = False\n self.SUPERVISOR_POLITE = 1\n\n # celery\n self.CELERY_WORKERS = 8\n self.CELERY_MODULE = 'background'\n self.CELERY_BROKER = 'amqp://{}:{}@{}/{}'.format(\n self._celery_broker_uname,\n self._celery_broker_pword,\n self._celery_broker_host,\n self._celery_broker_virtual_host)\n self.CELERY_BACKEND = 'rpc://'\n self.CELERY_INCLUDE = ['artifice.scraper.background.tasks']\n\n # endpoints\n self.URL_FOR_STATUS = 'http://{}:{}/status'.format(self._eth0, self._exposed_port)\n self.URL_FOR_QUEUE = 'http://{}:{}/queue'.format(self._eth0, self._exposed_port)\n self.URL_FOR_CONTENT = 'http://{}:{}/content'.format(self._eth0, self._exposed_port)",
"def make_celery(config_name):\n app = Flask(__name__)\n\n # apply configuration\n cfg = os.path.join(os.getcwd(), 'config', config_name + '.py')\n app.config.from_pyfile(cfg)\n\n # Initialize aws client\n aws_client = boto3.Session(\n aws_access_key_id=app.config['AWS_ACCESS_KEY'],\n aws_secret_access_key=app.config['AWS_ACCESS_KEY_SECRET'],\n region_name=app.config['AWS_REGION']\n )\n\n # initialize extensions\n db.init_app(app)\n\n celery = Celery(\n app.import_name,\n broker=app.config['CELERY_BROKER_URL'],\n backend=app.config['CELERY_BACKEND_URL']\n )\n celery.conf.update(app.config)\n TaskBase = celery.Task\n\n class ContextTask(TaskBase):\n abstract = True\n\n def __call__(self, *args, **kwargs):\n with app.app_context():\n return TaskBase.__call__(self, *args, **kwargs)\n\n celery.Task = ContextTask\n\n return celery, app, aws_client",
"def create(self, **kargs):\n return self(**kargs)",
"def _make_launcher(self, *args):\n launcher = MRJobLauncher(args=['--no-conf', ''] + list(args))\n launcher.sandbox()\n\n launcher.mock_runner = Mock()\n launcher.mock_runner.cat_output.return_value = [b'a line\\n']\n\n launcher.make_runner = MagicMock() # include __enter__\n launcher.make_runner.return_value.__enter__.return_value = (\n launcher.mock_runner)\n\n return launcher"
] |
[
"0.5933725",
"0.57848924",
"0.5568412",
"0.55482167",
"0.5534499",
"0.54715455",
"0.5468907",
"0.5459821",
"0.5437807",
"0.54049957",
"0.537843",
"0.535118",
"0.53441674",
"0.5343216",
"0.5335349",
"0.5331593",
"0.5323526",
"0.52943146",
"0.52782476",
"0.5261039",
"0.5253455",
"0.5238339",
"0.5237733",
"0.5223767",
"0.5223429",
"0.52083445",
"0.52077603",
"0.5206279",
"0.5200861",
"0.5167552"
] |
0.68587977
|
0
|
Cloning version of _add_lazyload_options.
|
def _with_lazyload_options(self, options, effective_path, cache_path=None):
q = self._clone()
q._add_lazyload_options(options, effective_path, cache_path=cache_path)
return q
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _add_lazyload_options(self, options, effective_path, cache_path=None):\n\n key = ()\n\n if not cache_path:\n cache_path = effective_path\n\n for opt in options:\n if opt._is_legacy_option or opt._is_compile_state:\n ck = opt._generate_cache_key()\n if ck is None:\n self.spoil(full=True)\n else:\n assert not ck[1], (\n \"loader options with variable bound parameters \"\n \"not supported with baked queries. Please \"\n \"use new-style select() statements for cached \"\n \"ORM queries.\"\n )\n key += ck[0]\n\n self.add_criteria(\n lambda q: q._with_current_path(effective_path).options(*options),\n cache_path.path,\n key,\n )",
"def enable_lazy():\r\n global USE_LAZY\r\n USE_LAZY = True",
"def extra_options():\n extra_vars = {\n 'PrgEnv': [None, 'PrgEnv module to load, e.g., cray to load PrgEnv-cray, or None for automatic determination', CUSTOM],\n 'PrgEnv_load': [True, 'Load the PrgEnv module (if True) or just set the corresponding environment variable (if False)', CUSTOM],\n 'PrgEnv_family': [None, 'Declare to be a member of the PrgEnv family (if \\'PrgEnv\\), of the cpeToolchain family (if \\'cpeToolchain\\') or manually unload all known PrgEnv and cpe* modules (if None, needed when LMOD is not used)', CUSTOM],\n 'CPE_compiler': [None, 'Versionless compiler module to load, or None for automatic determination', CUSTOM],\n 'CPE_version': [None, 'Version of the CPE, if different from the version of the module', CUSTOM],\n 'CPE_load': [ 'first', 'First load the cpe module (if \\'first\\'), after the PrgEnv module (if \\'after\\'), load it at the end (if \\'last\\'), or do not load the cpe module (if None)', CUSTOM],\n 'cray_targets': [[], 'Targetting modules to load', CUSTOM],\n #'optional_example_param': [None, \"Example optional custom parameter\", CUSTOM],\n }\n return Bundle.extra_options(extra_vars)",
"def initialize_options(self):\n pass",
"def __init__(self, **options):\n self.__dict__.update(\n (k, v) for (k, v) in options.items() if not k.startswith('__'))",
"def initialize_options(self):",
"def initialize_options(self):\n pass",
"def initialize_options(self):\n pass",
"def lookup(self, entry):\n entry.add_lazy_fields(self.lazy_loader, self.field_map)",
"def enable_lazy(enable=True):\n global USE_LAZY\n USE_LAZY = enable",
"def extract_kwargs_from_options(options):\n return modulation_utils.extract_kwargs_from_options(gfsk_mod.__init__,\n ('self',), options)\n extract_kwargs_from_options=staticmethod(extract_kwargs_from_options)",
"def copy_options(options):\n\n try:\n options_list = getattr(options, 'options_list')\n except AttributeError:\n raise AttributeError(\"The module '{}' must contain an attribute called\"\n \" 'options_list'.\".format(options.__name__))\n for option in options_list:\n setattr(sys.modules[__name__], option, getattr(options, option))",
"def add_lazy_var(self, name):\n self._main_model.add_lazy_var(name)",
"def cacheOptionsForBuild(self):",
"def __new__(meta, name, bases, class_attributes):\r\n class_attributes, options = meta.find_options(class_attributes)\r\n class_attributes['options'] = options\r\n cls = super(ConfigBase, meta).__new__(meta, name, bases, class_attributes)\r\n for opt_name, option in options.iteritems():\r\n opt_get = functools.partial(cls.get_value, name=opt_name, option=option, presentation=True)\r\n opt_set = functools.partial(cls._set_value, name=opt_name, option=option)\r\n setattr(cls, opt_name, property(opt_get, opt_set))\r\n return cls",
"def extra(self, **kwargs):\n s = self._clone()\n if 'from_' in kwargs:\n kwargs['from'] = kwargs.pop('from_')\n s._extra.update(kwargs)\n return s",
"def _load_options(cls):\n return (\n db.Load(Request).load_only('id', 'pilot_id', 'division_id',\n 'system', 'ship_type', 'status', 'timestamp',\n 'base_payout'),\n db.Load(Division).joinedload('name'),\n db.Load(Pilot).joinedload('name'),\n )",
"def _lazy_load_hook(\n self, state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs):\n for name in self.lazy_buffer_names:\n key = prefix + name\n module_initialized = getattr(self, name).shape != (0,)\n state_initialized = state_dict[key].shape != (0,)\n if module_initialized and not state_initialized:\n raise RuntimeError(\n 'Can\\'t load non-initialized buffers in already '\n 'initialized modules')\n elif not module_initialized and state_initialized:\n # Here we need to avoid a tensor size mismatch\n # this is a regular tensor without a materialize\n # method, so we can just resize for the load logic to copy\n # the contents later to the correct device the module\n # was moved to\n getattr(self, name).resize_(state_dict[key].size())\n\n for name in self.lazy_parameter_names:\n # The parameter does not exist in the loaded ``state_dict`` if the\n # original module was serialized before initializing lazy\n # parameters (see comments of ``state_dict``).\n key = prefix + name\n module_initialized = not isinstance(\n getattr(self, name), UninitializedParameter)\n state_initialized = key in state_dict\n if module_initialized and not state_initialized:\n raise RuntimeError(\n 'Can\\'t load uninitialized parameters in already '\n 'initialized modules')\n elif not module_initialized and state_initialized:\n getattr(self, name).materialize(state_dict[key].shape)\n elif key not in state_dict and not module_initialized:\n param = UninitializedParameter()\n state_dict[key] = param",
"def test_lazyoptions(self):\n sess = create_session()\n mapper(User, users, properties = dict(\n addresses = relation(mapper(Address, addresses), lazy=False)\n ))\n l = sess.query(User).options(lazyload('addresses')).all()\n def go():\n self.assert_result(l, User, *user_address_result)\n self.assert_sql_count(testing.db, go, 3)",
"def mod_load(self):\n raise NotImplementedError(\"Mod load isn't overriden\")",
"def __updateOptions__(self, option_dict):\n out = self.default_options.copy()\n out.update(option_dict)\n return out",
"def _chain(self, **kwargs):\n obj = self._clone()\n obj.__dict__.update(kwargs)\n return obj",
"def extra_init(self):\n pass",
"def load_from_options(self, **kwargs):\n missing_required = [o for o in self.get_options()\n if o.required and kwargs.get(o.dest) is None]\n\n if missing_required:\n raise exceptions.MissingRequiredOptions(missing_required)\n\n return self.create_plugin(**kwargs)",
"def __copy__(self):\n cls = self.__class__\n result = cls.__new__(cls)\n to_copy = {\"_cache\", \"_buffers\", \"_parameters\", \"_modules\"}\n result.__dict__.update(\n {k: v.copy() if k in to_copy else v for k, v in self.__dict__.items()}\n )\n return result",
"def add_options(self, options):\n self.options = merge_dicts(self.options, options)",
"def __init__(self, load_config):\n super().__init__()\n self._load_config = load_config",
"def copy(self, **override):\n newargs = self._get_init_args()\n newargs.update(override)\n return self.__class__(**newargs)",
"def copy_command_options(self, command_function):\r\n def register_options(function):\r\n if hasattr(command_function, self.OPTIONS_ATTR):\r\n if not hasattr(function, self.OPTIONS_ATTR):\r\n setattr(function, self.OPTIONS_ATTR, deque())\r\n command_options = getattr(command_function, self.OPTIONS_ATTR)\r\n getattr(function, self.OPTIONS_ATTR).extendleft(command_options)\r\n return function\r\n return register_options",
"def __init__(self, **kwargs):\n\n self.opts = {}\n self.opts.update(kwargs)\n self._v_registry = {}"
] |
[
"0.7351524",
"0.5850695",
"0.57257646",
"0.54799783",
"0.5440046",
"0.542972",
"0.5402423",
"0.5402423",
"0.5373771",
"0.53724265",
"0.531277",
"0.5289197",
"0.52809143",
"0.51790977",
"0.51785725",
"0.5111756",
"0.5091345",
"0.5084972",
"0.50819707",
"0.5048309",
"0.5036518",
"0.5014684",
"0.49693868",
"0.49676925",
"0.49635458",
"0.49458992",
"0.49191478",
"0.4918325",
"0.49176922",
"0.4904541"
] |
0.76684904
|
0
|
Used by per-state lazy loaders to add options to the "lazy load" query from a parent query. Creates a cache key based on the given load path and query options; if a repeatable cache key cannot be generated, the query is "spoiled" so that it won't use caching.
|
def _add_lazyload_options(self, options, effective_path, cache_path=None):
key = ()
if not cache_path:
cache_path = effective_path
for opt in options:
if opt._is_legacy_option or opt._is_compile_state:
ck = opt._generate_cache_key()
if ck is None:
self.spoil(full=True)
else:
assert not ck[1], (
"loader options with variable bound parameters "
"not supported with baked queries. Please "
"use new-style select() statements for cached "
"ORM queries."
)
key += ck[0]
self.add_criteria(
lambda q: q._with_current_path(effective_path).options(*options),
cache_path.path,
key,
)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _with_lazyload_options(self, options, effective_path, cache_path=None):\n q = self._clone()\n q._add_lazyload_options(options, effective_path, cache_path=cache_path)\n return q",
"def get_cache_key(self, extra_args='', version=None):\r\n query, params = self.query.get_compiler(using=self.db).as_sql()\r\n query_string = (query % params).strip().encode(\"utf-8\")\r\n base_key = md5_constructor('.'.join((query_string, extra_args))).hexdigest()\r\n return cache.make_key('.'.join((self.model._meta.db_table, 'cachebot.results', base_key)), version=version)",
"def _cache_key(self, pk=\"all\", **kwargs):\n q_filter = \"\".join(\"%s=%s\" % (k, v) for k, v in kwargs.items()) or self.pk\n return \"%s.%s[%s]\" % (self.model.__tablename__, q_filter, pk)",
"def cache_key(self):\n return self.__class__.create_cache_key(self.key, **self.get_kwargs())",
"def _pre_key_for(self, *objects):\n obj_type = objtype(objects[0])\n return \"{}/{}/%s/{}\".format(self.cache_prefix, obj_type, str(self.pk))",
"def _effective_key(self, session):\n return self._cache_key + (session._query_cls,)",
"def cache_key(self):",
"def _get_cache_key(self, inputs: Dict[str, Tensor], prefix: str) -> Optional[str]:\n if self.time_varying_kwargs is not None:\n if len(set(inputs).intersection(self.time_varying_kwargs)) > 0:\n return None\n return f'{prefix}_static'",
"def cacheOptionsForBuild(self):",
"def _get_cache_plus_key(self):\n\n dogpile_region = self.cache_regions[self._cache_region.region]\n if self._cache_region.cache_key:\n key = self._cache_region.cache_key\n else:\n key = _key_from_query(self)\n return dogpile_region, key",
"def _concatenate_to_cache(self, key, value, query, attention_mask):\n # detect if we're initializing by absence of existing cache data.\n is_initialized = self.has_variable(\"cache\", \"cached_key\")\n cached_key = self.variable(\"cache\", \"cached_key\", jnp.zeros, key.shape, key.dtype)\n cached_value = self.variable(\"cache\", \"cached_value\", jnp.zeros, value.shape, value.dtype)\n cache_index = self.variable(\"cache\", \"cache_index\", lambda: jnp.array(0, dtype=jnp.int32))\n\n if is_initialized:\n *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape\n # update key, value caches with our new 1d spatial slices\n cur_index = cache_index.value\n indices = (0,) * len(batch_dims) + (cur_index, 0, 0)\n key = lax.dynamic_update_slice(cached_key.value, key, indices)\n value = lax.dynamic_update_slice(cached_value.value, value, indices)\n cached_key.value = key\n cached_value.value = value\n num_updated_cache_vectors = query.shape[1]\n cache_index.value = cache_index.value + num_updated_cache_vectors\n # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.\n pad_mask = jnp.broadcast_to(\n jnp.arange(max_length) < cur_index + num_updated_cache_vectors,\n tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),\n )\n attention_mask = combine_masks(pad_mask, attention_mask)\n return key, value, attention_mask",
"def generate_cache_key(cached, **kwargs):\r\n\r\n if isinstance(cached, QuerySet):\r\n key = str(cached.query)\r\n\r\n elif isinstance(cached, (Model, ModelBase)):\r\n key = '%s.%s:%s' % (cached._meta.app_label,\r\n cached._meta.module_name,\r\n ','.join('%s=%s' % item for item in kwargs.iteritems()))\r\n\r\n else:\r\n raise AttributeError(\"Objects must be queryset or model.\")\r\n\r\n if not key:\r\n raise Exception('Cache key cannot be empty.')\r\n\r\n key = clean_cache_key(key)\r\n return key",
"def create_cache_key(cls, pk: Union[int, str]) -> str:\n if cls.__cache_key__:\n try:\n return cls.__cache_key__.format(**{cls.get_primary_key(): pk})\n except KeyError:\n pass\n raise NameError( # pramga: no cover\n 'The cache key is undefined or improperly defined in this model.'\n )",
"def get_cache_key(self, *args, **kwargs):\n key = self._key(*self._inject_obj(args), **kwargs)\n\n if self._hash_algorithm:\n key = self._hash_algorithm(key).hexdigest()\n\n return key",
"def test_key_for_cacheable_function_with_mixed_cacheable_args(self):\n Meat.get_tastier_option(self.chicken, self.celery)\n expected_cache_key = 'tests.Meat.get_tastier_option;Chicken:20,MyNameIsCelery;'\n self.assertExpectedKeyInCache(expected_cache_key)",
"def precache_context_to_samples(source_path, target_path, num_parallel_precache=1):\n pass",
"def memoize_key(prefix, *args, **kwargs):\n key = hashlib.md5()\n for arg in itertools.chain(args, sorted(kwargs.items())):\n key.update(str(arg))\n return '%s:memoize:%s:%s' % (settings.CACHE_PREFIX,\n prefix, key.hexdigest())",
"def precache_item_map(path, cache_file, num_parallel_precache=1):\n filenames = utils.path_to_list(path, key_word='item')\n q = Queue(maxsize=num_parallel_precache)\n para = min(num_parallel_precache, len(filenames))\n\n def sub_proc(sub_filenames, q, idx):\n for _, a_file in enumerate(sub_filenames):\n df = reader.load_data(a_file)\n\n vid = np.asarray(df.vid.values, dtype=np.int64)\n cid = np.asarray(df.cid.values, dtype=np.int64)\n title_length = np.asarray(df.title_length.values, dtype=np.int64)\n class_id = np.asarray(df.class_id.values, dtype=np.int64)\n second_class = np.asarray(df.second_class.values, dtype=np.int64)\n is_intact = np.asarray(df.is_intact.values, dtype=np.int64)\n stars = df.stars.values\n\n sample_member = [vid, cid, title_length, class_id, second_class, is_intact, stars]\n\n sub_item_map = dict()\n collector = dict()\n\n for i,k in enumerate(vid):\n sample = [vid[i], cid[i], title_length[i], class_id[i], second_class[i], is_intact[i]]\n sample = np.asarray(sample, dtype=np.int64)\n sample = np.concatenate([sample, stars[i]])\n #print(sample, type(sample), sample.dtype)\n sub_item_map[k] = sample\n collector.update(sub_item_map)\n\n q.put(collector, block=True, timeout=False)\n\n proc_ent = [Process(target=sub_proc, args=(filenames[_i::para], q, _i)) for _i in range(para)]\n for x in proc_ent:\n x.start()\n\n all_item_map = dict()\n for i in range(para):\n sub_collector = q.get(block=True, timeout=None)\n all_item_map.update(sub_collector)\n\n with open(cache_file, 'wb') as f_save:\n pickle.dump(all_item_map, f_save)\n\n return cache_file",
"def try_insert(self, cache_key, paths):\r\n pass",
"def use_cached_files(self, cache_key):\r\n pass",
"def test_generate_cache_key_from_query_string(app, cache):\n\n @app.route('/works')\n @cache.cached(query_string=True)\n def view_works():\n return str(time.time())\n\n tc = app.test_client()\n\n # Make our first query...\n first_response = tc.get(\n '/works?mock=true&offset=20&limit=15'\n )\n first_time = first_response.get_data(as_text=True)\n\n # Make the second query...\n second_response = tc.get(\n '/works?limit=15&mock=true&offset=20'\n )\n second_time = second_response.get_data(as_text=True)\n\n # Now make sure the time for the first and second\n # query are the same!\n assert second_time == first_time\n\n # Last/third query with different parameters/values should\n # produce a different time.\n third_response = tc.get(\n '/v1/works?limit=20&mock=true&offset=60'\n )\n third_time = third_response.get_data(as_text=True)\n\n # ... making sure that different query parameter values\n # don't yield the same cache!\n assert not third_time == second_time",
"def cache_path(self, vpath):\n return os.path.join(self.cache_root, \n *vpath.split('/') )",
"def cachepath(self, *args, **kw):\n cachename = self.cachefunc(*args, **kw)\n ret = os.path.join(self.cachedir, cachename)+'.'+self.serializer\n return ret",
"def from_cache(self, cache_key=None, pk=None):\n if pk:\n cache_key = self._cache_key(pk)\n # if cache_key is none, the mangler will generate a MD5 from the query\n return FromCache(self.label, cache_key)",
"def _make_cache_key(key_prefix):\n if callable(key_prefix):\n cache_key = key_prefix()\n elif '%s' in key_prefix:\n cache_key = key_prefix % request.path\n else:\n cache_key = key_prefix\n\n cache_key = cache_key.encode('utf-8')\n\n return cache_key",
"def lazy_load(map_obj, key, loader, pather, cfg):\n if key not in map_obj:\n map_obj[key] = loader(pather(cfg))\n return map_obj[key]",
"def enable_cache(self, **kwargs: Dict[str, Any]) -> None:\n pass",
"def generate_cache_key(req, method: str = None) -> str:\n\n path = req.path\n if path.endswith('/'):\n path = path[:-1]\n\n if not method:\n method = req.method\n\n return f'{path}:{method.upper()}'",
"def cache_this(key):\n def decorator(func):\n @wraps(func)\n def inner(*args, **kwargs):\n KEY_PREFIX = \"context\"\n cache_key = \"%s:%s\" % (KEY_PREFIX, key)\n data = cache.get(cache_key)\n if not data:\n data = func(*args, **kwargs)\n cache.set(cache_key, data, kwargs.get('timeout', 30 * 60))\n return data\n return inner\n return decorator",
"def get_or_insert(cls, name, source=None):\n\t\tid = '%s-%s' % (cls._class_name(), cls.normalize_name(name))\n\t\treturn super(Cache, cls).get_or_insert(id, source=source)"
] |
[
"0.61270165",
"0.587668",
"0.54732877",
"0.5227243",
"0.5152111",
"0.5139174",
"0.51296777",
"0.5055537",
"0.5020471",
"0.49873075",
"0.49650243",
"0.49546865",
"0.48689407",
"0.4863961",
"0.4855229",
"0.48383257",
"0.48105037",
"0.47380558",
"0.4709959",
"0.46971396",
"0.46902302",
"0.4687641",
"0.46847317",
"0.4669189",
"0.46570295",
"0.4650225",
"0.464374",
"0.46273544",
"0.46178663",
"0.4617663"
] |
0.67983854
|
0
|
Add a criteria function that will be applied post-cache. This adds a function that will be run against the
|
def with_post_criteria(self, fn):
return self._using_post_criteria([fn])
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def run(self, func, *args):\n @wraps(func)\n def cached_func(*args):\n \"\"\"Run wise cache function\"\"\"\n try: # fails if cache is not instantiated\n return self.data['run'][func.__name__]\n except KeyError:\n value = func(*args)\n self.data['run'][func.__name__] = value\n return value\n return cached_func",
"def cache(func):\n results = {}\n\n @functools.wraps(func)\n def __cache(*args): # changed function\n nonlocal results # if this function call with parameters that already used\n if args in results.keys(): # then answer gets from dictionary\n # print(\"{} - got from cache\".format(args))\n rez = results[args]\n else:\n rez = func(*args)\n results[args] = rez\n return rez\n\n return __cache",
"def dynCache(*args, **kwargs)->None:\n pass",
"def declare_criterion(self, name):\n def decorator(thunk):\n thunk.criterion_name = name\n self._criteria[name] = thunk\n return thunk\n return decorator",
"def cache(fn):\n\tcache.c = dict()\n\tdef _fn(*args, **kwargs):\n\t\tkey = fn.__name__ + str(args) + str(kwargs)\n\t\ttry:\n\t\t\tret = cache.c[key]\n\t\texcept KeyError, e:\n\t\t\tret = fn(*args, **kwargs)\n\t\t\tcache.c[key] = ret\n\t\treturn ret\n\treturn _fn",
"def cachefor(name):\n def decorator(func):\n assert name not in cachefuncs\n cachefuncs[name] = func\n return func\n return decorator",
"def cache(func):\n\n def func_wrapper(self, hook=None, result_name=None):\n \"\"\"Wrapper to cache the result of a function.\"\"\"\n if self._cache is not None:\n c = self._cache.copy()\n c['cache'] = True\n return c\n else:\n ret = func(self, hook=hook, result_name=result_name)\n if not isinstance(ret, dict):\n raise TypeError( # pragma: no cover\n \"A dictionary was expected not '{0}'.\\nIssue with class '{1}'\"\n \"\".format(\n type(ret), type(self)))\n self._cache = ret\n ret = ret.copy()\n ret['cache'] = False\n return ret\n return func_wrapper",
"def cache_function(self, func):\n\n @wraps(func)\n def wrapper(*args):\n if self.__log:\n self.__logger.info(f\"Called {func.__name__} with {args}\")\n fileName = self.__build_file_name(func, args)\n\n if os.path.isfile(fileName):\n # Result is already stored in cache\n # Retrieve return value from cache\n return self.__read_cache(fileName)\n else:\n # Result is not stored in cache\n # Run function\n if len(args) > 0:\n returnVal = func(args)\n else:\n returnVal = func()\n\n # Store value in cache\n self.__write_cache(fileName, returnVal)\n\n # Give return value\n return returnVal\n\n return wrapper",
"def decorator(func):\n\n def wrapper():\n \"\"\"\n decorates the given function and makes it a lazy one.\n\n :returns: function result.\n \"\"\"\n\n result = caching_services.try_get('permanent', func, None)\n if result is not None:\n return result\n\n result = func()\n caching_services.try_set('permanent', result, func, None)\n return result\n\n return update_wrapper(wrapper, func)",
"def memoize(func):\r\n func.cache = {}\r\n return decorator(_memoize, func)",
"def cache(self, func=None, ignore=None, verbose=None,\r\n mmap_mode=False):\r\n if func is None:\r\n # Partial application, to be able to specify extra keyword\r\n # arguments in decorators\r\n return functools.partial(self.cache, ignore=ignore,\r\n verbose=verbose, mmap_mode=mmap_mode)\r\n if self.cachedir is None:\r\n return NotMemorizedFunc(func)\r\n if verbose is None:\r\n verbose = self._verbose\r\n if mmap_mode is False:\r\n mmap_mode = self.mmap_mode\r\n if isinstance(func, MemorizedFunc):\r\n func = func.func\r\n return MemorizedFunc(func, cachedir=self.cachedir,\r\n mmap_mode=mmap_mode,\r\n ignore=ignore,\r\n compress=self.compress,\r\n verbose=verbose,\r\n timestamp=self.timestamp)",
"def eval(self, func, *args, **kwargs):\r\n if self.cachedir is None:\r\n return func(*args, **kwargs)\r\n return self.cache(func)(*args, **kwargs)",
"def cached_func(*args):\n try: # fails if cache is not instantiated\n return self.data['run'][func.__name__]\n except KeyError:\n value = func(*args)\n self.data['run'][func.__name__] = value\n return value",
"def decorator(func):\n\n def wrapper(*args, **kwargs):\n \"\"\"\n decorates the given method or function and makes it a lazy one.\n\n :param object args: function positional arguments.\n :param object kwargs: function keyword arguments.\n\n :returns: function result.\n \"\"\"\n\n result = caching_services.try_get('extended.permanent', func,\n args, kwargs, **options)\n if result is not None:\n return result\n\n result = func(*args, **kwargs)\n caching_services.try_set('extended.permanent', result, func,\n args, kwargs, **options)\n return result\n\n return update_wrapper(wrapper, func)",
"def keep(self, func):\n def cachedFunc(*args, **kwargs):\n return self.retrieve(func, args, kwargs)\n cachedFunc.__name__ = func.__name__\n return cachedFunc",
"def add_criterion(self, criterion):\n self.criterion_entries.add(criterion)\n return self",
"def _create_new_criteria_function(lm,mm):\n return lambda D, i, u, j: parametrized_insertion_criteria(D, i, u, j,\n lm=lm, mm=mm)",
"def instance_cache(func):\n def _wrapper(self, *args, **kwargs):\n key = (func.__name__,) + args\n for pair in sorted(kwargs.items()):\n key += pair\n if key in self._cache:\n return self._cache[key]\n data = func(self, *args, **kwargs)\n self._cache[key] = data\n return data\n return _wrapper",
"def cached_func(*args):\n try: # fails if cache is not instantiated or if it is None\n value = self.data['step'][func.__name__]\n assert value is not None\n except (KeyError, AssertionError):\n value = func(*args)\n self.data['step'][func.__name__] = value\n return value",
"def cache(self, *args, **kwargs):\n\n default_fn = kwargs.pop('default_fn', None)\n\n def _run(*args, **kwargs):\n \"\"\"\n :param: *args\n :param: **kwargs (fname, force, verbose)\n \"\"\"\n\n fname = kwargs.pop('fname', None)\n force = kwargs.pop('force', False)\n verbose = kwargs.pop('verbose', True)\n copy = kwargs.get('copy', False)\n\n callback = None\n if len(args) > 1:\n callback, *args = args\n\n if len(args) > 0:\n adata = args[0] if isinstance(args[0], anndata.AnnData) else kwargs.get('adata')\n else:\n adata = kwargs.get('adata')\n\n assert isinstance(adata, anndata.AnnData), f'Expected `{adata}` to be of type `anndata.AnnData`.'\n\n if callback is None:\n callback = (lambda *_x, **_y: None) if default_fn is None else default_fn\n\n assert callable(callback), f'`{callblack}` is not callable.'\n\n if force:\n if verbose:\n print('Recomputing values.')\n res = callback(*args, **kwargs)\n cache_fn(res if copy else adata, fname, True, verbose, *args, **kwargs)\n return res\n\n # when loading to cache and copy is true, modify the copy\n if copy:\n adata = adata.copy()\n\n # we need to pass the *args and **kwargs in order to\n # get the right field when using regexes\n if not cache_fn(adata, fname, False, verbose, *args, **kwargs):\n if verbose:\n print('Computing values.')\n res = callback(*args, **kwargs)\n ret = cache_fn(res if copy else adata, fname, True, False, *args, **kwargs)\n\n assert ret, 'Caching failed.'\n\n return res\n\n # if cache was found and not modifying inplace\n return adata if copy else None\n\n cache_fn = self._create_cache_fn(*args, **kwargs)\n\n return _run",
"def Decorator(func):\n\n # First, decorate the function with Cached.\n decorated = cache_decorator(func)\n\n # Then, decorate it with the conditional cache logic.\n @functools.wraps(decorated)\n def Wrapped(*args, **kwargs):\n result = decorated(*args, **kwargs)\n if not cache_predicate(args, kwargs, result):\n decorated.DeleteCache(*args, **kwargs)\n return result\n\n return Wrapped",
"def ConditionallyCached(cache_predicate=lambda *a: True, **kwargs):\n cache_decorator = memcache_decorator.Cached(**kwargs)\n\n def Decorator(func):\n \"\"\"Decorator to cache a function's results.\"\"\"\n\n # First, decorate the function with Cached.\n decorated = cache_decorator(func)\n\n # Then, decorate it with the conditional cache logic.\n @functools.wraps(decorated)\n def Wrapped(*args, **kwargs):\n result = decorated(*args, **kwargs)\n if not cache_predicate(args, kwargs, result):\n decorated.DeleteCache(*args, **kwargs)\n return result\n\n return Wrapped\n\n return Decorator",
"def memoization(func):\n cache = {}\n\n @wraps(func)\n def _wrap(*args, **kwargs):\n key = (args, tuple(sorted(kwargs.items())))\n result = cache.get(key, None)\n if result:\n print(\"It's cached\")\n return result\n\n result = func(*args, **kwargs)\n cache[key] = result\n return result\n\n return _wrap",
"def _proxy_cache(from_func, to_func):\n to_func.cache_info = from_func.cache_info\n to_func.cache_clear = from_func.cache_clear",
"def after(cls, name=None, condition=None):\r\n\r\n def after_call(proxy_func):\r\n\r\n @wraps(proxy_func)\r\n def get_data(*args, **kwargs):\r\n\r\n write(\"WRAPPER:BEGIN\")\r\n # write(args)\r\n # write(kwargs)\r\n # write(dir(proxy_func))\r\n # write(proxy_func)\r\n result = proxy_func(*args, **kwargs)\r\n\r\n try:\r\n func_name = name or proxy_func.__name__\r\n\r\n success, cache_result = cls.call_cache_function(True, func_name, result, *args, **kwargs)\r\n return cache_result or result\r\n #\t\t\t\t\telse:\r\n #\t\t\t\t\t\twrite(\"Results did not match %r vs %r\\n\" % (result,condition))\r\n except:\r\n cache_result = None\r\n #\t\t\t\t\terror_reporter.captureException()\r\n raise\r\n\r\n return get_data\r\n\r\n return after_call",
"def decorate(func, *args, **kws):\n # setting cache expires for given decorated function,\n # if argument 'expire' is given.\n if expire:\n self.cache_expires[func] = expire\n else:\n self.cache_expires[func] = self.get_config().page_cache_expire\n if namespace_func:\n self.cache_nsfuncs[func] = namespace_func\n\n def do_cache(*args, **kws):\n \"\"\"\n A function works every time decorated functions are called.\n \"\"\"\n resp = self.response\n out = resp.out\n namespace = ''\n if self.cache_nsfuncs.get(func, None):\n namespace = self.cache_nsfuncs[func](self.request)\n p = urlsplit(self.request.url)[2]\n c = memcache.get(p, namespace)\n if c:\n # in case cache is found, use it \n # instead of rendering by calling function.\n out.write(c['body'])\n for k, i in c['hdr'].items():\n resp.headers[k] = i\n return\n\n r = func(*args, **kws)\n expire = self.cache_expires.get(func, 0)\n if expire == 0:\n return\n out.seek(0)\n try:\n p = urlsplit(self.request.url)[2]\n memcache.set(p, {'hdr':resp.headers,'body':out.read()},\n expire, namespace=namespace)\n logging.debug('%s is cahed' % p)\n except:\n memcache.flush_all()\n logging.debug('memcache is flashed.')\n return do_cache",
"def memoize(cls, func, *args, **kw):\n with cls._locks[func], cls._lock:\n if not isinstance(args, collections.Hashable):\n result = func(*args, **kw)\n return result\n if kw:\n # frozenset is used to ensure hashability\n key = args, frozenset(kw.items())\n else:\n key = args\n # func.cache attribute added by memoize\n cache = cls._caches[func]\n try:\n if key in cache:\n result = cache[key].result\n cls.shrink_cache()\n return result\n except TypeError:\n result = func(*args, **kw)\n return result\n\n start = time.time()\n result = func(*args, **kw)\n end = time.time()\n duration = end - start\n\n cache[key] = CacheEntry(func, key, duration, result,\n kw.get('expiration'), *args, **kw)\n cls.shrink_cache()\n cls._cache.append(cache[key])\n return result",
"def add_postcondition_to_checker(checker: CallableT, contract: Contract) -> None:\n # Add the postcondition to the list of postconditions stored at the checker\n assert hasattr(checker, \"__postconditions__\")\n assert isinstance(getattr(checker, \"__postconditions__\"), list)\n getattr(checker, \"__postconditions__\").append(contract)",
"def wrapper():\n\n result = caching_services.try_get('permanent', func, None)\n if result is not None:\n return result\n\n result = func()\n caching_services.try_set('permanent', result, func, None)\n return result",
"def register(self, function: str, creator: _Loss):\n self._criterion[function] = creator"
] |
[
"0.57693905",
"0.556603",
"0.5531649",
"0.5530684",
"0.5401678",
"0.5360797",
"0.5349943",
"0.53326744",
"0.5330154",
"0.53179365",
"0.528131",
"0.5280769",
"0.5261637",
"0.5245427",
"0.5236207",
"0.52035993",
"0.5197932",
"0.51809543",
"0.51750386",
"0.5161694",
"0.5151479",
"0.5142799",
"0.5133269",
"0.51123315",
"0.5110505",
"0.51038384",
"0.5100887",
"0.509532",
"0.50951546",
"0.5090214"
] |
0.7152418
|
0
|
Load the given primary key identity from the database.
|
def _load_on_pk_identity(self, session, query, primary_key_identity, **kw):
mapper = query._raw_columns[0]._annotations["parententity"]
_get_clause, _get_params = mapper._get_clause
def setup(query):
_lcl_get_clause = _get_clause
q = query._clone()
q._get_condition()
q._order_by = None
# None present in ident - turn those comparisons
# into "IS NULL"
if None in primary_key_identity:
nones = {
_get_params[col].key
for col, value in zip(
mapper.primary_key, primary_key_identity
)
if value is None
}
_lcl_get_clause = sql_util.adapt_criterion_to_null(
_lcl_get_clause, nones
)
# TODO: can mapper._get_clause be pre-adapted?
q._where_criteria = (
sql_util._deep_annotate(_lcl_get_clause, {"_orm_adapt": True}),
)
for fn in self._post_criteria:
q = fn(q)
return q
# cache the query against a key that includes
# which positions in the primary key are NULL
# (remember, we can map to an OUTER JOIN)
bq = self.bq
# add the clause we got from mapper._get_clause to the cache
# key so that if a race causes multiple calls to _get_clause,
# we've cached on ours
bq = bq._clone()
bq._cache_key += (_get_clause,)
bq = bq.with_criteria(
setup, tuple(elem is None for elem in primary_key_identity)
)
params = {
_get_params[primary_key].key: id_val
for id_val, primary_key in zip(
primary_key_identity, mapper.primary_key
)
}
result = list(bq.for_session(self.session).params(**params))
l = len(result)
if l > 1:
raise orm_exc.MultipleResultsFound()
elif l:
return result[0]
else:
return None
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def load_by_pk(cls, _pk):\n try:\n return cls.q.get(_pk)\n except SQLAlchemyError:\n cls.s.rollback()\n raise",
"def _identity_lookup(\n self,\n mapper: Mapper[_O],\n primary_key_identity: Union[Any, Tuple[Any, ...]],\n identity_token: Any = None,\n passive: PassiveFlag = PassiveFlag.PASSIVE_OFF,\n lazy_loaded_from: Optional[InstanceState[Any]] = None,\n execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,\n bind_arguments: Optional[_BindArguments] = None,\n ) -> Union[Optional[_O], LoaderCallableStatus]:\n\n key = mapper.identity_key_from_primary_key(\n primary_key_identity, identity_token=identity_token\n )\n\n # work around: https://github.com/python/typing/discussions/1143\n return_value = loading.get_from_identity(self, mapper, key, passive)\n return return_value",
"def test_entity_retrieval_by_its_primary_key(\n self, test_domain, identifier, persisted_person\n ):\n person = test_domain.repository_for(Person)._dao.get(persisted_person.id)\n assert person is not None\n assert person.id == identifier\n assert person == persisted_person",
"def load_key(self, type, keyid):\n pass",
"def load_user(id):\n\n return User.query.get(int(id))",
"def load_user(id):\n return User.query.get(int(id))",
"def load_user(id):\n return User.query.get(int(id))",
"def load_user(id):\n return User.query.get(int(id))",
"def load_user(id):\n return User.query.get(int(id))",
"def load(cls, id):\n key = cls.get_key_prefix()+\"#\"+str(id)\n src = dal_get(key)\n logger.debug( \"LOAD %s %s %s\", str(key), str(id), str(src))\n if src == None:\n raise cls.NotExist(\"No instance could be found with ID: \"+str(id))\n result = dal_retrieve(src)\n result = cls._from_dict(result)\n return result",
"def load(self, key):\n self._clean()\n self.pk = self._gen_pk(key)\n\n self._original = self._get_cas().get_slice(\n self.pk.table, self.pk.key, ColumnParent(self.pk.family),\n '', '', True, 100)\n self.revert()\n return self",
"def load_user():\n\n return User.query.get(int(id))",
"def find(self, primary_key):\n sql = '{} WHERE {} = %s'.format(self._select(), self.PRIMARY_KEY)\n cursor = yield self._pool.execute(sql, [primary_key])\n result = cursor.fetchmany(1)\n return self.convert_result_to_object(result)",
"def get_by_id(self, pkId: int):\n if not self.model:\n raise NameError('database model has not been set.')\n if not pkId:\n raise ValueError('invalid primary key value.')\n\n with self.session() as session:\n query = self.get_query(session)\n rec = query.get(pkId)\n return rec",
"def load_user(id):\n\treturn User.query.get(int(id))",
"def retrieve_id(self, data_id):\n return self.database[data_id]",
"def load_user(user_id):\n\n return User.query.get(int(user_id))",
"def load_user(user_id):\n return User.query.get(int(user_id))",
"def load_user(user_id):\n return User.query.get(int(user_id))",
"def load_user(user_id):\n return User.query.get(int(user_id))",
"def load_user(user_id):\n return User.query.get(int(user_id))",
"def load_user(user_id):\n return User.query.get(int(user_id))",
"def load_user(user_id):\n return User.query.get(int(user_id))",
"def load_user(user_id):\r\n return User.query.get(int(user_id))",
"def load_user (userid):\n return User.from_id(userid)",
"def load_user(user_id):\n return session.query(User).get(user_id)",
"async def find_by_id(self, _id: int) -> Record:\n conn: Connection\n async with self.db_pool.acquire() as conn:\n return await conn.fetchrow(\n f\"SELECT * FROM {self.table_name} WHERE {self.primary_key}=$1\",\n _id,\n )",
"def load_user(id):\n user = db.session.query(User).filter(User.id == id).first()\n return user",
"def get_primary_id(self):",
"def load_user(id):\n return User.get_by_id(int(id))"
] |
[
"0.6682378",
"0.60061365",
"0.58653814",
"0.5855983",
"0.5852731",
"0.5839741",
"0.5839741",
"0.5839741",
"0.5839741",
"0.581331",
"0.5812196",
"0.5805475",
"0.578652",
"0.5777436",
"0.5773851",
"0.57506794",
"0.56822234",
"0.5653946",
"0.5653946",
"0.5653946",
"0.5653946",
"0.5653946",
"0.5653946",
"0.56493336",
"0.56439865",
"0.56270456",
"0.5588766",
"0.5583907",
"0.5542331",
"0.5517688"
] |
0.60104835
|
1
|
Calculate the error budget
|
def error_budget(self, slo=0, dec_point=3):
self.slo = slo
self.dec_point = dec_point
err_budget = round(float(100-slo),dec_point)
return err_budget
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def error(self): \n if not self.terminal:\n err = sum([v**2 for v in self.state + self.q[:-1]])\n else:\n err = sum([v**2 for v in LIMITS[:9]] + [1.0 - LIMITS[9]**2])\n err *= (self.max_steps - self.steps)\n return err",
"def _computeError(self, inputs, targets):\n return .5*np.sum((targets-self._pcnfwd(inputs))**2)",
"def calc_error_dist(self):\n pass",
"def calculate_risk_tol(*args):\n global total_score\n risk_tol_start = 0.0\n\n for risk_per_pg in risk_tol_per_qs.iterkeys():\n try:\n risk_tol_start = risk_tol_start + risk_tol_per_qs[risk_per_pg][-1] # this is the last item in the list of each information in the page\n except IndexError:\n pass\n total_score = risk_tol_start",
"def fitness(individual, divider, target_sum, target_multiply):\n\n sum_val = reduce(operator.add, individual[:divider], 0)\n multiply_val = reduce(operator.mul, individual[divider:], 1)\n \n sum_error = abs(target_sum - sum_val)\n sum_error = sum_error / target_sum\n\n multiply_error = abs(target_multiply - multiply_val)\n multiply_error = multiply_error / target_multiply\n\n #print(multiply_error, sum_error)\n #print(sum_error, multiply_error)\n return (multiply_error + sum_error)/2 * 100",
"def cps_err(self):\n return np.sqrt(self.totalcounts) / self.exptime",
"def calculate_energy_conservation_error(self):\n assert self.data is not None\n # calculate total energy at start and end of simulation\n energy_start = self.measure_total_energy(self.t0)\n energy_end = self.measure_total_energy(self.t1)\n \n # calculate accuracy\n error = abs(1.0 - energy_start/energy_end)\n \n return error",
"def error(b, m, x_data):\n\n totalError = 0\n for i in range(0, len(x_data)):\n x = x_data[i]\n y = 2 * x_data[i] + 50 + 5 * np.random.random()\n\n totalError += (y - (m * x + b)) ** 2 # total error of gradient\n\n return totalError / float(len(x_data))",
"def error(self, trainset: ([], [])):\n # MSE = Σ | d – y |^2 / n\n error_sum = 0.0\n for index, example in enumerate(trainset):\n # | d – y |^2\n output = self.activate(example[0])\n\n target = example[1][0]\n\n error = target - output\n error_sum += error ** 2\n\n # Σ |error_sum| / n\n error_sum = error_sum / len(trainset)\n return error_sum",
"def _df_err(self):\n return self.n - self.k - 1",
"def calculateAvgBiasError(actualDemandDf:pd.core.frame.DataFrame, forecastedDemandDf:pd.core.frame.DataFrame)->float: \n biasErrorDf=pd.DataFrame() \n biasErrorDf['biasError'] = (actualDemandDf['demandValue']-forecastedDemandDf['FORECASTED_DEMAND_VALUE'])/actualDemandDf['demandValue']\n avgBiasError = biasErrorDf['biasError'].mean()\n return avgBiasError",
"def calculate_profit(self):",
"def calculate_error(self):\n \n delta = self.Y - self.T\n error = delta.dot(delta) / self.N\n error = format(error, '.5f')\n \n self.errors.append(error)",
"def getEvaluationActualEstimateDifference(self):\n # import pdb; pdb.set_trace()\n budget = self.getBudget()\n teBudget = self.getTerminalEvaluationBudget()\n mtrEstimate = self.getMTREstimatedCost()\n teEstimate = self.getTEEstimatedCost()\n if (budget is not None) and \\\n (teBudget is not None) and \\\n (mtrEstimate is not None) and \\\n (teEstimate is not None):\n budgetCost = budget + teBudget\n estimatedCost = mtrEstimate + teEstimate\n # actualCost = getTEActualCost() + getMTRActualCost()\n # return estimatedCost = actualCost\n return budgetCost - estimatedCost\n return 0",
"def calculateErrorRate(numCorrect, numWrong):\n return np.round((numWrong)/(numCorrect+numWrong),3)",
"def get_error(deltas, sums, weights):\n \n print(deltas)\n print(sums)\n print(weights)\n print('===================================')\n \n # here goes your code\n A = weights.T.dot(deltas.T)\n print(A)\n B = sigmoid_prime(sums)\n print(B)\n \n print(A.shape)\n print(B.shape)\n C = A.T * B\n print(C)\n D = C.mean(axis=0)\n print(D)\n print(D.shape)\n \n return ((weights.T.dot(deltas.T)).T * sigmoid_prime(sums)).mean(axis=0)",
"def __error(self, R, P, Q, K, beta):\n e = 0\n for i in xrange(len(R)):\n for j in xrange(len(R[i])):\n if R[i][j] > 0:\n\n # loss function error sum( (y-y_hat)^2 )\n e = e + pow(R[i][j]-numpy.dot(P[i,:],Q[:,j]), 2)\n\n # add regularization\n for k in xrange(K):\n\n # error + ||P||^2 + ||Q||^2\n e = e + (beta/2) * ( pow(P[i][k], 2) + pow(Q[k][j], 2) )\n return e",
"def dilutionneeded(self) -> float:\n return self.stock*1.0/self.final",
"def _wer_compute(errors: Tensor, total: Tensor) ->Tensor:\n return errors / total",
"def model_error(self):\n return self.premium() / self.data['premium'] - 1",
"def _calculate_error(self, targets):\n\n def hist(d):\n f, v = histogram(array(d))\n i = len(f) if argmax(f) == len(f) - 1 else argmax(f)\n return v[i]\n\n devxs, devys = list(zip(*[r.dev_centroid for r in targets]))\n\n if len(targets) > 2 and self.use_histogram:\n dx = hist(devxs)\n dy = hist(devys)\n else:\n\n def avg(s):\n return sum(s) / len(s)\n\n dx = avg(devxs)\n dy = avg(devys)\n\n return -dx, dy",
"def calculate_ao_error_terms(self):\n\n self._calculate_r0()\n self._calculate_greenwood_frequency()\n \n self._calculate_diffraction_limit() # in mas\n self._calculate_actuators_across()\n self._calculate_spatial_frequency_cutoff()\n\n self._calculate_fitting_error()\n self._calculate_measurement_error()\n self._calculate_anisoplatanism_error()\n self._calculate_bandwidth_error()",
"def tracking_error(port_returns, market_returns):\n\n return np.std(port_returns - market_returns)",
"def tracking_error(port_returns, market_returns):\n\n return np.std(port_returns - market_returns)",
"def budget(self):\n\n budget = (_House.closing_cost*self.vars['after_repair_value']) - self.vars['purchase_price'] - self.vars['profit'] - _House.broker_fee\n return float(round(budget, 2))",
"def compute_average(self, error=None):\n\n nbjobs = len(self)\n if not nbjobs:\n return\n max_xsec = max(one.xsec for one in self)\n min_xsec = min(one.xsec for one in self)\n self.axsec = sum([one.axsec for one in self]) / nbjobs\n self.xsec = sum([one.xsec for one in self]) /nbjobs\n self.xerrc = sum([one.xerrc for one in self]) /nbjobs\n self.xerru = math.sqrt(sum([one.xerru**2 for one in self])) /nbjobs\n if error:\n self.xerrc = error\n self.xerru = error\n\n self.nevents = sum([one.nevents for one in self])\n self.nw = 0#sum([one.nw for one in self])\n self.maxit = 0#len(self.yerr_iter) # \n self.nunwgt = sum([one.nunwgt for one in self]) \n self.wgt = 0\n self.luminosity = sum([one.luminosity for one in self])\n self.ysec_iter = []\n self.yerr_iter = []\n self.th_maxwgt = 0.0\n self.th_nunwgt = 0 \n for result in self:\n self.ysec_iter+=result.ysec_iter\n self.yerr_iter+=result.yerr_iter\n self.yasec_iter += result.yasec_iter\n self.eff_iter += result.eff_iter\n self.maxwgt_iter += result.maxwgt_iter\n\n #check full consistency\n onefail = False\n for one in list(self):\n if one.xsec < (self.xsec - 25* one.xerru):\n if not onefail:\n logger.debug('multi run are inconsistent: %s < %s - 25* %s: assign error %s', one.xsec, self.xsec, one.xerru, error if error else max_xsec-min_xsec)\n onefail = True\n self.remove(one)\n if onefail:\n if error:\n return self.compute_average(error)\n else:\n return self.compute_average((max_xsec-min_xsec)/2.)",
"def error_rate(error_total, tokens):\n if len(tokens) > 0:\n return(float(\"{0:.3f}\".format(error_total/len(tokens))))\n else:\n return(np.nan)",
"def budget_used(self):\n return int(self.total_spent() / self.budget() * 100.0)",
"def compute_error(data, user_features, item_features, nz):\n sum_err = 0\n for d, n in nz:\n err = data[d,n] - np.dot(item_features[d,:],user_features[:,n])\n sum_err += err**2\n rmse = 0.5*sum_err/len(nz)\n return rmse",
"def error_compute(self):\n self.tt_error = np.linalg.norm(self.rel_error)\n if self.global_rank==0:print('Overall error is::',self.tt_error)\n return {'NMF': self.rel_error, 'tt': self.tt_error}"
] |
[
"0.66941774",
"0.6438683",
"0.6386701",
"0.622592",
"0.61903656",
"0.61200005",
"0.6113358",
"0.60751",
"0.606798",
"0.60242563",
"0.5982136",
"0.59535897",
"0.5950767",
"0.59501123",
"0.5931098",
"0.59281754",
"0.591864",
"0.59047747",
"0.58921164",
"0.58647203",
"0.58646446",
"0.5857182",
"0.5853418",
"0.5853418",
"0.5846592",
"0.5837791",
"0.5831113",
"0.58287203",
"0.5826525",
"0.5820433"
] |
0.670839
|
0
|
Calculate downtime in minutes
|
def downtime(self, down_time=0):
self.down_time = down_time
return down_time
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def failed_per_hour(self):\r\n return (3600.*(self.circ_failed+self.strm_failed))/self.current_uptime()",
"def uptime(self) -> timedelta:\n return timedelta(seconds=int(time() - self.start_time))",
"def _get_sleep_time(self, start_date, end_date):\n if start_date.minute == end_date.minute:\n return 60 - end_date.second - (1 - start_date.microsecond / 1000000)\n\n return 0",
"def calculate_timeout(self):\n return self.total_estimated_words() / self.minimum_wpm * 60",
"def calculate_time(start_time):\r\n return round(time() - start_time, 2)",
"async def get_uptime(message):\n\n runtime = int(time.time()) - int(START_TIME)\n\n if runtime < 60:\n await message.reply(f'{runtime} seconds')\n return\n\n runtime //= 60\n if runtime < 60:\n await message.reply(f'{runtime} minutes')\n return\n\n runtime //= 24\n if runtime < 24:\n await message.reply(f'{runtime} hours')\n return\n\n await message.reply(f'{runtime} days')\n return",
"def part1() -> int:\n longest_sleeper = max(sleep_times, key=lambda g: len(sleep_times[g]))\n sleepiest_minute = max(\n sleep_times[longest_sleeper], key=sleep_times[longest_sleeper].count)\n\n return longest_sleeper * sleepiest_minute",
"def get_run_time(start, end):\n start_hour, start_min, start_sec = start.split(':')\n end_hour, end_min, end_sec = end.split(':')\n\n hour_diff = int(end_hour) - int(start_hour)\n min_diff = int(end_min) - int(start_min)\n sec_diff = int(end_sec) - int(start_sec)\n\n total_diff = hour_diff * 60 * 60 + min_diff * 60 + sec_diff\n\n return total_diff",
"def calc_idle_time(self):\n tt = 0.0\n keys = self.output.keys()\n keys.sort()\n jobEndKeys = [thekey for thekey in keys if 'JobEnd' in thekey]\n jobEndKeys = jobEndKeys[0:len(jobEndKeys)-1]\n for key in jobEndKeys:\n idxstart = keys.index(key)\n stime = self.output[keys[idxstart]][1]\n idxend = idxstart + 1\n while not (keys[idxend][1] in ['Preparation','Production']):\n idxend += 1\n # Now we have the entry where the next work cycle starts\n etime = self.output[keys[idxend]][1]\n tot_time = (etime - stime).seconds/3600.\n for ii in range(idxstart+1, idxend):\n if keys[ii][1] in ['W-up', 'Maintenance']:\n tot_time -= self.output[keys[ii]][4]\n # if the time is longer than 5 min:\n if tot_time >= 5.0/60.0:\n tt += tot_time\n print 'idle time ', tt, ' hours'",
"def uptime_from_start_time(start_time):\n return time.time() - start_time",
"def runtime_cal(start,end) :\n run_time = end - start\n mm = int(run_time/60)\n ss = round(run_time%60)\n return mm, ss",
"def run_time_sec(self):\n if self.job_updated_at is not None:\n return (self.job_updated_at - self.created_at).total_seconds()\n\n return None",
"def _run_time(func):\n start_time = datetime.datetime.now()\n func\n end_time = datetime.datetime.now()\n return end_time - start_time",
"def remaining_ms():",
"def uptime(start_time):\n return datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(start_time)",
"def get_server_uptime(self):\n return time.time() - self.init_time",
"def get_duration(self):\n return (self.stop_day - self.start_day) * (24 * 60) \\\n + (self.stop_hour - self.start_hour) * 60",
"def minutesSinceLastUpdate(self):\n if self.seenTimes == []:\n return 0\n latestTime = max(self.seenTimes)\n return int(self.timeCode())-int(latestTime)",
"async def uptime(self, ctx: Message):\n\t\tdelta_uptime = datetime.utcnow() - launch_time\n\t\thours, remainder = divmod(int(delta_uptime.total_seconds()), 3600)\n\t\tminutes, seconds = divmod(remainder, 60)\n\t\tdays, hours = divmod(hours, 24)\n\n\t\tif days == 0:\n\t\t\tdayst = \"\"\n\t\telse:\n\t\t\tif days > 1:\n\t\t\t\ts = \"s\"\n\t\t\telse:\n\t\t\t\ts = \"\"\n\t\t\tdayst = f\"{days} day{s},\"\n\n\t\tif hours == 0:\n\t\t\thourst = \"\"\n\t\telse:\n\t\t\tif hours > 1:\n\t\t\t\tss = \"s\"\n\t\t\telse:\n\t\t\t\tss = \"\"\n\t\t\thourst = f\"{hours} hour{ss},\"\n\n\t\tif minutes == 0:\n\t\t\tminutest = \"\"\n\t\telse:\n\t\t\tif minutes > 1:\n\t\t\t\tsss = \"s\"\n\t\t\telse:\n\t\t\t\tsss = \"\"\n\t\t\tminutest = f\"{minutes} minute{sss},\"\n\n\t\tif seconds == 0:\n\t\t\tsecondst = f\"1 second\"\n\t\telse:\n\t\t\tif seconds > 1:\n\t\t\t\tssss = \"s\"\n\t\t\telse:\n\t\t\t\tssss = \"\"\n\t\t\tsecondst = f\"{seconds} second{ssss}\"\n\n\t\tawait self.send(\n\t\t f\"I've been online for {dayst} {hourst} {minutest} {secondst}\")",
"def get_time_taken_sec(self) -> float:\n return self.time_stop - self.time_start",
"def total_minutes(td):\n return total_seconds(td) / 60",
"async def uptime(ctx):\n global start_time\n\n await ctx.send(f\"{time_delta_string(start_time, datetime.utcnow())}\")",
"async def uptime(self, ctx):\n try:\n seconds = time.time() - starttime\n m, s = divmod(seconds, 60)\n h, m = divmod(m, 60)\n d, h = divmod(h, 24)\n w, d = divmod(d, 7)\n await self.bot.say(\"@NotSoBot has been online for %dw :\" % (w) + \" %dd :\" % (d) + \" %dh :\" % (h) + \" %dm :\" % (m) + \" %ds\" % (s))\n except Exception as e:\n await self.bot.say(code.format(type(e).__name__ + ': ' + str(e)))",
"def uptime():\n seconds = timedelta(seconds=int(time.time() - start_time))\n d = datetime(1, 1, 1) + seconds\n return(\"%dD:%dH:%dM:%dS\" % (d.day-1, d.hour, d.minute, d.second))",
"def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n\n total_travel_time_in_sec = df['Trip Duration'].sum()\n total_travel_time_in_years = total_travel_time_in_sec // (60 * 60 * 24 *365)\n\n modulus1_in_sec = total_travel_time_in_sec - total_travel_time_in_years*(60 * 60 * 24 *365)\n #print(\"modulus1_in_sec:\", modulus1_in_sec)\n total_travel_time_in_months = modulus1_in_sec // (60 * 60 * 24 *31)\n\n modulus2_in_sec = modulus1_in_sec - total_travel_time_in_months*(60 * 60 * 24 *31)\n #print(\"modulus2_in_sec:\", modulus2_in_sec)\n total_travel_time_in_weeks = modulus2_in_sec // (60 * 60 * 24 *7)\n\n modulus3_in_sec = modulus2_in_sec - total_travel_time_in_weeks*(60 * 60 * 24 *7)\n #print(\"modulus3_in_sec:\", modulus3_in_sec)\n total_travel_time_in_days = modulus3_in_sec // (60 * 60 * 24)\n\n modulus4_in_sec = modulus3_in_sec - total_travel_time_in_days*(60 * 60 * 24)\n #print(\"modulus4_in_sec:\", modulus4_in_sec)\n total_travel_time_in_hours = modulus4_in_sec // (60 * 60)\n\n modulus5_in_sec = modulus4_in_sec - total_travel_time_in_hours*(60 * 60)\n #print(\"modulus5_in_sec:\", modulus5_in_sec)\n total_travel_time_in_minutes = modulus5_in_sec // 60\n\n modulus6_in_sec = modulus5_in_sec - total_travel_time_in_minutes*60\n #print(\"modulus6_in_sec:\", modulus6_in_sec)\n total_travel_time_in_seconds_modulus = modulus6_in_sec\n\n print(\"total travel time of all Users combined:\\n YEARS: {} \\n MONTHS: {} \\n WEEKS: {} \\n DAYS: {} \\n HOURS: {} \\n MINUTES: {} \\n SECONDS: {} \\n\".format(total_travel_time_in_years, total_travel_time_in_months, total_travel_time_in_weeks, total_travel_time_in_days, total_travel_time_in_hours, total_travel_time_in_minutes, total_travel_time_in_seconds_modulus))\n\n # TO DO: display mean travel time\n\n mean_travel_time_in_sec = df['Trip Duration'].mean()\n mean_travel_time_in_minutes = mean_travel_time_in_sec // 60\n modulus_in_sec = mean_travel_time_in_sec - mean_travel_time_in_minutes*60\n mean_travel_time_in_seconds_modulus = modulus_in_sec\n\n print(\"mean travel time:\\n MINUTES: {} \\n SECONDS: {} \\n\".format(int(mean_travel_time_in_minutes), mean_travel_time_in_seconds_modulus))\n\n#trip_duration_stats(pd.read_csv('{}.csv'.format(city)))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)",
"def GAME_TIME_ADVANCE(dt):",
"def get_time_diff(start_time_ms: int) -> float:\n end_time_ms = RemoteProvisionerBase.get_current_time()\n time_diff = float((end_time_ms - start_time_ms)/1000)\n return time_diff",
"def uptime(self):\n return self._call_txtrader_api('uptime', {})",
"def runtime(self):\n return self.stop_time - self.start_time",
"def runtime(self):\n return (self.time - self.start).total_seconds()"
] |
[
"0.6477869",
"0.64276373",
"0.6388033",
"0.6321712",
"0.62180614",
"0.6206905",
"0.6198657",
"0.6171962",
"0.6130945",
"0.6065142",
"0.60411316",
"0.60108954",
"0.60030276",
"0.59931856",
"0.59828526",
"0.5981133",
"0.5975357",
"0.59615815",
"0.59528226",
"0.5902129",
"0.5892462",
"0.5833597",
"0.5827316",
"0.5777467",
"0.5763962",
"0.5753708",
"0.5748669",
"0.5741469",
"0.57369983",
"0.5731996"
] |
0.68633246
|
0
|
Calculate availability in %
|
def availability(self, up_time=100, down_time=0, dec_point=3):
self.up_time = up_time
self.down_time = down_time
self.dec_point = dec_point
avail_percentage = round(float((up_time/(up_time+down_time))*100),dec_point)
return avail_percentage
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def sum_availability(val, quant) -> float:\n return val + qty_available(quant)",
"def ok(self, results):\n return \"{:5.2f}% capacity used\".format(\n results[\"usage\"].resource.usage_ratio * 100.0\n )",
"def availability(self):\n if len(self.nodes) == 0:\n return 0.0\n values = map(lambda n: n.availability, self.nodes)\n return mean(values)",
"def get_attendance(self):\n\n if len(self.attendance_list):\n attendance_sum = 0\n for attendance in self.attendance_list:\n attendance_sum += attendance.attendance_state\n return attendance_sum/len(self.attendance_list) * 100\n\n else:\n return 100.0",
"def percent_busy(self):\n return self._percent_busy",
"def availability(self):\n # TODO: These lookups are highly inefficient. However, we'll wait with optimizing\n # until Django 1.8 is released, as the following feature might make it a\n # lot easier:\n # https://docs.djangoproject.com/en/1.8/ref/models/conditional-expressions/\n # TODO: Test for interference with old versions of Item-Quota-relations, etc.\n # TODO: Prevent corner-cases like people having ordered an item before it got\n # its first variationsadded\n quotalookup = (\n ( # Orders for items which do not have any variations\n Q(variation__isnull=True)\n & Q(item__quotas__in=[self])\n ) | ( # Orders for items which do have any variations\n Q(variation__quotas__in=[self])\n )\n )\n\n paid_orders = OrderPosition.objects.current.filter(\n Q(order__status=Order.STATUS_PAID)\n & quotalookup\n ).count()\n\n if paid_orders >= self.size:\n return Quota.AVAILABILITY_GONE, 0\n\n pending_valid_orders = OrderPosition.objects.current.filter(\n Q(order__status=Order.STATUS_PENDING)\n & Q(order__expires__gte=now())\n & quotalookup\n ).count()\n if (paid_orders + pending_valid_orders) >= self.size:\n return Quota.AVAILABILITY_ORDERED, 0\n\n valid_cart_positions = CartPosition.objects.current.filter(\n Q(expires__gte=now())\n & quotalookup\n ).count()\n if (paid_orders + pending_valid_orders + valid_cart_positions) >= self.size:\n return Quota.AVAILABILITY_RESERVED, 0\n\n return Quota.AVAILABILITY_OK, self.size - paid_orders - pending_valid_orders - valid_cart_positions",
"def _calculate_result(found, total):\n return (found * 100) / total",
"def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)",
"def percentage_used(self):\n return self.volume_used/self.total_volume * 100.0",
"def Available(self) -> int:",
"def Available(self) -> int:",
"def Available(self) -> int:",
"def percentCheck(currentTimeLabel, totalTimeLabel):\n # Updated 11/19/16\n try:\n progPercent = float(currentTimeLabel) / float(totalTimeLabel) * 100\n except (ValueError , ZeroDivisionError):\n progPercent = 0\n \n return progPercent",
"def pct_bust(data):\n return round((data[\"new_total\"] > 21).sum() / len(data), 3)",
"def get_online_price_diff_percent_method(self):\n try:\n if self.overclockerskz and self.overclockerskz.online_price:\n return int((self.get_online_price_diff_method() / self.overclockerskz.online_price) * 100)\n else:\n return 0\n except (TypeError, ValueError):\n return 0",
"def ComputeEAvailable(self):\r\n pass",
"def get_availability(self, field_name='AVAILABILITY'):\n return self.get_default(field_name)",
"def _calculate_hours_percent(used_hours, estimated_hours):\n percent = (used_hours * 100) / estimated_hours\n return percent",
"def pct_status(self):\r\n # DEPRECATED: self.info.n_answers will be removed\r\n # DEPRECATED: use self.t.n_answers instead\r\n if (self.info.get('n_answers')):\r\n self.n_answers = int(self.info['n_answers'])\r\n if self.n_answers != 0 and self.n_answers != None:\r\n return float(len(self.task_runs)) / self.n_answers\r\n else: # pragma: no cover\r\n return float(0)",
"def compute_utilization(self) -> float:\r\n return self._compute_utilization",
"def get_percent_completed(self):\n completed = self.object_list.filter(status__exact=True).count()\n total = len(self.object_list)\n return int(100 * completed / total) if total > 0 else 0",
"def test_mem_available_percent():\n result = _run_metric('mem_available_percent')\n assert result.exit_code == 0",
"def get_utilization(self, node: int) -> float:\n return self.busy[node].pmf(1)",
"def pct(self):\n\t\treturn self.bottle.pct()",
"def usage_percent(used, total, _round=None):\r\n try:\r\n ret = (used / total) * 100\r\n except ZeroDivisionError:\r\n ret = 0\r\n if _round is not None:\r\n return round(ret, _round)\r\n else:\r\n return ret",
"def get_real_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_real_rating() / self.field.range)",
"def _prorata_rate(self, days_used, days_in_month):\n return (100 * days_used // days_in_month) / 100.0",
"def get_utilization(self, current_time):\n\n # If the server is not serving, not online, and was not serving this\n # time period, move the anchor.\n if (not self.is_serving) and \\\n (not self.online) and \\\n (self.utilization == 0) and \\\n len(self.queue) == 0:\n self.utilization_anchor = current_time\n\n # If the server is serving or has people waiting...\n elif self.is_serving or len(self.queue) != 0:\n if current_time == self.utilization_anchor:\n self.utilization = 1\n else:\n self.utilization = self.utilization + (\n (1-self.utilization) /\n ((current_time-self.utilization_anchor)*1.0))\n\n # If the server is online but is not doing anything...\n elif self.online and \\\n (not self.is_serving) and \\\n len(self.queue) == 0:\n if current_time == self.utilization_anchor:\n self.utilization = 0\n else:\n self.utilization = self.utilization + (\n (0-self.utilization) /\n ((current_time-self.utilization_anchor)*1.0))\n\n # If we are on the hour and the server has been online,\n # we flush the results and reset the utilization.\n if current_time != 0 and \\\n (current_time + 1) % _get_sec(\"01:00:00\", spd_factor) == 0 and \\\n self.online:\n self.utilization_series[_get_ttime(\n current_time + 1 - _get_sec(\"01:00:00\", spd_factor), \n spd_factor)] = self.utilization\n\n\n #self.output_queue.server_statistics.append(\n # [self.id,\n # self.utilization,\n # _get_ttime(current_time, spd_factor)])\n\n self.utilization = 0\n self.utilization_anchor = current_time + 1",
"def current_percent_open(self):\n from courses.util import get_current_semester, get_or_create_add_drop_period\n\n # ^ imported here to avoid circular imports\n\n if self.semester == get_current_semester():\n add_drop = get_or_create_add_drop_period(self.semester)\n add_drop_start = add_drop.estimated_start\n add_drop_end = add_drop.estimated_end\n current_time = timezone.now()\n if current_time <= add_drop_start:\n return None\n try:\n last_status_update = StatusUpdate.objects.filter(\n section=self, created_at__gt=add_drop_start, created_at__lt=add_drop_end\n ).latest(\"created_at\")\n except StatusUpdate.DoesNotExist:\n last_status_update = None\n last_update_dt = last_status_update.created_at if last_status_update else add_drop_start\n period_seconds = float(\n (min(current_time, add_drop_end) - add_drop_start).total_seconds()\n )\n percent_after_update = (\n float(self.is_open)\n * float((current_time - last_update_dt).total_seconds())\n / period_seconds\n )\n if last_status_update is None:\n return percent_after_update\n percent_before_update = (\n float(self.percent_open)\n * float((last_update_dt - add_drop_start).total_seconds())\n / period_seconds\n )\n return percent_before_update + percent_after_update\n else:\n return self.percent_open",
"def getPercent(*args):"
] |
[
"0.6637567",
"0.6620742",
"0.65621686",
"0.61889",
"0.61668587",
"0.6147338",
"0.6124066",
"0.6106438",
"0.60578084",
"0.60313505",
"0.60313505",
"0.60313505",
"0.59664994",
"0.5964897",
"0.5962494",
"0.5768327",
"0.57639223",
"0.5762271",
"0.5754381",
"0.5727384",
"0.5722385",
"0.5720334",
"0.5707018",
"0.5700914",
"0.569974",
"0.5680113",
"0.56623095",
"0.56482756",
"0.5627735",
"0.5617999"
] |
0.7643614
|
0
|
Get the avionics servo mechanical limits for the current system.
|
def GetAvionicsServoLimits():
sys_conf = system_config.SystemConfig.GetSystemConfigBySerial(
_CONFIG['system']['wing_serial'])
config_file = os.path.join(makani.HOME,
'avionics/servo/firmware/config_params.yaml')
net_conf = network_config.NetworkConfig()
yaml_keys = [sys_conf.config[net_conf.GetAioNode('servo_%s' % s.lower())]
for s in _SERVOS]
limits = [codec.DecodeYamlFile(config_file, key) for key in yaml_keys]
return {_SERVOS[i]: (limits[i].servo_min_limit, limits[i].servo_max_limit)
for i in range(len(_SERVOS))}
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_velocity_limits(robot):\n return _get_limits(robot, \"Velocity\")",
"def pwm_limit(self):\n return self._read(MX_PWM_LIMIT)",
"def max_voltage_limit(self):\n return self._read(MX_MAX_VOLTAGE_LIMIT)",
"def get_car_limits(self):\n # car's limits\n min_turning_radius = self.wheelbase/np.tan(self.max_steering_angle)\n max_angular_velocity = self.speed/min_turning_radius\n max_car = np.array([0, self.speed, max_angular_velocity])\n\n return max_car",
"def limits(self) -> Optional['outputs.CSIPowerMaxRevProxySpecConfigStandAloneConfigManagementServersLimits']:\n return pulumi.get(self, \"limits\")",
"def get_acceleration_limits(robot):\n return _get_limits(robot, \"Accel\")",
"def limits(self):\n return self._limits",
"def velocity_limit(self):\n return self._read(MX_VELOCITY_LIMIT)",
"def find_velocity_limits(self, twist_cone, dist):\n\n # car's limits\n max_car = self.get_car_limits()\n\n # pushing limits\n min_ls, max_ls = self.pushing_limits(twist_cone)\n\n return max_car, min_ls, max_ls",
"def getLimits(self):\n lims = [x * self.getSign() + self.getOffset() for x in (self.connection.getChannel(self.chanNamePrefix % 'low_limit').read(), \\\n self.connection.getChannel(self.chanNamePrefix % 'high_limit').read())]\n return (min(lims), max(lims))",
"def magnet_limits(self):\n max_currents = self.pv_monitor.get_max_currents()\n\n strengths = [np.array([max_currents[0],\n -max_currents[1],\n max_currents[2], 0, 0]),\n np.array([0, 0, max_currents[2],\n -max_currents[3],\n max_currents[4]])]\n\n edges = [[], []]\n for s in range(2):\n edges[s] = np.array(self.straight.p_beam_lim(strengths[s])\n )[:, [0, 2]]\n\n beam1max = edges[0][0]\n beam2max = edges[1][1]\n\n self.ax.plot(self.straight.data.photon_coordinates[0],\n beam1max, 'r--')\n self.ax.plot(self.straight.data.photon_coordinates[1],\n beam2max, 'r--')",
"def get_limits(self):\n return self._get(limits.Limits)",
"def limits(self) -> Optional['outputs.CSIPowerMaxRevProxySpecConfigLinkConfigPrimaryLimits']:\n return pulumi.get(self, \"limits\")",
"def getLimits():\n return [Limit(limit) for limit in Cuebot.getStub('limit').GetAll(\n limit_pb2.LimitGetAllRequest(), timeout=Cuebot.Timeout).limits]",
"def get_limits(self, device,percent=0.25):\n\t\tval = epics.caget(device)\n tol = (val*percent)\n lim_lo = val-tol\n lim_hi = val+tol\n limits = [lim_lo,lim_hi]\n\t\treturn limits",
"def max_pwm(self):\r\n return self._max_pwm",
"def limits(self) -> Optional['outputs.PreventionInspectTemplateInspectConfigLimits']:\n return pulumi.get(self, \"limits\")",
"def limits(self) -> Optional['outputs.CSIPowerMaxRevProxySpecConfigLinkConfigBackupLimits']:\n return pulumi.get(self, \"limits\")",
"def get_all_limits(robot):\n limits_data = {}\n\n limits_data['Position'] = get_axis_limits(robot)\n limits_data['Velocity'] = get_velocity_limits(robot)\n limits_data['Accel'] = get_acceleration_limits(robot)\n limits_data['Jerk'] = get_jerk_limits(robot)\n\n return limits_data",
"def get_joint_limits(body, joint):\n if is_circular(body, joint):\n return CIRCULAR_LIMITS\n joint_info = get_joint_info(body, joint)\n return joint_info.jointLowerLimit, joint_info.jointUpperLimit",
"def getRange(self, c, name):\n self.validateChannel( name )\n limits = self.d[name].limits\n return limits",
"def control_limits(self) -> Tuple[torch.Tensor, torch.Tensor]:\n # define upper and lower limits based around the nominal equilibrium input\n # These are relaxed for now, but eventually they should be measured on hardware\n upper_limit = torch.ones(self.n_controls)\n upper_limit[TurtleBot2D.V] = 2.0\n upper_limit[TurtleBot2D.THETA_DOT] = 6.0 * np.pi\n\n lower_limit = torch.ones(self.n_controls)\n lower_limit[TurtleBot2D.V] = 0.0\n lower_limit[TurtleBot2D.THETA_DOT] = -6.0 * np.pi\n\n return (upper_limit, lower_limit)",
"def get_limits(self):\n return np.copy(self.limits)",
"def get_limits(self):\n return self._get('app_limits')",
"def _get_limits(robot, limit_type):\n target_ctrl_path = get_target_ctrl_path(robot)\n\n limits = {}\n\n # Check if the rig has attributes for the input limit type\n # If not, add the corresponding limit attributes\n # This is mostly used for backwards-compatibility\n\n if not pm.attributeQuery('axis{}Limits'.format(limit_type),\n n=target_ctrl_path, ex=True):\n add_limits_to_robot(robot, limit_type)\n\n # HARD CODED - Number of robot axes; should include external axes\n num_axes = 6\n\n # Create a list of robot's limits\n for i in range(num_axes):\n axis_number = i + 1 # Axis numbers are 1-indexed\n axis_name = 'Axis {}'.format(axis_number)\n limits[axis_name] = {'Min Limit': None, 'Max Limit': None}\n\n try:\n limit = pm.getAttr(target_ctrl_path + '.axis{}' \\\n '{}Limit'.format(axis_number, limit_type))\n except AttributeError:\n limit = None\n\n if limit:\n limits[axis_name] = {'Min Limit': -limit,\n 'Max Limit': limit}\n \n # TO-DO: Add external axes\n return limits",
"def limits_va(self, v):\n # acceleration limit\n if self._accel_max is not None:\n if (v - self._v_prev) / self._dt > self._accel_max:\n v = self._v_prev + self._accelmax * self._dt;\n elif (v - self._v_prev) / self._dt < -self._accel_max:\n v = self._v_prev - self._accel_max * self._dt;\n self._v_prev = v\n \n # speed limit\n if self._speed_max is not None:\n v = np.clip(v, -self._speed_max, self._speed_max)\n return v",
"def intervention_limits(self) -> Tuple[torch.Tensor, torch.Tensor]:\n upper_limit = torch.ones(self.n_controls)\n upper_limit[TurtleBot2D.V] = 1.0\n upper_limit[TurtleBot2D.THETA_DOT] = 2.0 * np.pi\n\n lower_limit = torch.ones(self.n_controls)\n lower_limit[TurtleBot2D.V] = -1.0\n lower_limit[TurtleBot2D.THETA_DOT] = -2.0 * np.pi\n\n return (upper_limit, lower_limit)",
"def get_max_speed(self):\n if self.mot_type == 'ims':\n return self.get_par(\"max_speed\")\n elif self.mot_type == 'xps8p':\n return self.get_par(\"max_speed_xps\")\n else:\n return self.get_par(\"max_speed\")",
"def acceleration_limit(self):\n return self._read(MX_ACCELERATION_LIMIT)",
"def process_limits(self):\n url = \"/api/investigate/v1/orgs/{}/processes/limits\".format(\n self.credentials.org_key\n )\n return self.get_object(url)"
] |
[
"0.7589826",
"0.73986447",
"0.7276931",
"0.7213175",
"0.69989234",
"0.69592965",
"0.6935034",
"0.684604",
"0.683837",
"0.68241745",
"0.6763293",
"0.6756409",
"0.6644776",
"0.66212046",
"0.65628123",
"0.65265524",
"0.6358509",
"0.6343373",
"0.6325908",
"0.6301501",
"0.62830347",
"0.6280302",
"0.6276842",
"0.6237235",
"0.6237144",
"0.6199693",
"0.6159706",
"0.6143387",
"0.613396",
"0.6133665"
] |
0.77723485
|
0
|
send the temp setpoint and status
|
def sendControls(self, status, tempset):
outString = str(status) + '?' + str(tempset) + '?control'
self.sendBytesToSTM(outString.encode("utf-8"))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def sendTemperature(self):\n if len(self.controller.myContainer.temperature) != 0:\n temp = sum(self.controller.myContainer.temperature) / len(self.controller.myContainer.temperature)\n else:\n temp = 0\n payload = ('{\"ts\": '+ str(int(time())) + ', \"temp\":' + str(temp) +\n '\"data\": { \"status\": ' + str(self.controller.status) + ', \"setpoint\": '+ str(self.controller.setpoint) + ' }}' )\n res, self.midTemp = self.client.publish(self.pubTemp, payload, qos=1, retain=False)\n if debug: print(\"Sent: \", payload , \"on\", self.pubTemp, \"mid: \", self.midTemp)\n self.controller.myContainer.resetTempAccumulators()\n\n filename = self.pubTemp.replace(\"/\", \"-\") + \".txt\"\n if self.storeTempLocal:\n f = open(filename, 'a+')\n f.write(self.lastTempPayload+\"\\n\")\n f.close()\n self.storeLocalTemp = True\n self.lastTempPayload = payload",
"def send_temp(context):\n job = context.job\n t1 = __sauna.control.getPortValue(\"Temperature Sensor 2\")\n t2 = float(\"{:.1f}\".format(t1))\n context.bot.send_message(job.context, text=\"Current Temp \" + str(t2) + \" Grad\")",
"async def send_temp_change(self, newtemp):\n if not self.connected:\n return\n\n # Check if the temp is valid for the heatmode\n if (newtemp < self.tmin[self.temprange][self.tempscale] or\n newtemp > self.tmax[self.temprange][self.tempscale]):\n self.log.error(\"Attempt to set temp outside of boundary of heatmode\")\n return\n\n data = bytearray(8)\n data[0] = M_START\n data[1] = 6\n data[2] = mtypes[BMTS_SET_TEMP][0]\n data[3] = mtypes[BMTS_SET_TEMP][1]\n data[4] = mtypes[BMTS_SET_TEMP][2]\n\n if self.tempscale == self.TSCALE_C:\n newtemp *= 2.0\n val = int(round(newtemp))\n data[5] = val\n data[6] = messages.Message.crc(data[1:6])\n data[7] = M_END\n\n self.writer.write(data)\n await self.writer.drain()",
"def handle_setpoint(call):\n _LOGGER.debug(\"Heatmeter init.py: call = %s\", call)\n \n temp = call.data.get(TEMPERATURE_NAME, TEMPERATURE_DEFAULT)\n _LOGGER.debug(\"Heatmeter init.py: temp = %s\", temp)\n\n\n try:\n data = {'username':hass.data[DOMAIN][CONF_USERNAME], \n 'password':hass.data[DOMAIN][CONF_PASSWORD]}\n\n _LOGGER.debug(\"Heatmeter handle_setpoint: data = %s\", data)\n\n url = ADMIN_URL.format(\n hass.data[DOMAIN][CONF_HOST], hass.data[DOMAIN][CONF_PORT]\n )\n _LOGGER.debug(\"Heatmeter handle_setpoint: ADMIN_URL = %s\", url)\n \n r = requests.post(url, data = data)\n if r.status_code == 200:\n _LOGGER.debug(\"Heatmeter handle_setpoint Status: %s\" % (r.text))\n _LOGGER.debug(\"Heatmeter handle_setpoint headers: %s\" % (r.headers))\n \n tokens = r.headers['set-cookie'].split(';')\n headers = {'Cookie': tokens[0] +';'}\n \n url = SET_URL.format(\n hass.data[DOMAIN][CONF_HOST], hass.data[DOMAIN][CONF_PORT], tokens[2] , temp\n )\n _LOGGER.debug(\"Heatmeter handle_setpoint: SET_URL = %s\", url)\n #url = 'http://smoker.lan/luci/;'+ tokens[2] + '/admin/lm/set?sp=' + temp\n r = requests.get(url, headers=headers)\n if r.status_code == 200:\n _LOGGER.info(\"Heatmeter handle_setpoint Setpoint updated: %s\" % (temp))\n\n except requests.exceptions.RequestException as e: # This is the correct syntax\n _LOGGER.error(\"Heatmeter handle_setpoint Post Connection error %s\" % (e))",
"def sendControls(self):\n\n if self.controller.status:\n mode = '\"cool3\"'\n temp = self.controller.setpoint\n else:\n mode = '\"off\"'\n temp = self.controller.setpoint\n\n payload = '{\"mode\": ' + mode + ', \"temp\": ' + str(temp) + '}'\n res, self.midControls = self.client.publish(self.pubControls, payload, qos=1, retain=False)\n if debug: print(\"Sent\", payload, \"on\", self.pubControls, \"mid: \", self.midControls)\n filename = self.pubTemp.replace(\"/\", \"-\") + \".txt\"\n if self.storeControlsLocal:\n f = open(filename, 'a+')\n f.write(self.lastControlsPayload+\"\\n\")\n f.close()\n self.storeControlsTemp = True\n self.lastControlsPayload = payload",
"def setTemps(self, serialNum, temp1=None, temp2=None):\r\n\r\n self._logger.debug(\"in API setTemps()...\")\r\n\r\n # format url parameters\r\n params = {\r\n \"actionID\": \"command\",\r\n \"command\": _SESSION_COMMAND_SET_TEMPS,\r\n \"serial\": serialNum,\r\n \"sessionID\": self._sessionID,\r\n } \r\n\r\n # add the temp1 and temp2 setpoint parameters if specified\r\n if temp1 is not None:\r\n params[\"temp1\"] = temp1 \r\n if temp2 is not None:\r\n params[\"temp2\"] = temp2\r\n\r\n # call the session API with the parameters\r\n response = self._call_api(_API_SESSION, params=params)\r\n \r\n if response and response.status_code == 200:\r\n\r\n return True\r\n\r\n # otherwise return error (False)\r\n else:\r\n return False",
"def settemp(t=-10):\n print camera.SetTemperature(t)\n camera.status.update()",
"def temp(update: Update, context: CallbackContext) -> None:\n t1 = __sauna.control.getPortValue(\"Temperature Sensor\")\n t2 = float(\"{:.1f}\".format(t1))\n update.message.reply_text(\"Current Temp \" + str(t2) + \" Grad\")",
"def output(self):\n return {\n \"device\": self.device.id, \n \"action\": \"SetCurrentSetpoint\", \n \"arguments\": [\n {\n \"name\": \"NewCurrentSetpoint\", \n \"value\": self.value\n }\n ], \n \"service\": \"urn:upnp-org:serviceId:TemperatureSetpoint1\"\n }",
"def invoke(self):\n base=\"data_request?id=action\"\n action = \"SetCurrentSetpoint\"\n svc = \"urn:upnp-org:serviceId:TemperatureSetpoint1\"\n path = \"%s&DeviceNum=%d&serviceId=%s&action=%s&NewCurrentSetpoint=%f&output_format=json\" \\\n % (base, self.device.id, svc, action, self.value)\n status = self.device.vera.get(path)\n\n job = Job()\n job.id = int(status[\"u:SetCurrentSetpointResponse\"][\"JobID\"])\n job.vera = self.device.vera\n return job",
"def send_status(self):\n self.data = {\n 'value': '',\n 'state': self.state,\n }\n event_manager.device_changed(self)",
"def _set_status(self, action, status):\n cmd = \"curl http://{}:{}@{}/{}s.cgi?led={}\".format(self.config['username'],\n self.config['password'],\n self.config['host'],\n action,\n status)\n self.log.info(\"PDU cmd: {}\".format(cmd))\n utils.start_standing_subprocess(cmd)\n time.sleep(10)",
"def update_out_temp(self):\n\t\tpass # Constant for now",
"def set_temp_compensation(self, temp: int = 25) -> str:\n response = 'ERROR'\n if type(temp) == float or int:\n response = self.query(f'T,{temp}')\n if temp < 10 or temp > 40:\n response = response + f'\\nNOTE: Unusual ocean temperature set: {temp} C.'\n else:\n print('Temp compensation factor should be a decimal/integer!')\n return response",
"def update_temp(self):\n\t\tcurrent_temp = self.thin.temperature\n\t\toutside_temp = self.outside.temperature\n\t\tself.thin.temperature = current_temp + 0.01*self.config.valve_coef*self.thin._actuation_value - self.config.out_temp_coef*(current_temp - outside_temp)",
"def set_temp_compensation(self, temp: int = 20) -> str:\n response = 'ERROR'\n if type(temp) == float or int:\n response = self.query(f'T,{temp}')\n if temp < 10 or temp > 40:\n response = response + f'\\nNOTE: Unusual ocean temperature set: {temp} C.'\n else:\n print('Temp compensation factor should be a decimal/integer!')\n return response",
"def set_temp_compensation(self, temp: int = 25) -> str:\n if type(temp) == float or int:\n response = self.query(f'T,{temp}')\n if temp < 10 or temp > 40:\n print(f'\\nNOTE: Unusual ocean temperature set: {temp} C.')\n else:\n print('Temp compensation factor should be a decimal/integer!')\n return response",
"def send_to_port(self):\r\n time.sleep(2)\r\n # ser.write(\"R\".encode())\r\n ser.flush()\r\n ser.write(\"{},{},{},{},{}\".format(self.x_Pos, self.y_Pos, self.t_Tap, self.U_on, self.u_off).encode())\r\n # ser.flush()\r\n # while (1 == 1):\r\n # mydata = ser.readline().lstrip()\r\n # print(mydata.decode('utf-8'))\r\n # value = str(mydata)\r",
"def set_temperature(self, **kwargs):\n self._target_temperature_low = kwargs.get(ATTR_TARGET_TEMP_LOW)\n self._target_temperature_high = kwargs.get(ATTR_TARGET_TEMP_HIGH)\n temp = kwargs.get(ATTR_TEMPERATURE)\n if self.current_operation == 'Heat & Cool' and self._target_temperature_low is not None \\\n and self._target_temperature_high is not None:\n self._api._heatto = self._target_temperature_low\n self._api._coolto = self._target_temperature_high\n elif temp is not None:\n if self.current_operation == 'Heat only':\n self._api._heatto = temp\n self._api._coolto = temp + 10\n elif self.current_operation == 'Cool only':\n self._api._heatto = temp - 10\n self._api._coolto = temp \n self._api.set()\n self.schedule_update_ha_state()",
"def _send(self):\n executor_id = self.status['executor_id']\n job_id = self.status['job_id']\n call_id = self.status['call_id']\n act_id = self.status['activation_id']\n\n if self.status['type'] == '__init__':\n init_key = create_init_key(executor_id, job_id, call_id, act_id)\n self.internal_storage.put_data(init_key, '')\n\n elif self.status['type'] == '__end__':\n status_key = create_status_key(executor_id, job_id, call_id)\n dmpd_response_status = json.dumps(self.status)\n drs = sizeof_fmt(len(dmpd_response_status))\n logger.info(\"Storing execution stats - Size: {}\".format(drs))\n self.internal_storage.put_data(status_key, dmpd_response_status)",
"async def async_set_temperature(self, **kwargs: Any) -> None:\n target_temp = kwargs.get(ATTR_TEMPERATURE)\n target_temp_low = kwargs.get(ATTR_TARGET_TEMP_LOW)\n target_temp_high = kwargs.get(ATTR_TARGET_TEMP_HIGH)\n if target_temp is not None:\n if self.hvac_mode == HVACMode.COOL:\n target_temp_high = target_temp\n if self.hvac_mode == HVACMode.HEAT:\n target_temp_low = target_temp\n if target_temp_low is not None:\n await self._node.set_climate_setpoint_heat(int(target_temp_low))\n # Presumptive setting--event stream will correct if cmd fails:\n self._target_temp_low = target_temp_low\n if target_temp_high is not None:\n await self._node.set_climate_setpoint_cool(int(target_temp_high))\n # Presumptive setting--event stream will correct if cmd fails:\n self._target_temp_high = target_temp_high\n self.async_write_ha_state()",
"def check_setpoints(self):\n # TODO: Can possibly put this in the CCBC Brains\n for heater in self.ard_data['heaters'].keys():\n current_temp = float(self.ard_data['tempsensors'][self.ard_data['heaters'][heater]['tsensor_name']]['value'])\n\n # Assign the pin_status the previous value from the previous iteration\n pin_status = self.ard_data['heaters'][heater]['status']\n\n if current_temp > self.ard_data['heaters'][heater]['upper limit']:\n pin_status = 'OFF'\n\n if current_temp < self.ard_data['heaters'][heater]['lower limit']:\n pin_status = 'ON'\n\n if current_temp >= self.ard_data['heaters'][heater]['maxtemp']:\n pin_status = 'OFF'\n\n self.ard_data['heaters'][heater]['status'] = pin_status\n\n for pump in self.ard_data['pumps'].keys():\n pressure = float(self.ard_data['presssensors'][self.ard_data['pumps'][pump]['psensor_name']]['pressure'])\n gallons = float(pressure * self.ard_data['pumps'][pump]['psi_to_gal_slope'] +\n self.ard_data['pumps'][pump]['psi_to_gal_intercept'])\n self.ard_data['pumps'][pump]['gallons'] = gallons\n\n # Assign the pin status the previous value from the previous cycle\n pin_status = self.ard_data['pumps'][pump]['status']\n\n if gallons > self.ard_data['pumps'][pump]['upper limit']:\n # Turn the pump off when the setpoint is above the setpoint\n pin_status = 'OFF'\n # TODO: Account for solenoid valve control when available\n\n if gallons < self.ard_data['pumps'][pump]['lower limit']:\n pin_status = 'ON'\n\n self.ard_data['pumps'][pump]['status'] = pin_status",
"def test_send(self):\n # Required to get useful test names\n super(TestCisPlyOutput_local, self).test_send()",
"def set_status(self, status):\n if status == 'qw':\n status = 'Waiting'\n elif status == 'hqw':\n status = 'Held'\n elif status == 'Eqw':\n status = 'Error'\n else:\n sys.exit(20)\n self.status = status\n return",
"def test_send(self):\n # Required to get useful test names\n super(TestCisPickleOutput_local, self).test_send()",
"def transfer_progress(self, stats):",
"def run(self):\n import pxp # want to have fresh instance ???!\n ans = {}\n pu.mdbg.log(\"PXPWORKER started ------>cmd:{} cookie:{}\".format(self.cmd, self.cookie))\n if (self.cmd=='tagset'):\n ans = pxp.tagset(self.param)\n elif (self.cmd=='tagmod'): \n ans = pxp.tagmod(self.param)\n elif (self.cmd=='teleset'): \n ans = pxp.teleset(self.param)\n elif (self.cmd=='sumset'): \n ans = pxp.sumset(self.param)\n elif (self.cmd=='sumget'): \n ans = pxp.sumget(self.param)\n elif (self.cmd=='rec_stat'):\n self.rec_stat = {}\n self.rec_stat = self.pxp_rec_stat()\n self.done = True\n ans['cookie'] = self.cookie\n pu.mdbg.log(\"PXPHeler finished ------>cmd:{} param:{}\".format(self.cmd, self.param))\n return\n \n ans['cookie'] = self.cookie\n #resp = pu.disk.sockSendWait(\"AUP|\"+json.dumps(ans), addnewline=True, timeout=1)\n pu.disk.sockSendWait(\"AUP|\"+json.dumps(ans), addnewline=True)\n self.done = True\n pu.mdbg.log(\"PXPHeler finished ------>cmd:{} cookie:{}\".format(self.cmd, self.cookie))",
"def pointstatus( pos, ants=0 ) :\n antlist = helpers.makeList( ants )\n s.pointStatus( pos, antlist )",
"def setTemperature(self, temp):\n with self.lock:\n self.temp = temp",
"def status(update: Update, context: CallbackContext) -> None:\n str_list = ['Sauna main power is ']\n if __sauna.control.getPortValue(\"Mains Sensor\") == 1:\n str_list.append('on.')\n else:\n str_list.append('OFF.')\n str_list.append('\\n')\n\n str_list.append('Sauna power switch is ')\n if __sauna.control.getPortValue(\"Power Sensor\") == 1:\n str_list.append('on.')\n else:\n str_list.append('OFF.')\n str_list.append('\\n')\n\n str_list.append('Sauna oven is currently ')\n if __sauna.control.getPortValue(\"Oven Sensor\") == 1:\n str_list.append('HEATING.')\n else:\n str_list.append('OFF.')\n str_list.append('\\n')\n\n str_list.append('Sauna light is ')\n if __sauna.control.getPortValue(\"Light Sensor\") == 1:\n str_list.append('on.')\n else:\n str_list.append('OFF.')\n str_list.append('\\n')\n\n t1 = __sauna.control.getPortValue(\"Temperature Sensor\")\n t2 = float(\"{:.1f}\".format(t1))\n str_list.append('Sauna temp is currently ' + str(t2) + ' C.\\n')\n\n temp_str = str(__sauna.control.getUpperLimit(\"Temperature Sensor\"))\n str_list.append('Sauna temp is going to ' + temp_str + ' C.\\n')\n update.message.reply_text(''.join(str_list))\n\n name = __sauna.pi_address\n update.message.reply_text(name)"
] |
[
"0.69648415",
"0.67290276",
"0.66760725",
"0.6419395",
"0.6197502",
"0.6032963",
"0.5922063",
"0.5901843",
"0.58650285",
"0.5830984",
"0.5789958",
"0.57530457",
"0.57461417",
"0.56979966",
"0.5695067",
"0.5682123",
"0.56808144",
"0.5653576",
"0.55845916",
"0.5564297",
"0.5558471",
"0.5554268",
"0.5552589",
"0.55366045",
"0.553619",
"0.5497909",
"0.5496935",
"0.5495229",
"0.5491592",
"0.5471635"
] |
0.7084275
|
0
|